hweight64
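hweight64() (available through <linux/bitops.h>) returns the Hamming weight of a 64-bit value, i.e. the number of bits set. The call sites collected below all follow the same pattern: a hardware or capability bitmask is turned into a count (of counters, queues, cores, timeslots, banks, and so on), a size multiplier, or a parity bit. A minimal userspace sketch of that pattern, assuming a compiler builtin as a stand-in for the kernel helper; the names cntr_mask64 and sample_regs are illustrative only, loosely echoing fields seen in the listing:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's hweight64(): number of set bits
 * in a 64-bit word (the kernel version lives behind <linux/bitops.h>). */
static inline unsigned int hweight64(uint64_t w)
{
	return (unsigned int)__builtin_popcountll(w);
}

int main(void)
{
	/* Hypothetical capability mask: bits 0-3 and 32-47 mark usable counters. */
	uint64_t cntr_mask64 = 0x0000ffff0000000full;

	/* Count entries described by a mask, as in the PMU/queue call sites. */
	unsigned int num_counters = hweight64(cntr_mask64);

	/* Size a buffer from a register-selection mask, as in the perf call sites. */
	uint64_t sample_regs = 0xffull;				/* 8 registers selected */
	size_t sz = hweight64(sample_regs) * sizeof(uint64_t);

	/* Derive a parity bit, as in the "& 1" call sites. */
	unsigned int parity = hweight64(cntr_mask64) & 1;

	printf("counters=%u, sample bytes=%zu, parity=%u\n",
	       num_counters, sz, parity);
	return 0;
}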
ones = hweight64(imm);
if (hweight64(event->attr.branch_sample_type & ~PERF_SAMPLE_BRANCH_PLM_ALL) > 1) {
ret = hweight64(value);
if (hweight64(pmu_attr->pmu_type) == 1)
c2->weight = hweight64(c2->idxmsk64);
if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) ||
num > hweight64(hybrid(event->pmu, acr_cntr_mask64)))
new_weight = hweight64(new_mask);
check_weight = hweight64(check_mask);
c->weight = hweight64(c->idxmsk64);
c->weight = hweight64(c->idxmsk64);
sz += (hweight64(PEBS_DATACFG_CNTRS(pebs_data_cfg)) +
hweight64(PEBS_DATACFG_FIX(pebs_data_cfg))) *
return hweight64(caps);
return hweight64(hybrid(pmu, cntr_mask64));
return hweight64(hybrid(pmu, fixed_cntr_mask64));
if (hweight64(history) < STORM_BEGIN_THRESHOLD)
sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0));
if (hc->var_cnt != hweight64(valid_bank_mask))
if (hc->var_cnt != hweight64(valid_bank_mask))
u8 faulty_hbms = hweight64(hdev->dram_binning);
if (hweight64(hdev->tpc_binning) > MAX_CLUSTER_BINNING_FAULTY_TPCS) {
ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK);
return stm32fx_end_primary_clk - 1 + hweight64(table[0]) +
(BIT_ULL_WORD(secondary) >= 1 ? hweight64(table[1]) : 0) +
(BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0);
val = (hweight64(addr & cximsd->xormaps[i]) & 1);
src_sz = hweight64(srcvm_bits) * sizeof(*src);
mm->n_roots = hweight64(size);
int num_values = hweight64(supported_bits);
GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);
hweight64(core_mask),
hweight64(pfdev->features.shader_present));
ncoregroups = hweight64(pfdev->features.l2_present);
hweight64(core_mask),
hweight64(ptdev->gpu_info.shader_present));
const u8 shader_core_count = hweight64(ptdev->gpu_info.shader_present);
if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
hweight64(gt->info.engine_mask) : SZ_256;
if (hweight64(gt->info.engine_mask &
weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
u8 num_vls = hweight64(vl_select_mask);
num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
weight = hweight64((u64)bits);
nr_matches = hweight64(match_mask);
nr_diffs = hweight64(id_diff_mask) + hweight32(type_diff_mask);
n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
cbrcnt = hweight64(cch->cbr_allocation_map) *
return hweight64(gru->gs_cbr_map) >= cbr_au_count
&& hweight64(gru->gs_dsr_map) >= dsr_au_count
length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
hweight64(gru->gs_dsr_map));
cbrfree = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
dsrfree = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
return hweight64(ena_dev->customer_metrics.supported_metrics);
lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
num_iqueues = hweight64(resp->cfg_info.iqmask);
num_oqueues = hweight64(resp->cfg_info.oqmask);
vfs_referencing_pf = hweight64(vfs_mask1);
vfs_referencing_pf += hweight64(vfs_mask2);
num_iqueues = hweight64(resp->cfg_info.iqmask);
num_oqueues = hweight64(resp->cfg_info.oqmask);
res |= (hweight64(fold & (mask << i)) & 0x1) << i;
return hweight64(value) & 1;
if (hweight64(reg_val & port01_mask) & 1)
if (hweight64(reg_val & port23_mask) & 1)
count = hweight64(filtered);
trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
count = 1 + hweight64(ipa->filtered);
if (count < 1 + hweight64(ipa->filtered))
hweight64(ndev->db_valid_mask));
if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3)
dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
hweight64(config1) + hweight64(config2) > MAX_AXI_PORTS_OF_CHANNEL)
info->nb_tx_ts = hweight64(chan->tx_ts_mask);
info->nb_rx_ts = hweight64(chan->rx_ts_mask);
w_rx = hweight64(chan->rx_ts_mask);
w_tx = hweight64(chan->tx_ts_mask);
if (unlikely(hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1)) {
hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
type, hweight64(type),
if (hweight64(ack.mask & (COREDUMP_USERSPACE | COREDUMP_KERNEL |
return hweight64(realfree);
if (hweight64(allocmask) > 0) {
__entry->unavail = hweight64(iscan->__skipped_inomask);
irec->ir_freecount = hweight64(irec->ir_free);
return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
size += hweight64(mask) * sizeof(u64);
size += hweight64(mask) * sizeof(u64);
if (hweight64(diff) == 1) {
n_bits = hweight64(sset_mask);
if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
num_controls = hweight64(control_list);
num_pins = hweight64(pin_list);
control->values = devm_kcalloc(dev, hweight64(control->cn_list),
nconsts += hweight64(entity->controls[j].cn_list);
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
sample_size -= (hweight64(evsel->core.attr.sample_regs_user) + 1) * sizeof(u64);
sz = hweight64(mask) * sizeof(u64);
sz = hweight64(mask) * sizeof(u64);
sz = hweight64(sample->user_regs->mask) * sizeof(u64);
sz = hweight64(sample->intr_regs->mask) * sizeof(u64);
sz = hweight64(sample->user_regs->mask) * sizeof(u64);
sz = hweight64(sample->intr_regs->mask) * sizeof(u64);