bmask
unsigned long bmask, i;
bmask = current_thread_info()->gsr[0] >> 32UL;
unsigned long which = (bmask >> (i * 4)) & 0xf;
bmask(regs, insn);
u8 bmask; /* 35 */
u32 abits[GPC_MAX] = {}, amask = 0, bmask = 0;
bmask |= ((1 << bbits) - 1) << abits[gpc] << (gpc * 8);
nvkm_wr32(device, 0x406c00 + (i * 0x20), bmask);
u64 amask = 0, bmask = 0;
bmask |= (u64)pmask << (gpc * 8);
for (j = 0; j < gr->gpc_nr; j += 4, amask >>= 32, bmask >>= 32) {
nvkm_wr32(device, 0x406c00 + (i * 0x20) + j, bmask);
unsigned long bmask = mask;
for_each_set_bit(bit, &bmask, 16) {
u32 bmask, bmask_ext;
bmask = qp->modify_flags;
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
u_int bmask[32]; /* bitmask of bchannels for port */
static uint bmask[MAX_FRAGS];
module_param_array(bmask, uint, NULL, S_IRUGO | S_IWUSR);
if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
if (!((1 << ch) & hc->bmask[pt])) /* skip unused channel */
hc->bmask[pt] = bmask[bmask_cnt++];
if ((maskcheck & hc->bmask[pt])
|| (dmask[E1_cnt] & hc->bmask[pt])) {
maskcheck |= hc->bmask[pt];
E1_cnt + 1, ch, hc->bmask[pt]);
hc->bmask[0] = 0xfffefffe;
int bmask;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
int bmask;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
u8 bmask;
bmask = (1 << width) - 1;
bmask = ~0;
bmask = 0;
dst[idx] &= bmask;
u8 total = 0, bmask = 0xff;
bmask = (1 << rwidth) - 1;
total += value[idx] & bmask;
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
FIELD_PREP(OP_LDF_BMASK, bmask) |
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
struct flow_dissector_key_basic bmask;
b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
u16 bmask, val, tmp;
bmask = B43_NPHY_TXPCTL_CMD_COEFF |
bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
u16 bmask;
u8 wr_val = 0, rd_val, cmp_val, bmask;
bmask = cmp_val;
bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
if ((rd_val & bmask) == cmp_val)
unsigned long *bmask;
bmask = phba->sli4_hba.rpi_bmask;
bmask = phba->vpi_bmask;
bmask = phba->sli4_hba.xri_bmask;
bmask = phba->sli4_hba.vfi_bmask;
bmask = NULL;
kfree(bmask);
u8 bursts, bmask;
bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff);
if (bmask != 0xff)
bursts &= bmask;
unsigned int bmask = sb->s_blocksize - 1;
else if (interlaced && !((map->m_pa | map->m_plen) & bmask))
int bmask = 0, xsize;
bmask = JFS_SBI(sb)->bsize - 1;
xsize = (ssize + bmask) & ~bmask;
if ((x & bmask) < b)
return x & bmask;
return x & (bmask >> 1);
static inline int ceph_stable_mod(int x, int b, int bmask)
/* bmask[b] keeps the b most-significant bits of a byte (b = 0..7):
 * 0x00 = none, 0x80 = top 1 bit, 0xc0 = top 2, ..., 0xfe = top 7. */
static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe };
o = *out & bmask[b];