ilog2
static int ilog2(int);
sblock.fs_bshift = ilog2(sblock.fs_bsize);
sblock.fs_fshift = ilog2(sblock.fs_fsize);
sblock.fs_fragshift = ilog2(sblock.fs_frag);
sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize);
zl.l_bs = ilog2(bsize);
z.zap_block_shift = ilog2(bsize);
z.zap_block_shift = ilog2(bsize);
z.zap_block_shift = ilog2(bsize);
KASSERT(ilog2(feature) <= ilog2(mask),
KASSERT(xstate_bv != 0 && ilog2(xstate_bv) <= ilog2(mask),
idx = ilog2(feature);
idx = ilog2(feature);
last_idx = ilog2(xstate_bv);
size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
int d = ilog2(_d); \
/* Number of significant index bits for hash table `name`: log2 of its bucket
 * count.  NOTE(review): meaningful only if HASH_SIZE(name) is a power of two —
 * confirm against the HASH_SIZE definition. */
#define HASH_BITS(name) ilog2(HASH_SIZE(name))
db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
pgshft = ilog2(umem->page_size);
((ilog2(pg_size) <<
req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
ilog2(ilt_client->page_size >> 12));
ilog2(ilt_client->page_size >> 12));
ilog2(ilt_client->page_size >> 12));
ilog2(ilt_client->page_size >> 12));
/* Upper-case alias for ilog2() so this file's call sites match the local
 * naming convention; no behavior of its own. */
#define ILOG2(x) ilog2(x)
if (page_size > ilog2(C4IW_MAX_PAGE_SIZE) - 12)
V_FW_RI_TPTE_PS(ilog2(wr->mr->page_size) - 12));
wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED))) {
if (!atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_STOPPED))) {
if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) {
if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR)))
V_WINDOW(ilog2(mw->mw_aperture) - 10));
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
v = V_INGPACKBOUNDARY(ilog2(pack) - 5);
int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
/* Encode an interrupt-coalescing packet threshold for the TBICR0 register:
 * the field holds log2(n) + 1, clipped to the field width by the mask.
 * NOTE(review): hardware field semantics assumed from the encoding — verify
 * against the ENETC reference manual. */
#define ENETC_TBICR0_SET_ICPT(n) ((ilog2(n) + 1) & ENETC_TBICR0_ICPT_MASK)
hlist_for_each_entry_rcu(o, &n[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(n)))],\
hlist_for_each_entry(o, &n[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(n)))],\
hlist_add_head_rcu(n, &h[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(h)))])
hlist_add_head(n, &h[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(h)))])
log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
uint32_t log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);
cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
cpu_to_be32((ilog2(nent) << 24) |
eq_context->log_eq_size = ilog2(eq->nent);
(ilog2(cache_line_size()) - 4) << 5;
MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
(ilog2(dev->caps.eqe_size) - 5)),
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
ilog2((u32)dev->caps.num_mtts /
profile[i].log_num = ilog2(profile[i].num);
init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
ilog2(mlx4_get_mgm_entry_size(dev));
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
ring->log_stride = ilog2(sizeof(struct mlx4_en_rx_desc));
ring->log_stride = ilog2(sizeof(struct mlx4_en_rx_desc));
ilog2((*umem)->page_size), &buf->mtt);
shift = ilog2(mr->umem->page_size);
shift = ilog2(mmr->umem->page_size);
ilog2(dev->dev->caps.max_gso_sz);
ilog2(dev->dev->caps.max_msg_sz);
context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size));
qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
qp->sq.wqe_shift = ilog2(64);
ilog2(qp->umem->page_size), &qp->mtt);
srq->msrq.wqe_shift = ilog2(desc_size);
ilog2(srq->umem->page_size), &srq->mtt);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
MLX5_SET(wq, rqc_wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) + sizeof(struct mlx5_wqe_data_seg)));
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
i = ilog2(eth_proto_oper);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
unsigned long umem_page_shift = ilog2(umem->page_size);
*offset = buf_off >> ilog2(off_size);
npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
qp->rq.wqe_shift = ilog2(wqe_size);
seg->log2_page_size = ilog2(mr->ibmr.page_size);
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
rwq->log_rq_size = ilog2(rwq->wqe_count);
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
srq->msrq.wqe_shift = ilog2(desc_size);
in.log_size = ilog2(srq->msrq.max);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));
qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
logsize = ilog2(srq->max);
srq->wqe_shift = ilog2(ds);
1 << (ilog2(sessionCtxSize - 1) + 1),
shift = ilog2(umem->page_size);
mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
mr->hw_mr.page_size_log = ilog2(mr->umem->page_size); /* for the MR pages */
ilog2(mr->ibmr.page_size) - 12);
mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
child->pn_clev = rounddown(ilog2(index ^ newind), PCTRIE_WIDTH);
slot = ilog2(parent->pn_popmap & ((1 << slot) - 1));
slot = ilog2(node->pn_popmap);
shift = ilog2(umem->page_size);
size = roundup2(size, 1 << ilog2(size));
return (ilog2(size) / 2 - 5);
sz = 1UL << (ilog2(size) & ~1);
tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
sz = 1 << (ilog2(size) & ~1);
*id |= ilog2(cells[1]);
taddr_size = ilog2(win_size) - 1;
/* Round n down to the nearest power of two (result has the same type as n).
 * Kept as a macro so it stays usable in constant expressions.
 * NOTE(review): result is undefined for n == 0, since ilog2(0) is undefined —
 * callers must guarantee n >= 1. */
#define rounddown_pow_of_two(n) ((__typeof(n))1 << ilog2(n))
/* Smallest order such that (1 << order) >= n, i.e. ceil(log2(n)), computed as
 * ilog2(2n - 1).  NOTE(review): `n` is evaluated twice — avoid arguments with
 * side effects.  Result for n == 0 depends on ilog2's handling of a negative
 * argument; callers should pass n >= 1. */
#define order_base_2(n) ilog2(2*(n)-1)
order = min(ilog2(diff), VM_NFREEORDER - 1);
max_order = min(ilog2(lo ^ (lo + npages)), VM_NFREEORDER - 1);
((PAGE_SIZE << ilog2(npages)) - 1)) == 0,
order = ilog2(npages);
((PAGE_SIZE << ilog2(npages)) - 1)) == 0,
unit->x86c.qi_buf_maxsz = ilog2(AMDIOMMU_CMDBUF_MAX / PAGE_SIZE);
qi_sz = ilog2(unit->x86c.inv_queue_size / PAGE_SIZE) + 8;
dtep->inttablen = ilog2(unit->irte_nentries);
sc->hw_ctrl |= (uint64_t)segnum_log << ilog2(AMDIOMMU_CTRL_DEVTABSEG_2);
(((uint64_t)0x8 + ilog2(unit->event_log_size /
qi_sz = ilog2(unit->x86c.inv_queue_size / PAGE_SIZE);
sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize);
static int ilog2(int);