DIV_ROUND_UP
/*
 * 64-bit ceiling division: casts the dividend to unsigned long long so the
 * addition and division inside DIV_ROUND_UP are performed in 64-bit
 * arithmetic (the divisor is promoted by the usual arithmetic conversions).
 */
#define DIV_ROUND_UP_ULL(x, n) DIV_ROUND_UP((unsigned long long)(x), (n))
curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC,
#ifndef DIV_ROUND_UP
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
/* Number of PBL pages needed to map the legacy fence buffer (ceiling of bytes / PAGE_SIZE). */
#define BNXT_RE_LEGACY_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_LEGACY_FENCE_BYTES, PAGE_SIZE)
bytes = DIV_ROUND_UP(max, 8);
line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
#ifndef DIV_ROUND_UP
(DIV_ROUND_UP(BXE_L2_CID_COUNT(sc), ILT_PAGE_CIDS))
/* ILT line counts, each a ceiling division of total size by per-line capacity. */
#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
i = DIV_ROUND_UP(len, SF_SEC_SIZE);
i = DIV_ROUND_UP(size ? size : len, SF_SEC_SIZE);
i = DIV_ROUND_UP(len, SF_SEC_SIZE);
u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(TLS_KEY_WR_SZ, 16)) |
DIV_ROUND_UP(TLS_KEY_WR_SZ - sizeof(struct work_request_hdr), 16));
i = DIV_ROUND_UP(size, SF_SEC_SIZE);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
*len16 = DIV_ROUND_UP(size, 16);
*len16 = DIV_ROUND_UP(size, 16);
*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
*len16 = DIV_ROUND_UP(sizeof wqe->recv +
*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
(w)->wr_mid = htonl(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
(w)->wr.wr_mid = htonl(V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
cpl->mpdu = htons(DIV_ROUND_UP(mss, 4));
cpl->burst_size = htonl(DIV_ROUND_UP(burst_size, 4));
cpl->mpdu = htons(DIV_ROUND_UP(mss, 4));
cpl->burst = htonl(DIV_ROUND_UP(burst_size, 4));
pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size);
dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
dseg += DIV_ROUND_UP(4 + MIN_PKT_LEN, DS_SIZE_ALIGNMENT);
dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT);
num_pkts = DIV_ROUND_UP(payload_len, mss);
pad = DIV_ROUND_UP(ds_cnt, DS_FACT);
tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT);
ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
const u32 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_qos_remap_wqe),
iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
/* Compile-time check: DIV_ROUND_UP(2, MLX5_SEND_WQEBB_NUM_DS) must equal 1,
 * i.e. two data segments fit within a single WQEBB — TODO confirm against
 * the code that relies on this sizing. */
CTASSERT(DIV_ROUND_UP(2, MLX5_SEND_WQEBB_NUM_DS) == 1);
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
num_pkts = DIV_ROUND_UP(payload_len, mss);
num_pkts = DIV_ROUND_UP(payload_len, mss);
ds_cnt += DIV_ROUND_UP(args.ihs - sizeof(wqe->eth.inline_hdr_start),
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
*ncont = DIV_ROUND_UP(i, (1 << m));
qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
/* Data-segment count for a MACsec ASO WQE: ceiling of the WQE size over one DS unit. */
#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
#ifndef DIV_ROUND_UP
DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) -
size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
len = DIV_ROUND_UP(p_map->max_count,
len = DIV_ROUND_UP(p_map->max_count,
total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
total_lines = DIV_ROUND_UP(p_seg->total_size,
*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
/* Convert a size in bits to a dword count, rounding up (assumes DIV_ROUND_UP
 * parenthesizes its parameters, as the conventional definition does). */
#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); i++) {
trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);
tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);
/*
 * Memory (in 4KB units) required for a PQ of pq_size elements, or 0 for an
 * empty PQ. One extra element is reserved beyond pq_size.
 *
 * Fix: parenthesize every use of the macro parameter. The previous expansion
 * used bare `pq_size` in the ternary condition and in `(pq_size + 1)`, so an
 * argument containing a lower-precedence operator (e.g. `a | b`) expanded to
 * `a | (b + 1)` instead of `((a | b) + 1)`.
 */
#define QM_PQ_MEM_4KB(pq_size) ((pq_size) ? DIV_ROUND_UP(((pq_size) + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
/*
 * Encode a PQ size as (256-byte units - 1), or 0 for an empty PQ.
 *
 * Fix: parenthesize the macro parameter in the ternary condition; the bare
 * `pq_size ?` form mis-parses for arguments built from low-precedence
 * operators (CERT PRE01-C). DIV_ROUND_UP already parenthesizes its own
 * parameters, but `(pq_size)` is used there too for uniform hygiene.
 */
#define QM_PQ_SIZE_256B(pq_size) ((pq_size) ? DIV_ROUND_UP((pq_size), 0x100) - 1 : 0)
DIV_ROUND_UP(p_params->retry_interval,
u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
max_retries = DIV_ROUND_UP(max_retries, 1000);
DIV_ROUND_UP(max_count, (sizeof(unsigned long) * 8));
num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
sel = DIV_ROUND_UP(min_uvolt - range->min_uvolt,
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
#ifndef DIV_ROUND_UP
npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);