ALIGN
/* SPACE(sp): address of the first usable byte inside a stack_block —
 * steps over the (alignment-padded) block header.
 * NOTE(review): ALIGN() and struct stack_block are declared elsewhere in
 * this project; header size/rounding semantics assumed — confirm there. */
#define SPACE(sp) ((char*)(sp) + ALIGN(sizeof(struct stack_block)))
allocsize = ALIGN(sizeof(struct stack_block)) + ALIGN(nbytes);
nbytes = ALIGN(nbytes);
INT_MAX / 2 - ALIGN(sizeof(struct stack_block)))
min += ALIGN(sizeof(struct stack_block));
newlen -= ALIGN(sizeof(struct stack_block));
len = (char *)ALIGN(buf) - buf;
ptr = (char **)ALIGN(buf);
cp = (char *)ALIGN(buf) + nptr * sizeof(char *);
ift->ifa_data = data = (void *)ALIGN(data);
ift->ifa_data = data = (void *)ALIGN(data);
#ifndef ALIGN
len = (char *)ALIGN(buf) - buf;
cp = (char *)ALIGN(buf) + numptr * sizeof(char *);
nptr->n_aliases = (char **)ALIGN(buf);
len = (char *)ALIGN(buf) - buf;
cp = (char *)ALIGN(buf) + numptr * sizeof(char *);
pptr->p_aliases = (char **)ALIGN(buf);
size = ALIGN(size);
addrsize = ALIGN(hp->h_length);
for (; ((uintptr_t)s & ALIGN) && n && *s != c; s++, n--)
for (; (uintptr_t)s % ALIGN; s++)
/* L1_CACHE_ALIGN(x): round x up to a multiple of the L1 cache-line size.
 * The argument is parenthesized so expression arguments (e.g. a + b)
 * expand safely regardless of how ALIGN() is defined (CERT PRE01-C). */
#define L1_CACHE_ALIGN(x) ALIGN((x), CACHE_LINE_SIZE)
/* PTR_ALIGN(p, a): align pointer p to boundary a, preserving p's type.
 * __typeof(p) does not evaluate p, so p is evaluated exactly once (in the
 * uintptr_t cast) — safe for arguments with side effects.
 * NOTE(review): ALIGN() is defined elsewhere; presumably it rounds UP and
 * requires a power-of-two a if it masks — confirm at its definition. */
#define PTR_ALIGN(p, a) ((__typeof(p))ALIGN((uintptr_t)(p), (a)))
/* PAGE_ALIGN(x): round x up to the next page boundary.
 * The argument is parenthesized so expression arguments (e.g. len + off)
 * expand safely regardless of how ALIGN() is defined (CERT PRE01-C). */
#define PAGE_ALIGN(x) ALIGN((x), PAGE_SIZE)
(_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE)
size = ALIGN(size, sizeof(void *));
wqe_size = ALIGN(wqe_size, 32);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
ilsize = ALIGN(init_attr->cap.max_inline_data, sizeof(struct sq_sge));
npages = ALIGN(length, BIT(page_shift)) / BIT(page_shift);
sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
len = ALIGN((step * cnt), PAGE_SIZE);
wqe_size = ALIGN(wqe_size, sizeof(struct sq_sge));
sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
ALIGN(sbuf->size, BNXT_QPLIB_CMDQE_UNITS) /
sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
/* ALIGN_DOWN(x, a): round x DOWN to a multiple of a, built on the
 * round-up helper: aligning (x - (a - 1)) up lands on the greatest
 * multiple of a that is <= x.
 * NOTE(review): for unsigned x this assumes x >= a - 1, otherwise the
 * subtraction wraps; ALIGN() is defined elsewhere — presumably it needs
 * a power-of-two a if it masks. Confirm both at the ALIGN definition. */
#define ALIGN_DOWN(x, a) ALIGN((x) - ((a) - 1), (a))
return (size_t)((ALIGN(iova + umem->length, pgsz) -
newva = ALIGN(va, (unsigned long)mask + 1ULL);
wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
buf_size = ALIGN(q_depth * max_msg_size, PAGE_SIZE);
cq_size = ALIGN(cq_size, PAGE_SIZE);
*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
rq_size = ALIGN(rq_size, PAGE_SIZE);
cq_size = ALIGN(cq_size, PAGE_SIZE);
start = ALIGN(start, align);
ALIGN(sizeof(struct mlx4_vhcr_cmd),
ALIGN(sizeof(struct mlx4_vhcr),
ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);
ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
ALIGN(MLX4_IB_UD_HEADER_SIZE +
ALIGN(4 +
ALIGN(last_id - bulk_base_id + 1, 4));
req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
req.total_num_bfregs = ALIGN(req.total_num_bfregs,
int ndescs = ALIGN(max_num_sg, 4);
npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
sg->length = ALIGN(sizeof(u64) * n, 64);
pages_to_map = ALIGN(npages, page_index_alignment);
sg.length = ALIGN(npages * sizeof(u64),
seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
*size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
seg += ALIGN(left, 16);
*size += ALIGN(left, 16) / 16;
return cpu_to_be16(ALIGN(npages, 8) / 2);
int ndescs = ALIGN(mr->ndescs, 8) >> 1;
dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
wqe_size = ALIGN(sizeof(*data_klm), 64);
wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
aligned_size = ALIGN(size, PAGE_SIZE);
dlen = ALIGN(len) - len;
len = ALIGN(len);
n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
rt = IEEE80211_MALLOC(ALIGN(sizeof(struct ieee80211_mesh_route)) +
rt->rt_priv = (void *)ALIGN(&rt[1]);
gr = IEEE80211_MALLOC(ALIGN(sizeof(struct ieee80211_mesh_gate_route)),
gr = IEEE80211_MALLOC(ALIGN(sizeof(struct ieee80211_mesh_gate_route)),
max_linkhdr_grow(ALIGN(hdrlen));
min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
ALIGN(next_size, sizeof(struct ib_sge)));
(U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
ALIGN(sizeof *next, sizeof (struct ib_sge)));
if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
ALIGN(new_used, sizeof(*pbundle->internal_buffer));
pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer));
ptr += ALIGN(((struct sockaddr *)ptr)->sa_len);
ptr += ALIGN(((struct sockaddr *)ptr)->sa_len);
int sa_len = ALIGN(((struct sockaddr *)ptr)->sa_len);
ptr += ALIGN(((struct sockaddr *)ptr)->sa_len);
ptr += ALIGN(((struct sockaddr *)ptr)->sa_len);
sdl = (struct sockaddr_dl *)(ALIGN(sin->sin6_len) + (char *)sin);
sdl = (struct sockaddr_dl *)(ALIGN(sin->sin6_len) + (char *)sin);
ALIGN(sin->sin6_len));
p = (char *)ALIGN(p);
p = (char *)ALIGN(p);
p = (char *)ALIGN(p);
p = (char *)ALIGN(p);