L1_CACHE_ALIGN
threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
/*
 * Size of struct xpc_rsvd_page rounded up to an L1 cache-line boundary,
 * so whatever data follows the reserved-page header starts cache-aligned.
 * NOTE(review): line taken out of its original file; confirm surrounding
 * layout assumptions in the XPC driver before changing.
 */
#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \
if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return (void *)L1_CACHE_ALIGN((u64)*base);
DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return (void *)L1_CACHE_ALIGN((u64)*base);
buf_len = L1_CACHE_ALIGN(len);
end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);
arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
L1_CACHE_ALIGN(pkt_len),
L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
/*
 * Per-frame RX buffer size: maximum packet size plus (ENET_RX_ALIGN - 1)
 * bytes of alignment slack, rounded up to an L1 cache line.
 * NOTE(review): the extra ENET_RX_ALIGN - 1 presumably leaves room to
 * realign the buffer start at runtime — confirm against the RX path.
 */
#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
aligned_head = L1_CACHE_ALIGN(fifo->head);
align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
/* Driver-local alias: round @x up to an L1 cache-line boundary. */
#define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x)
L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN)
L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
/*
 * Layout of the driver's single contiguous per-host memory region.
 * Each section is placed at a cache-line-aligned offset past the previous
 * one: SCRIPT, then message-out / message-in / status arrays (each
 * MSG_ARRAY_SIZE bytes), then the command-slot array.  TOTAL_MEM_SIZE is
 * the overall allocation size.
 * NOTE(review): offsets are cumulative — keep the chain consistent if any
 * section size changes.
 */
#define MSGOUT_OFFSET (L1_CACHE_ALIGN(sizeof(SCRIPT)))
#define MSGIN_OFFSET (MSGOUT_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE))
#define STATUS_OFFSET (MSGIN_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE))
#define SLOTS_OFFSET (STATUS_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE))
#define TOTAL_MEM_SIZE (SLOTS_OFFSET + L1_CACHE_ALIGN(sizeof(struct NCR_700_command_slot) * NCR_700_COMMAND_SLOTS_PER_HOST))
mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos *
L1_CACHE_ALIGN(pinfo->tx_nrfifos *
L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize);
rx_size = L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize);
tx_size = L1_CACHE_ALIGN(qe_port->tx_nrfifos * qe_port->tx_fifosize);
#ifndef L1_CACHE_ALIGN
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
/*
 * Worst-case TCP header room: 128 bytes on top of MAX_HEADER, rounded up
 * to an L1 cache line.  NOTE(review): the 128-byte figure is a fixed
 * budget defined here, not derived — confirm it still covers maximal TCP
 * options before reuse.
 */
#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));
hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
resv = L1_CACHE_ALIGN(resv);
packet_size = L1_CACHE_ALIGN(packet_size);