/* XEN_PAGE_SIZE: call sites and derived constants, collected from the kernel tree */
if (size + cflush.offset > XEN_PAGE_SIZE)
cflush.length = XEN_PAGE_SIZE - cflush.offset;
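/*
 * A minimal standalone sketch (not the kernel code) of the clamp above,
 * which appears to come from the Arm cache-flush path: a flush request
 * that would cross a 4 KiB Xen granule is trimmed to the end of the
 * current granule, and the caller loops to cover the remainder. Plain
 * userspace C; the 4096 constant stands in for XEN_PAGE_SIZE.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096UL

static unsigned long flush_chunk(unsigned long offset, unsigned long size)
{
        if (size + offset > XEN_PAGE_SIZE)      /* would cross the granule */
                return XEN_PAGE_SIZE - offset;  /* trim to granule end */
        return size;
}

int main(void)
{
        /* a 6000-byte flush at offset 1000 is issued as 3096 + 2904 bytes */
        printf("first chunk: %lu\n", flush_chunk(1000, 6000));
        return 0;
}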
if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
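/*
 * Sketch of the bound the two checks above enforce: XEN_PAGE_SIZE >> 9 is
 * the number of 512-byte sectors in one Xen granule (4096 >> 9 == 8), so a
 * valid segment must satisfy 0 <= first_sect <= last_sect <= 7. Standalone
 * C for illustration, not blkback itself.
 */
#include <stdbool.h>
#include <stdio.h>

#define XEN_PAGE_SIZE 4096U

static bool seg_valid(unsigned int first_sect, unsigned int last_sect)
{
        return last_sect < (XEN_PAGE_SIZE >> 9) && first_sect <= last_sect;
}

int main(void)
{
        printf("%d %d\n", seg_valid(0, 7), seg_valid(0, 8));   /* 1 0 */
        return 0;
}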
#define XEN_PAGES_PER_SEGMENT (PAGE_SIZE / XEN_PAGE_SIZE)
(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
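/*
 * Worked numbers for the two constants above, under the usual assumptions:
 * XEN_PAGE_SIZE is 4096 and struct blkif_request_segment is 8 bytes (a
 * 32-bit grant reference plus two sector bytes, padded). Each indirect
 * page then holds 4096 / 8 = 512 segment descriptors, and a 64 KiB kernel
 * page spans 65536 / 4096 = 16 Xen granules per segment. The struct below
 * is a model of that layout, not the canonical header definition.
 */
#include <stdint.h>
#include <stdio.h>

#define XEN_PAGE_SIZE 4096U

struct blkif_request_segment {  /* modeled layout, see the hedge above */
        uint32_t gref;
        uint8_t first_sect, last_sect;
};      /* padded to 8 bytes on common ABIs */

int main(void)
{
        printf("segs per indirect frame: %zu\n",
               XEN_PAGE_SIZE / sizeof(struct blkif_request_segment));
        printf("granules per 64K page: %u\n", 65536U / XEN_PAGE_SIZE);
        return 0;
}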
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
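/*
 * What the __RING_SIZE / __CONST_RING_SIZE uses above compute, in
 * standalone form: the largest power-of-two number of entries that fits in
 * the shared area once the ring header is subtracted. The 64-byte header
 * and 112-byte entry sizes below are illustrative assumptions, not the
 * real blkif layout.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096UL

static unsigned long rd_pow2(unsigned long n)   /* round down to power of 2 */
{
        unsigned long p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

int main(void)
{
        unsigned long hdr = 64, entry = 112, nr_ring_pages = 4;
        unsigned long bytes = XEN_PAGE_SIZE * nr_ring_pages;

        /* e.g. (16384 - 64) / 112 = 145 entries, rounded down to 128 */
        printf("ring entries: %lu\n", rd_pow2((bytes - hdr) / entry));
        return 0;
}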
#define GRANTS_PER_PSEG (PAGE_SIZE / XEN_PAGE_SIZE)
(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
lim->max_hw_sectors = (segments * XEN_PAGE_SIZE) / 512;
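/*
 * The arithmetic above in concrete numbers: with the default 11 segments
 * per request (BLKIF_MAX_SEGMENTS_PER_REQUEST) and 4 KiB granules, one
 * request moves at most 11 * 4096 / 512 = 88 sectors, i.e. 44 KiB.
 * Standalone sketch; the value of `segments` is assumed for illustration.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096U

int main(void)
{
        unsigned int segments = 11;     /* BLKIF_MAX_SEGMENTS_PER_REQUEST */

        printf("max_hw_sectors: %u\n", segments * XEN_PAGE_SIZE / 512);
        return 0;
}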
if (XEN_PAGE_SIZE != PAGE_SIZE) {
XEN_PAGE_SIZE, PAGE_SIZE);
XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
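/*
 * Why the "+ 1" above: a 64 KiB payload that does not start on a granule
 * boundary touches one extra granule, so 65536 / 4096 + 1 = 17 slots are
 * needed in the worst case. A standalone check of that worst case.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long len = 65536, offset = 1;  /* worst-case misalignment */

        printf("granules touched: %lu\n",
               DIV_ROUND_UP(offset + len, XEN_PAGE_SIZE));      /* 17 */
        return 0;
}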
len > XEN_PAGE_SIZE / sizeof(*mapping))
if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);
if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
amount = XEN_PAGE_SIZE - cop->dest.offset;
chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);
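/*
 * The pattern shared by the copy checks and the chunking loop above: a
 * grant copy may not cross a granule boundary on either side, so a copy
 * that would overrun the destination granule is clamped and the remainder
 * becomes a second operation. Standalone model using plain offsets instead
 * of real GNTTABOP_copy descriptors.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096UL

int main(void)
{
        unsigned long dest_off = 3000, len = 2000;
        unsigned long first = len, second = 0;

        if (dest_off + len > XEN_PAGE_SIZE) {           /* would overrun */
                first = XEN_PAGE_SIZE - dest_off;       /* fill this granule */
                second = len - first;                   /* continue in next */
        }
        printf("split %lu into %lu + %lu\n", len, first, second); /* 1096 + 904 */
        return 0;
}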
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
rx->offset + rx->status > XEN_PAGE_SIZE)) {
unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
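/*
 * Concrete numbers for the XDP limits above, assuming the kernel's usual
 * 256-byte XDP_PACKET_HEADROOM: an XDP program gets at most one granule
 * per packet, so the largest supported MTU drops to 4096 - 256 = 3840
 * bytes. Standalone arithmetic check.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096UL
#define XDP_PACKET_HEADROOM 256UL       /* kernel default, assumed here */

int main(void)
{
        printf("max XDP mtu: %lu\n", XEN_PAGE_SIZE - XDP_PACKET_HEADROOM);
        return 0;
}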
XEN_PAGE_SIZE);
if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
memset(info->intf, 0, XEN_PAGE_SIZE);
nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
#if XEN_PAGE_SIZE == PAGE_SIZE
#define EVENT_WORDS_PER_PAGE (XEN_PAGE_SIZE / sizeof(event_word_t))
if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
len = min(len, (size_t)XEN_PAGE_SIZE - off);
len = min(len, (size_t)XEN_PAGE_SIZE - off);
free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
goffset += XEN_PAGE_SIZE;
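/*
 * Sketch of the iteration the two lines above belong to: when the kernel
 * page is larger than a granule (e.g. 64 KiB pages on arm64), each page is
 * walked one 4 KiB granule at a time and the callback sees one grant per
 * granule. Standalone model; pfn_to_gfn() is replaced by an identity map.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096UL
#define PAGE_SIZE 65536UL               /* assume a 64 KiB kernel page */

typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned long goffset,
                               unsigned long len);

static void foreach_grant_in_page(unsigned long first_xen_pfn,
                                  xen_grant_fn_t fn)
{
        unsigned long goffset;

        for (goffset = 0; goffset < PAGE_SIZE; goffset += XEN_PAGE_SIZE)
                fn(first_xen_pfn + goffset / XEN_PAGE_SIZE, goffset,
                   XEN_PAGE_SIZE);
}

static void show(unsigned long gfn, unsigned long goffset, unsigned long len)
{
        printf("gfn %lu offset %lu len %lu\n", gfn, goffset, len);
}

int main(void)
{
        foreach_grant_in_page(100, show);       /* 16 grants for one page */
        return 0;
}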
.grefs_per_grant_frame = XEN_PAGE_SIZE /
.grefs_per_grant_frame = XEN_PAGE_SIZE /
vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
st->va += XEN_PAGE_SIZE * nr;
BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
FRONT_RING_INIT(&bedata->ring, sring, XEN_PAGE_SIZE);
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
size = ALIGN(size, XEN_PAGE_SIZE);
size = ALIGN(size, XEN_PAGE_SIZE);
unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
addr += XEN_PAGE_SIZE;
free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);
area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
XEN_PAGE_SIZE, MEMREMAP_WB);
XEN_PAGE_SIZE, MEMREMAP_WB);
len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
#define XENDISPL_EVENT_PAGE_SIZE XEN_PAGE_SIZE
#define XENSND_EVENT_PAGE_SIZE XEN_PAGE_SIZE
#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE-1))
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
#define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT)
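/*
 * Worked example of the helpers above for a 64 KiB kernel page size:
 * XEN_PFN_PER_PAGE = 65536 / 4096 = 16, and XEN_PFN_UP rounds a byte count
 * up to whole granules. Standalone C; XEN_PAGE_SHIFT is assumed to be 12,
 * matching the 4 KiB granule used throughout this listing.
 */
#include <stdio.h>

#define XEN_PAGE_SHIFT 12
#define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT)
#define PAGE_SIZE 65536UL       /* illustrative 64 KiB kernel page */
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
#define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT)

int main(void)
{
        printf("xen pfns per page: %lu\n", XEN_PFN_PER_PAGE);           /* 16 */
        printf("granules for 5000 bytes: %lu\n", XEN_PFN_UP(5000));     /* 2 */
        return 0;
}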
if (XEN_PAGE_SIZE != PAGE_SIZE) {
XEN_PAGE_SIZE, PAGE_SIZE);
XEN_FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);