HV_HYP_PAGE_SIZE
ghcb_va = memremap(ghcb_gpa, HV_HYP_PAGE_SIZE, MEMREMAP_WB | MEMREMAP_DEC);
memcpy_to_page(pg, 0, src, HV_HYP_PAGE_SIZE);
BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
} __packed __aligned(HV_HYP_PAGE_SIZE);
pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
(HV_HYP_PAGE_SIZE >> 1));
memset(vmbus_connection.monitor_pages[0], 0x00, HV_HYP_PAGE_SIZE);
memset(vmbus_connection.monitor_pages[1], 0x00, HV_HYP_PAGE_SIZE);
memremap(base, HV_HYP_PAGE_SIZE, MEMREMAP_WB | MEMREMAP_DEC);
memremap(base, HV_HYP_PAGE_SIZE, MEMREMAP_WB | MEMREMAP_DEC);
HV_HYP_PAGE_SIZE)
memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
HV_HYP_PAGE_SIZE, &recvlen, &requestid);
#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \
WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order));
(sg->length / HV_HYP_PAGE_SIZE) - 1;
if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
/*
 * Static scratch buffers, each sized to one Hyper-V hypervisor page.
 * NOTE(review): callers elsewhere pass multiples of HV_HYP_PAGE_SIZE to
 * vmbus_recvpacket — confirm these buffers are large enough for the
 * max_pkt_size configured on the channel.
 */
static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
(void)kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
hv_panic_page = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
mem = kmalloc_array(pgcount, HV_HYP_PAGE_SIZE, flags);
*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen, &requestid)) {
kvp_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 4;
/*
 * Maximum number of u64 entries that fit in one hypervisor page of
 * hypercall input, minus one slot — presumably reserved for the fixed
 * input header; confirm against the deposit hypercall input struct.
 */
#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1)
/* Maximum VSS message size: two hypervisor pages. */
#define VSS_MAX_PKT_SIZE (HV_HYP_PAGE_SIZE * 2)
if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
HV_HYP_PAGE_SIZE, &recvlen,
if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
/*
 * Ring-buffer sizes for the util channels: three hypervisor pages each,
 * passed through VMBUS_RING_SIZE() — presumably to add ring-header
 * overhead and alignment; see that macro's definition.
 */
#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
srv->recv_buffer = kmalloc_array(4, HV_HYP_PAGE_SIZE, GFP_KERNEL);
/*
 * Channel-id capacity of half a hypervisor page used as a bitmap:
 * (bytes in half a page) << 3 == one bit per channel.
 * NOTE(review): the half-page-bitmap reading is inferred from the
 * arithmetic — confirm against the interrupt-page layout.
 */
#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)
(HV_HYP_PAGE_SIZE / sizeof(union hv_register_value))
((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_registers)) \
done_count * HV_HYP_PAGE_SIZE;
range.start = region->start_uaddr + page_offset * HV_HYP_PAGE_SIZE;
range.end = range.start + page_count * HV_HYP_PAGE_SIZE;
static_assert(HV_HYP_PAGE_SIZE == MSHV_HV_PAGE_SIZE);
/* Pin pages in batches covering at most 0x10000000 bytes (256 MiB) each. */
#define MSHV_PIN_PAGES_BATCH_SIZE (0x10000000ULL / HV_HYP_PAGE_SIZE)
/* Number of u64 slots that fit in one hypervisor page per withdraw call. */
#define HV_WITHDRAW_BATCH_SIZE (HV_HYP_PAGE_SIZE / sizeof(u64))
((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
((HV_HYP_PAGE_SIZE - \
if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
mshv_field_nonzero(args, rsvd) || args.in_sz > HV_HYP_PAGE_SIZE)
if (args.out_ptr && (!args.out_sz || args.out_sz > HV_HYP_PAGE_SIZE))
p = kmalloc(2 * HV_HYP_PAGE_SIZE, GFP_KERNEL);
*outputarg = (char *)p + HV_HYP_PAGE_SIZE;
data_sz = HV_HYP_PAGE_SIZE;
data_sz = HV_HYP_PAGE_SIZE;
data_sz = HV_HYP_PAGE_SIZE;
HV_HYP_PAGE_SIZE,
if (hvcall.input_size > HV_HYP_PAGE_SIZE)
if (hvcall.output_size > HV_HYP_PAGE_SIZE)
memset(ring_info->ring_buffer, 0, HV_HYP_PAGE_SIZE);
dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
/*
 * One hypervisor page reserved for RNDIS extension data — presumably
 * per-packet scratch space; confirm usage in the netvsc send path.
 */
#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
.virt_boundary_mask = HV_HYP_PAGE_SIZE - 1,
dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
pdata->info.mem[INT_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
pdata->info.mem[MON_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
/*
 * Max GPA repetitions per modify hypercall: u64 slots in one hypervisor
 * page minus 2 — presumably the fixed input header occupies two slots;
 * confirm against the hypercall input struct.
 */
#define HV_MAX_MODIFY_GPA_REP_COUNT ((HV_HYP_PAGE_SIZE / sizeof(u64)) - 2)
/*
 * Mask that clears the byte offset within a hypervisor page.
 * Correct only if HV_HYP_PAGE_SIZE is a power of two.
 */
#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
u64 data[HV_HYP_PAGE_SIZE / sizeof(u64)];
(HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_partition_property_ex))
/*
 * Number of hypervisor pages per guest page; the guest PAGE_SIZE may be
 * larger than (never smaller than — see the BUILD_BUG_ON elsewhere in
 * this tree) the hypervisor page size.
 */
#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
/*
 * Round a byte count x up to a count of hypervisor page frames.
 * Assumes HV_HYP_PAGE_SIZE == (1 << HV_HYP_PAGE_SHIFT) — TODO confirm
 * both constants stay in sync where they are defined.
 */
#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
/*
 * Ring-buffer sizing: six hypervisor pages each for receive and send,
 * with an overall cap of 64 pages.  NOTE(review): "HVS" presumably
 * refers to hv_sock — confirm against the enclosing driver.
 */
#define RINGBUFFER_HVS_RCV_SIZE (HV_HYP_PAGE_SIZE * 6)
#define RINGBUFFER_HVS_SND_SIZE (HV_HYP_PAGE_SIZE * 6)
#define RINGBUFFER_HVS_MAX_SIZE (HV_HYP_PAGE_SIZE * 64)
sndbuf = ALIGN(sndbuf, HV_HYP_PAGE_SIZE);
rcvbuf = ALIGN(rcvbuf, HV_HYP_PAGE_SIZE);
(HV_HYP_PAGE_SIZE - sizeof(struct vmpipe_proto_header))
BUILD_BUG_ON(sizeof(*send_buf) != HV_HYP_PAGE_SIZE);