arch/alpha/boot/bootp.c
167
initrd_start = ((START_ADDR + 5*KERNEL_SIZE + PAGE_SIZE) |
arch/alpha/boot/bootp.c
168
(PAGE_SIZE-1)) + 1;
arch/alpha/boot/bootp.c
177
move_stack(initrd_start - PAGE_SIZE);
arch/alpha/boot/bootp.c
206
memset((char*)ZERO_PGE, 0, PAGE_SIZE);
arch/alpha/boot/bootpz.c
183
#define NEXT_PAGE(a) (((a) | (PAGE_SIZE - 1)) + 1)
arch/alpha/boot/bootpz.c
255
NEXT_PAGE(K_COPY_IMAGE_START + KERNEL_SIZE + PAGE_SIZE)
arch/alpha/boot/bootpz.c
387
uncompressed_image_start += PAGE_SIZE;
arch/alpha/boot/bootpz.c
388
uncompressed_image_end += PAGE_SIZE;
arch/alpha/boot/bootpz.c
389
initrd_image_start += PAGE_SIZE;
arch/alpha/boot/bootpz.c
441
move_stack(initrd_image_start - PAGE_SIZE);
arch/alpha/boot/bootpz.c
451
memset((char*)ZERO_PGE, 0, PAGE_SIZE);
arch/alpha/boot/bootpz.c
86
for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE)
arch/alpha/boot/main.c
148
"r" (PAGE_SIZE + INIT_STACK));
arch/alpha/include/asm/ptrace.h
15
((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
arch/alpha/include/asm/ptrace.h
18
((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1)
arch/alpha/include/asm/shmparam.h
5
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
arch/alpha/include/asm/thread_info.h
51
#define THREAD_SIZE (2*PAGE_SIZE)
arch/alpha/kernel/core_cia.c
349
static int page[PAGE_SIZE/4]
arch/alpha/kernel/core_cia.c
350
__attribute__((aligned(PAGE_SIZE)))
arch/alpha/kernel/core_cia.c
391
bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);
arch/alpha/kernel/core_cia.c
465
temp = cia_readl(bus_addr + 4*PAGE_SIZE);
arch/alpha/kernel/core_cia.c
489
temp = cia_readl(bus_addr + 5*PAGE_SIZE);
arch/alpha/kernel/core_cia.c
513
temp = cia_readl(bus_addr + 6*PAGE_SIZE);
arch/alpha/kernel/core_irongate.c
366
for(baddr = addr; baddr <= last; baddr += PAGE_SIZE)
arch/alpha/kernel/core_irongate.c
383
baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
arch/alpha/kernel/core_irongate.c
389
pte, PAGE_SIZE, 0)) {
arch/alpha/kernel/core_marvel.c
745
baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
arch/alpha/kernel/core_marvel.c
756
PAGE_SIZE, 0)) {
arch/alpha/kernel/core_marvel.c
868
aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
arch/alpha/kernel/core_marvel.c
879
aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
arch/alpha/kernel/core_marvel.c
880
agp->aperture.size = aper->pg_count * PAGE_SIZE;
arch/alpha/kernel/core_titan.c
521
baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
arch/alpha/kernel/core_titan.c
532
PAGE_SIZE, 0)) {
arch/alpha/kernel/core_titan.c
602
aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
arch/alpha/kernel/core_titan.c
612
aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
arch/alpha/kernel/core_titan.c
613
agp->aperture.size = aper->pg_count * PAGE_SIZE;
arch/alpha/kernel/osf_sys.c
1251
addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
arch/alpha/kernel/osf_sys.c
537
return PAGE_SIZE;
arch/alpha/kernel/pci_iommu.c
274
npages = iommu_num_pages(paddr, size, PAGE_SIZE);
arch/alpha/kernel/pci_iommu.c
287
for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
arch/alpha/kernel/pci_iommu.c
290
ret = arena->dma_base + dma_ofs * PAGE_SIZE;
arch/alpha/kernel/pci_iommu.c
374
if (dma_ofs * PAGE_SIZE >= arena->size) {
arch/alpha/kernel/pci_iommu.c
382
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
arch/alpha/kernel/pci_iommu.c
566
npages = iommu_num_pages(paddr, size, PAGE_SIZE);
arch/alpha/kernel/pci_iommu.c
579
out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
arch/alpha/kernel/pci_iommu.c
602
npages = iommu_num_pages(paddr, size, PAGE_SIZE);
arch/alpha/kernel/pci_iommu.c
605
for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
arch/alpha/kernel/pci_iommu.c
66
mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
arch/alpha/kernel/pci_iommu.c
760
npages = iommu_num_pages(addr, size, PAGE_SIZE);
arch/alpha/kernel/ptrace.c
75
(PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
arch/alpha/kernel/ptrace.c
78
(PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
arch/alpha/kernel/setup.c
272
start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
arch/alpha/kernel/srm_env.c
101
if (count >= PAGE_SIZE)
arch/alpha/kernel/srm_env.c
73
ret = callback_getenv(id, page, PAGE_SIZE);
arch/alpha/mm/init.c
157
kernel_end = two_pages + 2*PAGE_SIZE;
arch/alpha/mm/init.c
158
memset(two_pages, 0, 2*PAGE_SIZE);
arch/alpha/mm/init.c
165
pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
arch/alpha/mm/init.c
180
vm_area_register_early(&console_remap_vm, PAGE_SIZE);
arch/alpha/mm/init.c
194
memset(kernel_end, 0, PAGE_SIZE);
arch/alpha/mm/init.c
197
kernel_end += PAGE_SIZE;
arch/alpha/mm/init.c
202
vaddr += PAGE_SIZE;
arch/alpha/mm/init.c
227
memset(absolute_pointer(ZERO_PGE), 0, PAGE_SIZE);
arch/alpha/mm/init.c
81
memset(swapper_pg_dir, 0, PAGE_SIZE);
arch/arc/include/asm/elf.h
42
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/arc/include/asm/highmem.h
45
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
arch/arc/include/asm/page.h
24
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
arch/arc/include/asm/page.h
26
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
arch/arc/include/asm/pgtable.h
24
extern char empty_zero_page[PAGE_SIZE];
arch/arc/include/asm/pgtable.h
27
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
arch/arc/include/asm/shmparam.h
10
#define SHMLBA (2 * PAGE_SIZE)
arch/arc/include/asm/thread_info.h
24
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/arc/include/uapi/asm/page.h
28
#define PAGE_MASK (~(PAGE_SIZE-1))
arch/arc/kernel/arc_hostlink.c
16
static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
arch/arc/kernel/setup.c
588
seq_printf(m, arc_cpu_mumbojumbo(cpu_id, &info, str, PAGE_SIZE));
arch/arc/mm/cache.c
120
p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
arch/arc/mm/cache.c
137
p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
arch/arc/mm/cache.c
463
const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
arch/arc/mm/cache.c
499
const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
arch/arc/mm/cache.c
797
if (tot_sz > PAGE_SIZE) {
arch/arc/mm/cache.c
827
off = kstart % PAGE_SIZE;
arch/arc/mm/cache.c
830
sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
arch/arc/mm/cache.c
857
__ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
arch/arc/mm/cache.c
866
__dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
arch/arc/mm/highmem.c
55
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
arch/arc/mm/highmem.c
58
__func__, PAGE_SIZE, PAGE_SIZE);
arch/arc/mm/init.c
167
BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
arch/arc/mm/init.c
168
BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
arch/arc/mm/init.c
169
BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
arch/arc/mm/init.c
170
BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
arch/arc/mm/init.c
21
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
arch/arc/mm/init.c
22
char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
arch/arc/mm/tlb.c
221
if (unlikely((end - start) >= PAGE_SIZE * 32)) {
arch/arc/mm/tlb.c
238
start += PAGE_SIZE;
arch/arc/mm/tlb.c
257
if (unlikely((end - start) >= PAGE_SIZE * 32)) {
arch/arc/mm/tlb.c
267
start += PAGE_SIZE;
arch/arc/mm/tlb.c
646
if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
arch/arc/mm/tlb.c
647
panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
arch/arm/common/locomo.c
380
lchip->base = ioremap(mem->start, PAGE_SIZE);
arch/arm/common/sa1111.c
828
sachip->base = ioremap(mem->start, PAGE_SIZE * 2);
arch/arm/include/asm/cacheflush.h
241
__cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
arch/arm/include/asm/fixmap.h
7
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
arch/arm/include/asm/hardware/memc.h
7
#define VDMA_ALIGNMENT PAGE_SIZE
arch/arm/include/asm/highmem.h
72
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
arch/arm/include/asm/kfence.h
34
addr += PAGE_SIZE) {
arch/arm/include/asm/page-nommu.h
11
#define clear_page(page) memset((page), 0, PAGE_SIZE)
arch/arm/include/asm/page-nommu.h
12
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
arch/arm/include/asm/page.h
158
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
arch/arm/include/asm/pgalloc.h
32
#define PGD_SIZE (PAGE_SIZE << 2)
arch/arm/include/asm/pgtable.h
18
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/arm/include/asm/pgtable.h
69
#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
arch/arm/include/asm/shmparam.h
10
#define SHMLBA (4 * PAGE_SIZE) /* attach addr a multiple of this */
arch/arm/include/asm/thread_info.h
25
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/arm/include/asm/tlb.h
43
__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
arch/arm/kernel/asm-offsets.c
116
DEFINE(PAGE_SZ, PAGE_SIZE);
arch/arm/kernel/atags_compat.c
155
params->u1.s.pages_in_bank[i] * PAGE_SIZE);
arch/arm/kernel/atags_compat.c
158
tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);
arch/arm/kernel/atags_compat.c
96
if (params->u1.s.page_size != PAGE_SIZE) {
arch/arm/kernel/atags_parse.c
47
{ 1, PAGE_SIZE, 0xff },
arch/arm/kernel/crash_dump.c
27
vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
arch/arm/kernel/hibernate.c
92
static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
arch/arm/kernel/patch.c
79
twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2;
arch/arm/kernel/process.c
316
gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
arch/arm/kernel/process.c
421
vma = _install_special_mapping(mm, addr, PAGE_SIZE,
arch/arm/kernel/process.c
436
arm_install_vdso(mm, addr + PAGE_SIZE);
arch/arm/kernel/setup.c
294
aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
arch/arm/kernel/setup.c
808
size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
arch/arm/kernel/signal.c
657
PAGE_SIZE / sizeof(u32));
arch/arm/kernel/signal.c
668
flush_icache_range(ptr, ptr + PAGE_SIZE);
arch/arm/kernel/traps.c
570
unsigned long chunk = min(PAGE_SIZE, end - start);
arch/arm/kernel/traps.c
874
for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
arch/arm/kernel/traps.c
887
flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
arch/arm/kernel/vdso.c
190
page = virt_to_page(vdso_start + i * PAGE_SIZE);
arch/arm/kernel/vdso.c
222
addr += VDSO_NR_PAGES * PAGE_SIZE;
arch/arm/lib/uaccess_with_memcpy.c
248
ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);
arch/arm/lib/uaccess_with_memcpy.c
250
for (size = PAGE_SIZE; size >= 4; size /= 2) {
arch/arm/lib/uaccess_with_memcpy.c
259
for (size = PAGE_SIZE; size >= 4; size /= 2) {
arch/arm/mach-hisi/platsmp.c
110
virt = ioremap(start_addr, PAGE_SIZE);
arch/arm/mach-omap1/sram-init.c
107
omap_sram_start = ROUND_DOWN(omap_sram_start, PAGE_SIZE);
arch/arm/mach-omap1/sram-init.c
124
pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
arch/arm/mach-omap1/sram-init.c
71
pages = PAGE_ALIGN(size) / PAGE_SIZE;
arch/arm/mach-omap2/omap4-common.c
141
sram_sync = (void __iomem *)gen_pool_alloc(sram_pool, PAGE_SIZE);
arch/arm/mach-omap2/omap4-common.c
151
dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
arch/arm/mach-omap2/sram.c
200
omap_sram_start = ROUND_DOWN(omap_sram_start, PAGE_SIZE);
arch/arm/mach-omap2/sram.c
217
pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
arch/arm/mach-omap2/sram.c
93
pages = PAGE_ALIGN(size) / PAGE_SIZE;
arch/arm/mach-pxa/am200epd.c
234
padding_size = PAGE_SIZE + (4 * fw);
arch/arm/mach-rpc/dma.c
64
if (end > PAGE_SIZE)
arch/arm/mach-rpc/dma.c
65
end = PAGE_SIZE;
arch/arm/mach-rpc/include/mach/isa-dma.h
23
#define IOMD_DMA_BOUNDARY (PAGE_SIZE - 1)
arch/arm/mach-rpc/include/mach/uncompress.h
116
unsigned int nr_pages = 0, page_size = PAGE_SIZE;
arch/arm/mach-rpc/include/mach/uncompress.h
129
page_size = PAGE_SIZE;
arch/arm/mach-rpc/include/mach/uncompress.h
130
nr_pages += (t->u.mem.size / PAGE_SIZE);
arch/arm/mach-rpc/riscpc.c
48
vram_size += PAGE_SIZE * 256;
arch/arm/mach-rpc/riscpc.c
51
vram_size += PAGE_SIZE * 256;
arch/arm/mach-shmobile/platsmp-scu.c
37
shmobile_scu_base = ioremap(scu_base_phys, PAGE_SIZE);
arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
206
irqc = ioremap(IRQC_BASE, PAGE_SIZE);
arch/arm/mach-shmobile/setup-rcar-gen2.c
95
base = ioremap(0xe6080000, PAGE_SIZE);
arch/arm/mach-shmobile/setup-sh73a0.c
29
l2x0_init(ioremap(0xf0100000, PAGE_SIZE), 0x00400000, 0xc20f0fff);
arch/arm/mach-shmobile/smp-emev2.c
35
smu = ioremap(EMEV2_SMU_BASE, PAGE_SIZE);
arch/arm/mach-shmobile/smp-sh73a0.c
35
void __iomem *cpg2 = ioremap(CPG_BASE2, PAGE_SIZE);
arch/arm/mach-shmobile/smp-sh73a0.c
55
ap = ioremap(AP_BASE, PAGE_SIZE);
arch/arm/mach-shmobile/smp-sh73a0.c
56
sysc = ioremap(SYSC_BASE, PAGE_SIZE);
arch/arm/mm/cache-feroceon-l2.c
162
if (range_end > (start | (PAGE_SIZE - 1)) + 1)
arch/arm/mm/cache-feroceon-l2.c
163
range_end = (start | (PAGE_SIZE - 1)) + 1;
arch/arm/mm/cache-l2x0-pmu.c
346
return snprintf(buf, PAGE_SIZE, "config=0x%x\n", lattr->config);
arch/arm/mm/copypage-fa.c
34
: "2" (PAGE_SIZE / 32)
arch/arm/mm/copypage-fa.c
74
: "0" (kaddr), "I" (PAGE_SIZE / 32)
arch/arm/mm/copypage-feroceon.c
62
: "2" (PAGE_SIZE)
arch/arm/mm/copypage-feroceon.c
99
: "0" (kaddr), "I" (PAGE_SIZE / 32)
arch/arm/mm/copypage-v4mc.c
105
: "0" (kaddr), "I" (PAGE_SIZE / 64)
arch/arm/mm/copypage-v4mc.c
60
: "2" (PAGE_SIZE / 64)
arch/arm/mm/copypage-v4wb.c
43
: "2" (PAGE_SIZE / 64)
arch/arm/mm/copypage-v4wb.c
84
: "0" (kaddr), "I" (PAGE_SIZE / 64)
arch/arm/mm/copypage-v4wt.c
39
: "2" (PAGE_SIZE / 64)
arch/arm/mm/copypage-v4wt.c
77
: "0" (kaddr), "I" (PAGE_SIZE / 64)
arch/arm/mm/copypage-v6.c
62
"r" ((unsigned long)kto + PAGE_SIZE - 1)
arch/arm/mm/copypage-xsc3.c
60
: "2" (PAGE_SIZE / 64 - 1)
arch/arm/mm/copypage-xsc3.c
96
: "0" (kaddr), "I" (PAGE_SIZE / 32)
arch/arm/mm/copypage-xscale.c
126
: "0" (kaddr), "I" (PAGE_SIZE / 32)
arch/arm/mm/copypage-xscale.c
80
: "2" (PAGE_SIZE / 64 - 1)
arch/arm/mm/dma-mapping.c
1029
if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
arch/arm/mm/dma-mapping.c
118
memset(ptr, 0, PAGE_SIZE);
arch/arm/mm/dma-mapping.c
120
dmac_flush_range(ptr, ptr + PAGE_SIZE);
arch/arm/mm/dma-mapping.c
1221
iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
arch/arm/mm/dma-mapping.c
123
size -= PAGE_SIZE;
arch/arm/mm/dma-mapping.c
1502
if (bitmap_size > PAGE_SIZE) {
arch/arm/mm/dma-mapping.c
1503
extensions = bitmap_size / PAGE_SIZE;
arch/arm/mm/dma-mapping.c
1504
bitmap_size = PAGE_SIZE;
arch/arm/mm/dma-mapping.c
647
if (len + offset > PAGE_SIZE)
arch/arm/mm/dma-mapping.c
648
len = PAGE_SIZE - offset;
arch/arm/mm/dma-mapping.c
706
if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
arch/arm/mm/dma-mapping.c
707
struct folio *folio = pfn_folio(paddr / PAGE_SIZE);
arch/arm/mm/dma-mapping.c
924
__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
arch/arm/mm/dump.c
258
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
arch/arm/mm/dump.c
318
addr = start + i * PAGE_SIZE;
arch/arm/mm/dump.c
32
{ VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
arch/arm/mm/fault-armv.c
54
(pfn << PAGE_SHIFT) + PAGE_SIZE);
arch/arm/mm/fault.c
176
if (addr < PAGE_SIZE) {
arch/arm/mm/flush.c
214
i * PAGE_SIZE);
arch/arm/mm/flush.c
215
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
arch/arm/mm/flush.c
222
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
arch/arm/mm/flush.c
274
start += offset * PAGE_SIZE;
arch/arm/mm/flush.c
276
if (start + nr * PAGE_SIZE > vma->vm_end)
arch/arm/mm/flush.c
277
nr = (vma->vm_end - start) / PAGE_SIZE;
arch/arm/mm/flush.c
410
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
arch/arm/mm/flush.c
48
: "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
arch/arm/mm/flush.c
55
unsigned long offset = vaddr & (PAGE_SIZE - 1);
arch/arm/mm/init.c
125
unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;
arch/arm/mm/init.c
244
BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
arch/arm/mm/init.c
245
BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
arch/arm/mm/init.c
426
start = round_down(start, PAGE_SIZE);
arch/arm/mm/init.c
428
end = round_up(end, PAGE_SIZE);
arch/arm/mm/ioremap.c
114
return vmap_page_range(virt, virt + PAGE_SIZE, phys,
arch/arm/mm/ioremap.c
436
set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
arch/arm/mm/kasan_init.c
107
kasan_alloc_block(PAGE_SIZE);
arch/arm/mm/kasan_init.c
139
void *p = kasan_alloc_block(PAGE_SIZE);
arch/arm/mm/kasan_init.c
293
set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
arch/arm/mm/kasan_init.c
302
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
arch/arm/mm/kasan_init.c
53
next = addr + PAGE_SIZE;
arch/arm/mm/kasan_init.c
59
p = kasan_alloc_block_raw(PAGE_SIZE);
arch/arm/mm/kasan_init.c
65
memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
arch/arm/mm/mmu.c
1147
map.length = PAGE_SIZE;
arch/arm/mm/mmu.c
1333
#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
arch/arm/mm/mmu.c
1375
vectors = early_alloc(PAGE_SIZE * 2);
arch/arm/mm/mmu.c
1419
map.length = PAGE_SIZE;
arch/arm/mm/mmu.c
1429
map.length = PAGE_SIZE * 2;
arch/arm/mm/mmu.c
1436
map.virtual = 0xffff0000 + PAGE_SIZE;
arch/arm/mm/mmu.c
1437
map.length = PAGE_SIZE;
arch/arm/mm/mmu.c
1745
map.length = PAGE_SIZE;
arch/arm/mm/mmu.c
416
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
arch/arm/mm/mmu.c
48
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
arch/arm/mm/mmu.c
772
} while (pte++, addr += PAGE_SIZE, addr != end);
arch/arm/mm/nommu.c
180
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
arch/arm/mm/nommu.c
34
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
arch/arm/mm/nommu.c
98
memblock_reserve(vectors_base, 2 * PAGE_SIZE);
arch/arm/mm/pageattr.c
104
return __change_memory_common(addr, PAGE_SIZE * numpages,
arch/arm/mm/pageattr.c
108
return __change_memory_common(addr, PAGE_SIZE * numpages,
arch/arm/mm/pageattr.c
58
unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
arch/arm/mm/pmsa-v7.c
464
err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
arch/arm/mm/pmsa-v8.c
266
subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE);
arch/arm/mm/pmsa-v8.c
267
subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE);
arch/arm/mm/pmsa-v8.c
301
err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE);
arch/arm64/include/asm/assembler.h
524
tst \src, #(PAGE_SIZE - 1)
arch/arm64/include/asm/elf.h
123
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/arm64/include/asm/fixmap.h
46
FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,
arch/arm64/include/asm/hugetlb.h
87
__flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, 3);
arch/arm64/include/asm/hugetlb.h
90
__flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, TLBI_TTL_UNKNOWN);
arch/arm64/include/asm/kernel-pgtable.h
58
#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(SWAPPER_PGTABLE_LEVELS, KIMAGE_VADDR, _end, EXTRA_PAGE) \
arch/arm64/include/asm/kernel-pgtable.h
62
#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + EARLY_IDMAP_EXTRA_PAGES) * PAGE_SIZE)
arch/arm64/include/asm/kernel-pgtable.h
65
#define INIT_IDMAP_FDT_SIZE ((INIT_IDMAP_FDT_PAGES + EARLY_IDMAP_EXTRA_FDT_PAGES) * PAGE_SIZE)
arch/arm64/include/asm/kvm_pkvm.h
185
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
arch/arm64/include/asm/kvm_pkvm.h
85
start = ALIGN_DOWN(start, PAGE_SIZE);
arch/arm64/include/asm/kvm_pkvm.h
86
end = ALIGN(end, PAGE_SIZE);
arch/arm64/include/asm/memory.h
207
#define RESERVED_SWAPPER_OFFSET (PAGE_SIZE)
arch/arm64/include/asm/memory.h
213
#define TRAMP_SWAPPER_OFFSET (2 * PAGE_SIZE)
arch/arm64/include/asm/memory.h
412
u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \
arch/arm64/include/asm/memory.h
417
u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \
arch/arm64/include/asm/mte-def.h
10
#define MTE_GRANULES_PER_PAGE (PAGE_SIZE / MTE_GRANULE_SIZE)
arch/arm64/include/asm/pgtable-hwdef.h
92
#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
arch/arm64/include/asm/pgtable.h
1124
return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
arch/arm64/include/asm/pgtable.h
113
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/arm64/include/asm/pgtable.h
1349
case PAGE_SIZE:
arch/arm64/include/asm/pgtable.h
1370
return __ptep_get_and_clear_anysz(mm, address, ptep, PAGE_SIZE);
arch/arm64/include/asm/pgtable.h
1381
addr += PAGE_SIZE;
arch/arm64/include/asm/pgtable.h
1394
addr += PAGE_SIZE;
arch/arm64/include/asm/pgtable.h
1442
for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
arch/arm64/include/asm/pgtable.h
1482
addr += PAGE_SIZE;
arch/arm64/include/asm/pgtable.h
1612
return PAGE_SIZE == SZ_4K;
arch/arm64/include/asm/pgtable.h
681
case PAGE_SIZE:
arch/arm64/include/asm/pgtable.h
715
__set_ptes_anysz(mm, addr, ptep, pte, nr, PAGE_SIZE);
arch/arm64/include/asm/pgtable.h
788
#define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
arch/arm64/include/asm/pgtable.h
996
return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
arch/arm64/include/asm/processor.h
67
#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
arch/arm64/include/asm/shmparam.h
13
#define COMPAT_SHMLBA (4 * PAGE_SIZE)
arch/arm64/include/asm/tlbflush.h
341
(uaddr & PAGE_MASK) + PAGE_SIZE);
arch/arm64/include/asm/tlbflush.h
355
(uaddr & PAGE_MASK) + PAGE_SIZE);
arch/arm64/include/asm/tlbflush.h
535
__flush_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid,
arch/arm64/include/asm/tlbflush.h
551
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
arch/arm64/include/asm/tlbflush.h
556
const unsigned long stride = PAGE_SIZE;
arch/arm64/include/asm/tlbflush.h
592
__flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
arch/arm64/include/asm/tlbflush.h
71
switch (PAGE_SIZE) {
arch/arm64/include/asm/vmalloc.h
32
return PAGE_SIZE;
arch/arm64/include/asm/vmalloc.h
35
return PAGE_SIZE;
arch/arm64/include/asm/vmalloc.h
38
return PAGE_SIZE;
arch/arm64/include/asm/vmalloc.h
41
return PAGE_SIZE;
arch/arm64/include/asm/vmalloc.h
54
return pte_valid_cont(__ptep_get(ptep)) ? CONT_PTE_SIZE : PAGE_SIZE;
arch/arm64/kernel/crash_dump.c
23
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/arm64/kernel/efi.c
23
if (PAGE_SIZE == EFI_PAGE_SIZE)
arch/arm64/kernel/elfcore.c
32
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
arch/arm64/kernel/image.h
55
__HEAD_FLAG(PAGE_SIZE) | \
arch/arm64/kernel/machine_kexec.c
100
memset(vaddr, 0, PAGE_SIZE);
arch/arm64/kernel/machine_kexec.c
294
for (addr = begin; addr < end; addr += PAGE_SIZE) {
arch/arm64/kernel/machine_kexec.c
80
kimage->segment[i].memsz / PAGE_SIZE);
arch/arm64/kernel/mte.c
486
tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);
arch/arm64/kernel/mte.c
79
ret = memcmp(addr1, addr2, PAGE_SIZE);
arch/arm64/kernel/patching.c
119
size = min_t(size_t, PAGE_SIZE - offset_in_page(ptr),
arch/arm64/kernel/pi/map_kernel.c
136
memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE);
arch/arm64/kernel/pi/map_kernel.c
202
static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE);
arch/arm64/kernel/pi/map_kernel.c
43
phys_addr_t pgdp = (phys_addr_t)init_pg_dir + PAGE_SIZE;
arch/arm64/kernel/pi/map_range.c
36
u64 lmask = (PAGE_SIZE << lshift) - 1;
arch/arm64/kernel/pi/map_range.c
93
phys_addr_t ptep = (phys_addr_t)pg_dir + PAGE_SIZE; /* MMU is off */
arch/arm64/kernel/probes/kprobes.c
49
addr = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
arch/arm64/kernel/process.c
783
sp -= get_random_u32_below(PAGE_SIZE);
arch/arm64/kernel/smp.c
165
cpu, PAGE_SIZE / SZ_1K);
arch/arm64/kernel/sys_compat.c
29
unsigned long chunk = min(PAGE_SIZE, end - start);
arch/arm64/kernel/vdso.c
112
vdso_mapping_len = vdso_text_len + VDSO_NR_PAGES * PAGE_SIZE;
arch/arm64/kernel/vdso.c
127
vdso_base += VDSO_NR_PAGES * PAGE_SIZE;
arch/arm64/kernel/vdso.c
212
memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
arch/arm64/kernel/vdso.c
256
ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
arch/arm64/kernel/vdso.c
270
addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
arch/arm64/kernel/vdso.c
280
ret = _install_special_mapping(mm, addr, PAGE_SIZE,
arch/arm64/kvm/arm.c
855
if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
arch/arm64/kvm/guest.c
1072
length -= PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/ffa.c
242
if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
arch/arm64/kvm/hyp/nvhe/ffa.c
358
if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
arch/arm64/kvm/hyp/nvhe/ffa.c
378
if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
arch/arm64/kvm/hyp/nvhe/ffa.c
424
if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
arch/arm64/kvm/hyp/nvhe/ffa.c
486
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
arch/arm64/kvm/hyp/nvhe/ffa.c
611
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
arch/arm64/kvm/hyp/nvhe/ffa.c
761
if (min_rxtx_sz > PAGE_SIZE)
arch/arm64/kvm/hyp/nvhe/ffa.c
855
if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
arch/arm64/kvm/hyp/nvhe/ffa.c
971
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/ffa.c
973
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/ffa.c
977
.len = PAGE_SIZE *
arch/arm64/kvm/hyp/nvhe/mem_protect.c
1254
u64 size = PAGE_SIZE << selftest_page->order;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
1257
u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE };
arch/arm64/kvm/hyp/nvhe/mem_protect.c
1295
size = PAGE_SIZE << selftest_page->order;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
173
WARN_ON(size != (PAGE_SIZE << get_order(size)));
arch/arm64/kvm/hyp/nvhe/mem_protect.c
185
hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
arch/arm64/kvm/hyp/nvhe/mem_protect.c
201
memset(addr, 0, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
222
size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
223
va = PTR_ALIGN_DOWN(va, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
227
size_t map_size = PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
733
u64 size = PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
760
u64 size = PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
79
WARN_ON(size != (PAGE_SIZE << get_order(size)));
arch/arm64/kvm/hyp/nvhe/mem_protect.c
790
u64 size = PAGE_SIZE * nr_pages;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
821
u64 size = PAGE_SIZE * nr_pages;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
851
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
869
for (cur = start; cur < end; cur += PAGE_SIZE) {
arch/arm64/kvm/hyp/nvhe/mem_protect.c
874
(void *)cur + PAGE_SIZE,
arch/arm64/kvm/hyp/nvhe/mem_protect.c
887
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
894
for (cur = start; cur < end; cur += PAGE_SIZE) {
arch/arm64/kvm/hyp/nvhe/mem_protect.c
897
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
908
u64 size = PAGE_SIZE * nr_pages;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
926
u64 size = PAGE_SIZE * nr_pages;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
946
*size = PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/mm.c
125
for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
arch/arm64/kvm/hyp/nvhe/mm.c
129
err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
arch/arm64/kvm/hyp/nvhe/mm.c
156
start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mm.c
312
return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
arch/arm64/kvm/hyp/nvhe/mm.c
371
*size = PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/mm.c
392
ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
arch/arm64/kvm/hyp/nvhe/mm.c
396
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
arch/arm64/kvm/hyp/nvhe/mm.c
414
start = ALIGN_DOWN(start, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mm.c
417
end = ALIGN(end, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/page_alloc.c
100
memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
arch/arm64/kvm/hyp/nvhe/page_alloc.c
39
addr ^= (PAGE_SIZE << order);
arch/arm64/kvm/hyp/nvhe/pkvm.c
856
for (void *start = addr; start < addr + size; start += PAGE_SIZE)
arch/arm64/kvm/hyp/nvhe/pkvm.c
910
unmap_donated_memory_noclear(addr, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/setup.c
222
return host_stage2_idmap_locked(phys, PAGE_SIZE, KVM_PGTABLE_PROT_R);
arch/arm64/kvm/hyp/nvhe/setup.c
224
return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
arch/arm64/kvm/hyp/nvhe/tlb.c
218
stride = PAGE_SIZE;
arch/arm64/kvm/hyp/pgtable.c
1096
.phys = ALIGN_DOWN(phys, PAGE_SIZE),
arch/arm64/kvm/hyp/pgtable.c
1574
pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
arch/arm64/kvm/hyp/pgtable.c
1597
return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
arch/arm64/kvm/hyp/pgtable.c
1658
pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
arch/arm64/kvm/hyp/pgtable.c
272
.start = ALIGN_DOWN(addr, PAGE_SIZE),
arch/arm64/kvm/hyp/pgtable.c
273
.addr = ALIGN_DOWN(addr, PAGE_SIZE),
arch/arm64/kvm/hyp/pgtable.c
316
ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
arch/arm64/kvm/hyp/pgtable.c
317
PAGE_SIZE, &walker);
arch/arm64/kvm/hyp/pgtable.c
456
.phys = ALIGN_DOWN(phys, PAGE_SIZE),
arch/arm64/kvm/hyp/vhe/tlb.c
166
stride = PAGE_SIZE;
arch/arm64/kvm/mmu.c
1032
phys_addr_t size = PAGE_SIZE * memslot->npages;
arch/arm64/kvm/mmu.c
1195
for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
arch/arm64/kvm/mmu.c
1202
ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, addr, PAGE_SIZE,
arch/arm64/kvm/mmu.c
1208
pa += PAGE_SIZE;
arch/arm64/kvm/mmu.c
1337
if (map_size == PAGE_SIZE)
arch/arm64/kvm/mmu.c
1344
size = memslot->npages * PAGE_SIZE;
arch/arm64/kvm/mmu.c
1420
return PAGE_SIZE;
arch/arm64/kvm/mmu.c
1430
return PAGE_SIZE;
arch/arm64/kvm/mmu.c
1603
kvm_prepare_memory_fault_exit(vcpu, fault_ipa, PAGE_SIZE,
arch/arm64/kvm/mmu.c
1628
ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, PAGE_SIZE,
arch/arm64/kvm/mmu.c
1732
max_map_size = force_pte ? PAGE_SIZE : PUD_SIZE;
arch/arm64/kvm/mmu.c
1749
else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
arch/arm64/kvm/mmu.c
1750
max_map_size = PAGE_SIZE;
arch/arm64/kvm/mmu.c
1752
force_pte = (max_map_size == PAGE_SIZE);
arch/arm64/kvm/mmu.c
1870
if (vma_pagesize == PAGE_SIZE && !(force_pte || s2_force_noncacheable)) {
arch/arm64/kvm/mmu.c
1871
if (fault_is_perm && fault_granule > PAGE_SIZE)
arch/arm64/kvm/mmu.c
2305
hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
arch/arm64/kvm/mmu.c
2307
hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
arch/arm64/kvm/mmu.c
355
phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
arch/arm64/kvm/mmu.c
553
start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
arch/arm64/kvm/mmu.c
555
for (cur = start; cur < end; cur += PAGE_SIZE) {
arch/arm64/kvm/mmu.c
573
start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
arch/arm64/kvm/mmu.c
575
for (cur = start; cur < end; cur += PAGE_SIZE) {
arch/arm64/kvm/mmu.c
607
for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
arch/arm64/kvm/mmu.c
611
err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
arch/arm64/kvm/nested.c
1318
kvm_prepare_memory_fault_exit(vcpu, vt->wr.pa, PAGE_SIZE,
arch/arm64/kvm/nested.c
1573
switch (PAGE_SIZE) {
arch/arm64/kvm/nested.c
1591
switch (PAGE_SIZE) {
arch/arm64/kvm/pkvm.c
176
kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
arch/arm64/kvm/pkvm.c
290
return m->gfn * PAGE_SIZE;
arch/arm64/kvm/pkvm.c
295
return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
arch/arm64/kvm/pkvm.c
370
if (size != PAGE_SIZE && size != PMD_SIZE)
arch/arm64/kvm/pkvm.c
382
if (size == (mapping->nr_pages * PAGE_SIZE))
arch/arm64/kvm/pkvm.c
392
ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot);
arch/arm64/kvm/pkvm.c
399
mapping->nr_pages = size / PAGE_SIZE;
arch/arm64/kvm/pkvm.c
438
PAGE_SIZE * mapping->nr_pages);
arch/arm64/kvm/pkvm.c
75
hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
arch/arm64/kvm/reset.c
299
if (!kvm_lpa2_is_enabled() && PAGE_SIZE != SZ_64K)
arch/arm64/mm/contpte.c
228
__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
arch/arm64/mm/contpte.c
270
folio_start = addr - (page - &folio->page) * PAGE_SIZE;
arch/arm64/mm/contpte.c
271
folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE;
arch/arm64/mm/contpte.c
527
unsigned long end = addr + nr * PAGE_SIZE;
arch/arm64/mm/contpte.c
531
for (; addr != end; ptep++, addr += PAGE_SIZE)
arch/arm64/mm/contpte.c
547
unsigned long end = addr + nr * PAGE_SIZE;
arch/arm64/mm/contpte.c
555
PAGE_SIZE, true, 3);
arch/arm64/mm/contpte.c
595
unsigned long end = start + nr * PAGE_SIZE;
arch/arm64/mm/contpte.c
598
__clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
arch/arm64/mm/contpte.c
61
unsigned long last_addr = addr + PAGE_SIZE * (nr - 1);
arch/arm64/mm/contpte.c
684
for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
arch/arm64/mm/contpte.c
81
for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) {
arch/arm64/mm/fault.c
152
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
arch/arm64/mm/fault.c
396
} else if (addr < PAGE_SIZE) {
arch/arm64/mm/fixmap.c
133
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
arch/arm64/mm/fixmap.c
155
dt_phys_base = round_down(dt_phys, PAGE_SIZE);
arch/arm64/mm/fixmap.c
156
offset = dt_phys % PAGE_SIZE;
arch/arm64/mm/fixmap.c
160
create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);
arch/arm64/mm/fixmap.c
169
if (offset + *size > PAGE_SIZE) {
arch/arm64/mm/gcs.c
38
return max(PAGE_SIZE, size);
arch/arm64/mm/hugetlbpage.c
109
*pgsize = PAGE_SIZE;
arch/arm64/mm/hugetlbpage.c
86
*pgsize = PAGE_SIZE;
arch/arm64/mm/init.c
370
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
arch/arm64/mm/init.c
385
WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
arch/arm64/mm/init.c
386
WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
arch/arm64/mm/init.c
425
max_pgoff = (size - (end - start)) / PAGE_SIZE;
arch/arm64/mm/init.c
428
return start - pgoff * PAGE_SIZE;
arch/arm64/mm/init.c
487
module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
arch/arm64/mm/init.c
489
module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
arch/arm64/mm/kasan_init.c
126
memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
arch/arm64/mm/kasan_init.c
127
next = addr + PAGE_SIZE;
arch/arm64/mm/kasan_init.c
195
return (addr % (PAGE_SIZE << shift)) == 0;
arch/arm64/mm/kasan_init.c
26
static pgd_t tmp_pg_dir[PTRS_PER_PTE] __initdata __aligned(PAGE_SIZE);
arch/arm64/mm/kasan_init.c
262
memcpy(pud, pudp, PAGE_SIZE);
arch/arm64/mm/kasan_init.c
302
static pud_t pud[2][PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
arch/arm64/mm/kasan_init.c
37
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
arch/arm64/mm/kasan_init.c
373
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
arch/arm64/mm/kasan_init.c
391
shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
arch/arm64/mm/kasan_init.c
393
shadow_end = ALIGN(shadow_end, PAGE_SIZE);
arch/arm64/mm/kasan_init.c
42
__func__, PAGE_SIZE, PAGE_SIZE, node,
arch/arm64/mm/kasan_init.c
50
void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
arch/arm64/mm/kasan_init.c
56
__func__, PAGE_SIZE, PAGE_SIZE, node,
arch/arm64/mm/mmu.c
1058
kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
arch/arm64/mm/mmu.c
119
phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
arch/arm64/mm/mmu.c
1213
size += PAGE_SIZE;
arch/arm64/mm/mmu.c
1231
kpti_ng_temp_alloc -= PAGE_SIZE;
arch/arm64/mm/mmu.c
1259
kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
arch/arm64/mm/mmu.c
1279
KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL,
arch/arm64/mm/mmu.c
1340
for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
arch/arm64/mm/mmu.c
1342
pa_start + i * PAGE_SIZE, prot);
arch/arm64/mm/mmu.c
1346
pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);
arch/arm64/mm/mmu.c
1371
static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
arch/arm64/mm/mmu.c
1372
kpti_bbml2_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
arch/arm64/mm/mmu.c
1425
free_hotplug_page_range(page, PAGE_SIZE, NULL);
arch/arm64/mm/mmu.c
1461
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/arm64/mm/mmu.c
1464
PAGE_SIZE, altmap);
arch/arm64/mm/mmu.c
1465
} while (addr += PAGE_SIZE, addr < end);
arch/arm64/mm/mmu.c
1490
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/arm64/mm/mmu.c
1523
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/arm64/mm/mmu.c
1595
} while (addr += PAGE_SIZE, addr < end);
arch/arm64/mm/mmu.c
193
phys += PAGE_SIZE;
arch/arm64/mm/mmu.c
194
} while (ptep++, addr += PAGE_SIZE, addr != end);
arch/arm64/mm/mmu.c
2151
__flush_tlb_range(vma, addr, nr * PAGE_SIZE,
arch/arm64/mm/mmu.c
2152
PAGE_SIZE, true, 3);
arch/arm64/mm/mmu.c
71
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
arch/arm64/mm/mmu.c
816
if (end - start == PAGE_SIZE) {
arch/arm64/mm/pageattr.c
148
unsigned long size = PAGE_SIZE * numpages;
arch/arm64/mm/pageattr.c
193
PAGE_SIZE, set_mask, clear_mask);
arch/arm64/mm/pageattr.c
239
return __change_memory_common(addr, PAGE_SIZE * numpages,
arch/arm64/mm/pageattr.c
243
return __change_memory_common(addr, PAGE_SIZE * numpages,
arch/arm64/mm/pageattr.c
257
PAGE_SIZE, set_mask, clear_mask);
arch/arm64/mm/pageattr.c
269
PAGE_SIZE, set_mask, clear_mask);
arch/arm64/mm/pageattr.c
287
end = start + numpages * PAGE_SIZE;
arch/arm64/mm/pageattr.c
298
ret = __change_memory_common(addr, PAGE_SIZE * numpages,
arch/arm64/mm/pageattr.c
313
return __change_memory_common(addr, PAGE_SIZE * numpages,
arch/arm64/mm/pgd.c
22
if (PGD_SIZE == PAGE_SIZE)
arch/arm64/mm/ptdump.c
173
st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
arch/arm64/mm/ptdump.c
188
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
arch/arm64/mm/trans_pgd.c
80
} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
arch/csky/abiv2/cacheflush.c
27
icache_inv_range(address, address + nr*PAGE_SIZE);
arch/csky/abiv2/cacheflush.c
30
i * PAGE_SIZE);
arch/csky/abiv2/cacheflush.c
32
dcache_wb_range(addr, addr + PAGE_SIZE);
arch/csky/abiv2/cacheflush.c
34
icache_inv_range(addr, addr + PAGE_SIZE);
arch/csky/include/asm/memory.h
13
#define VMALLOC_START (PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8))
arch/csky/include/asm/memory.h
14
#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2))
arch/csky/include/asm/memory.h
22
#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL)
arch/csky/include/asm/page.h
12
#define THREAD_SIZE (PAGE_SIZE * 2)
arch/csky/include/asm/page.h
39
#define clear_page(page) memset((page), 0, PAGE_SIZE)
arch/csky/include/asm/page.h
40
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
arch/csky/include/asm/pgalloc.h
36
for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
arch/csky/include/asm/pgtable.h
22
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
arch/csky/include/asm/pgtable.h
24
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
arch/csky/include/asm/pgtable.h
79
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/csky/include/asm/processor.h
29
#define TASK_SIZE (PAGE_OFFSET - (PAGE_SIZE * 8))
arch/csky/include/asm/shmparam.h
6
#define SHMLBA (4 * PAGE_SIZE)
arch/csky/kernel/ptrace.c
480
(int) (((unsigned long) current) + 2 * PAGE_SIZE));
arch/csky/mm/cachev1.c
53
if (unlikely((end - start) >= PAGE_SIZE) ||
arch/csky/mm/dma-mapping.c
25
if (offset + len > PAGE_SIZE)
arch/csky/mm/dma-mapping.c
26
len = PAGE_SIZE - offset;
arch/csky/mm/dma-mapping.c
42
start += PAGE_SIZE;
arch/csky/mm/highmem.c
29
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
arch/csky/mm/init.c
109
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
arch/csky/mm/init.c
112
__func__, PAGE_SIZE,
arch/csky/mm/init.c
113
PAGE_SIZE);
arch/csky/mm/init.c
41
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
arch/csky/mm/tcm.c
133
u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
arch/csky/mm/tcm.c
139
u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
arch/csky/mm/tcm.c
51
paddr = paddr + PAGE_SIZE;
arch/csky/mm/tcm.c
70
paddr = paddr + PAGE_SIZE;
arch/csky/mm/tlb.c
110
start += 2*PAGE_SIZE;
arch/csky/mm/tlb.c
123
start += 2*PAGE_SIZE;
arch/csky/mm/tlb.c
16
#define TLB_ENTRY_SIZE (PAGE_SIZE * 2)
arch/csky/mm/tlb.c
70
start += 2*PAGE_SIZE;
arch/csky/mm/tlb.c
83
start += 2*PAGE_SIZE;
arch/hexagon/include/asm/elf.h
187
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/hexagon/include/asm/mem-layout.h
103
#define VMALLOC_END (PKMAP_BASE-PAGE_SIZE*2)
arch/hexagon/include/asm/mem-layout.h
65
#define VMALLOC_OFFSET PAGE_SIZE
arch/hexagon/include/asm/mem-layout.h
97
#define PKMAP_BASE (FIXADDR_START-PAGE_SIZE*LAST_PKMAP)
arch/hexagon/include/asm/page.h
106
: "r" (PAGE_SIZE/32)
arch/hexagon/include/asm/page.h
111
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
arch/hexagon/kernel/asm-offsets.c
28
DEFINE(_PAGE_SIZE, PAGE_SIZE);
arch/hexagon/kernel/time.c
49
.end = RTOS_TIMER_REGS_ADDR+PAGE_SIZE-1,
arch/hexagon/kernel/vdso.c
66
vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
arch/hexagon/kernel/vdso.c
74
vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
arch/hexagon/mm/init.c
193
printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
arch/hexagon/mm/init.c
54
__vmcache_idsync(addr, PAGE_SIZE);
arch/hexagon/mm/uaccess.c
26
while (count > PAGE_SIZE) {
arch/hexagon/mm/uaccess.c
27
uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
arch/hexagon/mm/uaccess.c
29
return count - (PAGE_SIZE - uncleared);
arch/hexagon/mm/uaccess.c
30
count -= PAGE_SIZE;
arch/hexagon/mm/uaccess.c
31
dest += PAGE_SIZE;
arch/hexagon/mm/vm_tlb.c
40
__vmclrmap((void *)vaddr, PAGE_SIZE);
arch/hexagon/mm/vm_tlb.c
72
__vmclrmap((void *)vaddr, PAGE_SIZE);
arch/loongarch/include/asm/addrspace.h
141
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
arch/loongarch/include/asm/dmi.h
13
#define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE)
arch/loongarch/include/asm/elf.h
300
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/loongarch/include/asm/kasan.h
69
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
arch/loongarch/include/asm/kexec.h
22
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
arch/loongarch/include/asm/kfence.h
44
kaddr += PAGE_SIZE;
arch/loongarch/include/asm/kfence.h
45
vaddr += PAGE_SIZE;
arch/loongarch/include/asm/pgtable.h
109
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
arch/loongarch/include/asm/pgtable.h
113
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
arch/loongarch/include/asm/pgtable.h
478
address += PAGE_SIZE;
arch/loongarch/include/asm/pgtable.h
52
#define PTRS_PER_PGD (PAGE_SIZE >> PTRLOG)
arch/loongarch/include/asm/pgtable.h
54
#define PTRS_PER_PUD (PAGE_SIZE >> PTRLOG)
arch/loongarch/include/asm/pgtable.h
57
#define PTRS_PER_PMD (PAGE_SIZE >> PTRLOG)
arch/loongarch/include/asm/pgtable.h
59
#define PTRS_PER_PTE (PAGE_SIZE >> PTRLOG)
arch/loongarch/include/asm/pgtable.h
82
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/loongarch/include/asm/pgtable.h
88
#define VMALLOC_START (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
arch/loongarch/include/asm/pgtable.h
89
#define VMALLOC_END (FIXADDR_START - (2 * PAGE_SIZE))
arch/loongarch/include/asm/pgtable.h
95
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
arch/loongarch/include/asm/pgtable.h
99
#define KFENCE_AREA_SIZE (((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
arch/loongarch/include/asm/thread_info.h
61
#define THREAD_SIZE_ORDER ilog2(THREAD_SIZE / PAGE_SIZE)
arch/loongarch/kernel/asm-offsets.c
215
DEFINE(_PAGE_SIZE, PAGE_SIZE);
arch/loongarch/kernel/crash_dump.c
14
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/loongarch/kernel/inst.c
278
start = round_down((size_t)dst, PAGE_SIZE);
arch/loongarch/kernel/inst.c
279
end = round_up((size_t)dst + len, PAGE_SIZE);
arch/loongarch/kernel/inst.c
281
err = set_memory_rw(start, (end - start) / PAGE_SIZE);
arch/loongarch/kernel/inst.c
289
err = set_memory_rox(start, (end - start) / PAGE_SIZE);
arch/loongarch/kernel/process.c
342
sp -= get_random_u32_below(PAGE_SIZE);
arch/loongarch/kernel/setup.c
478
range->size = size = round_up(size, PAGE_SIZE);
arch/loongarch/kernel/traps.c
112
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
arch/loongarch/kernel/vdso.c
55
kzalloc_objs(struct page *, vdso_info.size / PAGE_SIZE);
arch/loongarch/kernel/vdso.c
61
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
arch/loongarch/kvm/mmu.c
396
size = new->npages * PAGE_SIZE;
arch/loongarch/kvm/mmu.c
624
end = start + memslot->npages * PAGE_SIZE;
arch/loongarch/kvm/mmu.c
740
val += PAGE_SIZE;
arch/loongarch/mm/init.c
152
pud = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/loongarch/mm/init.c
161
pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/loongarch/mm/init.c
172
pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/loongarch/mm/init.c
198
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/loongarch/mm/init.c
39
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
arch/loongarch/mm/kasan_init.c
111
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
arch/loongarch/mm/kasan_init.c
115
__func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));
arch/loongarch/mm/kasan_init.c
14
static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
arch/loongarch/mm/kasan_init.c
182
next = addr + PAGE_SIZE;
arch/loongarch/mm/kasan_init.c
326
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
arch/loongarch/mm/mmap.c
75
info.low_limit = PAGE_SIZE;
arch/loongarch/mm/pageattr.c
111
unsigned long end = start + PAGE_SIZE * numpages;
arch/loongarch/mm/tlb.c
100
end += ((PAGE_SIZE << 1) - 1);
arch/loongarch/mm/tlb.c
105
start += (PAGE_SIZE << 1);
arch/loongarch/mm/tlb.c
67
start = round_down(start, PAGE_SIZE << 1);
arch/loongarch/mm/tlb.c
68
end = round_up(end, PAGE_SIZE << 1);
arch/loongarch/mm/tlb.c
77
start += (PAGE_SIZE << 1);
arch/loongarch/mm/tlb.c
93
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
arch/m68k/amiga/chipram.c
82
PAGE_SIZE, NULL, NULL);
arch/m68k/atari/stram.c
101
__func__, pool_size, PAGE_SIZE);
arch/m68k/atari/stram.c
125
stram_pool.start = PAGE_SIZE;
arch/m68k/atari/stram.c
170
PAGE_SIZE, NULL, NULL);
arch/m68k/atari/stram.c
98
PAGE_SIZE);
arch/m68k/include/asm/cacheflush_mm.h
228
addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
arch/m68k/include/asm/cacheflush_mm.h
230
end = (addr + nr * PAGE_SIZE - 1) & ICACHE_SET_MASK;
arch/m68k/include/asm/cacheflush_mm.h
245
paddr += PAGE_SIZE;
arch/m68k/include/asm/page_mm.h
25
: "0" (to), "1" (from), "2" (PAGE_SIZE / 32 - 1));
arch/m68k/include/asm/page_mm.h
47
"1" ((PAGE_SIZE - 16) / 16 - 1));
arch/m68k/include/asm/page_mm.h
51
#define clear_page(page) memset((page), 0, PAGE_SIZE)
arch/m68k/include/asm/page_mm.h
52
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
arch/m68k/include/asm/page_no.h
10
#define clear_page(page) memset((page), 0, PAGE_SIZE)
arch/m68k/include/asm/page_no.h
11
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
arch/m68k/include/asm/pgtable_mm.h
97
#define VMALLOC_OFFSET PAGE_SIZE
arch/m68k/include/asm/processor.h
169
if ((tsk)->thread.esp0 > PAGE_SIZE && \
arch/m68k/include/asm/sun3_pgalloc.h
45
memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
arch/m68k/include/asm/thread_info.h
23
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/m68k/kernel/sys_m68k.c
107
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
arch/m68k/kernel/sys_m68k.c
113
tmp = PAGE_SIZE;
arch/m68k/kernel/sys_m68k.c
125
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
arch/m68k/kernel/sys_m68k.c
159
addr += PAGE_SIZE;
arch/m68k/kernel/sys_m68k.c
160
i = PAGE_SIZE / 16;
arch/m68k/kernel/sys_m68k.c
170
addr += PAGE_SIZE;
arch/m68k/kernel/sys_m68k.c
180
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
arch/m68k/kernel/sys_m68k.c
181
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
arch/m68k/kernel/sys_m68k.c
268
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
arch/m68k/kernel/sys_m68k.c
274
tmp = PAGE_SIZE;
arch/m68k/kernel/sys_m68k.c
286
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
arch/m68k/kernel/sys_m68k.c
318
addr += PAGE_SIZE;
arch/m68k/kernel/sys_m68k.c
321
i = PAGE_SIZE / 16;
arch/m68k/kernel/sys_m68k.c
331
addr += PAGE_SIZE;
arch/m68k/kernel/sys_m68k.c
341
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
arch/m68k/kernel/sys_m68k.c
344
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
arch/m68k/kernel/sys_m68k.c
444
if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
arch/m68k/kernel/sys_m68k.c
446
if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
arch/m68k/kernel/sys_m68k.c
566
return PAGE_SIZE;
arch/m68k/kernel/traps.c
155
addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
arch/m68k/mm/cache.c
79
address += PAGE_SIZE;
arch/m68k/mm/fault.c
47
if ((unsigned long)addr < PAGE_SIZE)
arch/m68k/mm/init.c
75
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/m68k/mm/kmap.c
101
virtaddr += PAGE_SIZE;
arch/m68k/mm/kmap.c
102
size -= PAGE_SIZE;
arch/m68k/mm/kmap.c
282
virtaddr += PAGE_SIZE;
arch/m68k/mm/kmap.c
283
physaddr += PAGE_SIZE;
arch/m68k/mm/kmap.c
284
size -= PAGE_SIZE;
arch/m68k/mm/kmap.c
35
#define IO_SIZE PAGE_SIZE
arch/m68k/mm/kmap.c
394
virtaddr += PAGE_SIZE;
arch/m68k/mm/kmap.c
395
size -= PAGE_SIZE;
arch/m68k/mm/mcfmmu.c
44
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/m68k/mm/mcfmmu.c
50
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
arch/m68k/mm/mcfmmu.c
51
next_pgtable = (unsigned long) memblock_alloc_or_panic(size, PAGE_SIZE);
arch/m68k/mm/mcfmmu.c
70
address += PAGE_SIZE;
arch/m68k/mm/memory.c
109
if ((tmp = -paddr & (PAGE_SIZE - 1))) {
arch/m68k/mm/memory.c
115
tmp = PAGE_SIZE;
arch/m68k/mm/memory.c
151
int tmp = PAGE_SIZE;
arch/m68k/mm/memory.c
158
len += paddr & (PAGE_SIZE - 1);
arch/m68k/mm/motorola.c
117
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
arch/m68k/mm/motorola.c
241
pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
arch/m68k/mm/motorola.c
244
__func__, PAGE_SIZE, PAGE_SIZE);
arch/m68k/mm/motorola.c
289
last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
arch/m68k/mm/motorola.c
292
__func__, PAGE_SIZE, PAGE_SIZE);
arch/m68k/mm/motorola.c
365
physaddr += PAGE_SIZE;
arch/m68k/mm/motorola.c
366
for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
arch/m68k/mm/motorola.c
386
size -= PAGE_SIZE;
arch/m68k/mm/motorola.c
387
virtaddr += PAGE_SIZE;
arch/m68k/mm/motorola.c
388
physaddr += PAGE_SIZE;
arch/m68k/mm/motorola.c
505
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/m68k/mm/sun3kmap.c
100
virt += seg_pages * PAGE_SIZE;
arch/m68k/mm/sun3kmap.c
55
phys += PAGE_SIZE;
arch/m68k/mm/sun3kmap.c
56
virt += PAGE_SIZE;
arch/m68k/mm/sun3kmap.c
72
offset = phys & (PAGE_SIZE-1);
arch/m68k/mm/sun3kmap.c
73
phys &= ~(PAGE_SIZE-1);
arch/m68k/mm/sun3kmap.c
85
pages = size / PAGE_SIZE;
arch/m68k/mm/sun3kmap.c
92
seg_pages = (SUN3_PMEG_SIZE - (virt & SUN3_PMEG_MASK)) / PAGE_SIZE;
arch/m68k/mm/sun3kmap.c
99
phys += seg_pages * PAGE_SIZE;
arch/m68k/mm/sun3mmu.c
46
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/m68k/mm/sun3mmu.c
54
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
arch/m68k/mm/sun3mmu.c
56
next_pgtable = (unsigned long)memblock_alloc_or_panic(size, PAGE_SIZE);
arch/m68k/mm/sun3mmu.c
57
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
arch/m68k/mm/sun3mmu.c
75
address += PAGE_SIZE;
arch/m68k/sun3/config.c
113
memory_start = ((memory_start + (PAGE_SIZE-1)) & PAGE_MASK);
arch/m68k/sun3/config.c
144
memory_end = *(romvec->pv_sun3mem) + PAGE_OFFSET - 2*PAGE_SIZE;
arch/m68k/sun3/dvma.c
57
kaddr += PAGE_SIZE;
arch/m68k/sun3/dvma.c
58
vaddr += PAGE_SIZE;
arch/m68k/sun3/mmu_emu.c
166
for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) {
arch/m68k/sun3/mmu_emu.c
171
print_pte_vaddr (seg + (i*PAGE_SIZE));
arch/m68k/sun3x/dvma.c
131
kaddr += PAGE_SIZE;
arch/m68k/sun3x/dvma.c
132
vaddr += PAGE_SIZE;
arch/microblaze/include/asm/cacheflush.h
74
flush_dcache_range((unsigned) (addr), (unsigned) (addr) + PAGE_SIZE); \
arch/microblaze/include/asm/cacheflush.h
86
flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE);
arch/microblaze/include/asm/cacheflush.h
95
invalidate_icache_range(addr, addr + PAGE_SIZE);
arch/microblaze/include/asm/cacheflush.h
96
flush_dcache_range(addr, addr + PAGE_SIZE);
arch/microblaze/include/asm/fixmap.h
26
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
arch/microblaze/include/asm/highmem.h
45
#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
arch/microblaze/include/asm/page.h
45
# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
arch/microblaze/include/asm/page.h
46
# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
arch/microblaze/include/asm/page.h
49
memcpy((vto), (vfrom), PAGE_SIZE)
arch/microblaze/include/uapi/asm/elf.h
84
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/microblaze/kernel/cpu/mb.c
131
seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
arch/microblaze/mm/pgtable.c
111
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
arch/microblaze/mm/pgtable.c
172
for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
arch/microblaze/mm/pgtable.c
182
v += PAGE_SIZE;
arch/microblaze/mm/pgtable.c
183
p += PAGE_SIZE;
arch/microblaze/mm/pgtable.c
250
return memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
arch/mips/cavium-octeon/dma-octeon.c
211
swiotlbsize = PAGE_SIZE;
arch/mips/cavium-octeon/setup.c
231
kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
arch/mips/cavium-octeon/setup.c
232
kimage_ptr->segment[i].mem - PAGE_SIZE,
arch/mips/cavium-octeon/setup.c
233
PAGE_SIZE);
arch/mips/cavium-octeon/setup.c
937
if (addr == *mem && *size > PAGE_SIZE) {
arch/mips/cavium-octeon/setup.c
938
*mem += PAGE_SIZE;
arch/mips/cavium-octeon/setup.c
939
*size -= PAGE_SIZE;
arch/mips/dec/kn01-berr.c
110
entryhi = asid & (PAGE_SIZE - 1);
arch/mips/dec/kn01-berr.c
111
entryhi |= vaddr & ~(PAGE_SIZE - 1);
arch/mips/dec/kn01-berr.c
119
offset = vaddr & (PAGE_SIZE - 1);
arch/mips/dec/kn01-berr.c
120
address = (entrylo & ~(PAGE_SIZE - 1)) | offset;
arch/mips/dec/prom/memory.c
116
free_init_pages("unused PROM memory", PAGE_SIZE, end);
arch/mips/include/asm/dmi.h
14
#define dmi_alloc(l) memblock_alloc_low(l, PAGE_SIZE)
arch/mips/include/asm/elf.h
416
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
86
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
31
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h
24
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
27
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
28
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
arch/mips/include/asm/page.h
27
if (PAGE_SIZE == (1 << 30))
arch/mips/include/asm/page.h
29
if (PAGE_SIZE == (1llu << 32))
arch/mips/include/asm/page.h
31
if (PAGE_SIZE > (256 << 10))
arch/mips/include/asm/pgtable-32.h
102
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
arch/mips/include/asm/pgtable-32.h
104
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
arch/mips/include/asm/pgtable-32.h
89
# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t) / 2)
arch/mips/include/asm/pgtable-32.h
91
# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
arch/mips/include/asm/pgtable-32.h
99
#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
arch/mips/include/asm/pgtable-64.h
125
#define PTRS_PER_PGD ((PAGE_SIZE << PGD_TABLE_ORDER) / sizeof(pgd_t))
arch/mips/include/asm/pgtable-64.h
127
#define PTRS_PER_PUD ((PAGE_SIZE << PUD_TABLE_ORDER) / sizeof(pud_t))
arch/mips/include/asm/pgtable-64.h
130
#define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t))
arch/mips/include/asm/pgtable-64.h
132
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
arch/mips/include/asm/pgtable-64.h
141
#define VMALLOC_START (MAP_BASE + (2 * PAGE_SIZE))
arch/mips/include/asm/pgtable-64.h
144
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
arch/mips/include/asm/pgtable-64.h
151
#define MODULES_END (FIXADDR_START-2*PAGE_SIZE)
arch/mips/include/asm/pgtable.h
585
address += PAGE_SIZE;
arch/mips/include/asm/r4kcache.h
220
unsigned long end = page + PAGE_SIZE; \
arch/mips/include/asm/r4kcache.h
232
unsigned long end = start + PAGE_SIZE; \
arch/mips/include/asm/r4kcache.h
269
unsigned long end = page + PAGE_SIZE; \
arch/mips/include/asm/thread_info.h
98
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/mips/include/asm/vdso/vdso.h
67
return (void __iomem *)((unsigned long)data & PAGE_MASK) - PAGE_SIZE;
arch/mips/kernel/asm-offsets.c
223
DEFINE(_PAGE_SIZE, PAGE_SIZE);
arch/mips/kernel/cpu-probe.c
610
PAGE_SIZE, config4);
arch/mips/kernel/machine_kexec.c
50
(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
arch/mips/kernel/process.c
689
top -= PAGE_SIZE;
arch/mips/kernel/process.c
695
top -= VDSO_NR_PAGES * PAGE_SIZE;
arch/mips/kernel/process.c
696
top -= mips_gic_present() ? PAGE_SIZE : 0;
arch/mips/kernel/process.c
717
sp -= get_random_u32_below(PAGE_SIZE);
arch/mips/kernel/relocate.c
376
RELOCATED(ALIGN((long)&_end, PAGE_SIZE));
arch/mips/kernel/smp.c
611
addr = round_down(start, PAGE_SIZE * 2);
arch/mips/kernel/smp.c
612
end = round_up(end, PAGE_SIZE * 2);
arch/mips/kernel/smp.c
616
addr += PAGE_SIZE * 2;
arch/mips/kernel/traps.c
188
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
arch/mips/kernel/traps.c
2337
vec_size = PAGE_SIZE;
arch/mips/kernel/vdso.c
109
gic_size = mips_gic_present() ? PAGE_SIZE : 0;
arch/mips/kernel/vdso.c
110
size = gic_size + VDSO_NR_PAGES * PAGE_SIZE + image->size;
arch/mips/kernel/vdso.c
137
vdso_addr = data_addr + VDSO_NR_PAGES * PAGE_SIZE;
arch/mips/kernel/vdso.c
37
num_pages = image->size / PAGE_SIZE;
arch/mips/kernel/vdso.c
66
base += PAGE_SIZE;
arch/mips/kernel/vdso.c
92
base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC,
arch/mips/kvm/mips.c
303
gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
arch/mips/kvm/mips.c
310
ALIGN(size, PAGE_SIZE), gebase);
arch/mips/kvm/mips.c
366
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
arch/mips/loongson64/init.c
164
range->size = size = round_up(size, PAGE_SIZE);
arch/mips/loongson64/numa.c
120
memblock_reserve(0, PAGE_SIZE * start_pfn);
arch/mips/math-emu/dsemul.c
143
if (regs->cp0_epc >= (base + PAGE_SIZE))
arch/mips/math-emu/dsemul.c
68
static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
arch/mips/mm/c-octeon.c
270
shm_align_mask = PAGE_SIZE - 1;
arch/mips/mm/c-r3k.c
259
r3k_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
arch/mips/mm/c-r3k.c
261
r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
arch/mips/mm/c-r4k.c
1233
PAGE_SIZE <= 0x8000)
arch/mips/mm/c-r4k.c
1284
(c->icache.waysize > PAGE_SIZE))
arch/mips/mm/c-r4k.c
1296
if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
arch/mips/mm/c-r4k.c
1729
PAGE_SIZE - 1);
arch/mips/mm/c-r4k.c
1731
shm_align_mask = PAGE_SIZE-1;
arch/mips/mm/cache.c
168
address += PAGE_SIZE;
arch/mips/mm/dma-noncoherent.c
107
if (offset + len > PAGE_SIZE)
arch/mips/mm/dma-noncoherent.c
108
len = PAGE_SIZE - offset;
arch/mips/mm/init.c
250
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
arch/mips/mm/init.c
251
PAGE_SIZE);
arch/mips/mm/init.c
254
__func__, PAGE_SIZE,
arch/mips/mm/init.c
255
PAGE_SIZE);
arch/mips/mm/init.c
479
memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
arch/mips/mm/init.c
531
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
arch/mips/mm/init.c
71
empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);
arch/mips/mm/init.c
73
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
arch/mips/mm/mmap.c
20
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
arch/mips/mm/mmap.c
80
info.low_limit = PAGE_SIZE;
arch/mips/mm/page.c
288
BUG_ON(PAGE_SIZE < pref_bias_clear_store);
arch/mips/mm/page.c
290
off = PAGE_SIZE - pref_bias_clear_store;
arch/mips/mm/page.c
440
BUG_ON(PAGE_SIZE < pref_bias_copy_load);
arch/mips/mm/page.c
443
off = PAGE_SIZE - pref_bias_copy_load;
arch/mips/mm/page.c
629
page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
arch/mips/mm/page.c
656
page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
arch/mips/mm/pgtable-32.c
70
fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
arch/mips/mm/tlb-r3k.c
119
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
arch/mips/mm/tlb-r3k.c
124
end += PAGE_SIZE - 1;
arch/mips/mm/tlb-r3k.c
131
start += PAGE_SIZE; /* BARRIER */
arch/mips/mm/tlb-r3k.c
82
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
arch/mips/mm/tlb-r3k.c
88
end += PAGE_SIZE - 1;
arch/mips/mm/tlb-r3k.c
94
start += PAGE_SIZE; /* BARRIER */
arch/mips/mm/tlb-r4k.c
120
start = round_down(start, PAGE_SIZE << 1);
arch/mips/mm/tlb-r4k.c
121
end = round_up(end, PAGE_SIZE << 1);
arch/mips/mm/tlb-r4k.c
143
start += (PAGE_SIZE << 1);
arch/mips/mm/tlb-r4k.c
175
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
arch/mips/mm/tlb-r4k.c
183
end += ((PAGE_SIZE << 1) - 1);
arch/mips/mm/tlb-r4k.c
191
start += (PAGE_SIZE << 1);
arch/mips/mm/tlb-r4k.c
793
panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
arch/mips/mti-malta/malta-dtshim.c
172
physical_memsize -= PAGE_SIZE;
arch/mips/pci/pci-lantiq.c
87
mem = get_num_physpages() * PAGE_SIZE;
arch/mips/pci/pci-rt2880.c
194
rt2880_pci_base = ioremap(RT2880_PCI_BASE, PAGE_SIZE);
arch/mips/sgi-ip27/ip27-memory.c
362
memset(__node_data[node], 0, PAGE_SIZE);
arch/nios2/include/asm/cachetype.h
8
#define cpu_dcache_is_aliasing() (NIOS2_DCACHE_SIZE > PAGE_SIZE)
arch/nios2/include/asm/page.h
43
#define clear_page(page) memset((page), 0, PAGE_SIZE)
arch/nios2/include/asm/page.h
44
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
arch/nios2/include/asm/pgtable.h
191
flush_dcache_range(paddr, paddr + nr * PAGE_SIZE);
arch/nios2/include/asm/pgtable.h
58
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
arch/nios2/include/asm/pgtable.h
59
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
arch/nios2/include/asm/pgtable.h
72
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/nios2/include/asm/pgtable.h
76
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
arch/nios2/include/asm/processor.h
38
#define KUSER_SIZE (PAGE_SIZE)
arch/nios2/include/asm/tlbflush.h
33
flush_tlb_range(vma, address, address + PAGE_SIZE);
arch/nios2/include/asm/tlbflush.h
38
flush_tlb_kernel_range(address, address + PAGE_SIZE);
arch/nios2/kernel/sys_nios2.c
59
return PAGE_SIZE;
arch/nios2/mm/cacheflush.c
146
unsigned long end = start + nr * PAGE_SIZE;
arch/nios2/mm/cacheflush.c
156
unsigned long end = start + PAGE_SIZE;
arch/nios2/mm/cacheflush.c
245
__flush_dcache(vaddr, vaddr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
246
__flush_icache(vaddr, vaddr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
248
__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
249
__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
254
__flush_dcache(vaddr, vaddr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
255
__flush_icache(vaddr, vaddr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
257
__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
258
__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
94
flush_cache_range(vma, start, start + nr * PAGE_SIZE);
arch/nios2/mm/fault.c
192
address < PAGE_SIZE ? "NULL pointer dereference" :
arch/nios2/mm/init.c
58
(unsigned long)empty_zero_page + PAGE_SIZE);
arch/nios2/mm/init.c
66
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
arch/nios2/mm/init.c
67
pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
arch/nios2/mm/ioremap.c
44
address += PAGE_SIZE;
arch/nios2/mm/tlb.c
109
start += PAGE_SIZE;
arch/nios2/mm/tlb.c
164
start += PAGE_SIZE;
arch/nios2/mm/tlb.c
247
addr += PAGE_SIZE;
arch/nios2/mm/tlb.c
289
addr += PAGE_SIZE;
arch/openrisc/include/asm/fixmap.h
23
#define FIXADDR_TOP ((unsigned long) (-2*PAGE_SIZE))
arch/openrisc/include/asm/page.h
30
#define clear_page(page) memset((page), 0, PAGE_SIZE)
arch/openrisc/include/asm/page.h
31
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
arch/openrisc/include/asm/thread_info.h
32
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/openrisc/kernel/dma.c
34
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/openrisc/kernel/dma.c
56
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/openrisc/kernel/setup.c
81
unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
arch/openrisc/kernel/setup.c
82
unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE);
arch/openrisc/kernel/smp.c
295
if ((end - start) <= PAGE_SIZE)
arch/openrisc/kernel/smp.c
305
if ((end - start) <= PAGE_SIZE)
arch/openrisc/kernel/smp.c
325
smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
arch/openrisc/mm/cache.c
47
unsigned long end = paddr + PAGE_SIZE;
arch/openrisc/mm/fault.c
129
if (address + PAGE_SIZE < regs->sp)
arch/openrisc/mm/fault.c
246
if ((unsigned long)(address) < PAGE_SIZE)
arch/openrisc/mm/init.c
103
v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
arch/openrisc/mm/init.c
192
memset((void *)empty_zero_page, 0, PAGE_SIZE);
arch/openrisc/mm/init.c
95
pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
arch/openrisc/mm/ioremap.c
41
pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/openrisc/mm/tlb.c
106
for (addr = start; addr < end; addr += PAGE_SIZE) {
arch/parisc/include/asm/dma.h
26
#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
arch/parisc/include/asm/kfence.h
39
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/parisc/include/asm/pgtable.h
106
#define PLD_SIZE PAGE_SIZE
arch/parisc/include/asm/pgtable.h
391
addr += PAGE_SIZE;
arch/parisc/include/asm/ropes.h
191
#define IOVP_SIZE PAGE_SIZE
arch/parisc/include/asm/shmparam.h
20
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
arch/parisc/include/asm/thread_info.h
35
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/parisc/kernel/asm-offsets.c
284
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
arch/parisc/kernel/cache.c
35
#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
arch/parisc/kernel/cache.c
403
kaddr += PAGE_SIZE;
arch/parisc/kernel/cache.c
491
flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);
arch/parisc/kernel/cache.c
516
addr += offset * PAGE_SIZE;
arch/parisc/kernel/cache.c
518
if (addr + nr * PAGE_SIZE > vma->vm_end)
arch/parisc/kernel/cache.c
519
nr = (vma->vm_end - addr) / PAGE_SIZE;
arch/parisc/kernel/cache.c
525
addr + i * PAGE_SIZE,
arch/parisc/kernel/cache.c
526
(pfn + i) * PAGE_SIZE);
arch/parisc/kernel/cache.c
592
threshold *= PAGE_SIZE;
arch/parisc/kernel/cache.c
700
start += PAGE_SIZE;
arch/parisc/kernel/cache.c
709
for (addr = start; addr < end; addr += PAGE_SIZE)
arch/parisc/kernel/cache.c
852
for (addr = start; addr < end; addr += PAGE_SIZE) {
arch/parisc/kernel/cache.c
857
physaddr += PAGE_SIZE;
arch/parisc/kernel/inventory.c
140
if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
arch/parisc/kernel/kexec.c
30
(unsigned long)kimage->segment[n].memsz / PAGE_SIZE);
arch/parisc/kernel/kexec_file.c
47
kbuf.buf_align = PAGE_SIZE;
arch/parisc/kernel/kexec_file.c
62
kbuf.buf_align = PAGE_SIZE;
arch/parisc/kernel/kexec_file.c
64
kbuf.buf_min = PAGE0->mem_free + PAGE_SIZE;
arch/parisc/kernel/pci-dma.c
180
vaddr += PAGE_SIZE;
arch/parisc/kernel/pci-dma.c
181
orig_vaddr += PAGE_SIZE;
arch/parisc/kernel/pci-dma.c
96
vaddr += PAGE_SIZE;
arch/parisc/kernel/pci-dma.c
97
orig_vaddr += PAGE_SIZE;
arch/parisc/kernel/pci-dma.c
98
(*paddr_ptr) += PAGE_SIZE;
arch/parisc/kernel/pdt.c
237
memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
arch/parisc/kernel/pdt.c
46
#define MAX_PDT_TABLE_SIZE PAGE_SIZE
arch/parisc/kernel/setup.c
114
(int)(PAGE_SIZE / 1024));
arch/parisc/kernel/sys_parisc.c
149
info.low_limit = PAGE_SIZE;
arch/parisc/kernel/vdso.c
109
pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
arch/parisc/kernel/vdso.c
78
map_base -= get_random_u32_below(0x20) * PAGE_SIZE;
arch/parisc/kernel/vdso.c
88
do_munmap(mm, vdso_text_start, PAGE_SIZE, NULL);
arch/parisc/lib/memcpy.c
49
start += PAGE_SIZE;
arch/parisc/lib/memcpy.c
69
if ((unsigned long)unsafe_src < PAGE_SIZE)
arch/parisc/mm/fixmap.c
24
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
arch/parisc/mm/fixmap.c
37
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
arch/parisc/mm/hugetlbpage.c
114
addr += PAGE_SIZE;
arch/parisc/mm/hugetlbpage.c
115
pte_val(entry) += PAGE_SIZE;
arch/parisc/mm/init.c
380
pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
arch/parisc/mm/init.c
381
PAGE_SIZE << PMD_TABLE_ORDER);
arch/parisc/mm/init.c
389
pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/parisc/mm/init.c
426
address += PAGE_SIZE;
arch/parisc/mm/init.c
427
vaddr += PAGE_SIZE;
arch/parisc/mm/init.c
43
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
arch/parisc/mm/init.c
46
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
arch/parisc/mm/init.c
47
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));
arch/parisc/mm/init.c
643
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/parisc/mm/init.c
664
PAGE_SIZE, PAGE_GATEWAY, 1);
arch/parisc/mm/init.c
680
pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
arch/parisc/mm/init.c
681
PAGE_SIZE << PMD_TABLE_ORDER);
arch/parisc/mm/init.c
688
pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/parisc/mm/init.c
692
addr += PAGE_SIZE;
arch/parisc/mm/init.c
723
while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
arch/powerpc/boot/page.h
18
#define PAGE_MASK (~(PAGE_SIZE-1))
arch/powerpc/boot/page.h
28
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
arch/powerpc/include/asm/book3s/32/pgtable.h
151
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
arch/powerpc/include/asm/book3s/32/pgtable.h
153
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
arch/powerpc/include/asm/book3s/32/pgtable.h
193
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
arch/powerpc/include/asm/book3s/32/tlbflush.h
44
else if (end - start <= PAGE_SIZE)
arch/powerpc/include/asm/book3s/64/hash-4k.h
59
#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
arch/powerpc/include/asm/book3s/64/hash-4k.h
61
#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
arch/powerpc/include/asm/book3s/64/hash-4k.h
75
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
arch/powerpc/include/asm/book3s/64/hash-64k.h
182
return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
arch/powerpc/include/asm/book3s/64/hash-64k.h
73
#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
arch/powerpc/include/asm/book3s/64/hash-64k.h
80
#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
arch/powerpc/include/asm/book3s/64/pgtable.h
1006
WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
arch/powerpc/include/asm/book3s/64/pgtable.h
1008
return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
arch/powerpc/include/asm/book3s/64/radix-4k.h
17
#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
arch/powerpc/include/asm/book3s/64/radix-4k.h
20
#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
arch/powerpc/include/asm/book3s/64/radix-64k.h
18
#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
arch/powerpc/include/asm/book3s/64/radix-64k.h
21
#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
arch/powerpc/include/asm/crash_reserve.h
6
#define CRASH_ALIGN PAGE_SIZE
arch/powerpc/include/asm/elf.h
20
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/powerpc/include/asm/fixmap.h
49
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
arch/powerpc/include/asm/highmem.h
51
#define PKMAP_BASE (FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
arch/powerpc/include/asm/highmem.h
53
#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
arch/powerpc/include/asm/kfence.h
52
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/powerpc/include/asm/kvm_book3s_64.h
493
if (pagesize <= PAGE_SIZE)
arch/powerpc/include/asm/kvm_host.h
187
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
arch/powerpc/include/asm/nohash/32/mmu-8xx.h
241
if (PAGE_SIZE == SZ_16K)
arch/powerpc/include/asm/nohash/32/mmu-8xx.h
245
return PAGE_SIZE;
arch/powerpc/include/asm/nohash/32/pgtable.h
111
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
arch/powerpc/include/asm/nohash/32/pgtable.h
66
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
arch/powerpc/include/asm/nohash/32/pgtable.h
68
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
arch/powerpc/include/asm/nohash/32/pte-8xx.h
178
return PAGE_SIZE / SZ_4K;
arch/powerpc/include/asm/nohash/32/pte-8xx.h
198
for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
91
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
arch/powerpc/include/asm/nohash/pgtable.h
39
return PAGE_SIZE;
arch/powerpc/include/asm/nohash/pgtable.h
74
sz = PAGE_SIZE;
arch/powerpc/include/asm/nohash/pgtable.h
77
pdsize = PAGE_SIZE;
arch/powerpc/include/asm/nohash/pgtable.h
90
new += (unsigned long long)(pdsize / PAGE_SIZE) << PTE_RPN_SHIFT;
arch/powerpc/include/asm/nohash/tlbflush.h
58
if (end - start <= PAGE_SIZE)
arch/powerpc/include/asm/opal.h
18
#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
arch/powerpc/include/asm/page_32.h
46
for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
arch/powerpc/include/asm/shmparam.h
5
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
arch/powerpc/include/asm/task_size_64.h
45
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1 * PAGE_SIZE))
arch/powerpc/kernel/cacheinfo.c
751
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/powerpc/kernel/crash_dump.c
126
for (addr = begin; addr < end; addr += PAGE_SIZE) {
arch/powerpc/kernel/crash_dump.c
128
if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
arch/powerpc/kernel/crash_dump.c
81
csize = min_t(size_t, csize, PAGE_SIZE);
arch/powerpc/kernel/crash_dump.c
88
vaddr = ioremap_cache(paddr, PAGE_SIZE);
arch/powerpc/kernel/dt_cpu_ftrs.c
1093
PAGE_SIZE);
arch/powerpc/kernel/eeh.c
1652
mapped = ioremap(bar->start, PAGE_SIZE);
arch/powerpc/kernel/fadump.c
786
count = PAGE_ALIGN(size) / PAGE_SIZE;
arch/powerpc/kernel/fadump.c
849
new_size = mrange_info->mem_ranges_sz + PAGE_SIZE;
arch/powerpc/kernel/iommu.c
513
if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
arch/powerpc/kernel/iommu.c
872
if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
arch/powerpc/kernel/mce_power.c
57
unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
arch/powerpc/kernel/paca.c
79
PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
arch/powerpc/kernel/pci_64.c
157
phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
arch/powerpc/kernel/pci_64.c
158
size_page = ALIGN(hose->pci_io_size, PAGE_SIZE);
arch/powerpc/kernel/proc_powerpc.c
24
return fixed_size_llseek(file, off, whence, PAGE_SIZE);
arch/powerpc/kernel/proc_powerpc.c
31
pde_data(file_inode(file)), PAGE_SIZE);
arch/powerpc/kernel/proc_powerpc.c
36
if ((vma->vm_end - vma->vm_start) > PAGE_SIZE)
arch/powerpc/kernel/proc_powerpc.c
41
PAGE_SIZE, vma->vm_page_prot);
arch/powerpc/kernel/proc_powerpc.c
52
u8 page[PAGE_SIZE];
arch/powerpc/kernel/proc_powerpc.c
81
proc_set_size(pde, PAGE_SIZE);
arch/powerpc/kernel/process.c
2409
sp -= get_random_u32_below(PAGE_SIZE);
arch/powerpc/kernel/prom.c
103
return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
arch/powerpc/kernel/prom.c
104
start <= ALIGN(initrd_end, PAGE_SIZE);
arch/powerpc/kernel/prom.c
130
p = memblock_alloc_raw(size, PAGE_SIZE);
arch/powerpc/kernel/prom.c
690
memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
arch/powerpc/kernel/prom.c
691
ALIGN(initrd_end, PAGE_SIZE) -
arch/powerpc/kernel/prom.c
692
ALIGN_DOWN(initrd_start, PAGE_SIZE));
arch/powerpc/kernel/prom_init.c
1638
base = ALIGN_DOWN(base, PAGE_SIZE);
arch/powerpc/kernel/prom_init.c
1639
top = ALIGN(top, PAGE_SIZE);
arch/powerpc/kernel/prom_init.c
1858
base = alloc_down(size, PAGE_SIZE, 0);
arch/powerpc/kernel/prom_init.c
1949
base = alloc_down(size, PAGE_SIZE, 0);
arch/powerpc/kernel/prom_init.c
2498
if (room < PAGE_SIZE)
arch/powerpc/kernel/prom_init.c
2719
mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
arch/powerpc/kernel/rtas.c
2069
rtas_rmo_buf = memblock_phys_alloc_range(RTAS_USER_REGION_SIZE, PAGE_SIZE,
arch/powerpc/kernel/rtas.c
2073
PAGE_SIZE, &rtas_region);
arch/powerpc/kernel/security.c
154
seq_buf_init(&s, buf, PAGE_SIZE - 1);
arch/powerpc/kernel/security.c
185
seq_buf_init(&s, buf, PAGE_SIZE - 1);
arch/powerpc/kernel/security.c
208
seq_buf_init(&s, buf, PAGE_SIZE - 1);
arch/powerpc/kernel/secvar-sysfs.c
274
if (max_size > PAGE_SIZE)
arch/powerpc/kernel/secvar-sysfs.c
276
PAGE_SIZE, max_size);
arch/powerpc/kernel/setup-common.c
960
high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
arch/powerpc/kernel/setup_64.c
556
info->blocks_per_page = PAGE_SIZE / bsize;
arch/powerpc/kernel/setup_64.c
846
atom_size = PAGE_SIZE;
arch/powerpc/kernel/setup_64.c
854
atom_size = PAGE_SIZE;
arch/powerpc/kernel/traps.c
268
PAGE_SIZE / 1024, get_mmu_str(),
arch/powerpc/kernel/vdso.c
102
unsigned long vvar_size = VDSO_NR_PAGES * PAGE_SIZE;
arch/powerpc/kernel/vdso.c
253
pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
arch/powerpc/kernel/vmlinux.lds.S
30
#if STRICT_ALIGN_SIZE < PAGE_SIZE
arch/powerpc/kexec/core.c
101
pr_warn("Crash kernel base must be aligned to 0x%lx\n", PAGE_SIZE);
arch/powerpc/kexec/core_64.c
108
dest += PAGE_SIZE;
arch/powerpc/kexec/crash.c
452
size = mn->nr_pages * PAGE_SIZE;
arch/powerpc/kexec/elf_64.c
129
kbuf.buf_align = PAGE_SIZE;
arch/powerpc/kexec/elf_64.c
96
kbuf.buf_align = PAGE_SIZE;
arch/powerpc/kvm/book3s_64_mmu_hv.c
1072
n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
arch/powerpc/kvm/book3s_64_mmu_hv.c
1148
offset = gpa & (PAGE_SIZE - 1);
arch/powerpc/kvm/book3s_64_mmu_hv.c
1150
*nb_ret = PAGE_SIZE - offset;
arch/powerpc/kvm/book3s_64_mmu_hv.c
631
pte_size = PAGE_SIZE;
arch/powerpc/kvm/book3s_64_mmu_hv.c
657
if (psize < PAGE_SIZE)
arch/powerpc/kvm/book3s_64_mmu_hv.c
658
psize = PAGE_SIZE;
arch/powerpc/kvm/book3s_64_mmu_radix.c
1178
gpa += PAGE_SIZE;
arch/powerpc/kvm/book3s_64_mmu_radix.c
1398
gpa += PAGE_SIZE;
arch/powerpc/kvm/book3s_64_mmu_radix.c
318
unsigned long psize = PAGE_SIZE;
arch/powerpc/kvm/book3s_64_mmu_radix.c
431
unsigned long page_size = PAGE_SIZE;
arch/powerpc/kvm/book3s_64_mmu_radix.c
873
(gpa & (PUD_SIZE - PAGE_SIZE)) ==
arch/powerpc/kvm/book3s_64_mmu_radix.c
874
(hva & (PUD_SIZE - PAGE_SIZE))) {
arch/powerpc/kvm/book3s_64_mmu_radix.c
877
(gpa & (PMD_SIZE - PAGE_SIZE)) ==
arch/powerpc/kvm/book3s_64_mmu_radix.c
878
(hva & (PMD_SIZE - PAGE_SIZE))) {
arch/powerpc/kvm/book3s_64_mmu_radix.c
888
unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
arch/powerpc/kvm/book3s_64_vio.c
50
return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
arch/powerpc/kvm/book3s_64_vio.c
58
return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
arch/powerpc/kvm/book3s_hv.c
1000
to_addr |= (to & (PAGE_SIZE - 1));
arch/powerpc/kvm/book3s_hv.c
988
from_addr |= (from & (PAGE_SIZE - 1));
arch/powerpc/kvm/book3s_hv_rm_mmu.c
103
npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
257
host_pte_size = PAGE_SIZE;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
947
kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
978
kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
arch/powerpc/kvm/book3s_hv_uvmem.c
1003
vmf->address + PAGE_SIZE, PAGE_SHIFT,
arch/powerpc/kvm/book3s_hv_uvmem.c
451
memslot->npages * PAGE_SIZE,
arch/powerpc/kvm/book3s_hv_uvmem.c
625
for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {
arch/powerpc/kvm/book3s_hv_uvmem.c
644
if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
arch/powerpc/kvm/book3s_pr.c
63
#define HW_PAGE_SIZE PAGE_SIZE
arch/powerpc/kvm/e500_mmu.c
549
PAGE_SIZE)));
arch/powerpc/kvm/e500_mmu.c
773
num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
arch/powerpc/kvm/e500_mmu.c
774
cfg->array / PAGE_SIZE;
arch/powerpc/kvm/e500_mmu.c
822
(virt + (cfg->array & (PAGE_SIZE - 1)));
arch/powerpc/lib/code-patching.c
111
area = get_vm_area(PAGE_SIZE, 0);
arch/powerpc/lib/code-patching.c
147
free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
arch/powerpc/lib/code-patching.c
167
addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;
arch/powerpc/lib/code-patching.c
278
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/powerpc/lib/code-patching.c
347
flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
arch/powerpc/lib/code-patching.c
539
flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
arch/powerpc/lib/code-patching.c
557
plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);
arch/powerpc/lib/test-code-patching.c
359
buf = vzalloc(PAGE_SIZE * 8);
arch/powerpc/lib/test-code-patching.c
365
addr32 = buf + PAGE_SIZE;
arch/powerpc/lib/test-code-patching.c
378
addr64 = buf + PAGE_SIZE * 2;
arch/powerpc/lib/test-code-patching.c
390
addr32 = buf + PAGE_SIZE * 3;
arch/powerpc/lib/test-code-patching.c
402
addr32 = buf + PAGE_SIZE * 4 - 8;
arch/powerpc/lib/test-code-patching.c
415
addr64 = buf + PAGE_SIZE * 5 - 8;
arch/powerpc/lib/test-code-patching.c
427
addr32 = buf + PAGE_SIZE * 6 - 12;
arch/powerpc/lib/test-code-patching.c
446
buf = vzalloc(PAGE_SIZE);
arch/powerpc/mm/book3s32/mmu.c
364
n_hpteg = total_memory / (PAGE_SIZE * 8);
arch/powerpc/mm/book3s64/hash_64k.c
89
subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
arch/powerpc/mm/book3s64/hash_pgtable.c
177
if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
arch/powerpc/mm/book3s64/hash_tlb.c
197
start = ALIGN_DOWN(start, PAGE_SIZE);
arch/powerpc/mm/book3s64/hash_tlb.c
198
end = ALIGN(end, PAGE_SIZE);
arch/powerpc/mm/book3s64/hash_tlb.c
211
for (; start < end; start += PAGE_SIZE) {
arch/powerpc/mm/book3s64/hash_tlb.c
250
addr += PAGE_SIZE;
arch/powerpc/mm/book3s64/hash_utils.c
431
kfence_pool = memblock_phys_alloc_range(KFENCE_POOL_SIZE, PAGE_SIZE,
arch/powerpc/mm/book3s64/radix_pgtable.c
1031
if (altmap && altmap_cross_boundary(altmap, addr, PAGE_SIZE))
arch/powerpc/mm/book3s64/radix_pgtable.c
1034
p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
arch/powerpc/mm/book3s64/radix_pgtable.c
1036
p = vmemmap_alloc_block_buf(PAGE_SIZE, node, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1071
pud = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
arch/powerpc/mm/book3s64/radix_pgtable.c
108
ptep = early_alloc_pgtable(PAGE_SIZE, nid,
arch/powerpc/mm/book3s64/radix_pgtable.c
1088
pmd = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
arch/powerpc/mm/book3s64/radix_pgtable.c
1104
pte = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
arch/powerpc/mm/book3s64/radix_pgtable.c
1141
start = ALIGN_DOWN(start, PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1214
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1215
next = addr + PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
1248
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1264
map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
1291
pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1300
vmemmap_verify(pte, node, map_addr, map_addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1352
next = addr + PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
1368
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1375
pte = radix__vmemmap_populate_address(addr + PAGE_SIZE, node, NULL, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1380
next = addr + 2 * PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
1393
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1396
next = addr + PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
1403
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1406
next = addr + PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
198
start = ALIGN_DOWN(start, PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
204
for (idx = start; idx < end; idx += PAGE_SIZE) {
arch/powerpc/mm/book3s64/radix_pgtable.c
240
for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
arch/powerpc/mm/book3s64/radix_pgtable.c
241
end = start + PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
312
max_mapping_size = PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
314
start = ALIGN(start, PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
315
end = ALIGN_DOWN(end, PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
335
mapping_size = PAGE_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c
385
kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
403
-1, PAGE_KERNEL, PAGE_SIZE))
arch/powerpc/mm/book3s64/radix_pgtable.c
748
unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
750
return !vmemmap_populated(start, PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
796
next = (addr + PAGE_SIZE) & PAGE_MASK;
arch/powerpc/mm/book3s64/radix_pgtable.c
88
pudp = early_alloc_pgtable(PAGE_SIZE, nid,
arch/powerpc/mm/book3s64/radix_pgtable.c
98
pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
arch/powerpc/mm/book3s64/radix_tlb.c
1326
if (PAGE_SIZE == 0x1000) {
arch/powerpc/mm/book3s64/radix_tlb.c
1338
_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
arch/powerpc/mm/book3s64/radix_tlb.c
1353
_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
arch/powerpc/mm/book3s64/radix_tlb.c
1356
addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
arch/powerpc/mm/book3s64/slice.c
334
unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
arch/powerpc/mm/book3s64/subpage_prot.c
44
++j, addr += PAGE_SIZE)
arch/powerpc/mm/book3s64/subpage_prot.c
79
addr += PAGE_SIZE;
arch/powerpc/mm/cacheflush.c
135
clean_dcache_range(addr, addr + PAGE_SIZE);
arch/powerpc/mm/cacheflush.c
148
invalidate_icache_range(addr, addr + PAGE_SIZE);
arch/powerpc/mm/cacheflush.c
161
__flush_dcache_icache(addr + i * PAGE_SIZE);
arch/powerpc/mm/cacheflush.c
164
void *start = kmap_local_folio(folio, i * PAGE_SIZE);
arch/powerpc/mm/cacheflush.c
172
flush_dcache_icache_phys((pfn + i) * PAGE_SIZE);
arch/powerpc/mm/cacheflush.c
87
unsigned long nb = PAGE_SIZE / bytes;
arch/powerpc/mm/dma-noncoherent.c
62
size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
arch/powerpc/mm/dma-noncoherent.c
65
int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
arch/powerpc/mm/dma-noncoherent.c
79
seg_size = min((size_t)PAGE_SIZE, size - cur_size);
arch/powerpc/mm/fault.c
620
if (regs->dar < PAGE_SIZE)
arch/powerpc/mm/fault.c
637
regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
arch/powerpc/mm/hugetlbpage.c
144
if (size <= PAGE_SIZE || !is_power_of_2(size))
arch/powerpc/mm/init_64.c
155
next = vmemmap_alloc_block(PAGE_SIZE, node);
arch/powerpc/mm/init_64.c
160
num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
arch/powerpc/mm/init_64.c
361
if (page_size < PAGE_SIZE) {
arch/powerpc/mm/ioremap.c
55
for (i = 0; i < size; i += PAGE_SIZE) {
arch/powerpc/mm/ioremap_32.c
73
err = early_ioremap_range(ioremap_bot - size - PAGE_SIZE, p, size, prot);
arch/powerpc/mm/ioremap_32.c
76
ioremap_bot -= size + PAGE_SIZE;
arch/powerpc/mm/ioremap_64.c
41
ioremap_bot += size + PAGE_SIZE;
arch/powerpc/mm/kasan/8xx.c
30
pte_t pte = pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block + i * PAGE_SIZE)), PAGE_KERNEL));
arch/powerpc/mm/kasan/8xx.c
65
for (; k_cur < k_end; k_cur += PAGE_SIZE) {
arch/powerpc/mm/kasan/book3s_32.c
38
phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
arch/powerpc/mm/kasan/book3s_32.c
50
for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
arch/powerpc/mm/kasan/init_32.c
68
block = memblock_alloc(k_end - k_start, PAGE_SIZE);
arch/powerpc/mm/kasan/init_32.c
72
for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
arch/powerpc/mm/kasan/init_32.c
88
for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
arch/powerpc/mm/kasan/init_book3e_64.c
126
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
arch/powerpc/mm/kasan/init_book3e_64.c
74
k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
arch/powerpc/mm/kasan/init_book3e_64.c
75
k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
arch/powerpc/mm/kasan/init_book3e_64.c
77
va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE);
arch/powerpc/mm/kasan/init_book3e_64.c
78
for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
arch/powerpc/mm/kasan/init_book3s_64.c
30
k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
arch/powerpc/mm/kasan/init_book3s_64.c
31
k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
arch/powerpc/mm/kasan/init_book3s_64.c
33
va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE);
arch/powerpc/mm/kasan/init_book3s_64.c
34
for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
arch/powerpc/mm/kasan/init_book3s_64.c
91
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
arch/powerpc/mm/mem.c
128
high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
arch/powerpc/mm/mem.c
247
for (; v < end; v += PAGE_SIZE)
arch/powerpc/mm/mem.c
41
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
arch/powerpc/mm/nohash/book3e_pgtable.c
41
for (i = 0; i < page_size; i += PAGE_SIZE)
arch/powerpc/mm/nohash/tlb.c
320
if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
arch/powerpc/mm/pageattr.c
70
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/powerpc/mm/pageattr.c
77
unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
arch/powerpc/mm/pageattr.c
78
unsigned long size = numpages * PAGE_SIZE;
arch/powerpc/mm/pgtable.c
226
addr += PAGE_SIZE;
arch/powerpc/mm/pgtable.c
245
flush_tlb_kernel_range(va, va + PAGE_SIZE);
arch/powerpc/mm/pgtable.c
35
#define PGD_ALIGN PAGE_SIZE
arch/powerpc/mm/pgtable.c
368
pdsize = PAGE_SIZE;
arch/powerpc/mm/pgtable.c
380
pte = __pte(pte_val(pte) + ((unsigned long long)pdsize / PAGE_SIZE << PFN_PTE_SHIFT));
arch/powerpc/mm/pgtable_32.c
105
for (; s < top; s += PAGE_SIZE) {
arch/powerpc/mm/pgtable_32.c
108
v += PAGE_SIZE;
arch/powerpc/mm/pgtable_32.c
109
p += PAGE_SIZE;
arch/powerpc/mm/ptdump/hashpagetable.c
387
addr = start + i * PAGE_SIZE;
arch/powerpc/mm/ptdump/ptdump.c
198
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
arch/powerpc/perf/hv-24x7.c
847
BUILD_BUG_ON(PAGE_SIZE % 4096);
arch/powerpc/perf/hv-gpci.c
184
if (*n >= PAGE_SIZE) {
arch/powerpc/perf/hv-gpci.c
554
if (n >= PAGE_SIZE) {
arch/powerpc/platforms/book3s/vas-api.c
516
if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
arch/powerpc/platforms/book3s/vas-api.c
518
(vma->vm_end - vma->vm_start), PAGE_SIZE);
arch/powerpc/platforms/cell/spufs/file.c
31
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
arch/powerpc/platforms/cell/spufs/inode.c
701
sb->s_blocksize = PAGE_SIZE;
arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
32
for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
46
for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
arch/powerpc/platforms/cell/spufs/spufs.h
26
#define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE
arch/powerpc/platforms/powermac/bootx_init.c
408
mem_end = ALIGN(mem_end, PAGE_SIZE);
arch/powerpc/platforms/powermac/bootx_init.c
572
ptr < (unsigned long)bi + space; ptr += PAGE_SIZE)
arch/powerpc/platforms/powermac/udbg_scc.c
116
sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
arch/powerpc/platforms/powernv/opal-core.c
331
count = oc_conf->opalcorebuf_sz / PAGE_SIZE;
arch/powerpc/platforms/powernv/opal-fadump.c
677
fadump_conf->max_copy_size = ALIGN_DOWN(U32_MAX, PAGE_SIZE);
arch/powerpc/platforms/powernv/opal-flash.c
375
addr += PAGE_SIZE;
arch/powerpc/platforms/powernv/opal-flash.c
376
size -= PAGE_SIZE;
arch/powerpc/platforms/powernv/opal-flash.c
420
addr += PAGE_SIZE;
arch/powerpc/platforms/powernv/opal-flash.c
421
size -= PAGE_SIZE;
arch/powerpc/platforms/powernv/opal-memory-errors.c
49
for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
arch/powerpc/platforms/powernv/opal.c
1137
sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
arch/powerpc/platforms/powernv/opal.c
1145
uint64_t length = min(vmalloc_size, PAGE_SIZE);
arch/powerpc/platforms/powernv/opal.c
1154
next = kzalloc(PAGE_SIZE, GFP_KERNEL);
arch/powerpc/platforms/powernv/pci-ioda.c
1080
if (table_size < PAGE_SIZE)
arch/powerpc/platforms/powernv/pci-ioda.c
1081
table_size = PAGE_SIZE;
arch/powerpc/platforms/powernv/pci-ioda.c
2252
return PAGE_SIZE;
arch/powerpc/platforms/powernv/vas-window.c
41
*len = PAGE_SIZE;
arch/powerpc/platforms/powernv/vas.h
99
#define VAS_UWC_SIZE PAGE_SIZE
arch/powerpc/platforms/pseries/cmm.c
100
for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
arch/powerpc/platforms/pseries/cmm.c
120
for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
arch/powerpc/platforms/pseries/cmm.c
239
signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
arch/powerpc/platforms/pseries/cmm.c
246
PAGE_SIZE);
arch/powerpc/platforms/pseries/htmdump.c
104
PAGE_SIZE, page);
arch/powerpc/platforms/pseries/htmdump.c
112
available = PAGE_SIZE;
arch/powerpc/platforms/pseries/htmdump.c
242
PAGE_SIZE, 0);
arch/powerpc/platforms/pseries/htmdump.c
288
PAGE_SIZE, 0);
arch/powerpc/platforms/pseries/htmdump.c
412
htm_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/powerpc/platforms/pseries/htmdump.c
440
htm_status_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/powerpc/platforms/pseries/htmdump.c
447
htm_info_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/powerpc/platforms/pseries/htmdump.c
454
htm_caps_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/powerpc/platforms/pseries/htmdump.c
94
page = ALIGN_DOWN(*ppos, PAGE_SIZE);
arch/powerpc/platforms/pseries/htmdump.c
95
offset = (*ppos) % PAGE_SIZE;
arch/powerpc/platforms/pseries/hvcserver.c
136
memset(pi_buff, 0x00, PAGE_SIZE);
arch/powerpc/platforms/pseries/ibmebus.c
419
return of_device_modalias(dev, buf, PAGE_SIZE);
arch/powerpc/platforms/pseries/iommu.c
174
unsigned long cb = ALIGN(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE);
arch/powerpc/platforms/pseries/lpar.c
1774
for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
arch/powerpc/platforms/pseries/lpar.c
1775
for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
arch/powerpc/platforms/pseries/lparcfg.c
526
maxmem += hugetlb_total_pages() * PAGE_SIZE;
arch/powerpc/platforms/pseries/papr_scm.c
1099
seq_buf_init(&s, buf, PAGE_SIZE);
arch/powerpc/platforms/pseries/papr_scm.c
1130
seq_buf_init(&s, buf, PAGE_SIZE);
arch/powerpc/platforms/pseries/rtas-fadump.c
608
fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE);
arch/powerpc/platforms/pseries/rtas-work-area.c
24
RTAS_WORK_AREA_ARENA_ALIGN = PAGE_SIZE,
arch/powerpc/platforms/pseries/svm.c
68
#define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)
arch/powerpc/platforms/pseries/vio.c
489
if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
arch/powerpc/platforms/pseries/vio.c
498
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
arch/powerpc/platforms/pseries/vio.c
512
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
arch/powerpc/sysdev/fsl_pci.c
340
if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
arch/powerpc/sysdev/fsl_pci.c
342
mem += PAGE_SIZE;
arch/powerpc/sysdev/indirect_pci.c
165
mbase = ioremap(base, PAGE_SIZE);
arch/powerpc/sysdev/indirect_pci.c
168
mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
arch/powerpc/sysdev/xive/spapr.c
670
char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/powerpc/sysdev/xive/spapr.c
676
memset(buf, 0, PAGE_SIZE);
arch/riscv/include/asm/elf.h
45
#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
arch/riscv/include/asm/fixmap.h
30
FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
arch/riscv/include/asm/kexec.h
22
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
arch/riscv/include/asm/kfence.h
26
local_flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
arch/riscv/include/asm/page.h
49
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
arch/riscv/include/asm/page.h
51
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
arch/riscv/include/asm/pgtable-64.h
51
#define PTRS_PER_P4D (PAGE_SIZE / sizeof(p4d_t))
arch/riscv/include/asm/pgtable-64.h
60
#define PTRS_PER_PUD (PAGE_SIZE / sizeof(pud_t))
arch/riscv/include/asm/pgtable-64.h
70
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
arch/riscv/include/asm/pgtable.h
1250
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
arch/riscv/include/asm/pgtable.h
1291
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/riscv/include/asm/pgtable.h
33
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
arch/riscv/include/asm/pgtable.h
35
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
arch/riscv/include/asm/pgtable.h
539
PAGE_SIZE)
arch/riscv/include/asm/pgtable.h
592
local_flush_tlb_page(address + nr * PAGE_SIZE);
arch/riscv/include/asm/thread_info.h
22
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/riscv/kernel/crash_dump.c
20
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/riscv/kernel/efi.c
54
create_pgd_mapping(mm->pgd, md->virt_addr + i * PAGE_SIZE,
arch/riscv/kernel/efi.c
55
md->phys_addr + i * PAGE_SIZE,
arch/riscv/kernel/efi.c
56
PAGE_SIZE, prot);
arch/riscv/kernel/hibernate.c
190
} while (dst_ptep++, src_ptep++, start += PAGE_SIZE, start < end);
arch/riscv/kernel/hibernate.c
392
end = start + PAGE_SIZE;
arch/riscv/kernel/kexec_elf.c
97
kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
arch/riscv/kernel/machine_kexec_file.c
335
kbuf.buf_align = PAGE_SIZE;
arch/riscv/kernel/machine_kexec_file.c
357
kbuf.buf_align = PAGE_SIZE;
arch/riscv/kernel/patch.c
114
bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
arch/riscv/kernel/patch.c
121
if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
arch/riscv/kernel/patch.c
141
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
arch/riscv/kernel/patch.c
191
size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
arch/riscv/kernel/patch.c
226
size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
arch/riscv/kernel/patch.c
69
bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
arch/riscv/kernel/patch.c
75
if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
arch/riscv/kernel/patch.c
87
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
arch/riscv/kernel/process.c
112
sp -= get_random_u32_below(PAGE_SIZE);
arch/riscv/kernel/sbi.c
604
if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
arch/riscv/kernel/sbi.c
605
num_bytes = PAGE_SIZE - offset_in_page(bytes);
arch/riscv/kernel/sbi.c
633
if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
arch/riscv/kernel/sbi.c
634
num_bytes = PAGE_SIZE - offset_in_page(bytes);
arch/riscv/kernel/usercfi.c
285
if (addr && (addr & (PAGE_SIZE - 1)))
arch/riscv/kvm/mmu.c
276
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
arch/riscv/kvm/mmu.c
299
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
arch/riscv/kvm/mmu.c
319
size = memslot->npages * PAGE_SIZE;
arch/riscv/kvm/mmu.c
369
int size = PAGE_SIZE;
arch/riscv/kvm/mmu.c
445
return PAGE_SIZE;
arch/riscv/kvm/mmu.c
496
vma_pagesize = PAGE_SIZE;
arch/riscv/kvm/mmu.c
514
vma_pagesize != PAGE_SIZE) {
arch/riscv/kvm/mmu.c
542
if (!logging && (vma_pagesize == PAGE_SIZE))
arch/riscv/kvm/mmu.c
57
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
arch/riscv/kvm/mmu.c
61
for (addr = gpa; addr < end; addr += PAGE_SIZE) {
arch/riscv/mm/fault.c
105
if (addr < PAGE_SIZE)
arch/riscv/mm/fault.c
41
current->comm, PAGE_SIZE / SZ_1K, VA_BITS,
arch/riscv/mm/hugetlbpage.c
160
addr += PAGE_SIZE;
arch/riscv/mm/hugetlbpage.c
182
flush_tlb_range(&vma, addr, addr + (PAGE_SIZE * pte_num));
arch/riscv/mm/hugetlbpage.c
299
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
arch/riscv/mm/hugetlbpage.c
342
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
arch/riscv/mm/init.c
1301
kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
arch/riscv/mm/init.c
1328
create_linear_mapping_range(kfence_pool, kfence_pool + KFENCE_POOL_SIZE, PAGE_SIZE, NULL);
arch/riscv/mm/init.c
1556
.alignment = PAGE_SIZE,
arch/riscv/mm/init.c
1657
next = (addr + PAGE_SIZE) & PAGE_MASK;
arch/riscv/mm/init.c
1668
free_vmemmap_storage(pte_page(pte), PAGE_SIZE, altmap);
arch/riscv/mm/init.c
285
max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
arch/riscv/mm/init.c
361
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
arch/riscv/mm/init.c
433
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/init.c
454
BUG_ON(sz != PAGE_SIZE);
arch/riscv/mm/init.c
464
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
arch/riscv/mm/init.c
474
static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
arch/riscv/mm/init.c
484
static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
arch/riscv/mm/init.c
518
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/init.c
548
memset(ptep, 0, PAGE_SIZE);
arch/riscv/mm/init.c
583
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/init.c
621
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/init.c
650
memset(nextp, 0, PAGE_SIZE);
arch/riscv/mm/init.c
676
memset(nextp, 0, PAGE_SIZE);
arch/riscv/mm/init.c
72
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
arch/riscv/mm/init.c
733
memset(nextp, 0, PAGE_SIZE);
arch/riscv/mm/init.c
745
return PAGE_SIZE;
arch/riscv/mm/init.c
759
return PAGE_SIZE;
arch/riscv/mm/init.c
906
memset(early_pg_dir, 0, PAGE_SIZE);
arch/riscv/mm/init.c
912
memset(early_pg_dir, 0, PAGE_SIZE);
arch/riscv/mm/init.c
913
memset(early_p4d, 0, PAGE_SIZE);
arch/riscv/mm/init.c
914
memset(early_pud, 0, PAGE_SIZE);
arch/riscv/mm/init.c
915
memset(early_pmd, 0, PAGE_SIZE);
arch/riscv/mm/init.c
985
BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
arch/riscv/mm/kasan_init.c
119
p = memblock_alloc_or_panic(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
arch/riscv/mm/kasan_init.c
35
p = memblock_alloc_or_panic(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
arch/riscv/mm/kasan_init.c
388
p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/kasan_init.c
408
p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/kasan_init.c
427
p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/kasan_init.c
43
phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/kasan_init.c
45
memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
arch/riscv/mm/kasan_init.c
47
} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
arch/riscv/mm/kasan_init.c
531
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
arch/riscv/mm/kasan_init.c
57
p = memblock_alloc_or_panic(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
arch/riscv/mm/kasan_init.c
88
p = memblock_alloc_or_panic(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
arch/riscv/mm/pageattr.c
267
unsigned long end = start + PAGE_SIZE * numpages;
arch/riscv/mm/pageattr.c
296
lm_end = lm_start + PAGE_SIZE;
arch/riscv/mm/pageattr.c
427
unsigned long size = PAGE_SIZE * numpages;
arch/riscv/mm/ptdump.c
275
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
arch/riscv/mm/tlbflush.c
154
__flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
arch/riscv/mm/tlbflush.c
167
addr, PAGE_SIZE, PAGE_SIZE);
arch/riscv/mm/tlbflush.c
176
stride_size = PAGE_SIZE;
arch/riscv/mm/tlbflush.c
195
stride_size = PAGE_SIZE;
arch/riscv/mm/tlbflush.c
206
start, end - start, PAGE_SIZE);
arch/riscv/mm/tlbflush.c
240
0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
arch/riscv/mm/tlbflush.c
81
local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
arch/s390/boot/decompressor.c
76
return ALIGN(free_mem_end_ptr, PAGE_SIZE);
arch/s390/boot/ipl_parm.c
263
memory_limit = round_down(memparse(val, NULL), PAGE_SIZE);
arch/s390/boot/printk.c
18
char __bootdata(boot_rb)[PAGE_SIZE * 2];
arch/s390/boot/sclp_early_core.c
6
static char __sclp_early_sccb[EXT_SCCB_READ_SCP] __aligned(PAGE_SIZE);
arch/s390/boot/startup.c
331
pages = ident_map_size / PAGE_SIZE;
arch/s390/boot/startup.c
416
__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
arch/s390/boot/startup.c
425
pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
arch/s390/boot/startup.c
431
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
arch/s390/boot/startup.c
434
vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
arch/s390/boot/startup.c
435
ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
arch/s390/boot/startup.c
50
static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
arch/s390/boot/startup.c
604
amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
arch/s390/boot/uv.c
36
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
arch/s390/boot/vmem.c
236
unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
arch/s390/boot/vmem.c
249
pte = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
arch/s390/boot/vmem.c
274
addr = physmem_alloc(RR_VMEM, size, size, size == PAGE_SIZE);
arch/s390/boot/vmem.c
340
for (; addr < end; addr += PAGE_SIZE, pte++) {
arch/s390/boot/vmem.c
344
entry = __pte(resolve_pa_may_alloc(addr, PAGE_SIZE, mode));
arch/s390/boot/vmem.c
541
pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
arch/s390/crypto/aes_s390.c
556
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
arch/s390/crypto/des_s390.c
308
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
arch/s390/crypto/paes_s390.c
926
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
arch/s390/crypto/prng.c
161
get_random_bytes(pg, PAGE_SIZE / 2);
arch/s390/crypto/prng.c
164
int offset = (PAGE_SIZE / 2) + (n * 4) - 4;
arch/s390/crypto/prng.c
169
cpacf_klmd(CPACF_KLMD_SHA_512, pblock, pg, PAGE_SIZE);
arch/s390/crypto/prng.c
178
memzero_explicit(pg, PAGE_SIZE);
arch/s390/hypfs/hypfs_diag.c
176
buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
arch/s390/hypfs/hypfs_diag.c
180
d204 = PTR_ALIGN(base + sizeof(d204->hdr), PAGE_SIZE) - sizeof(d204->hdr);
arch/s390/hypfs/hypfs_diag.c
187
d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
arch/s390/hypfs/hypfs_diag.c
69
diag204_buf = __vmalloc_node(array_size(*pages, PAGE_SIZE),
arch/s390/hypfs/hypfs_diag.c
70
PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
arch/s390/hypfs/hypfs_sprp.c
63
*size = PAGE_SIZE;
arch/s390/hypfs/hypfs_sprp.c
92
if (copy_from_user(data, udata, PAGE_SIZE))
arch/s390/hypfs/hypfs_sprp.c
99
if (copy_to_user(udata, data, PAGE_SIZE)) {
arch/s390/hypfs/inode.c
247
sb->s_blocksize = PAGE_SIZE;
arch/s390/include/asm/boot_data.h
19
extern char boot_rb[PAGE_SIZE * 2];
arch/s390/include/asm/chsc.h
70
} __packed __aligned(PAGE_SIZE);
arch/s390/include/asm/clp.h
6
#define CLP_BLK_SIZE PAGE_SIZE
arch/s390/include/asm/debug.h
422
static char VNAME(var, data)[EARLY_PAGES][PAGE_SIZE] __initdata; \
arch/s390/include/asm/eadm.h
78
} __packed __aligned(PAGE_SIZE);
arch/s390/include/asm/elf.h
190
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/s390/include/asm/idals.h
139
nr_chunks = (PAGE_SIZE << page_order) >> IDA_SIZE_SHIFT;
arch/s390/include/asm/idals.h
175
nr_chunks = (PAGE_SIZE << ib->page_order) >> IDA_SIZE_SHIFT;
arch/s390/include/asm/idals.h
264
if (ib->size > (PAGE_SIZE << ib->page_order))
arch/s390/include/asm/ipl.h
27
char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)];
arch/s390/include/asm/ipl.h
29
} __packed __aligned(PAGE_SIZE);
arch/s390/include/asm/ipl.h
56
#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - DIAG308_SCPDATA_OFFSET)
arch/s390/include/asm/kvm_host_types.h
111
((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
arch/s390/include/asm/maccess.h
7
#define MEMCPY_REAL_SIZE PAGE_SIZE
arch/s390/include/asm/page-states.h
43
paddr += PAGE_SIZE;
arch/s390/include/asm/page.h
287
#define AMODE31_SIZE (3 * PAGE_SIZE)
arch/s390/include/asm/page.h
46
#define clear_page(page) memset((page), 0, PAGE_SIZE)
arch/s390/include/asm/pgtable.h
1335
entry = __pte(pte_val(entry) + PAGE_SIZE);
arch/s390/include/asm/processor.h
125
#define TASK_SIZE_MAX (-PAGE_SIZE)
arch/s390/include/asm/processor.h
127
#define VDSO_BASE (STACK_TOP + PAGE_SIZE)
arch/s390/include/asm/processor.h
129
#define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE)
arch/s390/include/asm/processor.h
130
#define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE)
arch/s390/include/asm/qdio.h
83
} __packed __aligned(PAGE_SIZE);
arch/s390/include/asm/sclp.h
12
#define EARLY_SCCB_SIZE PAGE_SIZE
arch/s390/include/asm/sclp.h
15
#define EXT_SCCB_READ_SCP (3 * PAGE_SIZE)
arch/s390/include/asm/sclp.h
17
#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE)
arch/s390/include/asm/thread_info.h
22
#define BOOT_STACK_SIZE (PAGE_SIZE << 2)
arch/s390/include/asm/thread_info.h
23
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/s390/include/asm/tlb.h
100
__tlb_adjust_range(tlb, address, PAGE_SIZE);
arch/s390/include/asm/tlb.h
119
__tlb_adjust_range(tlb, address, PAGE_SIZE);
arch/s390/include/asm/tlb.h
137
__tlb_adjust_range(tlb, address, PAGE_SIZE);
arch/s390/include/asm/tlb.h
81
__tlb_adjust_range(tlb, address, PAGE_SIZE);
arch/s390/include/asm/uv.h
432
static_assert(sizeof(struct uv_secret_list) == PAGE_SIZE);
arch/s390/include/asm/vmlinux.lds.h
15
. = ALIGN(PAGE_SIZE); \
arch/s390/include/asm/vmlinux.lds.h
28
. = ALIGN(PAGE_SIZE); \
arch/s390/kernel/abs_lowcore.c
26
addr -= PAGE_SIZE;
arch/s390/kernel/abs_lowcore.c
32
addr += PAGE_SIZE;
arch/s390/kernel/abs_lowcore.c
33
phys += PAGE_SIZE;
arch/s390/kernel/abs_lowcore.c
45
addr += PAGE_SIZE;
arch/s390/kernel/cert_store.c
509
return round_up(vcssb->max_single_vcb_length, PAGE_SIZE);
arch/s390/kernel/crash_dump.c
504
phdr->p_align = PAGE_SIZE;
arch/s390/kernel/crash_dump.c
562
phdr->p_align = PAGE_SIZE;
arch/s390/kernel/debug.c
1015
> (PAGE_SIZE - id->entry_size)) {
arch/s390/kernel/debug.c
1566
memset(id->areas[i][j], 0, PAGE_SIZE);
arch/s390/kernel/debug.c
1572
memset(id->areas[area][i], 0, PAGE_SIZE);
arch/s390/kernel/debug.c
194
areas[i][j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
arch/s390/kernel/debug.c
352
memcpy(rc->areas[i][j], in->areas[i][j], PAGE_SIZE);
arch/s390/kernel/debug.c
448
if (p_info->act_entry > (PAGE_SIZE - id->entry_size)) {
arch/s390/kernel/debug.c
504
p_info->act_entry = rounddown(PAGE_SIZE, id->entry_size) - id->entry_size;
arch/s390/kernel/diag/diag.c
225
if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, PAGE_SIZE)))
arch/s390/kernel/diag/diag.c
295
, "=m" (*(struct { char buf[PAGE_SIZE]; } *)ptr)
arch/s390/kernel/diag/diag310.c
240
if (put_user(pages * PAGE_SIZE, argp))
arch/s390/kernel/diag/diag310.c
263
data_size = pages * PAGE_SIZE;
arch/s390/kernel/diag/diag310.c
264
buf = __vmalloc_node(data_size, PAGE_SIZE, GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT,
arch/s390/kernel/early.c
88
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
arch/s390/kernel/ipl.c
2347
BUILD_BUG_ON(sizeof(struct ipl_parameter_block) != PAGE_SIZE);
arch/s390/kernel/ipl.c
464
PAGE_SIZE);
arch/s390/kernel/ipl.c
466
DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_fcp, ipl_block.fcp, PAGE_SIZE);
arch/s390/kernel/ipl.c
474
DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_nvme, ipl_block.nvme, PAGE_SIZE);
arch/s390/kernel/ipl.c
482
DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_eckd, ipl_block.eckd, PAGE_SIZE);
arch/s390/kernel/kprobes.c
40
page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
arch/s390/kernel/lgr.c
50
static char lgr_page[PAGE_SIZE] __aligned(PAGE_SIZE);
arch/s390/kernel/machine_kexec.c
143
for (addr = begin; addr < end; addr += PAGE_SIZE)
arch/s390/kernel/machine_kexec_file.c
137
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
arch/s390/kernel/machine_kexec_file.c
163
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
arch/s390/kernel/machine_kexec_file.c
194
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
arch/s390/kernel/module.c
478
numpages = DIV_ROUND_UP(size, PAGE_SIZE);
arch/s390/kernel/module.c
479
start = execmem_alloc(EXECMEM_FTRACE, numpages * PAGE_SIZE);
arch/s390/kernel/os_info.c
155
if (addr == 0 || addr % PAGE_SIZE)
arch/s390/kernel/os_info.c
76
BUILD_BUG_ON(sizeof(struct os_info) != PAGE_SIZE);
arch/s390/kernel/perf_cpum_cf.c
88
unsigned char start[PAGE_SIZE]; /* Counter set at event add */
arch/s390/kernel/perf_cpum_cf.c
89
unsigned char stop[PAGE_SIZE]; /* Counter set at event delete */
arch/s390/kernel/perf_cpum_cf.c
90
unsigned char data[PAGE_SIZE]; /* Counter set at /dev/hwctr */
arch/s390/kernel/perf_cpum_sf.c
136
ret += PAGE_SIZE;
arch/s390/kernel/perf_cpum_sf.c
51
#define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8)
arch/s390/kernel/perf_pai.c
1147
.area_size = PAGE_SIZE,
arch/s390/kernel/process.c
233
sp -= get_random_u32_below(PAGE_SIZE);
arch/s390/kernel/setup.c
159
char __bootdata(boot_rb)[PAGE_SIZE * 2];
arch/s390/kernel/setup.c
393
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
arch/s390/kernel/setup.c
816
vmms = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/s390/kernel/setup.c
819
memblock_free(vmms, PAGE_SIZE);
arch/s390/kernel/skey.c
38
address += PAGE_SIZE;
arch/s390/kernel/smp.c
624
page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
arch/s390/kernel/smp.c
627
PAGE_SIZE, 1UL << 31);
arch/s390/kernel/smp.c
647
memblock_free(page, PAGE_SIZE);
arch/s390/kernel/sthyi.c
319
diag204_buf = __vmalloc_node(array_size(pages, PAGE_SIZE),
arch/s390/kernel/sthyi.c
320
PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
arch/s390/kernel/sthyi.c
450
memset(dst, 0, PAGE_SIZE);
arch/s390/kernel/sthyi.c
461
memset(dst, 0, PAGE_SIZE);
arch/s390/kernel/sthyi.c
523
memcpy(dst, sthyi_cache.info, PAGE_SIZE);
arch/s390/kernel/sthyi.c
551
if (copy_to_user(buffer, info, PAGE_SIZE))
arch/s390/kernel/sysinfo.c
475
return simple_read_from_buffer(buf, size, ppos, file->private_data, PAGE_SIZE);
arch/s390/kernel/topology.c
573
tl_info = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/s390/kernel/uv.c
142
rc = uv_destroy(folio_to_phys(folio) + i * PAGE_SIZE);
arch/s390/kernel/uv.c
192
rc = uv_convert_from_secure(folio_to_phys(folio) + i * PAGE_SIZE);
arch/s390/kernel/vdso.c
116
return vdso_text_size() + VDSO_NR_PAGES * PAGE_SIZE;
arch/s390/kernel/vdso.c
125
addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
arch/s390/kernel/vdso.c
139
pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
arch/s390/kernel/vdso.c
66
vdso_text_start = vvar_start + VDSO_NR_PAGES * PAGE_SIZE;
arch/s390/kernel/vdso.c
73
do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
arch/s390/kvm/dat.c
266
pt->ptes[i].val = init.val | i * PAGE_SIZE;
arch/s390/kvm/dat.c
817
for ( ; addr < end; addr += PAGE_SIZE)
arch/s390/kvm/dat.h
344
static_assert(sizeof(struct page_table) == PAGE_SIZE);
arch/s390/kvm/diag.c
44
end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
arch/s390/kvm/diag.c
48
|| start < 2 * PAGE_SIZE)
arch/s390/kvm/diag.c
58
if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
arch/s390/kvm/diag.c
70
if (end > prefix + PAGE_SIZE)
arch/s390/kvm/gaccess.c
1018
fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
arch/s390/kvm/gaccess.c
1240
fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
arch/s390/kvm/gaccess.c
1303
ptr = asce.rsto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
1342
ptr = table.pgd.rto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
1359
ptr = table.p4d.rto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
1382
ptr = table.pud.fc0.sto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
1402
ptr = table.pmd.fc0.pto * (PAGE_SIZE / 2);
arch/s390/kvm/gaccess.c
489
ptr = asce.rsto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
534
ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
arch/s390/kvm/gaccess.c
552
ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
arch/s390/kvm/gaccess.c
580
ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
arch/s390/kvm/gaccess.c
603
ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
arch/s390/kvm/gaccess.c
787
while (min(PAGE_SIZE - offset, len) > 0) {
arch/s390/kvm/gaccess.c
788
fragment_len = min(PAGE_SIZE - offset, len);
arch/s390/kvm/gaccess.c
900
if (KVM_BUG_ON((len + context.offset) > PAGE_SIZE, kvm))
arch/s390/kvm/gaccess.c
916
while (min(PAGE_SIZE - offset, len) > 0) {
arch/s390/kvm/gaccess.c
917
fragment_len = min(PAGE_SIZE - offset, len);
arch/s390/kvm/gaccess.c
976
fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
arch/s390/kvm/gaccess.h
29
if (gra < 2 * PAGE_SIZE)
arch/s390/kvm/gaccess.h
31
else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
arch/s390/kvm/gmap.c
1063
unsigned long align = PAGE_SIZE;
arch/s390/kvm/gmap.c
1154
#define CRST_TABLE_PAGES (_CRST_TABLE_SIZE / PAGE_SIZE)
arch/s390/kvm/gmap.c
944
for ( ; cur < end; cur += PAGE_SIZE)
arch/s390/kvm/intercept.c
453
memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
arch/s390/kvm/intercept.c
455
r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
arch/s390/kvm/intercept.c
666
kvm_s390_pv_convert_to_secure(vcpu->kvm, kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
arch/s390/kvm/interrupt.c
2725
bit = bit_nr + (addr % PAGE_SIZE) * 8;
arch/s390/kvm/interrupt.c
2728
WARN_ON_ONCE(bit > (PAGE_SIZE * BITS_PER_BYTE - 1));
arch/s390/kvm/interrupt.c
2877
(adapter->summary_addr & PAGE_MASK) + PAGE_SIZE)
arch/s390/kvm/interrupt.c
2880
(adapter->ind_addr & PAGE_MASK) + PAGE_SIZE)
arch/s390/kvm/kvm-s390.c
4443
return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
arch/s390/kvm/kvm-s390.c
4556
gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
arch/s390/kvm/kvm-s390.c
5671
size = new->npages * PAGE_SIZE;
arch/s390/kvm/kvm-s390.c
5675
if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
arch/s390/kvm/priv.c
1085
end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
arch/s390/kvm/priv.c
1104
if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
arch/s390/kvm/priv.c
1127
start += PAGE_SIZE;
arch/s390/kvm/priv.c
357
end = start + PAGE_SIZE;
arch/s390/kvm/priv.c
374
start += PAGE_SIZE;
arch/s390/kvm/priv.c
432
if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
arch/s390/kvm/priv.c
927
memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE);
arch/s390/kvm/priv.c
930
rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
arch/s390/kvm/pv.c
348
kvm->arch.pv.guest_len = npages * PAGE_SIZE;
arch/s390/kvm/pv.c
351
vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
arch/s390/kvm/pv.c
820
addr += PAGE_SIZE;
arch/s390/kvm/pv.c
821
offset += PAGE_SIZE;
arch/s390/kvm/vsie.c
1500
BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
arch/s390/kvm/vsie.c
613
if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
arch/s390/kvm/vsie.c
647
rc = gaccess_shadow_fault(vcpu, sg, prefix + PAGE_SIZE, NULL, true);
arch/s390/kvm/vsie.c
70
static_assert(sizeof(struct vsie_page) == PAGE_SIZE);
arch/s390/kvm/vsie.c
754
if (gpa < 2 * PAGE_SIZE)
arch/s390/kvm/vsie.c
775
if (gpa < 2 * PAGE_SIZE) {
arch/s390/kvm/vsie.c
791
if (gpa < 2 * PAGE_SIZE) {
arch/s390/kvm/vsie.c
810
if (gpa < 2 * PAGE_SIZE) {
arch/s390/kvm/vsie.c
830
if (!gpa || gpa < 2 * PAGE_SIZE) {
arch/s390/kvm/vsie.c
936
current->thread.gmap_teid.addr * PAGE_SIZE, 1);
arch/s390/kvm/vsie.c
938
rc = gaccess_shadow_fault(vcpu, sg, current->thread.gmap_teid.addr * PAGE_SIZE, NULL, wr);
arch/s390/kvm/vsie.c
941
current->thread.gmap_teid.addr * PAGE_SIZE, wr);
arch/s390/kvm/vsie.c
943
vsie_page->fault_addr = current->thread.gmap_teid.addr * PAGE_SIZE;
arch/s390/lib/test_unwind.c
22
#define BT_BUF_SIZE (PAGE_SIZE * 4)
arch/s390/mm/cmm.c
36
#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
arch/s390/mm/dump_pagetables.c
87
if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !cpu_has_bear()))
arch/s390/mm/dump_pagetables.c
92
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
arch/s390/mm/extable.c
112
remainder = PAGE_SIZE - (uaddr & (PAGE_SIZE - 1));
arch/s390/mm/fault.c
393
regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
arch/s390/mm/fault.c
65
return teid.addr * PAGE_SIZE;
arch/s390/mm/init.c
121
vaddr += PAGE_SIZE;
arch/s390/mm/init.c
132
vaddr += PAGE_SIZE;
arch/s390/mm/init.c
199
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
arch/s390/mm/init.c
300
module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
arch/s390/mm/init.c
84
empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);
arch/s390/mm/init.c
86
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
arch/s390/mm/maccess.c
169
size = PAGE_SIZE - (addr & ~PAGE_MASK);
arch/s390/mm/maccess.c
90
BUILD_BUG_ON(MEMCPY_REAL_SIZE != PAGE_SIZE);
arch/s390/mm/mmap.c
140
info.low_limit = PAGE_SIZE;
arch/s390/mm/pageattr.c
107
addr += PAGE_SIZE;
arch/s390/mm/pageattr.c
132
pte_addr += PAGE_SIZE;
arch/s390/mm/pageattr.c
350
va_end = va_start + area->nr_pages * PAGE_SIZE;
arch/s390/mm/pageattr.c
353
rc = change_page_attr(alias, alias + PAGE_SIZE, flags);
arch/s390/mm/pageattr.c
356
addr += PAGE_SIZE;
arch/s390/mm/pageattr.c
36
start += PAGE_SIZE;
arch/s390/mm/pageattr.c
375
end = addr + numpages * PAGE_SIZE;
arch/s390/mm/pageattr.c
435
address += PAGE_SIZE;
arch/s390/mm/pageattr.c
456
address += PAGE_SIZE;
arch/s390/mm/pgalloc.c
215
BASE_ADDR_END_FUNC(page, PAGE_SIZE)
arch/s390/mm/pgalloc.c
426
end = addr + num_pages * PAGE_SIZE;
arch/s390/mm/vmem.c
178
for (; addr < end; addr += PAGE_SIZE, pte++) {
arch/s390/mm/vmem.c
183
vmem_free_pages((unsigned long)pfn_to_virt(pte_pfn(*pte)), get_order(PAGE_SIZE), altmap);
arch/s390/mm/vmem.c
187
void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);
arch/s390/mm/vmem.c
34
unsigned long size = PAGE_SIZE << order;
arch/s390/mm/vmem.c
633
if (!IS_ALIGNED(addr, PAGE_SIZE))
arch/s390/mm/vmem.c
79
pte = (pte_t *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
arch/s390/pci/pci_clp.c
494
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
arch/s390/pci/pci_clp.c
514
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
arch/s390/pci/pci_clp.c
524
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
arch/s390/pci/pci_clp.c
537
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
arch/s390/pci/pci_clp.c
550
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
arch/s390/pci/pci_clp.c
594
if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
arch/s390/pci/pci_clp.c
613
if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
arch/s390/pci/pci_mmio.c
141
if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
arch/s390/pci/pci_mmio.c
296
if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
arch/s390/pci/pci_report.c
30
#define ZPCI_REPORT_SIZE (PAGE_SIZE - sizeof(struct err_notify_sccb))
arch/s390/pci/pci_sysfs.c
160
static const BIN_ATTR(report_error, S_IWUSR, NULL, report_error_write, PAGE_SIZE);
arch/sh/boards/mach-ap325rxa/setup.c
560
phys = memblock_phys_alloc(size, PAGE_SIZE);
arch/sh/boards/mach-ecovec24/setup.c
1500
phys = memblock_phys_alloc(size, PAGE_SIZE);
arch/sh/boards/mach-ecovec24/setup.c
1508
phys = memblock_phys_alloc(size, PAGE_SIZE);
arch/sh/boards/mach-kfr2r09/setup.c
632
phys = memblock_phys_alloc(size, PAGE_SIZE);
arch/sh/boards/mach-landisk/setup.c
61
cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot);
arch/sh/boards/mach-lboxre2/setup.c
54
psize = PAGE_SIZE;
arch/sh/boards/mach-migor/setup.c
632
phys = memblock_phys_alloc(size, PAGE_SIZE);
arch/sh/boards/mach-se/7724/setup.c
963
phys = memblock_phys_alloc(size, PAGE_SIZE);
arch/sh/boards/mach-se/7724/setup.c
971
phys = memblock_phys_alloc(size, PAGE_SIZE);
arch/sh/boards/mach-sh03/setup.c
78
cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot);
arch/sh/boot/compressed/misc.c
129
output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
arch/sh/include/asm/dma.h
113
dma_write(chan, from, to, PAGE_SIZE)
arch/sh/include/asm/dma.h
118
dma_read(chan, from, to, PAGE_SIZE)
arch/sh/include/asm/elf.h
107
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/sh/include/asm/fixmap.h
78
#define FIXADDR_TOP (P4SEG - PAGE_SIZE)
arch/sh/include/asm/io_trapped.h
18
} __aligned(PAGE_SIZE);
arch/sh/include/asm/page.h
46
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
arch/sh/include/asm/page.h
48
#define copy_user_page(to, from, vaddr, pg) __copy_user(to, from, PAGE_SIZE)
arch/sh/include/asm/pgtable-2level.h
21
#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE))
arch/sh/include/asm/pgtable.h
28
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/sh/include/asm/pgtable.h
60
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
arch/sh/include/asm/pgtable.h
78
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
arch/sh/kernel/cpu/init.c
326
PAGE_SIZE - 1);
arch/sh/kernel/cpu/init.c
328
shm_align_mask = PAGE_SIZE - 1;
arch/sh/kernel/cpu/sh4/sq.c
183
psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
arch/sh/kernel/crash_dump.c
22
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/sh/kernel/io_trapped.c
127
voffs += roundup(len, PAGE_SIZE);
arch/sh/kernel/io_trapped.c
175
len = roundup(resource_size(res), PAGE_SIZE);
arch/sh/kernel/io_trapped.c
52
if ((unsigned long)tiop & (PAGE_SIZE - 1))
arch/sh/kernel/io_trapped.c
57
len += roundup(resource_size(res), PAGE_SIZE);
arch/sh/kernel/io_trapped.c
84
len += roundup(resource_size(res), PAGE_SIZE);
arch/sh/kernel/machine_kexec.c
162
PAGE_SIZE, 0, max);
arch/sh/kernel/vsyscall/vsyscall.c
100
vma = _install_special_mapping(mm, addr, PAGE_SIZE,
arch/sh/kernel/vsyscall/vsyscall.c
93
addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
arch/sh/mm/cache-sh4.c
128
pfn * PAGE_SIZE);
arch/sh/mm/cache-sh4.c
129
addr += PAGE_SIZE;
arch/sh/mm/cache-sh4.c
360
ea = base_addr + PAGE_SIZE;
arch/sh/mm/cache-sh4.c
80
__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
arch/sh/mm/cache-sh7705.c
147
__flush_dcache_page((pfn + i) * PAGE_SIZE);
arch/sh/mm/cache.c
120
__flush_purge_region(vto, PAGE_SIZE);
arch/sh/mm/cache.c
135
__flush_purge_region(kaddr, PAGE_SIZE);
arch/sh/mm/cache.c
258
c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
arch/sh/mm/fault.c
212
address < PAGE_SIZE ? "NULL pointer dereference"
arch/sh/mm/init.c
140
pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/sh/mm/init.c
153
pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/sh/mm/init.c
250
memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
arch/sh/mm/init.c
335
memset(empty_zero_page, 0, PAGE_SIZE);
arch/sh/mm/init.c
336
__flush_wback_region(empty_zero_page, PAGE_SIZE);
arch/sh/mm/ioremap_fixed.c
91
phys_addr += PAGE_SIZE;
arch/sh/mm/kmap.c
58
__flush_purge_region((void *)vaddr, PAGE_SIZE);
arch/sh/mm/mmap.c
143
info.low_limit = PAGE_SIZE;
arch/sh/mm/mmap.c
18
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
arch/sh/mm/nommu.c
24
memcpy(to, from, PAGE_SIZE);
arch/sh/mm/pgtable.c
26
PAGE_SIZE, SLAB_PANIC, pgd_ctor);
arch/sh/mm/pgtable.c
30
PAGE_SIZE, SLAB_PANIC, NULL);
arch/sh/mm/tlbflush_32.c
50
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
arch/sh/mm/tlbflush_32.c
61
end += (PAGE_SIZE - 1);
arch/sh/mm/tlbflush_32.c
69
start += PAGE_SIZE;
arch/sh/mm/tlbflush_32.c
85
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
arch/sh/mm/tlbflush_32.c
94
end += (PAGE_SIZE - 1);
arch/sh/mm/tlbflush_32.c
99
start += PAGE_SIZE;
arch/sparc/include/asm/cachetype.h
11
#define cpu_dcache_is_aliasing() (L1DCACHE_SIZE > PAGE_SIZE)
arch/sparc/include/asm/cachetype.h
9
#define cpu_dcache_is_aliasing() (vac_cache_size > PAGE_SIZE)
arch/sparc/include/asm/elf_64.h
185
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/sparc/include/asm/page_32.h
18
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
arch/sparc/include/asm/page_32.h
19
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
arch/sparc/include/asm/page_64.h
47
#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
arch/sparc/include/asm/pgtable_32.h
78
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/sparc/include/asm/pgtable_64.h
942
pte_val(pte) += PAGE_SIZE;
arch/sparc/include/asm/pgtable_64.h
943
addr += PAGE_SIZE;
arch/sparc/include/asm/processor_32.h
22
#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
arch/sparc/include/asm/processor_64.h
38
#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
arch/sparc/include/asm/ross.h
180
end = page + PAGE_SIZE;
arch/sparc/include/asm/setup.h
20
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
arch/sparc/include/asm/shmparam_32.h
8
#define SHMLBA (vac_cache_size ? vac_cache_size : PAGE_SIZE)
arch/sparc/include/asm/shmparam_64.h
9
#define SHMLBA ((PAGE_SIZE > L1DCACHE_SIZE) ? PAGE_SIZE : L1DCACHE_SIZE)
arch/sparc/include/asm/thread_info_32.h
78
#define THREAD_SIZE (2 * PAGE_SIZE)
arch/sparc/include/asm/thread_info_64.h
103
#define THREAD_SIZE (2*PAGE_SIZE)
arch/sparc/include/asm/thread_info_64.h
106
#define THREAD_SIZE PAGE_SIZE
arch/sparc/kernel/adi_64.c
134
max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
arch/sparc/kernel/adi_64.c
140
((addr + PAGE_SIZE - 1) <= tag_desc->end))
arch/sparc/kernel/adi_64.c
165
max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
arch/sparc/kernel/adi_64.c
169
end_addr = addr + PAGE_SIZE - 1;
arch/sparc/kernel/adi_64.c
189
(tag_desc->end >= (addr + PAGE_SIZE - 1))) {
arch/sparc/kernel/adi_64.c
238
size = TAG_STORAGE_PAGES * PAGE_SIZE;
arch/sparc/kernel/adi_64.c
242
size = PAGE_SIZE;
arch/sparc/kernel/adi_64.c
264
size = PAGE_SIZE;
arch/sparc/kernel/adi_64.c
281
size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
arch/sparc/kernel/adi_64.c
282
size = size * PAGE_SIZE;
arch/sparc/kernel/adi_64.c
344
for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
arch/sparc/kernel/adi_64.c
383
for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
arch/sparc/kernel/iommu.c
128
memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
arch/sparc/kernel/iommu.c
219
memset((char *)first_page, 0, PAGE_SIZE << order);
arch/sparc/kernel/ioport.c
210
(offset + sz + PAGE_SIZE-1) & PAGE_MASK,
arch/sparc/kernel/ioport.c
211
sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
arch/sparc/kernel/ioport.c
232
BUG_ON((plen & (PAGE_SIZE-1)) != 0);
arch/sparc/kernel/ioport.c
247
_sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
arch/sparc/kernel/ioport.c
267
if ((addr & (PAGE_SIZE - 1)) != 0) {
arch/sparc/kernel/irq_64.c
1037
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
arch/sparc/kernel/ldc.c
1077
memset(table, 0, PAGE_SIZE << order);
arch/sparc/kernel/ldc.c
1992
switch (PAGE_SIZE) {
arch/sparc/kernel/ldc.c
2065
unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;
arch/sparc/kernel/ldc.c
2088
unsigned long tlen, new = pa + PAGE_SIZE;
arch/sparc/kernel/ldc.c
2093
tlen = PAGE_SIZE;
arch/sparc/kernel/ldc.c
2095
tlen = PAGE_SIZE - off;
arch/sparc/kernel/ldc.c
90
#define LDC_DEFAULT_NUM_ENTRIES (PAGE_SIZE / LDC_PACKET_SIZE)
arch/sparc/kernel/ldc.c
991
memset(q, 0, PAGE_SIZE << order);
arch/sparc/kernel/mdesc.c
173
paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE);
arch/sparc/kernel/module.c
187
for (va = 0; va < (PAGE_SIZE << 1); va += 32)
arch/sparc/kernel/pci.c
626
return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
arch/sparc/kernel/pci_fire.c
243
memset((char *)pages, 0, PAGE_SIZE << order);
arch/sparc/kernel/pci_sun4v.c
1078
memset((char *)pages, 0, PAGE_SIZE << order);
arch/sparc/kernel/pci_sun4v.c
211
memset((char *)first_page, 0, PAGE_SIZE << order);
arch/sparc/kernel/pci_sun4v.c
238
long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
arch/sparc/kernel/pci_sun4v.c
54
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
arch/sparc/kernel/process_32.c
163
fp >= (task_base + (PAGE_SIZE << 1)))
arch/sparc/kernel/process_32.c
390
fp >= (task_base + (2 * PAGE_SIZE)))
arch/sparc/kernel/ptrace_64.c
110
BUG_ON(len > PAGE_SIZE);
arch/sparc/kernel/signal32.c
342
"r" (address & (PAGE_SIZE - 1))
arch/sparc/kernel/smp_64.c
1018
pg_addr += PAGE_SIZE;
arch/sparc/kernel/smp_64.c
927
__flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
arch/sparc/kernel/smp_64.c
935
__flush_icache_page((pfn + i) * PAGE_SIZE);
arch/sparc/kernel/smp_64.c
977
pg_addr += PAGE_SIZE;
arch/sparc/kernel/sys_sparc_32.c
101
if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
arch/sparc/kernel/sys_sparc_32.c
40
return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
arch/sparc/kernel/sys_sparc_32.c
62
if (len > TASK_SIZE - PAGE_SIZE)
arch/sparc/kernel/sys_sparc_64.c
213
info.low_limit = PAGE_SIZE;
arch/sparc/kernel/sys_sparc_64.c
248
align_goal = PAGE_SIZE;
arch/sparc/kernel/sys_sparc_64.c
258
len + (align_goal - PAGE_SIZE), pgoff, flags);
arch/sparc/kernel/sys_sparc_64.c
269
align_goal = PAGE_SIZE;
arch/sparc/kernel/sys_sparc_64.c
270
} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
arch/sparc/kernel/sys_sparc_64.c
46
return PAGE_SIZE;
arch/sparc/kernel/traps_64.c
2166
PAGE_SIZE);
arch/sparc/kernel/traps_64.c
2177
addr += PAGE_SIZE;
arch/sparc/kernel/traps_64.c
422
for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
arch/sparc/kernel/traps_64.c
902
if ((PAGE_SIZE << order) >= sz)
arch/sparc/kernel/traps_64.c
912
memset(cheetah_error_log, 0, PAGE_SIZE << order);
arch/sparc/kernel/unaligned_32.c
222
if(address < PAGE_SIZE) {
arch/sparc/kernel/unaligned_64.c
271
if (address < PAGE_SIZE) {
arch/sparc/kernel/vio.c
195
return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
arch/sparc/mm/fault_32.c
43
if ((unsigned long) address < PAGE_SIZE) {
arch/sparc/mm/fault_64.c
44
if ((unsigned long) address < PAGE_SIZE) {
arch/sparc/mm/fault_64.c
468
mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
arch/sparc/mm/init_32.c
110
if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
arch/sparc/mm/init_32.c
230
start += PAGE_SIZE;
arch/sparc/mm/init_32.c
239
if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
arch/sparc/mm/init_32.c
243
(unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
arch/sparc/mm/init_32.c
251
memset((void *)empty_zero_page, 0, PAGE_SIZE);
arch/sparc/mm/init_32.c
281
__flush_page_to_ram(vaddr + i * PAGE_SIZE);
arch/sparc/mm/init_64.c
1782
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
arch/sparc/mm/init_64.c
1783
PAGE_SIZE);
arch/sparc/mm/init_64.c
1786
alloc_bytes += PAGE_SIZE;
arch/sparc/mm/init_64.c
1794
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
arch/sparc/mm/init_64.c
1795
PAGE_SIZE);
arch/sparc/mm/init_64.c
1798
alloc_bytes += PAGE_SIZE;
arch/sparc/mm/init_64.c
1810
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
arch/sparc/mm/init_64.c
1811
PAGE_SIZE);
arch/sparc/mm/init_64.c
1814
alloc_bytes += PAGE_SIZE;
arch/sparc/mm/init_64.c
1826
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
arch/sparc/mm/init_64.c
1827
PAGE_SIZE);
arch/sparc/mm/init_64.c
1830
alloc_bytes += PAGE_SIZE;
arch/sparc/mm/init_64.c
1842
vstart += PAGE_SIZE;
arch/sparc/mm/init_64.c
1843
paddr += PAGE_SIZE;
arch/sparc/mm/init_64.c
1852
__func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
arch/sparc/mm/init_64.c
1909
unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
arch/sparc/mm/init_64.c
209
__flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
arch/sparc/mm/init_64.c
2132
switch (PAGE_SIZE) {
arch/sparc/mm/init_64.c
216
__flush_icache_page((pfn + i) * PAGE_SIZE);
arch/sparc/mm/init_64.c
2537
for (; addr < initend; addr += PAGE_SIZE) {
arch/sparc/mm/init_64.c
2543
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
arch/sparc/mm/init_64.c
3142
for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
arch/sparc/mm/init_64.c
3176
for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
arch/sparc/mm/init_64.c
418
unsigned long hugepage_size = PAGE_SIZE;
arch/sparc/mm/init_64.c
449
address += PAGE_SIZE;
arch/sparc/mm/init_64.c
450
pte_val(pte) += PAGE_SIZE;
arch/sparc/mm/init_64.c
509
for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
arch/sparc/mm/init_64.c
961
for ( ; start < end; start += PAGE_SIZE) {
arch/sparc/mm/io-unit.c
104
npages = (offset_in_page(phys) + size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
arch/sparc/mm/io-unit.c
190
len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
arch/sparc/mm/io-unit.c
208
len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
arch/sparc/mm/io-unit.c
259
addr += PAGE_SIZE;
arch/sparc/mm/io-unit.c
260
va += PAGE_SIZE;
arch/sparc/mm/io-unit.c
60
xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
arch/sparc/mm/io-unit.c
70
xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
arch/sparc/mm/iommu.c
169
start += PAGE_SIZE;
arch/sparc/mm/iommu.c
174
start += PAGE_SIZE;
arch/sparc/mm/iommu.c
179
start += PAGE_SIZE;
arch/sparc/mm/iommu.c
189
unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
arch/sparc/mm/iommu.c
211
for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
arch/sparc/mm/iommu.c
227
busa += PAGE_SIZE;
arch/sparc/mm/iommu.c
288
unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
arch/sparc/mm/iommu.c
296
busa += PAGE_SIZE;
arch/sparc/mm/iommu.c
370
addr += PAGE_SIZE;
arch/sparc/mm/iommu.c
371
va += PAGE_SIZE;
arch/sparc/mm/iommu.c
38
#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
arch/sparc/mm/iommu.c
416
busa += PAGE_SIZE;
arch/sparc/mm/iommu.c
73
iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
arch/sparc/mm/leon_mm.c
255
if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
arch/sparc/mm/srmmu.c
262
srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
arch/sparc/mm/srmmu.c
312
vaddr += PAGE_SIZE;
arch/sparc/mm/srmmu.c
313
paddr += PAGE_SIZE;
arch/sparc/mm/srmmu.c
518
len -= PAGE_SIZE;
arch/sparc/mm/srmmu.c
520
xva += PAGE_SIZE;
arch/sparc/mm/srmmu.c
521
xpa += PAGE_SIZE;
arch/sparc/mm/srmmu.c
548
len -= PAGE_SIZE;
arch/sparc/mm/srmmu.c
550
virt_addr += PAGE_SIZE;
arch/sparc/mm/srmmu.c
784
start += PAGE_SIZE;
arch/sparc/mm/srmmu.c
790
addr = start - PAGE_SIZE;
arch/sparc/mm/srmmu.c
833
start += PAGE_SIZE;
arch/sparc/mm/srmmu.c
935
srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
arch/sparc/mm/tlb.c
173
vaddr += PAGE_SIZE;
arch/sparc/mm/tsb.c
346
PAGE_SIZE, PAGE_SIZE,
arch/sparc/mm/tsb.c
405
if (max_tsb_size > PAGE_SIZE << MAX_PAGE_ORDER)
arch/sparc/mm/tsb.c
406
max_tsb_size = PAGE_SIZE << MAX_PAGE_ORDER;
arch/sparc/mm/tsb.c
421
if (new_size > (PAGE_SIZE * 2))
arch/sparc/mm/tsb.c
562
mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
arch/sparc/mm/tsb.c
59
for (v = start; v < end; v += PAGE_SIZE) {
arch/sparc/mm/tsb.c
625
max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
arch/sparc/vdso/vma.c
250
int cnpages = (image->size) / PAGE_SIZE;
arch/sparc/vdso/vma.c
266
if (WARN_ON(image->size % PAGE_SIZE != 0))
arch/sparc/vdso/vma.c
280
copy_page(page_address(cp), image->data + i * PAGE_SIZE);
arch/sparc/vdso/vma.c
288
dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
arch/sparc/vdso/vma.c
303
memset(vvar_data, 0, PAGE_SIZE);
arch/um/drivers/mconsole_kern.c
151
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/um/drivers/mconsole_kern.c
158
len = kernel_read(file, buf, PAGE_SIZE - 1, &pos);
arch/um/drivers/mconsole_kern.c
278
((PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long))
arch/um/drivers/mconsole_kern.c
319
diff /= PAGE_SIZE;
arch/um/drivers/mconsole_kern.c
362
err = os_drop_memory(addr, PAGE_SIZE);
arch/um/drivers/ubd_kern.c
736
ubd_dev->openflags, SECTOR_SIZE, PAGE_SIZE,
arch/um/drivers/ubd_kern.c
834
.seg_boundary_mask = PAGE_SIZE - 1,
arch/um/drivers/virtio_uml.c
1385
snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
arch/um/drivers/virtio_uml.c
976
vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
arch/um/include/asm/page.h
26
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
arch/um/include/asm/page.h
27
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
arch/um/include/asm/pgtable.h
248
size_t length = nr * PAGE_SIZE;
arch/um/include/asm/pgtable.h
54
#define VMALLOC_END (TASK_SIZE-2*PAGE_SIZE)
arch/um/include/asm/processor-generic.h
61
#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
arch/um/include/asm/thread_info.h
10
#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
arch/um/include/asm/tlbflush.h
41
um_tlb_mark_sync(vma->vm_mm, address, address + PAGE_SIZE);
arch/um/kernel/asm-offsets.c
21
DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE);
arch/um/kernel/mem.c
69
memset(empty_zero_page, 0, PAGE_SIZE);
arch/um/kernel/mem.c
94
empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
arch/um/kernel/mem.c
95
PAGE_SIZE);
arch/um/kernel/mem.c
98
__func__, PAGE_SIZE, PAGE_SIZE);
arch/um/kernel/physmem.c
91
os_write_file(physmem_fd, __syscall_stub_start, PAGE_SIZE);
arch/um/kernel/skas/uaccess.c
115
n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
arch/um/kernel/skas/uaccess.c
121
addr += PAGE_SIZE;
arch/um/kernel/skas/uaccess.c
122
remain -= PAGE_SIZE;
arch/um/kernel/tlb.c
82
ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
arch/um/kernel/tlb.c
85
ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
arch/um/kernel/tlb.c
88
} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
arch/um/kernel/trap.c
349
else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
arch/x86/boot/compressed/ident_map_64.c
212
address += PAGE_SIZE;
arch/x86/boot/compressed/ident_map_64.c
243
end = start + PAGE_SIZE;
arch/x86/boot/compressed/ident_map_64.c
71
pages->pgt_buf_offset += PAGE_SIZE;
arch/x86/boot/compressed/pgtable_64.c
187
memcpy(trampoline_32bit, new_cr3, PAGE_SIZE);
arch/x86/boot/compressed/pgtable_64.c
195
memcpy(pgtable, trampoline_32bit, PAGE_SIZE);
arch/x86/boot/compressed/pgtable_64.c
64
bios_start = round_down(bios_start, PAGE_SIZE);
arch/x86/boot/compressed/pgtable_64.c
85
new = round_down(new, PAGE_SIZE);
arch/x86/boot/compressed/sev.c
107
for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
arch/x86/boot/compressed/sev.c
30
static struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
arch/x86/boot/startup/sev-shared.c
562
if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE)
arch/x86/boot/startup/sev-shared.c
706
BUILD_BUG_ON(sizeof(*secrets_page) != PAGE_SIZE);
arch/x86/boot/startup/sev-shared.c
729
if (!cc_info || !cc_info->secrets_phys || cc_info->secrets_len != PAGE_SIZE)
arch/x86/boot/startup/sev-shared.c
747
if (caa & (PAGE_SIZE - 1))
arch/x86/boot/startup/sev-startup.c
205
if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
arch/x86/boot/startup/sev-startup.c
62
vaddr += PAGE_SIZE;
arch/x86/boot/startup/sev-startup.c
63
paddr += PAGE_SIZE;
arch/x86/boot/startup/sme.c
204
ppd->vaddr += PAGE_SIZE;
arch/x86/boot/startup/sme.c
205
ppd->paddr += PAGE_SIZE;
arch/x86/boot/startup/sme.c
347
execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
arch/x86/coco/sev/core.c
1121
memset(&boot_ghcb_page, 0, PAGE_SIZE);
arch/x86/coco/sev/core.c
1194
data = memblock_alloc_node(sizeof(*data), PAGE_SIZE, cpu_to_node(cpu));
arch/x86/coco/sev/core.c
1204
caa = cpu ? memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE)
arch/x86/coco/sev/core.c
1234
BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
arch/x86/coco/sev/core.c
127
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
arch/x86/coco/sev/core.c
141
mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
arch/x86/coco/sev/core.c
1592
BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
arch/x86/coco/sev/core.c
1598
mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
arch/x86/coco/sev/core.c
2035
mem = early_memremap_encrypted(sev_secrets_pa, PAGE_SIZE);
arch/x86/coco/sev/core.c
2054
early_memunmap(mem, PAGE_SIZE);
arch/x86/coco/sev/core.c
213
for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
arch/x86/coco/sev/core.c
357
vaddr += PAGE_SIZE;
arch/x86/coco/sev/core.c
564
npages = size / PAGE_SIZE;
arch/x86/coco/sev/core.c
600
for (; addr < end; addr += PAGE_SIZE) {
arch/x86/coco/sev/core.c
726
snp_set_memory_private(addr, (size / PAGE_SIZE));
arch/x86/coco/sev/core.c
924
jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
arch/x86/coco/sev/svsm.c
13
struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
arch/x86/coco/tdx/tdx.c
1049
int pages = size / PAGE_SIZE;
arch/x86/coco/tdx/tdx.c
621
if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
arch/x86/coco/tdx/tdx.c
967
phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);
arch/x86/entry/vdso/vma.c
144
image->size + __VDSO_PAGES * PAGE_SIZE, 0, 0);
arch/x86/entry/vdso/vma.c
150
text_start = addr + __VDSO_PAGES * PAGE_SIZE;
arch/x86/entry/vdso/vma.c
177
VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
arch/x86/entry/vdso/vma.c
41
BUG_ON(image->size % PAGE_SIZE != 0);
arch/x86/entry/vsyscall/vsyscall_64.c
297
.vm_end = VSYSCALL_ADDR + PAGE_SIZE,
arch/x86/events/amd/core.c
1354
return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
arch/x86/events/amd/uncore.c
342
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
arch/x86/events/core.c
2703
return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise(pmu));
arch/x86/events/intel/bts.c
77
return buf_nr_pages(page) * PAGE_SIZE;
arch/x86/events/intel/core.c
6832
return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
arch/x86/events/intel/core.c
6841
return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
arch/x86/events/intel/core.c
6850
return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
arch/x86/events/intel/cstate.c
129
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
arch/x86/events/intel/ds.c
24
#define PEBS_FIXUP_SIZE PAGE_SIZE
arch/x86/events/intel/ds.c
3361
x86_pmu.pebs_buffer_size = PAGE_SIZE;
arch/x86/events/intel/ds.c
807
for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
arch/x86/events/intel/ds.c
824
for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
arch/x86/events/intel/pt.c
102
return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
arch/x86/events/intel/pt.c
1077
unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
arch/x86/events/intel/pt.c
1835
BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);
arch/x86/events/intel/pt.c
611
((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
arch/x86/events/intel/pt.c
654
mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7;
arch/x86/events/intel/uncore.h
265
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
arch/x86/events/intel/uncore_snb.c
900
addr &= ~(PAGE_SIZE - 1);
arch/x86/hyperv/hv_crash.c
465
if (size + sizeof(struct hv_crash_tramp_data) > PAGE_SIZE) {
arch/x86/hyperv/hv_init.c
139
PAGE_SIZE, MEMREMAP_WB);
arch/x86/hyperv/hv_init.c
150
*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
arch/x86/hyperv/hv_init.c
160
memset(*hvp, 0, PAGE_SIZE);
arch/x86/hyperv/hv_init.c
525
hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, MODULES_VADDR,
arch/x86/hyperv/hv_init.c
552
src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
arch/x86/hyperv/ivm.c
250
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
arch/x86/hyperv/ivm.c
251
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
arch/x86/hyperv/ivm.c
340
vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
arch/x86/hyperv/ivm.c
846
addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
arch/x86/hyperv/ivm.c
851
addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
arch/x86/hyperv/mmu.c
142
max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
arch/x86/hyperv/mmu.c
17
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
arch/x86/hyperv/mmu.c
214
(PAGE_SIZE - sizeof(*flush) - nr_bank *
arch/x86/include/asm/boot.h
77
#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE)
arch/x86/include/asm/boot.h
79
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
arch/x86/include/asm/cpu_entry_area.h
104
char guard_doublefault_stack[PAGE_SIZE];
arch/x86/include/asm/cpu_entry_area.h
42
ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
arch/x86/include/asm/cpu_entry_area.h
71
(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
arch/x86/include/asm/cpu_entry_area.h
77
unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
arch/x86/include/asm/cpu_entry_area.h
79
} __aligned(PAGE_SIZE);
arch/x86/include/asm/cpu_entry_area.h
91
char gdt[PAGE_SIZE];
arch/x86/include/asm/cpu_entry_area.h
99
char guard_entry_stack[PAGE_SIZE];
arch/x86/include/asm/desc.h
46
} __attribute__((aligned(PAGE_SIZE)));
arch/x86/include/asm/fixmap.h
58
#define FIXADDR_TOP (round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
arch/x86/include/asm/fixmap.h
59
PAGE_SIZE)
arch/x86/include/asm/fpu/types.h
374
u8 __padding[PAGE_SIZE];
arch/x86/include/asm/intel_ds.h
14
#define ARCH_PEBS_THRESH_MULTI ((PEBS_BUFFER_SIZE - PAGE_SIZE) >> PEBS_BUFFER_SHIFT)
arch/x86/include/asm/intel_ds.h
38
} __aligned(PAGE_SIZE);
arch/x86/include/asm/intel_ds.h
6
#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
arch/x86/include/asm/intel_ds.h
8
#define PEBS_BUFFER_SIZE (PAGE_SIZE << PEBS_BUFFER_SHIFT)
arch/x86/include/asm/kfence.h
27
addr += PAGE_SIZE) {
arch/x86/include/asm/kvm_host.h
161
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
arch/x86/include/asm/page_32.h
28
memset(page, 0, PAGE_SIZE);
arch/x86/include/asm/page_32.h
33
memcpy(to, from, PAGE_SIZE);
arch/x86/include/asm/page_32_types.h
23
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/x86/include/asm/page_64.h
145
[small] "i" ((1ul << 47)-PAGE_SIZE),
arch/x86/include/asm/page_64.h
146
[large] "i" ((1ul << 56)-PAGE_SIZE));
arch/x86/include/asm/page_64.h
87
u64 len = npages * PAGE_SIZE;
arch/x86/include/asm/page_64_types.h
16
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
arch/x86/include/asm/page_64_types.h
19
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
arch/x86/include/asm/page_64_types.h
22
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
arch/x86/include/asm/page_64_types.h
54
#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
arch/x86/include/asm/pgtable.h
54
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
arch/x86/include/asm/pgtable_32_areas.h
27
#define CPU_ENTRY_AREA_PAGES (NR_CPUS * DIV_ROUND_UP(sizeof(struct cpu_entry_area), PAGE_SIZE))
arch/x86/include/asm/pgtable_32_areas.h
31
((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
arch/x86/include/asm/pgtable_32_areas.h
34
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
arch/x86/include/asm/pgtable_32_areas.h
39
((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
arch/x86/include/asm/pgtable_32_areas.h
42
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
arch/x86/include/asm/pgtable_32_areas.h
44
# define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE)
arch/x86/include/asm/pgtable_64.h
280
SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
arch/x86/include/asm/pgtable_64_types.h
191
#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
arch/x86/include/asm/pgtable_areas.h
10
#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
arch/x86/include/asm/processor.h
362
char stack[PAGE_SIZE];
arch/x86/include/asm/processor.h
367
} __aligned(PAGE_SIZE);
arch/x86/include/asm/processor.h
409
} __aligned(PAGE_SIZE);
arch/x86/include/asm/realmode.h
86
return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
arch/x86/include/asm/sev.h
208
u8 payload[PAGE_SIZE - sizeof(struct snp_guest_msg_hdr)];
arch/x86/include/asm/sev.h
332
u8 svsm_buffer[PAGE_SIZE - 8];
arch/x86/include/asm/sev.h
595
val = bytes[page_idx * PAGE_SIZE];
arch/x86/include/asm/sev.h
596
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
arch/x86/include/asm/shmparam.h
5
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
arch/x86/include/asm/stacktrace.h
48
return get_stack_info_noinstr((void *)stack + PAGE_SIZE, current, info);
arch/x86/include/asm/svm.h
555
#define EXPECTED_GHCB_SIZE PAGE_SIZE
arch/x86/include/asm/tlbflush.h
323
flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
arch/x86/include/asm/uaccess_64.h
100
if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
arch/x86/include/asm/vdso/vsyscall.h
8
#define VDSO_VCLOCK_PAGES_START(_b) ((_b) + (__VDSO_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE)
arch/x86/include/asm/xen/page.h
48
#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
arch/x86/kernel/acpi/madt_wakeup.c
113
mend = mstart + PAGE_SIZE;
arch/x86/kernel/acpi/madt_wakeup.c
65
return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
arch/x86/kernel/acpi/madt_wakeup.c
70
return memblock_free(pgt, PAGE_SIZE);
arch/x86/kernel/acpi/madt_wakeup.c
98
mend = mstart + PAGE_SIZE;
arch/x86/kernel/alternative.c
124
void *page __free(execmem) = execmem_alloc_rw(EXECMEM_MODULE_TEXT, PAGE_SIZE);
arch/x86/kernel/alternative.c
177
execmem_restore_rox(page, PAGE_SIZE);
arch/x86/kernel/alternative.c
2536
bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
arch/x86/kernel/alternative.c
2553
pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
arch/x86/kernel/alternative.c
2558
pages[1] = virt_to_page(addr + PAGE_SIZE);
arch/x86/kernel/alternative.c
2589
set_pte_at(text_poke_mm, text_poke_mm_addr + PAGE_SIZE, ptep + 1, pte);
arch/x86/kernel/alternative.c
2610
pte_clear(text_poke_mm, text_poke_mm_addr + PAGE_SIZE, ptep + 1);
arch/x86/kernel/alternative.c
2624
(cross_page_boundary ? 2 : 1) * PAGE_SIZE,
arch/x86/kernel/alternative.c
263
if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
arch/x86/kernel/alternative.c
269
memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
arch/x86/kernel/alternative.c
2695
s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
arch/x86/kernel/alternative.c
2745
s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
arch/x86/kernel/alternative.c
2780
#define TEXT_POKE_ARRAY_MAX (PAGE_SIZE / sizeof(struct smp_text_poke_loc))
arch/x86/kernel/amd_gart_64.c
200
unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
arch/x86/kernel/amd_gart_64.c
219
phys_mem += PAGE_SIZE;
arch/x86/kernel/amd_gart_64.c
221
return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
arch/x86/kernel/amd_gart_64.c
267
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
arch/x86/kernel/amd_gart_64.c
342
sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
arch/x86/kernel/amd_gart_64.c
349
pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
arch/x86/kernel/amd_gart_64.c
352
addr += PAGE_SIZE;
arch/x86/kernel/amd_gart_64.c
415
(ps->offset + ps->length) % PAGE_SIZE) {
arch/x86/kernel/amd_gart_64.c
432
pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
arch/x86/kernel/amd_gart_64.c
97
PAGE_SIZE) >> PAGE_SHIFT;
arch/x86/kernel/apic/apic.c
2664
lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
arch/x86/kernel/apic/io_apic.c
2574
ioapic_phys = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE,
arch/x86/kernel/apic/io_apic.c
2575
PAGE_SIZE);
arch/x86/kernel/apic/x2apic_savic.c
21
u8 regs[PAGE_SIZE];
arch/x86/kernel/apic/x2apic_savic.c
22
} __aligned(PAGE_SIZE);
arch/x86/kernel/apm_32.c
423
(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
arch/x86/kernel/check.c
113
corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
arch/x86/kernel/check.c
117
start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
arch/x86/kernel/check.c
118
PAGE_SIZE, corruption_check_size);
arch/x86/kernel/check.c
119
end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
arch/x86/kernel/check.c
120
PAGE_SIZE, corruption_check_size);
arch/x86/kernel/cpu/microcode/intel.c
420
ss->chunk_size = min(PAGE_SIZE, ss->ucode_len - ss->offset);
arch/x86/kernel/cpu/mtrr/if.c
52
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
arch/x86/kernel/cpu/mtrr/if.c
71
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
arch/x86/kernel/cpu/mtrr/mtrr.c
326
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
189
for (i = 0; i < size; i += PAGE_SIZE) {
arch/x86/kernel/cpu/sgx/encl.c
1151
ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
arch/x86/kernel/cpu/sgx/encl.c
1223
zap_vma_ptes(vma, addr, PAGE_SIZE);
arch/x86/kernel/cpu/sgx/encl.c
125
shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
arch/x86/kernel/cpu/sgx/encl.c
18
#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
arch/x86/kernel/cpu/sgx/encl.c
188
pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);
arch/x86/kernel/cpu/sgx/encl.c
201
if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
arch/x86/kernel/cpu/sgx/encl.c
74
addr = start_addr + i * PAGE_SIZE;
arch/x86/kernel/cpu/sgx/encl.c
991
backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);
arch/x86/kernel/cpu/sgx/ioctl.c
1084
for (c = 0 ; c < params->length; c += PAGE_SIZE) {
arch/x86/kernel/cpu/sgx/ioctl.c
167
secs = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/x86/kernel/cpu/sgx/ioctl.c
171
if (copy_from_user(secs, (void __user *)create_arg.src, PAGE_SIZE))
arch/x86/kernel/cpu/sgx/ioctl.c
253
for (offset = 0; offset < PAGE_SIZE; offset += SGX_EEXTEND_BLOCK_SIZE) {
arch/x86/kernel/cpu/sgx/ioctl.c
361
if (!IS_ALIGNED(offset, PAGE_SIZE))
arch/x86/kernel/cpu/sgx/ioctl.c
364
if (!length || !IS_ALIGNED(length, PAGE_SIZE))
arch/x86/kernel/cpu/sgx/ioctl.c
370
if (offset + length - PAGE_SIZE >= encl->size)
arch/x86/kernel/cpu/sgx/ioctl.c
429
if (!IS_ALIGNED(add_arg.src, PAGE_SIZE))
arch/x86/kernel/cpu/sgx/ioctl.c
442
for (c = 0 ; c < add_arg.length; c += PAGE_SIZE) {
arch/x86/kernel/cpu/sgx/ioctl.c
592
sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL);
arch/x86/kernel/cpu/sgx/ioctl.c
596
token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
arch/x86/kernel/cpu/sgx/ioctl.c
729
for (c = 0 ; c < modp->length; c += PAGE_SIZE) {
arch/x86/kernel/cpu/sgx/ioctl.c
83
encl_size = secs->size + PAGE_SIZE;
arch/x86/kernel/cpu/sgx/ioctl.c
896
for (c = 0 ; c < modt->length; c += PAGE_SIZE) {
arch/x86/kernel/cpu/sgx/sgx.h
73
return section->phys_addr + index * PAGE_SIZE;
arch/x86/kernel/cpu/sgx/sgx.h
83
return section->virt_addr + index * PAGE_SIZE;
arch/x86/kernel/cpu/sgx/virt.c
369
if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE)))
arch/x86/kernel/cpu/sgx/virt.c
400
!access_ok(secs, PAGE_SIZE)))
arch/x86/kernel/crash.c
393
if (ei.size < PAGE_SIZE)
arch/x86/kernel/crash_dump_64.c
25
vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/x86/kernel/crash_dump_64.c
27
vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/x86/kernel/devicetree.c
299
map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
arch/x86/kernel/doublefault_32.c
29
BUILD_BUG_ON(sizeof(struct doublefault_stack) != PAGE_SIZE);
arch/x86/kernel/e820.c
1373
memblock_trim_memory(PAGE_SIZE);
arch/x86/kernel/early_printk.c
231
early_serial_base = (unsigned long)early_ioremap(membase, PAGE_SIZE);
arch/x86/kernel/espfix_64.c
42
#define ESPFIX_STACKS_PER_PAGE (PAGE_SIZE/ESPFIX_STACK_SIZE)
arch/x86/kernel/espfix_64.c
66
__aligned(PAGE_SIZE);
arch/x86/kernel/espfix_64.c
88
#define PTE_STRIDE (65536/PAGE_SIZE)
arch/x86/kernel/ftrace.c
354
npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
arch/x86/kernel/ftrace.c
449
npages = DIV_ROUND_UP(size, PAGE_SIZE);
arch/x86/kernel/head32.c
120
pte.pte += PAGE_SIZE;
arch/x86/kernel/irq_64.c
39
struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
arch/x86/kernel/irq_64.c
43
for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
arch/x86/kernel/irq_64.c
49
va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
arch/x86/kernel/kexec-bzimage64.c
293
kho->fdt_size = PAGE_SIZE;
arch/x86/kernel/kexec-bzimage64.c
635
kbuf.buf_align = PAGE_SIZE;
arch/x86/kernel/kvm.c
994
nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);
arch/x86/kernel/kvmclock.c
239
memset(hvclock_mem, 0, PAGE_SIZE << order);
arch/x86/kernel/kvmclock.c
48
(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
arch/x86/kernel/kvmclock.c
51
hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
arch/x86/kernel/ldt.c
170
if (alloc_size > PAGE_SIZE)
arch/x86/kernel/ldt.c
309
nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
arch/x86/kernel/ldt.c
361
nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
arch/x86/kernel/ldt.c
377
flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
arch/x86/kernel/ldt.c
440
if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
arch/x86/kernel/machine_kexec_64.c
672
nr_pages = (PAGE_ALIGN(end_paddr) - PAGE_ALIGN_DOWN(start_paddr))/PAGE_SIZE;
arch/x86/kernel/machine_kexec_64.c
88
mend = (kexec_debug_8250_mmio32 + PAGE_SIZE + 23) & PAGE_MASK;
arch/x86/kernel/mpparse.c
417
mpc = early_memremap(physptr, PAGE_SIZE);
arch/x86/kernel/mpparse.c
419
early_memunmap(mpc, PAGE_SIZE);
arch/x86/kernel/setup.c
327
u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
arch/x86/kernel/setup.c
746
if (memblock_reserve(bad_pages[i], PAGE_SIZE))
arch/x86/kernel/setup.c
763
e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
arch/x86/kernel/setup_percpu.c
146
atom_size = PAGE_SIZE;
arch/x86/kernel/sys_x86_64.c
121
return PAGE_SIZE;
arch/x86/kernel/sys_x86_64.c
205
info.low_limit = PAGE_SIZE;
arch/x86/kernel/tboot.c
159
for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
arch/x86/kernel/tboot.c
191
mr->start = round_down(start, PAGE_SIZE);
arch/x86/kernel/tboot.c
192
mr->size = round_up(end, PAGE_SIZE) - mr->start;
arch/x86/kernel/tboot.c
488
PAGE_SIZE);
arch/x86/kernel/traps.c
786
if (*addr < PAGE_SIZE)
arch/x86/kernel/uprobes.c
1170
return PAGE_SIZE - (vaddr & ~PAGE_MASK) >= 5;
arch/x86/kernel/uprobes.c
1844
vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
arch/x86/kernel/uprobes.c
649
.length = PAGE_SIZE,
arch/x86/kernel/uprobes.c
657
low_limit = PAGE_SIZE;
arch/x86/kernel/uprobes.c
667
info.low_limit = max(low_limit, PAGE_SIZE);
arch/x86/kernel/uprobes.c
704
vma = _install_special_mapping(mm, tramp->vaddr, PAGE_SIZE,
arch/x86/kernel/uprobes.c
719
if (vaddr > TASK_SIZE || vaddr < PAGE_SIZE)
arch/x86/kernel/uprobes.c
884
".balign " __stringify(PAGE_SIZE) "\n"
arch/x86/kernel/uprobes.c
896
".balign " __stringify(PAGE_SIZE) "\n"
arch/x86/kvm/cpuid.h
90
return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
arch/x86/kvm/emulate.c
1375
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
arch/x86/kvm/emulate.c
867
size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
arch/x86/kvm/hyperv.c
1990
if (is_noncanonical_invlpg_address(gva + j * PAGE_SIZE, vcpu))
arch/x86/kvm/hyperv.c
1993
kvm_x86_call(flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
arch/x86/kvm/hyperv.c
2423
offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
arch/x86/kvm/hyperv.c
288
data & PAGE_MASK, PAGE_SIZE)) {
arch/x86/kvm/hyperv.c
300
data & PAGE_MASK, PAGE_SIZE)) {
arch/x86/kvm/kvm_onhyperv.h
26
*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
arch/x86/kvm/lapic.c
2862
APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
arch/x86/kvm/mmu/mmu_internal.h
336
PAGE_SIZE, fault->write, fault->exec,
arch/x86/kvm/mmu/spte.h
40
#define SPTE_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
arch/x86/kvm/mmu/spte.h
42
#define SPTE_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
arch/x86/kvm/svm/avic.c
1016
PAGE_SIZE << avic_get_physical_id_table_order(vcpu->kvm)))
arch/x86/kvm/svm/avic.c
1079
PAGE_SIZE << avic_get_physical_id_table_order(vcpu->kvm)))
arch/x86/kvm/svm/avic.c
401
BUILD_BUG_ON((AVIC_MAX_PHYSICAL_ID + 1) * sizeof(new_entry) > PAGE_SIZE ||
arch/x86/kvm/svm/avic.c
402
(X2AVIC_MAX_PHYSICAL_ID + 1) * sizeof(new_entry) > PAGE_SIZE);
arch/x86/kvm/svm/sev.c
1318
src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
arch/x86/kvm/svm/sev.c
1322
dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, FOLL_WRITE);
arch/x86/kvm/svm/sev.c
1342
len = min_t(size_t, (PAGE_SIZE - s_off), size);
arch/x86/kvm/svm/sev.c
1408
offset = params.guest_uaddr & (PAGE_SIZE - 1);
arch/x86/kvm/svm/sev.c
1662
offset = params.guest_uaddr & (PAGE_SIZE - 1);
arch/x86/kvm/svm/sev.c
1663
if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
arch/x86/kvm/svm/sev.c
1668
PAGE_SIZE, &n, 0);
arch/x86/kvm/svm/sev.c
1839
offset = params.guest_uaddr & (PAGE_SIZE - 1);
arch/x86/kvm/svm/sev.c
1840
if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
arch/x86/kvm/svm/sev.c
1861
PAGE_SIZE, &n, FOLL_WRITE);
arch/x86/kvm/svm/sev.c
2305
memcpy(dst_vaddr, src_vaddr, PAGE_SIZE);
arch/x86/kvm/svm/sev.c
2341
memcpy(src_vaddr, dst_vaddr, PAGE_SIZE);
arch/x86/kvm/svm/sev.c
2386
npages = params.len / PAGE_SIZE;
arch/x86/kvm/svm/sev.c
2427
params.len -= count * PAGE_SIZE;
arch/x86/kvm/svm/sev.c
2429
params.uaddr += count * PAGE_SIZE;
arch/x86/kvm/svm/sev.c
3223
clflush_cache_range(va, PAGE_SIZE);
arch/x86/kvm/svm/sev.c
3601
#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
arch/x86/kvm/svm/sev.c
4121
if (kvm_read_guest(kvm, req_gpa, sev->guest_req_buf, PAGE_SIZE)) {
arch/x86/kvm/svm/sev.c
4139
if (kvm_write_guest(kvm, resp_gpa, sev->guest_resp_buf, PAGE_SIZE)) {
arch/x86/kvm/svm/sev.c
5090
use_2m_update ? PMD_SIZE : PAGE_SIZE);
arch/x86/kvm/svm/sev.c
5182
dbg.len = PAGE_SIZE;
arch/x86/kvm/svm/sev.c
710
if (size > PAGE_SIZE)
arch/x86/kvm/svm/sev.c
758
clflush_cache_range(page_virtual, PAGE_SIZE);
arch/x86/kvm/svm/sev.c
797
if ((paddr + PAGE_SIZE) == next_paddr) {
arch/x86/kvm/svm/sev.c
847
offset = vaddr & (PAGE_SIZE - 1);
arch/x86/kvm/svm/sev.c
852
len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
arch/x86/kvm/svm/sev.c
992
clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
arch/x86/kvm/svm/sev.c
997
vmsa.len = PAGE_SIZE;
arch/x86/kvm/svm/svm.c
705
memset(pm, 0xff, PAGE_SIZE * (1 << order));
arch/x86/kvm/svm/svm.h
44
#define IOPM_SIZE PAGE_SIZE * 3
arch/x86/kvm/svm/svm.h
45
#define MSRPM_SIZE PAGE_SIZE * 2
arch/x86/kvm/vmx/nested.c
76
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
arch/x86/kvm/vmx/nested.c
77
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
arch/x86/kvm/vmx/sgx.c
282
if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) {
arch/x86/kvm/vmx/tdx.c
1234
tdx->vcpu.run->hypercall.args[1] = size / PAGE_SIZE;
arch/x86/kvm/vmx/tdx.c
2406
kvm_tdx->td.tdcs_nr_pages = tdx_sysinfo->td_ctrl.tdcs_base_size / PAGE_SIZE;
arch/x86/kvm/vmx/tdx.c
2408
kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1;
arch/x86/kvm/vmx/tdx.c
3148
for (i = 0; i < PAGE_SIZE; i += TDX_EXTENDMR_CHUNKSIZE) {
arch/x86/kvm/vmx/tdx.c
3211
region.source_addr += PAGE_SIZE;
arch/x86/kvm/vmx/tdx.c
3212
region.gpa += PAGE_SIZE;
arch/x86/kvm/vmx/vmx.c
2885
if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE)
arch/x86/kvm/vmx/vmx.c
289
memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
arch/x86/kvm/vmx/vmx.c
290
PAGE_SIZE);
arch/x86/kvm/vmx/vmx.c
3117
memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
arch/x86/kvm/vmx/vmx.c
390
int size = PAGE_SIZE << L1D_CACHE_ORDER;
arch/x86/kvm/vmx/vmx.c
4072
if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))
arch/x86/kvm/vmx/vmx.c
4106
PAGE_SIZE);
arch/x86/kvm/vmx/vmx.c
4113
for (i = 0; i < (PAGE_SIZE / sizeof(tmp)); i++) {
arch/x86/kvm/vmx/vmx.c
5331
PAGE_SIZE * 3);
arch/x86/kvm/vmx/vmx.c
6533
WARN_ON(gpa & (PAGE_SIZE - 1));
arch/x86/kvm/vmx/vmx.c
7845
BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
arch/x86/kvm/x86.c
13361
vm_munmap(hva, old_npages * PAGE_SIZE);
arch/x86/kvm/x86.c
14388
min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
arch/x86/kvm/x86.c
14426
min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
arch/x86/kvm/x86.c
6677
if (addr > (unsigned int)(-3 * PAGE_SIZE))
arch/x86/kvm/x86.c
7880
unsigned offset = addr & (PAGE_SIZE-1);
arch/x86/kvm/x86.c
7881
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
arch/x86/kvm/x86.c
7918
offset = addr & (PAGE_SIZE-1);
arch/x86/kvm/x86.c
7919
if (WARN_ON(offset + bytes > PAGE_SIZE))
arch/x86/kvm/x86.c
7920
bytes = (unsigned)PAGE_SIZE - offset;
arch/x86/kvm/x86.c
7972
unsigned offset = addr & (PAGE_SIZE-1);
arch/x86/kvm/x86.c
7973
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
arch/x86/kvm/x86.c
8085
(gva & (PAGE_SIZE - 1));
arch/x86/kvm/x86.c
8438
vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
arch/x86/kvm/xen.c
1326
for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
arch/x86/kvm/xen.c
1348
blob_addr += page_num * PAGE_SIZE;
arch/x86/kvm/xen.c
1350
page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
arch/x86/kvm/xen.c
1354
ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
arch/x86/kvm/xen.c
1445
if (!kvm_gpc_check(gpc, PAGE_SIZE))
arch/x86/kvm/xen.c
1820
if (!kvm_gpc_check(gpc, PAGE_SIZE))
arch/x86/kvm/xen.c
1942
rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
arch/x86/kvm/xen.c
409
if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
arch/x86/kvm/xen.c
410
user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
arch/x86/kvm/xen.c
49
while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
arch/x86/kvm/xen.c
52
ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
arch/x86/kvm/xen.c
787
gfn_to_gpa(gfn), PAGE_SIZE);
arch/x86/kvm/xen.c
799
(unsigned long)hva, PAGE_SIZE);
arch/x86/kvm/xen.c
987
sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
arch/x86/mm/cpu_entry_area.c
101
for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
arch/x86/mm/cpu_entry_area.c
115
npages = sizeof(struct debug_store) / PAGE_SIZE;
arch/x86/mm/cpu_entry_area.c
116
BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
arch/x86/mm/cpu_entry_area.c
125
npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
arch/x86/mm/cpu_entry_area.c
126
for (; npages; npages--, cea += PAGE_SIZE)
arch/x86/mm/cpu_entry_area.c
134
npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
arch/x86/mm/cpu_entry_area.c
145
BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
arch/x86/mm/cpu_entry_area.c
224
BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
arch/x86/mm/cpu_entry_area.c
234
sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
arch/x86/mm/cpu_entry_area.c
251
BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
arch/x86/mm/cpu_entry_area.c
39
max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
arch/x86/mm/cpu_entry_area.c
73
BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
arch/x86/mm/dump_pagetables.c
152
#define PTE_LEVEL_MULT (PAGE_SIZE)
arch/x86/mm/dump_pagetables.c
231
npages = (addr - st->start_address) / PAGE_SIZE;
arch/x86/mm/extable.c
150
fault_address -= PAGE_SIZE;
arch/x86/mm/fault.c
535
if (address < PAGE_SIZE && !user_mode(regs))
arch/x86/mm/init.c
1073
offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
arch/x86/mm/init.c
143
PAGE_SIZE * num, PAGE_SIZE,
arch/x86/mm/init.c
148
ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));
arch/x86/mm/init.c
185
#define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
arch/x86/mm/init.c
192
base = __pa(extend_brk(tables, PAGE_SIZE));
arch/x86/mm/init.c
840
(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);
arch/x86/mm/init.c
842
if (((text_poke_mm_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
arch/x86/mm/init.c
843
text_poke_mm_addr += PAGE_SIZE;
arch/x86/mm/init.c
869
if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
arch/x86/mm/init_32.c
179
*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);
arch/x86/mm/init_32.c
306
unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
arch/x86/mm/init_32.c
324
addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
arch/x86/mm/init_32.c
325
PAGE_OFFSET + PAGE_SIZE-1;
arch/x86/mm/init_32.c
345
pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
arch/x86/mm/init_32.c
359
last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
arch/x86/mm/init_32.c
393
page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
arch/x86/mm/init_32.c
551
if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
arch/x86/mm/init_32.c
623
high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
arch/x86/mm/init_32.c
625
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
arch/x86/mm/init_32.c
701
#define __FIXADDR_TOP (-PAGE_SIZE)
arch/x86/mm/init_32.c
703
BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
arch/x86/mm/init_32.c
712
BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
arch/x86/mm/init_64.c
1035
vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
arch/x86/mm/init_64.c
1104
next = (addr + PAGE_SIZE) & PAGE_MASK;
arch/x86/mm/init_64.c
1385
kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
arch/x86/mm/init_64.c
1600
next = (addr + PAGE_SIZE) & PAGE_MASK;
arch/x86/mm/init_64.c
1607
next = (addr + PAGE_SIZE) & PAGE_MASK;
arch/x86/mm/init_64.c
1614
next = (addr + PAGE_SIZE) & PAGE_MASK;
arch/x86/mm/init_64.c
1621
next = (addr + PAGE_SIZE) & PAGE_MASK;
arch/x86/mm/init_64.c
1626
next = (addr + PAGE_SIZE) & PAGE_MASK;
arch/x86/mm/init_64.c
255
ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
arch/x86/mm/init_64.c
484
paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
arch/x86/mm/init_64.c
509
paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
arch/x86/mm/init_64.c
960
high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
arch/x86/mm/ioremap.c
524
vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
arch/x86/mm/ioremap.c
79
start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
arch/x86/mm/ioremap.c
824
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
arch/x86/mm/ioremap.c
845
return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
arch/x86/mm/ioremap.c
853
BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
arch/x86/mm/ioremap.c
855
WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
arch/x86/mm/ioremap.c
868
#define __FIXADDR_TOP (-PAGE_SIZE)
arch/x86/mm/kasan_init_64.c
109
void *p = early_alloc(PAGE_SIZE, nid, true);
arch/x86/mm/kasan_init_64.c
130
p = early_alloc(PAGE_SIZE, nid, true);
arch/x86/mm/kasan_init_64.c
148
end = round_up(end, PAGE_SIZE);
arch/x86/mm/kasan_init_64.c
23
static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
arch/x86/mm/kasan_init_64.c
257
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
arch/x86/mm/kasan_init_64.c
275
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
arch/x86/mm/kasan_init_64.c
322
return round_down(shadow, PAGE_SIZE);
arch/x86/mm/kasan_init_64.c
329
return round_up(shadow, PAGE_SIZE);
arch/x86/mm/kasan_init_64.c
439
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
arch/x86/mm/kasan_init_64.c
54
p = early_alloc(PAGE_SIZE, nid, true);
arch/x86/mm/kasan_init_64.c
66
p = early_alloc(PAGE_SIZE, nid, true);
arch/x86/mm/kasan_init_64.c
69
} while (pte++, addr += PAGE_SIZE, addr != end);
arch/x86/mm/kasan_init_64.c
90
p = early_alloc(PAGE_SIZE, nid, true);
arch/x86/mm/maccess.c
17
if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
arch/x86/mm/mem_encrypt_amd.c
414
vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
arch/x86/mm/mem_encrypt_amd.c
50
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
arch/x86/mm/pat/cpa-test.c
101
expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
arch/x86/mm/pat/cpa-test.c
152
pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
arch/x86/mm/pat/cpa-test.c
172
addrs[k] = addr[i] + k*PAGE_SIZE;
arch/x86/mm/pat/cpa-test.c
71
i += GPS/PAGE_SIZE;
arch/x86/mm/pat/cpa-test.c
80
i += LPS/PAGE_SIZE;
arch/x86/mm/pat/memtype.c
404
unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
arch/x86/mm/pat/memtype.c
660
if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
arch/x86/mm/pat/memtype.c
663
if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
arch/x86/mm/pat/memtype.c
919
while (size > PAGE_SIZE) {
arch/x86/mm/pat/memtype.c
920
size -= PAGE_SIZE;
arch/x86/mm/pat/memtype.c
921
paddr += PAGE_SIZE;
arch/x86/mm/pat/memtype_interval.c
121
return interval_iter_first(&memtype_rbroot, addr, addr + PAGE_SIZE-1);
arch/x86/mm/pat/set_memory.c
1112
if (size == PAGE_SIZE)
arch/x86/mm/pat/set_memory.c
1154
lpinc = PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
1434
start += PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
1573
start += PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
2168
ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
arch/x86/mm/pat/set_memory.c
2180
memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
arch/x86/mm/pat/set_memory.c
2206
ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
arch/x86/mm/pat/set_memory.c
2213
memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
arch/x86/mm/pat/set_memory.c
2240
memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
arch/x86/mm/pat/set_memory.c
2499
end = start + PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
2525
end = start + PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
2568
end = start + PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
2653
numpages * PAGE_SIZE);
arch/x86/mm/pat/set_memory.c
320
return *cpa->vaddr + idx * PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
419
end = addr + PAGE_SIZE * cpa->numpages;
arch/x86/mm/pat/set_memory.c
449
end = start + cpa->numpages * PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
469
clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
arch/x86/mm/pat/set_memory.c
631
end = start + npg * PAGE_SIZE - 1;
arch/x86/mm/pat/set_memory.c
643
if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
arch/x86/mm/pat/set_memory.c
692
end = start + npg * PAGE_SIZE - 1;
arch/x86/mm/pgtable.c
556
__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
arch/x86/mm/pgtable.c
558
-reserve, __FIXADDR_TOP + PAGE_SIZE);
arch/x86/mm/pgtable.c
749
flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
arch/x86/mm/pgtable.c
781
flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
arch/x86/mm/pti.c
396
addr = round_up(addr + 1, PAGE_SIZE);
arch/x86/mm/pti.c
416
addr = round_up(addr + 1, PAGE_SIZE);
arch/x86/mm/pti.c
488
end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
arch/x86/mm/testmmiotrace.c
107
p = ioremap(mmio_address, PAGE_SIZE);
arch/x86/mm/tlb.c
1529
for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
arch/x86/net/bpf_jit_comp.c
2255
u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
arch/x86/net/bpf_jit_comp.c
3577
image = bpf_jit_alloc_exec(PAGE_SIZE);
arch/x86/net/bpf_jit_comp.c
3581
ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
arch/x86/pci/pcbios.c
379
opt.size = PAGE_SIZE;
arch/x86/platform/efi/efi.c
552
memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
arch/x86/platform/efi/efi.c
699
left += PAGE_SIZE << *pg_shift;
arch/x86/platform/efi/efi.c
754
num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
arch/x86/platform/efi/runtime-map.c
35
return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);
arch/x86/platform/efi/runtime-map.c
43
return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \
arch/x86/platform/olpc/olpc_dt.c
131
const size_t chunk_size = max(PAGE_SIZE, size);
arch/x86/platform/uv/uv_time.c
283
offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
arch/x86/power/hibernate.c
166
__memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
arch/x86/realmode/init.c
58
mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
arch/x86/um/vdso/vma.c
21
BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
arch/x86/um/vdso/vma.c
23
um_vdso_addr = task_size - PAGE_SIZE;
arch/x86/um/vdso/vma.c
47
vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
arch/x86/virt/svm/sev.c
950
npages = page_level_size(rmp_level) / PAGE_SIZE;
arch/x86/virt/vmx/tdx/tdx.c
1516
clflush_cache_range(page_to_virt(page), PAGE_SIZE);
arch/x86/virt/vmx/tdx/tdx.c
470
pamt_sz = ALIGN(pamt_sz, PAGE_SIZE);
arch/x86/virt/vmx/tdx/tdx.c
664
tdx_quirk_reset_paddr(page_to_phys(page), PAGE_SIZE);
arch/x86/xen/enlighten_hvm.c
61
for (pa = PAGE_SIZE;
arch/x86/xen/enlighten_hvm.c
62
!e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
arch/x86/xen/enlighten_hvm.c
64
pa += PAGE_SIZE)
arch/x86/xen/enlighten_hvm.c
69
memblock_reserve(pa, PAGE_SIZE);
arch/x86/xen/enlighten_hvm.c
70
HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
arch/x86/xen/enlighten_hvm.c
75
early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
arch/x86/xen/enlighten_pv.c
506
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
arch/x86/xen/enlighten_pv.c
526
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
arch/x86/xen/enlighten_pv.c
560
BUG_ON(size > PAGE_SIZE);
arch/x86/xen/enlighten_pv.c
595
BUG_ON(size > PAGE_SIZE);
arch/x86/xen/grant-table.c
107
area->area = get_vm_area(PAGE_SIZE * nr_frames, VM_IOREMAP);
arch/x86/xen/grant-table.c
111
PAGE_SIZE * nr_frames, gnttab_apply, area))
arch/x86/xen/grant-table.c
47
addr += PAGE_SIZE;
arch/x86/xen/grant-table.c
69
addr += PAGE_SIZE;
arch/x86/xen/grant-table.c
90
addr += PAGE_SIZE;
arch/x86/xen/mmu_pv.c
1095
for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
arch/x86/xen/mmu_pv.c
1109
xen_free_ro_pages(pa, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
1129
xen_free_ro_pages(pa, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
1246
size = xen_start_info->nr_pt_frames * PAGE_SIZE;
arch/x86/xen/mmu_pv.c
131
BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);
arch/x86/xen/mmu_pv.c
1354
(info->end - info->start) <= PAGE_SIZE) {
arch/x86/xen/mmu_pv.c
1883
xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
arch/x86/xen/mmu_pv.c
1965
n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
arch/x86/xen/mmu_pv.c
1993
pud = early_memremap(pud_phys, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
1997
pmd = early_memremap(pmd_phys, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2001
pt = early_memremap(pt_phys, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2011
early_memunmap(pt, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2016
pt_phys += PAGE_SIZE;
arch/x86/xen/mmu_pv.c
2019
early_memunmap(pmd, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2024
pmd_phys += PAGE_SIZE;
arch/x86/xen/mmu_pv.c
2027
early_memunmap(pud, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2031
pud_phys += PAGE_SIZE;
arch/x86/xen/mmu_pv.c
2053
memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
arch/x86/xen/mmu_pv.c
2072
memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2075
memblock_reserve(paddr, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2079
memblock_reserve(paddr, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2088
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
arch/x86/xen/mmu_pv.c
2233
memset(dummy_mapping, 0xff, PAGE_SIZE);
arch/x86/xen/mmu_pv.c
2245
for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
arch/x86/xen/mmu_pv.c
2275
for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
arch/x86/xen/mmu_pv.c
2365
memset((void *) vstart, 0, PAGE_SIZE << order);
arch/x86/xen/mmu_pv.c
2403
memset((void *) vstart, 0, PAGE_SIZE << order);
arch/x86/xen/p2m.c
181
return memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
arch/x86/xen/p2m.c
190
memblock_free(p, PAGE_SIZE);
arch/x86/xen/p2m.c
242
ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
arch/x86/xen/p2m.c
408
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
arch/x86/xen/p2m.c
533
pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
arch/x86/xen/p2m.c
88
#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
arch/x86/xen/p2m.c
89
#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))
arch/x86/xen/pmu.c
497
BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);
arch/x86/xen/setup.c
503
limit = MAXMEM / PAGE_SIZE;
arch/x86/xen/setup.c
505
limit = GB(512) / PAGE_SIZE;
arch/x86/xen/setup.c
545
end &= ~((phys_addr_t)PAGE_SIZE - 1);
arch/x86/xen/setup.c
60
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
arch/x86/xen/setup.c
611
for (addr = start; addr < start + size; addr += PAGE_SIZE) {
arch/x86/xen/setup.c
614
start = addr + PAGE_SIZE;
arch/x86/xen/setup.c
937
set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
arch/xtensa/include/asm/cacheasm.h
78
__loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
arch/xtensa/include/asm/cacheflush.h
156
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/include/asm/cacheflush.h
69
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/include/asm/cacheflush.h
78
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/include/asm/cacheflush.h
95
((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
arch/xtensa/include/asm/cachetype.h
8
#define cpu_dcache_is_aliasing() (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/include/asm/elf.h
115
#define ELF_EXEC_PAGESIZE PAGE_SIZE
arch/xtensa/include/asm/fixmap.h
29
#define FIXADDR_END (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
arch/xtensa/include/asm/fixmap.h
33
#define FIXADDR_TOP (FIXADDR_START + FIXADDR_SIZE - PAGE_SIZE)
arch/xtensa/include/asm/highmem.h
22
(LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
arch/xtensa/include/asm/highmem.h
30
#if DCACHE_WAY_SIZE > PAGE_SIZE
arch/xtensa/include/asm/highmem.h
79
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
arch/xtensa/include/asm/page.h
118
#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
arch/xtensa/include/asm/page.h
62
#if DCACHE_WAY_SIZE > PAGE_SIZE
arch/xtensa/include/asm/page.h
73
#if ICACHE_WAY_SIZE > PAGE_SIZE
arch/xtensa/include/asm/pgtable.h
181
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/include/asm/pgtable.h
217
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
arch/xtensa/include/asm/pgtable.h
290
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
arch/xtensa/include/asm/shmparam.h
19
#define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE)
arch/xtensa/kernel/entry.S
1729
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/kernel/entry.S
1861
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
arch/xtensa/kernel/pci-dma.c
27
unsigned long off = paddr & (PAGE_SIZE - 1);
arch/xtensa/kernel/pci-dma.c
35
size_t sz = min_t(size_t, size, PAGE_SIZE - off);
arch/xtensa/kernel/setup.c
425
if (itlb_probe(tmpaddr + PAGE_SIZE) & BIT(ITLB_HIT_BIT))
arch/xtensa/kernel/setup.c
426
invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));
arch/xtensa/kernel/setup.c
437
write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) |
arch/xtensa/kernel/setup.c
441
(tmpaddr & PAGE_MASK) + PAGE_SIZE);
arch/xtensa/kernel/setup.c
495
"a"(PAGE_SIZE),
arch/xtensa/mm/cache.c
142
unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
arch/xtensa/mm/cache.c
170
phys += PAGE_SIZE;
arch/xtensa/mm/cache.c
171
temp += PAGE_SIZE;
arch/xtensa/mm/cache.c
230
flush_tlb_page(vma, addr + i * PAGE_SIZE);
arch/xtensa/mm/cache.c
233
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/mm/cache.c
236
unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
arch/xtensa/mm/cache.c
246
phys += PAGE_SIZE;
arch/xtensa/mm/cache.c
256
void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
arch/xtensa/mm/cache.c
271
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/mm/cache.c
58
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/mm/highmem.c
15
#if DCACHE_WAY_SIZE > PAGE_SIZE
arch/xtensa/mm/init.c
102
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
arch/xtensa/mm/init.c
104
min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
arch/xtensa/mm/init.c
106
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
arch/xtensa/mm/init.c
96
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
arch/xtensa/mm/init.c
97
(LAST_PKMAP*PAGE_SIZE) >> 10,
arch/xtensa/mm/kasan_init.c
37
unsigned long n_pages = (end - start) / PAGE_SIZE;
arch/xtensa/mm/kasan_init.c
42
pte_t *pte = memblock_alloc_or_panic(n_pages * sizeof(pte_t), PAGE_SIZE);
arch/xtensa/mm/kasan_init.c
51
memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
arch/xtensa/mm/kasan_init.c
93
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
arch/xtensa/mm/misc.S
112
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/mm/misc.S
218
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/mm/misc.S
271
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
arch/xtensa/mm/mmu.c
35
pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
arch/xtensa/mm/mmu.c
38
__func__, n_pages * sizeof(pte_t), PAGE_SIZE);
arch/xtensa/mm/tlb.c
103
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
arch/xtensa/mm/tlb.c
112
start += PAGE_SIZE;
arch/xtensa/mm/tlb.c
117
start += PAGE_SIZE;
arch/xtensa/mm/tlb.c
159
start += PAGE_SIZE;
arch/xtensa/mm/tlb.c
169
local_flush_tlb_range(vma, address, address + PAGE_SIZE * nr);
arch/xtensa/platforms/iss/simdisk.c
238
if (count == 0 || count > PAGE_SIZE)
block/badblocks.c
1430
while (len < PAGE_SIZE && i < bb->count) {
block/badblocks.c
1440
len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
block/badblocks.c
1501
bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
block/badblocks.c
1503
bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL);
block/bdev.c
145
while (bsize < PAGE_SIZE) {
block/bdev.c
232
sb->s_type->name, size, PAGE_SIZE, err_str);
block/bdev.c
238
if (size > PAGE_SIZE && sb_validate_large_blocksize(sb, size))
block/bio-integrity.c
279
size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
block/bio-integrity.c
284
size_t next = min_t(size_t, PAGE_SIZE, bytes);
block/bio.c
1092
unsigned long nr = off / PAGE_SIZE;
block/bio.c
1095
__bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
block/bio.c
1116
unsigned long nr = off / PAGE_SIZE;
block/bio.c
1120
return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
block/bio.c
1142
len = min(len, PAGE_SIZE - offset);
block/bio.c
1191
nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
block/bio.c
1192
fi.offset / PAGE_SIZE + 1;
block/bio.c
1307
while (*size > PAGE_SIZE) {
block/bio.c
1345
if (this_len > PAGE_SIZE * 2)
block/bio.c
1428
size_t nr_pages = (bv->bv_offset + bv->bv_len - 1) / PAGE_SIZE -
block/bio.c
1429
bv->bv_offset / PAGE_SIZE + 1;
block/bio.c
981
if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
block/blk-crypto-fallback.c
344
__bio_add_page(enc_bio, enc_pages[enc_idx], PAGE_SIZE, 0);
block/blk-lib.c
188
sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);
block/blk-map.c
173
nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
block/blk-map.c
182
i = map_data->offset / PAGE_SIZE;
block/blk-map.c
185
unsigned int bytes = PAGE_SIZE;
block/blk-map.c
370
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
block/blk-map.c
389
unsigned int bytes = PAGE_SIZE;
block/blk-mq-sysfs.c
83
const size_t size = PAGE_SIZE - 1;
block/blk-mq.c
3442
return (size_t)PAGE_SIZE << order;
block/blk-settings.c
239
length += (max_segments - 2) * PAGE_SIZE;
block/blk-settings.c
500
lim->max_fast_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
block/blk-settings.c
510
if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
block/blk-settings.c
664
if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
block/blk-settings.c
665
sectors = PAGE_SIZE >> SECTOR_SHIFT;
block/ioctl.c
712
bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
block/ioctl.c
776
(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
block/ioctl.c
837
(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
block/partitions/acorn.c
103
strlcat(state->pp_buf, " >\n", PAGE_SIZE);
block/partitions/acorn.c
133
strlcat(state->pp_buf, " [Linux]", PAGE_SIZE);
block/partitions/acorn.c
141
strlcat(state->pp_buf, " <", PAGE_SIZE);
block/partitions/acorn.c
151
strlcat(state->pp_buf, " >", PAGE_SIZE);
block/partitions/acorn.c
296
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/acorn.c
369
strlcat(state->pp_buf, " [ICS]", PAGE_SIZE);
block/partitions/acorn.c
403
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/acorn.c
43
strlcat(state->pp_buf, " [", PAGE_SIZE);
block/partitions/acorn.c
44
strlcat(state->pp_buf, name, PAGE_SIZE);
block/partitions/acorn.c
45
strlcat(state->pp_buf, "]", PAGE_SIZE);
block/partitions/acorn.c
463
strlcat(state->pp_buf, " [POWERTEC]", PAGE_SIZE);
block/partitions/acorn.c
474
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/acorn.c
545
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/acorn.c
81
strlcat(state->pp_buf, " [RISCiX]", PAGE_SIZE);
block/partitions/acorn.c
88
strlcat(state->pp_buf, " <", PAGE_SIZE);
block/partitions/acorn.c
97
strlcat(state->pp_buf, "(", PAGE_SIZE);
block/partitions/acorn.c
98
strlcat(state->pp_buf, rr->part[part].name, PAGE_SIZE);
block/partitions/acorn.c
99
strlcat(state->pp_buf, ")", PAGE_SIZE);
block/partitions/aix.c
193
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/aix.c
261
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/amiga.c
194
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/amiga.c
198
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/amiga.c
202
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/amiga.c
89
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/atari.c
131
strlcat(state->pp_buf, " >", PAGE_SIZE);
block/partitions/atari.c
138
strlcat(state->pp_buf, " ICD<", PAGE_SIZE);
block/partitions/atari.c
147
strlcat(state->pp_buf, " >", PAGE_SIZE);
block/partitions/atari.c
153
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/atari.c
73
strlcat(state->pp_buf, " AHDI", PAGE_SIZE);
block/partitions/atari.c
92
strlcat(state->pp_buf, " XGM<", PAGE_SIZE);
block/partitions/check.h
45
strlcat(p->pp_buf, tmp, PAGE_SIZE);
block/partitions/cmdline.c
248
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/cmdline.c
382
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/cmdline.c
58
if (new_subpart->size < (sector_t)PAGE_SIZE) {
block/partitions/core.c
135
snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
block/partitions/core.c
168
" unable to read partition table\n", PAGE_SIZE);
block/partitions/efi.c
754
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/ibm.c
184
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/ibm.c
222
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/ibm.c
244
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/ibm.c
261
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/ibm.c
273
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/ibm.c
295
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/ibm.c
301
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/ibm.c
317
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/ibm.c
394
strlcat(state->pp_buf, "(nonl)", PAGE_SIZE);
block/partitions/ibm.c
398
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/karma.c
56
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/ldm.c
585
strlcat(pp->pp_buf, " [LDM]", PAGE_SIZE);
block/partitions/ldm.c
600
strlcat(pp->pp_buf, "\n", PAGE_SIZE);
block/partitions/mac.c
155
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/mac.c
89
strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
block/partitions/msdos.c
270
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/msdos.c
277
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/msdos.c
290
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/msdos.c
298
strlcat(state->pp_buf, " >\n", PAGE_SIZE);
block/partitions/msdos.c
373
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/msdos.c
394
strlcat(state->pp_buf, "bad subpartition - ignored\n", PAGE_SIZE);
block/partitions/msdos.c
403
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/msdos.c
405
strlcat(state->pp_buf, " >\n", PAGE_SIZE);
block/partitions/msdos.c
503
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/msdos.c
518
strlcat(state->pp_buf, " >\n", PAGE_SIZE);
block/partitions/msdos.c
552
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/msdos.c
561
strlcat(state->pp_buf, " >\n", PAGE_SIZE);
block/partitions/msdos.c
605
strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
block/partitions/msdos.c
632
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/msdos.c
681
strlcat(state->pp_buf, " <", PAGE_SIZE);
block/partitions/msdos.c
683
strlcat(state->pp_buf, " >", PAGE_SIZE);
block/partitions/msdos.c
691
strlcat(state->pp_buf, "[DM]", PAGE_SIZE);
block/partitions/msdos.c
693
strlcat(state->pp_buf, "[EZD]", PAGE_SIZE);
block/partitions/msdos.c
696
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/of.c
107
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/of.c
67
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/osf.c
84
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/sgi.c
82
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/sun.c
124
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/sysv68.c
78
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/sysv68.c
88
strlcat(state->pp_buf, tmp, PAGE_SIZE);
block/partitions/sysv68.c
92
strlcat(state->pp_buf, "\n", PAGE_SIZE);
block/partitions/ultrix.c
42
strlcat(state->pp_buf, "\n", PAGE_SIZE);
crypto/acompress.c
500
max = PAGE_SIZE;
crypto/acompress.c
517
max = PAGE_SIZE;
crypto/adiantum.c
402
src->offset + req->cryptlen <= PAGE_SIZE) {
crypto/adiantum.c
466
dst->offset + req->cryptlen <= PAGE_SIZE) {
crypto/aead.c
237
PAGE_SIZE / 8)
crypto/af_alg.c
1047
plen = min_t(size_t, len, PAGE_SIZE);
crypto/af_alg.c
1074
ctx->merge = plen & (PAGE_SIZE - 1);
crypto/af_alg.c
983
PAGE_SIZE - sg->offset - sg->length);
crypto/af_alg.c
993
(PAGE_SIZE - 1);
crypto/ahash.c
232
if (nbytes > (unsigned int)PAGE_SIZE - offset)
crypto/ahash.c
77
((unsigned int)(PAGE_SIZE)) - offset);
crypto/algif_hash.c
75
DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
crypto/asymmetric_keys/asymmetric_type.c
520
parse_buf = kstrndup(restriction, PAGE_SIZE, GFP_KERNEL);
crypto/async_tx/raid6test.c
131
memset(page_address(recovi), 0xf0, PAGE_SIZE);
crypto/async_tx/raid6test.c
132
memset(page_address(recovj), 0xba, PAGE_SIZE);
crypto/async_tx/raid6test.c
137
raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs, dataoffs);
crypto/async_tx/raid6test.c
139
erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
crypto/async_tx/raid6test.c
140
errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);
crypto/async_tx/raid6test.c
167
memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
crypto/async_tx/raid6test.c
168
memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);
crypto/async_tx/raid6test.c
173
tx = async_gen_syndrome(dataptrs, dataoffs, disks, PAGE_SIZE, &submit);
crypto/async_tx/raid6test.c
40
get_random_bytes(page_address(data[i]), PAGE_SIZE);
crypto/lskcipher.c
82
BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
crypto/lskcipher.c
83
MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);
crypto/lskcipher.c
85
tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
crypto/lskcipher.c
91
p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
crypto/lskcipher.c
97
unsigned chunk = min((unsigned)PAGE_SIZE, len);
crypto/rng.c
179
if (alg->seedsize > PAGE_SIZE / 8)
crypto/scatterwalk.c
124
const unsigned int limit = PAGE_SIZE;
crypto/scatterwalk.c
130
src_page += src_offset / PAGE_SIZE;
crypto/scatterwalk.c
131
dst_page += dst_offset / PAGE_SIZE;
crypto/scompress.c
200
dpage += doff / PAGE_SIZE;
crypto/scompress.c
203
n = (dlen - 1) / PAGE_SIZE;
crypto/scompress.c
204
n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
crypto/scompress.c
206
size_add(doff, dlen) > PAGE_SIZE)
crypto/scompress.c
222
spage = spage + soff / PAGE_SIZE;
crypto/scompress.c
225
n = (slen - 1) / PAGE_SIZE;
crypto/scompress.c
226
n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
crypto/scompress.c
228
size_add(soff, slen) > PAGE_SIZE)
crypto/scompress.c
268
if (dlen <= PAGE_SIZE)
crypto/scompress.c
270
dlen -= PAGE_SIZE;
crypto/skcipher.c
678
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
crypto/skcipher.c
679
alg->statesize > PAGE_SIZE / 2 ||
crypto/skcipher.c
680
(alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
crypto/skcipher.c
700
if (alg->walksize > PAGE_SIZE / 8)
crypto/tcrypt.c
109
int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
crypto/tcrypt.c
1123
if (bs > XBUFSIZE * PAGE_SIZE) {
crypto/tcrypt.c
1125
bs, XBUFSIZE * PAGE_SIZE);
crypto/tcrypt.c
113
rem = PAGE_SIZE;
crypto/tcrypt.c
1134
memset(tvmem[0], 0xff, PAGE_SIZE);
crypto/tcrypt.c
116
rem = buflen % PAGE_SIZE;
crypto/tcrypt.c
1161
unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
crypto/tcrypt.c
1166
while (k > PAGE_SIZE) {
crypto/tcrypt.c
1168
PAGE_SIZE);
crypto/tcrypt.c
1169
memset(cur->xbuf[p], 0xff, PAGE_SIZE);
crypto/tcrypt.c
1171
k -= PAGE_SIZE;
crypto/tcrypt.c
126
sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
crypto/tcrypt.c
1341
if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) {
crypto/tcrypt.c
1344
TVMEMSIZE * PAGE_SIZE);
crypto/tcrypt.c
1351
memset(tvmem[0], 0xff, PAGE_SIZE);
crypto/tcrypt.c
1372
sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));
crypto/tcrypt.c
1374
if (k > PAGE_SIZE) {
crypto/tcrypt.c
1376
PAGE_SIZE - *keysize);
crypto/tcrypt.c
1377
k -= PAGE_SIZE;
crypto/tcrypt.c
1379
while (k > PAGE_SIZE) {
crypto/tcrypt.c
1380
sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
crypto/tcrypt.c
1381
memset(tvmem[j], 0xff, PAGE_SIZE);
crypto/tcrypt.c
1383
k -= PAGE_SIZE;
crypto/tcrypt.c
259
if (aad_size >= PAGE_SIZE) {
crypto/tcrypt.c
339
if (bs + authsize > XBUFSIZE * PAGE_SIZE) {
crypto/tcrypt.c
342
XBUFSIZE * PAGE_SIZE);
crypto/tcrypt.c
351
memset(tvmem[0], 0xff, PAGE_SIZE);
crypto/tcrypt.c
544
if (aad_size >= PAGE_SIZE) {
crypto/tcrypt.c
603
if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) {
crypto/tcrypt.c
606
TVMEMSIZE * PAGE_SIZE);
crypto/tcrypt.c
633
memset(tvmem[0], 0xff, PAGE_SIZE);
crypto/tcrypt.c
707
sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
crypto/tcrypt.c
708
memset(tvmem[i], 0xff, PAGE_SIZE);
crypto/tcrypt.c
908
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
crypto/tcrypt.c
910
speed[i].blen, TVMEMSIZE * PAGE_SIZE);
crypto/testmgr.c
1034
PAGE_SIZE - 128,
crypto/testmgr.c
1035
PAGE_SIZE - 1);
crypto/testmgr.c
1039
div->offset = prandom_u32_below(rng, PAGE_SIZE);
crypto/testmgr.c
1757
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
crypto/testmgr.c
2609
ctx->maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
crypto/testmgr.c
2745
if (WARN_ON(template[i].len > PAGE_SIZE))
crypto/testmgr.c
3067
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
crypto/testmgr.c
3809
if (WARN_ON(vecs->m_size > PAGE_SIZE))
crypto/testmgr.c
3862
if (WARN_ON(c_size > PAGE_SIZE))
crypto/testmgr.c
388
.offset = PAGE_SIZE - 32
crypto/testmgr.c
391
.offset = PAGE_SIZE - 7
crypto/testmgr.c
449
.offset = PAGE_SIZE - 32,
crypto/testmgr.c
452
.offset = PAGE_SIZE - 7,
crypto/testmgr.c
636
2 * PAGE_SIZE) {
crypto/xor.c
135
b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;
drivers/accel/amdxdna/aie2_message.c
65
*size = PAGE_SIZE << order;
drivers/accel/amdxdna/amdxdna_ctx.c
317
if (buf_size > PAGE_SIZE) {
drivers/accel/amdxdna/amdxdna_ctx.c
323
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/accel/amdxdna/amdxdna_gem.c
315
offset += PAGE_SIZE;
drivers/accel/amdxdna/amdxdna_ubuf.c
172
if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
drivers/accel/amdxdna/amdxdna_ubuf.c
173
!IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
drivers/accel/habanalabs/common/command_buffer.c
191
if (cb_args->cb_size < PAGE_SIZE)
drivers/accel/habanalabs/common/command_buffer.c
192
cb_args->cb_size = PAGE_SIZE;
drivers/accel/habanalabs/common/debugfs.c
370
total_npages * PAGE_SIZE;
drivers/accel/habanalabs/common/debugfs.c
372
(total_npages + npages) * PAGE_SIZE;
drivers/accel/habanalabs/common/debugfs.c
666
if (!IS_ALIGNED(device_va, PAGE_SIZE) ||
drivers/accel/habanalabs/common/debugfs.c
667
!IS_ALIGNED(off_bytes, PAGE_SIZE) ||
drivers/accel/habanalabs/common/debugfs.c
668
!IS_ALIGNED(len_bytes, PAGE_SIZE)) {
drivers/accel/habanalabs/common/habanalabs.h
3705
return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
drivers/accel/habanalabs/common/habanalabs.h
3706
(PAGE_SIZE - 1)) >> PAGE_SHIFT;
drivers/accel/habanalabs/common/habanalabs_ioctl.c
80
if (hw_ip.dram_size > PAGE_SIZE)
drivers/accel/habanalabs/common/hldio.c
254
for (i = 0, device_va = io->device_va; i < npages ; ++i, device_va += PAGE_SIZE) {
drivers/accel/habanalabs/common/hldio.c
263
io->bv[i].bv_len = PAGE_SIZE;
drivers/accel/habanalabs/common/hldio.c
395
for (i = 0, addr = p2pr->p2pmem ; i < (p2pr->size >> PAGE_SHIFT) ; ++i, addr += PAGE_SIZE) {
drivers/accel/habanalabs/common/memory.c
1548
dma_max_seg_size = ALIGN_DOWN(dma_get_max_seg_size(dev), PAGE_SIZE);
drivers/accel/habanalabs/common/memory.c
1549
if (dma_max_seg_size < PAGE_SIZE) {
drivers/accel/habanalabs/common/memory.c
1903
addr, PAGE_SIZE);
drivers/accel/habanalabs/common/memory.c
1910
size, PAGE_SIZE);
drivers/accel/habanalabs/common/memory.c
1917
offset, PAGE_SIZE);
drivers/accel/habanalabs/common/memory.c
843
u32 npages, page_size = PAGE_SIZE,
drivers/accel/habanalabs/common/memory_mgr.c
254
if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
drivers/accel/habanalabs/common/state_dump.c
76
new_size = max_t(size_t, PAGE_SIZE, round_up(desired_size, PAGE_SIZE));
drivers/accel/habanalabs/common/sysfs.c
455
.size = PAGE_SIZE,
drivers/accel/habanalabs/gaudi/gaudi.c
5589
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
drivers/accel/habanalabs/gaudi/gaudi.c
976
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
drivers/accel/habanalabs/gaudi2/gaudi2.c
2861
if (PAGE_SIZE == SZ_64K) {
drivers/accel/habanalabs/gaudi2/gaudi2.c
5435
ilog2(PAGE_SIZE / sizeof(struct hl_cq_entry)));
drivers/accel/habanalabs/gaudi2/gaudi2.c
6376
if (PAGE_SIZE == SZ_64K) {
drivers/accel/ivpu/ivpu_fw.c
199
vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE);
drivers/accel/ivpu/ivpu_fw.c
200
vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE);
drivers/accel/ivpu/ivpu_mmu_context.c
59
dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/accel/ivpu/ivpu_mmu_context.c
72
dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/accel/ivpu/ivpu_mmu_context.c
86
dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
drivers/accel/qaic/qaic_control.c
421
need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
drivers/accel/qaic/qaic_data.c
190
size = size ? size : PAGE_SIZE;
drivers/accel/qaic/qaic_data.c
458
nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/accel/qaic/qaic_data.c
463
buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
drivers/accel/qaic/qaic_data.c
486
order = min(get_order(nr_pages * PAGE_SIZE), max_order);
drivers/accel/qaic/qaic_data.c
506
buf_extra += abs(nr_pages) * PAGE_SIZE;
drivers/accel/qaic/qaic_data.c
526
sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
drivers/accel/qaic/qaic_data.c
528
sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
drivers/accel/qaic/qaic_debugfs.c
129
page->size = PAGE_SIZE;
drivers/accel/rocket/rocket_gem.c
95
rkt_obj->size, PAGE_SIZE,
drivers/accessibility/speakup/kobjects.c
36
size_t bufsize = PAGE_SIZE;
drivers/accessibility/speakup/kobjects.c
708
size_t bufsize = PAGE_SIZE;
drivers/acpi/acpi_dbg.c
25
#define ACPI_AML_BUF_SIZE PAGE_SIZE
drivers/acpi/apei/einj-core.c
293
if (e->length < sizeof(*e) || e->length > PAGE_SIZE) {
drivers/acpi/apei/einj-core.c
373
if (trigger_tab->table_size > PAGE_SIZE ||
drivers/acpi/apei/ghes.c
352
trunk = PAGE_SIZE - offset;
drivers/acpi/device_sysfs.c
490
PAGE_SIZE - 1);
drivers/acpi/nvs.c
111
nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
drivers/acpi/osl.c
292
if (pg_sz > PAGE_SIZE)
drivers/acpi/osl.c
353
pg_off = round_down(phys, PAGE_SIZE);
drivers/acpi/osl.c
354
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
drivers/acpi/prmt.c
83
(md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) {
drivers/acpi/tables.c
495
memblock_phys_alloc_range(all_tables_size, PAGE_SIZE,
drivers/android/binder/rust_binderfs.c
625
sb->s_blocksize = PAGE_SIZE;
drivers/android/binder_alloc.c
1001
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
drivers/android/binder_alloc.c
1078
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
drivers/android/binder_alloc.c
1152
page_addr = alloc->vm_start + index * PAGE_SIZE;
drivers/android/binder_alloc.c
1188
zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
drivers/android/binder_alloc.c
1347
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
drivers/android/binder_alloc.c
1378
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
drivers/android/binder_alloc.c
200
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
drivers/android/binder_alloc.c
204
index = (page_addr - alloc->vm_start) / PAGE_SIZE;
drivers/android/binder_alloc.c
371
for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
drivers/android/binder_alloc.c
375
index = (page_addr - alloc->vm_start) / PAGE_SIZE;
drivers/android/binder_alloc.c
400
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
drivers/android/binder_alloc.c
404
index = (page_addr - alloc->vm_start) / PAGE_SIZE;
drivers/android/binder_alloc.c
733
buffer_start_page(buffer) + PAGE_SIZE);
drivers/android/binder_alloc.c
846
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
drivers/android/binder_alloc.c
920
alloc->buffer_size / PAGE_SIZE);
drivers/android/binderfs.c
621
sb->s_blocksize = PAGE_SIZE;
drivers/android/tests/binder_alloc_kunit.c
151
for (; page_addr < end; page_addr += PAGE_SIZE) {
drivers/android/tests/binder_alloc_kunit.c
152
page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
drivers/android/tests/binder_alloc_kunit.c
193
for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
drivers/android/tests/binder_alloc_kunit.c
216
for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
drivers/android/tests/binder_alloc_kunit.c
234
unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
drivers/android/tests/binder_alloc_kunit.c
282
failures, (alloc->buffer_size / PAGE_SIZE));
drivers/android/tests/binder_alloc_kunit.c
29
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
drivers/android/tests/binder_alloc_kunit.c
391
BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
drivers/android/tests/binder_alloc_kunit.c
395
end = ALIGN(end, PAGE_SIZE);
drivers/ata/libahci.c
342
if (count > PAGE_SIZE) {
drivers/ata/libahci.c
347
hpriv->em_buf_sz, PAGE_SIZE);
drivers/ata/libahci.c
348
count = PAGE_SIZE;
drivers/ata/libata-scsi.c
1116
if (sdev->sector_size > PAGE_SIZE)
drivers/ata/libata-sff.c
618
offset %= PAGE_SIZE;
drivers/ata/libata-sff.c
630
if (offset + count > PAGE_SIZE) {
drivers/ata/libata-sff.c
631
unsigned int split_len = PAGE_SIZE - offset;
drivers/ata/libata-sff.c
755
offset %= PAGE_SIZE;
drivers/ata/libata-sff.c
761
count = min(count, (unsigned int)PAGE_SIZE - offset);
drivers/ata/sata_sil24.c
1269
if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
drivers/ata/sata_sil24.c
57
SIL24_MAX_SGT = (PAGE_SIZE - SIL24_PRB_SZ)
drivers/atm/lanai.c
1432
#if (NUM_VCI * BITS_PER_LONG) <= PAGE_SIZE
drivers/atm/lanai.c
1441
APRINTK((lanai->num_vci) * sizeof(struct lanai_vcc *) <= PAGE_SIZE,
drivers/atm/lanai.c
149
#define AAL0_RX_BUFFER_SIZE (PAGE_SIZE)
drivers/atm/lanai.c
319
#define LANAI_PAGE_SIZE ((PAGE_SIZE >= 1024) ? PAGE_SIZE : 1024)
drivers/auxdisplay/cfag12864b.c
314
BUILD_BUG_ON(PAGE_SIZE < CFAG12864B_SIZE);
drivers/auxdisplay/ht16k33.c
593
BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
drivers/base/arch_numa.c
169
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
drivers/base/core.c
2422
if (ret >= (ssize_t)PAGE_SIZE) {
drivers/base/cpu.c
372
if (len + sizeof(",XXXX\n") >= PAGE_SIZE) {
drivers/base/cpu.c
384
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/base/dd.c
398
if (len >= (PAGE_SIZE - 1))
drivers/base/driver.c
63
if (len >= (PAGE_SIZE - 1))
drivers/base/firmware_loader/main.c
441
xz_buf.out_size = PAGE_SIZE;
drivers/base/firmware_loader/main.c
446
if (xz_buf.out_pos != PAGE_SIZE)
drivers/base/firmware_loader/sysfs.c
251
int page_ofs = offset & (PAGE_SIZE - 1);
drivers/base/firmware_loader/sysfs.c
252
int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
drivers/base/node.c
632
BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
drivers/base/platform.c
1289
len = of_device_modalias(dev, buf, PAGE_SIZE);
drivers/base/platform.c
1293
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
drivers/base/regmap/regmap-debugfs.c
229
if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
drivers/base/regmap/regmap-debugfs.c
230
count = PAGE_SIZE << MAX_PAGE_ORDER;
drivers/base/regmap/regmap-debugfs.c
376
if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
drivers/base/regmap/regmap-debugfs.c
377
count = PAGE_SIZE << MAX_PAGE_ORDER;
drivers/base/regmap/regmap-debugfs.c
383
entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/base/regmap/regmap-debugfs.c
402
entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
drivers/base/regmap/regmap-debugfs.c
43
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/base/regmap/regmap-debugfs.c
50
ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
drivers/base/regmap/regmap-debugfs.c
51
if (ret >= PAGE_SIZE) {
drivers/block/aoe/aoeblk.c
88
p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
drivers/block/aoe/aoeblk.c
90
p += scnprintf(p, PAGE_SIZE - (p-page), "\n");
drivers/block/aoe/aoecmd.c
610
if (n < PAGE_SIZE)
drivers/block/aoe/aoecmd.c
613
m = PAGE_SIZE;
drivers/block/brd.c
147
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
drivers/block/brd.c
308
.physical_block_size = PAGE_SIZE,
drivers/block/brd.c
311
.discard_granularity = PAGE_SIZE,
drivers/block/drbd/drbd_bitmap.c
1011
if (len_sect < PAGE_SIZE/SECTOR_SIZE)
drivers/block/drbd/drbd_bitmap.c
1014
len = PAGE_SIZE;
drivers/block/drbd/drbd_bitmap.c
1312
PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
drivers/block/drbd/drbd_bitmap.c
1315
PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
drivers/block/drbd/drbd_bitmap.c
1318
if (i < PAGE_SIZE*8) {
drivers/block/drbd/drbd_bitmap.c
1324
bm_fo = bit_offset + PAGE_SIZE*8;
drivers/block/drbd/drbd_bitmap.c
341
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
drivers/block/drbd/drbd_bitmap.c
344
#define LWPP (PAGE_SIZE/sizeof(long))
drivers/block/drbd/drbd_main.c
1615
unsigned l = min_t(unsigned, len, PAGE_SIZE);
drivers/block/drbd/drbd_main.c
2079
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
drivers/block/drbd/drbd_nl.c
1304
lim.seg_boundary_mask = PAGE_SIZE - 1;
drivers/block/drbd/drbd_receiver.c
1515
unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
drivers/block/drbd/drbd_receiver.c
1738
unsigned len = min_t(int, ds, PAGE_SIZE);
drivers/block/drbd/drbd_receiver.c
1782
unsigned int len = min_t(int, data_size, PAGE_SIZE);
drivers/block/drbd/drbd_worker.c
1075
unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
drivers/block/drbd/drbd_worker.c
302
crypto_shash_update(desc, src, PAGE_SIZE);
drivers/block/drbd/drbd_worker.c
308
len = peer_req->i.size & (PAGE_SIZE - 1);
drivers/block/drbd/drbd_worker.c
309
crypto_shash_update(desc, src, len ?: PAGE_SIZE);
drivers/block/floppy.c
332
#define MAX_DISK_SIZE (PAGE_SIZE / 1024)
drivers/block/loop.c
661
p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
drivers/block/null_blk/main.c
1084
nullb->dev->curr_cache -= PAGE_SIZE;
drivers/block/null_blk/main.c
1151
PAGE_SIZE - offset_in_page(pos));
drivers/block/null_blk/main.c
1155
null_make_cache_space(nullb, PAGE_SIZE);
drivers/block/null_blk/main.c
1185
PAGE_SIZE - offset_in_page(pos));
drivers/block/null_blk/main.c
1990
lim.virt_boundary_mask = PAGE_SIZE - 1;
drivers/block/null_blk/main.c
2131
if (g_bs > PAGE_SIZE) {
drivers/block/null_blk/main.c
2133
pr_warn("defaults block size to %lu\n", PAGE_SIZE);
drivers/block/null_blk/main.c
2134
g_bs = PAGE_SIZE;
drivers/block/null_blk/main.c
287
return snprintf(page, PAGE_SIZE, "%u\n", val);
drivers/block/null_blk/main.c
293
return snprintf(page, PAGE_SIZE, "%lu\n", val);
drivers/block/null_blk/main.c
298
return snprintf(page, PAGE_SIZE, "%u\n", val);
drivers/block/null_blk/main.c
48
#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
drivers/block/null_blk/main.c
724
size_t left = PAGE_SIZE;
drivers/block/null_blk/main.c
735
memzero_explicit(page, PAGE_SIZE);
drivers/block/null_blk/main.c
921
nullb->dev->curr_cache -= PAGE_SIZE;
drivers/block/null_blk/main.c
938
nullb->dev->curr_cache += PAGE_SIZE;
drivers/block/rbd.c
105
#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
drivers/block/rbd.c
1849
reply_len = num_pages * PAGE_SIZE;
drivers/block/rbd.c
1858
end = p + min(reply_len, (size_t)PAGE_SIZE);
drivers/block/rbd.c
3088
unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
drivers/block/rbd.c
3788
if (reply_len > 0 && reply_len <= PAGE_SIZE) {
drivers/block/rbd.c
4706
if (outbound_size > PAGE_SIZE)
drivers/block/rbd.c
5654
size_t reply_len = PAGE_SIZE;
drivers/block/rbd.c
5700
size_t reply_len = PAGE_SIZE;
drivers/block/sunvdc.c
41
#define MAX_RING_COOKIES ((MAX_XFER_BLKS / PAGE_SIZE) + 2)
drivers/block/sunvdc.c
790
.seg_boundary_mask = PAGE_SIZE - 1,
drivers/block/sunvdc.c
791
.max_segment_size = PAGE_SIZE,
drivers/block/ublk_drv.c
1150
return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
drivers/block/ublk_drv.c
4249
round_down(max_io_bytes, PAGE_SIZE);
drivers/block/ublk_drv.c
899
if (p->alignment >= PAGE_SIZE)
drivers/block/virtio_blk.c
897
BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
drivers/block/xen-blkback/common.h
59
#define XEN_PAGES_PER_SEGMENT (PAGE_SIZE / XEN_PAGE_SIZE)
drivers/block/xen-blkfront.c
1443
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
drivers/block/xen-blkfront.c
262
#define GRANTS_PER_PSEG (PAGE_SIZE / XEN_PAGE_SIZE)
drivers/block/xen-blkfront.c
832
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
drivers/block/xen-blkfront.c
974
lim->seg_boundary_mask = PAGE_SIZE - 1;
drivers/block/xen-blkfront.c
975
lim->max_segment_size = PAGE_SIZE;
drivers/block/zloop.c
1403
if (count > PAGE_SIZE)
drivers/block/zram/backend_zstd.c
131
prm = zstd_get_params(params->level, PAGE_SIZE);
drivers/block/zram/backend_zstd.c
64
zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
drivers/block/zram/backend_zstd.c
69
prm = zstd_get_cparams(params->level, PAGE_SIZE,
drivers/block/zram/zcomp.c
143
.src_len = PAGE_SIZE,
drivers/block/zram/zcomp.c
144
.dst_len = 2 * PAGE_SIZE,
drivers/block/zram/zcomp.c
162
.dst_len = PAGE_SIZE,
drivers/block/zram/zcomp.c
62
zstrm->local_copy = vzalloc(PAGE_SIZE);
drivers/block/zram/zcomp.c
67
zstrm->buffer = vzalloc(2 * PAGE_SIZE);
drivers/block/zram/zram_drv.c
1104
req->bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
drivers/block/zram/zram_drv.c
1106
__bio_add_page(&req->bio, req->page, PAGE_SIZE, 0);
drivers/block/zram/zram_drv.c
1352
memset_page(page, 0, 0, PAGE_SIZE);
drivers/block/zram/zram_drv.c
1455
bio->bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
drivers/block/zram/zram_drv.c
1459
__bio_add_page(bio, page, PAGE_SIZE, 0);
drivers/block/zram/zram_drv.c
1471
bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
drivers/block/zram/zram_drv.c
1472
__bio_add_page(&bio, req->page, PAGE_SIZE, 0);
drivers/block/zram/zram_drv.c
2042
zram_fill_page(mem, PAGE_SIZE, get_slot_handle(zram, index));
drivers/block/zram/zram_drv.c
2054
src = zs_obj_read_begin(zram->mem_pool, handle, PAGE_SIZE, NULL);
drivers/block/zram/zram_drv.c
2058
zs_obj_read_end(zram->mem_pool, handle, PAGE_SIZE, src);
drivers/block/zram/zram_drv.c
218
#if PAGE_SIZE != 4096
drivers/block/zram/zram_drv.c
221
return bvec->bv_len != PAGE_SIZE;
drivers/block/zram/zram_drv.c
2211
handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
drivers/block/zram/zram_drv.c
2223
zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE);
drivers/block/zram/zram_drv.c
2230
set_slot_size(zram, index, PAGE_SIZE);
drivers/block/zram/zram_drv.c
2233
atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
drivers/block/zram/zram_drv.c
242
#define NUM_PP_BUCKETS ((PAGE_SIZE / PP_BUCKET_SIZE_RANGE) + 1)
drivers/block/zram/zram_drv.c
2699
if (n <= (PAGE_SIZE - offset))
drivers/block/zram/zram_drv.c
2702
n -= (PAGE_SIZE - offset);
drivers/block/zram/zram_drv.c
2706
while (n >= PAGE_SIZE) {
drivers/block/zram/zram_drv.c
2712
n -= PAGE_SIZE;
drivers/block/zram/zram_drv.c
2729
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
drivers/block/zram/zram_drv.c
2760
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
drivers/block/zram/zram_drv.c
3050
.physical_block_size = PAGE_SIZE,
drivers/block/zram/zram_drv.c
3051
.io_min = PAGE_SIZE,
drivers/block/zram/zram_drv.c
3052
.io_opt = PAGE_SIZE,
drivers/block/zram/zram_drv.c
3062
#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
drivers/block/zram/zram_drv.c
344
unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
drivers/block/zram/zram_drv.c
632
val = rounddown(val, PAGE_SIZE / 4096);
drivers/block/zram/zram_drv.c
713
p = file_path(file, buf, PAGE_SIZE - 1);
drivers/bus/bt1-apb.c
246
return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
drivers/bus/bt1-apb.c
264
return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
drivers/bus/bt1-apb.c
291
return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
drivers/bus/bt1-axi.c
181
return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
drivers/bus/bt1-axi.c
188
return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
drivers/char/adi.c
15
#define MAX_BUF_SZ PAGE_SIZE
drivers/char/agp/ali-agp.c
134
for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
drivers/char/agp/alpha-agp.c
169
aper_size->num_entries = agp->aperture.size / PAGE_SIZE;
drivers/char/agp/amd-k7-agp.c
163
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/amd-k7-agp.c
301
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/amd-k7-agp.c
314
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/amd-k7-agp.c
337
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/amd-k7-agp.c
48
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
drivers/char/agp/ati-agp.c
280
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/ati-agp.c
294
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/ati-agp.c
322
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/ati-agp.c
391
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/ati-agp.c
72
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
drivers/char/agp/backend.c
176
bridge->key_list = vzalloc(PAGE_SIZE * 4);
drivers/char/agp/efficeon-agp.c
222
for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
drivers/char/agp/generic.c
1008
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
drivers/char/agp/generic.c
1064
num_entries -= agp_memory_reserved/PAGE_SIZE;
drivers/char/agp/generic.c
141
agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
drivers/char/agp/generic.c
204
#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
drivers/char/agp/generic.c
928
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
drivers/char/agp/generic.c
944
(PAGE_SIZE * (1 << page_order)));
drivers/char/agp/intel-gtt.c
1042
ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
drivers/char/agp/intel-gtt.c
1043
PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
drivers/char/agp/intel-gtt.c
1064
intel_private.ifp_resource.end = temp + PAGE_SIZE;
drivers/char/agp/intel-gtt.c
1096
intel_private.ifp_resource.end = l64 + PAGE_SIZE;
drivers/char/agp/intel-gtt.c
1125
intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
drivers/char/agp/intel-gtt.c
113
sg_set_page(sg, pages[i], PAGE_SIZE, 0);
drivers/char/agp/intel-gtt.c
308
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/char/agp/intel-gtt.c
570
intel_private.scratch_page_dma, PAGE_SIZE,
drivers/char/agp/intel-gtt.c
937
int start = intel_private.stolen_size / PAGE_SIZE;
drivers/char/agp/nvidia-agp.c
139
~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE;
drivers/char/agp/nvidia-agp.c
159
(volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE);
drivers/char/agp/nvidia-agp.c
215
(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
drivers/char/agp/nvidia-agp.c
288
temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
drivers/char/agp/nvidia-agp.c
290
temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
drivers/char/agp/parisc-agp.c
277
info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size;
drivers/char/agp/sworks-agp.c
334
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/sworks-agp.c
347
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/sworks-agp.c
372
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
drivers/char/agp/sworks-agp.c
60
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
drivers/char/agp/uninorth-agp.c
411
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
drivers/char/agp/uninorth-agp.c
465
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
drivers/char/bsr.c
123
if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
drivers/char/bsr.c
215
if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
drivers/char/hpet.c
368
if (addr & (PAGE_SIZE - 1))
drivers/char/hpet.c
372
return vm_iomap_memory(vma, addr, PAGE_SIZE);
drivers/char/hw_random/core.c
429
strlcat(buf, rng->name, PAGE_SIZE);
drivers/char/hw_random/core.c
430
strlcat(buf, " ", PAGE_SIZE);
drivers/char/hw_random/core.c
432
strlcat(buf, "none\n", PAGE_SIZE);
drivers/char/hw_random/s390-trng.c
175
size_t len = max <= PAGE_SIZE ? max : PAGE_SIZE;
drivers/char/hw_random/s390-trng.c
84
n = nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes;
drivers/char/mem.c
112
bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/char/mem.c
187
if (p < PAGE_SIZE) {
drivers/char/mem.c
42
sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
drivers/char/mem.c
463
if (chunk > PAGE_SIZE)
drivers/char/mem.c
464
chunk = PAGE_SIZE; /* Just for latency reasons */
drivers/char/mem.c
486
size_t chunk = min_t(size_t, count, PAGE_SIZE);
drivers/char/mem.c
99
if (p < PAGE_SIZE) {
drivers/char/nvram.c
239
count = min_t(size_t, count, PAGE_SIZE);
drivers/char/nvram.c
269
count = min_t(size_t, count, PAGE_SIZE);
drivers/char/random.c
1435
BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
drivers/char/random.c
1436
if (ret % PAGE_SIZE == 0) {
drivers/char/random.c
470
BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
drivers/char/random.c
471
if (ret % PAGE_SIZE == 0) {
drivers/char/tpm/tpm-buf.c
111
if ((buf->length + new_length) > PAGE_SIZE) {
drivers/char/tpm/tpm-interface.c
271
len = tpm_transmit(chip, buf->data, PAGE_SIZE);
drivers/char/tpm/tpm_ibmvtpm.c
396
return CRQ_RES_BUF_SIZE + PAGE_SIZE;
drivers/char/tpm/tpm_ibmvtpm.h
47
#define CRQ_RES_BUF_SIZE PAGE_SIZE
drivers/char/tpm/xen-tpmfront.c
144
if (offset > PAGE_SIZE)
drivers/char/tpm/xen-tpmfront.c
147
if (offset + count > PAGE_SIZE)
drivers/char/tpm/xen-tpmfront.c
194
if (offset > PAGE_SIZE)
drivers/char/tpm/xen-tpmfront.c
197
if (offset + length > PAGE_SIZE)
drivers/char/tpm/xen-tpmfront.c
198
length = PAGE_SIZE - offset;
drivers/char/uv_mmtimer.c
102
PAGE_SIZE) / 8;
drivers/char/uv_mmtimer.c
151
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
drivers/char/uv_mmtimer.c
157
if (PAGE_SIZE > (1 << 16))
drivers/char/uv_mmtimer.c
163
uv_mmtimer_addr &= ~(PAGE_SIZE - 1);
drivers/char/uv_mmtimer.c
167
PAGE_SIZE, vma->vm_page_prot)) {
drivers/char/virtio_console.c
1220
port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
drivers/char/virtio_console.c
1298
buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
drivers/char/virtio_console.c
884
if (len + offset > PAGE_SIZE)
drivers/char/virtio_console.c
885
len = PAGE_SIZE - offset;
drivers/char/xilinx_hwicap/xilinx_hwicap.c
402
if (bytes_to_read > PAGE_SIZE)
drivers/char/xilinx_hwicap/xilinx_hwicap.c
403
bytes_to_read = PAGE_SIZE;
drivers/char/xilinx_hwicap/xilinx_hwicap.c
471
if (len > PAGE_SIZE)
drivers/char/xilinx_hwicap/xilinx_hwicap.c
472
len = PAGE_SIZE;
drivers/char/xillybus/xillybus_core.c
377
allocsize = PAGE_SIZE;
drivers/clk/renesas/clk-emev2.c
30
BUG_ON(!smu_base || (offs >= PAGE_SIZE));
drivers/clocksource/hyperv_timer.c
408
u8 reserved[PAGE_SIZE];
drivers/clocksource/hyperv_timer.c
409
} tsc_pg __bss_decrypted __aligned(PAGE_SIZE);
drivers/comedi/comedi_buf.c
164
int l = min_t(int, len - done, PAGE_SIZE - pgoff);
drivers/comedi/comedi_buf.c
217
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
drivers/comedi/comedi_buf.c
32
dma_free_coherent(bm->dma_hw_dev, PAGE_SIZE,
drivers/comedi/comedi_buf.c
347
min(num_bytes - count, PAGE_SIZE - offset);
drivers/comedi/comedi_buf.c
583
unsigned int block_size = min(num_bytes, PAGE_SIZE - offset);
drivers/comedi/comedi_buf.c
610
unsigned int block_size = min(nbytes, PAGE_SIZE - offset);
drivers/comedi/comedi_buf.c
92
dma_alloc_coherent(bm->dma_hw_dev, PAGE_SIZE,
drivers/comedi/comedi_fops.c
2559
vma->vm_end = start + PAGE_SIZE;
drivers/comedi/comedi_fops.c
2562
buf->dma_addr, PAGE_SIZE);
drivers/comedi/comedi_fops.c
2566
start += PAGE_SIZE;
drivers/comedi/comedi_fops.c
2576
retval = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
drivers/comedi/comedi_fops.c
2581
start += PAGE_SIZE;
drivers/comedi/comedi_fops.c
2659
unsigned int copy_amount = min(n, PAGE_SIZE - offset);
drivers/comedi/comedi_fops.c
2687
unsigned int copy_amount = min(n, PAGE_SIZE - offset);
drivers/comedi/comedi_fops.c
351
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
drivers/comedi/drivers/addi_apci_3120.c
917
PAGE_SIZE << order,
drivers/comedi/drivers/addi_apci_3120.c
925
dmabuf->size = PAGE_SIZE << order;
drivers/comedi/drivers/adl_pci9118.c
1470
dma_alloc_coherent(dev->hw_dev, PAGE_SIZE << order,
drivers/comedi/drivers/adl_pci9118.c
1477
dmabuf->size = PAGE_SIZE << order;
drivers/comedi/drivers/dt282x.c
1024
PAGE_SIZE, 0);
drivers/comedi/drivers/mite.c
656
unsigned int remainder = nbytes % PAGE_SIZE;
drivers/comedi/drivers/mite.c
671
desc->count = cpu_to_le32(PAGE_SIZE);
drivers/comedi/drivers/pcl812.c
1117
PAGE_SIZE * 2, COMEDI_ISADMA_READ);
drivers/comedi/drivers/pcl816.c
585
PAGE_SIZE * 4, COMEDI_ISADMA_READ);
drivers/comedi/drivers/pcl818.c
968
PAGE_SIZE * 4, COMEDI_ISADMA_READ);
drivers/cpufreq/cpufreq.c
872
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
drivers/cpufreq/cpufreq.c
890
if (i >= (PAGE_SIZE - 5))
drivers/cpufreq/cpufreq_stats.c
134
if (len >= PAGE_SIZE - 1)
drivers/cpufreq/cpufreq_stats.c
138
if (len >= PAGE_SIZE - 1)
drivers/cpufreq/cpufreq_stats.c
139
return PAGE_SIZE - 1;
drivers/cpufreq/cpufreq_stats.c
144
if (len >= PAGE_SIZE - 1)
drivers/cpufreq/cpufreq_stats.c
150
if (len >= PAGE_SIZE - 1)
drivers/cpufreq/cpufreq_stats.c
160
if (len >= PAGE_SIZE - 1)
drivers/cpufreq/cpufreq_stats.c
165
if (len >= PAGE_SIZE - 1) {
drivers/cpuidle/sysfs.c
30
if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
drivers/crypto/aspeed/aspeed-hace.c
197
PAGE_SIZE,
drivers/crypto/atmel-aes.c
47
#define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
drivers/crypto/atmel-sha.c
70
#define SHA_BUFFER_LEN (PAGE_SIZE / 16)
drivers/crypto/atmel-tdes.c
318
dd->buflen = PAGE_SIZE;
drivers/crypto/ccp/dbc.c
202
BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
drivers/crypto/ccp/sev-dev-tio.c
438
if (WARN_ON_ONCE(npages > ((PAGE_SIZE / sizeof(struct sla_addr_t)) + 1)))
drivers/crypto/ccp/sev-dev-tio.c
441
BUILD_BUG_ON(PAGE_SIZE < SZ_4K);
drivers/crypto/ccp/sev-dev.c
1136
(1 << entry->order) * PAGE_SIZE);
drivers/crypto/ccp/sev-dev.c
1211
if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
drivers/crypto/ccp/sev-dev.c
1334
sizeof(struct sev_data_range_list)) > PAGE_SIZE)
drivers/crypto/ccp/sev-dev.c
1397
snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/crypto/ccp/sev-dev.c
2701
sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE;
drivers/crypto/ccp/sev-dev.c
388
paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE));
drivers/crypto/ccp/sev-dev.c
390
for (i = 0; i < npages; i++, paddr += PAGE_SIZE) {
drivers/crypto/ccp/sfs.c
136
PAGE_SIZE))
drivers/crypto/ccp/sfs.c
18
#define SFS_NUM_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PAGE_SIZE)
drivers/crypto/ccp/sfs.c
50
memset(sfs_dev->command_buf->sfs_buffer, 0xc7, PAGE_SIZE);
drivers/crypto/ccp/sfs.c
51
sfs_dev->command_buf->hdr.payload_size = 2 * PAGE_SIZE;
drivers/crypto/ccp/sfs.c
80
package_size = ALIGN(firmware->size + PAGE_SIZE, 0x10000U);
drivers/crypto/ccp/sfs.h
30
u8 buf[PAGE_SIZE - sizeof(struct psp_ext_req_buffer_hdr)];
drivers/crypto/hifn_795x.c
1370
sg_set_page(s, page, PAGE_SIZE, 0);
drivers/crypto/hifn_795x.c
1443
unsigned dlen = PAGE_SIZE;
drivers/crypto/hifn_795x.c
1968
if (dev->started + DIV_ROUND_UP(req->cryptlen, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
drivers/crypto/hisilicon/qm.c
1232
switch (PAGE_SIZE) {
drivers/crypto/hisilicon/qm.c
1244
PAGE_SIZE);
drivers/crypto/hisilicon/qm.c
2565
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
drivers/crypto/hisilicon/qm.c
2568
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
drivers/crypto/hisilicon/qm.c
2569
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
drivers/crypto/hisilicon/qm.c
2899
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
drivers/crypto/hisilicon/qm.c
2901
mmio_page_nr = qm->db_interval / PAGE_SIZE;
drivers/crypto/hisilicon/qm.c
2906
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
drivers/crypto/hisilicon/qm.c
2907
sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
drivers/crypto/hisilicon/qm.c
5784
qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
drivers/crypto/hisilicon/sec/sec_drv.c
1073
round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
drivers/crypto/hisilicon/sec/sec_drv.c
1075
round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
drivers/crypto/hisilicon/sec/sec_drv.c
1077
round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)
drivers/crypto/hisilicon/sec2/sec_crypto.c
495
pbuf_page_offset = PAGE_SIZE * i;
drivers/crypto/hisilicon/sec2/sec_crypto.c
73
#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
drivers/crypto/hisilicon/sec2/sec_crypto.c
77
#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
drivers/crypto/intel/iaa/iaa_crypto_main.c
1384
desc->max_dst_size = PAGE_SIZE;
drivers/crypto/intel/qat/qat_common/adf_admin.c
553
admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_admin.c
562
PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_admin.c
567
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_admin.c
600
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_admin.c
603
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
103
pm_kv = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
109
p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
116
ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE);
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
117
dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
125
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
128
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
130
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n",
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
132
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n",
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
136
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
138
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n",
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
141
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
143
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n");
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
146
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
149
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
153
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
156
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
160
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
163
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n",
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
165
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "fw_irq_count: %#x\n",
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
167
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
169
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_ack_count: %#x\n",
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
171
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_nack_count: %#x\n",
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
175
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
178
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
182
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
185
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
83
static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
99
pm_info = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
102
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
105
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
109
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "CPM_PM_INTERRUPT: %#x\n", val);
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
38
static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
53
pm_info = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
57
pm_kv = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
63
p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
70
ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE);
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
71
dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
79
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
82
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
86
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
90
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
92
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: ACTIVE\n");
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
95
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
98
pm_info_regs, PAGE_SIZE - len,
drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
288
hb->dma.virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
340
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
23
return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
38
return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
53
return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
812
ret = scnprintf(buf, PAGE_SIZE,
drivers/crypto/mxs-dcp.c
28
#define DCP_BUF_SZ PAGE_SIZE
drivers/crypto/nx/nx-842.c
73
((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
drivers/crypto/nx/nx-842.h
101
#define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE)
drivers/crypto/nx/nx-common-powernv.c
1020
.maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE,
drivers/crypto/nx/nx-common-powernv.c
1061
BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN);
drivers/crypto/nx/nx-common-pseries.c
52
.maximum = PAGE_SIZE, /* dynamic, max_sync_size */
drivers/crypto/nx/nx-common-pseries.c
887
p = snprintf(buf, PAGE_SIZE, "%lld\n", \
drivers/crypto/nx/nx-common-pseries.c
919
int bytes_remain = PAGE_SIZE;
drivers/crypto/nx/nx.c
115
next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
drivers/crypto/padlock-aes.c
245
if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
drivers/crypto/padlock-aes.c
257
if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
drivers/crypto/sahara.c
33
#define SHA_BUFFER_LEN PAGE_SIZE
drivers/cxl/core/port.c
1966
cxld->interleave_granularity = PAGE_SIZE;
drivers/dax/dax-private.h
124
if (align == PAGE_SIZE)
drivers/dax/dax-private.h
131
return align == PAGE_SIZE;
drivers/dax/device.c
115
unsigned int fault_size = PAGE_SIZE;
drivers/dax/device.c
120
if (dev_dax->align > PAGE_SIZE) {
drivers/dax/device.c
129
phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
drivers/dax/device.c
455
if (dev_dax->align > PAGE_SIZE)
drivers/dax/device.c
86
unsigned long i, nr_pages = fault_size / PAGE_SIZE;
drivers/dax/super.c
93
if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
drivers/devfreq/devfreq.c
1532
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
drivers/devfreq/devfreq.c
1684
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
drivers/devfreq/devfreq.c
1724
if (len >= PAGE_SIZE - 1)
drivers/devfreq/devfreq.c
1730
if (len >= PAGE_SIZE - 1)
drivers/devfreq/devfreq.c
1731
return PAGE_SIZE - 1;
drivers/devfreq/devfreq.c
1735
if (len >= PAGE_SIZE - 1)
drivers/devfreq/devfreq.c
1741
if (len >= PAGE_SIZE - 1)
drivers/devfreq/devfreq.c
1745
if (len >= PAGE_SIZE - 1)
drivers/devfreq/devfreq.c
1750
if (len >= PAGE_SIZE - 1)
drivers/devfreq/devfreq.c
1756
if (len < PAGE_SIZE - 1)
drivers/devfreq/devfreq.c
1759
if (len >= PAGE_SIZE - 1) {
drivers/dio/dio.c
141
va = ioremap(pa, PAGE_SIZE);
drivers/dio/dio.c
214
va = ioremap(pa, PAGE_SIZE);
drivers/dma-buf/heaps/system_heap.c
224
int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
drivers/dma-buf/heaps/system_heap.c
326
if (size < (PAGE_SIZE << orders[i]))
drivers/dma-buf/udmabuf.c
162
sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
drivers/dma-buf/udmabuf.c
346
for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
drivers/dma-buf/udmabuf.c
69
for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
drivers/dma/bcm2835-dma.c
835
dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
drivers/dma/bcm2835-dma.c
940
PAGE_SIZE, DMA_TO_DEVICE,
drivers/dma/dma-jz4780.c
114
#define JZ_DMA_DESC_BLOCK_SIZE PAGE_SIZE
drivers/dma/dma-jz4780.c
766
PAGE_SIZE, 0);
drivers/dma/idxd/cdev.c
374
if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
drivers/dma/idxd/cdev.c
421
return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
drivers/dma/idxd/idxd.h
631
#define IDXD_PORTAL_MASK (PAGE_SIZE - 1)
drivers/dma/idxd/perfmon.h
113
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
drivers/dma/idxd/registers.h
25
#define IDXD_PORTAL_SIZE PAGE_SIZE
drivers/dma/ioat/init.c
1013
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
drivers/dma/ioat/init.c
1019
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
drivers/dma/ioat/init.c
1021
dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/dma/ioat/init.c
1024
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
drivers/dma/ioat/init.c
818
for (i = 0; i < PAGE_SIZE; i++)
drivers/dma/ioat/init.c
828
memset(page_address(dest), 0, PAGE_SIZE);
drivers/dma/ioat/init.c
840
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/dma/ioat/init.c
847
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
drivers/dma/ioat/init.c
855
IOAT_NUM_SRC_TEST, PAGE_SIZE,
drivers/dma/ioat/init.c
886
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
drivers/dma/ioat/init.c
888
dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/dma/ioat/init.c
889
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
drivers/dma/ioat/init.c
898
dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/dma/ioat/init.c
900
dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/dma/ioat/init.c
916
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
drivers/dma/ioat/init.c
924
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
drivers/dma/ioat/init.c
954
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
drivers/dma/ioat/init.c
962
memset(page_address(dest), 0, PAGE_SIZE);
drivers/dma/ioat/init.c
969
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
drivers/dma/ioat/init.c
977
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
drivers/dma/k3dma.c
27
#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
drivers/dma/loongson2-apb-dma.c
286
chan->device->dev, PAGE_SIZE,
drivers/dma/mv_xor.c
772
src = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/dma/mv_xor.c
776
dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/dma/mv_xor.c
783
for (i = 0; i < PAGE_SIZE; i++)
drivers/dma/mv_xor.c
799
offset_in_page(src), PAGE_SIZE,
drivers/dma/mv_xor.c
811
offset_in_page(dest), PAGE_SIZE,
drivers/dma/mv_xor.c
821
unmap->len = PAGE_SIZE;
drivers/dma/mv_xor.c
824
PAGE_SIZE, 0);
drivers/dma/mv_xor.c
853
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/dma/mv_xor.c
854
if (memcmp(src, dest, PAGE_SIZE)) {
drivers/dma/mv_xor.c
907
for (i = 0; i < PAGE_SIZE; i++)
drivers/dma/mv_xor.c
917
memset(page_address(dest), 0, PAGE_SIZE);
drivers/dma/mv_xor.c
935
0, PAGE_SIZE, DMA_TO_DEVICE);
drivers/dma/mv_xor.c
945
unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
drivers/dma/mv_xor.c
954
unmap->len = PAGE_SIZE;
drivers/dma/mv_xor.c
957
src_count, PAGE_SIZE, 0);
drivers/dma/mv_xor.c
986
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/dma/mv_xor.c
987
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
drivers/dma/mxs-dma.c
105
#define CCW_BLOCK_SIZE (4 * PAGE_SIZE)
drivers/dma/nbpfaxi.c
1315
BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
drivers/dma/nbpfaxi.c
161
#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \
drivers/dma/ppc4xx/adma.c
1378
if (src_sz > PAGE_SIZE) {
drivers/dma/ppc4xx/adma.c
3719
ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
drivers/dma/ppc4xx/adma.c
3720
iter->unmap_len = PAGE_SIZE;
drivers/dma/ppc4xx/adma.c
3730
memset(page_address(pg), 0xFF, PAGE_SIZE);
drivers/dma/ppc4xx/adma.c
3732
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/dma/ppc4xx/adma.c
3754
if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
drivers/dma/ppc4xx/adma.c
4022
pool_size = PAGE_SIZE << 1;
drivers/dma/ppc4xx/adma.c
4158
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/dma/ppc4xx/adma.c
4160
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/dma/ppc4xx/adma.c
4201
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/dma/ppc4xx/adma.c
4203
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/dma/ppc4xx/adma.c
4253
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/dma/ppc4xx/adma.c
4255
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/dma/sh/rcar-dmac.c
112
((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
drivers/dma/sh/rcar-dmac.c
115
((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
drivers/dma/sh/shdma-base.c
972
schan->max_xfer_len = PAGE_SIZE;
drivers/dma/ste_dma40.c
3375
base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
drivers/dma/xilinx/xdma-regs.h
39
#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE)
drivers/edac/bluefield_edac.c
324
(SZ_1G / PAGE_SIZE);
drivers/edac/cpc925_edac.c
498
*offset = pa & (PAGE_SIZE - 1);
drivers/edac/edac_mc_sysfs.c
131
count = edac_dimm_info_location(dimm, data, PAGE_SIZE);
drivers/edac/edac_mc_sysfs.c
132
count += scnprintf(data + count, PAGE_SIZE - count, "\n");
drivers/edac/edac_mc_sysfs.c
472
int len = PAGE_SIZE;
drivers/edac/i5000_edac.c
1042
space = PAGE_SIZE;
drivers/edac/i5000_edac.c
1066
space = PAGE_SIZE;
drivers/edac/i5000_edac.c
1087
space = PAGE_SIZE;
drivers/edac/i5000_edac.c
1097
space = PAGE_SIZE;
drivers/edac/i5000_edac.c
1110
space = PAGE_SIZE;
drivers/edac/i5100_edac.c
664
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
drivers/edac/i5400_edac.c
1007
space = PAGE_SIZE;
drivers/edac/i5400_edac.c
1017
space = PAGE_SIZE;
drivers/edac/i5400_edac.c
1031
space = PAGE_SIZE;
drivers/edac/i5400_edac.c
967
space = PAGE_SIZE;
drivers/edac/i5400_edac.c
992
space = PAGE_SIZE;
drivers/edac/i7300_edac.c
1063
pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/edac/i7300_edac.c
446
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
drivers/edac/i7300_edac.c
493
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
drivers/edac/i7300_edac.c
691
space = PAGE_SIZE;
drivers/edac/i7300_edac.c
704
space = PAGE_SIZE;
drivers/edac/i7300_edac.c
711
space = PAGE_SIZE;
drivers/edac/i7300_edac.c
727
space = PAGE_SIZE;
drivers/edac/i7300_edac.c
736
space = PAGE_SIZE;
drivers/edac/igen6_edac.c
200
#define ECCLOG_POOL_SIZE PAGE_SIZE
drivers/edac/thunderx_edac.c
335
const unsigned int lines = PAGE_SIZE / cline_size;
drivers/edac/thunderx_edac.c
427
for (offs = 0; offs < PAGE_SIZE; offs += cline_size) {
drivers/edac/thunderx_edac.c
754
l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node), PAGE_SIZE);
drivers/firewire/core-device.c
360
bufsize = PAGE_SIZE - 1;
drivers/firewire/core-device.c
438
length = get_modalias(unit, buf, PAGE_SIZE);
drivers/firewire/core-device.c
527
if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
drivers/firewire/core-iso.c
112
dma_unmap_page(card->device, dma_addr, PAGE_SIZE, buffer->direction);
drivers/firewire/core-iso.c
134
if (offset > 0 && offset <= PAGE_SIZE)
drivers/firewire/core-iso.c
70
dma_addr_t dma_addr = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE,
drivers/firewire/core-iso.c
79
dma_unmap_page(card->device, dma_addrs[i], PAGE_SIZE, buffer->direction);
drivers/firewire/device-attribute-test.c
101
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/firewire/device-attribute-test.c
183
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/firewire/net.c
1118
num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
drivers/firewire/net.c
37
#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2)
drivers/firewire/ohci.c
1018
desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
drivers/firewire/ohci.c
1029
desc->buffer_size = PAGE_SIZE - offset - 0x10;
drivers/firewire/ohci.c
1034
ctx->total_allocation += PAGE_SIZE;
drivers/firewire/ohci.c
1077
dmam_free_coherent(card->device, PAGE_SIZE, desc,
drivers/firewire/ohci.c
3340
z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
drivers/firewire/ohci.c
3362
if (offset + rest < PAGE_SIZE)
drivers/firewire/ohci.c
3365
length = PAGE_SIZE - offset;
drivers/firewire/ohci.c
3408
z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
drivers/firewire/ohci.c
3425
if (offset + rest < PAGE_SIZE)
drivers/firewire/ohci.c
3428
length = PAGE_SIZE - offset;
drivers/firewire/ohci.c
3638
BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
drivers/firewire/ohci.c
3639
BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
drivers/firewire/ohci.c
3640
ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
drivers/firewire/ohci.c
3650
err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
drivers/firewire/ohci.c
3691
ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
drivers/firewire/ohci.c
3692
ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
drivers/firewire/ohci.c
529
d->res_count = cpu_to_le16(PAGE_SIZE);
drivers/firewire/ohci.c
551
dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/firewire/ohci.c
609
if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
drivers/firewire/ohci.c
617
if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
drivers/firewire/ohci.c
621
if (next_res_count != cpu_to_le16(PAGE_SIZE))
drivers/firewire/ohci.c
635
*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
drivers/firewire/ohci.c
636
if (*buffer_offset > PAGE_SIZE) {
drivers/firewire/ohci.c
652
dma_sync_single_for_cpu(ctx->ohci->card.device, ctx->dma_addrs[i], PAGE_SIZE,
drivers/firewire/ohci.c
798
dma_sync_single_for_device(ctx->ohci->card.device, ctx->dma_addrs[i], PAGE_SIZE,
drivers/firewire/ohci.c
817
end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
drivers/firewire/ohci.c
823
void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
drivers/firewire/ohci.c
828
p -= AR_BUFFERS * PAGE_SIZE;
drivers/firewire/ohci.c
87
#define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
drivers/firewire/ohci.c
885
dma_addr_t dma_addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/firewire/ohci.c
889
dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/firewire/ohci.c
893
dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE, DMA_FROM_DEVICE);
drivers/firewire/ohci.c
908
d->req_count = cpu_to_le16(PAGE_SIZE);
drivers/firewire/ohci.c
93
#define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
drivers/firmware/cirrus/cs_dsp.c
1621
buf_len = round_up(region_len, PAGE_SIZE);
drivers/firmware/cirrus/cs_dsp.c
2348
buf_len = round_up(region_len, PAGE_SIZE);
drivers/firmware/dmi-id.c
139
r = get_modalias(page, PAGE_SIZE-1);
drivers/firmware/dmi-id.c
28
len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(field));
drivers/firmware/edd.c
45
#define left (PAGE_SIZE - (p - buf) - 1)
drivers/firmware/efi/capsule-loader.c
194
cap_info->page_bytes_remain = PAGE_SIZE;
drivers/firmware/efi/capsule-loader.c
201
kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
drivers/firmware/efi/capsule-loader.c
44
pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
drivers/firmware/efi/capsule.c
116
#define SGLIST_PER_PAGE ((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
drivers/firmware/efi/capsule.c
230
count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
drivers/firmware/efi/capsule.c
252
PAGE_SIZE - (u64)*pages % PAGE_SIZE);
drivers/firmware/efi/capsule.c
278
efi_capsule_flush_cache_range(sglist, PAGE_SIZE);
drivers/firmware/efi/efi.c
781
p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
drivers/firmware/efi/efi.c
782
PAGE_SIZE);
drivers/firmware/efi/efi.c
788
rsv = (void *)(p + prsv % PAGE_SIZE);
drivers/firmware/efi/efi.c
800
early_memunmap(p, PAGE_SIZE);
drivers/firmware/efi/libstub/arm32-stub.c
86
const int slack = TEXT_OFFSET - 5 * PAGE_SIZE;
drivers/firmware/efi/libstub/unaccepted_memory.c
38
d->phys_addr + d->num_pages * PAGE_SIZE);
drivers/firmware/efi/libstub/x86-5lvl.c
42
status = efi_allocate_pages(2 * PAGE_SIZE, (unsigned long *)&la57_code,
drivers/firmware/efi/libstub/x86-5lvl.c
48
memset(la57_code + tmpl_size, 0x90, PAGE_SIZE - tmpl_size);
drivers/firmware/efi/libstub/x86-5lvl.c
58
efi_adjust_memory_range_protection((unsigned long)la57_toggle, PAGE_SIZE);
drivers/firmware/efi/libstub/x86-5lvl.c
68
u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
drivers/firmware/efi/libstub/x86-5lvl.c
81
new_cr3 = memset(pgt, 0, PAGE_SIZE);
drivers/firmware/efi/libstub/x86-5lvl.c
89
new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE);
drivers/firmware/efi/libstub/x86-stub.c
637
d->phys_addr + PAGE_SIZE * d->num_pages);
drivers/firmware/efi/memattr.c
85
if (PAGE_SIZE > EFI_PAGE_SIZE &&
drivers/firmware/iscsi_ibft_find.c
75
early_memunmap(virt, PAGE_SIZE);
drivers/firmware/iscsi_ibft_find.c
76
virt = early_memremap_ro(pos, PAGE_SIZE);
drivers/firmware/iscsi_ibft_find.c
99
early_memunmap(virt, PAGE_SIZE);
drivers/firmware/memmap.c
372
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
drivers/firmware/memmap.c
378
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
drivers/firmware/memmap.c
384
return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
drivers/firmware/qemu_fw_cfg.c
876
return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
drivers/firmware/qemu_fw_cfg.c
880
return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
drivers/firmware/qemu_fw_cfg.c
886
return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_4_FMT,
drivers/firmware/stratix10-svc.c
934
size_t page_mask = PAGE_SIZE - 1;
drivers/firmware/stratix10-svc.c
938
begin = roundup(sh_memory->addr, PAGE_SIZE);
drivers/firmware/stratix10-svc.c
939
end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
drivers/firmware/xilinx/zynqmp-debug.c
31
static char debugfs_buf[PAGE_SIZE];
drivers/firmware/xilinx/zynqmp-debug.c
335
if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1)
drivers/fpga/dfl-afu-main.c
159
return scnprintf(buf, PAGE_SIZE, "%d\n", id);
drivers/fpga/dfl-afu-main.c
478
return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
drivers/fpga/dfl-fme-main.c
39
return scnprintf(buf, PAGE_SIZE, "%u\n",
drivers/fpga/dfl-fme-main.c
59
return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
drivers/fpga/dfl-fme-main.c
78
return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
drivers/fpga/fpga-mgr.c
487
nr_pages = DIV_ROUND_UP((unsigned long)buf + count, PAGE_SIZE) -
drivers/fpga/fpga-mgr.c
488
(unsigned long)buf / PAGE_SIZE;
drivers/fpga/fpga-mgr.c
503
p += PAGE_SIZE;
drivers/fsi/fsi-master-ast-cf.c
1086
return snprintf(buf, PAGE_SIZE - 1, "%u\n",
drivers/fsi/fsi-master-gpio.c
721
return snprintf(buf, PAGE_SIZE - 1, "%u\n",
drivers/fsi/fsi-sbefifo.c
116
#define SBEFIFO_MAX_USER_CMD_LEN (0x100000 + PAGE_SIZE)
drivers/fsi/fsi-sbefifo.c
886
if (len <= PAGE_SIZE)
drivers/fwctl/pds/main.c
120
dma_free_coherent(dev->parent, PAGE_SIZE,
drivers/fwctl/pds/main.c
137
dma_free_coherent(dev->parent, PAGE_SIZE,
drivers/fwctl/pds/main.c
155
data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
drivers/fwctl/pds/main.c
166
.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
drivers/fwctl/pds/main.c
175
dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
drivers/fwctl/pds/main.c
225
data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
drivers/fwctl/pds/main.c
236
.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
drivers/fwctl/pds/main.c
246
dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
1237
#define MAX_TRANSFER (PAGE_SIZE - 512)
drivers/gpu/drm/adp/adp_drv.c
318
new_state->mode.vdisplay * 4, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
887
return managed_pages * PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
334
bp.byte_align = PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
809
return ALIGN_DOWN(tmp, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1696
available = ALIGN_DOWN(available, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
87
PAGE_SIZE, sdomain,
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
94
PAGE_SIZE, ddomain,
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
229
gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
372
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
141
if (size != PAGE_SIZE || data->offset > (size - 8))
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
43
amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1614
return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1738
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
911
if (at + 12 > PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
440
ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
159
size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
163
size += PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
167
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
103
dma_unmap_page_attrs(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
173
bp.byte_align = PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
268
return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
513
if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
521
adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
82
PAGE_SIZE, DMA_BIDIRECTIONAL,
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
40
#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1277
args->size = ALIGN(args->size, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
1455
adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2023
return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
361
r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
404
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
430
PAGE_SIZE, domain, &ring->mqd_obj,
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
454
PAGE_SIZE, domain, &ring->mqd_obj,
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1201
ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
197
if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
66
bp.byte_align = PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
191
end = start + npages * PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
88
r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
96
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
55
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1014
dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1016
dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
223
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
39
PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
48
mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
54
mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
70
r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
735
PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
742
PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h
104
} __aligned(PAGE_SIZE) sdma[AMDGPU_MES_CTX_MAX_SDMA_RINGS];
drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h
61
uint8_t ring[PAGE_SIZE * 4];
drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h
75
} __aligned(PAGE_SIZE) gfx[AMDGPU_MES_CTX_MAX_GFX_RINGS];
drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h
78
uint8_t ring[PAGE_SIZE * 4];
drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h
89
} __aligned(PAGE_SIZE) compute[AMDGPU_MES_CTX_MAX_COMPUTE_RINGS];
drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h
92
uint8_t ring[PAGE_SIZE * 4];
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
447
size = ALIGN(size, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
449
r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
653
page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
654
size = ALIGN(size, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1344
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
4031
le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
529
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
538
ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
68
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
382
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
135
r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
163
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
338
ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
372
ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
380
ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
388
ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
410
ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
418
ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
445
ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
453
ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
481
ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
489
ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
497
ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
505
ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
513
ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
539
ALIGN(adev->gfx.rlc.rlc_1_iram_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
547
ALIGN(adev->gfx.rlc.rlc_1_dram_ucode_size_bytes, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
97
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
273
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
284
ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
289
ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
298
ALIGN(le32_to_cpu(sdma_hv3->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
248
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1360
size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1552
if (len != PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1582
PAGE_SIZE, 0);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1924
if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2165
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2215
if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
229
*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2741
size_t bytes = PAGE_SIZE - off;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2796
size_t bytes = PAGE_SIZE - off;
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1139
PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1152
amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1212
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1214
fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
165
ALIGN(le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
171
ALIGN(le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
284
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
295
r = amdgpu_bo_create_kernel(adev, AMDGPU_UMSCHFW_LOG_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
505
bp.byte_align = PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
149
r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
325
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
221
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1149
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
222
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
247
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
215
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
226
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1184
pages_addr[pfn] + PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1192
pages_addr[idx - 1] + PAGE_SIZE))
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
255
ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
261
ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
370
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
930
mgr->default_page_size = PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
933
err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
67
return (u64)PAGE_SIZE << drm_buddy_block_order(block);
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
1005
if (size > PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
416
return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
4433
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
4458
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
5629
ret = amdgpu_bo_create_reserved(adev, adev->psp.toc.size_bytes, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
5648
rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
5691
r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
6121
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
6199
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
6276
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
8946
PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
3164
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
3382
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
3893
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
6226
PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
957
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
796
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
606
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
2384
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2740
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2967
r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1158
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1165
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1172
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1179
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1186
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1191
ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1198
ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1207
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1300
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1886
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1912
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
5795
PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
637
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
671
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
586
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
603
IH_SW_RING_SIZE : PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
73
ALIGN(le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
78
ALIGN(le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
68
ALIGN(le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
73
ALIGN(le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/imu_v12_1.c
62
ALIGN(le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/imu_v12_1.c
67
ALIGN(le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
30
#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
31
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
86
if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
1091
r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
1395
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
1463
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
851
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1023
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1251
r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1583
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1636
r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1153
r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1181
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1529
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
935
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
489
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
493
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
551
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
555
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
474
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
478
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
392
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
396
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
288
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
292
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
368
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
406
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
410
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
790
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
794
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
332
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
336
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
473
if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
168
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
327
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
703
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
692
PAGE_SIZE,
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
433
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
439
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
462
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
501
r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
508
r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
591
r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, use_bus_addr);
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
600
r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1149
if (args->size != PAGE_SIZE) {
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1154
if (!offset || (PAGE_SIZE > 4096)) {
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
2339
if (bo_bucket->size != PAGE_SIZE) {
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
2344
if (!offset || (PAGE_SIZE > 4096)) {
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3457
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3460
if (PAGE_SIZE > 4096)
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3476
address, vma->vm_flags, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3481
PAGE_SIZE,
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
1738
#define VCRAT_SIZE_FOR_GPU (4 * PAGE_SIZE)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
524
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_5_0_hex) > PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
545
BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
232
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
156
inx = find_first_zero_bit(kfd->doorbell_bitmap, PAGE_SIZE / sizeof(u32));
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
276
PAGE_SIZE,
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
55
PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
64
int size = PAGE_SIZE;
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
84
PAGE_SIZE,
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
312
#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE)
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
314
#define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
126
prop.eop_ring_buffer_size = PAGE_SIZE;
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
83
retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
90
memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
1023
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
161
gart_s, gart_d, size * PAGE_SIZE,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
317
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
337
amdgpu_res_next(&cursor, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
352
amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
524
prange->npages * PAGE_SIZE,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
570
prange->npages * PAGE_SIZE,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
608
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
627
if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
645
dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
394
size = PAGE_SIZE;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
385
memset(m, 0, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
162
ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE), PAGE_SIZE)) *
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
53
ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE), PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
287
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
324
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
331
return snprintf(buffer, PAGE_SIZE, "%d\n", 0);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
335
return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
drivers/gpu/drm/amd/amdkfd/kfd_process.c
352
return snprintf(buffer, PAGE_SIZE, "%llu\n",
drivers/gpu/drm/amd/amdkfd/kfd_process.c
409
return snprintf(buffer, PAGE_SIZE, "%llu",
drivers/gpu/drm/amd/amdkfd/kfd_process.c
412
return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
414
return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
433
PAGE_SIZE,
drivers/gpu/drm/amd/amdkfd/kfd_process.c
806
ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
262
err = kfd_queue_buffer_get(vm, properties->write_ptr, &properties->wptr_bo, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
266
err = kfd_queue_buffer_get(vm, properties->read_ptr, &properties->rptr_bo, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
290
ALIGN(properties->eop_ring_buffer_size, PAGE_SIZE));
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
313
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
360
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
511
props->cwsr_size = ALIGN(ctl_stack_size + wg_data_size, PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
180
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
193
addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
249
dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
4279
prange->npages * PAGE_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
594
bp.size = prange->npages * PAGE_SIZE;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
595
bp.byte_align = PAGE_SIZE;
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
201
(offs += snprintf(buffer+offs, PAGE_SIZE-offs, \
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
349
offs += snprintf(buffer+offs, PAGE_SIZE-offs, "sibling_map ");
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
353
offs += snprintf(buffer+offs, PAGE_SIZE-offs, "%d,",
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1091
int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1730
ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2404
ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2409
ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2520
ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2592
r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
drivers/gpu/drm/amd/pm/amdgpu_pm.c
1721
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
drivers/gpu/drm/amd/pm/amdgpu_pm.c
1758
if (size >= PAGE_SIZE)
drivers/gpu/drm/amd/pm/amdgpu_pm.c
1759
size = PAGE_SIZE - 1;
drivers/gpu/drm/amd/pm/amdgpu_pm.c
2123
if (size >= PAGE_SIZE) {
drivers/gpu/drm/amd/pm/amdgpu_pm.c
2161
if (size >= PAGE_SIZE) {
drivers/gpu/drm/amd/pm/amdgpu_pm.c
570
if (size >= PAGE_SIZE)
drivers/gpu/drm/amd/pm/amdgpu_pm.c
571
size = PAGE_SIZE - 1;
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
197
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
256
sizeof(Watermarks_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
271
sizeof(DpmClocks_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
513
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
528
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
777
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
787
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
231
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
246
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
262
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
279
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
294
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
233
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
247
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
262
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
276
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
291
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
305
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
442
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
456
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
470
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
484
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
498
PAGE_SIZE,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
512
PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1010
driver_table->align = PAGE_SIZE;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1085
memory_pool->align = PAGE_SIZE;
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
255
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
258
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
261
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
264
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
267
sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
97
PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
498
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
500
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
502
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
504
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
506
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
508
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
510
sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
513
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
516
dummy_read_1_table->align = PAGE_SIZE;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
539
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
541
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
543
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
545
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
547
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
549
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
551
sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
554
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
556
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
123
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
233
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
235
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
237
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
239
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
241
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
154
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
156
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
158
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
234
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
237
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
240
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
243
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
246
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
127
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
232
ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
480
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
482
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
484
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
486
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
488
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
490
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
492
sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
495
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
497
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
499
sizeof(WifiBandEntryTable_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
158
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
160
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
162
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
133
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
135
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
137
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
545
adev->firmware.fw_size += ALIGN(ucode->fw->size, PAGE_SIZE);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
568
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
573
PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
577
PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
581
smu_v13_0_12_get_system_metrics_size(), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
516
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
519
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
521
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
523
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
525
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
527
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
529
sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
532
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
534
sizeof(WifiBandEntryTable_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
160
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
162
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
164
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
102
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
208
ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
194
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
196
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
198
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
381
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
383
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
385
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
387
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
389
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
391
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
393
sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
396
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
398
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
188
ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
92
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
170
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
172
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
174
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
622
uint32_t ctx_buf_size = PAGE_SIZE;
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c
395
blks_ecc->size = PAGE_SIZE;
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c
397
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c
44
ret = amdgpu_bo_create_kernel(adev, mem_len, PAGE_SIZE,
drivers/gpu/drm/armada/armada_gem.c
39
return roundup(size, PAGE_SIZE);
drivers/gpu/drm/armada/armada_gem.c
404
count = dobj->obj.size / PAGE_SIZE;
drivers/gpu/drm/armada/armada_gem.c
417
sg_set_page(sg, page, PAGE_SIZE, 0);
drivers/gpu/drm/display/drm_dp_aux_bus.c
142
return of_device_modalias(dev, buf, PAGE_SIZE);
drivers/gpu/drm/display/drm_dp_mst_topology.c
1614
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/gpu/drm/display/drm_dp_mst_topology.c
1638
stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);
drivers/gpu/drm/drm_cache.c
110
(unsigned long)page_virtual + PAGE_SIZE);
drivers/gpu/drm/drm_cache.c
61
for (i = 0; i < PAGE_SIZE; i += size)
drivers/gpu/drm/drm_debugfs_crc.c
134
if (len > PAGE_SIZE - 1) {
drivers/gpu/drm/drm_debugfs_crc.c
136
PAGE_SIZE);
drivers/gpu/drm/drm_dumb_buffers.c
77
hw_size_align = PAGE_SIZE;
drivers/gpu/drm/drm_dumb_buffers.c
78
else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE))
drivers/gpu/drm/drm_exec.c
150
tmp = kvrealloc(exec->objects, size + PAGE_SIZE, GFP_KERNEL);
drivers/gpu/drm/drm_exec.c
155
exec->max_objects += PAGE_SIZE / sizeof(void *);
drivers/gpu/drm/drm_exec.c
84
nr = PAGE_SIZE / sizeof(void *);
drivers/gpu/drm/drm_fb_helper.c
558
end = start + PAGE_SIZE;
drivers/gpu/drm/drm_gem.c
222
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
drivers/gpu/drm/drm_gem.c
605
size / PAGE_SIZE);
drivers/gpu/drm/drm_gem.c
683
WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
drivers/gpu/drm/drm_gem.c
754
WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
drivers/gpu/drm/drm_gem_dma_helper.c
142
size = round_up(size, PAGE_SIZE);
drivers/gpu/drm/drm_gem_vram_helper.c
449
size = roundup(size, PAGE_SIZE);
drivers/gpu/drm/drm_gem_vram_helper.c
972
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
drivers/gpu/drm/drm_gpusvm.c
1149
PAGE_SIZE << addr->order,
drivers/gpu/drm/drm_gpusvm.c
1541
PAGE_SIZE << order,
drivers/gpu/drm/drm_modeset_lock.c
101
buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
drivers/gpu/drm/drm_modeset_lock.c
106
stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2);
drivers/gpu/drm/drm_pagemap.c
324
PAGE_SIZE << pagemap_addr[i].order, dir);
drivers/gpu/drm/drm_pagemap.c
741
addr += PAGE_SIZE;
drivers/gpu/drm/drm_panic.c
185
if (offset == PAGE_SIZE - 1)
drivers/gpu/drm/drm_panic.c
191
if (offset == PAGE_SIZE - 2)
drivers/gpu/drm/drm_panic.c
246
offset = offset % PAGE_SIZE;
drivers/gpu/drm/drm_panic.c
259
if (cpp == 3 && offset + 3 > PAGE_SIZE)
drivers/gpu/drm/drm_panic.c
345
offset = offset % PAGE_SIZE;
drivers/gpu/drm/drm_panic.c
356
if (cpp == 3 && offset + 3 > PAGE_SIZE)
drivers/gpu/drm/drm_panic.c
846
return scnprintf(buffer, PAGE_SIZE, "%s\n",
drivers/gpu/drm/drm_sysfs.c
289
written += scnprintf(buf + written, PAGE_SIZE - written, "%s\n",
drivers/gpu/drm/etnaviv/etnaviv_gem.c
687
uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
126
npages = size / PAGE_SIZE;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
119
#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
drivers/gpu/drm/exynos/exynos_drm_gem.c
202
size = roundup(size, PAGE_SIZE);
drivers/gpu/drm/gma500/fbdev.c
135
size = ALIGN(size, PAGE_SIZE);
drivers/gpu/drm/gma500/fbdev.c
150
size = ALIGN(size, PAGE_SIZE);
drivers/gpu/drm/gma500/fbdev.c
153
backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
drivers/gpu/drm/gma500/fbdev.c
38
address += PAGE_SIZE;
drivers/gpu/drm/gma500/gem.c
147
size = roundup(size, PAGE_SIZE);
drivers/gpu/drm/gma500/gem.c
213
size = roundup(size, PAGE_SIZE);
drivers/gpu/drm/gma500/gem.c
217
pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
drivers/gpu/drm/gma500/gem.c
344
vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;
drivers/gpu/drm/gma500/gem.c
416
vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;
drivers/gpu/drm/gma500/gem.c
52
npages = obj->size / PAGE_SIZE;
drivers/gpu/drm/gma500/gem.c
94
npages = obj->size / PAGE_SIZE;
drivers/gpu/drm/gma500/gma_display.c
400
cursor_pages = obj->size / PAGE_SIZE;
drivers/gpu/drm/gma500/gma_display.c
407
memcpy_from_page(tmp_dst, pobj->pages[i], 0, PAGE_SIZE);
drivers/gpu/drm/gma500/gma_display.c
408
tmp_dst += PAGE_SIZE;
drivers/gpu/drm/gma500/mmu.c
189
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
drivers/gpu/drm/gma500/mmu.c
195
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
drivers/gpu/drm/gma500/mmu.c
266
uint32_t clflush_count = PAGE_SIZE / clflush_add;
drivers/gpu/drm/gma500/mmu.c
286
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
drivers/gpu/drm/gma500/mmu.c
465
PAGE_SIZE * clflush_size / sizeof(uint32_t);
drivers/gpu/drm/gma500/mmu.c
548
} while (addr += PAGE_SIZE, addr < next);
drivers/gpu/drm/gma500/mmu.c
605
} while (addr += PAGE_SIZE, addr < next);
drivers/gpu/drm/gma500/mmu.c
649
} while (addr += PAGE_SIZE, addr < next);
drivers/gpu/drm/gma500/mmu.c
712
} while (addr += PAGE_SIZE, addr < next);
drivers/gpu/drm/gma500/psb_drv.c
141
stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/gpu/drm/gma500/psb_intel_display.c
458
cursor_pobj = psb_gem_create(dev, 4 * PAGE_SIZE, "cursor", true, PAGE_SIZE);
drivers/gpu/drm/gud/gud_drv.c
409
num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE);
drivers/gpu/drm/gud/gud_drv.c
414
for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE)
drivers/gpu/drm/i915/display/intel_fb.c
1644
view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
drivers/gpu/drm/i915/display/intel_overlay.c
1366
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
drivers/gpu/drm/i915/display/intel_overlay.c
1368
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_create.c
107
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1304
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1639
for (; addr < end; addr += PAGE_SIZE) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2477
if (intel_ring_update_space(ring) >= PAGE_SIZE)
drivers/gpu/drm/i915/gem/i915_gem_internal.c
147
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
drivers/gpu/drm/i915/gem/i915_gem_internal.c
85
sg_set_page(sg, page, PAGE_SIZE << order, 0);
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
93
round_up(size, PAGE_SIZE),
drivers/gpu/drm/i915/gem/i915_gem_mman.c
742
&mmo->vma_node, obj->base.size / PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
754
&mmo->vma_node, obj->base.size / PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_object.c
526
PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_object.c
563
GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
371
drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
423
offset = offset % PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
111
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
drivers/gpu/drm/i915/gem/i915_gem_phys.c
118
drm_clflush_virt_range(src, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
119
memcpy_to_page(page, 0, src, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
126
src += PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
66
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
drivers/gpu/drm/i915/gem/i915_gem_phys.c
73
memcpy_from_page(dst, page, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
74
drm_clflush_virt_range(dst, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
77
dst += PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_region.c
66
GEM_BUG_ON(default_page_size < PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
159
max_segment / PAGE_SIZE,
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
169
sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
172
(max_segment - sg->length) / PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
174
sg->length += nr_pages * PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
247
if (max_segment > PAGE_SIZE) {
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
251
max_segment = PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
632
obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
700
PAGE_SIZE, 0, 0,
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
77
if (overflows_type(size / PAGE_SIZE, page_count))
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
80
page_count = size / PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1012
PAGE_SIZE, 0, 0, type, instance,
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
608
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
623
PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
624
memset_io(s, x, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
627
addr += PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
628
size -= PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
631
ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
1374
PAGE_SIZE, 0, 0,
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
300
PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
723
unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
135
if (max_segment > PAGE_SIZE) {
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
136
max_segment = PAGE_SIZE;
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
270
ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
110
GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
14
unsigned long nreal = obj->scratch / PAGE_SIZE;
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
31
const unsigned long nreal = obj->scratch / PAGE_SIZE;
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
37
if (overflows_type(obj->base.size / PAGE_SIZE, npages))
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
40
npages = obj->base.size / PAGE_SIZE;
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
60
sg_set_page(sg, page, PAGE_SIZE, 0);
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
65
sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1088
drm_clflush_virt_range(ptr, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1131
ptr += PAGE_SIZE / sizeof(*ptr);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1360
GEM_BUG_ON(min_page_size < PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1735
(PAGE_SIZE / sizeof(u32)) - 1,
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1782
PAGE_SIZE, 0, I915_BO_ALLOC_NOTHP);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
308
t->batch = __create_vma(t, PAGE_SIZE, false);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
308
const unsigned int ncachelines = PAGE_SIZE/64;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
362
ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1255
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1637
memset(cmd, POISON_INUSE, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1673
memset(cmd, POISON_INUSE, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1763
if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1829
obj_a = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1835
obj_b = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
31
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
496
drm_clflush_virt_range(map, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
525
drm_clflush_virt_range(map, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
589
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
590
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
592
obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
961
rpcs = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
107
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
206
for (i = 0; i < native_obj->base.size / sizeof(u32); i += PAGE_SIZE / sizeof(u32)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
24
obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
372
if (obj->base.size != PAGE_SIZE) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
374
(long long)obj->base.size, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
396
memset(dma_map, pattern[i], PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
397
if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
406
memset(obj_map, pattern[i], PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
407
if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
449
memset(ptr, 0xc5, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
48
obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
488
obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1167
obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1332
if (io_size < PAGE_SIZE)
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
150
iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1513
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1659
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1727
for (; addr < end; addr += PAGE_SIZE) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1813
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
191
const unsigned int nreal = obj->scratch / PAGE_SIZE;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
192
const unsigned long npages = obj->base.size / PAGE_SIZE;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
246
iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
346
nreal, obj->base.size / PAGE_SIZE, err);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
483
nreal, obj->base.size / PAGE_SIZE, err);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
710
if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
717
if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
724
obj = create_sys_or_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
737
if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
750
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
98
const unsigned long npages = obj->base.size / PAGE_SIZE;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
982
PAGE_SIZE,
drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
21
obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
45
nreal * PAGE_SIZE,
drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
46
to_gt(i915)->ggtt->vm.total + PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
53
nreal, obj->base.size / PAGE_SIZE, err);
drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
57
for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
21
obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
36
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
54
size = round_up(size, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
65
GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
85
offset += PAGE_SIZE;
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
118
exp_info.size = npages * PAGE_SIZE;
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
28
sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
751
(LRC_PPHWSP_PN * PAGE_SIZE) + HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
493
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
497
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
606
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
690
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
706
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
727
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
968
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
972
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_context.h
50
#define PARENT_SCRATCH_SIZE PAGE_SIZE
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1082
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1110
engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2312
hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2429
hexdump(m, engine->status_page.addr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
278
BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
301
PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
305
PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
322
return round_up(cxt_size * 64, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
43
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
45
#define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
46
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
47
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
48
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
50
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_engine_regs.h
21
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2868
memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2880
drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_ggtt.c
953
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
757
for (i = 0; i < PAGE_SIZE; i += 128) {
drivers/gpu/drm/i915/gt/intel_gtt.c
343
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_gtt.h
625
#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
drivers/gpu/drm/i915/gt/intel_gtt.h
66
#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
drivers/gpu/drm/i915/gt/intel_gtt.h
80
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_gtt.h
81
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
drivers/gpu/drm/i915/gt/intel_lrc.c
1011
ptr += per_ctx ? PAGE_SIZE : 0;
drivers/gpu/drm/i915/gt/intel_lrc.c
1031
memset(state, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_lrc.c
1035
memset(state + context_wa_bb_offset(ce), 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_lrc.c
1086
ce->wa_bb_page = context_size / PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_lrc.c
1088
context_size += PAGE_SIZE * 2;
drivers/gpu/drm/i915/gt/intel_lrc.c
1092
ce->parallel.guc.parent_page = context_size / PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_lrc.c
1463
lrc_indirect_bb(ce) + PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_lrc.c
1805
#define CTX_WA_BB_SIZE (PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_lrc.c
941
memset(regs, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_lrc.c
993
return PAGE_SIZE * ce->wa_bb_page;
drivers/gpu/drm/i915/gt/intel_lrc.h
140
#define DG2_PREDICATE_RESULT_WA (PAGE_SIZE - sizeof(u64))
drivers/gpu/drm/i915/gt/intel_lrc.h
27
#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_migrate.c
594
*cs++ = BLT_DEPTH_32 | PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
596
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
drivers/gpu/drm/i915/gt/intel_migrate.c
600
*cs++ = PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
605
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
607
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
drivers/gpu/drm/i915/gt/intel_migrate.c
611
*cs++ = PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
617
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
618
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
620
*cs++ = PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
83
d->offset += PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
943
(PAGE_SIZE - 1);
drivers/gpu/drm/i915/gt/intel_migrate.c
945
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
drivers/gpu/drm/i915/gt/intel_migrate.c
963
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
965
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
drivers/gpu/drm/i915/gt/intel_migrate.c
972
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
drivers/gpu/drm/i915/gt/intel_migrate.c
974
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
drivers/gpu/drm/i915/gt/intel_rc6.c
458
rc6_ctx_base + PAGE_SIZE < i915->dsm.reserved.end)) {
drivers/gpu/drm/i915/gt/intel_renderstate.c
155
if (so->rodata->batch_items * 4 > PAGE_SIZE)
drivers/gpu/drm/i915/gt/intel_renderstate.c
158
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_renderstate.c
47
if ((i) >= PAGE_SIZE / sizeof(u32)) \
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1326
size = ALIGN(err, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
325
memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
335
drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_timeline.c
26
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/mock_engine.c
350
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
drivers/gpu/drm/i915/gt/mock_engine.c
62
const unsigned long sz = PAGE_SIZE;
drivers/gpu/drm/i915/gt/mock_engine.c
75
ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
86
obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1000
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1028
memset(vaddr, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1307
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1339
memset(vaddr, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1393
memset(vaddr, 0xff, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1558
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2777
PAGE_SIZE, 0);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3193
PAGE_SIZE, 0);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3524
PAGE_SIZE, 0);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3656
i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3667
for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
drivers/gpu/drm/i915/gt/selftest_execlists.c
4203
PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
116
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
238
err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
255
return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
55
h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
61
h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
73
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1031
igt_hexdump(defaults, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1046
} while (dw < PAGE_SIZE / sizeof(u32) &&
drivers/gpu/drm/i915/gt/selftest_lrc.c
1190
igt_hexdump(defaults, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1204
} while (dw < PAGE_SIZE / sizeof(u32) &&
drivers/gpu/drm/i915/gt/selftest_lrc.c
1343
igt_hexdump(defaults, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1369
} while (dw < PAGE_SIZE / sizeof(u32) &&
drivers/gpu/drm/i915/gt/selftest_lrc.c
1601
(per_ctx ? PAGE_SIZE : 0);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1636
(per_ctx ? PAGE_SIZE : 0);
drivers/gpu/drm/i915/gt/selftest_lrc.c
189
__lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
drivers/gpu/drm/i915/gt/selftest_lrc.c
260
igt_hexdump(hw, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
263
igt_hexdump(lrc, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
279
for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
drivers/gpu/drm/i915/gt/selftest_lrc.c
36
return __vm_create_scratch_for_read_pinned(>->ggtt->vm, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_migrate.c
115
for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
333
for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
372
for (i = 0; !err && i < DIV_ROUND_UP(ccs_bytes, PAGE_SIZE); i++) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
373
int offset = ((i * PAGE_SIZE) /
drivers/gpu/drm/i915/gt/selftest_migrate.c
375
int ccs_bytes_left = (ccs_bytes - i * PAGE_SIZE) / sizeof(u32);
drivers/gpu/drm/i915/gt/selftest_migrate.c
381
fn, i * PAGE_SIZE + x * sizeof(u32), ccs_bytes);
drivers/gpu/drm/i915/gt/selftest_migrate.c
568
obj = i915_gem_object_create_internal(i915, 2 * PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_mocs.c
226
memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
drivers/gpu/drm/i915/gt/selftest_mocs.c
241
GEM_BUG_ON(offset > PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_mocs.c
80
__vm_create_scratch_for_read_pinned(>->ggtt->vm, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_reset.c
101
memset_io(s, STACK_MAGIC, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_reset.c
104
if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
drivers/gpu/drm/i915/gt/selftest_reset.c
106
crc[page] = crc32_le(0, in, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_reset.c
111
ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_reset.c
139
PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_reset.c
142
if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
drivers/gpu/drm/i915/gt/selftest_reset.c
144
x = crc32_le(0, in, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_reset.c
153
igt_hexdump(in, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_reset.c
160
ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_reset.c
45
tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/gpu/drm/i915/gt/selftest_reset.c
96
PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_rps.c
665
PAGE_SIZE, 0);
drivers/gpu/drm/i915/gt/selftest_rps.c
804
PAGE_SIZE, 0);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1153
count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2);
drivers/gpu/drm/i915/gt/selftest_timeline.c
171
state.max = PAGE_SIZE / sizeof(*state.history);
drivers/gpu/drm/i915/gt/selftest_timeline.c
63
#define CACHELINES_PER_PAGE (PAGE_SIZE / TIMELINE_SEQNO_BYTES / 2)
drivers/gpu/drm/i915/gt/selftest_workarounds.c
110
result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
121
memset(cs, 0xc5, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
384
obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
641
i915_vma_offset(batch), PAGE_SIZE,
drivers/gpu/drm/i915/gt/shmem_utils.c
103
min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
drivers/gpu/drm/i915/gt/shmem_utils.c
138
min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
drivers/gpu/drm/i915/gt/sysfs_engines.c
115
if (GEM_WARN_ON(len >= PAGE_SIZE))
drivers/gpu/drm/i915/gt/sysfs_engines.c
73
if (len > PAGE_SIZE)
drivers/gpu/drm/i915/gt/sysfs_engines.c
74
len = PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
184
err = engine->emit_bb_start(rq, i915_vma_offset(pkt->bb_vma), PAGE_SIZE, 0);
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
280
size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
528
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
728
total_size = PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
733
capture_offset += PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
882
return PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
169
return PAGE_SIZE +
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
364
size_t offset = PAGE_SIZE;/* for the log_buffer_states */
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
409
src_data += PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
410
dst_data += PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
921
for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
922
if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
923
memcpy(page, map + i, PAGE_SIZE);
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
925
for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
20
return PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
26
size = PAGE_ALIGN(size + PAGE_SIZE);
drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
36
return PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
39
return PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
65
return PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4389
memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4399
drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
453
return ce->parallel.guc.parent_page * PAGE_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1160
GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1161
vma = intel_guc_allocate_vma(gt_to_guc(gt), PAGE_SIZE);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1295
u32 len = min_t(u32, size, PAGE_SIZE - offset);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1315
u32 len = min_t(u32, size, PAGE_SIZE - offset);
drivers/gpu/drm/i915/gvt/cmd_parser.c
1947
PAGE_SIZE));
drivers/gpu/drm/i915/gvt/cmd_parser.c
2901
PAGE_SIZE);
drivers/gpu/drm/i915/gvt/cmd_parser.c
3019
PAGE_SIZE));
drivers/gpu/drm/i915/gvt/cmd_parser.c
3113
const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
drivers/gpu/drm/i915/gvt/cmd_parser.c
3177
ctx_size = workload->engine->context_size - PAGE_SIZE;
drivers/gpu/drm/i915/gvt/cmd_parser.c
3185
gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE;
drivers/gpu/drm/i915/gvt/dmabuf.c
211
roundup(info->size, PAGE_SIZE));
drivers/gpu/drm/i915/gvt/dmabuf.c
328
if (info->start & (PAGE_SIZE - 1)) {
drivers/gpu/drm/i915/gvt/dmabuf.c
90
sg->length = PAGE_SIZE;
drivers/gpu/drm/i915/gvt/dmabuf.c
91
sg_dma_len(sg) = PAGE_SIZE;
drivers/gpu/drm/i915/gvt/gtt.c
1110
PAGE_SIZE, &dma_addr);
drivers/gpu/drm/i915/gvt/gtt.c
1165
PAGE_SIZE, &dma_addr);
drivers/gpu/drm/i915/gvt/gtt.c
1193
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
drivers/gpu/drm/i915/gvt/gtt.c
1639
index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
drivers/gpu/drm/i915/gvt/gtt.c
2225
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
drivers/gpu/drm/i915/gvt/gtt.c
805
dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/i915/gvt/kvmgt.c
133
DIV_ROUND_UP(size, PAGE_SIZE));
drivers/gpu/drm/i915/gvt/kvmgt.c
140
int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/gpu/drm/i915/gvt/kvmgt.c
174
gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
drivers/gpu/drm/i915/gvt/kvmgt.c
627
u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;
drivers/gpu/drm/i915/gvt/kvmgt.c
66
#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
drivers/gpu/drm/i915/gvt/kvmgt.c
785
ALIGN_DOWN(off, PAGE_SIZE),
drivers/gpu/drm/i915/gvt/reg.h
53
#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
drivers/gpu/drm/i915/i915_cmd_parser.c
1212
int len = min(remain, PAGE_SIZE - x);
drivers/gpu/drm/i915/i915_debugfs_params.c
178
new = strndup_user(ubuf, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gem.c
256
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
drivers/gpu/drm/i915/i915_gem.c
293
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gem.c
335
ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gem.c
417
unsigned page_length = PAGE_SIZE - page_offset;
drivers/gpu/drm/i915/i915_gem.c
527
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gem.c
596
unsigned int page_length = PAGE_SIZE - page_offset;
drivers/gpu/drm/i915/i915_gem.c
704
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
drivers/gpu/drm/i915/i915_gpu_error.c
1195
s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gpu_error.c
1202
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gpu_error.c
1215
if (offset + PAGE_SIZE > resource_size(&mem->io)) {
drivers/gpu/drm/i915/i915_gpu_error.c
1220
s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gpu_error.c
2325
size_t buf_size = PAGE_SIZE * 128;
drivers/gpu/drm/i915/i915_gpu_error.c
309
if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
drivers/gpu/drm/i915/i915_gpu_error.c
311
zstream->avail_in = PAGE_SIZE;
drivers/gpu/drm/i915/i915_gpu_error.c
319
zstream->avail_out = PAGE_SIZE;
drivers/gpu/drm/i915/i915_gpu_error.c
347
zstream->avail_out = PAGE_SIZE;
drivers/gpu/drm/i915/i915_gpu_error.c
409
if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
drivers/gpu/drm/i915/i915_gpu_error.c
410
memcpy(ptr, src, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gpu_error.c
636
len = PAGE_SIZE;
drivers/gpu/drm/i915/i915_hdcp_gsc.c
179
const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
drivers/gpu/drm/i915/i915_hdcp_gsc.c
195
addr_out = addr_in + PAGE_SIZE;
drivers/gpu/drm/i915/i915_hdcp_gsc.c
49
obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);
drivers/gpu/drm/i915/i915_hdcp_gsc.c
63
cmd_out = cmd_in + PAGE_SIZE;
drivers/gpu/drm/i915/i915_mitigations.c
105
return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
drivers/gpu/drm/i915/i915_mitigations.c
108
count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
drivers/gpu/drm/i915/i915_mitigations.c
119
count += scnprintf(buffer + count, PAGE_SIZE - count,
drivers/gpu/drm/i915/i915_mm.c
63
r->sgt.curr += PAGE_SIZE;
drivers/gpu/drm/i915/i915_perf.c
1465
u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
drivers/gpu/drm/i915/i915_perf.c
2138
GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
drivers/gpu/drm/i915/i915_scatterlist.h
115
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
drivers/gpu/drm/i915/i915_scatterlist.h
133
GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
drivers/gpu/drm/i915/i915_scatterlist.h
159
max = PAGE_SIZE;
drivers/gpu/drm/i915/i915_scatterlist.h
160
return round_down(max, PAGE_SIZE);
drivers/gpu/drm/i915/intel_memory_region.c
106
if (resource_size(&mem->io) < PAGE_SIZE)
drivers/gpu/drm/i915/intel_memory_region.c
109
last = resource_size(&mem->io) - PAGE_SIZE;
drivers/gpu/drm/i915/intel_memory_region.c
123
for (page = 0; page <= last; page += PAGE_SIZE) {
drivers/gpu/drm/i915/intel_memory_region.c
71
va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
drivers/gpu/drm/i915/intel_memory_region.c
80
err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
drivers/gpu/drm/i915/intel_memory_region.c
84
err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
drivers/gpu/drm/i915/intel_region_ttm.c
91
mem->min_page_size, PAGE_SIZE);
drivers/gpu/drm/i915/intel_wakeref.c
206
const size_t buf_size = PAGE_SIZE;
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
397
err = gsccs_create_buffer(pxp->ctrl_gt, "Batch Buffer", PAGE_SIZE,
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
114
const size_t max_msg_size = PAGE_SIZE;
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
243
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, I915_BO_ALLOC_CONTIGUOUS);
drivers/gpu/drm/i915/selftests/i915_gem.c
217
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_gem.c
221
obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_gem.c
57
for (page = 0; page < size; page += PAGE_SIZE) {
drivers/gpu/drm/i915/selftests/i915_gem.c
68
for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
drivers/gpu/drm/i915/selftests/i915_gem.c
74
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
75
count, ggtt->vm.total / PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1184
err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1350
const unsigned int count = PAGE_SIZE/sizeof(u32);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1363
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1374
count * PAGE_SIZE, 0,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1385
u64 offset = tmp.start + n * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1402
u64 offset = tmp.start + order[n] * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1413
u64 offset = tmp.start + order[n] * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1560
2 * PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1605
2 * PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_perf.c
317
memset(scratch, POISON_FREE, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_perf.c
405
if (memchr_inv(scratch, POISON_FREE, PAGE_SIZE)) {
drivers/gpu/drm/i915/selftests/i915_request.c
1127
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_request.c
968
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_syncmap.c
119
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/gpu/drm/i915/selftests/i915_syncmap.c
123
if (i915_syncmap_print_to_buf(sync, buf, PAGE_SIZE))
drivers/gpu/drm/i915/selftests/i915_vma.c
1044
offset = (x * plane_info[0].dst_stride + y) * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_vma.c
1046
offset = (y * plane_info[0].dst_stride + x) * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_vma.c
1079
offset = src_idx * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_vma.c
170
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_vma.c
278
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
drivers/gpu/drm/i915/selftests/i915_vma.c
288
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
drivers/gpu/drm/i915/selftests/i915_vma.c
291
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
drivers/gpu/drm/i915/selftests/i915_vma.c
324
obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_vma.c
395
if (sg_dma_len(sg) != PAGE_SIZE) {
drivers/gpu/drm/i915/selftests/i915_vma.c
397
sg_dma_len(sg), PAGE_SIZE,
drivers/gpu/drm/i915/selftests/i915_vma.c
411
left = (r->plane[n].dst_stride - y) * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_vma.c
476
if (left < PAGE_SIZE || left & (PAGE_SIZE-1)) {
drivers/gpu/drm/i915/selftests/i915_vma.c
478
sg_dma_len(sg), PAGE_SIZE,
drivers/gpu/drm/i915/selftests/i915_vma.c
489
left -= PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_vma.c
490
offset += PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_vma.c
504
left = (r->plane[n].dst_stride - r->plane[n].width) * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/i915_vma.c
588
obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_vma.c
638
vma->size != expected_pages * PAGE_SIZE) {
drivers/gpu/drm/i915/selftests/i915_vma.c
640
PAGE_SIZE * expected_pages, vma->size);
drivers/gpu/drm/i915/selftests/i915_vma.c
646
vma->size > expected_pages * PAGE_SIZE) {
drivers/gpu/drm/i915/selftests/i915_vma.c
648
PAGE_SIZE * expected_pages, vma->size);
drivers/gpu/drm/i915/selftests/i915_vma.c
816
obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE);
drivers/gpu/drm/i915/selftests/i915_vma.c
845
if (!assert_pin(vma, &view, sz*PAGE_SIZE, p->name)) {
drivers/gpu/drm/i915/selftests/i915_vma.c
996
obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE);
drivers/gpu/drm/i915/selftests/igt_mmap.c
26
offset / PAGE_SIZE, size / PAGE_SIZE);
drivers/gpu/drm/i915/selftests/igt_spinner.c
210
err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
drivers/gpu/drm/i915/selftests/igt_spinner.c
22
spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/igt_spinner.c
29
spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/igt_spinner.c
95
spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1021
PAGE_SIZE,
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1022
PAGE_SIZE - sizeof(u32),
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1023
PAGE_SIZE - sizeof(u64),
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1024
PAGE_SIZE - 64,
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1041
sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1042
sz = max_t(u32, 2 * PAGE_SIZE, sz);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1090
bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
drivers/gpu/drm/i915/selftests/intel_memory_region.c
190
size = round_up(size, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
192
PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
210
size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
252
obj = igt_object_create(mem, &objects, PAGE_SIZE,
drivers/gpu/drm/i915/selftests/intel_memory_region.c
281
target = round_up(target, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
282
target = max_t(u64, PAGE_SIZE, target);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
360
} while (target >= PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
387
mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
469
ps = PAGE_SIZE;
drivers/gpu/drm/i915/selftests/intel_memory_region.c
65
page_size = PAGE_SIZE;
drivers/gpu/drm/i915/selftests/intel_memory_region.c
698
ptr += PAGE_SIZE / sizeof(*ptr);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
788
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
809
for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
drivers/gpu/drm/i915/selftests/intel_memory_region.c
875
size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
drivers/gpu/drm/i915/selftests/intel_memory_region.c
876
size = round_up(size, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
906
dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
drivers/gpu/drm/i915/selftests/intel_memory_region.c
967
sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/mock_gtt.c
115
ggtt->gmadr = DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
drivers/gpu/drm/i915/selftests/mock_gtt.c
117
ggtt->vm.total = 4096 * PAGE_SIZE;
drivers/gpu/drm/i915/selftests/mock_gtt.c
75
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/scatterlist.c
232
GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));
drivers/gpu/drm/i915/selftests/scatterlist.c
254
sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
drivers/gpu/drm/i915/selftests/scatterlist.c
257
GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
drivers/gpu/drm/i915/selftests/scatterlist.c
328
const unsigned long max = PAGE_SIZE; /* not prime! */
drivers/gpu/drm/i915/selftests/scatterlist.c
61
if (sg->length != npages * PAGE_SIZE) {
drivers/gpu/drm/i915/selftests/scatterlist.c
63
__func__, who, npages * PAGE_SIZE, sg->length);
drivers/gpu/drm/imagination/pvr_free_list.c
241
BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
drivers/gpu/drm/imagination/pvr_free_list.c
243
for (u32 dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
drivers/gpu/drm/imagination/pvr_fw_mips.h
16
#define PVR_MIPS_PT_PAGE_COUNT DIV_ROUND_UP(ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K, PAGE_SIZE)
drivers/gpu/drm/imagination/pvr_mmu.c
219
static_assert(PAGE_SIZE >= PVR_MMU_BACKING_PAGE_SIZE);
drivers/gpu/drm/imagination/pvr_mmu.c
294
kmemleak_alloc(page->host_ptr, PAGE_SIZE, 1, GFP_KERNEL);
drivers/gpu/drm/imagination/pvr_mmu.c
577
PVR_PAGE_TABLE_FIELD_PREP(1, PD, PAGE_SIZE, ROGUE_MMUCTRL_PAGE_SIZE_X) |
drivers/gpu/drm/imagination/pvr_mmu.h
51
#define PVR_DEVICE_PAGE_SIZE (PAGE_SIZE)
drivers/gpu/drm/imagination/pvr_vm.c
943
#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
drivers/gpu/drm/imagination/pvr_vm_mips.c
107
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
drivers/gpu/drm/imagination/pvr_vm_mips.c
58
PAGE_SIZE, DMA_TO_DEVICE);
drivers/gpu/drm/imagination/pvr_vm_mips.c
86
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
drivers/gpu/drm/lima/lima_vm.c
133
offset += PAGE_SIZE;
drivers/gpu/drm/lima/lima_vm.c
307
offset += PAGE_SIZE;
drivers/gpu/drm/loongson/lsdc_ttm.c
456
size = ALIGN(size, PAGE_SIZE);
drivers/gpu/drm/loongson/lsdc_ttm.c
54
if (lbo->tbo.base.size <= PAGE_SIZE)
drivers/gpu/drm/mediatek/mtk_crtc.c
1153
PAGE_SIZE);
drivers/gpu/drm/mediatek/mtk_gem.c
83
size = round_up(size, PAGE_SIZE);
drivers/gpu/drm/mgag200/mgag200_mode.c
796
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
51
for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
1403
range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1184
a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE,
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
452
PAGE_SIZE,
drivers/gpu/drm/msm/adreno/adreno_gpu.c
462
if (len > PAGE_SIZE)
drivers/gpu/drm/msm/msm_gem.c
1299
npages = size / PAGE_SIZE;
drivers/gpu/drm/msm/msm_gem_vma.c
386
obj->size, PAGE_SIZE, 0,
drivers/gpu/drm/msm/msm_iommu.c
122
unmapped = PAGE_SIZE;
drivers/gpu/drm/msm/msm_iommu.c
144
size_t size = PAGE_SIZE;
drivers/gpu/drm/msm/msm_iommu.c
563
WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
drivers/gpu/drm/msm/msm_iommu.c
564
ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
drivers/gpu/drm/nouveau/dispnv50/disp.c
2853
ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &disp->sync);
drivers/gpu/drm/nouveau/nouveau_abi16.c
469
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
drivers/gpu/drm/nouveau/nouveau_abi16.c
489
ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
drivers/gpu/drm/nouveau/nouveau_bo.c
207
*size = roundup_64(*size, PAGE_SIZE);
drivers/gpu/drm/nouveau/nouveau_bo.c
722
num_pages * PAGE_SIZE, DMA_TO_DEVICE);
drivers/gpu/drm/nouveau/nouveau_bo.c
758
num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
drivers/gpu/drm/nouveau/nouveau_bo0039.c
75
PITCH_IN, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo0039.c
76
PITCH_OUT, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo0039.c
77
LINE_LENGTH_IN, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo0039.c
89
src_offset += (PAGE_SIZE * line_count);
drivers/gpu/drm/nouveau/nouveau_bo0039.c
90
dst_offset += (PAGE_SIZE * line_count);
drivers/gpu/drm/nouveau/nouveau_bo85b5.c
62
0x031c, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo85b5.c
63
0x0320, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo85b5.c
64
0x0324, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo85b5.c
69
src_offset += (PAGE_SIZE * line_count);
drivers/gpu/drm/nouveau/nouveau_bo85b5.c
70
dst_offset += (PAGE_SIZE * line_count);
drivers/gpu/drm/nouveau/nouveau_bo9039.c
65
PITCH_IN, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo9039.c
66
PITCH_OUT, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo9039.c
67
LINE_LENGTH_IN, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo9039.c
79
src_offset += (PAGE_SIZE * line_count);
drivers/gpu/drm/nouveau/nouveau_bo9039.c
80
dst_offset += (PAGE_SIZE * line_count);
drivers/gpu/drm/nouveau/nouveau_bo90b5.c
55
0x031c, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo90b5.c
56
0x0320, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo90b5.c
57
0x0324, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_bo90b5.c
62
src_offset += (PAGE_SIZE * line_count);
drivers/gpu/drm/nouveau/nouveau_bo90b5.c
63
dst_offset += (PAGE_SIZE * line_count);
drivers/gpu/drm/nouveau/nouveau_boa0b5.c
58
PITCH_IN, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_boa0b5.c
59
PITCH_OUT, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_boa0b5.c
60
LINE_LENGTH_IN, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_chan.c
195
return nouveau_bo_new_map_gpu(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_chan.c
324
0, PAGE_SIZE, NULL, 0, &chan->mem_userd);
drivers/gpu/drm/nouveau/nouveau_dmem.c
221
args.start = ALIGN_DOWN(vmf->address, (PAGE_SIZE << order));
drivers/gpu/drm/nouveau/nouveau_dmem.c
223
args.end = args.start + (PAGE_SIZE << order);
drivers/gpu/drm/nouveau/nouveau_dmem.c
270
dma_unmap_page(drm->dev->dev, dma_info.dma_addr, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_dmem.c
609
PITCH_IN, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_dmem.c
610
PITCH_OUT, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_dmem.c
611
LINE_LENGTH_IN, PAGE_SIZE,
drivers/gpu/drm/nouveau/nouveau_dmem.c
775
dma_unmap_page(dev, dma_info->dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/nouveau/nouveau_dmem.c
798
addr += PAGE_SIZE;
drivers/gpu/drm/nouveau/nouveau_dmem.c
806
addr += (1 << order) * PAGE_SIZE;
drivers/gpu/drm/nouveau/nouveau_svm.c
120
args->va_end = ALIGN(args->va_end, PAGE_SIZE);
drivers/gpu/drm/nouveau/nouveau_svm.c
633
args->p.size = PAGE_SIZE;
drivers/gpu/drm/nouveau/nouveau_svm.c
793
limit = start + PAGE_SIZE;
drivers/gpu/drm/nouveau/nouveau_svm.c
804
args->p.size = PAGE_SIZE;
drivers/gpu/drm/nouveau/nouveau_vmm.c
135
PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
drivers/gpu/drm/nouveau/nv17_fence.c
133
ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
drivers/gpu/drm/nouveau/nv17_fence.c
82
u32 start = reg->start * PAGE_SIZE;
drivers/gpu/drm/nouveau/nv50_fence.c
41
u32 start = reg->start * PAGE_SIZE;
drivers/gpu/drm/nouveau/nv50_fence.c
84
ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
240
len = ALIGN(fw->len, PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
257
len = ALIGN(fw->len, PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
280
sg_set_page(sgl, page, PAGE_SIZE, 0);
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
281
data += PAGE_SIZE;
drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
39
size = roundup(size, PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
133
if (pgsize_bitmap & PAGE_SIZE) {
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
252
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
287
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
194
bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
1588
const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
1604
sg_set_page(sgl, page, PAGE_SIZE, 0);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
337
(r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
338
dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
454
dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
478
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
484
offset -= PAGE_SIZE;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
485
iommu_unmap(imem->domain, offset, PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
506
dma_unmap_page(dev, dma_addr, PAGE_SIZE,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
527
size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
528
align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
181
if (!IS_ALIGNED(size, PAGE_SIZE))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
192
size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
210
p, 0, PAGE_SIZE,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
89
mem->dma[mem->pages], PAGE_SIZE,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
356
*MAP->dma, PAGE_SIZE, MAP->dma++)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
45
dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
95
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
575
int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
715
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
drivers/gpu/drm/omapdrm/omap_gem.c
1032
sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
drivers/gpu/drm/omapdrm/omap_gem.c
1034
sg_dma_len(sg) = PAGE_SIZE;
drivers/gpu/drm/omapdrm/omap_gem.c
1412
npages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/gpu/drm/omapdrm/omap_gem.c
1479
u16 h = 1, w = PAGE_SIZE >> i;
drivers/gpu/drm/omapdrm/omap_gem.c
1489
usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
drivers/gpu/drm/omapdrm/omap_gem.c
1495
block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
drivers/gpu/drm/omapdrm/omap_gem.c
180
size_t size = PAGE_SIZE * n;
drivers/gpu/drm/omapdrm/omap_gem.c
183
const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
drivers/gpu/drm/omapdrm/omap_gem.c
190
off, PAGE_SIZE, 1);
drivers/gpu/drm/omapdrm/omap_gem.c
191
off += PAGE_SIZE * m;
drivers/gpu/drm/omapdrm/omap_gem.c
265
0, PAGE_SIZE, DMA_TO_DEVICE);
drivers/gpu/drm/omapdrm/omap_gem.c
273
PAGE_SIZE, DMA_TO_DEVICE);
drivers/gpu/drm/omapdrm/omap_gem.c
313
PAGE_SIZE, DMA_TO_DEVICE);
drivers/gpu/drm/omapdrm/omap_gem.c
407
const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
drivers/gpu/drm/omapdrm/omap_gem.c
474
vaddr += PAGE_SIZE * m;
drivers/gpu/drm/omapdrm/omap_gem.c
702
PAGE_SIZE, DMA_TO_DEVICE);
drivers/gpu/drm/omapdrm/omap_gem.c
725
PAGE_SIZE, dir);
drivers/gpu/drm/omapdrm/omap_gem.c
755
PAGE_SIZE);
drivers/gpu/drm/omapdrm/tcm-sita.c
86
unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
248
ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
drivers/gpu/drm/panfrost/panfrost_dump.c
139
WARN_ON(!IS_ALIGNED(dbo->size, PAGE_SIZE));
drivers/gpu/drm/panfrost/panfrost_mmu.c
585
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
drivers/gpu/drm/panfrost/panfrost_mmu.c
645
sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
drivers/gpu/drm/panthor/panthor_device.c
464
if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
drivers/gpu/drm/panthor/panthor_heap.c
294
if (!IS_ALIGNED(chunk_size, PAGE_SIZE) ||
drivers/gpu/drm/qxl/qxl_dumb.c
45
args->size = ALIGN(args->size, PAGE_SIZE);
drivers/gpu/drm/qxl/qxl_gem.c
57
if (alignment < PAGE_SIZE)
drivers/gpu/drm/qxl/qxl_gem.c
58
alignment = PAGE_SIZE;
drivers/gpu/drm/qxl/qxl_image.c
153
size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
drivers/gpu/drm/qxl/qxl_image.c
156
size = PAGE_SIZE;
drivers/gpu/drm/qxl/qxl_image.c
178
size = min((int)(PAGE_SIZE - page_offset), remain);
drivers/gpu/drm/qxl/qxl_ioctl.c
164
if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
drivers/gpu/drm/qxl/qxl_object.c
122
size = roundup(size, PAGE_SIZE);
drivers/gpu/drm/qxl/qxl_object.c
226
rptr = bo->kptr + (page_offset * PAGE_SIZE);
drivers/gpu/drm/qxl/qxl_object.c
235
rptr += page_offset * PAGE_SIZE;
drivers/gpu/drm/qxl/qxl_object.c
62
if (qbo->tbo.base.size <= PAGE_SIZE)
drivers/gpu/drm/qxl/qxl_release.c
167
return qxl_bo_create(qdev, PAGE_SIZE, false, true,
drivers/gpu/drm/qxl/qxl_release.c
41
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
drivers/gpu/drm/qxl/qxl_release.c
44
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
drivers/gpu/drm/qxl/qxl_ttm.c
206
num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
drivers/gpu/drm/qxl/qxl_ttm.c
213
qdev->surfaceram_size / PAGE_SIZE);
drivers/gpu/drm/qxl/qxl_ttm.c
221
((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
drivers/gpu/drm/radeon/cik.c
4395
PAGE_SIZE, true,
drivers/gpu/drm/radeon/cik.c
4566
PAGE_SIZE, true,
drivers/gpu/drm/radeon/evergreen.c
4178
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
drivers/gpu/drm/radeon/evergreen.c
4257
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
drivers/gpu/drm/radeon/evergreen.c
4334
PAGE_SIZE, true,
drivers/gpu/drm/radeon/r600.c
1513
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
drivers/gpu/drm/radeon/r600.c
3485
PAGE_SIZE, true,
drivers/gpu/drm/radeon/radeon_benchmark.c
109
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj);
drivers/gpu/drm/radeon/radeon_benchmark.c
97
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj);
drivers/gpu/drm/radeon/radeon_device.c
464
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
drivers/gpu/drm/radeon/radeon_device.c
789
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/gpu/drm/radeon/radeon_device.c
812
dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
drivers/gpu/drm/radeon/radeon_fbdev.c
79
aligned_size = ALIGN(size, PAGE_SIZE);
drivers/gpu/drm/radeon/radeon_gart.c
134
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
drivers/gpu/drm/radeon/radeon_gart.c
252
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
drivers/gpu/drm/radeon/radeon_gart.c
256
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
drivers/gpu/drm/radeon/radeon_gart.c
298
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
drivers/gpu/drm/radeon/radeon_gart.c
304
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
drivers/gpu/drm/radeon/radeon_gart.c
336
if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
drivers/gpu/drm/radeon/radeon_gart.c
344
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
drivers/gpu/drm/radeon/radeon_gem.c
104
if (alignment < PAGE_SIZE) {
drivers/gpu/drm/radeon/radeon_gem.c
105
alignment = PAGE_SIZE;
drivers/gpu/drm/radeon/radeon_gem.c
323
args->size = roundup(args->size, PAGE_SIZE);
drivers/gpu/drm/radeon/radeon_gem.c
846
args->size = ALIGN(args->size, PAGE_SIZE);
drivers/gpu/drm/radeon/radeon_object.c
136
unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
drivers/gpu/drm/radeon/radeon_object.c
139
size = ALIGN(size, PAGE_SIZE);
drivers/gpu/drm/radeon/radeon_prime.c
55
ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
drivers/gpu/drm/radeon/radeon_ring.c
394
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
drivers/gpu/drm/radeon/radeon_test.c
70
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
drivers/gpu/drm/radeon/radeon_test.c
90
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
drivers/gpu/drm/radeon/radeon_ttm.c
176
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
drivers/gpu/drm/radeon/radeon_ttm.c
178
num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
drivers/gpu/drm/radeon/radeon_ttm.c
340
unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
drivers/gpu/drm/radeon/radeon_ttm.c
349
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
drivers/gpu/drm/radeon/radeon_ttm.c
704
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
drivers/gpu/drm/radeon/radeon_ttm.c
851
loff_t p = *pos / PAGE_SIZE;
drivers/gpu/drm/radeon/radeon_ttm.c
853
size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
drivers/gpu/drm/radeon/radeon_uvd.c
189
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
drivers/gpu/drm/radeon/radeon_vce.c
142
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
290
size = round_up(size, PAGE_SIZE);
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
32
rk_obj->base.size, PAGE_SIZE,
drivers/gpu/drm/sysfb/ofdrm.c
948
fb_pgbase = round_down(fb_base, PAGE_SIZE);
drivers/gpu/drm/sysfb/ofdrm.c
949
fb_pgsize = fb_base - fb_pgbase + round_up(fb_size, PAGE_SIZE);
drivers/gpu/drm/tegra/gem.c
250
bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
drivers/gpu/drm/tegra/gem.c
312
size = round_up(size, PAGE_SIZE);
drivers/gpu/drm/tests/drm_exec_test.c
108
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
drivers/gpu/drm/tests/drm_exec_test.c
135
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
drivers/gpu/drm/tests/drm_exec_test.c
167
drm_gem_private_object_init(priv->drm, gobj1, PAGE_SIZE);
drivers/gpu/drm/tests/drm_exec_test.c
168
drm_gem_private_object_init(priv->drm, gobj2, PAGE_SIZE);
drivers/gpu/drm/tests/drm_exec_test.c
61
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
drivers/gpu/drm/tests/drm_exec_test.c
81
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
drivers/gpu/drm/tests/drm_format_helper_test.c
23
static unsigned char fmtcnv_state_mem[PAGE_SIZE];
drivers/gpu/drm/tests/drm_panic_test.c
128
npages = DIV_ROUND_UP(fb_size, PAGE_SIZE);
drivers/gpu/drm/tests/drm_panic_test.c
141
memset(vaddr, 0xa5, PAGE_SIZE);
drivers/gpu/drm/tests/drm_panic_test.c
152
int bytes_in_page = (p == npages - 1) ? fb_size - p * PAGE_SIZE : PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
1013
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
1057
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
109
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
1108
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
1115
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
125
PAGE_SIZE, &ctx, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
133
KUNIT_EXPECT_EQ(test, bo->page_alignment, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
155
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
174
PAGE_SIZE, &ctx, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
197
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
216
PAGE_SIZE, &ctx, NULL, &resv,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
235
u32 size = ALIGN(SZ_8K, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
250
fst_placement, PAGE_SIZE, &ctx_init, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
276
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
302
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
331
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
378
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
396
placement, PAGE_SIZE, &ctx_init, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
419
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
437
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
468
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
484
placement_init, PAGE_SIZE, &ctx_init, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
522
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
593
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
645
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
707
u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
723
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
791
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
840
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
891
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
902
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
956
PAGE_SIZE, &ctx_init, NULL, NULL,
drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
109
err = drm_buddy_init(&manager->mm, size, PAGE_SIZE);
drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
116
manager->default_page_size = PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
145
size_t size = expected_num_pages * PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
206
size_t size = expected_num_pages * PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
245
size_t size = PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
273
size_t size = PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
307
size_t fst_size = (1 << order) * PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
308
size_t snd_size = PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
343
size_t size = (1 << order) * PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
374
size_t size = (1 << order) * PAGE_SIZE;
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
402
size_t size = PAGE_SIZE;
drivers/gpu/drm/ttm/ttm_backup.c
37
start + PAGE_SIZE - 1);
drivers/gpu/drm/ttm/ttm_bo_util.c
107
memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
drivers/gpu/drm/ttm/ttm_bo_util.c
109
memset(dst_map.vaddr, 0, PAGE_SIZE);
drivers/gpu/drm/ttm/ttm_bo_util.c
120
drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
drivers/gpu/drm/ttm/ttm_bo_vm.c
276
address += PAGE_SIZE;
drivers/gpu/drm/ttm/ttm_bo_vm.c
314
address += PAGE_SIZE)
drivers/gpu/drm/ttm/ttm_bo_vm.c
381
unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
drivers/gpu/drm/ttm/ttm_pool.c
174
vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
drivers/gpu/drm/ttm/ttm_pool.c
222
dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
drivers/gpu/drm/ttm/ttm_pool.c
260
size_t size = (1ULL << order) * PAGE_SIZE;
drivers/gpu/drm/ttm/ttm_pool.c
510
first_dma += PAGE_SIZE;
drivers/gpu/drm/ttm/ttm_resource.c
810
iosys_map_incr(dmap, i * PAGE_SIZE);
drivers/gpu/drm/udl/udl_main.c
238
if (size > PAGE_SIZE) {
drivers/gpu/drm/udl/udl_main.c
24
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
drivers/gpu/drm/v3d/v3d_drv.h
23
#define V3D_PAGE_FACTOR (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT)
drivers/gpu/drm/vc4/vc4_bo.c
164
return (size / PAGE_SIZE) - 1;
drivers/gpu/drm/vc4/vc4_bo.c
425
size_t size = roundup(unaligned_size, PAGE_SIZE);
drivers/gpu/drm/virtio/virtgpu_gem.c
76
args->size = ALIGN(args->size, PAGE_SIZE);
drivers/gpu/drm/virtio/virtgpu_ioctl.c
170
params.size = PAGE_SIZE;
drivers/gpu/drm/virtio/virtgpu_object.c
217
params->size = roundup(params->size, PAGE_SIZE);
drivers/gpu/drm/virtio/virtgpu_vq.c
319
*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/gpu/drm/virtio/virtgpu_vq.c
334
s = min_t(int, PAGE_SIZE, size);
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
376
copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
377
copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
428
params->size = ALIGN(params->size, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
127
if (min < PAGE_SIZE)
drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
128
min = PAGE_SIZE;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1312
64, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1321
64, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
469
PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
620
vcotbl->res.guest_memory_size = PAGE_SIZE;
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
621
num_entries = PAGE_SIZE / co_info[type].size;
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
93
{PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
94
{PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
358
if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
385
.size = PAGE_SIZE,
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
938
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
82
param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
91
param->value = dev_priv->max_mob_pages * PAGE_SIZE;
drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h
60
return page_addr + PAGE_SIZE * 1;
drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h
73
return page_addr + PAGE_SIZE * 2;
drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h
86
return page_addr + PAGE_SIZE * 3;
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
141
if (otable->size <= PAGE_SIZE) {
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
374
unsigned long data_size = data_pages * PAGE_SIZE;
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
377
while (likely(data_size > PAGE_SIZE)) {
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
378
data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
418
return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE,
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
461
unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
472
for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
641
cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
66
#define MAX_USER_MSG_LENGTH PAGE_SIZE
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
717
BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
239
if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
308
res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
344
res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
386
res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
467
page_offset + PAGE_SIZE,
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1156
PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
420
PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
1116
PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
852
required_mem = ALIGN(required_mem, PAGE_SIZE);
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
867
if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
108
if (size > PAGE_SIZE)
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
118
ctx->mem_size_left = PAGE_SIZE;
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
121
addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
156
const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
167
addr_out_off = PAGE_SIZE;
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
68
bo = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), PAGE_SIZE * 2,
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
80
cmd_out = cmd_in + PAGE_SIZE;
drivers/gpu/drm/xe/display/xe_panic.c
24
drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE);
drivers/gpu/drm/xe/display/xe_panic.c
51
offset = offset % PAGE_SIZE;
drivers/gpu/drm/xe/display/xe_panic.c
58
xe_res_first(bo->ttm.resource, new_page * PAGE_SIZE,
drivers/gpu/drm/xe/display/xe_panic.c
59
bo->ttm.base.size - new_page * PAGE_SIZE, &panic->res);
drivers/gpu/drm/xe/display/xe_panic.c
61
xe_res_next(&panic->res, PAGE_SIZE * (new_page - panic->page));
drivers/gpu/drm/xe/regs/xe_engine_regs.h
59
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
drivers/gpu/drm/xe/tests/xe_bo.c
111
offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
drivers/gpu/drm/xe/tests/xe_bo.c
473
ram_and_swap = ram + get_nr_swap_pages() * PAGE_SIZE;
drivers/gpu/drm/xe/tests/xe_dma_buf.c
124
size = PAGE_SIZE;
drivers/gpu/drm/xe/xe_bo.c
1667
int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);
drivers/gpu/drm/xe/xe_bo.c
1680
xe_res_next(&cursor, PAGE_SIZE);
drivers/gpu/drm/xe/xe_bo.c
2695
struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
drivers/gpu/drm/xe/xe_bo.c
2990
xe_assert(xe, page_size <= PAGE_SIZE);
drivers/gpu/drm/xe/xe_bo.c
2992
offset &= (PAGE_SIZE - 1);
drivers/gpu/drm/xe/xe_bo.c
3313
if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
drivers/gpu/drm/xe/xe_bo.c
3635
u32 page_size = max_t(u32, PAGE_SIZE,
drivers/gpu/drm/xe/xe_bo.c
492
PAGE_SIZE);
drivers/gpu/drm/xe/xe_bo.h
427
return round_down(max / 2, PAGE_SIZE);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
1672
ALIGN(size, PAGE_SIZE), 0, ~0ull,
drivers/gpu/drm/xe/xe_guc_ads.c
629
total_size = PAGE_SIZE;
drivers/gpu/drm/xe/xe_guc_ads.c
634
capture_offset += PAGE_SIZE;
drivers/gpu/drm/xe/xe_guc_ads.c
830
base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset;
drivers/gpu/drm/xe/xe_guc_capture.c
766
total_size = PAGE_SIZE; /* Pad a page in front for empty lists */
drivers/gpu/drm/xe/xe_guc_ct.c
313
xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
drivers/gpu/drm/xe/xe_migrate.c
1143
xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
drivers/gpu/drm/xe/xe_migrate.c
1189
xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
drivers/gpu/drm/xe/xe_migrate.c
1305
xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
drivers/gpu/drm/xe/xe_migrate.c
2039
chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
drivers/gpu/drm/xe/xe_migrate.c
2065
if (gpu_page_size < PAGE_SIZE) {
drivers/gpu/drm/xe/xe_migrate.c
2073
i += gpu_page_size / PAGE_SIZE;
drivers/gpu/drm/xe/xe_migrate.c
2083
unsigned long i, incr = large_size / PAGE_SIZE;
drivers/gpu/drm/xe/xe_migrate.c
2085
for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr)
drivers/gpu/drm/xe/xe_migrate.c
2086
if (PAGE_SIZE << sram_addr[i].order != large_size)
drivers/gpu/drm/xe/xe_migrate.c
2099
if (IS_ALIGNED(len, PAGE_SIZE))
drivers/gpu/drm/xe/xe_migrate.c
2100
pitch = PAGE_SIZE;
drivers/gpu/drm/xe/xe_migrate.c
2131
unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
drivers/gpu/drm/xe/xe_migrate.c
2142
xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
drivers/gpu/drm/xe/xe_migrate.c
2166
sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
drivers/gpu/drm/xe/xe_migrate.c
2255
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
drivers/gpu/drm/xe/xe_migrate.c
2279
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
drivers/gpu/drm/xe/xe_migrate.c
2287
unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
drivers/gpu/drm/xe/xe_migrate.c
2293
dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
drivers/gpu/drm/xe/xe_migrate.c
2304
unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
drivers/gpu/drm/xe/xe_migrate.c
2321
addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
drivers/gpu/drm/xe/xe_migrate.c
2329
buf += PAGE_SIZE;
drivers/gpu/drm/xe/xe_migrate.c
2473
current_page = (int)(buf - orig_buf) / PAGE_SIZE;
drivers/gpu/drm/xe/xe_migrate.c
658
xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
drivers/gpu/drm/xe/xe_migrate.c
951
xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
drivers/gpu/drm/xe/xe_mmio_gem.c
184
for (i = 0; i < base->size; i += PAGE_SIZE) {
drivers/gpu/drm/xe/xe_mmio_gem.c
215
for (i = 0; i < base->size; i += PAGE_SIZE) {
drivers/gpu/drm/xe/xe_mmio_gem.c
78
if ((phys_addr % PAGE_SIZE != 0) || (size % PAGE_SIZE != 0))
drivers/gpu/drm/xe/xe_oa.c
1704
PAGE_SIZE, vma->vm_page_prot);
drivers/gpu/drm/xe/xe_oa.c
1708
start += PAGE_SIZE;
drivers/gpu/drm/xe/xe_psmi.c
159
val = __xe_bo_addr(bo, 0, PAGE_SIZE);
drivers/gpu/drm/xe/xe_query.c
273
mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
drivers/gpu/drm/xe/xe_query.c
287
SZ_64K : PAGE_SIZE;
drivers/gpu/drm/xe/xe_res_cursor.h
179
cur->dma_seg_size = PAGE_SIZE << addr->order;
drivers/gpu/drm/xe/xe_res_cursor.h
189
cur->dma_seg_size += PAGE_SIZE << addr->order;
drivers/gpu/drm/xe/xe_res_cursor.h
237
XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
drivers/gpu/drm/xe/xe_res_cursor.h
238
!IS_ALIGNED(size, PAGE_SIZE));
drivers/gpu/drm/xe/xe_res_cursor.h
243
cur->dma_seg_size = PAGE_SIZE << dma_addr->order;
drivers/gpu/drm/xe/xe_svm.c
1669
PAGE_SIZE << order, dir,
drivers/gpu/drm/xe/xe_svm.c
1684
dma_unmap_resource(dev, addr->addr, PAGE_SIZE << addr->order,
drivers/gpu/drm/xe/xe_svm.c
566
chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
drivers/gpu/drm/xe/xe_svm.c
592
match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
drivers/gpu/drm/xe/xe_svm.c
598
chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
drivers/gpu/drm/xe/xe_svm.c
617
(PAGE_SIZE / SZ_1K));
drivers/gpu/drm/xe/xe_svm.c
659
(PAGE_SIZE / SZ_1K));
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
248
io_size, PAGE_SIZE);
drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
354
PAGE_SIZE);
drivers/gpu/drm/xen/xen_drm_front.c
182
buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/gpu/drm/xen/xen_drm_front.c
763
if (XEN_PAGE_SIZE != PAGE_SIZE) {
drivers/gpu/drm/xen/xen_drm_front.c
765
XEN_PAGE_SIZE, PAGE_SIZE);
drivers/gpu/drm/xen/xen_drm_front_gem.c
141
size = round_up(size, PAGE_SIZE);
drivers/gpu/drm/xen/xen_drm_front_gem.c
175
xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/gpu/drm/xen/xen_drm_front_gem.c
49
xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-common.c
1346
ipu_base + devtype->cm_ofs, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-common.c
1349
PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-csi.c
696
csi->base = devm_ioremap(dev, base, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-dc.c
360
priv->dc_reg = devm_ioremap(dev, base, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-dc.c
361
priv->dc_tmpl_reg = devm_ioremap(dev, template_base, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-di.c
733
di->base = devm_ioremap(dev, base, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-dmfc.c
182
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-dp.c
357
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-ic.c
637
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-smfc.c
186
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
drivers/gpu/ipu-v3/ipu-vdi.c
211
vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
drivers/greybus/control.c
407
return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
drivers/greybus/control.c
416
return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
drivers/greybus/interface.c
445
return scnprintf(buf, PAGE_SIZE, type"\n", intf->field); \
drivers/greybus/interface.c
519
return scnprintf(buf, PAGE_SIZE, "on\n");
drivers/greybus/interface.c
521
return scnprintf(buf, PAGE_SIZE, "off\n");
drivers/hid/hid-cp2112.c
1019
PAGE_SIZE - 1);
drivers/hid/hid-ft260.c
829
return scnprintf(buf, PAGE_SIZE, "%d\n", *field);
drivers/hid/hid-ft260.c
841
return scnprintf(buf, PAGE_SIZE, "%d\n", le16_to_cpu(*field));
drivers/hid/hid-gt683r.c
93
return scnprintf(buf, PAGE_SIZE, "%u\n", sysfs_mode);
drivers/hid/hid-lg4ff.c
829
if (count >= PAGE_SIZE - 1)
drivers/hid/hid-lg4ff.c
839
if (count >= PAGE_SIZE - 1)
drivers/hid/hid-logitech-hidpp.c
2766
return scnprintf(buf, PAGE_SIZE, "%u\n", data->range);
drivers/hid/hid-picolcd_fb.c
429
buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
drivers/hid/hid-sensor-custom.c
321
PAGE_SIZE - len,
drivers/hid/hid-sensor-custom.c
343
len += scnprintf(&buf[len], PAGE_SIZE - len,
drivers/hid/hid-sensor-custom.c
346
len += scnprintf(&buf[len], PAGE_SIZE - len, "\n");
drivers/hid/hid-wiimote-core.c
1697
return strnlen(buf, PAGE_SIZE);
drivers/hid/hid-wiimote-modules.c
1856
return strnlen(buf, PAGE_SIZE);
drivers/hid/intel-ish-hid/ishtp-fw-loader.c
96
static int dma_buf_size_limit = 4 * PAGE_SIZE;
drivers/hid/intel-ish-hid/ishtp/bus.c
361
len = snprintf(buf, PAGE_SIZE, ISHTP_MODULE_PREFIX "%s\n", dev_name(dev));
drivers/hid/intel-ish-hid/ishtp/bus.c
362
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
drivers/hid/wacom_sys.c
1079
return scnprintf(buf, PAGE_SIZE, "%d\n", \
drivers/hid/wacom_sys.c
1134
return scnprintf(buf, PAGE_SIZE, "%d\n", wacom->led.field); \
drivers/hid/wacom_sys.c
1304
int fifo_size = min(PAGE_SIZE, 10 * wacom_wac->features.pktlen);
drivers/hsi/clients/cmt_speech.c
29
#define CS_MMAP_SIZE PAGE_SIZE
drivers/hv/channel.c
108
delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
drivers/hv/channel.c
110
delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
drivers/hv/channel.c
174
if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
drivers/hv/channel.c
46
BUG_ON(size % PAGE_SIZE);
drivers/hv/channel.c
55
return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
drivers/hv/channel.c
81
return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
drivers/hv/connection.c
219
BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
drivers/hv/hv.c
131
memset(*page, 0, PAGE_SIZE);
drivers/hv/hv_balloon.c
1692
if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
drivers/hv/hv_balloon.c
1955
ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE;
drivers/hv/hv_balloon.c
1964
ha_pages_in_chunk = SZ_128M / PAGE_SIZE;
drivers/hv/hv_balloon.c
499
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
drivers/hv/mshv_root_main.c
215
output_pg = (char *)input_pg + PAGE_SIZE;
drivers/hv/mshv_root_main.c
720
if (check_mul_overflow(check, PAGE_SIZE, &check))
drivers/hv/mshv_root_main.c
729
unsigned long user_addr = (user_pfn + completed) * PAGE_SIZE;
drivers/hv/mshv_synic.c
486
PAGE_SIZE, MEMREMAP_WB);
drivers/hv/mshv_synic.c
497
PAGE_SIZE, MEMREMAP_WB);
drivers/hv/ring_buffer.c
193
BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
drivers/hwtracing/coresight/coresight-catu.c
90
#define CATU_PAGES_PER_SYSPAGE (PAGE_SIZE / CATU_PAGE_SIZE)
drivers/hwtracing/coresight/coresight-etb10.c
417
buf->cur = head / PAGE_SIZE;
drivers/hwtracing/coresight/coresight-etb10.c
420
buf->offset = head % PAGE_SIZE;
drivers/hwtracing/coresight/coresight-etb10.c
546
if (offset >= PAGE_SIZE) {
drivers/hwtracing/coresight/coresight-etm-perf.c
857
return scnprintf(buf, PAGE_SIZE, "0x%px\n", ea->var);
drivers/hwtracing/coresight/coresight-etm-perf.c
949
return scnprintf(buf, PAGE_SIZE, "configid=0x%px\n", ea->var);
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
1185
return scnprintf(buf, PAGE_SIZE, "%d\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
103
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1053
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1108
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
115
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1156
len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1223
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1266
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
127
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1329
size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1332
size += scnprintf(buf + size, PAGE_SIZE - size,
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1336
size += scnprintf(buf + size, PAGE_SIZE - size,
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1339
size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1356
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1387
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
139
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1423
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1457
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1490
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
151
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1520
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1560
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1598
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
163
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1636
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1668
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1712
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1750
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1784
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1819
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1834
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1868
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1915
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1976
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
2094
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
2139
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
2191
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
2307
return scnprintf(buf, PAGE_SIZE, "%d\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
2465
return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
291
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
457
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
492
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
541
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
597
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
627
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
657
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
67
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
690
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
730
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
762
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
79
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
797
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
831
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
870
len = scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
91
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
922
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
978
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
drivers/hwtracing/coresight/coresight-stm.c
473
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-stm.c
502
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-stm.c
537
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-stm.c
584
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
124
return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->preset);
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
236
return scnprintf(page, PAGE_SIZE, "%s", fs_feat->feat_desc->description);
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
248
used = scnprintf(page, PAGE_SIZE, "SRC_ALL ");
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
251
used += scnprintf(page + used, PAGE_SIZE - used, "SRC_ETMV4 ");
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
253
used += scnprintf(page + used, PAGE_SIZE - used, "\n");
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
263
return scnprintf(page, PAGE_SIZE, "%d\n", fs_feat->feat_desc->nr_params);
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
286
return scnprintf(page, PAGE_SIZE, "0x%llx\n", value);
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
32
return scnprintf(page, PAGE_SIZE, "%s", fs_config->config_desc->description);
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
45
ch_used += scnprintf(page + ch_used, PAGE_SIZE - ch_used,
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
78
used += scnprintf(page + used, PAGE_SIZE - used,
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
85
used += scnprintf(page + used, PAGE_SIZE - used, "\n");
drivers/hwtracing/coresight/coresight-syscfg-configfs.c
96
return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->active);
drivers/hwtracing/coresight/coresight-sysfs.c
319
return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->sysfs_sink_activated);
drivers/hwtracing/coresight/coresight-sysfs.c
347
return scnprintf(buf, PAGE_SIZE, "%u\n",
drivers/hwtracing/coresight/coresight-tmc-core.c
528
if (val & (PAGE_SIZE - 1))
drivers/hwtracing/coresight/coresight-tmc-etf.c
464
buf->cur = head / PAGE_SIZE;
drivers/hwtracing/coresight/coresight-tmc-etf.c
467
buf->offset = head % PAGE_SIZE;
drivers/hwtracing/coresight/coresight-tmc-etf.c
570
if (offset >= PAGE_SIZE) {
drivers/hwtracing/coresight/coresight-tmc-etr.c
156
if (addr >= page_start && addr < (page_start + PAGE_SIZE))
drivers/hwtracing/coresight/coresight-tmc-etr.c
157
return i * PAGE_SIZE + (addr - page_start);
drivers/hwtracing/coresight/coresight-tmc-etr.c
1591
pg_offset = head & (PAGE_SIZE - 1);
drivers/hwtracing/coresight/coresight-tmc-etr.c
1609
bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));
drivers/hwtracing/coresight/coresight-tmc-etr.c
1617
if (pg_offset == PAGE_SIZE) {
drivers/hwtracing/coresight/coresight-tmc-etr.c
177
PAGE_SIZE, dir);
drivers/hwtracing/coresight/coresight-tmc-etr.c
228
paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
drivers/hwtracing/coresight/coresight-tmc-etr.c
361
int npages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/hwtracing/coresight/coresight-tmc-etr.c
369
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/hwtracing/coresight/coresight-tmc-etr.c
383
PAGE_SIZE, DMA_TO_DEVICE);
drivers/hwtracing/coresight/coresight-tmc-etr.c
400
int pg_offset = offset & (PAGE_SIZE - 1);
drivers/hwtracing/coresight/coresight-tmc-etr.c
410
len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
drivers/hwtracing/coresight/coresight-tmc-etr.c
88
#define ETR_SG_PAGES_PER_SYSPAGE (PAGE_SIZE / ETR_SG_PAGE_SIZE)
drivers/hwtracing/coresight/coresight-tmc-etr.c
90
#define ETR_SG_PTRS_PER_SYSPAGE (PAGE_SIZE / sizeof(sgte_t))
drivers/hwtracing/coresight/coresight-trbe.c
1356
cpudata->trbe_align = PAGE_SIZE;
drivers/hwtracing/coresight/coresight-trbe.c
371
return buf->nr_pages * PAGE_SIZE;
drivers/hwtracing/coresight/coresight-trbe.c
389
size += PAGE_SIZE;
drivers/hwtracing/coresight/coresight-trbe.c
407
const u64 bufsize = buf->nr_pages * PAGE_SIZE;
drivers/hwtracing/coresight/coresight-trbe.c
498
limit = round_down(tail, PAGE_SIZE);
drivers/hwtracing/coresight/coresight-trbe.c
515
limit = min(limit, round_up(wakeup, PAGE_SIZE));
drivers/hwtracing/coresight/coresight-trbe.c
606
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
drivers/hwtracing/coresight/coresight-trbe.c
769
buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
drivers/hwtracing/coresight/coresight-trbe.c
950
if (WARN_ON(!IS_ALIGNED(buf->trbe_write, PAGE_SIZE)))
drivers/hwtracing/coresight/coresight-trbe.c
979
if (WARN_ON(space <= PAGE_SIZE ||
drivers/hwtracing/coresight/coresight-trbe.c
980
!IS_ALIGNED(buf->trbe_limit, PAGE_SIZE)))
drivers/hwtracing/coresight/coresight-trbe.c
982
buf->trbe_limit -= PAGE_SIZE;
drivers/hwtracing/coresight/coresight-trbe.h
120
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
drivers/hwtracing/coresight/coresight-trbe.h
129
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
drivers/hwtracing/coresight/coresight-trbe.h
137
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
drivers/hwtracing/coresight/ultrasoc-smb.c
343
pg_offset = head & (PAGE_SIZE - 1);
drivers/hwtracing/coresight/ultrasoc-smb.c
346
unsigned long pg_space = PAGE_SIZE - pg_offset;
drivers/hwtracing/coresight/ultrasoc-smb.c
358
if (pg_offset >= PAGE_SIZE) {
drivers/hwtracing/intel_th/core.c
211
return scnprintf(buf, PAGE_SIZE, "%u\n", thdev->output.port);
drivers/hwtracing/intel_th/core.c
213
return scnprintf(buf, PAGE_SIZE, "unassigned\n");
drivers/hwtracing/intel_th/core.c
302
return scnprintf(buf, PAGE_SIZE, "%d\n", thdev->output.active);
drivers/hwtracing/intel_th/msu-sink.c
62
nents = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/hwtracing/intel_th/msu-sink.c
72
PAGE_SIZE, &sg_dma_address(sg_ptr),
drivers/hwtracing/intel_th/msu-sink.c
77
sg_set_buf(sg_ptr, block, PAGE_SIZE);
drivers/hwtracing/intel_th/msu-sink.c
91
dma_free_coherent(priv->dev->parent->parent, PAGE_SIZE,
drivers/hwtracing/intel_th/msu.c
1014
PAGE_SIZE, &sg_dma_address(sg_ptr),
drivers/hwtracing/intel_th/msu.c
1019
sg_set_buf(sg_ptr, block, PAGE_SIZE);
drivers/hwtracing/intel_th/msu.c
1026
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
drivers/hwtracing/intel_th/msu.c
1160
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
drivers/hwtracing/intel_th/msu.c
1822
return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
drivers/hwtracing/intel_th/msu.c
1867
ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
drivers/hwtracing/intel_th/msu.c
1968
count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
drivers/hwtracing/intel_th/msu.c
1971
count += scnprintf(buf + count, PAGE_SIZE - count,
drivers/hwtracing/intel_th/msu.c
1976
count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
drivers/hwtracing/intel_th/msu.c
976
for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
drivers/hwtracing/intel_th/msu.h
68
#define DATA_IN_PAGE (PAGE_SIZE - MSC_BDESC)
drivers/hwtracing/intel_th/pti.c
113
return scnprintf(buf, PAGE_SIZE, "%d\n", 1u << pti->clkdiv);
drivers/hwtracing/intel_th/pti.c
261
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
drivers/hwtracing/intel_th/pti.c
54
return scnprintf(buf, PAGE_SIZE, "%d\n", pti_mode[pti->mode]);
drivers/hwtracing/intel_th/pti.c
85
return scnprintf(buf, PAGE_SIZE, "%d\n", pti->freeclk);
drivers/hwtracing/ptt/hisi_ptt.c
1043
if (nr_pages < HISI_PTT_TRACE_TOTAL_BUF_SIZE / PAGE_SIZE)
drivers/hwtracing/ptt/hisi_ptt.c
1064
buf->length = nr_pages * PAGE_SIZE;
drivers/hwtracing/stm/core.c
626
if (count + 1 > PAGE_SIZE)
drivers/hwtracing/stm/core.c
627
count = PAGE_SIZE - 1;
drivers/hwtracing/stm/core.c
759
wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
drivers/i2c/i2c-core-base.c
679
len = of_device_modalias(dev, buf, PAGE_SIZE);
drivers/i2c/i2c-core-base.c
683
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
drivers/i2c/muxes/i2c-demux-pinctrl.c
169
for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
drivers/iio/accel/adxl372.c
970
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/iio/accel/bma180.c
483
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/iio/accel/mma8452.c
267
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
drivers/iio/accel/mma8452.c
424
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", val);
drivers/iio/adc/ad7606.c
820
len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ",
drivers/iio/adc/ad7606.c
993
len += scnprintf(buf + len, PAGE_SIZE - len, "%u ", vals[i]);
drivers/iio/adc/at91-sama5d2_adc.c
2018
sample_size * 2, PAGE_SIZE);
drivers/iio/adc/at91-sama5d2_adc.c
2031
pages * PAGE_SIZE,
drivers/iio/adc/at91-sama5d2_adc.c
2058
dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
drivers/iio/adc/at91-sama5d2_adc.c
2073
sample_size * 2, PAGE_SIZE);
drivers/iio/adc/at91-sama5d2_adc.c
2082
dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
drivers/iio/adc/nau7802.c
89
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09d ",
drivers/iio/adc/pac1921.c
843
if (len >= PAGE_SIZE)
drivers/iio/adc/pac1921.c
848
if (len >= PAGE_SIZE)
drivers/iio/adc/stm32-adc.c
49
#define STM32_DMA_BUFFER_SIZE PAGE_SIZE
drivers/iio/adc/stm32-dfsdm-adc.c
32
#define DFSDM_DMA_BUFFER_SIZE (4 * PAGE_SIZE)
drivers/iio/adc/stm32-dfsdm-adc.c
749
return snprintf(buf, PAGE_SIZE, "%d\n", adc->spi_freq);
drivers/iio/adc/ti-tsc2046.c
290
if (sizeof(struct tsc2046_adc_atom) * max_count > PAGE_SIZE)
drivers/iio/adc/ti-tsc2046.c
679
if (size > PAGE_SIZE) {
drivers/iio/adc/vf610_adc.c
611
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/iio/buffer/industrialio-buffer-dma.c
831
queue->buffer.length = PAGE_SIZE;
drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
468
return snprintf(buf, PAGE_SIZE, "%d\n", st->param.info.sensor_num);
drivers/iio/common/st_sensors/st_sensors_core.c
627
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
drivers/iio/common/st_sensors/st_sensors_core.c
650
len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
drivers/iio/dac/ad3552r-hs.c
594
len += scnprintf(buf + len, PAGE_SIZE - len, "%s ",
drivers/iio/health/afe440x.h
125
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ", \
drivers/iio/humidity/hts221_core.c
203
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
drivers/iio/humidity/hts221_core.c
220
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
drivers/iio/humidity/hts221_core.c
237
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
683
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
701
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
drivers/iio/industrialio-core.c
735
if (len >= PAGE_SIZE - 1)
drivers/iio/industrialio-core.c
812
if (len >= PAGE_SIZE)
drivers/iio/industrialio-core.c
817
if (len >= PAGE_SIZE)
drivers/iio/light/bh1750.c
202
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06d ",
drivers/iio/light/cm3232.c
291
len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ",
drivers/iio/light/cm3232.c
294
return len + scnprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/iio/light/ltr501.c
1113
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
drivers/iio/light/ltr501.c
1135
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
drivers/iio/light/lv0104cs.c
400
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
drivers/iio/light/lv0104cs.c
417
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
drivers/iio/light/lv0104cs.c
434
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
drivers/iio/light/tcs3472.c
411
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06d ",
drivers/iio/light/tsl2772.c
931
return scnprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
drivers/iio/light/tsl2772.c
985
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%u,%u,",
drivers/iio/light/tsl2772.c
999
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n");
drivers/iio/light/zopt2201.c
423
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06lu ",
drivers/iio/light/zopt2201.c
440
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ",
drivers/iio/light/zopt2201.c
456
len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ",
drivers/iio/magnetometer/bmc150_magn.c
592
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
drivers/iio/magnetometer/hmc5843_core.c
277
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/iio/magnetometer/hmc5843_core.c
338
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/iio/magnetometer/mag3110.c
120
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/iio/multiplexer/iio-mux.c
208
if (len >= PAGE_SIZE)
drivers/iio/multiplexer/iio-mux.c
249
char *page __free(kfree) = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/iio/multiplexer/iio-mux.c
276
if (ret >= PAGE_SIZE) {
drivers/iio/test/iio-test-format.c
112
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/iio/test/iio-test-format.c
152
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/iio/test/iio-test-format.c
192
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/iio/test/iio-test-format.c
207
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/iio/test/iio-test-format.c
21
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/iio/test/iio-test-format.c
51
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/iio/test/iio-test-rescale.c
649
char *buff = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/iio/test/iio-test-rescale.c
681
char *buff_off = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
drivers/iio/trigger/stm32-timer-trigger.c
361
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/iio/trigger/stm32-timer-trigger.c
681
return snprintf(buf, PAGE_SIZE, "%u\n", arr);
drivers/infiniband/core/ib_core_uverbs.c
175
if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
drivers/infiniband/core/ib_core_uverbs.c
314
npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
drivers/infiniband/core/rw.c
107
ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
drivers/infiniband/core/umem.c
238
PAGE_SIZE /
drivers/infiniband/core/umem.c
246
cur_base += pinned * PAGE_SIZE;
drivers/infiniband/core/umem.c
63
DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
drivers/infiniband/core/umem_dmabuf.c
39
start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
drivers/infiniband/core/umem_dmabuf.c
41
PAGE_SIZE);
drivers/infiniband/core/umem_odp.c
92
if (!(nr_entries * PAGE_SIZE / page_size))
drivers/infiniband/core/uverbs_ioctl.c
639
if (hdr.length > PAGE_SIZE ||
drivers/infiniband/core/uverbs_ioctl.c
93
WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE);
drivers/infiniband/hw/bng_re/bng_fw.c
74
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bng_re/bng_fw.h
51
npages = BNG_FW_CMDQE_BYTES(depth) / PAGE_SIZE;
drivers/infiniband/hw/bng_re/bng_fw.h
52
if (BNG_FW_CMDQE_BYTES(depth) % PAGE_SIZE)
drivers/infiniband/hw/bng_re/bng_fw.h
59
return (bng_fw_cmdqe_npages(depth) * PAGE_SIZE);
drivers/infiniband/hw/bng_re/bng_res.c
182
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bng_re/bng_res.c
227
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bng_re/bng_res.h
11
#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1156
qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1169
qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1263
qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1275
qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1335
rq->sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1412
qplqp->sq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1879
qplib_srq->sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1935
srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2781
wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
drivers/infiniband/hw/bnxt_re/ib_verbs.c
3176
cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
3332
cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4051
PAGE_SIZE, false);
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4395
resp.pg_size = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4604
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4610
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4619
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4727
length = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4737
length = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4743
length = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
482
#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4848
u32 length = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
621
BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE,
drivers/infiniband/hw/bnxt_re/ib_verbs.c
665
&entry->rdma_entry, PAGE_SIZE, 0);
drivers/infiniband/hw/bnxt_re/ib_verbs.c
673
&entry->rdma_entry, PAGE_SIZE);
drivers/infiniband/hw/bnxt_re/main.c
869
memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1098
BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1099
req_size &= ~(PAGE_SIZE - 1);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1120
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1121
req_size &= ~(PAGE_SIZE - 1);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1846
pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1847
pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
602
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
950
if (!IS_ALIGNED(fpsne, PAGE_SIZE))
drivers/infiniband/hw/bnxt_re/qplib_fp.h
359
#define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
drivers/infiniband/hw/bnxt_re/qplib_fp.h
489
#define NQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
944
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
82
npages = BNXT_QPLIB_CMDQE_BYTES(depth) / PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
83
if (BNXT_QPLIB_CMDQE_BYTES(depth) % PAGE_SIZE)
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
90
return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
drivers/infiniband/hw/bnxt_re/qplib_res.c
292
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_res.c
375
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_res.c
488
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_res.c
709
dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_res.c
711
umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_res.c
718
dpit->ucreg.offset + bit_num * PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_res.c
723
dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
drivers/infiniband/hw/bnxt_re/qplib_res.c
726
dpi->dbr = ioremap(umaddr, PAGE_SIZE);
drivers/infiniband/hw/bnxt_re/qplib_res.c
789
dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_res.c
923
ucreg->len = ucreg->offset + PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_res.c
924
if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
drivers/infiniband/hw/bnxt_re/qplib_res.h
101
#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
drivers/infiniband/hw/bnxt_re/qplib_sp.c
632
pg_size = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_sp.c
637
pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_sp.c
642
req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
drivers/infiniband/hw/bnxt_re/qplib_sp.c
689
sginfo.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/qplib_sp.c
694
hwq_attr.stride = PAGE_SIZE;
drivers/infiniband/hw/cxgb4/cq.c
1073
memsize = roundup(memsize, PAGE_SIZE);
drivers/infiniband/hw/cxgb4/cq.c
1112
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/cq.c
1114
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/cq.c
1138
mm2->len = PAGE_SIZE;
drivers/infiniband/hw/cxgb4/device.c
812
if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
drivers/infiniband/hw/cxgb4/device.c
819
factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
drivers/infiniband/hw/cxgb4/mem.c
554
if (i == PAGE_SIZE / sizeof(*pages)) {
drivers/infiniband/hw/cxgb4/provider.c
101
uresp.status_page_size = PAGE_SIZE;
drivers/infiniband/hw/cxgb4/provider.c
105
context->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/provider.c
115
mm->len = PAGE_SIZE;
drivers/infiniband/hw/cxgb4/provider.c
146
if (vma->vm_start & (PAGE_SIZE-1))
drivers/infiniband/hw/cxgb4/qp.c
2175
qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
drivers/infiniband/hw/cxgb4/qp.c
2178
roundup(qhp->wq.rq.memsize, PAGE_SIZE);
drivers/infiniband/hw/cxgb4/qp.c
2268
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2271
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2274
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2277
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2280
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2307
sq_db_key_mm->len = PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2315
rq_db_key_mm->len = PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2327
ma_sync_key_mm->len = PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2745
srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
drivers/infiniband/hw/cxgb4/qp.c
2776
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2778
ucontext->key += PAGE_SIZE;
drivers/infiniband/hw/cxgb4/qp.c
2792
srq_db_key_mm->len = PAGE_SIZE;
drivers/infiniband/hw/efa/efa_verbs.c
1120
PAGE_SIZE, EFA_MMAP_IO_NC,
drivers/infiniband/hw/efa/efa_verbs.c
1346
sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
drivers/infiniband/hw/efa/efa_verbs.c
1347
buf += PAGE_SIZE / sizeof(*buf);
drivers/infiniband/hw/efa/efa_verbs.c
1504
BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
drivers/infiniband/hw/efa/efa_verbs.c
2045
entry->address, rdma_entry->npages * PAGE_SIZE,
drivers/infiniband/hw/efa/efa_verbs.c
2052
entry->rdma_entry.npages * PAGE_SIZE,
drivers/infiniband/hw/efa/efa_verbs.c
2058
entry->rdma_entry.npages * PAGE_SIZE,
drivers/infiniband/hw/efa/efa_verbs.c
2064
va += PAGE_SIZE, pfn++) {
drivers/infiniband/hw/efa/efa_verbs.c
2078
entry->address, rdma_entry->npages * PAGE_SIZE,
drivers/infiniband/hw/efa/efa_verbs.c
231
props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
drivers/infiniband/hw/efa/efa_verbs.c
561
PAGE_SIZE, EFA_MMAP_IO_NC,
drivers/infiniband/hw/efa/efa_verbs.c
587
address, PAGE_SIZE,
drivers/infiniband/hw/erdma/erdma_verbs.c
1179
mr->mem.page_size = PAGE_SIZE; /* update it later. */
drivers/infiniband/hw/erdma/erdma_verbs.c
1433
err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
drivers/infiniband/hw/erdma/erdma_verbs.c
1553
ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
drivers/infiniband/hw/erdma/erdma_verbs.c
1560
ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
drivers/infiniband/hw/erdma/erdma_verbs.c
1567
ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
drivers/infiniband/hw/erdma/erdma_verbs.c
634
dma_unmap_page(&dev->pdev->dev, pg_dma[i], PAGE_SIZE,
drivers/infiniband/hw/erdma/erdma_verbs.c
683
pg_dma[i] = dma_map_page(&dev->pdev->dev, pg, 0, PAGE_SIZE,
drivers/infiniband/hw/erdma/erdma_verbs.c
688
addr += PAGE_SIZE;
drivers/infiniband/hw/erdma/erdma_verbs.c
731
mtt->size = ALIGN(size, PAGE_SIZE);
drivers/infiniband/hw/erdma/erdma_verbs.c
900
dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
drivers/infiniband/hw/hfi1/debugfs.c
486
size = PAGE_SIZE;
drivers/infiniband/hw/hfi1/debugfs.c
685
tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/hfi1/debugfs.c
689
ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
drivers/infiniband/hw/hfi1/driver.c
203
*encoded = ilog2(size / PAGE_SIZE) + 1;
drivers/infiniband/hw/hfi1/file_ops.c
1043
uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
drivers/infiniband/hw/hfi1/file_ops.c
1268
(u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
drivers/infiniband/hw/hfi1/file_ops.c
389
memlen = PAGE_SIZE;
drivers/infiniband/hw/hfi1/file_ops.c
469
memlen = PAGE_SIZE;
drivers/infiniband/hw/hfi1/file_ops.c
481
memlen = PAGE_SIZE;
drivers/infiniband/hw/hfi1/file_ops.c
495
memlen = PAGE_SIZE;
drivers/infiniband/hw/hfi1/file_ops.c
511
memlen = PAGE_SIZE;
drivers/infiniband/hw/hfi1/file_ops.c
518
memlen = PAGE_SIZE;
drivers/infiniband/hw/hfi1/init.c
1096
dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
drivers/infiniband/hw/hfi1/init.c
1778
PAGE_SIZE,
drivers/infiniband/hw/hfi1/init.c
913
dd->status = vmalloc_user(PAGE_SIZE);
drivers/infiniband/hw/hfi1/ipoib_rx.c
57
if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE))
drivers/infiniband/hw/hfi1/pin_system.c
207
u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
drivers/infiniband/hw/hfi1/pin_system.c
310
page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
drivers/infiniband/hw/hfi1/pin_system.c
311
from_this_page = PAGE_SIZE - page_offset;
drivers/infiniband/hw/hfi1/sdma.c
1043
return ret ? : strnlen(buf, PAGE_SIZE);
drivers/infiniband/hw/hfi1/sdma.c
1050
snprintf(buf, PAGE_SIZE, "%s\n", "empty");
drivers/infiniband/hw/hfi1/sdma.c
1054
return strnlen(buf, PAGE_SIZE);
drivers/infiniband/hw/hfi1/sysfs.c
60
static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
drivers/infiniband/hw/hfi1/sysfs.c
96
static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
drivers/infiniband/hw/hfi1/tid_rdma.c
1039
if (v1 != (v0 + PAGE_SIZE)) {
drivers/infiniband/hw/hfi1/tid_rdma.c
1056
if (vm1 && v0 != (vm1 + PAGE_SIZE)) {
drivers/infiniband/hw/hfi1/tid_rdma.c
1094
u32 len = PAGE_SIZE;
drivers/infiniband/hw/hfi1/tid_rdma.c
1134
PAGE_SIZE * pset->count,
drivers/infiniband/hw/hfi1/tid_rdma.c
1153
PAGE_SIZE * pset->count,
drivers/infiniband/hw/hfi1/tid_rdma.c
1932
if (tidlen * PAGE_SIZE < len)
drivers/infiniband/hw/hfi1/tid_rdma.c
3094
tidlen = EXP_TID_GET(tidentry, LEN) * PAGE_SIZE;
drivers/infiniband/hw/hfi1/tid_rdma.c
3120
EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
drivers/infiniband/hw/hfi1/tid_rdma.c
4150
if (tidlen * PAGE_SIZE < flow->length) {
drivers/infiniband/hw/hfi1/tid_rdma.c
65
#define MAX_EXPECTED_PAGES (MAX_EXPECTED_BUFFER / PAGE_SIZE)
drivers/infiniband/hw/hfi1/tid_rdma.c
907
if (this_vaddr != (vaddr + PAGE_SIZE)) {
drivers/infiniband/hw/hfi1/tid_rdma.c
922
u32 bufsize = pagecount * PAGE_SIZE;
drivers/infiniband/hw/hfi1/tid_rdma.c
946
vaddr += PAGE_SIZE;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
142
node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
278
tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
440
tinfo->length = mapped_pages * PAGE_SIZE;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
600
u32 bufsize = pagecount * PAGE_SIZE;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
741
npages * PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
762
tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
779
dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE,
drivers/infiniband/hw/hfi1/user_pages.c
41
DIV_ROUND_DOWN_ULL(rlimit(RLIMIT_MEMLOCK), PAGE_SIZE);
drivers/infiniband/hw/hfi1/user_pages.c
73
cache_limit_pages = cache_size * (1024 * 1024) / PAGE_SIZE;
drivers/infiniband/hw/hfi1/user_sdma.c
1085
PAGE_SIZE)) {
drivers/infiniband/hw/hfi1/user_sdma.c
1097
PAGE_SIZE) >=
drivers/infiniband/hw/hfi1/user_sdma.c
593
PAGE_SIZE;
drivers/infiniband/hw/hfi1/user_sdma.c
603
LEN) * PAGE_SIZE;
drivers/infiniband/hw/hfi1/user_sdma.c
867
tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
drivers/infiniband/hw/hfi1/user_sdma.c
982
PAGE_SIZE)) {
drivers/infiniband/hw/hfi1/user_sdma.c
994
omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
drivers/infiniband/hw/hfi1/verbs.c
1305
rdi->dparms.props.page_size_cap = PAGE_SIZE;
drivers/infiniband/hw/hns/hns_roce_alloc.c
89
buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
drivers/infiniband/hw/hns/hns_roce_alloc.c
92
buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
drivers/infiniband/hw/hns/hns_roce_db.c
173
dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
drivers/infiniband/hw/hns/hns_roce_db.c
33
PAGE_SIZE, 0);
drivers/infiniband/hw/hns/hns_roce_db.c
83
pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
drivers/infiniband/hw/hns/hns_roce_device.h
170
HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
drivers/infiniband/hw/hns/hns_roce_hem.c
259
if (PAGE_SIZE << order != hem_alloc_size) {
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2256
u64 bt_chunk_size = PAGE_SIZE;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2257
u64 buf_chunk_size = PAGE_SIZE;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2564
if (!(caps->page_size_cap & PAGE_SIZE))
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
56
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
57
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
drivers/infiniband/hw/hns/hns_roce_main.c
414
uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
drivers/infiniband/hw/hns/hns_roce_main.c
553
ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
drivers/infiniband/hw/ionic/ionic_controlpath.c
1057
rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, max_sg, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
139
rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1470
hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1566
query_sqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1570
query_rqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1576
query_sqdma = dma_map_single(dev->lif_cfg.hwdev, query_sqbuf, PAGE_SIZE,
drivers/infiniband/hw/ionic/ionic_controlpath.c
1582
query_rqdma = dma_map_single(dev->lif_cfg.hwdev, query_rqbuf, PAGE_SIZE,
drivers/infiniband/hw/ionic/ionic_controlpath.c
1589
hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1596
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1655
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1732
qp->sq_cmb_order = order_base_2(qp->sq.size / PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1893
1, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1896
qp->sq_umem, qp->sq.dma, 1, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1958
qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2103
1, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2106
qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
23
if (q->addr & (PAGE_SIZE - 1))
drivers/infiniband/hw/ionic/ionic_controlpath.c
387
ctx->mmap_dbell = ionic_mmap_entry_insert(ctx, PAGE_SIZE,
drivers/infiniband/hw/ionic/ionic_controlpath.c
672
hdr_buf = kmalloc(PAGE_SIZE, gfp);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
119
dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
126
dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
134
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
139
rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
drivers/infiniband/hw/ionic/ionic_hw_stats.c
144
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
149
PAGE_SIZE / sizeof(*dev->hw_stats));
drivers/infiniband/hw/ionic/ionic_hw_stats.c
170
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
208
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
213
rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
drivers/infiniband/hw/ionic/ionic_hw_stats.c
219
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
224
dev->hw_stats_buf, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
230
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
247
cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
333
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
341
rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
drivers/infiniband/hw/ionic/ionic_hw_stats.c
351
PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
354
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
358
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
375
cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
380
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
385
rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
drivers/infiniband/hw/ionic/ionic_hw_stats.c
390
dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
394
PAGE_SIZE / sizeof(*cs->hdr));
drivers/infiniband/hw/ionic/ionic_hw_stats.c
414
dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/ionic/ionic_ibdev.c
32
attr->max_mr_size = dev->lif_cfg.npts_per_lif * PAGE_SIZE / 2;
drivers/infiniband/hw/irdma/hw.c
1472
pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
drivers/infiniband/hw/irdma/hw.c
1512
aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
drivers/infiniband/hw/irdma/hw.c
703
u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
drivers/infiniband/hw/irdma/utils.c
2207
pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
drivers/infiniband/hw/irdma/utils.c
2212
addr += PAGE_SIZE;
drivers/infiniband/hw/irdma/utils.c
2227
dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/infiniband/hw/irdma/utils.c
2264
size = PAGE_SIZE * pg_cnt;
drivers/infiniband/hw/irdma/verbs.c
138
if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
drivers/infiniband/hw/irdma/verbs.c
145
return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/irdma/verbs.c
170
&entry->rdma_entry, PAGE_SIZE);
drivers/infiniband/hw/irdma/verbs.c
217
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/irdma/verbs.c
222
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/irdma/verbs.c
3023
info->page_size = PAGE_SIZE;
drivers/infiniband/hw/irdma/verbs.c
3121
info->page_size = PAGE_SIZE;
drivers/infiniband/hw/irdma/verbs.c
3176
iwmr->len = max_num_sg * PAGE_SIZE;
drivers/infiniband/hw/mana/main.c
181
req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
drivers/infiniband/hw/mana/main.c
539
ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
drivers/infiniband/hw/mana/main.c
545
pfn, PAGE_SIZE, ret);
drivers/infiniband/hw/mlx4/cq.c
106
PAGE_SIZE * 2, &buf->buf);
drivers/infiniband/hw/mlx4/doorbell.c
68
PAGE_SIZE, 0);
drivers/infiniband/hw/mlx4/main.c
1160
PAGE_SIZE,
drivers/infiniband/hw/mlx4/main.c
1171
PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
drivers/infiniband/hw/mlx4/main.c
1188
PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
drivers/infiniband/hw/mlx4/main.c
2655
PAGE_SIZE);
drivers/infiniband/hw/mlx4/main.c
584
resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
drivers/infiniband/hw/mlx4/mr.c
255
n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
drivers/infiniband/hw/mlx4/qp.c
1165
if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2,
drivers/infiniband/hw/mlx4/srq.c
123
dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
drivers/infiniband/hw/mlx4/srq.c
142
if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
drivers/infiniband/hw/mlx5/devx.c
2307
page_size > PAGE_SIZE)
drivers/infiniband/hw/mlx5/dm.c
20
u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
drivers/infiniband/hw/mlx5/dm.c
39
MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
drivers/infiniband/hw/mlx5/dm.c
61
hw_start_addr + (page_idx * PAGE_SIZE));
drivers/infiniband/hw/mlx5/dm.c
92
u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
drivers/infiniband/hw/mlx5/doorbell.c
70
PAGE_SIZE, 0);
drivers/infiniband/hw/mlx5/main.c
1888
if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
drivers/infiniband/hw/mlx5/main.c
2142
if (PAGE_SIZE <= 4096) {
drivers/infiniband/hw/mlx5/main.c
2147
internal_timer_h) % PAGE_SIZE;
drivers/infiniband/hw/mlx5/main.c
2390
return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
drivers/infiniband/hw/mlx5/main.c
2439
if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
drivers/infiniband/hw/mlx5/main.c
2489
phys_vec->len = entry->npages * PAGE_SIZE;
drivers/infiniband/hw/mlx5/main.c
2568
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
drivers/infiniband/hw/mlx5/main.c
2633
err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/mlx5/main.c
2693
entry->npages * PAGE_SIZE,
drivers/infiniband/hw/mlx5/main.c
2731
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
drivers/infiniband/hw/mlx5/main.c
2739
if (PAGE_SIZE > 4096)
drivers/infiniband/hw/mlx5/main.c
2746
PAGE_SIZE,
drivers/infiniband/hw/mlx5/main.c
4208
length = entry->rdma_entry.npages * PAGE_SIZE;
drivers/infiniband/hw/mlx5/main.c
4288
err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
drivers/infiniband/hw/mlx5/main.c
4335
length = entry->rdma_entry.npages * PAGE_SIZE;
drivers/infiniband/hw/mlx5/mlx5_ib.h
788
((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))
drivers/infiniband/hw/mlx5/mlx5_ib.h
794
static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);
drivers/infiniband/hw/mlx5/mr.c
1137
return PAGE_SIZE;
drivers/infiniband/hw/mlx5/odp.c
1041
pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
drivers/infiniband/hw/mlx5/odp.c
1412
ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
drivers/infiniband/hw/mlx5/odp.c
1419
ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
drivers/infiniband/hw/mlx5/odp.c
1428
ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
drivers/infiniband/hw/mlx5/odp.c
611
if (!mlx5r_umr_can_load_pas(dev, mlx5_imr_mtt_entries * PAGE_SIZE))
drivers/infiniband/hw/mlx5/qp.c
1141
int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) /
drivers/infiniband/hw/mlx5/qp.c
1144
(qp->sq.offset / PAGE_SIZE),
drivers/infiniband/hw/mlx5/restrack.c
31
key.size = PAGE_SIZE;
drivers/infiniband/hw/mlx5/umr.c
501
static_assert(PAGE_SIZE % MLX5_UMR_FLEX_ALIGNMENT == 0);
drivers/infiniband/hw/mlx5/umr.c
533
*nents = PAGE_SIZE / ent_size;
drivers/infiniband/hw/mlx5/umr.c
539
memset(xlt_emergency_page, 0, PAGE_SIZE);
drivers/infiniband/hw/mlx5/umr.h
41
length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
drivers/infiniband/hw/mthca/mthca_allocator.c
111
#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)
drivers/infiniband/hw/mthca/mthca_allocator.c
157
int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
drivers/infiniband/hw/mthca/mthca_allocator.c
176
for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
drivers/infiniband/hw/mthca/mthca_allocator.c
225
npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
drivers/infiniband/hw/mthca/mthca_allocator.c
242
dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/mthca/mthca_allocator.c
288
for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
drivers/infiniband/hw/mthca/mthca_allocator.c
289
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/mthca/mthca_cmd.c
1595
*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
drivers/infiniband/hw/mthca/mthca_cmd.c
860
ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
drivers/infiniband/hw/mthca/mthca_cq.c
165
return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
drivers/infiniband/hw/mthca/mthca_cq.c
166
+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
drivers/infiniband/hw/mthca/mthca_cq.c
50
MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
drivers/infiniband/hw/mthca/mthca_eq.c
231
return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
drivers/infiniband/hw/mthca/mthca_eq.c
480
npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
drivers/infiniband/hw/mthca/mthca_eq.c
500
PAGE_SIZE, &t, GFP_KERNEL);
drivers/infiniband/hw/mthca/mthca_eq.c
519
0, npages * PAGE_SIZE,
drivers/infiniband/hw/mthca/mthca_eq.c
572
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/mthca/mthca_eq.c
592
int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
drivers/infiniband/hw/mthca/mthca_eq.c
593
PAGE_SIZE;
drivers/infiniband/hw/mthca/mthca_eq.c
619
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/mthca/mthca_eq.c
743
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/infiniband/hw/mthca/mthca_eq.c
752
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/infiniband/hw/mthca/mthca_eq.c
762
dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma, PAGE_SIZE,
drivers/infiniband/hw/mthca/mthca_main.c
175
if (dev_lim->min_page_sz > PAGE_SIZE) {
drivers/infiniband/hw/mthca/mthca_main.c
178
dev_lim->min_page_sz, PAGE_SIZE);
drivers/infiniband/hw/mthca/mthca_main.c
320
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
drivers/infiniband/hw/mthca/mthca_main.c
610
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
drivers/infiniband/hw/mthca/mthca_main.c
714
dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/infiniband/hw/mthca/mthca_memfree.c
119
sg_set_page(mem, page, PAGE_SIZE << order, 0);
drivers/infiniband/hw/mthca/mthca_memfree.c
126
void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
drivers/infiniband/hw/mthca/mthca_memfree.c
131
sg_set_buf(mem, buf, PAGE_SIZE << order);
drivers/infiniband/hw/mthca/mthca_memfree.c
133
sg_dma_len(mem) = PAGE_SIZE << order;
drivers/infiniband/hw/mthca/mthca_mr.c
306
return PAGE_SIZE / sizeof (u64) - 2;
drivers/infiniband/hw/mthca/mthca_mr.c
309
return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
drivers/infiniband/hw/mthca/mthca_mr.c
336
BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
drivers/infiniband/hw/mthca/mthca_profile.c
117
profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
drivers/infiniband/hw/mthca/mthca_provider.c
335
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
drivers/infiniband/hw/mthca/mthca_provider.c
342
PAGE_SIZE, vma->vm_page_prot))
drivers/infiniband/hw/mthca/mthca_provider.c
867
n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE);
drivers/infiniband/hw/mthca/mthca_provider.c
883
write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
drivers/infiniband/hw/mthca/mthca_provider.c
885
rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
drivers/infiniband/hw/mthca/mthca_qp.c
214
((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
drivers/infiniband/hw/mthca/mthca_qp.c
227
(PAGE_SIZE - 1));
drivers/infiniband/hw/mthca/mthca_qp.c
53
MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
drivers/infiniband/hw/mthca/mthca_srq.c
47
MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
drivers/infiniband/hw/mthca/mthca_srq.c
80
((n << srq->wqe_shift) & (PAGE_SIZE - 1));
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1663
if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1674
dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1691
pa += PAGE_SIZE;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1708
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1734
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
473
u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
549
if (vma->vm_start & (PAGE_SIZE - 1))
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
605
(pd->id * PAGE_SIZE);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
607
PAGE_SIZE);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
623
ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
679
(pd->id * PAGE_SIZE);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
681
ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
829
rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
874
dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
878
mr->hwmr.pbe_size = PAGE_SIZE;
drivers/infiniband/hw/qedr/main.c
587
if (page_size > PAGE_SIZE) {
drivers/infiniband/hw/qedr/main.c
590
PAGE_SIZE, page_size);
drivers/infiniband/hw/qedr/verbs.c
1592
page_size = PAGE_SIZE;
drivers/infiniband/hw/qedr/verbs.c
2986
ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
drivers/infiniband/hw/qedr/verbs.c
738
aligned_size = ALIGN(size, PAGE_SIZE);
drivers/infiniband/hw/qedr/verbs.c
769
entry->length = PAGE_SIZE;
drivers/infiniband/hw/qedr/verbs.c
773
PAGE_SIZE);
drivers/infiniband/hw/usnic/usnic_uiom.c
142
PAGE_SIZE / sizeof(struct page *)),
drivers/infiniband/hw/usnic/usnic_uiom.c
163
PAGE_SIZE, 0);
drivers/infiniband/hw/usnic/usnic_uiom.c
166
cur_base + i*PAGE_SIZE, &pa);
drivers/infiniband/hw/usnic/usnic_uiom.c
168
cur_base += chunk->nents * PAGE_SIZE;
drivers/infiniband/hw/usnic/usnic_uiom.c
200
usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
drivers/infiniband/hw/usnic/usnic_uiom.c
201
iommu_unmap(pd->domain, va, PAGE_SIZE);
drivers/infiniband/hw/usnic/usnic_uiom.c
202
va += PAGE_SIZE;
drivers/infiniband/hw/usnic/usnic_uiom.c
203
size -= PAGE_SIZE;
drivers/infiniband/hw/usnic/usnic_uiom.c
258
for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
drivers/infiniband/hw/usnic/usnic_uiom.c
272
if ((pa_end + PAGE_SIZE != pa) &&
drivers/infiniband/hw/usnic/usnic_uiom.c
275
size = pa_end - pa_start + PAGE_SIZE;
drivers/infiniband/hw/usnic/usnic_uiom.c
292
size = pa - pa_start + PAGE_SIZE;
drivers/infiniband/hw/usnic/usnic_uiom.c
306
pa_end += PAGE_SIZE;
drivers/infiniband/hw/usnic/usnic_uiom.c
50
((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
drivers/infiniband/hw/usnic/usnic_uiom.h
50
#define USNIC_UIOM_PAGE_SIZE (PAGE_SIZE)
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
322
return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
149
npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
153
PAGE_SIZE - 1) / PAGE_SIZE;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
156
cq->offset = PAGE_SIZE;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
417
PAGE_SIZE +
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
426
PAGE_SIZE / sizeof(struct pvrdma_eqe);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
493
PAGE_SIZE +
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
501
int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
601
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
604
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
851
ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
884
dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
894
dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
129
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
149
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
162
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
191
rdma_umem_for_each_dma_block (umem, &biter, PAGE_SIZE) {
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
62
pdir->dir = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
74
pdir->tables[i] = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
92
PAGE_SIZE,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
141
npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
150
qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
151
PAGE_SIZE;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
177
(qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
178
PAGE_SIZE;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
294
ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
297
qp->rumem, PAGE_SIZE);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
315
qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
318
qp->rq.offset = qp->npages_send * PAGE_SIZE;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
155
srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
376
if ((size != PAGE_SIZE) || (offset & ~PAGE_MASK)) {
drivers/infiniband/sw/rdmavt/mmap.c
132
rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
drivers/infiniband/sw/rdmavt/mmap.c
162
rdi->mmap_offset = PAGE_SIZE;
drivers/infiniband/sw/rdmavt/mmap.c
20
rdi->mmap_offset = PAGE_SIZE;
drivers/infiniband/sw/rdmavt/mr.c
385
mr->mr.map[m]->segs[n].length = PAGE_SIZE;
drivers/infiniband/sw/rdmavt/mr.c
386
trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
drivers/infiniband/sw/rdmavt/qp.c
164
llc_bits = llc_size / PAGE_SIZE;
drivers/infiniband/sw/rdmavt/qp.c
165
table_bits = table_size / PAGE_SIZE;
drivers/infiniband/sw/rdmavt/qp.c
2823
cacheless_copy = length >= PAGE_SIZE;
drivers/infiniband/sw/rdmavt/qp.c
2825
if (length >= PAGE_SIZE) {
drivers/infiniband/sw/rdmavt/qp.c
2832
if (length >= (2 * PAGE_SIZE))
drivers/infiniband/sw/rdmavt/qp.c
2833
wss_insert(wss, (sge->vaddr + PAGE_SIZE));
drivers/infiniband/sw/rxe/rxe_mmap.c
132
rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
drivers/infiniband/sw/rxe/rxe_mr.c
111
if (mr_page_size(mr) > PAGE_SIZE)
drivers/infiniband/sw/rxe/rxe_mr.c
112
return iova & (PAGE_SIZE - 1);
drivers/infiniband/sw/rxe/rxe_mr.c
122
region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM,
drivers/infiniband/sw/rxe/rxe_mr.c
132
WARN_ON(mr_page_size(mr) != PAGE_SIZE);
drivers/infiniband/sw/rxe/rxe_mr.c
268
u64 addr = dma_addr + i * PAGE_SIZE;
drivers/infiniband/sw/rxe/rxe_mr.c
280
mr->page_info[mr->nbuf].offset = addr & (PAGE_SIZE - 1);
drivers/infiniband/sw/rxe/rxe_mr.c
298
if (!IS_ALIGNED(page_size, PAGE_SIZE) &&
drivers/infiniband/sw/rxe/rxe_mr.c
299
!IS_ALIGNED(PAGE_SIZE, page_size)) {
drivers/infiniband/sw/rxe/rxe_mr.c
301
page_size, PAGE_SIZE);
drivers/infiniband/sw/rxe/rxe_mr.c
305
if (mr_page_size(mr) > PAGE_SIZE) {
drivers/infiniband/sw/rxe/rxe_mr.c
340
bytes = min_t(unsigned int, length, PAGE_SIZE - page_offset);
drivers/infiniband/sw/rxe/rxe_mr.c
360
unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
drivers/infiniband/sw/rxe/rxe_mr.c
368
PAGE_SIZE - page_offset);
drivers/infiniband/sw/rxe/rxe_mr.c
526
bytes = min_t(unsigned int, length, PAGE_SIZE - page_offset);
drivers/infiniband/sw/rxe/rxe_mr.c
578
page_offset = iova & (PAGE_SIZE - 1);
drivers/infiniband/sw/rxe/rxe_mr.c
61
mr->ibmr.page_size = PAGE_SIZE;
drivers/infiniband/sw/rxe/rxe_mr.c
631
page_offset = iova & (PAGE_SIZE - 1);
drivers/infiniband/sw/rxe/rxe_mr.c
93
if (mr_page_size(mr) > PAGE_SIZE)
drivers/infiniband/sw/rxe/rxe_verbs.h
369
return mr ? mr->ibmr.page_size : PAGE_SIZE;
drivers/infiniband/sw/siw/siw.h
38
#define SENDPAGE_THRESH PAGE_SIZE
drivers/infiniband/sw/siw/siw_qp_rx.c
52
bytes = min(len, (int)PAGE_SIZE - pg_off);
drivers/infiniband/sw/siw/siw_qp_tx.c
334
size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);
drivers/infiniband/sw/siw/siw_qp_tx.c
336
if (size + offset <= PAGE_SIZE)
drivers/infiniband/sw/siw/siw_qp_tx.c
431
#define MAX_ARRAY ((0xffff / PAGE_SIZE) + 1 + (2 * (SIW_MAX_SGE - 1) + 2))
drivers/infiniband/sw/siw/siw_qp_tx.c
499
size_t plen = min((int)PAGE_SIZE - fp_off, sge_len);
drivers/infiniband/sw/siw/siw_qp_tx.c
85
if (likely(PAGE_SIZE - off >= bytes)) {
drivers/infiniband/sw/siw/siw_qp_tx.c
88
unsigned long part = bytes - (PAGE_SIZE - off);
drivers/infiniband/sw/siw/siw_verbs.c
1449
rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
drivers/infiniband/sw/siw/siw_verbs.c
161
attr->page_size_cap = PAGE_SIZE;
drivers/infiniband/sw/siw/siw_verbs.c
65
if (vma->vm_start & (PAGE_SIZE - 1)) {
drivers/infiniband/ulp/ipoib/ipoib.h
73
IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
drivers/infiniband/ulp/ipoib/ipoib.h
74
IPOIB_CM_RX_SG = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1608
priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
drivers/infiniband/ulp/ipoib/ipoib_cm.c
173
skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
176
0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
189
ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
340
sge[i].length = PAGE_SIZE;
drivers/infiniband/ulp/ipoib/ipoib_cm.c
546
0, PAGE_SIZE);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
549
size = min_t(unsigned int, length, PAGE_SIZE);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
645
PAGE_SIZE;
drivers/infiniband/ulp/ipoib/ipoib_cm.c
91
ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/ulp/iser/iscsi_iser.c
663
max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9;
drivers/infiniband/ulp/isert/ib_isert.c
1076
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
drivers/infiniband/ulp/isert/ib_isert.c
1141
sg_off = cmd->write_data_done / PAGE_SIZE;
drivers/infiniband/ulp/isert/ib_isert.c
1143
sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
drivers/infiniband/ulp/isert/ib_isert.c
1144
page_off = cmd->write_data_done % PAGE_SIZE;
drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
385
PAGE_SIZE);
drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
402
PAGE_SIZE);
drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
105
page, PAGE_SIZE);
drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
121
PAGE_SIZE);
drivers/infiniband/ulp/rtrs/rtrs-srv.c
27
#define MAX_HDR_SIZE PAGE_SIZE
drivers/input/input.c
1377
len = input_print_modalias(buf, PAGE_SIZE, id);
drivers/input/input.c
1378
if (len < PAGE_SIZE - 2)
drivers/input/input.c
1379
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/input/input.c
1381
return min_t(int, len, PAGE_SIZE);
drivers/input/input.c
1393
int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
drivers/input/input.c
1395
return min_t(int, len, PAGE_SIZE);
drivers/input/input.c
1512
int len = input_print_bitmap(buf, PAGE_SIZE, \
drivers/input/input.c
1515
return min_t(int, len, PAGE_SIZE); \
drivers/input/keyboard/atkbd.c
1537
size_t len = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
drivers/input/keyboard/gpio_keys.c
224
ret = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", n_events, bits);
drivers/input/misc/xen-kbdfront.c
403
memset(info->page, 0, PAGE_SIZE);
drivers/input/mouse/cyapa_gen6.c
658
memset(buf, 0, PAGE_SIZE);
drivers/input/rmi4/rmi_f34v7.c
651
(u16)(PAGE_SIZE / f34->v7.block_size));
drivers/input/rmi4/rmi_f34v7.c
722
if (f34->v7.payload_length > (PAGE_SIZE / f34->v7.block_size))
drivers/input/rmi4/rmi_f34v7.c
723
max_transfer = PAGE_SIZE / f34->v7.block_size;
drivers/input/touchscreen/raspberrypi-ts.c
117
dma_free_coherent(dev, PAGE_SIZE, ts->fw_regs_va, ts->fw_regs_phys);
drivers/input/touchscreen/raspberrypi-ts.c
145
ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
drivers/iommu/amd/amd_iommu_types.h
318
#define AMD_IOMMU_PGSIZES_4K (PAGE_SIZE)
drivers/iommu/amd/amd_iommu_types.h
321
#define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
drivers/iommu/amd/init.c
1017
iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
drivers/iommu/amd/init.c
4019
for (page = va; page < (va + size); page += PAGE_SIZE) {
drivers/iommu/amd/init.c
4045
ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
drivers/iommu/amd/init.c
858
set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
drivers/iommu/amd/iommu.c
1272
pages = iommu_num_pages(address, size, PAGE_SIZE);
drivers/iommu/apple-dart.c
1197
dart->pgsize > PAGE_SIZE, dart->ias, dart->oas);
drivers/iommu/apple-dart.c
596
if (dart->pgsize > PAGE_SIZE)
drivers/iommu/apple-dart.c
970
if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE)
drivers/iommu/apple-dart.c
992
PAGE_SIZE, prot,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
150
if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
161
PAGE_SIZE, false, smmu_domain);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
218
if (PAGE_SIZE != SZ_64K)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
229
if (!(smmu->pgsize_bitmap & PAGE_SIZE))
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
348
smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
41
static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
42
PAGE_SIZE == SZ_64K);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
43
if (PAGE_SIZE == SZ_64K)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
45
if (PAGE_SIZE == SZ_16K)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3787
if (q->base || qsz < PAGE_SIZE)
drivers/iommu/dma-iommu.c
2141
return PAGE_SIZE;
drivers/iommu/dma-iommu.c
947
if (min_size < PAGE_SIZE) {
drivers/iommu/dma-iommu.c
948
min_size = PAGE_SIZE;
drivers/iommu/dma-iommu.c
949
alloc_sizes |= PAGE_SIZE;
drivers/iommu/fsl_pamu.c
826
mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
drivers/iommu/fsl_pamu.c
827
(PAGE_SIZE << get_order(SPAACT_SIZE)) +
drivers/iommu/fsl_pamu.c
828
(PAGE_SIZE << get_order(OMT_SIZE));
drivers/iommu/fsl_pamu.c
842
if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
drivers/iommu/fsl_pamu.c
848
spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
drivers/iommu/fsl_pamu.c
849
omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
drivers/iommu/generic_pt/iommu_pt.h
653
map->oa += PAGE_SIZE;
drivers/iommu/generic_pt/iommu_pt.h
884
if (pt_has_system_page_size(common) && pgsize == PAGE_SIZE &&
drivers/iommu/generic_pt/iommu_pt.h
886
PT_WARN_ON(!(pgsize_bitmap & PAGE_SIZE));
drivers/iommu/intel/iommu.c
1488
old_ce = memremap(old_ce_phys, PAGE_SIZE,
drivers/iommu/intel/iommu.c
1557
old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
drivers/iommu/intel/iommu.c
1601
__iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
drivers/iommu/intel/iommu.c
1903
if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
drivers/iommu/intel/iommu.c
1904
!IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
drivers/iommu/intel/pasid.c
75
clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);
drivers/iommu/io-pgtable-arm.c
885
if (cfg->pgsize_bitmap & PAGE_SIZE)
drivers/iommu/io-pgtable-arm.c
886
granule = PAGE_SIZE;
drivers/iommu/iommufd/device.c
1168
access->iova_alignment = PAGE_SIZE;
drivers/iommu/iommufd/device.c
1201
access->iova_alignment = PAGE_SIZE;
drivers/iommu/iommufd/device.c
1372
if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
drivers/iommu/iommufd/device.c
1377
PAGE_SIZE) != (PAGE_SIZE - 1))
drivers/iommu/iommufd/device.c
1421
WARN_ON(access->iova_alignment != PAGE_SIZE ||
drivers/iommu/iommufd/driver.c
225
iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
drivers/iommu/iommufd/driver.c
230
msi_map->msi_addr, PAGE_SIZE,
drivers/iommu/iommufd/driver.c
295
iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
drivers/iommu/iommufd/driver.c
61
PAGE_SIZE, ULONG_MAX, GFP_KERNEL);
drivers/iommu/iommufd/io_pagetable.c
1161
if (new_iova_alignment > PAGE_SIZE) {
drivers/iommu/iommufd/io_pagetable.c
1221
new_iova_alignment = PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.c
123
unsigned long page_offset = addr % PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.c
126
unsigned long max_alignment = PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.c
157
PAGE_SIZE, ULONG_MAX - PAGE_SIZE) {
drivers/iommu/iommufd/io_pagetable.c
159
allowed_span.start_used = PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.c
160
allowed_span.last_used = ULONG_MAX - PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.c
220
area->page_offset = start_byte % PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.c
228
area->pages_node.start = start_byte / PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.c
231
area->pages_node.last = area->pages_node.last / PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.c
497
start_byte = start - ALIGN_DOWN(start, PAGE_SIZE);
drivers/iommu/iommufd/io_pagetable.h
120
iopt_area_index(area) * PAGE_SIZE;
drivers/iommu/iommufd/io_pagetable.h
126
return iopt_area_start_byte(area, iova) / PAGE_SIZE;
drivers/iommu/iommufd/iova_bitmap.c
11
#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
drivers/iommu/iommufd/iova_bitmap.c
180
sizeof(*bitmap->bitmap), PAGE_SIZE);
drivers/iommu/iommufd/iova_bitmap.c
193
PAGE_SIZE / sizeof(struct page *));
drivers/iommu/iommufd/pages.c
1102
unsigned long start = dmabuf->start_offset + start_index * PAGE_SIZE;
drivers/iommu/iommufd/pages.c
1372
if (length > SIZE_MAX - PAGE_SIZE || length == 0)
drivers/iommu/iommufd/pages.c
1384
pages->npages = DIV_ROUND_UP(length + start_byte, PAGE_SIZE);
drivers/iommu/iommufd/pages.c
1404
(void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE);
drivers/iommu/iommufd/pages.c
1535
length / PAGE_SIZE >= MAX_NPFNS)
drivers/iommu/iommufd/pages.c
210
return iopt_area_iova(area) - area->page_offset + index * PAGE_SIZE;
drivers/iommu/iommufd/pages.c
222
(index - iopt_area_index(area) + 1) * PAGE_SIZE - 1;
drivers/iommu/iommufd/pages.c
2312
pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE),
drivers/iommu/iommufd/pages.c
2344
unsigned long start_index = start_byte / PAGE_SIZE;
drivers/iommu/iommufd/pages.c
2345
unsigned long last_index = (start_byte + length - 1) / PAGE_SIZE;
drivers/iommu/iommufd/pages.c
2361
start_byte % PAGE_SIZE, data, length,
drivers/iommu/iommufd/pages.c
2367
start_byte % PAGE_SIZE, data,
drivers/iommu/iommufd/pages.c
2370
start_byte % PAGE_SIZE, data, length,
drivers/iommu/iommufd/pages.c
2382
start_byte % PAGE_SIZE, data,
drivers/iommu/iommufd/pages.c
428
iova += PAGE_SIZE - page_offset;
drivers/iommu/iommufd/pages.c
450
iova += PAGE_SIZE - page_offset;
drivers/iommu/iommufd/pages.c
487
WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE ||
drivers/iommu/iommufd/pages.c
488
size % PAGE_SIZE);
drivers/iommu/iommufd/pages.c
491
rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
drivers/iommu/iommufd/pages.c
495
iova += PAGE_SIZE;
drivers/iommu/iommufd/pages.c
496
paddr += PAGE_SIZE;
drivers/iommu/iommufd/pages.c
497
size -= PAGE_SIZE;
drivers/iommu/iommufd/pages.c
532
next_iova + batch->npfns[cur] * PAGE_SIZE -
drivers/iommu/iommufd/pages.c
748
unsigned long bytes = min(length, PAGE_SIZE - offset);
drivers/iommu/iommufd/pages.c
928
start = pages->start + (start_index * PAGE_SIZE);
drivers/iommu/iommufd/pages.c
931
uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
drivers/iommu/iommufd/pages.c
935
uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
drivers/iommu/iommufd/pages.c
95
*size = PAGE_SIZE;
drivers/iommu/iommufd/selftest.c
1250
pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
drivers/iommu/iommufd/selftest.c
1271
if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
drivers/iommu/iommufd/selftest.c
1275
for (; length; length -= PAGE_SIZE) {
drivers/iommu/iommufd/selftest.c
1294
uptr += PAGE_SIZE;
drivers/iommu/iommufd/selftest.c
1555
uptr += PAGE_SIZE;
drivers/iommu/iommufd/selftest.c
1592
npages = (ALIGN(iova + length, PAGE_SIZE) -
drivers/iommu/iommufd/selftest.c
1593
ALIGN_DOWN(iova, PAGE_SIZE)) /
drivers/iommu/iommufd/selftest.c
1594
PAGE_SIZE;
drivers/iommu/iommufd/selftest.c
1619
uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
drivers/iommu/iommufd/selftest.c
2031
len = ALIGN(len, PAGE_SIZE);
drivers/iommu/iommufd/selftest.c
2032
if (len == 0 || len > PAGE_SIZE * 512)
drivers/iommu/iommufd/selftest.c
475
PAGE_SIZE;
drivers/iommu/iommufd/selftest.c
846
PAGE_SIZE * 2,
drivers/iommu/iommufd/selftest.c
854
data.out_mmap_length = PAGE_SIZE * 2;
drivers/iommu/iommufd/vfio_compat.c
365
pgsize_bitmap |= PAGE_SIZE;
drivers/iommu/iommufd/viommu.c
305
if (check_add_overflow(length, PAGE_SIZE - 1, &length))
drivers/iommu/iommufd/viommu.c
307
max_npages = length / PAGE_SIZE;
drivers/iommu/iommufd/viommu.c
309
length = max_npages * PAGE_SIZE;
drivers/iommu/iova.c
46
BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
drivers/iommu/iova.c
598
return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
drivers/iommu/riscv/iommu.c
698
iommu->ddt_phys, PAGE_SIZE);
drivers/iommu/riscv/iommu.c
700
memset(iommu->ddt_root, 0, PAGE_SIZE);
drivers/iommu/riscv/iommu.c
976
for (iova = start; iova < end; iova += PAGE_SIZE) {
drivers/iommu/s390-iommu.c
874
page_addr += PAGE_SIZE;
drivers/iommu/s390-iommu.c
875
dma_addr += PAGE_SIZE;
drivers/iommu/s390-iommu.c
882
dma_addr -= PAGE_SIZE;
drivers/iommu/s390-iommu.c
906
dma_addr += PAGE_SIZE;
drivers/iommu/virtio-iommu.c
666
if (viommu_page_size > PAGE_SIZE) {
drivers/iommu/virtio-iommu.c
669
viommu_page_size, PAGE_SIZE);
drivers/ipack/devices/ipoctal.c
214
*pointer_write = *pointer_write % PAGE_SIZE;
drivers/ipack/devices/ipoctal.c
446
if (i <= (PAGE_SIZE - channel->nb_bytes)) {
drivers/ipack/devices/ipoctal.c
449
*pointer_read = (*pointer_read + 1) % PAGE_SIZE;
drivers/ipack/devices/ipoctal.c
486
return PAGE_SIZE - channel->nb_bytes;
drivers/irqchip/irq-gic-v3-its.c
151
#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
drivers/irqchip/irq-gic-v3-its.c
260
if (size >= PAGE_SIZE) {
drivers/irqchip/irq-gic-v3-its.c
275
gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
drivers/irqchip/irq-gic-v3-its.c
286
if (size >= PAGE_SIZE) {
drivers/irqchip/irq-gic-v3-its.c
2993
np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
drivers/irqchip/irq-gic-v3-its.c
2997
page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
drivers/irqchip/irq-gic-v5-irs.c
271
switch (PAGE_SIZE) {
drivers/irqchip/irq-gic-v5-its.c
302
switch (PAGE_SIZE) {
drivers/leds/trigger/ledtrig-pattern.c
259
count += scnprintf(buf + count, PAGE_SIZE - count,
drivers/macintosh/macio_sysfs.c
38
return of_device_modalias(dev, buf, PAGE_SIZE);
drivers/md/bcache/bcache.h
768
#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
drivers/md/bcache/bset.c
270
return PAGE_SIZE << b->page_order;
drivers/md/bcache/bset.c
296
if (bset_prev_bytes(b) < PAGE_SIZE)
drivers/md/bcache/bset.c
302
if (bset_tree_bytes(b) < PAGE_SIZE)
drivers/md/bcache/bset.c
329
t->tree = bset_tree_bytes(b) < PAGE_SIZE
drivers/md/bcache/bset.c
335
t->prev = bset_prev_bytes(b) < PAGE_SIZE
drivers/md/bcache/bset.c
865
PAGE_SIZE << b->page_order);
drivers/md/bcache/bset.h
273
BUG_ON((PAGE_SIZE << b->page_order) <
drivers/md/bcache/bset.h
279
return ((PAGE_SIZE << b->page_order) -
drivers/md/bcache/btree.c
377
void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
drivers/md/bcache/btree.c
381
memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
drivers/md/bcache/btree.c
382
addr += PAGE_SIZE;
drivers/md/bcache/btree.c
509
if (set_bytes(i) > PAGE_SIZE - 48 &&
drivers/md/bcache/debug.c
160
char buf[PAGE_SIZE];
drivers/md/bcache/debug.c
199
i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
drivers/md/bcache/journal.c
84
bytes > PAGE_SIZE << JSET_BITS) {
drivers/md/bcache/super.c
56
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
drivers/md/bcache/super.c
958
if (lim.logical_block_size > PAGE_SIZE && cached_bdev) {
drivers/md/bcache/super.c
965
PAGE_SIZE, bdev_logical_block_size(cached_bdev));
drivers/md/bcache/sysfs.c
1049
return bch_snprint_string_list(buf, PAGE_SIZE,
drivers/md/bcache/sysfs.c
180
return bch_snprint_string_list(buf, PAGE_SIZE,
drivers/md/bcache/sysfs.c
185
return bch_snprint_string_list(buf, PAGE_SIZE,
drivers/md/bcache/sysfs.c
190
return bch_snprint_string_list(buf, PAGE_SIZE,
drivers/md/bcache/sysfs.c
643
return snprintf(buf, PAGE_SIZE,
drivers/md/bcache/sysfs.c
766
return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
drivers/md/bcache/sysfs.c
801
return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE);
drivers/md/bcache/sysfs.c
803
return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE);
drivers/md/bcache/sysfs.c
805
return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);
drivers/md/bcache/util.c
244
start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
drivers/md/dm-bufio.c
1181
if (unlikely(c->block_size < PAGE_SIZE)) {
drivers/md/dm-bufio.c
2553
unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
drivers/md/dm-crypt.c
1669
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/md/dm-crypt.c
1690
unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
drivers/md/dm-crypt.c
1718
size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
drivers/md/dm-ebs-target.c
277
to_bytes(tmp1) > PAGE_SIZE) {
drivers/md/dm-flakey.c
438
nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/md/dm-flakey.c
457
unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
drivers/md/dm-flakey.c
470
size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
drivers/md/dm-integrity.c
1063
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
drivers/md/dm-integrity.c
1187
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
drivers/md/dm-integrity.c
1836
alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
drivers/md/dm-integrity.c
1900
checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
drivers/md/dm-integrity.c
1903
checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
drivers/md/dm-integrity.c
1915
unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
drivers/md/dm-integrity.c
2535
const unsigned x_size = PAGE_SIZE << 1;
drivers/md/dm-integrity.c
2650
outgoing_data = dio->integrity_payload + PAGE_SIZE;
drivers/md/dm-integrity.c
4308
unsigned int start = 0, end = PAGE_SIZE;
drivers/md/dm-integrity.c
4430
PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
drivers/md/dm-integrity.c
4514
sg_set_buf(&sg[i], va, PAGE_SIZE);
drivers/md/dm-integrity.c
4519
PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
drivers/md/dm-integrity.c
4942
if ((unsigned long)bi->metadata_size > PAGE_SIZE / 2) {
drivers/md/dm-integrity.c
5259
unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
drivers/md/dm-integrity.c
5293
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
drivers/md/dm-integrity.c
608
page = bit / (PAGE_SIZE * 8);
drivers/md/dm-integrity.c
609
bit %= PAGE_SIZE * 8;
drivers/md/dm-integrity.c
611
end_page = end_bit / (PAGE_SIZE * 8);
drivers/md/dm-integrity.c
612
end_bit %= PAGE_SIZE * 8;
drivers/md/dm-integrity.c
616
this_end_bit = PAGE_SIZE * 8 - 1;
drivers/md/dm-integrity.c
663
if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
drivers/md/dm-integrity.c
693
unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
drivers/md/dm-integrity.c
738
*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
drivers/md/dm-integrity.c
750
*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;
drivers/md/dm-integrity.c
908
atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);
drivers/md/dm-integrity.c
931
this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
drivers/md/dm-io.c
193
*len = PAGE_SIZE - o;
drivers/md/dm-io.c
259
*len = PAGE_SIZE - dp->context_u;
drivers/md/dm-io.c
264
dp->context_ptr += PAGE_SIZE - dp->context_u;
drivers/md/dm-io.c
284
*len = PAGE_SIZE - dp->context_u;
drivers/md/dm-io.c
289
dp->context_ptr += PAGE_SIZE - dp->context_u;
drivers/md/dm-io.c
350
(PAGE_SIZE >> SECTOR_SHIFT)) + 1);
drivers/md/dm-kcopyd.c
591
unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
drivers/md/dm-kcopyd.c
945
reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);
drivers/md/dm-log-writes.c
269
bio_pages = bio_max_segs(DIV_ROUND_UP(datalen, PAGE_SIZE));
drivers/md/dm-log-writes.c
281
pg_datalen = min_t(int, datalen, PAGE_SIZE);
drivers/md/dm-mpath.c
2042
if (WARN_ON_ONCE(read_size > PAGE_SIZE))
drivers/md/dm-pcache/backing_dev.c
193
len = min_t(size_t, PAGE_SIZE - offset, size);
drivers/md/dm-pcache/backing_dev.h
99
in_page = PAGE_SIZE - offset_in_page(p);
drivers/md/dm-raid.c
1382
(value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
drivers/md/dm-raid.c
2551
if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
drivers/md/dm-vdo/data-vio.c
788
BUILD_BUG_ON(VDO_BLOCK_SIZE > PAGE_SIZE);
drivers/md/dm-vdo/memory-alloc.c
185
return size <= PAGE_SIZE;
drivers/md/dm-vdo/memory-alloc.c
231
if (use_kmalloc(size) && (align < PAGE_SIZE)) {
drivers/md/dm-vdo/vio.c
222
bvec_count = DIV_ROUND_UP(offset + size, PAGE_SIZE);
drivers/md/dm-vdo/vio.c
228
int bytes = PAGE_SIZE - offset;
drivers/md/dm-verity-target.c
1489
num > PAGE_SIZE) {
drivers/md/dm-writecache.c
2376
wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
drivers/md/dm-writecache.c
24
#define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
drivers/md/dm-writecache.c
277
if (offset & (PAGE_SIZE / 512 - 1)) {
drivers/md/dm-writecache.c
35
#if BITMAP_GRANULARITY < PAGE_SIZE
drivers/md/dm-writecache.c
37
#define BITMAP_GRANULARITY PAGE_SIZE
drivers/md/dm-writecache.c
372
return (unsigned long)addr & (PAGE_SIZE - 1);
drivers/md/dm.c
128
#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
drivers/md/md-bitmap.c
101
#define PAGE_BITS (PAGE_SIZE << 3)
drivers/md/md-bitmap.c
1025
num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
drivers/md/md-bitmap.c
1358
node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
drivers/md/md-bitmap.c
1366
count = store->bytes - i * PAGE_SIZE;
drivers/md/md-bitmap.c
1368
count = PAGE_SIZE;
drivers/md/md-bitmap.c
1396
memset(paddr + offset, 0xff, PAGE_SIZE - offset);
drivers/md/md-bitmap.c
1818
while (*blocks < (PAGE_SIZE>>9)) {
drivers/md/md-bitmap.c
286
mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
drivers/md/md-bitmap.c
352
index * (PAGE_SIZE / SECTOR_SIZE);
drivers/md/md-bitmap.c
436
sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
drivers/md/md-bitmap.c
437
unsigned int size = PAGE_SIZE;
drivers/md/md-bitmap.c
438
unsigned int opt_size = PAGE_SIZE;
drivers/md/md-bitmap.c
444
unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
drivers/md/md-bitmap.c
447
last_page_size = PAGE_SIZE;
drivers/md/md-bitmap.c
459
sboff < (doff + mddev->dev_sectors + PAGE_SIZE / SECTOR_SIZE))
drivers/md/md-bitmap.c
564
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
drivers/md/md-bitmap.c
612
(int)PAGE_SIZE,
drivers/md/md-bitmap.c
861
int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
drivers/md/md-llbitmap.c
468
if (sync_page_io(rdev, sector, PAGE_SIZE, page, REQ_OP_READ,
drivers/md/md-llbitmap.c
543
BITMAP_DATA_OFFSET, PAGE_SIZE);
drivers/md/md-llbitmap.c
936
end = min(llbitmap->chunks, PAGE_SIZE - BITMAP_DATA_OFFSET) - 1;
drivers/md/md-llbitmap.c
944
end = min(end + PAGE_SIZE, llbitmap->chunks - 1);
drivers/md/md-llbitmap.c
991
llbitmap->blocks_per_page = PAGE_SIZE / llbitmap->io_size;
drivers/md/md.c
1901
if (sectors > (PAGE_SIZE / 512))
drivers/md/md.c
2288
memset(bbp, 0xff, PAGE_SIZE);
drivers/md/md.c
2451
stats.file_pages * (PAGE_SIZE >> 9) > new_offset)
drivers/md/md.c
6183
if (lim->logical_block_size > PAGE_SIZE) {
drivers/md/md.c
9632
window = 32 * (PAGE_SIZE / 512);
drivers/md/raid0.c
746
if ((chunksect << 9) < PAGE_SIZE)
drivers/md/raid1-10.c
4
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
drivers/md/raid1-10.c
99
int len = min_t(int, size, PAGE_SIZE);
drivers/md/raid1.c
1223
unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/md/raid1.c
1238
int len = min_t(int, PAGE_SIZE, size);
drivers/md/raid1.c
1602
BIO_MAX_VECS * (PAGE_SIZE >> 9));
drivers/md/raid1.c
2156
if (s > (PAGE_SIZE>>9))
drivers/md/raid1.c
2157
s = PAGE_SIZE >> 9;
drivers/md/raid1.c
2258
vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
drivers/md/raid1.c
2409
if (s > (PAGE_SIZE>>9))
drivers/md/raid1.c
2410
s = PAGE_SIZE >> 9;
drivers/md/raid1.c
2984
int len = PAGE_SIZE;
drivers/md/raid10.c
2365
vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
drivers/md/raid10.c
2389
int len = PAGE_SIZE;
drivers/md/raid10.c
2491
if (s > (PAGE_SIZE>>9))
drivers/md/raid10.c
2492
s = PAGE_SIZE >> 9;
drivers/md/raid10.c
2641
if (s > (PAGE_SIZE>>9))
drivers/md/raid10.c
2642
s = PAGE_SIZE >> 9;
drivers/md/raid10.c
3629
int len = PAGE_SIZE;
drivers/md/raid10.c
3788
if (chunk < (PAGE_SIZE >> 9) ||
drivers/md/raid10.c
3844
mdname(mddev), PAGE_SIZE);
drivers/md/raid10.c
4793
for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
drivers/md/raid10.c
4794
struct page *page = pages[s / (PAGE_SIZE >> 9)];
drivers/md/raid10.c
4796
if (len > PAGE_SIZE)
drivers/md/raid10.c
4797
len = PAGE_SIZE;
drivers/md/raid10.c
4936
if (s > (PAGE_SIZE >> 9))
drivers/md/raid10.c
4937
s = PAGE_SIZE >> 9;
drivers/md/raid10.c
4948
addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
drivers/md/raid5-cache.c
1024
addr, PAGE_SIZE);
drivers/md/raid5-cache.c
1679
__bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
drivers/md/raid5-cache.c
1718
PAGE_SIZE);
drivers/md/raid5-cache.c
1744
crc = crc32c(log->uuid_checksum, mb, PAGE_SIZE);
drivers/md/raid5-cache.c
1748
if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
drivers/md/raid5-cache.c
1783
mb->checksum = cpu_to_le32(crc32c(log->uuid_checksum, mb, PAGE_SIZE));
drivers/md/raid5-cache.c
1784
if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
drivers/md/raid5-cache.c
1891
sync_page_io(rdev, sh->sector, PAGE_SIZE,
drivers/md/raid5-cache.c
1899
sync_page_io(rrdev, sh->sector, PAGE_SIZE,
drivers/md/raid5-cache.c
1978
checksum = crc32c(log->uuid_checksum, addr, PAGE_SIZE);
drivers/md/raid5-cache.c
2382
PAGE_SIZE));
drivers/md/raid5-cache.c
2384
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
drivers/md/raid5-cache.c
2395
mb, PAGE_SIZE));
drivers/md/raid5-cache.c
2396
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
drivers/md/raid5-cache.c
2536
page, PAGE_SIZE, "[%s] %s\n",
drivers/md/raid5-cache.c
2542
page, PAGE_SIZE, "%s [%s]\n",
drivers/md/raid5-cache.c
2888
addr, PAGE_SIZE);
drivers/md/raid5-cache.c
2958
if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
drivers/md/raid5-cache.c
2971
expected_crc = crc32c(log->uuid_checksum, mb, PAGE_SIZE);
drivers/md/raid5-cache.c
3056
if (PAGE_SIZE != 4096)
drivers/md/raid5-cache.c
3068
conf->raid_disks) > PAGE_SIZE) {
drivers/md/raid5-cache.c
717
crc = crc32c(log->uuid_checksum, block, PAGE_SIZE);
drivers/md/raid5-cache.c
793
__bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
drivers/md/raid5-cache.c
807
log->current_io->meta_offset + payload_size > PAGE_SIZE)
drivers/md/raid5-cache.c
853
if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
drivers/md/raid5-ppl.c
1055
pplhdr->checksum = cpu_to_le32(~crc32c(~0, pplhdr, PAGE_SIZE));
drivers/md/raid5-ppl.c
1096
pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
drivers/md/raid5-ppl.c
1109
crc = ~crc32c(~0, pplhdr, PAGE_SIZE);
drivers/md/raid5-ppl.c
1326
if (PAGE_SIZE != 4096)
drivers/md/raid5-ppl.c
1498
if (len >= PAGE_SIZE)
drivers/md/raid5-ppl.c
196
tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
drivers/md/raid5-ppl.c
199
tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
drivers/md/raid5-ppl.c
347
le32_add_cpu(&e->pp_size, PAGE_SIZE);
drivers/md/raid5-ppl.c
348
io->pp_size += PAGE_SIZE;
drivers/md/raid5-ppl.c
351
PAGE_SIZE));
drivers/md/raid5-ppl.c
468
__bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
drivers/md/raid5-ppl.c
492
if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
drivers/md/raid5-ppl.c
499
__bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
drivers/md/raid5-ppl.c
851
memset(page_address(page1), 0, PAGE_SIZE);
drivers/md/raid5-ppl.c
992
int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
drivers/md/raid5.c
2341
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
2373
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
2586
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
2644
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
466
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
513
cnt = PAGE_SIZE / conf->stripe_size;
drivers/md/raid5.c
530
#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
553
#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
6900
if (len >= PAGE_SIZE)
drivers/md/raid5.c
6941
if (len >= PAGE_SIZE)
drivers/md/raid5.c
6978
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
6987
if (len >= PAGE_SIZE)
drivers/md/raid5.c
6998
new > PAGE_SIZE || new == 0 ||
drivers/md/raid5.c
7075
if (len >= PAGE_SIZE)
drivers/md/raid5.c
7120
if (len >= PAGE_SIZE)
drivers/md/raid5.c
7190
if (len >= PAGE_SIZE)
drivers/md/raid5.c
7493
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
drivers/md/raid5.c
7504
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.c
7575
conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/md/raid5.c
7667
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
drivers/md/raid5.c
8794
if (new_chunk < (PAGE_SIZE>>9))
drivers/md/raid5.c
8828
if (new_chunk < (PAGE_SIZE >> 9))
drivers/md/raid5.h
252
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.h
483
#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
drivers/md/raid5.h
484
#define STRIPE_SIZE PAGE_SIZE
drivers/md/raid5.h
491
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
drivers/md/raid5.h
581
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/md/raid5.h
697
#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
drivers/md/raid5.h
785
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
drivers/media/cec/core/cec-core.c
184
buf = memdup_user_nul(ubuf, min_t(size_t, PAGE_SIZE, count));
drivers/media/common/saa7146/saa7146_core.c
148
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
drivers/media/common/saa7146/saa7146_core.c
154
sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
drivers/media/common/saa7146/saa7146_core.c
168
int pages = (length+PAGE_SIZE-1)/PAGE_SIZE;
drivers/media/common/saa7146/saa7146_core.c
226
cpu = dma_alloc_coherent(&pci->dev, PAGE_SIZE, &dma_addr, GFP_KERNEL);
drivers/media/common/saa7146/saa7146_core.c
230
pt->size = PAGE_SIZE;
drivers/media/common/saa7146/saa7146_core.c
246
WARN_ON(list->offset > PAGE_SIZE))
drivers/media/common/saa7146/saa7146_video.c
112
m1 = ((size + PAGE_SIZE) / PAGE_SIZE) - 1;
drivers/media/common/saa7146/saa7146_video.c
113
m2 = ((size + (size / 4) + PAGE_SIZE) / PAGE_SIZE) - 1;
drivers/media/common/saa7146/saa7146_video.c
114
m3 = ((size + (size / 2) + PAGE_SIZE) / PAGE_SIZE) - 1;
drivers/media/common/saa7146/saa7146_video.c
115
o1 = size % PAGE_SIZE;
drivers/media/common/saa7146/saa7146_video.c
116
o2 = (size + (size / 4)) % PAGE_SIZE;
drivers/media/common/saa7146/saa7146_video.c
123
m1 = ((size + PAGE_SIZE) / PAGE_SIZE) - 1;
drivers/media/common/saa7146/saa7146_video.c
124
m2 = ((size + (size / 2) + PAGE_SIZE) / PAGE_SIZE) - 1;
drivers/media/common/saa7146/saa7146_video.c
125
m3 = ((2 * size + PAGE_SIZE) / PAGE_SIZE) - 1;
drivers/media/common/saa7146/saa7146_video.c
126
o1 = size % PAGE_SIZE;
drivers/media/common/saa7146/saa7146_video.c
127
o2 = (size + (size / 2)) % PAGE_SIZE;
drivers/media/common/saa7146/saa7146_video.c
409
if (f->fmt.pix.bytesperline > (2 * PAGE_SIZE * fmt->depth) / 8) /* arbitrary constraint */
drivers/media/common/siano/smscoreapi.c
930
msg = kmalloc(PAGE_SIZE, GFP_KERNEL | coredev->gfp_buf_flags);
drivers/media/common/siano/smsdvb-debugfs.c
27
char stats_data[PAGE_SIZE];
drivers/media/common/videobuf2/videobuf2-dma-sg.c
73
if ((PAGE_SIZE << order) > size)
drivers/media/common/videobuf2/videobuf2-dma-sg.c
95
size -= PAGE_SIZE << order;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
217
int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
242
sg_set_page(sg, page, PAGE_SIZE, 0);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
243
vaddr += PAGE_SIZE;
drivers/media/firewire/firedtv-fw.c
72
#define PACKETS_PER_PAGE (PAGE_SIZE / MAX_PACKET_SIZE)
drivers/media/i2c/ccs/ccs-core.c
2741
rval = ccs_read_nvm(sensor, buf, PAGE_SIZE);
drivers/media/i2c/et8ek8/et8ek8_driver.c
1249
#if PAGE_SIZE < ET8EK8_PRIV_MEM_SIZE
drivers/media/pci/bt8xx/bt878.c
94
bt->risc_size = PAGE_SIZE;
drivers/media/pci/bt8xx/bttv-driver.c
3599
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
drivers/media/pci/bt8xx/bttv-risc.c
135
/ PAGE_SIZE) + ylines;
drivers/media/pci/bt8xx/bttv-risc.c
446
if ((rc = btcx_riscmem_alloc(btv->c.pci,&btv->main,PAGE_SIZE)) < 0)
drivers/media/pci/bt8xx/bttv-risc.c
52
/ PAGE_SIZE + store_lines) * 8;
drivers/media/pci/cobalt/cobalt-v4l2.c
57
(COBALT_MAX_WIDTH * COBALT_MAX_BPP) / PAGE_SIZE + 2;
drivers/media/pci/cobalt/cobalt-v4l2.c
60
const size_t audio_bytes = ((1920 * 4) / PAGE_SIZE + 1) * 0x20;
drivers/media/pci/cx23885/cx23885-alsa.c
96
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
drivers/media/pci/cx23885/cx23885-alsa.c
99
sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
drivers/media/pci/cx23885/cx23885-core.c
1218
/ PAGE_SIZE + lines);
drivers/media/pci/cx23885/cx23885-core.c
1255
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
drivers/media/pci/cx23885/cx23885-core.c
1295
/ PAGE_SIZE + lines);
drivers/media/pci/cx25821/cx25821-alsa.c
159
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
drivers/media/pci/cx25821/cx25821-alsa.c
162
sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
drivers/media/pci/cx25821/cx25821-core.c
1072
instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE +
drivers/media/pci/cx25821/cx25821-core.c
1176
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
drivers/media/pci/cx88/cx88-alsa.c
298
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
drivers/media/pci/cx88/cx88-alsa.c
301
sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
drivers/media/pci/cx88/cx88-core.c
151
PAGE_SIZE + lines);
drivers/media/pci/cx88/cx88-core.c
190
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
117
dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
122
dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
133
cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
136
cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
181
entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
210
remaining = offset_in_page(remaining) ?: PAGE_SIZE;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
217
remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
867
b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
897
dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
987
dma_free_coherent(dev, PAGE_SIZE,
drivers/media/pci/intel/ipu3/ipu3-cio2.h
39
#define CIO2_MAX_BUFFERS (PAGE_SIZE / 16 / CIO2_MAX_LOPS)
drivers/media/pci/intel/ipu3/ipu3-cio2.h
40
#define CIO2_LOP_ENTRIES (PAGE_SIZE / sizeof(u32))
drivers/media/pci/intel/ipu6/ipu6-buttress.c
569
addr += PAGE_SIZE;
drivers/media/pci/intel/ipu6/ipu6-dma.c
107
__clear_buffer(pages[i], PAGE_SIZE, attrs);
drivers/media/pci/intel/ipu6/ipu6-dma.c
187
PAGE_SIZE, DMA_BIDIRECTIONAL,
drivers/media/pci/intel/ipu6/ipu6-dma.c
198
PAGE_SIZE);
drivers/media/pci/intel/ipu6/ipu6-dma.c
203
PAGE_SIZE, DMA_BIDIRECTIONAL,
drivers/media/pci/intel/ipu6/ipu6-dma.c
227
dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
drivers/media/pci/intel/ipu6/ipu6-dma.c
230
ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
drivers/media/pci/intel/ipu6/ipu6-dma.c
280
dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
drivers/media/pci/intel/ipu6/ipu6-dma.c
87
__clear_buffer(pages[i], PAGE_SIZE << order, attrs);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
132
dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
169
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
207
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
432
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
448
mmu->pci_trash_page, PAGE_SIZE);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
467
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
725
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
734
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/pci/intel/ipu6/ipu6-mmu.c
742
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/pci/intel/ipu6/ipu6.h
164
#define IPU6_MMUV2_L2_RANGE (1024 * PAGE_SIZE)
drivers/media/pci/intel/ipu6/ipu6.h
172
#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE)
drivers/media/pci/ivtv/ivtv-driver.h
81
#define IVTV_DMA_SG_OSD_ENT (2883584/PAGE_SIZE) /* sg entities */
drivers/media/pci/ivtv/ivtv-udma.c
38
dma_page->tail : PAGE_SIZE - offset;
drivers/media/pci/mantis/mantis_dma.c
44
#define MANTIS_RISC_SIZE PAGE_SIZE /* RISC program must fit here. */
drivers/media/pci/ngene/ngene.h
490
(RING_SIZE_VIDEO * PAGE_SIZE * 2) + \
drivers/media/pci/ngene/ngene.h
491
(RING_SIZE_AUDIO * PAGE_SIZE * 2) + \
drivers/media/pci/ngene/ngene.h
492
(RING_SIZE_TS * PAGE_SIZE * 4) + \
drivers/media/pci/ngene/ngene.h
493
8 * PAGE_SIZE + OVERFLOW_BUFFER_SIZE + PAGE_SIZE)
drivers/media/pci/pt3/pt3.h
67
#define DESCS_IN_PAGE (PAGE_SIZE / sizeof(struct xfer_desc))
drivers/media/pci/pt3/pt3_dma.c
139
dma_free_coherent(&pt3->pdev->dev, PAGE_SIZE,
drivers/media/pci/pt3/pt3_dma.c
174
p = dma_alloc_coherent(&pt3->pdev->dev, PAGE_SIZE,
drivers/media/pci/saa7134/saa7134-alsa.c
280
pg = vmalloc_to_page(dma->vaddr + i * PAGE_SIZE);
drivers/media/pci/saa7134/saa7134-alsa.c
283
sg_set_page(&dma->sglist[i], pg, PAGE_SIZE, 0);
drivers/media/pci/saa7134/saa7134-alsa.c
349
(dev->dmasound.bufsize + PAGE_SIZE) >> PAGE_SHIFT);
drivers/media/pci/saa7134/saa7134-core.c
182
size += PAGE_SIZE; /* for non-page-aligned buffers */
drivers/media/pci/saa7134/saa7134-go7007.c
232
saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/media/pci/saa7134/saa7134-go7007.c
233
go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE);
drivers/media/pci/saa7134/saa7134-go7007.c
237
saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/media/pci/saa7134/saa7134-go7007.c
238
go7007_parse_video_stream(go, saa->top, PAGE_SIZE);
drivers/media/pci/saa7134/saa7134-go7007.c
249
0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/media/pci/saa7134/saa7134-go7007.c
254
0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/media/pci/saa7134/saa7134-go7007.c
256
dma_unmap_page(&dev->pci->dev, saa->top_dma, PAGE_SIZE,
drivers/media/pci/saa7134/saa7134-go7007.c
279
saa_writeb(SAA7134_TS_DMA0, ((PAGE_SIZE >> 7) - 1) & 0xff);
drivers/media/pci/saa7134/saa7134-go7007.c
280
saa_writeb(SAA7134_TS_DMA1, (PAGE_SIZE >> 15) & 0xff);
drivers/media/pci/saa7134/saa7134-go7007.c
281
saa_writeb(SAA7134_TS_DMA2, (PAGE_SIZE >> 31) & 0x3f);
drivers/media/pci/saa7134/saa7134-go7007.c
322
dma_unmap_page(&dev->pci->dev, saa->top_dma, PAGE_SIZE,
drivers/media/pci/saa7134/saa7134-go7007.c
324
dma_unmap_page(&dev->pci->dev, saa->bottom_dma, PAGE_SIZE,
drivers/media/pci/saa7134/saa7134-video.c
1647
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
drivers/media/pci/saa7164/saa7164-dvb.c
361
((SAA7164_TS_NUMBER_OF_LINES * 188) / PAGE_SIZE);
drivers/media/pci/saa7164/saa7164-encoder.c
117
((SAA7164_PS_NUMBER_OF_LINES * 128) / PAGE_SIZE);
drivers/media/pci/saa7164/saa7164-encoder.c
128
((SAA7164_TS_NUMBER_OF_LINES * 188) / PAGE_SIZE);
drivers/media/pci/saa7164/saa7164-vbi.c
83
((params->numberoflines * params->pitch) / PAGE_SIZE);
drivers/media/pci/tw68/tw68-risc.c
152
PAGE_SIZE) + lines) + 4;
drivers/media/pci/tw68/tw68-video.c
349
maxcount = (4 * 1024 * 1024) / roundup(size, PAGE_SIZE);
drivers/media/platform/amphion/vpu_v4l2.c
210
fmt->sizeimage[i] = max_t(u32, fmt->sizeimage[i], PAGE_SIZE);
drivers/media/platform/broadcom/bcm2835-unicam.c
107
#define UNICAM_DUMMY_BUF_SIZE PAGE_SIZE
drivers/media/platform/chips-media/coda/coda-common.c
589
width * height * 2), PAGE_SIZE);
drivers/media/platform/marvell/mcam-core.c
1224
int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
drivers/media/platform/marvell/mcam-core.c
1258
int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
drivers/media/platform/renesas/rcar_drif.c
119
#define RCAR_DRIF_DEFAULT_HWBUF_SIZE (4 * PAGE_SIZE)
drivers/media/platform/rockchip/rga/rga-buf.c
86
n_desc = DIV_ROUND_UP(f->size, PAGE_SIZE);
drivers/media/platform/rockchip/rga/rga-hw.c
437
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/media/platform/samsung/exynos4-is/media-dev.c
1235
return strscpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE);
drivers/media/platform/samsung/exynos4-is/media-dev.c
1237
return strscpy(buf, "V4L2 video node only API (vid-dev)\n", PAGE_SIZE);
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
1451
pix->sizeimage = PAGE_SIZE;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
1573
padding = PAGE_SIZE;
drivers/media/platform/ti/omap/omap_voutlib.c
318
addr += PAGE_SIZE;
drivers/media/platform/ti/omap/omap_voutlib.c
319
size -= PAGE_SIZE;
drivers/media/platform/ti/omap/omap_voutlib.c
339
addr += PAGE_SIZE;
drivers/media/platform/ti/omap/omap_voutlib.c
340
size -= PAGE_SIZE;
drivers/media/rc/imon.c
810
strscpy(buf, "associating\n", PAGE_SIZE);
drivers/media/rc/imon.c
812
strscpy(buf, "closed\n", PAGE_SIZE);
drivers/media/rc/nuvoton-cir.c
226
buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len,
drivers/media/rc/nuvoton-cir.c
229
buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
drivers/media/usb/go7007/go7007-priv.h
127
#define GO7007_BUF_PAGES (128 * 1024 / PAGE_SIZE)
drivers/media/usb/gspca/vicam.c
239
firmware_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/media/usb/gspca/vicam.c
246
if (len > PAGE_SIZE) {
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
142
buf, PAGE_SIZE - 1, &cnt);
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
161
buf, PAGE_SIZE - 1, &cnt);
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
180
buf, PAGE_SIZE - 1, &cnt);
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
199
PAGE_SIZE - bcnt, &ccnt);
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
202
if (bcnt >= PAGE_SIZE) break;
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
225
PAGE_SIZE - bcnt, &ccnt);
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
227
if (bcnt >= PAGE_SIZE) break;
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
783
return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE);
drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
793
return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE);
drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
1069
tbuf = kmalloc(PAGE_SIZE,GFP_KERNEL);
drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
1074
if (c1 > PAGE_SIZE) c1 = PAGE_SIZE;
drivers/memstick/core/ms_block.c
1738
if ((size_t)msb->page_size > PAGE_SIZE) {
drivers/memstick/core/mspro_block.c
259
if (PAGE_SIZE - rc)
drivers/memstick/host/jmb38x_ms.c
322
p_cnt = PAGE_SIZE - p_off;
drivers/memstick/host/r592.c
786
dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
drivers/memstick/host/r592.c
806
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
drivers/memstick/host/r592.c
840
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
drivers/memstick/host/tifm_ms.c
206
p_cnt = PAGE_SIZE - p_off;
drivers/message/fusion/mptscsih.c
3038
return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
drivers/message/fusion/mptscsih.c
3054
return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
drivers/message/fusion/mptscsih.c
3070
return snprintf(buf, PAGE_SIZE, "%03x\n", ioc->facts.MsgVersion);
drivers/message/fusion/mptscsih.c
3083
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->prod_name);
drivers/message/fusion/mptscsih.c
3097
return snprintf(buf, PAGE_SIZE, "%02xh\n",
drivers/message/fusion/mptscsih.c
3111
return snprintf(buf, PAGE_SIZE, "%02xh\n",ioc->nvdata_version_default);
drivers/message/fusion/mptscsih.c
3124
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_name);
drivers/message/fusion/mptscsih.c
3136
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_assembly);
drivers/message/fusion/mptscsih.c
3149
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_tracer);
drivers/message/fusion/mptscsih.c
3162
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
drivers/message/fusion/mptscsih.c
3175
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
drivers/message/fusion/mptscsih.c
3188
return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->debug_level);
drivers/mfd/aat2870-core.c
224
count += snprintf(buf + count, PAGE_SIZE - count, "0x%02x: ", addr);
drivers/mfd/aat2870-core.c
225
if (count >= PAGE_SIZE - 1)
drivers/mfd/aat2870-core.c
230
count += snprintf(buf + count, PAGE_SIZE - count,
drivers/mfd/aat2870-core.c
233
count += snprintf(buf + count, PAGE_SIZE - count,
drivers/mfd/aat2870-core.c
236
if (count >= PAGE_SIZE - 1)
drivers/mfd/aat2870-core.c
239
count += snprintf(buf + count, PAGE_SIZE - count, "\n");
drivers/mfd/aat2870-core.c
240
if (count >= PAGE_SIZE - 1)
drivers/mfd/aat2870-core.c
245
if (count >= PAGE_SIZE)
drivers/mfd/aat2870-core.c
246
count = PAGE_SIZE - 1;
drivers/mfd/aat2870-core.c
258
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/misc/bcm-vk/bcm_vk_dev.c
1330
nr_scratch_pages * PAGE_SIZE,
drivers/misc/bcm-vk/bcm_vk_dev.c
1521
dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
drivers/misc/bcm-vk/bcm_vk_dev.c
1569
dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
drivers/misc/bcm-vk/bcm_vk_dev.c
490
vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1,
drivers/misc/bcm-vk/bcm_vk_sg.c
102
size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
drivers/misc/bcm-vk/bcm_vk_sg.c
116
size = min_t(size_t, PAGE_SIZE, remaining_size);
drivers/misc/eeprom/at25.c
464
chip->page_size = PAGE_SIZE;
drivers/misc/eeprom/at25.c
78
#define io_limit PAGE_SIZE /* bytes */
drivers/misc/fastrpc.c
1053
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
drivers/misc/fastrpc.c
1075
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
drivers/misc/fastrpc.c
1183
msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
drivers/misc/genwqe/card_ddcb.c
1032
queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE);
drivers/misc/genwqe/card_ddcb.c
1104
queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
drivers/misc/genwqe/card_dev.c
456
dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
drivers/misc/genwqe/card_dev.c
780
if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
drivers/misc/genwqe/card_dev.c
784
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
drivers/misc/genwqe/card_dev.c
814
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
drivers/misc/genwqe/card_dev.c
938
(u64)m->u_vaddr)/PAGE_SIZE;
drivers/misc/genwqe/card_utils.c
236
dma_unmap_page(&pci_dev->dev, dma_list[i], PAGE_SIZE,
drivers/misc/genwqe/card_utils.c
256
PAGE_SIZE,
drivers/misc/genwqe/card_utils.c
280
return roundup(len, PAGE_SIZE);
drivers/misc/genwqe/card_utils.c
298
sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
drivers/misc/genwqe/card_utils.c
299
sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
drivers/misc/genwqe/card_utils.c
300
sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;
drivers/misc/genwqe/card_utils.c
326
if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
drivers/misc/genwqe/card_utils.c
327
sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
drivers/misc/genwqe/card_utils.c
340
sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
drivers/misc/genwqe/card_utils.c
355
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
drivers/misc/genwqe/card_utils.c
360
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
drivers/misc/genwqe/card_utils.c
401
size_to_map = min(size, PAGE_SIZE - map_offs);
drivers/misc/genwqe/card_utils.c
491
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
drivers/misc/genwqe/card_utils.c
508
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
drivers/misc/genwqe/card_utils.c
562
if (size > ULONG_MAX - PAGE_SIZE - offs) {
drivers/misc/genwqe/card_utils.c
566
m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
drivers/misc/ibmasm/ibmasmfs.c
112
sb->s_blocksize = PAGE_SIZE;
drivers/misc/ibmvmc.c
182
memset(queue->msgs, 0x00, PAGE_SIZE);
drivers/misc/ibmvmc.c
188
queue->msg_token, PAGE_SIZE);
drivers/misc/ibmvmc.c
2125
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
drivers/misc/ibmvmc.c
2136
queue->msg_token, PAGE_SIZE);
drivers/misc/keba/cp500.c
271
return cp500_get_fpga_version(cp500, buf, PAGE_SIZE);
drivers/misc/lkdtm/core.c
236
if (count >= PAGE_SIZE)
drivers/misc/lkdtm/core.c
278
n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
drivers/misc/lkdtm/core.c
285
n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
drivers/misc/lkdtm/core.c
311
if (count >= PAGE_SIZE)
drivers/misc/lkdtm/heap.c
203
memset((void *)p, 0x3, PAGE_SIZE);
drivers/misc/lkdtm/heap.c
207
memset((void *)p, 0x78, PAGE_SIZE);
drivers/misc/lkdtm/heap.c
294
memset(first, 0xAB, PAGE_SIZE);
drivers/misc/lkdtm/heap.c
307
if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
drivers/misc/lkdtm/heap.c
34
one = vzalloc(PAGE_SIZE);
drivers/misc/lkdtm/heap.c
36
two = vzalloc(PAGE_SIZE);
drivers/misc/lkdtm/heap.c
39
memset(one, 0xAA, PAGE_SIZE + __offset);
drivers/misc/lkdtm/perms.c
215
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
drivers/misc/lkdtm/perms.c
223
vm_munmap(user_addr, PAGE_SIZE);
drivers/misc/lkdtm/perms.c
236
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
drivers/misc/lkdtm/perms.c
246
vm_munmap(user_addr, PAGE_SIZE);
drivers/misc/lkdtm/perms.c
261
vm_munmap(user_addr, PAGE_SIZE);
drivers/misc/lkdtm/powerpc.c
33
p = vmalloc(PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
127
vm_munmap(user_addr, PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
149
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
drivers/misc/lkdtm/usercopy.c
192
vm_munmap(user_addr, PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
225
user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
drivers/misc/lkdtm/usercopy.c
269
vm_munmap(user_alloc, PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
315
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
drivers/misc/lkdtm/usercopy.c
334
unconst + PAGE_SIZE)) {
drivers/misc/lkdtm/usercopy.c
342
vm_munmap(user_addr, PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
355
uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
drivers/misc/lkdtm/usercopy.c
363
memset(kaddr, 0xAA, PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
366
kaddr += PAGE_SIZE / 2;
drivers/misc/lkdtm/usercopy.c
371
unconst + (PAGE_SIZE / 2))) {
drivers/misc/lkdtm/usercopy.c
378
if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
drivers/misc/lkdtm/usercopy.c
387
vm_munmap(uaddr, PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
394
addr = vmalloc(PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
419
do_usercopy_page_span("folio", addr + PAGE_SIZE);
drivers/misc/lkdtm/usercopy.c
81
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
drivers/misc/mei/bus.c
983
sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
drivers/misc/mei/bus.c
992
sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
drivers/misc/mei/hw-me.c
1609
.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
drivers/misc/mei/main.c
1101
cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
drivers/misc/mei/main.c
999
cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
drivers/misc/ocxl/core.c
197
afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
drivers/misc/ocxl/link.c
500
unsigned long addr, pid, page_size = PAGE_SIZE;
drivers/misc/ocxl/link.c
739
PAGE_SIZE);
drivers/misc/sgi-gru/grufault.c
329
vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
drivers/misc/sgi-gru/grufault.c
340
vaddr -= PAGE_SIZE;
drivers/misc/sgi-xp/xpnet.c
65
#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPC_MSG_MAX_SIZE)
drivers/misc/sram-exec.c
96
pages = PAGE_ALIGN(size) / PAGE_SIZE;
drivers/misc/vmw_balloon.c
1321
b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
drivers/misc/vmw_vmci/vmci_doorbell.c
154
if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
drivers/misc/vmw_vmci/vmci_doorbell.c
158
last_notify_idx_released = PAGE_SIZE;
drivers/misc/vmw_vmci/vmci_doorbell.c
180
new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
drivers/misc/vmw_vmci/vmci_doorbell.c
75
static u32 last_notify_idx_released = PAGE_SIZE;
drivers/misc/vmw_vmci/vmci_guest.c
314
BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
drivers/misc/vmw_vmci/vmci_guest.c
318
dg_in_buffer += PAGE_SIZE;
drivers/misc/vmw_vmci/vmci_guest.c
326
current_dg_in_buffer_size = PAGE_SIZE;
drivers/misc/vmw_vmci/vmci_guest.c
339
(is_io_port && remaining_bytes > PAGE_SIZE)) {
drivers/misc/vmw_vmci/vmci_guest.c
348
(uintptr_t)dg + 1, PAGE_SIZE);
drivers/misc/vmw_vmci/vmci_guest.c
39
#define VMCI_DMA_DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE)
drivers/misc/vmw_vmci/vmci_guest.c
704
&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
drivers/misc/vmw_vmci/vmci_guest.c
889
dma_free_coherent(&pdev->dev, PAGE_SIZE,
drivers/misc/vmw_vmci/vmci_guest.c
949
dma_free_coherent(&pdev->dev, PAGE_SIZE,
drivers/misc/vmw_vmci/vmci_host.c
256
context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
drivers/misc/vmw_vmci/vmci_queue_pair.c
1103
DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
drivers/misc/vmw_vmci/vmci_queue_pair.c
1105
DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
drivers/misc/vmw_vmci/vmci_queue_pair.c
1376
PAGE_SIZE, GFP_KERNEL);
drivers/misc/vmw_vmci/vmci_queue_pair.c
1383
tmp = (u8 *)entry->local_mem + PAGE_SIZE *
drivers/misc/vmw_vmci/vmci_queue_pair.c
1384
(DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
drivers/misc/vmw_vmci/vmci_queue_pair.c
238
(DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
drivers/misc/vmw_vmci/vmci_queue_pair.c
239
DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
drivers/misc/vmw_vmci/vmci_queue_pair.c
256
for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
drivers/misc/vmw_vmci/vmci_queue_pair.c
257
dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
drivers/misc/vmw_vmci/vmci_queue_pair.c
280
if (size > SIZE_MAX - PAGE_SIZE)
drivers/misc/vmw_vmci/vmci_queue_pair.c
282
num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
drivers/misc/vmw_vmci/vmci_queue_pair.c
309
dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
drivers/misc/vmw_vmci/vmci_queue_pair.c
314
qp_free_queue(queue, i * PAGE_SIZE);
drivers/misc/vmw_vmci/vmci_queue_pair.c
341
(queue_offset + bytes_copied) / PAGE_SIZE;
drivers/misc/vmw_vmci/vmci_queue_pair.c
343
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
drivers/misc/vmw_vmci/vmci_queue_pair.c
353
if (size - bytes_copied > PAGE_SIZE - page_offset)
drivers/misc/vmw_vmci/vmci_queue_pair.c
355
to_copy = PAGE_SIZE - page_offset;
drivers/misc/vmw_vmci/vmci_queue_pair.c
388
(queue_offset + bytes_copied) / PAGE_SIZE;
drivers/misc/vmw_vmci/vmci_queue_pair.c
390
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
drivers/misc/vmw_vmci/vmci_queue_pair.c
401
if (size - bytes_copied > PAGE_SIZE - page_offset)
drivers/misc/vmw_vmci/vmci_queue_pair.c
403
to_copy = PAGE_SIZE - page_offset;
drivers/misc/vmw_vmci/vmci_queue_pair.c
533
if (size > min_t(size_t, VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE))
drivers/misc/vmw_vmci/vmci_queue_pair.c
535
num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
drivers/misc/vmw_vmci/vmci_queue_pair.c
710
produce_q->kernel_if->num_pages * PAGE_SIZE;
drivers/misc/vmw_vmci/vmci_queue_pair.c
766
PAGE_SIZE);
drivers/misc/vmw_vmci/vmci_queue_pair.c
889
const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
drivers/misc/vmw_vmci/vmci_queue_pair.c
890
DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
drivers/misc/xilinx_sdfec.c
166
#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
drivers/misc/xilinx_sdfec.c
622
n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
drivers/misc/xilinx_sdfec.c
623
if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
drivers/misc/xilinx_sdfec.c
648
((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
drivers/mmc/core/host.c
576
host->max_seg_size = PAGE_SIZE;
drivers/mmc/core/host.c
578
host->max_req_size = PAGE_SIZE;
drivers/mmc/core/host.c
580
host->max_blk_count = PAGE_SIZE / 512;
drivers/mmc/core/mmc_test.c
1054
size = PAGE_SIZE * 2;
drivers/mmc/core/mmc_test.c
1075
size = PAGE_SIZE * 2;
drivers/mmc/core/mmc_test.c
1199
size = PAGE_SIZE * 2;
drivers/mmc/core/mmc_test.c
1226
size = PAGE_SIZE * 2;
drivers/mmc/core/mmc_test.c
1324
size = PAGE_SIZE * 2;
drivers/mmc/core/mmc_test.c
1346
size = PAGE_SIZE * 2;
drivers/mmc/core/mmc_test.c
1445
if (t->max_seg_sz >= PAGE_SIZE)
drivers/mmc/core/mmc_test.c
1446
max_tfr = t->max_segs * PAGE_SIZE;
drivers/mmc/core/mmc_test.c
2012
if (t->max_seg_sz >= PAGE_SIZE)
drivers/mmc/core/mmc_test.c
2013
max_tfr = t->max_segs * PAGE_SIZE;
drivers/mmc/core/mmc_test.c
32
#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
drivers/mmc/core/mmc_test.c
333
unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
drivers/mmc/core/mmc_test.c
334
unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
drivers/mmc/core/mmc_test.c
335
unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
drivers/mmc/core/mmc_test.c
418
unsigned long len = PAGE_SIZE << mem->arr[i].order;
drivers/mmc/core/mmc_test.c
472
addr = base + PAGE_SIZE * --cnt;
drivers/mmc/core/mmc_test.c
473
if (last_addr && last_addr + PAGE_SIZE == addr)
drivers/mmc/core/mmc_test.c
476
len = PAGE_SIZE;
drivers/mmc/core/sdio_uart.c
48
#define FIFO_SIZE PAGE_SIZE
drivers/mmc/host/dw_mmc.c
62
#define DESC_RING_BUF_SZ PAGE_SIZE
drivers/mmc/host/loongson2-mmc.c
844
host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
drivers/mmc/host/loongson2-mmc.c
849
memset(host->sg_cpu, 0, PAGE_SIZE);
drivers/mmc/host/loongson2-mmc.c
856
dma_free_coherent(dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
drivers/mmc/host/meson-gx-mmc.c
129
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE
drivers/mmc/host/mmci_stm32_sdmmc.c
17
#define SDMMC_LLI_BUF_LEN PAGE_SIZE
drivers/mmc/host/pxamci.c
633
mmc->max_seg_size = PAGE_SIZE;
drivers/mmc/host/renesas_sdhi_sys_dmac.c
177
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
drivers/mmc/host/renesas_sdhi_sys_dmac.c
249
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
drivers/mmc/host/sdhci.c
4784
if (mmc->max_seg_size < PAGE_SIZE)
drivers/mmc/host/sdhci.c
4785
mmc->max_seg_size = PAGE_SIZE;
drivers/mmc/host/sh_mmcif.c
1477
mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
drivers/mmc/host/sunxi-mmc.c
1387
host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
drivers/mmc/host/sunxi-mmc.c
1420
mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des);
drivers/mmc/host/sunxi-mmc.c
1480
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
drivers/mmc/host/sunxi-mmc.c
1495
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
drivers/mmc/host/tifm_sd.c
196
p_cnt = PAGE_SIZE - p_off;
drivers/mmc/host/tifm_sd.c
245
p_cnt = PAGE_SIZE - p_off;
drivers/mmc/host/tifm_sd.c
975
mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
drivers/mmc/host/tmio_mmc_core.c
1196
(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
drivers/mmc/host/usdhi6rol0.c
1848
mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
drivers/mmc/host/usdhi6rol0.c
335
memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
drivers/mmc/host/usdhi6rol0.c
359
size_t head = PAGE_SIZE - sg->offset;
drivers/mmc/host/usdhi6rol0.c
407
memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
drivers/mmc/host/usdhi6rol0.c
450
if (host->offset == PAGE_SIZE) {
drivers/mmc/host/usdhi6rol0.c
471
if (host->offset + data->blksz > PAGE_SIZE)
drivers/most/configfs.c
162
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->direction);
drivers/most/configfs.c
180
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->datatype);
drivers/most/configfs.c
199
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->device);
drivers/most/configfs.c
214
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->channel);
drivers/most/configfs.c
229
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->comp);
drivers/most/configfs.c
244
return snprintf(page, PAGE_SIZE, "%s\n",
drivers/most/configfs.c
260
return snprintf(page, PAGE_SIZE, "%d\n",
drivers/most/configfs.c
278
return snprintf(page, PAGE_SIZE, "%d\n",
drivers/most/configfs.c
297
return snprintf(page, PAGE_SIZE, "%d\n",
drivers/most/configfs.c
316
return snprintf(page, PAGE_SIZE, "%d\n",
drivers/most/configfs.c
334
return snprintf(page, PAGE_SIZE, "%d\n", to_mdev_link(item)->dbr_size);
drivers/most/core.c
211
return snprintf(buf, PAGE_SIZE, "%d\n",
drivers/most/core.c
222
return snprintf(buf, PAGE_SIZE, "%d\n",
drivers/most/core.c
233
return snprintf(buf, PAGE_SIZE, "%d\n",
drivers/most/core.c
244
return snprintf(buf, PAGE_SIZE, "%d\n",
drivers/most/core.c
254
return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
drivers/most/core.c
263
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
drivers/most/core.c
272
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
drivers/most/core.c
282
return snprintf(buf, PAGE_SIZE, "tx\n");
drivers/most/core.c
284
return snprintf(buf, PAGE_SIZE, "rx\n");
drivers/most/core.c
285
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
drivers/most/core.c
297
return snprintf(buf, PAGE_SIZE, "%s",
drivers/most/core.c
300
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
drivers/most/core.c
309
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
drivers/most/core.c
318
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
drivers/most/core.c
326
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
drivers/most/core.c
398
return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
drivers/most/core.c
409
return snprintf(buf, PAGE_SIZE, "loopback\n");
drivers/most/core.c
411
return snprintf(buf, PAGE_SIZE, "i2c\n");
drivers/most/core.c
413
return snprintf(buf, PAGE_SIZE, "i2s\n");
drivers/most/core.c
415
return snprintf(buf, PAGE_SIZE, "tsi\n");
drivers/most/core.c
417
return snprintf(buf, PAGE_SIZE, "hbi\n");
drivers/most/core.c
419
return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
drivers/most/core.c
421
return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
drivers/most/core.c
423
return snprintf(buf, PAGE_SIZE, "usb\n");
drivers/most/core.c
425
return snprintf(buf, PAGE_SIZE, "pcie\n");
drivers/most/core.c
427
return snprintf(buf, PAGE_SIZE, "unknown\n");
drivers/most/core.c
475
PAGE_SIZE - offs,
drivers/most/core.c
483
PAGE_SIZE - offs,
drivers/most/core.c
521
offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
drivers/mtd/chips/map_absent.c
64
mtd->erasesize = PAGE_SIZE;
drivers/mtd/chips/map_ram.c
83
mtd->erasesize = PAGE_SIZE;
drivers/mtd/devices/block2mtd.c
111
int offset = from & (PAGE_SIZE-1);
drivers/mtd/devices/block2mtd.c
115
if ((offset + len) > PAGE_SIZE)
drivers/mtd/devices/block2mtd.c
116
cpylen = PAGE_SIZE - offset; // multiple pages
drivers/mtd/devices/block2mtd.c
149
if ((offset+len) > PAGE_SIZE)
drivers/mtd/devices/block2mtd.c
150
cpylen = PAGE_SIZE - offset; // multiple pages
drivers/mtd/devices/block2mtd.c
317
dev->mtd.writebufsize = PAGE_SIZE;
drivers/mtd/devices/block2mtd.c
414
size_t erase_size = PAGE_SIZE;
drivers/mtd/devices/block2mtd.c
70
max = page_address(page) + PAGE_SIZE;
drivers/mtd/devices/block2mtd.c
74
memset(page_address(page), 0xff, PAGE_SIZE);
drivers/mtd/devices/ms02-nv.c
197
fixaddr = (addr + MS02NV_RAM + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
drivers/mtd/devices/ms02-nv.c
198
fixsize = (size - (fixaddr - addr)) & ~(PAGE_SIZE - 1);
drivers/mtd/devices/mtdram.c
79
while (len > PAGE_SIZE) {
drivers/mtd/devices/mtdram.c
80
len -= PAGE_SIZE;
drivers/mtd/devices/mtdram.c
81
addr += PAGE_SIZE;
drivers/mtd/devices/phram.c
267
uint64_t erasesize = PAGE_SIZE;
drivers/mtd/devices/phram.c
389
PAGE_SIZE);
drivers/mtd/mtdcore.c
2514
size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
drivers/mtd/mtdcore.c
915
info = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/mtd/mtdcore.c
920
ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
drivers/mtd/mtdcore.c
922
ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
drivers/mtd/mtdswap.c
1065
memset(buf, 0, PAGE_SIZE - 10);
drivers/mtd/mtdswap.c
1071
memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);
drivers/mtd/mtdswap.c
1098
memset(buf, 0x0, PAGE_SIZE);
drivers/mtd/mtdswap.c
1112
ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
drivers/mtd/mtdswap.c
1132
if (retlen != PAGE_SIZE) {
drivers/mtd/mtdswap.c
1307
d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/mtd/mtdswap.c
1358
if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
drivers/mtd/mtdswap.c
1360
"%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
drivers/mtd/mtdswap.c
1364
if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
drivers/mtd/mtdswap.c
1366
" %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
drivers/mtd/mtdswap.c
1381
size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;
drivers/mtd/mtdswap.c
1410
(header ? PAGE_SIZE : 0);
drivers/mtd/mtdswap.c
1475
.blksize = PAGE_SIZE,
drivers/mtd/mtdswap.c
643
ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
drivers/mtd/mtdswap.c
658
if (retlen != PAGE_SIZE) {
drivers/mtd/mtdswap.c
690
ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
drivers/mtd/mtdswap.c
705
if (retlen != PAGE_SIZE) {
drivers/mtd/mtdswap.c
891
mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
1357
this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
1367
this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
drivers/mtd/nand/raw/mxc_nand.c
1702
host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
drivers/mtd/spi-nor/core.c
2032
else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE)
drivers/mtd/spi-nor/core.c
2033
return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE),
drivers/mtd/spi-nor/core.c
2036
tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/mtd/spi-nor/core.c
2052
tmp_len = min_t(size_t, end - start, PAGE_SIZE);
drivers/mtd/spi-nor/core.c
3587
nor->bouncebuf_size = PAGE_SIZE;
drivers/mtd/spi-nor/core.c
3759
if (nor->params->page_size > PAGE_SIZE) {
drivers/mtd/spi-nor/sfdp.c
1512
if (sfdp_size > PAGE_SIZE) {
drivers/mtd/spi-nor/sfdp.c
1515
sfdp_size = PAGE_SIZE;
drivers/net/bonding/bond_sysfs.c
176
if (res > (PAGE_SIZE - IFNAMSIZ)) {
drivers/net/bonding/bond_sysfs.c
178
if ((PAGE_SIZE - res) > 10)
drivers/net/bonding/bond_sysfs.c
179
res = PAGE_SIZE - 10;
drivers/net/bonding/bond_sysfs.c
46
if (res > (PAGE_SIZE - IFNAMSIZ)) {
drivers/net/bonding/bond_sysfs.c
48
if ((PAGE_SIZE - res) > 10)
drivers/net/bonding/bond_sysfs.c
49
res = PAGE_SIZE - 10;
drivers/net/bonding/bond_sysfs.c
629
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
drivers/net/bonding/bond_sysfs.c
631
if ((PAGE_SIZE - res) > 10)
drivers/net/bonding/bond_sysfs.c
632
res = PAGE_SIZE - 10;
drivers/net/can/esd/esd_402_pci-core.c
33
#define PCI402_DMA_SIZE ALIGN(0x10000, PAGE_SIZE)
drivers/net/ethernet/3com/typhoon.c
1354
dpage = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dpage_dma, GFP_ATOMIC);
drivers/net/ethernet/3com/typhoon.c
1404
len = min_t(u32, section_len, PAGE_SIZE);
drivers/net/ethernet/3com/typhoon.c
1459
dma_free_coherent(&pdev->dev, PAGE_SIZE, dpage, dpage_dma);
drivers/net/ethernet/8390/etherh.c
692
eh->memc = ecardm_iomap(ec, ECARD_RES_MEMC, 0, PAGE_SIZE);
drivers/net/ethernet/8390/etherh.c
700
eh->ioc_fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, PAGE_SIZE);
drivers/net/ethernet/aeroflex/greth.h
68
#define GRETH_RX_BUF_PPGAE (PAGE_SIZE/GRETH_RX_BUF_SIZE)
drivers/net/ethernet/aeroflex/greth.h
69
#define GRETH_TX_BUF_PPGAE (PAGE_SIZE/GRETH_TX_BUF_SIZE)
drivers/net/ethernet/airoha/airoha_eth.c
744
.max_len = PAGE_SIZE,
drivers/net/ethernet/airoha/airoha_eth.c
753
q->buf_size = PAGE_SIZE / 2;
drivers/net/ethernet/amazon/ena/ena_netdev.h
41
#if PAGE_SIZE > SZ_16K
drivers/net/ethernet/amazon/ena/ena_netdev.h
44
#define ENA_PAGE_SIZE PAGE_SIZE
drivers/net/ethernet/amd/pds_core/main.c
139
(u64)page_num << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/amd/sun3lance.c
314
ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
209
PAGE_SIZE << order, DMA_FROM_DEVICE);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
216
pa->pages_len = PAGE_SIZE << order;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
192
rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
103
hw_len = xgene_enet_set_data_len(PAGE_SIZE);
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
206
dma_unmap_page(dev, dma_addr, PAGE_SIZE,
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
634
dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
739
dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
743
frag_size, PAGE_SIZE);
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
97
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/apple/bmac.c
37
#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
drivers/net/ethernet/apple/bmac.c
38
#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
196
self->page_order = fls(self->frame_max / PAGE_SIZE +
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
197
(self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
43
unsigned int len = PAGE_SIZE << rxpage->order;
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
64
daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
99
(PAGE_SIZE << order)) {
drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
250
#define RING_BUFFER_SIZE (PAGE_SIZE * NUM_4K_BUFFERS)
drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
304
memset(ctx->resp, 0, PAGE_SIZE);
drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
64
#define BNGE_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
65
#define BNGE_HWRM_RESP_RESERVED PAGE_SIZE
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1051
if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
2616
if (rx_space > PAGE_SIZE) {
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
329
return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
564
const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
565
const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
575
pp.max_len = PAGE_SIZE;
drivers/net/ethernet/broadcom/bnx2.c
2732
mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
drivers/net/ethernet/broadcom/bnx2.c
2756
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/broadcom/bnx2.c
3069
frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
drivers/net/ethernet/broadcom/bnx2.c
3112
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/broadcom/bnx2.c
3116
skb->truesize += PAGE_SIZE;
drivers/net/ethernet/broadcom/bnx2.c
5247
PAGE_SIZE, bp->rx_max_pg_ring);
drivers/net/ethernet/broadcom/bnx2.c
5248
val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
drivers/net/ethernet/broadcom/bnx2.c
5384
if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2049
if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
579
if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
255
#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
268
(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
270
#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
271
#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
272
#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
385
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
12523
((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
14970
bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
2622
((PAGE_SIZE - sizeof(struct bnx2x_mcast_elem_group)) / \
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3850
const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3863
pp.max_len = PAGE_SIZE << pp.order;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3877
pp.max_len = PAGE_SIZE;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4786
if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4817
rx_space = PAGE_SIZE;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4818
rx_size = PAGE_SIZE -
drivers/net/ethernet/broadcom/bnxt/bnxt.c
905
return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
915
if (rxr->rx_page_size < PAGE_SIZE) {
drivers/net/ethernet/broadcom/bnxt/bnxt.c
936
if (rxr->rx_page_size < PAGE_SIZE) {
drivers/net/ethernet/broadcom/bnxt/bnxt.h
775
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
3969
if (!kmem && modify_len > PAGE_SIZE)
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
476
memset(ctx->resp, 0, PAGE_SIZE);
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
68
#define BNXT_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
69
#define BNXT_HWRM_RESP_RESERVED PAGE_SIZE
drivers/net/ethernet/broadcom/tg3.c
6676
tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
drivers/net/ethernet/broadcom/tg3.c
6729
if (skb_size <= PAGE_SIZE) {
drivers/net/ethernet/broadcom/tg3.c
6742
tg3_frag_free(skb_size <= PAGE_SIZE, data);
drivers/net/ethernet/brocade/bna/bna_enet.c
1871
bfa_msgq_meminfo()), PAGE_SIZE);
drivers/net/ethernet/brocade/bna/bna_enet.c
1878
ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
drivers/net/ethernet/brocade/bna/bna_enet.c
1892
PAGE_SIZE);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1268
(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1880
kva += PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1887
dma += PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1918
kva += PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1925
dma += PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2150
cq_size = ALIGN(cq_size, PAGE_SIZE);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2155
dq_size = ALIGN(dq_size, PAGE_SIZE);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2161
hq_size = ALIGN(hq_size, PAGE_SIZE);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2193
mem_info->len = PAGE_SIZE * cpage_count;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2211
mem_info->len = PAGE_SIZE * dpage_count;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2229
mem_info->len = PAGE_SIZE * hpage_count;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2299
PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2302
PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2305
PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2397
bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2426
bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
2464
bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
3185
kva += PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
3192
dma += PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
3329
q_size = ALIGN(q_size, PAGE_SIZE);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
3347
mem_info->len = PAGE_SIZE * page_count;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
3377
PAGE_SIZE;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
3479
bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
drivers/net/ethernet/brocade/bna/bnad.c
283
PAGE_SIZE << order : 2048;
drivers/net/ethernet/brocade/bna/bnad.c
287
BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
drivers/net/ethernet/brocade/bna/bnad.c
353
alloc_size = PAGE_SIZE << unmap_q->alloc_order;
drivers/net/ethernet/cavium/liquidio/octeon_network.h
287
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/cavium/liquidio/octeon_network.h
344
pg_info->dma, (PAGE_SIZE << 0),
drivers/net/ethernet/cavium/liquidio/octeon_network.h
371
pg_info->dma, (PAGE_SIZE << 0),
drivers/net/ethernet/cavium/liquidio/octeon_network.h
481
buf_ptr, (PAGE_SIZE << 0),
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
185
((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
285
rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
drivers/net/ethernet/chelsio/cxgb/sge.c
1117
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1169
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1231
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
drivers/net/ethernet/chelsio/cxgb3/sge.c
449
q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
drivers/net/ethernet/chelsio/cxgb3/sge.c
466
if (q->pg_chunk.offset == (PAGE_SIZE << order))
drivers/net/ethernet/chelsio/cxgb3/sge.c
64
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
drivers/net/ethernet/chelsio/cxgb3/sge.c
65
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
drivers/net/ethernet/chelsio/cxgb3/sge.c
66
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
drivers/net/ethernet/chelsio/cxgb3/sge.c
67
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3976
#define HMA_PAGE_SIZE PAGE_SIZE
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4282
t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
6722
num_seg = PAGE_SIZE / SEGMENT_SIZE;
drivers/net/ethernet/chelsio/cxgb4/sge.c
407
buf_size = PAGE_SIZE;
drivers/net/ethernet/chelsio/cxgb4/sge.c
411
buf_size = PAGE_SIZE << s->fl_pg_order;
drivers/net/ethernet/chelsio/cxgb4/sge.c
5093
if (fl_small_pg != PAGE_SIZE ||
drivers/net/ethernet/chelsio/cxgb4/sge.c
568
PAGE_SIZE << s->fl_pg_order,
drivers/net/ethernet/chelsio/cxgb4/sge.c
598
mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2648
if (fl_small_pg != PAGE_SIZE ||
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
460
? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
643
poison_buf(page, PAGE_SIZE << s->fl_pg_order);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
646
PAGE_SIZE << s->fl_pg_order,
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
682
poison_buf(page, PAGE_SIZE);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
684
dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
1112
int pg_size = PAGE_SIZE;
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
1131
pg_size = PAGE_SIZE;
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
1148
pg_size = PAGE_SIZE;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
501
ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
504
ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE,
drivers/net/ethernet/cisco/enic/enic_main.c
1691
.max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
drivers/net/ethernet/cortina/gemini.c
1491
gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
drivers/net/ethernet/cortina/gemini.c
786
gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
drivers/net/ethernet/cortina/gemini.c
816
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/emulex/benet/be_main.c
3142
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
drivers/net/ethernet/engleder/tsnep.h
25
#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
drivers/net/ethernet/engleder/tsnep_main.c
1386
skb = napi_build_skb(page_address(page), PAGE_SIZE);
drivers/net/ethernet/engleder/tsnep_main.c
1457
xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
drivers/net/ethernet/engleder/tsnep_main.c
287
dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
drivers/net/ethernet/engleder/tsnep_main.c
305
dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
drivers/net/ethernet/engleder/tsnep_main.c
36
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
drivers/net/ethernet/engleder/tsnep_main.c
975
dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
drivers/net/ethernet/engleder/tsnep_main.c
994
dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
drivers/net/ethernet/faraday/ftmac100.c
39
#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
drivers/net/ethernet/faraday/ftmac100.c
471
skb->truesize += PAGE_SIZE;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1884
(PAGE_SIZE - 1)) +
drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
107
bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
259
(PAGE_SIZE - 1)) +
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
920
if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
100
#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
539
#define DPAA2_ETH_SG_ENTRIES_MAX (PAGE_SIZE / sizeof(struct scatterlist))
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
50
#define DPAA2_SWITCH_RX_BUF_RAW_SIZE PAGE_SIZE
drivers/net/ethernet/freescale/enetc/enetc.c
1209
dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
drivers/net/ethernet/freescale/enetc/enetc.c
128
tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
drivers/net/ethernet/freescale/enetc/enetc.c
1333
addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
drivers/net/ethernet/freescale/enetc/enetc.c
1491
dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
drivers/net/ethernet/freescale/enetc/enetc.c
2449
dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
drivers/net/ethernet/freescale/enetc/enetc.h
79
#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
drivers/net/ethernet/freescale/fec.h
336
#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_DRV_RESERVE_SPACE)
drivers/net/ethernet/freescale/fec.h
337
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
drivers/net/ethernet/freescale/fec.h
340
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
drivers/net/ethernet/freescale/fec_main.c
1846
PAGE_SIZE << fep->pagepool_order);
drivers/net/ethernet/freescale/fec_main.c
2049
xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
drivers/net/ethernet/freescale/fec_main.c
4880
fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
drivers/net/ethernet/freescale/gianfar.c
1101
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/freescale/gianfar.c
1210
addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/freescale/gianfar.c
2388
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/fungible/funcore/fun_queue.c
178
dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE,
drivers/net/ethernet/fungible/funcore/fun_queue.c
199
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/fungible/funcore/fun_queue.c
219
dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE,
drivers/net/ethernet/fungible/funcore/fun_queue.c
269
fragsize = min_t(unsigned int, PAGE_SIZE, remaining);
drivers/net/ethernet/fungible/funeth/funeth.h
22
#define RQ_DEPTH (512U / (PAGE_SIZE / 4096))
drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
569
kring->rx_buf_len = PAGE_SIZE;
drivers/net/ethernet/fungible/funeth/funeth_main.c
1116
(PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
110
rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
130
dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
258
if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
270
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
318
PAGE_SIZE - q->buf_offset);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
333
q->buf_offset = PAGE_SIZE;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
58
dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
78
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
88
dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
drivers/net/ethernet/google/gve/gve.h
51
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
drivers/net/ethernet/google/gve/gve.h
91
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
drivers/net/ethernet/google/gve/gve_adminq.c
1186
.page_size = cpu_to_be64(PAGE_SIZE),
drivers/net/ethernet/google/gve/gve_adminq.c
338
iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
175
if (data_buffer_size * 2 > PAGE_SIZE)
drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
192
buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
258
.max_len = PAGE_SIZE,
drivers/net/ethernet/google/gve/gve_main.c
1076
*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
drivers/net/ethernet/google/gve/gve_main.c
1126
dma_unmap_page(dev, dma, PAGE_SIZE, dir);
drivers/net/ethernet/google/gve/gve_rx.c
203
dma_addr_t addr = i * PAGE_SIZE;
drivers/net/ethernet/google/gve/gve_rx.c
581
PAGE_SIZE,
drivers/net/ethernet/google/gve/gve_rx.c
853
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/google/gve/gve_rx_dqo.c
536
0, buf_len, PAGE_SIZE);
drivers/net/ethernet/google/gve/gve_tx.c
49
fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
drivers/net/ethernet/google/gve/gve_tx.c
553
u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
drivers/net/ethernet/google/gve/gve_tx.c
554
u64 first_page = iov_offset / PAGE_SIZE;
drivers/net/ethernet/google/gve/gve_tx.c
558
dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
37
#define hbg_get_page_size(ring) (PAGE_SIZE << hbg_get_page_order((ring)))
drivers/net/ethernet/hisilicon/hns/hnae.h
340
#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
drivers/net/ethernet/hisilicon/hns/hns_enet.c
442
twobufs = ((PAGE_SIZE < 8192) &&
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1077
PAGE_SIZE << order, DMA_TO_DEVICE);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1085
tx_spare->len = PAGE_SIZE << order;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5018
(PAGE_SIZE << hns3_page_order(ring)),
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5023
.max_len = PAGE_SIZE << hns3_page_order(ring),
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
686
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
687
if (ring->buf_size > (PAGE_SIZE / 2))
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
693
#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
499
HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
503
HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
515
HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
518
HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
422
AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
429
CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) |
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
437
PAGE_SIZE;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
441
pp_params.max_len = PAGE_SIZE;
drivers/net/ethernet/i825xx/sun3_82586.c
58
#define SUN3_82586_TOTAL_SIZE PAGE_SIZE
drivers/net/ethernet/ibm/ehea/ehea.h
135
u8 entries[PAGE_SIZE];
drivers/net/ethernet/ibm/ehea/ehea_phyp.h
53
epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
43
int pages_per_kpage = PAGE_SIZE / pagesize;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
46
if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
48
(int)PAGE_SIZE, (int)pagesize);
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
621
start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
622
end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
688
if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
707
pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
710
pfn += (EHEA_SECTSIZE / PAGE_SIZE);
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
98
pages_per_kpage = PAGE_SIZE / queue->pagesize;
drivers/net/ethernet/ibm/ibmvnic.c
3964
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
drivers/net/ethernet/ibm/ibmvnic.c
3974
4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
drivers/net/ethernet/ibm/ibmvnic.c
4033
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
drivers/net/ethernet/ibm/ibmvnic.c
4060
scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
drivers/net/ethernet/ibm/ibmvnic.c
4068
4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
drivers/net/ethernet/ibm/ibmvnic.c
4081
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
drivers/net/ethernet/ibm/ibmvnic.c
4108
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
drivers/net/ethernet/ibm/ibmvnic.c
4533
int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
drivers/net/ethernet/ibm/ibmvnic.c
6220
memset(crq->msgs, 0, PAGE_SIZE);
drivers/net/ethernet/ibm/ibmvnic.c
6226
crq->msg_token, PAGE_SIZE);
drivers/net/ethernet/ibm/ibmvnic.c
6253
dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
drivers/net/ethernet/ibm/ibmvnic.c
6276
crq->size = PAGE_SIZE / sizeof(*crq->msgs);
drivers/net/ethernet/ibm/ibmvnic.c
6277
crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
drivers/net/ethernet/ibm/ibmvnic.c
6283
crq->msg_token, PAGE_SIZE);
drivers/net/ethernet/ibm/ibmvnic.c
6331
dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/net/ethernet/ibm/ibmvnic.c
6712
ret += PAGE_SIZE; /* the crq message queue */
drivers/net/ethernet/ibm/ibmvnic.c
6716
ret += 4 * PAGE_SIZE; /* the scrq message queue */
drivers/net/ethernet/ibm/ibmvnic.h
79
#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
drivers/net/ethernet/intel/e1000/e1000_main.c
3561
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
drivers/net/ethernet/intel/e1000/e1000_main.c
3563
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
drivers/net/ethernet/intel/e1000/e1000_main.c
3564
adapter->rx_buffer_len = PAGE_SIZE;
drivers/net/ethernet/intel/e1000/e1000_main.c
3989
skb->truesize += PAGE_SIZE;
drivers/net/ethernet/intel/e1000e/netdev.c
1402
PAGE_SIZE,
drivers/net/ethernet/intel/e1000e/netdev.c
1408
PAGE_SIZE,
drivers/net/ethernet/intel/e1000e/netdev.c
1428
dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
drivers/net/ethernet/intel/e1000e/netdev.c
1435
skb->truesize += PAGE_SIZE;
drivers/net/ethernet/intel/e1000e/netdev.c
1495
skb->truesize += PAGE_SIZE;
drivers/net/ethernet/intel/e1000e/netdev.c
1548
dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
drivers/net/ethernet/intel/e1000e/netdev.c
1690
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/intel/e1000e/netdev.c
1712
dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
drivers/net/ethernet/intel/e1000e/netdev.c
187
PAGE_SIZE, true);
drivers/net/ethernet/intel/e1000e/netdev.c
3017
(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
drivers/net/ethernet/intel/e1000e/netdev.c
3125
if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
drivers/net/ethernet/intel/e1000e/netdev.c
3140
psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
drivers/net/ethernet/intel/e1000e/netdev.c
3143
psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
drivers/net/ethernet/intel/e1000e/netdev.c
3146
psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
drivers/net/ethernet/intel/e1000e/netdev.c
5933
DIV_ROUND_UP(PAGE_SIZE,
drivers/net/ethernet/intel/e1000e/netdev.c
751
0, PAGE_SIZE,
drivers/net/ethernet/intel/e1000e/netdev.c
865
PAGE_SIZE,
drivers/net/ethernet/intel/fm10k/fm10k_main.c
212
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/fm10k/fm10k_main.c
223
if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
drivers/net/ethernet/intel/fm10k/fm10k_main.c
257
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/fm10k/fm10k_main.c
346
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/intel/fm10k/fm10k_main.c
92
dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
267
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/intel/i40e/i40e_main.c
2926
return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
drivers/net/ethernet/intel/i40e/i40e_main.c
3750
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/i40e/i40e_txrx.c
1604
#if (PAGE_SIZE >= 8192)
drivers/net/ethernet/intel/i40e/i40e_txrx.c
1932
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/i40e/i40e_txrx.c
1940
(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
drivers/net/ethernet/intel/i40e/i40e_txrx.c
1967
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/i40e/i40e_txrx.c
1989
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/i40e/i40e_txrx.c
2519
#if (PAGE_SIZE > 4096)
drivers/net/ethernet/intel/i40e/i40e_txrx.h
131
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/i40e/i40e_txrx.h
139
page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
drivers/net/ethernet/intel/i40e/i40e_txrx.h
452
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/i40e/i40e_txrx.h
453
if (ring->rx_buf_len > (PAGE_SIZE / 2))
drivers/net/ethernet/intel/i40e/i40e_txrx.h
459
#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
drivers/net/ethernet/intel/ice/ice_gnss.c
125
data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
161
PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
180
PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
224
PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
257
PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
426
PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
491
PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
576
PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
603
PAGE_SIZE);
drivers/net/ethernet/intel/ice/ice_txrx.c
622
PAGE_SIZE);
drivers/net/ethernet/intel/igb/igb.h
164
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb.h
173
page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
drivers/net/ethernet/intel/igb/igb.h
287
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
drivers/net/ethernet/intel/igb/igb.h
414
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb.h
426
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb.h
433
#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
drivers/net/ethernet/intel/igb/igb_main.c
4846
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
4859
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
5383
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
5407
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8558
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8564
(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
drivers/net/ethernet/intel/igb/igb_main.c
8596
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8605
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8617
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8652
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8669
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8698
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8756
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8772
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
8944
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
9030
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igb/igb_main.c
9081
#if (PAGE_SIZE > 4096)
drivers/net/ethernet/intel/igbvf/netdev.c
165
buffer_info->page_offset ^= PAGE_SIZE / 2;
drivers/net/ethernet/intel/igbvf/netdev.c
170
PAGE_SIZE / 2,
drivers/net/ethernet/intel/igbvf/netdev.c
2412
#if (PAGE_SIZE / 2) > 16384
drivers/net/ethernet/intel/igbvf/netdev.c
2415
adapter->rx_buffer_len = PAGE_SIZE / 2;
drivers/net/ethernet/intel/igbvf/netdev.c
307
PAGE_SIZE / 2,
drivers/net/ethernet/intel/igbvf/netdev.c
316
if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
drivers/net/ethernet/intel/igbvf/netdev.c
324
skb->truesize += PAGE_SIZE / 2;
drivers/net/ethernet/intel/igbvf/netdev.c
586
PAGE_SIZE / 2,
drivers/net/ethernet/intel/igc/igc.h
520
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igc/igc.h
583
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
drivers/net/ethernet/intel/igc/igc.h
737
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igc/igc.h
749
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igc/igc.h
792
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
drivers/net/ethernet/intel/igc/igc_main.c
1894
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igc/igc_main.c
1916
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igc/igc_main.c
1928
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igc/igc_main.c
1955
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igc/igc_main.c
2088
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/igc/igc_main.c
2094
(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
drivers/net/ethernet/intel/igc/igc_main.c
4152
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe.h
458
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe.h
467
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe.h
473
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
drivers/net/ethernet/intel/ixgbe/ixgbe.h
50
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe.h
90
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe.h
99
page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
618
IXGBE_FCPTR_ALIGN, PAGE_SIZE);
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2160
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2171
(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2208
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2217
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2234
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2300
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2339
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2359
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2391
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2460
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2476
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2513
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2552
#if (PAGE_SIZE > 4096)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4173
srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4550
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4750
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
7346
if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
156
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
188
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
200
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
207
#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
48
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1092
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1109
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1129
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1166
#if (PAGE_SIZE > 4096)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1959
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1988
if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
4168
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
4185
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
793
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
799
(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
832
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
841
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
855
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
904
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
930
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
958
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/intel/libeth/rx.c
122
pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER;
drivers/net/ethernet/intel/libeth/rx.c
50
PAGE_SIZE << LIBETH_RX_PAGE_ORDER);
drivers/net/ethernet/intel/libie/fwlog.c
1116
memset(log->data, 0, PAGE_SIZE);
drivers/net/ethernet/marvell/mv643xx_eth.c
173
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
drivers/net/ethernet/marvell/mvneta.c
2406
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
drivers/net/ethernet/marvell/mvneta.c
2438
xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
drivers/net/ethernet/marvell/mvneta.c
2634
skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
drivers/net/ethernet/marvell/mvneta.c
3380
PAGE_SIZE);
drivers/net/ethernet/marvell/mvneta.c
3480
mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
drivers/net/ethernet/marvell/mvneta.c
352
#define MVNETA_TSO_PAGE_SIZE (2 * PAGE_SIZE)
drivers/net/ethernet/marvell/mvneta.c
378
#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
851
#define MVPP2_MAX_RX_BUF_SIZE (PAGE_SIZE - MVPP2_SKB_SHINFO_SIZE - MVPP2_SKB_HEADROOM)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
366
if (likely(pool->frag_size <= PAGE_SIZE))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
377
else if (likely(pool->frag_size <= PAGE_SIZE))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3968
if (bm_pool->frag_size > PAGE_SIZE)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3981
xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
drivers/net/ethernet/marvell/octeon_ep/octep_config.h
34
#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
216
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
369
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
456
skb = build_skb((void *)resp_hw, PAGE_SIZE);
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
47
PAGE_SIZE,
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
65
dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
97
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
33
#define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
217
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
386
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
415
skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
426
skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
441
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
47
PAGE_SIZE,
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
65
dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
97
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
2550
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
965
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
384
sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE);
drivers/net/ethernet/marvell/pxa168_eth.c
173
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
drivers/net/ethernet/marvell/sky2.c
1203
sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
drivers/net/ethernet/marvell/sky2.c
1470
skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
drivers/net/ethernet/marvell/sky2.c
2507
size = min(length, (unsigned) PAGE_SIZE);
drivers/net/ethernet/marvell/sky2.c
2511
skb->truesize += PAGE_SIZE;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
1893
eth->rx_napi.napi_id, PAGE_SIZE);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2260
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2272
skb = build_skb(data, PAGE_SIZE);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2287
if (ring->frag_size <= PAGE_SIZE)
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2829
if (ring->frag_size <= PAGE_SIZE)
drivers/net/ethernet/mediatek/mtk_eth_soc.h
62
#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
drivers/net/ethernet/mediatek/mtk_wed.c
29
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
drivers/net/ethernet/mediatek/mtk_wed.c
30
#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
drivers/net/ethernet/mediatek/mtk_wed.c
33
#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
drivers/net/ethernet/mediatek/mtk_wed.c
684
page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
drivers/net/ethernet/mediatek/mtk_wed.c
693
dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
drivers/net/ethernet/mediatek/mtk_wed.c
730
dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
drivers/net/ethernet/mediatek/mtk_wed.c
757
dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE,
drivers/net/ethernet/mediatek/mtk_wed.c
807
page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
drivers/net/ethernet/mediatek/mtk_wed.c
816
dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
drivers/net/ethernet/mediatek/mtk_wed.c
827
dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
drivers/net/ethernet/mediatek/mtk_wed.c
877
dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/alloc.c
594
buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/alloc.c
604
PAGE_SIZE, &t, GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx4/alloc.c
632
PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/alloc.c
651
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/alloc.c
741
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/cmd.c
2482
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/cmd.c
2516
PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/cmd.c
2604
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/cq.c
292
int entries_per_copy = PAGE_SIZE / cqe_size;
drivers/net/ethernet/mellanox/mlx4/cq.c
297
init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx4/cq.c
305
memset(init_ents, 0xcc, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/cq.c
309
err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
drivers/net/ethernet/mellanox/mlx4/cq.c
314
buf += PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/en_main.c
289
PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
56
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
drivers/net/ethernet/mellanox/mlx4/en_rx.c
1035
priv->frag_info[0].frag_stride = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/en_rx.c
1043
if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
drivers/net/ethernet/mellanox/mlx4/en_rx.c
1044
frag_size_max = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/en_rx.c
1058
nb = PAGE_SIZE / frag_stride;
drivers/net/ethernet/mellanox/mlx4/en_rx.c
1059
pad = (PAGE_SIZE - nb * frag_stride) / nb;
drivers/net/ethernet/mellanox/mlx4/en_rx.c
262
pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/en_rx.c
464
if (frag_info->frag_stride == PAGE_SIZE / 2) {
drivers/net/ethernet/mellanox/mlx4/en_rx.c
467
frags->page_offset ^= PAGE_SIZE / 2;
drivers/net/ethernet/mellanox/mlx4/en_rx.c
480
release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/en_tx.c
1127
tx_info->map0_byte_count = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/eq.c
1009
PAGE_SIZE, &t,
drivers/net/ethernet/mellanox/mlx4/eq.c
1073
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/eq.c
1096
int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/eq.c
1107
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx4/eq.c
118
return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/eq.c
988
npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/fw.c
1732
ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
drivers/net/ethernet/mellanox/mlx4/fw.c
2591
*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
drivers/net/ethernet/mellanox/mlx4/fw.c
949
if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
drivers/net/ethernet/mellanox/mlx4/icm.c
110
sg_set_page(mem, page, PAGE_SIZE << order, 0);
drivers/net/ethernet/mellanox/mlx4/icm.c
117
buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
drivers/net/ethernet/mellanox/mlx4/icm.c
123
dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
drivers/net/ethernet/mellanox/mlx4/icm.c
128
buf->size = PAGE_SIZE << order;
drivers/net/ethernet/mellanox/mlx4/main.c
2739
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/main.c
468
if (dev_cap->min_page_sz > PAGE_SIZE) {
drivers/net/ethernet/mellanox/mlx4/main.c
470
dev_cap->min_page_sz, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/main.c
501
dev->caps.uar_page_size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/main.c
502
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/main.c
953
if (page_size > PAGE_SIZE) {
drivers/net/ethernet/mellanox/mlx4/main.c
955
page_size, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/main.c
978
dev->caps.uar_page_size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx4/mr.c
727
mtts_per_page = PAGE_SIZE / sizeof(u64);
drivers/net/ethernet/mellanox/mlx4/pd.c
202
uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/pd.c
210
PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/profile.c
141
profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
108
dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
122
int frag_sz = min_t(int, size, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
135
u32 db_per_page = PAGE_SIZE / cache_line_size();
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
150
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
164
u32 db_per_page = PAGE_SIZE / cache_line_size();
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
217
u32 db_per_page = PAGE_SIZE / cache_line_size();
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
224
dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
80
buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
88
int frag_sz = min_t(int, size, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
197
mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
198
key.size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en.h
122
(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
drivers/net/ethernet/mellanox/mlx5/core/en.h
86
(PAGE_SIZE >> MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
drivers/net/ethernet/mellanox/mlx5/core/en/health.c
229
key.size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
1164
useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
1166
wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
276
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
289
return params->xdp_prog && sz < PAGE_SIZE ? PAGE_SIZE : sz;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
318
if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
626
return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
646
if (sum_frag_strides > PAGE_SIZE)
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
647
sum_frag_strides = ALIGN(sum_frag_strides, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
667
PP_ALLOC_CACHE_REFILL * PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
701
info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
712
frag_size_max = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
757
if (frag_size_max == PAGE_SIZE) {
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
764
WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
790
*xdp_frag_size = info->num_frags > 1 && params->xdp_prog ? PAGE_SIZE : 0;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
502
key.size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
537
key.size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
572
key.size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
435
key.size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
487
key.size = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
543
WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
34
if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
36
MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
540
if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
566
WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
644
PAGE_SIZE, rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
654
dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
698
dma_unmap_page(rq->pdev, info->addr, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
718
addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
796
nentries = hd_buf_size / PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
799
hd_buf_size, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
969
pp_params.max_len = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1851
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1926
if (unlikely(frag_offset >= PAGE_SIZE)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1928
frag_offset -= PAGE_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1940
min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1977
truesize -= (nr_frags_free - 1) * PAGE_SIZE +
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2169
if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2172
return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2181
u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
275
#define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
drivers/net/ethernet/mellanox/mlx5/core/main.c
632
if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
267
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
306
addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
322
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
330
dma_unmap_page(device, zero_addr, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
69
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/uar.c
137
up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
143
up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/wq.c
132
if (rq_byte_size < PAGE_SIZE) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
1008
pp_params.max_len = PAGE_SIZE;
drivers/net/ethernet/mellanox/mlxsw/pci.c
405
linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
drivers/net/ethernet/mellanox/mlxsw/pci.c
407
PAGE_SIZE -
drivers/net/ethernet/mellanox/mlxsw/pci.c
417
skb = napi_build_skb(data, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlxsw/pci.c
435
frag_size = min(byte_count, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlxsw/pci.c
438
page, 0, frag_size, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlxsw/pci.c
458
mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlxsw/pci.c
476
PAGE_SIZE);
drivers/net/ethernet/meta/fbnic/fbnic_csr.h
127
(PAGE_SIZE / FBNIC_BD_FRAG_SIZE)
drivers/net/ethernet/meta/fbnic/fbnic_fw.c
132
addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
drivers/net/ethernet/meta/fbnic/fbnic_fw.c
1618
if (length > PAGE_SIZE) {
drivers/net/ethernet/meta/fbnic/fbnic_fw.c
164
PAGE_SIZE, direction);
drivers/net/ethernet/meta/fbnic/fbnic_fw.c
189
#define FBNIC_RX_PAGE_SIZE min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
178
int attr_max_len = PAGE_SIZE - sizeof(*msg);
drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
295
int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
364
if (offset_in_page(attr) + len > PAGE_SIZE - sizeof(*attr))
drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
525
if (le16_to_cpu(msg->hdr.len) > PAGE_SIZE / sizeof(u32))
drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
55
int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
97
int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
83
#define TLV_MAX_DATA ((PAGE_SIZE - 512) & 0xFFFF)
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
1563
.max_len = PAGE_SIZE,
drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
103
#define FBNIC_PAGECNT_BIAS_MAX PAGE_SIZE
drivers/net/ethernet/microchip/fdma/fdma_api.c
133
return ALIGN(sizeof(struct fdma_dcb) * fdma->n_dcbs, PAGE_SIZE);
drivers/net/ethernet/microchip/fdma/fdma_api.c
144
PAGE_SIZE);
drivers/net/ethernet/microchip/lan743x_main.c
2131
PAGE_SIZE);
drivers/net/ethernet/microchip/lan743x_main.c
2677
PAGE_SIZE);
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
834
lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
956
lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
966
lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
82
xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,
drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
215
fdma->db_size = PAGE_SIZE;
drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
239
fdma->db_size = PAGE_SIZE;
drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
285
fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
309
fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
drivers/net/ethernet/microsoft/mana/mana_bpf.c
93
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2628
pprm.max_len = PAGE_SIZE;
drivers/net/ethernet/microsoft/mana/mana_en.c
758
if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
drivers/net/ethernet/microsoft/mana/mana_en.c
761
*alloc_size = PAGE_SIZE;
drivers/net/ethernet/microsoft/mana/mana_en.c
775
*alloc_size = PAGE_SIZE << get_order(*alloc_size);
drivers/net/ethernet/microsoft/mana/mana_en.c
787
*frag_count = PAGE_SIZE / buf_size;
drivers/net/ethernet/mscc/ocelot_fdma.c
132
mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
drivers/net/ethernet/mscc/ocelot_fdma.c
343
dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
drivers/net/ethernet/mscc/ocelot_fdma.c
711
dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
drivers/net/ethernet/mscc/ocelot_fdma.h
56
#define OCELOT_FDMA_RX_SIZE (PAGE_SIZE / 2)
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
100
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
865
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
913
dma_unmap_page(&mgp->pdev->dev, dmatest_bus, PAGE_SIZE,
drivers/net/ethernet/natsemi/ns83820.c
1941
dev->base = ioremap(addr, PAGE_SIZE);
drivers/net/ethernet/netronome/nfp/nfd3/dp.c
919
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
drivers/net/ethernet/netronome/nfp/nfd3/dp.c
920
xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
drivers/net/ethernet/netronome/nfp/nfdk/dp.c
1041
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
drivers/net/ethernet/netronome/nfp/nfdk/dp.c
1042
xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1599
if (dp->fl_bufsz > PAGE_SIZE) {
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
104
(u64)page_num << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
281
PAGE_SIZE) * sizeof(long);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
308
(length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
361
idev->cmb_npages = bar->len / PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
922
*pgaddr = idev->phy_cmb_expdb64_pages + idx * PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
927
*pgaddr = idev->phy_cmb_expdb128_pages + idx * PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
932
*pgaddr = idev->phy_cmb_expdb256_pages + idx * PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
937
*pgaddr = idev->phy_cmb_expdb512_pages + idx * PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
941
*pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
947
nonexpdb_pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
950
ioremap_wc(nonexpdb_pgaddr + i * PAGE_SIZE, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
955
memset_io(nonexpdb_pgptr, 0, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_dev.h
195
#define IONIC_PAGE_SIZE MIN(PAGE_SIZE, IONIC_MAX_BUF_LEN)
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
590
pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
601
pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
604
pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3298
lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
597
.max_len = PAGE_SIZE,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
640
q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
641
new->q_size = PAGE_SIZE + q_size +
drivers/net/ethernet/pensando/ionic/ionic_lif.c
642
ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
650
new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
651
new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
654
new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
655
new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
659
new->q_size = PAGE_SIZE + (num_descs * desc_size);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
667
new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
668
new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
673
new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
698
new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
706
new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
707
new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
712
new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
720
new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
721
new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_main.c
725
idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
1157
*mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
1159
addr = *mem_ptr + (off & (PAGE_SIZE - 1));
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
1389
mem_ptr = ioremap(mem_base, PAGE_SIZE);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
1395
addr = mem_ptr + (start & (PAGE_SIZE-1));
drivers/net/ethernet/qlogic/qed/qed_dev.c
2722
dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
drivers/net/ethernet/qlogic/qed/qed_dev.c
2775
norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
drivers/net/ethernet/qlogic/qed/qed_dev.c
2829
"disabled" : "enabled", PAGE_SIZE);
drivers/net/ethernet/qlogic/qed/qed_hw.c
860
u32 size = PAGE_SIZE / 2, val;
drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
85
p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
drivers/net/ethernet/qlogic/qed/qed_rdma.c
478
dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
drivers/net/ethernet/qlogic/qed/qed_rdma.c
479
dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1037
if (new_mtu > PAGE_SIZE)
drivers/net/ethernet/qlogic/qede/qede_filter.c
918
if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
drivers/net/ethernet/qlogic/qede/qede_fp.c
1123
dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1147
dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1207
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1210
rxq->rx_headroom, cur_size, PAGE_SIZE);
drivers/net/ethernet/qlogic/qede/qede_fp.c
422
dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
drivers/net/ethernet/qlogic/qede/qede_fp.c
52
PAGE_SIZE, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_fp.c
565
if (curr_cons->page_offset == PAGE_SIZE) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
576
PAGE_SIZE, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_fp.c
770
if (bd->page_offset == PAGE_SIZE) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
985
if (tpa_info->buffer.page_offset == PAGE_SIZE)
drivers/net/ethernet/qlogic/qede/qede_fp.c
987
PAGE_SIZE, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_main.c
1518
rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_main.c
1568
if (rxq->rx_buf_size + size > PAGE_SIZE)
drivers/net/ethernet/qlogic/qede/qede_main.c
1569
rxq->rx_buf_size = PAGE_SIZE - size;
drivers/net/ethernet/qlogic/qede/qede_main.c
1578
rxq->rx_buf_seg_size = PAGE_SIZE;
drivers/net/ethernet/qlogic/qla3xxx.c
2576
if (qdev->lrg_buf_q_size < PAGE_SIZE)
drivers/net/ethernet/qlogic/qla3xxx.c
2577
qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
drivers/net/ethernet/qlogic/qla3xxx.c
2602
if (qdev->small_buf_q_size < PAGE_SIZE)
drivers/net/ethernet/qlogic/qla3xxx.c
2603
qdev->small_buf_q_alloc_size = PAGE_SIZE;
drivers/net/ethernet/qlogic/qla3xxx.c
2858
dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
drivers/net/ethernet/qlogic/qla3xxx.c
2914
dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
drivers/net/ethernet/qlogic/qla3xxx.c
2929
dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
441
#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
drivers/net/ethernet/realtek/rtase/rtase.h
21
#define RTASE_RX_BUF_SIZE (PAGE_SIZE - \
drivers/net/ethernet/realtek/rtase/rtase_main.c
550
skb = build_skb(ring->data_buf[entry], PAGE_SIZE);
drivers/net/ethernet/realtek/rtase/rtase_main.c
635
pp_params.max_len = PAGE_SIZE;
drivers/net/ethernet/rocker/rocker_main.c
181
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
drivers/net/ethernet/rocker/rocker_main.c
619
DMA_BIDIRECTIONAL, PAGE_SIZE);
drivers/net/ethernet/rocker/rocker_main.c
640
DMA_FROM_DEVICE, PAGE_SIZE);
drivers/net/ethernet/sfc/efx_common.c
278
return PAGE_SIZE - overhead;
drivers/net/ethernet/sfc/efx_common.c
377
if (rx_buf_len <= PAGE_SIZE) {
drivers/net/ethernet/sfc/efx_common.c
385
PAGE_SIZE);
drivers/net/ethernet/sfc/falcon/efx.c
605
if (rx_buf_len <= PAGE_SIZE) {
drivers/net/ethernet/sfc/falcon/efx.c
613
PAGE_SIZE);
drivers/net/ethernet/sfc/falcon/rx.c
132
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/falcon/rx.c
173
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/falcon/rx.c
202
} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
drivers/net/ethernet/sfc/falcon/rx.c
222
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/falcon/rx.c
815
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/falcon/rx.c
97
((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
drivers/net/ethernet/sfc/falcon/rx.c
99
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
drivers/net/ethernet/sfc/falcon/tx.c
32
((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
drivers/net/ethernet/sfc/falcon/tx.c
35
ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
drivers/net/ethernet/sfc/falcon/tx.c
528
return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
drivers/net/ethernet/sfc/falcon/tx.c
91
if (PAGE_SIZE > EF4_PAGE_SIZE)
drivers/net/ethernet/sfc/mcdi.c
211
for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
drivers/net/ethernet/sfc/mcdi.c
212
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
drivers/net/ethernet/sfc/mcdi.c
216
for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
drivers/net/ethernet/sfc/mcdi.c
217
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
drivers/net/ethernet/sfc/mcdi.c
302
for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
drivers/net/ethernet/sfc/mcdi.c
304
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
drivers/net/ethernet/sfc/mcdi.c
308
for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
drivers/net/ethernet/sfc/mcdi.c
311
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
drivers/net/ethernet/sfc/rx_common.c
166
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/rx_common.c
329
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/rx_common.c
391
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/rx_common.c
422
} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
drivers/net/ethernet/sfc/rx_common.c
436
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
drivers/net/ethernet/sfc/rx_common.c
438
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
drivers/net/ethernet/sfc/rx_common.c
61
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/siena/efx_common.c
282
return PAGE_SIZE - overhead;
drivers/net/ethernet/sfc/siena/efx_common.c
381
if (rx_buf_len <= PAGE_SIZE) {
drivers/net/ethernet/sfc/siena/efx_common.c
389
PAGE_SIZE);
drivers/net/ethernet/sfc/siena/mcdi.c
215
for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
drivers/net/ethernet/sfc/siena/mcdi.c
216
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
drivers/net/ethernet/sfc/siena/mcdi.c
220
for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
drivers/net/ethernet/sfc/siena/mcdi.c
221
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
drivers/net/ethernet/sfc/siena/mcdi.c
306
for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
drivers/net/ethernet/sfc/siena/mcdi.c
308
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
drivers/net/ethernet/sfc/siena/mcdi.c
312
for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
drivers/net/ethernet/sfc/siena/mcdi.c
315
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
drivers/net/ethernet/sfc/siena/rx_common.c
169
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/siena/rx_common.c
326
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/siena/rx_common.c
388
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/siena/rx_common.c
419
} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
drivers/net/ethernet/sfc/siena/rx_common.c
433
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
drivers/net/ethernet/sfc/siena/rx_common.c
435
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
drivers/net/ethernet/sfc/siena/rx_common.c
64
PAGE_SIZE << efx->rx_buffer_order,
drivers/net/ethernet/sfc/siena/tx.c
33
((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
drivers/net/ethernet/sfc/siena/tx.c
36
efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
drivers/net/ethernet/sfc/siena/tx_common.c
20
PAGE_SIZE >> EFX_TX_CB_ORDER);
drivers/net/ethernet/sfc/siena/tx_common.c
414
if (PAGE_SIZE > EFX_PAGE_SIZE)
drivers/net/ethernet/sfc/tx.c
41
((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
drivers/net/ethernet/sfc/tx.c
44
efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
drivers/net/ethernet/sfc/tx_common.c
20
PAGE_SIZE >> EFX_TX_CB_ORDER);
drivers/net/ethernet/sfc/tx_common.c
454
if (PAGE_SIZE > EFX_PAGE_SIZE)
drivers/net/ethernet/socionext/netsec.c
247
#define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
drivers/net/ethernet/socionext/netsec.c
963
xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
drivers/net/ethernet/spacemit/k1_emac.c
401
tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
drivers/net/ethernet/spacemit/k1_emac.c
429
rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2190
num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2194
rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5611
bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
drivers/net/ethernet/sun/cassini.c
1798
#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
drivers/net/ethernet/sun/cassini.c
3369
cp->page_size = (PAGE_SIZE << cp->page_order);
drivers/net/ethernet/sun/niu.c
10007
memset((char *)page, 0, PAGE_SIZE << order);
drivers/net/ethernet/sun/niu.c
10236
BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
drivers/net/ethernet/sun/niu.c
3338
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/sun/niu.c
3403
if ((page->private + PAGE_SIZE) - rcr_size == addr) {
drivers/net/ethernet/sun/niu.c
3406
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/sun/niu.c
3475
PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/sun/niu.c
3543
np->ops->unmap_page(np->device, base, PAGE_SIZE,
drivers/net/ethernet/sun/niu.c
4453
switch (PAGE_SIZE) {
drivers/net/ethernet/sun/sunbmac.c
1163
PAGE_SIZE,
drivers/net/ethernet/sun/sunbmac.c
1215
PAGE_SIZE,
drivers/net/ethernet/sun/sunbmac.c
1253
PAGE_SIZE,
drivers/net/ethernet/sun/sunhme.c
2451
hp->happy_block = dmam_alloc_coherent(hp->dma_dev, PAGE_SIZE,
drivers/net/ethernet/sun/sunqe.c
883
qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
drivers/net/ethernet/sun/sunqe.c
918
dma_free_coherent(&op->dev, PAGE_SIZE,
drivers/net/ethernet/sun/sunqe.c
945
dma_free_coherent(&op->dev, PAGE_SIZE,
drivers/net/ethernet/sun/sunvnet_common.h
27
#define VNET_MAXCOOKIES (VNET_MAXPACKET / PAGE_SIZE + 1)
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
347
PAGE_SIZE << order, DMA_FROM_DEVICE);
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
354
pa->pages_len = PAGE_SIZE << order;
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
199
rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);
drivers/net/ethernet/tehuti/tn40.c
138
.max_len = PAGE_SIZE,
drivers/net/ethernet/tehuti/tn40.c
378
skb = napi_build_skb(page_address(dm->page), PAGE_SIZE);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
1326
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
1345
PAGE_SIZE, headroom);
drivers/net/ethernet/ti/cpsw.c
417
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
drivers/net/ethernet/ti/cpsw_new.c
361
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
drivers/net/ethernet/ti/icssg/icssg_common.c
1053
page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
drivers/net/ethernet/ti/icssg/icssg_common.c
1073
xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
drivers/net/ethernet/ti/icssg/icssg_common.c
1091
skb = napi_build_skb(pa, PAGE_SIZE);
drivers/net/ethernet/ti/icssg/icssg_common.c
482
pp_params.max_len = PAGE_SIZE;
drivers/net/ethernet/ti/netcp_core.c
600
dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/ti/netcp_core.c
611
netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
drivers/net/ethernet/ti/netcp_core.c
706
dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
drivers/net/ethernet/ti/netcp_core.c
715
offset_in_page(dma_buff), buf_len, PAGE_SIZE);
drivers/net/ethernet/ti/netcp_core.c
824
netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
drivers/net/ethernet/ti/netcp_core.c
899
buf_len = PAGE_SIZE;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
1527
buf, PAGE_SIZE);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
2343
BUILD_BUG_ON(PAGE_SIZE <
drivers/net/ethernet/toshiba/tc35815.c
1051
dma_free_coherent(&lp->pci_dev->dev, PAGE_SIZE * FD_PAGE_NUM,
drivers/net/ethernet/toshiba/tc35815.c
351
#define RX_BUF_SIZE PAGE_SIZE
drivers/net/ethernet/toshiba/tc35815.c
878
PAGE_SIZE * FD_PAGE_NUM);
drivers/net/ethernet/toshiba/tc35815.c
881
PAGE_SIZE * FD_PAGE_NUM,
drivers/net/ethernet/toshiba/tc35815.c
897
PAGE_SIZE * FD_PAGE_NUM,
drivers/net/ethernet/toshiba/tc35815.c
909
i * PAGE_SIZE));
drivers/net/ethernet/wangxun/libwx/wx_hw.c
1802
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/wangxun/libwx/wx_lib.c
229
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/wangxun/libwx/wx_lib.c
277
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/wangxun/libwx/wx_type.h
1498
#if (PAGE_SIZE < 8192)
drivers/net/ethernet/wangxun/libwx/wx_type.h
1505
#define wx_rx_pg_size(_ring) (PAGE_SIZE << wx_rx_pg_order(_ring))
drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
298
#if (PAGE_SIZE < 8192)
drivers/net/fddi/skfp/h/targetos.h
67
#define SMT_PAGESIZE PAGE_SIZE // Size of a memory page (power of 2).
drivers/net/hyperv/netvsc.c
347
buf_size = roundup(buf_size, PAGE_SIZE);
drivers/net/hyperv/netvsc.c
458
buf_size = round_up(buf_size, PAGE_SIZE);
drivers/net/hyperv/netvsc_bpf.c
137
if (prog && buf_max > PAGE_SIZE) {
drivers/net/hyperv/netvsc_bpf.c
56
xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
drivers/net/ipa/gsi_trans.c
159
total_size = PAGE_SIZE << get_order(total_size);
drivers/net/ipa/ipa_endpoint.c
33
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
drivers/net/ipa/ipa_mem.h
46
#define IPA_MEM_MAX (2 * PAGE_SIZE)
drivers/net/netdevsim/health.c
152
if (count == 0 || count > PAGE_SIZE)
drivers/net/ovpn/tcp.c
406
int ret, linear = PAGE_SIZE;
drivers/net/tap.c
550
if (prepad + len < PAGE_SIZE || !linear)
drivers/net/tap.c
553
if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
drivers/net/tap.c
554
linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
drivers/net/thunderbolt/main.c
48
#define TBNET_RX_PAGE_SIZE (PAGE_SIZE << TBNET_RX_PAGE_ORDER)
drivers/net/tun.c
1424
if (fragsz == 0 || fragsz > PAGE_SIZE) {
drivers/net/tun.c
1456
if (prepad + len < PAGE_SIZE)
drivers/net/tun.c
1459
if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
drivers/net/tun.c
1460
linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
drivers/net/tun.c
1530
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
drivers/net/usb/cdc-phonet.c
127
PAGE_SIZE, rx_complete, dev);
drivers/net/usb/cdc-phonet.c
157
PAGE_SIZE);
drivers/net/usb/cdc-phonet.c
163
PAGE_SIZE);
drivers/net/usb/cdc-phonet.c
166
if (req->actual_length < PAGE_SIZE)
drivers/net/usb/qmi_wwan.c
402
count += scnprintf(&buf[count], PAGE_SIZE - count,
drivers/net/veth.c
1584
max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
drivers/net/veth.c
1590
max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
drivers/net/virtio_net.c
1871
if (page_off + *len + tailroom > PAGE_SIZE)
drivers/net/virtio_net.c
1905
if ((page_off + buflen + tailroom) > PAGE_SIZE) {
drivers/net/virtio_net.c
2105
if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) {
drivers/net/virtio_net.c
2108
(vi->big_packets_num_skbfrags + 1) * PAGE_SIZE);
drivers/net/virtio_net.c
2112
skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
drivers/net/virtio_net.c
2335
if (*len + xdp_room > PAGE_SIZE)
drivers/net/virtio_net.c
2346
*frame_sz = PAGE_SIZE;
drivers/net/virtio_net.c
2713
sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
drivers/net/virtio_net.c
2733
sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
drivers/net/virtio_net.c
2754
return PAGE_SIZE - room;
drivers/net/virtio_net.c
2757
rq->min_buf_len, PAGE_SIZE - hdr_len);
drivers/net/virtio_net.c
5972
unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
drivers/net/virtio_net.c
6656
vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
drivers/net/virtio_net.c
909
BUG_ON(offset >= PAGE_SIZE);
drivers/net/virtio_net.c
911
unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
drivers/net/vmxnet3/vmxnet3_drv.c
1844
0, PAGE_SIZE,
drivers/net/vmxnet3/vmxnet3_drv.c
2174
rq->buf_info[0][i].len = PAGE_SIZE;
drivers/net/vmxnet3/vmxnet3_drv.c
2179
rq->buf_info[1][i].len = PAGE_SIZE;
drivers/net/vmxnet3/vmxnet3_drv.c
3390
adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
drivers/net/vmxnet3/vmxnet3_drv.c
3398
adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
drivers/net/vmxnet3/vmxnet3_drv.c
726
rbi->len != PAGE_SIZE);
drivers/net/vmxnet3/vmxnet3_drv.c
736
rbi->page, 0, PAGE_SIZE,
drivers/net/vmxnet3/vmxnet3_drv.c
788
skb->truesize += PAGE_SIZE;
drivers/net/vmxnet3/vmxnet3_xdp.c
318
skb = build_skb(page_address(page), PAGE_SIZE);
drivers/net/vmxnet3/vmxnet3_xdp.c
351
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
drivers/net/vmxnet3/vmxnet3_xdp.c
398
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
drivers/net/vmxnet3/vmxnet3_xdp.h
21
#define VMXNET3_XDP_MAX_FRSIZE (PAGE_SIZE - VMXNET3_XDP_HEADROOM - \
drivers/net/wireguard/ratelimiter.c
173
table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 8192 :
drivers/net/wireless/ath/ath11k/ahb.c
842
msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
drivers/net/wireless/ath/ath12k/debugfs.c
909
size_t len = 0, buf_len = (PAGE_SIZE * 2);
drivers/net/wireless/ath/ath12k/dp.h
255
#define ATH12K_PAGE_SIZE PAGE_SIZE
drivers/net/wireless/ath/carl9170/debug.c
122
if (count > PAGE_SIZE)
drivers/net/wireless/broadcom/b43/debugfs.c
562
if (count > PAGE_SIZE)
drivers/net/wireless/broadcom/b43legacy/debugfs.c
264
if (count > PAGE_SIZE)
drivers/net/wireless/broadcom/b43legacy/dma.h
87
#define B43legacy_DMA_RINGMEMSIZE PAGE_SIZE
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
405
while (req_sz > PAGE_SIZE) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
406
pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
412
req_sz -= PAGE_SIZE;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
29
u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
447
if (!IS_ALIGNED(PAGE_SIZE, align))
drivers/net/wireless/intel/ipw2x00/ipw2100.c
3847
while (len < PAGE_SIZE - 128 && loop < 0x30000) {
drivers/net/wireless/intel/ipw2x00/ipw2100.c
3942
while (len < PAGE_SIZE - 128 && loop < ARRAY_SIZE(ord_data)) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1255
log_size = PAGE_SIZE / sizeof(*log) > log_len ?
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1256
sizeof(*log) * log_len : PAGE_SIZE;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1265
len += scnprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1267
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1270
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1284
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1290
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1300
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1303
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1308
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1333
(i != priv->cmdlog_pos) && (len < PAGE_SIZE);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1336
scnprintf(buf + len, PAGE_SIZE - len,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1341
snprintk_buf(buf + len, PAGE_SIZE - len,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1344
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1346
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1017
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1030
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1065
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1115
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1177
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1220
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
3183
while (size && PAGE_SIZE - len) {
drivers/net/wireless/intel/iwlegacy/3945-mac.c
3185
PAGE_SIZE - len, true);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
3187
if (PAGE_SIZE - len)
drivers/net/wireless/intel/iwlegacy/3945.c
475
u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
347
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
359
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
414
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4247
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4290
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
598
len, PAGE_SIZE << il->hw_params.rx_page_order);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
99
PAGE_SIZE << il->hw_params.rx_page_order,
drivers/net/wireless/intel/iwlegacy/debug.c
503
int pos = 0, i, bufsz = PAGE_SIZE;
drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
284
int pos = 0, i, bufsz = PAGE_SIZE;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
570
nents = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
572
#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result))
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
621
sg_set_page(iter, new_page, PAGE_SIZE, 0);
drivers/net/wireless/intel/iwlwifi/fw/paging.c
83
PAGE_SIZE << order,
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
557
block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
564
pages * PAGE_SIZE);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
574
frag->size = pages * PAGE_SIZE;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
616
PAGE_SIZE);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
630
alloc_id, i, pages * PAGE_SIZE);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
636
(remain_pages * PAGE_SIZE);
drivers/net/wireless/intel/iwlwifi/mei/main.c
252
_IWL_MEI_SAP_SHARED_MEM_SZ_VER3, PAGE_SIZE);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
206
BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
207
prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
249
cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
251
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
285
dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
347
dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
612
#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
366
unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3589
(PAGE_SIZE << trans_pcie->rx_page_order));
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
673
chunk_sz = PAGE_SIZE;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
35
phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
1790
phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
220
dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
drivers/net/wireless/intersil/p54/fwio.c
98
if (priv->rx_mtu > maxlen && PAGE_SIZE == 4096) {
drivers/net/wireless/marvell/libertas/debugfs.c
31
static const size_t len = PAGE_SIZE;
drivers/net/wireless/marvell/mwifiex/debugfs.c
424
buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
drivers/net/wireless/marvell/mwifiex/debugfs.c
470
pos += snprintf(buf, PAGE_SIZE, "0");
drivers/net/wireless/marvell/mwifiex/debugfs.c
478
pos += snprintf(buf, PAGE_SIZE, "%u 0x%x 0x%x\n",
drivers/net/wireless/marvell/mwifiex/debugfs.c
494
pos += snprintf(buf, PAGE_SIZE, "%u 0x%x 0x%x\n", saved_reg_type,
drivers/net/wireless/marvell/mwifiex/debugfs.c
522
pos += snprintf(buf, PAGE_SIZE, "debug mask=0x%08x\n",
drivers/net/wireless/marvell/mwifiex/debugfs.c
543
buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
drivers/net/wireless/marvell/mwifiex/debugfs.c
614
buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
drivers/net/wireless/marvell/mwifiex/debugfs.c
662
pos += snprintf(buf, PAGE_SIZE, "0x%x 0x%x\n", priv->mem_rw.addr,
drivers/net/wireless/marvell/mwifiex/debugfs.c
687
buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
drivers/net/wireless/marvell/mwifiex/debugfs.c
732
pos = snprintf(buf, PAGE_SIZE, "0");
drivers/net/wireless/marvell/mwifiex/debugfs.c
744
pos = snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
drivers/net/wireless/marvell/mwifiex/debugfs.c
747
pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
drivers/net/wireless/marvell/mwifiex/debugfs.c
770
buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
drivers/net/wireless/marvell/mwifiex/debugfs.c
834
pos = snprintf(buf, PAGE_SIZE, "%u 0x%x 0x%x\n", hscfg.conditions,
drivers/net/wireless/mediatek/mt76/mac80211.c
646
pp_params.max_len = PAGE_SIZE;
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
539
int i, ret, len = PAGE_SIZE - 1, nr_val;
drivers/net/wireless/mediatek/mt76/usb.c
678
q->buf_size = PAGE_SIZE;
drivers/net/wireless/mediatek/mt7601u/mt7601u.h
33
#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
921
if (blksize < PAGE_SIZE)
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
922
blksize = PAGE_SIZE;
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
924
while (blksize >= PAGE_SIZE) {
drivers/net/wireless/realtek/rtw89/debug.c
152
size_t bufsz = opt->rsize ? opt->rsize : PAGE_SIZE;
drivers/net/wireless/ti/wl12xx/wl12xx.h
45
#define WL12XX_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
drivers/net/wireless/ti/wl18xx/wl18xx.h
22
#define WL18XX_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
drivers/net/wireless/ti/wlcore/debugfs.c
28
#define WLCORE_MAX_BLOCK_SIZE ((size_t)(4*PAGE_SIZE))
drivers/net/wireless/ti/wlcore/main.c
885
len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
drivers/net/wireless/virtual/mac80211_hwsim.c
1876
if (skb->len < PAGE_SIZE && paged_rx) {
drivers/net/wwan/wwan_core.c
415
const unsigned int max_ports = PAGE_SIZE * 8;
drivers/net/xen-netback/interface.c
45
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
drivers/net/xen-netback/netback.c
1144
if (offset + PAGE_SIZE < skb->len)
drivers/net/xen-netback/netback.c
1145
len = PAGE_SIZE;
drivers/net/xen-netback/netback.c
1167
skb->truesize += i * PAGE_SIZE;
drivers/net/xen-netfront.c
1192
rx.offset, rx.status, PAGE_SIZE);
drivers/net/xen-netfront.c
288
skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
drivers/net/xen-netfront.c
761
if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
drivers/net/xen-netfront.c
788
if (offset == PAGE_SIZE) {
drivers/ntb/hw/intel/ntb_hw_gen4.c
380
if (addr & (PAGE_SIZE - 1))
drivers/ntb/hw/intel/ntb_hw_gen4.c
566
*addr_align = PAGE_SIZE;
drivers/nvdimm/btt.c
1453
if (len > PAGE_SIZE || len < btt->sector_size ||
drivers/nvdimm/btt.c
520
unsigned long chunk = min(len, PAGE_SIZE);
drivers/nvdimm/nd.h
484
return ALIGN(SZ_8K, PAGE_SIZE);
drivers/nvdimm/pfn_devs.c
114
return PAGE_SIZE;
drivers/nvdimm/pfn_devs.c
391
PAGE_SIZE);
drivers/nvdimm/pfn_devs.c
392
zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
drivers/nvdimm/pfn_devs.c
394
unsigned long chunk = min(zero_len, PAGE_SIZE);
drivers/nvdimm/pfn_devs.c
483
pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
drivers/nvdimm/pfn_devs.c
502
if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
drivers/nvdimm/pfn_devs.c
587
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
drivers/nvdimm/pfn_devs.c
830
pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
drivers/nvdimm/pfn_devs.c
95
alignments[0] = PAGE_SIZE;
drivers/nvdimm/pmem.c
132
chunk = min_t(unsigned int, len, PAGE_SIZE - off);
drivers/nvdimm/pmem.c
151
chunk = min_t(unsigned int, len, PAGE_SIZE - off);
drivers/nvdimm/pmem.c
300
PAGE_SIZE));
drivers/nvdimm/pmem.c
455
.physical_block_size = PAGE_SIZE,
drivers/nvdimm/region_devs.c
53
PFN_PHYS(pfn), PAGE_SIZE);
drivers/nvdimm/region_devs.c
559
if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
drivers/nvdimm/region_devs.c
970
align = PAGE_SIZE;
drivers/nvdimm/region_devs.c
995
if ((mapping->start | mapping->size) % PAGE_SIZE) {
drivers/nvdimm/region_devs.c
998
caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
drivers/nvme/host/apple.c
1559
WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
drivers/nvme/host/apple.c
359
const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
drivers/nvme/host/core.c
2209
if (size > PAGE_SIZE * MAX_ORDER_NR_PAGES) {
drivers/nvme/host/core.c
5110
PAGE_SIZE);
drivers/nvme/host/fabrics.c
1393
if (count > PAGE_SIZE)
drivers/nvme/host/pci.c
2638
u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
drivers/nvme/host/pci.c
2639
u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
drivers/nvme/host/pci.c
2646
if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) {
drivers/nvme/host/sysfs.c
476
return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
drivers/nvme/host/tcp.c
466
page += off / PAGE_SIZE;
drivers/nvme/host/tcp.c
467
off %= PAGE_SIZE;
drivers/nvme/host/tcp.c
470
size_t n = min(len, (size_t)PAGE_SIZE - off);
drivers/nvme/target/configfs.c
110
return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
drivers/nvme/target/configfs.c
1187
return snprintf(page, PAGE_SIZE, "%d\n",
drivers/nvme/target/configfs.c
1226
return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
drivers/nvme/target/configfs.c
1231
return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
drivers/nvme/target/configfs.c
1296
return snprintf(page, PAGE_SIZE, "%.*s\n",
drivers/nvme/target/configfs.c
1349
return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
drivers/nvme/target/configfs.c
1379
return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
drivers/nvme/target/configfs.c
138
return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
drivers/nvme/target/configfs.c
1409
return snprintf(page, PAGE_SIZE, "0x%x\n", to_subsys(item)->vendor_id);
drivers/nvme/target/configfs.c
1430
return snprintf(page, PAGE_SIZE, "0x%x\n",
drivers/nvme/target/configfs.c
1454
return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
drivers/nvme/target/configfs.c
1622
return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
drivers/nvme/target/configfs.c
1643
return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
drivers/nvme/target/configfs.c
1757
return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
drivers/nvme/target/configfs.c
179
return snprintf(page, PAGE_SIZE, "%s\n",
drivers/nvme/target/configfs.c
183
return snprintf(page, PAGE_SIZE, "\n");
drivers/nvme/target/configfs.c
2287
return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
drivers/nvme/target/configfs.c
231
return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
drivers/nvme/target/configfs.c
258
return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
drivers/nvme/target/configfs.c
284
return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
drivers/nvme/target/configfs.c
311
return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
drivers/nvme/target/configfs.c
341
return snprintf(page, PAGE_SIZE,
drivers/nvme/target/configfs.c
74
return snprintf(page, PAGE_SIZE, "%s\n",
drivers/nvme/target/configfs.c
78
return snprintf(page, PAGE_SIZE, "\n");
drivers/nvme/target/configfs.c
880
return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
drivers/nvme/target/nvmet.h
444
#define NVMET_MAX_INLINE_DATA_LEN NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE
drivers/nvme/target/rdma.c
1220
inline_sge_count * PAGE_SIZE);
drivers/nvme/target/rdma.c
1221
nport->inline_data_size = inline_sge_count * PAGE_SIZE;
drivers/nvme/target/rdma.c
296
pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/nvme/target/rdma.c
299
sge->length = min_t(int, len, PAGE_SIZE);
drivers/nvme/target/rdma.c
32
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE
drivers/nvme/target/rdma.c
34
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
drivers/nvme/target/rdma.c
835
sg->length = min_t(int, len, PAGE_SIZE - off);
drivers/nvme/target/tcp.c
25
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
drivers/nvme/target/tcp.c
363
nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
drivers/nvme/target/tcp.c
365
cmd->sg_idx = offset / PAGE_SIZE;
drivers/nvme/target/tcp.c
366
sg_offset = offset % PAGE_SIZE;
drivers/of/fdt.c
1153
if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
drivers/of/fdt.c
1160
size -= PAGE_SIZE - (base & ~PAGE_MASK);
drivers/of/kexec.c
278
fdt_len = PAGE_SIZE;
drivers/of/kexec.c
378
round_up(tmp_size, PAGE_SIZE));
drivers/of/unittest.c
241
prop->length = PAGE_SIZE * 8;
drivers/parisc/ccio-dma.c
267
#define IOVP_SIZE PAGE_SIZE
drivers/parisc/dino.c
962
if (!request_mem_region(hpa, PAGE_SIZE, name)) {
drivers/parisc/sba_iommu.c
1329
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
drivers/parisc/sba_iommu.c
1467
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
drivers/parport/parport_pc.c
2222
PAGE_SIZE,
drivers/parport/parport_pc.c
2309
dma_free_coherent(p->physport->dev, PAGE_SIZE,
drivers/parport/parport_pc.c
605
maxlen = PAGE_SIZE; /* sizeof(priv->dma_buf) */
drivers/pci/controller/cadence/pcie-cadence-ep.c
740
resource_size(pcie->mem_res), PAGE_SIZE);
drivers/pci/controller/dwc/pcie-designware.h
306
#define DMA_LLP_MEM_SIZE PAGE_SIZE
drivers/pci/controller/pci-hyperv.c
2243
PAGE_SIZE);
drivers/pci/controller/pci-tegra.c
1740
msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
drivers/pci/controller/pci-tegra.c
1787
dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
drivers/pci/endpoint/pci-epc-mem.c
71
if (page_size < PAGE_SIZE)
drivers/pci/endpoint/pci-epc-mem.c
72
page_size = PAGE_SIZE;
drivers/pci/hotplug/cpqphp_sysfs.c
128
#define MAX_OUTPUT (4*PAGE_SIZE)
drivers/pci/iov.c
867
if (resource_size(res) & (PAGE_SIZE - 1)) {
drivers/pci/p2pdma.c
142
for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
drivers/pci/p2pdma.c
167
kaddr += PAGE_SIZE;
drivers/pci/p2pdma.c
168
len -= PAGE_SIZE;
drivers/pci/p2pdma.c
221
PAGE_SIZE, (void **)&ref);
drivers/pci/p2pdma.c
874
const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
drivers/pci/p2pdma.c
877
closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/pci/pci-label.c
149
buf, PAGE_SIZE - 1);
drivers/pci/pci.c
6539
if (count >= (PAGE_SIZE - 1))
drivers/pci/quirks.c
592
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
drivers/pci/quirks.c
593
resource_set_range(r, 0, PAGE_SIZE);
drivers/pcmcia/rsrc_nonstatic.c
1086
if (ret > (PAGE_SIZE - 10))
drivers/pcmcia/rsrc_nonstatic.c
1143
if (ret > (PAGE_SIZE - 10))
drivers/pcmcia/rsrc_nonstatic.c
1152
if (ret > (PAGE_SIZE - 10))
drivers/pcmcia/soc_common.c
594
map->stop = PAGE_SIZE-1;
drivers/pcmcia/soc_common.c
839
skt->socket.map_size = PAGE_SIZE;
drivers/perf/arm_spe_pmu.c
499
u64 limit = buf->nr_pages * PAGE_SIZE;
drivers/perf/arm_spe_pmu.c
517
limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
drivers/perf/arm_spe_pmu.c
527
const u64 bufsize = buf->nr_pages * PAGE_SIZE;
drivers/perf/arm_spe_pmu.c
569
limit = round_down(tail, PAGE_SIZE);
drivers/perf/arm_spe_pmu.c
581
limit = min(limit, round_up(wakeup, PAGE_SIZE));
drivers/perf/thunderx2_pmu.c
130
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
drivers/platform/chrome/chromeos_acpi.c
120
sysfs_emit_at(buf, PAGE_SIZE - 4, "..\n");
drivers/platform/chrome/chromeos_acpi.c
121
return PAGE_SIZE - 1;
drivers/platform/chrome/chromeos_acpi.c
98
room_left = PAGE_SIZE - 1;
drivers/platform/chrome/cros_ec_chardev.c
32
#define CROS_MAX_EVENT_LEN PAGE_SIZE
drivers/platform/goldfish/goldfish_pipe.c
268
*iter_last_page_size = PAGE_SIZE;
drivers/platform/goldfish/goldfish_pipe.c
279
*iter_last_page_size = PAGE_SIZE;
drivers/platform/goldfish/goldfish_pipe.c
305
: (PAGE_SIZE - (address & ~PAGE_MASK));
drivers/platform/goldfish/goldfish_pipe.c
311
iter_last_page_size : PAGE_SIZE;
drivers/platform/goldfish/goldfish_pipe.c
312
if (xaddr == xaddr_prev + PAGE_SIZE) {
drivers/platform/goldfish/goldfish_pipe.c
715
BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
drivers/platform/goldfish/goldfish_pipe.c
841
BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
drivers/platform/goldfish/goldfish_pipe.c
887
if (!r || resource_size(r) < PAGE_SIZE) {
drivers/platform/goldfish/goldfish_pipe.c
891
dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
drivers/platform/mellanox/mlxbf-pmc.c
1847
if (len >= PAGE_SIZE)
drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c
224
WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1513
if (count >= INT_MAX - PAGE_SIZE)
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1519
offset = (uintptr_t)bulk->offset & (PAGE_SIZE - 1);
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1521
offset = (uintptr_t)bulk->uoffset & (PAGE_SIZE - 1);
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1522
num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1577
(actual_pages * PAGE_SIZE)));
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1578
size_t bytes = PAGE_SIZE - off;
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1618
unsigned int len = PAGE_SIZE - offset;
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1653
addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1656
(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
1731
(PAGE_SIZE - 1) & ~(cache_line_size - 1),
drivers/platform/x86/dell/dcdbas.c
576
remap_size = eps->num_of_4k_pages * PAGE_SIZE;
drivers/platform/x86/dell/dell-smbios-smm.c
105
ret = dcdbas_smi_alloc(&smi_buf, PAGE_SIZE);
drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
31
ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer);
drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
33
ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[CURRENT_VAL].integer.value);
drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
34
ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[IS_PASS_SET].integer.value);
drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
33
ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer);
drivers/platform/x86/hp/hp-bioscfg/surestart-attributes.c
70
if (count * LOG_ENTRY_SIZE > PAGE_SIZE)
drivers/platform/x86/hp/hp-bioscfg/surestart-attributes.c
93
if (ret < 0 || (LOG_ENTRY_SIZE * i) > PAGE_SIZE) {
drivers/platform/x86/intel/pmt/class.c
124
psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
drivers/platform/x86/lenovo/thinkpad_acpi.c
932
if (count > PAGE_SIZE - 1)
drivers/platform/x86/uv_sysfs.c
661
for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) {
drivers/platform/x86/uv_sysfs.c
745
return uv_get_archtype(buf, PAGE_SIZE);
drivers/pnp/interface.c
221
buffer->len = PAGE_SIZE;
drivers/pnp/interface.c
264
buffer->len = PAGE_SIZE;
drivers/pnp/pnpbios/bioscalls.c
64
(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
drivers/ptp/ptp_vmclock.c
380
if (vma->vm_end - vma->vm_start != PAGE_SIZE || vma->vm_pgoff)
drivers/ptp/ptp_vmclock.c
384
st->res.start >> PAGE_SHIFT, PAGE_SIZE,
drivers/ptp/ptp_vmclock.c
400
if (*ppos >= PAGE_SIZE)
drivers/ptp/ptp_vmclock.c
403
max_count = PAGE_SIZE - *ppos;
drivers/ptp/ptp_vmclock.c
736
if (le32_to_cpu(st->clk->size) >= PAGE_SIZE) {
drivers/rapidio/switches/idt_gen2.c
378
len += snprintf(buf + len, PAGE_SIZE - len,
drivers/rapidio/switches/idt_gen2.c
380
if (len >= (PAGE_SIZE - 10))
drivers/ras/cec.c
72
#define MAX_ELEMS (PAGE_SIZE / sizeof(u64))
drivers/ras/cec.c
83
#define FULL_COUNT_MASK (PAGE_SIZE - 1)
drivers/ras/cec.c
92
#define FULL_COUNT(e) ((e) & (PAGE_SIZE - 1))
drivers/regulator/core.c
1932
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/regulator/core.c
1936
ret = snprintf(buf, PAGE_SIZE,
drivers/rpmsg/rpmsg_core.c
394
len = of_device_modalias(dev, buf, PAGE_SIZE);
drivers/s390/block/dasd.c
115
dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
drivers/s390/block/dasd.c
116
dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
drivers/s390/block/dasd.c
117
dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
drivers/s390/block/dasd.c
337
max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
drivers/s390/block/dasd_devmap.c
257
kmem_cache_create("dasd_page_cache", PAGE_SIZE,
drivers/s390/block/dasd_devmap.c
258
PAGE_SIZE, SLAB_CACHE_DMA,
drivers/s390/block/dasd_diag.c
408
for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
drivers/s390/block/dasd_diag.c
42
#define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
drivers/s390/block/dasd_diag.c
433
if (bsize > PAGE_SIZE) {
drivers/s390/block/dasd_eckd.c
4836
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
drivers/s390/block/dasd_eckd.c
4859
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
drivers/s390/block/dasd_eer.c
134
headindex = eerb->head / PAGE_SIZE;
drivers/s390/block/dasd_eer.c
135
localhead = eerb->head % PAGE_SIZE;
drivers/s390/block/dasd_eer.c
136
len = min(rest, PAGE_SIZE - localhead);
drivers/s390/block/dasd_eer.c
161
tailindex = eerb->tail / PAGE_SIZE;
drivers/s390/block/dasd_eer.c
162
localtail = eerb->tail % PAGE_SIZE;
drivers/s390/block/dasd_eer.c
163
len = min(rest, PAGE_SIZE - localtail);
drivers/s390/block/dasd_eer.c
539
static char readbuffer[PAGE_SIZE];
drivers/s390/block/dasd_eer.c
552
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
drivers/s390/block/dasd_eer.c
556
" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
drivers/s390/block/dasd_eer.c
559
eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
drivers/s390/block/dasd_erp.c
32
BUG_ON(datasize > PAGE_SIZE ||
drivers/s390/block/dasd_erp.c
33
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
drivers/s390/block/dasd_genhd.c
74
.max_segment_size = PAGE_SIZE,
drivers/s390/block/dasd_genhd.c
75
.seg_boundary_mask = PAGE_SIZE - 1,
drivers/s390/block/dasd_int.h
127
#define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize)
drivers/s390/block/dcssblk.c
895
!IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE))
drivers/s390/block/dcssblk.c
917
if (unlikely(!IS_ALIGNED((unsigned long)page_addr, PAGE_SIZE) ||
drivers/s390/block/dcssblk.c
918
!IS_ALIGNED(bvec.bv_len, PAGE_SIZE)))
drivers/s390/block/dcssblk.c
937
resource_size_t offset = pgoff * PAGE_SIZE;
drivers/s390/block/dcssblk.c
946
return (dev_sz - offset) / PAGE_SIZE;
drivers/s390/block/scm_blk.c
135
IS_ALIGNED(aidaw, PAGE_SIZE))
drivers/s390/block/scm_blk.c
159
unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
drivers/s390/block/scm_blk.c
161
return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
drivers/s390/block/scm_blk.c
173
memset(aidaw, 0, PAGE_SIZE);
drivers/s390/block/scm_blk.c
443
(unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
drivers/s390/char/con3270.c
464
if (flen > PAGE_SIZE)
drivers/s390/char/hmcdrv_cache.h
15
#define HMCDRV_CACHE_SIZE_DFLT (MAX_ORDER_NR_PAGES * PAGE_SIZE / 2UL)
drivers/s390/char/hmcdrv_ftp.c
194
.len = PAGE_SIZE
drivers/s390/char/sclp.c
607
sccb->length = PAGE_SIZE;
drivers/s390/char/sclp.c
827
BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);
drivers/s390/char/sclp.h
151
} __attribute__((packed, aligned(PAGE_SIZE)));
drivers/s390/char/sclp.h
196
} __packed __aligned(PAGE_SIZE);
drivers/s390/char/sclp_ap.c
33
sccb->header.length = PAGE_SIZE;
drivers/s390/char/sclp_cmd.c
104
length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
drivers/s390/char/sclp_ctl.c
65
copied = PAGE_SIZE -
drivers/s390/char/sclp_ctl.c
66
copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
drivers/s390/char/sclp_early.c
107
sclp.hsa_size = (sccb->hsa_size - 1) * PAGE_SIZE;
drivers/s390/char/sclp_early.c
129
int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
drivers/s390/char/sclp_early.c
135
sccb = memblock_alloc_low(length, PAGE_SIZE);
drivers/s390/char/sclp_early_core.c
202
BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE);
drivers/s390/char/sclp_early_core.c
281
int length = test_facility(140) ? EXT_SCCB_READ_SCP : PAGE_SIZE;
drivers/s390/char/sclp_early_core.c
335
*hsa_size = (sclp_info_sccb.hsa_size - 1) * PAGE_SIZE;
drivers/s390/char/sclp_mem.c
136
sccb->header.length = PAGE_SIZE;
drivers/s390/char/sclp_mem.c
496
memset(sccb, 0, PAGE_SIZE);
drivers/s390/char/sclp_mem.c
497
sccb->header.length = PAGE_SIZE;
drivers/s390/char/sclp_mem.c
90
sccb->header.length = PAGE_SIZE;
drivers/s390/char/sclp_pci.c
106
if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
drivers/s390/char/sclp_pci.c
51
sccb->header.length = PAGE_SIZE;
drivers/s390/char/sclp_rw.c
27
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
drivers/s390/char/sclp_rw.c
51
buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
drivers/s390/char/sclp_rw.h
58
#define NR_EMPTY_MSG_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
drivers/s390/char/sclp_sd.c
205
memset(sccb, 0, PAGE_SIZE);
drivers/s390/char/sclp_sd.c
311
data = vzalloc(array_size((size_t)dsize, PAGE_SIZE));
drivers/s390/char/sclp_sd.c
333
(size_t)dsize * PAGE_SIZE);
drivers/s390/char/sclp_sd.c
343
result->esize_bytes = (size_t) esize * PAGE_SIZE;
drivers/s390/char/sclp_sd.c
344
result->dsize_bytes = (size_t) dsize * PAGE_SIZE;
drivers/s390/char/sclp_sd.c
50
} __packed __aligned(PAGE_SIZE);
drivers/s390/char/sclp_vt220.c
267
((addr_t) page + PAGE_SIZE)) - 1;
drivers/s390/char/sclp_vt220.c
287
return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
drivers/s390/char/sclp_vt220.c
55
#define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
drivers/s390/char/tape_core.c
677
BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
drivers/s390/char/vmcp.c
111
session->bufsize = PAGE_SIZE;
drivers/s390/char/vmcp.c
217
session->bufsize = PAGE_SIZE;
drivers/s390/char/vmcp.c
219
session->bufsize = PAGE_SIZE;
drivers/s390/char/vmcp.c
47
vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE);
drivers/s390/char/vmcp.c
65
nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
drivers/s390/char/vmcp.c
89
nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
drivers/s390/char/vmlogrdr.c
50
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
drivers/s390/char/vmur.c
525
rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
drivers/s390/char/vmur.c
531
len = min((size_t) PAGE_SIZE, count);
drivers/s390/char/vmur.c
537
res = (size_t) (*offs % PAGE_SIZE);
drivers/s390/char/vmur.c
545
if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
drivers/s390/char/vmur.c
547
len = min(count - copied, PAGE_SIZE - res);
drivers/s390/char/vmur.c
792
if (offset % PAGE_SIZE)
drivers/s390/char/zcore.c
234
PAGE_SIZE);
drivers/s390/char/zcore.c
236
rc = memcpy_real(zcore_ipl_block, ipib_info.ipib, PAGE_SIZE);
drivers/s390/char/zcore.c
256
rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE);
drivers/s390/char/zcore.c
258
rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE);
drivers/s390/char/zcore.c
55
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
drivers/s390/char/zcore.c
74
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
drivers/s390/char/zcore.c
78
offset = src % PAGE_SIZE;
drivers/s390/char/zcore.c
79
bytes = min(PAGE_SIZE - offset, count);
drivers/s390/cio/airq.c
309
cache_line_size(), PAGE_SIZE);
drivers/s390/cio/chsc.c
1005
memset(page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1107
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
114
} __packed __aligned(PAGE_SIZE);
drivers/s390/cio/chsc.c
1200
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1230
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
126
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1281
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1325
memset(page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1350
memset(page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1371
memset(page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1397
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1509
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
1574
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.c
325
u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
drivers/s390/cio/chsc.c
334
u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
drivers/s390/cio/chsc.c
350
u8 nt_area[PAGE_SIZE - 24];
drivers/s390/cio/chsc.c
352
} __packed __aligned(PAGE_SIZE);
drivers/s390/cio/chsc.c
892
memset(chsc_page, 0, PAGE_SIZE);
drivers/s390/cio/chsc.h
107
} __packed __aligned(PAGE_SIZE);
drivers/s390/cio/chsc.h
127
} __packed __aligned(PAGE_SIZE);
drivers/s390/cio/chsc.h
145
} __packed __aligned(PAGE_SIZE);
drivers/s390/cio/chsc.h
160
} __packed __aligned(PAGE_SIZE);
drivers/s390/cio/chsc.h
226
} __packed __aligned(PAGE_SIZE);
drivers/s390/cio/chsc_sch.c
304
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
drivers/s390/cio/chsc_sch.c
318
if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
drivers/s390/cio/chsc_sch.c
348
if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
drivers/s390/cio/chsc_sch.c
398
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
drivers/s390/cio/chsc_sch.c
412
if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
drivers/s390/cio/chsc_sch.c
438
u8 data[PAGE_SIZE - 20];
drivers/s390/cio/chsc_sch.c
500
u8 data[PAGE_SIZE - 20];
drivers/s390/cio/chsc_sch.c
563
u8 data[PAGE_SIZE - 20];
drivers/s390/cio/chsc_sch.c
625
u8 data[PAGE_SIZE - 20];
drivers/s390/cio/chsc_sch.c
683
u8 data[PAGE_SIZE - 36];
drivers/s390/cio/chsc_sch.c
796
u8 data[PAGE_SIZE - 36];
drivers/s390/cio/css.c
1077
cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
drivers/s390/cio/css.c
1082
dma_addr, PAGE_SIZE, -1);
drivers/s390/cio/css.c
1126
chunk_size = round_up(size, PAGE_SIZE);
drivers/s390/cio/device.c
224
len = snprint_alias(buf, PAGE_SIZE, id, "\n");
drivers/s390/cio/device.c
226
return len > PAGE_SIZE ? PAGE_SIZE : len;
drivers/s390/cio/qdio_setup.c
155
memset(sl_page, 0, PAGE_SIZE);
drivers/s390/cio/qdio_setup.c
24
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
drivers/s390/cio/vfio_ccw_cp.c
101
*first + npage * PAGE_SIZE == *last &&
drivers/s390/cio/vfio_ccw_cp.c
141
*first + npage * PAGE_SIZE == *last &&
drivers/s390/cio/vfio_ccw_cp.c
515
int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE;
drivers/s390/crypto/zcrypt_ccamisc.c
1660
varray = (u8 *)mem + PAGE_SIZE / 2;
drivers/s390/crypto/zcrypt_ccamisc.c
1661
rlen = vlen = PAGE_SIZE / 2;
drivers/s390/crypto/zcrypt_ccamisc.c
1690
rlen = vlen = PAGE_SIZE / 2;
drivers/s390/crypto/zcrypt_msgtype6.c
214
if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE))
drivers/s390/crypto/zcrypt_msgtype6.c
284
if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE))
drivers/s390/net/ism_drv.c
101
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
drivers/s390/net/ism_drv.c
117
ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
drivers/s390/net/ism_drv.c
129
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
drivers/s390/net/ism_drv.c
151
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
drivers/s390/net/ism_drv.c
171
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
drivers/s390/net/ism_drv.c
401
bytes = max_bytes(offset, size, PAGE_SIZE);
drivers/s390/net/ism_drv.c
90
sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
drivers/s390/net/qeth_core.h
194
#define QETH_RX_COPYBREAK (PAGE_SIZE >> 1)
drivers/s390/net/qeth_core_main.c
2420
card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
drivers/s390/net/qeth_core_main.c
2960
buf->buffer->element[i].length = PAGE_SIZE;
drivers/s390/net/qeth_core_main.c
4091
PAGE_SIZE - offset_in_page(data));
drivers/s390/net/qeth_core_main.c
4121
PAGE_SIZE - offset_in_page(data));
drivers/s390/net/qeth_l2_main.c
1134
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
drivers/s390/net/qeth_l3_main.c
1915
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
drivers/s390/scsi/zfcp_aux.c
239
BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
drivers/s390/scsi/zfcp_fc.c
672
sg_set_buf(sg, addr, PAGE_SIZE);
drivers/s390/scsi/zfcp_fc.h
21
#define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr))
drivers/s390/scsi/zfcp_fc.h
26
#define ZFCP_FC_GPN_FT_MAX_SIZE (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
drivers/s390/scsi/zfcp_qdio.h
16
#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
drivers/s390/scsi/zfcp_sysfs.c
407
buf, PAGE_SIZE - 1, adapter->fc_security_algorithms,
drivers/s390/scsi/zfcp_sysfs.c
498
buf, PAGE_SIZE - 1, port->connection_info,
drivers/scsi/a2091.c
196
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/aacraid/aachba.c
3930
u32 min_size = PAGE_SIZE, cur_size;
drivers/scsi/aacraid/aachba.c
3973
for (i = min_size / PAGE_SIZE; i >= 1; --i) {
drivers/scsi/aacraid/aachba.c
3977
if (rio2->sge[j].length % (i*PAGE_SIZE)) {
drivers/scsi/aacraid/aachba.c
3981
nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
drivers/scsi/aacraid/aachba.c
4018
for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
drivers/scsi/aacraid/aachba.c
4019
addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
drivers/scsi/aacraid/aachba.c
4024
sge[pos].length = pages * PAGE_SIZE;
drivers/scsi/aacraid/aachba.c
4035
rio2->sgeNominalSize = pages * PAGE_SIZE;
drivers/scsi/aacraid/linit.c
1200
len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
drivers/scsi/aacraid/linit.c
1202
len = snprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/aacraid/linit.c
1219
len = snprintf(buf, PAGE_SIZE, "%.*s\n",
drivers/scsi/aacraid/linit.c
1223
len = snprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/aacraid/linit.c
1235
len = snprintf(buf, PAGE_SIZE, "dprintk\n");
drivers/scsi/aacraid/linit.c
1237
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/aacraid/linit.c
1241
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/aacraid/linit.c
1244
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/aacraid/linit.c
1248
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/aacraid/linit.c
1251
len += scnprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
drivers/scsi/aacraid/linit.c
1263
len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
drivers/scsi/aacraid/linit.c
1277
len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
drivers/scsi/aacraid/linit.c
1291
len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
drivers/scsi/aacraid/linit.c
1301
return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version);
drivers/scsi/aacraid/linit.c
1327
return snprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/aacraid/linit.c
1334
return snprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/aacraid/linit.c
1365
len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
drivers/scsi/aacraid/linit.c
563
return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
drivers/scsi/aacraid/linit.c
566
return snprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/aha152x.c
2957
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/aic7xxx/aic79xx_core.c
6003
while ((sg_list_size + sg_list_increment) <= PAGE_SIZE)
drivers/scsi/aic7xxx/aic79xx_core.c
6011
max_list_size = roundup(sg_list_increment, PAGE_SIZE);
drivers/scsi/aic7xxx/aic79xx_core.c
6012
if (max_list_size < 4 * PAGE_SIZE)
drivers/scsi/aic7xxx/aic79xx_core.c
6013
max_list_size = 4 * PAGE_SIZE;
drivers/scsi/aic7xxx/aic79xx_core.c
6017
&& (sg_list_size % PAGE_SIZE) != 0) {
drivers/scsi/aic7xxx/aic79xx_core.c
6022
new_mod = sg_list_size % PAGE_SIZE;
drivers/scsi/aic7xxx/aic79xx_core.c
6023
best_mod = best_list_size % PAGE_SIZE;
drivers/scsi/aic7xxx/aic79xx_core.c
6391
PAGE_SIZE, /*nsegments*/1,
drivers/scsi/aic7xxx/aic79xx_core.c
6424
PAGE_SIZE, /*nsegments*/1,
drivers/scsi/aic7xxx/aic79xx_core.c
6775
offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left;
drivers/scsi/aic7xxx/aic79xx_core.c
6796
hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
drivers/scsi/aic7xxx/aic79xx_core.c
6801
scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb);
drivers/scsi/aic7xxx/aic79xx_core.c
6845
offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left);
drivers/scsi/aic7xxx/aic79xx_core.c
6866
sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
drivers/scsi/aic7xxx/aic79xx_core.c
6871
scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
drivers/scsi/aic7xxx/aic7xxx_core.c
4834
PAGE_SIZE, /*nsegments*/1,
drivers/scsi/aic7xxx/aic7xxx_core.c
4962
sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
drivers/scsi/aic7xxx/aic7xxx_core.c
4968
newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
drivers/scsi/aic94xx/aic94xx_init.c
253
return snprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/aic94xx/aic94xx_init.c
262
return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld);
drivers/scsi/aic94xx/aic94xx_init.c
270
return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn);
drivers/scsi/aic94xx/aic94xx_init.c
441
return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
drivers/scsi/aic94xx/aic94xx_init.c
929
return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
drivers/scsi/arcmsr/arcmsr_attr.c
261
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
273
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
285
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
297
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
309
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
322
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
335
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
348
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
361
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arcmsr/arcmsr_attr.c
374
return snprintf(buf, PAGE_SIZE,
drivers/scsi/arm/acornscsi.c
2794
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/arm/arxescsi.c
254
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/arm/cumana_1.c
228
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/arm/oak.c
114
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/atari_scsi.c
713
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/be2iscsi/be_cmds.c
1193
q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
drivers/scsi/be2iscsi/be_main.c
2423
PAGE_SIZE;
drivers/scsi/be2iscsi/be_main.c
2429
PAGE_SIZE;
drivers/scsi/be2iscsi/be_main.c
2435
PAGE_SIZE;
drivers/scsi/be2iscsi/be_main.c
2441
PAGE_SIZE;
drivers/scsi/be2iscsi/be_main.c
3025
num_eq_pages * PAGE_SIZE,
drivers/scsi/be2iscsi/be_main.c
3062
* PAGE_SIZE,
drivers/scsi/be2iscsi/be_main.c
3090
num_cq_pages * PAGE_SIZE,
drivers/scsi/be2iscsi/be_main.c
3125
* PAGE_SIZE,
drivers/scsi/be2iscsi/be_main.c
3293
phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
drivers/scsi/be2iscsi/be_main.c
3298
(pm_arr->size / PAGE_SIZE));
drivers/scsi/be2iscsi/be_main.c
3299
page_offset += pm_arr->size / PAGE_SIZE;
drivers/scsi/be2iscsi/be_main.c
597
icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
drivers/scsi/be2iscsi/be_main.c
76
return snprintf(buf, PAGE_SIZE, "%d\n",\
drivers/scsi/be2iscsi/be_main.h
148
((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
drivers/scsi/be2iscsi/be_mgmt.c
1146
return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
drivers/scsi/be2iscsi/be_mgmt.c
1165
return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
drivers/scsi/be2iscsi/be_mgmt.c
1189
len += scnprintf(buf+len, PAGE_SIZE - len,
drivers/scsi/be2iscsi/be_mgmt.c
1193
len += scnprintf(buf+len, PAGE_SIZE - len,
drivers/scsi/be2iscsi/be_mgmt.c
1219
len += scnprintf(buf+len, PAGE_SIZE - len,
drivers/scsi/be2iscsi/be_mgmt.c
1223
len += scnprintf(buf+len, PAGE_SIZE - len,
drivers/scsi/be2iscsi/be_mgmt.c
1252
return snprintf(buf, PAGE_SIZE,
drivers/scsi/be2iscsi/be_mgmt.c
1256
return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n");
drivers/scsi/be2iscsi/be_mgmt.c
1258
return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n");
drivers/scsi/be2iscsi/be_mgmt.c
1260
return snprintf(buf, PAGE_SIZE,
drivers/scsi/be2iscsi/be_mgmt.c
1281
return snprintf(buf, PAGE_SIZE, "Port Identifier : %u\n",
drivers/scsi/bnx2fc/bnx2fc.h
113
#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
drivers/scsi/bnx2fc/bnx2fc_els.c
159
buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
drivers/scsi/bnx2fc/bnx2fc_els.c
165
if (hdr_len + resp_len > PAGE_SIZE) {
drivers/scsi/bnx2fc/bnx2fc_els.c
330
buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
drivers/scsi/bnx2fc/bnx2fc_els.c
455
buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1882
PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1919
PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1938
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1950
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1963
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1973
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
2017
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
2065
hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
2151
PAGE_SIZE, &hba->dummy_buf_dma,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
2159
hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
2176
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
2182
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
drivers/scsi/bnx2i/bnx2i_iscsi.c
330
mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
drivers/scsi/bnx2i/bnx2i_iscsi.c
337
mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
drivers/scsi/csiostor/csio_scsi.c
1529
bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
drivers/scsi/csiostor/csio_scsi.c
2339
buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
drivers/scsi/csiostor/csio_scsi.c
2471
if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
drivers/scsi/csiostor/csio_wr.c
1400
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
98
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
116
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/cxgbi/libcxgbi.c
1174
offset += PAGE_SIZE;
drivers/scsi/cxgbi/libcxgbi.c
1248
((i != last_sgidx) && len != PAGE_SIZE)) {
drivers/scsi/dc395x.c
1701
(PAGE_SIZE - diff))
drivers/scsi/dc395x.c
3493
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
drivers/scsi/dc395x.c
3507
int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
drivers/scsi/dc395x.c
3508
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
drivers/scsi/dc395x.c
3517
ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/scsi/dc395x.c
3953
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/dmx3191d.c
54
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/elx/efct/efct_lio.c
143
return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
drivers/scsi/elx/efct/efct_lio.c
85
return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
drivers/scsi/esas2r/esas2r_main.c
134
int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
drivers/scsi/esas2r/esas2r_main.c
176
int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
drivers/scsi/fcoe/fcoe_transport.c
325
clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
drivers/scsi/fcoe/fcoe_transport.c
493
if (fps->crc_eof_offset >= PAGE_SIZE) {
drivers/scsi/fcoe/fcoe_transport.c
606
if (i >= PAGE_SIZE - IFNAMSIZ)
drivers/scsi/fdomain.c
511
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/fnic/fnic_debugfs.c
209
PAGE_SIZE));
drivers/scsi/fnic/fnic_debugfs.c
218
PAGE_SIZE));
drivers/scsi/fnic/fnic_debugfs.c
584
int buf_size = 2 * PAGE_SIZE;
drivers/scsi/fnic/fnic_trace.c
140
(trace_max_pages * PAGE_SIZE * 3) - len,
drivers/scsi/fnic/fnic_trace.c
181
(trace_max_pages * PAGE_SIZE * 3) - len,
drivers/scsi/fnic/fnic_trace.c
532
fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
drivers/scsi/fnic/fnic_trace.c
535
fnic_trace_buf_p = (unsigned long)vcalloc(trace_max_pages, PAGE_SIZE);
drivers/scsi/fnic/fnic_trace.c
612
fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
drivers/scsi/fnic/fnic_trace.c
615
(unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE);
drivers/scsi/fnic/fnic_trace.c
708
fnic_fc_trace_max_pages * PAGE_SIZE);
drivers/scsi/fnic/fnic_trace.c
789
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
drivers/scsi/fnic/fnic_trace.c
809
(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
drivers/scsi/fnic/fnic_trace.c
813
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
drivers/scsi/fnic/fnic_trace.c
845
int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
drivers/scsi/g_NCR5380.c
705
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/gvp11.c
233
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/hisi_sas/hisi_sas_main.c
2295
s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2850
return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2882
return scnprintf(buf, PAGE_SIZE, "%u\n",
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2921
return scnprintf(buf, PAGE_SIZE, "%u\n",
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2960
return scnprintf(buf, PAGE_SIZE, "%u\n",
drivers/scsi/hpsa.c
687
l = snprintf(buf, PAGE_SIZE, "N/A\n");
drivers/scsi/hpsa.c
695
l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
drivers/scsi/hpsa.c
769
return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
drivers/scsi/hpsa.c
835
PAGE_SIZE - output_len,
drivers/scsi/hpsa.c
843
PAGE_SIZE - output_len,
drivers/scsi/hpsa.c
856
PAGE_SIZE - output_len,
drivers/scsi/hpsa.c
863
PAGE_SIZE - output_len,
drivers/scsi/hpsa.c
868
PAGE_SIZE - output_len,
drivers/scsi/hpsa.c
874
PAGE_SIZE - output_len, "BOX: %hhu %s\n",
drivers/scsi/hpsa.c
878
PAGE_SIZE - output_len, "%s\n", active);
drivers/scsi/hptiop.c
1114
return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
drivers/scsi/hptiop.c
1123
return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
drivers/scsi/ibmvscsi/ibmvfc.c
5841
queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
drivers/scsi/ibmvscsi/ibmvfc.c
5851
queue->size = PAGE_SIZE / fmt_size;
drivers/scsi/ibmvscsi/ibmvfc.c
5879
crq->msg_token, PAGE_SIZE);
drivers/scsi/ibmvscsi/ibmvfc.c
5930
rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
drivers/scsi/ibmvscsi/ibmvfc.c
6012
memset(scrq->msgs.crq, 0, PAGE_SIZE);
drivers/scsi/ibmvscsi/ibmvfc.c
6565
int min_max_sectors = PAGE_SIZE >> 9;
drivers/scsi/ibmvscsi/ibmvfc.c
715
memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
drivers/scsi/ibmvscsi/ibmvfc.c
884
dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/scsi/ibmvscsi/ibmvfc.c
987
memset(crq->msgs.crq, 0, PAGE_SIZE);
drivers/scsi/ibmvscsi/ibmvfc.c
992
crq->msg_token, PAGE_SIZE);
drivers/scsi/ibmvscsi/ibmvscsi.c
1953
len = snprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/ibmvscsi/ibmvscsi.c
1974
len = snprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/ibmvscsi/ibmvscsi.c
1995
len = snprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/ibmvscsi/ibmvscsi.c
2015
len = snprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/ibmvscsi/ibmvscsi.c
2035
len = snprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/ibmvscsi/ibmvscsi.c
298
memset(queue->msgs, 0x00, PAGE_SIZE);
drivers/scsi/ibmvscsi/ibmvscsi.c
306
queue->msg_token, PAGE_SIZE);
drivers/scsi/ibmvscsi/ibmvscsi.c
338
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
drivers/scsi/ibmvscsi/ibmvscsi.c
352
queue->msg_token, PAGE_SIZE);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1630
if ((olen < min_len) || (olen > PAGE_SIZE)) {
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
278
bytes = vscsi->cmd_q.size * PAGE_SIZE;
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2968
bytes = vscsi->cmd_q.size * PAGE_SIZE;
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3014
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3035
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3501
vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3508
vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3517
(u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3561
dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3600
dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3907
return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3947
.max_data_sg_nents = MAX_TXU / PAGE_SIZE,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
768
(u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
896
bytes = vscsi->cmd_q.size * PAGE_SIZE;
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
31
#define CRQ_PER_PAGE (PAGE_SIZE / sizeof(struct viosrp_crq))
drivers/scsi/ipr.c
2894
if (ioa_dump->page_offset >= PAGE_SIZE ||
drivers/scsi/ipr.c
2910
rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
drivers/scsi/ipr.c
3415
len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
drivers/scsi/ipr.c
3449
len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
drivers/scsi/ipr.c
3568
len = snprintf(buf, PAGE_SIZE, "offline\n");
drivers/scsi/ipr.c
3570
len = snprintf(buf, PAGE_SIZE, "online\n");
drivers/scsi/ipr.c
3687
len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
drivers/scsi/ipr.c
3830
bsize_elem = PAGE_SIZE * (1 << sglist->order);
drivers/scsi/ipr.c
4076
len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
drivers/scsi/ipr.c
4435
len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
drivers/scsi/ipr.c
4470
len = snprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/ipr.c
4474
len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
drivers/scsi/ipr.c
4509
len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
drivers/scsi/ipr.c
4511
len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
drivers/scsi/ipr.c
4546
len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
drivers/scsi/ipr.c
4581
len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
drivers/scsi/ipr.h
311
#define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
drivers/scsi/ipr.h
312
#define IPR_FMT3_MAX_NUM_DUMP_PAGES ((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
drivers/scsi/ips.c
1662
ha->flash_len = PAGE_SIZE << 7;
drivers/scsi/ips.c
6867
ioremap_ptr = ioremap(base, PAGE_SIZE);
drivers/scsi/ips.c
6916
PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
drivers/scsi/ips.c
6976
if (ips_ioctlsize < PAGE_SIZE)
drivers/scsi/ips.c
6977
ips_ioctlsize = PAGE_SIZE;
drivers/scsi/iscsi_tcp.c
1041
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/libfc/fc_fcp.c
676
(size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
drivers/scsi/libfc/fc_fcp.c
685
fp_skb(fp)->truesize += PAGE_SIZE;
drivers/scsi/libfc/fc_libfc.c
131
(size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
drivers/scsi/libsas/sas_init.c
596
return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
drivers/scsi/lpfc/lpfc_attr.c
1016
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
drivers/scsi/lpfc/lpfc_attr.c
1035
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
drivers/scsi/lpfc/lpfc_attr.c
1054
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
drivers/scsi/lpfc/lpfc_attr.c
1073
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
drivers/scsi/lpfc/lpfc_attr.c
1101
len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
drivers/scsi/lpfc/lpfc_attr.c
1104
len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
drivers/scsi/lpfc/lpfc_attr.c
1128
return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
drivers/scsi/lpfc/lpfc_attr.c
1150
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/lpfc/lpfc_attr.c
1154
return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
drivers/scsi/lpfc/lpfc_attr.c
1185
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_attr.c
1188
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_attr.c
1194
len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
drivers/scsi/lpfc/lpfc_attr.c
1198
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_attr.c
1208
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1212
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1217
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1222
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1228
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_attr.c
1231
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_attr.c
1240
PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1244
PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1247
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_attr.c
1260
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1267
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1274
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1281
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
1309
return scnprintf(buf, PAGE_SIZE, "fc\n");
drivers/scsi/lpfc/lpfc_attr.c
1313
return scnprintf(buf, PAGE_SIZE, "fcoe\n");
drivers/scsi/lpfc/lpfc_attr.c
1315
return scnprintf(buf, PAGE_SIZE, "fc\n");
drivers/scsi/lpfc/lpfc_attr.c
1317
return scnprintf(buf, PAGE_SIZE, "unknown\n");
drivers/scsi/lpfc/lpfc_attr.c
1337
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
1395
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
141
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
161
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
182
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
1959
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
drivers/scsi/lpfc/lpfc_attr.c
199
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
2046
len = scnprintf(buf, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
2053
len = scnprintf(buf, PAGE_SIZE - len, "SFP info NA:\n");
drivers/scsi/lpfc/lpfc_attr.c
2059
len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf);
drivers/scsi/lpfc/lpfc_attr.c
2060
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
2066
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf);
drivers/scsi/lpfc/lpfc_attr.c
2068
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf);
drivers/scsi/lpfc/lpfc_attr.c
2070
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf);
drivers/scsi/lpfc/lpfc_attr.c
2072
len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf);
drivers/scsi/lpfc/lpfc_attr.c
2073
len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n",
drivers/scsi/lpfc/lpfc_attr.c
2075
len += scnprintf(buf + len, PAGE_SIZE - len, "ExtIdentifier:\t%xh\n",
drivers/scsi/lpfc/lpfc_attr.c
2077
len += scnprintf(buf + len, PAGE_SIZE - len, "Connector:\t%xh\n",
drivers/scsi/lpfc/lpfc_attr.c
2082
len += scnprintf(buf + len, PAGE_SIZE - len, "Wavelength:\t%d nm\n",
drivers/scsi/lpfc/lpfc_attr.c
2087
len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t");
drivers/scsi/lpfc/lpfc_attr.c
2089
len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n");
drivers/scsi/lpfc/lpfc_attr.c
2092
len += scnprintf(buf + len, PAGE_SIZE - len, "1 ");
drivers/scsi/lpfc/lpfc_attr.c
2094
len += scnprintf(buf + len, PAGE_SIZE - len, "2 ");
drivers/scsi/lpfc/lpfc_attr.c
2096
len += scnprintf(buf + len, PAGE_SIZE - len, "4 ");
drivers/scsi/lpfc/lpfc_attr.c
2098
len += scnprintf(buf + len, PAGE_SIZE - len, "8 ");
drivers/scsi/lpfc/lpfc_attr.c
2100
len += scnprintf(buf + len, PAGE_SIZE - len, "16 ");
drivers/scsi/lpfc/lpfc_attr.c
2102
len += scnprintf(buf + len, PAGE_SIZE - len, "32 ");
drivers/scsi/lpfc/lpfc_attr.c
2104
len += scnprintf(buf + len, PAGE_SIZE - len, "64 ");
drivers/scsi/lpfc/lpfc_attr.c
2105
len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n");
drivers/scsi/lpfc/lpfc_attr.c
2118
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
2120
len += scnprintf(buf + len, PAGE_SIZE - len, "Vcc:\t\tx%04x V\n", vcc);
drivers/scsi/lpfc/lpfc_attr.c
2121
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
2123
len += scnprintf(buf + len, PAGE_SIZE - len, "TxPower:\tx%04x mW\n",
drivers/scsi/lpfc/lpfc_attr.c
2125
len += scnprintf(buf + len, PAGE_SIZE - len, "RxPower:\tx%04x mW\n",
drivers/scsi/lpfc/lpfc_attr.c
2158
return scnprintf(buf, PAGE_SIZE, "%s\n", state);
drivers/scsi/lpfc/lpfc_attr.c
220
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
237
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
2425
return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
drivers/scsi/lpfc/lpfc_attr.c
2426
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
drivers/scsi/lpfc/lpfc_attr.c
2457
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
2461
return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
drivers/scsi/lpfc/lpfc_attr.c
2463
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
drivers/scsi/lpfc/lpfc_attr.c
2490
return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
drivers/scsi/lpfc/lpfc_attr.c
2491
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
drivers/scsi/lpfc/lpfc_attr.c
2522
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
2526
return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
drivers/scsi/lpfc/lpfc_attr.c
2528
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
drivers/scsi/lpfc/lpfc_attr.c
254
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
2555
return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
drivers/scsi/lpfc/lpfc_attr.c
2556
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
drivers/scsi/lpfc/lpfc_attr.c
2587
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
2591
return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
drivers/scsi/lpfc/lpfc_attr.c
2593
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
drivers/scsi/lpfc/lpfc_attr.c
2618
return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
drivers/scsi/lpfc/lpfc_attr.c
2620
return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
drivers/scsi/lpfc/lpfc_attr.c
2621
return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
drivers/scsi/lpfc/lpfc_attr.c
2643
return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
drivers/scsi/lpfc/lpfc_attr.c
269
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
2754
return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
drivers/scsi/lpfc/lpfc_attr.c
278
strlcat(buf, tmp, PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
2809
return scnprintf(buf, PAGE_SIZE, "%d\n",\
drivers/scsi/lpfc/lpfc_attr.c
281
len = strnlen(buf, PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
283
if (unlikely(len >= (PAGE_SIZE - 1))) {
drivers/scsi/lpfc/lpfc_attr.c
2837
return scnprintf(buf, PAGE_SIZE, "%#x\n",\
drivers/scsi/lpfc/lpfc_attr.c
287
PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
288
strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
drivers/scsi/lpfc/lpfc_attr.c
2973
return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
drivers/scsi/lpfc/lpfc_attr.c
2998
return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
drivers/scsi/lpfc/lpfc_attr.c
311
len += scnprintf(buf + len, PAGE_SIZE - len, "Key 'vmid':\n");
drivers/scsi/lpfc/lpfc_attr.c
315
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
318
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
3210
return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
drivers/scsi/lpfc/lpfc_attr.c
325
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
3278
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
drivers/scsi/lpfc/lpfc_attr.c
329
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
3341
return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
drivers/scsi/lpfc/lpfc_attr.c
3412
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
drivers/scsi/lpfc/lpfc_attr.c
347
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
3476
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
drivers/scsi/lpfc/lpfc_attr.c
3632
len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
drivers/scsi/lpfc/lpfc_attr.c
370
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
3766
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
377
len += scnprintf(buf + len, PAGE_SIZE - len, "UUID:\n");
drivers/scsi/lpfc/lpfc_attr.c
3778
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
378
len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", hxstr);
drivers/scsi/lpfc/lpfc_attr.c
3799
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
drivers/scsi/lpfc/lpfc_attr.c
380
len += scnprintf(buf + len, PAGE_SIZE - len, "String (%s)\n",
drivers/scsi/lpfc/lpfc_attr.c
3811
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
3825
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/lpfc/lpfc_attr.c
384
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
3858
return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
drivers/scsi/lpfc/lpfc_attr.c
388
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
411
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
418
if (len >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
438
return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
drivers/scsi/lpfc/lpfc_attr.c
458
return scnprintf(buf, PAGE_SIZE, "1\n");
drivers/scsi/lpfc/lpfc_attr.c
4597
return scnprintf(buf, PAGE_SIZE, "%d\n", lpfc_aer_support);
drivers/scsi/lpfc/lpfc_attr.c
460
return scnprintf(buf, PAGE_SIZE, "0\n");
drivers/scsi/lpfc/lpfc_attr.c
486
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
drivers/scsi/lpfc/lpfc_attr.c
491
len = scnprintf(buf, PAGE_SIZE,
drivers/scsi/lpfc/lpfc_attr.c
504
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
514
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
5168
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_attr.c
517
if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
5173
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_attr.c
518
>= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
5187
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
5193
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
5202
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
5213
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
5224
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
5240
(len >= (PAGE_SIZE - 64))) {
drivers/scsi/lpfc/lpfc_attr.c
5242
PAGE_SIZE - len, "more...\n");
drivers/scsi/lpfc/lpfc_attr.c
527
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
536
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
545
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
553
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
563
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
572
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
5807
return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
drivers/scsi/lpfc/lpfc_attr.c
584
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
592
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
5937
len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
drivers/scsi/lpfc/lpfc_attr.c
5940
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_attr.c
600
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
607
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
616
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
624
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
639
strlcat(buf, tmp, PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
645
len = scnprintf(buf, PAGE_SIZE,
drivers/scsi/lpfc/lpfc_attr.c
651
if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
660
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
676
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
705
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
708
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
714
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
719
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
724
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
729
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
733
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
737
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
745
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
750
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
758
if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
766
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
774
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
792
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
804
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
811
strlcat(buf, tmp, PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
820
len = strnlen(buf, PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
822
if (unlikely(len >= (PAGE_SIZE - 1))) {
drivers/scsi/lpfc/lpfc_attr.c
826
PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
827
strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
drivers/scsi/lpfc/lpfc_attr.c
853
scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
drivers/scsi/lpfc/lpfc_attr.c
868
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
873
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
drivers/scsi/lpfc/lpfc_attr.c
878
strlcat(buf, tmp, PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
881
len = strnlen(buf, PAGE_SIZE);
drivers/scsi/lpfc/lpfc_attr.c
896
return scnprintf(buf, PAGE_SIZE,
drivers/scsi/lpfc/lpfc_attr.c
899
return scnprintf(buf, PAGE_SIZE,
drivers/scsi/lpfc/lpfc_attr.c
902
return scnprintf(buf, PAGE_SIZE,
drivers/scsi/lpfc/lpfc_attr.c
914
return scnprintf(buf, PAGE_SIZE, "%llu\n",
drivers/scsi/lpfc/lpfc_attr.c
926
return scnprintf(buf, PAGE_SIZE, "%llu\n",
drivers/scsi/lpfc/lpfc_attr.c
938
return scnprintf(buf, PAGE_SIZE, "%llu\n",
drivers/scsi/lpfc/lpfc_attr.c
956
return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
drivers/scsi/lpfc/lpfc_attr.c
975
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
drivers/scsi/lpfc/lpfc_attr.c
997
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
drivers/scsi/lpfc/lpfc_debugfs.c
1201
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1205
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1209
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1404
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1412
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1416
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1423
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1427
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1434
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1438
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1445
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1449
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1456
buf + len, PAGE_SIZE - len,
drivers/scsi/lpfc/lpfc_debugfs.c
1467
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1475
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1478
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1484
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1487
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1493
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1496
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1502
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1505
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1511
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1514
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1522
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1525
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1535
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1538
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1544
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1547
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1553
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1556
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1562
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1565
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1571
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_debugfs.c
1574
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/lpfc/lpfc_sli.c
17395
(PAGE_SIZE/SLI4_PAGE_SIZE));
drivers/scsi/lpfc/lpfc_sli.c
17594
(PAGE_SIZE/SLI4_PAGE_SIZE));
drivers/scsi/mac_esp.c
311
host->dma_boundary = PAGE_SIZE - 1;
drivers/scsi/mac_scsi.c
437
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/megaraid/megaraid_mbox.c
3771
PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL);
drivers/scsi/megaraid/megaraid_mbox.c
3808
dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE,
drivers/scsi/megaraid/megaraid_sas_base.c
3052
PAGE_SIZE - bytes_wrote,
drivers/scsi/megaraid/megaraid_sas_base.c
3322
size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
drivers/scsi/megaraid/megaraid_sas_base.c
3340
return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
drivers/scsi/megaraid/megaraid_sas_base.c
3341
((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
drivers/scsi/megaraid/megaraid_sas_base.c
3386
return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
drivers/scsi/megaraid/megaraid_sas_base.c
3393
return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
drivers/scsi/megaraid/megaraid_sas_base.c
3403
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
drivers/scsi/megaraid/megaraid_sas_base.c
3413
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
drivers/scsi/megaraid/megaraid_sas_base.c
3423
return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
drivers/scsi/megaraid/megaraid_sas_base.c
3477
return snprintf(buf, PAGE_SIZE, "%ld\n",
drivers/scsi/mpi3mr/mpi3mr_app.c
3277
return snprintf(buf, PAGE_SIZE, "%llu\n", mrioc->fwfault_counter);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3152
return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3177
return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3200
return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3239
return snprintf(buf, PAGE_SIZE, "%08xh\n",
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3259
return snprintf(buf, PAGE_SIZE, "%08xh\n",
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3339
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3361
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3382
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3404
return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3424
return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3460
return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3497
return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3525
return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3590
rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3647
return snprintf(buf, PAGE_SIZE, "%d\n", size);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3689
size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3734
return snprintf(buf, PAGE_SIZE, "off\n");
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3737
return snprintf(buf, PAGE_SIZE, "release\n");
drivers/scsi/mpt3sas/mpt3sas_ctl.c
3739
return snprintf(buf, PAGE_SIZE, "post\n");
drivers/scsi/mpt3sas/mpt3sas_ctl.c
4151
return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
4171
return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
drivers/scsi/mpt3sas/mpt3sas_ctl.c
4319
return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
drivers/scsi/mpt3sas/mpt3sas_ctl.c
4341
return snprintf(buf, PAGE_SIZE, "0x%04x\n",
drivers/scsi/mpt3sas/mpt3sas_ctl.c
4379
return snprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/mvumi.c
2180
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/myrb.c
3432
if (mmio_size < PAGE_SIZE)
drivers/scsi/myrb.c
3433
mmio_size = PAGE_SIZE;
drivers/scsi/myrs.c
2308
if (mmio_size < PAGE_SIZE)
drivers/scsi/myrs.c
2309
mmio_size = PAGE_SIZE;
drivers/scsi/ncr53c8xx.c
187
#if PAGE_SIZE >= 8192
drivers/scsi/ncr53c8xx.c
235
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
drivers/scsi/ncr53c8xx.c
245
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
drivers/scsi/ncr53c8xx.c
282
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
drivers/scsi/ncr53c8xx.c
294
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
drivers/scsi/ncr53c8xx.c
385
PAGE_SIZE<<MEMO_PAGE_ORDER,
drivers/scsi/ncr53c8xx.c
413
dma_free_coherent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
drivers/scsi/nsp32.c
273
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/pcmcia/nsp_cs.c
90
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/pcmcia/nsp_cs.c
934
scsi_get_resid(SCpnt) <= PAGE_SIZE ) {
drivers/scsi/pcmcia/qlogic_stub.c
78
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/pm8001/pm80xx_hwi.c
407
PAGE_SIZE, "Not supported for SPC controller");
drivers/scsi/pm8001/pm80xx_hwi.c
483
buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 0xFFFFFFFF);
drivers/scsi/pm8001/pm80xx_hwi.c
488
buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 2);
drivers/scsi/pm8001/pm80xx_hwi.c
492
buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 4);
drivers/scsi/pm8001/pm80xx_hwi.c
501
buf_copy += snprintf(buf_copy, PAGE_SIZE,
drivers/scsi/pmcraid.c
3544
return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
drivers/scsi/pmcraid.c
3605
return snprintf(buf, PAGE_SIZE, "version: %s\n",
drivers/scsi/pmcraid.c
3638
return snprintf(buf, PAGE_SIZE,
drivers/scsi/qedf/qedf_attr.c
41
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
drivers/scsi/qedf/qedf_attr.c
57
return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period);
drivers/scsi/qedf/qedf_dbg.h
62
#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE)
drivers/scsi/qedf/qedf_io.c
707
io_req->task_params->rx_io_size = PAGE_SIZE;
drivers/scsi/qedf/qedf_io.c
743
rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
drivers/scsi/qedf/qedf_main.c
3100
PAGE_SIZE) * sizeof(void *);
drivers/scsi/qla2xxx/qla_attr.c
1078
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/qla2xxx/qla_attr.c
1091
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/qla2xxx/qla_attr.c
1094
qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
drivers/scsi/qla2xxx/qla_attr.c
1099
return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
drivers/scsi/qla2xxx/qla_attr.c
1109
return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
drivers/scsi/qla2xxx/qla_attr.c
1120
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/qla2xxx/qla_attr.c
1123
return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
drivers/scsi/qla2xxx/qla_attr.c
1134
return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
drivers/scsi/qla2xxx/qla_attr.c
1143
return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
drivers/scsi/qla2xxx/qla_attr.c
1153
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/qla2xxx/qla_attr.c
1169
len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
drivers/scsi/qla2xxx/qla_attr.c
1172
len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
drivers/scsi/qla2xxx/qla_attr.c
1174
len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
drivers/scsi/qla2xxx/qla_attr.c
1178
len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
drivers/scsi/qla2xxx/qla_attr.c
1181
len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
drivers/scsi/qla2xxx/qla_attr.c
1184
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
1188
len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
drivers/scsi/qla2xxx/qla_attr.c
1191
len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
drivers/scsi/qla2xxx/qla_attr.c
1207
len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
drivers/scsi/qla2xxx/qla_attr.c
1210
len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
drivers/scsi/qla2xxx/qla_attr.c
1250
return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
drivers/scsi/qla2xxx/qla_attr.c
1278
return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
drivers/scsi/qla2xxx/qla_attr.c
1308
len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
drivers/scsi/qla2xxx/qla_attr.c
1310
len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
drivers/scsi/qla2xxx/qla_attr.c
1362
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1364
return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
drivers/scsi/qla2xxx/qla_attr.c
1430
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
drivers/scsi/qla2xxx/qla_attr.c
1441
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
drivers/scsi/qla2xxx/qla_attr.c
1452
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
drivers/scsi/qla2xxx/qla_attr.c
1463
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
drivers/scsi/qla2xxx/qla_attr.c
1477
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1479
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
drivers/scsi/qla2xxx/qla_attr.c
1490
return scnprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/qla2xxx/qla_attr.c
1504
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1510
return scnprintf(buf, PAGE_SIZE, "%u\n",
drivers/scsi/qla2xxx/qla_attr.c
1514
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1525
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1527
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
drivers/scsi/qla2xxx/qla_attr.c
1541
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1543
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
drivers/scsi/qla2xxx/qla_attr.c
1556
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1558
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
drivers/scsi/qla2xxx/qla_attr.c
1569
return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
drivers/scsi/qla2xxx/qla_attr.c
1579
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1581
return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
drivers/scsi/qla2xxx/qla_attr.c
1591
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1593
return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
drivers/scsi/qla2xxx/qla_attr.c
1602
return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
drivers/scsi/qla2xxx/qla_attr.c
1629
return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
drivers/scsi/qla2xxx/qla_attr.c
1632
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1646
return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
drivers/scsi/qla2xxx/qla_attr.c
1668
return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
drivers/scsi/qla2xxx/qla_attr.c
1679
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1681
return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
drivers/scsi/qla2xxx/qla_attr.c
1691
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1693
return scnprintf(buf, PAGE_SIZE, "%llu\n",
drivers/scsi/qla2xxx/qla_attr.c
1712
return scnprintf(buf, PAGE_SIZE, "%d\n", size);
drivers/scsi/qla2xxx/qla_attr.c
1722
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1724
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/qla2xxx/qla_attr.c
1754
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1756
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
drivers/scsi/qla2xxx/qla_attr.c
1768
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1770
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/qla2xxx/qla_attr.c
1787
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
1789
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/qla2xxx/qla_attr.c
1897
return scnprintf(buf, PAGE_SIZE, "%s\n", speed);
drivers/scsi/qla2xxx/qla_attr.c
1932
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
1936
len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
drivers/scsi/qla2xxx/qla_attr.c
1940
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
1944
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
1948
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
1952
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
1956
len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
drivers/scsi/qla2xxx/qla_attr.c
2281
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
2285
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
2318
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
2322
len += scnprintf(buf + len, PAGE_SIZE-len,
drivers/scsi/qla2xxx/qla_attr.c
2355
return scnprintf(buf, PAGE_SIZE,
drivers/scsi/qla2xxx/qla_attr.c
2370
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
2372
return scnprintf(buf, PAGE_SIZE, "%llx\n",
drivers/scsi/qla2xxx/qla_attr.c
2385
return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
drivers/scsi/qla2xxx/qla_attr.c
2396
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
2399
return scnprintf(buf, PAGE_SIZE, "\n");
drivers/scsi/qla2xxx/qla_attr.c
2401
return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
drivers/scsi/qla2xxx/qla_nx.c
683
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
drivers/scsi/qla2xxx/qla_nx.c
685
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
drivers/scsi/qla2xxx/qla_nx.c
691
addr += start & (PAGE_SIZE - 1);
drivers/scsi/qla2xxx/qla_nx.c
755
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
drivers/scsi/qla2xxx/qla_nx.c
757
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
drivers/scsi/qla2xxx/qla_nx.c
762
addr += start & (PAGE_SIZE - 1);
drivers/scsi/qla4xxx/ql4_attr.c
159
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
drivers/scsi/qla4xxx/ql4_attr.c
163
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
drivers/scsi/qla4xxx/ql4_attr.c
173
return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
drivers/scsi/qla4xxx/ql4_attr.c
181
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major,
drivers/scsi/qla4xxx/ql4_attr.c
190
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
drivers/scsi/qla4xxx/ql4_attr.c
200
return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id);
drivers/scsi/qla4xxx/ql4_attr.c
210
return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state,
drivers/scsi/qla4xxx/ql4_attr.c
223
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
drivers/scsi/qla4xxx/ql4_attr.c
235
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
drivers/scsi/qla4xxx/ql4_attr.c
247
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
drivers/scsi/qla4xxx/ql4_attr.c
256
return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
drivers/scsi/qla4xxx/ql4_attr.c
264
return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date,
drivers/scsi/qla4xxx/ql4_attr.c
273
return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user);
drivers/scsi/qla4xxx/ql4_attr.c
281
return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp);
drivers/scsi/qla4xxx/ql4_attr.c
303
return snprintf(buf, PAGE_SIZE, "%s\n", load_src);
drivers/scsi/qla4xxx/ql4_attr.c
312
return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs,
drivers/scsi/qla4xxx/ql4_nx.c
737
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
drivers/scsi/qla4xxx/ql4_nx.c
739
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
drivers/scsi/qla4xxx/ql4_nx.c
746
addr += start & (PAGE_SIZE - 1);
drivers/scsi/qla4xxx/ql4_nx.c
810
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
drivers/scsi/qla4xxx/ql4_nx.c
812
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
drivers/scsi/qla4xxx/ql4_nx.c
817
addr += start & (PAGE_SIZE - 1);
drivers/scsi/qla4xxx/ql4_os.c
4246
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
drivers/scsi/qlogicfas.c
200
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/scsi_debug.c
2949
#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
drivers/scsi/scsi_debug.c
7645
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
drivers/scsi/scsi_debug.c
7686
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
drivers/scsi/scsi_debug.c
7729
return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
drivers/scsi/scsi_debug.c
7759
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
drivers/scsi/scsi_debug.c
7782
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
drivers/scsi/scsi_debug.c
7799
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
drivers/scsi/scsi_debug.c
7847
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
drivers/scsi/scsi_debug.c
7864
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
drivers/scsi/scsi_debug.c
7882
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
drivers/scsi/scsi_debug.c
7888
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
drivers/scsi/scsi_debug.c
7906
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
drivers/scsi/scsi_debug.c
7912
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
drivers/scsi/scsi_debug.c
7944
return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
drivers/scsi/scsi_debug.c
7981
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
drivers/scsi/scsi_debug.c
8020
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
drivers/scsi/scsi_debug.c
8048
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
drivers/scsi/scsi_debug.c
8053
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
drivers/scsi/scsi_debug.c
8076
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
drivers/scsi/scsi_debug.c
8082
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
drivers/scsi/scsi_debug.c
8088
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
drivers/scsi/scsi_debug.c
8128
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
drivers/scsi/scsi_debug.c
8171
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
drivers/scsi/scsi_debug.c
8188
return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
drivers/scsi/scsi_debug.c
8210
return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
drivers/scsi/scsi_debug.c
8216
return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
drivers/scsi/scsi_debug.c
8222
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
drivers/scsi/scsi_debug.c
8228
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
drivers/scsi/scsi_debug.c
8234
return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
drivers/scsi/scsi_debug.c
8240
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
drivers/scsi/scsi_debug.c
8249
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
drivers/scsi/scsi_debug.c
8256
count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
drivers/scsi/scsi_debug.c
8268
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
drivers/scsi/scsi_debug.c
8286
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
drivers/scsi/scsi_debug.c
8303
return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
drivers/scsi/scsi_debug.c
8321
return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
drivers/scsi/scsi_debug.c
8338
return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
drivers/scsi/scsi_debug.c
8344
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
drivers/scsi/scsi_debug.c
8395
return scnprintf(buf, PAGE_SIZE, "%s\n",
drivers/scsi/scsi_debug.c
8402
return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
drivers/scsi/scsi_debug.c
8408
char *p = buf, *end = buf + PAGE_SIZE;
drivers/scsi/scsi_debug.c
9570
hpnt->dma_boundary = PAGE_SIZE - 1;
drivers/scsi/scsi_devinfo.c
691
if (!buf || length>PAGE_SIZE)
drivers/scsi/scsi_devinfo.c
700
if (length < PAGE_SIZE)
drivers/scsi/scsi_devinfo.c
702
else if (buffer[PAGE_SIZE-1]) {
drivers/scsi/scsi_ioctl.c
33
#define MAX_BUF PAGE_SIZE
drivers/scsi/scsi_ioctl.c
539
if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
drivers/scsi/scsi_lib.c
3230
sg_len = PAGE_SIZE - *offset;
drivers/scsi/scsi_proc.c
412
if (!buf || length > PAGE_SIZE)
drivers/scsi/scsi_proc.c
424
if (length < PAGE_SIZE) {
drivers/scsi/scsi_proc.c
428
end = buffer + PAGE_SIZE - 1;
drivers/scsi/scsi_proc.c
98
4 * PAGE_SIZE);
drivers/scsi/scsi_sysfs.c
1045
count = scsi_vpd_lun_id(sdev, buf, PAGE_SIZE);
drivers/scsi/scsi_sysfs.c
1078
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/scsi_sysfs.c
1081
len += scnprintf(buf + len, PAGE_SIZE - len,
drivers/scsi/scsi_sysfs.c
1085
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/scsi/scsi_transport_iscsi.c
3001
ev->u.set_param.len > PAGE_SIZE)
drivers/scsi/scsi_transport_iscsi.c
3176
ev->u.set_host_param.len > PAGE_SIZE)
drivers/scsi/sd.c
3662
if (opt_xfer_bytes < PAGE_SIZE) {
drivers/scsi/sd.c
3665
opt_xfer_bytes, (unsigned int)PAGE_SIZE);
drivers/scsi/sg.c
1675
if (scatter_elem_sz < PAGE_SIZE) {
drivers/scsi/sg.c
1676
scatter_elem_sz = PAGE_SIZE;
drivers/scsi/sg.c
1889
if (num < PAGE_SIZE) {
drivers/scsi/sg.c
1890
scatter_elem_sz = PAGE_SIZE;
drivers/scsi/sg.c
1891
scatter_elem_sz_prev = PAGE_SIZE;
drivers/scsi/sg.c
2006
if (req_size < PAGE_SIZE)
drivers/scsi/sg.c
2007
req_size = PAGE_SIZE;
drivers/scsi/sg.c
2013
} while (req_size > (PAGE_SIZE / 2));
drivers/scsi/sgiwd93.c
218
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/sgiwd93.c
53
#define HPC_DMA_SIZE PAGE_SIZE
drivers/scsi/smartpqi/smartpqi_init.c
5278
max_sg_entries = max_transfer_size / PAGE_SIZE;
drivers/scsi/smartpqi/smartpqi_init.c
5285
max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
drivers/scsi/smartpqi/smartpqi_init.c
6961
return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
drivers/scsi/smartpqi/smartpqi_init.c
6973
return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
drivers/scsi/smartpqi/smartpqi_init.c
6985
return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
drivers/scsi/smartpqi/smartpqi_init.c
6997
return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
drivers/scsi/smartpqi/smartpqi_init.c
7018
count += scnprintf(buffer + count, PAGE_SIZE - count,
drivers/scsi/smartpqi/smartpqi_init.c
7021
count += scnprintf(buffer + count, PAGE_SIZE - count,
drivers/scsi/smartpqi/smartpqi_init.c
7025
count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
drivers/scsi/smartpqi/smartpqi_init.c
7195
return scnprintf(buffer, PAGE_SIZE,
drivers/scsi/smartpqi/smartpqi_init.c
7231
return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
drivers/scsi/smartpqi/smartpqi_init.c
7276
PAGE_SIZE - output_len,
drivers/scsi/smartpqi/smartpqi_init.c
7295
PAGE_SIZE - output_len,
drivers/scsi/smartpqi/smartpqi_init.c
7301
PAGE_SIZE - output_len,
drivers/scsi/smartpqi/smartpqi_init.c
7308
PAGE_SIZE - output_len,
drivers/scsi/smartpqi/smartpqi_init.c
7313
PAGE_SIZE - output_len,
drivers/scsi/smartpqi/smartpqi_init.c
7349
return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
drivers/scsi/smartpqi/smartpqi_init.c
7413
return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
drivers/scsi/smartpqi/smartpqi_init.c
7450
return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
drivers/scsi/smartpqi/smartpqi_init.c
7476
output_len = snprintf(buf, PAGE_SIZE, "%d\n",
drivers/scsi/smartpqi/smartpqi_init.c
7529
return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
drivers/scsi/smartpqi/smartpqi_init.c
7566
return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
drivers/scsi/snic/snic_trc.c
113
tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
drivers/scsi/snic/snic_trc.c
132
tbuf_sz / PAGE_SIZE);
drivers/scsi/st.c
1320
if (!enlarge_buffer(STp->buffer, PAGE_SIZE)) {
drivers/scsi/st.c
4006
if (STbuffer->buffer_size <= PAGE_SIZE)
drivers/scsi/st.c
4018
b_size = PAGE_SIZE << order;
drivers/scsi/st.c
4020
for (b_size = PAGE_SIZE, order = 0;
drivers/scsi/st.c
4022
max_segs * (PAGE_SIZE << order) < new_size;
drivers/scsi/st.c
4027
if (max_segs * (PAGE_SIZE << order) < new_size) {
drivers/scsi/st.c
4064
PAGE_SIZE << st_bp->reserved_page_order);
drivers/scsi/st.c
4076
STbuffer->buffer_size -= (PAGE_SIZE << order);
drivers/scsi/st.c
4090
int length = PAGE_SIZE << st_bp->reserved_page_order;
drivers/scsi/st.c
4122
int length = PAGE_SIZE << st_bp->reserved_page_order;
drivers/scsi/st.c
4155
int length = PAGE_SIZE << st_bp->reserved_page_order;
drivers/scsi/st.c
4610
return scnprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
drivers/scsi/st.c
4616
return scnprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
drivers/scsi/st.c
4622
return scnprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
drivers/scsi/st.c
4628
return scnprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
drivers/scsi/st.c
4654
return scnprintf(buf, PAGE_SIZE, "%d\n", debugging);
drivers/scsi/st.c
4678
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
drivers/scsi/st.c
4690
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
drivers/scsi/st.c
4704
l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density);
drivers/scsi/st.c
4716
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
drivers/scsi/st.c
4745
l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
drivers/scsi/st.c
4990
unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/scsi/st.c
648
DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
drivers/scsi/stex.c
1488
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/storvsc_drv.c
2216
((aligned_ringbuffer_size - PAGE_SIZE) /
drivers/scsi/sun3_scsi.c
507
.dma_boundary = PAGE_SIZE - 1,
drivers/scsi/vmw_pvscsi.c
1235
dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
drivers/scsi/vmw_pvscsi.c
1240
adapter->req_pages * PAGE_SIZE,
drivers/scsi/vmw_pvscsi.c
1245
adapter->cmp_pages * PAGE_SIZE,
drivers/scsi/vmw_pvscsi.c
1250
adapter->msg_pages * PAGE_SIZE,
drivers/scsi/vmw_pvscsi.c
1279
BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
drivers/scsi/vmw_pvscsi.c
1308
config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
drivers/scsi/vmw_pvscsi.c
1342
dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
drivers/scsi/vmw_pvscsi.c
444
adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
drivers/scsi/vmw_pvscsi.c
454
adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
drivers/scsi/vmw_pvscsi.c
462
adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
drivers/scsi/vmw_pvscsi.c
467
BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
drivers/scsi/vmw_pvscsi.c
468
BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
drivers/scsi/vmw_pvscsi.c
469
BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
drivers/scsi/vmw_pvscsi.c
47
#define SGL_SIZE PAGE_SIZE
drivers/scsi/vmw_pvscsi.c
477
adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
drivers/scsi/vmw_pvscsi.c
481
BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
drivers/scsi/vmw_pvscsi.c
499
base += PAGE_SIZE;
drivers/scsi/vmw_pvscsi.c
505
base += PAGE_SIZE;
drivers/scsi/vmw_pvscsi.c
508
memset(adapter->rings_state, 0, PAGE_SIZE);
drivers/scsi/vmw_pvscsi.c
509
memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
drivers/scsi/vmw_pvscsi.c
510
memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
drivers/scsi/vmw_pvscsi.c
523
base += PAGE_SIZE;
drivers/scsi/vmw_pvscsi.c
525
memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
drivers/scsi/vmw_pvscsi.h
432
(PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
drivers/scsi/vmw_pvscsi.h
459
#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
drivers/scsi/xen-scsifront.c
1140
VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
drivers/scsi/xen-scsifront.c
1151
host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
drivers/scsi/xen-scsifront.c
518
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
drivers/scsi/xen-scsifront.c
551
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
drivers/scsi/xen-scsifront.c
70
#define VSCSIIF_RING_SIZE __CONST_RING_SIZE(vscsiif, PAGE_SIZE)
drivers/scsi/xen-scsifront.c
802
XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
drivers/scsi/xen-scsifront.c
929
host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
drivers/sh/maple/maple.c
303
__flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
drivers/sh/maple/maple.c
304
PAGE_SIZE);
drivers/soc/mediatek/mtk-svs.c
795
if (count >= PAGE_SIZE)
drivers/soc/pxa/mfp.c
20
#define MFPR_SIZE (PAGE_SIZE)
drivers/soundwire/cadence_master.c
317
#define RD_BUF (2 * PAGE_SIZE)
drivers/soundwire/debugfs.c
35
#define RD_BUF (3 * PAGE_SIZE)
drivers/soundwire/intel.c
57
#define RD_BUF (2 * PAGE_SIZE)
drivers/soundwire/intel_ace2x_debugfs.c
22
#define RD_BUF (2 * PAGE_SIZE)
drivers/spi/spi-ep93xx.c
321
nents = DIV_ROUND_UP(len, PAGE_SIZE);
drivers/spi/spi-ep93xx.c
332
size_t bytes = min_t(size_t, len, PAGE_SIZE);
drivers/spi/spi-fsl-cpm.c
353
mspi->dma_dummy_tx = dma_map_single(dev, ZERO_PAGE(0), PAGE_SIZE,
drivers/spi/spi-fsl-cpm.c
390
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
drivers/spi/spi-fsl-cpm.c
410
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
drivers/spi/spi-fsl-cpm.c
46
#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
drivers/spi/spi-fsl-dspi.c
701
dma->bufsize = PAGE_SIZE;
drivers/spi/spi-loopback-test.c
110
.tx_buf = TX(PAGE_SIZE - 4),
drivers/spi/spi-loopback-test.c
111
.rx_buf = RX(PAGE_SIZE - 4),
drivers/spi/spi-loopback-test.c
418
#define SPI_TEST_MAX_SIZE_PLUS (SPI_TEST_MAX_SIZE + PAGE_SIZE)
drivers/spi/spi-mtk-nor.c
95
#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE
drivers/spi/spi-mxs.c
171
const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
drivers/spi/spi-orion.c
763
dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
drivers/spi/spi-orion.c
769
dir_acc->size = PAGE_SIZE;
drivers/spi/spi-pl022.c
1012
pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/spi/spi-pl022.c
1056
pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/spi/spi-pl022.c
765
PAGE_SIZE - offset_in_page(bufp));
drivers/spi/spi-pl022.c
778
mapbytes = min_t(int, bytesleft, PAGE_SIZE);
drivers/spi/spi-pl022.c
910
pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
drivers/spi/spi-sh-msiof.c
1140
p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
drivers/spi/spi-sh-msiof.c
1146
p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
drivers/spi/spi-sh-msiof.c
1155
dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
drivers/spi/spi-sh-msiof.c
1175
dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
drivers/spi/spi-sh-msiof.c
1177
dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
drivers/spi/spi-test.h
121
1021, 1024, 1031, 4093, PAGE_SIZE, 4099, 65536, 65537
drivers/spi/spi-test.h
13
#define SPI_TEST_MAX_SIZE (32 * PAGE_SIZE)
drivers/spi/spi-topcliff-pch.c
773
memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
drivers/spi/spi.c
1125
(LAST_PKMAP * PAGE_SIZE)));
drivers/spi/spi.c
1138
desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
drivers/spi/spi.c
1162
PAGE_SIZE - offset_in_page(buf)));
drivers/spi/spi.c
63
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
drivers/ssb/sprom.c
90
count = sprom2hex(sprom, buf, PAGE_SIZE, sprom_size_words);
drivers/staging/fbtft/fb_hx8340bn.c
26
#define TXBUFLEN (4 * PAGE_SIZE)
drivers/staging/fbtft/fb_ili9341.c
25
#define TXBUFLEN (4 * PAGE_SIZE)
drivers/staging/fbtft/fbtft-core.c
339
y_high = (pageref->offset + PAGE_SIZE - 1) / info->fix.line_length;
drivers/staging/fbtft/fbtft-core.c
666
txbuflen = PAGE_SIZE; /* need buffer for byteswapping */
drivers/staging/fbtft/fbtft-sysfs.c
101
len += scnprintf(&buf[len], PAGE_SIZE,
drivers/staging/greybus/camera.c
31
char data[PAGE_SIZE];
drivers/staging/greybus/loopback.c
117
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
drivers/staging/greybus/raw.c
47
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
drivers/staging/greybus/uart.c
38
#define GB_UART_WRITE_FIFO_SIZE PAGE_SIZE
drivers/staging/media/atomisp/pci/hmm/hmm.c
191
if ((bytes + offset) >= PAGE_SIZE) {
drivers/staging/media/atomisp/pci/hmm/hmm.c
192
len = PAGE_SIZE - offset;
drivers/staging/media/atomisp/pci/hmm/hmm.c
332
if ((bytes + offset) >= PAGE_SIZE) {
drivers/staging/media/atomisp/pci/hmm/hmm.c
333
len = PAGE_SIZE - offset;
drivers/staging/media/atomisp/pci/hmm/hmm.c
394
if ((bytes + offset) >= PAGE_SIZE) {
drivers/staging/media/atomisp/pci/hmm/hmm.c
395
len = PAGE_SIZE - offset;
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
1051
if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
1057
virt += PAGE_SIZE;
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
654
vaddr += PAGE_SIZE;
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
935
clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c
44
#define NR_PAGES_2GB (SZ_2G / PAGE_SIZE)
drivers/staging/media/ipu3/ipu3-dmamap.c
120
page_to_phys(pages[i]), PAGE_SIZE);
drivers/staging/media/ipu3/ipu3-dmamap.c
124
iovaddr += PAGE_SIZE;
drivers/staging/media/ipu3/ipu3-dmamap.c
143
i * PAGE_SIZE);
drivers/staging/media/ipu7/ipu7-dma.c
107
__clear_buffer(pages[i], PAGE_SIZE, attrs);
drivers/staging/media/ipu7/ipu7-dma.c
187
PAGE_SIZE, DMA_BIDIRECTIONAL,
drivers/staging/media/ipu7/ipu7-dma.c
198
PAGE_SIZE);
drivers/staging/media/ipu7/ipu7-dma.c
203
PAGE_SIZE, DMA_BIDIRECTIONAL,
drivers/staging/media/ipu7/ipu7-dma.c
227
dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
drivers/staging/media/ipu7/ipu7-dma.c
230
ipu7_mmu_unmap(mmu->dmap->mmu_info, ipu7_iova, PAGE_SIZE);
drivers/staging/media/ipu7/ipu7-dma.c
280
dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
drivers/staging/media/ipu7/ipu7-dma.c
87
__clear_buffer(pages[i], PAGE_SIZE << order, attrs);
drivers/staging/media/ipu7/ipu7-mmu.c
115
dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/staging/media/ipu7/ipu7-mmu.c
152
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/staging/media/ipu7/ipu7-mmu.c
190
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/staging/media/ipu7/ipu7-mmu.c
415
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/staging/media/ipu7/ipu7-mmu.c
431
mmu->pci_trash_page, PAGE_SIZE);
drivers/staging/media/ipu7/ipu7-mmu.c
450
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/staging/media/ipu7/ipu7-mmu.c
770
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/staging/media/ipu7/ipu7-mmu.c
779
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/staging/media/ipu7/ipu7-mmu.c
787
PAGE_SIZE, DMA_BIDIRECTIONAL);
drivers/staging/media/ipu7/ipu7.c
2287
data += PAGE_SIZE;
drivers/staging/media/ipu7/ipu7.h
145
#define IPU_MMUV2_L2_RANGE (1024 * PAGE_SIZE)
drivers/staging/media/ipu7/ipu7.h
154
#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE)
drivers/staging/media/meson/vdec/codec_hevc_common.c
284
u32 nb_pages = size / PAGE_SIZE;
drivers/staging/media/tegra-video/tegra20.c
236
apb_misc = ioremap(TEGRA_APB_MISC_BASE, PAGE_SIZE);
drivers/target/iscsi/cxgbit/cxgbit.h
54
min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
162
((i != last_sgidx) && (len != PAGE_SIZE))) {
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
29
offset += PAGE_SIZE;
drivers/target/iscsi/cxgbit/cxgbit_main.c
47
mdsl = min_t(u32, mdsl, 4 * PAGE_SIZE);
drivers/target/iscsi/cxgbit/cxgbit_main.c
48
mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);
drivers/target/iscsi/cxgbit/cxgbit_target.c
1055
u32 skip = data_offset % PAGE_SIZE;
drivers/target/iscsi/cxgbit/cxgbit_target.c
1057
sg_off = data_offset / PAGE_SIZE;
drivers/target/iscsi/cxgbit/cxgbit_target.c
1059
sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
drivers/target/iscsi/cxgbit/cxgbit_target.c
355
sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
drivers/target/iscsi/cxgbit/cxgbit_target.c
356
page_off = (data_offset % PAGE_SIZE);
drivers/target/iscsi/cxgbit/cxgbit_target.c
895
u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
drivers/target/iscsi/iscsi_target.c
890
u32 ent = data_offset / PAGE_SIZE;
drivers/target/iscsi/iscsi_target.c
901
page_off = (data_offset % PAGE_SIZE);
drivers/target/iscsi/iscsi_target.c
981
u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
drivers/target/iscsi/iscsi_target_configfs.c
373
return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
drivers/target/iscsi/iscsi_target_configfs.c
438
return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
drivers/target/iscsi/iscsi_target_configfs.c
479
rb = snprintf(page, PAGE_SIZE, \
drivers/target/iscsi/iscsi_target_configfs.c
483
rb = snprintf(page, PAGE_SIZE, "%u\n", \
drivers/target/iscsi/iscsi_target_configfs.c
698
return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
drivers/target/iscsi/iscsi_target_configfs.c
829
return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
drivers/target/iscsi/iscsi_target_configfs.c
888
return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
drivers/target/iscsi/iscsi_target_configfs.c
933
rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
drivers/target/iscsi/iscsi_target_configfs.c
946
buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
drivers/target/iscsi/iscsi_target_configfs.c
949
len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
drivers/target/iscsi/iscsi_target_login.c
395
off = mrdsl % PAGE_SIZE;
drivers/target/iscsi/iscsi_target_login.c
399
if (mrdsl < PAGE_SIZE)
drivers/target/iscsi/iscsi_target_login.c
400
mrdsl = PAGE_SIZE;
drivers/target/iscsi/iscsi_target_stat.c
102
return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
drivers/target/iscsi/iscsi_target_stat.c
111
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
121
return snprintf(page, PAGE_SIZE, "%s\n",
drivers/target/iscsi/iscsi_target_stat.c
129
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
drivers/target/iscsi/iscsi_target_stat.c
135
return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
drivers/target/iscsi/iscsi_target_stat.c
141
return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
drivers/target/iscsi/iscsi_target_stat.c
147
return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
drivers/target/iscsi/iscsi_target_stat.c
199
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
209
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
drivers/target/iscsi/iscsi_target_stat.c
218
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
drivers/target/iscsi/iscsi_target_stat.c
227
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
drivers/target/iscsi/iscsi_target_stat.c
261
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
268
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
drivers/target/iscsi/iscsi_target_stat.c
284
return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
drivers/target/iscsi/iscsi_target_stat.c
300
return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
drivers/target/iscsi/iscsi_target_stat.c
314
return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
drivers/target/iscsi/iscsi_target_stat.c
329
return snprintf(page, PAGE_SIZE, "%s\n", buf);
drivers/target/iscsi/iscsi_target_stat.c
341
ret = snprintf(page, PAGE_SIZE, "ipv6\n");
drivers/target/iscsi/iscsi_target_stat.c
343
ret = snprintf(page, PAGE_SIZE, "ipv4\n");
drivers/target/iscsi/iscsi_target_stat.c
357
ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr);
drivers/target/iscsi/iscsi_target_stat.c
401
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
408
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
drivers/target/iscsi/iscsi_target_stat.c
419
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
drivers/target/iscsi/iscsi_target_stat.c
433
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
drivers/target/iscsi/iscsi_target_stat.c
447
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
drivers/target/iscsi/iscsi_target_stat.c
461
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
drivers/target/iscsi/iscsi_target_stat.c
475
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
drivers/target/iscsi/iscsi_target_stat.c
489
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
drivers/target/iscsi/iscsi_target_stat.c
53
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
533
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
539
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
drivers/target/iscsi/iscsi_target_stat.c
548
return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
drivers/target/iscsi/iscsi_target_stat.c
557
return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
drivers/target/iscsi/iscsi_target_stat.c
595
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
drivers/target/iscsi/iscsi_target_stat.c
60
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
drivers/target/iscsi/iscsi_target_stat.c
611
ret = snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
632
ret = snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
654
ret = snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/iscsi/iscsi_target_stat.c
66
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
drivers/target/iscsi/iscsi_target_stat.c
676
ret = snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/iscsi/iscsi_target_stat.c
698
ret = snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/iscsi/iscsi_target_stat.c
72
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/iscsi/iscsi_target_stat.c
720
ret = snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/iscsi/iscsi_target_stat.c
742
ret = snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/iscsi/iscsi_target_stat.c
764
ret = snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/iscsi/iscsi_target_stat.c
79
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
drivers/target/iscsi/iscsi_target_stat.c
85
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/loopback/tcm_loop.c
348
.dma_boundary = PAGE_SIZE - 1,
drivers/target/loopback/tcm_loop.c
800
ret = snprintf(page, PAGE_SIZE, "%s\n",
drivers/target/loopback/tcm_loop.c
902
ret = snprintf(page, PAGE_SIZE, "%s\n", status);
drivers/target/loopback/tcm_loop.c
941
return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
drivers/target/target_core_configfs.c
1161
return snprintf(page, PAGE_SIZE, "%d\n",
drivers/target/target_core_configfs.c
1200
return snprintf(page, PAGE_SIZE, "%d\n",
drivers/target/target_core_configfs.c
1601
return snprintf(page, PAGE_SIZE, "%#08x\n",
drivers/target/target_core_configfs.c
1731
if (len + strlen(buf) >= PAGE_SIZE)
drivers/target/target_core_configfs.c
1808
if (len + strlen(buf) >= PAGE_SIZE) \
drivers/target/target_core_configfs.c
1814
if (len + strlen(buf) >= PAGE_SIZE) \
drivers/target/target_core_configfs.c
1820
if (len + strlen(buf) >= PAGE_SIZE) \
drivers/target/target_core_configfs.c
2022
if (len + strlen(buf) >= PAGE_SIZE)
drivers/target/target_core_configfs.c
2367
return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
drivers/target/target_core_configfs.c
2407
return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
drivers/target/target_core_configfs.c
2445
return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
drivers/target/target_core_configfs.c
2824
const char *const end = page + PAGE_SIZE;
drivers/target/target_core_configfs.c
3230
if ((cur_len + len) > PAGE_SIZE) {
drivers/target/target_core_configfs.c
542
return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
drivers/target/target_core_configfs.c
907
return snprintf(page, PAGE_SIZE, "0\n");
drivers/target/target_core_device.c
518
alignment = max(1ul, PAGE_SIZE / block_size);
drivers/target/target_core_file.c
525
rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
drivers/target/target_core_pscsi.c
832
PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/target/target_core_pscsi.c
852
if (off + len > PAGE_SIZE)
drivers/target/target_core_pscsi.c
856
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
drivers/target/target_core_rd.c
101
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
drivers/target/target_core_rd.c
159
sg[j].length = PAGE_SIZE;
drivers/target/target_core_rd.c
162
memset(p, init_payload, PAGE_SIZE);
drivers/target/target_core_rd.c
226
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
drivers/target/target_core_rd.c
309
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
drivers/target/target_core_rd.c
391
prot_offset = do_div(tmp, PAGE_SIZE);
drivers/target/target_core_rd.c
437
rd_offset = do_div(tmp, PAGE_SIZE);
drivers/target/target_core_rd.c
459
src_len = PAGE_SIZE - rd_offset;
drivers/target/target_core_rd.c
503
src_len = PAGE_SIZE;
drivers/target/target_core_rd.c
603
PAGE_SIZE, rd_dev->sg_table_count,
drivers/target/target_core_rd.c
621
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
drivers/target/target_core_sbc.c
518
if (block_size < PAGE_SIZE) {
drivers/target/target_core_spc.c
551
mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
drivers/target/target_core_stat.c
102
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
drivers/target/target_core_stat.c
1042
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
drivers/target/target_core_stat.c
1062
ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
drivers/target/target_core_stat.c
107
return snprintf(page, PAGE_SIZE, "%u\n", to_stat_tgt_dev(item)->dev_index);
drivers/target/target_core_stat.c
1083
ret = snprintf(page, PAGE_SIZE, "Ready\n");
drivers/target/target_core_stat.c
113
return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
drivers/target/target_core_stat.c
1154
ret = snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/target_core_stat.c
1176
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
drivers/target/target_core_stat.c
1198
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
drivers/target/target_core_stat.c
120
return snprintf(page, PAGE_SIZE, "activated\n");
drivers/target/target_core_stat.c
122
return snprintf(page, PAGE_SIZE, "deactivated\n");
drivers/target/target_core_stat.c
1221
ret = snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/target_core_stat.c
1242
ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
drivers/target/target_core_stat.c
1270
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
drivers/target/target_core_stat.c
135
return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
drivers/target/target_core_stat.c
141
return snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/target_core_stat.c
148
return snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/target_core_stat.c
155
return snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/target_core_stat.c
200
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
drivers/target/target_core_stat.c
205
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/target_core_stat.c
211
return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
drivers/target/target_core_stat.c
217
return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
drivers/target/target_core_stat.c
225
return snprintf(page, PAGE_SIZE, "%s\n",
drivers/target/target_core_stat.c
234
return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_VENDOR_LEN)
drivers/target/target_core_stat.c
242
return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_MODEL_LEN)
drivers/target/target_core_stat.c
250
return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_REVISION_LEN)
drivers/target/target_core_stat.c
259
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/target_core_stat.c
268
return snprintf(page, PAGE_SIZE, "%s\n",
drivers/target/target_core_stat.c
276
return snprintf(page, PAGE_SIZE, "exposed\n");
drivers/target/target_core_stat.c
293
return snprintf(page, PAGE_SIZE, "%llu\n", sum >> shift); \
drivers/target/target_core_stat.c
318
return snprintf(page, PAGE_SIZE, "%lu\n",
drivers/target/target_core_stat.c
326
return snprintf(page, PAGE_SIZE, "%u\n", 0);
drivers/target/target_core_stat.c
333
return snprintf(page, PAGE_SIZE, "%u\n", 0);
drivers/target/target_core_stat.c
342
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
drivers/target/target_core_stat.c
434
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
drivers/target/target_core_stat.c
448
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
drivers/target/target_core_stat.c
462
ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi);
drivers/target/target_core_stat.c
476
ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
drivers/target/target_core_stat.c
492
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
drivers/target/target_core_stat.c
52
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
drivers/target/target_core_stat.c
538
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
drivers/target/target_core_stat.c
553
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
drivers/target/target_core_stat.c
568
ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi);
drivers/target/target_core_stat.c
57
return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->dev_index);
drivers/target/target_core_stat.c
584
ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
drivers/target/target_core_stat.c
602
ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
drivers/target/target_core_stat.c
62
return snprintf(page, PAGE_SIZE, "Target\n");
drivers/target/target_core_stat.c
645
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
drivers/target/target_core_stat.c
67
return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->export_count);
drivers/target/target_core_stat.c
699
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
drivers/target/target_core_stat.c
716
ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
drivers/target/target_core_stat.c
734
ret = snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/target_core_stat.c
754
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
drivers/target/target_core_stat.c
774
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id);
drivers/target/target_core_stat.c
849
ret = snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/target_core_stat.c
871
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
drivers/target/target_core_stat.c
893
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
drivers/target/target_core_stat.c
913
ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
drivers/target/target_core_stat.c
933
ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
drivers/target/target_core_stat.c
953
ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
drivers/target/target_core_stat.c
973
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
drivers/target/target_core_stat.c
993
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
drivers/target/target_core_transport.c
1337
mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
drivers/target/target_core_transport.c
527
if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
drivers/target/target_core_transport.c
530
len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
drivers/target/target_core_user.c
1910
dpi = (offset - udev->data_off) / PAGE_SIZE;
drivers/target/target_core_user.c
2231
udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
drivers/target/target_core_user.c
2244
WARN_ON(data_size % PAGE_SIZE);
drivers/target/target_core_user.c
2679
return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
drivers/target/target_core_user.c
2712
return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
drivers/target/target_core_user.c
2748
return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
drivers/target/target_core_user.c
2759
return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
drivers/target/target_core_user.c
2769
return snprintf(page, PAGE_SIZE, "%u\n",
drivers/target/target_core_user.c
2780
return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
drivers/target/target_core_user.c
2842
return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
drivers/target/target_core_user.c
2898
return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
drivers/target/target_core_user.c
2925
return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
drivers/target/target_core_user.c
2980
return snprintf(page, PAGE_SIZE, "%i\n",
drivers/target/target_core_user.c
3015
return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
drivers/target/target_core_user.c
3017
return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
drivers/target/target_core_user.c
513
int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
drivers/target/target_core_user.c
663
size = round_up(size+offset, PAGE_SIZE);
drivers/target/target_core_user.c
667
start += PAGE_SIZE;
drivers/target/target_core_user.c
668
size -= PAGE_SIZE;
drivers/target/target_core_user.c
729
page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
drivers/target/target_core_user.c
741
page_remaining = PAGE_SIZE;
drivers/target/target_core_user.c
755
PAGE_SIZE - page_remaining;
drivers/target/tcm_fc/tfc_conf.c
105
len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
drivers/target/tcm_fc/tfc_conf.c
170
return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
drivers/target/tcm_fc/tfc_io.c
144
tlen = min(tlen, (size_t)(PAGE_SIZE -
drivers/target/tcm_fc/tfc_io.c
303
tlen = min(tlen, (size_t)(PAGE_SIZE -
drivers/tee/amdtee/call.c
302
if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
drivers/tee/amdtee/call.c
307
if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
drivers/tee/amdtee/call.c
410
if (blob & (PAGE_SIZE - 1)) {
drivers/tee/amdtee/core.c
206
*ta_size = roundup(fw->size, PAGE_SIZE);
drivers/tee/amdtee/shm_pool.c
28
shm->size = PAGE_SIZE << order;
drivers/tee/optee/call.c
21
#define SHM_ENTRY_SIZE PAGE_SIZE
drivers/tee/optee/call.c
638
rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
drivers/tee/optee/ffa_abi.c
294
num_pages * PAGE_SIZE, GFP_KERNEL);
drivers/tee/optee/protmem.c
137
size_t sz = ALIGN(size, PAGE_SIZE);
drivers/tee/optee/protmem.c
325
rp->page_count = min_size / PAGE_SIZE;
drivers/tee/optee/smc_abi.c
1445
begin = roundup(res.result.start, PAGE_SIZE);
drivers/tee/optee/smc_abi.c
1446
end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
drivers/tee/optee/smc_abi.c
422
BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
drivers/tee/tee_heap.c
464
const size_t page_mask = PAGE_SIZE - 1;
drivers/tee/tee_shm.c
150
shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
drivers/tee/tee_shm.c
185
return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
drivers/tee/tee_shm.c
295
page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
drivers/tee/tee_shm.c
309
dma_mem->shm.size = page_count * PAGE_SIZE;
drivers/tee/tee_shm.c
317
dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
drivers/tee/tee_shm.c
341
size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
drivers/tee/tee_shm.c
350
shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
drivers/tee/tee_shm.c
356
shm->size = nr_pages * PAGE_SIZE;
drivers/tee/tee_shm.c
365
pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
drivers/tee/tee_shm.c
434
start = rounddown(addr, PAGE_SIZE);
drivers/tee/tee_shm.c
452
} else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
drivers/tee/tee_shm.c
457
shm->num_pages = len / PAGE_SIZE;
drivers/tee/tee_shm_pool.c
60
const size_t page_mask = PAGE_SIZE - 1;
drivers/tee/tstee/core.c
265
num_pages * PAGE_SIZE, GFP_KERNEL);
drivers/thermal/thermal_sysfs.c
720
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
drivers/thermal/thermal_sysfs.c
721
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
drivers/thermal/thermal_sysfs.c
723
if (len >= PAGE_SIZE)
drivers/thermal/thermal_sysfs.c
725
len += snprintf(buf + len, PAGE_SIZE - len, "state%2u ", i);
drivers/thermal/thermal_sysfs.c
727
if (len >= PAGE_SIZE)
drivers/thermal/thermal_sysfs.c
728
return PAGE_SIZE;
drivers/thermal/thermal_sysfs.c
730
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/thermal/thermal_sysfs.c
733
if (len >= PAGE_SIZE)
drivers/thermal/thermal_sysfs.c
736
len += snprintf(buf + len, PAGE_SIZE - len, "state%2u:", i);
drivers/thermal/thermal_sysfs.c
739
if (len >= PAGE_SIZE)
drivers/thermal/thermal_sysfs.c
741
len += snprintf(buf + len, PAGE_SIZE - len, "%8u ",
drivers/thermal/thermal_sysfs.c
744
if (len >= PAGE_SIZE)
drivers/thermal/thermal_sysfs.c
746
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
drivers/thermal/thermal_sysfs.c
749
if (len >= PAGE_SIZE) {
drivers/thunderbolt/debugfs.c
143
nbytes = min_t(size_t, *count, PAGE_SIZE);
drivers/thunderbolt/xdomain.c
929
get_modalias(svc, buf, PAGE_SIZE - 2);
drivers/tty/goldfish.c
89
unsigned long pg_end = (addr & PAGE_MASK) + PAGE_SIZE;
drivers/tty/hvc/hvc_iucv.c
33
#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
drivers/tty/hvc/hvc_iucv.c
71
#define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
drivers/tty/hvc/hvcs.c
451
return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
drivers/tty/serdev/core.c
33
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
drivers/tty/serdev/core.c
37
return of_device_modalias(dev, buf, PAGE_SIZE);
drivers/tty/serial/8250/8250_dma.c
286
dma->rx_size = PAGE_SIZE;
drivers/tty/serial/amba-pl011.c
376
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
drivers/tty/serial/imx.c
2406
#define RX_DMA_PERIOD_LEN (PAGE_SIZE / 4)
drivers/tty/serial/pch_uart.c
1684
priv->rxbuf.size = PAGE_SIZE;
drivers/tty/serial/samsung_tty.c
1104
dma->rx_size = PAGE_SIZE;
drivers/tty/serial/serial_core.c
263
PAGE_SIZE);
drivers/tty/serial/sunhv.c
125
long stat = sun4v_con_read(ra, PAGE_SIZE, &bytes_read);
drivers/tty/serial/sunhv.c
421
int left = PAGE_SIZE;
drivers/tty/serial/sunhv.c
540
con_write_page = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/tty/serial/sunhv.c
544
con_read_page = kzalloc(PAGE_SIZE, GFP_KERNEL);
drivers/tty/tty_buffer.c
41
#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~TTYB_ALIGN_MASK)
drivers/tty/tty_port.c
233
kfifo_init(&port->xmit_fifo, port->xmit_buf, PAGE_SIZE);
drivers/tty/vcc.c
488
rv = scnprintf(buf, PAGE_SIZE, "%s\n", port->domain);
drivers/tty/vt/vc_screen.c
54
#define CON_BUF_SIZE (IS_ENABLED(CONFIG_BASE_SMALL) ? 256 : PAGE_SIZE)
drivers/uio/uio.c
847
+ idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
drivers/uio/uio_dfl.c
34
+ PAGE_SIZE - 1) & PAGE_MASK;
drivers/uio/uio_fsl_elbc_gpcm.c
89
return scnprintf(buf, PAGE_SIZE, "0x%08x\n",
drivers/uio/uio_fsl_elbc_gpcm.c
93
return scnprintf(buf, PAGE_SIZE, "0x%08x\n",
drivers/uio/uio_mf624.c
127
mem->size = ((start & ~PAGE_MASK) + len + PAGE_SIZE - 1) & PAGE_MASK;
drivers/uio/uio_pci_generic.c
125
(uiomem->offs + resource_size(r) + PAGE_SIZE - 1) &
drivers/uio/uio_pci_generic_sva.c
124
(uiomem->offs + resource_size(r) + PAGE_SIZE - 1) &
drivers/uio/uio_pdrv_genirq.c
208
+ PAGE_SIZE - 1) & PAGE_MASK;
drivers/usb/atm/cxacru.c
1191
instance->rcv_buf, PAGE_SIZE,
drivers/usb/atm/cxacru.c
1196
instance->snd_buf, PAGE_SIZE,
drivers/usb/atm/cxacru.c
1201
instance->rcv_buf, PAGE_SIZE,
drivers/usb/atm/cxacru.c
1206
instance->snd_buf, PAGE_SIZE,
drivers/usb/atm/cxacru.c
244
return snprintf(buf, PAGE_SIZE, "%u.%02u\n",
drivers/usb/atm/cxacru.c
248
return snprintf(buf, PAGE_SIZE, "-%u.%02u\n",
drivers/usb/atm/cxacru.c
619
if (wbuflen > PAGE_SIZE || rbuflen > PAGE_SIZE) {
drivers/usb/atm/cxacru.c
964
if ((offb >= PAGE_SIZE) || (offd >= size)) {
drivers/usb/atm/speedtch.c
269
for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) {
drivers/usb/atm/speedtch.c
270
int thislen = min_t(int, PAGE_SIZE, fw1->size - offset);
drivers/usb/atm/speedtch.c
296
for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) {
drivers/usb/atm/speedtch.c
297
int thislen = min_t(int, PAGE_SIZE, fw2->size - offset);
drivers/usb/chipidea/otg_fsm.c
128
size = PAGE_SIZE;
drivers/usb/chipidea/otg_fsm.c
133
return PAGE_SIZE - size;
drivers/usb/chipidea/otg_fsm.c
39
size = PAGE_SIZE;
drivers/usb/chipidea/otg_fsm.c
44
return PAGE_SIZE - size;
drivers/usb/chipidea/otg_fsm.c
88
size = PAGE_SIZE;
drivers/usb/chipidea/otg_fsm.c
93
return PAGE_SIZE - size;
drivers/usb/chipidea/udc.c
444
if (hwreq->req.dma % PAGE_SIZE)
drivers/usb/chipidea/udc.c
521
if (sg_dma_address(s) % PAGE_SIZE) {
drivers/usb/common/ulpi.c
109
len = of_device_modalias(dev, buf, PAGE_SIZE);
drivers/usb/core/buffer.c
184
size, dma, PAGE_SIZE);
drivers/usb/core/devices.c
469
data_end = usb_dump_desc(data_end, pages_start + (2 * PAGE_SIZE) - 256,
drivers/usb/core/devices.c
472
if (data_end > (pages_start + (2 * PAGE_SIZE) - 256))
drivers/usb/core/devio.c
1185
if (wLength > PAGE_SIZE)
drivers/usb/core/devio.c
1187
ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) +
drivers/usb/core/devio.c
1269
usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) +
drivers/usb/early/xhci-dbc.c
1004
memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
drivers/usb/early/xhci-dbc.c
1005
memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
drivers/usb/early/xhci-dbc.c
195
memblock_phys_free(seg->dma, PAGE_SIZE);
drivers/usb/early/xhci-dbc.c
204
memset(seg->trbs, 0, PAGE_SIZE);
drivers/usb/early/xhci-dbc.c
241
memset(xdbc.table_base, 0, PAGE_SIZE);
drivers/usb/early/xhci-dbc.c
242
memset(xdbc.out_buf, 0, PAGE_SIZE);
drivers/usb/early/xhci-dbc.c
677
memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
drivers/usb/early/xhci-dbc.c
680
memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
drivers/usb/early/xhci-dbc.c
97
virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
drivers/usb/fotg210/fotg210-hcd.c
747
buf->alloc_size = PAGE_SIZE;
drivers/usb/fotg210/fotg210-hcd.c
826
buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
drivers/usb/gadget/configfs.c
1181
UTF16_LITTLE_ENDIAN, page, PAGE_SIZE - 1);
drivers/usb/gadget/function/f_fs.c
1065
io_data->use_sg = gadget->sg_supported && data_len > PAGE_SIZE;
drivers/usb/gadget/function/f_fs.c
1928
sb->s_blocksize = PAGE_SIZE;
drivers/usb/gadget/function/f_fs.c
823
for (i = 0, ptr = vaddr; i < n_pages; ++i, ptr += PAGE_SIZE)
drivers/usb/gadget/function/f_hid.c
1403
if (len > PAGE_SIZE) {
drivers/usb/gadget/function/f_midi.c
1193
result = strscpy(page, opts->name, PAGE_SIZE); \
drivers/usb/gadget/function/f_midi2.c
2187
result = scnprintf(page, PAGE_SIZE, "%s\n", str);
drivers/usb/gadget/function/f_phonet.c
30
#if (PAGE_SIZE % MAXPACKET)
drivers/usb/gadget/function/f_phonet.c
305
req->length = PAGE_SIZE;
drivers/usb/gadget/function/f_phonet.c
344
skb->len <= 1, req->actual, PAGE_SIZE);
drivers/usb/gadget/function/f_phonet.c
594
return gether_get_ifname(to_f_phonet_opts(item)->net, page, PAGE_SIZE);
drivers/usb/gadget/function/f_printer.c
1237
result = strscpy(page, opts->pnp_string, PAGE_SIZE);
drivers/usb/gadget/function/f_printer.c
1239
result = PAGE_SIZE;
drivers/usb/gadget/function/f_printer.c
1240
} else if (page[result - 1] != '\n' && result + 1 < PAGE_SIZE) {
drivers/usb/gadget/function/storage_common.c
341
p = file_path(curlun->filp, buf, PAGE_SIZE - 1);
drivers/usb/gadget/function/u_audio.c
26
#define BUFF_SIZE_MAX (PAGE_SIZE * 16)
drivers/usb/gadget/function/u_audio.c
27
#define PRD_SIZE_MAX PAGE_SIZE
drivers/usb/gadget/function/u_ether_configfs.h
145
ret = gether_get_ifname(opts->net, page, PAGE_SIZE); \
drivers/usb/gadget/function/u_ether_configfs.h
36
result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
drivers/usb/gadget/function/u_ether_configfs.h
71
result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
drivers/usb/gadget/function/uvc_video.c
589
PAGE_SIZE) + 2, GFP_KERNEL);
drivers/usb/gadget/legacy/inode.c
1818
(len > PAGE_SIZE * 4))
drivers/usb/gadget/legacy/inode.c
2035
sb->s_blocksize = PAGE_SIZE;
drivers/usb/gadget/udc/core.c
1826
rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
drivers/usb/gadget/udc/core.c
1837
return scnprintf(buf, PAGE_SIZE, "%s\n", \
drivers/usb/gadget/udc/core.c
1852
return scnprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
drivers/usb/gadget/udc/dummy_hcd.c
2481
temp = show_urb(buf, PAGE_SIZE - size, urbp->urb);
drivers/usb/gadget/udc/dummy_hcd.c
980
return scnprintf(buf, PAGE_SIZE, "%s\n", dum->driver->function);
drivers/usb/gadget/udc/gr_udc.c
1987
buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
drivers/usb/gadget/udc/net2280.c
1644
strlen(dev->driver->function) > PAGE_SIZE)
drivers/usb/gadget/udc/net2280.c
1646
return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
drivers/usb/gadget/udc/net2280.c
1663
size = PAGE_SIZE;
drivers/usb/gadget/udc/net2280.c
1797
return PAGE_SIZE - size;
drivers/usb/gadget/udc/net2280.c
1812
size = PAGE_SIZE;
drivers/usb/gadget/udc/net2280.c
1889
return PAGE_SIZE - size;
drivers/usb/host/ehci-dbg.c
1013
buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8) * PAGE_SIZE;
drivers/usb/host/ehci-dbg.c
926
buf->alloc_size = PAGE_SIZE;
drivers/usb/host/ohci-dbg.c
472
size = PAGE_SIZE;
drivers/usb/host/ohci-dbg.c
502
size = PAGE_SIZE;
drivers/usb/host/ohci-dbg.c
577
return PAGE_SIZE - size;
drivers/usb/host/ohci-dbg.c
595
size = PAGE_SIZE;
drivers/usb/host/ohci-dbg.c
662
return PAGE_SIZE - size;
drivers/usb/host/r8a66597-hcd.c
822
ptr += PAGE_SIZE)
drivers/usb/host/xen-hcd.c
1121
XEN_FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
drivers/usb/host/xen-hcd.c
1129
XEN_FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
drivers/usb/host/xen-hcd.c
599
bytes = PAGE_SIZE - offset;
drivers/usb/isp1760/isp1760-hcd.c
965
ptr += PAGE_SIZE)
drivers/usb/mon/mon_bin.c
64
#define CHUNK_SIZE PAGE_SIZE
drivers/usb/mon/mon_text.c
40
#define EVENT_MAX (4*PAGE_SIZE / sizeof(struct mon_event_text))
drivers/usb/serial/sierra.c
41
#define MAX_TRANSFER (PAGE_SIZE - 512)
drivers/usb/serial/usb-serial.c
881
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
drivers/usb/storage/scsiglue.c
105
max_sectors = PAGE_SIZE >> 9;
drivers/usb/storage/uas.c
930
.dma_boundary = PAGE_SIZE - 1,
drivers/usb/storage/usb.c
158
return format_delay_ms(delay_ms, 3, "ms", s, PAGE_SIZE);
drivers/usb/usb-skeleton.c
37
#define MAX_TRANSFER (PAGE_SIZE - 512)
drivers/vdpa/ifcvf/ifcvf_base.h
27
#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
drivers/vdpa/ifcvf/ifcvf_main.c
621
area.size = PAGE_SIZE;
drivers/vdpa/mlx5/core/resources.c
280
res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
drivers/vdpa/mlx5/net/mlx5_vnet.c
2649
return PAGE_SIZE;
drivers/vdpa/mlx5/net/mlx5_vnet.c
3484
ret.size = PAGE_SIZE;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
317
return PAGE_SIZE;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
356
area.size = PAGE_SIZE;
drivers/vdpa/solidrun/snet_main.c
21
#define SNET_QUEUE_ALIGNMENT PAGE_SIZE
drivers/vdpa/vdpa_sim/vdpa_sim.c
43
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
230
net->buffer, PAGE_SIZE);
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
499
net->buffer = kvmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/vdpa/vdpa_user/iova_domain.c
169
sz = min_t(size_t, PAGE_SIZE - offset, size);
drivers/vdpa/vdpa_user/iova_domain.c
222
u64 last = start + PAGE_SIZE - 1;
drivers/vdpa/vdpa_user/iova_domain.c
285
int inner_pages = PAGE_SIZE / BOUNCE_MAP_SIZE;
drivers/vdpa/vdpa_user/iova_domain.c
316
PAGE_SIZE);
drivers/vdpa/vdpa_user/iova_domain.c
330
int inner_pages = PAGE_SIZE / BOUNCE_MAP_SIZE;
drivers/vdpa/vdpa_user/iova_domain.c
360
head_page, 0, PAGE_SIZE);
drivers/vdpa/vdpa_user/iova_domain.c
658
PAGE_SIZE, bounce_pfns);
drivers/vdpa/vdpa_user/vduse_dev.c
1961
if (config->vq_align > PAGE_SIZE)
drivers/vdpa/vdpa_user/vduse_dev.c
1964
if (config->config_size > PAGE_SIZE)
drivers/vdpa/virtio_pci/vp_vdpa.c
401
return PAGE_SIZE;
drivers/vfio/fsl-mc/vfio_fsl_mc.c
364
if (region.size < PAGE_SIZE || base + size > region.size)
drivers/vfio/group.c
305
buf = strndup_user(arg, PAGE_SIZE);
drivers/vfio/pci/mlx5/cmd.c
1415
unsigned int npages = DIV_ROUND_UP_ULL(rq_size, PAGE_SIZE);
drivers/vfio/pci/mlx5/cmd.c
1630
if (WARN_ON(buf_offset + size >= qp->recv_buf.npages * PAGE_SIZE ||
drivers/vfio/pci/mlx5/cmd.c
1635
page_index = buf_offset / PAGE_SIZE;
drivers/vfio/pci/mlx5/cmd.c
1636
page_offset = buf_offset % PAGE_SIZE;
drivers/vfio/pci/mlx5/cmd.c
1637
nent_in_page = (PAGE_SIZE - page_offset) / entry_size;
drivers/vfio/pci/mlx5/cmd.c
343
MLX5_SET64(mkc, mkc, len, npages * PAGE_SIZE);
drivers/vfio/pci/mlx5/cmd.c
366
dma_iova_destroy(mdev->device, state, npages * PAGE_SIZE, dir,
drivers/vfio/pci/mlx5/cmd.c
373
dma_unmap_page(mdev->device, addr, PAGE_SIZE, dir);
drivers/vfio/pci/mlx5/cmd.c
390
if (dma_iova_try_alloc(mdev->device, state, 0, npages * PAGE_SIZE)) {
drivers/vfio/pci/mlx5/cmd.c
395
PAGE_SIZE, dir, 0);
drivers/vfio/pci/mlx5/cmd.c
399
addr += PAGE_SIZE;
drivers/vfio/pci/mlx5/cmd.c
400
mapped += PAGE_SIZE;
drivers/vfio/pci/mlx5/cmd.c
408
PAGE_SIZE, dir);
drivers/vfio/pci/mlx5/cmd.c
774
MLX5_SET(save_vhca_state_in, in, size, buf->npages * PAGE_SIZE);
drivers/vfio/pci/mlx5/cmd.c
799
PAGE_SIZE),
drivers/vfio/pci/mlx5/cmd.h
229
int page_entry = offset / PAGE_SIZE;
drivers/vfio/pci/mlx5/main.c
1024
PAGE_SIZE),
drivers/vfio/pci/mlx5/main.c
1136
DIV_ROUND_UP(size, PAGE_SIZE), DMA_FROM_DEVICE);
drivers/vfio/pci/mlx5/main.c
140
page_offset = offset % PAGE_SIZE;
drivers/vfio/pci/mlx5/main.c
145
page_len = min_t(size_t, copy_len, PAGE_SIZE - page_offset);
drivers/vfio/pci/mlx5/main.c
280
u32 npages = DIV_ROUND_UP(required_length, PAGE_SIZE);
drivers/vfio/pci/mlx5/main.c
345
header_buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(size, PAGE_SIZE),
drivers/vfio/pci/mlx5/main.c
361
data.stop_copy_size = cpu_to_le64(migf->buf[0]->npages * PAGE_SIZE);
drivers/vfio/pci/mlx5/main.c
406
(BIT_ULL(__mlx5_bit_sz(save_vhca_state_in, size)) - PAGE_SIZE));
drivers/vfio/pci/mlx5/main.c
411
migf, DIV_ROUND_UP(inc_state_size, PAGE_SIZE),
drivers/vfio/pci/mlx5/main.c
422
PAGE_SIZE),
drivers/vfio/pci/mlx5/main.c
531
buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(inc_length, PAGE_SIZE),
drivers/vfio/pci/mlx5/main.c
691
page_offset = offset % PAGE_SIZE;
drivers/vfio/pci/mlx5/main.c
696
page_len = min_t(size_t, *len, PAGE_SIZE - page_offset);
drivers/vfio/pci/mlx5/main.c
897
u32 npages = DIV_ROUND_UP(migf->record_size, PAGE_SIZE);
drivers/vfio/pci/mlx5/main.c
927
u32 npages = DIV_ROUND_UP(size, PAGE_SIZE);
drivers/vfio/pci/nvgrace-gpu/main.c
312
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
drivers/vfio/pci/pds/dirty.c
152
max_sge = DIV_ROUND_UP(page_count, PAGE_SIZE * 8);
drivers/vfio/pci/pds/dirty.c
398
npages = DIV_ROUND_UP_ULL(bmp_bytes + page_offset, PAGE_SIZE);
drivers/vfio/pci/pds/dirty.c
412
bmp += PAGE_SIZE;
drivers/vfio/pci/pds/lm.c
154
return sg_page(sg) + (offset - cur_offset) / PAGE_SIZE;
drivers/vfio/pci/pds/lm.c
206
page_offset = (*pos) % PAGE_SIZE;
drivers/vfio/pci/pds/lm.c
214
page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
drivers/vfio/pci/pds/lm.c
304
page_offset = (*pos) % PAGE_SIZE;
drivers/vfio/pci/pds/lm.c
312
page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
drivers/vfio/pci/pds/lm.c
44
npages = DIV_ROUND_UP_ULL(size, PAGE_SIZE);
drivers/vfio/pci/pds/lm.c
49
page_mem = kvzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
drivers/vfio/pci/pds/lm.c
62
p += PAGE_SIZE;
drivers/vfio/pci/pds/lm.c
74
lm_file->alloc_size = npages * PAGE_SIZE;
drivers/vfio/pci/vfio_pci_core.c
167
if (resource_size(res) >= PAGE_SIZE) {
drivers/vfio/pci/vfio_pci_core.c
1696
unsigned long addr = vmf->address & ~((PAGE_SIZE << order) - 1);
drivers/vfio/pci/vfio_pci_core.c
184
dummy_res->resource.end = res->start + PAGE_SIZE - 1;
drivers/vfio/pci/virtio/migrate.c
1008
PAGE_SIZE));
drivers/vfio/pci/virtio/migrate.c
132
DIV_ROUND_UP_ULL(length, PAGE_SIZE));
drivers/vfio/pci/virtio/migrate.c
353
page_offset = offset % PAGE_SIZE;
drivers/vfio/pci/virtio/migrate.c
358
page_len = min_t(size_t, copy_len, PAGE_SIZE - page_offset);
drivers/vfio/pci/virtio/migrate.c
56
return sg_page(sg) + (offset - cur_offset) / PAGE_SIZE;
drivers/vfio/pci/virtio/migrate.c
73
to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
drivers/vfio/pci/virtio/migrate.c
786
page_offset = offset % PAGE_SIZE;
drivers/vfio/pci/virtio/migrate.c
792
page_len = min_t(size_t, *len, PAGE_SIZE - page_offset);
drivers/vfio/pci/virtio/migrate.c
92
buf->allocated_length += filled * PAGE_SIZE;
drivers/vfio/pci/virtio/migrate.c
96
PAGE_SIZE / sizeof(*page_list));
drivers/vfio/platform/vfio_platform_common.c
558
if (region.size < PAGE_SIZE || req_start + req_len > region.size)
drivers/vfio/vfio_iommu_type1.c
1010
iova = user_iova + PAGE_SIZE * j;
drivers/vfio/vfio_iommu_type1.c
1011
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
drivers/vfio/vfio_iommu_type1.c
1036
if (WARN_ON(check_mul_overflow(npage, PAGE_SIZE, &iova_size) ||
drivers/vfio/vfio_iommu_type1.c
1044
dma_addr_t iova = user_iova + PAGE_SIZE * i;
drivers/vfio/vfio_iommu_type1.c
1047
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
drivers/vfio/vfio_iommu_type1.c
1187
pos += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1196
for (len = PAGE_SIZE; pos + len < dma->size; len += PAGE_SIZE) {
drivers/vfio/vfio_iommu_type1.c
1267
iommu->pgsize_bitmap |= PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1845
pos += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1849
size = PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1854
size += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1855
p += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1856
i += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1924
pos += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1928
size = PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1933
size += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1934
p += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
1935
i += PAGE_SIZE;
drivers/vfio/vfio_iommu_type1.c
492
#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))
drivers/vfio/vfio_iommu_type1.c
729
vaddr += (PAGE_SIZE * ret);
drivers/vfio/vfio_iommu_type1.c
730
iova += (PAGE_SIZE * ret);
drivers/vfio/vfio_iommu_type1.c
781
vaddr += PAGE_SIZE * nr_pages;
drivers/vfio/vfio_iommu_type1.c
782
iova += PAGE_SIZE * nr_pages;
drivers/vfio/vfio_iommu_type1.c
916
if (check_mul_overflow(npage, PAGE_SIZE, &iova_size) ||
drivers/vfio/vfio_iommu_type1.c
946
iova = user_iova + PAGE_SIZE * i;
drivers/vfio/vfio_iommu_type1.c
947
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
drivers/vfio/vfio_main.c
1047
(PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))
drivers/vfio/vfio_main.c
1683
device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE),
drivers/vfio/vfio_main.c
1684
npage * PAGE_SIZE, pages,
drivers/vfio/vfio_main.c
1716
ALIGN_DOWN(iova, PAGE_SIZE),
drivers/vfio/vfio_main.c
1717
npage * PAGE_SIZE);
drivers/vhost/net.c
703
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
drivers/vhost/scsi.c
822
n = min_t(unsigned int, PAGE_SIZE - offset, bytes);
drivers/vhost/scsi.c
857
n = min_t(unsigned int, PAGE_SIZE, bytes);
drivers/vhost/scsi.c
916
nbytes = min_t(unsigned int, PAGE_SIZE, len);
drivers/vhost/vdpa.c
1101
unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
drivers/vhost/vdpa.c
1511
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
drivers/vhost/vdpa.c
1527
if (notify.addr & (PAGE_SIZE - 1))
drivers/vhost/vhost.c
2417
int bit = nr + (log % PAGE_SIZE) * 8;
drivers/vhost/vringh.c
1083
io_addr & (PAGE_SIZE - 1));
drivers/video/backlight/lm3533_bl.c
149
return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
drivers/video/backlight/lm3533_bl.c
192
return scnprintf(buf, PAGE_SIZE, "%u\n", val);
drivers/video/backlight/lm3533_bl.c
68
return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id);
drivers/video/backlight/lm3533_bl.c
77
return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
drivers/video/backlight/lm3533_bl.c
97
return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
drivers/video/backlight/lp855x_bl.c
293
return scnprintf(buf, PAGE_SIZE, "%s\n", lp->chipname);
drivers/video/backlight/lp855x_bl.c
307
return scnprintf(buf, PAGE_SIZE, "%s\n", strmode);
drivers/video/backlight/lp8788_bl.c
111
return scnprintf(buf, PAGE_SIZE, "%s\n", strmode);
drivers/video/fbdev/atafb.c
3068
mem_req = PAGE_ALIGN(mem_req) + PAGE_SIZE;
drivers/video/fbdev/atafb.c
3073
pad = -(unsigned long)screen_base & (PAGE_SIZE - 1);
drivers/video/fbdev/aty/atyfb_base.c
111
#define GUI_RESERVE (1 * PAGE_SIZE)
drivers/video/fbdev/aty/atyfb_base.c
1720
u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
drivers/video/fbdev/aty/atyfb_base.c
1955
((off == info->fix.smem_len) && (size == PAGE_SIZE)))
drivers/video/fbdev/aty/atyfb_base.c
1978
page += PAGE_SIZE;
drivers/video/fbdev/aty/atyfb_base.c
2727
u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
drivers/video/fbdev/aty/atyfb_base.c
3604
par->mmap_map[1].size = PAGE_SIZE;
drivers/video/fbdev/aty/mach64_cursor.c
206
info->fix.smem_len -= PAGE_SIZE;
drivers/video/fbdev/aty/mach64_cursor.c
222
info->sprite.size = PAGE_SIZE;
drivers/video/fbdev/broadsheetfb.c
1044
videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE);
drivers/video/fbdev/broadsheetfb.c
942
h_inc = DIV_ROUND_UP(PAGE_SIZE , xres);
drivers/video/fbdev/broadsheetfb.c
950
} else if ((prev_offset + PAGE_SIZE) == pageref->offset) {
drivers/video/fbdev/cg6.c
552
.size = PAGE_SIZE
drivers/video/fbdev/cg6.c
557
.size = PAGE_SIZE
drivers/video/fbdev/cg6.c
562
.size = PAGE_SIZE
drivers/video/fbdev/cg6.c
567
.size = PAGE_SIZE
drivers/video/fbdev/cg6.c
572
.size = PAGE_SIZE
drivers/video/fbdev/core/fb_defio.c
307
npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);
drivers/video/fbdev/core/fb_io_fops.c
106
buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count,
drivers/video/fbdev/core/fb_io_fops.c
117
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
drivers/video/fbdev/core/fb_io_fops.c
36
buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count,
drivers/video/fbdev/core/fb_io_fops.c
47
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
drivers/video/fbdev/core/fbsysfs.c
406
len += scnprintf(&buf[len], PAGE_SIZE - len, "%8ph\n",
drivers/video/fbdev/core/fbsysfs.c
50
return snprintf(&buf[offset], PAGE_SIZE - offset, "%c:%dx%d%c-%d\n",
drivers/video/fbdev/efifb.c
436
if (size_remap % PAGE_SIZE)
drivers/video/fbdev/efifb.c
437
size_remap += PAGE_SIZE - (size_remap % PAGE_SIZE);
drivers/video/fbdev/goldfishfb.c
197
fb->reg_base = ioremap(r->start, PAGE_SIZE);
drivers/video/fbdev/grvga.c
456
page += PAGE_SIZE) {
drivers/video/fbdev/imsttfb.c
840
__u32 vram = (info->fix.smem_len - (PAGE_SIZE << 2));
drivers/video/fbdev/matrox/matroxfb_base.c
590
unsigned int m2 = PAGE_SIZE; /* or 128 if you do not need PAGE ALIGNED address */
drivers/video/fbdev/matrox/matroxfb_base.c
597
m2 = linelen * PAGE_SIZE / m2;
drivers/video/fbdev/metronomefb.c
460
for (i = 0; i < PAGE_SIZE/2; i++) {
drivers/video/fbdev/metronomefb.c
574
videomemorysize = PAGE_SIZE + (fw * fh);
drivers/video/fbdev/metronomefb.c
597
par->csum_table = vmalloc(videomemorysize/PAGE_SIZE);
drivers/video/fbdev/omap/lcdc.c
35
#define MAX_PALETTE_SIZE PAGE_SIZE
drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
426
len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
428
return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
151
l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d",
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
155
l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
332
l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d",
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
336
l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
drivers/video/fbdev/pvr2fb.c
653
nr_pages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/video/fbdev/pvr2fb.c
703
for (i = 0; i < nr_pages; i++, dst += PAGE_SIZE) {
drivers/video/fbdev/sa1100fb.c
1036
fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE);
drivers/video/fbdev/sa1100fb.c
1041
fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE;
drivers/video/fbdev/sa1100fb.c
1042
fbi->screen_dma = fbi->map_dma + PAGE_SIZE;
drivers/video/fbdev/sa1100fb.c
465
fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
drivers/video/fbdev/sa1100fb.c
466
fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
drivers/video/fbdev/sbuslib.c
82
page += PAGE_SIZE;
drivers/video/fbdev/sh7760fb.c
408
if (vram < PAGE_SIZE)
drivers/video/fbdev/sh7760fb.c
409
vram = PAGE_SIZE;
drivers/video/fbdev/sh_mobile_lcdcfb.c
448
sg_set_page(&ch->sglist[nr_pages++], pageref->page, PAGE_SIZE, 0);
drivers/video/fbdev/sm501fb.c
194
ptr &= ~(PAGE_SIZE - 1);
drivers/video/fbdev/sm712fb.c
1053
buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/video/fbdev/sm712fb.c
1063
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
drivers/video/fbdev/sm712fb.c
1122
buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/video/fbdev/sm712fb.c
1132
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
drivers/video/fbdev/smscufx.c
60
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
drivers/video/fbdev/smscufx.c
802
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
drivers/video/fbdev/smscufx.c
805
start += PAGE_SIZE;
drivers/video/fbdev/smscufx.c
806
pos += PAGE_SIZE;
drivers/video/fbdev/smscufx.c
807
if (size > PAGE_SIZE)
drivers/video/fbdev/smscufx.c
808
size -= PAGE_SIZE;
drivers/video/fbdev/smscufx.c
921
int height = (PAGE_SIZE / (width * 2)) + 1;
drivers/video/fbdev/tcx.c
270
.size = PAGE_SIZE
drivers/video/fbdev/tcx.c
274
.size = PAGE_SIZE
drivers/video/fbdev/tcx.c
278
.size = PAGE_SIZE
drivers/video/fbdev/tcx.c
282
.size = PAGE_SIZE
drivers/video/fbdev/tcx.c
286
.size = PAGE_SIZE
drivers/video/fbdev/udlfb.c
1872
if (size > PAGE_SIZE) {
drivers/video/fbdev/udlfb.c
350
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
drivers/video/fbdev/udlfb.c
353
start += PAGE_SIZE;
drivers/video/fbdev/udlfb.c
354
pos += PAGE_SIZE;
drivers/video/fbdev/udlfb.c
355
if (size > PAGE_SIZE)
drivers/video/fbdev/udlfb.c
356
size -= PAGE_SIZE;
drivers/video/fbdev/udlfb.c
756
&cmd, pageref->offset, PAGE_SIZE,
drivers/video/fbdev/udlfb.c
759
bytes_rendered += PAGE_SIZE;
drivers/video/fbdev/uvesafb.c
1516
if (mtrr && !(info->fix.smem_start & (PAGE_SIZE - 1))) {
drivers/video/fbdev/uvesafb.c
1528
} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
drivers/video/fbdev/uvesafb.c
1558
for (i = 0; i < par->vbe_modes_cnt && ret < PAGE_SIZE; i++) {
drivers/video/fbdev/uvesafb.c
1559
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
drivers/video/fbdev/uvesafb.c
1861
return snprintf(buf, PAGE_SIZE, "%s\n", v86d_path);
drivers/video/fbdev/vesafb.c
449
} while (temp_size >= PAGE_SIZE && par->wc_cookie < 0);
drivers/video/fbdev/xen-fbfront.c
195
end = beg + PAGE_SIZE - 1;
drivers/video/fbdev/xen-fbfront.c
391
info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/video/fbdev/xen-fbfront.c
532
int epd = PAGE_SIZE / sizeof(info->gfns[0]);
drivers/video/fbdev/xen-fbfront.c
535
info->gfns[i] = vmalloc_to_gfn(info->fb + i * PAGE_SIZE);
drivers/virt/acrn/mm.c
190
.address = memmap->vma_base + i * PAGE_SIZE,
drivers/virt/acrn/mm.c
306
region_size = PAGE_SIZE << order;
drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
115
if (res.a0 > PAGE_SIZE) /* Includes error codes */
drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
23
phys_addr_t end = phys + PAGE_SIZE;
drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
40
void *addr = (void *)start, *end = addr + numpages * PAGE_SIZE;
drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
49
addr += PAGE_SIZE;
drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
92
phys += PAGE_SIZE;
drivers/virt/coco/sev-guest/sev-guest.c
196
!IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
drivers/virt/coco/sev-guest/sev-guest.c
633
BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
drivers/virt/fsl_hypervisor.c
217
lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
drivers/virt/fsl_hypervisor.c
219
param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
drivers/virt/fsl_hypervisor.c
221
num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
drivers/virt/fsl_hypervisor.c
269
sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset);
drivers/virt/fsl_hypervisor.c
284
sg_list[i].size = min_t(uint64_t, count, PAGE_SIZE);
drivers/virt/vboxguest/vboxguest_utils.c
368
buf += PAGE_SIZE;
drivers/virtio/virtio_balloon.c
27
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
drivers/virtio/virtio_mem.c
2561
sb_size = PAGE_SIZE * pageblock_nr_pages;
drivers/virtio/virtio_mem.c
2704
if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE))
drivers/virtio/virtio_mem.c
411
new_array = vzalloc(new_pages * PAGE_SIZE);
drivers/virtio/virtio_mem.c
417
memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
drivers/virtio/virtio_mem.c
477
new_array = vzalloc(new_pages * PAGE_SIZE);
drivers/virtio/virtio_mem.c
483
memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
drivers/virtio/virtio_mem.c
600
new_bitmap = vzalloc(new_pages * PAGE_SIZE);
drivers/virtio/virtio_mem.c
606
memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
drivers/virtio/virtio_mmio.c
408
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
drivers/virtio/virtio_mmio.c
551
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
drivers/virtio/virtio_mmio.c
622
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
drivers/virtio/virtio_mmio.c
735
snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
drivers/virtio/virtio_mmio.c
77
#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
drivers/virtio/virtio_pci_modern_dev.c
333
if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
drivers/virtio/virtio_pci_modern_dev.c
350
0, PAGE_SIZE,
drivers/virtio/virtio_ring.c
1262
for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
drivers/w1/slaves/w1_ds2423.c
100
c -= snprintf(out_buf + PAGE_SIZE - c,
drivers/w1/slaves/w1_ds2423.c
106
c -= snprintf(out_buf + PAGE_SIZE - c, c, "Connection error");
drivers/w1/slaves/w1_ds2423.c
109
return PAGE_SIZE - c;
drivers/w1/slaves/w1_ds2423.c
45
c = PAGE_SIZE;
drivers/w1/slaves/w1_ds2423.c
65
c -= snprintf(out_buf + PAGE_SIZE - c,
drivers/w1/slaves/w1_ds2423.c
74
c -= snprintf(out_buf + PAGE_SIZE - c,
drivers/w1/slaves/w1_ds2423.c
97
c -= snprintf(out_buf + PAGE_SIZE - c,
drivers/w1/slaves/w1_therm.c
1584
ssize_t c = PAGE_SIZE;
drivers/w1/slaves/w1_therm.c
1609
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", info.rom[i]);
drivers/w1/slaves/w1_therm.c
1610
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
drivers/w1/slaves/w1_therm.c
1619
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
drivers/w1/slaves/w1_therm.c
1622
c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
drivers/w1/slaves/w1_therm.c
1625
ret = PAGE_SIZE - c;
drivers/w1/slaves/w1_therm.c
2087
ssize_t c = PAGE_SIZE;
drivers/w1/slaves/w1_therm.c
2152
c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", seq);
drivers/w1/slaves/w1_therm.c
2153
return PAGE_SIZE - c;
drivers/w1/w1.c
144
static const BIN_ATTR_RW(rw, PAGE_SIZE);
drivers/w1/w1.c
362
int c = PAGE_SIZE;
drivers/w1/w1.c
371
c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name);
drivers/w1/w1.c
374
c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
drivers/w1/w1.c
378
return PAGE_SIZE - c;
drivers/w1/w1.c
384
int c = PAGE_SIZE;
drivers/w1/w1.c
385
c -= snprintf(buf+PAGE_SIZE - c, c,
drivers/w1/w1.c
387
return PAGE_SIZE - c;
drivers/w1/w1.c
481
int c = PAGE_SIZE;
drivers/w1/w1.c
482
c -= snprintf(buf+PAGE_SIZE - c, c,
drivers/w1/w1.c
484
return PAGE_SIZE - c;
drivers/w1/w1_netlink.c
404
cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
drivers/w1/w1_netlink.c
423
if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
drivers/xen/balloon.c
134
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
drivers/xen/balloon.c
254
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
drivers/xen/balloon.c
283
resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
drivers/xen/balloon.c
294
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
drivers/xen/balloon.c
627
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
drivers/xen/biomerge.c
11
#if XEN_PAGE_SIZE == PAGE_SIZE
drivers/xen/events/events_base.c
179
#define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
drivers/xen/events/events_base.c
180
#define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
drivers/xen/events/events_base.c
181
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
drivers/xen/evtchn.c
213
if (count > PAGE_SIZE)
drivers/xen/evtchn.c
214
count = PAGE_SIZE;
drivers/xen/evtchn.c
289
if (count > PAGE_SIZE)
drivers/xen/evtchn.c
290
count = PAGE_SIZE;
drivers/xen/gntalloc.c
139
gref->file_index = op->index + i * PAGE_SIZE;
drivers/xen/gntalloc.c
214
index += PAGE_SIZE;
drivers/xen/gntalloc.c
302
priv->index += op.count * PAGE_SIZE;
drivers/xen/gntalloc.c
383
index = op.index & ~(PAGE_SIZE - 1);
drivers/xen/gntalloc.c
384
pgoff = op.index & (PAGE_SIZE - 1);
drivers/xen/gntalloc.c
534
rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
drivers/xen/gntdev.c
453
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
drivers/xen/grant-table.c
1207
len = min(PAGE_SIZE - offset, len);
drivers/xen/grant-table.c
169
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
drivers/xen/grant-table.c
170
#define SPP (PAGE_SIZE / sizeof(grant_status_t))
drivers/xen/mem-reservation.c
46
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
drivers/xen/mem-reservation.c
72
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
drivers/xen/platform-pci.c
154
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
drivers/xen/privcmd.c
142
if (size > PAGE_SIZE)
drivers/xen/privcmd.c
145
pageidx = PAGE_SIZE;
drivers/xen/privcmd.c
148
if (pageidx > PAGE_SIZE-size) {
drivers/xen/privcmd.c
188
BUG_ON(size > PAGE_SIZE);
drivers/xen/privcmd.c
190
pageidx = PAGE_SIZE;
drivers/xen/privcmd.c
194
if (pageidx > PAGE_SIZE-size) {
drivers/xen/privcmd.c
223
BUG_ON(size > PAGE_SIZE);
drivers/xen/privcmd.c
226
int nr = (PAGE_SIZE/size);
drivers/xen/privcmd.c
578
BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
drivers/xen/privcmd.c
620
PAGE_SIZE) - off;
drivers/xen/privcmd.c
625
(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
drivers/xen/privcmd.c
695
PAGE_SIZE);
drivers/xen/pvcalls-front.c
352
PAGE_SIZE << map->active.ring->ring_order);
drivers/xen/pvcalls-front.c
366
bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER,
drivers/xen/unpopulated-alloc.c
132
struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
drivers/xen/unpopulated-alloc.c
56
alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
drivers/xen/unpopulated-alloc.c
57
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
drivers/xen/xen-front-pgdir-shbuf.c
149
#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
drivers/xen/xen-front-pgdir-shbuf.c
291
ptr += PAGE_SIZE;
drivers/xen/xen-front-pgdir-shbuf.c
344
ptr += PAGE_SIZE;
drivers/xen/xen-front-pgdir-shbuf.c
384
ptr += PAGE_SIZE;
drivers/xen/xen-front-pgdir-shbuf.c
452
PAGE_SIZE * i));
drivers/xen/xen-front-pgdir-shbuf.c
479
buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
drivers/xen/xen-pciback/pci_stub.c
1261
if (count >= PAGE_SIZE)
drivers/xen/xen-pciback/pci_stub.c
1285
if (count >= PAGE_SIZE)
drivers/xen/xen-pciback/pci_stub.c
1375
if (count >= PAGE_SIZE)
drivers/xen/xen-pciback/pci_stub.c
1391
if (count >= PAGE_SIZE)
drivers/xen/xen-pciback/pci_stub.c
1457
if (count >= PAGE_SIZE)
drivers/xen/xen-pciback/pci_stub.c
1516
if (count >= PAGE_SIZE)
drivers/xen/xen-scsiback.c
1474
rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
drivers/xen/xen-scsiback.c
1616
ret = snprintf(page, PAGE_SIZE, "%s\n",
drivers/xen/xen-scsiback.c
533
(unsigned)ring_req->seg[i].length > PAGE_SIZE ||
drivers/xen/xen-scsiback.c
595
if (sg->offset >= PAGE_SIZE ||
drivers/xen/xen-scsiback.c
596
sg->length > PAGE_SIZE ||
drivers/xen/xen-scsiback.c
597
sg->offset + sg->length > PAGE_SIZE)
drivers/xen/xen-scsiback.c
858
BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
drivers/xen/xenbus/xenbus_dev_backend.c
99
if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
drivers/xen/xenfs/xenstored.c
38
if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
fs/9p/v9fs.c
591
ssize_t n = 0, count = 0, limit = PAGE_SIZE;
fs/9p/vfs_file.c
497
(loff_t)vma->vm_pgoff * PAGE_SIZE,
fs/9p/vfs_file.c
498
(loff_t)vma->vm_pgoff * PAGE_SIZE +
fs/affs/affs.h
26
#define AFFS_CACHE_SIZE PAGE_SIZE
fs/affs/file.c
705
from = pos & (PAGE_SIZE - 1);
fs/affs/super.c
360
affs_set_blocksize(sb, PAGE_SIZE);
fs/affs/super.c
364
j = PAGE_SIZE;
fs/afs/mntpt.c
130
if (size < 2 || size > PAGE_SIZE - 1)
fs/afs/super.c
457
sb->s_blocksize = PAGE_SIZE;
fs/afs/xdr_fs.h
40
#define AFS_DIR_BLOCKS_PER_PAGE (PAGE_SIZE / AFS_DIR_BLOCK_SIZE)
fs/aio.c
269
inode->i_size = PAGE_SIZE * nr_pages;
fs/aio.c
508
nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
fs/aio.c
542
ctx->mmap_size = nr_pages * PAGE_SIZE;
fs/aio.c
579
#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
fs/aio.c
580
#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
fs/befs/linuxvfs.c
481
if (len == 0 || len > PAGE_SIZE) {
fs/befs/super.c
84
if (befs_sb->block_size > PAGE_SIZE) {
fs/befs/super.c
87
PAGE_SIZE);
fs/bfs/inode.c
315
if (i > PAGE_SIZE - 100) break;
fs/binfmt_elf.c
1339
mm->brk = mm->start_brk = mm->brk + PAGE_SIZE;
fs/binfmt_elf.c
1355
error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
fs/binfmt_elf.c
1358
retval = do_mseal(0, PAGE_SIZE, 0);
fs/binfmt_elf.c
1616
size = round_up(size, PAGE_SIZE);
fs/binfmt_elf.c
1662
data[1] = PAGE_SIZE;
fs/binfmt_elf.c
82
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
fs/binfmt_elf.c
85
#define ELF_MIN_ALIGN PAGE_SIZE
fs/binfmt_elf_fdpic.c
1132
= PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
fs/binfmt_elf_fdpic.c
411
stack_size = (stack_size + PAGE_SIZE - 1) & PAGE_MASK;
fs/binfmt_elf_fdpic.c
412
if (stack_size < PAGE_SIZE * 2)
fs/binfmt_elf_fdpic.c
413
stack_size = PAGE_SIZE * 2;
fs/binfmt_elf_fdpic.c
642
NEW_AUX_ENT(AT_PAGESZ, PAGE_SIZE);
fs/binfmt_elf_fdpic.c
700
(MAX_ARG_PAGES * PAGE_SIZE - bprm->p);
fs/binfmt_flat.c
874
stack_len += PAGE_SIZE * MAX_ARG_PAGES - bprm->p; /* the strings */
fs/btrfs/bio.c
187
const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/bio.c
191
phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
fs/btrfs/bio.c
247
const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/bio.c
285
ASSERT(offset_in_page(paddrs[i]) + step <= PAGE_SIZE);
fs/btrfs/bio.c
307
const u32 step = min(sectorsize, PAGE_SIZE);
fs/btrfs/bio.c
312
phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
fs/btrfs/bio.c
879
const u32 alignment = min(blocksize, PAGE_SIZE);
fs/btrfs/compression.c
1148
compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
fs/btrfs/compression.c
1540
i = start % PAGE_SIZE;
fs/btrfs/compression.c
1541
while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
fs/btrfs/compression.c
400
if (fs_info->sectorsize < PAGE_SIZE)
fs/btrfs/compression.c
508
if (fs_info->sectorsize < PAGE_SIZE)
fs/btrfs/compression.h
36
#define BTRFS_MAX_COMPRESSED_PAGES (BTRFS_MAX_COMPRESSED / PAGE_SIZE)
fs/btrfs/compression.h
37
static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
fs/btrfs/defrag.c
1206
ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
fs/btrfs/direct-io.c
382
if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
fs/btrfs/disk-io.c
101
btrfs_csum_update(&csum, kaddr, PAGE_SIZE);
fs/btrfs/disk-io.c
176
const u32 step = min(fs_info->nodesize, PAGE_SIZE);
fs/btrfs/disk-io.c
178
phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
fs/btrfs/disk-io.c
2413
sectorsize, PAGE_SIZE);
fs/btrfs/disk-io.c
2504
if (sectorsize > PAGE_SIZE) {
fs/btrfs/disk-io.c
2935
fs_info->dirty_metadata_batch = PAGE_SIZE *
fs/btrfs/disk-io.c
3227
if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
fs/btrfs/disk-io.c
3269
if (fs_info->sectorsize != PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
fs/btrfs/disk-io.c
3272
PAGE_SIZE, fs_info->sectorsize);
fs/btrfs/disk-io.c
3414
fs_info->block_min_order = ilog2(round_up(sectorsize, PAGE_SIZE) >> PAGE_SHIFT);
fs/btrfs/disk-io.c
3420
if (fs_info->sectorsize > PAGE_SIZE)
fs/btrfs/disk-io.c
3423
fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/disk-io.c
3463
sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
fs/btrfs/disk-io.c
515
if (fs_info->sectorsize == PAGE_SIZE) {
fs/btrfs/disk-io.c
86
first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
fs/btrfs/extent-io-tree.c
70
if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
fs/btrfs/extent_io.c
1301
ASSERT(IS_ALIGNED(start, PAGE_SIZE));
fs/btrfs/extent_io.c
1302
ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE));
fs/btrfs/extent_io.c
1604
DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
fs/btrfs/extent_io.c
2660
cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
fs/btrfs/extent_io.c
3298
if (unlikely(nodesize < PAGE_SIZE && !IS_ALIGNED(start, nodesize))) {
fs/btrfs/extent_io.c
3304
if (unlikely(nodesize >= PAGE_SIZE && !PAGE_ALIGNED(start))) {
fs/btrfs/extent_io.c
3456
prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA);
fs/btrfs/extent_io.c
4485
unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1;
fs/btrfs/extent_io.c
711
eb->folio_size = PAGE_SIZE;
fs/btrfs/extent_io.h
85
#define INLINE_EXTENT_BUFFER_PAGES (BTRFS_MAX_METADATA_BLOCKSIZE / PAGE_SIZE)
fs/btrfs/fiemap.c
648
cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
fs/btrfs/file-item.c
144
u32 max_csum_size = round_down(PAGE_SIZE - sizeof(struct btrfs_ordered_sum),
fs/btrfs/file-item.c
27
PAGE_SIZE))
fs/btrfs/file-item.c
783
const u32 step = min(blocksize, PAGE_SIZE);
fs/btrfs/file-item.c
785
phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
fs/btrfs/file.c
2242
const u64 page_lockstart = round_up(start, PAGE_SIZE);
fs/btrfs/file.c
2243
const u64 page_lockend = round_down(end + 1, PAGE_SIZE);
fs/btrfs/free-space-cache.c
1453
for (int i = 0; i < round_up(i_size, PAGE_SIZE) / PAGE_SIZE; i++) {
fs/btrfs/free-space-cache.c
1454
u64 dirty_start = i * PAGE_SIZE;
fs/btrfs/free-space-cache.c
1455
u64 dirty_len = min_t(u64, dirty_start + PAGE_SIZE, i_size) - dirty_start;
fs/btrfs/free-space-cache.c
34
#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
fs/btrfs/free-space-cache.c
383
num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/btrfs/free-space-cache.c
386
if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
fs/btrfs/free-space-cache.c
4216
PAGE_SIZE, PAGE_SIZE,
fs/btrfs/free-space-cache.c
423
io_ctl->size = PAGE_SIZE;
fs/btrfs/free-space-cache.c
439
PAGE_SIZE);
fs/btrfs/free-space-cache.c
544
crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
fs/btrfs/free-space-cache.c
566
crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
fs/btrfs/fs.c
151
if (blocksize == PAGE_SIZE || blocksize == SZ_4K || blocksize == BTRFS_MIN_BLOCKSIZE)
fs/btrfs/fs.c
170
if (IS_ENABLED(CONFIG_HIGHMEM) && blocksize > PAGE_SIZE)
fs/btrfs/inode.c
1679
nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
fs/btrfs/inode.c
1774
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
fs/btrfs/inode.c
2922
ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
fs/btrfs/inode.c
2947
btrfs_delalloc_release_extents(inode, PAGE_SIZE);
fs/btrfs/inode.c
2950
PAGE_SIZE, true);
fs/btrfs/inode.c
2963
btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
fs/btrfs/inode.c
3460
const u32 step = min(blocksize, PAGE_SIZE);
fs/btrfs/inode.c
3462
phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
fs/btrfs/inode.c
3492
const u32 step = min(blocksize, PAGE_SIZE);
fs/btrfs/inode.c
3501
ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
fs/btrfs/inode.c
3544
const u32 step = min(blocksize, PAGE_SIZE);
fs/btrfs/inode.c
490
ASSERT(compressed_size <= PAGE_SIZE);
fs/btrfs/inode.c
5085
if (end == (u64)-1 && blocksize < PAGE_SIZE)
fs/btrfs/inode.c
588
if (size > PAGE_SIZE || compressed_size > PAGE_SIZE)
fs/btrfs/inode.c
6257
private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
fs/btrfs/inode.c
6360
PAGE_SIZE) {
fs/btrfs/inode.c
9607
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
fs/btrfs/inode.c
9658
nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
fs/btrfs/inode.c
9682
page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
fs/btrfs/inode.c
9687
PAGE_SIZE - page_offset);
fs/btrfs/ioctl.c
1233
vol_args->size > PAGE_SIZE) {
fs/btrfs/ioctl.c
2896
if (alloc_size > PAGE_SIZE)
fs/btrfs/ioctl.c
4564
size_t bytes = min_t(size_t, priv->count - cur, PAGE_SIZE - page_offset);
fs/btrfs/ioctl.c
4619
nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
fs/btrfs/qgroup.c
3158
if (size < sizeof(*inherit) || size > PAGE_SIZE)
fs/btrfs/raid56.c
1070
const unsigned int step = min(fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
1078
ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize) ||
fs/btrfs/raid56.c
1079
IS_ALIGNED(fs_info->sectorsize, PAGE_SIZE));
fs/btrfs/raid56.c
1242
const u32 step = min(sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
1311
const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
1412
const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
1619
const u32 step = min(sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
1679
const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
1683
phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
fs/btrfs/raid56.c
1944
const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
233
const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
2609
const u32 step = min(PAGE_SIZE, rbio->bioc->fs_info->sectorsize);
fs/btrfs/raid56.c
2656
const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/raid56.c
3050
cur_off += PAGE_SIZE) {
fs/btrfs/raid56.c
3055
memcpy_from_folio(kaddr, data_folios[findex], foffset, PAGE_SIZE);
fs/btrfs/raid56.c
3058
foffset += PAGE_SIZE;
fs/btrfs/raid56.c
329
nr_sectors = round_up(PAGE_SIZE, fs_info->sectorsize) >> fs_info->sectorsize_bits;
fs/btrfs/raid56.c
358
const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/relocation.c
4008
size_t bytes = min_t(u64, length, PAGE_SIZE);
fs/btrfs/relocation.c
4043
unsigned int nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
fs/btrfs/scrub.c
132
#define SCRUB_STRIPE_MAX_FOLIOS (BTRFS_STRIPE_LEN / PAGE_SIZE)
fs/btrfs/send.c
5697
sctx->page_cache_clear_start = round_down(offset, PAGE_SIZE);
fs/btrfs/send.c
6755
round_up(i_size, PAGE_SIZE) - 1);
fs/btrfs/send.h
31
#define BTRFS_SEND_BUF_SIZE_V2 ALIGN(SZ_16K + BTRFS_MAX_COMPRESSED, PAGE_SIZE)
fs/btrfs/subpage.h
105
return fs_info->nodesize < PAGE_SIZE;
fs/btrfs/super.c
739
if (fs_info->sectorsize != PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
fs/btrfs/super.c
742
fs_info->sectorsize, PAGE_SIZE);
fs/btrfs/tests/btrfs-tests.c
263
PAGE_SIZE,
fs/btrfs/tests/extent-io-tests.c
181
end = start + PAGE_SIZE - 1;
fs/btrfs/tests/extent-io-tests.c
212
end = start + PAGE_SIZE - 1;
fs/btrfs/tests/extent-io-tests.c
246
end = start + PAGE_SIZE - 1;
fs/btrfs/tests/extent-io-tests.c
253
if (end != test_start + PAGE_SIZE - 1) {
fs/btrfs/tests/extent-io-tests.c
267
end = start + PAGE_SIZE - 1;
fs/btrfs/tests/extent-io-tests.c
302
end = start + PAGE_SIZE - 1;
fs/btrfs/tests/extent-io-tests.c
315
if (start != test_start && end != test_start + PAGE_SIZE - 1) {
fs/btrfs/tests/extent-io-tests.c
317
test_start, test_start + PAGE_SIZE - 1, start, end);
fs/btrfs/tests/extent-io-tests.c
457
if (byte_len > PAGE_SIZE) {
fs/btrfs/tests/extent-io-tests.c
459
PAGE_SIZE - sizeof(long) / 2, 0,
fs/btrfs/tests/extent-io-tests.c
470
PAGE_SIZE - sizeof(long) / 2, 0,
fs/btrfs/tests/extent-io-tests.c
691
if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
fs/btrfs/tests/extent-io-tests.c
777
if (nodesize > PAGE_SIZE) {
fs/btrfs/tests/free-space-tests.c
1025
BITS_PER_BITMAP * sectorsize + PAGE_SIZE);
fs/btrfs/tests/free-space-tests.c
13
#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
fs/btrfs/tests/free-space-tree-tests.c
568
bitmap_alignment = BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE;
fs/btrfs/verity.c
713
if (merkle_pos > inode->i_sb->s_maxbytes - off - PAGE_SIZE)
fs/btrfs/verity.c
754
folio_address(folio), PAGE_SIZE, folio);
fs/btrfs/verity.c
759
if (ret < PAGE_SIZE)
fs/btrfs/verity.c
760
folio_zero_segment(folio, ret, PAGE_SIZE);
fs/btrfs/zlib.c
27
#define ZLIB_DFLTCC_BUF_SIZE (4 * PAGE_SIZE)
fs/btrfs/zoned.c
2297
u64 mapped_length = PAGE_SIZE;
fs/btrfs/zoned.c
2304
if (unlikely(ret || !bioc || mapped_length < PAGE_SIZE)) {
fs/buffer.c
1113
return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
fs/buffer.c
1740
pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
fs/buffer.c
1746
end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
fs/buffer.c
193
index = ((loff_t)block << blkbits) / PAGE_SIZE;
fs/buffer.c
2517
len = PAGE_SIZE - zerofrom;
fs/buffer.c
2671
iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
fs/buffer.c
3159
max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
fs/buffer.c
356
if (bh->b_folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
fs/cachefiles/cache.c
69
root->d_sb->s_blocksize > PAGE_SIZE)
fs/cachefiles/cache.c
92
if (stats.f_bsize > PAGE_SIZE)
fs/cachefiles/daemon.c
295
if (datalen > PAGE_SIZE - 1)
fs/cachefiles/io.c
530
start = round_down(*_start, PAGE_SIZE);
fs/cachefiles/io.c
540
*_len = round_up(len, PAGE_SIZE);
fs/cachefiles/io.c
566
if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
fs/cachefiles/io.c
598
return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
fs/ceph/acl.c
196
err = ceph_pagelist_reserve(pagelist, PAGE_SIZE);
fs/ceph/addr.c
1258
PAGE_SIZE,
fs/ceph/addr.c
1996
if (off >= PAGE_SIZE) {
fs/ceph/addr.c
2018
if (err < PAGE_SIZE)
fs/ceph/addr.c
2019
zero_user_segment(page, err, PAGE_SIZE);
fs/ceph/addr.c
2176
if (len < PAGE_SIZE)
fs/ceph/addr.c
2177
zero_user_segment(page, len, PAGE_SIZE);
fs/ceph/addr.c
2478
osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
fs/ceph/dir.c
137
unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
fs/ceph/file.c
108
int len = min_t(int, bytes, PAGE_SIZE - start);
fs/ceph/file.c
1209
plen = min_t(size_t, left, PAGE_SIZE - page_off);
fs/ceph/file.c
1896
PAGE_SIZE);
fs/ceph/file.c
1912
PAGE_SIZE);
fs/ceph/file.c
1927
PAGE_SIZE);
fs/ceph/file.c
1970
size_t plen = min_t(size_t, left, PAGE_SIZE - off);
fs/ceph/file.c
2210
iocb->ki_pos < PAGE_SIZE) {
fs/ceph/file.c
2213
end = min_t(loff_t, end, PAGE_SIZE);
fs/ceph/file.c
2546
loff_t nearly = round_up(offset, PAGE_SIZE);
fs/ceph/file.c
2555
if (length >= PAGE_SIZE) {
fs/ceph/file.c
2556
loff_t size = round_down(length, PAGE_SIZE);
fs/ceph/inode.c
1918
unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
fs/ceph/inode.c
1943
memset(ctl->dentries, 0, PAGE_SIZE);
fs/ceph/inode.c
2492
memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
fs/ceph/mds_client.c
2336
PAGE_SIZE, GFP_NOFS, false);
fs/ceph/mds_client.c
2563
num_entries = (PAGE_SIZE << order) / size;
fs/ceph/mds_client.c
2566
rinfo->dir_buf_size = PAGE_SIZE << order;
fs/ceph/mds_client.c
29
#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
fs/ceph/mds_client.h
194
#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - sizeof(u32) - \
fs/ceph/super.c
452
if (result.uint_32 < PAGE_SIZE ||
fs/ceph/super.c
455
fsopt->wsize = ALIGN(result.uint_32, PAGE_SIZE);
fs/ceph/super.c
458
if (result.uint_32 < PAGE_SIZE ||
fs/ceph/super.c
461
fsopt->rsize = ALIGN(result.uint_32, PAGE_SIZE);
fs/ceph/super.c
464
fsopt->rasize = ALIGN(result.uint_32, PAGE_SIZE);
fs/ceph/super.c
487
if (result.uint_32 < PAGE_SIZE && result.uint_32 != 0)
fs/ceph/xattr.c
1396
err = ceph_pagelist_reserve(pagelist, PAGE_SIZE);
fs/coda/file.c
170
ppos = vma->vm_pgoff * PAGE_SIZE;
fs/coda/symlink.c
28
unsigned int len = PAGE_SIZE;
fs/configfs/dir.c
496
inode->i_size = PAGE_SIZE;
fs/configfs/mount.c
68
sb->s_blocksize = PAGE_SIZE;
fs/configfs/symlink.c
88
body = kzalloc(PAGE_SIZE, GFP_KERNEL);
fs/coredump.c
1233
static char zeroes[PAGE_SIZE];
fs/coredump.c
1243
while (nr > PAGE_SIZE) {
fs/coredump.c
1244
if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
fs/coredump.c
1246
nr -= PAGE_SIZE;
fs/coredump.c
1292
if (cprm->written + PAGE_SIZE > cprm->limit)
fs/coredump.c
1297
bvec_set_page(&bvec, page, PAGE_SIZE, 0);
fs/coredump.c
1298
iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
fs/coredump.c
1300
if (n != PAGE_SIZE)
fs/coredump.c
1303
cprm->written += PAGE_SIZE;
fs/coredump.c
1304
cprm->pos += PAGE_SIZE;
fs/coredump.c
1322
size_t left = copy_mc_to_kernel(page_address(dst), buf, PAGE_SIZE);
fs/coredump.c
1351
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
fs/coredump.c
1378
dump_skip(cprm, PAGE_SIZE);
fs/coredump.c
1652
return PAGE_SIZE;
fs/coredump.c
1771
m->dump_size = PAGE_SIZE;
fs/cramfs/inode.c
182
#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE)
fs/cramfs/inode.c
205
offset &= PAGE_SIZE - 1;
fs/cramfs/inode.c
251
memcpy_from_page(data, page, 0, PAGE_SIZE);
fs/cramfs/inode.c
254
memset(data, 0, PAGE_SIZE);
fs/cramfs/inode.c
255
data += PAGE_SIZE;
fs/cramfs/inode.c
312
u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
fs/cramfs/inode.c
352
return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
fs/cramfs/inode.c
378
max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/cramfs/inode.c
412
pages * PAGE_SIZE, vma->vm_page_prot);
fs/cramfs/inode.c
423
unsigned long off = i * PAGE_SIZE;
fs/cramfs/inode.c
461
pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/cramfs/inode.c
462
max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/cramfs/inode.c
471
file, pgoff*PAGE_SIZE, len, addr);
fs/cramfs/inode.c
527
sbi->size = PAGE_SIZE;
fs/cramfs/inode.c
649
err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
fs/cramfs/inode.c
651
if (err || sbi->mtd_point_size != PAGE_SIZE) {
fs/cramfs/inode.c
666
mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
fs/cramfs/inode.c
689
buf->f_bsize = PAGE_SIZE;
fs/cramfs/inode.c
829
maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/cramfs/inode.c
854
block_len = PAGE_SIZE;
fs/cramfs/inode.c
883
block_start += PAGE_SIZE;
fs/cramfs/inode.c
896
else if (unlikely(block_len > 2*PAGE_SIZE ||
fs/cramfs/inode.c
897
(uncompressed && block_len > PAGE_SIZE))) {
fs/cramfs/inode.c
908
PAGE_SIZE,
fs/cramfs/inode.c
917
memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
fs/crypto/bio.c
198
if (offset == PAGE_SIZE || du_remaining == 0) {
fs/crypto/inline_crypt.c
125
sb->s_blocksize != PAGE_SIZE)
fs/dax.c
1173
dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
fs/dax.c
1432
copy_len += PAGE_SIZE - mod;
fs/dax.c
1494
ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
fs/dax.c
1527
length = min_t(u64, PAGE_SIZE - offset, length);
fs/dax.c
1530
if (IS_ALIGNED(pos, PAGE_SIZE) && length == PAGE_SIZE)
fs/dax.c
1635
unsigned offset = pos & (PAGE_SIZE - 1);
fs/dax.c
1636
const size_t size = ALIGN(length + offset, PAGE_SIZE);
fs/dax.c
1662
ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
fs/dax.c
1811
size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
fs/dax.c
1870
.len = PAGE_SIZE,
fs/dax.c
1909
if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
fs/dax.c
1923
iter.status = iomap_iter_advance(&iter, PAGE_SIZE);
fs/dax.c
1994
max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
fs/dax.c
2147
size_t len = PAGE_SIZE << order;
fs/dax.c
2178
ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
fs/dax.c
2183
ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
fs/dax.c
342
return PAGE_SIZE;
fs/debugfs/file.c
1072
if (pos + count + 1 > PAGE_SIZE)
fs/direct-io.c
187
sdio->to = PAGE_SIZE;
fs/direct-io.c
194
sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
fs/direct-io.c
195
sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
fs/direct-io.c
698
if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
fs/direct-io.c
921
to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
fs/dlm/config.c
189
return snprintf(buf, PAGE_SIZE, "%u\n", dlm_config.ci_##name); \
fs/dlm/debug_fs.c
709
if (count > PAGE_SIZE || count < sizeof(struct dlm_header))
fs/dlm/debug_fs.c
712
buf = kmalloc(PAGE_SIZE, GFP_NOFS);
fs/dlm/lockspace.c
104
return snprintf(buf, PAGE_SIZE, "%x\n", status);
fs/dlm/lockspace.c
109
return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
fs/dlm/lockspace.c
72
return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
fs/dlm/lockspace.c
86
return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls));
fs/dlm/lowcomms.c
122
#define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
fs/dlm/lowcomms.c
1269
BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
fs/ecryptfs/crypto.c
1312
memset(page_virt, 0, PAGE_SIZE);
fs/ecryptfs/crypto.c
1345
memset(page_virt, 0, PAGE_SIZE);
fs/ecryptfs/crypto.c
193
remainder_of_page = PAGE_SIZE - offset;
fs/ecryptfs/crypto.c
281
(loff_t)folio->index * PAGE_SIZE;
fs/ecryptfs/crypto.c
310
extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
fs/ecryptfs/crypto.c
373
extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
fs/ecryptfs/crypto.c
388
PAGE_SIZE);
fs/ecryptfs/crypto.c
436
rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
fs/ecryptfs/crypto.c
447
extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
fs/ecryptfs/crypto.c
537
if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
fs/ecryptfs/crypto.c
541
crypt_stat->metadata_size = PAGE_SIZE;
fs/ecryptfs/inode.c
759
size_t num_zeros = (PAGE_SIZE
fs/ecryptfs/keystore.c
1740
size_t max_packet_size = ((PAGE_SIZE - 8) - i);
fs/ecryptfs/main.c
686
.size = PAGE_SIZE,
fs/ecryptfs/main.c
691
.size = PAGE_SIZE,
fs/ecryptfs/main.c
808
if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) {
fs/ecryptfs/main.c
816
(unsigned long)PAGE_SIZE);
fs/ecryptfs/mmap.c
111
memset(page_virt, 0, PAGE_SIZE);
fs/ecryptfs/mmap.c
219
if ((i_size_read(inode) / PAGE_SIZE) != folio->index)
fs/ecryptfs/mmap.c
221
end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
fs/ecryptfs/mmap.c
224
folio_zero_segment(folio, end_byte_in_page, PAGE_SIZE);
fs/ecryptfs/mmap.c
265
folio, index, 0, PAGE_SIZE, mapping->host);
fs/ecryptfs/mmap.c
291
folio, index, 0, PAGE_SIZE,
fs/ecryptfs/mmap.c
305
folio_zero_range(folio, 0, PAGE_SIZE);
fs/ecryptfs/mmap.c
307
} else if (len < PAGE_SIZE) {
fs/ecryptfs/mmap.c
340
folio_zero_range(folio, 0, PAGE_SIZE);
fs/ecryptfs/mmap.c
403
xattr_virt, PAGE_SIZE);
fs/ecryptfs/mmap.c
446
unsigned from = pos & (PAGE_SIZE - 1);
fs/ecryptfs/mmap.c
466
if (copied < PAGE_SIZE) {
fs/ecryptfs/mmap.c
95
loff_t num_extents_per_page = (PAGE_SIZE
fs/ecryptfs/read_write.c
115
size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
fs/ecryptfs/read_write.c
156
PAGE_SIZE - start_offset_in_page);
fs/ecryptfs/read_write.c
255
offset = (loff_t)page_index * PAGE_SIZE + offset_in_page;
fs/ecryptfs/read_write.c
65
offset = (loff_t)folio_for_lower->index * PAGE_SIZE + offset_in_page;
fs/efivarfs/super.c
353
sb->s_blocksize = PAGE_SIZE;
fs/erofs/decompressor.c
10
#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
fs/erofs/decompressor.c
171
cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin);
fs/erofs/decompressor.c
317
insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
fs/erofs/decompressor.c
327
cnt = min(insz - pi, PAGE_SIZE - po);
fs/erofs/decompressor.c
355
dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
fs/erofs/decompressor.c
382
dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
fs/erofs/decompressor.c
44
DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
fs/erofs/decompressor.c
87
kaddr + PAGE_SIZE == page_address(page)) {
fs/erofs/decompressor.c
88
kaddr += PAGE_SIZE;
fs/erofs/decompressor_deflate.c
137
strm->z.avail_in = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
fs/erofs/decompressor_deflate.c
8
u8 bounce[PAGE_SIZE];
fs/erofs/decompressor_lzma.c
183
buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
fs/erofs/decompressor_lzma.c
8
u8 bounce[PAGE_SIZE];
fs/erofs/decompressor_zstd.c
170
in_buf.size = min_t(u32, rq->inputsize, PAGE_SIZE - rq->pageofs_in);
fs/erofs/decompressor_zstd.c
7
u8 bounce[PAGE_SIZE];
fs/erofs/dir.c
56
EROFS_I_SB(dir)->dir_ra_bytes, PAGE_SIZE);
fs/erofs/dir.c
57
pgoff_t nr_pages = DIV_ROUND_UP_POW2(dir->i_size, PAGE_SIZE);
fs/erofs/dir.c
74
pgoff_t idx = DIV_ROUND_UP_POW2(ctx->pos, PAGE_SIZE);
fs/erofs/fscache.c
269
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
fs/erofs/fscache.c
274
iov_iter_zero(PAGE_SIZE - size, &iter);
fs/erofs/fscache.c
276
req->submitted += PAGE_SIZE;
fs/erofs/fscache.c
291
DBG_BUGON(!count || count % PAGE_SIZE);
fs/erofs/fscache.c
46
pgoff_t start_page = req->start / PAGE_SIZE;
fs/erofs/fscache.c
47
pgoff_t last_page = ((req->start + req->len) / PAGE_SIZE) - 1;
fs/erofs/super.c
299
if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
fs/erofs/super.c
676
sb->s_blocksize = PAGE_SIZE;
fs/erofs/super.c
688
if (!sb_set_blocksize(sb, PAGE_SIZE)) {
fs/erofs/sysfs.c
147
return z_erofs_crypto_show_engines(buf, PAGE_SIZE, '\n');
fs/erofs/zdata.c
1019
tight = (bs == PAGE_SIZE);
fs/erofs/zdata.c
1033
pgs = round_down(cur, PAGE_SIZE);
fs/erofs/zdata.c
1057
pgs = round_down(end - 1, PAGE_SIZE);
fs/erofs/zdata.c
1094
tight = (bs == PAGE_SIZE);
fs/erofs/zdata.c
1134
if (!(poff & ~PAGE_MASK) && (bvec->end == PAGE_SIZE ||
fs/erofs/zdata.c
1175
len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
fs/erofs/zdata.c
13
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
fs/erofs/zdata.c
1502
bvec->bv_len = PAGE_SIZE;
fs/erofs/zdata.c
1579
if (!tocache || bs != PAGE_SIZE ||
fs/erofs/zdata.c
159
iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
fs/erofs/zdata.c
1841
end = headoffset + PAGE_SIZE - 1;
fs/erofs/zdata.c
1850
cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
fs/erofs/zdata.c
1854
end = round_up(end, PAGE_SIZE);
fs/erofs/zdata.c
1856
end = round_up(map->m_la, PAGE_SIZE);
fs/erofs/zdata.c
1875
if (cur < PAGE_SIZE)
fs/erofs/zdata.c
547
if (i_blocksize(fe->inode) != PAGE_SIZE ||
fs/exec.c
1627
for (; offset < PAGE_SIZE && kaddr[offset];
fs/exec.c
1633
} while (offset == PAGE_SIZE);
fs/exec.c
207
page = bprm->page[pos / PAGE_SIZE];
fs/exec.c
212
bprm->page[pos / PAGE_SIZE] = page;
fs/exec.c
272
bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
fs/exec.c
490
offset = pos % PAGE_SIZE;
fs/exec.c
492
offset = PAGE_SIZE;
fs/exec.c
559
min_not_zero(offset_in_page(pos), PAGE_SIZE));
fs/exec.c
742
sp -= PAGE_SIZE - offset;
fs/exec.c
743
if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
fs/exec.c
750
bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
fs/exfat/dir.c
632
unsigned int page_ra_count = PAGE_SIZE >> sb->s_blocksize_bits;
fs/exfat/dir.c
664
unsigned int dentries_per_page = EXFAT_B_TO_DEN(PAGE_SIZE);
fs/exfat/file.c
605
len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
fs/ext2/dir.c
286
ctx->pos += PAGE_SIZE - offset;
fs/ext2/dir.c
43
#if (PAGE_SIZE >= 65536)
fs/ext2/dir.c
52
#if (PAGE_SIZE >= 65536)
fs/ext2/dir.c
80
if (last_byte > PAGE_SIZE)
fs/ext2/dir.c
81
last_byte = PAGE_SIZE;
fs/ext2/super.c
999
} else if (blocksize != PAGE_SIZE) {
fs/ext4/extents.c
5552
start = round_down(offset, PAGE_SIZE);
fs/ext4/extents.c
5650
start = round_down(offset, PAGE_SIZE);
fs/ext4/inline.c
526
if (len > PAGE_SIZE) {
fs/ext4/inode.c
1557
from = pos & (PAGE_SIZE - 1);
fs/ext4/inode.c
4295
if (!IS_ALIGNED(start | end, PAGE_SIZE) &&
fs/ext4/inode.c
4296
blocksize < PAGE_SIZE && start < inode->i_size) {
fs/ext4/inode.c
4297
loff_t page_boundary = round_up(start, PAGE_SIZE);
fs/ext4/inode.c
4302
round_down(end, PAGE_SIZE), end);
fs/ext4/inode.c
5735
offset = inode->i_size & (PAGE_SIZE - 1);
fs/ext4/inode.c
5745
if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
fs/ext4/mballoc-test.c
791
if (sb->s_blocksize > PAGE_SIZE)
fs/ext4/mballoc-test.c
858
if (sb->s_blocksize > PAGE_SIZE)
fs/ext4/mballoc-test.c
905
if (sb->s_blocksize > PAGE_SIZE)
fs/ext4/readpage.c
409
if (folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
fs/ext4/readpage.c
425
if (readahead_index(rac) < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
fs/ext4/super.c
4777
if (sb->s_blocksize == PAGE_SIZE)
fs/ext4/super.c
5105
} else if (sb->s_blocksize > PAGE_SIZE) {
fs/ext4/super.c
5107
sb->s_blocksize, PAGE_SIZE, err_str);
fs/ext4/verity.c
78
PAGE_SIZE - offset_in_page(pos));
fs/f2fs/checkpoint.c
1610
memcpy(folio_address(folio), src, PAGE_SIZE);
fs/f2fs/compress.c
1264
return f2fs_do_truncate_blocks(inode, round_up(from, PAGE_SIZE), lock);
fs/f2fs/compress.c
184
cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
fs/f2fs/compress.c
1962
memcpy(folio_address(cfolio), folio_address(folio), PAGE_SIZE);
fs/f2fs/compress.c
220
if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
fs/f2fs/compress.c
223
dic->rlen, PAGE_SIZE << dic->log_cluster_size);
fs/f2fs/compress.c
256
cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
fs/f2fs/compress.c
300
if (ret != PAGE_SIZE << dic->log_cluster_size) {
fs/f2fs/compress.c
303
ret, PAGE_SIZE << dic->log_cluster_size);
fs/f2fs/compress.c
358
cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
fs/f2fs/compress.c
375
int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
fs/f2fs/compress.c
475
PAGE_SIZE << dic->log_cluster_size);
fs/f2fs/compress.c
639
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
fs/f2fs/compress.c
667
max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
fs/f2fs/compress.c
683
new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
fs/f2fs/compress.c
687
(new_nr_cpages * PAGE_SIZE) -
fs/f2fs/compress.c
756
dic->rlen = PAGE_SIZE << dic->log_cluster_size;
fs/f2fs/compress.c
758
if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
fs/f2fs/compress.c
875
unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
fs/f2fs/data.c
1161
if (!bio_add_folio(bio, folio, PAGE_SIZE, 0))
fs/f2fs/data.c
1261
if (idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
fs/f2fs/data.c
2533
folio_zero_range(folio, page_offset, PAGE_SIZE);
fs/f2fs/data.c
2534
if (vi && !fsverity_verify_blocks(vi, folio, PAGE_SIZE, page_offset)) {
fs/f2fs/data.c
2802
PAGE_SIZE, 0, gfp_flags);
fs/f2fs/data.c
2818
page_address(fio->encrypted_page), PAGE_SIZE);
fs/f2fs/data.c
3121
offset = i_size & (PAGE_SIZE - 1);
fs/f2fs/data.c
3245
.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
fs/f2fs/data.c
3675
if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
fs/f2fs/data.c
3887
if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
fs/f2fs/data.c
3946
if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
fs/f2fs/data.c
769
wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);
fs/f2fs/debug.c
336
si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
fs/f2fs/dir.c
41
return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
fs/f2fs/extent_cache.c
904
if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
fs/f2fs/f2fs.h
1740
#define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size))
fs/f2fs/file.c
1288
off_start = offset & (PAGE_SIZE - 1);
fs/f2fs/file.c
1289
off_end = (offset + len) & (PAGE_SIZE - 1);
fs/f2fs/file.c
1299
PAGE_SIZE - off_start);
fs/f2fs/file.c
1487
memcpy_folio(fdst, 0, fsrc, 0, PAGE_SIZE);
fs/f2fs/file.c
1559
pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/f2fs/file.c
1701
off_start = offset & (PAGE_SIZE - 1);
fs/f2fs/file.c
1702
off_end = (offset + len) & (PAGE_SIZE - 1);
fs/f2fs/file.c
1714
PAGE_SIZE - off_start);
fs/f2fs/file.c
1825
idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/f2fs/file.c
1897
off_end = (offset + len) & (PAGE_SIZE - 1);
fs/f2fs/file.c
2560
end = DIV_ROUND_UP(offset + len, PAGE_SIZE);
fs/f2fs/file.c
2908
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
fs/f2fs/file.c
3901
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/f2fs/file.c
4079
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/f2fs/file.c
4513
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/f2fs/file.c
4593
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/f2fs/file.c
742
loff_t offset = from & (PAGE_SIZE - 1);
fs/f2fs/gc.c
1432
folio_address(mfolio), PAGE_SIZE);
fs/f2fs/inline.c
478
if (i_size_read(dir) < PAGE_SIZE)
fs/f2fs/inline.c
479
f2fs_i_size_write(dir, PAGE_SIZE);
fs/f2fs/node.c
163
memcpy(dst_addr, src_addr, PAGE_SIZE);
fs/f2fs/segment.c
2710
memcpy(folio_address(folio), src, PAGE_SIZE);
fs/f2fs/segment.c
2743
memset(dst, 0, PAGE_SIZE);
fs/f2fs/segment.c
305
f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
fs/f2fs/segment.c
317
pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/f2fs/segment.c
4474
memset(kaddr, 0, PAGE_SIZE);
fs/f2fs/segment.c
4493
memset(kaddr, 0, PAGE_SIZE);
fs/f2fs/segment.h
439
memset(raw_sit, 0, PAGE_SIZE);
fs/f2fs/verity.c
48
PAGE_SIZE - offset_in_page(pos));
fs/f2fs/verity.c
82
PAGE_SIZE - offset_in_page(pos));
fs/f2fs/xattr.h
74
#define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer))
fs/file_attr.c
393
if (usize > PAGE_SIZE)
fs/file_attr.c
446
if (usize > PAGE_SIZE)
fs/file_table.c
618
n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
fs/freevxfs/vxfs_immed.c
38
memcpy_to_page(folio_page(folio, i), 0, src, PAGE_SIZE);
fs/freevxfs/vxfs_immed.c
39
src += PAGE_SIZE;
fs/freevxfs/vxfs_inode.c
189
offset = (ino % (PAGE_SIZE / VXFS_ISIZE)) * VXFS_ISIZE;
fs/freevxfs/vxfs_inode.c
190
pp = vxfs_get_page(ilistp->i_mapping, ino * VXFS_ISIZE / PAGE_SIZE);
fs/freevxfs/vxfs_lookup.c
229
while (pg_ofs < PAGE_SIZE && pos < limit) {
fs/freevxfs/vxfs_lookup.c
26
#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_SIZE / (sbp)->s_blocksize))
fs/freevxfs/vxfs_lookup.c
79
while (pg_ofs < PAGE_SIZE && pos < limit) {
fs/fsopen.c
133
fs_name = strndup_user(_fs_name, PAGE_SIZE);
fs/fuse/acl.c
132
if (size > PAGE_SIZE) {
fs/fuse/acl.c
38
value = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/fuse/acl.c
41
size = fuse_getxattr(inode, name, value, PAGE_SIZE);
fs/fuse/cuse.c
438
BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
fs/fuse/dax.c
22
#define FUSE_DAX_PAGES (FUSE_DAX_SZ / PAGE_SIZE)
fs/fuse/dax.c
413
iomap->length = ALIGN(len, PAGE_SIZE);
fs/fuse/dev.c
1160
if (folio_test_highmem(folio) && count > PAGE_SIZE - offset_in_page(offset))
fs/fuse/dev.c
1161
copy = PAGE_SIZE - offset_in_page(offset);
fs/fuse/dev.c
1816
nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/fuse/dev.c
1885
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/fuse/dev.c
1920
nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/fuse/dev.c
855
buf->len = PAGE_SIZE - cs->len;
fs/fuse/dev.c
911
cs->len = PAGE_SIZE;
fs/fuse/dev.c
917
err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
fs/fuse/dev_uring.c
244
max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE);
fs/fuse/dir.c
1841
if (WARN_ON(res >= PAGE_SIZE))
fs/fuse/file.c
1245
unsigned offset = pos & (PAGE_SIZE - 1);
fs/fuse/file.c
1604
nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
fs/fuse/file.c
1610
unsigned int len = umin(ret, PAGE_SIZE - start);
fs/fuse/file.c
2175
if ((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT > fc->max_pages)
fs/fuse/file.c
3127
ALIGN_DOWN(pos_out, PAGE_SIZE),
fs/fuse/file.c
3128
ALIGN(pos_out + bytes_copied, PAGE_SIZE) - 1);
fs/fuse/fuse_i.h
1123
descs[i].length = PAGE_SIZE - descs[i].offset;
fs/fuse/inode.c
1341
ra_pages = arg->max_readahead / PAGE_SIZE;
fs/fuse/inode.c
1460
ra_pages = fc->max_read / PAGE_SIZE;
fs/fuse/inode.c
1494
ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
fs/fuse/inode.c
1848
sb->s_blocksize = PAGE_SIZE;
fs/fuse/ioctl.c
145
const __u32 max_buffer_len = FUSE_VERITY_ENABLE_ARG_MAX_PAGES * PAGE_SIZE;
fs/fuse/ioctl.c
251
BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
fs/fuse/ioctl.c
302
out_size = max_t(size_t, out_size, PAGE_SIZE);
fs/fuse/ioctl.c
303
max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
fs/fuse/ioctl.c
330
c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii);
fs/fuse/ioctl.c
331
if (c != PAGE_SIZE && iov_iter_count(&ii))
fs/fuse/ioctl.c
397
c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii);
fs/fuse/ioctl.c
398
if (c != PAGE_SIZE && iov_iter_count(&ii))
fs/fuse/readdir.c
107
end = ALIGN(fi->rdc.size, PAGE_SIZE);
fs/fuse/readdir.c
344
size_t bufsize = clamp((unsigned int) ctx->count, PAGE_SIZE, fc->max_pages << PAGE_SHIFT);
fs/fuse/readdir.c
523
size = PAGE_SIZE;
fs/fuse/readdir.c
573
if (size == PAGE_SIZE) {
fs/fuse/readdir.c
575
ff->readdir.cache_off = ALIGN(ff->readdir.cache_off, PAGE_SIZE);
fs/fuse/readdir.c
58
if (offset + reclen > PAGE_SIZE) {
fs/fuse/virtio_fs.c
1012
size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;
fs/fuse/virtio_fs.c
237
const size_t size = PAGE_SIZE - 1;
fs/gfs2/aops.c
426
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
fs/gfs2/bmap.c
1353
if (offs && chunk > PAGE_SIZE)
fs/gfs2/bmap.c
2394
if (offs && chunk > PAGE_SIZE)
fs/gfs2/file.c
1281
gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
fs/gfs2/file.c
1335
PAGE_SIZE >> inode->i_blkbits);
fs/gfs2/file.c
796
size = PAGE_SIZE;
fs/gfs2/file.c
927
window_size <= PAGE_SIZE;
fs/gfs2/glock.c
2588
#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
fs/gfs2/lops.c
428
for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
fs/gfs2/lops.c
549
(PAGE_SIZE - off) >> bsize_shift;
fs/gfs2/lops.c
576
blocks_read += PAGE_SIZE >> bsize_shift;
fs/gfs2/lops.c
587
blocks_read += PAGE_SIZE >> bsize_shift;
fs/gfs2/ops_fstype.c
167
if (sb->sb_bsize < SECTOR_SIZE || sb->sb_bsize > PAGE_SIZE ||
fs/gfs2/ops_fstype.c
224
sb = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/gfs2/ops_fstype.c
229
sb, PAGE_SIZE, REQ_OP_READ | REQ_META);
fs/gfs2/ops_fstype.c
482
if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
fs/gfs2/ops_fstype.c
486
sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
fs/gfs2/quota.c
1314
unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
fs/gfs2/quota.c
809
if ((pg_off + nbytes) > PAGE_SIZE)
fs/gfs2/quota.c
810
overflow = (pg_off + nbytes) - PAGE_SIZE;
fs/hfs/bnode.c
533
min((int)PAGE_SIZE, (int)tree->node_size));
fs/hfs/bnode.c
536
memzero_page(*++pagep, 0, PAGE_SIZE);
fs/hfs/bnode.c
89
bytes_to_read = min_t(u32, len - bytes_read, PAGE_SIZE - off);
fs/hfs/btree.c
153
tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/hfs/btree.c
330
if (++off >= PAGE_SIZE) {
fs/hfs/inode.c
92
if (tree->node_size >= PAGE_SIZE) {
fs/hfs/mdb.c
136
size = min(HFS_SB(sb)->alloc_blksz, (u32)PAGE_SIZE);
fs/hfsplus/bitmap.c
17
#define PAGE_CACHE_BITS (PAGE_SIZE * 8)
fs/hfsplus/bnode.c
117
l = min_t(u32, len, PAGE_SIZE - off);
fs/hfsplus/bnode.c
123
l = min_t(u32, len, PAGE_SIZE);
fs/hfsplus/bnode.c
159
l = min_t(u32, len, PAGE_SIZE - off);
fs/hfsplus/bnode.c
164
l = min_t(u32, len, PAGE_SIZE);
fs/hfsplus/bnode.c
191
l = min_t(u32, len, PAGE_SIZE - src);
fs/hfsplus/bnode.c
196
l = min_t(u32, len, PAGE_SIZE);
fs/hfsplus/bnode.c
206
if (PAGE_SIZE - src < PAGE_SIZE - dst) {
fs/hfsplus/bnode.c
207
l = PAGE_SIZE - src;
fs/hfsplus/bnode.c
211
l = PAGE_SIZE - dst;
fs/hfsplus/bnode.c
260
src = PAGE_SIZE;
fs/hfsplus/bnode.c
277
src = PAGE_SIZE;
fs/hfsplus/bnode.c
282
dst = PAGE_SIZE;
fs/hfsplus/bnode.c
289
if (dst == PAGE_SIZE)
fs/hfsplus/bnode.c
302
l = min_t(u32, len, PAGE_SIZE - src);
fs/hfsplus/bnode.c
312
l = min_t(u32, len, PAGE_SIZE);
fs/hfsplus/bnode.c
324
if (PAGE_SIZE - src <
fs/hfsplus/bnode.c
325
PAGE_SIZE - dst) {
fs/hfsplus/bnode.c
326
l = PAGE_SIZE - src;
fs/hfsplus/bnode.c
330
l = PAGE_SIZE - dst;
fs/hfsplus/bnode.c
46
l = min_t(u32, len, PAGE_SIZE - off);
fs/hfsplus/bnode.c
51
l = min_t(u32, len, PAGE_SIZE);
fs/hfsplus/bnode.c
644
min_t(int, PAGE_SIZE, tree->node_size));
fs/hfsplus/bnode.c
647
memzero_page(*++pagep, 0, PAGE_SIZE);
fs/hfsplus/btree.c
240
(tree->node_size + PAGE_SIZE - 1) >>
fs/hfsplus/btree.c
426
if (++off >= PAGE_SIZE) {
fs/hfsplus/inode.c
88
if (tree->node_size >= PAGE_SIZE) {
fs/hfsplus/wrapper.c
214
blocksize = min_t(u32, sbi->alloc_blksz, PAGE_SIZE);
fs/hfsplus/xattr.c
221
for (; written < node_size; index++, written += PAGE_SIZE) {
fs/hfsplus/xattr.c
232
min_t(size_t, PAGE_SIZE, node_size - written));
fs/hostfs/hostfs_kern.c
437
PAGE_SIZE);
fs/hpfs/namei.c
491
err = hpfs_read_ea(i->i_sb, fnode, "SYMLINK", link, PAGE_SIZE);
fs/hugetlbfs/inode.c
218
struct page *page = folio_page(folio, offset / PAGE_SIZE);
fs/hugetlbfs/inode.c
224
safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
fs/hugetlbfs/inode.c
228
for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++)
fs/ioctl.c
429
if (size > PAGE_SIZE) {
fs/iomap/bio.c
94
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
fs/iomap/buffered-io.c
1190
if (chunk > PAGE_SIZE)
fs/iomap/buffered-io.c
1317
start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
fs/iomap/buffered-io.c
1318
PAGE_SIZE;
fs/iomap/buffered-io.c
869
len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
fs/iomap/ioend.c
489
return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
fs/iomap/swapfile.c
148
.len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE),
fs/iomap/swapfile.c
45
first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
fs/iomap/swapfile.c
46
next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
fs/isofs/compress.c
124
stream.avail_out = PAGE_SIZE - poffset;
fs/isofs/compress.c
128
stream.avail_out = PAGE_SIZE;
fs/isofs/compress.c
230
end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);
fs/isofs/compress.c
26
static char zisofs_sink_page[PAGE_SIZE];
fs/isofs/compress.c
291
memzero_page(*pages, poffset, PAGE_SIZE - poffset);
fs/isofs/compress.c
315
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/isofs/compress.c
70
memzero_page(pages[i], 0, PAGE_SIZE);
fs/isofs/inode.c
1054
if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
fs/isofs/joliet.c
52
outname, PAGE_SIZE);
fs/isofs/rock.c
763
link + (PAGE_SIZE - 1));
fs/jbd2/journal.c
2742
if (size == PAGE_SIZE)
fs/jbd2/journal.c
2784
if (size < PAGE_SIZE)
fs/jbd2/journal.c
2798
if (size < PAGE_SIZE)
fs/jffs2/compr_lzo.c
33
lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
fs/jffs2/debug.c
105
if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag)
fs/jffs2/debug.c
106
&& frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) {
fs/jffs2/debug.c
98
if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag)
fs/jffs2/debug.c
99
&& frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) {
fs/jffs2/erase.c
349
ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/jffs2/erase.c
359
uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
fs/jffs2/file.c
255
unsigned start = pos & (PAGE_SIZE - 1);
fs/jffs2/file.c
272
if (end == PAGE_SIZE) {
fs/jffs2/file.c
98
PAGE_SIZE);
fs/jffs2/fs.c
593
sb->s_blocksize = PAGE_SIZE;
fs/jffs2/gc.c
1196
min = start & ~(PAGE_SIZE-1);
fs/jffs2/gc.c
1197
max = min + PAGE_SIZE;
fs/jffs2/gc.c
1358
writebuf = pg_ptr + (offset & (PAGE_SIZE -1));
fs/jffs2/nodelist.c
385
if (newfrag->ofs & (PAGE_SIZE-1)) {
fs/jffs2/nodelist.c
394
if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) {
fs/jffs2/nodelist.c
93
if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
fs/jffs2/scan.c
119
try_size = PAGE_SIZE;
fs/jffs2/write.c
175
if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) ||
fs/jffs2/write.c
176
( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) &&
fs/jffs2/write.c
370
PAGE_SIZE - (offset & (PAGE_SIZE-1)));
fs/jfs/jfs_dtree.c
2917
(dirent_buf + PAGE_SIZE)) {
fs/jfs/jfs_logmgr.c
1820
for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
fs/jfs/jfs_metapage.c
398
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
fs/jfs/jfs_metapage.c
449
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
fs/jfs/jfs_metapage.c
618
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
fs/jfs/jfs_metapage.c
702
if ((page_offset + size) > PAGE_SIZE) {
fs/jfs/jfs_metapage.c
721
if (new && (PSIZE == PAGE_SIZE)) {
fs/jfs/jfs_metapage.c
73
#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
fs/jfs/jfs_metapage.c
919
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
fs/kernfs/file.c
242
ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
fs/kernfs/file.c
322
len = min_t(size_t, len, PAGE_SIZE);
fs/kernfs/file.c
690
int len = of->atomic_write_len ?: PAGE_SIZE;
fs/kernfs/mount.c
293
sb->s_blocksize = PAGE_SIZE;
fs/kernfs/symlink.c
137
body = kzalloc(PAGE_SIZE, GFP_KERNEL);
fs/libfs.c
1044
s->s_blocksize = PAGE_SIZE;
fs/libfs.c
50
buf->f_bsize = PAGE_SIZE;
fs/libfs.c
683
s->s_blocksize = PAGE_SIZE;
fs/libfs.c
950
folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
fs/minix/dir.c
230
limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
fs/minix/dir.c
36
unsigned last_byte = PAGE_SIZE;
fs/minix/dir.c
39
last_byte = inode->i_size & (PAGE_SIZE - 1);
fs/mpage.c
182
last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
fs/namei.c
6327
nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
fs/namespace.c
3299
mntpath = d_path(mountpoint, buf, PAGE_SIZE);
fs/namespace.c
4037
copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/namespace.c
4041
left = copy_from_user(copy, data, PAGE_SIZE);
fs/namespace.c
4047
offset = PAGE_SIZE - left;
fs/namespace.c
4057
if (left == PAGE_SIZE) {
fs/namespace.c
4096
((char *)data_page)[PAGE_SIZE - 1] = 0;
fs/namespace.c
5079
if (unlikely(usize > PAGE_SIZE))
fs/namespace.c
5861
if (unlikely(usize > PAGE_SIZE))
fs/netfs/buffered_read.c
408
size_t nr_bvec = flen / PAGE_SIZE + 2;
fs/netfs/buffered_read.c
450
part = min_t(size_t, to - off, PAGE_SIZE);
fs/netfs/buffered_write.c
41
pgoff_t index = pos / PAGE_SIZE;
fs/netfs/buffered_write.c
45
fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);
fs/netfs/fscache_io.c
176
pgoff_t first = start / PAGE_SIZE;
fs/netfs/fscache_io.c
177
pgoff_t last = (start + len - 1) / PAGE_SIZE;
fs/netfs/iterator.c
198
pgoff_t index = pos / PAGE_SIZE;
fs/netfs/iterator.c
82
cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);
fs/netfs/iterator.c
91
len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
fs/netfs/misc.c
25
size = round_up(size, PAGE_SIZE);
fs/netfs/misc.c
50
if (size - *_cur_size > PAGE_SIZE)
fs/netfs/misc.c
61
folio->index = *_cur_size / PAGE_SIZE;
fs/netfs/read_collect.c
138
fsize = PAGE_SIZE << order;
fs/netfs/read_collect.c
233
size_t fsize = PAGE_SIZE << rreq->front_folio_order;
fs/netfs/read_collect.c
479
size_t fsize = PAGE_SIZE << rreq->front_folio_order;
fs/netfs/rolling_buffer.c
140
size += PAGE_SIZE << order;
fs/nfs/blocklayout/blocklayout.c
274
if (pg_offset + bytes_left > PAGE_SIZE)
fs/nfs/blocklayout/blocklayout.c
275
pg_len = PAGE_SIZE - pg_offset;
fs/nfs/blocklayout/blocklayout.c
280
pg_len = PAGE_SIZE;
fs/nfs/blocklayout/blocklayout.c
353
PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
fs/nfs/blocklayout/blocklayout.c
421
pg_len = PAGE_SIZE;
fs/nfs/blocklayout/blocklayout.c
785
end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
fs/nfs/blocklayout/blocklayout.c
812
if (server->pnfs_blksize > PAGE_SIZE) {
fs/nfs/blocklayout/blocklayout.c
894
end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
fs/nfs/blocklayout/blocklayout.c
912
if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
fs/nfs/blocklayout/blocklayout.c
941
if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
fs/nfs/blocklayout/blocklayout.h
43
#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT)
fs/nfs/blocklayout/extent_tree.c
496
int nr_pages = DIV_ROUND_UP(buffer_size, PAGE_SIZE), i;
fs/nfs/blocklayout/extent_tree.c
637
size_t count = 0, buffer_size = PAGE_SIZE;
fs/nfs/blocklayout/extent_tree.c
657
DIV_ROUND_UP(buffer_size, PAGE_SIZE),
fs/nfs/blocklayout/extent_tree.c
681
for ( ; p < end; p += PAGE_SIZE) {
fs/nfs/blocklayout/rpc_pipefs.c
71
if (b->simple.len > PAGE_SIZE)
fs/nfs/callback_xdr.c
918
if (maxlen > 0 && maxlen < PAGE_SIZE) {
fs/nfs/client.c
866
server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/nfs/dir.c
2644
if (pathlen > PAGE_SIZE)
fs/nfs/dir.c
2656
if (pathlen < PAGE_SIZE)
fs/nfs/dir.c
2657
memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
fs/nfs/dir.c
293
return (PAGE_SIZE - sizeof(struct nfs_cache_array)) /
fs/nfs/dir.c
955
array_size = (dtsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/nfs/direct.c
348
size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
fs/nfs/direct.c
368
npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
fs/nfs/direct.c
371
unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
fs/nfs/direct.c
872
size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
fs/nfs/direct.c
896
npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
fs/nfs/direct.c
899
unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
fs/nfs/flexfilelayout/flexfilelayout.c
2587
.buflen = PAGE_SIZE,
fs/nfs/fs_context.c
1504
int max_namelen = PAGE_SIZE;
fs/nfs/inode.c
1095
stat->dio_offset_align = PAGE_SIZE;
fs/nfs/internal.h
831
if (proto == XPRT_TRANSPORT_UDP || iosize < PAGE_SIZE)
fs/nfs/internal.h
900
return ((unsigned long)len + (unsigned long)base + PAGE_SIZE - 1) >>
fs/nfs/localio.c
484
len = min_t(size_t, total, PAGE_SIZE - base);
fs/nfs/nfs42proc.c
1370
#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
fs/nfs/nfs42proc.c
1536
np = xdrlen / PAGE_SIZE + 1;
fs/nfs/nfs42proc.c
1603
pages, np * PAGE_SIZE);
fs/nfs/nfs42xattr.c
186
XATTR_NAME_MAX + 1 > PAGE_SIZE);
fs/nfs/nfs42xattr.c
195
if (alloclen + len <= PAGE_SIZE) {
fs/nfs/nfs4namespace.c
502
if (buf->len <= 0 || buf->len > PAGE_SIZE)
fs/nfs/nfs4proc.c
5931
#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
fs/nfs/nfs4proc.c
5942
len = min_t(size_t, PAGE_SIZE, buflen);
fs/nfs/nfs4proc.c
6019
if (buflen <= PAGE_SIZE) {
fs/nfs/nfs4proc.c
6072
npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
fs/nfs/nfs4proc.c
6090
args.acl_len = npages * PAGE_SIZE;
fs/nfs/nfs4proc.c
6184
unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
fs/nfs/nfs4xdr.c
2894
PAGE_SIZE, replen);
fs/nfs/nfs4xdr.c
5690
if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE)
fs/nfs/nfs4xdr.c
7209
xdr_enter_page(xdr, PAGE_SIZE);
fs/nfs/nfs4xdr.c
7222
xdr_enter_page(xdr, PAGE_SIZE);
fs/nfs/pagelist.c
628
sizeof(struct page *) > PAGE_SIZE)
fs/nfs/pagelist.c
63
size_t len = PAGE_SIZE - offset_in_page(base);
fs/nfs/pnfs.c
1183
size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/nfs/pnfs.c
1193
lgp->args.layout.pglen = max_pages * PAGE_SIZE;
fs/nfs/pnfs.c
1199
lgp->args.minlength = PAGE_SIZE;
fs/nfs/pnfs.c
1225
size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;
fs/nfs/read.c
92
npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/nfs/super.c
631
devname = nfs_path(&dummy, root, page, PAGE_SIZE, 0);
fs/nfs/symlink.c
34
error = NFS_PROTO(inode)->readlink(inode, &folio->page, 0, PAGE_SIZE);
fs/nfs/write.c
1315
offset = round_down(offset, PAGE_SIZE);
fs/nfs/write.c
1317
end = min(round_up(end, PAGE_SIZE), pagelen);
fs/nfsd/blocklayout.c
126
nr_extents_max = (min(args->lg_maxcount, PAGE_SIZE) -
fs/nfsd/export.c
104
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/nfsd/export.c
110
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
fs/nfsd/export.c
120
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
fs/nfsd/export.c
127
if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
fs/nfsd/export.c
150
len = qword_get(&mesg, buf, PAGE_SIZE);
fs/nfsd/export.c
501
len = qword_get(mesg, buf, PAGE_SIZE);
fs/nfsd/export.c
510
len = qword_get(mesg, buf, PAGE_SIZE);
fs/nfsd/export.c
609
len = qword_get(mesg, buf, PAGE_SIZE);
fs/nfsd/export.c
633
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/nfsd/export.c
639
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
fs/nfsd/export.c
649
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
fs/nfsd/export.c
694
while (qword_get(&mesg, buf, PAGE_SIZE) > 0) {
fs/nfsd/nfs3proc.c
558
rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/nfsd/nfs3proc.c
667
resp->f_rtmult = PAGE_SIZE;
fs/nfsd/nfs3proc.c
670
resp->f_wtmult = PAGE_SIZE;
fs/nfsd/nfs4idmap.c
217
buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/nfsd/nfs4idmap.c
224
len = qword_get(&buf, buf1, PAGE_SIZE);
fs/nfsd/nfs4idmap.c
230
if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
fs/nfsd/nfs4idmap.c
236
if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
fs/nfsd/nfs4idmap.c
254
len = qword_get(&buf, buf1, PAGE_SIZE);
fs/nfsd/nfs4idmap.c
386
buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/nfsd/nfs4idmap.c
393
len = qword_get(&buf, buf1, PAGE_SIZE);
fs/nfsd/nfs4idmap.c
399
if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
fs/nfsd/nfs4idmap.c
405
len = qword_get(&buf, buf1, PAGE_SIZE);
fs/nfsd/nfs4proc.c
3219
buflen = (rqstp->rq_page_end - rqstp->rq_next_page) * PAGE_SIZE;
fs/nfsd/nfs4proc.c
3363
return (op_encode_hdr_size + 1) * sizeof(__be32) + PAGE_SIZE;
fs/nfsd/nfs4xdr.c
2314
len = min_t(u32, buflen, PAGE_SIZE);
fs/nfsd/nfs4xdr.c
2696
if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
fs/nfsd/nfs4xdr.c
4913
maxcount = PAGE_SIZE;
fs/nfsd/nfs4xdr.c
5876
cplen = min_t(u32, buflen, PAGE_SIZE);
fs/nfsd/nfs4xdr.c
5883
if (cplen < PAGE_SIZE) {
fs/nfsd/nfs4xdr.c
5893
buflen -= PAGE_SIZE;
fs/nfsd/nfs4xdr.c
5894
buf += PAGE_SIZE;
fs/nfsd/nfscache.c
376
idx = subbuf.page_base / PAGE_SIZE;
fs/nfsd/nfscache.c
380
len = min_t(unsigned int, PAGE_SIZE - base, remaining);
fs/nfsd/nfsproc.c
575
buf->buflen = clamp(count, (u32)(XDR_UNIT * 2), (u32)PAGE_SIZE);
fs/nfsd/vfs.c
1011
last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
fs/nfsd/vfs.c
1012
for (page += offset / PAGE_SIZE; page <= last_page; page++) {
fs/nfsd/vfs.c
1026
rqstp->rq_res.page_base = offset % PAGE_SIZE;
fs/nfsd/vfs.c
1136
len = min_t(size_t, total, PAGE_SIZE);
fs/nfsd/vfs.c
1225
len = min_t(size_t, total, PAGE_SIZE - base);
fs/nfsd/vfs.c
2385
if (buf->used + reclen > PAGE_SIZE) {
fs/nilfs2/dir.c
255
ctx->pos += PAGE_SIZE - offset;
fs/nilfs2/dir.c
41
#if (PAGE_SIZE >= 65536)
fs/nilfs2/dir.c
50
#if (PAGE_SIZE >= 65536)
fs/nilfs2/dir.c
77
if (last_byte > PAGE_SIZE)
fs/nilfs2/dir.c
78
last_byte = PAGE_SIZE;
fs/nilfs2/inode.c
247
unsigned int start = pos & (PAGE_SIZE - 1);
fs/nilfs2/ioctl.c
61
if ((size_t)argv->v_size > PAGE_SIZE)
fs/nilfs2/ioctl.c
75
maxmembs = PAGE_SIZE / argv->v_size;
fs/ntfs3/attrib.c
1562
voff = vbo[1] & (PAGE_SIZE - 1);
fs/ntfs3/attrib.c
1576
u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
fs/ntfs3/attrib.c
1577
u64 to = min(from + PAGE_SIZE, wof_size);
fs/ntfs3/attrib.c
1603
off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
fs/ntfs3/attrib.c
1606
off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
fs/ntfs3/bitmap.c
525
ra->ra_pages = (wnd->nbits / 8 + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/ntfs3/file.c
1000
if (frame_size < PAGE_SIZE) {
fs/ntfs3/file.c
1064
folio_zero_segment(folio, off, PAGE_SIZE);
fs/ntfs3/file.c
1134
size_t cp, tail = PAGE_SIZE - off;
fs/ntfs3/file.c
458
sbi->cluster_size, PAGE_SIZE));
fs/ntfs3/fslog.c
1175
ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);
fs/ntfs3/fslog.c
3789
#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
fs/ntfs3/fslog.c
3790
log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, true);
fs/ntfs3/fslog.c
3792
log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, false);
fs/ntfs3/fsntfs.c
1547
op = PAGE_SIZE - off;
fs/ntfs3/fsntfs.c
1617
memset(kaddr, -1, PAGE_SIZE);
fs/ntfs3/fsntfs.c
1644
u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
fs/ntfs3/inode.c
1115
size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
fs/ntfs3/inode.c
1129
data = Add2Ptr(data, PAGE_SIZE);
fs/ntfs3/inode.c
2127
ret = kmalloc(PAGE_SIZE, GFP_NOFS);
fs/ntfs3/inode.c
2131
err = ntfs_readlink_hlp(de, inode, ret, PAGE_SIZE);
fs/ntfs3/inode.c
628
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
fs/ntfs3/inode.c
908
u32 off_a = offset & (PAGE_SIZE - 1);
fs/ntfs3/inode.c
910
tail = PAGE_SIZE - off_a;
fs/ntfs3/inode.c
912
tail = PAGE_SIZE;
fs/ntfs3/ntfs_fs.h
128
struct buffer_head *bh[PAGE_SIZE >> SECTOR_SHIFT];
fs/ntfs3/run.c
378
} else if (used <= 16 * PAGE_SIZE) {
fs/ntfs3/run.c
385
bytes = run->allocated + (16 * PAGE_SIZE);
fs/ntfs3/super.c
1184
sb_set_blocksize(sb, min_t(u32, sbi->cluster_size, PAGE_SIZE));
fs/ntfs3/super.c
1226
u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
fs/ntfs3/super.c
983
if (!sb_min_blocksize(sb, PAGE_SIZE)) {
fs/nullfs.c
16
s->s_blocksize = PAGE_SIZE;
fs/ocfs2/alloc.c
7122
unsigned int page_end = min_t(unsigned, PAGE_SIZE,
fs/ocfs2/aops.c
1250
wc->w_target_from = pos & (PAGE_SIZE - 1);
fs/ocfs2/aops.c
1287
wc->w_target_to = PAGE_SIZE;
fs/ocfs2/aops.c
1929
size_t from, to, start = pos & (PAGE_SIZE - 1);
fs/ocfs2/aops.c
492
unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
fs/ocfs2/aops.c
505
BUG_ON(cluster_start > PAGE_SIZE);
fs/ocfs2/aops.c
506
BUG_ON(cluster_end > PAGE_SIZE);
fs/ocfs2/aops.c
664
#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
fs/ocfs2/aops.c
667
#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
fs/ocfs2/aops.c
670
#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
fs/ocfs2/aops.c
902
unsigned from = user_pos & (PAGE_SIZE - 1),
fs/ocfs2/aops.c
940
map_from = user_pos & (PAGE_SIZE - 1);
fs/ocfs2/cluster/heartbeat.c
1288
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/ocfs2/cluster/heartbeat.c
1311
out += scnprintf(buf + out, PAGE_SIZE - out, "%d\n",
fs/ocfs2/cluster/heartbeat.c
1321
out += scnprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
fs/ocfs2/cluster/heartbeat.c
1326
out += scnprintf(buf + out, PAGE_SIZE - out, "%u\n",
fs/ocfs2/cluster/heartbeat.c
1335
out += scnprintf(buf + out, PAGE_SIZE - out, "%d ", i);
fs/ocfs2/cluster/heartbeat.c
1336
out += scnprintf(buf + out, PAGE_SIZE - out, "\n");
fs/ocfs2/cluster/heartbeat.c
1656
reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
fs/ocfs2/cluster/heartbeat.c
538
vec_start = (cs << bits) % PAGE_SIZE;
fs/ocfs2/cluster/heartbeat.c
543
vec_len = min(PAGE_SIZE - vec_start,
fs/ocfs2/cluster/heartbeat.c
544
(max_slots-cs) * (PAGE_SIZE/spp) );
fs/ocfs2/cluster/heartbeat.c
552
cs += vec_len / (PAGE_SIZE/spp);
fs/ocfs2/cluster/masklog.c
31
return snprintf(buf, PAGE_SIZE, "%s\n", state);
fs/ocfs2/cluster/netdebug.c
444
out += scnprintf(buf + out, PAGE_SIZE - out, "%d ", i);
fs/ocfs2/cluster/netdebug.c
445
out += scnprintf(buf + out, PAGE_SIZE - out, "\n");
fs/ocfs2/cluster/netdebug.c
454
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/ocfs2/cluster/netdebug.c
458
i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
fs/ocfs2/cluster/sys.c
24
return snprintf(buf, PAGE_SIZE, "%u\n", O2NM_API_VERSION);
fs/ocfs2/dlm/dlmcommon.h
21
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
fs/ocfs2/dlm/dlmcommon.h
24
# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
fs/ocfs2/dlm/dlmcommon.h
26
#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
fs/ocfs2/dlm/dlmdebug.c
265
dump_mle(mle, buf, PAGE_SIZE - 1);
fs/ocfs2/dlm/dlmdebug.c
334
i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1));
fs/ocfs2/dlm/dlmdebug.c
391
i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1));
fs/ocfs2/dlm/dlmdebug.c
564
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/ocfs2/dlm/dlmdebug.c
572
dl->dl_len = PAGE_SIZE;
fs/ocfs2/dlm/dlmdebug.c
782
i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));
fs/ocfs2/dlmfs/dlmfs.c
511
sb->s_blocksize = PAGE_SIZE;
fs/ocfs2/file.c
2744
round_down(pos_out, PAGE_SIZE),
fs/ocfs2/file.c
2745
round_up(pos_out + len, PAGE_SIZE) - 1);
fs/ocfs2/file.c
487
new_i_size + PAGE_SIZE - 1, 0, 1);
fs/ocfs2/file.c
507
unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
fs/ocfs2/file.c
947
next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
fs/ocfs2/filecheck.c
303
ssize_t ret = 0, total = 0, remain = PAGE_SIZE;
fs/ocfs2/mmap.c
54
unsigned int len = PAGE_SIZE;
fs/ocfs2/refcounttree.c
2931
from = offset & (PAGE_SIZE - 1);
fs/ocfs2/refcounttree.c
2932
to = PAGE_SIZE;
fs/ocfs2/refcounttree.c
2933
if (map_end & (PAGE_SIZE - 1))
fs/ocfs2/refcounttree.c
2934
to = map_end & (PAGE_SIZE - 1);
fs/ocfs2/refcounttree.c
2949
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
fs/ocfs2/stackglue.c
480
ret = snprintf(buf, PAGE_SIZE, "%u.%u\n",
fs/ocfs2/stackglue.c
496
ssize_t ret = 0, total = 0, remain = PAGE_SIZE;
fs/ocfs2/stackglue.c
528
ret = snprintf(buf, PAGE_SIZE, "%s\n",
fs/ocfs2/stackglue.c
530
if (ret >= PAGE_SIZE)
fs/ocfs2/stackglue.c
548
ret = snprintf(buf, PAGE_SIZE, "%s\n", cluster_stack_name);
fs/ocfs2/stackglue.c
598
return snprintf(buf, PAGE_SIZE, "1\n");
fs/ocfs2/super.c
348
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/ocfs2/super.c
352
i_size_write(inode, ocfs2_osb_dump(osb, buf, PAGE_SIZE));
fs/ocfs2/super.c
588
if (bytes > PAGE_SIZE) {
fs/ocfs2/super.c
589
bytes = PAGE_SIZE;
fs/omfs/inode.c
510
if (sbi->s_sys_blocksize > PAGE_SIZE) {
fs/open.c
1402
if (unlikely(usize > PAGE_SIZE))
fs/orangefs/inode.c
190
ow->maxpages = orangefs_bufmap_size_query()/PAGE_SIZE;
fs/orangefs/inode.c
229
loff_t pages_remaining = bytes_remaining / PAGE_SIZE;
fs/orangefs/inode.c
297
folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
fs/orangefs/inode.c
361
unsigned from = pos & (PAGE_SIZE - 1);
fs/orangefs/inode.c
367
(len == PAGE_SIZE || pos + len == inode->i_size)) {
fs/orangefs/inode.c
368
folio_zero_segment(folio, from + copied, PAGE_SIZE);
fs/orangefs/inode.c
386
if (offset == 0 && length == PAGE_SIZE) {
fs/orangefs/inode.c
638
wr->len = PAGE_SIZE;
fs/orangefs/inode.c
653
wr->len = PAGE_SIZE;
fs/orangefs/orangefs-bufmap.c
226
bufmap->page_count = bufmap->total_size / PAGE_SIZE;
fs/orangefs/orangefs-bufmap.c
250
int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
fs/orangefs/orangefs-bufmap.c
283
(user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
fs/orangefs/orangefs-bufmap.c
341
if ((user_desc->size % PAGE_SIZE) != 0) {
fs/orangefs/orangefs-bufmap.c
484
if (n > PAGE_SIZE)
fs/orangefs/orangefs-bufmap.c
485
n = PAGE_SIZE;
fs/orangefs/orangefs-bufmap.c
513
if (n > PAGE_SIZE)
fs/orangefs/orangefs-bufmap.c
514
n = PAGE_SIZE;
fs/orangefs/orangefs-utils.c
329
inode->i_size = PAGE_SIZE;
fs/orangefs/super.c
421
sb->s_blocksize = PAGE_SIZE;
fs/pipe.c
1278
if (size < PAGE_SIZE)
fs/pipe.c
1279
return PAGE_SIZE;
fs/pipe.c
1403
return pipe->max_usage * PAGE_SIZE;
fs/pipe.c
1443
ret = pipe->max_usage * PAGE_SIZE;
fs/pipe.c
477
chars = total_len & (PAGE_SIZE-1);
fs/pipe.c
483
offset + chars <= PAGE_SIZE) {
fs/pipe.c
522
copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
fs/pipe.c
523
if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
fs/pipe.c
804
if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
fs/proc/base.c
1041
max_len = min_t(size_t, PAGE_SIZE, count);
fs/proc/base.c
1646
if ((*ppos != 0) || (count >= PAGE_SIZE))
fs/proc/base.c
261
if (pos >= PAGE_SIZE)
fs/proc/base.c
269
got = access_remote_vm(mm, arg_start, page, PAGE_SIZE, FOLL_ANON);
fs/proc/base.c
2826
if (count > PAGE_SIZE)
fs/proc/base.c
2827
count = PAGE_SIZE;
fs/proc/base.c
357
size_t size = min_t(size_t, PAGE_SIZE, count);
fs/proc/base.c
924
size_t this_len = min_t(size_t, count, PAGE_SIZE);
fs/proc/generic.c
835
if (size == 0 || size > PAGE_SIZE - 1)
fs/proc/kcore.c
409
phdr->p_align = PAGE_SIZE;
fs/proc/kcore.c
481
if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
fs/proc/kcore.c
624
tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
fs/proc/kcore.c
645
filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/proc/task_mmu.c
1013
smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
fs/proc/task_mmu.c
1041
mss->swap += PAGE_SIZE;
fs/proc/task_mmu.c
1044
u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
fs/proc/task_mmu.c
1049
mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
fs/proc/task_mmu.c
1127
for (; addr != end; pte++, addr += PAGE_SIZE)
fs/proc/task_mmu.c
1715
for (; addr != end; pte++, addr += PAGE_SIZE) {
fs/proc/task_mmu.c
1912
for (; addr < hole_end; addr += PAGE_SIZE) {
fs/proc/task_mmu.c
1924
for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
fs/proc/task_mmu.c
2056
for (; addr != end; addr += PAGE_SIZE, idx++) {
fs/proc/task_mmu.c
2106
for (; addr < end; pte++, addr += PAGE_SIZE) {
fs/proc/task_mmu.c
2160
for (; addr != end; addr += PAGE_SIZE) {
fs/proc/task_mmu.c
2549
p->found_pages -= (end - addr) / PAGE_SIZE;
fs/proc/task_mmu.c
2661
n_pages = (*end - addr) / PAGE_SIZE;
fs/proc/task_mmu.c
2665
*end -= n_too_much * PAGE_SIZE;
fs/proc/task_mmu.c
2759
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
fs/proc/task_mmu.c
2768
flush_end = addr + PAGE_SIZE;
fs/proc/task_mmu.c
2776
for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
fs/proc/task_mmu.c
2777
unsigned long next = addr + PAGE_SIZE;
fs/proc/task_mmu.c
2797
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
fs/proc/task_mmu.c
2801
unsigned long next = addr + PAGE_SIZE;
fs/proc/task_mmu.c
2945
if (!IS_ALIGNED(arg->start, PAGE_SIZE))
fs/proc/task_mmu.c
2958
arg->end = ALIGN(arg->end, PAGE_SIZE);
fs/proc/task_mmu.c
3235
HPAGE_PMD_SIZE/PAGE_SIZE);
fs/proc/task_mmu.c
3252
} while (pte++, addr += PAGE_SIZE, addr != end);
fs/proc/task_mmu.c
668
if (usize > PAGE_SIZE)
fs/proc/task_mmu.c
91
return PAGE_SIZE * mm->total_vm;
fs/proc/task_mmu.c
922
unsigned long size = nr * PAGE_SIZE;
fs/proc/task_mmu.c
977
unsigned long pss = PAGE_SIZE << PSS_SHIFT;
fs/proc/task_mmu.c
986
smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
fs/proc/vmcore.c
1071
*notes_sz = roundup(phdr_sz, PAGE_SIZE);
fs/proc/vmcore.c
1085
phdr.p_offset = roundup(note_off, PAGE_SIZE);
fs/proc/vmcore.c
1100
*elfsz = roundup(*elfsz, PAGE_SIZE);
fs/proc/vmcore.c
1138
start = rounddown(paddr, PAGE_SIZE);
fs/proc/vmcore.c
1139
end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
fs/proc/vmcore.c
1175
start = rounddown(paddr, PAGE_SIZE);
fs/proc/vmcore.c
1176
end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
fs/proc/vmcore.c
1414
start = rounddown(phdr->p_offset, PAGE_SIZE);
fs/proc/vmcore.c
1416
PAGE_SIZE);
fs/proc/vmcore.c
1434
start = rounddown(phdr->p_offset, PAGE_SIZE);
fs/proc/vmcore.c
1436
PAGE_SIZE);
fs/proc/vmcore.c
1456
elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
fs/proc/vmcore.c
1498
PAGE_SIZE);
fs/proc/vmcore.c
1585
start = rounddown(phdr->p_offset, PAGE_SIZE);
fs/proc/vmcore.c
1586
end = roundup(phdr->p_offset + phdr->p_memsz, PAGE_SIZE);
fs/proc/vmcore.c
1610
new_size = roundup(new_size, PAGE_SIZE);
fs/proc/vmcore.c
1628
WARN_ON_ONCE(!IS_ALIGNED(cur->paddr | cur->size, PAGE_SIZE));
fs/proc/vmcore.c
170
offset = (unsigned long)(*ppos % PAGE_SIZE);
fs/proc/vmcore.c
171
pfn = (unsigned long)(*ppos / PAGE_SIZE);
fs/proc/vmcore.c
175
if (count > (PAGE_SIZE - offset))
fs/proc/vmcore.c
176
nr_bytes = PAGE_SIZE - offset;
fs/proc/vmcore.c
487
kvec.iov_len = PAGE_SIZE;
fs/proc/vmcore.c
488
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);
fs/proc/vmcore.c
553
PAGE_SIZE, prot))
fs/proc/vmcore.c
555
len += PAGE_SIZE;
fs/proc/vmcore.c
880
*notes_sz = roundup(phdr_sz, PAGE_SIZE);
fs/proc/vmcore.c
894
phdr.p_offset = roundup(note_off, PAGE_SIZE);
fs/proc/vmcore.c
909
*elfsz = roundup(*elfsz, PAGE_SIZE);
fs/pstore/inode.c
417
sb->s_blocksize = PAGE_SIZE;
fs/pstore/ram_core.c
424
page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
fs/pstore/ram_core.c
449
phys_addr_t addr = page_start + i * PAGE_SIZE;
fs/qnx6/dir.c
43
if (last_byte > PAGE_SIZE)
fs/qnx6/dir.c
44
last_byte = PAGE_SIZE;
fs/quota/dquot.c
3027
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
fs/quota/dquot.c
3036
" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
fs/ramfs/file-nommu.c
212
lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/ramfs/file-nommu.c
216
maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/ramfs/file-nommu.c
90
npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/ramfs/file-nommu.c
99
newsize = PAGE_SIZE * npages;
fs/ramfs/inode.c
266
sb->s_blocksize = PAGE_SIZE;
fs/remap_range.c
202
loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
fs/remap_range.c
203
PAGE_SIZE - offset_in_page(dstoff));
fs/resctrl/rdtgroup.c
333
.atomic_write_len = PAGE_SIZE,
fs/resctrl/rdtgroup.c
339
.atomic_write_len = PAGE_SIZE,
fs/romfs/mmap-nommu.c
33
lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/romfs/mmap-nommu.c
37
maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/romfs/super.c
118
fillsize = size > PAGE_SIZE ? PAGE_SIZE : size;
fs/select.c
102
((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
fs/select.c
840
#define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
fs/seq_file.c
103
m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
fs/seq_file.c
211
m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
fs/smb/client/cifsfs.c
1306
pgoff_t index = pos / PAGE_SIZE;
fs/smb/client/cifsfs.c
291
sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
fs/smb/client/cifsfs.c
293
sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
fs/smb/client/cifsproto.h
560
PAGE_SIZE);
fs/smb/client/cifsproto.h
569
nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
fs/smb/client/cifsproto.h
586
unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off);
fs/smb/client/cifsproto.h
592
addr += PAGE_SIZE;
fs/smb/client/compress.c
176
if (iov_iter_count(&iter) < PAGE_SIZE - SZ_2K)
fs/smb/client/compress.h
27
#define SMB_COMPRESS_MIN_LEN PAGE_SIZE
fs/smb/client/file.c
1831
PAGE_SIZE);
fs/smb/client/file.c
1833
PAGE_SIZE);
fs/smb/client/file.c
2207
PAGE_SIZE);
fs/smb/client/file.c
2209
PAGE_SIZE);
fs/smb/client/fs_context.h
26
if (!size || !IS_ALIGNED(size, PAGE_SIZE)) {
fs/smb/client/fs_context.h
28
name, PAGE_SIZE);
fs/smb/client/fs_context.h
29
size = umax(round_down(size, PAGE_SIZE), PAGE_SIZE);
fs/smb/client/fs_context.h
405
size = umax(server->ops->negotiate_rsize(tcon, ctx), PAGE_SIZE);
fs/smb/client/fs_context.h
407
size = umax(umin(ctx->rsize, size), PAGE_SIZE);
fs/smb/client/fs_context.h
408
ctx->rsize = round_down(size, PAGE_SIZE);
fs/smb/client/fs_context.h
417
size = umax(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE);
fs/smb/client/fs_context.h
419
size = umax(umin(ctx->wsize, size), PAGE_SIZE);
fs/smb/client/fs_context.h
420
ctx->wsize = round_down(size, PAGE_SIZE);
fs/smb/client/inode.c
69
if (tcon->ses->server->max_read < PAGE_SIZE + MAX_CIFS_HDR_SIZE)
fs/smb/client/smb2file.c
281
BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
fs/smb/client/smb2file.c
282
max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
fs/smb/client/smb2file.c
424
BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
fs/smb/client/smb2file.c
425
max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
fs/smb/client/smb2ops.c
4785
cur_page_idx = pad_len / PAGE_SIZE;
fs/smb/client/smb2ops.c
4786
cur_off = pad_len % PAGE_SIZE;
fs/smb/client/smb2ops.c
4949
len = round_up(dw->len, PAGE_SIZE);
fs/smb/client/smbdirect.c
2950
rc = ib_map_mr_sg(mr->mr, mr->sgt.sgl, mr->sgt.nents, NULL, PAGE_SIZE);
fs/smb/client/smbdirect.c
3217
seg = min_t(size_t, len, PAGE_SIZE - off);
fs/smb/client/smbdirect.c
3229
kaddr += PAGE_SIZE;
fs/smb/client/smbdirect.c
703
sp->max_frmr_depth * PAGE_SIZE);
fs/smb/client/smbdirect.c
704
sp->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
fs/smb/server/auth.c
770
PAGE_SIZE - 1) >> PAGE_SHIFT) -
fs/smb/server/auth.c
797
unsigned int bytes = PAGE_SIZE - offset;
fs/smb/server/transport_rdma.c
1434
len = min_t(int, PAGE_SIZE - offset, size);
fs/smb/server/transport_rdma.c
1445
buf += PAGE_SIZE;
fs/smb/server/transport_rdma.c
147
return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
fs/smb/server/transport_rdma.c
148
(uintptr_t)buf / PAGE_SIZE;
fs/smb/server/transport_rdma.c
1701
size_t fplen = min_t(size_t, PAGE_SIZE - fpofs, v->iov_len);
fs/smb/server/transport_rdma.c
1702
size_t elen = min_t(size_t, v->iov_len - fplen, epages*PAGE_SIZE);
fs/smb/server/transport_rdma.c
2210
max_send_sges = DIV_ROUND_UP(sp->max_send_size, PAGE_SIZE) + 3;
fs/smb/server/transport_rdma.c
2218
maxpages = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE);
fs/splice.c
1466
n = DIV_ROUND_UP(left + start, PAGE_SIZE);
fs/splice.c
1468
int size = umin(left, PAGE_SIZE - start);
fs/splice.c
333
len = min_t(size_t, len, npages * PAGE_SIZE);
fs/splice.c
334
npages = DIV_ROUND_UP(len, PAGE_SIZE);
fs/splice.c
348
remain = len = min_t(size_t, len, npages * PAGE_SIZE);
fs/splice.c
351
chunk = min_t(size_t, PAGE_SIZE, remain);
fs/splice.c
365
keep = DIV_ROUND_UP(ret, PAGE_SIZE);
fs/splice.c
385
chunk = min_t(size_t, remain, PAGE_SIZE);
fs/squashfs/block.c
225
int offset = read_start - round_down(index, PAGE_SIZE);
fs/squashfs/block.c
227
const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE);
fs/squashfs/block.c
239
min_t(unsigned int, PAGE_SIZE - offset, total_len);
fs/squashfs/block.c
51
PAGE_SIZE - actor_offset);
fs/squashfs/block.c
63
if (actor_offset >= PAGE_SIZE) {
fs/squashfs/cache.c
269
entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/squashfs/cache.c
308
void *buff = entry->data[offset / PAGE_SIZE]
fs/squashfs/cache.c
309
+ (offset % PAGE_SIZE);
fs/squashfs/cache.c
311
PAGE_SIZE - (offset % PAGE_SIZE));
fs/squashfs/cache.c
415
int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/squashfs/cache.c
436
for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
fs/squashfs/decompressor.c
98
buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/squashfs/file.c
167
__le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/squashfs/file.c
175
int blocks = min_t(int, n, PAGE_SIZE >> 2);
fs/squashfs/file.c
383
memset(pageaddr + copied, 0, PAGE_SIZE - copied);
fs/squashfs/file.c
409
bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
fs/squashfs/file.c
411
size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0;
fs/squashfs/file.c
476
if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
fs/squashfs/file.c
529
for (copied = offset = 0; offset < expected; offset += PAGE_SIZE) {
fs/squashfs/file.c
530
int avail = min_t(int, expected - offset, PAGE_SIZE);
fs/squashfs/file.c
548
bytes = copied % PAGE_SIZE;
fs/squashfs/file.c
550
memzero_page(last_page, bytes, PAGE_SIZE - bytes);
fs/squashfs/file.c
605
max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/squashfs/file.c
640
bytes = res % PAGE_SIZE;
fs/squashfs/file.c
643
PAGE_SIZE - bytes);
fs/squashfs/file_direct.c
89
bytes = res % PAGE_SIZE;
fs/squashfs/file_direct.c
92
memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
fs/squashfs/inode.c
309
if (inode->i_size > PAGE_SIZE) {
fs/squashfs/lz4_wrapper.c
121
if (bytes <= PAGE_SIZE) {
fs/squashfs/lz4_wrapper.c
127
memcpy(data, buff, PAGE_SIZE);
fs/squashfs/lz4_wrapper.c
128
buff += PAGE_SIZE;
fs/squashfs/lz4_wrapper.c
129
bytes -= PAGE_SIZE;
fs/squashfs/lzo_wrapper.c
101
memcpy(data, buff, PAGE_SIZE);
fs/squashfs/lzo_wrapper.c
102
buff += PAGE_SIZE;
fs/squashfs/lzo_wrapper.c
103
bytes -= PAGE_SIZE;
fs/squashfs/lzo_wrapper.c
95
if (bytes <= PAGE_SIZE) {
fs/squashfs/page_actor.c
119
actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
fs/squashfs/page_actor.c
128
actor->length = length ? : pages * PAGE_SIZE;
fs/squashfs/page_actor.c
51
actor->length = length ? : pages * PAGE_SIZE;
fs/squashfs/page_actor.c
70
int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
fs/squashfs/super.c
272
if (PAGE_SIZE > msblk->block_size) {
fs/squashfs/super.c
338
if (msblk->devblksize == PAGE_SIZE) {
fs/squashfs/symlink.c
41
int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
fs/squashfs/symlink.c
86
memset(pageaddr + length, 0, PAGE_SIZE - length);
fs/squashfs/xz_wrapper.c
132
stream->buf.out_size = PAGE_SIZE;
fs/squashfs/xz_wrapper.c
168
total += PAGE_SIZE;
fs/squashfs/zlib_wrapper.c
61
stream->avail_out = PAGE_SIZE;
fs/squashfs/zlib_wrapper.c
97
stream->avail_out = PAGE_SIZE;
fs/squashfs/zstd_wrapper.c
122
out_buf.size = PAGE_SIZE;
fs/squashfs/zstd_wrapper.c
81
out_buf.size = PAGE_SIZE;
fs/sysfs/file.c
314
PAGE_SIZE, ops, (void *)attr, ns, key);
fs/sysfs/file.c
59
if (count < PAGE_SIZE) {
fs/sysfs/file.c
63
memset(buf, 0, PAGE_SIZE);
fs/sysfs/file.c
73
if (count >= (ssize_t)PAGE_SIZE) {
fs/sysfs/file.c
758
len = vscnprintf(buf, PAGE_SIZE, fmt, args);
fs/sysfs/file.c
77
count = PAGE_SIZE - 1;
fs/sysfs/file.c
781
if (WARN(!buf || offset_in_page(buf) || at < 0 || at >= PAGE_SIZE,
fs/sysfs/file.c
786
len = vscnprintf(buf + at, PAGE_SIZE - at, fmt, args);
fs/ubifs/file.c
666
int len = i_size & (PAGE_SIZE - 1);
fs/ubifs/super.c
2438
if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
fs/ubifs/super.c
2440
current->pid, (unsigned int)PAGE_SIZE);
fs/ubifs/ubifs.h
47
#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE)
fs/udf/file.c
65
end = PAGE_SIZE;
fs/udf/inode.c
265
if (WARN_ON_ONCE(pos >= PAGE_SIZE))
fs/udf/symlink.c
127
err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
fs/ufs/dir.c
219
if (last_byte > PAGE_SIZE)
fs/ufs/dir.c
220
last_byte = PAGE_SIZE;
fs/ufs/dir.c
439
ctx->pos += PAGE_SIZE - offset;
fs/vboxsf/file.c
232
u32 nread = PAGE_SIZE;
fs/verity/open.c
149
if ((params->block_size != PAGE_SIZE && offset > 1 << 23) ||
fs/verity/open.c
217
if (vi->tree_params.block_size != PAGE_SIZE) {
fs/verity/read_metadata.c
53
PAGE_SIZE - offs_in_page);
fs/xattr.c
726
if (usize > PAGE_SIZE)
fs/xattr.c
865
if (usize > PAGE_SIZE)
fs/xfs/libxfs/xfs_bmap.c
3326
} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
fs/xfs/libxfs/xfs_bmap.c
3330
args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
fs/xfs/libxfs/xfs_ialloc.c
3081
if (mp->m_sb.sb_blocksize > PAGE_SIZE)
fs/xfs/scrub/attr_repair.c
83
#define XREP_XATTR_MAX_STASH_BYTES (PAGE_SIZE * 8)
fs/xfs/scrub/btree.c
707
if (cur_sz > PAGE_SIZE) {
fs/xfs/scrub/dir_repair.c
116
#define XREP_DIR_MAX_STASH_BYTES (PAGE_SIZE * 8)
fs/xfs/scrub/parent_repair.c
104
#define XREP_PARENT_MAX_STASH_BYTES (PAGE_SIZE * 8)
fs/xfs/scrub/parent_repair.c
184
#define XREP_PARENT_XATTR_MAX_STASH_BYTES (PAGE_SIZE * 8)
fs/xfs/scrub/scrub.c
925
if (vec_bytes > PAGE_SIZE)
fs/xfs/scrub/xfarray.c
299
if (pgoff != 0 && pgoff + array->obj_size - 1 < PAGE_SIZE)
fs/xfs/scrub/xfarray.c
80
ASSERT(obj_size < PAGE_SIZE);
fs/xfs/scrub/xfblob.c
166
xfile_discard(blob->xfile, PAGE_SIZE, MAX_LFS_FILESIZE - PAGE_SIZE);
fs/xfs/scrub/xfblob.c
167
blob->last_offset = PAGE_SIZE;
fs/xfs/scrub/xfblob.c
51
blob->last_offset = PAGE_SIZE;
fs/xfs/scrub/xfile.c
139
PAGE_SIZE - offset_in_page(pos));
fs/xfs/scrub/xfile.h
23
#define XFILE_MAX_FOLIO_SIZE (PAGE_SIZE << MAX_PAGECACHE_ORDER)
fs/xfs/xfs_bio_io.c
9
return bio_max_segs(howmany(count, PAGE_SIZE));
fs/xfs/xfs_bmap_util.c
829
rounding = max_t(xfs_off_t, xfs_inode_alloc_unitsize(ip), PAGE_SIZE);
fs/xfs/xfs_bmap_util.c
912
round_down(offset + len, PAGE_SIZE), LLONG_MAX);
fs/xfs/xfs_buf.c
113
if (!xfs_buftarg_is_mem(bp->b_target) && size >= PAGE_SIZE)
fs/xfs/xfs_buf.c
1172
roundup(BBTOB(bp->b_length), PAGE_SIZE));
fs/xfs/xfs_buf.c
133
ASSERT(size < PAGE_SIZE);
fs/xfs/xfs_buf.c
199
if (size < PAGE_SIZE && is_power_of_2(size))
fs/xfs/xfs_buf.c
206
if (size <= PAGE_SIZE)
fs/xfs/xfs_buf.c
220
if (size > PAGE_SIZE) {
fs/xfs/xfs_buf.c
228
if (size <= PAGE_SIZE)
fs/xfs/xfs_buf_mem.h
9
#define XMBUF_BLOCKSIZE (PAGE_SIZE)
fs/xfs/xfs_error.c
82
return snprintf(buf, PAGE_SIZE, "%u\n",
fs/xfs/xfs_fsmap.c
1332
PAGE_SIZE / sizeof(struct fsmap));
fs/xfs/xfs_handle.c
630
if (!size || size > 16 * PAGE_SIZE)
fs/xfs/xfs_handle.c
776
bufsize = min(bufsize, PAGE_SIZE);
fs/xfs/xfs_healthmon.c
918
min(XFS_HEALTHMON_MAX_OUTBUF, max(PAGE_SIZE, user_bufsize));
fs/xfs/xfs_healthmon.c
922
if (bufsize == PAGE_SIZE)
fs/xfs/xfs_healthmon.c
925
bufsize = PAGE_SIZE;
fs/xfs/xfs_ioctl32.c
393
if (!size || size > 16 * PAGE_SIZE)
fs/xfs/xfs_iomap.c
1000
length = min_t(loff_t, length, 1024 * PAGE_SIZE);
fs/xfs/xfs_iomap.c
1691
XFS_B_TO_FSB(mp, 1024 * PAGE_SIZE));
fs/xfs/xfs_iomap.c
1930
count = min_t(loff_t, count, 1024 * PAGE_SIZE);
fs/xfs/xfs_iops.c
1373
if (mp->m_sb.sb_blocksize != PAGE_SIZE)
fs/xfs/xfs_iops.c
571
return max_t(uint32_t, PAGE_SIZE, mp->m_sb.sb_blocksize);
fs/xfs/xfs_iwalk.c
695
(PAGE_SIZE / sizeof(struct xfs_inobt_rec_incore))
fs/xfs/xfs_log.c
1403
size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
fs/xfs/xfs_log.c
1578
howmany(count, PAGE_SIZE),
fs/xfs/xfs_super.c
1802
if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
fs/xfs/xfs_super.c
1808
mp->m_sb.sb_blocksize, PAGE_SIZE);
fs/xfs/xfs_super.c
373
if (mp->m_super->s_blocksize != PAGE_SIZE) {
fs/xfs/xfs_sysfs.c
260
return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.larp);
fs/xfs/xfs_sysfs.c
286
return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_leaf_slack);
fs/xfs/xfs_sysfs.c
312
return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_node_slack);
include/asm-generic/fixmap.h
78
________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
include/asm-generic/pgalloc.h
164
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
include/asm-generic/pgalloc.h
215
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
include/asm-generic/pgalloc.h
263
BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
include/asm-generic/pgalloc.h
302
BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
include/asm-generic/shmparam.h
5
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
include/asm-generic/tlb.h
214
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
include/asm-generic/tlb.h
280
((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
include/asm-generic/tlb.h
518
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
include/asm-generic/tlb.h
657
tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
include/asm-generic/tlb.h
671
tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
include/asm-generic/tlb.h
677
address += PAGE_SIZE;
include/asm-generic/tlb.h
744
tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
include/asm-generic/tlb.h
753
tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
include/asm-generic/tlb.h
762
tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
include/asm-generic/tlb.h
771
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
1012
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
1014
. = ALIGN(PAGE_SIZE);
include/asm-generic/vmlinux.lds.h
1092
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
1116
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
1141
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
400
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
403
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
613
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
687
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
691
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
786
. = ALIGN(PAGE_SIZE); \
include/asm-generic/vmlinux.lds.h
788
. = ALIGN(PAGE_SIZE); \
include/crypto/if_alg.h
192
return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
include/crypto/if_alg.h
204
return PAGE_SIZE <= af_alg_sndbuf(sk);
include/crypto/if_alg.h
218
return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
include/crypto/if_alg.h
230
return PAGE_SIZE <= af_alg_rcvbuf(sk);
include/crypto/internal/rsa.h
77
if (err > PAGE_SIZE)
include/crypto/scatterwalk.h
191
base_page += offset / PAGE_SIZE;
include/crypto/scatterwalk.h
192
offset %= PAGE_SIZE;
include/crypto/scatterwalk.h
198
num_pages = nbytes / PAGE_SIZE;
include/crypto/scatterwalk.h
199
num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
include/crypto/scatterwalk.h
76
limit = PAGE_SIZE - offset_in_page(walk->offset);
include/crypto/scatterwalk.h
78
limit = PAGE_SIZE;
include/linux/badblocks.h
26
#define MAX_BADBLOCKS (PAGE_SIZE/8)
include/linux/bio.h
288
PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page);
include/linux/bio.h
458
return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE);
include/linux/blkdev.h
282
#define BLK_MAX_BLOCK_SIZE PAGE_SIZE
include/linux/bootmem_info.h
89
kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
include/linux/buffer_head.h
44
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
include/linux/bvec.h
125
(mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
include/linux/bvec.h
136
(mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE)
include/linux/bvec.h
140
PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
include/linux/bvec.h
235
bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
include/linux/bvec.h
59
unsigned long nr = offset / PAGE_SIZE;
include/linux/bvec.h
62
bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE);
include/linux/ceph/libceph.h
182
return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
include/linux/cma.h
21
#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
include/linux/cpumask.h
1417
#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
include/linux/cpumask.h
1418
? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
include/linux/cpumask.h
1419
#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
include/linux/damon.h
18
#define DAMON_MIN_REGION_SZ PAGE_SIZE
include/linux/execmem.h
12
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
include/linux/execmem.h
14
#define MODULE_ALIGN PAGE_SIZE
include/linux/f2fs_fs.h
18
#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
include/linux/firewire.h
583
return __fw_iso_context_create(card, type, channel, speed, header_size, PAGE_SIZE, cb,
include/linux/folio_queue.h
264
return PAGE_SIZE << folioq_folio_order(folioq, slot);
include/linux/fs.h
3464
#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
include/linux/highmem-internal.h
214
kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
include/linux/highmem-internal.h
241
kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
include/linux/highmem-internal.h
86
const struct page *page = folio_page(folio, offset / PAGE_SIZE);
include/linux/highmem-internal.h
87
return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
include/linux/highmem.h
241
addr += PAGE_SIZE;
include/linux/highmem.h
242
vaddr += PAGE_SIZE;
include/linux/highmem.h
293
vaddr += PAGE_SIZE;
include/linux/highmem.h
403
kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
include/linux/highmem.h
441
ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
include/linux/highmem.h
443
kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
include/linux/highmem.h
460
ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
include/linux/highmem.h
493
VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
include/linux/highmem.h
511
chunk > PAGE_SIZE - offset_in_page(dst_off))
include/linux/highmem.h
512
chunk = PAGE_SIZE - offset_in_page(dst_off);
include/linux/highmem.h
514
chunk > PAGE_SIZE - offset_in_page(src_off))
include/linux/highmem.h
515
chunk = PAGE_SIZE - offset_in_page(src_off);
include/linux/highmem.h
531
VM_BUG_ON(offset + len > PAGE_SIZE);
include/linux/highmem.h
541
VM_BUG_ON(offset + len > PAGE_SIZE);
include/linux/highmem.h
551
VM_BUG_ON(offset + len > PAGE_SIZE);
include/linux/highmem.h
561
VM_BUG_ON(offset + len > PAGE_SIZE);
include/linux/highmem.h
584
chunk > PAGE_SIZE - offset_in_page(offset))
include/linux/highmem.h
585
chunk = PAGE_SIZE - offset_in_page(offset);
include/linux/highmem.h
612
chunk > PAGE_SIZE - offset_in_page(offset))
include/linux/highmem.h
613
chunk = PAGE_SIZE - offset_in_page(offset);
include/linux/highmem.h
645
size_t max = PAGE_SIZE - offset_in_page(offset);
include/linux/highmem.h
652
max = PAGE_SIZE;
include/linux/highmem.h
683
size_t max = PAGE_SIZE - offset_in_page(offset);
include/linux/highmem.h
691
max = PAGE_SIZE;
include/linux/highmem.h
721
len = min_t(size_t, len, PAGE_SIZE - offset);
include/linux/huge_mm.h
223
unsigned long hpage_size = PAGE_SIZE << order;
include/linux/hugetlb.h
1172
return PAGE_SIZE;
include/linux/hugetlb.h
1182
return PAGE_SIZE;
include/linux/hugetlb.h
1187
return PAGE_SIZE;
include/linux/hugetlb.h
777
return (unsigned long)PAGE_SIZE << h->order;
include/linux/hugetlb.h
988
VM_WARN_ON(size == PAGE_SIZE);
include/linux/hyperv.h
158
u8 reserved2[PAGE_SIZE - 68];
include/linux/hyperv.h
1764
#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
include/linux/io-mapping.h
176
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
include/linux/io-mapping.h
193
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
include/linux/io.h
196
cursor += PAGE_SIZE;
include/linux/iomap.h
145
return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
include/linux/iov_iter.h
118
void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
include/linux/iov_iter.h
122
(size_t)(PAGE_SIZE - offset % PAGE_SIZE));
include/linux/iov_iter.h
123
remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
include/linux/iov_iter.h
173
part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
include/linux/iov_iter.h
210
pgoff_t index = start / PAGE_SIZE;
include/linux/iov_iter.h
231
PAGE_SIZE - offset_in_page(offset));
include/linux/kasan.h
53
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
include/linux/kexec.h
505
for (addr = begin; addr < end; addr += PAGE_SIZE)
include/linux/kexec.h
72
#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
include/linux/kfence.h
27
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
include/linux/kho/abi/kexec_handover.h
135
((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
include/linux/kho/abi/kexec_handover.h
149
static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
include/linux/kho/abi/luo.h
115
#define LUO_FDT_SIZE PAGE_SIZE
include/linux/kvm_host.h
144
#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
include/linux/kvm_host.h
1888
return slot->userspace_addr + offset * PAGE_SIZE;
include/linux/linkage.h
39
#define __page_aligned_data __section(".data..page_aligned") __aligned(PAGE_SIZE)
include/linux/linkage.h
40
#define __page_aligned_bss __section(".bss..page_aligned") __aligned(PAGE_SIZE)
include/linux/lp.h
31
#define LP_BUFFER_SIZE PAGE_SIZE
include/linux/memremap.h
295
return PAGE_SIZE;
include/linux/mlx4/device.h
1094
(offset & (PAGE_SIZE - 1));
include/linux/mlx4/device.h
657
MLX4_DB_PER_PAGE = PAGE_SIZE / 4
include/linux/mlx5/device.h
259
MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
include/linux/mm.h
1653
return PAGE_SIZE << compound_order(page);
include/linux/mm.h
1680
return PAGE_SIZE << thp_order(page);
include/linux/mm.h
227
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
include/linux/mm.h
230
#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
include/linux/mm.h
233
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
include/linux/mm.h
2572
return PAGE_SIZE << folio_order(folio);
include/linux/mm.h
3480
BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
include/linux/mm.h
3969
return PAGE_SIZE;
include/linux/mm.h
3992
vm_end = -PAGE_SIZE;
include/linux/mm.h
4418
addr += PAGE_SIZE;
include/linux/mm.h
4776
return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
include/linux/mm_types_task.h
38
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
include/linux/mm_types_task.h
58
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
include/linux/mmu_notifier.h
529
___nr * PAGE_SIZE); \
include/linux/mmu_notifier.h
553
___address + PAGE_SIZE); \
include/linux/mmzone.h
1948
#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
include/linux/ndctl.h
19
ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
include/linux/net.h
336
count += PAGE_SIZE;
include/linux/netlink.h
272
#if PAGE_SIZE < 8192UL
include/linux/netlink.h
273
#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE)
include/linux/nfs_page.h
222
return PAGE_SIZE;
include/linux/nfsacl.h
19
#define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \
include/linux/page-flags.h
219
if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
include/linux/page_counter.h
49
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
include/linux/page_frag_cache.h
11
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
include/linux/pagemap.h
1047
return ((loff_t)folio->index) * PAGE_SIZE;
include/linux/pagemap.h
1057
return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE;
include/linux/pagemap.h
1368
#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
include/linux/pagemap.h
1493
return (loff_t)rac->_index * PAGE_SIZE;
include/linux/pagemap.h
1502
return rac->_nr_pages * PAGE_SIZE;
include/linux/pagemap.h
1529
return rac->_batch_count * PAGE_SIZE;
include/linux/pagemap.h
1534
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
include/linux/pagemap.h
399
return PAGE_SIZE;
include/linux/pagemap.h
519
return PAGE_SIZE << mapping_max_folio_order(mapping);
include/linux/percpu-defs.h
161
__aligned(PAGE_SIZE)
include/linux/percpu-defs.h
165
__aligned(PAGE_SIZE)
include/linux/percpu.h
35
#define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE
include/linux/perf_event.h
2089
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
include/linux/pfn.h
10
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
include/linux/pfn.h
9
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
include/linux/pgtable.h
1066
addr += PAGE_SIZE;
include/linux/pgtable.h
1099
addr += PAGE_SIZE;
include/linux/pgtable.h
1326
arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
include/linux/pgtable.h
1583
addr += PAGE_SIZE;
include/linux/pgtable.h
1620
for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
include/linux/pgtable.h
1917
pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
include/linux/pgtable.h
2291
#define pte_leaf_size(x) PAGE_SIZE
include/linux/pgtable.h
662
addr += PAGE_SIZE;
include/linux/pgtable.h
849
addr += PAGE_SIZE;
include/linux/pgtable.h
910
addr += PAGE_SIZE;
include/linux/pgtable.h
997
addr += PAGE_SIZE;
include/linux/pipe_fs_i.h
304
#define PIPE_SIZE PAGE_SIZE
include/linux/raid/pq.h
179
# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
include/linux/raid/pq.h
183
# define free_pages(x, y) munmap((void *)(x), PAGE_SIZE << (y))
include/linux/raid/pq.h
41
#ifndef PAGE_SIZE
include/linux/raid/pq.h
47
extern const char raid6_empty_zero_page[PAGE_SIZE];
include/linux/scatterlist.h
161
VM_WARN_ON_ONCE(!page_range_contiguous(page, ALIGN(len + offset, PAGE_SIZE) / PAGE_SIZE));
include/linux/scatterlist.h
535
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
include/linux/scs.h
19
#define SCS_SIZE (PAGE_SIZE << SCS_ORDER)
include/linux/serial_core.h
839
#define UART_XMIT_SIZE PAGE_SIZE
include/linux/skbuff.h
268
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
include/linux/skbuff.h
438
p_off = (f_off) & (PAGE_SIZE - 1), \
include/linux/skbuff.h
440
min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
include/linux/skbuff.h
444
p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
include/linux/slab.h
574
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
include/linux/sunrpc/svc.h
172
return DIV_ROUND_UP(serv->sv_max_mesg, PAGE_SIZE) + 2 + 1;
include/linux/sunrpc/svc.h
532
xdr->end = resv->iov_base + PAGE_SIZE;
include/linux/sunrpc/svc.h
535
buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
include/linux/swap.h
134
char reserved[PAGE_SIZE - 10];
include/linux/threads.h
34
#define PID_MAX_LIMIT (IS_ENABLED(CONFIG_BASE_SMALL) ? PAGE_SIZE * 8 : \
include/linux/vfio.h
344
#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
include/linux/vfio_pci_core.h
233
addr + (PAGE_SIZE << order) > vma->vm_end ||
include/linux/virtio_vsock.h
86
if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
include/linux/vmalloc.h
117
return PAGE_SIZE;
include/linux/vmalloc.h
125
return PAGE_SIZE;
include/linux/vmalloc.h
240
return area->size - PAGE_SIZE;
include/linux/vmcore_info.h
23
#define VMCOREINFO_BYTES PAGE_SIZE
include/linux/vmstat.h
327
VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
include/linux/vmw_vmci_defs.h
86
#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
include/linux/vmw_vmci_defs.h
92
#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE
include/media/drv-intf/saa7146.h
180
#define SAA7146_I2C_MEM ( 1*PAGE_SIZE)
include/media/drv-intf/saa7146.h
181
#define SAA7146_RPS_MEM ( 1*PAGE_SIZE)
include/net/mana/mana.h
320
#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
include/net/page_pool/helpers.h
122
unsigned int max_size = PAGE_SIZE << pool->p.order;
include/net/sock.h
1564
return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
include/net/sock.h
1623
if (reclaimable >= (int)PAGE_SIZE)
include/net/xsk_buff_pool.h
187
bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
include/rdma/ib_umem.h
76
return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
include/rdma/rdmavt_mr.h
25
#define RVT_SEGSZ (PAGE_SIZE / sizeof(struct rvt_seg))
include/rdma/rdmavt_qp.h
471
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
include/rdma/rdmavt_qp.h
472
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
include/rdma/uverbs_ioctl.h
492
PAGE_SIZE / sizeof(void *) || \
include/sound/emu10k1.h
1557
#define snd_emu10k1_memblk_offset(blk) (((blk)->mapped_page << PAGE_SHIFT) | ((blk)->mem.offset & (PAGE_SIZE - 1)))
include/sound/memalloc.h
68
return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
include/trace/events/netfs.h
674
(unsigned long long)__entry->index * PAGE_SIZE, __entry->fend,
include/uapi/linux/a.out.h
113
#define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0)
include/uapi/linux/binfmts.h
15
#define MAX_ARG_STRLEN (PAGE_SIZE * 32)
include/uapi/linux/fuse.h
1197
(PAGE_SIZE / sizeof(struct fuse_removemapping_one))
include/uapi/linux/kvm.h
549
((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
include/vdso/datapage.h
175
#define VDSO_ARCH_DATA_SIZE ALIGN(sizeof(struct vdso_arch_data), PAGE_SIZE)
include/vdso/datapage.h
201
#define __vdso_u_rng_data PROVIDE(vdso_u_rng_data = vdso_u_data + 2 * PAGE_SIZE);
include/vdso/datapage.h
207
#define __vdso_u_arch_data PROVIDE(vdso_u_arch_data = vdso_u_data + 3 * PAGE_SIZE);
include/vdso/datapage.h
213
PROVIDE(vdso_u_data = . - __VDSO_PAGES * PAGE_SIZE); \
include/vdso/page.h
28
#define PAGE_MASK (~(PAGE_SIZE - 1))
include/video/udlfb.h
73
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
include/xen/page.h
23
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
init/do_mounts.c
156
strscpy_pad(data_page, data, PAGE_SIZE);
init/do_mounts.c
189
num_fs = split_fs_names(fs_names, PAGE_SIZE);
init/do_mounts.c
191
num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
init/do_mounts.c
220
num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
init/do_mounts.c
349
num_fs = split_fs_names(fs_names, PAGE_SIZE);
init/initramfs.c
624
start = round_down(phys_initrd_start, PAGE_SIZE);
init/initramfs.c
626
size = round_up(size, PAGE_SIZE);
init/initramfs.c
656
unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
init/initramfs.c
657
unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
init/main.c
810
# if THREAD_SIZE >= PAGE_SIZE
io_uring/memmap.c
313
page_limit = (sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
io_uring/memmap.c
48
if (check_add_overflow(end, PAGE_SIZE - 1, &end))
io_uring/mock_file.c
34
size_t buflen = PAGE_SIZE;
io_uring/zcrx.c
1258
dst_page += dst_offset / PAGE_SIZE;
io_uring/zcrx.c
1260
src_page += src_offset / PAGE_SIZE;
io_uring/zcrx.c
1262
n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
io_uring/zcrx.c
1303
cc.size = PAGE_SIZE;
io_uring/zcrx.c
449
reg->rx_buf_len < PAGE_SIZE)
ipc/mqueue.c
409
sb->s_blocksize = PAGE_SIZE;
ipc/msgutil.c
39
#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg))
ipc/msgutil.c
40
#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg))
ipc/shm.c
1776
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
ipc/shm.c
1809
((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
ipc/shm.c
1875
rss * PAGE_SIZE,
ipc/shm.c
1876
swp * PAGE_SIZE);
ipc/shm.c
337
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
ipc/shm.c
567
return PAGE_SIZE;
ipc/shm.c
709
size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
kernel/bpf/arena.c
198
vm_range = (u64)attr->max_entries * PAGE_SIZE;
kernel/bpf/arena.c
399
ret = apply_to_page_range(&init_mm, kaddr, PAGE_SIZE, apply_range_set_cb, &data);
kernel/bpf/arena.c
405
flush_vmap_cache(kaddr, PAGE_SIZE);
kernel/bpf/arena.c
46
#define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1)
kernel/bpf/arena.c
591
uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);
kernel/bpf/arena.c
660
PAGE_SIZE * page_cnt, NULL);
kernel/bpf/arena.c
708
flush_tlb_kernel_range(kaddr, kaddr + (page_cnt * PAGE_SIZE));
kernel/bpf/arena.c
831
flush_tlb_kernel_range(kaddr, kaddr + (page_cnt * PAGE_SIZE));
kernel/bpf/arraymap.c
459
return (void *)round_down((unsigned long)array, PAGE_SIZE);
kernel/bpf/arraymap.c
586
if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
kernel/bpf/bpf_iter.c
105
seq->size = PAGE_SIZE << 3;
kernel/bpf/bpf_struct_ops.c
1147
usage += PAGE_SIZE;
kernel/bpf/bpf_struct_ops.c
128
err = bpf_jit_charge_modmem(PAGE_SIZE);
kernel/bpf/bpf_struct_ops.c
131
image = arch_alloc_bpf_trampoline(PAGE_SIZE);
kernel/bpf/bpf_struct_ops.c
133
bpf_jit_uncharge_modmem(PAGE_SIZE);
kernel/bpf/bpf_struct_ops.c
143
arch_free_bpf_trampoline(image, PAGE_SIZE);
kernel/bpf/bpf_struct_ops.c
144
bpf_jit_uncharge_modmem(PAGE_SIZE);
kernel/bpf/bpf_struct_ops.c
619
if (!image || size > PAGE_SIZE - image_off) {
kernel/bpf/bpf_struct_ops.c
868
PAGE_SIZE);
kernel/bpf/btf.c
4228
if (t->size > PAGE_SIZE)
kernel/bpf/cgroup.c
1900
.cur_len = PAGE_SIZE,
kernel/bpf/cgroup.c
1920
ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
kernel/bpf/cgroup.c
1921
ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
kernel/bpf/cgroup.c
1956
if (unlikely(max_optlen > PAGE_SIZE)) {
kernel/bpf/cgroup.c
1960
max_optlen = PAGE_SIZE;
kernel/bpf/cgroup.c
2038
if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
kernel/bpf/cgroup.c
2144
if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
kernel/bpf/cgroup.c
2341
if (buf_len > PAGE_SIZE - 1)
kernel/bpf/core.c
1042
PAGE_SIZE), LONG_MAX);
kernel/bpf/core.c
1089
size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
kernel/bpf/core.c
1104
PAGE_SIZE - sizeof(*hdr));
kernel/bpf/core.c
124
fp->pages = size / PAGE_SIZE;
kernel/bpf/core.c
1457
fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
kernel/bpf/core.c
1463
memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
kernel/bpf/core.c
264
size = round_up(size, PAGE_SIZE);
kernel/bpf/core.c
265
pages = size / PAGE_SIZE;
kernel/bpf/core.c
271
memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
kernel/bpf/core.c
903
#define BPF_PROG_PACK_SIZE PAGE_SIZE
kernel/bpf/core.c
924
BPF_PROG_PACK_SIZE / PAGE_SIZE);
kernel/bpf/core.c
945
size = round_up(size, PAGE_SIZE);
kernel/bpf/core.c
953
size / PAGE_SIZE);
kernel/bpf/core.c
97
__PAGE_SIZE = PAGE_SIZE
kernel/bpf/dispatcher.c
112
noff = d->image_off ^ (PAGE_SIZE / 2);
kernel/bpf/dispatcher.c
122
if (IS_ERR(bpf_arch_text_copy(new, tmp, PAGE_SIZE / 2)))
kernel/bpf/dispatcher.c
148
d->image = bpf_prog_pack_alloc(PAGE_SIZE, bpf_jit_fill_hole_with_zero);
kernel/bpf/dispatcher.c
151
d->rw_image = bpf_jit_alloc_exec(PAGE_SIZE);
kernel/bpf/dispatcher.c
153
bpf_prog_pack_free(d->image, PAGE_SIZE);
kernel/bpf/dispatcher.c
157
bpf_image_ksym_init(d->image, PAGE_SIZE, &d->ksym);
kernel/bpf/local_storage.c
493
PAGE_SIZE) >> PAGE_SHIFT;
kernel/bpf/local_storage.c
497
PAGE_SIZE) >> PAGE_SHIFT;
kernel/bpf/ringbuf.c
283
if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
kernel/bpf/ringbuf.c
75
unsigned long consumer_pos __aligned(PAGE_SIZE);
kernel/bpf/ringbuf.c
76
unsigned long producer_pos __aligned(PAGE_SIZE);
kernel/bpf/ringbuf.c
79
char data[] __aligned(PAGE_SIZE);
kernel/bpf/syscall.c
397
} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
kernel/bpf/syscall.c
97
if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
kernel/bpf/trampoline.c
1346
if (WARN_ON_ONCE(size > PAGE_SIZE))
kernel/bpf/trampoline.c
1348
image = bpf_jit_alloc_exec(PAGE_SIZE);
kernel/bpf/trampoline.c
1356
WARN_ON_ONCE(size > PAGE_SIZE);
kernel/bpf/trampoline.c
1365
WARN_ON_ONCE(size > PAGE_SIZE);
kernel/bpf/trampoline.c
165
PAGE_SIZE, false, ksym->name);
kernel/bpf/trampoline.c
172
PAGE_SIZE, true, ksym->name);
kernel/bpf/trampoline.c
667
if (size > PAGE_SIZE) {
kernel/cgroup/cgroup.c
4384
.atomic_write_len = PAGE_SIZE,
kernel/cgroup/cgroup.c
4393
.atomic_write_len = PAGE_SIZE,
kernel/cgroup/cgroup.c
4521
if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
kernel/cgroup/cgroup.c
4556
if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
kernel/cgroup/cgroup.c
7490
PAGE_SIZE - ret, NULL);
kernel/cgroup/cgroup.c
7493
PAGE_SIZE - ret, NULL);
kernel/cgroup/cgroup.c
7497
PAGE_SIZE - ret,
kernel/cgroup/cgroup.c
7507
return snprintf(buf, PAGE_SIZE,
kernel/crash_core.c
48
struct page *vmcoreinfo_pages[DIV_ROUND_UP(VMCOREINFO_BYTES, PAGE_SIZE)];
kernel/crash_core.c
492
align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
kernel/crash_core.c
498
BUILD_BUG_ON(size > PAGE_SIZE);
kernel/crash_core.c
53
nr_pages = DIV_ROUND_UP(VMCOREINFO_BYTES, PAGE_SIZE);
kernel/crash_reserve.c
479
unsigned long long request_size = roundup(cma_size, PAGE_SIZE);
kernel/crash_reserve.c
493
if (request_size <= PAGE_SIZE)
kernel/crash_reserve.c
496
request_size = roundup(request_size / 2, PAGE_SIZE);
kernel/debug/kdb/kdb_support.c
356
memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
kernel/dma/contiguous.c
383
if (size <= PAGE_SIZE)
kernel/dma/debug.c
1101
addr = (u8 *)current->stack + i * PAGE_SIZE +
kernel/dma/debug.c
1102
(phys % PAGE_SIZE);
kernel/dma/debug.c
36
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
kernel/dma/map_benchmark.c
40
u64 size = npages * PAGE_SIZE;
kernel/dma/swiotlb.c
1059
if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
kernel/dma/swiotlb.c
1060
alloc_align_mask = PAGE_SIZE - 1;
kernel/dma/swiotlb.c
330
tlb = memblock_alloc(bytes, PAGE_SIZE);
kernel/dma/swiotlb.c
332
tlb = memblock_alloc_low(bytes, PAGE_SIZE);
kernel/dma/swiotlb.c
398
mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
kernel/dma/swiotlb.c
401
__func__, alloc_size, PAGE_SIZE);
kernel/dma/swiotlb.c
493
(PAGE_SIZE << order) >> 20);
kernel/dma/swiotlb.c
901
sz = min_t(size_t, PAGE_SIZE - offset, size);
kernel/events/core.c
12139
filter_str = strndup_user(arg, PAGE_SIZE);
kernel/events/core.c
13521
if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
kernel/events/core.c
498
static int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024);
kernel/events/core.c
6809
userpg->data_offset = PAGE_SIZE;
kernel/events/core.c
7198
unsigned long va = vma->vm_start + PAGE_SIZE * pagenum;
kernel/events/core.c
7207
err = remap_pfn_range(vma, va, page_to_pfn(page), PAGE_SIZE,
kernel/events/core.c
7216
zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL);
kernel/events/core.c
7365
if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
kernel/events/core.c
7375
if (aux_size != nr_pages * PAGE_SIZE)
kernel/events/core.c
7442
nr_pages = vma_size / PAGE_SIZE;
kernel/events/core.c
7447
if (vma_size != PAGE_SIZE * nr_pages)
kernel/events/core.c
8398
phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
kernel/events/internal.h
156
handle->size = PAGE_SIZE << page_order(rb); \
kernel/events/ring_buffer.c
593
tocopy = PAGE_SIZE - offset_in_page(from);
kernel/events/ring_buffer.c
900
return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
kernel/events/ring_buffer.c
935
all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
kernel/events/ring_buffer.c
940
rb->data_pages[0] = all_buf + PAGE_SIZE;
kernel/events/uprobes.c
1083
len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
kernel/events/uprobes.c
1700
return get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
kernel/events/uprobes.c
1725
vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
kernel/events/uprobes.c
1900
if (WARN_ON_ONCE(offset >= PAGE_SIZE))
kernel/events/uprobes.c
36
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
kernel/events/uprobes.c
565
vaddr, vaddr + PAGE_SIZE);
kernel/fork.c
2814
if (unlikely(usize > PAGE_SIZE))
kernel/fork.c
315
BUG_ON(vm_area->nr_pages != THREAD_SIZE / PAGE_SIZE);
kernel/fork.c
317
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
kernel/fork.c
392
#if THREAD_SIZE >= PAGE_SIZE
kernel/fork.c
487
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
kernel/fork.c
489
account * (PAGE_SIZE / 1024));
kernel/fork.c
508
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
kernel/fork.c
817
if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
kernel/fork.c
820
threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
kernel/futex/core.c
568
key->both.offset = address % PAGE_SIZE;
kernel/gcov/fs.c
89
#define ITER_STRIDE PAGE_SIZE
kernel/kcov.c
512
for (off = 0; off < size; off += PAGE_SIZE) {
kernel/kcov.c
569
unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
kernel/kcsan/debugfs.c
75
unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
kernel/kcsan/encoding.h
17
#define SLOT_RANGE PAGE_SIZE
kernel/kcsan/encoding.h
58
return addr >= PAGE_SIZE && size <= MAX_ENCODABLE_SIZE;
kernel/kcsan/encoding.h
90
return (addr / PAGE_SIZE) % CONFIG_KCSAN_NUM_WATCHPOINTS;
kernel/kcsan/kcsan_test.c
303
static long test_array[3 * PAGE_SIZE / sizeof(long)];
kernel/kcsan/report.c
100
#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
kernel/kcsan/selftest.c
43
if (addr < PAGE_SIZE)
kernel/kcsan/selftest.c
44
addr = PAGE_SIZE;
kernel/kexec_core.c
105
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
kernel/kexec_core.c
501
((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
kernel/kexec_core.c
639
destination += PAGE_SIZE;
kernel/kexec_core.c
704
addr + PAGE_SIZE - 1))
kernel/kexec_core.c
763
mchunk = min_t(size_t, mbytes, PAGE_SIZE);
kernel/kexec_core.c
839
mchunk = min_t(size_t, mbytes, PAGE_SIZE);
kernel/kexec_core.c
902
mchunk = min_t(size_t, mbytes, PAGE_SIZE);
kernel/kexec_core.c
995
dest_page_addr += PAGE_SIZE;
kernel/kexec_file.c
496
temp_start = temp_start - PAGE_SIZE;
kernel/kexec_file.c
502
temp_start = temp_start - PAGE_SIZE;
kernel/kexec_file.c
538
temp_start = temp_start + PAGE_SIZE;
kernel/kexec_file.c
544
temp_start = temp_start + PAGE_SIZE;
kernel/kexec_file.c
781
kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
kernel/kexec_file.c
782
kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);
kernel/kexec_file.c
816
zero_buf_sz = PAGE_SIZE;
kernel/kprobes.c
103
return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
kernel/kprobes.c
120
return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
kernel/kprobes.c
194
PAGE_SIZE, false, c->sym);
kernel/kprobes.c
219
(unsigned long)kip->insns, PAGE_SIZE, true,
kernel/kprobes.c
308
addr < (unsigned long)kip->insns + PAGE_SIZE) {
kernel/liveupdate/kexec_handover.c
1117
phys += contig_pages * PAGE_SIZE;
kernel/liveupdate/kexec_handover.c
1130
area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
kernel/liveupdate/kexec_handover.c
134
if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), PAGE_SIZE)))
kernel/liveupdate/kexec_handover.c
1342
err = fdt_create(root, PAGE_SIZE);
kernel/liveupdate/kexec_handover.c
1362
kho_out.fdt = kho_alloc_preserve(PAGE_SIZE);
kernel/liveupdate/kexec_handover.c
350
((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
kernel/liveupdate/kexec_handover.c
358
static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);
kernel/liveupdate/kexec_handover.c
369
if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
kernel/liveupdate/kexec_handover.c
660
kho_scratch = memblock_alloc(size, PAGE_SIZE);
kernel/liveupdate/kexec_handover.c
749
fdt_err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
kernel/liveupdate/kexec_handover.c
783
err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
kernel/liveupdate/kexec_handover.c
82
#define PRESERVE_BITS (PAGE_SIZE * 8)
kernel/liveupdate/kexec_handover.c
822
if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
kernel/liveupdate/kexec_handover.c
88
static_assert(sizeof(struct kho_mem_phys_bits) == PAGE_SIZE);
kernel/locking/qspinlock_paravirt.h
173
#define PV_HE_MIN (PAGE_SIZE / sizeof(struct pv_hash_entry))
kernel/module/decompress.c
123
s.avail_out = PAGE_SIZE;
kernel/module/decompress.c
127
new_size += PAGE_SIZE - s.avail_out;
kernel/module/decompress.c
183
xz_buf.out_size = PAGE_SIZE;
kernel/module/decompress.c
188
} while (xz_buf.out_pos == PAGE_SIZE && xz_ret == XZ_OK);
kernel/module/decompress.c
267
zstd_dec.size = PAGE_SIZE;
kernel/module/decompress.c
276
} while (zstd_dec.pos == PAGE_SIZE && ret != 0);
kernel/module/decompress.c
308
n_pages = DIV_ROUND_UP(size, PAGE_SIZE) * 2;
kernel/module/main.c
2397
#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
kernel/module/main.c
465
if (align > PAGE_SIZE) {
kernel/module/main.c
467
mod->name, align, PAGE_SIZE);
kernel/module/main.c
468
align = PAGE_SIZE;
kernel/module/main.c
588
return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
kernel/nstree.c
435
if (unlikely(usize > PAGE_SIZE))
kernel/padata.c
849
len = snprintf(buf, PAGE_SIZE, "%*pb\n",
kernel/padata.c
852
return len < PAGE_SIZE ? len : -EINVAL;
kernel/params.c
222
return scnprintf(buffer, PAGE_SIZE, format "\n", \
kernel/params.c
292
return scnprintf(buffer, PAGE_SIZE, "%s\n", *((char **)kp->arg));
kernel/params.c
531
return scnprintf(buffer, PAGE_SIZE, "%s\n", kps->string);
kernel/params.c
855
return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
kernel/power/hibernate.c
303
k = nr_pages * (PAGE_SIZE / 1024);
kernel/power/main.c
1016
return show_trace_dev_match(buf, PAGE_SIZE);
kernel/power/power.h
19
} __aligned(PAGE_SIZE);
kernel/power/snapshot.c
109
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
kernel/power/snapshot.c
128
reserved_size = SPARE_PAGES * PAGE_SIZE;
kernel/power/snapshot.c
141
image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
kernel/power/snapshot.c
1436
for (n = PAGE_SIZE / sizeof(long); n; n--) {
kernel/power/snapshot.c
154
#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
kernel/power/snapshot.c
1882
- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
kernel/power/snapshot.c
1884
size = DIV_ROUND_UP(image_size, PAGE_SIZE);
kernel/power/snapshot.c
2143
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
kernel/power/snapshot.c
216
memset(ret, 0, PAGE_SIZE);
kernel/power/snapshot.c
2210
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
kernel/power/snapshot.c
2279
return PAGE_SIZE;
kernel/power/snapshot.c
2366
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
kernel/power/snapshot.c
2839
memset(handle->buffer, 0, PAGE_SIZE);
kernel/power/snapshot.c
2843
return PAGE_SIZE;
kernel/power/snapshot.c
374
#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
kernel/power/snapshot.c
423
#define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
kernel/power/swap.c
104
char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
kernel/power/swap.c
1395
PAGE_SIZE);
kernel/power/swap.c
1406
off += PAGE_SIZE) {
kernel/power/swap.c
1408
page[pg], PAGE_SIZE);
kernel/power/swap.c
1446
data[thr].unc_len & (PAGE_SIZE - 1))) {
kernel/power/swap.c
1453
off < data[thr].unc_len; off += PAGE_SIZE) {
kernel/power/swap.c
1455
data[thr].unc + off, PAGE_SIZE);
kernel/power/swap.c
1536
if (error < (int)PAGE_SIZE)
kernel/power/swap.c
256
(unsigned long)page_address(page) + PAGE_SIZE);
kernel/power/swap.c
269
page_off * (PAGE_SIZE >> 9), addr, PAGE_SIZE, opf);
kernel/power/swap.c
279
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
kernel/power/swap.c
280
bio_add_virt_nofail(bio, addr, PAGE_SIZE);
kernel/power/swap.c
502
#define UNC_SIZE (UNC_PAGES * PAGE_SIZE)
kernel/power/swap.c
506
CMP_HEADER, PAGE_SIZE)
kernel/power/swap.c
507
#define CMP_SIZE (CMP_PAGES * PAGE_SIZE)
kernel/power/swap.c
61
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
kernel/power/swap.c
809
for (off = 0; off < UNC_SIZE; off += PAGE_SIZE) {
kernel/power/swap.c
818
data_of(*snapshot), PAGE_SIZE);
kernel/power/swap.c
873
off += PAGE_SIZE) {
kernel/power/swap.c
874
memcpy(page, data[thr].cmp + off, PAGE_SIZE);
kernel/power/swap.c
966
if (error < (int)PAGE_SIZE) {
kernel/power/user.c
154
res = PAGE_SIZE - pg_offp;
kernel/power/user.c
190
res = PAGE_SIZE;
kernel/profile.c
40
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
kernel/resource.c
1933
PAGE_SIZE);
kernel/resource.c
1966
#define GFR_DEFAULT_ALIGN PAGE_SIZE
kernel/resource_kunit.c
254
region_intersects(start + RES_TEST_RAM0_OFFSET, PAGE_SIZE,
kernel/resource_kunit.c
258
RES_TEST_RAM0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
kernel/resource_kunit.c
261
region_intersects(start + RES_TEST_HOLE0_OFFSET, PAGE_SIZE,
kernel/resource_kunit.c
265
RES_TEST_HOLE0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
kernel/resource_kunit.c
269
RES_TEST_WIN0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
kernel/resource_kunit.c
273
RES_TEST_RAM1_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
kernel/resource_kunit.c
277
RES_TEST_RAM2_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
kernel/resource_kunit.c
280
region_intersects(start + RES_TEST_CODE_OFFSET, PAGE_SIZE,
kernel/resource_kunit.c
284
RES_TEST_RAM2_SIZE + PAGE_SIZE,
kernel/resource_kunit.c
288
RES_TEST_RAM3_SIZE + PAGE_SIZE,
kernel/sched/syscalls.c
1067
if (unlikely(!uattr || pid < 0 || usize > PAGE_SIZE ||
kernel/sched/syscalls.c
887
if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
kernel/seccomp.c
1724
if (size < SECCOMP_NOTIFY_ADDFD_SIZE_VER0 || size >= PAGE_SIZE)
kernel/sys.c
1929
r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
kernel/sys.c
3036
while (s.mem_unit < PAGE_SIZE) {
kernel/sysctl.c
1132
if (left > PAGE_SIZE - 1) {
kernel/sysctl.c
1133
left = PAGE_SIZE - 1;
kernel/sysctl.c
601
if (left > PAGE_SIZE - 1)
kernel/sysctl.c
602
left = PAGE_SIZE - 1;
kernel/sysctl.c
664
if (left > PAGE_SIZE - 1)
kernel/sysctl.c
665
left = PAGE_SIZE - 1;
kernel/sysctl.c
993
if (left > PAGE_SIZE - 1)
kernel/sysctl.c
994
left = PAGE_SIZE - 1;
kernel/time/clocksource.c
1450
max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
kernel/time/clocksource.c
1456
max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
kernel/trace/ftrace.c
1150
#define ENTRIES_PER_PAGE_GROUP(order) ((PAGE_SIZE << (order)) / ENTRY_SIZE)
kernel/trace/ftrace.c
3859
pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);
kernel/trace/ftrace.c
440
(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
kernel/trace/ftrace.c
7632
if (end_offset > PAGE_SIZE << pg->order) {
kernel/trace/ring_buffer.c
1790
int subbuf_size = PAGE_SIZE;
kernel/trace/ring_buffer.c
2130
meta->subbuf_size = PAGE_SIZE;
kernel/trace/ring_buffer.c
2497
subbuf_size = (PAGE_SIZE << order);
kernel/trace/ring_buffer.c
3256
addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
kernel/trace/ring_buffer.c
3849
addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
kernel/trace/ring_buffer.c
4758
addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
kernel/trace/ring_buffer.c
6159
flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
kernel/trace/ring_buffer.c
6870
psize = (1 << order) * PAGE_SIZE;
kernel/trace/ring_buffer.c
7209
vma->vm_start + (PAGE_SIZE * p);
kernel/trace/trace.c
10840
temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
kernel/trace/trace.c
2359
int max_len = PAGE_SIZE - struct_size(entry, array, 1);
kernel/trace/trace.c
6177
(size_t)PAGE_SIZE));
kernel/trace/trace.c
9170
size = (PAGE_SIZE << order) / 1024;
kernel/trace/trace.c
9194
pages = DIV_ROUND_UP(val, PAGE_SIZE);
kernel/trace/trace_boot.c
62
if (v < PAGE_SIZE)
kernel/trace/trace_events.c
2241
if (cnt >= PAGE_SIZE)
kernel/trace/trace_events.c
2394
if (cnt >= PAGE_SIZE)
kernel/trace/trace_events_inject.c
293
if (cnt >= PAGE_SIZE)
kernel/trace/trace_events_trigger.c
398
if (cnt >= PAGE_SIZE)
kernel/trace/trace_events_user.c
2399
if (size > PAGE_SIZE)
kernel/trace/trace_events_user.c
2561
if (size > PAGE_SIZE)
kernel/trace/trace_uprobe.c
877
#define MAX_UCB_BUFFER_SIZE PAGE_SIZE
kernel/trace/tracing_map.c
295
memset(a->pages[i], 0, PAGE_SIZE);
kernel/trace/tracing_map.c
332
a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
kernel/trace/tracing_map.c
347
kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
kernel/tsacct.c
102
stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB;
kernel/tsacct.c
103
stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
kernel/tsacct.c
95
stats->coremem = p->acct_rss_mem1 * PAGE_SIZE;
kernel/tsacct.c
97
stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE;
kernel/user_namespace.c
947
if ((*ppos != 0) || (count >= PAGE_SIZE))
kernel/vmcore_info.c
163
VMCOREINFO_PAGESIZE(PAGE_SIZE);
kernel/watch_queue.c
34
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
kernel/workqueue.c
7107
return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
kernel/workqueue.c
7150
return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
kernel/workqueue.c
7159
return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
kernel/workqueue.c
7191
written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
kernel/workqueue.c
7244
written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
kernel/workqueue.c
7282
written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
kernel/workqueue.c
7286
written = scnprintf(buf, PAGE_SIZE, "%s\n",
kernel/workqueue.c
7321
return scnprintf(buf, PAGE_SIZE, "%d\n",
kernel/workqueue.c
7400
written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
lib/alloc_tag.c
418
unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
lib/alloc_tag.c
429
more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
lib/bitmap-str.c
61
ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
lib/crc/tests/crc_kunit.c
97
test_buflen = round_up(CRC_KUNIT_MAX_LEN, PAGE_SIZE);
lib/crypto/tests/hash-test-template.h
81
size_t alloc_len = round_up(TEST_BUF_LEN, PAGE_SIZE);
lib/crypto/tests/sha256_kunit.c
36
size_t full_len = round_up(len, PAGE_SIZE);
lib/dynamic_debug.c
799
return scnprintf(buffer, PAGE_SIZE, "0x%lx\n", *dcp->bits);
lib/dynamic_debug.c
803
return scnprintf(buffer, PAGE_SIZE, "%d\n", *dcp->lvl);
lib/fault-inject.c
277
return snprintf(page, PAGE_SIZE, "%u\n", val);
lib/fault-inject.c
282
return snprintf(page, PAGE_SIZE, "%lu\n", val);
lib/fault-inject.c
287
return snprintf(page, PAGE_SIZE, "%u\n", val);
lib/fault-inject.c
292
return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val));
lib/fault-inject.c
414
return snprintf(page, PAGE_SIZE,
lib/iov_iter.c
1019
maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
lib/iov_iter.c
1058
page = i->bvec->bv_page + skip / PAGE_SIZE;
lib/iov_iter.c
1059
*start = skip % PAGE_SIZE;
lib/iov_iter.c
1086
*start = addr % PAGE_SIZE;
lib/iov_iter.c
1094
maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
lib/iov_iter.c
1113
maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
lib/iov_iter.c
1169
npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
lib/iov_iter.c
1184
unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
lib/iov_iter.c
1188
npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
lib/iov_iter.c
1201
int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
lib/iov_iter.c
1210
unsigned offset = i->iov_offset % PAGE_SIZE;
lib/iov_iter.c
1211
int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
lib/iov_iter.c
1215
unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
lib/iov_iter.c
1216
int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
lib/iov_iter.c
1528
size_t part = PAGE_SIZE - offset % PAGE_SIZE;
lib/iov_iter.c
1536
p[nr++] = folio_page(folio, offset / PAGE_SIZE);
lib/iov_iter.c
1598
maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
lib/iov_iter.c
1660
if (bv.bv_offset + bv.bv_len != PAGE_SIZE)
lib/iov_iter.c
1709
size_t seg = min_t(size_t, len, PAGE_SIZE);
lib/iov_iter.c
1718
kaddr += PAGE_SIZE;
lib/iov_iter.c
1721
size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
lib/iov_iter.c
1758
*offset0 = offset = addr % PAGE_SIZE;
lib/iov_iter.c
1766
maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
lib/iov_iter.c
1853
size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, left);
lib/iov_iter.c
1857
folio_offset = PAGE_SIZE * folio_page_idx(folio, pages[0]) + offset;
lib/iov_iter.c
1865
max_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
lib/iov_iter.c
1867
size_t next = min_t(size_t, PAGE_SIZE, left);
lib/iov_iter.c
1922
nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
lib/iov_iter.c
350
if (n <= v && v <= PAGE_SIZE)
lib/iov_iter.c
369
page += offset / PAGE_SIZE; // first subpage
lib/iov_iter.c
370
offset %= PAGE_SIZE;
lib/iov_iter.c
373
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
lib/iov_iter.c
381
if (offset == PAGE_SIZE) {
lib/iov_iter.c
399
page += offset / PAGE_SIZE; // first subpage
lib/iov_iter.c
400
offset %= PAGE_SIZE;
lib/iov_iter.c
403
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
lib/iov_iter.c
414
if (offset == PAGE_SIZE) {
lib/iov_iter.c
429
page += offset / PAGE_SIZE; // first subpage
lib/iov_iter.c
430
offset %= PAGE_SIZE;
lib/iov_iter.c
433
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
lib/iov_iter.c
441
if (offset == PAGE_SIZE) {
lib/iov_iter.c
487
n > PAGE_SIZE - offset_in_page(offset))
lib/iov_iter.c
488
n = PAGE_SIZE - offset_in_page(offset);
lib/iov_iter.c
900
unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
lib/iov_iter.c
938
size_t part = PAGE_SIZE - offset % PAGE_SIZE;
lib/iov_iter.c
946
*pages = folio_page(folio, offset / PAGE_SIZE);
lib/math/tests/prime_numbers_kunit.c
11
static char buf[PAGE_SIZE];
lib/raid6/algos.c
181
(*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
lib/raid6/algos.c
224
PAGE_SIZE, *dptrs);
lib/raid6/algos.c
261
dptrs[i] = p + PAGE_SIZE * i;
lib/raid6/algos.c
263
cycle = ((disks - 2) * PAGE_SIZE) / 65536;
lib/raid6/algos.c
269
if ((disks - 2) * PAGE_SIZE % 65536)
lib/raid6/algos.c
270
memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);
lib/raid6/test/test.c
112
memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
lib/raid6/test/test.c
115
raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
lib/raid6/test/test.c
129
raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
lib/raid6/test/test.c
132
raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
lib/raid6/test/test.c
21
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
lib/raid6/test/test.c
24
char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
lib/raid6/test/test.c
25
char recovi[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
lib/raid6/test/test.c
26
char recovj[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
lib/raid6/test/test.c
33
for (j = 0; j < PAGE_SIZE; j++)
lib/raid6/test/test.c
56
memset(recovi, 0xf0, PAGE_SIZE);
lib/raid6/test/test.c
57
memset(recovj, 0xba, PAGE_SIZE);
lib/raid6/test/test.c
62
raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
lib/raid6/test/test.c
64
erra = memcmp(data[i], recovi, PAGE_SIZE);
lib/raid6/test/test.c
65
errb = memcmp(data[j], recovj, PAGE_SIZE);
lib/rhashtable.c
134
kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
lib/rhashtable.c
137
for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
lib/scatterlist.c
1139
npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
lib/scatterlist.c
1144
size_t seg = min_t(size_t, PAGE_SIZE - off, len);
lib/scatterlist.c
1244
seg = min_t(size_t, len, PAGE_SIZE - off);
lib/scatterlist.c
1256
kaddr += PAGE_SIZE;
lib/scatterlist.c
1345
pgoff_t index = start / PAGE_SIZE;
lib/scatterlist.c
168
kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
lib/scatterlist.c
471
max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
lib/scatterlist.c
472
if (WARN_ON(max_segment < PAGE_SIZE))
lib/scatterlist.c
486
next_pfn = (sg_phys(sgt_append->prv) + prv_len) / PAGE_SIZE;
lib/scatterlist.c
490
if (sgt_append->prv->length + PAGE_SIZE > max_segment)
lib/scatterlist.c
492
sgt_append->prv->length += PAGE_SIZE;
lib/scatterlist.c
506
seg_len += PAGE_SIZE;
lib/scatterlist.c
522
seg_len += PAGE_SIZE;
lib/scatterlist.c
623
nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
lib/scatterlist.c
641
elem_len = min_t(u64, length, PAGE_SIZE << order);
lib/scatterlist.c
825
miter->__offset &= PAGE_SIZE - 1;
lib/scatterlist.c
830
PAGE_SIZE - miter->__offset);
lib/stackdepot.c
216
memblock_alloc(stack_max_pools * sizeof(void *), PAGE_SIZE);
lib/string.c
129
size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
lib/string_helpers.c
685
buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
lib/string_helpers.c
689
res = get_cmdline(task, buffer, PAGE_SIZE - 1);
lib/test_bitmap.c
1181
0, 2 * PAGE_SIZE);
lib/test_bitmap.c
1186
0, 2 * PAGE_SIZE);
lib/test_bitmap.c
1191
if (strlen(t->list) > PAGE_SIZE) {
lib/test_bitmap.c
1193
PAGE_SIZE, PAGE_SIZE);
lib/test_bitmap.c
1194
expect_eq_uint(strlen(t->list) + 1 - PAGE_SIZE, n);
lib/test_bitmap.c
1195
expect_eq_str(t->list + PAGE_SIZE, print_buf, n);
lib/test_bitmap.c
23
static char pbl_buffer[PAGE_SIZE] __initdata;
lib/test_bitmap.c
24
static char print_buf[PAGE_SIZE * 2] __initdata;
lib/test_bitmap.c
532
unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL);
lib/test_bitmap.c
533
char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
lib/test_bitmap.c
541
memset(bmap, -1, PAGE_SIZE);
lib/test_bitmap.c
542
slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1);
lib/test_bitmap.c
547
ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8);
lib/test_dynamic_debug.c
24
return scnprintf(buffer, PAGE_SIZE, "did do_prints\n");
lib/test_firmware.c
1424
if (req->fw->size > PAGE_SIZE) {
lib/test_firmware.c
1465
if (tst->size > PAGE_SIZE) {
lib/test_firmware.c
286
len += scnprintf(buf, PAGE_SIZE - len,
lib/test_firmware.c
291
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
295
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
298
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
301
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
306
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
309
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
311
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
313
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
316
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
319
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
322
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
326
len += scnprintf(buf + len, PAGE_SIZE - len,
lib/test_firmware.c
358
len = snprintf(dst, PAGE_SIZE, "%s\n", src);
lib/test_firmware.c
391
return snprintf(buf, PAGE_SIZE, "%d\n", val);
lib/test_firmware.c
414
return snprintf(buf, PAGE_SIZE, "%zu\n", val);
lib/test_firmware.c
419
return snprintf(buf, PAGE_SIZE, "%d\n", val);
lib/test_firmware.c
450
return snprintf(buf, PAGE_SIZE, "%u\n", val);
lib/test_hmm.c
1015
addr += PAGE_SIZE;
lib/test_hmm.c
1557
for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
lib/test_hmm.c
1651
args.start = ALIGN_DOWN(vmf->address, (PAGE_SIZE << order));
lib/test_hmm.c
1653
args.end = args.start + (PAGE_SIZE << order);
lib/test_hmm.c
380
memcpy_from_page(ptr, page, 0, PAGE_SIZE);
lib/test_hmm.c
382
ptr += PAGE_SIZE;
lib/test_hmm.c
446
memcpy_to_page(page, 0, ptr, PAGE_SIZE);
lib/test_hmm.c
448
ptr += PAGE_SIZE;
lib/test_hmm.c
741
addr += PAGE_SIZE;
lib/test_hmm.c
779
addr += PAGE_SIZE;
lib/test_hmm.c
793
addr += PAGE_SIZE;
lib/test_hmm.c
904
for (addr = start; !ret && addr < end; addr += PAGE_SIZE) {
lib/test_hmm.c
958
addr += PAGE_SIZE;
lib/test_hmm.c
964
addr += PAGE_SIZE;
lib/test_kho.c
129
fdt_size = state->nr_folios * sizeof(phys_addr_t) + PAGE_SIZE;
lib/test_kho.c
176
if (alloc_size + (PAGE_SIZE << order) > max_mem) {
lib/test_kho.c
181
size = PAGE_SIZE << order;
lib/test_kho.c
264
unsigned int size = PAGE_SIZE << order;
lib/test_kho.c
30
static long max_mem = (PAGE_SIZE << MAX_PAGE_ORDER) * 2;
lib/test_kmod.c
468
len += snprintf(buf, PAGE_SIZE,
lib/test_kmod.c
472
len += snprintf(buf+len, PAGE_SIZE - len,
lib/test_kmod.c
476
len += snprintf(buf+len, PAGE_SIZE - len,
lib/test_kmod.c
482
len += snprintf(buf+len, PAGE_SIZE - len,
lib/test_kmod.c
486
len += snprintf(buf+len, PAGE_SIZE - len,
lib/test_kmod.c
490
len += snprintf(buf+len, PAGE_SIZE - len,
lib/test_kmod.c
494
len += snprintf(buf+len, PAGE_SIZE - len,
lib/test_kmod.c
685
len = snprintf(dst, PAGE_SIZE, "%s\n", src);
lib/test_kmod.c
958
return snprintf(buf, PAGE_SIZE, "%d\n", val);
lib/test_kmod.c
971
return snprintf(buf, PAGE_SIZE, "%u\n", val);
lib/test_meminit.c
67
size_t size = PAGE_SIZE << order;
lib/test_vmalloc.c
100
size = ((rnd % 10) + 1) * PAGE_SIZE;
lib/test_vmalloc.c
125
ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
lib/test_vmalloc.c
142
ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1,
lib/test_vmalloc.c
162
p = vmalloc(n * PAGE_SIZE);
lib/test_vmalloc.c
186
ptr[i] = vmalloc(1 * PAGE_SIZE);
lib/test_vmalloc.c
189
ptr_1 = vmalloc(100 * PAGE_SIZE);
lib/test_vmalloc.c
193
ptr_2 = vmalloc(1 * PAGE_SIZE);
lib/test_vmalloc.c
225
junk_length *= (32 * 1024 * 1024 / PAGE_SIZE);
lib/test_vmalloc.c
238
ptr[i] = vmalloc(1 * PAGE_SIZE);
lib/test_vmalloc.c
239
junk_ptr[i] = vmalloc(1 * PAGE_SIZE);
lib/test_vmalloc.c
246
tmp = vmalloc(1 * PAGE_SIZE);
lib/test_vmalloc.c
275
ptr = vmalloc_huge((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE, GFP_KERNEL);
lib/test_vmalloc.c
277
ptr = vmalloc((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE);
lib/test_vmalloc.c
298
unsigned long size = (nr_pages > 0 ? nr_pages : 1) * PAGE_SIZE;
lib/test_vmalloc.c
328
size = get_random_u32_inclusive(1, PAGE_SIZE / 4);
lib/test_vmalloc.c
360
p = vmalloc(1 * PAGE_SIZE);
lib/test_vmalloc.c
378
p = vmalloc(1 * PAGE_SIZE);
lib/tests/fortify_kunit.c
282
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
283
vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
lib/tests/fortify_kunit.c
284
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
285
vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
lib/tests/fortify_kunit.c
286
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
287
__vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
lib/tests/fortify_kunit.c
297
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
298
kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
lib/tests/fortify_kunit.c
300
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
301
kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
lib/tests/fortify_kunit.c
303
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
304
kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
lib/tests/fortify_kunit.c
306
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
307
kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
lib/tests/fortify_kunit.c
309
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
310
kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
lib/tests/fortify_kunit.c
312
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
313
kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
lib/tests/fortify_kunit.c
315
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
316
kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
lib/tests/fortify_kunit.c
318
checker((expected_pages) * PAGE_SIZE, \
lib/tests/fortify_kunit.c
319
kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
lib/tests/fortify_kunit.c
322
prev_size = (expected_pages) * PAGE_SIZE; \
lib/tests/fortify_kunit.c
325
checker(((expected_pages) * PAGE_SIZE) * 2, \
lib/tests/fortify_kunit.c
326
kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
lib/tests/kunit_iov_iter.c
110
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
160
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
232
KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
lib/tests/kunit_iov_iter.c
235
KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);
lib/tests/kunit_iov_iter.c
247
can_merge = page + pr->to / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
269
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
293
u8 *p = scratch + pr->page * PAGE_SIZE;
lib/tests/kunit_iov_iter.c
323
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
346
size_t patt = pr->page * PAGE_SIZE;
lib/tests/kunit_iov_iter.c
397
size += PAGE_SIZE;
lib/tests/kunit_iov_iter.c
427
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
451
KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
lib/tests/kunit_iov_iter.c
489
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
513
KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
lib/tests/kunit_iov_iter.c
559
size += PAGE_SIZE;
lib/tests/kunit_iov_iter.c
589
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
647
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
711
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
733
KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
lib/tests/kunit_iov_iter.c
743
ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
lib/tests/kunit_iov_iter.c
753
ix = from / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
757
KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
lib/tests/kunit_iov_iter.c
790
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
811
KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
lib/tests/kunit_iov_iter.c
821
ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
lib/tests/kunit_iov_iter.c
831
ix = pr->page + from / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
835
KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
lib/tests/kunit_iov_iter.c
868
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
900
KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
lib/tests/kunit_iov_iter.c
904
ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
lib/tests/kunit_iov_iter.c
908
ix = from / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
912
KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
lib/tests/kunit_iov_iter.c
947
npages = bufsize / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
978
KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
lib/tests/kunit_iov_iter.c
982
ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
lib/tests/kunit_iov_iter.c
986
ix = from / PAGE_SIZE;
lib/tests/kunit_iov_iter.c
990
KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
lib/tests/usercopy_kunit.c
183
memset(kmem, 0x3a, PAGE_SIZE * 2);
lib/tests/usercopy_kunit.c
184
KUNIT_EXPECT_EQ_MSG(test, 0, copy_to_user(usermem, kmem, PAGE_SIZE),
lib/tests/usercopy_kunit.c
186
memset(kmem, 0x0, PAGE_SIZE);
lib/tests/usercopy_kunit.c
187
KUNIT_EXPECT_EQ_MSG(test, 0, copy_from_user(kmem, usermem, PAGE_SIZE),
lib/tests/usercopy_kunit.c
189
KUNIT_EXPECT_MEMEQ_MSG(test, kmem, kmem + PAGE_SIZE, PAGE_SIZE,
lib/tests/usercopy_kunit.c
233
memset(kmem, 0x5a, PAGE_SIZE);
lib/tests/usercopy_kunit.c
234
memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
lib/tests/usercopy_kunit.c
237
KUNIT_EXPECT_NE_MSG(test, copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
lib/tests/usercopy_kunit.c
238
PAGE_SIZE), 0,
lib/tests/usercopy_kunit.c
242
KUNIT_EXPECT_MEMEQ_MSG(test, kmem + PAGE_SIZE, kmem, PAGE_SIZE,
lib/tests/usercopy_kunit.c
253
PAGE_SIZE), 0,
lib/tests/usercopy_kunit.c
256
KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
lib/tests/usercopy_kunit.c
257
PAGE_SIZE), 0,
lib/tests/usercopy_kunit.c
261
PAGE_SIZE), 0,
lib/tests/usercopy_kunit.c
302
priv->size = PAGE_SIZE * 2;
lib/tests/usercopy_kunit.c
55
KUNIT_ASSERT_GE_MSG(test, size, 2 * PAGE_SIZE, "buffer too small");
lib/tests/usercopy_kunit.c
64
start = PAGE_SIZE - (size / 2);
lib/vdso/datastore.c
101
return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
lib/vdso/datastore.c
17
u8 page[PAGE_SIZE];
lib/vdso/datastore.c
20
static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
lib/vdso/datastore.c
26
u8 page[PAGE_SIZE];
lib/vdso/datastore.c
29
static_assert(sizeof(vdso_rng_data_store) == PAGE_SIZE);
lib/vdso/datastore.c
57
addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE;
lib/vdso/getrandom.c
21
#define PAGE_MASK (~(PAGE_SIZE - 1))
lib/vdso/getrandom.c
93
if (unlikely(((unsigned long)opaque_state & ~PAGE_MASK) + sizeof(*state) > PAGE_SIZE))
lib/vdso/gettimeofday.c
114
return (void *)vd + PAGE_SIZE;
lib/vsprintf.c
710
if ((unsigned long)ptr < PAGE_SIZE || IS_ERR_VALUE(ptr))
lib/zlib_deflate/deflate.c
233
mem->window_memory = (Byte *) PTR_ALIGN(next, PAGE_SIZE);
lib/zlib_deflate/defutil.h
245
(2 * (1 << (windowBits)) * sizeof(Byte) + PAGE_SIZE)
lib/zlib_inflate/inflate.c
85
state->window = PTR_ALIGN(&WS(strm)->working_window[0], PAGE_SIZE);
lib/zlib_inflate/infutil.h
26
unsigned char working_window[(1 << MAX_WBITS) + PAGE_SIZE];
mm/bootmem_info.c
38
kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
mm/bpf_memcontrol.c
115
return page_counter_read(&memcg->memory) * PAGE_SIZE;
mm/cma.c
1123
if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
mm/cma.c
592
size = ALIGN_DOWN(size, (PAGE_SIZE << order_per_bit));
mm/damon/core.c
2255
throughput = PAGE_SIZE * 1024;
mm/damon/lru_sort.c
185
.min_sz_region = PAGE_SIZE,
mm/damon/ops-common.c
67
young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
mm/damon/paddr.c
180
addr += PAGE_SIZE;
mm/damon/paddr.c
206
return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
mm/damon/paddr.c
221
addr += PAGE_SIZE;
mm/damon/paddr.c
240
return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
mm/damon/paddr.c
269
addr += PAGE_SIZE;
mm/damon/paddr.c
288
return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
mm/damon/paddr.c
305
addr += PAGE_SIZE;
mm/damon/paddr.c
88
static unsigned long last_folio_sz = PAGE_SIZE;
mm/damon/reclaim.c
167
.min_sz_region = PAGE_SIZE,
mm/damon/vaddr.c
544
static unsigned long last_folio_sz = PAGE_SIZE;
mm/damon/vaddr.c
736
for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) {
mm/damon/vaddr.c
849
return applied * PAGE_SIZE;
mm/damon/vaddr.c
901
for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) {
mm/debug_vm_pgtable.c
1081
pstart = PAGE_SIZE;
mm/debug_vm_pgtable.c
1114
args->fixed_alignment = PAGE_SIZE;
mm/debug_vm_pgtable.c
966
total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;
mm/debug_vm_pgtable.c
969
random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;
mm/dmapool.c
247
allocation = max_t(size_t, size, PAGE_SIZE);
mm/dmapool_test.c
38
return clamp_t(int, (PAGE_SIZE / size) * 512, 1024, 8192);
mm/early_ioremap.c
162
phys_addr += PAGE_SIZE;
mm/fadvise.c
121
start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
mm/filemap.c
3010
page = folio_page(folio, offset / PAGE_SIZE);
mm/filemap.c
3012
offset %= PAGE_SIZE;
mm/filemap.c
3016
size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
mm/filemap.c
3073
len = min_t(size_t, len, npages * PAGE_SIZE);
mm/filemap.c
3182
return PAGE_SIZE << xas_get_order(xas);
mm/filemap.c
3234
if (seek_size > PAGE_SIZE)
mm/filemap.c
3524
max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
mm/filemap.c
3628
max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
mm/filemap.c
3729
max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
mm/filemap.c
3768
addr0 = addr - start * PAGE_SIZE;
mm/filemap.c
3808
if (in_range(vmf->address, addr, count * PAGE_SIZE))
mm/filemap.c
3815
addr += count * PAGE_SIZE;
mm/filemap.c
3824
if (in_range(vmf->address, addr, count * PAGE_SIZE))
mm/filemap.c
3891
file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
mm/filemap.c
4361
if (chunk > PAGE_SIZE)
mm/filemap.c
4546
invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);
mm/gup.c
1499
flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
mm/gup.c
1505
start += page_increm * PAGE_SIZE;
mm/gup.c
1771
start += PAGE_SIZE;
mm/gup.c
1817
unsigned long nr_pages = (end - start) / PAGE_SIZE;
mm/gup.c
1890
unsigned long nr_pages = (end - start) / PAGE_SIZE;
mm/gup.c
1971
nend = nstart + ret * PAGE_SIZE;
mm/gup.c
2026
start = (start + PAGE_SIZE) & PAGE_MASK;
mm/gup.c
2058
for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
mm/gup.c
2128
for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
mm/gup.c
2160
for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
mm/gup.c
2897
} while (ptep++, addr += PAGE_SIZE, addr != end);
mm/gup_test.c
115
nr_pages = gup->size / PAGE_SIZE;
mm/gup_test.c
132
next = addr + nr * PAGE_SIZE;
mm/gup_test.c
135
nr = (next - addr) / PAGE_SIZE;
mm/gup_test.c
241
if (!IS_ALIGNED(args.addr | args.size, PAGE_SIZE))
mm/gup_test.c
245
nr_pages = args.size / PAGE_SIZE;
mm/gup_test.c
267
addr = args.addr + pin_longterm_test_nr_pages * PAGE_SIZE;
mm/gup_test.c
305
PAGE_SIZE);
mm/gup_test.c
309
user_addr += PAGE_SIZE;
mm/highmem.c
419
if (start1 >= PAGE_SIZE) {
mm/highmem.c
420
start1 -= PAGE_SIZE;
mm/highmem.c
421
end1 -= PAGE_SIZE;
mm/highmem.c
423
unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);
mm/highmem.c
433
if (start2 >= PAGE_SIZE) {
mm/highmem.c
434
start2 -= PAGE_SIZE;
mm/highmem.c
435
end2 -= PAGE_SIZE;
mm/highmem.c
437
unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);
mm/hmm.c
215
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
mm/hmm.c
362
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
mm/hmm.c
460
for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
mm/hmm.c
55
for (; addr < end; addr += PAGE_SIZE, i++) {
mm/hmm.c
585
for (; addr < end; addr += PAGE_SIZE, i++, pfn++) {
mm/hmm.c
704
WARN_ON_ONCE(!(nr_entries * PAGE_SIZE / dma_entry_size));
mm/hmm.c
724
nr_entries * PAGE_SIZE);
mm/hmm.c
89
for (; addr < end; addr += PAGE_SIZE)
mm/huge_memory.c
155
addr = vma->vm_end - (PAGE_SIZE << order);
mm/huge_memory.c
2972
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
mm/huge_memory.c
3188
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
mm/huge_memory.c
3214
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
mm/huge_memory.c
4033
end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
mm/huge_memory.c
4619
for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
mm/huge_memory.c
720
unsigned long size = (PAGE_SIZE << order) / SZ_1K;
mm/huge_memory.c
990
static char str_dup[PAGE_SIZE] __initdata;
mm/huge_memory.c
999
if (!str || strlen(str) + 1 > PAGE_SIZE)
mm/hugetlb.c
1033
return PAGE_SIZE;
mm/hugetlb.c
3163
memblock_reserved_mark_noinit(__pa((void *)m + PAGE_SIZE),
mm/hugetlb.c
3164
huge_page_size(h) - PAGE_SIZE);
mm/hugetlb.c
4044
dst = size_to_hstate(PAGE_SIZE << src->demote_order);
mm/hugetlb.c
4213
if (size_to_hstate(PAGE_SIZE << order)) {
mm/hugetlb_cgroup.c
471
seq_printf(seq, "total=%lu", usage * PAGE_SIZE);
mm/hugetlb_cgroup.c
477
PAGE_SIZE);
mm/hugetlb_cgroup.c
486
page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);
mm/hugetlb_cgroup.c
501
seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
mm/hugetlb_cgroup.c
521
return (u64)page_counter_read(counter) * PAGE_SIZE;
mm/hugetlb_cgroup.c
523
return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
mm/hugetlb_cgroup.c
525
return (u64)counter->max * PAGE_SIZE;
mm/hugetlb_cgroup.c
527
return (u64)rsvd_counter->max * PAGE_SIZE;
mm/hugetlb_cgroup.c
529
return (u64)counter->watermark * PAGE_SIZE;
mm/hugetlb_cgroup.c
531
return (u64)rsvd_counter->watermark * PAGE_SIZE;
mm/hugetlb_cgroup.c
562
seq_printf(seq, "%llu\n", val * PAGE_SIZE);
mm/hugetlb_cgroup.c
572
seq_printf(seq, "%llu\n", val * PAGE_SIZE);
mm/hugetlb_cma.c
179
if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
mm/hugetlb_cma.c
181
nid, (PAGE_SIZE << order) / SZ_1M);
mm/hugetlb_cma.c
193
if (hugetlb_cma_size < (PAGE_SIZE << order)) {
mm/hugetlb_cma.c
195
(PAGE_SIZE << order) / SZ_1M);
mm/hugetlb_cma.c
225
size = round_up(size, PAGE_SIZE << order);
mm/hugetlb_cma.c
233
res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
mm/hugetlb_sysfs.c
244
unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
mm/hugetlb_vmemmap.c
252
BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
mm/hugetlb_vmemmap.c
299
BUG_ON(start - reuse != PAGE_SIZE);
mm/hugetlb_vmemmap.c
363
BUG_ON(start - reuse != PAGE_SIZE);
mm/hugetlb_vmemmap.c
367
end = reuse + walk.nr_walked * PAGE_SIZE;
mm/hugetlb_vmemmap.c
436
BUG_ON(start - reuse != PAGE_SIZE);
mm/hugetlb_vmemmap.c
66
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
mm/hugetlb_vmemmap.c
818
memmap_boot_pages_add(HUGETLB_VMEMMAP_RESERVE_SIZE / PAGE_SIZE);
mm/hugetlb_vmemmap.c
869
memmap_boot_pages_add(DIV_ROUND_UP(nr_mmap, PAGE_SIZE));
mm/hugetlb_vmemmap.h
19
#define HUGETLB_VMEMMAP_RESERVE_SIZE PAGE_SIZE
mm/internal.h
1191
return pvmw->address + PAGE_SIZE;
mm/kasan/common.c
141
PAGE_SIZE << order, init);
mm/kasan/common.c
151
kasan_poison(page_address(page), PAGE_SIZE << order,
mm/kasan/common.c
508
kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);
mm/kasan/hw_tags.c
312
for (addr = start; addr < start + size; addr += PAGE_SIZE) {
mm/kasan/hw_tags.c
377
redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start;
mm/kasan/init.c
102
while (addr + PAGE_SIZE <= end) {
mm/kasan/init.c
104
addr += PAGE_SIZE;
mm/kasan/init.c
130
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
mm/kasan/init.c
171
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
mm/kasan/init.c
213
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
mm/kasan/init.c
275
early_alloc(PAGE_SIZE, NUMA_NO_NODE));
mm/kasan/init.c
29
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;
mm/kasan/init.c
351
next = (addr + PAGE_SIZE) & PAGE_MASK;
mm/kasan/kasan_test_c.c
1817
ptr = vmalloc(PAGE_SIZE);
mm/kasan/kasan_test_c.c
1870
size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
mm/kasan/kasan_test_c.c
2160
useraddr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
mm/kasan/report.c
664
if (orig_addr < PAGE_SIZE)
mm/kasan/report_generic.c
132
if ((unsigned long)info->access_addr < PAGE_SIZE)
mm/kasan/shadow.c
242
ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
mm/kasan/shadow.c
312
__memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
mm/kasan/shadow.c
369
nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
mm/kasan/shadow.c
381
ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
mm/kasan/shadow.c
389
start += nr_pages * PAGE_SIZE;
mm/kasan/shadow.c
666
shadow_size = round_up(scaled_size, PAGE_SIZE);
mm/kfence/core.c
1053
free_size / PAGE_SIZE);
mm/kfence/core.c
1149
if (size > PAGE_SIZE) {
mm/kfence/core.c
1265
const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
mm/kfence/core.c
1284
meta = addr_to_metadata(addr - PAGE_SIZE);
mm/kfence/core.c
1291
meta = addr_to_metadata(addr + PAGE_SIZE);
mm/kfence/core.c
249
return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
mm/kfence/core.c
254
return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
mm/kfence/core.c
260
unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
mm/kfence/core.c
274
if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
mm/kfence/core.c
356
const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
mm/kfence/core.c
367
for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
mm/kfence/core.c
374
const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
mm/kfence/core.c
407
for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
mm/kfence/core.c
410
for (; addr - pageaddr < PAGE_SIZE; addr++) {
mm/kfence/core.c
475
meta->addr += PAGE_SIZE - size;
mm/kfence/core.c
537
kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
mm/kfence/core.c
546
memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
mm/kfence/core.c
621
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
mm/kfence/core.c
646
addr += PAGE_SIZE;
mm/kfence/core.c
660
if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
mm/kfence/core.c
677
addr += 2 * PAGE_SIZE;
mm/kfence/core.c
689
addr += 2 * i * PAGE_SIZE;
mm/kfence/core.c
690
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
mm/kfence/core.c
939
__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
mm/kfence/core.c
947
kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
mm/kfence/core.c
998
const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
mm/kfence/core.c
999
const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
mm/kfence/kfence.h
127
index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
mm/kfence/kfence_test.c
624
const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
mm/khugepaged.c
1262
_pte++, addr += PAGE_SIZE) {
mm/khugepaged.c
1537
i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
mm/khugepaged.c
1592
i += nr_batch_ptes, addr += nr_batch_ptes * PAGE_SIZE,
mm/khugepaged.c
543
_pte++, addr += PAGE_SIZE) {
mm/khugepaged.c
683
address += nr_ptes * PAGE_SIZE) {
mm/khugepaged.c
787
unsigned long src_addr = address + i * PAGE_SIZE;
mm/khugepaged.c
980
unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
mm/khugepaged.c
985
for (addr = start_addr; addr < end; addr += PAGE_SIZE) {
mm/kmsan/core.c
269
PAGE_SIZE - ((addr64 + pos) % PAGE_SIZE));
mm/kmsan/core.c
333
u64 cur_addr = (u64)addr, next_addr = cur_addr + PAGE_SIZE;
mm/kmsan/core.c
341
if (ALIGN_DOWN(cur_addr + size - 1, PAGE_SIZE) ==
mm/kmsan/core.c
342
ALIGN_DOWN(cur_addr, PAGE_SIZE))
mm/kmsan/core.c
354
cur_origin = next_origin, next_addr += PAGE_SIZE) {
mm/kmsan/core.c
363
if (((u64)cur_shadow == ((u64)next_shadow - PAGE_SIZE)) &&
mm/kmsan/core.c
364
((u64)cur_origin == ((u64)next_origin - PAGE_SIZE)))
mm/kmsan/hooks.c
161
nr = (end - start) / PAGE_SIZE;
mm/kmsan/hooks.c
163
for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
mm/kmsan/hooks.c
172
vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
mm/kmsan/hooks.c
181
vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
mm/kmsan/hooks.c
186
vmalloc_shadow(start + off + PAGE_SIZE));
mm/kmsan/hooks.c
208
vmalloc_shadow(start + clean * PAGE_SIZE));
mm/kmsan/hooks.c
211
vmalloc_origin(start + clean * PAGE_SIZE));
mm/kmsan/hooks.c
228
nr = (end - start) / PAGE_SIZE;
mm/kmsan/hooks.c
233
i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
mm/kmsan/hooks.c
357
to_go = min(PAGE_SIZE - page_offset, (u64)size);
mm/kmsan/init.c
39
nstart = ALIGN_DOWN(nstart, PAGE_SIZE);
mm/kmsan/init.c
40
nend = ALIGN(nend, PAGE_SIZE);
mm/kmsan/kmsan_test.c
329
memset(vbuf, 0xfe, npages * PAGE_SIZE);
mm/kmsan/kmsan_test.c
331
kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);
mm/kmsan/kmsan_test.c
354
buf = vmalloc(PAGE_SIZE * npages);
mm/kmsan/kmsan_test.c
356
memset(buf, 0xfe, PAGE_SIZE * npages);
mm/kmsan/kmsan_test.c
359
kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE);
mm/kmsan/kmsan_test.c
434
value = *test_uaf_pages_helper(1, PAGE_SIZE + 3);
mm/kmsan/kmsan_test.c
621
void *buf = vmalloc(PAGE_SIZE);
mm/kmsan/kmsan_test.c
628
memset(buf + PAGE_SIZE - size, 0xff, size);
mm/kmsan/shadow.c
158
kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
mm/kmsan/shadow.c
164
__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
mm/kmsan/shadow.c
165
__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
mm/kmsan/shadow.c
184
__memset(page_address(shadow), 0, PAGE_SIZE * pages);
mm/kmsan/shadow.c
185
__memset(page_address(origin), 0, PAGE_SIZE * pages);
mm/kmsan/shadow.c
193
__memset(page_address(shadow), -1, PAGE_SIZE * pages);
mm/kmsan/shadow.c
201
for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
mm/kmsan/shadow.c
210
kmsan_internal_poison_memory(page_address(page), PAGE_SIZE << order,
mm/kmsan/shadow.c
232
nr = (end - start) / PAGE_SIZE;
mm/kmsan/shadow.c
284
shadow = memblock_alloc_or_panic(size, PAGE_SIZE);
mm/kmsan/shadow.c
285
origin = memblock_alloc_or_panic(size, PAGE_SIZE);
mm/kmsan/shadow.c
287
for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
mm/kmsan/shadow.c
52
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
mm/kmsan/shadow.c
53
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);
mm/kmsan/shadow.c
91
KMSAN_WARN_ON(size > PAGE_SIZE);
mm/ksm.c
1267
checksum = xxhash(addr, PAGE_SIZE, 0);
mm/ksm.c
1291
pvmw.address + PAGE_SIZE);
mm/ksm.c
1404
addr + PAGE_SIZE);
mm/ksm.c
2537
for (ptep = start_ptep; addr < end; ptep++, addr += PAGE_SIZE) {
mm/ksm.c
2681
ksm_scan.address = vma->vm_end - PAGE_SIZE;
mm/ksm.c
2698
ksm_scan.address += PAGE_SIZE;
mm/ksm.c
2707
ksm_scan.address += PAGE_SIZE;
mm/ksm.c
3442
return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
mm/ksm.c
3739
general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
mm/ksm.c
628
for (ptep = start_ptep; addr < end; ptep++, addr += PAGE_SIZE) {
mm/ksm.c
791
break_ksm(vma, addr, addr + PAGE_SIZE, false);
mm/madvise.c
194
for (addr = start; addr < end; addr += PAGE_SIZE) {
mm/madvise.c
346
int max_nr = (end - addr) / PAGE_SIZE;
mm/madvise.c
448
tlb_change_page_size(tlb, PAGE_SIZE);
mm/madvise.c
455
for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) {
mm/madvise.c
671
tlb_change_page_size(tlb, PAGE_SIZE);
mm/madvise.c
677
for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
mm/madvise.c
692
max_nr = (end - addr) / PAGE_SIZE;
mm/madvise.c
997
start += pages * PAGE_SIZE;
mm/mapping_dirty_helpers.c
107
addr + PAGE_SIZE);
mm/mapping_dirty_helpers.c
48
addr + PAGE_SIZE);
mm/memblock.c
2493
err |= fdt_create(fdt, PAGE_SIZE);
mm/memblock.c
319
start = max_t(phys_addr_t, start, PAGE_SIZE);
mm/memblock.c
471
new_alloc_size, PAGE_SIZE);
mm/memblock.c
475
new_alloc_size, PAGE_SIZE);
mm/memcontrol-v1.c
1598
return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
mm/memcontrol-v1.c
1600
return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
mm/memcontrol-v1.c
1601
return (u64)page_counter_read(counter) * PAGE_SIZE;
mm/memcontrol-v1.c
1603
return (u64)counter->max * PAGE_SIZE;
mm/memcontrol-v1.c
1605
return (u64)counter->watermark * PAGE_SIZE;
mm/memcontrol-v1.c
1609
return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
mm/memcontrol-v1.c
1914
PAGE_SIZE);
mm/memcontrol-v1.c
1923
(u64)memory * PAGE_SIZE);
mm/memcontrol-v1.c
1925
(u64)memsw * PAGE_SIZE);
mm/memcontrol-v1.c
1943
PAGE_SIZE);
mm/memcontrol.c
1420
return PAGE_SIZE;
mm/memcontrol.c
168
WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
mm/memcontrol.c
2457
get_order(nr_pages * PAGE_SIZE))) {
mm/memcontrol.c
2967
if (abs(*bytes) > PAGE_SIZE) {
mm/memcontrol.c
3010
unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
mm/memcontrol.c
3091
nr_bytes = nr_bytes & (PAGE_SIZE - 1);
mm/memcontrol.c
3111
if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
mm/memcontrol.c
3113
stock->nr_bytes &= (PAGE_SIZE - 1);
mm/memcontrol.c
3155
nr_bytes = size & (PAGE_SIZE - 1);
mm/memcontrol.c
3162
refill_obj_stock(objcg, nr_bytes ? PAGE_SIZE - nr_bytes : 0,
mm/memcontrol.c
4240
seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
mm/memcontrol.c
4250
return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
mm/memcontrol.c
4266
seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
mm/memcontrol.c
5173
BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
mm/memcontrol.c
5331
return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
mm/memcontrol.c
5481
pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
mm/memcontrol.c
686
if (!val || unit == PAGE_SIZE)
mm/memcontrol.c
689
return max(val * unit / PAGE_SIZE, 1UL);
mm/memfd_luo.c
114
max_folios = PAGE_ALIGN(size) / PAGE_SIZE;
mm/memory-failure.c
762
for (; addr != end; ptep++, addr += PAGE_SIZE) {
mm/memory.c
1316
max_nr = (end - addr) / PAGE_SIZE;
mm/memory.c
1338
} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
mm/memory.c
1627
addr += PAGE_SIZE;
mm/memory.c
1809
int max_nr = (end - addr) / PAGE_SIZE;
mm/memory.c
1823
addr += nr * PAGE_SIZE;
mm/memory.c
1913
tlb_change_page_size(tlb, PAGE_SIZE);
mm/memory.c
1934
addr += nr * PAGE_SIZE;
mm/memory.c
1938
} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
mm/memory.c
2460
addr += PAGE_SIZE;
mm/memory.c
2493
const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
mm/memory.c
2887
} while (pte++, addr += PAGE_SIZE, addr != end);
mm/memory.c
3247
} while (pte++, addr += PAGE_SIZE, addr != end);
mm/memory.c
3517
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
mm/memory.c
3535
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
mm/memory.c
363
tlb_change_page_size(tlb, PAGE_SIZE);
mm/memory.c
3810
(vmf->address & PAGE_MASK) + PAGE_SIZE);
mm/memory.c
4358
pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/memory.c
4363
(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/memory.c
4400
(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
mm/memory.c
4555
addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
mm/memory.c
4556
idx = (vmf->address - addr) / PAGE_SIZE;
mm/memory.c
4653
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
mm/memory.c
4664
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
mm/memory.c
4898
unsigned long folio_start = address - idx * PAGE_SIZE;
mm/memory.c
4899
unsigned long folio_end = folio_start + nr * PAGE_SIZE;
mm/memory.c
5168
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
mm/memory.c
5182
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
mm/memory.c
5273
addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
mm/memory.c
5502
bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
mm/memory.c
5592
file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
mm/memory.c
5641
addr = vmf->address - idx * PAGE_SIZE;
mm/memory.c
5689
if (val / PAGE_SIZE > PTRS_PER_PTE)
mm/memory.c
5696
val = max(val, PAGE_SIZE);
mm/memory.c
6026
for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
mm/memory.c
7012
offset = addr & (PAGE_SIZE-1);
mm/memory.c
7013
if (bytes > PAGE_SIZE-offset)
mm/memory.c
7014
bytes = PAGE_SIZE-offset;
mm/memory.c
7016
maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
mm/memory.c
7122
offset = addr & (PAGE_SIZE - 1);
mm/memory.c
7123
if (bytes > PAGE_SIZE - offset)
mm/memory.c
7124
bytes = PAGE_SIZE - offset;
mm/memory.c
7126
maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
mm/memory.c
7143
copy_from_user_page(vma, page, addr, buf, maddr + (PAGE_SIZE - 1), 1);
mm/memory.c
7252
n = (addr_hint - addr) / PAGE_SIZE;
mm/memory.c
7260
ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
mm/memory.c
7271
ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
mm/memory.c
7285
ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
mm/memory.c
7289
ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
mm/memory.c
7317
clear_user_highpages(page + i, addr + i * PAGE_SIZE, count);
mm/memory.c
7338
const long fault_idx = (addr_hint - base_addr) / PAGE_SIZE;
mm/memory.c
7359
const unsigned long addr = base_addr + r[i].start * PAGE_SIZE;
mm/memory.c
7384
addr + i*PAGE_SIZE, vma))
mm/memory.c
7430
unsigned long ret_val = nr_pages * PAGE_SIZE;
mm/memory.c
7438
rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
mm/memory.c
7443
ret_val -= (PAGE_SIZE - rc);
mm/memory_hotplug.c
1395
if (!IS_ALIGNED(vmemmap_size, PAGE_SIZE))
mm/memory_hotplug.c
400
VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
mm/mempolicy.c
1665
if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
mm/mempolicy.c
1705
if (copy > PAGE_SIZE)
mm/mempolicy.c
708
for (; addr != end; pte += nr, addr += nr * PAGE_SIZE) {
mm/mempool.c
100
__check_element(pool, addr, PAGE_SIZE << order);
mm/mempool.c
133
__poison_element(addr, PAGE_SIZE);
mm/mempool.c
139
__poison_element(addr, PAGE_SIZE << order);
mm/mempool.c
94
__check_element(pool, addr, PAGE_SIZE);
mm/migrate_device.c
1135
addr = migrate->start + i*PAGE_SIZE;
mm/migrate_device.c
1157
addr + j * PAGE_SIZE,
mm/migrate_device.c
1182
addr = migrate->start + i * PAGE_SIZE;
mm/migrate_device.c
225
migrate_vma_collect_skip(start + PAGE_SIZE, end, walk);
mm/migrate_device.c
275
ptep += (addr - start) / PAGE_SIZE;
mm/migrate_device.c
277
for (; addr < end; addr += PAGE_SIZE, ptep++) {
mm/migrate_device.c
28
for (addr = start; addr < end; addr += PAGE_SIZE) {
mm/migrate_device.c
62
return migrate_vma_collect_skip(start + PAGE_SIZE, end, walk);
mm/migrate_device.c
65
for (addr = start; addr < end; addr += PAGE_SIZE) {
mm/mincore.c
185
for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
mm/mincore.c
191
__mincore_unmapped_range(addr, addr + PAGE_SIZE,
mm/mincore.c
258
unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
mm/mincore.c
327
retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
mm/mincore.c
51
for (; addr != end; vec++, addr += PAGE_SIZE)
mm/mlock.c
384
for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
mm/mlock.c
797
locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/mlock.c
822
dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
mm/mm_init.c
1079
return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
mm/mm_init.c
1668
memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
mm/mm_init.c
2007
accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE);
mm/mm_init.c
2402
if (PAGE_SIZE < SZ_1M)
mm/mm_init.c
2403
numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
mm/mm_init.c
2421
if (unlikely((numentries * bucketsize) < PAGE_SIZE))
mm/mm_init.c
2422
numentries = PAGE_SIZE / bucketsize;
mm/mm_init.c
2464
} while (!table && size > PAGE_SIZE && --log2qty);
mm/mmap.c
189
next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
mm/mmap.c
190
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
mm/mmap.c
650
return PAGE_SIZE;
mm/mmap.c
766
info.low_limit = PAGE_SIZE;
mm/mmu_gather.c
177
VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE);
mm/mmu_gather.c
211
PAGE_SIZE);
mm/mprotect.c
128
addr += idx * PAGE_SIZE;
mm/mprotect.c
138
tlb_flush_pte_range(tlb, addr, nr_ptes * PAGE_SIZE);
mm/mprotect.c
227
tlb_change_page_size(tlb, PAGE_SIZE);
mm/mprotect.c
381
} while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
mm/mremap.c
265
for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
mm/mremap.c
266
new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
mm/nommu.c
1082
pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/nommu.c
1100
rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/nommu.c
1813
high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/nommu.c
516
for (; from < to; from += PAGE_SIZE) {
mm/numa_emulation.c
451
phys_dist = memblock_alloc(phys_size, PAGE_SIZE);
mm/numa_memblks.c
66
numa_distance = memblock_alloc(size, PAGE_SIZE);
mm/oom_kill.c
232
mm_pgtables_bytes(p->mm) / PAGE_SIZE;
mm/page-writeback.c
2244
static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
mm/page-writeback.c
2637
task_io_account_write(nr * PAGE_SIZE);
mm/page-writeback.c
2656
task_io_account_cancelled_write(nr * PAGE_SIZE);
mm/page-writeback.c
354
unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
mm/page-writeback.c
355
unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
mm/page-writeback.c
373
PAGE_SIZE);
mm/page-writeback.c
376
PAGE_SIZE);
mm/page-writeback.c
381
thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
mm/page-writeback.c
383
thresh = (ratio * available_memory) / PAGE_SIZE;
mm/page-writeback.c
386
bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
mm/page-writeback.c
388
bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
mm/page-writeback.c
445
dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
mm/page-writeback.c
498
if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
mm/page-writeback.c
530
if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
mm/page-writeback.c
679
bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;
mm/page_alloc.c
1293
alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
mm/page_alloc.c
1314
alloc_tag_sub(&ref, PAGE_SIZE * nr);
mm/page_alloc.c
1330
this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
mm/page_alloc.c
1439
PAGE_SIZE << order);
mm/page_alloc.c
1441
PAGE_SIZE << order);
mm/page_alloc.c
1656
accept_memory(page_to_phys(page), PAGE_SIZE << order);
mm/page_alloc.c
5405
unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
mm/page_alloc.c
5488
addr += PAGE_SIZE;
mm/page_alloc.c
5933
batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE);
mm/page_alloc.c
6244
for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
mm/page_alloc.c
6262
memset(direct_map_addr, poison, PAGE_SIZE);
mm/page_alloc.c
6550
lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
mm/page_alloc.c
7630
return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
mm/page_alloc.c
7642
accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
mm/page_counter.c
287
*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);
mm/page_ext.c
215
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
mm/page_ext.c
221
memmap_boot_pages_add(DIV_ROUND_UP(table_size, PAGE_SIZE));
mm/page_ext.c
282
memmap_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
mm/page_ext.c
329
memmap_pages_add(-1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
mm/page_frag_cache.c
119
size = PAGE_SIZE << encoded_page_decode_order(encoded_page);
mm/page_frag_cache.c
122
if (unlikely(fragsz > PAGE_SIZE)) {
mm/page_frag_cache.c
27
BUILD_BUG_ON(PAGE_FRAG_CACHE_PFMEMALLOC_BIT >= PAGE_SIZE);
mm/page_frag_cache.c
56
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
mm/page_idle.c
72
referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
mm/page_io.c
180
last_pos = PAGE_SIZE / sizeof(*data) - 1;
mm/page_io.c
182
data = kmap_local_folio(folio, i * PAGE_SIZE);
mm/page_io.c
95
blocks_per_page = PAGE_SIZE >> blkbits;
mm/page_owner.c
555
count = min_t(size_t, count, PAGE_SIZE);
mm/page_poison.c
28
memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
mm/page_poison.c
87
check_poison_mem(page, kasan_reset_tag(addr), PAGE_SIZE);
mm/page_reporting.c
152
unsigned int page_len = PAGE_SIZE << order;
mm/page_table_check.c
155
page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
mm/page_table_check.c
213
__page_table_check_pte_clear(mm, addr + PAGE_SIZE * i, ptep_get(ptep + i));
mm/page_table_check.c
280
addr += PAGE_SIZE;
mm/page_vma_mapped.c
305
pvmw->address += PAGE_SIZE;
mm/page_vma_mapped.c
309
if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
mm/pagewalk.c
1014
entry_size = PAGE_SIZE;
mm/pagewalk.c
40
err = ops->install_pte(addr, addr + PAGE_SIZE, &new_pte,
mm/pagewalk.c
50
err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
mm/pagewalk.c
54
if (addr >= end - PAGE_SIZE)
mm/pagewalk.c
56
addr += PAGE_SIZE;
mm/percpu-internal.h
119
return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
mm/percpu-internal.h
131
return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
mm/percpu.c
1087
*next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
mm/percpu.c
1363
region_size = ALIGN(start_offset + map_size, PAGE_SIZE);
mm/percpu.c
1472
chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
mm/percpu.c
1770
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
mm/percpu.c
2035
for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
mm/percpu.c
223
end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
mm/percpu.c
2425
ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
mm/percpu.c
2602
PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
mm/percpu.c
2603
IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
mm/percpu.c
3131
#define P4D_TABLE_SIZE PAGE_SIZE
mm/percpu.c
3135
#define PUD_TABLE_SIZE PAGE_SIZE
mm/percpu.c
3139
#define PMD_TABLE_SIZE PAGE_SIZE
mm/percpu.c
3143
#define PTE_TABLE_SIZE PAGE_SIZE
mm/percpu.c
3206
snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
mm/percpu.c
3208
ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
mm/percpu.c
3233
ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
mm/percpu.c
3248
vm_area_register_early(&vm, PAGE_SIZE);
mm/percpu.c
3279
pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
mm/percpu.c
3315
PAGE_SIZE, NULL, NULL);
mm/percpu.c
3343
fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
mm/percpu.c
509
if (size <= PAGE_SIZE)
mm/process_vm_access.c
114
bytes = pinned_pages * PAGE_SIZE - start_offset;
mm/process_vm_access.c
124
pa += pinned_pages * PAGE_SIZE;
mm/process_vm_access.c
176
/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
mm/process_vm_access.c
177
/ PAGE_SIZE + 1;
mm/process_vm_access.c
188
process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES * PAGE_SIZE,
mm/process_vm_access.c
36
size_t copy = PAGE_SIZE - offset;
mm/process_vm_access.c
59
#define PVM_MAX_USER_PAGES (PVM_MAX_KMALLOC_PAGES * PAGE_SIZE / sizeof(struct page *))
mm/process_vm_access.c
90
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
mm/readahead.c
360
unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
mm/readahead.c
776
new_index = new_start / PAGE_SIZE;
mm/readahead.c
810
new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);
mm/rmap.c
2162
end_addr = address + nr_pages * PAGE_SIZE;
mm/rmap.c
2581
set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);
mm/rmap.c
2857
mm, addr, addr + PAGE_SIZE, owner);
mm/rmap.c
986
pvmw.address += (nr - 1) * PAGE_SIZE;
mm/secretmem.c
100
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
mm/shmem.c
1113
pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/shmem.c
1357
loff_t holebegin = round_up(newsize, PAGE_SIZE);
mm/shmem.c
1617
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
mm/shmem.c
212
pages * VM_ACCT(PAGE_SIZE));
mm/shmem.c
218
vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
mm/shmem.c
2579
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
mm/shmem.c
2848
hpage_size = PAGE_SIZE << order;
mm/shmem.c
2865
inflated_len = len + hpage_size - PAGE_SIZE;
mm/shmem.c
3240
PAGE_SIZE);
mm/shmem.c
3269
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
mm/shmem.c
3423
fsize = PAGE_SIZE;
mm/shmem.c
3536
size = min_t(size_t, size, PAGE_SIZE - offset);
mm/shmem.c
3567
len = min_t(size_t, len, npages * PAGE_SIZE);
mm/shmem.c
3618
size = umin(size, PAGE_SIZE - offset);
mm/shmem.c
3707
loff_t unmap_start = round_up(offset, PAGE_SIZE);
mm/shmem.c
3708
loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
mm/shmem.c
3750
end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/shmem.c
3852
buf->f_bsize = PAGE_SIZE;
mm/shmem.c
4114
if (len > PAGE_SIZE)
mm/shmem.c
4622
ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
mm/shmem.c
5088
sb->s_blocksize = PAGE_SIZE;
mm/shmem.c
5647
static char str_dup[PAGE_SIZE] __initdata;
mm/shmem.c
5656
if (!str || strlen(str) + 1 > PAGE_SIZE)
mm/shmem.c
616
i_size = round_up(i_size, PAGE_SIZE);
mm/shmem.c
773
folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
mm/shmem.c
785
end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
mm/show_mem.c
111
val->mem_unit = PAGE_SIZE;
mm/show_mem.c
83
val->mem_unit = PAGE_SIZE;
mm/slab.h
182
return PAGE_SIZE << slab_order(slab);
mm/slab.h
649
return PAGE_SIZE << large_kmalloc_order(page);
mm/slab_common.c
1336
((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
mm/slab_common.c
647
if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
mm/slab_common.c
795
return PAGE_SIZE << get_order(size);
mm/slub.c
1177
min_t(unsigned int, s->object_size, PAGE_SIZE));
mm/slub.c
2515
size = PAGE_SIZE << order;
mm/slub.c
3438
PAGE_SIZE << order);
mm/slub.c
3452
-(PAGE_SIZE << order));
mm/slub.c
5208
PAGE_SIZE << order);
mm/slub.c
5224
trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
mm/slub.c
5234
trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
mm/slub.c
5250
PAGE_SIZE << get_order(size), flags, node);
mm/slub.c
577
return ((unsigned int)PAGE_SIZE << order) / size;
mm/slub.c
6406
-(PAGE_SIZE << order));
mm/slub.c
6708
if (size > PAGE_SIZE) {
mm/slub.c
6755
if (ret || size <= PAGE_SIZE)
mm/slub.c
7354
unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
mm/slub.c
7635
if (s->size >= PAGE_SIZE)
mm/slub.c
9733
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
mm/sparse-vmemmap.c
164
p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
mm/sparse-vmemmap.c
202
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
mm/sparse-vmemmap.c
215
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
mm/sparse-vmemmap.c
228
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
mm/sparse-vmemmap.c
241
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
mm/sparse-vmemmap.c
275
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
mm/sparse-vmemmap.c
289
for (; addr < end; addr += PAGE_SIZE) {
mm/sparse-vmemmap.c
337
for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) {
mm/sparse-vmemmap.c
346
for (maddr = addr; headpages-- > 0; maddr += PAGE_SIZE) {
mm/sparse-vmemmap.c
350
memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE);
mm/sparse-vmemmap.c
375
for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) {
mm/sparse-vmemmap.c
392
for (maddr = addr; maddr < addr + headsize; maddr += PAGE_SIZE) {
mm/sparse-vmemmap.c
493
addr -= PAGE_SIZE;
mm/sparse-vmemmap.c
539
next = addr + PAGE_SIZE;
mm/sparse-vmemmap.c
548
next += PAGE_SIZE;
mm/sparse.c
431
__func__, size, PAGE_SIZE, nid, &addr);
mm/sparse.c
568
PAGE_SIZE));
mm/sparse.c
858
memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
mm/sparse.c
862
PAGE_SIZE)));
mm/sparse.c
909
memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
mm/swap_state.c
797
if (faddr == prev_faddr + PAGE_SIZE)
mm/swap_state.c
799
else if (prev_faddr == faddr + PAGE_SIZE)
mm/swap_state.c
800
left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
mm/swap_state.c
847
for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
mm/swap_table.h
14
#define SWP_TABLE_USE_PAGE (sizeof(struct swap_table) == PAGE_SIZE)
mm/swapfile.c
2253
} while (addr += PAGE_SIZE, addr != end);
mm/swapfile.c
3894
for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
mm/truncate.c
254
split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
mm/truncate.c
267
PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
mm/truncate.c
387
start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
mm/truncate.c
782
loff_t holebegin = round_up(newsize, PAGE_SIZE);
mm/truncate.c
850
if (from >= to || bsize >= PAGE_SIZE)
mm/truncate.c
854
if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
mm/truncate.c
857
folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
mm/truncate.c
903
loff_t unmap_start = round_up(lstart, PAGE_SIZE);
mm/truncate.c
904
loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
mm/usercopy.c
172
if (n > PAGE_SIZE - offset)
mm/userfaultfd.c
1129
src_addr += PAGE_SIZE;
mm/userfaultfd.c
1132
dst_addr += PAGE_SIZE;
mm/userfaultfd.c
1211
return PAGE_SIZE;
mm/userfaultfd.c
1238
return PAGE_SIZE;
mm/userfaultfd.c
1325
ret = PAGE_SIZE;
mm/userfaultfd.c
158
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
mm/userfaultfd.c
277
PAGE_SIZE);
mm/userfaultfd.c
833
PAGE_SIZE);
mm/userfaultfd.c
845
dst_addr += PAGE_SIZE;
mm/userfaultfd.c
846
src_addr += PAGE_SIZE;
mm/userfaultfd.c
847
copied += PAGE_SIZE;
mm/util.c
1042
ret = memcmp(addr1, addr2, PAGE_SIZE);
mm/vma.c
3107
address += PAGE_SIZE;
mm/vma_exec.c
140
vma->vm_start = vma->vm_end - PAGE_SIZE;
mm/vmalloc.c
101
unsigned long size = PAGE_SIZE;
mm/vmalloc.c
124
if (size != PAGE_SIZE) {
mm/vmalloc.c
1854
if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
mm/vmalloc.c
1920
unsigned int idx = (size - 1) / PAGE_SIZE;
mm/vmalloc.c
2195
return log * (32UL * 1024 * 1024 / PAGE_SIZE);
mm/vmalloc.c
2570
#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
mm/vmalloc.c
2581
#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
mm/vmalloc.c
2859
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
mm/vmalloc.c
2915
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
mm/vmalloc.c
3081
va = alloc_vmap_area(size, PAGE_SIZE,
mm/vmalloc.c
3226
size += PAGE_SIZE;
mm/vmalloc.c
3375
page_size = PAGE_SIZE << page_order;
mm/vmalloc.c
3611
area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
mm/vmalloc.c
3616
count * PAGE_SIZE, vmap_pfn_apply, &data)) {
mm/vmalloc.c
3622
(unsigned long)area->addr + count * PAGE_SIZE);
mm/vmalloc.c
371
unsigned long size = PAGE_SIZE;
mm/vmalloc.c
379
if (size != PAGE_SIZE) {
mm/vmalloc.c
3851
if (array_size > PAGE_SIZE) {
mm/vmalloc.c
3861
nr_small_pages * PAGE_SIZE, array_size);
mm/vmalloc.c
3904
area->nr_pages * PAGE_SIZE);
mm/vmalloc.c
3924
area->nr_pages * PAGE_SIZE);
mm/vmalloc.c
4430
num = min_t(size_t, remains, PAGE_SIZE);
mm/vmalloc.c
4458
length = PAGE_SIZE - offset;
mm/vmalloc.c
4749
uaddr += PAGE_SIZE;
mm/vmalloc.c
4750
kaddr += PAGE_SIZE;
mm/vmalloc.c
4751
size -= PAGE_SIZE;
mm/vmalloc.c
5372
vmap_zone_size = (1 << 4) * PAGE_SIZE;
mm/vmalloc.c
561
} while (pte++, addr += PAGE_SIZE, addr != end);
mm/vmscan.c
3527
for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
mm/vmscan.c
3563
if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
mm/vmscan.c
4240
if (end - start == PAGE_SIZE)
mm/vmscan.c
4243
if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
mm/vmscan.c
4244
if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
mm/vmscan.c
4245
end = start + MIN_LRU_BATCH * PAGE_SIZE;
mm/vmscan.c
4246
else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
mm/vmscan.c
4247
start = end - MIN_LRU_BATCH * PAGE_SIZE;
mm/vmscan.c
4249
start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
mm/vmscan.c
4250
end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
mm/vmscan.c
4256
pte -= (addr - start) / PAGE_SIZE;
mm/vmscan.c
4258
for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
mm/vmscan.c
7763
nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
mm/vmstat.c
391
VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
mm/vmstat.c
633
VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
mm/zsmalloc.c
105
#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
mm/zsmalloc.c
1063
if (off + mem_len <= PAGE_SIZE) {
mm/zsmalloc.c
1071
sizes[0] = PAGE_SIZE - off;
mm/zsmalloc.c
1105
if (off + mem_len <= PAGE_SIZE) {
mm/zsmalloc.c
1139
if (off + mem_len <= PAGE_SIZE) {
mm/zsmalloc.c
1147
sizes[0] = PAGE_SIZE - off;
mm/zsmalloc.c
120
#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS)
mm/zsmalloc.c
1201
if (off + mem_len <= PAGE_SIZE) {
mm/zsmalloc.c
1211
sizes[0] = PAGE_SIZE - off;
mm/zsmalloc.c
1437
if (s_off + class->size > PAGE_SIZE)
mm/zsmalloc.c
1438
s_size = PAGE_SIZE - s_off;
mm/zsmalloc.c
1440
if (d_off + class->size > PAGE_SIZE)
mm/zsmalloc.c
1441
d_size = PAGE_SIZE - d_off;
mm/zsmalloc.c
1466
if (s_off >= PAGE_SIZE) {
mm/zsmalloc.c
1476
if (d_off >= PAGE_SIZE) {
mm/zsmalloc.c
1504
while (offset < PAGE_SIZE) {
mm/zsmalloc.c
1746
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
mm/zsmalloc.c
2039
waste = (i * PAGE_SIZE) % class_size;
mm/zsmalloc.c
2092
objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
mm/zsmalloc.c
440
BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
mm/zsmalloc.c
868
while ((off += class->size) < PAGE_SIZE) {
mm/zsmalloc.c
890
off %= PAGE_SIZE;
mm/zswap.c
1457
if (entry->length == PAGE_SIZE)
mm/zswap.c
1715
*val = zswap_total_pages() * PAGE_SIZE;
mm/zswap.c
726
if (entry->length == PAGE_SIZE)
mm/zswap.c
744
buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
mm/zswap.c
859
unsigned int dlen = PAGE_SIZE;
mm/zswap.c
868
sg_set_page(&input, page, PAGE_SIZE, 0);
mm/zswap.c
870
sg_init_one(&output, dst, PAGE_SIZE);
mm/zswap.c
871
acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
mm/zswap.c
895
if (comp_ret || !dlen || dlen >= PAGE_SIZE) {
mm/zswap.c
902
dlen = PAGE_SIZE;
mm/zswap.c
944
if (entry->length == PAGE_SIZE) {
mm/zswap.c
947
WARN_ON_ONCE(input->length != PAGE_SIZE);
mm/zswap.c
950
memcpy_from_sglist(dst, input, 0, PAGE_SIZE);
mm/zswap.c
951
dlen = PAGE_SIZE;
mm/zswap.c
956
sg_set_folio(&output, folio, PAGE_SIZE, 0);
mm/zswap.c
958
entry->length, PAGE_SIZE);
mm/zswap.c
967
if (!ret && dlen == PAGE_SIZE)
net/9p/trans_virtio.c
230
s = PAGE_SIZE - data_off;
net/9p/trans_virtio.c
337
nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
net/9p/trans_virtio.c
358
nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
net/9p/trans_virtio.c
359
(unsigned long)p / PAGE_SIZE;
net/9p/trans_virtio.c
372
p += PAGE_SIZE;
net/9p/trans_virtio.c
398
n = PAGE_SIZE - offs;
net/9p/trans_virtio.c
444
out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
net/9p/trans_virtio.c
463
in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
net/9p/trans_virtio.c
802
.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
net/9p/trans_virtio.c
93
return PAGE_SIZE - offset_in_page(data);
net/atm/atm_sysfs.c
19
return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type);
net/atm/atm_sysfs.c
27
return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi);
net/atm/atm_sysfs.c
40
count += scnprintf(buf + count, PAGE_SIZE - count,
net/atm/atm_sysfs.c
58
return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number);
net/atm/atm_sysfs.c
66
return scnprintf(buf, PAGE_SIZE, "%d\n",
net/atm/atm_sysfs.c
90
return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
net/atm/mpoa_proc.c
215
if (nbytes >= PAGE_SIZE)
net/atm/mpoa_proc.c
216
nbytes = PAGE_SIZE-1;
net/bluetooth/hci_conn.c
2688
if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
net/bluetooth/hci_core.c
797
if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
net/bluetooth/rfcomm/tty.c
510
if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di))
net/bpf/bpf_dummy_struct_ops.c
186
err = arch_protect_bpf_trampoline(image, PAGE_SIZE);
net/bpf/test_run.c
1047
linear_sz = min_t(u32, linear_sz, PAGE_SIZE - headroom - tailroom);
net/bpf/test_run.c
1096
PAGE_SIZE);
net/bpf/test_run.c
1105
skb->truesize += PAGE_SIZE;
net/bpf/test_run.c
116
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
net/bpf/test_run.c
1330
max_linear_sz = PAGE_SIZE - headroom - tailroom;
net/bpf/test_run.c
1347
rxqueue->xdp_rxq.frag_size = PAGE_SIZE;
net/bpf/test_run.c
1379
PAGE_SIZE);
net/bpf/test_run.c
657
if (user_size > PAGE_SIZE - headroom - tailroom)
net/bridge/br_ioctl.c
64
if (maxnum > PAGE_SIZE/sizeof(struct __fdb_entry))
net/bridge/br_ioctl.c
65
maxnum = PAGE_SIZE/sizeof(struct __fdb_entry);
net/ceph/cls_lock_client.c
108
if (unlock_op_buf_size > PAGE_SIZE)
net/ceph/cls_lock_client.c
161
if (break_op_buf_size > PAGE_SIZE)
net/ceph/cls_lock_client.c
210
if (cookie_op_buf_size > PAGE_SIZE)
net/ceph/cls_lock_client.c
343
size_t reply_len = PAGE_SIZE;
net/ceph/cls_lock_client.c
349
if (get_info_op_buf_size > PAGE_SIZE)
net/ceph/cls_lock_client.c
404
if (assert_op_buf_size > PAGE_SIZE)
net/ceph/cls_lock_client.c
49
if (lock_op_buf_size > PAGE_SIZE)
net/ceph/crypto.c
224
chunk_len = PAGE_SIZE;
net/ceph/messenger.c
1116
BUG_ON(*page_offset + *length > PAGE_SIZE);
net/ceph/messenger.c
861
BUG_ON(cursor->page_offset >= PAGE_SIZE);
net/ceph/messenger.c
864
*length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
net/ceph/messenger.c
873
BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
net/ceph/messenger.c
937
*length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
net/ceph/messenger.c
953
BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
net/ceph/messenger.c
993
len = iov_iter_get_pages2(&cursor->iov_iter, &page, PAGE_SIZE,
net/ceph/messenger_v1.c
47
BUG_ON(page_offset + length > PAGE_SIZE);
net/ceph/messenger_v1.c
529
size_t size = min(con->v1.out_skip, (int)PAGE_SIZE);
net/ceph/messenger_v2.c
1082
int len = min_t(int, ret, PAGE_SIZE - soff);
net/ceph/messenger_v2.c
2059
min(con->v2.in_enc_resid, (int)PAGE_SIZE), 0);
net/ceph/messenger_v2.c
291
min(con->v2.out_zero, (int)PAGE_SIZE), 0);
net/ceph/messenger_v2.c
3218
min(con->v2.out_enc_resid, (int)PAGE_SIZE), 0);
net/ceph/messenger_v2.c
3432
len = min(zero_len, (int)PAGE_SIZE);
net/ceph/messenger_v2.c
443
PAGE_SIZE
net/ceph/messenger_v2.c
901
len = min_t(int, end - p, PAGE_SIZE);
net/ceph/messenger_v2.c
960
int len = min(resid, (int)PAGE_SIZE - off);
net/ceph/osd_client.c
3166
lreq->notify_id_pages, PAGE_SIZE, 0, false, false);
net/ceph/osd_client.c
5081
pages, PAGE_SIZE, 0, false, true);
net/ceph/osd_client.c
5138
if (req_len > PAGE_SIZE)
net/ceph/osd_client.c
5230
PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
net/ceph/osd_client.c
5234
PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
net/ceph/osd_client.c
5324
end = p + PAGE_SIZE;
net/ceph/osd_client.c
5329
op->indata_len = PAGE_SIZE - (end - p);
net/ceph/pagelist.c
107
space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */
net/ceph/pagelist.c
67
pl->room += PAGE_SIZE;
net/ceph/pagevec.c
102
while (len >= PAGE_SIZE) {
net/ceph/pagevec.c
104
zero_user_segment(pages[i], 0, PAGE_SIZE);
net/ceph/pagevec.c
105
len -= PAGE_SIZE;
net/ceph/pagevec.c
67
size_t l = min_t(size_t, PAGE_SIZE-po, left);
net/ceph/pagevec.c
73
if (po == PAGE_SIZE) {
net/ceph/pagevec.c
95
int end = min((int)PAGE_SIZE, off + len);
net/core/datagram.c
668
int size = min_t(int, copied, PAGE_SIZE - start);
net/core/datagram.c
737
size, PAGE_SIZE);
net/core/dev.c
13157
#define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE)
net/core/dev.c
1362
const int max_netdevices = 8*PAGE_SIZE;
net/core/devmem.c
102
index = offset / PAGE_SIZE;
net/core/devmem.c
118
PAGE_SIZE)))
net/core/devmem.c
121
gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
net/core/devmem.c
245
dmabuf->size / PAGE_SIZE);
net/core/devmem.c
279
owner->area.num_niovs = len / PAGE_SIZE;
net/core/devmem.c
305
binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
net/core/devmem.c
424
*off = virt_addr % PAGE_SIZE;
net/core/devmem.c
425
*size = PAGE_SIZE - *off;
net/core/devmem.c
427
return binding->tx_vec[virt_addr / PAGE_SIZE];
net/core/devmem.c
96
dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
net/core/hwbm.c
15
if (likely(bm_pool->frag_size <= PAGE_SIZE))
net/core/hwbm.c
28
if (likely(frag_size <= PAGE_SIZE))
net/core/net-sysfs.c
1760
return len < PAGE_SIZE ? len : -EINVAL;
net/core/net-sysfs.c
978
return len < PAGE_SIZE ? len : -EINVAL;
net/core/net_test.c
178
KUNIT_ASSERT_LE(test, pg_off, PAGE_SIZE);
net/core/page_pool.c
1057
unsigned int max_size = PAGE_SIZE << pool->p.order;
net/core/page_pool.c
536
(PAGE_SIZE << pool->p.order), pool->p.dma_dir,
net/core/page_pool.c
559
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
net/core/page_pool.c
743
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
net/core/page_pool_user.c
241
refsz = PAGE_SIZE << pool->p.order;
net/core/pktgen.c
2839
len = datalen - frags * PAGE_SIZE;
net/core/pktgen.c
2842
datalen = frags * PAGE_SIZE;
net/core/pktgen.c
2846
frag_len = min_t(int, datalen / frags, PAGE_SIZE);
net/core/pktgen.c
2863
min(datalen, PAGE_SIZE));
net/core/skbuff.c
2018
while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
net/core/skbuff.c
2020
psize = (PAGE_SIZE << order);
net/core/skbuff.c
3209
(unsigned long) skb->data & (PAGE_SIZE - 1),
net/core/skbuff.c
4470
PAGE_SIZE - pg_off);
net/core/skbuff.c
6756
if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
net/core/skbuff.c
6767
while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
net/core/skbuff.c
6785
PAGE_SIZE << order);
net/core/skbuff.c
6788
skb->truesize += (PAGE_SIZE << order);
net/core/skbuff.c
7374
size_t part = min_t(size_t, PAGE_SIZE - off, len);
net/core/skbuff.c
773
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
net/core/skbuff.c
851
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
net/core/skbuff.c
954
max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
net/core/skbuff.c
955
if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
net/core/skbuff.c
990
size = min_t(u32, len, PAGE_SIZE);
net/core/skmsg.c
339
use = min_t(int, copied, PAGE_SIZE - offset);
net/core/sock.c
1022
bytes = round_down(bytes, PAGE_SIZE);
net/core/sock.c
3151
pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
net/core/sock.c
3157
pfrag->size = PAGE_SIZE;
net/core/xdp.c
588
if (sizeof(*xdpf) + totsize > PAGE_SIZE)
net/core/xdp.c
607
xdpf->frame_sz = PAGE_SIZE;
net/core/xdp.c
874
if (unlikely(totalsize > PAGE_SIZE))
net/core/xdp.c
885
nxdpf->frame_sz = PAGE_SIZE;
net/ethtool/ioctl.c
1992
data = kzalloc(PAGE_SIZE, GFP_USER);
net/ethtool/ioctl.c
1998
eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
net/ethtool/ioctl.c
2061
data = kzalloc(PAGE_SIZE, GFP_USER);
net/ethtool/ioctl.c
2067
eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
net/ipv4/esp4.c
422
if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
net/ipv4/esp4.c
423
ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
net/ipv4/fib_trie.c
326
if (size <= PAGE_SIZE)
net/ipv4/inet_hashtables.c
1312
nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz);
net/ipv4/tcp.c
1937
if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
net/ipv4/tcp.c
2107
(pages_remaining * PAGE_SIZE); /* Failed map. */
net/ipv4/tcp.c
2120
bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining);
net/ipv4/tcp.c
2130
const int bytes_not_mapped = PAGE_SIZE * pages_remaining;
net/ipv4/tcp.c
2154
bytes_mapped = PAGE_SIZE * pages_mapped;
net/ipv4/tcp.c
2245
if (address & (PAGE_SIZE - 1) || address != zc->address)
net/ipv4/tcp.c
2256
if (inq < PAGE_SIZE) {
net/ipv4/tcp.c
2270
total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
net/ipv4/tcp.c
2282
while (length + PAGE_SIZE <= zc->length) {
net/ipv4/tcp.c
2286
if (zc->recv_skip_hint < PAGE_SIZE) {
net/ipv4/tcp.c
2323
length += PAGE_SIZE;
net/ipv4/tcp.c
2324
zc->recv_skip_hint -= PAGE_SIZE;
net/ipv4/tcp.c
2327
zc->recv_skip_hint < PAGE_SIZE) {
net/ipv4/tcp.c
5347
init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
net/ipv4/tcp.c
5351
init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
net/ipv4/tcp_input.c
5579
if (size > PAGE_SIZE) {
net/ipv4/tcp_input.c
5926
end - start >= SKB_WITH_OVERHEAD(PAGE_SIZE)) {
net/ipv4/tcp_output.c
4226
!skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE),
net/ipv4/tcp_sigpool.c
30
#define CPOOL_SIZE (PAGE_SIZE / sizeof(struct sigpool_entry))
net/ipv4/udp.c
1655
amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
net/ipv4/udp.c
3853
net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE;
net/ipv4/udp.c
3854
net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE;
net/ipv6/esp6.c
451
if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
net/ipv6/esp6.c
452
ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
net/ipv6/mcast.c
1774
size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
net/iucv/af_iucv.c
1115
if (len < PAGE_SIZE) {
net/iucv/af_iucv.c
1120
linear = PAGE_SIZE - headroom;
net/iucv/af_iucv.c
985
linear = min(len, PAGE_SIZE - headroom);
net/iucv/af_iucv.c
987
if (len < PAGE_SIZE) {
net/iucv/af_iucv.c
995
linear = PAGE_SIZE - headroom;
net/key/af_key.c
452
if (len > PAGE_SIZE)
net/mac80211/debugfs_sta.c
1044
size_t buf_sz = PAGE_SIZE;
net/mac80211/debugfs_sta.c
671
size_t buf_sz = PAGE_SIZE;
net/mptcp/protocol.c
2536
fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag;
net/mptcp/protocol.h
678
int frag = (subflow->lent_mem_frag + size) & (PAGE_SIZE - 1);
net/netfilter/nf_conntrack_core.c
2533
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
net/netfilter/nf_conntrack_core.c
2649
nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
net/netfilter/nf_conntrack_core.c
2651
else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
net/netfilter/nf_tables_api.c
4780
if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
net/netfilter/nf_tables_api.c
4786
n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
net/netfilter/nf_tables_api.c
4787
if (n >= BITS_PER_BYTE * PAGE_SIZE) {
net/netfilter/nf_tables_api.c
4788
min += BITS_PER_BYTE * PAGE_SIZE;
net/netfilter/nf_tables_api.c
4789
memset(inuse, 0, PAGE_SIZE);
net/netfilter/nft_set_pipapo.c
625
const unsigned int extra = PAGE_SIZE / sizeof(*new_mt);
net/netfilter/x_tables.c
1356
if (size > PAGE_SIZE)
net/netfilter/xt_hashlimit.c
291
if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
net/netlink/af_netlink.c
1183
if (head_size <= PAGE_SIZE || broadcast)
net/openvswitch/meter.c
740
free_mem_bytes = nr_free_buffer_pages() * (PAGE_SIZE >> 5);
net/packet/af_packet.c
2474
for (start = h.raw; start < end; start += PAGE_SIZE)
net/packet/af_packet.c
2615
len_max = PAGE_SIZE - offset;
net/packet/af_packet.c
2639
len_max = PAGE_SIZE;
net/packet/af_packet.c
2921
if (prepad + len < PAGE_SIZE || !linear)
net/packet/af_packet.c
2924
if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
net/packet/af_packet.c
2925
linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
net/packet/af_packet.c
4373
buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
net/packet/af_packet.c
4548
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
net/packet/af_packet.c
4600
* PAGE_SIZE;
net/packet/af_packet.c
4626
start += PAGE_SIZE;
net/packet/af_packet.c
4627
kaddr += PAGE_SIZE;
net/packet/af_packet.c
747
start += PAGE_SIZE;
net/packet/af_packet.c
750
for (; start < end; start += PAGE_SIZE)
net/rds/ib_frmr.c
135
&off, PAGE_SIZE);
net/rds/ib_recv.c
1064
rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
net/rds/ib_recv.c
814
to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
net/rds/ib_recv.c
832
if (map_off == PAGE_SIZE) {
net/rds/info.c
124
this = min(bytes, PAGE_SIZE - iter->offset);
net/rds/info.c
136
if (iter->offset == PAGE_SIZE) {
net/rds/info.c
178
if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
net/rds/info.c
216
iter.offset = start & (PAGE_SIZE - 1);
net/rds/message.c
378
int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
net/rds/message.c
387
rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
net/rds/message.c
398
PAGE_SIZE, 0);
net/rds/message.c
433
copied = iov_iter_get_pages2(from, &pages, PAGE_SIZE,
net/rds/page.c
104
if (rem->r_page && bytes <= (PAGE_SIZE - rem->r_offset)) {
net/rds/page.c
112
if (rem->r_offset >= PAGE_SIZE) {
net/rds/page.c
80
if (bytes >= PAGE_SIZE) {
net/rds/page.c
85
sg_set_page(scat, page, PAGE_SIZE, 0);
net/rds/page.c
97
if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
net/rds/rdma.c
282
sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
net/rds/rdma.c
61
return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
net/rds/rdma.c
769
min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
net/rds/rds.h
60
#define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
net/rds/rds.h
61
#define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
net/rds/send.c
1146
int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
net/rds/tcp_recv.c
127
to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
net/rds/tcp_recv.c
140
if (map_off == PAGE_SIZE) {
net/rxrpc/key.c
625
if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->key)
net/rxrpc/output.c
333
size_t part = umin(probe_mtu - len, PAGE_SIZE);
net/rxrpc/rxperf.c
523
len = umin(reply_len, PAGE_SIZE);
net/rxrpc/server_key.c
131
if (optlen <= 0 || optlen > PAGE_SIZE - 1)
net/sctp/protocol.c
1509
sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */
net/sctp/protocol.c
1513
sysctl_sctp_wmem[0] = PAGE_SIZE;
net/sctp/protocol.c
1571
num_entries = (1UL << order) * PAGE_SIZE /
net/smc/smc_core.c
2096
compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14));
net/smc/smc_core.c
2154
nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE;
net/smc/smc_core.c
2166
size = min_t(int, PAGE_SIZE - offset, buf_size);
net/smc/smc_core.c
2347
buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
net/smc/smc_ib.c
709
&offset, PAGE_SIZE);
net/smc/smc_ib.c
855
if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
net/smc/smc_ib.c
856
cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
net/smc/smc_rx.c
169
PAGE_ALIGN(len + offset) / PAGE_SIZE : 1;
net/smc/smc_rx.c
200
size = min_t(int, PAGE_SIZE - offset, left);
net/smc/smc_rx.c
223
for (i = 0; i < PAGE_ALIGN(bytes + offset) / PAGE_SIZE; i++)
net/sunrpc/auth_gss/gss_rpc_upcall.c
216
arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);
net/sunrpc/auth_gss/gss_rpc_xdr.c
829
PAGE_SIZE/2 /* pretty arbitrary */,
net/sunrpc/auth_gss/gss_rpc_xdr.c
830
arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
net/sunrpc/auth_gss/svcauth_gss.c
1071
pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
net/sunrpc/auth_gss/svcauth_gss.c
1099
min_t(unsigned int, PAGE_SIZE - pgto_offs,
net/sunrpc/auth_gss/svcauth_gss.c
1100
PAGE_SIZE - pgfrom_offs));
net/sunrpc/auth_gss/svcauth_gss.c
1877
if (tail->iov_base >= head->iov_base + PAGE_SIZE)
net/sunrpc/auth_gss/svcauth_gss.c
1882
+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
net/sunrpc/auth_gss/svcauth_gss.c
1896
if (head->iov_len + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
net/sunrpc/backchannel_rqst.c
108
req->rq_rcv_buf.len = PAGE_SIZE;
net/sunrpc/backchannel_rqst.c
308
req->rq_rcv_buf.len = PAGE_SIZE;
net/sunrpc/backchannel_rqst.c
71
buf->head[0].iov_len = PAGE_SIZE;
net/sunrpc/backchannel_rqst.c
77
buf->buflen = PAGE_SIZE;
net/sunrpc/backchannel_rqst.c
87
xdr_buf_init(buf, page_address(page), PAGE_SIZE);
net/sunrpc/cache.c
1240
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
net/sunrpc/cache.c
588
#define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
net/sunrpc/cache.c
828
int len = PAGE_SIZE;
net/sunrpc/cache.c
833
return PAGE_SIZE - len;
net/sunrpc/rpc_pipe.c
1125
sb->s_blocksize = PAGE_SIZE;
net/sunrpc/socklib.c
83
unsigned int len = min(PAGE_SIZE - poff, pglen);
net/sunrpc/svc.c
1606
rqstp->rq_res.buflen = PAGE_SIZE;
net/sunrpc/svc.c
1796
len = min_t(size_t, remaining, PAGE_SIZE);
net/sunrpc/svc.c
500
serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
net/sunrpc/svc_xprt.c
678
arg->head[0].iov_len = PAGE_SIZE;
net/sunrpc/svc_xprt.c
682
arg->page_len = (pages-2)*PAGE_SIZE;
net/sunrpc/svc_xprt.c
683
arg->len = (pages-1)*PAGE_SIZE;
net/sunrpc/svcsock.c
1013
npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
net/sunrpc/svcsock.c
1027
npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
net/sunrpc/svcsock.c
352
for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE)
net/sunrpc/svcsock.c
353
bvec_set_page(&bvec[i], rqstp->rq_pages[i], PAGE_SIZE, 0);
net/sunrpc/svcsock.c
684
DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
net/sunrpc/svcsock.c
79
SUNRPC_MAX_UDP_SENDPAGES = 1 + RPCSVC_MAXPAYLOAD_UDP / PAGE_SIZE + 1 + 1
net/sunrpc/svcsock.c
994
npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
net/sunrpc/sysfs.c
166
size_t buflen = PAGE_SIZE;
net/sunrpc/sysfs.c
217
size_t buflen = PAGE_SIZE;
net/sunrpc/xdr.c
1020
if (nbytes > PAGE_SIZE)
net/sunrpc/xdr.c
1049
if (space_left - frag1bytes >= PAGE_SIZE)
net/sunrpc/xdr.c
1050
xdr->end = p + PAGE_SIZE;
net/sunrpc/xdr.c
112
return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
net/sunrpc/xdr.c
1128
thislen = xdr->buf->page_len % PAGE_SIZE;
net/sunrpc/xdr.c
1129
thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);
net/sunrpc/xdr.c
1195
xdr->end = (void *)xdr->p + PAGE_SIZE;
net/sunrpc/xdr.c
1196
xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
net/sunrpc/xdr.c
125
bvec_set_page(&buf->bvec[i], buf->pages[i], PAGE_SIZE,
net/sunrpc/xdr.c
1359
if (pgend > PAGE_SIZE)
net/sunrpc/xdr.c
1360
pgend = PAGE_SIZE;
net/sunrpc/xdr.c
167
PAGE_SIZE - offset);
net/sunrpc/xdr.c
2007
avail_page = min_t(unsigned int, PAGE_SIZE - base,
net/sunrpc/xdr.c
2091
(unsigned int) PAGE_SIZE);
net/sunrpc/xdr.c
2185
page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
net/sunrpc/xdr.c
2187
thislen = PAGE_SIZE - page_offset;
net/sunrpc/xdr.c
2198
thislen = PAGE_SIZE;
net/sunrpc/xdr.c
258
if (pgto_base >= PAGE_SIZE) {
net/sunrpc/xdr.c
262
if (pgfrom_base >= PAGE_SIZE){
net/sunrpc/xdr.c
268
if (copy > (PAGE_SIZE - pgto_base))
net/sunrpc/xdr.c
269
copy = PAGE_SIZE - pgto_base;
net/sunrpc/xdr.c
270
if (copy > (PAGE_SIZE - pgfrom_base))
net/sunrpc/xdr.c
271
copy = PAGE_SIZE - pgfrom_base;
net/sunrpc/xdr.c
328
pgto_base = PAGE_SIZE;
net/sunrpc/xdr.c
332
pgfrom_base = PAGE_SIZE;
net/sunrpc/xdr.c
381
copy = PAGE_SIZE - pgbase;
net/sunrpc/xdr.c
394
if (pgbase == PAGE_SIZE) {
net/sunrpc/xdr.c
428
copy = PAGE_SIZE - pgbase;
net/sunrpc/xdr.c
437
if (pgbase == PAGE_SIZE) {
net/sunrpc/xdr.c
488
zero = PAGE_SIZE - pgbase;
net/sunrpc/xdr.c
515
npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
net/sunrpc/xdr.c
978
xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
net/sunrpc/xprtrdma/backchannel.c
192
size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
net/sunrpc/xprtrdma/backchannel.c
48
maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
net/sunrpc/xprtrdma/frwr_ops.c
320
n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
net/sunrpc/xprtrdma/rpc_rdma.c
1075
curlen = PAGE_SIZE - page_base;
net/sunrpc/xprtrdma/rpc_rdma.c
142
PAGE_SIZE - offset, remaining);
net/sunrpc/xprtrdma/rpc_rdma.c
197
len -= PAGE_SIZE;
net/sunrpc/xprtrdma/rpc_rdma.c
246
seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
net/sunrpc/xprtrdma/rpc_rdma.c
610
len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
net/sunrpc/xprtrdma/rpc_rdma.c
692
len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
107
if (size > PAGE_SIZE) {
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
834
buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
853
buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
net/sunrpc/xprtrdma/svc_rdma_rw.c
468
PAGE_SIZE - page_off);
net/sunrpc/xprtrdma/svc_rdma_rw.c
768
PAGE_SIZE - head->rc_pageoff);
net/sunrpc/xprtrdma/svc_rdma_rw.c
778
if (head->rc_pageoff == PAGE_SIZE) {
net/sunrpc/xprtrdma/svc_rdma_rw.c
865
PAGE_SIZE - head->rc_pageoff);
net/sunrpc/xprtrdma/svc_rdma_rw.c
875
if (head->rc_pageoff == PAGE_SIZE) {
net/sunrpc/xprtrdma/svc_rdma_sendto.c
646
len = min_t(u32, PAGE_SIZE - pageoff, remaining);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
691
remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
760
len = min_t(u32, PAGE_SIZE - pageoff, remaining);
net/sunrpc/xprtrdma/svc_rdma_transport.c
453
newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
net/sunrpc/xprtrdma/transport.c
80
static unsigned int max_padding = PAGE_SIZE;
net/sunrpc/xprtrdma/xprt_rdma.h
287
RPCRDMA_MAX_DATA_SEGS = ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
net/sunrpc/xprtsock.c
1511
return PAGE_SIZE;
net/sunrpc/xprtsock.c
2952
if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
net/sunrpc/xprtsock.c
2963
buf->len = PAGE_SIZE;
net/sunrpc/xprtsock.c
346
n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
net/sunrpc/xprtsock.c
352
i *= PAGE_SIZE;
net/tipc/msg.c
54
const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
net/tls/tls_strp.c
214
unsigned int nfrag = skb->len / PAGE_SIZE;
net/tls/tls_strp.c
230
chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
net/tls/tls_strp.c
268
chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
net/tls/tls_strp.c
298
chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
net/tls/tls_strp.c
407
need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
net/tls/tls_strp.c
409
for (len = need_spc; len > 0; len -= PAGE_SIZE) {
net/tls/tls_strp.c
427
strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);
net/tls/tls_sw.c
1450
use = min_t(int, copied, PAGE_SIZE - offset);
net/unix/af_unix.c
2121
MAX_SKB_FRAGS * PAGE_SIZE);
net/unix/af_unix.c
2124
BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
net/unix/af_unix.c
2304
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
net/unix/diag.c
280
if (extra_len >= PAGE_SIZE)
net/vmw_vsock/virtio_transport_common.c
380
(MAX_SKB_FRAGS * PAGE_SIZE));
net/vmw_vsock/vmci_transport_notify.c
198
min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
net/vmw_vsock/vmci_transport_notify.c
317
PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
net/vmw_vsock/vmci_transport_notify.c
318
PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
net/vmw_vsock/vmci_transport_notify.c
36
if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
net/vmw_vsock/vmci_transport_notify.c
40
PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
net/vmw_vsock/vmci_transport_notify_qstate.c
144
PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
net/vmw_vsock/vmci_transport_notify_qstate.c
145
PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
net/vmw_vsock/vmci_transport_notify_qstate.c
152
PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
net/vmw_vsock/vmci_transport_notify_qstate.c
153
PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
net/vmw_vsock/vmci_transport_notify_qstate.c
35
if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
net/vmw_vsock/vmci_transport_notify_qstate.c
39
PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
net/vmw_vsock/vmci_transport_notify_qstate.c
96
min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
net/wireless/debugfs.c
90
unsigned int offset = 0, buf_size = PAGE_SIZE, i;
net/xdp/xdp_umem.c
167
if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
net/xdp/xdp_umem.c
193
npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
net/xdp/xsk.c
785
copy = min_t(u32, PAGE_SIZE - offset, len - copied);
net/xdp/xsk.c
880
skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
net/xdp/xsk.c
881
refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
net/xdp/xsk_buff_pool.c
390
dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
net/xdp/xsk_buff_pool.c
427
if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
net/xdp/xsk_buff_pool.c
484
dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
net/xfrm/xfrm_hash.c
20
if (sz <= PAGE_SIZE)
net/xfrm/xfrm_hash.c
34
if (sz <= PAGE_SIZE)
net/xfrm/xfrm_ipcomp.c
135
if (skb->len > PAGE_SIZE) {
net/xfrm/xfrm_ipcomp.c
186
sg_set_page(dsg + i, page, PAGE_SIZE, 0);
net/xfrm/xfrm_ipcomp.c
187
total += PAGE_SIZE;
net/xfrm/xfrm_ipcomp.c
78
len = PAGE_SIZE;
net/xfrm/xfrm_state.c
2972
if (optlen <= 0 || optlen > PAGE_SIZE)
rust/bindings/bindings_helper.h
107
const size_t RUST_CONST_HELPER_PAGE_SIZE = PAGE_SIZE;
samples/damon/mtier.c
131
.min_sz_region = PAGE_SIZE,
samples/damon/prcl.c
91
.min_sz_region = PAGE_SIZE,
samples/vfio-mdev/mbochs.c
1009
if (!IS_ALIGNED(dmabuf->mode.offset, PAGE_SIZE)) {
samples/vfio-mdev/mbochs.c
77
#define MBOCHS_MMIO_BAR_OFFSET PAGE_SIZE
samples/vfio-mdev/mbochs.c
78
#define MBOCHS_MMIO_BAR_SIZE PAGE_SIZE
samples/vfio-mdev/mbochs.c
81
#define MBOCHS_EDID_SIZE PAGE_SIZE
samples/vfio-mdev/mbochs.c
942
dmabuf->pagecount = DIV_ROUND_UP(mode->size, PAGE_SIZE);
samples/vfio-mdev/mdpy.c
36
#define MDPY_MEMORY_BAR_OFFSET PAGE_SIZE
security/apparmor/apparmorfs.c
911
#define MULTI_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct multi_transaction))
security/apparmor/lib.c
117
return val_mask_to_str(buffer, PAGE_SIZE, debug_values_table,
security/integrity/ima/ima_crypto.c
251
*allocated_size = PAGE_SIZE << order;
security/integrity/ima/ima_crypto.c
265
*allocated_size = PAGE_SIZE;
security/integrity/ima/ima_crypto.c
32
static unsigned int ima_bufsize = PAGE_SIZE;
security/integrity/ima/ima_crypto.c
44
ima_bufsize = PAGE_SIZE << order;
security/integrity/ima/ima_crypto.c
473
rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
security/integrity/ima/ima_crypto.c
480
rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
security/integrity/ima/ima_crypto.c
732
len = size < PAGE_SIZE ? size : PAGE_SIZE;
security/integrity/ima/ima_fs.c
338
if (datalen >= PAGE_SIZE)
security/integrity/ima/ima_fs.c
339
datalen = PAGE_SIZE - 1;
security/integrity/ima/ima_kexec.c
140
struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
security/integrity/ima/ima_kexec.c
158
extra_memory = PAGE_SIZE / 2;
security/integrity/ima/ima_kexec.c
164
if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
security/integrity/ima/ima_kexec.c
167
kexec_segment_size = ALIGN(binary_runtime_size, PAGE_SIZE);
security/keys/keyctl.c
1750
restriction = strndup_user(_restriction, PAGE_SIZE);
security/keys/keyctl.c
195
callout_info = strndup_user(_callout_info, PAGE_SIZE);
security/keys/keyctl.c
334
if (plen > PAGE_SIZE)
security/keys/keyctl.c
890
key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0;
security/keys/keyctl_pkey.c
89
p = strndup_user(_info, PAGE_SIZE);
security/keys/trusted-keys/trusted_tpm2.c
27
const int SCRATCH_SIZE = PAGE_SIZE;
security/landlock/syscalls.c
79
if (usize > PAGE_SIZE)
security/security.c
3729
if (size > PAGE_SIZE)
security/selinux/avc.c
168
return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
security/selinux/hooks.c
1343
path = dentry_path_raw(dentry, buffer, PAGE_SIZE);
security/selinux/selinuxfs.c
1276
if (count >= PAGE_SIZE)
security/selinux/selinuxfs.c
1331
if (count >= PAGE_SIZE)
security/selinux/selinuxfs.c
1398
len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
security/selinux/selinuxfs.c
1399
if (len >= PAGE_SIZE) {
security/selinux/selinuxfs.c
144
if (count >= PAGE_SIZE)
security/selinux/selinuxfs.c
1457
if (count >= PAGE_SIZE)
security/selinux/selinuxfs.c
251
if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
security/selinux/selinuxfs.c
279
if (count >= PAGE_SIZE)
security/selinux/selinuxfs.c
452
if (offset >= roundup(plm->len, PAGE_SIZE))
security/selinux/selinuxfs.c
702
if (count >= PAGE_SIZE)
security/selinux/selinuxfs.c
755
if (count >= PAGE_SIZE)
security/selinux/ss/sidtab.c
144
return scnprintf(page, PAGE_SIZE,
security/selinux/ss/sidtab.h
38
#define SIDTAB_NODE_ALLOC_SIZE PAGE_SIZE
security/selinux/xfrm.c
88
if (str_len >= PAGE_SIZE)
security/smack/smackfs.c
1179
if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
security/smack/smackfs.c
1437
if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
security/smack/smackfs.c
1844
if (count == 0 || count > PAGE_SIZE)
security/smack/smackfs.c
2018
if (count > PAGE_SIZE)
security/smack/smackfs.c
2108
if (count > PAGE_SIZE)
security/smack/smackfs.c
2670
if (count == 0 || count > PAGE_SIZE)
security/smack/smackfs.c
2770
if (count == 0 || count > PAGE_SIZE)
security/smack/smackfs.c
471
if (count >= PAGE_SIZE) {
security/smack/smackfs.c
472
count = PAGE_SIZE - 1;
security/smack/smackfs.c
860
if (count > PAGE_SIZE)
security/tomoyo/audit.c
31
int offset = pos % PAGE_SIZE;
security/tomoyo/audit.c
48
pos += PAGE_SIZE - offset;
security/tomoyo/audit.c
50
while (offset < PAGE_SIZE) {
security/tomoyo/condition.c
114
int offset = pos % PAGE_SIZE;
security/tomoyo/condition.c
134
pos += PAGE_SIZE - offset;
security/tomoyo/condition.c
135
while (offset < PAGE_SIZE) {
security/tomoyo/domain.c
623
int offset = pos % PAGE_SIZE;
security/tomoyo/domain.c
640
pos += PAGE_SIZE - offset;
security/tomoyo/domain.c
642
while (argv_count && offset < PAGE_SIZE) {
security/tomoyo/domain.c
650
while (offset < PAGE_SIZE) {
security/tomoyo/domain.c
916
dump->data = kzalloc(PAGE_SIZE, GFP_NOFS);
security/tomoyo/domain.c
934
page = bprm->page[pos / PAGE_SIZE];
security/tomoyo/domain.c
937
const unsigned int offset = pos % PAGE_SIZE;
security/tomoyo/domain.c
942
PAGE_SIZE - offset);
security/tomoyo/realpath.c
241
unsigned int buf_len = PAGE_SIZE / 2;
sound/arm/aaci.c
356
.period_bytes_max = PAGE_SIZE,
sound/arm/aaci.c
358
.periods_max = PAGE_SIZE / 16,
sound/core/memalloc.c
110
if (size <= PAGE_SIZE)
sound/core/memalloc.c
113
size = PAGE_SIZE << get_order(size);
sound/core/memalloc.c
376
return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
sound/core/memalloc.c
392
start = ALIGN_DOWN(ofs, PAGE_SIZE);
sound/core/memalloc.c
397
start += PAGE_SIZE;
sound/core/memalloc.c
400
addr += PAGE_SIZE;
sound/core/memalloc.c
433
p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
sound/core/memalloc.c
629
return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
sound/core/memalloc.c
650
start = ALIGN_DOWN(ofs, PAGE_SIZE);
sound/core/memalloc.c
658
start += PAGE_SIZE;
sound/core/memalloc.c
661
addr += PAGE_SIZE;
sound/core/memalloc.c
739
if (chunk <= PAGE_SIZE)
sound/core/memalloc.c
742
chunk = PAGE_SIZE << get_order(chunk);
sound/core/pcm_native.c
3885
if (offset > dma_bytes - PAGE_SIZE)
sound/core/rawmidi.c
169
runtime->buffer_size = PAGE_SIZE;
sound/core/seq/seq_midi.c
31
static int output_buffer_size = PAGE_SIZE;
sound/core/seq/seq_midi.c
34
static int input_buffer_size = PAGE_SIZE;
sound/firewire/packets-buffer.c
37
packets_per_page = PAGE_SIZE / packet_size;
sound/firewire/packets-buffer.c
54
b->packets[i].offset = page_index * PAGE_SIZE + offset_in_page;
sound/hda/core/controller.c
730
return snd_dma_alloc_pages(dma_type, bus->dev, PAGE_SIZE, &bus->rb);
sound/parisc/harmony.h
64
#define BUF_SIZE PAGE_SIZE
sound/pci/bt87x.c
131
#define MAX_RISC_SIZE ((1 + 255 + (PAGE_ALIGN(255 * 4092) / PAGE_SIZE - 1) + 1 + 1) * 8)
sound/pci/bt87x.c
234
len = PAGE_SIZE - (offset % PAGE_SIZE);
sound/pci/cmipci.c
1302
memset(runtime->dma_area, 0, PAGE_SIZE);
sound/pci/cmipci.c
1304
val = ((PAGE_SIZE / 4) - 1) | (((PAGE_SIZE / 4) / 2 - 1) << 16);
sound/pci/cs46xx/cs46xx_lib.c
1461
PAGE_SIZE, &cpcm->hw_buf) < 0) {
sound/pci/cs46xx/cs46xx_lib.c
1547
PAGE_SIZE, &chip->capt.hw_buf) < 0)
sound/pci/ctxfi/cthw20k1.c
1272
#if PAGE_SIZE == 8192
sound/pci/ctxfi/ctvmem.c
183
PAGE_SIZE, &vm->ptp[i]);
sound/pci/ctxfi/ctvmem.h
30
#define CT_PAGE_MASK (~(PAGE_SIZE - 1))
sound/pci/echoaudio/echoaudio.c
1894
if (sz > PAGE_SIZE)
sound/pci/echoaudio/echoaudio.c
1895
sz = PAGE_SIZE; /* We map only the required part */
sound/pci/echoaudio/echoaudio.c
327
PAGE_SIZE, &pipe->sgpage);
sound/pci/echoaudio/echoaudio.c
557
edge = PAGE_SIZE;
sound/pci/echoaudio/echoaudio.c
578
edge += PAGE_SIZE;
sound/pci/echoaudio/echoaudio_dsp.c
1104
memset(pipe->sgpage.area, 0, PAGE_SIZE);
sound/pci/echoaudio/echoaudio_dsp.c
16
#if PAGE_SIZE < 4096
sound/pci/emu10k1/emu10k1_main.c
1592
emu->memhdr = snd_util_memhdr_new(emu->max_cache_pages * PAGE_SIZE);
sound/pci/emu10k1/memory.c
212
psize = get_aligned_page(size + PAGE_SIZE -1);
sound/pci/emu10k1/memory.c
27
#define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
sound/pci/emu10k1/memory.c
35
#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
sound/pci/emu10k1/memory.c
373
size_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
sound/pci/emu10k1/memory.c
374
size_t size_real = npages * PAGE_SIZE;
sound/pci/emu10k1/memory.c
381
size += PAGE_SIZE;
sound/pci/emu10k1/memory.c
483
dmab.bytes = PAGE_SIZE;
sound/pci/emu10k1/memory.c
505
if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
sound/pci/emu10k1/memory.c
549
ptr += offset & (PAGE_SIZE - 1);
sound/pci/emu10k1/memory.c
566
offset += blk->offset & (PAGE_SIZE - 1);
sound/pci/emu10k1/memory.c
626
offset += blk->offset & (PAGE_SIZE - 1);
sound/pci/lola/lola.c
349
PAGE_SIZE);
sound/pci/lola/lola_pcm.c
586
PAGE_SIZE);
sound/pci/riptide/riptide.c
1317
.period_bytes_min = PAGE_SIZE >> 1,
sound/pci/riptide/riptide.c
1318
.period_bytes_max = PAGE_SIZE << 8,
sound/pci/riptide/riptide.c
1336
.period_bytes_min = PAGE_SIZE >> 1,
sound/pci/riptide/riptide.c
1337
.period_bytes_max = PAGE_SIZE << 3,
sound/pci/riptide/riptide.c
1482
f = PAGE_SIZE;
sound/pci/riptide/riptide.c
1505
pt = (pt + f) % PAGE_SIZE;
sound/pci/trident/trident_memory.c
27
#if PAGE_SIZE == 4096
sound/pci/trident/trident_memory.c
29
#define ALIGN_PAGE_SIZE PAGE_SIZE /* minimum page size for allocation */
sound/pci/trident/trident_memory.c
42
#elif PAGE_SIZE == 8192
sound/pci/trident/trident_memory.c
44
#define ALIGN_PAGE_SIZE PAGE_SIZE
sound/pci/trident/trident_memory.c
67
#define UNIT_PAGES (PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
sound/pci/via82xx_modem.c
307
r = PAGE_SIZE - (ofs % PAGE_SIZE);
sound/ppc/snd_ps3.c
1013
PAGE_SIZE,
sound/ppc/snd_ps3.c
1047
PAGE_SIZE,
sound/ppc/snd_ps3.c
913
ALIGN(SND_PS3_DMA_REGION_SIZE, PAGE_SIZE));
sound/ppc/snd_ps3.c
988
PAGE_SIZE,
sound/ppc/snd_ps3.h
120
(SND_PS3_PCM_PREALLOC_SIZE + PAGE_SIZE)
sound/soc/amd/acp-pcm-dma.c
326
addr += PAGE_SIZE;
sound/soc/amd/acp/acp-platform.c
186
addr += PAGE_SIZE;
sound/soc/amd/ps/ps-pdm-dma.c
178
addr += PAGE_SIZE;
sound/soc/amd/ps/ps-sdw-dma.c
230
addr += PAGE_SIZE;
sound/soc/amd/raven/acp3x-pcm-dma.c
152
addr += PAGE_SIZE;
sound/soc/amd/renoir/acp3x-pdm-dma.c
200
addr += PAGE_SIZE;
sound/soc/amd/vangogh/acp5x-pcm-dma.c
146
addr += PAGE_SIZE;
sound/soc/amd/yc/acp6x-pdm-dma.c
176
addr += PAGE_SIZE;
sound/soc/bcm/bcm63xx-pcm-whistler.c
39
.periods_max = PAGE_SIZE/sizeof(struct i2s_dma_desc),
sound/soc/codecs/cros_ec_codec.c
424
req = round_up(r.len, PAGE_SIZE);
sound/soc/codecs/cros_ec_codec.c
881
.period_bytes_min = PAGE_SIZE,
sound/soc/codecs/rt5514-spi.c
52
.period_bytes_min = PAGE_SIZE,
sound/soc/codecs/rt5677-spi.c
77
.period_bytes_min = PAGE_SIZE,
sound/soc/fsl/imx-audmux.c
102
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
106
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
110
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
114
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
118
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
73
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
sound/soc/fsl/imx-audmux.c
80
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
84
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
88
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
92
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/fsl/imx-audmux.c
95
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
sound/soc/fsl/imx-audmux.c
98
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/intel/avs/debugfs.c
111
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
sound/soc/intel/avs/debugfs.c
122
ret = scnprintf(buf + len, PAGE_SIZE - len,
sound/soc/intel/avs/debugfs.c
241
ret = kfifo_alloc(&adev->trace_fifo, PAGE_SIZE, GFP_KERNEL);
sound/soc/intel/avs/trace.c
15
#define MAX_CHUNK_SIZE ((PAGE_SIZE - 150) /* Place for trace header */ \
sound/soc/intel/catpt/pcm.c
162
pfn = PFN_DOWN(snd_sgbuf_get_addr(databuf, i * PAGE_SIZE));
sound/soc/intel/catpt/pcm.c
270
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, cdev->dev, PAGE_SIZE,
sound/soc/intel/catpt/pcm.c
397
rinfo.num_pages = DIV_ROUND_UP(rtm->dma_bytes, PAGE_SIZE);
sound/soc/intel/catpt/pcm.c
570
.period_bytes_min = PAGE_SIZE,
sound/soc/loongson/loongson_dma.c
250
prtd->dma_desc_arr = dma_alloc_coherent(card->dev, PAGE_SIZE,
sound/soc/loongson/loongson_dma.c
256
prtd->dma_desc_arr_size = PAGE_SIZE / sizeof(*prtd->dma_desc_arr);
sound/soc/loongson/loongson_dma.c
272
dma_free_coherent(card->dev, PAGE_SIZE, prtd->dma_desc_arr,
sound/soc/loongson/loongson_dma.c
286
dma_free_coherent(card->dev, PAGE_SIZE, prtd->dma_desc_arr,
sound/soc/loongson/loongson_dma.c
70
.periods_max = PAGE_SIZE / sizeof(struct loongson_dma_desc),
sound/soc/soc-dapm.c
177
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
sound/soc/soc-dapm.c
182
vsnprintf(buf, PAGE_SIZE, fmt, args);
sound/soc/soc-dapm.c
2453
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
sound/soc/soc-dapm.c
2468
ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
sound/soc/soc-dapm.c
2473
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/soc-dapm.c
2477
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
sound/soc/soc-dapm.c
2480
ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
sound/soc/soc-dapm.c
2484
ret += scnprintf(buf + ret, PAGE_SIZE - ret, " widget-type %s\n",
sound/soc/soc-dapm.c
2498
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
sound/soc/soc-pcm.c
237
ssize_t out_count = PAGE_SIZE, offset = 0, ret = 0;
sound/soc/sof/amd/acp-loader.c
157
addr += PAGE_SIZE;
sound/soc/sof/amd/acp-stream.c
107
addr = snd_sgbuf_get_addr(stream->dmab, page_idx * PAGE_SIZE);
sound/soc/sof/debug.c
281
dfse->buf = devm_kmalloc(sdev->dev, PAGE_SIZE, GFP_KERNEL);
sound/soc/sof/debug.c
284
dfse->size = PAGE_SIZE;
sound/soc/sof/intel/hda-loader-skl.c
28
#define HDA_SKL_CLDMA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
sound/soc/sof/intel/hda-loader.c
290
iccmax_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
sound/soc/sof/intel/hda-stream.c
946
PAGE_SIZE, &bus->rb);
sound/soc/sof/intel/hda.h
190
#define SOF_HDA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
sound/soc/sof/ipc3-dtrace.c
520
PAGE_SIZE, &priv->dmatp);
sound/soc/sof/ipc3.c
591
ext_data = kzalloc(PAGE_SIZE, GFP_KERNEL);
sound/soc/sof/sof-client-probes.c
221
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
sound/soc/sof/sof-client-probes.c
241
remaining = PAGE_SIZE - offset;
sound/soc/sof/sof-priv.h
74
#define DMA_BUF_SIZE_FOR_TRACE (PAGE_SIZE * 16)
sound/soc/sof/sof-utils.c
45
u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
sound/soc/sof/topology.c
1790
PAGE_SIZE, &spcm->stream[stream].page_table);
sound/soc/sof/topology.c
1817
PAGE_SIZE, &spcm->stream[stream].page_table);
sound/soc/sprd/sprd-pcm-dma.c
45
.periods_max = PAGE_SIZE / SPRD_PCM_DMA_LINKLIST_SIZE,
sound/soc/sti/uniperif.h
1369
.period_bytes_max = 64 * PAGE_SIZE,
sound/soc/sti/uniperif.h
1370
.buffer_bytes_max = 256 * PAGE_SIZE
sound/soc/sti/uniperif_player.c
52
.period_bytes_max = 64 * PAGE_SIZE,
sound/soc/sti/uniperif_player.c
53
.buffer_bytes_max = 256 * PAGE_SIZE
sound/soc/sti/uniperif_reader.c
34
.period_bytes_max = 64 * PAGE_SIZE,
sound/soc/sti/uniperif_reader.c
35
.buffer_bytes_max = 256 * PAGE_SIZE
sound/soc/stm/stm32_adfsdm.c
25
#define DFSDM_MAX_PERIOD_SIZE (PAGE_SIZE / 2)
sound/soc/stm/stm32_i2s.c
1111
.buffer_bytes_max = 8 * PAGE_SIZE,
sound/soc/stm/stm32_i2s.c
1113
.period_bytes_max = 4 * PAGE_SIZE,
sound/soc/stm/stm32_i2s.c
1121
.prealloc_buffer_size = PAGE_SIZE * 8,
sound/soc/stm/stm32_sai_sub.c
1430
.buffer_bytes_max = 8 * PAGE_SIZE,
sound/soc/stm/stm32_sai_sub.c
1432
.period_bytes_max = PAGE_SIZE,
sound/soc/stm/stm32_sai_sub.c
1439
.buffer_bytes_max = 8 * PAGE_SIZE,
sound/soc/stm/stm32_sai_sub.c
1441
.period_bytes_max = PAGE_SIZE,
sound/soc/stm/stm32_spdifrx.c
882
.buffer_bytes_max = 8 * PAGE_SIZE,
sound/soc/stm/stm32_spdifrx.c
884
.period_bytes_max = 4 * PAGE_SIZE,
sound/soc/tegra/tegra_pcm.c
32
.period_bytes_max = PAGE_SIZE,
sound/soc/tegra/tegra_pcm.c
35
.buffer_bytes_max = PAGE_SIZE * 8,
sound/soc/tegra/tegra_pcm.c
42
.prealloc_buffer_size = PAGE_SIZE * 8,
sound/soc/ux500/ux500_pcm.c
26
#define UX500_PLATFORM_PERIODS_BYTES_MAX (64 * PAGE_SIZE)
sound/soc/ux500/ux500_pcm.c
29
#define UX500_PLATFORM_BUFFER_BYTES_MAX (2048 * PAGE_SIZE)
sound/usb/misc/ua101.c
1018
packets_per_page = PAGE_SIZE / stream->max_packet_bytes;
sound/usb/misc/ua101.c
40
PAGE_SIZE / MAX_PACKET_SIZE)
sound/usb/qcom/qc_audio_offload.c
1038
len = PAGE_SIZE;
sound/usb/qcom/qc_audio_offload.c
1040
mult = len / PAGE_SIZE;
sound/usb/qcom/qc_audio_offload.c
1041
remainder = len % PAGE_SIZE;
sound/usb/qcom/qc_audio_offload.c
1042
len = mult * PAGE_SIZE;
sound/usb/qcom/qc_audio_offload.c
1043
len += remainder ? PAGE_SIZE : 0;
sound/usb/qcom/qc_audio_offload.c
1144
PAGE_SIZE);
sound/usb/qcom/qc_audio_offload.c
1151
mem_info->size = PAGE_SIZE;
sound/usb/qcom/qc_audio_offload.c
1212
PAGE_SIZE);
sound/usb/qcom/qc_audio_offload.c
1219
mem_info->size = PAGE_SIZE;
sound/usb/qcom/qc_audio_offload.c
1459
uadev[card_num].info[info_idx].data_xfer_ring_size = PAGE_SIZE;
sound/usb/qcom/qc_audio_offload.c
1462
uadev[card_num].info[info_idx].sync_xfer_ring_size = PAGE_SIZE;
sound/usb/qcom/qc_audio_offload.c
1489
uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE, PAGE_SIZE);
sound/usb/qcom/qc_audio_offload.c
1497
PAGE_SIZE, PAGE_SIZE);
sound/usb/qcom/qc_audio_offload.c
1503
PAGE_SIZE, PAGE_SIZE);
sound/usb/qcom/qc_audio_offload.c
401
if (size % PAGE_SIZE)
sound/usb/qcom/qc_audio_offload.c
61
#define IOVA_XFER_RING_BASE (IOVA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
sound/usb/qcom/qc_audio_offload.c
62
#define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32)
sound/usb/qcom/qc_audio_offload.c
63
#define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
sound/usb/qcom/qc_audio_offload.c
64
#define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
sound/usb/qcom/qc_audio_offload.c
66
#define MAX_XFER_BUFF_LEN (24 * PAGE_SIZE)
sound/usb/qcom/qc_audio_offload.c
699
uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE,
sound/usb/qcom/qc_audio_offload.c
700
PAGE_SIZE);
sound/usb/usx2y/usb_stream.c
190
if (read_size >= 256*PAGE_SIZE || write_size >= 256*PAGE_SIZE) {
sound/virtio/virtio_pcm_msg.c
59
pg_length = PAGE_SIZE - offset_in_page(data);
sound/virtio/virtio_pcm_msg.c
99
pg_length = PAGE_SIZE - offset_in_page(data);
sound/xen/xen_snd_front.c
366
if (XEN_PAGE_SIZE != PAGE_SIZE) {
sound/xen/xen_snd_front.c
368
XEN_PAGE_SIZE, PAGE_SIZE);
sound/xen/xen_snd_front_alsa.c
444
stream->num_pages = DIV_ROUND_UP(stream->buffer_sz, PAGE_SIZE);
sound/xen/xen_snd_front_alsa.c
450
stream->pages[i] = virt_to_page(stream->buffer + i * PAGE_SIZE);
tools/include/linux/mm.h
11
#define PAGE_MASK (~(PAGE_SIZE - 1))
tools/include/linux/mm.h
15
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
tools/include/linux/mm.h
16
#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
tools/include/linux/mm.h
21
#define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
tools/include/linux/pfn.h
7
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
tools/include/uapi/linux/kvm.h
549
((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
tools/power/acpi/os_specific/service_layers/osunixmap.c
43
#ifdef PAGE_SIZE
tools/power/acpi/os_specific/service_layers/osunixmap.c
44
return PAGE_SIZE;
tools/sched_ext/include/scx/bpf_arena_common.bpf.h
5
#ifndef PAGE_SIZE
tools/sched_ext/scx_sdt.bpf.c
113
div_round_up(max_elems * elem_size, PAGE_SIZE), NUMA_NO_NODE, 0);
tools/sched_ext/scx_sdt.bpf.c
168
pool->max_elems = (PAGE_SIZE * nr_pages) / pool->elem_size;
tools/sched_ext/scx_sdt.bpf.c
182
_Static_assert(sizeof(struct sdt_chunk) <= PAGE_SIZE,
tools/sched_ext/scx_sdt.bpf.c
201
min_chunk_size = div_round_up(SDT_TASK_MIN_ELEM_PER_ALLOC * data_size, PAGE_SIZE);
tools/testing/nvdimm/dax-dev.c
28
if (dev_dax->region->align > PAGE_SIZE)
tools/testing/nvdimm/test/ndtest.c
677
seq_buf_init(&s, buf, PAGE_SIZE);
tools/testing/nvdimm/test/nfit.c
1926
t->flush[i] = test_alloc(t, max(PAGE_SIZE,
tools/testing/scatterlist/linux/mm.h
104
return (unsigned long)malloc(PAGE_SIZE);
tools/testing/scatterlist/linux/mm.h
31
#define PAGE_MASK (~(PAGE_SIZE-1))
tools/testing/scatterlist/linux/mm.h
38
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
tools/testing/scatterlist/linux/mm.h
52
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
tools/testing/scatterlist/linux/mm.h
53
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
tools/testing/scatterlist/main.c
25
((1 + array[i]) * PAGE_SIZE);
tools/testing/scatterlist/main.c
56
{ -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 },
tools/testing/scatterlist/main.c
57
{ 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 },
tools/testing/scatterlist/main.c
58
{ 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
60
{ 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
61
{ 0, 2, pfn(1, 0), NULL, 2 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
62
{ 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
63
{ 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
64
{ 0, 3, pfn(0, 1, 2), pfn(3, 4, 5), 3 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
65
{ 0, 3, pfn(0, 1, 2), pfn(4, 5, 6), 3 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
66
{ 0, 3, pfn(0, 2, 1), NULL, 3 * PAGE_SIZE, sgmax, 3 },
tools/testing/scatterlist/main.c
67
{ 0, 3, pfn(0, 1, 3), NULL, 3 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
68
{ 0, 3, pfn(1, 2, 4), NULL, 3 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
69
{ 0, 3, pfn(1, 3, 4), NULL, 3 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
70
{ 0, 4, pfn(0, 1, 3, 4), NULL, 4 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
71
{ 0, 5, pfn(0, 1, 3, 4, 5), NULL, 5 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
72
{ 0, 5, pfn(0, 1, 3, 4, 6), NULL, 5 * PAGE_SIZE, sgmax, 3 },
tools/testing/scatterlist/main.c
73
{ 0, 5, pfn(0, 1, 2, 3, 4), NULL, 5 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
74
{ 0, 5, pfn(0, 1, 2, 3, 4), NULL, 5 * PAGE_SIZE, 2 * PAGE_SIZE,
tools/testing/scatterlist/main.c
76
{ 0, 6, pfn(0, 1, 2, 3, 4, 5), NULL, 6 * PAGE_SIZE,
tools/testing/scatterlist/main.c
77
2 * PAGE_SIZE, 3 },
tools/testing/scatterlist/main.c
78
{ 0, 6, pfn(0, 2, 3, 4, 5, 6), NULL, 6 * PAGE_SIZE,
tools/testing/scatterlist/main.c
79
2 * PAGE_SIZE, 4 },
tools/testing/scatterlist/main.c
81
6 * PAGE_SIZE, 12 * PAGE_SIZE, 2 },
tools/testing/selftests/bpf/bpf_arena_alloc.h
29
if (size >= PAGE_SIZE - 8)
tools/testing/selftests/bpf/bpf_arena_alloc.h
38
*cur_offset = PAGE_SIZE - 8;
tools/testing/selftests/bpf/bpf_arena_alloc.h
39
obj_cnt = page + PAGE_SIZE - 8;
tools/testing/selftests/bpf/bpf_arena_alloc.h
43
obj_cnt = page + PAGE_SIZE - 8;
tools/testing/selftests/bpf/bpf_arena_alloc.h
59
addr = (void __arena *)(((long)addr) & ~(PAGE_SIZE - 1));
tools/testing/selftests/bpf/bpf_arena_alloc.h
60
obj_cnt = addr + PAGE_SIZE - 8;
tools/testing/selftests/bpf/bpf_arena_common.h
23
#ifndef PAGE_SIZE
tools/testing/selftests/bpf/bpf_arena_htab.h
99
htab->n_buckets = 2 * PAGE_SIZE / sizeof(struct htab_bucket);
tools/testing/selftests/bpf/prog_tests/arena_htab.c
7
#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */
tools/testing/selftests/bpf/prog_tests/arena_list.c
7
#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */
tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c
15
#define BUF_TOTAL (NR_PAGES * PAGE_SIZE / NR_SOCKETS)
tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c
8
#ifndef PAGE_SIZE
tools/testing/selftests/bpf/prog_tests/sockopt.c
1114
if (test->set_optlen >= PAGE_SIZE) {
tools/testing/selftests/bpf/prog_tests/sockopt.c
1115
int num_pages = test->set_optlen / PAGE_SIZE;
tools/testing/selftests/bpf/prog_tests/sockopt.c
1116
int remainder = test->set_optlen % PAGE_SIZE;
tools/testing/selftests/bpf/prog_tests/sockopt.c
1137
if (test->get_optlen >= PAGE_SIZE) {
tools/testing/selftests/bpf/prog_tests/sockopt.c
1138
int num_pages = test->get_optlen / PAGE_SIZE;
tools/testing/selftests/bpf/prog_tests/sockopt.c
1139
int remainder = test->get_optlen % PAGE_SIZE;
tools/testing/selftests/bpf/prog_tests/sockopt.c
325
.get_optlen = PAGE_SIZE + 1,
tools/testing/selftests/bpf/prog_tests/sockopt.c
726
.set_optlen = PAGE_SIZE + 1,
tools/testing/selftests/bpf/prog_tests/sockopt.c
9
#ifndef PAGE_SIZE
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
508
if (getpagesize() == PAGE_SIZE)
tools/testing/selftests/bpf/prog_tests/test_lsm.c
19
#define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \
tools/testing/selftests/bpf/prog_tests/test_lsm.c
20
(char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1))
tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
6
#ifndef PAGE_SIZE
tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
9
#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2)
tools/testing/selftests/bpf/progs/test_tc_change_tail.c
5
#ifndef PAGE_SIZE
tools/testing/selftests/bpf/progs/test_tc_change_tail.c
8
#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2)
tools/testing/selftests/bpf/progs/verifier_arena_globals1.c
29
volatile char __arena global_data[GLOBAL_PAGES][PAGE_SIZE];
tools/testing/selftests/bpf/progs/verifier_arena_globals1.c
43
globals = (void __arena *)(arena_base(&arena) + (ARENA_PAGES - GLOBAL_PAGES) * PAGE_SIZE);
tools/testing/selftests/bpf/progs/verifier_arena_globals1.c
57
ptr = &global_data[i][PAGE_SIZE / 2];
tools/testing/selftests/bpf/progs/verifier_arena_globals1.c
78
global_data[GLOBAL_PAGES - 1][PAGE_SIZE / 2] = magic;
tools/testing/selftests/bpf/progs/verifier_arena_globals1.c
79
ptr = (u8 __arena *)((u64)(ARENA_PAGES * PAGE_SIZE - PAGE_SIZE / 2));
tools/testing/selftests/bpf/progs/verifier_arena_globals2.c
29
char __arena global_data[ARENA_PAGES][PAGE_SIZE];
tools/testing/selftests/bpf/progs/verifier_arena_large.c
106
page = (volatile char __arena *)(base + i * PAGE_SIZE);
tools/testing/selftests/bpf/progs/verifier_arena_large.c
17
__uint(max_entries, ARENA_SIZE / PAGE_SIZE);
tools/testing/selftests/bpf/progs/verifier_arena_large.c
201
pg_idx = (unsigned long) (pg - base) / PAGE_SIZE;
tools/testing/selftests/bpf/progs/verifier_arena_large.c
282
if ((pg - base) / PAGE_SIZE < PAGE_CNT)
tools/testing/selftests/bpf/progs/verifier_arena_large.c
306
pages[i * PAGE_SIZE] = 123;
tools/testing/selftests/bpf/progs/verifier_arena_large.c
308
if (pages[i * PAGE_SIZE] != 123)
tools/testing/selftests/bpf/progs/verifier_arena_large.c
38
page2 = bpf_arena_alloc_pages(&arena, (void __arena *)(ARENA_SIZE - 2 * PAGE_SIZE),
tools/testing/selftests/bpf/progs/verifier_arena_large.c
45
no_page = bpf_arena_alloc_pages(&arena, (void __arena *)ARENA_SIZE - PAGE_SIZE,
tools/testing/selftests/bpf/progs/verifier_arena_large.c
71
if (*(page1 + PAGE_SIZE) != 0)
tools/testing/selftests/bpf/progs/verifier_arena_large.c
73
if (*(page1 - PAGE_SIZE) != 0)
tools/testing/selftests/bpf/progs/verifier_arena_large.c
75
if (*(page2 + PAGE_SIZE) != 0)
tools/testing/selftests/bpf/progs/verifier_arena_large.c
77
if (*(page2 - PAGE_SIZE) != 0)
tools/testing/selftests/bpf/progs/verifier_arena_large.c
95
page = base = arena_base(&arena) + 16384 * PAGE_SIZE;
tools/testing/selftests/bpf/uptr_test_common.h
44
__u8 one_page[PAGE_SIZE];
tools/testing/selftests/cgroup/lib/cgroup_util.c
143
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/lib/cgroup_util.c
173
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/lib/cgroup_util.c
209
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/lib/cgroup_util.c
261
char buf[10 * PAGE_SIZE];
tools/testing/selftests/cgroup/lib/cgroup_util.c
316
char buf[10 * PAGE_SIZE] = {0};
tools/testing/selftests/cgroup/lib/cgroup_util.c
341
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/lib/cgroup_util.c
551
char buf[4 * PAGE_SIZE];
tools/testing/selftests/cgroup/lib/cgroup_util.c
563
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/lib/cgroup_util.c
590
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/lib/include/cgroup_util.h
5
#ifndef PAGE_SIZE
tools/testing/selftests/cgroup/test_core.c
90
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/test_freezer.c
645
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/test_memcontrol.c
1002
for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
tools/testing/selftests/cgroup/test_memcontrol.c
115
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/test_memcontrol.c
186
for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
tools/testing/selftests/cgroup/test_memcontrol.c
36
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/test_memcontrol.c
416
for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
tools/testing/selftests/cgroup/test_memcontrol.c
63
for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
tools/testing/selftests/cgroup/test_memcontrol.c
72
char buf[PAGE_SIZE];
tools/testing/selftests/cgroup/test_zswap.c
238
if (zswpin < MB(24) / PAGE_SIZE) {
tools/testing/selftests/coredump/coredump_socket_protocol_test.c
956
COREDUMP_ACK_SIZE_VER0 + PAGE_SIZE)) {
tools/testing/selftests/coredump/coredump_test.h
13
#ifndef PAGE_SIZE
tools/testing/selftests/coredump/coredump_test_helpers.c
222
if (kernel_size >= PAGE_SIZE) {
tools/testing/selftests/coredump/coredump_test_helpers.c
224
kernel_size, PAGE_SIZE);
tools/testing/selftests/coredump/coredump_test_helpers.c
243
if (PAGE_SIZE <= remaining_size)
tools/testing/selftests/coredump/coredump_test_helpers.c
251
char buffer[PAGE_SIZE];
tools/testing/selftests/coredump/coredump_test_helpers.c
272
char buffer[PAGE_SIZE];
tools/testing/selftests/coredump/coredump_test_helpers.c
35
#ifndef PAGE_SIZE
tools/testing/selftests/coredump/stackdump_test.c
110
char buf[PAGE_SIZE];
tools/testing/selftests/coredump/stackdump_test.c
31
#ifndef PAGE_SIZE
tools/testing/selftests/iommu/iommufd.c
1001
.add_reserved = { .start = PAGE_SIZE * 4,
tools/testing/selftests/iommu/iommufd.c
1002
.length = PAGE_SIZE * 100 },
tools/testing/selftests/iommu/iommufd.c
1015
ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
tools/testing/selftests/iommu/iommufd.c
1017
test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
1018
test_cmd.add_reserved.length = PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
1032
ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
tools/testing/selftests/iommu/iommufd.c
1044
.length = PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
1047
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
tools/testing/selftests/iommu/iommufd.c
1051
copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
1067
.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
tools/testing/selftests/iommu/iommufd.c
1108
EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
tools/testing/selftests/iommu/iommufd.c
1109
EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
tools/testing/selftests/iommu/iommufd.c
1125
EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
tools/testing/selftests/iommu/iommufd.c
1142
.access_pages = { .iova = self->base_iova + PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
1143
.length = PAGE_SIZE},
tools/testing/selftests/iommu/iommufd.c
1156
access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
1193
for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
tools/testing/selftests/iommu/iommufd.c
1197
access_cmd.access_pages.length = npages * PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
1287
for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
tools/testing/selftests/iommu/iommufd.c
1288
access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
tools/testing/selftests/iommu/iommufd.c
1476
for (length = 1; length != PAGE_SIZE * 2; length++) {
tools/testing/selftests/iommu/iommufd.c
1507
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
tools/testing/selftests/iommu/iommufd.c
1512
PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
1513
MOCK_APERTURE_START + PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1521
test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
1522
MOCK_APERTURE_START + PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1526
test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1527
test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1554
for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
tools/testing/selftests/iommu/iommufd.c
1574
size_t buf_size = PAGE_SIZE*4;
tools/testing/selftests/iommu/iommufd.c
1589
size_t buf_size = PAGE_SIZE*4;
tools/testing/selftests/iommu/iommufd.c
1653
self->mmap_buf_size = PAGE_SIZE * 8;
tools/testing/selftests/iommu/iommufd.c
1747
test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
tools/testing/selftests/iommu/iommufd.c
1748
check_mock_iova(buffer, iova, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1775
test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
tools/testing/selftests/iommu/iommufd.c
1776
check_mock_iova(mfd_buffer, iova, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1806
buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
tools/testing/selftests/iommu/iommufd.c
1816
_test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
1818
check_mock_iova(buf, iova, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1819
memset(buf, 1, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1820
check_mock_iova(buf, iova, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
1821
ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
tools/testing/selftests/iommu/iommufd.c
1864
check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
1865
end / PAGE_SIZE * PAGE_SIZE -
tools/testing/selftests/iommu/iommufd.c
1866
start / PAGE_SIZE * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
1926
check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
1927
end / PAGE_SIZE * PAGE_SIZE -
tools/testing/selftests/iommu/iommufd.c
1928
start / PAGE_SIZE * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
2155
rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
2158
assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
tools/testing/selftests/iommu/iommufd.c
2396
.size = PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
2414
.size = PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
2691
unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
tools/testing/selftests/iommu/iommufd.c
2714
MOCK_APERTURE_START + i * PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
2715
map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
2716
map_cmd.size = PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
2722
unmap_cmd.size = PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
2931
data.out_mmap_offset + PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
2933
data.out_mmap_offset + PAGE_SIZE * 2);
tools/testing/selftests/iommu/iommufd.c
301
test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
tools/testing/selftests/iommu/iommufd.c
303
test_ioctl_ioas_unmap(iova, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
306
test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
tools/testing/selftests/iommu/iommufd.c
312
int nlock = PAGE_SIZE / 1024;
tools/testing/selftests/iommu/iommufd.c
3174
IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
3181
PAGE_SIZE, &hw_queue_id[0]);
tools/testing/selftests/iommu/iommufd.c
3184
0, iova, PAGE_SIZE, &hw_queue_id[0]);
tools/testing/selftests/iommu/iommufd.c
3187
test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
tools/testing/selftests/iommu/iommufd.c
3188
test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);
tools/testing/selftests/iommu/iommufd.c
3192
iova, PAGE_SIZE, &hw_queue_id[0]);
tools/testing/selftests/iommu/iommufd.c
3194
IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
3199
iova, PAGE_SIZE, &hw_queue_id[0]);
tools/testing/selftests/iommu/iommufd.c
3202
0, iova, PAGE_SIZE, &hw_queue_id[0]);
tools/testing/selftests/iommu/iommufd.c
3204
test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
3206
test_ioctl_ioas_unmap(iova2, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
3210
iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
tools/testing/selftests/iommu/iommufd.c
3219
test_ioctl_ioas_unmap(iova, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
40
PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
43
BUFFER_SIZE = PAGE_SIZE * 16;
tools/testing/selftests/iommu/iommufd.c
732
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
tools/testing/selftests/iommu/iommufd.c
746
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
747
self->base_iova + i * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
815
test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
819
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
820
self->base_iova + i * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
822
test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
823
PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
826
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
tools/testing/selftests/iommu/iommufd.c
827
self->base_iova + 16 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
828
test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
829
PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
830
test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
831
PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
834
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
tools/testing/selftests/iommu/iommufd.c
835
self->base_iova + 16 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
836
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
837
self->base_iova + 16 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
838
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
839
self->base_iova + 17 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
840
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
tools/testing/selftests/iommu/iommufd.c
841
self->base_iova + 15 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
842
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
tools/testing/selftests/iommu/iommufd.c
843
self->base_iova + 15 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
858
self->base_iova += 4 * PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
861
test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
862
self->base_iova + i * 16 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
865
test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
866
8 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
868
self->base_iova + 3 * 16 * PAGE_SIZE +
tools/testing/selftests/iommu/iommufd.c
869
8 * PAGE_SIZE - 4 * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
870
8 * PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
874
self->base_iova - 4 * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
875
3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
tools/testing/selftests/iommu/iommufd.c
876
4 * PAGE_SIZE,
tools/testing/selftests/iommu/iommufd.c
878
ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
tools/testing/selftests/iommu/iommufd.c
887
.add_reserved = { .start = PAGE_SIZE * 4,
tools/testing/selftests/iommu/iommufd.c
888
.length = PAGE_SIZE * 100 },
tools/testing/selftests/iommu/iommufd.c
902
test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
tools/testing/selftests/iommu/iommufd.c
904
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
tools/testing/selftests/iommu/iommufd.c
908
size_t length = PAGE_SIZE * (i + 1);
tools/testing/selftests/iommu/iommufd.c
919
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
tools/testing/selftests/iommu/iommufd.c
926
size_t length = PAGE_SIZE * (i + 1);
tools/testing/selftests/iommu/iommufd.c
937
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
tools/testing/selftests/iommu/iommufd.c
940
ranges[0].start = PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
941
ranges[0].last = PAGE_SIZE * 600;
tools/testing/selftests/iommu/iommufd.c
947
ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
948
ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
tools/testing/selftests/iommu/iommufd.c
950
ranges[0].start = PAGE_SIZE * 200;
tools/testing/selftests/iommu/iommufd.c
951
ranges[0].last = PAGE_SIZE * 600 - 1;
tools/testing/selftests/iommu/iommufd.c
955
size_t length = PAGE_SIZE * (i + 1);
tools/testing/selftests/iommu/iommufd.c
965
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
tools/testing/selftests/iommu/iommufd.c
980
if (PAGE_SIZE == 4096) {
tools/testing/selftests/iommu/iommufd_fail_nth.c
44
PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
tools/testing/selftests/iommu/iommufd_fail_nth.c
652
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
tools/testing/selftests/iommu/iommufd_fail_nth.c
657
if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
tools/testing/selftests/iommu/iommufd_fail_nth.c
702
PAGE_SIZE, &hw_queue_id))
tools/testing/selftests/iommu/iommufd_utils.h
53
static unsigned long PAGE_SIZE;
tools/testing/selftests/kvm/include/s390/processor.h
27
#define PAGE_MASK (~(PAGE_SIZE - 1))
tools/testing/selftests/kvm/include/x86/processor.h
370
#define PAGE_MASK (~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)
tools/testing/selftests/kvm/include/x86/smm.h
9
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
tools/testing/selftests/kvm/lib/s390/processor.c
17
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
tools/testing/selftests/kvm/lib/s390/processor.c
171
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
tools/testing/selftests/kvm/lib/s390/processor.c
94
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
tools/testing/selftests/kvm/lib/x86/processor.c
335
nr_bytes / PAGE_SIZE);
tools/testing/selftests/kvm/lib/x86/processor.c
842
TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
tools/testing/selftests/kvm/pre_fault_memory_test.c
16
#define TEST_SIZE (SZ_2M + PAGE_SIZE)
tools/testing/selftests/kvm/pre_fault_memory_test.c
17
#define TEST_NPAGES (TEST_SIZE / PAGE_SIZE)
tools/testing/selftests/kvm/pre_fault_memory_test.c
194
pre_fault_memory(vcpu, gpa, SZ_2M, PAGE_SIZE * 2, PAGE_SIZE, private);
tools/testing/selftests/kvm/pre_fault_memory_test.c
195
pre_fault_memory(vcpu, gpa, TEST_SIZE, PAGE_SIZE, PAGE_SIZE, private);
tools/testing/selftests/kvm/pre_fault_memory_test.c
26
uint64_t *src = (uint64_t *)(base_gva + i * PAGE_SIZE);
tools/testing/selftests/kvm/s390/cmma_test.c
26
#define TEST_DATA_START_GFN PAGE_SIZE
tools/testing/selftests/kvm/s390/cmma_test.c
30
#define TEST_DATA_TWO_START_GFN (2 * PAGE_SIZE)
tools/testing/selftests/kvm/s390/keyop.c
74
.guest_addr = BUF_START_ADDR + page_idx * PAGE_SIZE,
tools/testing/selftests/kvm/s390/keyop.c
98
r = ioctl(vcpu->fd, KVM_S390_VCPU_FAULT, BUF_START_ADDR + i * PAGE_SIZE);
tools/testing/selftests/kvm/s390/memop.c
1023
rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
tools/testing/selftests/kvm/s390/memop.c
233
static uint8_t __aligned(PAGE_SIZE) mem1[65536];
tools/testing/selftests/kvm/s390/memop.c
234
static uint8_t __aligned(PAGE_SIZE) mem2[65536];
tools/testing/selftests/kvm/s390/memop.c
409
for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
tools/testing/selftests/kvm/s390/memop.c
771
set_storage_key_range(mem1, PAGE_SIZE, 0x18);
tools/testing/selftests/kvm/s390/memop.c
772
set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
tools/testing/selftests/kvm/s390/memop.c
858
const uint64_t last_page_addr = -PAGE_SIZE;
tools/testing/selftests/kvm/s390/memop.c
866
set_storage_key_range(0, PAGE_SIZE, 0x18);
tools/testing/selftests/kvm/s390/memop.c
867
set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
tools/testing/selftests/kvm/s390/memop.c
872
for (i = 0; i < PAGE_SIZE; i++)
tools/testing/selftests/kvm/s390/memop.c
883
guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
tools/testing/selftests/kvm/s390/memop.c
884
guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
tools/testing/selftests/kvm/s390/memop.c
897
MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
tools/testing/selftests/kvm/s390/memop.c
907
MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
tools/testing/selftests/kvm/s390/memop.c
909
CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
tools/testing/selftests/kvm/s390/memop.c
922
guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
tools/testing/selftests/kvm/s390/memop.c
923
guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
tools/testing/selftests/kvm/s390/memop.c
943
guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
tools/testing/selftests/kvm/s390/memop.c
944
guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
tools/testing/selftests/kvm/s390/memop.c
959
CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
tools/testing/selftests/kvm/s390/tprot.c
153
skip = tests[*i].addr < (void *)PAGE_SIZE &&
tools/testing/selftests/kvm/s390/tprot.c
17
static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
tools/testing/selftests/kvm/s390/tprot.c
219
mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
tools/testing/selftests/kvm/s390/tprot.c
222
guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
tools/testing/selftests/kvm/s390/tprot.c
232
mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
tools/testing/selftests/kvm/s390/ucontrol_test.c
153
self->sie_block = __kvm_mmap(PAGE_SIZE, PROT_READ | PROT_WRITE,
tools/testing/selftests/kvm/s390/ucontrol_test.c
187
kvm_munmap(self->sie_block, PAGE_SIZE);
tools/testing/selftests/kvm/s390/ucontrol_test.c
476
memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);
tools/testing/selftests/kvm/s390/ucontrol_test.c
541
memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);
tools/testing/selftests/kvm/s390/ucontrol_test.c
581
memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE);
tools/testing/selftests/kvm/s390/ucontrol_test.c
631
static char uc_flic_b[PAGE_SIZE];
tools/testing/selftests/kvm/s390/ucontrol_test.c
649
.attr = PAGE_SIZE,
tools/testing/selftests/kvm/set_memory_region_test.c
500
for (i = 1; i < PAGE_SIZE; i++)
tools/testing/selftests/kvm/x86/amx_test.c
274
xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
tools/testing/selftests/kvm/x86/amx_test.c
275
memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
tools/testing/selftests/kvm/x86/amx_test.c
28
#define XSAVE_SIZE ((NUM_TILES * TILE_SIZE) + PAGE_SIZE)
tools/testing/selftests/kvm/x86/hyperv_features.c
97
output = pgs_gpa + PAGE_SIZE;
tools/testing/selftests/kvm/x86/hyperv_ipi.c
105
hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
119
memset(hcall_page, 0, PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
125
pgs_gpa, pgs_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
141
memset(hcall_page, 0, PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
147
pgs_gpa, pgs_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
163
memset(hcall_page, 0, PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
170
pgs_gpa, pgs_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
186
memset(hcall_page, 0, PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
189
hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
127
void *exp_page = addr + PAGE_SIZE * NTEST_PAGES;
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
162
memset((void *)data->hcall_gva, 0, PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
220
hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
234
hcall_gpa, hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
247
hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
261
hcall_gpa, hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
277
hcall_gpa, hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
295
hcall_gpa, hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
312
hcall_gpa, hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
333
hcall_gpa, hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
346
hcall_gpa, hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
361
hcall_gpa, hcall_gpa + PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
602
memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
611
memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i),
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
612
(u8)(i + 1), PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
620
gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
622
pte = vm_get_pte(vm, data->test_pages + i * PAGE_SIZE);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
624
virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
75
void *exp_page = (void *)data->test_pages + PAGE_SIZE * NTEST_PAGES;
tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
146
vm_vaddr_t gva = arg & ~(PAGE_SIZE - 1);
tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
249
tdp_map(vm, TEST_ALIAS_GPA(0), TEST_GPA(0), PAGE_SIZE);
tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
250
tdp_map(vm, TEST_ALIAS_GPA(1), TEST_GPA(1), PAGE_SIZE);
tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
262
memset(TEST_HVA(vm, 0), 0xaa, TEST_MEM_PAGES * PAGE_SIZE);
tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
34
#define TEST_GUEST_ADDR(base, idx) ((base) + (idx) * PAGE_SIZE)
tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
164
tdp_map(vm, TEST_VMCB_L2_GPA, TEST_VMCB_L1_GPA(1), PAGE_SIZE);
tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
24
#define TEST_GUEST_ADDR(idx) (TEST_MEM_BASE + (idx) * PAGE_SIZE)
tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
50
uint64_t hpage_2 = hpage_1 + (PAGE_SIZE * 512);
tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
51
uint64_t hpage_3 = hpage_2 + (PAGE_SIZE * 512);
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
115
GUEST_STAGE(0, PAGE_SIZE),
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
117
GUEST_STAGE(PAGE_SIZE, PAGE_SIZE),
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
118
GUEST_STAGE(PAGE_SIZE, SZ_2M),
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
119
GUEST_STAGE(SZ_2M, PAGE_SIZE),
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
160
if (size > PAGE_SIZE) {
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
161
memset((void *)gpa, p2, PAGE_SIZE);
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
183
for (j = 0; j < size; j += PAGE_SIZE) {
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
185
guest_map_shared(gpa + j, PAGE_SIZE, do_fallocate);
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
186
guest_sync_shared(gpa + j, PAGE_SIZE, p1, p3);
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
188
memcmp_g(gpa + j, p3, PAGE_SIZE);
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
190
guest_sync_private(gpa + j, PAGE_SIZE, p1);
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
27
#define PER_CPU_DATA_SIZE ((uint64_t)(SZ_2M + PAGE_SIZE))
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
293
uint64_t size = run->hypercall.args[1] * PAGE_SIZE;
tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c
17
#define EXITS_TEST_SIZE (EXITS_TEST_NPAGES * PAGE_SIZE)
tools/testing/selftests/kvm/x86/sev_smoke_test.c
118
gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
tools/testing/selftests/kvm/x86/sev_smoke_test.c
137
memset(hva, 0, PAGE_SIZE);
tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c
19
#define MEM_REGION_SIZE PAGE_SIZE
tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c
67
MEM_REGION_SIZE / PAGE_SIZE, 0);
tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c
68
gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c
73
memset(hva, 0, PAGE_SIZE);
tools/testing/selftests/kvm/x86/state_test.c
144
uint8_t buffer[PAGE_SIZE];
tools/testing/selftests/kvm/x86/userspace_io_test.c
88
memset((void *)run + run->io.data_offset, 0xaa, PAGE_SIZE);
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
1067
for (runstate_addr = SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE - sizeof(*rs) - 4;
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
1068
runstate_addr < SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE + 4; runstate_addr++) {
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
22
#define DUMMY_REGION_GPA (SHINFO_REGION_GPA + (3 * PAGE_SIZE))
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
25
#define DUMMY_REGION_GPA_2 (SHINFO_REGION_GPA + (4 * PAGE_SIZE))
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
30
#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE)
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
31
#define RUNSTATE_ADDR (SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE - 15)
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
35
#define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + PAGE_SIZE - 15)
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
399
.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
504
ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
515
void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0);
tools/testing/selftests/kvm/x86/xen_vmcall_test.c
57
u64 msrval = HCALL_REGION_GPA + PAGE_SIZE + 1;
tools/testing/selftests/kvm/x86/xen_vmcall_test.c
74
"r"(HCALL_REGION_GPA + PAGE_SIZE),
tools/testing/selftests/mm/page_frag/page_frag_test.c
133
if (test_alloc_len > PAGE_SIZE || test_alloc_len <= 0 ||
tools/testing/selftests/mm/pkey-powerpc.h
137
ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
tools/testing/selftests/mm/protection_keys.c
1059
iov.iov_len = PAGE_SIZE;
tools/testing/selftests/mm/protection_keys.c
1103
err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, i);
tools/testing/selftests/mm/protection_keys.c
1115
err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, bad_pkey);
tools/testing/selftests/mm/protection_keys.c
1315
int *plain_ptr = ALIGN_PTR_UP(plain_ptr_unaligned, PAGE_SIZE);
tools/testing/selftests/mm/protection_keys.c
1383
p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE);
tools/testing/selftests/mm/protection_keys.c
1389
p1 += PAGE_SIZE;
tools/testing/selftests/mm/protection_keys.c
1395
madvise(p1, PAGE_SIZE, MADV_DONTNEED);
tools/testing/selftests/mm/protection_keys.c
1412
ret = mprotect_pkey(p1, PAGE_SIZE, PROT_EXEC, (u64)pkey);
tools/testing/selftests/mm/protection_keys.c
1421
madvise(p1, PAGE_SIZE, MADV_DONTNEED);
tools/testing/selftests/mm/protection_keys.c
1428
ret = mprotect_pkey(p1, PAGE_SIZE, PROT_EXEC | PROT_READ, (u64)pkey);
tools/testing/selftests/mm/protection_keys.c
1447
ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
tools/testing/selftests/mm/protection_keys.c
1460
madvise(p1, PAGE_SIZE, MADV_DONTNEED);
tools/testing/selftests/mm/protection_keys.c
1471
ret = mprotect(p1, PAGE_SIZE, PROT_NONE);
tools/testing/selftests/mm/protection_keys.c
1474
ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC);
tools/testing/selftests/mm/protection_keys.c
160
__attribute__((__aligned__(PAGE_SIZE)))
tools/testing/selftests/mm/protection_keys.c
1683
int size = PAGE_SIZE;
tools/testing/selftests/mm/protection_keys.c
1736
ptr = malloc_pkey(PAGE_SIZE, prot, pkey);
tools/testing/selftests/mm/protection_keys.c
1772
int size = PAGE_SIZE;
tools/testing/selftests/mm/protection_keys.c
671
ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
18
#define CHUNK_COUNT (MEMSIZE/PAGE_SIZE)
tools/testing/selftests/powerpc/syscalls/rtas_filter.c
100
(kregion->size > (PAGE_SIZE * MAX_PAGES))) {
tools/testing/selftests/proc/proc-pid-vm.c
351
VADDR, VADDR + PAGE_SIZE,
tools/testing/selftests/proc/proc-pid-vm.c
431
VADDR, VADDR + PAGE_SIZE);
tools/testing/selftests/proc/proc-pid-vm.c
527
assert(q.vma_end == VADDR + PAGE_SIZE);
tools/testing/selftests/proc/proc-pid-vm.c
528
assert(q.vma_page_size == PAGE_SIZE);
tools/testing/selftests/proc/proc-pid-vm.c
562
assert(q.vma_end == VADDR + PAGE_SIZE);
tools/testing/selftests/proc/proc-pid-vm.c
567
q.query_addr = VADDR + PAGE_SIZE; /* point right after the VMA */
tools/testing/selftests/proc/proc-self-map-files-001.c
49
const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
tools/testing/selftests/proc/proc-self-map-files-001.c
58
p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE, fd, 0);
tools/testing/selftests/proc/proc-self-map-files-001.c
63
b = (unsigned long)p + PAGE_SIZE;
tools/testing/selftests/proc/proc-self-map-files-002.c
49
const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
tools/testing/selftests/proc/proc-self-map-files-002.c
64
for (va = 0; va < va_max; va += PAGE_SIZE) {
tools/testing/selftests/proc/proc-self-map-files-002.c
65
p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
tools/testing/selftests/proc/proc-self-map-files-002.c
75
b = (unsigned long)p + PAGE_SIZE;
tools/testing/selftests/proc/thread-self.c
49
const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
tools/testing/selftests/proc/thread-self.c
56
stack = mmap(NULL, 2 * PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
tools/testing/selftests/proc/thread-self.c
59
pid = clone(f, stack + PAGE_SIZE, CLONE_THREAD|CLONE_SIGHAND|CLONE_VM, (void *)1);
tools/testing/selftests/ptrace/peeksiginfo.c
101
munmap(addr_rw, 2 * PAGE_SIZE);
tools/testing/selftests/ptrace/peeksiginfo.c
35
#ifndef PAGE_SIZE
tools/testing/selftests/ptrace/peeksiginfo.c
54
addr_rw = mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE,
tools/testing/selftests/ptrace/peeksiginfo.c
61
addr_ro = mmap(addr_rw + PAGE_SIZE, PAGE_SIZE, PROT_READ,
tools/testing/selftests/resctrl/fill_buf.c
112
ret = posix_memalign(&buf, PAGE_SIZE, buf_size);
tools/testing/selftests/sgx/defines.h
12
#define PAGE_MASK (~(PAGE_SIZE - 1))
tools/testing/selftests/sgx/load.c
204
ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
tools/testing/selftests/sgx/load.c
209
munmap(ptr, PAGE_SIZE);
tools/testing/selftests/sgx/load.c
217
ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
tools/testing/selftests/sgx/load.c
222
munmap(ptr, PAGE_SIZE);
tools/testing/selftests/sgx/load.c
283
seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;
tools/testing/selftests/sgx/main.c
1005
EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
tools/testing/selftests/sgx/main.c
1015
addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
tools/testing/selftests/sgx/main.c
1045
munmap(addr, PAGE_SIZE);
tools/testing/selftests/sgx/main.c
1056
self->run.tcs = self->encl.encl_base + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1100
munmap(addr, PAGE_SIZE);
tools/testing/selftests/sgx/main.c
1135
EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
tools/testing/selftests/sgx/main.c
1146
addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
tools/testing/selftests/sgx/main.c
1169
munmap(addr, PAGE_SIZE);
tools/testing/selftests/sgx/main.c
1210
munmap(addr, PAGE_SIZE);
tools/testing/selftests/sgx/main.c
1284
EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);
tools/testing/selftests/sgx/main.c
1290
addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
tools/testing/selftests/sgx/main.c
1300
tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1301
ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1318
munmap(addr, 3 * PAGE_SIZE);
tools/testing/selftests/sgx/main.c
1363
init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1377
modt_ioc.offset = total_size + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1378
modt_ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1441
modt_ioc.length = 3 * PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1450
EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);
tools/testing/selftests/sgx/main.c
1497
remove_ioc.length = 3 * PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1504
EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);
tools/testing/selftests/sgx/main.c
1553
munmap(addr, 3 * PAGE_SIZE);
tools/testing/selftests/sgx/main.c
1603
encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1641
modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1642
modt_ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1658
remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1659
remove_ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1714
encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1752
ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1753
ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1829
encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1867
ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1868
ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1949
encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1953
modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1954
modt_ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1983
remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
1984
remove_ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
593
self->run.tcs = self->encl.encl_base + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
630
PAGE_SIZE;
tools/testing/selftests/sgx/main.c
666
ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
tools/testing/selftests/sgx/main.c
693
ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
tools/testing/selftests/sgx/main.c
758
ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
826
encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
867
restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
tools/testing/selftests/sgx/main.c
868
restrict_ioc.length = PAGE_SIZE;
tools/testing/selftests/sgx/main.c
918
self->run.tcs = self->encl.encl_base + PAGE_SIZE;
tools/testing/selftests/sgx/sigstruct.c
301
for (offset = 0; offset < end; offset += PAGE_SIZE) {
tools/testing/selftests/vfio/lib/drivers/dsa/registers.h
25
#define IDXD_PORTAL_SIZE PAGE_SIZE
tools/testing/selftests/x86/lam.c
156
p = mmap((void *)HIGH_ADDR, PAGE_SIZE, PROT_READ | PROT_WRITE,
tools/testing/selftests/x86/lam.c
161
munmap(p, PAGE_SIZE);
tools/testing/selftests/x86/lam.c
349
ptr = mmap((void *)test->addr, PAGE_SIZE, PROT_READ | PROT_WRITE,
tools/testing/selftests/x86/lam.c
371
munmap(ptr, PAGE_SIZE);
tools/testing/selftests/x86/lam.c
415
ptr = mmap((void *)ptr_address, PAGE_SIZE, PROT_READ | PROT_WRITE,
tools/testing/selftests/x86/lam.c
430
munmap(ptr, PAGE_SIZE);
tools/testing/selftests/x86/lam.c
469
munmap(ptr, PAGE_SIZE);
tools/testing/selftests/x86/nx_stack.c
138
uc->uc_mcontext.gregs[RIP] = stack_max_addr - PAGE_SIZE;
tools/testing/selftests/x86/nx_stack.c
142
uc->uc_mcontext.gregs[RIP] -= PAGE_SIZE;
tools/testing/selftests/x86/sysret_rip.c
107
if (ip <= (1UL << 47) - PAGE_SIZE) {
tools/testing/selftests/x86/sysret_rip.c
148
test_syscall_fallthrough_to((1UL << 47) - 2*PAGE_SIZE);
tools/testing/selftests/x86/sysret_rip.c
152
test_syscall_fallthrough_to((1UL<<i) - PAGE_SIZE);
tools/testing/selftests/x86/test_mremap_vdso.c
113
unsigned long vdso_size = PAGE_SIZE;
tools/testing/selftests/x86/test_mremap_vdso.c
127
vdso_size += PAGE_SIZE;
tools/testing/selftests/x86/test_shadow_stack.c
574
test_map = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE,
tools/testing/selftests/x86/test_shadow_stack.c
588
munmap(cur->mapping, PAGE_SIZE);
tools/testing/selftests/x86/test_shadow_stack.c
594
if (shstk - test_map - PAGE_SIZE != PAGE_SIZE)
tools/testing/selftests/x86/test_shadow_stack.c
609
free_area = mmap(0, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
tools/testing/selftests/x86/test_shadow_stack.c
611
munmap(free_area, PAGE_SIZE * 4);
tools/testing/selftests/x86/test_shadow_stack.c
614
shstk_start = mmap(free_area, PAGE_SIZE, PROT_READ | PROT_WRITE,
tools/testing/selftests/x86/test_shadow_stack.c
620
test_map = (void *)syscall(__NR_map_shadow_stack, 0, PAGE_SIZE, 0);
tools/testing/selftests/x86/test_shadow_stack.c
633
if (test_map == free_area + PAGE_SIZE) {
tools/testing/selftests/x86/test_shadow_stack.c
643
munmap(cur->mapping, PAGE_SIZE);
tools/testing/selftests/x86/test_shadow_stack.c
647
munmap(shstk_start, PAGE_SIZE);
tools/testing/vma/include/custom.h
27
#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)
tools/testing/vma/include/dup.h
1209
return PAGE_SIZE;
tools/testing/vma/include/dup.h
1232
vm_end = -PAGE_SIZE;
tools/testing/vma/include/dup.h
344
#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
tools/testing/vsock/vsock_test.c
1756
if (ctl_len > PAGE_SIZE << MAX_PAGE_ORDER) {
tools/testing/vsock/vsock_test_zerocopy.c
100
{ NULL, PAGE_SIZE },
tools/testing/vsock/vsock_test_zerocopy.c
102
{ NULL, PAGE_SIZE }
tools/testing/vsock/vsock_test_zerocopy.c
112
{ NULL, PAGE_SIZE },
tools/testing/vsock/vsock_test_zerocopy.c
113
{ MAP_FAILED, PAGE_SIZE },
tools/testing/vsock/vsock_test_zerocopy.c
114
{ NULL, PAGE_SIZE }
tools/testing/vsock/vsock_test_zerocopy.c
126
{ NULL, PAGE_SIZE }
tools/testing/vsock/vsock_test_zerocopy.c
141
{ NULL, 100 * PAGE_SIZE }
tools/testing/vsock/vsock_test_zerocopy.c
27
#ifndef PAGE_SIZE
tools/testing/vsock/vsock_test_zerocopy.c
368
char sbuf1[PAGE_SIZE + 1], sbuf2[GOOD_COPY_LEN];
tools/testing/vsock/vsock_test_zerocopy.c
407
char rbuf[PAGE_SIZE + 1];
tools/testing/vsock/vsock_test_zerocopy.c
417
vsock_ioctl_int(fd, SIOCINQ, PAGE_SIZE + 1 + GOOD_COPY_LEN);
tools/testing/vsock/vsock_test_zerocopy.c
420
recv_buf(fd, rbuf, PAGE_SIZE + 1, 0, PAGE_SIZE + 1);
tools/testing/vsock/vsock_test_zerocopy.c
62
{ NULL, PAGE_SIZE },
tools/testing/vsock/vsock_test_zerocopy.c
63
{ NULL, PAGE_SIZE },
tools/testing/vsock/vsock_test_zerocopy.c
74
{ NULL, PAGE_SIZE },
tools/testing/vsock/vsock_test_zerocopy.c
75
{ NULL, PAGE_SIZE * 2 },
tools/testing/vsock/vsock_test_zerocopy.c
76
{ NULL, PAGE_SIZE * 3 }
tools/testing/vsock/vsock_test_zerocopy.c
88
{ NULL, PAGE_SIZE * 16 },
tools/testing/vsock/vsock_test_zerocopy.c
89
{ NULL, PAGE_SIZE * 16 },
tools/testing/vsock/vsock_test_zerocopy.c
90
{ NULL, PAGE_SIZE * 16 }
tools/testing/vsock/vsock_uring_test.c
23
#ifndef PAGE_SIZE
tools/testing/vsock/vsock_uring_test.c
42
{ NULL, PAGE_SIZE },
tools/testing/vsock/vsock_uring_test.c
43
{ NULL, 2 * PAGE_SIZE },
tools/testing/vsock/vsock_uring_test.c
44
{ NULL, 3 * PAGE_SIZE },
tools/testing/vsock/vsock_uring_test.c
51
{ NULL, PAGE_SIZE },
tools/testing/vsock/vsock_uring_test.c
53
{ NULL, 3 * PAGE_SIZE },
tools/virtio/linux/kernel.h
103
posix_memalign(&p, PAGE_SIZE, PAGE_SIZE);
tools/virtio/linux/kernel.h
28
#define PAGE_MASK (~(PAGE_SIZE-1))
tools/virtio/linux/kernel.h
29
#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
tools/virtio/linux/kernel.h
50
#define offset_in_page(p) (((unsigned long)p) % PAGE_SIZE)
tools/virtio/virtio-trace/trace-agent.c
19
#define PIPE_MIN_SIZE (PAGE_SIZE*PIPE_DEF_BUFS)
tools/virtio/virtio-trace/trace-agent.c
97
round = value & (PAGE_SIZE - 1);
tools/virtio/vringh_test.c
485
if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
virt/kvm/dirty_ring.c
264
return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
virt/kvm/guest_memfd.c
894
unsigned long uaddr = (unsigned long)src + i * PAGE_SIZE;
virt/kvm/kvm_main.c
2022
if ((mem->memory_size & (PAGE_SIZE - 1)) ||
virt/kvm/kvm_main.c
2025
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
virt/kvm/kvm_main.c
2028
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
virt/kvm/kvm_main.c
2034
(mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
virt/kvm/kvm_main.c
2696
size = PAGE_SIZE;
virt/kvm/kvm_main.c
2700
return PAGE_SIZE;
virt/kvm/kvm_main.c
3143
map->hva = memremap(pfn_to_hpa(map->pfn), PAGE_SIZE, MEMREMAP_WB);
virt/kvm/kvm_main.c
3181
if (len > PAGE_SIZE - offset)
virt/kvm/kvm_main.c
3182
return PAGE_SIZE - offset;
virt/kvm/kvm_main.c
3194
if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
virt/kvm/kvm_main.c
3270
if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
virt/kvm/kvm_main.c
3303
if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
virt/kvm/kvm_main.c
361
memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
virt/kvm/kvm_main.c
4047
kvm->dirty_ring_size / PAGE_SIZE);
virt/kvm/kvm_main.c
4197
BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
virt/kvm/kvm_main.c
4960
sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
virt/kvm/kvm_main.c
5549
r = PAGE_SIZE; /* struct kvm_run */
virt/kvm/kvm_main.c
5551
r += PAGE_SIZE; /* pio data page */
virt/kvm/kvm_main.c
5554
r += PAGE_SIZE; /* coalesced mmio ring page */
virt/kvm/kvm_main.c
619
gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
virt/kvm/pfncache.c
105
return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
virt/kvm/pfncache.c
70
return offset + len <= PAGE_SIZE;