Symbol: vaddr
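Note: this is a cross-reference of the identifier vaddr across arch/ code; each entry gives the file, the line number within that file, and the source line itself. By kernel convention, vaddr holds a CPU virtual address, as opposed to paddr (a physical address) or kaddr (a kernel direct-map address). A minimal, runnable sketch of the page arithmetic most of these sites rely on; PAGE_SHIFT and friends are illustrative stand-ins for the per-architecture kernel definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's per-arch definitions. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long vaddr = 0x7f3a12345678UL;

        printf("page base:   %#lx\n", vaddr & PAGE_MASK);  /* start of the page */
        printf("page offset: %#lx\n", vaddr & ~PAGE_MASK); /* offset within it */
        return 0;
    }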
arch/alpha/boot/bootpz.c
79
unsigned long vaddr, kaddr;
arch/alpha/boot/bootpz.c
86
for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE)
arch/alpha/boot/bootpz.c
88
kaddr = (find_pa(vaddr) | PAGE_OFFSET);
arch/alpha/boot/bootpz.c
94
vaddr, kaddr, kstart, kend);
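Note: the bootpz.c lines above walk a virtual range one page at a time and turn each page's physical address into a kernel direct-map address. A standalone sketch of that walk; find_pa() and PAGE_OFFSET below are stand-ins for the alpha boot helpers, with an identity virt-to-phys mapping assumed purely for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PAGE_OFFSET 0xfffffc0000000000UL /* stand-in direct-map base */

    /* Stand-in for the boot code's find_pa(): identity map for the demo. */
    static unsigned long find_pa(unsigned long vaddr)
    {
        return vaddr;
    }

    int main(void)
    {
        unsigned long vstart = 0x10000, vend = 0x13000, vaddr, kaddr;

        /* Same shape as the loop above: one iteration per page. */
        for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE) {
            kaddr = find_pa(vaddr) | PAGE_OFFSET;
            printf("vaddr %#lx -> kaddr %#lx\n", vaddr, kaddr);
        }
        return 0;
    }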
arch/alpha/include/asm/page.h
15
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
arch/alpha/include/asm/page.h
16
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
arch/alpha/include/asm/page.h
19
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/alpha/include/asm/pgtable.h
133
#define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE))
arch/alpha/kernel/core_irongate.c
315
unsigned long vaddr;
arch/alpha/kernel/core_irongate.c
381
for(baddr = addr, vaddr = (unsigned long)area->addr;
arch/alpha/kernel/core_irongate.c
383
baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
arch/alpha/kernel/core_irongate.c
388
if (__alpha_remap_area_pages(vaddr,
arch/alpha/kernel/core_irongate.c
398
vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
arch/alpha/kernel/core_irongate.c
401
addr, size, vaddr);
arch/alpha/kernel/core_irongate.c
403
return (void __iomem *)vaddr;
arch/alpha/kernel/core_marvel.c
686
unsigned long vaddr;
arch/alpha/kernel/core_marvel.c
743
for (vaddr = (unsigned long)area->addr;
arch/alpha/kernel/core_marvel.c
745
baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
arch/alpha/kernel/core_marvel.c
754
if (__alpha_remap_area_pages(vaddr,
arch/alpha/kernel/core_marvel.c
765
vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
arch/alpha/kernel/core_marvel.c
767
return (void __iomem *) vaddr;
arch/alpha/kernel/core_marvel.c
771
vaddr = baddr + hose->mem_space->start;
arch/alpha/kernel/core_marvel.c
772
return (void __iomem *) vaddr;
arch/alpha/kernel/core_titan.c
462
unsigned long vaddr;
arch/alpha/kernel/core_titan.c
490
vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
arch/alpha/kernel/core_titan.c
491
return (void __iomem *) vaddr;
arch/alpha/kernel/core_titan.c
519
for (vaddr = (unsigned long)area->addr;
arch/alpha/kernel/core_titan.c
521
baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
arch/alpha/kernel/core_titan.c
530
if (__alpha_remap_area_pages(vaddr,
arch/alpha/kernel/core_titan.c
541
vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
arch/alpha/kernel/core_titan.c
542
return (void __iomem *) vaddr;
arch/alpha/kernel/core_tsunami.c
201
tsunami_probe_read(volatile unsigned long *vaddr)
arch/alpha/kernel/core_tsunami.c
210
dont_care = *vaddr;
arch/alpha/kernel/core_tsunami.c
223
tsunami_probe_write(volatile unsigned long *vaddr)
arch/alpha/kernel/core_tsunami.c
228
true_contents = *vaddr;
arch/alpha/kernel/core_tsunami.c
229
*vaddr = 0;
arch/alpha/kernel/core_tsunami.c
236
(unsigned long)vaddr);
arch/alpha/kernel/core_tsunami.c
239
*vaddr = true_contents;
arch/alpha/mm/init.c
170
unsigned long vaddr;
arch/alpha/mm/init.c
182
vaddr = (unsigned long)console_remap_vm.addr;
arch/alpha/mm/init.c
188
crb->map[i].va = vaddr;
arch/alpha/mm/init.c
193
if (pmd != pmd_offset(pud, vaddr)) {
arch/alpha/mm/init.c
195
pmd = pmd_offset(pud, vaddr);
arch/alpha/mm/init.c
199
set_pte(pte_offset_kernel(pmd, vaddr),
arch/alpha/mm/init.c
202
vaddr += PAGE_SIZE;
arch/arc/include/asm/cacheflush.h
24
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
arch/arc/include/asm/cacheflush.h
25
void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
arch/arc/include/asm/cacheflush.h
26
void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
arch/arc/include/asm/cacheflush.h
59
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
arch/arc/include/asm/cacheflush.h
63
__sync_icache_dcache((unsigned long)(dst), vaddr, len); \
arch/arc/include/asm/cacheflush.h
66
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
arch/arc/include/asm/highmem.h
44
#define arch_kmap_local_post_unmap(vaddr) \
arch/arc/include/asm/highmem.h
45
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
arch/arc/include/asm/page.h
115
#define __pa(vaddr) ((unsigned long)(vaddr))
arch/arc/include/asm/page.h
25
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/arc/include/asm/pgtable.h
25
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/arc/mm/cache.c
188
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
arch/arc/mm/cache.c
211
vaddr &= CACHE_LINE_MASK;
arch/arc/mm/cache.c
238
write_aux_reg(aux_cmd, vaddr);
arch/arc/mm/cache.c
239
vaddr += L1_CACHE_BYTES;
arch/arc/mm/cache.c
248
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
arch/arc/mm/cache.c
302
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
arch/arc/mm/cache.c
35
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
arch/arc/mm/cache.c
460
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
arch/arc/mm/cache.c
470
__cache_line_loop(paddr, vaddr, sz, op, full_page);
arch/arc/mm/cache.c
482
#define __dc_line_op(paddr, vaddr, sz, op)
arch/arc/mm/cache.c
496
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
arch/arc/mm/cache.c
503
(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
arch/arc/mm/cache.c
514
phys_addr_t paddr, vaddr;
arch/arc/mm/cache.c
522
__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
arch/arc/mm/cache.c
525
static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
arch/arc/mm/cache.c
530
.vaddr = vaddr,
arch/arc/mm/cache.c
848
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
arch/arc/mm/cache.c
850
__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
arch/arc/mm/cache.c
851
__ic_line_inv_vaddr(paddr, vaddr, len);
arch/arc/mm/cache.c
855
void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
arch/arc/mm/cache.c
857
__ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
arch/arc/mm/cache.c
864
void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
arch/arc/mm/cache.c
866
__dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
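Note: the __cache_line_loop_* sites above share one idiom: round vaddr down to a cache line, then issue one cache operation per L1 line until the requested span is covered. A standalone sketch under an assumed 64-byte line size; cache_op_line() stands in for ARC's per-line write_aux_reg() operation:

    #include <stdio.h>

    #define L1_CACHE_BYTES  64UL                    /* assumed line size */
    #define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1)) /* as in arc cache.c */

    /* Stand-in for one aux-register cache op on the line at vaddr. */
    static void cache_op_line(unsigned long vaddr)
    {
        printf("op on line %#lx\n", vaddr);
    }

    int main(void)
    {
        unsigned long vaddr = 0x1008, sz = 130;
        unsigned long end = vaddr + sz;

        vaddr &= CACHE_LINE_MASK;            /* round down to a line */
        for (; vaddr < end; vaddr += L1_CACHE_BYTES)
            cache_op_line(vaddr);            /* covers the whole span */
        return 0;
    }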
arch/arc/mm/tlb.c
394
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
arch/arc/mm/tlb.c
430
vaddr &= PAGE_MASK;
arch/arc/mm/tlb.c
440
pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
arch/arc/mm/tlb.c
475
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
arch/arc/mm/tlb.c
479
create_tlb(vma, vaddr, ptep);
arch/arc/mm/tlb.c
496
vaddr -= offset;
arch/arc/mm/tlb.c
502
__inv_icache_pages(paddr, vaddr, nr);
arch/arm/include/asm/cacheflush.h
17
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
arch/arm/include/asm/cacheflush.h
170
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
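Note: CACHE_COLOUR() above reduces a virtual address to its cache colour, i.e. which of the SHMLBA/PAGE_SIZE alias slots the address falls in on a VIVT-aliasing cache. A runnable demo under the assumption SHMLBA spans four pages, as on aliasing ARM caches:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define SHMLBA     (4UL << PAGE_SHIFT) /* assumed: four pages */

    /* Parenthesised copy of the macro from arm cacheflush.h. */
    #define CACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long v;

        /* Colours cycle 0,1,2,3 as we step page by page. */
        for (v = 0; v < (8UL << PAGE_SHIFT); v += 1UL << PAGE_SHIFT)
            printf("vaddr %#7lx -> colour %lu\n", v, CACHE_COLOUR(v));
        return 0;
    }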
arch/arm/include/asm/highmem.h
66
#define arch_kmap_local_post_map(vaddr, pteval) \
arch/arm/include/asm/highmem.h
67
local_flush_tlb_kernel_page(vaddr)
arch/arm/include/asm/highmem.h
69
#define arch_kmap_local_pre_unmap(vaddr) \
arch/arm/include/asm/highmem.h
72
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
arch/arm/include/asm/highmem.h
75
#define arch_kmap_local_post_unmap(vaddr) \
arch/arm/include/asm/highmem.h
76
local_flush_tlb_kernel_page(vaddr)
arch/arm/include/asm/mach/map.h
47
extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr);
arch/arm/include/asm/page-nommu.h
14
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/arm/include/asm/page.h
108
void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
110
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
114
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
115
void fa_clear_user_highpage(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
117
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
118
void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
120
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
121
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
123
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
124
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
126
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
127
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
129
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
130
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
132
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
133
void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
146
extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
arch/arm/include/asm/page.h
148
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm/include/asm/page.h
151
#define clear_user_highpage(page,vaddr) \
arch/arm/include/asm/page.h
152
__cpu_clear_user_highpage(page, vaddr)
arch/arm/include/asm/page.h
155
#define copy_user_highpage(to,from,vaddr,vma) \
arch/arm/include/asm/page.h
156
__cpu_copy_user_highpage(to, from, vaddr, vma)
arch/arm/include/asm/pgtable.h
19
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/arm/kernel/crash_dump.c
22
void *vaddr;
arch/arm/kernel/crash_dump.c
27
vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
arch/arm/kernel/crash_dump.c
28
if (!vaddr)
arch/arm/kernel/crash_dump.c
31
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/arm/kernel/crash_dump.c
33
iounmap(vaddr);
arch/arm/kernel/tcm.c
77
unsigned long vaddr;
arch/arm/kernel/tcm.c
82
vaddr = gen_pool_alloc(tcm_pool, len);
arch/arm/kernel/tcm.c
83
if (!vaddr)
arch/arm/kernel/tcm.c
86
return (void *) vaddr;
arch/arm/mm/cache-feroceon-l2.c
51
void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
arch/arm/mm/cache-feroceon-l2.c
52
return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
arch/arm/mm/cache-feroceon-l2.c
58
static inline void l2_put_va(unsigned long vaddr)
arch/arm/mm/cache-feroceon-l2.c
61
kunmap_atomic((void *)vaddr);
arch/arm/mm/cache-xsc3l2.c
101
vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
arch/arm/mm/cache-xsc3l2.c
102
xsc3_l2_clean_mva(vaddr);
arch/arm/mm/cache-xsc3l2.c
103
xsc3_l2_inv_mva(vaddr);
arch/arm/mm/cache-xsc3l2.c
111
vaddr = l2_map_va(start, vaddr);
arch/arm/mm/cache-xsc3l2.c
112
xsc3_l2_inv_mva(vaddr);
arch/arm/mm/cache-xsc3l2.c
120
vaddr = l2_map_va(start, vaddr);
arch/arm/mm/cache-xsc3l2.c
121
xsc3_l2_clean_mva(vaddr);
arch/arm/mm/cache-xsc3l2.c
122
xsc3_l2_inv_mva(vaddr);
arch/arm/mm/cache-xsc3l2.c
125
l2_unmap_va(vaddr);
arch/arm/mm/cache-xsc3l2.c
132
unsigned long vaddr;
arch/arm/mm/cache-xsc3l2.c
134
vaddr = -1; /* to force the first mapping */
arch/arm/mm/cache-xsc3l2.c
138
vaddr = l2_map_va(start, vaddr);
arch/arm/mm/cache-xsc3l2.c
139
xsc3_l2_clean_mva(vaddr);
arch/arm/mm/cache-xsc3l2.c
143
l2_unmap_va(vaddr);
arch/arm/mm/cache-xsc3l2.c
170
unsigned long vaddr;
arch/arm/mm/cache-xsc3l2.c
177
vaddr = -1; /* to force the first mapping */
arch/arm/mm/cache-xsc3l2.c
181
vaddr = l2_map_va(start, vaddr);
arch/arm/mm/cache-xsc3l2.c
182
xsc3_l2_clean_mva(vaddr);
arch/arm/mm/cache-xsc3l2.c
183
xsc3_l2_inv_mva(vaddr);
arch/arm/mm/cache-xsc3l2.c
187
l2_unmap_va(vaddr);
arch/arm/mm/cache-xsc3l2.c
88
unsigned long vaddr;
arch/arm/mm/cache-xsc3l2.c
95
vaddr = -1; /* to force the first mapping */
arch/arm/mm/copypage-fa.c
39
unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/copypage-fa.c
55
void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-feroceon.c
67
unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/copypage-feroceon.c
73
flush_cache_page(vma, vaddr, page_to_pfn(from));
arch/arm/mm/copypage-feroceon.c
79
void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-v4mc.c
65
unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/copypage-v4mc.c
87
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-v4wb.c
48
unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/copypage-v4wb.c
54
flush_cache_page(vma, vaddr, page_to_pfn(from));
arch/arm/mm/copypage-v4wb.c
65
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-v4wt.c
44
unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/copypage-v4wt.c
60
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-v6.c
104
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-v6.c
106
unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
arch/arm/mm/copypage-v6.c
31
struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/copypage-v6.c
46
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-v6.c
70
struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/copypage-v6.c
73
unsigned int offset = CACHE_COLOUR(vaddr);
arch/arm/mm/copypage-xsc3.c
65
unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/copypage-xsc3.c
71
flush_cache_page(vma, vaddr, page_to_pfn(from));
arch/arm/mm/copypage-xsc3.c
80
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-xscale.c
108
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
arch/arm/mm/copypage-xscale.c
85
unsigned long vaddr, struct vm_area_struct *vma)
arch/arm/mm/dma-mapping.c
643
void *vaddr;
arch/arm/mm/dma-mapping.c
651
vaddr = kmap_atomic_pfn(pfn);
arch/arm/mm/dma-mapping.c
652
op(vaddr + offset, len, dir);
arch/arm/mm/dma-mapping.c
653
kunmap_atomic(vaddr);
arch/arm/mm/dma-mapping.c
657
vaddr = kmap_high_get(page);
arch/arm/mm/dma-mapping.c
658
if (vaddr) {
arch/arm/mm/dma-mapping.c
659
op(vaddr + offset, len, dir);
arch/arm/mm/dma-mapping.c
665
vaddr = phys_to_virt(phys);
arch/arm/mm/dma-mapping.c
666
op(vaddr, len, dir);
arch/arm/mm/flush.c
115
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
arch/arm/mm/flush.c
116
#define flush_icache_alias(pfn,vaddr,len) do { } while (0)
arch/arm/mm/flush.c
38
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
arch/arm/mm/flush.c
40
unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
arch/arm/mm/flush.c
52
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
arch/arm/mm/flush.c
54
unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
arch/arm/mm/flush.c
55
unsigned long offset = vaddr & (PAGE_SIZE - 1);
arch/arm/mm/ioremap.c
100
vaddr = vm->addr;
arch/arm/mm/ioremap.c
105
if (vm->addr > vaddr)
arch/arm/mm/ioremap.c
486
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
arch/arm/mm/ioremap.c
494
return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
arch/arm/mm/ioremap.c
73
struct static_vm *find_static_vm_vaddr(void *vaddr)
arch/arm/mm/ioremap.c
82
if (vm->addr > vaddr)
arch/arm/mm/ioremap.c
85
if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
arch/arm/mm/ioremap.c
96
void *vaddr;
arch/arm/mm/mm.h
74
extern struct static_vm *find_static_vm_vaddr(void *vaddr);
arch/arm/mm/mmu.c
399
unsigned long vaddr = __fix_to_virt(idx);
arch/arm/mm/mmu.c
400
pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
arch/arm/mm/mmu.c
412
set_pte_at(NULL, vaddr, pte,
arch/arm/mm/mmu.c
415
pte_clear(NULL, vaddr, pte);
arch/arm/mm/mmu.c
416
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
arch/arm/probes/uprobes/core.c
113
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
arch/arm/probes/uprobes/core.c
117
void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
arch/arm/probes/uprobes/core.c
125
flush_uprobe_xol_access(page, vaddr, dst, len);
arch/arm/probes/uprobes/core.c
154
regs->ARM_pc = utask->vaddr + 4;
arch/arm/probes/uprobes/core.c
175
instruction_pointer_set(regs, utask->vaddr);
arch/arm/probes/uprobes/core.c
30
unsigned long vaddr)
arch/arm/probes/uprobes/core.c
32
return uprobe_write_opcode(auprobe, vma, vaddr,
arch/arm/xen/enlighten.c
499
&xen_auto_xlat_grant_frames.vaddr,
arch/arm64/include/asm/kvm_asm.h
249
extern int __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
arch/arm64/include/asm/kvm_asm.h
250
extern int __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
arch/arm64/include/asm/kvm_asm.h
251
extern int __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
arch/arm64/include/asm/page.h
26
unsigned long vaddr, struct vm_area_struct *vma);
arch/arm64/include/asm/page.h
33
unsigned long vaddr);
arch/arm64/include/asm/page.h
39
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/arm64/include/asm/pgtable.h
114
#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
arch/arm64/kernel/crash_dump.c
18
void *vaddr;
arch/arm64/kernel/crash_dump.c
23
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/arm64/kernel/crash_dump.c
24
if (!vaddr)
arch/arm64/kernel/crash_dump.c
27
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/arm64/kernel/crash_dump.c
29
memunmap(vaddr);
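Note: the crash_dump.c lines above follow a map/copy/unmap shape: map one pfn of the crashed kernel's memory, copy csize bytes starting at offset, unmap. A userspace mock of that shape; map_page() and unmap_page() are hypothetical stand-ins for memremap() and memunmap():

    #include <stdio.h>
    #include <string.h>

    static char fake_oldmem[4096]; /* pretend this is the old kernel's page */

    /* Hypothetical stand-ins for memremap()/memunmap(). */
    static void *map_page(unsigned long pfn) { (void)pfn; return fake_oldmem; }
    static void unmap_page(void *vaddr)      { (void)vaddr; }

    int main(void)
    {
        char buf[16];
        size_t offset = 32;
        void *vaddr = map_page(0x1234);

        if (!vaddr)
            return 1;
        memcpy(buf, (char *)vaddr + offset, sizeof(buf));
        unmap_page(vaddr);
        printf("copied %zu bytes\n", sizeof(buf));
        return 0;
    }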
arch/arm64/kernel/machine_kexec.c
100
memset(vaddr, 0, PAGE_SIZE);
arch/arm64/kernel/machine_kexec.c
102
return vaddr;
arch/arm64/kernel/machine_kexec.c
94
void *vaddr = NULL;
arch/arm64/kernel/machine_kexec.c
99
vaddr = page_address(page);
arch/arm64/kernel/probes/uprobes.c
134
instruction_pointer_set(regs, utask->vaddr);
arch/arm64/kernel/probes/uprobes.c
15
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
arch/arm64/kernel/probes/uprobes.c
19
void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
arch/arm64/kernel/probes/uprobes.c
93
instruction_pointer_set(regs, utask->vaddr + 4);
arch/arm64/kvm/at.c
1250
static int handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr, u64 *par)
arch/arm64/kvm/at.c
1262
ret = setup_s1_walk(vcpu, &wi, &wr, vaddr);
arch/arm64/kvm/at.c
1271
ret = walk_s1(vcpu, &wi, &wr, vaddr);
arch/arm64/kvm/at.c
1326
static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
arch/arm64/kvm/at.c
1392
fail = at_s1e1p_fast(vcpu, op, vaddr);
arch/arm64/kvm/at.c
1395
fail = __kvm_at(OP_AT_S1E1R, vaddr);
arch/arm64/kvm/at.c
1398
fail = __kvm_at(OP_AT_S1E1W, vaddr);
arch/arm64/kvm/at.c
1401
fail = __kvm_at(OP_AT_S1E0R, vaddr);
arch/arm64/kvm/at.c
1404
fail = __kvm_at(OP_AT_S1E0W, vaddr);
arch/arm64/kvm/at.c
1407
fail = __kvm_at(OP_AT_S1E1A, vaddr);
arch/arm64/kvm/at.c
1442
int __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
arch/arm64/kvm/at.c
1444
u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr);
arch/arm64/kvm/at.c
1458
ret = handle_at_slow(vcpu, op, vaddr, &par);
arch/arm64/kvm/at.c
1467
int __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
arch/arm64/kvm/at.c
1494
fail = __kvm_at(OP_AT_S1E1R, vaddr);
arch/arm64/kvm/at.c
1497
fail = __kvm_at(OP_AT_S1E1W, vaddr);
arch/arm64/kvm/at.c
1500
fail = __kvm_at(OP_AT_S1E1A, vaddr);
arch/arm64/kvm/at.c
1516
ret = handle_at_slow(vcpu, op, vaddr, &par);
arch/arm64/kvm/at.c
1525
int __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
arch/arm64/kvm/at.c
1555
__kvm_at_s1e01(vcpu, op, vaddr);
arch/arm64/kvm/at.c
1569
ipa = (par & GENMASK_ULL(47, 12)) | (vaddr & GENMASK_ULL(11, 0));
arch/arm64/kvm/at.c
621
static bool at_s1e1p_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
arch/arm64/kvm/at.c
631
fail = __kvm_at(OP_AT_S1E1RP, vaddr);
arch/arm64/kvm/at.c
634
fail = __kvm_at(OP_AT_S1E1WP, vaddr);
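Note: the at.c line composing ipa above splices the output frame taken from the PAR register (bits 47:12) with the page offset kept from the input vaddr (bits 11:0). A standalone check of that bit splice, using a GENMASK_ULL definition equivalent to the kernel's:

    #include <stdio.h>

    /* Equivalent to the kernel's GENMASK_ULL(h, l): bits h..l set. */
    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        unsigned long long par   = 0x0000123456789000ULL; /* sample PAR */
        unsigned long long vaddr = 0xffff0000deadbeefULL; /* sample VA  */
        unsigned long long ipa   = (par & GENMASK_ULL(47, 12)) |
                                   (vaddr & GENMASK_ULL(11, 0));

        printf("ipa = %#llx\n", ipa); /* prints 0x123456789eef */
        return 0;
    }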
arch/arm64/mm/copypage.c
69
unsigned long vaddr, struct vm_area_struct *vma)
arch/arm64/mm/fault.c
959
unsigned long vaddr)
arch/arm64/mm/fault.c
971
return vma_alloc_folio(flags, 0, vma, vaddr);
arch/csky/abiv1/inc/abi/cacheflush.h
53
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
arch/csky/abiv1/inc/abi/cacheflush.h
58
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
arch/csky/abiv1/inc/abi/page.h
14
static inline void clear_user_page(void *addr, unsigned long vaddr,
arch/csky/abiv1/inc/abi/page.h
18
if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
arch/csky/abiv1/inc/abi/page.h
22
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
arch/csky/abiv1/inc/abi/page.h
26
if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK))
arch/csky/abiv2/inc/abi/cacheflush.h
47
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
arch/csky/abiv2/inc/abi/cacheflush.h
58
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
arch/csky/abiv2/inc/abi/page.h
2
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
arch/csky/include/asm/highmem.h
37
#define arch_kmap_local_post_map(vaddr, pteval) kmap_flush_tlb(vaddr)
arch/csky/include/asm/highmem.h
38
#define arch_kmap_local_post_unmap(vaddr) kmap_flush_tlb(vaddr)
arch/csky/include/asm/pgtable.h
80
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/csky/include/asm/tlbflush.h
22
extern void flush_tlb_one(unsigned long vaddr);
arch/csky/kernel/probes/uprobes.c
111
instruction_pointer_set(regs, utask->vaddr);
arch/csky/kernel/probes/uprobes.c
69
instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
arch/csky/mm/highmem.c
22
unsigned long vaddr;
arch/csky/mm/highmem.c
28
vaddr = PKMAP_BASE;
arch/csky/mm/highmem.c
29
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
arch/csky/mm/highmem.c
31
pgd = swapper_pg_dir + pgd_index(vaddr);
arch/csky/mm/highmem.c
33
pmd = pmd_offset(pud, vaddr);
arch/csky/mm/highmem.c
34
pte = pte_offset_kernel(pmd, vaddr);
arch/csky/mm/init.c
100
k = pmd_index(vaddr);
arch/csky/mm/init.c
103
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
arch/csky/mm/init.c
105
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
arch/csky/mm/init.c
107
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
arch/csky/mm/init.c
118
vaddr += PMD_SIZE;
arch/csky/mm/init.c
128
unsigned long vaddr;
arch/csky/mm/init.c
130
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
arch/csky/mm/init.c
131
fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
arch/csky/mm/init.c
95
unsigned long vaddr;
arch/csky/mm/init.c
97
vaddr = start;
arch/csky/mm/init.c
98
i = pgd_index(vaddr);
arch/csky/mm/init.c
99
j = pud_index(vaddr);
arch/csky/mm/tcm.c
111
unsigned long vaddr;
arch/csky/mm/tcm.c
116
vaddr = gen_pool_alloc(tcm_pool, len);
arch/csky/mm/tcm.c
117
if (!vaddr)
arch/csky/mm/tcm.c
120
return (void *) vaddr;
arch/csky/mm/tcm.c
29
unsigned long vaddr, paddr;
arch/csky/mm/tcm.c
42
vaddr = __fix_to_virt(FIX_TCM - i);
arch/csky/mm/tcm.c
45
pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
arch/csky/mm/tcm.c
49
flush_tlb_one(vaddr);
arch/csky/mm/tcm.c
61
vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
arch/csky/mm/tcm.c
64
pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
arch/csky/mm/tcm.c
68
flush_tlb_one(vaddr);
arch/hexagon/include/asm/cacheflush.h
72
unsigned long vaddr, void *dst, void *src, int len);
arch/hexagon/include/asm/cacheflush.h
75
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
arch/hexagon/include/asm/page.h
116
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/hexagon/include/asm/pgtable.h
352
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
arch/hexagon/mm/cache.c
119
unsigned long vaddr, void *dst, void *src, int len)
arch/hexagon/mm/vm_tlb.c
38
void flush_tlb_one(unsigned long vaddr)
arch/hexagon/mm/vm_tlb.c
40
__vmclrmap((void *)vaddr, PAGE_SIZE);
arch/hexagon/mm/vm_tlb.c
67
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
arch/hexagon/mm/vm_tlb.c
72
__vmclrmap((void *)vaddr, PAGE_SIZE);
arch/loongarch/include/asm/kfence.h
20
char *kaddr, *vaddr;
arch/loongarch/include/asm/kfence.h
41
vaddr = __kfence_pool;
arch/loongarch/include/asm/kfence.h
43
set_page_address(virt_to_page(kaddr), vaddr);
arch/loongarch/include/asm/kfence.h
45
vaddr += PAGE_SIZE;
arch/loongarch/include/asm/page.h
33
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/loongarch/include/asm/page.h
40
unsigned long vaddr, struct vm_area_struct *vma);
arch/loongarch/include/asm/pgtable.h
84
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
arch/loongarch/include/asm/tlbflush.h
26
extern void local_flush_tlb_one(unsigned long vaddr);
arch/loongarch/include/asm/tlbflush.h
35
extern void flush_tlb_one(unsigned long vaddr);
arch/loongarch/include/asm/tlbflush.h
44
#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
arch/loongarch/kernel/crash_dump.c
14
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/loongarch/kernel/crash_dump.c
15
if (!vaddr)
arch/loongarch/kernel/crash_dump.c
18
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/loongarch/kernel/crash_dump.c
20
memunmap(vaddr);
arch/loongarch/kernel/crash_dump.c
9
void *vaddr;
arch/loongarch/kernel/setup.c
470
unsigned long vaddr;
arch/loongarch/kernel/setup.c
495
vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
arch/loongarch/kernel/setup.c
496
vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
arch/loongarch/kernel/smp.c
847
unsigned long vaddr = (unsigned long) info;
arch/loongarch/kernel/smp.c
849
local_flush_tlb_one(vaddr);
arch/loongarch/kernel/smp.c
852
void flush_tlb_one(unsigned long vaddr)
arch/loongarch/kernel/smp.c
854
on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
arch/loongarch/kernel/uprobes.c
135
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
arch/loongarch/kernel/uprobes.c
139
void *dst = kaddr + (vaddr & ~PAGE_MASK);
arch/loongarch/kernel/uprobes.c
55
instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
arch/loongarch/kernel/uprobes.c
65
instruction_pointer_set(regs, utask->vaddr);
arch/loongarch/mm/init.c
43
unsigned long vaddr, struct vm_area_struct *vma)
arch/loongarch/mm/mmap.c
117
unsigned long vaddr = (unsigned long)kaddr;
arch/loongarch/mm/mmap.c
122
if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
arch/m68k/include/asm/bitops.h
102
#define clear_bit(nr, vaddr) bclr_reg_clear_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
104
#define clear_bit(nr, vaddr) bclr_mem_clear_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
106
#define clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
arch/m68k/include/asm/bitops.h
107
bclr_mem_clear_bit(nr, vaddr) : \
arch/m68k/include/asm/bitops.h
108
bfclr_mem_clear_bit(nr, vaddr))
arch/m68k/include/asm/bitops.h
117
static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
119
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
127
static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
129
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
136
static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
140
: "d" (nr ^ 31), "o" (*vaddr)
arch/m68k/include/asm/bitops.h
145
#define change_bit(nr, vaddr) bchg_reg_change_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
147
#define change_bit(nr, vaddr) bchg_mem_change_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
149
#define change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
arch/m68k/include/asm/bitops.h
150
bchg_mem_change_bit(nr, vaddr) : \
arch/m68k/include/asm/bitops.h
151
bfchg_mem_change_bit(nr, vaddr))
arch/m68k/include/asm/bitops.h
164
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
166
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
177
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
179
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
189
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
195
: "d" (nr ^ 31), "o" (*vaddr)
arch/m68k/include/asm/bitops.h
201
#define test_and_set_bit(nr, vaddr) bset_reg_test_and_set_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
203
#define test_and_set_bit(nr, vaddr) bset_mem_test_and_set_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
205
#define test_and_set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
arch/m68k/include/asm/bitops.h
206
bset_mem_test_and_set_bit(nr, vaddr) : \
arch/m68k/include/asm/bitops.h
207
bfset_mem_test_and_set_bit(nr, vaddr))
arch/m68k/include/asm/bitops.h
217
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
219
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
230
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
232
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
242
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
248
: "d" (nr ^ 31), "o" (*vaddr)
arch/m68k/include/asm/bitops.h
254
#define test_and_clear_bit(nr, vaddr) bclr_reg_test_and_clear_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
256
#define test_and_clear_bit(nr, vaddr) bclr_mem_test_and_clear_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
258
#define test_and_clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
arch/m68k/include/asm/bitops.h
259
bclr_mem_test_and_clear_bit(nr, vaddr) : \
arch/m68k/include/asm/bitops.h
260
bfclr_mem_test_and_clear_bit(nr, vaddr))
arch/m68k/include/asm/bitops.h
270
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
272
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
283
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
285
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
295
volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
301
: "d" (nr ^ 31), "o" (*vaddr)
arch/m68k/include/asm/bitops.h
307
#define test_and_change_bit(nr, vaddr) bchg_reg_test_and_change_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
309
#define test_and_change_bit(nr, vaddr) bchg_mem_test_and_change_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
31
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
311
#define test_and_change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
arch/m68k/include/asm/bitops.h
312
bchg_mem_test_and_change_bit(nr, vaddr) : \
arch/m68k/include/asm/bitops.h
313
bfchg_mem_test_and_change_bit(nr, vaddr))
arch/m68k/include/asm/bitops.h
33
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
353
static inline unsigned long find_first_zero_bit(const unsigned long *vaddr,
arch/m68k/include/asm/bitops.h
356
const unsigned long *p = vaddr;
arch/m68k/include/asm/bitops.h
374
res += ((long)p - (long)vaddr - 4) * 8;
arch/m68k/include/asm/bitops.h
379
static inline unsigned long find_next_zero_bit(const unsigned long *vaddr,
arch/m68k/include/asm/bitops.h
383
const unsigned long *p = vaddr + (offset >> 5);
arch/m68k/include/asm/bitops.h
41
static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
410
static inline unsigned long find_first_bit(const unsigned long *vaddr,
arch/m68k/include/asm/bitops.h
413
const unsigned long *p = vaddr;
arch/m68k/include/asm/bitops.h
43
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
431
res += ((long)p - (long)vaddr - 4) * 8;
arch/m68k/include/asm/bitops.h
436
static inline unsigned long find_next_bit(const unsigned long *vaddr,
arch/m68k/include/asm/bitops.h
440
const unsigned long *p = vaddr + (offset >> 5);
arch/m68k/include/asm/bitops.h
50
static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
54
: "d" (nr ^ 31), "o" (*vaddr)
arch/m68k/include/asm/bitops.h
59
#define set_bit(nr, vaddr) bset_reg_set_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
61
#define set_bit(nr, vaddr) bset_mem_set_bit(nr, vaddr)
arch/m68k/include/asm/bitops.h
63
#define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
arch/m68k/include/asm/bitops.h
64
bset_mem_set_bit(nr, vaddr) : \
arch/m68k/include/asm/bitops.h
65
bfset_mem_set_bit(nr, vaddr))
arch/m68k/include/asm/bitops.h
74
static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
76
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
84
static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
86
char *p = (char *)vaddr + (nr ^ 31) / 8;
arch/m68k/include/asm/bitops.h
93
static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
arch/m68k/include/asm/bitops.h
97
: "d" (nr ^ 31), "o" (*vaddr)
arch/m68k/include/asm/cacheflush_mm.h
156
extern void cache_push_v(unsigned long vaddr, int len);
arch/m68k/include/asm/cacheflush_mm.h
224
static inline void __flush_pages_to_ram(void *vaddr, unsigned int nr)
arch/m68k/include/asm/cacheflush_mm.h
228
addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
arch/m68k/include/asm/cacheflush_mm.h
237
unsigned long paddr = __pa(vaddr);
arch/m68k/include/asm/cacheflush_mm.h
273
struct page *page, unsigned long vaddr,
arch/m68k/include/asm/cacheflush_mm.h
276
flush_cache_page(vma, vaddr, page_to_pfn(page));
arch/m68k/include/asm/cacheflush_mm.h
278
flush_icache_user_page(vma, page, vaddr, len);
arch/m68k/include/asm/cacheflush_mm.h
281
struct page *page, unsigned long vaddr,
arch/m68k/include/asm/cacheflush_mm.h
284
flush_cache_page(vma, vaddr, page_to_pfn(page));
arch/m68k/include/asm/dvma.h
32
extern void dvma_free(void *vaddr);
arch/m68k/include/asm/dvma.h
63
static inline int dvma_map_cpu(unsigned long kaddr, unsigned long vaddr,
arch/m68k/include/asm/dvma.h
87
int dvma_map_cpu(unsigned long kaddr, unsigned long vaddr, int len);
arch/m68k/include/asm/page_mm.h
55
#define clear_user_page(addr, vaddr, page) \
arch/m68k/include/asm/page_mm.h
59
#define copy_user_page(to, from, vaddr, page) \
arch/m68k/include/asm/page_mm.h
70
static inline unsigned long ___pa(void *vaddr)
arch/m68k/include/asm/page_mm.h
77
: "0" (vaddr), "i" (m68k_fixup_memoffset));
arch/m68k/include/asm/page_mm.h
80
#define __pa(vaddr) ___pa((void *)(long)(vaddr))
arch/m68k/include/asm/page_mm.h
83
void *vaddr;
arch/m68k/include/asm/page_mm.h
87
: "=r" (vaddr)
arch/m68k/include/asm/page_mm.h
89
return vaddr;
arch/m68k/include/asm/page_no.h
13
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/m68k/include/asm/page_no.h
15
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
arch/m68k/include/asm/page_no.h
16
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
arch/m68k/include/asm/page_no.h
18
#define __pa(vaddr) ((unsigned long)(vaddr))
arch/m68k/include/asm/pgtable_mm.h
120
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/m68k/include/asm/pgtable_no.h
38
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/m68k/kernel/signal.c
113
static inline void push_cache (unsigned long vaddr)
arch/m68k/kernel/signal.c
133
: "a" (vaddr));
arch/m68k/kernel/signal.c
136
temp |= vaddr & ~PAGE_MASK;
arch/m68k/kernel/signal.c
150
: "0" (vaddr));
arch/m68k/kernel/signal.c
166
: : "r" (vaddr), "r" (temp));
arch/m68k/kernel/signal.c
169
: : "r" (vaddr + 4), "r" (temp));
arch/m68k/kernel/signal.c
176
clear_cf_icache(vaddr, vaddr + 8);
arch/m68k/kernel/signal.c
213
static inline void push_cache(unsigned long vaddr)
arch/m68k/kernel/sys_m68k.c
216
#define virt_to_phys_060(vaddr) \
arch/m68k/kernel/sys_m68k.c
223
: "0" (vaddr)); \
arch/m68k/kernel/sys_m68k.c
53
#define virt_to_phys_040(vaddr) \
arch/m68k/kernel/sys_m68k.c
62
: "a" (vaddr)); \
arch/m68k/mm/cache.c
15
static unsigned long virt_to_phys_slow(unsigned long vaddr)
arch/m68k/mm/cache.c
37
: "0" (vaddr));
arch/m68k/mm/cache.c
47
: "a" (vaddr));
arch/m68k/mm/cache.c
50
return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
arch/m68k/mm/motorola.c
51
static inline void nocache_page(void *vaddr)
arch/m68k/mm/motorola.c
53
unsigned long addr = (unsigned long)vaddr;
arch/m68k/mm/motorola.c
62
static inline void cache_page(void *vaddr)
arch/m68k/mm/motorola.c
64
unsigned long addr = (unsigned long)vaddr;
arch/m68k/mm/sun3kmap.c
25
extern void mmu_emu_map_pmeg (int context, int vaddr);
arch/m68k/sun3/dvma.c
23
static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
arch/m68k/sun3/dvma.c
35
if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
arch/m68k/sun3/dvma.c
36
sun3_put_pte(vaddr, pte);
arch/m68k/sun3/dvma.c
37
ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
arch/m68k/sun3/dvma.c
40
return (vaddr + (kaddr & ~PAGE_MASK));
arch/m68k/sun3/dvma.c
49
unsigned long vaddr;
arch/m68k/sun3/dvma.c
51
vaddr = dvma_btov(baddr);
arch/m68k/sun3/dvma.c
53
end = vaddr + len;
arch/m68k/sun3/dvma.c
55
while(vaddr < end) {
arch/m68k/sun3/dvma.c
56
dvma_page(kaddr, vaddr);
arch/m68k/sun3/dvma.c
58
vaddr += PAGE_SIZE;
arch/m68k/sun3/mmu_emu.c
119
void print_pte_vaddr (unsigned long vaddr)
arch/m68k/sun3/mmu_emu.c
121
pr_cont(" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
arch/m68k/sun3/mmu_emu.c
122
print_pte (__pte (sun3_get_pte (vaddr)));
arch/m68k/sun3/mmu_emu.c
283
inline void mmu_emu_map_pmeg (int context, int vaddr)
arch/m68k/sun3/mmu_emu.c
289
vaddr &= ~SUN3_PMEG_MASK;
arch/m68k/sun3/mmu_emu.c
298
curr_pmeg, context, vaddr);
arch/m68k/sun3/mmu_emu.c
310
if(vaddr >= PAGE_OFFSET) {
arch/m68k/sun3/mmu_emu.c
316
sun3_put_segmap (vaddr, curr_pmeg);
arch/m68k/sun3/mmu_emu.c
326
sun3_put_segmap (vaddr, curr_pmeg);
arch/m68k/sun3/mmu_emu.c
329
pmeg_vaddr[curr_pmeg] = vaddr;
arch/m68k/sun3/mmu_emu.c
333
sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);
arch/m68k/sun3/mmu_emu.c
355
int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
arch/m68k/sun3/mmu_emu.c
374
pr_info("%s: vaddr=%lx type=%s crp=%px\n", __func__, vaddr,
arch/m68k/sun3/mmu_emu.c
378
segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
arch/m68k/sun3/mmu_emu.c
379
offset = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;
arch/m68k/sun3/mmu_emu.c
400
if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
arch/m68k/sun3/mmu_emu.c
401
mmu_emu_map_pmeg (context, vaddr);
arch/m68k/sun3/mmu_emu.c
404
sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));
arch/m68k/sun3/mmu_emu.c
422
print_pte_vaddr (vaddr);
arch/m68k/sun3/sun3.h
21
int mmu_emu_handle_fault(unsigned long vaddr, int read_flag, int kernel_fault);
arch/m68k/sun3/sun3.h
22
void print_pte_vaddr(unsigned long vaddr);
arch/m68k/sun3/sun3dvma.c
321
unsigned long vaddr;
arch/m68k/sun3/sun3dvma.c
337
vaddr = dvma_btov(baddr);
arch/m68k/sun3/sun3dvma.c
339
if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
arch/m68k/sun3/sun3dvma.c
348
return (void *)vaddr;
arch/m68k/sun3/sun3dvma.c
353
void dvma_free(void *vaddr)
arch/m68k/sun3x/dvma.c
101
if((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
arch/m68k/sun3x/dvma.c
106
if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
arch/m68k/sun3x/dvma.c
107
end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
arch/m68k/sun3x/dvma.c
115
if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
arch/m68k/sun3x/dvma.c
120
if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
arch/m68k/sun3x/dvma.c
121
end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
arch/m68k/sun3x/dvma.c
127
__pa(kaddr), vaddr);
arch/m68k/sun3x/dvma.c
132
vaddr += PAGE_SIZE;
arch/m68k/sun3x/dvma.c
133
} while(vaddr < end3);
arch/m68k/sun3x/dvma.c
135
} while(vaddr < end2);
arch/m68k/sun3x/dvma.c
137
} while(vaddr < end);
arch/m68k/sun3x/dvma.c
79
unsigned long vaddr, int len)
arch/m68k/sun3x/dvma.c
88
vaddr &= PAGE_MASK;
arch/m68k/sun3x/dvma.c
90
end = PAGE_ALIGN(vaddr + len);
arch/m68k/sun3x/dvma.c
92
pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
arch/m68k/sun3x/dvma.c
93
pgd = pgd_offset_k(vaddr);
arch/m68k/sun3x/dvma.c
94
p4d = p4d_offset(pgd, vaddr);
arch/m68k/sun3x/dvma.c
95
pud = pud_offset(p4d, vaddr);
arch/microblaze/include/asm/cacheflush.h
89
struct page *page, unsigned long vaddr,
arch/microblaze/include/asm/highmem.h
54
#define arch_kmap_local_post_map(vaddr, pteval) \
arch/microblaze/include/asm/highmem.h
55
local_flush_tlb_page(NULL, vaddr);
arch/microblaze/include/asm/highmem.h
56
#define arch_kmap_local_post_unmap(vaddr) \
arch/microblaze/include/asm/highmem.h
57
local_flush_tlb_page(NULL, vaddr);
arch/microblaze/include/asm/page.h
120
static inline unsigned long virt_to_pfn(const void *vaddr)
arch/microblaze/include/asm/page.h
122
return phys_to_pfn(__pa(vaddr));
arch/microblaze/include/asm/page.h
130
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
arch/microblaze/include/asm/page.h
48
# define copy_user_page(vto, vfrom, vaddr, topg) \
arch/microblaze/include/asm/pgtable.h
216
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/mips/boot/elf2ecoff.c
107
if (base->vaddr + base->len != new->vaddr) {
arch/mips/boot/elf2ecoff.c
109
base->len = new->vaddr - base->vaddr;
arch/mips/boot/elf2ecoff.c
284
text.vaddr = data.vaddr = bss.vaddr = 0;
arch/mips/boot/elf2ecoff.c
364
ndata.vaddr = ph[i].p_vaddr;
arch/mips/boot/elf2ecoff.c
366
nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
arch/mips/boot/elf2ecoff.c
374
ntxt.vaddr = ph[i].p_vaddr;
arch/mips/boot/elf2ecoff.c
394
if (text.vaddr > data.vaddr || data.vaddr > bss.vaddr ||
arch/mips/boot/elf2ecoff.c
395
text.vaddr + text.len > data.vaddr
arch/mips/boot/elf2ecoff.c
396
|| data.vaddr + data.len > bss.vaddr) {
arch/mips/boot/elf2ecoff.c
408
data.vaddr = text.vaddr + text.len;
arch/mips/boot/elf2ecoff.c
416
if (text.vaddr + text.len < data.vaddr)
arch/mips/boot/elf2ecoff.c
417
text.len = data.vaddr - text.vaddr;
arch/mips/boot/elf2ecoff.c
426
eah.text_start = text.vaddr;
arch/mips/boot/elf2ecoff.c
427
eah.data_start = data.vaddr;
arch/mips/boot/elf2ecoff.c
428
eah.bss_start = bss.vaddr;
arch/mips/boot/elf2ecoff.c
60
uint32_t vaddr;
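Note: the elf2ecoff checks above insist that the ECOFF image be laid out as ordered, non-overlapping text, data, then bss segments. A compact restatement of that invariant, assuming the same vaddr/len pairing as the tool's section struct:

    #include <stdint.h>
    #include <stdio.h>

    struct sect { uint32_t vaddr; uint32_t len; }; /* as in elf2ecoff */

    /* Inverse of the error test above: ordered and non-overlapping. */
    static int layout_ok(struct sect t, struct sect d, struct sect b)
    {
        return t.vaddr <= d.vaddr && d.vaddr <= b.vaddr &&
               t.vaddr + t.len <= d.vaddr &&
               d.vaddr + d.len <= b.vaddr;
    }

    int main(void)
    {
        struct sect text = { 0x400000, 0x1000 };
        struct sect data = { 0x401000, 0x0800 };
        struct sect bss  = { 0x401800, 0x0400 };

        printf("layout %s\n", layout_ok(text, data, bss) ? "ok" : "bad");
        return 0;
    }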
arch/mips/dec/kn01-berr.c
101
vaddr = regs->regs[insn.i_format.rs] +
arch/mips/dec/kn01-berr.c
104
vaddr = (long)pc;
arch/mips/dec/kn01-berr.c
105
if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1)
arch/mips/dec/kn01-berr.c
106
address = CPHYSADDR(vaddr);
arch/mips/dec/kn01-berr.c
111
entryhi |= vaddr & ~(PAGE_SIZE - 1);
arch/mips/dec/kn01-berr.c
119
offset = vaddr & (PAGE_SIZE - 1);
arch/mips/dec/kn01-berr.c
79
long asid, entryhi, vaddr;
arch/mips/include/asm/cacheflush.h
114
struct page *page, unsigned long vaddr, void *dst, const void *src,
arch/mips/include/asm/cacheflush.h
118
struct page *page, unsigned long vaddr, void *dst, const void *src,
arch/mips/include/asm/cacheflush.h
141
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
arch/mips/include/asm/cacheflush.h
143
static inline void flush_kernel_vmap_range(void *vaddr, int size)
arch/mips/include/asm/cacheflush.h
146
__flush_kernel_vmap_range((unsigned long) vaddr, size);
arch/mips/include/asm/cacheflush.h
149
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
arch/mips/include/asm/cacheflush.h
152
__flush_kernel_vmap_range((unsigned long) vaddr, size);
arch/mips/include/asm/highmem.h
54
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) set_pte(ptep, ptev)
arch/mips/include/asm/highmem.h
55
#define arch_kmap_local_post_map(vaddr, pteval) local_flush_tlb_one(vaddr)
arch/mips/include/asm/highmem.h
56
#define arch_kmap_local_post_unmap(vaddr) local_flush_tlb_one(vaddr)
arch/mips/include/asm/page.h
84
static inline void clear_user_page(void *addr, unsigned long vaddr,
arch/mips/include/asm/page.h
90
if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
arch/mips/include/asm/page.h
97
unsigned long vaddr, struct vm_area_struct *vma);
arch/mips/include/asm/pgtable.h
55
#define ZERO_PAGE(vaddr) \
arch/mips/include/asm/pgtable.h
56
(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
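Note: unlike the single-zero-page ports above it, MIPS keeps a block of zero pages and lets ZERO_PAGE(vaddr) pick the one whose cache colour matches vaddr, so read faults on a VIVT-aliasing cache see a congruent mapping. A sketch of the selection, assuming an order-3 block (8 colours); empty_zero_page's address is hypothetical:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        /* Assumed: an order-3 block of zero pages (8 colours). */
        unsigned long zero_page_mask = ((PAGE_SIZE << 3) - 1) & PAGE_MASK;
        unsigned long empty_zero_page = 0x80000000UL; /* hypothetical base */
        unsigned long vaddr = 0x4000a000UL;

        printf("zero page for %#lx is at %#lx\n", vaddr,
               empty_zero_page + (vaddr & zero_page_mask));
        return 0;
    }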
arch/mips/include/asm/tlbflush.h
23
extern void local_flush_tlb_one(unsigned long vaddr);
arch/mips/include/asm/tlbflush.h
35
extern void flush_tlb_one(unsigned long vaddr);
arch/mips/include/asm/tlbflush.h
45
#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
arch/mips/jazz/jazzdma.c
517
static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
arch/mips/jazz/jazzdma.c
521
__free_pages(virt_to_page(vaddr), get_order(size));
arch/mips/kernel/crash_dump.c
14
vaddr = kmap_local_pfn(pfn);
arch/mips/kernel/crash_dump.c
15
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/mips/kernel/crash_dump.c
16
kunmap_local(vaddr);
arch/mips/kernel/crash_dump.c
9
void *vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1207
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1208
if (!access_ok((void __user *)vaddr, 4)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1209
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1268
"+&r"(vaddr), "+&r"(err)
arch/mips/kernel/mips-r2-to-r6-emul.c
1280
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1281
if (!access_ok((void __user *)vaddr, 4)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1282
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1343
"+&r"(vaddr), "+&r"(err)
arch/mips/kernel/mips-r2-to-r6-emul.c
1354
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1355
if (!access_ok((void __user *)vaddr, 4)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1356
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1414
"+&r"(vaddr), "+&r"(err)
arch/mips/kernel/mips-r2-to-r6-emul.c
1424
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1425
if (!access_ok((void __user *)vaddr, 4)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1426
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1484
"+&r"(vaddr), "+&r"(err)
arch/mips/kernel/mips-r2-to-r6-emul.c
1499
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1500
if (!access_ok((void __user *)vaddr, 8)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1501
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1603
"+&r"(vaddr), "+&r"(err)
arch/mips/kernel/mips-r2-to-r6-emul.c
1618
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1619
if (!access_ok((void __user *)vaddr, 8)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1620
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1722
"+&r"(vaddr), "+&r"(err)
arch/mips/kernel/mips-r2-to-r6-emul.c
1737
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1738
if (!access_ok((void __user *)vaddr, 8)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1739
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1841
"+&r"(vaddr), "+&r"(err)
arch/mips/kernel/mips-r2-to-r6-emul.c
1855
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1856
if (!access_ok((void __user *)vaddr, 8)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1857
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1959
"+&r"(vaddr), "+&r"(err)
arch/mips/kernel/mips-r2-to-r6-emul.c
1967
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
1968
if (vaddr & 0x3) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1969
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1973
if (!access_ok((void __user *)vaddr, 4)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
1974
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2013
: "r"(vaddr), "i"(SIGSEGV)
arch/mips/kernel/mips-r2-to-r6-emul.c
2023
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
2024
if (vaddr & 0x3) {
arch/mips/kernel/mips-r2-to-r6-emul.c
2025
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2029
if (!access_ok((void __user *)vaddr, 4)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
2030
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2071
: "r"(vaddr), "i"(SIGSEGV));
arch/mips/kernel/mips-r2-to-r6-emul.c
2086
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
2087
if (vaddr & 0x7) {
arch/mips/kernel/mips-r2-to-r6-emul.c
2088
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2092
if (!access_ok((void __user *)vaddr, 8)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
2093
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2132
: "r"(vaddr), "i"(SIGSEGV)
arch/mips/kernel/mips-r2-to-r6-emul.c
2147
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
arch/mips/kernel/mips-r2-to-r6-emul.c
2148
if (vaddr & 0x7) {
arch/mips/kernel/mips-r2-to-r6-emul.c
2149
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2153
if (!access_ok((void __user *)vaddr, 8)) {
arch/mips/kernel/mips-r2-to-r6-emul.c
2154
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2195
: "r"(vaddr), "i"(SIGSEGV));
arch/mips/kernel/mips-r2-to-r6-emul.c
909
unsigned long vaddr;
arch/mips/kernel/smp.c
717
unsigned long vaddr = (unsigned long) info;
arch/mips/kernel/smp.c
719
local_flush_tlb_one(vaddr);
arch/mips/kernel/smp.c
722
void flush_tlb_one(unsigned long vaddr)
arch/mips/kernel/smp.c
724
smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
arch/mips/kernel/traps.c
532
unsigned long value, __user *vaddr;
arch/mips/kernel/traps.c
545
vaddr = (unsigned long __user *)
arch/mips/kernel/traps.c
548
if ((unsigned long)vaddr & 3)
arch/mips/kernel/traps.c
550
if (get_user(value, vaddr))
arch/mips/kernel/traps.c
571
unsigned long __user *vaddr;
arch/mips/kernel/traps.c
585
vaddr = (unsigned long __user *)
arch/mips/kernel/traps.c
589
if ((unsigned long)vaddr & 3)
arch/mips/kernel/traps.c
602
if (put_user(regs->regs[reg], vaddr))
arch/mips/kernel/uprobes.c
195
instruction_pointer_set(regs, utask->vaddr);
arch/mips/kernel/uprobes.c
211
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
arch/mips/kernel/uprobes.c
218
kstart = kaddr + (vaddr & ~PAGE_MASK);
arch/mips/loongson64/init.c
157
unsigned long vaddr;
arch/mips/loongson64/init.c
181
vaddr = (unsigned long)PCI_IOBASE + range->io_start;
arch/mips/loongson64/init.c
183
vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
arch/mips/mm/c-octeon.c
161
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
arch/mips/mm/c-r3k.c
268
static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
arch/mips/mm/c-r4k.c
548
void *vaddr;
arch/mips/mm/c-r4k.c
569
vaddr = NULL;
arch/mips/mm/c-r4k.c
580
vaddr = kmap_coherent(page, addr);
arch/mips/mm/c-r4k.c
582
vaddr = kmap_atomic(page);
arch/mips/mm/c-r4k.c
583
addr = (unsigned long)vaddr;
arch/mips/mm/c-r4k.c
587
vaddr ? r4k_blast_dcache_page(addr) :
arch/mips/mm/c-r4k.c
593
if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
arch/mips/mm/c-r4k.c
596
vaddr ? r4k_blast_icache_page(addr) :
arch/mips/mm/c-r4k.c
600
if (vaddr) {
arch/mips/mm/c-r4k.c
604
kunmap_atomic(vaddr);
arch/mips/mm/c-r4k.c
867
unsigned long vaddr;
arch/mips/mm/c-r4k.c
883
unsigned long vaddr = vmra->vaddr;
arch/mips/mm/c-r4k.c
891
blast_dcache_range(vaddr, vaddr + size);
arch/mips/mm/c-r4k.c
894
static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
arch/mips/mm/c-r4k.c
898
args.vaddr = (unsigned long) vaddr;
arch/mips/mm/cache.c
48
void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
arch/mips/mm/init.c
103
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
arch/mips/mm/init.c
128
return (void*) vaddr;
arch/mips/mm/init.c
164
unsigned long vaddr, struct vm_area_struct *vma)
arch/mips/mm/init.c
172
vfrom = kmap_coherent(from, vaddr);
arch/mips/mm/init.c
181
pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
arch/mips/mm/init.c
189
struct page *page, unsigned long vaddr, void *dst, const void *src,
arch/mips/mm/init.c
196
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
arch/mips/mm/init.c
205
flush_cache_page(vma, vaddr, page_to_pfn(page));
arch/mips/mm/init.c
209
struct page *page, unsigned long vaddr, void *dst, const void *src,
arch/mips/mm/init.c
216
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
arch/mips/mm/init.c
236
unsigned long vaddr;
arch/mips/mm/init.c
238
vaddr = start;
arch/mips/mm/init.c
239
i = pgd_index(vaddr);
arch/mips/mm/init.c
240
j = pud_index(vaddr);
arch/mips/mm/init.c
241
k = pmd_index(vaddr);
arch/mips/mm/init.c
244
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
arch/mips/mm/init.c
246
for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
arch/mips/mm/init.c
248
for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
arch/mips/mm/init.c
260
vaddr += PMD_SIZE;
arch/mips/mm/init.c
80
unsigned long vaddr, flags, entrylo;
arch/mips/mm/init.c
91
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
arch/mips/mm/ioremap.c
105
vaddr = (unsigned long)area->addr;
arch/mips/mm/ioremap.c
108
if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
arch/mips/mm/ioremap.c
114
return (void __iomem *)(vaddr + offset);
arch/mips/mm/ioremap.c
53
unsigned long vaddr;
arch/mips/mm/mmap.c
122
unsigned long vaddr = (unsigned long)kaddr;
arch/mips/mm/mmap.c
124
if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
arch/mips/mm/pgtable-32.c
43
unsigned long vaddr;
arch/mips/mm/pgtable-32.c
62
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
arch/mips/mm/pgtable-32.c
63
fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
arch/mips/mm/pgtable-32.c
69
vaddr = PKMAP_BASE;
arch/mips/mm/pgtable-32.c
70
fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
arch/mips/mm/pgtable-32.c
72
pgd = swapper_pg_dir + pgd_index(vaddr);
arch/mips/mm/pgtable-32.c
73
p4d = p4d_offset(pgd, vaddr);
arch/mips/mm/pgtable-32.c
74
pud = pud_offset(p4d, vaddr);
arch/mips/mm/pgtable-32.c
75
pmd = pmd_offset(pud, vaddr);
arch/mips/mm/pgtable-32.c
76
pte = pte_offset_kernel(pmd, vaddr);
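Note: the pgtable-32.c lines above show the full table descent: vaddr is taken apart field by field to index pgd, p4d, pud, pmd, and finally pte. A standalone illustration of that index extraction; the field widths are assumed (4 KiB pages, 9-bit tables, in the style of 64-bit ports), not any real port's layout:

    #include <stdio.h>

    /* Assumed illustrative layout: 4 KiB pages, 9-bit mid levels. */
    #define PAGE_SHIFT 12
    #define PMD_SHIFT  (PAGE_SHIFT + 9)
    #define PGD_SHIFT  (PMD_SHIFT + 9)

    int main(void)
    {
        unsigned long vaddr = 0xc0ffee00UL;

        /* Each level's index is just a bitfield of the virtual address. */
        printf("pgd index %lu\n", (vaddr >> PGD_SHIFT) & 0x1ff);
        printf("pmd index %lu\n", (vaddr >> PMD_SHIFT) & 0x1ff);
        printf("pte index %lu\n", (vaddr >> PAGE_SHIFT) & 0x1ff);
        return 0;
    }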
arch/mips/mm/pgtable-64.c
102
unsigned long vaddr;
arch/mips/mm/pgtable-64.c
117
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
arch/mips/mm/pgtable-64.c
118
fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
arch/mips/pci/pci-generic.c
53
unsigned long vaddr;
arch/mips/pci/pci-generic.c
60
vaddr = (unsigned long)ioremap(phys_addr, resource_size(res));
arch/mips/pci/pci-generic.c
61
set_io_port_base(vaddr);
arch/mips/sgi-ip22/ip28-berr.c
304
static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr)
arch/mips/sgi-ip22/ip28-berr.c
308
vaddr &= 0x7fffffff; /* Doc. states that top bit is ignored */
arch/mips/sgi-ip22/ip28-berr.c
311
if ((lo & 2) && (vaddr >> 21) == ((hi<<1) >> 22)) {
arch/mips/sgi-ip22/ip28-berr.c
317
pte += 8*((vaddr >> pgsz) & 0x1ff);
arch/mips/sgi-ip22/ip28-berr.c
327
a += vaddr & ((1 << pgsz) - 1);
arch/nios2/include/asm/io.h
32
#define phys_to_virt(vaddr) \
arch/nios2/include/asm/io.h
33
((void *)((unsigned long)(vaddr) | CONFIG_NIOS2_KERNEL_REGION_BASE))
arch/nios2/include/asm/io.h
35
#define virt_to_phys(vaddr) \
arch/nios2/include/asm/io.h
36
((unsigned long)((unsigned long)(vaddr) & ~0xE0000000))
arch/nios2/include/asm/page.h
49
extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
arch/nios2/include/asm/page.h
50
extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
arch/nios2/include/asm/page.h
85
# define virt_to_page(vaddr) pfn_to_page(PFN_DOWN(virt_to_phys(vaddr)))
arch/nios2/include/asm/page.h
86
# define virt_addr_valid(vaddr) pfn_valid(PFN_DOWN(virt_to_phys(vaddr)))
arch/nios2/include/asm/pgtable.h
73
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/nios2/mm/cacheflush.c
242
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
arch/nios2/mm/cacheflush.c
245
__flush_dcache(vaddr, vaddr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
246
__flush_icache(vaddr, vaddr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
252
void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
arch/nios2/mm/cacheflush.c
254
__flush_dcache(vaddr, vaddr + PAGE_SIZE);
arch/nios2/mm/cacheflush.c
255
__flush_icache(vaddr, vaddr + PAGE_SIZE);
arch/nios2/mm/dma-mapping.c
24
void *vaddr = phys_to_virt(paddr);
arch/nios2/mm/dma-mapping.c
28
invalidate_dcache_range((unsigned long)vaddr,
arch/nios2/mm/dma-mapping.c
29
(unsigned long)(vaddr + size));
arch/nios2/mm/dma-mapping.c
37
flush_dcache_range((unsigned long)vaddr,
arch/nios2/mm/dma-mapping.c
38
(unsigned long)(vaddr + size));
arch/nios2/mm/dma-mapping.c
48
void *vaddr = phys_to_virt(paddr);
arch/nios2/mm/dma-mapping.c
53
invalidate_dcache_range((unsigned long)vaddr,
arch/nios2/mm/dma-mapping.c
54
(unsigned long)(vaddr + size));
arch/openrisc/include/asm/page.h
33
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/openrisc/include/asm/pgtable.h
184
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/openrisc/kernel/traps.c
363
unsigned long __user *vaddr;
arch/openrisc/kernel/traps.c
376
vaddr = (unsigned long __user *)(regs->gpr[ra] + imm);
arch/openrisc/kernel/traps.c
378
if (!lwa_flag || vaddr != lwa_addr) {
arch/openrisc/kernel/traps.c
383
if ((unsigned long)vaddr & 0x3) {
arch/openrisc/kernel/traps.c
388
if (put_user(regs->gpr[rb], vaddr)) {
arch/openrisc/kernel/traps.c
400
*((unsigned long *)vaddr) = regs->gpr[rb];
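The traps.c entries above come from l.lwa/l.swa (load-linked/store-conditional) emulation: the store succeeds only if a matching reservation exists and the target is word aligned. A condensed sketch of those checks, wrapped as a helper (the function name, signature, and return codes are assumptions; only the checks themselves appear in the listing):

	/* Sketch: conditional-store check sequence. lwa_flag/lwa_addr hold
	 * the reservation recorded by a prior l.lwa. */
	static int emulate_swa_store(struct pt_regs *regs, int ra, int rb, s16 imm,
				     int lwa_flag, unsigned long __user *lwa_addr)
	{
		unsigned long __user *vaddr;

		vaddr = (unsigned long __user *)(regs->gpr[ra] + imm);

		if (!lwa_flag || vaddr != lwa_addr)	/* reservation lost/absent */
			return -EAGAIN;
		if ((unsigned long)vaddr & 0x3)		/* must be word aligned */
			return -EINVAL;
		if (put_user(regs->gpr[rb], vaddr))	/* fault writing user memory */
			return -EFAULT;
		return 0;
	}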
arch/parisc/include/asm/cacheflush.h
39
void flush_kernel_vmap_range(void *vaddr, int size);
arch/parisc/include/asm/cacheflush.h
40
void invalidate_kernel_vmap_range(void *vaddr, int size);
arch/parisc/include/asm/page.h
24
void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr,
arch/parisc/include/asm/pgtable.h
274
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/parisc/include/asm/ropes.h
258
extern void *iosapic_register(unsigned long hpa, void __iomem *vaddr);
arch/parisc/kernel/cache.c
357
unsigned long vaddr = (unsigned long)addr;
arch/parisc/kernel/cache.c
367
flush_dcache_page_asm(__pa(vaddr), vaddr);
arch/parisc/kernel/cache.c
373
unsigned long vaddr = (unsigned long)addr;
arch/parisc/kernel/cache.c
383
flush_icache_page_asm(__pa(vaddr), vaddr);
arch/parisc/kernel/cache.c
51
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
arch/parisc/kernel/cache.c
53
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
arch/parisc/kernel/cache.c
54
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
arch/parisc/kernel/cache.c
648
unsigned long vaddr, struct vm_area_struct *vma)
arch/parisc/kernel/cache.c
654
__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
arch/parisc/kernel/cache.c
891
void flush_kernel_vmap_range(void *vaddr, int size)
arch/parisc/kernel/cache.c
893
unsigned long start = (unsigned long)vaddr;
arch/parisc/kernel/cache.c
911
void invalidate_kernel_vmap_range(void *vaddr, int size)
arch/parisc/kernel/cache.c
913
unsigned long start = (unsigned long)vaddr;
arch/parisc/kernel/pci-dma.c
100
} while (vaddr < end);
arch/parisc/kernel/pci-dma.c
104
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
arch/parisc/kernel/pci-dma.c
108
unsigned long orig_vaddr = vaddr;
arch/parisc/kernel/pci-dma.c
110
vaddr &= ~PGDIR_MASK;
arch/parisc/kernel/pci-dma.c
111
end = vaddr + size;
arch/parisc/kernel/pci-dma.c
115
pte_t * pte = pte_alloc_kernel(pmd, vaddr);
arch/parisc/kernel/pci-dma.c
118
if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
arch/parisc/kernel/pci-dma.c
120
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
arch/parisc/kernel/pci-dma.c
123
} while (vaddr < end);
arch/parisc/kernel/pci-dma.c
127
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
arch/parisc/kernel/pci-dma.c
131
unsigned long end = vaddr + size;
arch/parisc/kernel/pci-dma.c
133
dir = pgd_offset_k(vaddr);
arch/parisc/kernel/pci-dma.c
139
p4d = p4d_offset(dir, vaddr);
arch/parisc/kernel/pci-dma.c
140
pud = pud_offset(p4d, vaddr);
arch/parisc/kernel/pci-dma.c
141
pmd = pmd_alloc(NULL, pud, vaddr);
arch/parisc/kernel/pci-dma.c
145
if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
arch/parisc/kernel/pci-dma.c
147
vaddr = vaddr + PGDIR_SIZE;
arch/parisc/kernel/pci-dma.c
149
} while (vaddr && (vaddr < end));
arch/parisc/kernel/pci-dma.c
153
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
arch/parisc/kernel/pci-dma.c
158
unsigned long orig_vaddr = vaddr;
arch/parisc/kernel/pci-dma.c
167
pte = pte_offset_kernel(pmd, vaddr);
arch/parisc/kernel/pci-dma.c
168
vaddr &= ~PMD_MASK;
arch/parisc/kernel/pci-dma.c
169
end = vaddr + size;
arch/parisc/kernel/pci-dma.c
176
pte_clear(&init_mm, vaddr, pte);
arch/parisc/kernel/pci-dma.c
180
vaddr += PAGE_SIZE;
arch/parisc/kernel/pci-dma.c
186
} while (vaddr < end);
arch/parisc/kernel/pci-dma.c
189
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
arch/parisc/kernel/pci-dma.c
194
unsigned long orig_vaddr = vaddr;
arch/parisc/kernel/pci-dma.c
203
pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
arch/parisc/kernel/pci-dma.c
204
vaddr &= ~PGDIR_MASK;
arch/parisc/kernel/pci-dma.c
205
end = vaddr + size;
arch/parisc/kernel/pci-dma.c
209
unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
arch/parisc/kernel/pci-dma.c
210
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
arch/parisc/kernel/pci-dma.c
213
} while (vaddr < end);
arch/parisc/kernel/pci-dma.c
216
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
arch/parisc/kernel/pci-dma.c
219
unsigned long end = vaddr + size;
arch/parisc/kernel/pci-dma.c
221
dir = pgd_offset_k(vaddr);
arch/parisc/kernel/pci-dma.c
223
unmap_uncached_pmd(dir, vaddr, end - vaddr);
arch/parisc/kernel/pci-dma.c
224
vaddr = vaddr + PGDIR_SIZE;
arch/parisc/kernel/pci-dma.c
226
} while (vaddr && (vaddr < end));
arch/parisc/kernel/pci-dma.c
305
pcxl_free_range(unsigned long vaddr, size_t size)
arch/parisc/kernel/pci-dma.c
308
unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
arch/parisc/kernel/pci-dma.c
404
unsigned long vaddr;
arch/parisc/kernel/pci-dma.c
413
vaddr = pcxl_alloc_range(size);
arch/parisc/kernel/pci-dma.c
417
map_uncached_pages(vaddr, size, paddr);
arch/parisc/kernel/pci-dma.c
420
return (void *)vaddr;
arch/parisc/kernel/pci-dma.c
423
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
arch/parisc/kernel/pci-dma.c
432
unmap_uncached_pages((unsigned long)vaddr, size);
arch/parisc/kernel/pci-dma.c
433
pcxl_free_range((unsigned long)vaddr, size);
arch/parisc/kernel/pci-dma.c
77
unsigned long vaddr,
arch/parisc/kernel/pci-dma.c
81
unsigned long orig_vaddr = vaddr;
arch/parisc/kernel/pci-dma.c
83
vaddr &= ~PMD_MASK;
arch/parisc/kernel/pci-dma.c
84
end = vaddr + size;
arch/parisc/kernel/pci-dma.c
96
vaddr += PAGE_SIZE;
arch/parisc/mm/fixmap.c
15
unsigned long vaddr = __fix_to_virt(idx);
arch/parisc/mm/fixmap.c
16
pgd_t *pgd = pgd_offset_k(vaddr);
arch/parisc/mm/fixmap.c
17
p4d_t *p4d = p4d_offset(pgd, vaddr);
arch/parisc/mm/fixmap.c
18
pud_t *pud = pud_offset(p4d, vaddr);
arch/parisc/mm/fixmap.c
19
pmd_t *pmd = pmd_offset(pud, vaddr);
arch/parisc/mm/fixmap.c
22
pte = pte_offset_kernel(pmd, vaddr);
arch/parisc/mm/fixmap.c
23
set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
arch/parisc/mm/fixmap.c
24
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
arch/parisc/mm/fixmap.c
29
unsigned long vaddr = __fix_to_virt(idx);
arch/parisc/mm/fixmap.c
30
pte_t *pte = virt_to_kpte(vaddr);
arch/parisc/mm/fixmap.c
35
pte_clear(&init_mm, vaddr, pte);
arch/parisc/mm/fixmap.c
37
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
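The fixmap.c entries above contain the entire set/clear pattern for a parisc fixmap slot. Reassembling the set side from exactly the lines quoted (the full source also allocates a missing PMD-level table, which is omitted here):

	/* Sketch of set_fixmap(): descend to the PTE for the slot's fixed
	 * virtual address, install the mapping, flush the stale TLB entry. */
	void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
	{
		unsigned long vaddr = __fix_to_virt(idx);
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d = p4d_offset(pgd, vaddr);
		pud_t *pud = pud_offset(p4d, vaddr);
		pmd_t *pmd = pmd_offset(pud, vaddr);
		pte_t *pte;

		pte = pte_offset_kernel(pmd, vaddr);
		set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
	}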
arch/parisc/mm/init.c
355
unsigned long vaddr;
arch/parisc/mm/init.c
372
vaddr = start_vaddr;
arch/parisc/mm/init.c
374
pgd_t *pgd = pgd_offset_k(vaddr);
arch/parisc/mm/init.c
375
p4d_t *p4d = p4d_offset(pgd, vaddr);
arch/parisc/mm/init.c
376
pud_t *pud = pud_offset(p4d, vaddr);
arch/parisc/mm/init.c
386
pmd = pmd_offset(pud, vaddr);
arch/parisc/mm/init.c
393
pg_table = pte_offset_kernel(pmd, vaddr);
arch/parisc/mm/init.c
427
vaddr += PAGE_SIZE;
arch/powerpc/include/asm/highmem.h
61
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
arch/powerpc/include/asm/highmem.h
62
__set_pte_at(mm, vaddr, ptep, ptev, 1)
arch/powerpc/include/asm/highmem.h
63
#define arch_kmap_local_post_map(vaddr, pteval) \
arch/powerpc/include/asm/highmem.h
64
local_flush_tlb_page(NULL, vaddr)
arch/powerpc/include/asm/highmem.h
65
#define arch_kmap_local_post_unmap(vaddr) \
arch/powerpc/include/asm/highmem.h
66
local_flush_tlb_page(NULL, vaddr)
arch/powerpc/include/asm/iommu.h
276
void *vaddr, dma_addr_t dma_handle);
arch/powerpc/include/asm/page.h
232
#define virt_addr_valid(vaddr) ({ \
arch/powerpc/include/asm/page.h
233
unsigned long _addr = (unsigned long)vaddr; \
arch/powerpc/include/asm/page.h
273
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
arch/powerpc/include/asm/page.h
275
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
arch/powerpc/include/asm/pgtable.h
84
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/powerpc/kernel/btext.c
100
logicalDisplayBase = (void *) (vaddr + lowbits);
arch/powerpc/kernel/btext.c
87
unsigned long vaddr = PAGE_OFFSET + 0x10000000;
arch/powerpc/kernel/btext.c
98
disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
arch/powerpc/kernel/crash_dump.c
75
void *vaddr;
arch/powerpc/kernel/crash_dump.c
85
vaddr = __va(paddr);
arch/powerpc/kernel/crash_dump.c
86
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/powerpc/kernel/crash_dump.c
88
vaddr = ioremap_cache(paddr, PAGE_SIZE);
arch/powerpc/kernel/crash_dump.c
89
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/powerpc/kernel/crash_dump.c
90
iounmap(vaddr);
arch/powerpc/kernel/dma-iommu.c
103
void *vaddr, dma_addr_t dma_handle,
arch/powerpc/kernel/dma-iommu.c
106
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
arch/powerpc/kernel/fadump.c
780
void *vaddr;
arch/powerpc/kernel/fadump.c
782
vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
arch/powerpc/kernel/fadump.c
783
if (!vaddr)
arch/powerpc/kernel/fadump.c
787
page = virt_to_page(vaddr);
arch/powerpc/kernel/fadump.c
790
return vaddr;
arch/powerpc/kernel/fadump.c
793
static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
arch/powerpc/kernel/fadump.c
795
free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
arch/powerpc/kernel/iommu.c
501
unsigned long vaddr, npages, entry, slen;
arch/powerpc/kernel/iommu.c
510
vaddr = (unsigned long) sg_virt(s);
arch/powerpc/kernel/iommu.c
511
npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
arch/powerpc/kernel/iommu.c
514
(vaddr & ~PAGE_MASK) == 0)
arch/powerpc/kernel/iommu.c
519
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
arch/powerpc/kernel/iommu.c
526
"vaddr %lx npages %lu\n", tbl, vaddr,
arch/powerpc/kernel/iommu.c
534
dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
arch/powerpc/kernel/iommu.c
541
vaddr & IOMMU_PAGE_MASK(tbl),
arch/powerpc/kernel/iommu.c
600
unsigned long vaddr, npages;
arch/powerpc/kernel/iommu.c
602
vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
arch/powerpc/kernel/iommu.c
605
__iommu_free(tbl, vaddr, npages);
arch/powerpc/kernel/iommu.c
860
void *vaddr;
arch/powerpc/kernel/iommu.c
866
vaddr = phys_to_virt(phys);
arch/powerpc/kernel/iommu.c
867
uaddr = (unsigned long)vaddr;
arch/powerpc/kernel/iommu.c
873
((unsigned long)vaddr & ~PAGE_MASK) == 0)
arch/powerpc/kernel/iommu.c
876
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
arch/powerpc/kernel/iommu.c
883
"vaddr %p npages %d\n", tbl, vaddr,
arch/powerpc/kernel/iommu.c
963
void *vaddr, dma_addr_t dma_handle)
arch/powerpc/kernel/iommu.c
972
free_pages((unsigned long)vaddr, get_order(size));
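The scatterlist path above sizes each mapping with iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)). As I understand the generic iommu helper, the arithmetic counts IOMMU pages spanned once the intra-page offset is added back; a self-contained sketch (name suffixed to mark it as a reconstruction):

	/* Sketch of iommu_num_pages(): pages of size io_page_size covering
	 * len bytes that start at offset (addr % io_page_size). */
	static unsigned long iommu_num_pages_sketch(unsigned long addr,
						    unsigned long len,
						    unsigned long io_page_size)
	{
		unsigned long size = len + (addr & (io_page_size - 1));

		return (size + io_page_size - 1) / io_page_size;
	}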
arch/powerpc/kernel/legacy_serial.c
361
void __iomem *vaddr;
arch/powerpc/kernel/legacy_serial.c
372
vaddr = ioremap(info->taddr, 0x1000);
arch/powerpc/kernel/legacy_serial.c
373
if (WARN_ON(!vaddr))
arch/powerpc/kernel/legacy_serial.c
376
udbg_uart_init_mmio(vaddr, 1 << port->regshift);
arch/powerpc/kernel/uprobes.c
127
regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
arch/powerpc/kernel/uprobes.c
173
instruction_pointer_set(regs, utask->vaddr);
arch/powerpc/kvm/book3s_emulate.c
451
ulong addr, vaddr;
arch/powerpc/kvm/book3s_emulate.c
462
vaddr = addr;
arch/powerpc/kvm/book3s_emulate.c
467
kvmppc_set_dar(vcpu, vaddr);
arch/powerpc/kvm/book3s_emulate.c
468
vcpu->arch.fault_dar = vaddr;
arch/powerpc/mm/book3s64/hash_utils.c
278
static void kernel_map_linear_page(unsigned long vaddr, unsigned long idx,
arch/powerpc/mm/book3s64/hash_utils.c
282
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
arch/powerpc/mm/book3s64/hash_utils.c
283
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
arch/powerpc/mm/book3s64/hash_utils.c
296
ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
arch/powerpc/mm/book3s64/hash_utils.c
307
static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long idx,
arch/powerpc/mm/book3s64/hash_utils.c
311
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
arch/powerpc/mm/book3s64/hash_utils.c
312
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
arch/powerpc/mm/book3s64/hash_utils.c
373
unsigned long flags, vaddr, lmi;
arch/powerpc/mm/book3s64/hash_utils.c
381
vaddr = (unsigned long)page_address(page);
arch/powerpc/mm/book3s64/hash_utils.c
382
lmi = __pa(vaddr) >> PAGE_SHIFT;
arch/powerpc/mm/book3s64/hash_utils.c
386
kernel_map_linear_page(vaddr, lmi,
arch/powerpc/mm/book3s64/hash_utils.c
389
kernel_unmap_linear_page(vaddr, lmi,
arch/powerpc/mm/book3s64/hash_utils.c
470
unsigned long vaddr = (unsigned long) __va(paddr);
arch/powerpc/mm/book3s64/hash_utils.c
471
unsigned long lmi = (vaddr - (unsigned long)__kfence_pool)
arch/powerpc/mm/book3s64/hash_utils.c
476
BUG_ON(!is_kfence_address((void *)vaddr));
arch/powerpc/mm/book3s64/hash_utils.c
483
unsigned long flags, vaddr, lmi;
arch/powerpc/mm/book3s64/hash_utils.c
489
vaddr = (unsigned long)page_address(page);
arch/powerpc/mm/book3s64/hash_utils.c
490
lmi = (vaddr - (unsigned long)__kfence_pool) >> PAGE_SHIFT;
arch/powerpc/mm/book3s64/hash_utils.c
499
kernel_map_linear_page(vaddr, lmi,
arch/powerpc/mm/book3s64/hash_utils.c
503
kernel_unmap_linear_page(vaddr, lmi,
arch/powerpc/mm/book3s64/hash_utils.c
524
void *vaddr = page_address(page);
arch/powerpc/mm/book3s64/hash_utils.c
526
if (is_kfence_address(vaddr))
arch/powerpc/mm/book3s64/hash_utils.c
625
unsigned long vaddr, paddr;
arch/powerpc/mm/book3s64/hash_utils.c
638
vaddr = ALIGN(vstart, step);
arch/powerpc/mm/book3s64/hash_utils.c
642
for (; vaddr < vend; vaddr += step, paddr += step) {
arch/powerpc/mm/book3s64/hash_utils.c
644
unsigned long vsid = get_kernel_vsid(vaddr, ssize);
arch/powerpc/mm/book3s64/hash_utils.c
645
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
arch/powerpc/mm/book3s64/hash_utils.c
655
if (overlaps_kernel_text(vaddr, vaddr + step))
arch/powerpc/mm/book3s64/hash_utils.c
669
overlaps_interrupt_vector_text(vaddr, vaddr + step))
arch/powerpc/mm/book3s64/hash_utils.c
710
unsigned long vaddr, time_limit;
arch/powerpc/mm/book3s64/hash_utils.c
722
vaddr = ALIGN_DOWN(vstart, step);
arch/powerpc/mm/book3s64/hash_utils.c
725
for (;vaddr < vend; vaddr += step) {
arch/powerpc/mm/book3s64/hash_utils.c
726
rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
arch/powerpc/mm/book3s64/radix_pgtable.c
302
unsigned long vaddr, addr, mapping_size = 0;
arch/powerpc/mm/book3s64/radix_pgtable.c
339
vaddr = (unsigned long)__va(addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
341
if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
arch/powerpc/mm/book3s64/radix_pgtable.c
342
overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
arch/powerpc/mm/book3s64/radix_pgtable.c
355
rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
arch/powerpc/mm/cacheflush.c
177
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
arch/powerpc/mm/cacheflush.c
190
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
arch/powerpc/mm/dma-noncoherent.c
22
static void __dma_sync(void *vaddr, size_t size, int direction)
arch/powerpc/mm/dma-noncoherent.c
24
unsigned long start = (unsigned long)vaddr;
arch/powerpc/platforms/44x/canyonlands.c
100
setbits32((vaddr + GPIO0_OSRH), 0x42000000);
arch/powerpc/platforms/44x/canyonlands.c
101
setbits32((vaddr + GPIO0_TSRH), 0x42000000);
arch/powerpc/platforms/44x/canyonlands.c
103
iounmap(vaddr);
arch/powerpc/platforms/44x/canyonlands.c
52
void __iomem *vaddr;
arch/powerpc/platforms/44x/canyonlands.c
77
vaddr = of_iomap(np, 0);
arch/powerpc/platforms/44x/canyonlands.c
80
if (!vaddr) {
arch/powerpc/platforms/512x/mpc512x_shared.c
208
void __iomem *vaddr;
arch/powerpc/platforms/512x/mpc512x_shared.c
232
vaddr = ioremap(desc, sizeof(struct diu_ad));
arch/powerpc/platforms/512x/mpc512x_shared.c
233
if (!vaddr) {
arch/powerpc/platforms/512x/mpc512x_shared.c
237
memcpy(&diu_shared_fb.ad0, vaddr, sizeof(struct diu_ad));
arch/powerpc/platforms/512x/mpc512x_shared.c
243
pix_fmt = in_le32(vaddr);
arch/powerpc/platforms/512x/mpc512x_shared.c
245
diu_shared_fb.fb_phys = in_le32(vaddr + 4);
arch/powerpc/platforms/512x/mpc512x_shared.c
248
iounmap(vaddr);
arch/powerpc/platforms/512x/mpc512x_shared.c
251
vaddr = ioremap(desc, sizeof(diu_shared_fb.gamma));
arch/powerpc/platforms/512x/mpc512x_shared.c
252
if (!vaddr) {
arch/powerpc/platforms/512x/mpc512x_shared.c
257
memcpy(&diu_shared_fb.gamma, vaddr, sizeof(diu_shared_fb.gamma));
arch/powerpc/platforms/512x/mpc512x_shared.c
262
iounmap(vaddr);
arch/powerpc/platforms/ps3/system-bus.c
543
static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
arch/powerpc/platforms/ps3/system-bus.c
549
free_pages((unsigned long)vaddr, get_order(size));
arch/powerpc/platforms/pseries/ibmebus.c
82
size_t size, void *vaddr,
arch/powerpc/platforms/pseries/ibmebus.c
86
kfree(vaddr);
arch/powerpc/platforms/pseries/rtas-fadump.c
369
void *vaddr;
arch/powerpc/platforms/pseries/rtas-fadump.c
371
vaddr = (void *)fadump_conf->cpu_state_dest_vaddr;
arch/powerpc/platforms/pseries/rtas-fadump.c
373
reg_header = vaddr;
arch/powerpc/platforms/pseries/rtas-fadump.c
384
vaddr += be32_to_cpu(reg_header->num_cpu_offset);
arch/powerpc/platforms/pseries/rtas-fadump.c
385
num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
arch/powerpc/platforms/pseries/rtas-fadump.c
387
vaddr += sizeof(u32);
arch/powerpc/platforms/pseries/rtas-fadump.c
388
reg_entry = (struct rtas_fadump_reg_entry *)vaddr;
arch/powerpc/platforms/pseries/vio.c
506
void *vaddr, dma_addr_t dma_handle,
arch/powerpc/platforms/pseries/vio.c
511
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
arch/riscv/include/asm/insn-def.h
187
#define HFENCE_VVMA(vaddr, asid) \
arch/riscv/include/asm/insn-def.h
189
__RD(0), RS1(vaddr), RS2(asid))
arch/riscv/include/asm/insn-def.h
290
#define SINVAL_VMA(vaddr, asid) \
arch/riscv/include/asm/insn-def.h
292
__RD(0), RS1(vaddr), RS2(asid))
arch/riscv/include/asm/insn-def.h
302
#define HINVAL_VVMA(vaddr, asid) \
arch/riscv/include/asm/insn-def.h
304
__RD(0), RS1(vaddr), RS2(asid))
arch/riscv/include/asm/page.h
185
#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
arch/riscv/include/asm/page.h
188
#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
arch/riscv/include/asm/page.h
202
#define virt_addr_valid(vaddr) ({ \
arch/riscv/include/asm/page.h
203
unsigned long _addr = (unsigned long)vaddr; \
arch/riscv/include/asm/page.h
53
#define copy_user_page(vto, vfrom, vaddr, topg) copy_page(vto, vfrom)
arch/riscv/include/asm/pgtable.h
1292
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/riscv/kernel/crash_dump.c
15
void *vaddr;
arch/riscv/kernel/crash_dump.c
20
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/riscv/kernel/crash_dump.c
21
if (!vaddr)
arch/riscv/kernel/crash_dump.c
24
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/riscv/kernel/crash_dump.c
26
memunmap(vaddr);
arch/riscv/kernel/probes/uprobes.c
117
instruction_pointer_set(regs, utask->vaddr);
arch/riscv/kernel/probes/uprobes.c
164
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
arch/riscv/kernel/probes/uprobes.c
169
void *dst = kaddr + (vaddr & ~PAGE_MASK);
arch/riscv/kernel/probes/uprobes.c
175
if (vaddr) {
arch/riscv/kernel/probes/uprobes.c
78
instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
arch/riscv/mm/dma-noncoherent.c
20
void *vaddr = phys_to_virt(paddr);
arch/riscv/mm/dma-noncoherent.c
28
ALT_CMO_OP(CLEAN, vaddr, size, riscv_cbom_block_size);
arch/riscv/mm/dma-noncoherent.c
33
void *vaddr = phys_to_virt(paddr);
arch/riscv/mm/dma-noncoherent.c
42
ALT_CMO_OP(INVAL, vaddr, size, riscv_cbom_block_size);
arch/riscv/mm/dma-noncoherent.c
47
void *vaddr = phys_to_virt(paddr);
arch/riscv/mm/dma-noncoherent.c
56
ALT_CMO_OP(FLUSH, vaddr, size, riscv_cbom_block_size);
arch/riscv/mm/kasan_init.c
107
kasan_populate_pmd(pudp, vaddr, next);
arch/riscv/mm/kasan_init.c
108
} while (pudp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
112
unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
123
p4dp = p4d_offset(pgd, vaddr);
arch/riscv/mm/kasan_init.c
126
next = p4d_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
128
if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
arch/riscv/mm/kasan_init.c
129
(next - vaddr) >= P4D_SIZE) {
arch/riscv/mm/kasan_init.c
138
kasan_populate_pud(p4dp, vaddr, next);
arch/riscv/mm/kasan_init.c
139
} while (p4dp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
143
unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
149
next = pgd_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
151
if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
arch/riscv/mm/kasan_init.c
152
(next - vaddr) >= PGDIR_SIZE) {
arch/riscv/mm/kasan_init.c
161
kasan_populate_p4d(pgdp, vaddr, next);
arch/riscv/mm/kasan_init.c
162
} while (pgdp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
166
unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
175
pudp = base_pud + pud_index(vaddr);
arch/riscv/mm/kasan_init.c
179
next = pud_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
181
if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
arch/riscv/mm/kasan_init.c
187
} while (pudp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
191
unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
200
p4dp = base_p4d + p4d_index(vaddr);
arch/riscv/mm/kasan_init.c
204
next = p4d_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
206
if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
arch/riscv/mm/kasan_init.c
207
(next - vaddr) >= P4D_SIZE) {
arch/riscv/mm/kasan_init.c
212
kasan_early_clear_pud(p4dp, vaddr, next);
arch/riscv/mm/kasan_init.c
213
} while (p4dp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
217
unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
222
next = pgd_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
224
if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
arch/riscv/mm/kasan_init.c
225
(next - vaddr) >= PGDIR_SIZE) {
arch/riscv/mm/kasan_init.c
230
kasan_early_clear_p4d(pgdp, vaddr, next);
arch/riscv/mm/kasan_init.c
231
} while (pgdp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
235
unsigned long vaddr,
arch/riscv/mm/kasan_init.c
246
pudp = base_pud + pud_index(vaddr);
arch/riscv/mm/kasan_init.c
250
next = pud_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
252
if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
arch/riscv/mm/kasan_init.c
253
(next - vaddr) >= PUD_SIZE) {
arch/riscv/mm/kasan_init.c
260
} while (pudp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
264
unsigned long vaddr,
arch/riscv/mm/kasan_init.c
284
p4dp = base_p4d + p4d_index(vaddr);
arch/riscv/mm/kasan_init.c
288
next = p4d_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
29
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
290
if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
arch/riscv/mm/kasan_init.c
291
(next - vaddr) >= P4D_SIZE) {
arch/riscv/mm/kasan_init.c
297
kasan_early_populate_pud(p4dp, vaddr, next);
arch/riscv/mm/kasan_init.c
298
} while (p4dp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
302
unsigned long vaddr,
arch/riscv/mm/kasan_init.c
309
next = pgd_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
311
if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
arch/riscv/mm/kasan_init.c
312
(next - vaddr) >= PGDIR_SIZE) {
arch/riscv/mm/kasan_init.c
318
kasan_early_populate_p4d(pgdp, vaddr, next);
arch/riscv/mm/kasan_init.c
319
} while (pgdp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
371
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
arch/riscv/mm/kasan_init.c
374
kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
arch/riscv/mm/kasan_init.c
378
unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
382
pud_t *pud_k = pud_offset(p4d, vaddr);
arch/riscv/mm/kasan_init.c
385
next = pud_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
39
ptep = pte_offset_kernel(pmd, vaddr);
arch/riscv/mm/kasan_init.c
394
} while (pud_k++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
398
unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
402
p4d_t *p4d_k = p4d_offset(pgd, vaddr);
arch/riscv/mm/kasan_init.c
405
next = p4d_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
413
kasan_shallow_populate_pud(p4d_k, vaddr, end);
arch/riscv/mm/kasan_init.c
414
} while (p4d_k++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
417
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
421
pgd_t *pgd_k = pgd_offset_k(vaddr);
arch/riscv/mm/kasan_init.c
424
next = pgd_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
432
kasan_shallow_populate_p4d(pgd_k, vaddr, next);
arch/riscv/mm/kasan_init.c
433
} while (pgd_k++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
438
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
arch/riscv/mm/kasan_init.c
441
kasan_shallow_populate_pgd(vaddr, vend);
arch/riscv/mm/kasan_init.c
47
} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
arch/riscv/mm/kasan_init.c
50
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
61
pmdp = pmd_offset(pud, vaddr);
arch/riscv/mm/kasan_init.c
64
next = pmd_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
66
if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
arch/riscv/mm/kasan_init.c
67
(next - vaddr) >= PMD_SIZE) {
arch/riscv/mm/kasan_init.c
76
kasan_populate_pte(pmdp, vaddr, next);
arch/riscv/mm/kasan_init.c
77
} while (pmdp++, vaddr = next, vaddr != end);
arch/riscv/mm/kasan_init.c
81
unsigned long vaddr, unsigned long end)
arch/riscv/mm/kasan_init.c
92
pudp = pud_offset(p4d, vaddr);
arch/riscv/mm/kasan_init.c
95
next = pud_addr_end(vaddr, end);
arch/riscv/mm/kasan_init.c
97
if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
arch/riscv/mm/kasan_init.c
98
(next - vaddr) >= PUD_SIZE) {
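Every kasan_populate_*() level above repeats one walker idiom: compute the end of the current entry with *_addr_end(), install a whole huge entry when alignment and remaining length allow, otherwise recurse a level down, then advance with `} while (p++, vaddr = next, vaddr != end)`. A sketch of the PMD level using only helpers visible in the listing (the huge-mapping branch body is elided, as it is in the listing):

	static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr,
					      unsigned long end)
	{
		unsigned long next;
		pmd_t *pmdp = pmd_offset(pud, vaddr);

		do {
			next = pmd_addr_end(vaddr, end);

			/* aligned and big enough: one 2M mapping suffices */
			if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
			    (next - vaddr) >= PMD_SIZE) {
				/* (huge-page population elided) */
				continue;
			}

			/* otherwise populate page by page */
			kasan_populate_pte(pmdp, vaddr, next);
		} while (pmdp++, vaddr = next, vaddr != end);
	}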
arch/riscv/mm/pageattr.c
102
pmdp = pmd_offset(pudp, vaddr);
arch/riscv/mm/pageattr.c
105
next = pmd_addr_end(vaddr, end);
arch/riscv/mm/pageattr.c
107
if (next - vaddr >= PMD_SIZE &&
arch/riscv/mm/pageattr.c
108
vaddr <= (vaddr & PMD_MASK) && end >= next)
arch/riscv/mm/pageattr.c
130
} while (pmdp++, vaddr = next, vaddr != end);
arch/riscv/mm/pageattr.c
136
unsigned long vaddr, unsigned long end)
arch/riscv/mm/pageattr.c
142
pudp = pud_offset(p4dp, vaddr);
arch/riscv/mm/pageattr.c
145
next = pud_addr_end(vaddr, end);
arch/riscv/mm/pageattr.c
147
if (next - vaddr >= PUD_SIZE &&
arch/riscv/mm/pageattr.c
148
vaddr <= (vaddr & PUD_MASK) && end >= next)
arch/riscv/mm/pageattr.c
172
ret = __split_linear_mapping_pmd(pudp, vaddr, next);
arch/riscv/mm/pageattr.c
175
} while (pudp++, vaddr = next, vaddr != end);
arch/riscv/mm/pageattr.c
181
unsigned long vaddr, unsigned long end)
arch/riscv/mm/pageattr.c
187
p4dp = p4d_offset(pgdp, vaddr);
arch/riscv/mm/pageattr.c
190
next = p4d_addr_end(vaddr, end);
arch/riscv/mm/pageattr.c
196
if (next - vaddr >= P4D_SIZE &&
arch/riscv/mm/pageattr.c
197
vaddr <= (vaddr & P4D_MASK) && end >= next)
arch/riscv/mm/pageattr.c
230
ret = __split_linear_mapping_pud(p4dp, vaddr, next);
arch/riscv/mm/pageattr.c
233
} while (p4dp++, vaddr = next, vaddr != end);
arch/riscv/mm/pageattr.c
239
unsigned long vaddr,
arch/riscv/mm/pageattr.c
246
next = pgd_addr_end(vaddr, end);
arch/riscv/mm/pageattr.c
248
ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
arch/riscv/mm/pageattr.c
251
} while (pgdp++, vaddr = next, vaddr != end);
arch/riscv/mm/pageattr.c
97
unsigned long vaddr, unsigned long end)
arch/s390/include/asm/idals.h
100
idal_create_words(idal, vaddr, ccw->count);
arch/s390/include/asm/idals.h
102
vaddr = idal;
arch/s390/include/asm/idals.h
104
ccw->cda = virt_to_dma32(vaddr);
arch/s390/include/asm/idals.h
136
void *vaddr;
arch/s390/include/asm/idals.h
150
vaddr = (void *)__get_free_pages(GFP_KERNEL, page_order);
arch/s390/include/asm/idals.h
151
if (!vaddr)
arch/s390/include/asm/idals.h
153
ib->data[i] = virt_to_dma64(vaddr);
arch/s390/include/asm/idals.h
159
vaddr = dma64_to_virt(ib->data[i]);
arch/s390/include/asm/idals.h
160
free_pages((unsigned long)vaddr, ib->page_order);
arch/s390/include/asm/idals.h
172
void *vaddr;
arch/s390/include/asm/idals.h
177
vaddr = dma64_to_virt(ib->data[i]);
arch/s390/include/asm/idals.h
178
free_pages((unsigned long)vaddr, ib->page_order);
arch/s390/include/asm/idals.h
274
void *vaddr;
arch/s390/include/asm/idals.h
286
vaddr = dma64_to_virt(ib->data[0]);
arch/s390/include/asm/idals.h
287
ccw->cda = virt_to_dma32(vaddr);
arch/s390/include/asm/idals.h
298
void *vaddr;
arch/s390/include/asm/idals.h
303
vaddr = dma64_to_virt(ib->data[i]);
arch/s390/include/asm/idals.h
304
left = copy_to_user(to, vaddr, IDA_BLOCK_SIZE);
arch/s390/include/asm/idals.h
310
vaddr = dma64_to_virt(ib->data[i]);
arch/s390/include/asm/idals.h
311
return copy_to_user(to, vaddr, count);
arch/s390/include/asm/idals.h
320
void *vaddr;
arch/s390/include/asm/idals.h
325
vaddr = dma64_to_virt(ib->data[i]);
arch/s390/include/asm/idals.h
326
left = copy_from_user(vaddr, from, IDA_BLOCK_SIZE);
arch/s390/include/asm/idals.h
33
static inline bool idal_is_needed(void *vaddr, unsigned int length)
arch/s390/include/asm/idals.h
332
vaddr = dma64_to_virt(ib->data[i]);
arch/s390/include/asm/idals.h
333
return copy_from_user(vaddr, from, count);
arch/s390/include/asm/idals.h
35
dma64_t paddr = virt_to_dma64(vaddr);
arch/s390/include/asm/idals.h
43
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
arch/s390/include/asm/idals.h
47
cidaw = (unsigned long)vaddr & (IDA_BLOCK_SIZE - 1);
arch/s390/include/asm/idals.h
56
static inline unsigned int idal_2k_nr_words(void *vaddr, unsigned int length)
arch/s390/include/asm/idals.h
60
cidaw = (unsigned long)vaddr & (IDA_2K_BLOCK_SIZE - 1);
arch/s390/include/asm/idals.h
69
static inline dma64_t *idal_create_words(dma64_t *idaws, void *vaddr, unsigned int length)
arch/s390/include/asm/idals.h
71
dma64_t paddr = virt_to_dma64(vaddr);
arch/s390/include/asm/idals.h
75
cidaw = idal_nr_words(vaddr, length);
arch/s390/include/asm/idals.h
88
static inline int set_normalized_cda(struct ccw1 *ccw, void *vaddr)
arch/s390/include/asm/idals.h
95
nridaws = idal_nr_words(vaddr, ccw->count);
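idal_nr_words() above sizes the indirect-data-address list from the buffer's offset into its IDA block plus its length. A self-contained sketch of that arithmetic, assuming the usual 4 KB IDA block size (the constant and the demo in main() are illustrative, not from the listing):

	#include <stdio.h>

	#define IDA_BLOCK_SIZE 4096UL	/* assumed: one IDAW covers 4 KB */

	/* IDAWs needed: lead-in offset inside the first block, plus the
	 * length, rounded up to whole blocks. */
	static unsigned int idal_nr_words_sketch(unsigned long vaddr,
						 unsigned int length)
	{
		unsigned long cidaw = vaddr & (IDA_BLOCK_SIZE - 1);

		return (cidaw + length + IDA_BLOCK_SIZE - 1) / IDA_BLOCK_SIZE;
	}

	int main(void)
	{
		/* 8000 bytes starting 100 bytes into a block span 2 blocks */
		printf("%u\n", idal_nr_words_sketch(0x1064, 8000));
		return 0;
	}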
arch/s390/include/asm/mem_encrypt.h
7
int set_memory_encrypted(unsigned long vaddr, int numpages);
arch/s390/include/asm/mem_encrypt.h
8
int set_memory_decrypted(unsigned long vaddr, int numpages);
arch/s390/include/asm/page.h
68
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/s390/include/asm/page.h
70
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
arch/s390/include/asm/page.h
71
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
arch/s390/include/asm/pgtable.h
62
#define ZERO_PAGE(vaddr) \
arch/s390/include/asm/pgtable.h
64
(((unsigned long)(vaddr)) & zero_page_mask))))
arch/s390/kernel/crash_dump.c
495
unsigned long vaddr, unsigned long size)
arch/s390/kernel/crash_dump.c
498
phdr->p_vaddr = vaddr;
arch/s390/kernel/sthyi.c
423
static int sthyi(u64 vaddr, u64 *rc)
arch/s390/kernel/sthyi.c
426
union register_pair r2 = { .even = vaddr, };
arch/s390/kernel/uprobes.c
102
current->thread.per_event.address = utask->vaddr;
arch/s390/kernel/uprobes.c
138
regs->psw.addr = current->utask->vaddr;
arch/s390/kernel/uprobes.c
139
current->thread.per_event.address = current->utask->vaddr;
arch/s390/kernel/uprobes.c
87
regs->psw.addr += utask->vaddr - utask->xol_vaddr;
arch/s390/kernel/uprobes.c
91
regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
arch/s390/kernel/uprobes.c
97
regs->psw.addr = utask->vaddr + ilen;
arch/s390/kvm/dat.c
381
union vaddress vaddr = { .addr = gfn_to_gpa(gfn) };
arch/s390/kvm/dat.c
405
*last = table->crstes + vaddr.rfx;
arch/s390/kvm/dat.c
425
*last = table->crstes + vaddr.rsx;
arch/s390/kvm/dat.c
445
*last = table->crstes + vaddr.rtx;
arch/s390/kvm/dat.c
477
*last = table->crstes + vaddr.sx;
arch/s390/kvm/dat.c
507
*ptepp = pgtable->ptes + vaddr.px;
arch/s390/kvm/gaccess.c
1284
union vaddress vaddr;
arch/s390/kvm/gaccess.c
1302
vaddr.addr = saddr;
arch/s390/kvm/gaccess.c
1309
if (vaddr.rfx01 > asce.tl)
arch/s390/kvm/gaccess.c
1313
if (vaddr.rsx01 > asce.tl)
arch/s390/kvm/gaccess.c
1317
if (vaddr.rtx01 > asce.tl)
arch/s390/kvm/gaccess.c
1321
if (vaddr.sx01 > asce.tl)
arch/s390/kvm/gaccess.c
1329
w->last_addr = ptr + vaddr.rfx * 8;
arch/s390/kvm/gaccess.c
1338
if (vaddr.rsx01 < table.pgd.tf || vaddr.rsx01 > table.pgd.tl)
arch/s390/kvm/gaccess.c
1346
w->last_addr = ptr + vaddr.rsx * 8;
arch/s390/kvm/gaccess.c
1355
if (vaddr.rtx01 < table.p4d.tf || vaddr.rtx01 > table.p4d.tl)
arch/s390/kvm/gaccess.c
1363
w->last_addr = ptr + vaddr.rtx * 8;
arch/s390/kvm/gaccess.c
1380
if (vaddr.sx01 < table.pud.fc0.tf || vaddr.sx01 > table.pud.fc0.tl)
arch/s390/kvm/gaccess.c
1386
w->last_addr = ptr + vaddr.sx * 8;
arch/s390/kvm/gaccess.c
1405
w->last_addr = ptr + vaddr.px * 8;
arch/s390/kvm/gaccess.c
474
union vaddress vaddr = {.addr = gva};
arch/s390/kvm/gaccess.c
492
if (vaddr.rfx01 > asce.tl)
arch/s390/kvm/gaccess.c
494
ptr += vaddr.rfx * 8;
arch/s390/kvm/gaccess.c
497
if (vaddr.rfx)
arch/s390/kvm/gaccess.c
499
if (vaddr.rsx01 > asce.tl)
arch/s390/kvm/gaccess.c
501
ptr += vaddr.rsx * 8;
arch/s390/kvm/gaccess.c
504
if (vaddr.rfx || vaddr.rsx)
arch/s390/kvm/gaccess.c
506
if (vaddr.rtx01 > asce.tl)
arch/s390/kvm/gaccess.c
508
ptr += vaddr.rtx * 8;
arch/s390/kvm/gaccess.c
511
if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
arch/s390/kvm/gaccess.c
513
if (vaddr.sx01 > asce.tl)
arch/s390/kvm/gaccess.c
515
ptr += vaddr.sx * 8;
arch/s390/kvm/gaccess.c
530
if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
arch/s390/kvm/gaccess.c
534
ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
arch/s390/kvm/gaccess.c
548
if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
arch/s390/kvm/gaccess.c
552
ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
arch/s390/kvm/gaccess.c
574
if (vaddr.sx01 < rtte.fc0.tf)
arch/s390/kvm/gaccess.c
576
if (vaddr.sx01 > rtte.fc0.tl)
arch/s390/kvm/gaccess.c
580
ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
arch/s390/kvm/gaccess.c
603
ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
arch/s390/kvm/vsie.c
898
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
arch/s390/kvm/vsie.c
905
(vaddr & 0xfffffffffffff000UL) |
arch/s390/mm/init.c
114
int set_memory_encrypted(unsigned long vaddr, int numpages)
arch/s390/mm/init.c
120
uv_remove_shared(virt_to_phys((void *)vaddr));
arch/s390/mm/init.c
121
vaddr += PAGE_SIZE;
arch/s390/mm/init.c
126
int set_memory_decrypted(unsigned long vaddr, int numpages)
arch/s390/mm/init.c
131
uv_set_shared(virt_to_phys((void *)vaddr));
arch/s390/mm/init.c
132
vaddr += PAGE_SIZE;
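set_memory_encrypted()/decrypted() above walk the range a page at a time, toggling the Ultravisor sharing state of each backing frame. The decrypted (shared) direction, reconstructed from the quoted lines (the full source also carries an early-return guard for non-protected guests, omitted here):

	/* Sketch: make numpages pages at vaddr visible to the hypervisor
	 * by registering each backing frame as shared with the Ultravisor. */
	int set_memory_decrypted(unsigned long vaddr, int numpages)
	{
		int i;

		for (i = 0; i < numpages; i++) {
			uv_set_shared(virt_to_phys((void *)vaddr));
			vaddr += PAGE_SIZE;
		}
		return 0;
	}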
arch/sh/boards/mach-sdk7786/sram.c
22
void __iomem *vaddr;
arch/sh/boards/mach-sdk7786/sram.c
50
vaddr = ioremap(phys, SZ_2K);
arch/sh/boards/mach-sdk7786/sram.c
51
if (unlikely(!vaddr)) {
arch/sh/boards/mach-sdk7786/sram.c
60
ret = gen_pool_add(sram_pool, (unsigned long)vaddr, SZ_2K, -1);
arch/sh/boards/mach-sdk7786/sram.c
63
iounmap(vaddr);
arch/sh/include/asm/cacheflush.h
119
static inline void *sh_cacheop_vaddr(void *vaddr)
arch/sh/include/asm/cacheflush.h
122
vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
arch/sh/include/asm/cacheflush.h
123
return vaddr;
arch/sh/include/asm/cacheflush.h
85
struct page *page, unsigned long vaddr, void *dst, const void *src,
arch/sh/include/asm/cacheflush.h
89
struct page *page, unsigned long vaddr, void *dst, const void *src,
arch/sh/include/asm/page.h
48
#define copy_user_page(to, from, vaddr, pg) __copy_user(to, from, PAGE_SIZE)
arch/sh/include/asm/page.h
54
unsigned long vaddr, struct vm_area_struct *vma);
arch/sh/include/asm/page.h
56
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
arch/sh/include/asm/pgtable.h
29
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/sh/include/cpu-sh4/cpu/sq.h
30
void sq_unmap(unsigned long vaddr);
arch/sh/kernel/cpu/sh4/sq.c
207
void sq_unmap(unsigned long vaddr)
arch/sh/kernel/cpu/sh4/sq.c
213
if (map->sq_addr == vaddr)
arch/sh/kernel/cpu/sh4/sq.c
218
__func__, vaddr);
arch/sh/kernel/crash_dump.c
17
void __iomem *vaddr;
arch/sh/kernel/crash_dump.c
22
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/sh/kernel/crash_dump.c
23
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/sh/kernel/crash_dump.c
24
iounmap(vaddr);
arch/sh/kernel/io_trapped.c
169
unsigned long vaddr = (unsigned long)tiop->virt_base;
arch/sh/kernel/io_trapped.c
176
if (address < (vaddr + len))
arch/sh/kernel/io_trapped.c
177
return res->start + (address - vaddr);
arch/sh/kernel/io_trapped.c
178
vaddr += len;
arch/sh/kernel/smp.c
464
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
arch/sh/kernel/smp.c
469
fd.addr2 = vaddr;
arch/sh/kernel/smp.c
472
local_flush_tlb_one(asid, vaddr);
arch/sh/mm/cache-sh4.c
223
void *vaddr;
arch/sh/mm/cache-sh4.c
242
vaddr = NULL;
arch/sh/mm/cache-sh4.c
253
vaddr = kmap_coherent(page, address);
arch/sh/mm/cache-sh4.c
255
vaddr = kmap_atomic(page);
arch/sh/mm/cache-sh4.c
257
address = (unsigned long)vaddr;
arch/sh/mm/cache-sh4.c
266
if (vaddr) {
arch/sh/mm/cache-sh4.c
268
kunmap_coherent(vaddr);
arch/sh/mm/cache-sh4.c
270
kunmap_atomic(vaddr);
arch/sh/mm/cache.c
100
unsigned long vaddr, struct vm_area_struct *vma)
arch/sh/mm/cache.c
109
vfrom = kmap_coherent(from, vaddr);
arch/sh/mm/cache.c
118
if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
arch/sh/mm/cache.c
128
void clear_user_highpage(struct page *page, unsigned long vaddr)
arch/sh/mm/cache.c
134
if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
arch/sh/mm/cache.c
61
unsigned long vaddr, void *dst, const void *src,
arch/sh/mm/cache.c
68
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
arch/sh/mm/cache.c
78
flush_cache_page(vma, vaddr, page_to_pfn(page));
arch/sh/mm/cache.c
82
unsigned long vaddr, void *dst, const void *src,
arch/sh/mm/cache.c
89
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
arch/sh/mm/init.c
162
unsigned long vaddr, pte_t *lastpte)
arch/sh/mm/init.c
175
unsigned long vaddr;
arch/sh/mm/init.c
177
vaddr = start;
arch/sh/mm/init.c
178
i = pgd_index(vaddr);
arch/sh/mm/init.c
179
j = pud_index(vaddr);
arch/sh/mm/init.c
180
k = pmd_index(vaddr);
arch/sh/mm/init.c
183
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
arch/sh/mm/init.c
185
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
arch/sh/mm/init.c
190
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
arch/sh/mm/init.c
192
pmd, vaddr, pte);
arch/sh/mm/init.c
193
vaddr += PMD_SIZE;
arch/sh/mm/init.c
272
unsigned long vaddr, end;
arch/sh/mm/init.c
320
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
arch/sh/mm/init.c
322
page_table_range_init(vaddr, end, swapper_pg_dir);
arch/sh/mm/ioremap.c
126
unsigned long vaddr = (unsigned long __force)addr;
arch/sh/mm/ioremap.c
131
if (iomapping_nontranslatable(vaddr))
arch/sh/mm/kmap.c
21
unsigned long vaddr;
arch/sh/mm/kmap.c
24
vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
arch/sh/mm/kmap.c
25
kmap_coherent_pte = virt_to_kpte(vaddr);
arch/sh/mm/kmap.c
32
unsigned long vaddr;
arch/sh/mm/kmap.c
43
vaddr = __fix_to_virt(idx);
arch/sh/mm/kmap.c
48
return (void *)vaddr;
arch/sh/mm/kmap.c
54
unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
arch/sh/mm/kmap.c
55
enum fixed_addresses idx = __virt_to_fix(vaddr);
arch/sh/mm/kmap.c
58
__flush_purge_region((void *)vaddr, PAGE_SIZE);
arch/sh/mm/kmap.c
60
pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
arch/sh/mm/kmap.c
61
local_flush_tlb_one(get_asid(), vaddr);
arch/sh/mm/pmb.c
137
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
arch/sh/mm/pmb.c
156
if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
arch/sh/mm/pmb.c
335
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
arch/sh/mm/pmb.c
345
if (!pmb_addr_valid(vaddr, size))
arch/sh/mm/pmb.c
347
if (pmb_mapping_exists(vaddr, phys, size))
arch/sh/mm/pmb.c
350
orig_addr = vaddr;
arch/sh/mm/pmb.c
353
flush_tlb_kernel_range(vaddr, vaddr + size);
arch/sh/mm/pmb.c
363
pmbe = pmb_alloc(vaddr, phys, pmb_flags |
arch/sh/mm/pmb.c
377
vaddr += pmbe->size;
arch/sh/mm/pmb.c
413
unsigned long vaddr;
arch/sh/mm/pmb.c
452
vaddr = (unsigned long)area->addr;
arch/sh/mm/pmb.c
454
ret = pmb_bolt_mapping(vaddr, phys, size, prot);
arch/sh/mm/pmb.c
458
return (void __iomem *)(offset + (char *)vaddr);
arch/sh/mm/pmb.c
464
unsigned long vaddr = (unsigned long __force)addr;
arch/sh/mm/pmb.c
472
if (pmbe->vpn == vaddr) {
arch/sparc/include/asm/cacheflush_32.h
20
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
arch/sparc/include/asm/cacheflush_32.h
22
flush_cache_page(vma, vaddr, page_to_pfn(page));\
arch/sparc/include/asm/cacheflush_32.h
25
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
arch/sparc/include/asm/cacheflush_32.h
27
flush_cache_page(vma, vaddr, page_to_pfn(page));\
arch/sparc/include/asm/cacheflush_64.h
60
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
arch/sparc/include/asm/cacheflush_64.h
62
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
arch/sparc/include/asm/cacheflush_64.h
64
flush_ptrace_access(vma, page, vaddr, src, len, 0); \
arch/sparc/include/asm/cacheflush_64.h
67
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
arch/sparc/include/asm/cacheflush_64.h
69
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
arch/sparc/include/asm/cacheflush_64.h
71
flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
arch/sparc/include/asm/floppy_64.h
202
unsigned char *vaddr = pdma_vaddr;
arch/sparc/include/asm/floppy_64.h
209
pdma_vaddr = vaddr;
arch/sparc/include/asm/floppy_64.h
214
pdma_vaddr = vaddr;
arch/sparc/include/asm/floppy_64.h
221
*vaddr++ = readb(stat + 1);
arch/sparc/include/asm/floppy_64.h
223
unsigned char data = *vaddr++;
arch/sparc/include/asm/floppy_64.h
231
pdma_vaddr = vaddr;
arch/sparc/include/asm/highmem.h
54
#define arch_kmap_local_pre_map(vaddr, pteval) flush_cache_all()
arch/sparc/include/asm/highmem.h
55
#define arch_kmap_local_pre_unmap(vaddr) flush_cache_all()
arch/sparc/include/asm/highmem.h
56
#define arch_kmap_local_post_map(vaddr, pteval) flush_tlb_all()
arch/sparc/include/asm/highmem.h
57
#define arch_kmap_local_post_unmap(vaddr) flush_tlb_all()
arch/sparc/include/asm/hvtramp.h
10
__u64 vaddr;
arch/sparc/include/asm/hypervisor.h
770
unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
arch/sparc/include/asm/leon.h
200
unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
arch/sparc/include/asm/oplib_64.h
156
unsigned long vaddr);
arch/sparc/include/asm/oplib_64.h
160
unsigned long vaddr);
arch/sparc/include/asm/oplib_64.h
176
unsigned long vaddr, unsigned long paddr);
arch/sparc/include/asm/oplib_64.h
177
void prom_unmap(unsigned long size, unsigned long vaddr);
arch/sparc/include/asm/page_32.h
20
#define clear_user_page(addr, vaddr, page) \
arch/sparc/include/asm/page_32.h
24
#define copy_user_page(to, from, vaddr, page) \
arch/sparc/include/asm/page_64.h
45
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
arch/sparc/include/asm/page_64.h
48
void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
arch/sparc/include/asm/page_64.h
52
unsigned long vaddr, struct vm_area_struct *vma);
arch/sparc/include/asm/pgtable_32.h
80
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/sparc/include/asm/pgtable_64.h
214
#define ZERO_PAGE(vaddr) (mem_map_zero)
arch/sparc/include/asm/pgtable_64.h
895
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
arch/sparc/include/asm/pgtable_64.h
899
static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
arch/sparc/include/asm/pgtable_64.h
910
tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
arch/sparc/include/asm/tlbflush_64.h
20
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
arch/sparc/include/asm/tlbflush_64.h
48
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
arch/sparc/include/asm/tlbflush_64.h
53
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
arch/sparc/include/asm/tlbflush_64.h
55
__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
arch/sparc/include/asm/tlbflush_64.h
61
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
arch/sparc/include/asm/tlbflush_64.h
63
#define global_flush_tlb_page(mm, vaddr) \
arch/sparc/include/asm/tlbflush_64.h
64
smp_flush_tlb_page(mm, vaddr)
arch/sparc/include/asm/viking.h
214
static inline unsigned long viking_hwprobe(unsigned long vaddr)
arch/sparc/include/asm/viking.h
218
vaddr &= PAGE_MASK;
arch/sparc/include/asm/viking.h
222
: "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
arch/sparc/include/asm/viking.h
229
: "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE));
arch/sparc/include/asm/viking.h
231
vaddr &= ~PGDIR_MASK;
arch/sparc/include/asm/viking.h
232
vaddr >>= PAGE_SHIFT;
arch/sparc/include/asm/viking.h
233
return val | (vaddr << 8);
arch/sparc/include/asm/viking.h
239
: "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));
arch/sparc/include/asm/viking.h
241
vaddr &= ~PMD_MASK;
arch/sparc/include/asm/viking.h
242
vaddr >>= PAGE_SHIFT;
arch/sparc/include/asm/viking.h
243
return val | (vaddr << 8);
arch/sparc/include/asm/viking.h
249
: "r" (vaddr), "i" (ASI_M_FLUSH_PROBE));
arch/sparc/kernel/iommu.c
331
u32 vaddr, unsigned long ctx, unsigned long npages,
arch/sparc/kernel/iommu.c
366
for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
arch/sparc/kernel/iommu.c
367
iommu_write(strbuf->strbuf_pflush, vaddr);
arch/sparc/kernel/iommu.c
393
vaddr, ctx, npages);
arch/sparc/kernel/iommu.c
567
unsigned long vaddr, npages, entry, j;
arch/sparc/kernel/iommu.c
570
vaddr = s->dma_address & IO_PAGE_MASK;
arch/sparc/kernel/iommu.c
574
entry = (vaddr - iommu->tbl.table_map_base)
arch/sparc/kernel/iommu.c
581
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
arch/sparc/kernel/ioport.c
127
unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
arch/sparc/kernel/ioport.c
134
if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) {
arch/sparc/kernel/ioport.c
135
printk("free_io/iounmap: cannot free %lx\n", vaddr);
arch/sparc/kernel/pci_sun4v.c
615
unsigned long vaddr, npages;
arch/sparc/kernel/pci_sun4v.c
617
vaddr = s->dma_address & IO_PAGE_MASK;
arch/sparc/kernel/pci_sun4v.c
620
iommu_tbl_range_free(tbl, vaddr, npages,
arch/sparc/kernel/smp_64.c
1108
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
arch/sparc/kernel/smp_64.c
1115
context, vaddr, 0,
arch/sparc/kernel/smp_64.c
1118
__flush_tlb_page(context, vaddr);
arch/sparc/kernel/smp_64.c
322
hdesc->maps[i].vaddr = tte_vaddr;
arch/sparc/kernel/traps_64.c
1465
unsigned long vaddr = PAGE_OFFSET + paddr;
arch/sparc/kernel/traps_64.c
1467
if (vaddr > (unsigned long) high_memory)
arch/sparc/kernel/traps_64.c
1470
return kern_addr_valid(vaddr);
arch/sparc/kernel/uprobes.c
106
unsigned long real_pc = (unsigned long) utask->vaddr;
arch/sparc/kernel/uprobes.c
228
rc = retpc_fixup(regs, insn, (unsigned long) utask->vaddr);
arch/sparc/kernel/uprobes.c
230
regs->tnpc = utask->vaddr+4;
arch/sparc/kernel/uprobes.c
299
instruction_pointer_set(regs, utask->vaddr);
arch/sparc/kernel/uprobes.c
33
static void copy_to_page(struct page *page, unsigned long vaddr,
arch/sparc/kernel/uprobes.c
38
memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
arch/sparc/kernel/uprobes.c
49
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
arch/sparc/kernel/uprobes.c
69
copy_to_page(page, vaddr, &insn, len);
arch/sparc/kernel/uprobes.c
70
copy_to_page(page, vaddr+len, &stp_insn, 4);
arch/sparc/mm/fault_64.c
61
static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
arch/sparc/mm/fault_64.c
67
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
arch/sparc/mm/init_32.c
269
unsigned long vaddr = (unsigned long)page_address(page);
arch/sparc/mm/init_32.c
271
__flush_page_to_ram(vaddr);
arch/sparc/mm/init_32.c
277
unsigned long vaddr = (unsigned long)folio_address(folio);
arch/sparc/mm/init_32.c
281
__flush_page_to_ram(vaddr + i * PAGE_SIZE);
arch/sparc/mm/init_64.c
3123
unsigned long vaddr, struct vm_area_struct *vma)
arch/sparc/mm/init_64.c
3129
copy_user_page(vto, vfrom, vaddr, to);
arch/sparc/mm/init_64.c
574
static inline int in_obp_range(unsigned long vaddr)
arch/sparc/mm/init_64.c
576
return (vaddr >= LOW_OBP_ADDRESS &&
arch/sparc/mm/init_64.c
577
vaddr < HI_OBP_ADDRESS);
arch/sparc/mm/init_64.c
658
static void __init hypervisor_tlb_lock(unsigned long vaddr,
arch/sparc/mm/init_64.c
662
unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
arch/sparc/mm/init_64.c
666
"errors with %lx\n", vaddr, 0, pte, mmu, ret);
arch/sparc/mm/io-unit.c
183
static void iounit_unmap_phys(struct device *dev, dma_addr_t vaddr, size_t len,
arch/sparc/mm/io-unit.c
190
len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
arch/sparc/mm/io-unit.c
191
vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
arch/sparc/mm/io-unit.c
192
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
arch/sparc/mm/io-unit.c
193
for (len += vaddr; vaddr < len; vaddr++)
arch/sparc/mm/io-unit.c
194
clear_bit(vaddr, iounit->bmap);
arch/sparc/mm/io-unit.c
202
unsigned long flags, vaddr, len;
arch/sparc/mm/io-unit.c
209
vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
arch/sparc/mm/io-unit.c
210
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
arch/sparc/mm/io-unit.c
211
for (len += vaddr; vaddr < len; vaddr++)
arch/sparc/mm/io-unit.c
212
clear_bit(vaddr, iounit->bmap);
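iounit_unmap_phys() above turns a DMA range back into bitmap indices: round the byte length up to pages (counting the sub-page offset), rebase the bus address, and clear one bit per page. A sketch assembled from the quoted lines (function name suffixed to mark it as a reconstruction):

	/* Sketch: release the IOUNIT pages backing [vaddr, vaddr + len). */
	static void iounit_release_sketch(struct iounit_struct *iounit,
					  dma_addr_t vaddr, size_t len)
	{
		/* pages spanned, counting the offset inside the first page */
		len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

		/* convert the bus address to a bitmap index */
		vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;

		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}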
arch/sparc/mm/iommu.c
208
unsigned long vaddr, p;
arch/sparc/mm/iommu.c
210
vaddr = (unsigned long)phys_to_virt(paddr);
arch/sparc/mm/iommu.c
211
for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
arch/sparc/mm/leon_mm.c
108
ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
arch/sparc/mm/leon_mm.c
135
ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
arch/sparc/mm/leon_mm.c
155
(vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
arch/sparc/mm/leon_mm.c
159
(vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
arch/sparc/mm/leon_mm.c
163
(vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
arch/sparc/mm/leon_mm.c
167
paddr_calc = vaddr;
arch/sparc/mm/leon_mm.c
36
unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
arch/sparc/mm/leon_mm.c
86
ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
arch/sparc/mm/srmmu.c
193
unsigned long vaddr;
arch/sparc/mm/srmmu.c
196
vaddr = (unsigned long)addr;
arch/sparc/mm/srmmu.c
197
if (vaddr < SRMMU_NOCACHE_VADDR) {
arch/sparc/mm/srmmu.c
199
vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
arch/sparc/mm/srmmu.c
202
if (vaddr + size > srmmu_nocache_end) {
arch/sparc/mm/srmmu.c
204
vaddr, srmmu_nocache_end);
arch/sparc/mm/srmmu.c
215
if (vaddr & (size - 1)) {
arch/sparc/mm/srmmu.c
216
printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
arch/sparc/mm/srmmu.c
220
offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
arch/sparc/mm/srmmu.c
275
unsigned long paddr, vaddr;
arch/sparc/mm/srmmu.c
296
vaddr = SRMMU_NOCACHE_VADDR;
arch/sparc/mm/srmmu.c
298
while (vaddr < srmmu_nocache_end) {
arch/sparc/mm/srmmu.c
299
pgd = pgd_offset_k(vaddr);
arch/sparc/mm/srmmu.c
300
p4d = p4d_offset(pgd, vaddr);
arch/sparc/mm/srmmu.c
301
pud = pud_offset(p4d, vaddr);
arch/sparc/mm/srmmu.c
302
pmd = pmd_offset(__nocache_fix(pud), vaddr);
arch/sparc/mm/srmmu.c
303
pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
arch/sparc/mm/srmmu.c
312
vaddr += PAGE_SIZE;
arch/sparc/mm/srmmu.c
743
static inline unsigned long srmmu_probe(unsigned long vaddr)
arch/sparc/mm/srmmu.c
749
vaddr &= PAGE_MASK;
arch/sparc/mm/srmmu.c
752
"r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
arch/sparc/mm/srmmu.c
754
retval = leon_swprobe(vaddr, NULL);
arch/sparc/mm/srmmu.c
840
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
arch/sparc/mm/srmmu.c
842
pgd_t *pgdp = pgd_offset_k(vaddr);
arch/sparc/mm/tlb.c
112
tb->vaddrs[nr] = vaddr;
arch/sparc/mm/tlb.c
121
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
arch/sparc/mm/tlb.c
146
if ((paddr ^ vaddr) & (1 << 13))
arch/sparc/mm/tlb.c
152
tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
arch/sparc/mm/tlb.c
156
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
arch/sparc/mm/tlb.c
162
pte = pte_offset_map(&pmd, vaddr);
arch/sparc/mm/tlb.c
165
end = vaddr + HPAGE_SIZE;
arch/sparc/mm/tlb.c
166
while (vaddr < end) {
arch/sparc/mm/tlb.c
170
tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
arch/sparc/mm/tlb.c
173
vaddr += PAGE_SIZE;
arch/sparc/mm/tlb.c
78
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
arch/sparc/mm/tlb.c
84
vaddr &= PAGE_MASK;
arch/sparc/mm/tlb.c
86
vaddr |= 0x1UL;
arch/sparc/mm/tlb.c
96
flush_tsb_user_page(mm, vaddr, hugepage_shift);
arch/sparc/mm/tlb.c
97
global_flush_tlb_page(mm, vaddr);
arch/sparc/mm/tsb.c
152
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
arch/sparc/mm/tsb.c
165
__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
arch/sparc/mm/tsb.c
169
__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
arch/sparc/mm/tsb.c
179
__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
arch/sparc/mm/tsb.c
22
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
arch/sparc/mm/tsb.c
24
vaddr >>= hash_shift;
arch/sparc/mm/tsb.c
25
return vaddr & (nentries - 1);
arch/sparc/mm/tsb.c
28
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
arch/sparc/mm/tsb.c
30
return (tag == (vaddr >> 22));
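tsb_hash() and tag_compare() above are the whole TSB lookup story: the virtual address indexes a power-of-two table, and the stored tag is the address's top bits. A self-contained sketch (the main() demo values are illustrative):

	#include <assert.h>

	/* Index = (vaddr >> shift) masked to the table size (power of two). */
	static unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift,
				      unsigned long nentries)
	{
		vaddr >>= hash_shift;
		return vaddr & (nentries - 1);
	}

	/* A TSB entry matches when its tag equals vaddr's top bits. */
	static int tag_compare(unsigned long tag, unsigned long vaddr)
	{
		return tag == (vaddr >> 22);
	}

	int main(void)
	{
		unsigned long va = 0x7f0000123000UL;

		/* 512-entry TSB hashed on the 8K page number (shift 13) */
		assert(tsb_hash(va, 13, 512) == ((va >> 13) & 511));
		assert(tag_compare(va >> 22, va));
		return 0;
	}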
arch/sparc/prom/misc_64.c
204
unsigned long tte_data, unsigned long vaddr)
arch/sparc/prom/misc_64.c
213
args[5] = vaddr;
arch/sparc/prom/misc_64.c
225
unsigned long vaddr)
arch/sparc/prom/misc_64.c
227
return tlb_load("SUNW,itlb-load", index, tte_data, vaddr);
arch/sparc/prom/misc_64.c
232
unsigned long vaddr)
arch/sparc/prom/misc_64.c
234
return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr);
arch/sparc/prom/misc_64.c
238
unsigned long vaddr, unsigned long paddr)
arch/sparc/prom/misc_64.c
250
args[7] = vaddr;
arch/sparc/prom/misc_64.c
263
void prom_unmap(unsigned long size, unsigned long vaddr)
arch/sparc/prom/misc_64.c
273
args[6] = vaddr;
arch/um/drivers/vfio_user.c
60
.vaddr = uml_reserved,
arch/um/include/asm/page.h
29
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/um/include/asm/pgtable.h
81
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
arch/x86/boot/startup/sev-shared.c
638
static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
arch/x86/boot/startup/sev-shared.c
646
ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
arch/x86/boot/startup/sev-shared.c
656
sev_evict_cache((void *)vaddr, 1);
arch/x86/boot/startup/sev-shared.c
659
static void __page_state_change(unsigned long vaddr, unsigned long paddr,
arch/x86/boot/startup/sev-shared.c
669
pvalidate_4k_page(vaddr, paddr, false, desc->ca, desc->caa_pa);
arch/x86/boot/startup/sev-shared.c
691
pvalidate_4k_page(vaddr, paddr, true, desc->ca, desc->caa_pa);
arch/x86/boot/startup/sev-startup.c
111
early_set_pages_state(vaddr, paddr, npages, &d);
arch/x86/boot/startup/sev-startup.c
49
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
arch/x86/boot/startup/sev-startup.c
54
vaddr = vaddr & PAGE_MASK;
arch/x86/boot/startup/sev-startup.c
60
__page_state_change(vaddr, paddr, desc);
arch/x86/boot/startup/sev-startup.c
62
vaddr += PAGE_SIZE;
arch/x86/boot/startup/sev-startup.c
67
void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
arch/x86/boot/startup/sev-startup.c
89
early_set_pages_state(vaddr, paddr, npages, &d);
arch/x86/boot/startup/sev-startup.c
92
void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
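early_set_pages_state() above is a page-granular loop: mask to a page boundary, change one page's state, advance. A sketch assembled around the quoted lines (the descriptor type name, the paddr masking, and the loop bound are assumptions; the listing shows only the vaddr side):

	struct psc_desc;	/* opaque here; carries the GHCB/PSC context */

	/* Sketch: apply a SEV-SNP page-state change to npages 4K pages.
	 * __page_state_change() does the PSC request + PVALIDATE work. */
	void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
				   unsigned long npages, struct psc_desc *desc)
	{
		unsigned long paddr_end;

		vaddr &= PAGE_MASK;
		paddr &= PAGE_MASK;
		paddr_end = paddr + (npages << PAGE_SHIFT);

		while (paddr < paddr_end) {
			__page_state_change(vaddr, paddr, desc);
			vaddr += PAGE_SIZE;
			paddr += PAGE_SIZE;
		}
	}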
arch/x86/boot/startup/sme.c
100
pgd_start = ppd->vaddr & PGDIR_MASK;
arch/x86/boot/startup/sme.c
105
pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
arch/x86/boot/startup/sme.c
117
pgd = ppd->pgd + pgd_index(ppd->vaddr);
arch/x86/boot/startup/sme.c
125
p4d = p4d_offset(pgd, ppd->vaddr);
arch/x86/boot/startup/sme.c
133
pud = pud_offset(p4d, ppd->vaddr);
arch/x86/boot/startup/sme.c
156
pmd = pmd_offset(pud, ppd->vaddr);
arch/x86/boot/startup/sme.c
173
pmd = pmd_offset(pud, ppd->vaddr);
arch/x86/boot/startup/sme.c
184
pte = pte_offset_kernel(pmd, ppd->vaddr);
arch/x86/boot/startup/sme.c
191
while (ppd->vaddr < ppd->vaddr_end) {
arch/x86/boot/startup/sme.c
194
ppd->vaddr += PMD_SIZE;
arch/x86/boot/startup/sme.c
201
while (ppd->vaddr < ppd->vaddr_end) {
arch/x86/boot/startup/sme.c
204
ppd->vaddr += PAGE_SIZE;
arch/x86/boot/startup/sme.c
221
ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
arch/x86/boot/startup/sme.c
387
ppd.vaddr = workarea_start;
arch/x86/boot/startup/sme.c
421
ppd.vaddr = kernel_start;
arch/x86/boot/startup/sme.c
427
ppd.vaddr = kernel_start + decrypted_base;
arch/x86/boot/startup/sme.c
434
ppd.vaddr = initrd_start;
arch/x86/boot/startup/sme.c
441
ppd.vaddr = initrd_start + decrypted_base;
arch/x86/boot/startup/sme.c
448
ppd.vaddr = workarea_start;
arch/x86/boot/startup/sme.c
453
ppd.vaddr = workarea_start + decrypted_base;
arch/x86/boot/startup/sme.c
471
ppd.vaddr = kernel_start + decrypted_base;
arch/x86/boot/startup/sme.c
476
ppd.vaddr = initrd_start + decrypted_base;
arch/x86/boot/startup/sme.c
481
ppd.vaddr = workarea_start + decrypted_base;
arch/x86/boot/startup/sme.c
78
unsigned long vaddr;
arch/x86/coco/sev/core.c
191
unsigned long vaddr;
arch/x86/coco/sev/core.c
202
vaddr = (unsigned long)pfn_to_kaddr(pfn);
arch/x86/coco/sev/core.c
206
rc = pvalidate(vaddr, size, validate);
arch/x86/coco/sev/core.c
211
unsigned long vaddr_end = vaddr + PMD_SIZE;
arch/x86/coco/sev/core.c
213
for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
arch/x86/coco/sev/core.c
214
rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
arch/x86/coco/sev/core.c
319
static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
arch/x86/coco/sev/core.c
337
while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) {
arch/x86/coco/sev/core.c
340
if (is_vmalloc_addr((void *)vaddr)) {
arch/x86/coco/sev/core.c
341
pfn = vmalloc_to_pfn((void *)vaddr);
arch/x86/coco/sev/core.c
344
pfn = __pa(vaddr) >> PAGE_SHIFT;
arch/x86/coco/sev/core.c
351
if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
arch/x86/coco/sev/core.c
352
(vaddr_end - vaddr) >= PMD_SIZE) {
arch/x86/coco/sev/core.c
354
vaddr += PMD_SIZE;
arch/x86/coco/sev/core.c
357
vaddr += PAGE_SIZE;
arch/x86/coco/sev/core.c
388
return vaddr;
arch/x86/coco/sev/core.c
391
static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
arch/x86/coco/sev/core.c
400
return early_set_pages_state(vaddr, __pa(vaddr), npages, &d);
arch/x86/coco/sev/core.c
403
vaddr = vaddr & PAGE_MASK;
arch/x86/coco/sev/core.c
404
vaddr_end = vaddr + (npages << PAGE_SHIFT);
arch/x86/coco/sev/core.c
406
while (vaddr < vaddr_end)
arch/x86/coco/sev/core.c
407
vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
arch/x86/coco/sev/core.c
410
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
arch/x86/coco/sev/core.c
415
set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
arch/x86/coco/sev/core.c
418
void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
arch/x86/coco/sev/core.c
423
set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
arch/x86/coco/sev/core.c
428
unsigned long vaddr, npages;
arch/x86/coco/sev/core.c
433
vaddr = (unsigned long)__va(start);
arch/x86/coco/sev/core.c
436
set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
arch/x86/coco/sev/internal.h
59
void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
arch/x86/coco/sev/vc-handle.c
40
unsigned long vaddr, phys_addr_t *paddr)
arch/x86/coco/sev/vc-handle.c
42
unsigned long va = (unsigned long)vaddr;
arch/x86/coco/sev/vc-handle.c
53
ctxt->fi.cr2 = vaddr;
arch/x86/coco/tdx/tdx.c
579
unsigned long *reg, val, vaddr;
arch/x86/coco/tdx/tdx.c
620
vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
arch/x86/coco/tdx/tdx.c
621
if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
arch/x86/coco/tdx/tdx.c
964
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
arch/x86/coco/tdx/tdx.c
966
phys_addr_t start = __pa(vaddr);
arch/x86/coco/tdx/tdx.c
967
phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);
arch/x86/coco/tdx/tdx.c
979
static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
arch/x86/coco/tdx/tdx.c
986
if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
arch/x86/coco/tdx/tdx.c
992
static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
arch/x86/coco/tdx/tdx.c
999
if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
arch/x86/events/intel/pt.c
1412
static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
arch/x86/events/intel/pt.c
1414
return __is_canonical_address(vaddr, vaddr_bits) ?
arch/x86/events/intel/pt.c
1415
vaddr :
arch/x86/events/intel/pt.c
1420
static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
arch/x86/events/intel/pt.c
1422
return __is_canonical_address(vaddr, vaddr_bits) ?
arch/x86/events/intel/pt.c
1423
vaddr :
arch/x86/hyperv/ivm.c
776
void *vaddr;
arch/x86/hyperv/ivm.c
792
vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
arch/x86/hyperv/ivm.c
793
paddr = slow_virt_to_phys(vaddr);
arch/x86/include/asm/highmem.h
62
#define arch_kmap_local_post_map(vaddr, pteval) \
arch/x86/include/asm/highmem.h
65
#define arch_kmap_local_post_unmap(vaddr) \
arch/x86/include/asm/highmem.h
67
flush_tlb_one_kernel((vaddr)); \
arch/x86/include/asm/io.h
332
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
arch/x86/include/asm/kexec.h
196
extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
arch/x86/include/asm/kexec.h
200
extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
arch/x86/include/asm/mem_encrypt.h
53
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
arch/x86/include/asm/mem_encrypt.h
54
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
arch/x86/include/asm/mem_encrypt.h
55
void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr,
arch/x86/include/asm/mem_encrypt.h
90
early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
arch/x86/include/asm/mem_encrypt.h
92
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
arch/x86/include/asm/mem_encrypt.h
94
early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) {}
arch/x86/include/asm/page.h
25
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
arch/x86/include/asm/page.h
31
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
arch/x86/include/asm/page.h
32
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
arch/x86/include/asm/page.h
71
static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
arch/x86/include/asm/page.h
73
return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
arch/x86/include/asm/page.h
76
static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
arch/x86/include/asm/page.h
78
return __canonical_address(vaddr, vaddr_bits) == vaddr;
arch/x86/include/asm/pgtable.h
56
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
arch/x86/include/asm/pgtable.h
909
pmd_t *populate_extra_pmd(unsigned long vaddr);
arch/x86/include/asm/pgtable.h
910
pte_t *populate_extra_pte(unsigned long vaddr);
arch/x86/include/asm/pgtable_32.h
41
#define kpte_clear_flush(ptep, vaddr) \
arch/x86/include/asm/pgtable_32.h
43
pte_clear(&init_mm, (vaddr), (ptep)); \
arch/x86/include/asm/pgtable_32.h
44
flush_tlb_one_kernel((vaddr)); \
arch/x86/include/asm/pgtable_64.h
58
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
arch/x86/include/asm/pgtable_64.h
59
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
arch/x86/include/asm/pgtable_types.h
536
void set_pte_vaddr(unsigned long vaddr, pte_t pte);
arch/x86/include/asm/sev.h
475
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
arch/x86/include/asm/sev.h
482
: "a"(vaddr), "c"(rmp_psize), "d"(attrs)
arch/x86/include/asm/sev.h
487
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
arch/x86/include/asm/sev.h
495
: "a"(vaddr), "c"(rmp_psize), "d"(validate)
arch/x86/include/asm/sev.h
506
void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
arch/x86/include/asm/sev.h
508
void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
arch/x86/include/asm/sev.h
510
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
arch/x86/include/asm/sev.h
511
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
arch/x86/include/asm/sev.h
609
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
arch/x86/include/asm/sev.h
610
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
arch/x86/include/asm/sev.h
613
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
arch/x86/include/asm/sev.h
615
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
arch/x86/include/asm/sev.h
616
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
arch/x86/include/asm/sev.h
617
static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
arch/x86/include/asm/vsyscall.h
32
static inline bool is_vsyscall_vaddr(unsigned long vaddr)
arch/x86/include/asm/vsyscall.h
34
return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
arch/x86/include/asm/x86_init.h
162
int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
arch/x86/include/asm/x86_init.h
163
int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
arch/x86/include/asm/xen/page.h
349
unsigned long arbitrary_virt_to_mfn(void *vaddr);
arch/x86/include/asm/xen/page.h
350
void make_lowmem_page_readonly(void *vaddr);
arch/x86/include/asm/xen/page.h
351
void make_lowmem_page_readwrite(void *vaddr);
arch/x86/kernel/amd_gart_64.c
468
void *vaddr;
arch/x86/kernel/amd_gart_64.c
470
vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
arch/x86/kernel/amd_gart_64.c
471
if (!vaddr ||
arch/x86/kernel/amd_gart_64.c
473
return vaddr;
arch/x86/kernel/amd_gart_64.c
475
*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
arch/x86/kernel/amd_gart_64.c
480
return vaddr;
arch/x86/kernel/amd_gart_64.c
482
dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
arch/x86/kernel/amd_gart_64.c
488
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
arch/x86/kernel/amd_gart_64.c
492
dma_direct_free(dev, size, vaddr, dma_addr, attrs);
arch/x86/kernel/crash_dump_32.c
34
void *vaddr;
arch/x86/kernel/crash_dump_32.c
42
vaddr = kmap_local_pfn(pfn);
arch/x86/kernel/crash_dump_32.c
43
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/x86/kernel/crash_dump_32.c
44
kunmap_local(vaddr);
arch/x86/kernel/crash_dump_64.c
19
void *vaddr;
arch/x86/kernel/crash_dump_64.c
25
vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/x86/kernel/crash_dump_64.c
27
vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/x86/kernel/crash_dump_64.c
29
if (!vaddr)
arch/x86/kernel/crash_dump_64.c
32
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/x86/kernel/crash_dump_64.c
34
iounmap((void __iomem *)vaddr);
arch/x86/kernel/early_printk.c
201
u32 __iomem *vaddr = (u32 __iomem *)addr;
arch/x86/kernel/early_printk.c
203
writel(value, vaddr + offset);
arch/x86/kernel/early_printk.c
209
u32 __iomem *vaddr = (u32 __iomem *)addr;
arch/x86/kernel/early_printk.c
211
return readl(vaddr + offset);
arch/x86/kernel/machine_kexec_32.c
81
unsigned long vaddr, unsigned long paddr)
arch/x86/kernel/machine_kexec_32.c
86
pgd += pgd_index(vaddr);
arch/x86/kernel/machine_kexec_32.c
91
p4d = p4d_offset(pgd, vaddr);
arch/x86/kernel/machine_kexec_32.c
92
pud = pud_offset(p4d, vaddr);
arch/x86/kernel/machine_kexec_32.c
93
pmd = pmd_offset(pud, vaddr);
arch/x86/kernel/machine_kexec_32.c
96
pte = pte_offset_kernel(pmd, vaddr);
arch/x86/kernel/machine_kexec_64.c
167
unsigned long vaddr, paddr;
arch/x86/kernel/machine_kexec_64.c
179
vaddr = (unsigned long)__va(control_page);
arch/x86/kernel/machine_kexec_64.c
181
pgd += pgd_index(vaddr);
arch/x86/kernel/machine_kexec_64.c
189
p4d = p4d_offset(pgd, vaddr);
arch/x86/kernel/machine_kexec_64.c
197
pud = pud_offset(p4d, vaddr);
arch/x86/kernel/machine_kexec_64.c
205
pmd = pmd_offset(pud, vaddr);
arch/x86/kernel/machine_kexec_64.c
213
pte = pte_offset_kernel(pmd, vaddr);
arch/x86/kernel/machine_kexec_64.c
702
int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
arch/x86/kernel/machine_kexec_64.c
712
return set_memory_decrypted((unsigned long)vaddr, pages);
arch/x86/kernel/machine_kexec_64.c
715
void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
arch/x86/kernel/machine_kexec_64.c
724
set_memory_encrypted((unsigned long)vaddr, pages);
arch/x86/kernel/tboot.c
113
static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
arch/x86/kernel/tboot.c
122
pgd = pgd_offset(&tboot_mm, vaddr);
arch/x86/kernel/tboot.c
123
p4d = p4d_alloc(&tboot_mm, pgd, vaddr);
arch/x86/kernel/tboot.c
126
pud = pud_alloc(&tboot_mm, p4d, vaddr);
arch/x86/kernel/tboot.c
129
pmd = pmd_alloc(&tboot_mm, pud, vaddr);
arch/x86/kernel/tboot.c
132
pte = pte_alloc_map(&tboot_mm, pmd, vaddr);
arch/x86/kernel/tboot.c
135
set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
arch/x86/kernel/tboot.c
151
static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
arch/x86/kernel/tboot.c
159
for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
arch/x86/kernel/tboot.c
160
if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC))
arch/x86/kernel/uprobes.c
1007
err = uprobe_write(auprobe, vma, vaddr, insn, 1, verify_insn,
arch/x86/kernel/uprobes.c
1018
unsigned long vaddr, unsigned long tramp)
arch/x86/kernel/uprobes.c
1022
__text_gen_insn(call, CALL_INSN_OPCODE, (const void *) vaddr,
arch/x86/kernel/uprobes.c
1024
return int3_update(auprobe, vma, vaddr, call, true /* optimize */);
arch/x86/kernel/uprobes.c
1028
unsigned long vaddr)
arch/x86/kernel/uprobes.c
1030
return int3_update(auprobe, vma, vaddr, auprobe->insn, false /* optimize */);
arch/x86/kernel/uprobes.c
1033
static int copy_from_vaddr(struct mm_struct *mm, unsigned long vaddr, void *dst, int len)
arch/x86/kernel/uprobes.c
1039
page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
arch/x86/kernel/uprobes.c
1042
uprobe_copy_from_page(page, vaddr, dst, len);
arch/x86/kernel/uprobes.c
1047
static bool __is_optimized(uprobe_opcode_t *insn, unsigned long vaddr)
arch/x86/kernel/uprobes.c
1056
return __in_uprobe_trampoline(vaddr + 5 + call->raddr);
arch/x86/kernel/uprobes.c
1059
static int is_optimized(struct mm_struct *mm, unsigned long vaddr)
arch/x86/kernel/uprobes.c
1064
err = copy_from_vaddr(mm, vaddr, &insn, 5);
arch/x86/kernel/uprobes.c
1067
return __is_optimized((uprobe_opcode_t *)&insn, vaddr);
arch/x86/kernel/uprobes.c
1077
unsigned long vaddr)
arch/x86/kernel/uprobes.c
1084
int ret = is_optimized(vma->vm_mm, vaddr);
arch/x86/kernel/uprobes.c
1090
return uprobe_write_opcode(auprobe, vma, vaddr, UPROBE_SWBP_INSN,
arch/x86/kernel/uprobes.c
1095
unsigned long vaddr)
arch/x86/kernel/uprobes.c
1098
int ret = is_optimized(vma->vm_mm, vaddr);
arch/x86/kernel/uprobes.c
1102
ret = swbp_unoptimize(auprobe, vma, vaddr);
arch/x86/kernel/uprobes.c
1107
return uprobe_write_opcode(auprobe, vma, vaddr, *(uprobe_opcode_t *)&auprobe->insn,
arch/x86/kernel/uprobes.c
1112
unsigned long vaddr)
arch/x86/kernel/uprobes.c
1119
vma = find_vma(mm, vaddr);
arch/x86/kernel/uprobes.c
1122
tramp = get_uprobe_trampoline(vaddr, &new);
arch/x86/kernel/uprobes.c
1125
err = swbp_optimize(auprobe, vma, vaddr, tramp->vaddr);
arch/x86/kernel/uprobes.c
1131
void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr)
arch/x86/kernel/uprobes.c
1145
if (copy_from_vaddr(mm, vaddr, &insn, 5))
arch/x86/kernel/uprobes.c
1154
if (__arch_uprobe_optimize(auprobe, mm, vaddr))
arch/x86/kernel/uprobes.c
1161
static bool can_optimize(struct insn *insn, unsigned long vaddr)
arch/x86/kernel/uprobes.c
1170
return PAGE_SIZE - (vaddr & ~PAGE_MASK) >= 5;
arch/x86/kernel/uprobes.c
1185
static bool can_optimize(struct insn *insn, unsigned long vaddr)
arch/x86/kernel/uprobes.c
1246
long correction = utask->vaddr - utask->xol_vaddr;
arch/x86/kernel/uprobes.c
1250
if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
arch/x86/kernel/uprobes.c
1669
regs->ip = utask->vaddr;
arch/x86/kernel/uprobes.c
1733
regs->ip = utask->vaddr;
arch/x86/kernel/uprobes.c
1831
unsigned long vaddr;
arch/x86/kernel/uprobes.c
1844
vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
arch/x86/kernel/uprobes.c
1847
return vaddr;
arch/x86/kernel/uprobes.c
607
*sr = utask->vaddr + auprobe->defparam.ilen;
arch/x86/kernel/uprobes.c
636
unsigned long vaddr;
arch/x86/kernel/uprobes.c
639
static bool is_reachable_by_call(unsigned long vtramp, unsigned long vaddr)
arch/x86/kernel/uprobes.c
641
long delta = (long)(vaddr + 5 - vtramp);
arch/x86/kernel/uprobes.c
646
static unsigned long find_nearest_trampoline(unsigned long vaddr)
arch/x86/kernel/uprobes.c
654
unsigned long call_end = vaddr + 5;
arch/x86/kernel/uprobes.c
685
static struct uprobe_trampoline *create_uprobe_trampoline(unsigned long vaddr)
arch/x86/kernel/uprobes.c
695
vaddr = find_nearest_trampoline(vaddr);
arch/x86/kernel/uprobes.c
696
if (IS_ERR_VALUE(vaddr))
arch/x86/kernel/uprobes.c
703
tramp->vaddr = vaddr;
arch/x86/kernel/uprobes.c
704
vma = _install_special_mapping(mm, tramp->vaddr, PAGE_SIZE,
arch/x86/kernel/uprobes.c
714
static struct uprobe_trampoline *get_uprobe_trampoline(unsigned long vaddr, bool *new)
arch/x86/kernel/uprobes.c
719
if (vaddr > TASK_SIZE || vaddr < PAGE_SIZE)
arch/x86/kernel/uprobes.c
723
if (is_reachable_by_call(tramp->vaddr, vaddr)) {
arch/x86/kernel/uprobes.c
729
tramp = create_uprobe_trampoline(vaddr);
arch/x86/kernel/uprobes.c
929
static int verify_insn(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode,
arch/x86/kernel/uprobes.c
966
unsigned long vaddr, char *insn, bool optimize)
arch/x86/kernel/uprobes.c
970
.base = vaddr,
arch/x86/kernel/uprobes.c
982
err = uprobe_write(auprobe, vma, vaddr, &int3, 1, verify_insn,
arch/x86/kernel/uprobes.c
993
err = uprobe_write(auprobe, vma, vaddr + 1, insn + 1, 4, verify_insn,
arch/x86/kernel/x86_init.c
138
static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
arch/x86/kernel/x86_init.c
139
static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
arch/x86/kvm/mmu/mmu.c
4337
gpa_t vaddr, u64 access,
arch/x86/kvm/mmu/mmu.c
4342
return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
arch/x86/kvm/svm/sev.c
1213
void __user *vaddr,
arch/x86/kvm/svm/sev.c
1223
if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
arch/x86/kvm/svm/sev.c
1228
if (copy_from_user(page_address(src_tpage), vaddr, size)) {
arch/x86/kvm/svm/sev.c
1267
vaddr, size)) {
arch/x86/kvm/svm/sev.c
1290
unsigned long vaddr, vaddr_end, next_vaddr;
arch/x86/kvm/svm/sev.c
1309
vaddr = debug.src_uaddr;
arch/x86/kvm/svm/sev.c
1311
vaddr_end = vaddr + size;
arch/x86/kvm/svm/sev.c
1314
for (; vaddr < vaddr_end; vaddr = next_vaddr) {
arch/x86/kvm/svm/sev.c
1318
src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
arch/x86/kvm/svm/sev.c
1340
s_off = vaddr & ~PAGE_MASK;
arch/x86/kvm/svm/sev.c
1353
(void __user *)vaddr,
arch/x86/kvm/svm/sev.c
1364
next_vaddr = vaddr + len;
arch/x86/kvm/svm/sev.c
810
unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
arch/x86/kvm/svm/sev.c
822
vaddr = params.uaddr;
arch/x86/kvm/svm/sev.c
824
vaddr_end = vaddr + size;
arch/x86/kvm/svm/sev.c
827
inpages = sev_pin_memory(kvm, vaddr, size, &npages, FOLL_WRITE);
arch/x86/kvm/svm/sev.c
840
for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
arch/x86/kvm/svm/sev.c
847
offset = vaddr & (PAGE_SIZE - 1);
arch/x86/kvm/svm/sev.c
861
next_vaddr = vaddr + len;
arch/x86/kvm/x86.c
12586
unsigned long vaddr = tr->linear_address;
arch/x86/kvm/x86.c
12593
gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
arch/x86/mm/init_32.c
107
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
arch/x86/mm/init_32.c
109
int pgd_idx = pgd_index(vaddr);
arch/x86/mm/init_32.c
110
int pmd_idx = pmd_index(vaddr);
arch/x86/mm/init_32.c
115
pte_t * __init populate_extra_pte(unsigned long vaddr)
arch/x86/mm/init_32.c
117
int pte_idx = pte_index(vaddr);
arch/x86/mm/init_32.c
120
pmd = populate_extra_pmd(vaddr);
arch/x86/mm/init_32.c
132
unsigned long vaddr;
arch/x86/mm/init_32.c
137
vaddr = start;
arch/x86/mm/init_32.c
138
pgd_idx = pgd_index(vaddr);
arch/x86/mm/init_32.c
139
pmd_idx = pmd_index(vaddr);
arch/x86/mm/init_32.c
141
for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
arch/x86/mm/init_32.c
142
for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
arch/x86/mm/init_32.c
144
if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
arch/x86/mm/init_32.c
145
(vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
arch/x86/mm/init_32.c
147
vaddr += PMD_SIZE;
arch/x86/mm/init_32.c
156
unsigned long vaddr, pte_t *lastpte,
arch/x86/mm/init_32.c
170
&& (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
arch/x86/mm/init_32.c
171
&& (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
arch/x86/mm/init_32.c
187
BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
arch/x86/mm/init_32.c
188
&& vaddr > fix_to_virt(FIX_KMAP_END)
arch/x86/mm/init_32.c
207
unsigned long vaddr;
arch/x86/mm/init_32.c
217
vaddr = start;
arch/x86/mm/init_32.c
218
pgd_idx = pgd_index(vaddr);
arch/x86/mm/init_32.c
219
pmd_idx = pmd_index(vaddr);
arch/x86/mm/init_32.c
222
for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
arch/x86/mm/init_32.c
224
pmd = pmd + pmd_index(vaddr);
arch/x86/mm/init_32.c
225
for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
arch/x86/mm/init_32.c
228
pmd, vaddr, pte, &adr);
arch/x86/mm/init_32.c
230
vaddr += PMD_SIZE;
arch/x86/mm/init_32.c
391
unsigned long vaddr = PKMAP_BASE;
arch/x86/mm/init_32.c
393
page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
arch/x86/mm/init_32.c
395
pkmap_page_table = virt_to_kpte(vaddr);
arch/x86/mm/init_32.c
481
unsigned long vaddr, end;
arch/x86/mm/init_32.c
487
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
arch/x86/mm/init_32.c
489
page_table_range_init(vaddr, end, pgd_base);
arch/x86/mm/init_64.c
267
static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
arch/x86/mm/init_64.c
276
return p4d_offset(pgd, vaddr);
arch/x86/mm/init_64.c
279
static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
arch/x86/mm/init_64.c
288
return pud_offset(p4d, vaddr);
arch/x86/mm/init_64.c
291
static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
arch/x86/mm/init_64.c
300
return pmd_offset(pud, vaddr);
arch/x86/mm/init_64.c
303
static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
arch/x86/mm/init_64.c
311
return pte_offset_kernel(pmd, vaddr);
arch/x86/mm/init_64.c
314
static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
arch/x86/mm/init_64.c
316
pmd_t *pmd = fill_pmd(pud, vaddr);
arch/x86/mm/init_64.c
317
pte_t *pte = fill_pte(pmd, vaddr);
arch/x86/mm/init_64.c
325
flush_tlb_one_kernel(vaddr);
arch/x86/mm/init_64.c
328
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
arch/x86/mm/init_64.c
330
p4d_t *p4d = p4d_page + p4d_index(vaddr);
arch/x86/mm/init_64.c
331
pud_t *pud = fill_pud(p4d, vaddr);
arch/x86/mm/init_64.c
333
__set_pte_vaddr(pud, vaddr, new_pte);
arch/x86/mm/init_64.c
336
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
arch/x86/mm/init_64.c
338
pud_t *pud = pud_page + pud_index(vaddr);
arch/x86/mm/init_64.c
340
__set_pte_vaddr(pud, vaddr, new_pte);
arch/x86/mm/init_64.c
343
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
arch/x86/mm/init_64.c
348
pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
arch/x86/mm/init_64.c
350
pgd = pgd_offset_k(vaddr);
arch/x86/mm/init_64.c
358
set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
arch/x86/mm/init_64.c
361
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
arch/x86/mm/init_64.c
367
pgd = pgd_offset_k(vaddr);
arch/x86/mm/init_64.c
368
p4d = fill_p4d(pgd, vaddr);
arch/x86/mm/init_64.c
369
pud = fill_pud(p4d, vaddr);
arch/x86/mm/init_64.c
370
return fill_pmd(pud, vaddr);
arch/x86/mm/init_64.c
373
pte_t * __init populate_extra_pte(unsigned long vaddr)
arch/x86/mm/init_64.c
377
pmd = populate_extra_pmd(vaddr);
arch/x86/mm/init_64.c
378
return fill_pte(pmd, vaddr);
arch/x86/mm/init_64.c
446
unsigned long vaddr = __START_KERNEL_map;
arch/x86/mm/init_64.c
459
for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
arch/x86/mm/init_64.c
462
if (vaddr < (unsigned long) _text || vaddr > end)
arch/x86/mm/init_64.c
612
unsigned long vaddr = (unsigned long)__va(paddr);
arch/x86/mm/init_64.c
613
int i = pud_index(vaddr);
arch/x86/mm/init_64.c
620
vaddr = (unsigned long)__va(paddr);
arch/x86/mm/init_64.c
621
pud = pud_page + pud_index(vaddr);
arch/x86/mm/init_64.c
693
unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
arch/x86/mm/init_64.c
696
vaddr = (unsigned long)__va(paddr);
arch/x86/mm/init_64.c
703
for (; vaddr < vaddr_end; vaddr = vaddr_next) {
arch/x86/mm/init_64.c
704
p4d_t *p4d = p4d_page + p4d_index(vaddr);
arch/x86/mm/init_64.c
707
vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
arch/x86/mm/init_64.c
708
paddr = __pa(vaddr);
arch/x86/mm/init_64.c
747
unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
arch/x86/mm/init_64.c
750
vaddr = (unsigned long)__va(paddr_start);
arch/x86/mm/init_64.c
752
vaddr_start = vaddr;
arch/x86/mm/init_64.c
754
for (; vaddr < vaddr_end; vaddr = vaddr_next) {
arch/x86/mm/init_64.c
755
pgd_t *pgd = pgd_offset_k(vaddr);
arch/x86/mm/init_64.c
758
vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
arch/x86/mm/init_64.c
762
paddr_last = phys_p4d_init(p4d, __pa(vaddr),
arch/x86/mm/init_64.c
770
paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
arch/x86/mm/init_64.c
777
p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
arch/x86/mm/ioremap.c
187
unsigned long offset, vaddr;
arch/x86/mm/ioremap.c
296
vaddr = (unsigned long) area->addr;
arch/x86/mm/ioremap.c
301
if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
arch/x86/mm/ioremap.c
304
ret_addr = (void __iomem *) (vaddr + offset);
arch/x86/mm/ioremap.c
45
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
arch/x86/mm/ioremap.c
521
void *vaddr;
arch/x86/mm/ioremap.c
524
vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
arch/x86/mm/ioremap.c
527
if (vaddr)
arch/x86/mm/ioremap.c
528
vaddr += offset;
arch/x86/mm/ioremap.c
530
return vaddr;
arch/x86/mm/ioremap.c
54
err = _set_memory_uc(vaddr, nrpages);
arch/x86/mm/ioremap.c
57
err = _set_memory_wc(vaddr, nrpages);
arch/x86/mm/ioremap.c
60
err = _set_memory_wt(vaddr, nrpages);
arch/x86/mm/ioremap.c
63
err = _set_memory_wb(vaddr, nrpages);
arch/x86/mm/kaslr.c
151
vaddr += entropy;
arch/x86/mm/kaslr.c
152
*kaslr_regions[i].base = vaddr;
arch/x86/mm/kaslr.c
155
vaddr += get_padding(&kaslr_regions[i]);
arch/x86/mm/kaslr.c
163
*kaslr_regions[i].end = __pa_nodebug(vaddr - 1);
arch/x86/mm/kaslr.c
166
vaddr = round_up(vaddr + 1, PUD_SIZE);
arch/x86/mm/kaslr.c
175
unsigned long paddr, vaddr;
arch/x86/mm/kaslr.c
188
vaddr = (unsigned long)__va(paddr);
arch/x86/mm/kaslr.c
189
pgd = pgd_offset_k(vaddr);
arch/x86/mm/kaslr.c
191
p4d = p4d_offset(pgd, vaddr);
arch/x86/mm/kaslr.c
192
pud = pud_offset(p4d, vaddr);
arch/x86/mm/kaslr.c
82
unsigned long vaddr_start, vaddr;
arch/x86/mm/kaslr.c
89
vaddr = vaddr_start;
arch/x86/mm/maccess.c
11
unsigned long vaddr = (unsigned long)unsafe_src;
arch/x86/mm/maccess.c
17
if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
arch/x86/mm/maccess.c
25
if (is_vsyscall_vaddr(vaddr))
arch/x86/mm/maccess.c
36
return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
arch/x86/mm/mem_encrypt_amd.c
156
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
arch/x86/mm/mem_encrypt_amd.c
159
unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
arch/x86/mm/mem_encrypt_amd.c
167
__early_make_pgtable((unsigned long)vaddr, pmd);
arch/x86/mm/mem_encrypt_amd.c
169
vaddr += PMD_SIZE;
arch/x86/mm/mem_encrypt_amd.c
256
static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
arch/x86/mm/mem_encrypt_amd.c
259
unsigned long vaddr_end = vaddr + size;
arch/x86/mm/mem_encrypt_amd.c
261
while (vaddr < vaddr_end) {
arch/x86/mm/mem_encrypt_amd.c
266
kpte = lookup_address(vaddr, &level);
arch/x86/mm/mem_encrypt_amd.c
281
vaddr = (vaddr & pmask) + psize;
arch/x86/mm/mem_encrypt_amd.c
286
static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
arch/x86/mm/mem_encrypt_amd.c
293
snp_set_memory_shared(vaddr, npages);
arch/x86/mm/mem_encrypt_amd.c
299
static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
arch/x86/mm/mem_encrypt_amd.c
306
snp_set_memory_private(vaddr, npages);
arch/x86/mm/mem_encrypt_amd.c
309
enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
arch/x86/mm/mem_encrypt_amd.c
392
static int __init early_set_memory_enc_dec(unsigned long vaddr,
arch/x86/mm/mem_encrypt_amd.c
401
start = vaddr;
arch/x86/mm/mem_encrypt_amd.c
402
vaddr_next = vaddr;
arch/x86/mm/mem_encrypt_amd.c
403
vaddr_end = vaddr + size;
arch/x86/mm/mem_encrypt_amd.c
405
for (; vaddr < vaddr_end; vaddr = vaddr_next) {
arch/x86/mm/mem_encrypt_amd.c
406
kpte = lookup_address(vaddr, &level);
arch/x86/mm/mem_encrypt_amd.c
414
vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
arch/x86/mm/mem_encrypt_amd.c
427
if (vaddr == (vaddr & pmask) &&
arch/x86/mm/mem_encrypt_amd.c
428
((vaddr_end - vaddr) >= psize)) {
arch/x86/mm/mem_encrypt_amd.c
430
vaddr_next = (vaddr & pmask) + psize;
arch/x86/mm/mem_encrypt_amd.c
449
kernel_physical_mapping_change(__pa(vaddr & pmask),
arch/x86/mm/mem_encrypt_amd.c
462
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
arch/x86/mm/mem_encrypt_amd.c
464
return early_set_memory_enc_dec(vaddr, size, false);
arch/x86/mm/mem_encrypt_amd.c
467
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
arch/x86/mm/mem_encrypt_amd.c
469
return early_set_memory_enc_dec(vaddr, size, true);
arch/x86/mm/mem_encrypt_amd.c
472
void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
arch/x86/mm/mem_encrypt_amd.c
474
enc_dec_hypercall(vaddr, size, enc);
arch/x86/mm/mem_encrypt_amd.c
545
unsigned long vaddr, vaddr_end, npages;
arch/x86/mm/mem_encrypt_amd.c
548
vaddr = (unsigned long)__start_bss_decrypted_unused;
arch/x86/mm/mem_encrypt_amd.c
550
npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
arch/x86/mm/mem_encrypt_amd.c
561
r = set_memory_encrypted(vaddr, npages);
arch/x86/mm/mem_encrypt_amd.c
568
free_init_pages("unused decrypted", vaddr, vaddr_end);
arch/x86/mm/pat/set_memory.c
1784
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
arch/x86/mm/pat/set_memory.c
1793
return populate_pgd(cpa, vaddr);
arch/x86/mm/pat/set_memory.c
1811
if (within(vaddr, PAGE_OFFSET,
arch/x86/mm/pat/set_memory.c
1814
cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
arch/x86/mm/pat/set_memory.c
1822
"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
arch/x86/mm/pat/set_memory.c
1823
*cpa->vaddr);
arch/x86/mm/pat/set_memory.c
1916
unsigned long vaddr;
arch/x86/mm/pat/set_memory.c
1926
vaddr = __cpa_addr(cpa, cpa->curpage);
arch/x86/mm/pat/set_memory.c
1927
if (!(within(vaddr, PAGE_OFFSET,
arch/x86/mm/pat/set_memory.c
1931
alias_cpa.vaddr = &laddr;
arch/x86/mm/pat/set_memory.c
1954
if (!within(vaddr, (unsigned long)_text, _brk_end) &&
arch/x86/mm/pat/set_memory.c
1959
alias_cpa.vaddr = &temp_cpa_vaddr;
arch/x86/mm/pat/set_memory.c
2084
cpa.vaddr = addr;
arch/x86/mm/pat/set_memory.c
2374
cpa.vaddr = &addr;
arch/x86/mm/pat/set_memory.c
2593
struct cpa_data cpa = { .vaddr = &tempaddr,
arch/x86/mm/pat/set_memory.c
2612
struct cpa_data cpa = { .vaddr = &tempaddr,
arch/x86/mm/pat/set_memory.c
2698
.vaddr = &address,
arch/x86/mm/pat/set_memory.c
2741
.vaddr = &address,
arch/x86/mm/pat/set_memory.c
318
return cpa->vaddr[idx];
arch/x86/mm/pat/set_memory.c
320
return *cpa->vaddr + idx * PAGE_SIZE;
arch/x86/mm/pat/set_memory.c
327
static void clflush_cache_range_opt(void *vaddr, unsigned int size)
arch/x86/mm/pat/set_memory.c
330
void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
arch/x86/mm/pat/set_memory.c
331
void *vend = vaddr + size;
arch/x86/mm/pat/set_memory.c
348
void clflush_cache_range(void *vaddr, unsigned int size)
arch/x86/mm/pat/set_memory.c
351
clflush_cache_range_opt(vaddr, size);
arch/x86/mm/pat/set_memory.c
42
unsigned long *vaddr;
arch/x86/mm/pgtable_32.c
27
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
arch/x86/mm/pgtable_32.c
35
pgd = swapper_pg_dir + pgd_index(vaddr);
arch/x86/mm/pgtable_32.c
40
p4d = p4d_offset(pgd, vaddr);
arch/x86/mm/pgtable_32.c
45
pud = pud_offset(p4d, vaddr);
arch/x86/mm/pgtable_32.c
50
pmd = pmd_offset(pud, vaddr);
arch/x86/mm/pgtable_32.c
55
pte = pte_offset_kernel(pmd, vaddr);
arch/x86/mm/pgtable_32.c
57
set_pte_at(&init_mm, vaddr, pte, pteval);
arch/x86/mm/pgtable_32.c
59
pte_clear(&init_mm, vaddr, pte);
arch/x86/mm/pgtable_32.c
65
flush_tlb_one_kernel(vaddr);
arch/x86/virt/svm/sev.c
912
unsigned long vaddr;
arch/x86/virt/svm/sev.c
921
vaddr = (unsigned long)pfn_to_kaddr(pfn);
arch/x86/virt/svm/sev.c
943
pte = lookup_address(vaddr, &level);
arch/x86/virt/svm/sev.c
951
ret = set_memory_4k(vaddr, npages);
arch/x86/xen/grant-table.c
163
&xen_auto_xlat_grant_frames.vaddr,
arch/x86/xen/mmu.c
10
unsigned long arbitrary_virt_to_mfn(void *vaddr)
arch/x86/xen/mmu.c
12
xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
arch/x86/xen/mmu.c
17
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
arch/x86/xen/mmu.c
19
unsigned long address = (unsigned long)vaddr;
arch/x86/xen/mmu.c
28
if (virt_addr_valid(vaddr))
arch/x86/xen/mmu.c
29
return virt_to_machine(vaddr);
arch/x86/xen/mmu_pv.c
1067
static void __init xen_cleanhighmap(unsigned long vaddr,
arch/x86/xen/mmu_pv.c
1071
pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
arch/x86/xen/mmu_pv.c
1075
for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
arch/x86/xen/mmu_pv.c
1076
pmd++, vaddr += PMD_SIZE) {
arch/x86/xen/mmu_pv.c
1079
if (vaddr < (unsigned long) _text || vaddr > kernel_end)
arch/x86/xen/mmu_pv.c
1092
void *vaddr = __va(paddr);
arch/x86/xen/mmu_pv.c
1093
void *vaddr_end = vaddr + size;
arch/x86/xen/mmu_pv.c
1095
for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
arch/x86/xen/mmu_pv.c
1096
make_lowmem_page_readwrite(vaddr);
arch/x86/xen/mmu_pv.c
1183
static void __init xen_cleanmfnmap(unsigned long vaddr)
arch/x86/xen/mmu_pv.c
1189
unpin = (vaddr == 2 * PGDIR_SIZE);
arch/x86/xen/mmu_pv.c
1190
vaddr &= PMD_MASK;
arch/x86/xen/mmu_pv.c
1191
pgd = pgd_offset_k(vaddr);
arch/x86/xen/mmu_pv.c
189
void make_lowmem_page_readonly(void *vaddr)
arch/x86/xen/mmu_pv.c
1895
unsigned long *vaddr;
arch/x86/xen/mmu_pv.c
1898
vaddr = early_memremap_ro(addr, sizeof(val));
arch/x86/xen/mmu_pv.c
1899
val = *vaddr;
arch/x86/xen/mmu_pv.c
1900
early_memunmap(vaddr, sizeof(val));
arch/x86/xen/mmu_pv.c
1909
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
arch/x86/xen/mmu_pv.c
1918
pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
arch/x86/xen/mmu_pv.c
192
unsigned long address = (unsigned long)vaddr;
arch/x86/xen/mmu_pv.c
1924
pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
arch/x86/xen/mmu_pv.c
1930
return pa + (vaddr & ~PUD_MASK);
arch/x86/xen/mmu_pv.c
1932
pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
arch/x86/xen/mmu_pv.c
1938
return pa + (vaddr & ~PMD_MASK);
arch/x86/xen/mmu_pv.c
1940
pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
arch/x86/xen/mmu_pv.c
1946
return pa | (vaddr & ~PAGE_MASK);
arch/x86/xen/mmu_pv.c
205
void make_lowmem_page_readwrite(void *vaddr)
arch/x86/xen/mmu_pv.c
208
unsigned long address = (unsigned long)vaddr;
arch/x86/xen/mmu_pv.c
2093
unsigned long vaddr;
arch/x86/xen/mmu_pv.c
2134
vaddr = __fix_to_virt(idx);
arch/x86/xen/mmu_pv.c
2135
if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG))
arch/x86/xen/mmu_pv.c
2142
set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
arch/x86/xen/mmu_pv.c
2237
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
arch/x86/xen/mmu_pv.c
2245
for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
arch/x86/xen/mmu_pv.c
2249
in_frames[i] = virt_to_mfn((void *)vaddr);
arch/x86/xen/mmu_pv.c
2251
MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
arch/x86/xen/mmu_pv.c
2252
__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
arch/x86/xen/mmu_pv.c
2255
out_frames[i] = virt_to_pfn((void *)vaddr);
arch/x86/xen/mmu_pv.c
2265
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
arch/x86/xen/mmu_pv.c
2275
for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
arch/x86/xen/mmu_pv.c
2294
MULTI_update_va_mapping(mcs.mc, vaddr,
arch/x86/xen/mmu_pv.c
2297
set_phys_to_machine(virt_to_pfn((void *)vaddr), mfn);
arch/x86/xen/mmu_pv.c
308
void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
arch/x86/xen/mmu_pv.c
310
if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags),
arch/x86/xen/p2m.c
465
unsigned long vaddr;
arch/x86/xen/p2m.c
479
vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);
arch/x86/xen/p2m.c
485
pmdp = lookup_pmd_address(vaddr);
arch/x86/xen/p2m.c
490
ptechk = lookup_address(vaddr, &level);
arch/x86/xen/p2m.c
508
vaddr += PMD_SIZE;
arch/x86/xen/xen-ops.h
198
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
arch/xtensa/include/asm/cacheflush.h
167
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
arch/xtensa/include/asm/cacheflush.h
174
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
arch/xtensa/include/asm/highmem.h
78
#define arch_kmap_local_post_unmap(vaddr) \
arch/xtensa/include/asm/highmem.h
79
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
arch/xtensa/include/asm/page.h
119
extern void clear_page_alias(void *vaddr, unsigned long paddr);
arch/xtensa/include/asm/page.h
124
void clear_user_highpage(struct page *page, unsigned long vaddr);
arch/xtensa/include/asm/page.h
127
unsigned long vaddr, struct vm_area_struct *vma);
arch/xtensa/include/asm/page.h
129
# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
arch/xtensa/include/asm/pgtable.h
214
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
arch/xtensa/kernel/pci-dma.c
36
void *vaddr = kmap_atomic(page);
arch/xtensa/kernel/pci-dma.c
38
fn((unsigned long)vaddr + off, sz);
arch/xtensa/kernel/pci-dma.c
39
kunmap_atomic(vaddr);
arch/xtensa/kernel/setup.c
407
unsigned long vaddr = (unsigned long)cpu_reset;
arch/xtensa/kernel/setup.c
408
unsigned long paddr = __pa(vaddr);
arch/xtensa/kernel/setup.c
409
unsigned long tmpaddr = vaddr + SZ_512M;
arch/xtensa/kernel/setup.c
492
: "a"(tmpaddr - vaddr),
arch/xtensa/kernel/setup.c
493
"a"(paddr - vaddr),
arch/xtensa/mm/cache.c
103
unsigned long vaddr, struct vm_area_struct *vma)
arch/xtensa/mm/cache.c
107
void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
arch/xtensa/mm/cache.c
109
void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
arch/xtensa/mm/cache.c
113
kmap_invalidate_coherent(dst, vaddr);
arch/xtensa/mm/cache.c
274
unsigned long vaddr, void *dst, const void *src,
arch/xtensa/mm/cache.c
278
unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
arch/xtensa/mm/cache.c
283
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
arch/xtensa/mm/cache.c
299
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
arch/xtensa/mm/cache.c
314
unsigned long vaddr, void *dst, const void *src,
arch/xtensa/mm/cache.c
318
unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
arch/xtensa/mm/cache.c
326
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
arch/xtensa/mm/cache.c
60
unsigned long vaddr)
arch/xtensa/mm/cache.c
62
if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
arch/xtensa/mm/cache.c
82
unsigned long vaddr, unsigned long *paddr)
arch/xtensa/mm/cache.c
85
return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
arch/xtensa/mm/cache.c
88
void clear_user_highpage(struct page *page, unsigned long vaddr)
arch/xtensa/mm/cache.c
92
void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
arch/xtensa/mm/cache.c
95
kmap_invalidate_coherent(page, vaddr);
arch/xtensa/mm/kasan_init.c
20
unsigned long vaddr = KASAN_SHADOW_START;
arch/xtensa/mm/kasan_init.c
21
pmd_t *pmd = pmd_off_k(vaddr);
arch/xtensa/mm/kasan_init.c
29
for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
arch/xtensa/mm/kasan_init.c
40
unsigned long vaddr = (unsigned long)start;
arch/xtensa/mm/kasan_init.c
41
pmd_t *pmd = pmd_off_k(vaddr);
arch/xtensa/mm/mmu.c
24
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
arch/xtensa/mm/mmu.c
26
pmd_t *pmd = pmd_off_k(vaddr);
arch/xtensa/mm/mmu.c
33
__func__, vaddr, n_pages);
arch/xtensa/mm/tlb.c
174
static unsigned get_pte_for_vaddr(unsigned vaddr)
arch/xtensa/mm/tlb.c
187
pgd = pgd_offset(mm, vaddr);
arch/xtensa/mm/tlb.c
190
p4d = p4d_offset(pgd, vaddr);
arch/xtensa/mm/tlb.c
193
pud = pud_offset(p4d, vaddr);
arch/xtensa/mm/tlb.c
196
pmd = pmd_offset(pud, vaddr);
arch/xtensa/mm/tlb.c
199
pte = pte_offset_map(pmd, vaddr);
block/bio.c
1046
void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
block/bio.c
1048
__bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
block/bio.c
1138
unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
block/bio.c
1140
unsigned int offset = offset_in_page(vaddr);
block/bio.c
1143
if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
block/bio.c
1146
flush_kernel_vmap_range(vaddr, len);
block/bio.c
1164
bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
block/bio.c
1167
unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);
block/bio.c
1171
vaddr += added;
drivers/accel/amdxdna/aie2_ctx.c
578
.vaddr = 0,
drivers/accel/amdxdna/amdxdna_gem.c
393
static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
drivers/accel/amdxdna/amdxdna_gem.c
403
*vaddr = map.vaddr;
drivers/accel/amdxdna/amdxdna_gem.c
551
if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
drivers/accel/amdxdna/amdxdna_gem.c
561
u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
drivers/accel/amdxdna/amdxdna_gem.c
586
if (args->vaddr)
drivers/accel/amdxdna/amdxdna_gem.c
773
args->type, args->vaddr, args->size, args->flags);
drivers/accel/amdxdna/amdxdna_gem.c
907
args->vaddr = abo->mem.userptr;
drivers/accel/amdxdna/amdxdna_gem.c
916
args->handle, args->map_offset, args->vaddr, args->xdna_addr);
drivers/accel/amdxdna/amdxdna_ubuf.c
123
vunmap(map->vaddr);
drivers/accel/amdxdna/amdxdna_ubuf.c
172
if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
drivers/accel/amdxdna/amdxdna_ubuf.c
175
va_ent[i].vaddr, va_ent[i].len);
drivers/accel/amdxdna/amdxdna_ubuf.c
205
ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
drivers/accel/ethosu/ethosu_gem.c
360
u32 *bocmds = bo->base.vaddr;
drivers/accel/ethosu/ethosu_job.c
249
u32 *bocmds = to_drm_gem_dma_obj(job->cmd_bo)->vaddr;
drivers/accel/habanalabs/common/debugfs.c
271
hnode->vaddr, userptr->size);
drivers/accel/habanalabs/common/debugfs.c
276
hnode->vaddr, phys_pg_pack->total_size,
drivers/accel/habanalabs/common/debugfs.c
293
lnode->vaddr, lnode->block_size, lnode->mapped_size,
drivers/accel/habanalabs/common/debugfs.c
908
if ((virt_addr >= hnode->vaddr) &&
drivers/accel/habanalabs/common/debugfs.c
909
(end_address <= hnode->vaddr + range_size)) {
drivers/accel/habanalabs/common/device.c
178
void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
drivers/accel/habanalabs/common/device.c
181
hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
drivers/accel/habanalabs/common/device.c
189
void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
drivers/accel/habanalabs/common/device.c
191
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
drivers/accel/habanalabs/common/device.c
2764
pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
drivers/accel/habanalabs/common/device.c
2769
pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
drivers/accel/habanalabs/common/firmware_if.c
642
void *vaddr)
drivers/accel/habanalabs/common/firmware_if.c
644
gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
drivers/accel/habanalabs/common/habanalabs.h
159
#define hl_asic_dma_pool_free(hdev, vaddr, dma_addr) \
drivers/accel/habanalabs/common/habanalabs.h
160
hl_asic_dma_pool_free_caller(hdev, vaddr, dma_addr, __func__)
drivers/accel/habanalabs/common/habanalabs.h
1673
void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
drivers/accel/habanalabs/common/habanalabs.h
1678
size_t size, void *vaddr);
drivers/accel/habanalabs/common/habanalabs.h
2192
u64 vaddr;
drivers/accel/habanalabs/common/habanalabs.h
2211
unsigned long vaddr;
drivers/accel/habanalabs/common/habanalabs.h
3756
void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
drivers/accel/habanalabs/common/habanalabs.h
3763
void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
drivers/accel/habanalabs/common/habanalabs.h
4006
void *vaddr);
drivers/accel/habanalabs/common/memory.c
1005
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
drivers/accel/habanalabs/common/memory.c
1013
is_host_addr = !hl_is_dram_va(hdev, vaddr);
drivers/accel/habanalabs/common/memory.c
1015
next_vaddr = vaddr;
drivers/accel/habanalabs/common/memory.c
1215
hnode->vaddr = ret_vaddr;
drivers/accel/habanalabs/common/memory.c
1251
static struct hl_vm_hash_node *get_vm_hash_node_locked(struct hl_ctx *ctx, u64 vaddr)
drivers/accel/habanalabs/common/memory.c
1255
hash_for_each_possible(ctx->mem_hash, hnode, node, vaddr)
drivers/accel/habanalabs/common/memory.c
1256
if (vaddr == hnode->vaddr)
drivers/accel/habanalabs/common/memory.c
1276
u64 vaddr = args->unmap.device_virt_addr;
drivers/accel/habanalabs/common/memory.c
1290
hnode = get_vm_hash_node_locked(ctx, vaddr);
drivers/accel/habanalabs/common/memory.c
1293
dev_err(hdev->dev, "unmap failed, no mem hnode for vaddr 0x%llx\n", vaddr);
drivers/accel/habanalabs/common/memory.c
1299
dev_err(hdev->dev, "failed to unmap %#llx, memory is exported\n", vaddr);
drivers/accel/habanalabs/common/memory.c
1317
vaddr);
drivers/accel/habanalabs/common/memory.c
1333
vaddr);
drivers/accel/habanalabs/common/memory.c
1339
dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
drivers/accel/habanalabs/common/memory.c
1345
vaddr = prop->dram_base_address +
drivers/accel/habanalabs/common/memory.c
1346
DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
drivers/accel/habanalabs/common/memory.c
1350
vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
drivers/accel/habanalabs/common/memory.c
1354
unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
drivers/accel/habanalabs/common/memory.c
1362
rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
drivers/accel/habanalabs/common/memory.c
1376
tmp_rc = add_va_block(hdev, va_range, vaddr,
drivers/accel/habanalabs/common/memory.c
1377
vaddr + phys_pg_pack->total_size - 1);
drivers/accel/habanalabs/common/memory.c
1381
vaddr);
drivers/accel/habanalabs/common/memory.c
1402
hash_add(ctx->mem_hash, &hnode->node, vaddr);
drivers/accel/habanalabs/common/memory.c
1498
lnode->vaddr = vma->vm_start;
drivers/accel/habanalabs/common/memory.c
2775
hnode->vaddr, ctx->asid);
drivers/accel/habanalabs/common/memory.c
2776
args.unmap.device_virt_addr = hnode->vaddr;
drivers/accel/habanalabs/common/memory.c
942
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
drivers/accel/habanalabs/common/memory.c
946
u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
drivers/accel/habanalabs/common/memory.c
971
is_host_addr = !hl_is_dram_va(hdev, vaddr);
drivers/accel/habanalabs/common/memory.c
973
next_vaddr = vaddr;
drivers/accel/habanalabs/gaudi/gaudi.c
4835
static void gaudi_dma_pool_free(struct hl_device *hdev, void *vaddr,
drivers/accel/habanalabs/gaudi/gaudi.c
4841
dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
drivers/accel/habanalabs/gaudi/gaudi.c
4851
size_t size, void *vaddr)
drivers/accel/habanalabs/gaudi/gaudi.c
4853
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
drivers/accel/habanalabs/gaudi2/gaudi2.c
7116
static void gaudi2_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr)
drivers/accel/habanalabs/gaudi2/gaudi2.c
7118
dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
drivers/accel/habanalabs/gaudi2/gaudi2.c
7127
static void gaudi2_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
drivers/accel/habanalabs/gaudi2/gaudi2.c
7129
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
drivers/accel/habanalabs/goya/goya.c
3259
static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
drivers/accel/habanalabs/goya/goya.c
3265
dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
drivers/accel/habanalabs/goya/goya.c
3271
void *vaddr;
drivers/accel/habanalabs/goya/goya.c
3273
vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
drivers/accel/habanalabs/goya/goya.c
3277
return vaddr;
drivers/accel/habanalabs/goya/goya.c
3281
void *vaddr)
drivers/accel/habanalabs/goya/goya.c
3283
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
drivers/accel/habanalabs/goya/goyaP.h
244
void *vaddr);
drivers/accel/ivpu/ivpu_gem.c
320
drm_WARN_ON(&vdev->drm, bo->base.vaddr);
drivers/accel/ivpu/ivpu_gem.c
474
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
drivers/accel/ivpu/ivpu_gem.h
54
return bo->base.vaddr;
drivers/acpi/apei/erst.c
1253
erst_erange.vaddr = ioremap_cache(erst_erange.base,
drivers/acpi/apei/erst.c
1255
if (!erst_erange.vaddr)
drivers/acpi/apei/erst.c
73
void __iomem *vaddr;
drivers/acpi/apei/erst.c
825
memcpy(erst_erange.vaddr, record, record->record_length);
drivers/acpi/apei/erst.c
826
rcd_erange = erst_erange.vaddr;
drivers/acpi/apei/erst.c
863
rcd_tmp = erst_erange.vaddr + offset;
drivers/acpi/apei/ghes.c
191
static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
drivers/acpi/apei/ghes.c
193
int _idx = virt_to_fix((unsigned long)vaddr);
drivers/acpi/apei/ghes.c
345
void __iomem *vaddr;
drivers/acpi/apei/ghes.c
351
vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
drivers/acpi/apei/ghes.c
355
memcpy_fromio(buffer, vaddr + offset, trunk);
drivers/acpi/apei/ghes.c
357
memcpy_toio(vaddr + offset, buffer, trunk);
drivers/acpi/apei/ghes.c
361
ghes_unmap(vaddr, fixmap_idx);
drivers/acpi/cppc_acpi.c
1004
void __iomem *vaddr = NULL;
drivers/acpi/cppc_acpi.c
1039
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
drivers/acpi/cppc_acpi.c
1042
vaddr = reg_res->sys_mem_vaddr;
drivers/acpi/cppc_acpi.c
1051
*val = readb_relaxed(vaddr);
drivers/acpi/cppc_acpi.c
1054
*val = readw_relaxed(vaddr);
drivers/acpi/cppc_acpi.c
1057
*val = readl_relaxed(vaddr);
drivers/acpi/cppc_acpi.c
1060
*val = readq_relaxed(vaddr);
drivers/acpi/cppc_acpi.c
1084
void __iomem *vaddr = NULL;
drivers/acpi/cppc_acpi.c
1112
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
drivers/acpi/cppc_acpi.c
1115
vaddr = reg_res->sys_mem_vaddr;
drivers/acpi/cppc_acpi.c
1132
prev_val = readb_relaxed(vaddr);
drivers/acpi/cppc_acpi.c
1135
prev_val = readw_relaxed(vaddr);
drivers/acpi/cppc_acpi.c
1138
prev_val = readl_relaxed(vaddr);
drivers/acpi/cppc_acpi.c
1141
prev_val = readq_relaxed(vaddr);
drivers/acpi/cppc_acpi.c
1152
writeb_relaxed(val, vaddr);
drivers/acpi/cppc_acpi.c
1155
writew_relaxed(val, vaddr);
drivers/acpi/cppc_acpi.c
1158
writel_relaxed(val, vaddr);
drivers/acpi/cppc_acpi.c
1161
writeq_relaxed(val, vaddr);
drivers/acpi/osl.c
299
static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
drivers/acpi/osl.c
307
iounmap(vaddr);
drivers/block/xen-blkback/blkback.c
310
vaddr(persistent_gnt->page),
drivers/block/xen-blkback/blkback.c
662
gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
drivers/block/xen-blkback/blkback.c
804
addr = vaddr(pages[i]->page);
drivers/block/z2ram.c
166
unsigned long size, paddr, vaddr;
drivers/block/z2ram.c
183
vaddr = (unsigned long)ioremap_wt(paddr, size);
drivers/block/z2ram.c
186
vaddr =
drivers/block/z2ram.c
198
z2ram_map[z2ram_size++] = vaddr;
drivers/block/z2ram.c
200
vaddr += Z2RAM_CHUNKSIZE;
drivers/crypto/cavium/nitrox/nitrox_aead.c
298
nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
drivers/crypto/cavium/nitrox/nitrox_lib.c
219
void *vaddr;
drivers/crypto/cavium/nitrox/nitrox_lib.c
226
vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
drivers/crypto/cavium/nitrox/nitrox_lib.c
227
if (!vaddr) {
drivers/crypto/cavium/nitrox/nitrox_lib.c
233
ctx = vaddr;
drivers/crypto/cavium/nitrox/nitrox_lib.c
240
chdr->vaddr = vaddr;
drivers/crypto/cavium/nitrox/nitrox_lib.c
257
dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
drivers/crypto/cavium/nitrox/nitrox_req.h
199
void *vaddr;
drivers/crypto/cavium/nitrox/nitrox_skcipher.c
127
nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
drivers/crypto/hisilicon/sec/sec_drv.c
1087
ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
drivers/crypto/hisilicon/sec/sec_drv.c
1089
if (!ring_cmd->vaddr)
drivers/crypto/hisilicon/sec/sec_drv.c
1096
ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
drivers/crypto/hisilicon/sec/sec_drv.c
1098
if (!ring_cq->vaddr) {
drivers/crypto/hisilicon/sec/sec_drv.c
1103
ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
drivers/crypto/hisilicon/sec/sec_drv.c
1105
if (!ring_db->vaddr) {
drivers/crypto/hisilicon/sec/sec_drv.c
1119
dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
drivers/crypto/hisilicon/sec/sec_drv.c
1122
dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
drivers/crypto/hisilicon/sec/sec_drv.c
1125
dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
drivers/crypto/hisilicon/sec/sec_drv.c
1135
dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
drivers/crypto/hisilicon/sec/sec_drv.c
1137
dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
drivers/crypto/hisilicon/sec/sec_drv.c
1139
dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
drivers/crypto/hisilicon/sec/sec_drv.c
692
outorder_msg = cq_ring->vaddr + ooo_read;
drivers/crypto/hisilicon/sec/sec_drv.c
694
msg = msg_ring->vaddr + q_id;
drivers/crypto/hisilicon/sec/sec_drv.c
705
msg = msg_ring->vaddr + queue->expected;
drivers/crypto/hisilicon/sec/sec_drv.c
718
outorder_msg = cq_ring->vaddr + ooo_read;
drivers/crypto/hisilicon/sec/sec_drv.c
720
msg = msg_ring->vaddr + q_id;
drivers/crypto/hisilicon/sec/sec_drv.c
871
memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
drivers/crypto/hisilicon/sec/sec_drv.h
183
struct sec_bd_info *vaddr;
drivers/crypto/hisilicon/sec/sec_drv.h
190
struct sec_debug_bd_info *vaddr;
drivers/crypto/hisilicon/sec/sec_drv.h
196
struct sec_out_bd_info *vaddr;
drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
367
if (dll->vaddr)
drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
368
dma_free_coherent(dev, dll->size, dll->vaddr, dll->dma_addr);
drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
369
dll->vaddr = NULL;
drivers/crypto/intel/keembay/ocs-aes.c
1434
dll_desc->vaddr = NULL;
drivers/crypto/intel/keembay/ocs-aes.c
1468
dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
drivers/crypto/intel/keembay/ocs-aes.c
1470
if (!dll_desc->vaddr)
drivers/crypto/intel/keembay/ocs-aes.c
1474
ll = dll_desc->vaddr;
drivers/crypto/intel/keembay/ocs-aes.h
62
void *vaddr;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
230
u8 *vaddr;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
292
vaddr = sg_virt(req->src);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
303
vaddr = qat_req->src_align;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
306
qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
320
vaddr = sg_virt(req->dst);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
326
vaddr = qat_req->dst_align;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
328
qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
708
u8 *vaddr;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
750
vaddr = sg_virt(req->src);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
760
vaddr = qat_req->src_align;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
763
qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
770
vaddr = sg_virt(req->dst);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
775
vaddr = qat_req->dst_align;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
778
qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
842
u8 *vaddr;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
894
vaddr = sg_virt(req->src);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
904
vaddr = qat_req->src_align;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
907
qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
914
vaddr = sg_virt(req->dst);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
919
vaddr = qat_req->dst_align;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
921
qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
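
Pattern note: the qat_asym_algs.c hits show the streaming-DMA counterpart: take the CPU address straight from the scatterlist when the source is directly usable, otherwise bounce into a driver-owned aligned buffer, then dma_map_single() whichever vaddr was chosen. A hedged sketch of that selection; the demo_* names and the single-entry assumption are mine, not the driver's.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static dma_addr_t demo_map_src(struct device *dev, struct scatterlist *src,
			       u8 *bounce, size_t len, bool usable_in_place)
{
	u8 *vaddr;

	if (usable_in_place) {
		vaddr = sg_virt(src);		/* use the sg page directly */
	} else {
		sg_copy_to_buffer(src, 1, bounce, len);
		vaddr = bounce;			/* aligned driver-owned copy */
	}

	/* callers must check the result with dma_mapping_error() */
	return dma_map_single(dev, vaddr, len, DMA_TO_DEVICE);
}
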
drivers/crypto/marvell/octeontx2/otx2_cptlf.h
151
iq->vaddr = NULL;
drivers/crypto/marvell/octeontx2/otx2_cptlf.h
176
iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;
drivers/crypto/marvell/octeontx2/otx2_cptlf.h
180
iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);
drivers/crypto/marvell/octeontx2/otx2_cptlf.h
69
u8 *vaddr;
drivers/dma-buf/heaps/cma_heap.c
150
invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
drivers/dma-buf/heaps/cma_heap.c
171
flush_kernel_vmap_range(buffer->vaddr, buffer->len);
drivers/dma-buf/heaps/cma_heap.c
215
void *vaddr;
drivers/dma-buf/heaps/cma_heap.c
217
vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
drivers/dma-buf/heaps/cma_heap.c
218
if (!vaddr)
drivers/dma-buf/heaps/cma_heap.c
221
return vaddr;
drivers/dma-buf/heaps/cma_heap.c
227
void *vaddr;
drivers/dma-buf/heaps/cma_heap.c
233
iosys_map_set_vaddr(map, buffer->vaddr);
drivers/dma-buf/heaps/cma_heap.c
237
vaddr = cma_heap_do_vmap(buffer);
drivers/dma-buf/heaps/cma_heap.c
238
if (IS_ERR(vaddr)) {
drivers/dma-buf/heaps/cma_heap.c
239
ret = PTR_ERR(vaddr);
drivers/dma-buf/heaps/cma_heap.c
242
buffer->vaddr = vaddr;
drivers/dma-buf/heaps/cma_heap.c
244
iosys_map_set_vaddr(map, buffer->vaddr);
drivers/dma-buf/heaps/cma_heap.c
257
vunmap(buffer->vaddr);
drivers/dma-buf/heaps/cma_heap.c
258
buffer->vaddr = NULL;
drivers/dma-buf/heaps/cma_heap.c
271
vunmap(buffer->vaddr);
drivers/dma-buf/heaps/cma_heap.c
272
buffer->vaddr = NULL;
drivers/dma-buf/heaps/cma_heap.c
332
void *vaddr = kmap_local_page(page);
drivers/dma-buf/heaps/cma_heap.c
334
clear_page(vaddr);
drivers/dma-buf/heaps/cma_heap.c
335
kunmap_local(vaddr);
drivers/dma-buf/heaps/cma_heap.c
60
void *vaddr;
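
Pattern note: cma_heap's vmap path above is the canonical dma-buf shape: build one contiguous kernel mapping over the buffer's page array on first use, cache it in buffer->vaddr, and publish it through struct iosys_map. A reduced sketch under a hypothetical buffer struct; the refcounting and locking the real heap has are omitted.

#include <linux/iosys-map.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct demo_buffer {
	struct page **pages;
	unsigned int pagecount;
	void *vaddr;		/* cached mapping, NULL until first vmap */
};

static int demo_vmap(struct demo_buffer *buf, struct iosys_map *map)
{
	if (!buf->vaddr) {
		buf->vaddr = vmap(buf->pages, buf->pagecount, VM_MAP,
				  PAGE_KERNEL);
		if (!buf->vaddr)
			return -ENOMEM;
	}
	iosys_map_set_vaddr(map, buf->vaddr);
	return 0;
}

static void demo_vunmap(struct demo_buffer *buf)
{
	vunmap(buf->vaddr);
	buf->vaddr = NULL;
}
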
drivers/dma-buf/heaps/system_heap.c
151
invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
drivers/dma-buf/heaps/system_heap.c
172
flush_kernel_vmap_range(buffer->vaddr, buffer->len);
drivers/dma-buf/heaps/system_heap.c
228
void *vaddr;
drivers/dma-buf/heaps/system_heap.c
238
vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
drivers/dma-buf/heaps/system_heap.c
241
if (!vaddr)
drivers/dma-buf/heaps/system_heap.c
244
return vaddr;
drivers/dma-buf/heaps/system_heap.c
250
void *vaddr;
drivers/dma-buf/heaps/system_heap.c
256
iosys_map_set_vaddr(map, buffer->vaddr);
drivers/dma-buf/heaps/system_heap.c
260
vaddr = system_heap_do_vmap(buffer);
drivers/dma-buf/heaps/system_heap.c
261
if (IS_ERR(vaddr)) {
drivers/dma-buf/heaps/system_heap.c
262
ret = PTR_ERR(vaddr);
drivers/dma-buf/heaps/system_heap.c
266
buffer->vaddr = vaddr;
drivers/dma-buf/heaps/system_heap.c
268
iosys_map_set_vaddr(map, buffer->vaddr);
drivers/dma-buf/heaps/system_heap.c
281
vunmap(buffer->vaddr);
drivers/dma-buf/heaps/system_heap.c
282
buffer->vaddr = NULL;
drivers/dma-buf/heaps/system_heap.c
31
void *vaddr;
drivers/dma-buf/udmabuf.c
113
void *vaddr;
drivers/dma-buf/udmabuf.c
126
vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
drivers/dma-buf/udmabuf.c
128
if (!vaddr)
drivers/dma-buf/udmabuf.c
131
iosys_map_set_vaddr(map, vaddr);
drivers/dma-buf/udmabuf.c
141
vm_unmap_ram(map->vaddr, ubuf->pagecount);
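
Pattern note: udmabuf reaches the same end with vm_map_ram()/vm_unmap_ram() instead of vmap(); the one trap visible in the listing is that the unmap side must be given the same page count that was mapped. Sketch with hypothetical names.

#include <linux/iosys-map.h>
#include <linux/vmalloc.h>

static int demo_map_ram(struct page **pages, unsigned int count,
			struct iosys_map *map)
{
	void *vaddr = vm_map_ram(pages, count, -1 /* any NUMA node */);

	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);
	return 0;
}

static void demo_unmap_ram(struct iosys_map *map, unsigned int count)
{
	/* count must match the vm_map_ram() call exactly */
	vm_unmap_ram(map->vaddr, count);
}
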
drivers/dma/at_hdmac.c
1100
void __iomem *vaddr;
drivers/dma/at_hdmac.c
1119
vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
drivers/dma/at_hdmac.c
1120
if (!vaddr) {
drivers/dma/at_hdmac.c
1129
*(u32*)vaddr = (fill_pattern << 24) |
drivers/dma/at_hdmac.c
1144
desc->memset_vaddr = vaddr;
drivers/dma/at_hdmac.c
1157
dma_pool_free(atdma->memset_pool, vaddr, paddr);
drivers/dma/at_hdmac.c
1171
void __iomem *vaddr;
drivers/dma/at_hdmac.c
1186
vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
drivers/dma/at_hdmac.c
1187
if (!vaddr) {
drivers/dma/at_hdmac.c
1192
*(u32*)vaddr = value;
drivers/dma/at_hdmac.c
1222
desc->memset_vaddr = vaddr;
drivers/dma/at_hdmac.c
1235
dma_pool_free(atdma->memset_pool, vaddr, paddr);
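
Pattern note: at_hdmac keeps a dma_pool for tiny memset source words: grab a slot, write the fill value through the pool's CPU mapping, hand the bus address to the hardware descriptor, and return the slot when the descriptor retires. A hedged sketch of the prepare half; pool creation and the descriptor plumbing stay the driver's business.

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

static dma_addr_t demo_prep_fill(struct dma_pool *pool, u32 value,
				 void **vaddr_ret)
{
	dma_addr_t paddr;
	u32 *vaddr = dma_pool_alloc(pool, GFP_NOWAIT, &paddr);

	if (!vaddr)
		return DMA_MAPPING_ERROR;

	*vaddr = value;		/* the engine re-reads this word as the fill */
	*vaddr_ret = vaddr;	/* keep for dma_pool_free() at completion */
	return paddr;
}
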
drivers/dma/dw-edma/dw-edma-core.c
86
chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
drivers/dma/dw-edma/dw-edma-core.c
89
chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
drivers/dma/dw-edma/dw-edma-pcie.c
248
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
drivers/dma/dw-edma/dw-edma-pcie.c
249
if (!ll_region->vaddr.io)
drivers/dma/dw-edma/dw-edma-pcie.c
252
ll_region->vaddr.io += ll_block->off;
drivers/dma/dw-edma/dw-edma-pcie.c
257
dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
drivers/dma/dw-edma/dw-edma-pcie.c
258
if (!dt_region->vaddr.io)
drivers/dma/dw-edma/dw-edma-pcie.c
261
dt_region->vaddr.io += dt_block->off;
drivers/dma/dw-edma/dw-edma-pcie.c
273
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
drivers/dma/dw-edma/dw-edma-pcie.c
274
if (!ll_region->vaddr.io)
drivers/dma/dw-edma/dw-edma-pcie.c
277
ll_region->vaddr.io += ll_block->off;
drivers/dma/dw-edma/dw-edma-pcie.c
282
dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
drivers/dma/dw-edma/dw-edma-pcie.c
283
if (!dt_region->vaddr.io)
drivers/dma/dw-edma/dw-edma-pcie.c
286
dt_region->vaddr.io += dt_block->off;
drivers/dma/dw-edma/dw-edma-pcie.c
313
chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
drivers/dma/dw-edma/dw-edma-pcie.c
318
chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
drivers/dma/dw-edma/dw-edma-pcie.c
325
chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
drivers/dma/dw-edma/dw-edma-pcie.c
330
chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
drivers/dma/dw-edma/dw-edma-v0-core.c
285
struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
drivers/dma/dw-edma/dw-edma-v0-core.c
292
struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
drivers/dma/dw-edma/dw-edma-v0-core.c
307
struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
drivers/dma/dw-edma/dw-edma-v0-core.c
312
struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
drivers/dma/dw-edma/dw-edma-v0-core.c
360
readl(chunk->ll_region.vaddr.io);
drivers/dma/dw-edma/dw-hdma-v0-core.c
161
struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
drivers/dma/dw-edma/dw-hdma-v0-core.c
168
struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
drivers/dma/dw-edma/dw-hdma-v0-core.c
183
struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
drivers/dma/dw-edma/dw-hdma-v0-core.c
188
struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
drivers/dma/dw-edma/dw-hdma-v0-core.c
225
readl(chunk->ll_region.vaddr.io);
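
Pattern note: dw-edma's region vaddr is not a plain pointer. The .mem/.io hits above show a union, because the linked-list area may live in host RAM (plain stores) or behind a PCI BAR (__iomem, writel()). The shape below is a reconstruction from those hits, not the driver's actual struct.

#include <linux/io.h>
#include <linux/types.h>

struct demo_region {
	union {
		void		*mem;	/* linked list in host RAM */
		void __iomem	*io;	/* linked list in a BAR window */
	} vaddr;
	bool is_io;			/* hypothetical discriminator */
};

static void demo_write_lli(struct demo_region *r, size_t ofs, u32 val)
{
	if (r->is_io)
		writel(val, r->vaddr.io + ofs);
	else
		*(u32 *)(r->vaddr.mem + ofs) = val;
}
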
drivers/firewire/ohci.c
853
void *vaddr;
drivers/firewire/ohci.c
875
vaddr = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
drivers/firewire/ohci.c
876
if (!vaddr) {
drivers/firewire/ohci.c
894
vunmap(vaddr);
drivers/firewire/ohci.c
900
ctx->buffer = vaddr;
drivers/firmware/qcom/qcom_tzmem.c
206
area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
drivers/firmware/qcom/qcom_tzmem.c
208
if (!area->vaddr)
drivers/firmware/qcom/qcom_tzmem.c
214
area->vaddr, area->paddr);
drivers/firmware/qcom/qcom_tzmem.c
218
ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
drivers/firmware/qcom/qcom_tzmem.c
222
area->vaddr, area->paddr);
drivers/firmware/qcom/qcom_tzmem.c
28
void *vaddr;
drivers/firmware/qcom/qcom_tzmem.c
330
area->vaddr, area->paddr);
drivers/firmware/qcom/qcom_tzmem.c
409
unsigned long vaddr;
drivers/firmware/qcom/qcom_tzmem.c
423
vaddr = gen_pool_alloc(pool->genpool, size);
drivers/firmware/qcom/qcom_tzmem.c
424
if (!vaddr) {
drivers/firmware/qcom/qcom_tzmem.c
435
ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
drivers/firmware/qcom/qcom_tzmem.c
437
gen_pool_free(pool->genpool, vaddr, size);
drivers/firmware/qcom/qcom_tzmem.c
444
return (void *)vaddr;
drivers/firmware/qcom/qcom_tzmem.c
454
void qcom_tzmem_free(void *vaddr)
drivers/firmware/qcom/qcom_tzmem.c
460
(unsigned long)vaddr, NULL);
drivers/firmware/qcom/qcom_tzmem.c
464
vaddr);
drivers/firmware/qcom/qcom_tzmem.c
469
gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
drivers/firmware/qcom/qcom_tzmem.c
485
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
drivers/firmware/qcom/qcom_tzmem.c
499
(unsigned long)vaddr);
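
Pattern note: qcom_tzmem seeds a genpool with both views of one dma_alloc_coherent() area, so later gen_pool_alloc() results are kernel vaddrs whose paddrs remain recoverable (the hits above show the driver additionally tracking chunks in a radix tree keyed on vaddr). Minimal sketch; the 256-byte request size is illustrative, and teardown is omitted. A pool registered this way can also translate back via gen_pool_virt_to_phys().

#include <linux/genalloc.h>

static void *demo_pool_alloc(struct gen_pool *pool, void *area_vaddr,
			     phys_addr_t area_paddr, size_t area_size)
{
	unsigned long vaddr;

	/* register the vaddr<->paddr pair once, at pool setup */
	if (gen_pool_add_virt(pool, (unsigned long)area_vaddr, area_paddr,
			      area_size, -1))
		return NULL;

	vaddr = gen_pool_alloc(pool, 256);
	if (!vaddr)
		return NULL;		/* pool exhausted */

	return (void *)vaddr;		/* release with gen_pool_free() */
}
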
drivers/firmware/stratix10-svc.c
145
void *vaddr;
drivers/firmware/stratix10-svc.c
1757
if (p_mem->vaddr == p_msg->payload) {
drivers/firmware/stratix10-svc.c
1764
if (p_mem->vaddr == p_msg->payload_output) {
drivers/firmware/stratix10-svc.c
1851
pmem->vaddr = (void *)va;
drivers/firmware/stratix10-svc.c
1856
chan->name, pmem->vaddr, (unsigned int)pmem->paddr);
drivers/firmware/stratix10-svc.c
1875
if (pmem->vaddr == kaddr) {
drivers/firmware/stratix10-svc.c
1878
pmem->vaddr = NULL;
drivers/firmware/stratix10-svc.c
323
return pmem->vaddr;
drivers/firmware/stratix10-svc.c
928
unsigned long vaddr;
drivers/firmware/stratix10-svc.c
947
vaddr = (unsigned long)va;
drivers/firmware/stratix10-svc.c
951
if ((vaddr & page_mask) || (paddr & page_mask) ||
drivers/firmware/stratix10-svc.c
962
ret = gen_pool_add_virt(genpool, vaddr, paddr, size, -1);
drivers/firmware/tegra/ivc.c
635
return (unsigned long)map->vaddr;
drivers/firmware/tegra/ivc.c
643
return map->vaddr;
drivers/gpio/gpio-ts5500.c
60
#define TS5500_DIO_IN_OUT(vaddr, vbit, caddr, cbit) \
drivers/gpio/gpio-ts5500.c
62
.value_addr = vaddr, \
drivers/gpio/gpio-ts5500.c
95
#define TS5500_DIO_GROUP(vaddr, vbitfrom, caddr, cbit) \
drivers/gpio/gpio-ts5500.c
96
TS5500_DIO_IN_OUT(vaddr, vbitfrom + 0, caddr, cbit), \
drivers/gpio/gpio-ts5500.c
97
TS5500_DIO_IN_OUT(vaddr, vbitfrom + 1, caddr, cbit), \
drivers/gpio/gpio-ts5500.c
98
TS5500_DIO_IN_OUT(vaddr, vbitfrom + 2, caddr, cbit), \
drivers/gpio/gpio-ts5500.c
99
TS5500_DIO_IN_OUT(vaddr, vbitfrom + 3, caddr, cbit)
drivers/gpu/drm/ast/ast_cursor.c
211
argb4444 = argb4444_dst[0].vaddr;
drivers/gpu/drm/ast/ast_cursor.c
213
argb4444 = shadow_plane_state->data[0].vaddr;
drivers/gpu/drm/ast/ast_cursor.c
228
argb4444 = argb4444_dst[0].vaddr;
drivers/gpu/drm/drm_cache.c
216
memcpy(dst->vaddr, src->vaddr, len);
drivers/gpu/drm/drm_cache.c
218
iosys_map_memcpy_to(dst, 0, src->vaddr, len);
drivers/gpu/drm/drm_cache.c
220
memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
drivers/gpu/drm/drm_cache.c
314
dst->vaddr,
drivers/gpu/drm/drm_cache.c
317
src->vaddr,
drivers/gpu/drm/drm_fb_dma_helper.c
187
if (!dma_obj->vaddr)
drivers/gpu/drm/drm_fb_dma_helper.c
190
iosys_map_set_vaddr(&sb->map[0], dma_obj->vaddr);
drivers/gpu/drm/drm_fbdev_dma.c
216
info->screen_buffer = map.vaddr;
drivers/gpu/drm/drm_fbdev_shmem.c
180
info->screen_buffer = map.vaddr;
drivers/gpu/drm/drm_format_helper.c
1247
void *vaddr = src[0].vaddr;
drivers/gpu/drm/drm_format_helper.c
1250
u8 *mono = dst[0].vaddr, *gray8;
drivers/gpu/drm/drm_format_helper.c
1283
vaddr += clip_offset(clip, fb->pitches[0], cpp);
drivers/gpu/drm/drm_format_helper.c
1285
src32 = memcpy(src32, vaddr, len_src32);
drivers/gpu/drm/drm_format_helper.c
1288
vaddr += fb->pitches[0];
drivers/gpu/drm/drm_format_helper.c
1335
void *vaddr = src[0].vaddr;
drivers/gpu/drm/drm_format_helper.c
1338
u8 *gray2 = dst[0].vaddr, *gray8;
drivers/gpu/drm/drm_format_helper.c
1371
vaddr += clip_offset(clip, fb->pitches[0], cpp);
drivers/gpu/drm/drm_format_helper.c
1373
src32 = memcpy(src32, vaddr, len_src32);
drivers/gpu/drm/drm_format_helper.c
1376
vaddr += fb->pitches[0];
drivers/gpu/drm/drm_format_helper.c
140
const void *vaddr, const struct drm_framebuffer *fb,
drivers/gpu/drm/drm_format_helper.c
165
vaddr += clip_offset(clip, fb->pitches[0], fb->format->cpp[0]);
drivers/gpu/drm/drm_format_helper.c
169
sbuf = memcpy(stmp, vaddr, sbuf_len);
drivers/gpu/drm/drm_format_helper.c
171
sbuf = vaddr;
drivers/gpu/drm/drm_format_helper.c
173
vaddr += fb->pitches[0];
drivers/gpu/drm/drm_format_helper.c
182
const void *vaddr, const struct drm_framebuffer *fb,
drivers/gpu/drm/drm_format_helper.c
208
vaddr += clip_offset(clip, fb->pitches[0], fb->format->cpp[0]);
drivers/gpu/drm/drm_format_helper.c
212
sbuf = memcpy(stmp, vaddr, sbuf_len);
drivers/gpu/drm/drm_format_helper.c
214
sbuf = vaddr;
drivers/gpu/drm/drm_format_helper.c
217
vaddr += fb->pitches[0];
drivers/gpu/drm/drm_format_helper.c
242
src[0].vaddr, fb, clip, vaddr_cached_hint, state,
drivers/gpu/drm/drm_format_helper.c
245
return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0],
drivers/gpu/drm/drm_format_helper.c
246
src[0].vaddr, fb, clip, vaddr_cached_hint, state,
drivers/gpu/drm/drm_format_helper.c
442
iosys_map_memcpy_to(&dst_i, 0, src_i.vaddr, len_i);
drivers/gpu/drm/drm_gem_dma_helper.c
149
dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
drivers/gpu/drm/drm_gem_dma_helper.c
154
dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
drivers/gpu/drm/drm_gem_dma_helper.c
158
if (!dma_obj->vaddr) {
drivers/gpu/drm/drm_gem_dma_helper.c
231
struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
drivers/gpu/drm/drm_gem_dma_helper.c
234
if (dma_obj->vaddr)
drivers/gpu/drm/drm_gem_dma_helper.c
237
} else if (dma_obj->vaddr) {
drivers/gpu/drm/drm_gem_dma_helper.c
240
dma_obj->vaddr, dma_obj->dma_addr,
drivers/gpu/drm/drm_gem_dma_helper.c
244
dma_obj->vaddr, dma_obj->dma_addr);
drivers/gpu/drm/drm_gem_dma_helper.c
393
return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
drivers/gpu/drm/drm_gem_dma_helper.c
410
drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
drivers/gpu/drm/drm_gem_dma_helper.c
435
ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
drivers/gpu/drm/drm_gem_dma_helper.c
508
iosys_map_set_vaddr(map, dma_obj->vaddr);
drivers/gpu/drm/drm_gem_dma_helper.c
544
virt_to_page(dma_obj->vaddr));
drivers/gpu/drm/drm_gem_dma_helper.c
546
ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
drivers/gpu/drm/drm_gem_dma_helper.c
600
dma_obj->vaddr = map.vaddr;
drivers/gpu/drm/drm_gem_shmem_helper.c
383
iosys_map_set_vaddr(map, shmem->vaddr);
drivers/gpu/drm/drm_gem_shmem_helper.c
393
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
drivers/gpu/drm/drm_gem_shmem_helper.c
395
if (!shmem->vaddr) {
drivers/gpu/drm/drm_gem_shmem_helper.c
398
iosys_map_set_vaddr(map, shmem->vaddr);
drivers/gpu/drm/drm_gem_shmem_helper.c
443
vunmap(shmem->vaddr);
drivers/gpu/drm/drm_gem_shmem_helper.c
444
shmem->vaddr = NULL;
drivers/gpu/drm/drm_gem_shmem_helper.c
724
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
drivers/gpu/drm/drm_mipi_dbi.c
301
tr = src->vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/drm_panic.c
153
static void drm_panic_write_pixel16(void *vaddr, unsigned int offset, u16 color)
drivers/gpu/drm/drm_panic.c
155
u16 *p = vaddr + offset;
drivers/gpu/drm/drm_panic.c
160
static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
drivers/gpu/drm/drm_panic.c
162
u8 *p = vaddr + offset;
drivers/gpu/drm/drm_panic.c
174
static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
drivers/gpu/drm/drm_panic.c
178
u8 *p = vaddr + offset;
drivers/gpu/drm/drm_panic.c
198
static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
drivers/gpu/drm/drm_panic.c
200
u32 *p = vaddr + offset;
drivers/gpu/drm/drm_panic.c
205
static void drm_panic_write_pixel(void *vaddr, unsigned int offset, u32 color, unsigned int cpp)
drivers/gpu/drm/drm_panic.c
209
drm_panic_write_pixel16(vaddr, offset, color);
drivers/gpu/drm/drm_panic.c
212
drm_panic_write_pixel24(vaddr, offset, color);
drivers/gpu/drm/drm_panic.c
215
drm_panic_write_pixel32(vaddr, offset, color);
drivers/gpu/drm/drm_panic.c
236
void *vaddr = NULL;
drivers/gpu/drm/drm_panic.c
250
if (vaddr)
drivers/gpu/drm/drm_panic.c
251
kunmap_local(vaddr);
drivers/gpu/drm/drm_panic.c
253
vaddr = kmap_local_page_try_from_panic(pages[page]);
drivers/gpu/drm/drm_panic.c
255
if (!vaddr)
drivers/gpu/drm/drm_panic.c
260
drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
drivers/gpu/drm/drm_panic.c
263
drm_panic_write_pixel(vaddr, offset, fg32, cpp);
drivers/gpu/drm/drm_panic.c
267
if (vaddr)
drivers/gpu/drm/drm_panic.c
268
kunmap_local(vaddr);
drivers/gpu/drm/drm_panic.c
336
void *vaddr = NULL;
drivers/gpu/drm/drm_panic.c
347
if (vaddr)
drivers/gpu/drm/drm_panic.c
348
kunmap_local(vaddr);
drivers/gpu/drm/drm_panic.c
350
vaddr = kmap_local_page_try_from_panic(pages[page]);
drivers/gpu/drm/drm_panic.c
352
if (!vaddr)
drivers/gpu/drm/drm_panic.c
357
drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
drivers/gpu/drm/drm_panic.c
360
drm_panic_write_pixel(vaddr, offset, color, cpp);
drivers/gpu/drm/drm_panic.c
363
if (vaddr)
drivers/gpu/drm/drm_panic.c
364
kunmap_local(vaddr);
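
Pattern note: drm_panic cannot rely on a persistent framebuffer mapping at panic time, so the hits above map each page on demand (via kmap_local_page_try_from_panic) and remap whenever a write crosses a page boundary. A simplified span-fill in the same style, using plain kmap_local_page() and only the 32bpp case; it assumes the span stays inside the pages array.

#include <linux/highmem.h>

static void demo_fill_span(struct page **pages, unsigned int page,
			   unsigned int off, unsigned int npix, u32 color)
{
	void *vaddr = kmap_local_page(pages[page]);

	while (npix--) {
		if (off >= PAGE_SIZE) {		/* crossed into next page */
			kunmap_local(vaddr);
			vaddr = kmap_local_page(pages[++page]);
			off = 0;
		}
		*(u32 *)(vaddr + off) = color;
		off += sizeof(u32);
	}
	kunmap_local(vaddr);
}
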
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
455
cmdbuf->vaddr);
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
459
cmdbuf->vaddr, cmdbuf->size, 0);
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
461
pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
54
u32 *ptr = buf->vaddr + off;
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
74
u32 *lw = buffer->vaddr + wl_offset;
drivers/gpu/drm/etnaviv/etnaviv_buffer.h
24
u32 *vaddr = (u32 *)buffer->vaddr;
drivers/gpu/drm/etnaviv/etnaviv_buffer.h
28
vaddr[buffer->user_size / 4] = data;
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
112
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
20
void *vaddr;
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
45
suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
47
if (!suballoc->vaddr) {
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
78
dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
22
void *vaddr;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
185
u32 *ptr = buf->vaddr;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
189
buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
drivers/gpu/drm/etnaviv/etnaviv_dump.c
177
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
183
submit->cmdbuf.vaddr, submit->cmdbuf.size,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
203
void *vaddr;
drivers/gpu/drm/etnaviv/etnaviv_dump.c
222
vaddr = etnaviv_gem_vmap(&obj->base);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
223
if (vaddr)
drivers/gpu/drm/etnaviv/etnaviv_dump.c
224
memcpy(iter.data, vaddr, obj->base.size);
drivers/gpu/drm/etnaviv/etnaviv_flop_reset.c
197
void *buffer_base = priv->flop_reset_data_ppu->vaddr;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
328
if (etnaviv_obj->vaddr)
drivers/gpu/drm/etnaviv/etnaviv_gem.c
329
return etnaviv_obj->vaddr;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
336
if (!etnaviv_obj->vaddr)
drivers/gpu/drm/etnaviv/etnaviv_gem.c
337
etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
340
return etnaviv_obj->vaddr;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
456
off, etnaviv_obj->vaddr, obj->size);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
490
vunmap(etnaviv_obj->vaddr);
drivers/gpu/drm/etnaviv/etnaviv_gem.h
51
void *vaddr;
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
30
void *vaddr;
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
32
vaddr = etnaviv_gem_vmap(obj);
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
33
if (!vaddr)
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
35
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
65
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
67
if (etnaviv_obj->vaddr)
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
88
return map.vaddr;
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
578
memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
drivers/gpu/drm/gud/gud_pipe.c
160
void *vaddr, *buf;
drivers/gpu/drm/gud/gud_pipe.c
168
vaddr = src[0].vaddr;
drivers/gpu/drm/gud/gud_pipe.c
182
len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect, fmtcnv_state);
drivers/gpu/drm/gud/gud_pipe.c
200
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
drivers/gpu/drm/gud/gud_pipe.c
206
buf = vaddr + rect->y1 * pitch;
drivers/gpu/drm/i915/display/intel_fbdev_fb.c
102
info->screen_base = vaddr;
drivers/gpu/drm/i915/display/intel_fbdev_fb.c
63
void __iomem *vaddr;
drivers/gpu/drm/i915/display/intel_fbdev_fb.c
90
vaddr = i915_vma_pin_iomap(vma);
drivers/gpu/drm/i915/display/intel_fbdev_fb.c
91
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/display/intel_fbdev_fb.c
93
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
drivers/gpu/drm/i915/display/intel_fbdev_fb.c
94
ret = PTR_ERR(vaddr);
drivers/gpu/drm/i915/display/intel_plane.c
1363
drm_clflush_virt_range(map.vaddr, fb->base.pitches[0] * fb->base.height);
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
73
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
75
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
76
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
77
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
79
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1109
cache->vaddr = 0;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1140
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1142
if (!cache->vaddr)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1145
vaddr = unmask_page(cache->vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1146
if (cache->vaddr & KMAP)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1147
kunmap_local(vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1149
io_mapping_unmap_atomic((void __iomem *)vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1155
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1157
if (!cache->vaddr)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1160
if (cache->vaddr & KMAP) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1163
vaddr = kmap_local_page(page);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1164
cache->vaddr = unmask_flags(cache->vaddr) |
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1165
(unsigned long)vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1174
cache->vaddr = (unsigned long)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1181
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1183
if (!cache->vaddr)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1186
vaddr = unmask_page(cache->vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1187
if (cache->vaddr & KMAP) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1190
if (cache->vaddr & CLFLUSH_AFTER)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1193
kunmap_local(vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1199
io_mapping_unmap_atomic((void __iomem *)vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1213
cache->vaddr = 0;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1221
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1224
if (cache->vaddr) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1225
kunmap_local(unmask_page(cache->vaddr));
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1237
cache->vaddr = flushes | KMAP;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1247
vaddr = kmap_local_page(page);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1248
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1251
return vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1262
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1264
if (cache->vaddr) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1266
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1328
vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1331
cache->vaddr = (unsigned long)vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1333
return vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1341
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1344
vaddr = unmask_page(cache->vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1346
vaddr = NULL;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1347
if ((cache->vaddr & KMAP) == 0)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1348
vaddr = reloc_iomap(vma, eb, page);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1349
if (!vaddr)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1350
vaddr = reloc_kmap(vma->obj, cache, page);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1353
return vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1387
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1390
vaddr = reloc_vaddr(vma, eb,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1392
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1393
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1396
clflush_write32(vaddr + offset_in_page(offset),
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1398
eb->reloc_cache.vaddr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
288
unsigned long vaddr; /** Current kmap address */
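
Pattern note: the execbuffer reloc cache above packs state into its vaddr. kmap/iomap addresses are page aligned, so i915 stores flags such as KMAP in the low bits and strips them with unmask_page() before use. The masks below are illustrative stand-ins for that trick, not i915's real definitions.

#include <linux/bits.h>
#include <linux/highmem.h>

#define DEMO_KMAP	BIT(0)	/* low bit is free: mappings are page aligned */

static void *demo_unmask_page(unsigned long cache_vaddr)
{
	return (void *)(cache_vaddr & PAGE_MASK);
}

static unsigned long demo_cache_map(struct page *page)
{
	return (unsigned long)kmap_local_page(page) | DEMO_KMAP;
}

static void demo_cache_release(unsigned long cache_vaddr)
{
	if (!cache_vaddr)
		return;
	if (cache_vaddr & DEMO_KMAP)
		kunmap_local(demo_unmask_page(cache_vaddr));
	/* an io-mapping branch would go here, as in the i915 release path */
}
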
drivers/gpu/drm/i915/gem/i915_gem_mman.c
514
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
531
vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
532
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gem/i915_gem_mman.c
533
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
538
memcpy(vaddr + addr, buf, len);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
541
memcpy(buf, vaddr + addr, len);
drivers/gpu/drm/i915/gem/i915_gem_object.c
882
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_object.c
884
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
drivers/gpu/drm/i915/gem/i915_gem_object.c
885
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gem/i915_gem_object.c
886
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/i915_gem_object.c
888
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
282
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
325
vaddr = vmap(pages, n_pages, 0, pgprot);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
329
return vaddr ?: ERR_PTR(-ENOMEM);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
341
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
355
vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
drivers/gpu/drm/i915/gem/i915_gem_pages.c
359
return vaddr ?: ERR_PTR(-ENOMEM);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
365
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
370
if (panic->vaddr) {
drivers/gpu/drm/i915/gem/i915_gem_pages.c
371
drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
372
kunmap_local(panic->vaddr);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
373
panic->vaddr = NULL;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
427
panic->vaddr =
drivers/gpu/drm/i915/gem/i915_gem_pages.c
430
if (panic->vaddr) {
drivers/gpu/drm/i915/gem/i915_gem_pages.c
431
u32 *pix = panic->vaddr + offset;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
102
void *vaddr = sg_page(pages->sgl);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
108
void *src = vaddr;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
136
vaddr, dma);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
142
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
160
if (copy_from_user(vaddr, user_data, args->size))
drivers/gpu/drm/i915/gem/i915_gem_phys.c
163
drm_clflush_virt_range(vaddr, args->size);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
173
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
183
drm_clflush_virt_range(vaddr, args->size);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
184
if (copy_to_user(user_data, vaddr, args->size))
drivers/gpu/drm/i915/gem/i915_gem_phys.c
28
void *vaddr;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
44
vaddr = dma_alloc_coherent(obj->base.dev->dev,
drivers/gpu/drm/i915/gem/i915_gem_phys.c
47
if (!vaddr)
drivers/gpu/drm/i915/gem/i915_gem_phys.c
61
sg_assign_page(sg, (struct page *)vaddr);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
65
dst = vaddr;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
93
vaddr, dma);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1767
u32 *vaddr;
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1788
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1789
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1790
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1793
*vaddr = 0xdeadbeaf;
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
335
static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
341
vaddr[i] = val++;
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
422
const u32 *vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
430
vaddr = i915_gem_object_pin_map_unlocked(buf->vma->obj, I915_MAP_WC);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
431
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
432
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
434
if (vaddr[0] != buf->start_val) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
441
if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
448
igt_hexdump(vaddr, 4096);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1748
u32 *vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1760
vaddr = __px_vaddr(vm->scratch[0]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1762
memcpy(out, vaddr, sizeof(*out));
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1763
if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
157
u32 *vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
200
vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
201
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
202
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
207
if (vaddr[i] != 0xdeadbeaf) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
208
pr_err("Data mismatch [%d]=%u\n", i, vaddr[i]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
380
dma_map = err ? NULL : map.vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
442
ptr = err ? NULL : map.vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
502
ptr = err ? NULL : map.vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
841
void *vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
843
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
844
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
845
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
847
memset(vaddr, POISON_INUSE, obj->base.size);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
856
void *vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
859
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
860
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
861
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
863
if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
68
void *vaddr;
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
70
vaddr = vm_map_ram(mock->pages, mock->npages, 0);
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
71
if (!vaddr)
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
73
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
82
vm_unmap_ram(map->vaddr, mock->npages);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
103
vaddr = px_vaddr(pt);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
104
memset32(vaddr + pte, scratch_pte, count);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
122
gen6_pte_t *vaddr;
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
126
vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
129
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
142
vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
88
gen6_pte_t *vaddr;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
274
u64 *vaddr;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
291
vaddr = px_vaddr(pt);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
292
memset64(vaddr + pte,
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
464
gen8_pte_t *vaddr;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
467
vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
470
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
493
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
494
vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
497
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
525
gen8_pte_t *vaddr;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
539
vaddr = px_vaddr(pd);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
561
vaddr = px_vaddr(pd);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
562
vaddr[__gen8_pte_index(start, 1)] |= GEN12_PDE_64K;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
574
vaddr = px_vaddr(pt);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
581
vaddr[index++] =
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
606
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
631
gen8_pte_t *vaddr;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
642
vaddr = px_vaddr(pd);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
657
vaddr = px_vaddr(pt);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
662
vaddr[index++] = encode | iter->dma;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
690
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
704
vaddr = px_vaddr(pd);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
705
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
706
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
722
vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
725
memset64(vaddr + i, encode, 15);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
727
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
775
gen8_pte_t *vaddr;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
779
vaddr = px_vaddr(pt);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
780
vaddr[gen8_pd_index(idx, 0)] = vm->pte_encode(addr, pat_index, flags);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
781
drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
796
gen8_pte_t *vaddr;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
804
vaddr = px_vaddr(pd);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
805
vaddr[gen8_pd_index(idx, 1)] |= GEN12_PDE_64K;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
809
vaddr = px_vaddr(pt);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
810
vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
drivers/gpu/drm/i915/gt/intel_context.c
223
void *vaddr;
drivers/gpu/drm/i915/gt/intel_context.c
248
err = ce->ops->pre_pin(ce, ww, &vaddr);
drivers/gpu/drm/i915/gt/intel_context.c
272
err = ce->ops->pin(ce, vaddr);
drivers/gpu/drm/i915/gt/intel_context_types.h
48
int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
drivers/gpu/drm/i915/gt/intel_context_types.h
49
int (*pin)(struct intel_context *ce, void *vaddr);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1070
void *vaddr;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1104
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1105
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1106
ret = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1110
engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1324
frame->ring.vaddr = frame->cs;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2234
const void *vaddr = rq->ring->vaddr;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2240
memcpy(ring, vaddr + head, len);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2243
memcpy(ring + len, vaddr + head, size - len);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2605
struct i915_gem_ww_ctx *ww, void **vaddr)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2609
err = lrc_pre_pin(ce, engine, ww, vaddr);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2614
lrc_init_state(ce, engine, *vaddr);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2624
void **vaddr)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2626
return __execlists_context_pre_pin(ce, ce->engine, ww, vaddr);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2629
static int execlists_context_pin(struct intel_context *ce, void *vaddr)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2631
return lrc_pin(ce, ce->engine, vaddr);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3714
void **vaddr)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3719
return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3722
static int virtual_context_pin(struct intel_context *ce, void *vaddr)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3726
return lrc_pin(ce, ve->siblings[0], vaddr);
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
752
char *vaddr;
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
755
vaddr = kmap_local_page(page);
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
758
memcpy(temp, &vaddr[i], 64);
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
759
memcpy(&vaddr[i], &vaddr[i + 64], 64);
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
760
memcpy(&vaddr[i + 64], temp, 64);
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
763
kunmap_local(vaddr);
drivers/gpu/drm/i915/gt/intel_gtt.c
102
void *vaddr;
drivers/gpu/drm/i915/gt/intel_gtt.c
115
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
drivers/gpu/drm/i915/gt/intel_gtt.c
116
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gt/intel_gtt.c
117
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/intel_gtt.c
126
void *vaddr;
drivers/gpu/drm/i915/gt/intel_gtt.c
139
vaddr = i915_gem_object_pin_map(obj, type);
drivers/gpu/drm/i915/gt/intel_gtt.c
140
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gt/intel_gtt.c
141
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/intel_gtt.c
340
void *vaddr = __px_vaddr(p);
drivers/gpu/drm/i915/gt/intel_gtt.c
342
memset64(vaddr, val, count);
drivers/gpu/drm/i915/gt/intel_gtt.c
343
drm_clflush_virt_range(vaddr, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_gtt.c
348
void *vaddr = __px_vaddr(scratch);
drivers/gpu/drm/i915/gt/intel_gtt.c
355
memset(vaddr, val, scratch->base.size);
drivers/gpu/drm/i915/gt/intel_gtt.c
356
drm_clflush_virt_range(vaddr, scratch->base.size);
drivers/gpu/drm/i915/gt/intel_lrc.c
1196
void **vaddr)
drivers/gpu/drm/i915/gt/intel_lrc.c
1201
*vaddr = i915_gem_object_pin_map(ce->state->obj,
drivers/gpu/drm/i915/gt/intel_lrc.c
1207
return PTR_ERR_OR_ZERO(*vaddr);
drivers/gpu/drm/i915/gt/intel_lrc.c
1213
void *vaddr)
drivers/gpu/drm/i915/gt/intel_lrc.c
1215
ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
drivers/gpu/drm/i915/gt/intel_lrc.c
1218
lrc_init_state(ce, engine, vaddr);
drivers/gpu/drm/i915/gt/intel_lrc.c
967
set_redzone(void *vaddr, const struct intel_engine_cs *engine)
drivers/gpu/drm/i915/gt/intel_lrc.c
972
vaddr += engine->context_size;
drivers/gpu/drm/i915/gt/intel_lrc.c
974
memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_lrc.c
978
check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
drivers/gpu/drm/i915/gt/intel_lrc.c
983
vaddr += engine->context_size;
drivers/gpu/drm/i915/gt/intel_lrc.c
985
if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
drivers/gpu/drm/i915/gt/intel_lrc.h
46
void **vaddr);
drivers/gpu/drm/i915/gt/intel_lrc.h
50
void *vaddr);
drivers/gpu/drm/i915/gt/intel_migrate.c
422
ring->emit = (void *)cs - ring->vaddr;
drivers/gpu/drm/i915/gt/intel_migrate.c
471
ring->emit = (void *)cs - ring->vaddr;
drivers/gpu/drm/i915/gt/intel_ppgtt.c
92
u64 * const vaddr = __px_vaddr(pdma);
drivers/gpu/drm/i915/gt/intel_ppgtt.c
94
vaddr[idx] = encoded_entry;
drivers/gpu/drm/i915/gt/intel_ppgtt.c
95
drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
drivers/gpu/drm/i915/gt/intel_ring.c
295
memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
drivers/gpu/drm/i915/gt/intel_ring.c
302
cs = ring->vaddr + ring->emit;
drivers/gpu/drm/i915/gt/intel_ring.c
75
ring->vaddr = addr;
drivers/gpu/drm/i915/gt/intel_ring.h
50
GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
drivers/gpu/drm/i915/gt/intel_ring.h
83
u32 offset = addr - rq->ring->vaddr;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
507
void *vaddr;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
509
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
510
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gt/intel_ring_submission.c
511
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
513
shmem_read(ce->default_state, 0, vaddr, ce->engine->context_size);
drivers/gpu/drm/i915/gt/intel_ring_types.h
27
void *vaddr;
drivers/gpu/drm/i915/gt/intel_timeline.c
63
void *vaddr;
drivers/gpu/drm/i915/gt/intel_timeline.c
65
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
drivers/gpu/drm/i915/gt/intel_timeline.c
66
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/gt/intel_timeline.c
67
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/intel_timeline.c
69
timeline->hwsp_map = vaddr;
drivers/gpu/drm/i915/gt/intel_timeline.c
70
timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
drivers/gpu/drm/i915/gt/intel_timeline.c
71
drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
drivers/gpu/drm/i915/gt/mock_engine.c
72
ring->vaddr = (void *)(ring + 1);
drivers/gpu/drm/i915/gt/selftest_context.c
114
vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
drivers/gpu/drm/i915/gt/selftest_context.c
115
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_context.c
138
if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
drivers/gpu/drm/i915/gt/selftest_context.c
81
void *vaddr;
drivers/gpu/drm/i915/gt/selftest_context.c
92
vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
drivers/gpu/drm/i915/gt/selftest_context.c
96
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_context.c
97
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1010
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1011
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1012
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1028
memset(vaddr, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1294
void *vaddr;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1317
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1318
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1319
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1339
memset(vaddr, 0, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1393
memset(vaddr, 0xff, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_execlists.c
221
memset(tmp->ring->vaddr,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2829
memset32(tmp->ring->vaddr,
drivers/gpu/drm/i915/gt/selftest_execlists.c
378
memset32(tmp->ring->vaddr,
drivers/gpu/drm/i915/gt/selftest_execlists.c
527
memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
drivers/gpu/drm/i915/gt/selftest_execlists.c
986
void *vaddr;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
112
void *vaddr;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
122
vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
123
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
126
return ERR_CAST(vaddr);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
133
h->batch = vaddr;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
43
void *vaddr;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
68
vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
69
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
70
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
73
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
75
vaddr = i915_gem_object_pin_map_unlocked(h->obj,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
77
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
78
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
81
h->batch = vaddr;
drivers/gpu/drm/i915/gt/selftest_migrate.c
118
if (vaddr[x] != x) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
121
igt_hexdump(vaddr + i * 1024, 4096);
drivers/gpu/drm/i915/gt/selftest_migrate.c
267
u32 *vaddr, val = 0;
drivers/gpu/drm/i915/gt/selftest_migrate.c
289
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_migrate.c
290
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
291
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_migrate.c
296
vaddr[i] = ~i;
drivers/gpu/drm/i915/gt/selftest_migrate.c
337
if (vaddr[x] != val) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
339
fn, vaddr[x], val, x * sizeof(u32));
drivers/gpu/drm/i915/gt/selftest_migrate.c
340
igt_hexdump(vaddr + i * 1024, 4096);
drivers/gpu/drm/i915/gt/selftest_migrate.c
349
vaddr[i] = ~i;
drivers/gpu/drm/i915/gt/selftest_migrate.c
379
if (vaddr[offset + x]) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
382
igt_hexdump(vaddr + offset,
drivers/gpu/drm/i915/gt/selftest_migrate.c
48
u32 *vaddr;
drivers/gpu/drm/i915/gt/selftest_migrate.c
70
vaddr = i915_gem_object_pin_map(src, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_migrate.c
71
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
72
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_migrate.c
77
vaddr[i] = i;
drivers/gpu/drm/i915/gt/selftest_migrate.c
80
vaddr = i915_gem_object_pin_map(dst, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_migrate.c
81
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
82
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_migrate.c
87
vaddr[i] = ~i;
drivers/gpu/drm/i915/gt/selftest_mocs.c
162
u32 **vaddr)
drivers/gpu/drm/i915/gt/selftest_mocs.c
171
if (**vaddr != expect) {
drivers/gpu/drm/i915/gt/selftest_mocs.c
173
engine->name, i, **vaddr, expect);
drivers/gpu/drm/i915/gt/selftest_mocs.c
176
++*vaddr;
drivers/gpu/drm/i915/gt/selftest_mocs.c
194
u32 **vaddr)
drivers/gpu/drm/i915/gt/selftest_mocs.c
205
if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
drivers/gpu/drm/i915/gt/selftest_mocs.c
207
engine->name, i, **vaddr, expect);
drivers/gpu/drm/i915/gt/selftest_mocs.c
21
void *vaddr;
drivers/gpu/drm/i915/gt/selftest_mocs.c
210
++*vaddr;
drivers/gpu/drm/i915/gt/selftest_mocs.c
223
u32 *vaddr;
drivers/gpu/drm/i915/gt/selftest_mocs.c
226
memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
drivers/gpu/drm/i915/gt/selftest_mocs.c
248
vaddr = arg->vaddr;
drivers/gpu/drm/i915/gt/selftest_mocs.c
250
err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
drivers/gpu/drm/i915/gt/selftest_mocs.c
252
err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
drivers/gpu/drm/i915/gt/selftest_mocs.c
256
GEM_BUG_ON(arg->vaddr + offset != vaddr);
drivers/gpu/drm/i915/gt/selftest_mocs.c
84
arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
drivers/gpu/drm/i915/gt/selftest_mocs.c
85
if (IS_ERR(arg->vaddr)) {
drivers/gpu/drm/i915/gt/selftest_mocs.c
86
err = PTR_ERR(arg->vaddr);
drivers/gpu/drm/i915/gt/selftest_ring.c
18
ring->vaddr = (void *)(ring + 1);
drivers/gpu/drm/i915/gt/selftest_tlb.c
239
void *vaddr;
drivers/gpu/drm/i915/gt/selftest_tlb.c
254
vaddr = i915_gem_object_pin_map_unlocked(A, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_tlb.c
255
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_tlb.c
256
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_tlb.c
266
vaddr = i915_gem_object_pin_map_unlocked(B, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_tlb.c
267
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_tlb.c
268
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
206
u32 *vaddr;
drivers/gpu/drm/i915/gt/selftest_workarounds.c
224
vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
225
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/selftest_workarounds.c
226
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
232
u32 actual = vaddr[i];
drivers/gpu/drm/i915/gt/selftest_workarounds.c
235
print_results(engine, vaddr);
drivers/gpu/drm/i915/gt/shmem_utils.c
105
void *vaddr;
drivers/gpu/drm/i915/gt/shmem_utils.c
112
vaddr = kmap_local_page(page);
drivers/gpu/drm/i915/gt/shmem_utils.c
114
memcpy(vaddr + offset_in_page(off), ptr, this);
drivers/gpu/drm/i915/gt/shmem_utils.c
117
memcpy(ptr, vaddr + offset_in_page(off), this);
drivers/gpu/drm/i915/gt/shmem_utils.c
120
kunmap_local(vaddr);
drivers/gpu/drm/i915/gt/shmem_utils.c
140
void *vaddr;
drivers/gpu/drm/i915/gt/shmem_utils.c
147
vaddr = kmap_local_page(page);
drivers/gpu/drm/i915/gt/shmem_utils.c
148
iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
drivers/gpu/drm/i915/gt/shmem_utils.c
151
kunmap_local(vaddr);
drivers/gpu/drm/i915/gt/shmem_utils.c
63
void *vaddr;
drivers/gpu/drm/i915/gt/shmem_utils.c
77
vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
drivers/gpu/drm/i915/gt/shmem_utils.c
78
if (!vaddr)
drivers/gpu/drm/i915/gt/shmem_utils.c
81
return vaddr;
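
Pattern note: shmem_utils.c copies through a shmem file page by page: look up each page, kmap it locally, memcpy the in-page span, unmap, advance. A reduced sketch of the write direction; error handling on the page lookup is trimmed, and shmem_read_mapping_page() is the stock lookup helper.

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/string.h>

static void demo_shmem_write(struct address_space *mapping, loff_t off,
			     const void *src, size_t len)
{
	while (len) {
		size_t this = min_t(size_t, len,
				    PAGE_SIZE - offset_in_page(off));
		struct page *page =
			shmem_read_mapping_page(mapping, off >> PAGE_SHIFT);
		void *vaddr = kmap_local_page(page);

		memcpy(vaddr + offset_in_page(off), src, this);
		kunmap_local(vaddr);
		put_page(page);

		off += this;
		src += this;
		len -= this;
	}
}
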
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
396
void *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
400
&vma, &vaddr);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
407
msg_in = vaddr;
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
408
msg_out = vaddr + GSC_VER_PKT_SZ;
drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
360
void *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
365
&vma, &vaddr);
drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
370
gsc->proxy.to_gsc = vaddr;
drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
371
gsc->proxy.to_csme = vaddr + GSC_PROXY_BUFFER_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
151
void __iomem *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
179
vaddr = i915_vma_pin_iomap(vma);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
181
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
182
ret = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
189
gsc->local_vaddr = vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc.c
838
void *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc.c
844
vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
drivers/gpu/drm/i915/gt/uc/intel_guc.c
847
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/uc/intel_guc.c
849
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc.c
853
*out_vaddr = vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
78
void *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
83
ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
91
memcpy(hwconfig->ptr, vaddr, hwconfig->size);
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
623
void *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
642
vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
643
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
644
ret = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
648
log->buf_addr = vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
127
drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
128
data = slpc->vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
202
drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
283
err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
373
struct slpc_shared_data *data = slpc->vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
384
struct slpc_shared_data *data = slpc->vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
396
struct slpc_shared_data *data = slpc->vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
854
struct slpc_shared_data *data = slpc->vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
18
struct slpc_shared_data *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2996
void **vaddr)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2998
return lrc_pre_pin(ce, engine, ww, vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3003
void *vaddr)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3014
return lrc_pin(ce, engine, vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3019
void **vaddr)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3021
return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3024
static int guc_context_pin(struct intel_context *ce, void *vaddr)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3026
int ret = __guc_context_pin(ce, ce->engine, vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3991
void **vaddr)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3995
return __guc_context_pre_pin(ce, engine, ww, vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3998
static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4001
int ret = __guc_context_pin(ce, engine, vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4082
static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4095
return __guc_context_pin(ce, engine, vaddr);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4098
static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4106
return __guc_context_pin(ce, engine, vaddr);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1145
void *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1165
vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1167
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1169
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1173
copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1316
void __iomem *vaddr;
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1323
vaddr = io_mapping_map_atomic_wc(&mr->iomap,
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1325
memcpy_fromio(dst, vaddr + offset, len);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1326
io_mapping_unmap_atomic(vaddr);
drivers/gpu/drm/i915/gvt/cmd_parser.c
3124
void *vaddr;
drivers/gpu/drm/i915/gvt/cmd_parser.c
3130
vaddr = shmem_pin_map(engine->default_state);
drivers/gpu/drm/i915/gvt/cmd_parser.c
3131
if (!vaddr) {
drivers/gpu/drm/i915/gvt/cmd_parser.c
3145
s.rb_va = vaddr + start;
drivers/gpu/drm/i915/gvt/cmd_parser.c
3158
shmem_unpin_map(engine->default_state, vaddr);
drivers/gpu/drm/i915/gvt/gtt.c
1030
clear_page(spt->shadow_page.vaddr);
drivers/gpu/drm/i915/gvt/gtt.c
646
ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
drivers/gpu/drm/i915/gvt/gtt.c
650
ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
drivers/gpu/drm/i915/gvt/gtt.c
795
spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
drivers/gpu/drm/i915/gvt/gtt.h
248
void *vaddr;
drivers/gpu/drm/i915/i915_gem.c
210
char *vaddr;
drivers/gpu/drm/i915/i915_gem.c
213
vaddr = kmap(page);
drivers/gpu/drm/i915/i915_gem.c
216
drm_clflush_virt_range(vaddr + offset, len);
drivers/gpu/drm/i915/i915_gem.c
218
ret = __copy_to_user(user_data, vaddr + offset, len);
drivers/gpu/drm/i915/i915_gem.c
283
void __iomem *vaddr;
drivers/gpu/drm/i915/i915_gem.c
287
vaddr = io_mapping_map_atomic_wc(mapping, base);
drivers/gpu/drm/i915/i915_gem.c
289
(void __force *)vaddr + offset,
drivers/gpu/drm/i915/i915_gem.c
291
io_mapping_unmap_atomic(vaddr);
drivers/gpu/drm/i915/i915_gem.c
293
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gem.c
295
(void __force *)vaddr + offset,
drivers/gpu/drm/i915/i915_gem.c
297
io_mapping_unmap(vaddr);
drivers/gpu/drm/i915/i915_gem.c
518
void __iomem *vaddr;
drivers/gpu/drm/i915/i915_gem.c
522
vaddr = io_mapping_map_atomic_wc(mapping, base);
drivers/gpu/drm/i915/i915_gem.c
523
unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
drivers/gpu/drm/i915/i915_gem.c
525
io_mapping_unmap_atomic(vaddr);
drivers/gpu/drm/i915/i915_gem.c
527
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gem.c
528
unwritten = copy_from_user((void __force *)vaddr + offset,
drivers/gpu/drm/i915/i915_gem.c
530
io_mapping_unmap(vaddr);
drivers/gpu/drm/i915/i915_gem.c
647
char *vaddr;
drivers/gpu/drm/i915/i915_gem.c
650
vaddr = kmap(page);
drivers/gpu/drm/i915/i915_gem.c
653
drm_clflush_virt_range(vaddr + offset, len);
drivers/gpu/drm/i915/i915_gem.c
655
ret = __copy_from_user(vaddr + offset, user_data, len);
drivers/gpu/drm/i915/i915_gem.c
657
drm_clflush_virt_range(vaddr + offset, len);
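Editorial note: the i915_gem.c entries above repeat one pattern: first try an atomic write-combining mapping of a single aperture page, and fall back to a regular (sleepable) mapping when the in-atomic copy faults. A minimal sketch of that pattern; demo_gtt_write() and its mapping/base parameters are illustrative, not i915 helpers:

    #include <linux/io-mapping.h>
    #include <linux/uaccess.h>

    static int demo_gtt_write(struct io_mapping *mapping, unsigned long base,
                              unsigned int offset, const void __user *user_data,
                              unsigned int len)
    {
            void __iomem *vaddr;
            unsigned long unwritten;

            /* Fast path: atomic per-page WC mapping; sleeping is not allowed,
             * so the copy must be the in-atomic, non-faulting variant.
             */
            vaddr = io_mapping_map_atomic_wc(mapping, base);
            unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                          user_data, len);
            io_mapping_unmap_atomic(vaddr);

            if (unwritten) {
                    /* Slow path: a regular mapping, so copy_from_user() may
                     * fault the user page in.
                     */
                    vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                    unwritten = copy_from_user((void __force *)vaddr + offset,
                                               user_data, len);
                    io_mapping_unmap(vaddr);
            }

            return unwritten ? -EFAULT : 0;
    }

The same map-atomic-then-fallback shape recurs in the intel_uc_fw.c and selftest entries nearby.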
drivers/gpu/drm/i915/i915_perf.c
1045
u8 *oa_buf_base = stream->oa_buffer.vaddr;
drivers/gpu/drm/i915/i915_perf.c
1161
if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
drivers/gpu/drm/i915/i915_perf.c
1636
stream->oa_buffer.vaddr = NULL;
drivers/gpu/drm/i915/i915_perf.c
1742
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
drivers/gpu/drm/i915/i915_perf.c
1795
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
drivers/gpu/drm/i915/i915_perf.c
1848
memset(stream->oa_buffer.vaddr, 0,
drivers/gpu/drm/i915/i915_perf.c
1893
stream->oa_buffer.vaddr =
drivers/gpu/drm/i915/i915_perf.c
1895
if (IS_ERR(stream->oa_buffer.vaddr)) {
drivers/gpu/drm/i915/i915_perf.c
1896
ret = PTR_ERR(stream->oa_buffer.vaddr);
drivers/gpu/drm/i915/i915_perf.c
1908
stream->oa_buffer.vaddr = NULL;
drivers/gpu/drm/i915/i915_perf.c
588
void *report = stream->oa_buffer.vaddr + tail;
drivers/gpu/drm/i915/i915_perf.c
687
oa_buf_end = stream->oa_buffer.vaddr + OA_BUFFER_SIZE;
drivers/gpu/drm/i915/i915_perf.c
695
if (copy_to_user(buf, stream->oa_buffer.vaddr,
drivers/gpu/drm/i915/i915_perf.c
735
u8 *oa_buf_base = stream->oa_buffer.vaddr;
drivers/gpu/drm/i915/i915_perf.c
890
u8 *oa_buf_end = stream->oa_buffer.vaddr +
drivers/gpu/drm/i915/i915_perf.c
957
if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
drivers/gpu/drm/i915/i915_perf_types.h
289
u8 *vaddr;
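Editorial note: the i915_perf.c entries consume the OA buffer as a circular buffer: a report is read at stream->oa_buffer.vaddr + tail, and a read reaching the buffer end continues from the base. A minimal sketch of a wrap-around copy-out under that layout; demo_oa_copy() and its arguments are assumptions, not the driver's helper:

    #include <linux/minmax.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper: copy len bytes starting at tail out of a circular
     * buffer of buf_size bytes, splitting the copy at the wrap point.
     */
    static int demo_oa_copy(char __user *buf, const u8 *vaddr, size_t buf_size,
                            size_t tail, size_t len)
    {
            size_t first = min(len, buf_size - tail);

            if (copy_to_user(buf, vaddr + tail, first))
                    return -EFAULT;
            if (len > first &&
                copy_to_user(buf + first, vaddr, len - first)) /* wrapped tail */
                    return -EFAULT;
            return 0;
    }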
drivers/gpu/drm/i915/i915_request.c
224
void *vaddr = rq->ring->vaddr;
drivers/gpu/drm/i915/i915_request.c
229
memset(vaddr + head, val, rq->ring->size - head);
drivers/gpu/drm/i915/i915_request.c
232
memset(vaddr + head, val, rq->postfix - head);
drivers/gpu/drm/i915/i915_request.c
664
request->ring->vaddr + request->postfix);
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
135
memcpy(pxp->stream_cmd.vaddr, msg_in, msg_in_len);
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
142
memcpy(msg_out, pxp->stream_cmd.vaddr, msg_out_len);
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
237
pxp->stream_cmd.vaddr = NULL;
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
268
pxp->stream_cmd.vaddr = cmd;
drivers/gpu/drm/i915/pxp/intel_pxp_types.h
101
void *vaddr; /* virtual memory for PXP command */
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1403
u32 __iomem *vaddr;
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1405
vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1406
iowrite32(n, vaddr + n);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1407
io_mapping_unmap_atomic(vaddr);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1414
u32 __iomem *vaddr;
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1417
vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1418
val = ioread32(vaddr + n);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1419
io_mapping_unmap_atomic(vaddr);
drivers/gpu/drm/i915/selftests/igt_spinner.c
102
vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
drivers/gpu/drm/i915/selftests/igt_spinner.c
103
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/selftests/igt_spinner.c
104
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/selftests/igt_spinner.c
106
spin->batch = vaddr;
drivers/gpu/drm/i915/selftests/igt_spinner.c
48
void *vaddr;
drivers/gpu/drm/i915/selftests/igt_spinner.c
59
vaddr = i915_gem_object_pin_map(obj, mode);
drivers/gpu/drm/i915/selftests/igt_spinner.c
64
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/selftests/igt_spinner.c
65
return vaddr;
drivers/gpu/drm/i915/selftests/igt_spinner.c
77
return vaddr;
drivers/gpu/drm/i915/selftests/igt_spinner.c
84
void *vaddr;
drivers/gpu/drm/i915/selftests/igt_spinner.c
91
vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
drivers/gpu/drm/i915/selftests/igt_spinner.c
92
if (IS_ERR(vaddr))
drivers/gpu/drm/i915/selftests/igt_spinner.c
93
return PTR_ERR(vaddr);
drivers/gpu/drm/i915/selftests/igt_spinner.c
95
spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1028
u32 *vaddr;
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1048
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1049
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1050
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1113
memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1124
if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1126
__func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
drivers/gpu/drm/i915/selftests/intel_memory_region.c
883
void *vaddr;
drivers/gpu/drm/i915/selftests/intel_memory_region.c
918
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
919
if (IS_ERR(vaddr)) {
drivers/gpu/drm/i915/selftests/intel_memory_region.c
920
err = PTR_ERR(vaddr);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
926
memset32(vaddr, val, obj->base.size / sizeof(u32));
drivers/gpu/drm/imagination/pvr_gem.c
232
return map.vaddr;
drivers/gpu/drm/imagination/pvr_gem.c
252
struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
drivers/gpu/drm/imagination/pvr_gem.c
255
if (WARN_ON(!map.vaddr))
drivers/gpu/drm/lima/lima_sched.c
384
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
drivers/gpu/drm/mediatek/mtk_gem.c
122
dma_obj->vaddr = dma_alloc_wc(priv->dma_dev, obj->size,
drivers/gpu/drm/mediatek/mtk_gem.c
125
if (!dma_obj->vaddr) {
drivers/gpu/drm/mediatek/mtk_gem.c
132
dma_obj->vaddr, &dma_obj->dma_addr,
drivers/gpu/drm/mediatek/mtk_gem.c
204
ret = dma_mmap_wc(priv->dma_dev, vma, dma_obj->vaddr,
drivers/gpu/drm/mediatek/mtk_gem.c
32
dma_obj->vaddr, dma_obj->dma_addr);
drivers/gpu/drm/mediatek/mtk_gem.c
57
ret = dma_get_sgtable(priv->dma_dev, sgt, dma_obj->vaddr,
drivers/gpu/drm/msm/msm_gem.c
1087
GEM_WARN_ON(msm_obj->vaddr);
drivers/gpu/drm/msm/msm_gem.c
1340
void *vaddr;
drivers/gpu/drm/msm/msm_gem.c
1353
vaddr = msm_gem_get_vaddr(obj);
drivers/gpu/drm/msm/msm_gem.c
1354
if (IS_ERR(vaddr)) {
drivers/gpu/drm/msm/msm_gem.c
1356
ret = PTR_ERR(vaddr);
drivers/gpu/drm/msm/msm_gem.c
1363
return vaddr;
drivers/gpu/drm/msm/msm_gem.c
730
if (!msm_obj->vaddr) {
drivers/gpu/drm/msm/msm_gem.c
731
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
drivers/gpu/drm/msm/msm_gem.c
733
if (msm_obj->vaddr == NULL) {
drivers/gpu/drm/msm/msm_gem.c
739
return msm_obj->vaddr;
drivers/gpu/drm/msm/msm_gem.c
882
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
drivers/gpu/drm/msm/msm_gem.c
885
vunmap(msm_obj->vaddr);
drivers/gpu/drm/msm/msm_gem.c
886
msm_obj->vaddr = NULL;
drivers/gpu/drm/msm/msm_gem.c
974
off, msm_obj->vaddr);
drivers/gpu/drm/msm/msm_gem.h
220
void *vaddr;
drivers/gpu/drm/msm/msm_gem.h
413
return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
drivers/gpu/drm/msm/msm_gem.h
418
return is_unpurgeable(msm_obj) || msm_obj->vaddr;
drivers/gpu/drm/msm/msm_gem_prime.c
31
void *vaddr;
drivers/gpu/drm/msm/msm_gem_prime.c
33
vaddr = msm_gem_get_vaddr_locked(obj);
drivers/gpu/drm/msm/msm_gem_prime.c
34
if (IS_ERR(vaddr))
drivers/gpu/drm/msm/msm_gem_prime.c
35
return PTR_ERR(vaddr);
drivers/gpu/drm/msm/msm_gem_prime.c
36
iosys_map_set_vaddr(map, vaddr);
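Editorial note: msm_gem.c maps an object's pages lazily: vmap() on first use, the result cached in msm_obj->vaddr, and vunmap() when the object is purged. A reduced sketch of that lifecycle, with illustrative names and locking left to the caller:

    #include <linux/err.h>
    #include <linux/vmalloc.h>

    struct demo_obj {
            struct page **pages;    /* backing pages, already populated */
            unsigned int npages;
            void *vaddr;            /* cached kernel mapping, NULL until used */
    };

    static void *demo_get_vaddr(struct demo_obj *obj)
    {
            if (!obj->vaddr) {
                    obj->vaddr = vmap(obj->pages, obj->npages, VM_MAP,
                                      PAGE_KERNEL);
                    if (!obj->vaddr)
                            return ERR_PTR(-ENOMEM);
            }
            return obj->vaddr;
    }

    static void demo_put_vaddr(struct demo_obj *obj)
    {
            if (obj->vaddr) {
                    vunmap(obj->vaddr);
                    obj->vaddr = NULL;
            }
    }

The same vmap()-and-cache shape recurs in the nouveau, omapdrm, rockchip, tegra, v3d and xen entries below.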
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
149
vunmap(obj->base.vaddr);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
150
obj->base.vaddr = NULL;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
182
return node->vaddr;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
197
if (node->base.vaddr) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
209
node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
211
if (!node->base.vaddr) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
225
return node->base.vaddr;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
269
return node->vaddr[offset / 4];
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
277
node->vaddr[offset / 4] = data;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
301
if (unlikely(!node->base.vaddr))
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
305
node->base.vaddr, node->handle, imem->attrs);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
326
if (node->base.vaddr)
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
397
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
400
if (!node->base.vaddr) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
57
u32 *vaddr;
drivers/gpu/drm/omapdrm/dss/dispc.c
4486
void *vaddr;
drivers/gpu/drm/omapdrm/dss/dispc.c
4497
i734_buf.vaddr = dma_alloc_wc(&dispc->pdev->dev, i734_buf.size,
drivers/gpu/drm/omapdrm/dss/dispc.c
4499
if (!i734_buf.vaddr) {
drivers/gpu/drm/omapdrm/dss/dispc.c
4513
dma_free_wc(&dispc->pdev->dev, i734_buf.size, i734_buf.vaddr,
drivers/gpu/drm/omapdrm/omap_gem.c
1069
void *vaddr;
drivers/gpu/drm/omapdrm/omap_gem.c
1074
if (!omap_obj->vaddr) {
drivers/gpu/drm/omapdrm/omap_gem.c
1077
vaddr = ERR_PTR(ret);
drivers/gpu/drm/omapdrm/omap_gem.c
1081
omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
drivers/gpu/drm/omapdrm/omap_gem.c
1085
vaddr = omap_obj->vaddr;
drivers/gpu/drm/omapdrm/omap_gem.c
1089
return vaddr;
drivers/gpu/drm/omapdrm/omap_gem.c
1146
omap_obj->vaddr, omap_obj->roll);
drivers/gpu/drm/omapdrm/omap_gem.c
1218
dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
drivers/gpu/drm/omapdrm/omap_gem.c
1220
} else if (omap_obj->vaddr) {
drivers/gpu/drm/omapdrm/omap_gem.c
1221
vunmap(omap_obj->vaddr);
drivers/gpu/drm/omapdrm/omap_gem.c
1363
omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
drivers/gpu/drm/omapdrm/omap_gem.c
1366
if (!omap_obj->vaddr)
drivers/gpu/drm/omapdrm/omap_gem.c
389
unsigned long vaddr;
drivers/gpu/drm/omapdrm/omap_gem.c
421
vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
drivers/gpu/drm/omapdrm/omap_gem.c
442
vaddr += off << PAGE_SHIFT;
drivers/gpu/drm/omapdrm/omap_gem.c
470
ret = vmf_insert_mixed(vma, vaddr, pfn);
drivers/gpu/drm/omapdrm/omap_gem.c
474
vaddr += PAGE_SIZE * m;
drivers/gpu/drm/omapdrm/omap_gem.c
97
void *vaddr;
drivers/gpu/drm/panfrost/panfrost_dump.c
201
void *vaddr;
drivers/gpu/drm/panfrost/panfrost_dump.c
228
vaddr = map.vaddr;
drivers/gpu/drm/panfrost/panfrost_dump.c
229
memcpy(iter.data, vaddr, bo->base.base.size);
drivers/gpu/drm/panfrost/panfrost_gem.c
314
if (shmem->vaddr)
drivers/gpu/drm/panfrost/panfrost_gem.c
315
invalidate_kernel_vmap_range(shmem->vaddr, shmem->base.size);
drivers/gpu/drm/panfrost/panfrost_gem.c
345
if (shmem->vaddr)
drivers/gpu/drm/panfrost/panfrost_gem.c
346
flush_kernel_vmap_range(shmem->vaddr, shmem->base.size);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
112
perfcnt->buf = map.vaddr;
drivers/gpu/drm/panthor/panthor_gem.c
273
if (shmem->vaddr)
drivers/gpu/drm/panthor/panthor_gem.c
274
invalidate_kernel_vmap_range(shmem->vaddr, shmem->base.size);
drivers/gpu/drm/panthor/panthor_gem.c
304
if (shmem->vaddr)
drivers/gpu/drm/panthor/panthor_gem.c
305
flush_kernel_vmap_range(shmem->vaddr, shmem->base.size);
drivers/gpu/drm/panthor/panthor_gem.h
183
bo->kmap = map.vaddr;
drivers/gpu/drm/panthor/panthor_sched.c
890
queue->syncwait.kmap = map.vaddr;
drivers/gpu/drm/qxl/qxl_display.c
634
user_map.vaddr, size);
drivers/gpu/drm/qxl/qxl_display.c
636
memcpy(cursor_map.vaddr,
drivers/gpu/drm/qxl/qxl_display.c
638
memcpy(cursor_map.vaddr + sizeof(cursor),
drivers/gpu/drm/qxl/qxl_display.c
639
user_map.vaddr, size);
drivers/gpu/drm/qxl/qxl_draw.c
209
surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/qxl/qxl_draw.c
55
dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/qxl/qxl_object.c
178
bo->kptr = bo->map.vaddr;
drivers/gpu/drm/qxl/qxl_object.c
233
rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/radeon/radeon_prime.h
35
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
294
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr,
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
519
void *vaddr;
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
522
vaddr = rk_obj->kvaddr;
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
524
vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
527
if (!vaddr)
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
529
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
546
if (map->vaddr != rk_obj->kvaddr)
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
547
vunmap(map->vaddr);
drivers/gpu/drm/sitronix/st7571.c
173
memcpy(st7571->hwbuf, vmap->vaddr, size);
drivers/gpu/drm/sitronix/st7571.c
198
memcpy(st7571->hwbuf, vmap->vaddr, size);
drivers/gpu/drm/sitronix/st7571.c
203
memcpy(st7571->hwbuf, vmap->vaddr, size);
drivers/gpu/drm/sitronix/st7586.c
107
st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip, fmtcnv_state);
drivers/gpu/drm/sitronix/st7586.c
67
static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
drivers/gpu/drm/sitronix/st7586.c
82
iosys_map_set_vaddr(&vmap, vaddr);
drivers/gpu/drm/sti/sti_cursor.c
288
sti_cursor_argb8888_to_clut8(cursor, (u32 *)dma_obj->vaddr);
drivers/gpu/drm/tegra/fbdev.c
123
bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
drivers/gpu/drm/tegra/fbdev.c
125
if (!bo->vaddr) {
drivers/gpu/drm/tegra/fbdev.c
133
info->screen_buffer = bo->vaddr + offset;
drivers/gpu/drm/tegra/fbdev.c
50
vunmap(bo->vaddr);
drivers/gpu/drm/tegra/fbdev.c
51
bo->vaddr = NULL;
drivers/gpu/drm/tegra/gem.c
127
err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
drivers/gpu/drm/tegra/gem.c
181
void *vaddr;
drivers/gpu/drm/tegra/gem.c
184
if (obj->vaddr)
drivers/gpu/drm/tegra/gem.c
185
return obj->vaddr;
drivers/gpu/drm/tegra/gem.c
192
return map.vaddr;
drivers/gpu/drm/tegra/gem.c
195
vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
drivers/gpu/drm/tegra/gem.c
197
if (!vaddr)
drivers/gpu/drm/tegra/gem.c
200
return vaddr;
drivers/gpu/drm/tegra/gem.c
208
if (obj->vaddr)
drivers/gpu/drm/tegra/gem.c
338
} else if (bo->vaddr) {
drivers/gpu/drm/tegra/gem.c
339
dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
drivers/gpu/drm/tegra/gem.c
391
bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
drivers/gpu/drm/tegra/gem.c
393
if (!bo->vaddr) {
drivers/gpu/drm/tegra/gem.c
601
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
drivers/gpu/drm/tegra/gem.c
651
if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
drivers/gpu/drm/tegra/gem.c
728
void *vaddr;
drivers/gpu/drm/tegra/gem.c
730
vaddr = tegra_bo_mmap(&bo->base);
drivers/gpu/drm/tegra/gem.c
731
if (IS_ERR(vaddr))
drivers/gpu/drm/tegra/gem.c
732
return PTR_ERR(vaddr);
drivers/gpu/drm/tegra/gem.c
734
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/tegra/gem.c
744
tegra_bo_munmap(&bo->base, map->vaddr);
drivers/gpu/drm/tegra/gem.h
61
void *vaddr;
drivers/gpu/drm/tests/drm_format_helper_test.c
1039
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
1080
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
1154
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
1161
buf = dst.vaddr;
drivers/gpu/drm/tests/drm_format_helper_test.c
1168
buf = dst.vaddr;
drivers/gpu/drm/tests/drm_format_helper_test.c
742
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
748
buf = dst.vaddr;
drivers/gpu/drm/tests/drm_format_helper_test.c
790
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
831
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
872
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
916
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
957
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_format_helper_test.c
998
buf = dst.vaddr; /* restore original value of buf */
drivers/gpu/drm/tests/drm_gem_shmem_test.c
175
KUNIT_EXPECT_NULL(test, shmem->vaddr);
drivers/gpu/drm/tests/drm_gem_shmem_test.c
183
KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
drivers/gpu/drm/tests/drm_gem_shmem_test.c
192
KUNIT_EXPECT_NULL(test, shmem->vaddr);
drivers/gpu/drm/tests/drm_panic_test.c
124
u8 *vaddr;
drivers/gpu/drm/tests/drm_panic_test.c
140
vaddr = kmap_local_page(pages[p]);
drivers/gpu/drm/tests/drm_panic_test.c
141
memset(vaddr, 0xa5, PAGE_SIZE);
drivers/gpu/drm/tests/drm_panic_test.c
142
kunmap_local(vaddr);
drivers/gpu/drm/tests/drm_panic_test.c
154
vaddr = kmap_local_page(pages[p]);
drivers/gpu/drm/tests/drm_panic_test.c
156
drm_panic_check_color_byte(test, vaddr[i]);
drivers/gpu/drm/tests/drm_panic_test.c
158
kunmap_local(vaddr);
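Editorial note: drm_panic_test.c touches its pages through short-lived local mappings: kmap_local_page() for one page at a time, released with kunmap_local(). A minimal sketch of that pairing; fill_pages() is hypothetical and assumes the pages[] array is already allocated and pinned by the caller:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void fill_pages(struct page **pages, unsigned int npages, u8 pattern)
    {
            unsigned int p;

            for (p = 0; p < npages; p++) {
                    u8 *vaddr = kmap_local_page(pages[p]); /* maps one page */

                    memset(vaddr, pattern, PAGE_SIZE);
                    kunmap_local(vaddr); /* local mappings release in LIFO order */
            }
    }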
drivers/gpu/drm/tiny/cirrus-qemu.c
337
struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram);
drivers/gpu/drm/tiny/cirrus-qemu.c
356
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
drivers/gpu/drm/tiny/gm12u320.c
258
void *vaddr;
drivers/gpu/drm/tiny/gm12u320.c
271
vaddr = gm12u320->fb_update.src_map.vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/tiny/gm12u320.c
279
src = vaddr + y1 * fb->pitches[0] + x1 * 4;
drivers/gpu/drm/tiny/ili9225.c
107
tr = src->vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/tiny/pixpaper.c
873
void *vaddr = map.vaddr;
drivers/gpu/drm/tiny/pixpaper.c
889
src_pixels = (__le32 *)vaddr;
drivers/gpu/drm/ttm/ttm_bo_util.c
109
memset(dst_map.vaddr, 0, PAGE_SIZE);
drivers/gpu/drm/ttm/ttm_bo_util.c
534
void *vaddr;
drivers/gpu/drm/ttm/ttm_bo_util.c
545
vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
drivers/gpu/drm/ttm/ttm_bo_util.c
546
if (!vaddr)
drivers/gpu/drm/ttm/ttm_bo_util.c
549
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/ttm/ttm_bo_util.c
574
vunmap(map->vaddr);
drivers/gpu/drm/ttm/ttm_pool.c
143
void *vaddr;
drivers/gpu/drm/ttm/ttm_pool.c
174
vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
drivers/gpu/drm/ttm/ttm_pool.c
176
if (!vaddr)
drivers/gpu/drm/ttm/ttm_pool.c
182
if (is_vmalloc_addr(vaddr))
drivers/gpu/drm/ttm/ttm_pool.c
183
p = vmalloc_to_page(vaddr);
drivers/gpu/drm/ttm/ttm_pool.c
185
p = virt_to_page(vaddr);
drivers/gpu/drm/ttm/ttm_pool.c
187
dma->vaddr = (unsigned long)vaddr | order;
drivers/gpu/drm/ttm/ttm_pool.c
202
void *vaddr;
drivers/gpu/drm/ttm/ttm_pool.c
221
vaddr = (void *)(dma->vaddr & PAGE_MASK);
drivers/gpu/drm/ttm/ttm_pool.c
222
dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
drivers/gpu/drm/ttm/ttm_pool.c
411
return dma->vaddr & ~PAGE_MASK;
drivers/gpu/drm/ttm/ttm_pool.c
68
unsigned long vaddr;
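Editorial note: ttm_pool.c stores two facts in one word. Because the DMA vaddr is page aligned, its PAGE_SHIFT low bits are guaranteed zero, so the allocation order is packed into them: vaddr | order on store, then & PAGE_MASK and & ~PAGE_MASK to split on load. A standalone sketch of the tagging trick, with illustrative names:

    #include <linux/bug.h>
    #include <linux/mm.h>

    struct tagged_dma {
            unsigned long vaddr;  /* page-aligned address | allocation order */
    };

    static void tag_store(struct tagged_dma *dma, void *vaddr,
                          unsigned int order)
    {
            /* Only sound because the address is page aligned and the order
             * is far smaller than PAGE_SIZE.
             */
            WARN_ON((unsigned long)vaddr & ~PAGE_MASK);
            dma->vaddr = (unsigned long)vaddr | order;
    }

    static void *tag_addr(const struct tagged_dma *dma)
    {
            return (void *)(dma->vaddr & PAGE_MASK);
    }

    static unsigned int tag_order(const struct tagged_dma *dma)
    {
            return dma->vaddr & ~PAGE_MASK;
    }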
drivers/gpu/drm/ttm/ttm_resource.c
901
memunmap(iter_io->dmap.vaddr);
drivers/gpu/drm/ttm/ttm_tt.c
501
kunmap_local(map->vaddr);
drivers/gpu/drm/udl/udl_modeset.c
209
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/udl/udl_modeset.c
230
ret = udl_render_hline(udl, log_bpp, &urb, (char *)vaddr,
drivers/gpu/drm/v3d/v3d_bo.c
160
bo->vaddr = NULL;
drivers/gpu/drm/v3d/v3d_bo.c
198
bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
drivers/gpu/drm/v3d/v3d_bo.c
204
vunmap(bo->vaddr);
drivers/gpu/drm/v3d/v3d_bo.c
205
bo->vaddr = NULL;
drivers/gpu/drm/v3d/v3d_bo.c
45
if (bo->vaddr)
drivers/gpu/drm/v3d/v3d_drv.h
250
void *vaddr;
drivers/gpu/drm/v3d/v3d_sched.c
443
wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);
drivers/gpu/drm/v3d/v3d_sched.c
467
((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
drivers/gpu/drm/v3d/v3d_sched.c
485
value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
drivers/gpu/drm/v3d/v3d_sched.c
506
value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
drivers/gpu/drm/v3d/v3d_sched.c
551
data = ((u8 *)bo->vaddr) + copy->offset;
drivers/gpu/drm/v3d/v3d_sched.c
559
query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
drivers/gpu/drm/v3d/v3d_sched.c
655
data = ((u8 *)bo->vaddr) + copy->offset;
drivers/gpu/drm/vboxvideo/vbox_mode.c
405
u8 *src = map.vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/vc4/vc4_bo.c
307
dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr);
drivers/gpu/drm/vc4/vc4_bo.c
308
bo->base.vaddr = NULL;
drivers/gpu/drm/vc4/vc4_bo.c
440
memset(bo->base.vaddr, 0, bo->base.base.size);
drivers/gpu/drm/vc4/vc4_bo.c
574
if (!bo->base.vaddr) {
drivers/gpu/drm/vc4/vc4_bo.c
871
if (copy_from_user(bo->base.vaddr,
drivers/gpu/drm/vc4/vc4_bo.c
880
memset(bo->base.vaddr + args->size, 0,
drivers/gpu/drm/vc4/vc4_gem.c
816
exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
drivers/gpu/drm/vc4/vc4_gem.c
820
exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
drivers/gpu/drm/vc4/vc4_gem.c
825
exec->exec_bo->vaddr + bin_offset,
drivers/gpu/drm/vc4/vc4_render_cl.c
60
*(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
drivers/gpu/drm/vc4/vc4_render_cl.c
66
*(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
drivers/gpu/drm/vc4/vc4_render_cl.c
72
*(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
drivers/gpu/drm/vc4/vc4_validate_shaders.c
795
validation_state.shader = shader_obj->vaddr;
drivers/gpu/drm/virtio/virtgpu_plane.c
515
if (bo->base.vaddr) {
drivers/gpu/drm/virtio/virtgpu_plane.c
516
iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
drivers/gpu/drm/vkms/vkms_formats.c
142
*addr = (u8 *)frame_info->map[0].vaddr + offset;
drivers/gpu/drm/vkms/vkms_formats.c
81
*addr = (u8 *)frame_info->map[0].vaddr + offset;
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
439
ptr = map->vaddr;
drivers/gpu/drm/xe/display/xe_panic.c
24
drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE);
drivers/gpu/drm/xe/display/xe_panic.c
25
kunmap_local(panic->vmap.vaddr);
drivers/gpu/drm/xe/xe_bo.c
1303
xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
drivers/gpu/drm/xe/xe_bo.c
1453
xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
drivers/gpu/drm/xe/xe_bo.c
2729
bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
drivers/gpu/drm/xe/xe_eu_stall.c
37
u8 *vaddr;
drivers/gpu/drm/xe/xe_eu_stall.c
471
xecore_start_vaddr = xecore_buf->vaddr;
drivers/gpu/drm/xe/xe_eu_stall.c
769
xecore_buf->vaddr = stream->bo->vmap.vaddr + vaddr_offset;
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
105
ret = xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, data->vaddr, data->hdr.size);
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
263
ret = pf_send_guc_save_vf_mig_data(gt, vfid, data->vaddr, size);
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
332
ret = pf_send_guc_restore_vf_mig_data(gt, vfid, data->vaddr, data->hdr.size);
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
436
ret = pf_migration_mmio_save(gt, vfid, data->vaddr, size);
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
461
ret = pf_migration_mmio_restore(gt, vfid, data->vaddr, data->hdr.size);
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
50
drm_print_hex_dump(&p, "mig_data: ", data->vaddr, min(SZ_64, data->hdr.size));
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
80
ret = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, data->vaddr, size);
drivers/gpu/drm/xe/xe_lrc.c
1241
state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
drivers/gpu/drm/xe/xe_map.h
55
return READ_ONCE(*(u32 *)map->vaddr);
drivers/gpu/drm/xe/xe_map.h
66
*(u32 *)map->vaddr = val;
drivers/gpu/drm/xe/xe_memirq.c
433
memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);
drivers/gpu/drm/xe/xe_memirq.c
444
memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr);
drivers/gpu/drm/xe/xe_memirq.c
520
memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr);
drivers/gpu/drm/xe/xe_memirq.c
521
memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32);
drivers/gpu/drm/xe/xe_oa.c
278
void *report = stream->oa_buffer.vaddr + tail;
drivers/gpu/drm/xe/xe_oa.c
326
oa_buf_end = stream->oa_buffer.vaddr + stream->oa_buffer.circ_size;
drivers/gpu/drm/xe/xe_oa.c
334
if (copy_to_user(buf, stream->oa_buffer.vaddr,
drivers/gpu/drm/xe/xe_oa.c
350
u8 *oa_buf_base = stream->oa_buffer.vaddr;
drivers/gpu/drm/xe/xe_oa.c
378
u8 *oa_buf_end = stream->oa_buffer.vaddr + stream->oa_buffer.circ_size;
drivers/gpu/drm/xe/xe_oa.c
439
memset(stream->oa_buffer.vaddr, 0, xe_bo_size(stream->oa_buffer.bo));
drivers/gpu/drm/xe/xe_oa.c
896
stream->oa_buffer.vaddr = bo->vmap.vaddr;
drivers/gpu/drm/xe/xe_oa_types.h
166
u8 *vaddr;
drivers/gpu/drm/xe/xe_pt.c
1848
memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
drivers/gpu/drm/xe/xe_sa.c
131
sa_manager->cpu_ptr = sa_manager->bo->vmap.vaddr;
drivers/gpu/drm/xe/xe_sa.c
81
sa_manager->cpu_ptr = bo->vmap.vaddr;
drivers/gpu/drm/xe/xe_sriov_packet.c
139
data->vaddr = bo->vmap.vaddr;
drivers/gpu/drm/xe/xe_sriov_packet.c
147
data->vaddr = buff;
drivers/gpu/drm/xe/xe_sriov_packet.c
225
if (copy_to_user(buf, data->vaddr + (data->hdr.size - data->remaining), len))
drivers/gpu/drm/xe/xe_sriov_packet.c
304
if (copy_from_user(data->vaddr + (data->hdr.size - data->remaining), buf, len))
drivers/gpu/drm/xe/xe_sriov_packet.c
381
klvs = data->vaddr;
drivers/gpu/drm/xe/xe_sriov_packet.c
411
u32 *klvs = data->vaddr;
drivers/gpu/drm/xe/xe_sriov_packet_types.h
60
void *vaddr;
drivers/gpu/drm/xen/xen_drm_front_gem.c
286
void *vaddr;
drivers/gpu/drm/xen/xen_drm_front_gem.c
292
vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
drivers/gpu/drm/xen/xen_drm_front_gem.c
294
if (!vaddr)
drivers/gpu/drm/xen/xen_drm_front_gem.c
296
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/xen/xen_drm_front_gem.c
304
vunmap(map->vaddr);
drivers/hwtracing/coresight/coresight-tmc-core.c
117
mdata = drvdata->crash_mdata.vaddr;
drivers/hwtracing/coresight/coresight-tmc-core.c
178
*bufpp = (char *)rbuf->vaddr + offset;
drivers/hwtracing/coresight/coresight-tmc-core.c
192
mdata = drvdata->crash_mdata.vaddr;
drivers/hwtracing/coresight/coresight-tmc-core.c
359
mdata = drvdata->crash_mdata.vaddr;
drivers/hwtracing/coresight/coresight-tmc-core.c
658
drvdata->resrv_buf.vaddr = memremap(res.start,
drivers/hwtracing/coresight/coresight-tmc-core.c
661
if (IS_ERR_OR_NULL(drvdata->resrv_buf.vaddr)) {
drivers/hwtracing/coresight/coresight-tmc-core.c
672
drvdata->crash_mdata.vaddr = memremap(res.start,
drivers/hwtracing/coresight/coresight-tmc-core.c
675
if (IS_ERR_OR_NULL(drvdata->crash_mdata.vaddr)) {
drivers/hwtracing/coresight/coresight-tmc-etf.c
612
mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;
drivers/hwtracing/coresight/coresight-tmc-etf.c
647
drvdata->buf = drvdata->resrv_buf.vaddr;
drivers/hwtracing/coresight/coresight-tmc-etr.c
1842
mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;
drivers/hwtracing/coresight/coresight-tmc-etr.c
25
void *vaddr;
drivers/hwtracing/coresight/coresight-tmc-etr.c
617
flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
drivers/hwtracing/coresight/coresight-tmc-etr.c
621
if (!flat_buf->vaddr) {
drivers/hwtracing/coresight/coresight-tmc-etr.c
642
flat_buf->vaddr, flat_buf->daddr,
drivers/hwtracing/coresight/coresight-tmc-etr.c
682
*bufpp = (char *)flat_buf->vaddr + offset;
drivers/hwtracing/coresight/coresight-tmc-etr.c
724
resrv_buf->vaddr = drvdata->resrv_buf.vaddr;
drivers/hwtracing/coresight/coresight-tmc.h
204
void *vaddr;
drivers/hwtracing/coresight/coresight-tmc.h
399
if (drvdata->resrv_buf.vaddr &&
drivers/hwtracing/coresight/coresight-tmc.h
407
if (drvdata->crash_mdata.vaddr &&
drivers/hwtracing/coresight/coresight-tmc.h
417
mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;
drivers/hwtracing/coresight/coresight-tmc.h
439
return crc32_le(0, (void *)drvdata->resrv_buf.vaddr, crc_size);
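Editorial note: coresight-tmc-core.c maps its reserved trace and metadata carveouts with memremap() rather than ioremap(), since the carveout is ordinary system RAM, and checks the result with IS_ERR_OR_NULL() as the entries above do. A reduced sketch under those assumptions; the name is illustrative and the resource lookup is elided:

    #include <linux/err.h>
    #include <linux/io.h>

    /* Hypothetical helper: map a reserved system-RAM region write-back
     * cacheable for CPU access; MEMREMAP_WB matches normal RAM semantics.
     */
    static void *demo_map_reserved(phys_addr_t start, size_t size)
    {
            void *vaddr = memremap(start, size, MEMREMAP_WB);

            if (IS_ERR_OR_NULL(vaddr))
                    return NULL;
            return vaddr;
    }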
drivers/i2c/busses/i2c-ibm_iic.c
127
out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0);
drivers/i2c/busses/i2c-ibm_iic.c
135
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
180
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
241
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
327
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
345
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
378
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
410
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
458
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
514
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
548
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.c
696
dev->vaddr = of_iomap(np, 0);
drivers/i2c/busses/i2c-ibm_iic.c
697
if (dev->vaddr == NULL) {
drivers/i2c/busses/i2c-ibm_iic.c
754
if (dev->vaddr)
drivers/i2c/busses/i2c-ibm_iic.c
755
iounmap(dev->vaddr);
drivers/i2c/busses/i2c-ibm_iic.c
775
iounmap(dev->vaddr);
drivers/i2c/busses/i2c-ibm_iic.c
83
volatile struct iic_regs __iomem *iic = dev->vaddr;
drivers/i2c/busses/i2c-ibm_iic.h
42
volatile struct iic_regs __iomem *vaddr;
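Editorial note: i2c-ibm_iic.c follows the classic of_iomap() lifecycle: map register window 0 of the device-tree node at probe, and balance the mapping with iounmap() on both the error path and remove. A minimal sketch; struct demo_dev and both functions are illustrative:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/of_address.h>

    struct demo_dev {
            void __iomem *vaddr;    /* mapped register window */
    };

    static int demo_probe(struct device_node *np, struct demo_dev *dev)
    {
            dev->vaddr = of_iomap(np, 0); /* map "reg" entry 0 of the node */
            if (!dev->vaddr)
                    return -ENXIO;
            return 0;
    }

    static void demo_remove(struct demo_dev *dev)
    {
            if (dev->vaddr)
                    iounmap(dev->vaddr); /* must balance of_iomap() */
    }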
drivers/iio/buffer/industrialio-buffer-dma.c
108
block->vaddr, block->phys_addr);
drivers/iio/buffer/industrialio-buffer-dma.c
182
block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
drivers/iio/buffer/industrialio-buffer-dma.c
184
if (!block->vaddr)
drivers/iio/buffer/industrialio-buffer-dma.c
580
addr = block->vaddr + queue->fileio.pos;
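Editorial note: industrialio-buffer-dma.c shows the coherent-buffer lifecycle in its smallest form: one dma_alloc_coherent() call returns both the kernel vaddr and the device-visible address, and both go back together through dma_free_coherent(). A sketch with illustrative names:

    #include <linux/dma-mapping.h>

    struct demo_block {
            void *vaddr;            /* CPU view */
            dma_addr_t phys_addr;   /* device view */
            size_t size;
    };

    static int demo_block_alloc(struct device *dev, struct demo_block *block,
                                size_t size)
    {
            block->size = PAGE_ALIGN(size);
            block->vaddr = dma_alloc_coherent(dev, block->size,
                                              &block->phys_addr, GFP_KERNEL);
            return block->vaddr ? 0 : -ENOMEM;
    }

    static void demo_block_free(struct device *dev, struct demo_block *block)
    {
            dma_free_coherent(dev, block->size, block->vaddr,
                              block->phys_addr);
            block->vaddr = NULL;
    }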
drivers/infiniband/hw/cxgb4/cq.c
1130
mm->vaddr = chp->cq.queue;
drivers/infiniband/hw/cxgb4/cq.c
1139
mm2->vaddr = NULL;
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
546
void *vaddr;
drivers/infiniband/hw/cxgb4/provider.c
116
mm->vaddr = NULL;
drivers/infiniband/hw/cxgb4/provider.c
139
void *vaddr;
drivers/infiniband/hw/cxgb4/provider.c
156
vaddr = mm->vaddr;
drivers/infiniband/hw/cxgb4/provider.c
182
vaddr, dma_addr, size);
drivers/infiniband/hw/cxgb4/qp.c
2288
sq_key_mm->vaddr = qhp->wq.sq.queue;
drivers/infiniband/hw/cxgb4/qp.c
2296
rq_key_mm->vaddr = qhp->wq.rq.queue;
drivers/infiniband/hw/cxgb4/qp.c
2305
sq_db_key_mm->vaddr = NULL;
drivers/infiniband/hw/cxgb4/qp.c
2316
rq_db_key_mm->vaddr = NULL;
drivers/infiniband/hw/cxgb4/qp.c
2328
ma_sync_key_mm->vaddr = NULL;
drivers/infiniband/hw/cxgb4/qp.c
2786
srq_key_mm->vaddr = srq->wq.queue;
drivers/infiniband/hw/cxgb4/qp.c
2793
srq_db_key_mm->vaddr = NULL;
drivers/infiniband/hw/hfi1/hfi.h
2098
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
drivers/infiniband/hw/hfi1/rc.c
2033
u64 *vaddr = wqe->sg_list[0].vaddr;
drivers/infiniband/hw/hfi1/rc.c
2034
*vaddr = val;
drivers/infiniband/hw/hfi1/rc.c
2595
u64 vaddr = get_ib_reth_vaddr(reth);
drivers/infiniband/hw/hfi1/rc.c
2598
ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
drivers/infiniband/hw/hfi1/rc.c
2603
e->rdma_sge.vaddr = NULL;
drivers/infiniband/hw/hfi1/rc.c
2983
u64 vaddr = get_ib_reth_vaddr(reth);
drivers/infiniband/hw/hfi1/rc.c
2987
ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
drivers/infiniband/hw/hfi1/rc.c
2995
qp->r_sge.sge.vaddr = NULL;
drivers/infiniband/hw/hfi1/rc.c
3038
u64 vaddr = get_ib_reth_vaddr(reth);
drivers/infiniband/hw/hfi1/rc.c
3042
ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
drivers/infiniband/hw/hfi1/rc.c
3053
e->rdma_sge.vaddr = NULL;
drivers/infiniband/hw/hfi1/rc.c
3086
u64 vaddr = get_ib_ateth_vaddr(ateth);
drivers/infiniband/hw/hfi1/rc.c
3088
vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
drivers/infiniband/hw/hfi1/rc.c
3114
if (unlikely(vaddr & (sizeof(u64) - 1)))
drivers/infiniband/hw/hfi1/rc.c
3119
vaddr, rkey,
drivers/infiniband/hw/hfi1/rc.c
3123
maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
drivers/infiniband/hw/hfi1/rc.c
3127
(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
drivers/infiniband/hw/hfi1/tid_rdma.c
1098
pages[i++] = virt_to_page(sge->vaddr);
drivers/infiniband/hw/hfi1/tid_rdma.c
1100
sge->vaddr += len;
drivers/infiniband/hw/hfi1/tid_rdma.c
1111
sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
drivers/infiniband/hw/hfi1/tid_rdma.c
1727
wpriv->ss.sge.vaddr = req_addr;
drivers/infiniband/hw/hfi1/tid_rdma.c
1748
rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
drivers/infiniband/hw/hfi1/tid_rdma.c
1893
u32 bth0, u32 psn, u64 vaddr, u32 len)
drivers/infiniband/hw/hfi1/tid_rdma.c
2027
u64 vaddr;
drivers/infiniband/hw/hfi1/tid_rdma.c
2043
vaddr = get_ib_reth_vaddr(reth);
drivers/infiniband/hw/hfi1/tid_rdma.c
2046
ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
drivers/infiniband/hw/hfi1/tid_rdma.c
2063
vaddr, len))
drivers/infiniband/hw/hfi1/tid_rdma.c
2241
u64 vaddr;
drivers/infiniband/hw/hfi1/tid_rdma.c
2260
vaddr = be64_to_cpu(reth->vaddr);
drivers/infiniband/hw/hfi1/tid_rdma.c
2290
if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
drivers/infiniband/hw/hfi1/tid_rdma.c
2295
if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
drivers/infiniband/hw/hfi1/tid_rdma.c
3275
if ((u64)sge->vaddr & ~PAGE_MASK ||
drivers/infiniband/hw/hfi1/tid_rdma.c
3385
ohdr->u.tid_rdma.w_req.reth.vaddr =
drivers/infiniband/hw/hfi1/tid_rdma.c
3671
u64 vaddr;
drivers/infiniband/hw/hfi1/tid_rdma.c
3689
vaddr = be64_to_cpu(reth->vaddr);
drivers/infiniband/hw/hfi1/tid_rdma.c
3746
if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
drivers/infiniband/hw/hfi1/tid_rdma.c
3894
epriv->ss.sge.vaddr = resp_addr;
drivers/infiniband/hw/hfi1/tid_rdma.c
887
void *vaddr, *this_vaddr;
drivers/infiniband/hw/hfi1/tid_rdma.c
897
vaddr = page_address(pages[0]);
drivers/infiniband/hw/hfi1/tid_rdma.c
898
trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
drivers/infiniband/hw/hfi1/tid_rdma.c
907
if (this_vaddr != (vaddr + PAGE_SIZE)) {
drivers/infiniband/hw/hfi1/tid_rdma.c
944
vaddr = this_vaddr;
drivers/infiniband/hw/hfi1/tid_rdma.c
946
vaddr += PAGE_SIZE;
drivers/infiniband/hw/hfi1/trace.c
305
ib_u64_get(&eh->tid_rdma.w_req.reth.vaddr),
drivers/infiniband/hw/hfi1/trace.c
344
ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr),
drivers/infiniband/hw/hfi1/trace_tid.h
1330
__field(u64, vaddr)
drivers/infiniband/hw/hfi1/trace_tid.h
1337
__entry->vaddr = (u64)sge->vaddr;
drivers/infiniband/hw/hfi1/trace_tid.h
1345
__entry->vaddr,
drivers/infiniband/hw/hfi1/trace_tid.h
417
char mtu8k, char v1, void *vaddr),
drivers/infiniband/hw/hfi1/trace_tid.h
418
TP_ARGS(qp, flow, index, mtu8k, v1, vaddr),
drivers/infiniband/hw/hfi1/trace_tid.h
426
__field(u64, vaddr)
drivers/infiniband/hw/hfi1/trace_tid.h
434
__entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL;
drivers/infiniband/hw/hfi1/trace_tid.h
435
__entry->vaddr = (u64)vaddr;
drivers/infiniband/hw/hfi1/trace_tid.h
444
__entry->vaddr
drivers/infiniband/hw/hfi1/trace_tid.h
451
char mtu8k, char v1, void *vaddr),
drivers/infiniband/hw/hfi1/trace_tid.h
452
TP_ARGS(qp, flow, index, mtu8k, v1, vaddr)
drivers/infiniband/hw/hfi1/uc.c
144
ohdr->u.rc.reth.vaddr =
drivers/infiniband/hw/hfi1/uc.c
450
u64 vaddr = be64_to_cpu(reth->vaddr);
drivers/infiniband/hw/hfi1/uc.c
455
vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
drivers/infiniband/hw/hfi1/uc.c
462
qp->r_sge.sge.vaddr = NULL;
drivers/infiniband/hw/hfi1/ud.c
186
rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
160
unsigned long vaddr = tidbuf->vaddr;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
184
pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
255
if (!PAGE_ALIGNED(tinfo->vaddr))
drivers/infiniband/hw/hfi1/user_exp_rcv.c
265
tidbuf->vaddr = tinfo->vaddr;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
267
tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
278
tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
762
tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
drivers/infiniband/hw/hfi1/user_exp_rcv.h
21
unsigned long vaddr;
drivers/infiniband/hw/hfi1/user_pages.c
80
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
drivers/infiniband/hw/hfi1/user_pages.c
86
ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
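Editorial note: hfi1_acquire_user_pages() reduces to a pin_user_pages_fast() call: the user vaddr and page count go in, a pinned pages[] array comes back. A sketch that also shows the rollback a caller needs on a partial pin; demo_pin() is hypothetical:

    #include <linux/mm.h>

    static int demo_pin(unsigned long vaddr, size_t npages, bool writable,
                        struct page **pages)
    {
            unsigned int gup_flags = writable ? FOLL_WRITE : 0;
            int ret;

            ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
            if (ret < 0)
                    return ret;
            if ((size_t)ret != npages) {
                    /* Partial pin: release what we got and report failure. */
                    unpin_user_pages(pages, ret);
                    return -EFAULT;
            }
            return ret;
    }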
drivers/infiniband/hw/hfi1/verbs.c
1040
void *addr = ss->sge.vaddr;
drivers/infiniband/hw/hfi1/verbs.c
672
ss->sge.vaddr,
drivers/infiniband/hw/irdma/pble.c
108
chunk->vaddr = sd_entry->u.bp.addr.va + offset;
drivers/infiniband/hw/irdma/pble.c
112
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
drivers/infiniband/hw/irdma/pble.c
160
addr = chunk->vaddr;
drivers/infiniband/hw/irdma/pble.h
122
u64 **vaddr, u64 *fpm_addr);
drivers/infiniband/hw/irdma/pble.h
76
void *vaddr;
drivers/infiniband/hw/irdma/utils.c
2131
u64 **vaddr, u64 *fpm_addr)
drivers/infiniband/hw/irdma/utils.c
2139
*vaddr = NULL;
drivers/infiniband/hw/irdma/utils.c
2164
*vaddr = pchunk->vaddr + offset;
drivers/infiniband/hw/irdma/utils.c
2245
vfree(chunk->vaddr);
drivers/infiniband/hw/irdma/utils.c
2246
chunk->vaddr = NULL;
drivers/infiniband/hw/irdma/utils.c
2274
chunk->vaddr = va;
drivers/infiniband/hw/qedr/qedr_roce_cm.c
211
pkt->header.vaddr, pkt->header.baddr);
drivers/infiniband/hw/qedr/qedr_roce_cm.c
512
packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
drivers/infiniband/hw/qedr/qedr_roce_cm.c
515
if (!packet->header.vaddr) {
drivers/infiniband/hw/qedr/qedr_roce_cm.c
526
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
drivers/infiniband/hw/qedr/qedr_roce_cm.c
84
dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
drivers/infiniband/hw/qedr/verbs.c
3018
mr->hw_mr.vaddr = usr_addr;
drivers/infiniband/hw/qedr/verbs.c
3116
mr->hw_mr.vaddr = 0;
drivers/infiniband/hw/qedr/verbs.c
3358
#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
drivers/infiniband/hw/qedr/verbs.c
3360
DMA_REGPAIR_LE(sge->addr, vaddr); \
drivers/infiniband/hw/qedr/verbs.c
3371
#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
drivers/infiniband/hw/qedr/verbs.c
3373
DMA_REGPAIR_LE(sge->addr, vaddr); \
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
700
&bus_addr, bar->vaddr, bar->len);
drivers/infiniband/hw/usnic/usnic_vnic.c
106
bar0->vaddr, bar0->len);
drivers/infiniband/hw/usnic/usnic_vnic.c
360
vnic->bar[i].vaddr = pci_iomap(pdev, i, vnic->bar[i].len);
drivers/infiniband/hw/usnic/usnic_vnic.c
361
if (!vnic->bar[i].vaddr) {
drivers/infiniband/hw/usnic/usnic_vnic.c
397
if (!vnic->bar[i].vaddr)
drivers/infiniband/hw/usnic/usnic_vnic.c
400
iounmap(vnic->bar[i].vaddr);
drivers/infiniband/hw/usnic/usnic_vnic.c
434
iounmap(vnic->bar[i].vaddr);
drivers/infiniband/sw/rdmavt/mr.c
377
void *vaddr;
drivers/infiniband/sw/rdmavt/mr.c
379
vaddr = page_address(sg_page_iter_page(&sg_iter));
drivers/infiniband/sw/rdmavt/mr.c
380
if (!vaddr) {
drivers/infiniband/sw/rdmavt/mr.c
384
mr->mr.map[m]->segs[n].vaddr = vaddr;
drivers/infiniband/sw/rdmavt/mr.c
386
trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
drivers/infiniband/sw/rdmavt/mr.c
574
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
drivers/infiniband/sw/rdmavt/mr.c
604
mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
drivers/infiniband/sw/rdmavt/mr.c
690
((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
drivers/infiniband/sw/rdmavt/mr.c
748
isge->vaddr = (void *)sge->addr;
drivers/infiniband/sw/rdmavt/mr.c
802
isge->vaddr = mr->map[m]->segs[n].vaddr + off;
drivers/infiniband/sw/rdmavt/mr.c
832
u32 len, u64 vaddr, u32 rkey, int acc)
drivers/infiniband/sw/rdmavt/mr.c
858
sge->vaddr = (void *)vaddr;
drivers/infiniband/sw/rdmavt/mr.c
877
off = vaddr - mr->iova;
drivers/infiniband/sw/rdmavt/mr.c
878
if (unlikely(vaddr < mr->iova || off + len > mr->length ||
drivers/infiniband/sw/rdmavt/mr.c
909
sge->vaddr = mr->map[m]->segs[n].vaddr + off;
drivers/infiniband/sw/rdmavt/qp.c
2831
wss_insert(wss, sge->vaddr);
drivers/infiniband/sw/rdmavt/qp.c
2833
wss_insert(wss, (sge->vaddr + PAGE_SIZE));
drivers/infiniband/sw/rdmavt/qp.c
2858
((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
drivers/infiniband/sw/rdmavt/qp.c
2860
cacheless_memcpy(sge->vaddr, data, len);
drivers/infiniband/sw/rdmavt/qp.c
2862
memcpy(sge->vaddr, data, len);
drivers/infiniband/sw/rdmavt/qp.c
3080
maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
drivers/infiniband/sw/rdmavt/qp.c
3082
*(u64 *)sqp->s_sge.sge.vaddr =
drivers/infiniband/sw/rdmavt/qp.c
3085
(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
drivers/infiniband/sw/rdmavt/qp.c
3101
rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
drivers/infiniband/sw/rdmavt/trace_mr.h
104
__entry->vaddr = sge->vaddr;
drivers/infiniband/sw/rdmavt/trace_mr.h
120
__entry->vaddr,
drivers/infiniband/sw/rdmavt/trace_mr.h
25
__field(void *, vaddr)
drivers/infiniband/sw/rdmavt/trace_mr.h
38
__entry->vaddr = v;
drivers/infiniband/sw/rdmavt/trace_mr.h
56
(unsigned long long)__entry->vaddr,
drivers/infiniband/sw/rdmavt/trace_mr.h
89
__field(void *, vaddr)
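Editorial note: the rdmavt qp.c entries implement IB remote atomics directly on the SGE's kernel vaddr: fetch-and-add via atomic64_add_return() on the address cast to atomic64_t, compare-and-swap via cmpxchg() on the same address as a u64. A condensed sketch of that dispatch; the function and its flag argument are illustrative, and the target must be u64 aligned:

    #include <linux/atomic.h>

    static u64 demo_remote_atomic(void *vaddr, bool is_cswap,
                                  u64 swap_or_add, u64 compare)
    {
            atomic64_t *maddr = (atomic64_t *)vaddr; /* requires u64 alignment */

            if (is_cswap)
                    /* Returns the prior value, as IB compare-swap must. */
                    return (u64)cmpxchg((u64 *)vaddr, compare, swap_or_add);

            /* Fetch-and-add: add_return yields the new value, so subtract
             * the addend back to report the prior value.
             */
            return (u64)atomic64_add_return(swap_or_add, maddr) - swap_or_add;
    }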
drivers/iommu/amd/amd_iommu.h
132
static inline u64 iommu_virt_to_phys(void *vaddr)
drivers/iommu/amd/amd_iommu.h
134
return (u64)__sme_set(virt_to_phys(vaddr));
drivers/iommu/dma-iommu.c
1008
void *vaddr;
drivers/iommu/dma-iommu.c
1016
vaddr = dma_common_pages_remap(pages, size, prot,
drivers/iommu/dma-iommu.c
1018
if (!vaddr)
drivers/iommu/dma-iommu.c
1020
return vaddr;
drivers/iommu/generic_pt/kunit_iommu_pt.h
258
pt_vaddr_t vaddr = top_range.va;
drivers/iommu/generic_pt/kunit_iommu_pt.h
265
do_map(test, vaddr, paddr, next_len);
drivers/iommu/generic_pt/kunit_iommu_pt.h
266
gnmapped = iommu_unmap(&priv->domain, vaddr, base_len);
drivers/iommu/generic_pt/kunit_iommu_pt.h
270
do_map(test, vaddr, paddr, next_len);
drivers/iommu/generic_pt/kunit_iommu_pt.h
271
do_map(test, vaddr + next_len, paddr, next_len);
drivers/iommu/generic_pt/kunit_iommu_pt.h
272
gnmapped = iommu_unmap(&priv->domain, vaddr, base_len);
drivers/iommu/generic_pt/kunit_iommu_pt.h
274
gnmapped = iommu_unmap(&priv->domain, vaddr + next_len,
drivers/iommu/iommufd/vfio_compat.c
191
rc = iopt_map_user_pages(ictx, &ioas->iopt, &iova, u64_to_user_ptr(map.vaddr),
drivers/iommu/sun50i-iommu.c
292
void *vaddr, unsigned int count)
drivers/iommu/sun50i-iommu.c
295
dma_addr_t dma = virt_to_phys(vaddr);
drivers/irqchip/qcom-irq-combiner.c
177
void __iomem *vaddr;
drivers/irqchip/qcom-irq-combiner.c
192
vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE);
drivers/irqchip/qcom-irq-combiner.c
193
if (!vaddr) {
drivers/irqchip/qcom-irq-combiner.c
199
ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr;
drivers/mailbox/bcm-pdc-mailbox.c
817
void *vaddr;
drivers/mailbox/bcm-pdc-mailbox.c
828
vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
drivers/mailbox/bcm-pdc-mailbox.c
829
if (unlikely(!vaddr))
drivers/mailbox/bcm-pdc-mailbox.c
850
rx_ctx->resp_hdr = vaddr;
drivers/mailbox/pcc.c
128
static void read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
drivers/mailbox/pcc.c
132
*val = readb(vaddr);
drivers/mailbox/pcc.c
135
*val = readw(vaddr);
drivers/mailbox/pcc.c
138
*val = readl(vaddr);
drivers/mailbox/pcc.c
141
*val = readq(vaddr);
drivers/mailbox/pcc.c
146
static void write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
drivers/mailbox/pcc.c
150
writeb(val, vaddr);
drivers/mailbox/pcc.c
153
writew(val, vaddr);
drivers/mailbox/pcc.c
156
writel(val, vaddr);
drivers/mailbox/pcc.c
159
writeq(val, vaddr);
drivers/mailbox/pcc.c
173
if (reg->vaddr)
drivers/mailbox/pcc.c
174
read_register(reg->vaddr, val, reg->gas->bit_width);
drivers/mailbox/pcc.c
188
if (reg->vaddr)
drivers/mailbox/pcc.c
189
write_register(reg->vaddr, val, reg->gas->bit_width);
drivers/mailbox/pcc.c
544
reg->vaddr = acpi_os_ioremap(gas->address, gas->bit_width / 8);
drivers/mailbox/pcc.c
545
if (!reg->vaddr) {
drivers/mailbox/pcc.c
76
void __iomem *vaddr;
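Editorial note: pcc.c dispatches on the register's declared bit width to pick the matching MMIO primitive. A sketch of the read side; the write side mirrors it with writeb()..writeq(). demo_read_register() is illustrative, and readq() presumes the platform provides 64-bit MMIO:

    #include <linux/io.h>

    static void demo_read_register(void __iomem *vaddr, u64 *val,
                                   unsigned int bit_width)
    {
            switch (bit_width) {
            case 8:
                    *val = readb(vaddr);
                    break;
            case 16:
                    *val = readw(vaddr);
                    break;
            case 32:
                    *val = readl(vaddr);
                    break;
            case 64:
                    *val = readq(vaddr); /* needs 64-bit MMIO support */
                    break;
            }
    }

Unhandled widths leave *val untouched here; a real driver should reject them when it parses the register description.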
drivers/md/dm-pcache/cache_dev.c
103
cache_dev->mapping = vaddr;
drivers/md/dm-pcache/cache_dev.c
106
ret = build_vmap(dax_dev, total_pages, &vaddr);
drivers/md/dm-pcache/cache_dev.c
112
cache_dev->mapping = vaddr;
drivers/md/dm-pcache/cache_dev.c
19
static int build_vmap(struct dax_device *dax_dev, long total_pages, void **vaddr)
drivers/md/dm-pcache/cache_dev.c
51
*vaddr = vmap(pages, total_pages, VM_MAP, PAGE_KERNEL);
drivers/md/dm-pcache/cache_dev.c
52
if (!*vaddr) {
drivers/md/dm-pcache/cache_dev.c
70
void *vaddr;
drivers/md/dm-pcache/cache_dev.c
89
DAX_ACCESS, &vaddr, &pfn);
drivers/media/common/videobuf2/videobuf2-core.c
1162
return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
drivers/media/common/videobuf2/videobuf2-core.c
2566
void *vaddr;
drivers/media/common/videobuf2/videobuf2-core.c
2579
vaddr = vb2_plane_vaddr(vb, plane);
drivers/media/common/videobuf2/videobuf2-core.c
2581
return vaddr ? (unsigned long)vaddr : -EINVAL;
drivers/media/common/videobuf2/videobuf2-core.c
2787
void *vaddr;
drivers/media/common/videobuf2/videobuf2-core.c
2851
if (!q->mem_ops->vaddr)
drivers/media/common/videobuf2/videobuf2-core.c
2912
fileio->bufs[i].vaddr = vb2_plane_vaddr(vb, 0);
drivers/media/common/videobuf2/videobuf2-core.c
2913
if (fileio->bufs[i].vaddr == NULL) {
drivers/media/common/videobuf2/videobuf2-core.c
3085
ret = copy_to_user(data, buf->vaddr + buf->pos, count);
drivers/media/common/videobuf2/videobuf2-core.c
3087
ret = copy_from_user(buf->vaddr + buf->pos, data, count);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
105
buf->vaddr = map.vaddr;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
107
return buf->vaddr;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
111
buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
drivers/media/common/videobuf2/videobuf2-dma-contig.c
113
return buf->vaddr;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
136
if (buf->vaddr)
drivers/media/common/videobuf2/videobuf2-dma-contig.c
137
flush_kernel_vmap_range(buf->vaddr, buf->size);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
156
if (buf->vaddr)
drivers/media/common/videobuf2/videobuf2-dma-contig.c
157
invalidate_kernel_vmap_range(buf->vaddr, buf->size);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
175
if (buf->vaddr)
drivers/media/common/videobuf2/videobuf2-dma-contig.c
176
dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
206
buf->vaddr = buf->cookie;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
28
void *vaddr;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
444
void *vaddr;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
447
vaddr = vb2_dc_vaddr(buf->vb, buf);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
448
if (!vaddr)
drivers/media/common/videobuf2/videobuf2-dma-contig.c
451
iosys_map_set_vaddr(map, vaddr);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
565
unsigned long vaddr, unsigned long size)
drivers/media/common/videobuf2/videobuf2-dma-contig.c
577
if (!IS_ALIGNED(vaddr | size, dma_align)) {
drivers/media/common/videobuf2/videobuf2-dma-contig.c
598
offset = lower_32_bits(offset_in_page(vaddr));
drivers/media/common/videobuf2/videobuf2-dma-contig.c
599
vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
drivers/media/common/videobuf2/videobuf2-dma-contig.c
726
buf->vaddr = NULL;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
735
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
747
if (buf->vaddr) {
drivers/media/common/videobuf2/videobuf2-dma-contig.c
749
buf->vaddr = NULL;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
813
.vaddr = vb2_dc_vaddr,
drivers/media/common/videobuf2/videobuf2-dma-contig.c
98
if (buf->vaddr)
drivers/media/common/videobuf2/videobuf2-dma-contig.c
99
return buf->vaddr;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
116
buf->vaddr = NULL;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
190
if (buf->vaddr)
drivers/media/common/videobuf2/videobuf2-dma-sg.c
191
vm_unmap_ram(buf->vaddr, buf->num_pages);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
224
unsigned long vaddr, unsigned long size)
drivers/media/common/videobuf2/videobuf2-dma-sg.c
237
buf->vaddr = NULL;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
240
buf->offset = vaddr & ~PAGE_MASK;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
244
vec = vb2_create_framevec(vaddr, size,
drivers/media/common/videobuf2/videobuf2-dma-sg.c
293
if (buf->vaddr)
drivers/media/common/videobuf2/videobuf2-dma-sg.c
294
vm_unmap_ram(buf->vaddr, buf->num_pages);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
312
if (!buf->vaddr) {
drivers/media/common/videobuf2/videobuf2-dma-sg.c
315
buf->vaddr = ret ? NULL : map.vaddr;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
317
buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
322
return buf->vaddr ? buf->vaddr + buf->offset : NULL;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
36
void *vaddr;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
491
void *vaddr;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
494
vaddr = vb2_dma_sg_vaddr(buf->vb, buf);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
495
if (!vaddr)
drivers/media/common/videobuf2/videobuf2-dma-sg.c
498
iosys_map_set_vaddr(map, vaddr);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
574
buf->vaddr = NULL;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
583
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
595
if (buf->vaddr) {
drivers/media/common/videobuf2/videobuf2-dma-sg.c
597
buf->vaddr = NULL;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
664
.vaddr = vb2_dma_sg_vaddr,
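Editorial note: videobuf2-dma-sg.c maps a pinned page array with vm_map_ram(), keeps the sub-page offset of the user buffer, and tears the range down with vm_unmap_ram() using the same page count. A reduced sketch of the pairing, with illustrative names:

    #include <linux/vmalloc.h>

    static void *demo_map(struct page **pages, unsigned int num_pages,
                          unsigned long offset)
    {
            void *vaddr = vm_map_ram(pages, num_pages, -1); /* -1: any node */

            /* Hand back the user's first byte, not the page boundary. */
            return vaddr ? vaddr + offset : NULL;
    }

    static void demo_unmap(void *vaddr, unsigned int num_pages)
    {
            /* vm_unmap_ram() wants the page-aligned base and the same count. */
            vm_unmap_ram((void *)((unsigned long)vaddr & PAGE_MASK), num_pages);
    }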
drivers/media/common/videobuf2/videobuf2-vmalloc.c
107
buf->vaddr = (__force void *)
drivers/media/common/videobuf2/videobuf2-vmalloc.c
110
buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
113
if (!buf->vaddr)
drivers/media/common/videobuf2/videobuf2-vmalloc.c
115
buf->vaddr += offset;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
129
unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
136
if (vaddr)
drivers/media/common/videobuf2/videobuf2-vmalloc.c
137
vm_unmap_ram((void *)vaddr, n_pages);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
146
iounmap((__force void __iomem *)buf->vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
156
if (!buf->vaddr) {
drivers/media/common/videobuf2/videobuf2-vmalloc.c
161
return buf->vaddr;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
180
ret = remap_vmalloc_range(vma, buf->vaddr, 0);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
220
void *vaddr = buf->vaddr;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
235
struct page *page = vmalloc_to_page(vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
243
vaddr += PAGE_SIZE;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
26
void *vaddr;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
315
iosys_map_set_vaddr(map, buf->vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
349
if (WARN_ON(!buf->vaddr))
drivers/media/common/videobuf2/videobuf2-vmalloc.c
377
buf->vaddr = map.vaddr;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
385
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
388
buf->vaddr = NULL;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
394
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
396
if (buf->vaddr)
drivers/media/common/videobuf2/videobuf2-vmalloc.c
436
.vaddr = vb2_vmalloc_vaddr,
drivers/media/common/videobuf2/videobuf2-vmalloc.c
47
buf->vaddr = vmalloc_user(buf->size);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
48
if (!buf->vaddr) {
drivers/media/common/videobuf2/videobuf2-vmalloc.c
68
vfree(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
74
unsigned long vaddr, unsigned long size)
drivers/media/common/videobuf2/videobuf2-vmalloc.c
86
offset = vaddr & ~PAGE_MASK;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
88
vec = vb2_create_framevec(vaddr, size,
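Editorial note: for its own allocations, videobuf2-vmalloc.c uses vmalloc_user(), which zeroes the pages and flags the area as safe for userspace mapping; that flag is what lets remap_vmalloc_range() service mmap() later. A minimal sketch of the pair, names illustrative:

    #include <linux/errno.h>
    #include <linux/mm_types.h>
    #include <linux/vmalloc.h>

    struct demo_buf {
            void *vaddr;
            unsigned long size;
    };

    static int demo_alloc(struct demo_buf *buf, unsigned long size)
    {
            buf->size = size;
            buf->vaddr = vmalloc_user(buf->size); /* zeroed, VM_USERMAP set */
            return buf->vaddr ? 0 : -ENOMEM;
    }

    static int demo_mmap(struct demo_buf *buf, struct vm_area_struct *vma)
    {
            /* Map the whole buffer at vma->vm_start, from page offset 0. */
            return remap_vmalloc_range(vma, buf->vaddr, 0);
    }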
drivers/media/pci/bt8xx/bttv-driver.c
2784
u32 *vaddr = vb2_plane_vaddr(&wakeup->vbuf.vb2_buf, 0);
drivers/media/pci/bt8xx/bttv-driver.c
2788
if (vaddr && size) {
drivers/media/pci/bt8xx/bttv-driver.c
2789
vaddr += size - 1;
drivers/media/pci/bt8xx/bttv-driver.c
2790
*vaddr = wakeup->vbuf.sequence;
drivers/media/pci/cx23885/cx23885-alsa.c
107
vfree(buf->vaddr);
drivers/media/pci/cx23885/cx23885-alsa.c
108
buf->vaddr = NULL;
drivers/media/pci/cx23885/cx23885-alsa.c
142
vfree(buf->vaddr);
drivers/media/pci/cx23885/cx23885-alsa.c
143
buf->vaddr = NULL;
drivers/media/pci/cx23885/cx23885-alsa.c
405
substream->runtime->dma_area = chip->buf->vaddr;
drivers/media/pci/cx23885/cx23885-alsa.c
78
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
drivers/media/pci/cx23885/cx23885-alsa.c
79
if (NULL == buf->vaddr) {
drivers/media/pci/cx23885/cx23885-alsa.c
85
buf->vaddr, nr_pages << PAGE_SHIFT);
drivers/media/pci/cx23885/cx23885-alsa.c
87
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
drivers/media/pci/cx23885/cx23885-alsa.c
96
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
drivers/media/pci/cx23885/cx23885.h
327
void *vaddr;
drivers/media/pci/cx25821/cx25821-alsa.c
140
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
drivers/media/pci/cx25821/cx25821-alsa.c
141
if (NULL == buf->vaddr) {
drivers/media/pci/cx25821/cx25821-alsa.c
147
buf->vaddr,
drivers/media/pci/cx25821/cx25821-alsa.c
150
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
drivers/media/pci/cx25821/cx25821-alsa.c
159
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
drivers/media/pci/cx25821/cx25821-alsa.c
170
vfree(buf->vaddr);
drivers/media/pci/cx25821/cx25821-alsa.c
171
buf->vaddr = NULL;
drivers/media/pci/cx25821/cx25821-alsa.c
205
vfree(buf->vaddr);
drivers/media/pci/cx25821/cx25821-alsa.c
206
buf->vaddr = NULL;
drivers/media/pci/cx25821/cx25821-alsa.c
54
void *vaddr;
drivers/media/pci/cx25821/cx25821-alsa.c
547
substream->runtime->dma_area = chip->buf->vaddr;
drivers/media/pci/cx88/cx88-alsa.c
280
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
drivers/media/pci/cx88/cx88-alsa.c
281
if (!buf->vaddr) {
drivers/media/pci/cx88/cx88-alsa.c
287
buf->vaddr, nr_pages << PAGE_SHIFT);
drivers/media/pci/cx88/cx88-alsa.c
289
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
drivers/media/pci/cx88/cx88-alsa.c
298
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
drivers/media/pci/cx88/cx88-alsa.c
309
vfree(buf->vaddr);
drivers/media/pci/cx88/cx88-alsa.c
310
buf->vaddr = NULL;
drivers/media/pci/cx88/cx88-alsa.c
345
vfree(buf->vaddr);
drivers/media/pci/cx88/cx88-alsa.c
346
buf->vaddr = NULL;
drivers/media/pci/cx88/cx88-alsa.c
47
void *vaddr;
drivers/media/pci/cx88/cx88-alsa.c
495
substream->runtime->dma_area = chip->buf->vaddr;
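Editorial note: the cx88, cx23885, cx25821 and (below) saa7134 ALSA code all repeat one recipe: vmalloc_32() for a buffer whose pages stay below 4 GiB, then vmalloc_to_page() at each PAGE_SIZE step to resolve the pages for a scatter-gather list. A condensed sketch under those assumptions; demo_alloc_sgbuf() is illustrative:

    #include <linux/scatterlist.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    static int demo_alloc_sgbuf(struct sg_table *sgt, void **vaddr_out,
                                unsigned int nr_pages)
    {
            struct scatterlist *sg;
            unsigned int i;
            void *vaddr;

            vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); /* 32-bit addressable */
            if (!vaddr)
                    return -ENOMEM;
            memset(vaddr, 0, nr_pages << PAGE_SHIFT);

            if (sg_alloc_table(sgt, nr_pages, GFP_KERNEL)) {
                    vfree(vaddr);
                    return -ENOMEM;
            }

            /* vmalloc memory is only virtually contiguous: resolve each page. */
            for_each_sg(sgt->sgl, sg, nr_pages, i)
                    sg_set_page(sg, vmalloc_to_page(vaddr + i * PAGE_SIZE),
                                PAGE_SIZE, 0);

            *vaddr_out = vaddr;
            return 0;
    }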
drivers/media/pci/intel/ipu6/ipu6-dma.c
117
void *vaddr;
drivers/media/pci/intel/ipu6/ipu6-dma.c
130
vaddr = info->vaddr + offset;
drivers/media/pci/intel/ipu6/ipu6-dma.c
131
clflush_cache_range(vaddr, size);
drivers/media/pci/intel/ipu6/ipu6-dma.c
209
info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
drivers/media/pci/intel/ipu6/ipu6-dma.c
210
if (!info->vaddr)
drivers/media/pci/intel/ipu6/ipu6-dma.c
220
return info->vaddr;
drivers/media/pci/intel/ipu6/ipu6-dma.c
244
void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr,
drivers/media/pci/intel/ipu6/ipu6-dma.c
25
void *vaddr;
drivers/media/pci/intel/ipu6/ipu6-dma.c
262
if (WARN_ON(!info->vaddr))
drivers/media/pci/intel/ipu6/ipu6-dma.c
274
vunmap(vaddr);
drivers/media/pci/intel/ipu6/ipu6-dma.c
311
if (!info->vaddr)
drivers/media/pci/intel/ipu6/ipu6-dma.h
28
void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr,
drivers/media/pci/saa7134/saa7134-alsa.c
262
dma->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
drivers/media/pci/saa7134/saa7134-alsa.c
263
if (NULL == dma->vaddr) {
drivers/media/pci/saa7134/saa7134-alsa.c
269
dma->vaddr, nr_pages << PAGE_SHIFT);
drivers/media/pci/saa7134/saa7134-alsa.c
271
memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
drivers/media/pci/saa7134/saa7134-alsa.c
280
pg = vmalloc_to_page(dma->vaddr + i * PAGE_SIZE);
drivers/media/pci/saa7134/saa7134-alsa.c
291
vfree(dma->vaddr);
drivers/media/pci/saa7134/saa7134-alsa.c
292
dma->vaddr = NULL;
drivers/media/pci/saa7134/saa7134-alsa.c
326
vfree(dma->vaddr);
drivers/media/pci/saa7134/saa7134-alsa.c
327
dma->vaddr = NULL;
drivers/media/pci/saa7134/saa7134-alsa.c
728
substream->runtime->dma_area = dev->dmasound.vaddr;
drivers/media/pci/saa7134/saa7134.h
509
void *vaddr;
drivers/media/platform/allegro-dvt/allegro-core.c
118
void *vaddr;
drivers/media/platform/allegro-dvt/allegro-core.c
1374
size = allegro_encode_config_blob(blob->vaddr, &param);
drivers/media/platform/allegro-dvt/allegro-core.c
1383
msg.blob = blob->vaddr;
drivers/media/platform/allegro-dvt/allegro-core.c
2237
err = allegro_decode_config_blob(&param, msg, channel->config_blob.vaddr);
drivers/media/platform/allegro-dvt/allegro-core.c
2425
memcpy(dev->firmware.vaddr, buf, size);
drivers/media/platform/allegro-dvt/allegro-core.c
772
buffer->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size,
drivers/media/platform/allegro-dvt/allegro-core.c
774
if (!buffer->vaddr)
drivers/media/platform/allegro-dvt/allegro-core.c
784
if (buffer->vaddr) {
drivers/media/platform/allegro-dvt/allegro-core.c
786
buffer->vaddr, buffer->paddr);
drivers/media/platform/allegro-dvt/allegro-core.c
787
buffer->vaddr = NULL;
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
229
dummy_buff->vaddr = dma_alloc_attrs(cap->isp->dev, dummy_buff->size,
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
232
if (!dummy_buff->vaddr)
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
241
cap->dummy_buff.vaddr, cap->dummy_buff.dma_addr,
drivers/media/platform/amlogic/c3/isp/c3-isp-common.h
141
void *vaddr;
drivers/media/platform/arm/mali-c55/mali-c55-resizer.c
361
unsigned int vaddr = rsz->id == MALI_C55_RSZ_FR ?
drivers/media/platform/arm/mali-c55/mali-c55-resizer.c
369
mali_c55_write(mali_c55, vaddr,
drivers/media/platform/arm/mali-c55/mali-c55-resizer.c
373
vaddr += sizeof(u32);
drivers/media/platform/chips-media/coda/coda-bit.c
1731
ctx->bitstream.vaddr, ctx->bitstream.size);
drivers/media/platform/chips-media/coda/coda-bit.c
1768
if (ctx->bitstream.vaddr)
drivers/media/platform/chips-media/coda/coda-bit.c
1772
ctx->bitstream.vaddr = dma_alloc_wc(ctx->dev->dev, ctx->bitstream.size,
drivers/media/platform/chips-media/coda/coda-bit.c
1774
if (!ctx->bitstream.vaddr) {
drivers/media/platform/chips-media/coda/coda-bit.c
1780
ctx->bitstream.vaddr, ctx->bitstream.size);
drivers/media/platform/chips-media/coda/coda-bit.c
1787
if (ctx->bitstream.vaddr == NULL)
drivers/media/platform/chips-media/coda/coda-bit.c
1790
dma_free_wc(ctx->dev->dev, ctx->bitstream.size, ctx->bitstream.vaddr,
drivers/media/platform/chips-media/coda/coda-bit.c
1792
ctx->bitstream.vaddr = NULL;
drivers/media/platform/chips-media/coda/coda-bit.c
216
kfifo_init(&ctx->bitstream_fifo, ctx->bitstream.vaddr,
drivers/media/platform/chips-media/coda/coda-bit.c
2331
ctx->bitstream.vaddr, ctx->bitstream.size);
drivers/media/platform/chips-media/coda/coda-bit.c
234
u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
drivers/media/platform/chips-media/coda/coda-bit.c
239
size = coda_mpeg2_parse_headers(ctx, vaddr, payload);
drivers/media/platform/chips-media/coda/coda-bit.c
242
size = coda_mpeg4_parse_headers(ctx, vaddr, payload);
drivers/media/platform/chips-media/coda/coda-bit.c
255
u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
drivers/media/platform/chips-media/coda/coda-bit.c
263
if (!vaddr) {
drivers/media/platform/chips-media/coda/coda-bit.c
283
ret = coda_bitstream_queue(ctx, vaddr,
drivers/media/platform/chips-media/coda/coda-bit.c
303
ret = coda_bitstream_queue(ctx, vaddr, payload);
drivers/media/platform/chips-media/coda/coda-bit.c
453
u32 *p = ctx->parabuf.vaddr;
drivers/media/platform/chips-media/coda/coda-bit.c
576
if (!ctx->parabuf.vaddr) {
drivers/media/platform/chips-media/coda/coda-bit.c
586
if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
drivers/media/platform/chips-media/coda/coda-bit.c
596
if (!ctx->psbuf.vaddr && (dev->devtype->product == CODA_HX4 ||
drivers/media/platform/chips-media/coda/coda-bit.c
604
if (!ctx->workbuf.vaddr) {
drivers/media/platform/chips-media/coda/coda-bit.c
806
if (!dev->iram.vaddr)
drivers/media/platform/chips-media/coda/coda-common.c
1935
buf->vaddr = dma_alloc_coherent(dev->dev, size, &buf->paddr,
drivers/media/platform/chips-media/coda/coda-common.c
1937
if (!buf->vaddr) {
drivers/media/platform/chips-media/coda/coda-common.c
1947
buf->blob.data = buf->vaddr;
drivers/media/platform/chips-media/coda/coda-common.c
1959
if (buf->vaddr) {
drivers/media/platform/chips-media/coda/coda-common.c
1960
dma_free_coherent(dev->dev, buf->size, buf->vaddr, buf->paddr);
drivers/media/platform/chips-media/coda/coda-common.c
1961
buf->vaddr = NULL;
drivers/media/platform/chips-media/coda/coda-common.c
2159
ctx->bitstream.vaddr, ctx->bitstream.size);
drivers/media/platform/chips-media/coda/coda-common.c
2804
p = (u16 *)dev->codebuf.vaddr;
drivers/media/platform/chips-media/coda/coda-common.c
2943
u32 *dst = dev->codebuf.vaddr;
drivers/media/platform/chips-media/coda/coda-common.c
2958
memcpy(dev->codebuf.vaddr, src, size);
drivers/media/platform/chips-media/coda/coda-common.c
3256
dev->iram.vaddr = gen_pool_dma_alloc(dev->iram_pool, dev->iram.size,
drivers/media/platform/chips-media/coda/coda-common.c
3258
if (!dev->iram.vaddr) {
drivers/media/platform/chips-media/coda/coda-common.c
3261
memset(dev->iram.vaddr, 0, dev->iram.size);
drivers/media/platform/chips-media/coda/coda-common.c
3262
dev->iram.blob.data = dev->iram.vaddr;
drivers/media/platform/chips-media/coda/coda-common.c
3315
if (dev->iram.vaddr)
drivers/media/platform/chips-media/coda/coda-common.c
3316
gen_pool_free(dev->iram_pool, (unsigned long)dev->iram.vaddr,
drivers/media/platform/chips-media/coda/coda-common.c
3331
if (dev->pm_domain && cdev->codebuf.vaddr) {
drivers/media/platform/chips-media/coda/coda-jpeg.c
232
coda_memcpy_parabuf(ctx->parabuf.vaddr, huff + i);
drivers/media/platform/chips-media/coda/coda-jpeg.c
236
coda_memcpy_parabuf(ctx->parabuf.vaddr, qmat + i);
drivers/media/platform/chips-media/coda/coda-jpeg.c
243
void *vaddr = vb2_plane_vaddr(vb, 0);
drivers/media/platform/chips-media/coda/coda-jpeg.c
247
soi = be16_to_cpup((__be16 *)vaddr);
drivers/media/platform/chips-media/coda/coda-jpeg.c
252
vaddr += len - 2;
drivers/media/platform/chips-media/coda/coda-jpeg.c
254
eoi = be16_to_cpup((__be16 *)(vaddr - i));
drivers/media/platform/chips-media/coda/coda.h
71
void *vaddr;
drivers/media/platform/chips-media/wave5/wave5-vdi.c
102
if (!vb || !vb->vaddr) {
drivers/media/platform/chips-media/wave5/wave5-vdi.c
112
memcpy(vb->vaddr + offset, data, len);
drivers/media/platform/chips-media/wave5/wave5-vdi.c
119
void *vaddr;
drivers/media/platform/chips-media/wave5/wave5-vdi.c
127
vaddr = dma_alloc_coherent(vpu_dev->dev, vb->size, &daddr, GFP_KERNEL);
drivers/media/platform/chips-media/wave5/wave5-vdi.c
128
if (!vaddr)
drivers/media/platform/chips-media/wave5/wave5-vdi.c
130
vb->vaddr = vaddr;
drivers/media/platform/chips-media/wave5/wave5-vdi.c
141
if (!vb->vaddr)
drivers/media/platform/chips-media/wave5/wave5-vdi.c
144
dma_free_coherent(vpu_dev->dev, vb->size, vb->vaddr, vb->daddr);
drivers/media/platform/chips-media/wave5/wave5-vdi.c
18
if (!vpu_dev->common_mem.vaddr) {
drivers/media/platform/chips-media/wave5/wave5-vdi.c
182
void *vaddr;
drivers/media/platform/chips-media/wave5/wave5-vdi.c
185
if (!vpu_dev->sram_pool || vb->vaddr)
drivers/media/platform/chips-media/wave5/wave5-vdi.c
189
vaddr = gen_pool_dma_alloc(vpu_dev->sram_pool, size, &daddr);
drivers/media/platform/chips-media/wave5/wave5-vdi.c
190
if (vaddr) {
drivers/media/platform/chips-media/wave5/wave5-vdi.c
191
vb->vaddr = vaddr;
drivers/media/platform/chips-media/wave5/wave5-vdi.c
197
__func__, &vb->daddr, vb->size, vb->vaddr);
drivers/media/platform/chips-media/wave5/wave5-vdi.c
204
if (!vb->size || !vb->vaddr)
drivers/media/platform/chips-media/wave5/wave5-vdi.c
207
gen_pool_free(vpu_dev->sram_pool, (unsigned long)vb->vaddr, vb->size);
drivers/media/platform/chips-media/wave5/wave5-vdi.c
34
&vpu_dev->common_mem.daddr, vpu_dev->common_mem.size, vpu_dev->common_mem.vaddr);
drivers/media/platform/chips-media/wave5/wave5-vdi.c
90
if (!vb || !vb->vaddr) {
drivers/media/platform/chips-media/wave5/wave5-vdi.c
95
memset(vb->vaddr, 0, vb->size);
drivers/media/platform/chips-media/wave5/wave5-vdi.h
29
void *vaddr;
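
Note: the wave5-vdi entries circle one small descriptor: a vaddr/daddr/size triple in which vaddr == NULL means "not allocated". A condensed sketch of that life cycle, with struct sketch_buf standing in for the driver's type:

#include <linux/dma-mapping.h>
#include <linux/string.h>

struct sketch_buf {
        void *vaddr;            /* CPU view; NULL means "not allocated" */
        dma_addr_t daddr;       /* device view */
        size_t size;
};

static int sketch_buf_alloc(struct device *dev, struct sketch_buf *vb,
                            size_t size)
{
        vb->vaddr = dma_alloc_coherent(dev, size, &vb->daddr, GFP_KERNEL);
        if (!vb->vaddr)
                return -ENOMEM;
        vb->size = size;
        return 0;
}

static int sketch_buf_write(struct sketch_buf *vb, size_t offset,
                            const void *data, size_t len)
{
        if (!vb->vaddr || offset + len > vb->size)
                return -EINVAL;
        memcpy(vb->vaddr + offset, data, len);  /* coherent: no sync needed */
        return 0;
}

static void sketch_buf_free(struct device *dev, struct sketch_buf *vb)
{
        if (!vb->vaddr)
                return;
        dma_free_coherent(dev, vb->size, vb->vaddr, vb->daddr);
        vb->vaddr = NULL;
}

The SRAM variant at wave5-vdi.c lines 182-207 is the same shape, except the storage comes from gen_pool_dma_alloc() and returns via gen_pool_free().
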
drivers/media/platform/nuvoton/npcm-video.c
1018
unsigned int dma_addr, void *vaddr)
drivers/media/platform/nuvoton/npcm-video.c
1040
len = npcm_video_ece_get_ed_size(video, offset, vaddr);
drivers/media/platform/nuvoton/npcm-video.c
1041
npcm_video_ece_prepend_rect_header(vaddr + offset,
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
628
void *vaddr;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
639
vaddr = mxc_jpeg_get_plane_vaddr(buf, plane_no);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
642
plane_no, vaddr, dma_addr, payload);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
644
vaddr, len, false);
drivers/media/platform/rockchip/rkcif/rkcif-common.h
96
void *vaddr;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
123
stream->dummy.vaddr =
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
127
if (!stream->dummy.vaddr)
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
166
if (stream->dummy.vaddr) {
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
168
stream->dummy.vaddr,
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
171
stream->dummy.vaddr = NULL;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
715
dummy_buf->vaddr = dma_alloc_attrs(cap->rkisp1->dev,
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
720
if (!dummy_buf->vaddr)
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
729
cap->buf.dummy.size, cap->buf.dummy.vaddr,
drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
291
void *vaddr;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
224
void *vaddr;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
229
vaddr = vb2_plane_vaddr(&v_buf->vb.vb2_buf, plane);
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
231
vaddr, &size);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
242
buf = is->memory.vaddr + is->setfile.base;
drivers/media/platform/samsung/exynos4-is/fimc-is.c
247
pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
318
memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
338
is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE,
drivers/media/platform/samsung/exynos4-is/fimc-is.c
340
if (is->memory.vaddr == NULL)
drivers/media/platform/samsung/exynos4-is/fimc-is.c
350
dma_free_coherent(dev, is->memory.size, is->memory.vaddr,
drivers/media/platform/samsung/exynos4-is/fimc-is.c
355
is->is_p_region = (struct is_region *)(is->memory.vaddr +
drivers/media/platform/samsung/exynos4-is/fimc-is.c
361
is->is_shared_region = (struct is_share_region *)(is->memory.vaddr +
drivers/media/platform/samsung/exynos4-is/fimc-is.c
370
if (is->memory.vaddr == NULL)
drivers/media/platform/samsung/exynos4-is/fimc-is.c
373
dma_free_coherent(dev, is->memory.size, is->memory.vaddr,
drivers/media/platform/samsung/exynos4-is/fimc-is.c
403
memcpy(is->memory.vaddr, fw->data, fw->size);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
407
buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_DESC_LEN);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
411
buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_VER_LEN);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
740
const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET;
drivers/media/platform/samsung/exynos4-is/fimc-is.c
742
if (is->memory.vaddr == NULL) {
drivers/media/platform/samsung/exynos4-is/fimc-is.h
177
void *vaddr;
drivers/media/platform/samsung/exynos4-is/fimc-is.h
190
void *vaddr;
drivers/media/platform/st/sti/delta/delta-ipc.c
181
memcpy(ctx->ipc_buf->vaddr, param->data, msg.param_size);
drivers/media/platform/st/sti/delta/delta-ipc.c
272
ctx->ipc_buf->vaddr,
drivers/media/platform/st/sti/delta/delta-ipc.c
273
ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
drivers/media/platform/st/sti/delta/delta-ipc.c
372
ctx->ipc_buf->vaddr,
drivers/media/platform/st/sti/delta/delta-ipc.c
373
ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
drivers/media/platform/st/sti/delta/delta-ipc.c
383
ctx->ipc_buf->vaddr,
drivers/media/platform/st/sti/delta/delta-ipc.c
384
ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
drivers/media/platform/st/sti/delta/delta-ipc.c
41
static inline dma_addr_t to_paddr(struct delta_ipc_ctx *ctx, void *vaddr)
drivers/media/platform/st/sti/delta/delta-ipc.c
43
return (ctx->ipc_buf->paddr + (vaddr - ctx->ipc_buf->vaddr));
drivers/media/platform/st/sti/delta/delta-ipc.c
49
return ((data >= ctx->ipc_buf->vaddr) &&
drivers/media/platform/st/sti/delta/delta-ipc.c
50
((data + size) <= (ctx->ipc_buf->vaddr + ctx->ipc_buf->size)));
drivers/media/platform/st/sti/delta/delta-mem.c
30
buf->vaddr = addr;
drivers/media/platform/st/sti/delta/delta-mem.c
36
ctx->name, size, buf->vaddr, &buf->paddr, buf->name);
drivers/media/platform/st/sti/delta/delta-mem.c
47
ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
drivers/media/platform/st/sti/delta/delta-mem.c
50
buf->vaddr, buf->paddr, buf->attrs);
drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
247
struct jpeg_decode_params_t *params = ctx->ipc_buf->vaddr;
drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
249
ctx->ipc_buf->vaddr + sizeof(*params);
drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
377
void *au_vaddr = pau->vaddr;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1128
au->vaddr = vb2_plane_vaddr(&au->vbuf.vb2_buf, 0);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1133
ctx->name, vb->index, au->vaddr, &au->paddr);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1475
frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1489
ctx->name, vb->index, frame->vaddr,
drivers/media/platform/st/sti/delta/delta-v4l2.c
69
u8 *data = (u8 *)(au->vaddr);
drivers/media/platform/st/sti/delta/delta-v4l2.c
85
u8 *data = (u8 *)(frame->vaddr);
drivers/media/platform/st/sti/delta/delta.h
105
void *vaddr;
drivers/media/platform/st/sti/delta/delta.h
175
void *vaddr;
drivers/media/platform/st/sti/delta/delta.h
205
void *vaddr;
drivers/media/platform/st/sti/hva/hva-h264.c
1011
struct hva_h264_task *task = ctx->task->vaddr;
drivers/media/platform/st/sti/hva/hva-h264.c
1030
(u8 *)stream->vaddr,
drivers/media/platform/st/sti/hva/hva-h264.c
825
slice_header_vaddr = seq_info->vaddr + (td->addr_slice_header -
drivers/media/platform/st/sti/hva/hva-h264.c
856
(u8 *)stream->vaddr,
drivers/media/platform/st/sti/hva/hva-mem.c
37
b->vaddr = base;
drivers/media/platform/st/sti/hva/hva-mem.c
42
ctx->name, size, b->vaddr, &b->paddr, b->name);
drivers/media/platform/st/sti/hva/hva-mem.c
56
ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
drivers/media/platform/st/sti/hva/hva-mem.c
58
dma_free_attrs(dev, buf->size, buf->vaddr, buf->paddr,
drivers/media/platform/st/sti/hva/hva-mem.h
22
void *vaddr;
drivers/media/platform/st/sti/hva/hva-v4l2.c
953
frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
drivers/media/platform/st/sti/hva/hva-v4l2.c
962
frame->vaddr, &frame->paddr);
drivers/media/platform/st/sti/hva/hva-v4l2.c
969
stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
drivers/media/platform/st/sti/hva/hva-v4l2.c
978
stream->vaddr, &stream->paddr);
drivers/media/platform/st/sti/hva/hva.h
118
void *vaddr;
drivers/media/platform/st/sti/hva/hva.h
143
void *vaddr;
drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
124
void *vaddr;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
259
csi->scratch.vaddr = dma_alloc_coherent(csi->dev,
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
263
if (!csi->scratch.vaddr) {
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
336
dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
364
dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
1025
csi->dma.drain.vaddr = dma_alloc_coherent(csi->dev, csi->dma.drain.len,
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
1028
if (!csi->dma.drain.vaddr)
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
1098
csi->dma.drain.vaddr, csi->dma.drain.paddr);
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
1099
csi->dma.drain.vaddr = NULL;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
95
void *vaddr;
drivers/media/test-drivers/visl/visl-debugfs.c
42
u8 *vaddr = vb2_plane_vaddr(&run->src->vb2_buf, 0);
drivers/media/test-drivers/visl/visl-debugfs.c
59
memcpy(blob->blob.data, vaddr, data_sz);
drivers/misc/dw-xdata-pcie.c
323
dw->rg_region.vaddr = pcim_iomap_table(pdev)[BAR_0];
drivers/misc/dw-xdata-pcie.c
324
if (!dw->rg_region.vaddr)
drivers/misc/dw-xdata-pcie.c
62
void __iomem *vaddr; /* virtual address */
drivers/misc/dw-xdata-pcie.c
76
return dw->rg_region.vaddr;
drivers/misc/fastrpc.c
135
u64 vaddr;
drivers/misc/fastrpc.c
141
u64 vaddr;
drivers/misc/fastrpc.c
157
u64 vaddr;
drivers/misc/fastrpc.c
1856
req_msg.vaddr = buf->raddr;
drivers/misc/fastrpc.c
1942
req_msg.vaddr = req.vaddrin;
drivers/misc/fastrpc.c
1967
buf->raddr = (uintptr_t) rsp_msg.vaddr;
drivers/misc/fastrpc.c
1970
req.vaddrout = rsp_msg.vaddr;
drivers/misc/fastrpc.c
2017
if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
drivers/misc/fastrpc.c
2116
map->raddr = rsp_msg.vaddr;
drivers/misc/fastrpc.c
2119
req.vaddrout = rsp_msg.vaddr;
drivers/misc/fastrpc.c
2123
req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
drivers/misc/genwqe/card_base.h
536
void *vaddr, dma_addr_t dma_handle);
drivers/misc/genwqe/card_utils.c
221
void *vaddr, dma_addr_t dma_handle)
drivers/misc/genwqe/card_utils.c
223
if (vaddr == NULL)
drivers/misc/genwqe/card_utils.c
226
dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
drivers/misc/ibmvmc.c
306
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
drivers/misc/ibmvmc.c
313
kfree_sensitive(vaddr);
drivers/misc/kgdbts.c
232
static void break_helper(char *bp_type, char *arg, unsigned long vaddr)
drivers/misc/kgdbts.c
239
addr = vaddr;
drivers/misc/mei/bus.c
762
return cl->dma.vaddr;
drivers/misc/mei/client.c
2246
cl->dma.vaddr = dmam_alloc_coherent(&cl->dev->dev, size,
drivers/misc/mei/client.c
2248
if (!cl->dma.vaddr)
drivers/misc/mei/client.c
2261
cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
drivers/misc/mei/client.c
2263
cl->dma.vaddr = NULL;
drivers/misc/mei/dma-ring.c
104
return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
drivers/misc/mei/dma-ring.c
133
unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;
drivers/misc/mei/dma-ring.c
155
unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;
drivers/misc/mei/dma-ring.c
30
if (dscr->vaddr)
drivers/misc/mei/dma-ring.c
33
dscr->vaddr = dmam_alloc_coherent(dev->parent, dscr->size, &dscr->daddr,
drivers/misc/mei/dma-ring.c
35
if (!dscr->vaddr)
drivers/misc/mei/dma-ring.c
50
if (!dscr->vaddr)
drivers/misc/mei/dma-ring.c
53
dmam_free_coherent(dev->parent, dscr->size, dscr->vaddr, dscr->daddr);
drivers/misc/mei/dma-ring.c
54
dscr->vaddr = NULL;
drivers/misc/mei/dma-ring.c
98
return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
drivers/misc/mei/mei_dev.h
160
void *vaddr;
drivers/misc/mei/mei_dev.h
173
void *vaddr;
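
Note: mei's DMA ring uses the device-managed flavour: dmam_alloc_coherent() ties the buffer's lifetime to the device, and a populated vaddr doubles as the "already set up" flag (see dma-ring.c lines 30 and 98 above). A minimal, illustrative sketch:

#include <linux/dma-mapping.h>

struct sketch_dscr {
        void *vaddr;
        dma_addr_t daddr;
        size_t size;
};

static int sketch_dscr_setup(struct device *dev, struct sketch_dscr *dscr)
{
        if (dscr->vaddr)        /* vaddr != NULL: already allocated */
                return 0;
        dscr->vaddr = dmam_alloc_coherent(dev, dscr->size, &dscr->daddr,
                                          GFP_KERNEL);
        return dscr->vaddr ? 0 : -ENOMEM;
}
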
drivers/misc/sgi-gru/gru_instructions.h
722
static inline int gru_get_tri(void *vaddr)
drivers/misc/sgi-gru/gru_instructions.h
724
return ((unsigned long)vaddr & (GRU_GSEG_PAGESIZE - 1)) - GRU_DS_BASE;
drivers/misc/sgi-gru/grufault.c
178
unsigned long vaddr, int write,
drivers/misc/sgi-gru/grufault.c
188
if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page) <= 0)
drivers/misc/sgi-gru/grufault.c
205
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
drivers/misc/sgi-gru/grufault.c
214
pgdp = pgd_offset(vma->vm_mm, vaddr);
drivers/misc/sgi-gru/grufault.c
218
p4dp = p4d_offset(pgdp, vaddr);
drivers/misc/sgi-gru/grufault.c
222
pudp = pud_offset(p4dp, vaddr);
drivers/misc/sgi-gru/grufault.c
226
pmdp = pmd_offset(pudp, vaddr);
drivers/misc/sgi-gru/grufault.c
234
pte = *pte_offset_kernel(pmdp, vaddr);
drivers/misc/sgi-gru/grufault.c
252
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
drivers/misc/sgi-gru/grufault.c
260
vma = find_vma(mm, vaddr);
drivers/misc/sgi-gru/grufault.c
269
ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
drivers/misc/sgi-gru/grufault.c
273
if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
drivers/misc/sgi-gru/grufault.c
316
unsigned long vaddr = 0, gpa;
drivers/misc/sgi-gru/grufault.c
323
vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
drivers/misc/sgi-gru/grufault.c
325
vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
drivers/misc/sgi-gru/grufault.c
328
vaddr &= PAGE_MASK;
drivers/misc/sgi-gru/grufault.c
329
vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
drivers/misc/sgi-gru/grufault.c
331
while (vaddr > fault_vaddr) {
drivers/misc/sgi-gru/grufault.c
332
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
drivers/misc/sgi-gru/grufault.c
333
if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
drivers/misc/sgi-gru/grufault.c
339
vaddr, asid, write, pageshift, gpa);
drivers/misc/sgi-gru/grufault.c
340
vaddr -= PAGE_SIZE;
drivers/misc/sgi-gru/grufault.c
363
unsigned long gpa = 0, vaddr = 0;
drivers/misc/sgi-gru/grufault.c
398
vaddr = tfh->missvaddr;
drivers/misc/sgi-gru/grufault.c
413
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
drivers/misc/sgi-gru/grufault.c
428
gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
drivers/misc/sgi-gru/grufault.c
434
tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
drivers/misc/sgi-gru/grufault.c
439
atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
drivers/misc/sgi-gru/grufault.c
447
gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
drivers/misc/sgi-gru/grufault.c
460
gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
drivers/misc/sgi-gru/grufault.c
48
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
drivers/misc/sgi-gru/grufault.c
497
gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
drivers/misc/sgi-gru/grufault.c
509
tfh, vaddr);
drivers/misc/sgi-gru/grufault.c
52
vma = vma_lookup(current->mm, vaddr);
drivers/misc/sgi-gru/grufault.c
66
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
drivers/misc/sgi-gru/grufault.c
73
vma = gru_find_vma(vaddr);
drivers/misc/sgi-gru/grufault.c
75
gts = gru_find_thread_state(vma, TSID(vaddr, vma));
drivers/misc/sgi-gru/grufault.c
807
req.vaddr, req.len);
drivers/misc/sgi-gru/grufault.c
815
gru_flush_tlb_range(gms, req.vaddr, req.len);
drivers/misc/sgi-gru/grufault.c
83
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
drivers/misc/sgi-gru/grufault.c
90
vma = gru_find_vma(vaddr);
drivers/misc/sgi-gru/grufault.c
94
gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
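
Note: atomic_pte_lookup() above is a hand-rolled five-level page-table walk, used when the GRU fault handler must not sleep; the non-atomic path falls back to get_user_pages(). A condensed sketch of the walk, with huge-page and pgd_bad handling elided and sketch_* names invented:

#include <linux/mm.h>
#include <linux/pgtable.h>

static int sketch_walk(struct mm_struct *mm, unsigned long vaddr, pte_t *out)
{
        pgd_t *pgd = pgd_offset(mm, vaddr);     /* level 1 */
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return -1;
        p4d = p4d_offset(pgd, vaddr);           /* level 2 */
        if (p4d_none(*p4d))
                return -1;
        pud = pud_offset(p4d, vaddr);           /* level 3 */
        if (pud_none(*pud))
                return -1;
        pmd = pmd_offset(pud, vaddr);           /* level 4 */
        if (pmd_none(*pmd))
                return -1;
        /* grufault.c reads the pte directly, as at line 234 above */
        *out = *pte_offset_kernel(pmd, vaddr);
        return pte_present(*out) ? 0 : -1;
}
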
drivers/misc/sgi-gru/grufile.c
231
void *vaddr, int blade_id, int chiplet_id)
drivers/misc/sgi-gru/grufile.c
236
gru->gs_gru_base_vaddr = vaddr;
drivers/misc/sgi-gru/grufile.c
260
void *vaddr;
drivers/misc/sgi-gru/grufile.c
282
vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
drivers/misc/sgi-gru/grufile.c
283
gru_init_chiplet(gru, paddr, vaddr, bid, chip);
drivers/misc/sgi-gru/gruhandles.c
134
unsigned long vaddr, unsigned long vaddrmask,
drivers/misc/sgi-gru/gruhandles.c
138
tgh->vaddr = vaddr;
drivers/misc/sgi-gru/gruhandles.c
152
unsigned long vaddr, int asid, int dirty,
drivers/misc/sgi-gru/gruhandles.c
156
tfh->fillvaddr = vaddr;
drivers/misc/sgi-gru/gruhandles.c
168
unsigned long vaddr, int asid, int dirty,
drivers/misc/sgi-gru/gruhandles.c
172
tfh->fillvaddr = vaddr;
drivers/misc/sgi-gru/gruhandles.h
149
static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
drivers/misc/sgi-gru/gruhandles.h
151
return vaddr + GRU_SIZE * (2 * pnode + chiplet);
drivers/misc/sgi-gru/gruhandles.h
199
unsigned long vaddr:64; /* DW 1 */
drivers/misc/sgi-gru/gruhandles.h
507
int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
drivers/misc/sgi-gru/gruhandles.h
511
int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
drivers/misc/sgi-gru/gruhandles.h
513
int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
drivers/misc/sgi-gru/grukdump.c
143
hdr.vaddr = gts->ts_vma->vm_start;
drivers/misc/sgi-gru/grukservices.c
144
void *vaddr;
drivers/misc/sgi-gru/grukservices.c
175
vaddr = gru->gs_gru_base_vaddr;
drivers/misc/sgi-gru/grukservices.c
177
bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
drivers/misc/sgi-gru/grukservices.c
178
bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
drivers/misc/sgi-gru/grulib.h
106
unsigned long vaddr;
drivers/misc/sgi-gru/grulib.h
137
unsigned long vaddr;
drivers/misc/sgi-gru/grumain.c
925
unsigned long paddr, vaddr;
drivers/misc/sgi-gru/grumain.c
928
vaddr = vmf->address;
drivers/misc/sgi-gru/grumain.c
930
vma, vaddr, GSEG_BASE(vaddr));
drivers/misc/sgi-gru/grumain.c
934
gts = gru_find_thread_state(vma, TSID(vaddr, vma));
drivers/misc/sgi-gru/grumain.c
960
remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
drivers/misc/sgi-gru/grutables.h
637
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
drivers/mtd/nand/qpic_common.c
231
int reg_off, const void *vaddr,
drivers/mtd/nand/qpic_common.c
254
(__le32 *)vaddr + i));
drivers/mtd/nand/qpic_common.c
258
*((__le32 *)vaddr + i));
drivers/mtd/nand/qpic_common.c
304
const void *vaddr, int size, unsigned int flags)
drivers/mtd/nand/qpic_common.c
316
vaddr, size);
drivers/mtd/nand/qpic_common.c
325
vaddr, size);
drivers/mtd/nand/qpic_common.c
356
int reg_off, const void *vaddr, int size,
drivers/mtd/nand/qpic_common.c
373
sg_init_one(sgl, vaddr, size);
drivers/mtd/nand/qpic_common.c
447
void *vaddr;
drivers/mtd/nand/qpic_common.c
449
vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
drivers/mtd/nand/qpic_common.c
456
return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
drivers/mtd/nand/qpic_common.c
462
return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
drivers/mtd/nand/qpic_common.c
479
int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
drivers/mtd/nand/qpic_common.c
494
return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
drivers/mtd/nand/qpic_common.c
500
return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
drivers/mtd/nand/qpic_common.c
517
const u8 *vaddr, int size, unsigned int flags)
drivers/mtd/nand/qpic_common.c
520
return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
drivers/mtd/nand/qpic_common.c
522
return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
drivers/mtd/nand/qpic_common.c
538
const u8 *vaddr, int size, unsigned int flags)
drivers/mtd/nand/qpic_common.c
541
return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
drivers/mtd/nand/qpic_common.c
543
return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
drivers/mtd/nand/raw/davinci_nand.c
117
void __iomem *vaddr;
drivers/mtd/nand/raw/davinci_nand.c
807
info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
drivers/mtd/nand/raw/davinci_nand.c
904
void __iomem *vaddr;
drivers/mtd/nand/raw/davinci_nand.c
935
vaddr = devm_ioremap_resource(&pdev->dev, res1);
drivers/mtd/nand/raw/davinci_nand.c
936
if (IS_ERR(vaddr))
drivers/mtd/nand/raw/davinci_nand.c
937
return PTR_ERR(vaddr);
drivers/mtd/nand/raw/davinci_nand.c
957
info->vaddr = vaddr;
drivers/mtd/nand/raw/davinci_nand.c
971
info->current_cs = info->vaddr;
drivers/mtd/nand/raw/omap2.c
2221
void __iomem *vaddr;
drivers/mtd/nand/raw/omap2.c
2254
vaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
drivers/mtd/nand/raw/omap2.c
2255
if (IS_ERR(vaddr))
drivers/mtd/nand/raw/omap2.c
2256
return PTR_ERR(vaddr);
drivers/mtd/nand/raw/omap2.c
2258
info->fifo = vaddr;
drivers/net/caif/caif_virtio.c
136
u8 *vaddr;
drivers/net/caif/caif_virtio.c
152
gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
drivers/net/caif/caif_virtio.c
506
buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
drivers/net/caif/caif_virtio.c
507
if (unlikely(!buf_info->vaddr))
drivers/net/caif/caif_virtio.c
511
skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
drivers/net/caif/caif_virtio.c
512
sg_init_one(sg, buf_info->vaddr + pad_len,
drivers/net/ethernet/amd/au1000_eth.c
1134
aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
drivers/net/ethernet/amd/au1000_eth.c
1137
if (!aup->vaddr) {
drivers/net/ethernet/amd/au1000_eth.c
1237
pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i;
drivers/net/ethernet/amd/au1000_eth.c
1313
aup->vaddr, aup->dma_addr);
drivers/net/ethernet/amd/au1000_eth.c
1346
aup->vaddr, aup->dma_addr);
drivers/net/ethernet/amd/au1000_eth.c
795
(unsigned char *)pDB->vaddr, frmlen);
drivers/net/ethernet/amd/au1000_eth.c
987
skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
drivers/net/ethernet/amd/au1000_eth.c
990
((char *)pDB->vaddr)[i] = 0;
drivers/net/ethernet/amd/au1000_eth.h
109
void *vaddr; /* virtual address of rx/tx buffers */
drivers/net/ethernet/amd/au1000_eth.h
31
u32 *vaddr;
drivers/net/ethernet/amd/pds_core/core.h
27
void __iomem *vaddr;
drivers/net/ethernet/amd/pds_core/main.c
105
pdsc->info_regs = bar->vaddr + PDS_CORE_BAR0_DEV_INFO_REGS_OFFSET;
drivers/net/ethernet/amd/pds_core/main.c
106
pdsc->cmd_regs = bar->vaddr + PDS_CORE_BAR0_DEV_CMD_REGS_OFFSET;
drivers/net/ethernet/amd/pds_core/main.c
107
pdsc->intr_status = bar->vaddr + PDS_CORE_BAR0_INTR_STATUS_OFFSET;
drivers/net/ethernet/amd/pds_core/main.c
108
pdsc->intr_ctrl = bar->vaddr + PDS_CORE_BAR0_INTR_CTRL_OFFSET;
drivers/net/ethernet/amd/pds_core/main.c
125
pdsc->db_pages = bar->vaddr;
drivers/net/ethernet/amd/pds_core/main.c
46
if (bars[i].vaddr)
drivers/net/ethernet/amd/pds_core/main.c
47
pci_iounmap(pdsc->pdev, bars[i].vaddr);
drivers/net/ethernet/amd/pds_core/main.c
48
bars[i].vaddr = NULL;
drivers/net/ethernet/amd/pds_core/main.c
79
bars[j].vaddr = NULL;
drivers/net/ethernet/amd/pds_core/main.c
81
bars[j].vaddr = pci_iomap(pdev, i, bars[j].len);
drivers/net/ethernet/amd/pds_core/main.c
82
if (!bars[j].vaddr) {
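
Note: pds_core (and ionic further down, which shares its ancestry) keeps an array of BAR descriptors: pci_iomap() fills vaddr, pci_iounmap() clears it, and NULL means "unmapped". A sketch, with struct sketch_bar invented for illustration:

#include <linux/pci.h>

struct sketch_bar {
        void __iomem *vaddr;    /* NULL while the BAR is unmapped */
        unsigned long len;
};

static int sketch_map_bar(struct pci_dev *pdev, int bar_num,
                          struct sketch_bar *bar)
{
        bar->len = pci_resource_len(pdev, bar_num);
        bar->vaddr = pci_iomap(pdev, bar_num, bar->len);
        return bar->vaddr ? 0 : -ENODEV;
}

static void sketch_unmap_bar(struct pci_dev *pdev, struct sketch_bar *bar)
{
        if (bar->vaddr) {
                pci_iounmap(pdev, bar->vaddr);
                bar->vaddr = NULL;
        }
}
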
drivers/net/ethernet/apple/bmac.c
558
void *vaddr;
drivers/net/ethernet/apple/bmac.c
563
vaddr = skb->data;
drivers/net/ethernet/apple/bmac.c
564
baddr = virt_to_bus(vaddr);
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
518
int vaddr, rc;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
520
vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
521
if (vaddr < 0)
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
522
return vaddr;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
524
rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1193
int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1195
if (vaddr >= 0)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1196
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1197
return vaddr < 0 ? vaddr : 0;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1202
int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1204
if (vaddr >= 0)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1205
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1206
return vaddr < 0 ? vaddr : 0;
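
Note: a naming trap worth flagging here: in cxgb4 "vaddr" is a plain int, the VPD offset returned by t4_eeprom_ptov(), negative on failure, and it is then reused to carry the pci_read_vpd()/pci_write_vpd() status. A sketch of the read side under that convention:

#include <linux/pci.h>

static int sketch_vpd_read_u32(struct pci_dev *pdev, int vaddr, u32 *v)
{
        if (vaddr < 0)
                return vaddr;           /* offset translation failed */
        vaddr = pci_read_vpd(pdev, vaddr, sizeof(u32), v);
        return vaddr < 0 ? vaddr : 0;   /* reused as status, as above */
}
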
drivers/net/ethernet/cisco/enic/enic_main.c
2617
if (enic->bar[i].vaddr)
drivers/net/ethernet/cisco/enic/enic_main.c
2618
iounmap(enic->bar[i].vaddr);
drivers/net/ethernet/cisco/enic/enic_main.c
2691
enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
drivers/net/ethernet/cisco/enic/enic_main.c
2692
if (!enic->bar[i].vaddr) {
drivers/net/ethernet/cisco/enic/vnic_dev.c
112
vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
drivers/net/ethernet/cisco/enic/vnic_dev.c
130
if (!vdev->res[type].vaddr)
drivers/net/ethernet/cisco/enic/vnic_dev.c
138
return (char __iomem *)vdev->res[type].vaddr +
drivers/net/ethernet/cisco/enic/vnic_dev.c
141
return (char __iomem *)vdev->res[type].vaddr;
drivers/net/ethernet/cisco/enic/vnic_dev.c
47
rh = bar->vaddr;
drivers/net/ethernet/cisco/enic/vnic_dev.c
48
mrh = bar->vaddr;
drivers/net/ethernet/cisco/enic/vnic_dev.c
85
if (!bar[bar_num].len || !bar[bar_num].vaddr)
drivers/net/ethernet/cisco/enic/vnic_dev.h
47
void __iomem *vaddr;
drivers/net/ethernet/cisco/enic/vnic_dev.h
72
void __iomem *vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1401
void *vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1411
vaddr = phys_to_virt(qm_fd_addr(fd));
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1412
sgt = vaddr + qm_fd_get_offset(fd);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1420
virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1686
void *vaddr = phys_to_virt(addr);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1701
sgt = vaddr + qm_fd_get_offset(fd);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1721
swbp = (struct dpaa_eth_swbp *)vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1737
if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1748
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1783
void *vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1785
vaddr = phys_to_virt(addr);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1786
WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1792
skb = build_skb(vaddr, dpaa_bp->size +
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1804
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1821
void *vaddr, *sg_vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1829
vaddr = phys_to_virt(addr);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1830
WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1833
sgt = vaddr + fd_off;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1911
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1938
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2591
static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2607
xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2619
xdp.data_hard_start = vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2627
qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2642
xdp.data_hard_start = vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2646
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2656
xdp.data_hard_start = vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2662
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2673
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2703
void *vaddr;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2750
vaddr = phys_to_virt(addr);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2751
prefetch(vaddr + qm_fd_get_offset(fd));
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2764
if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2774
hash = be32_to_cpu(*(__be32 *)(vaddr + hash_offset));
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2779
xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2794
sgt = vaddr + qm_fd_get_offset(fd);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2796
free_pages((unsigned long)vaddr, 0);
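
Note: dpaa's receive path converts the frame descriptor's bus address back with phys_to_virt() (valid because its buffers live in the kernel direct map), wraps the page with build_skb(), and releases it with free_pages() on every error path. A minimal sketch, assuming an order-0, driver-owned page:

#include <linux/skbuff.h>
#include <linux/io.h>

static struct sk_buff *sketch_rx_to_skb(dma_addr_t addr, size_t frag_size,
                                        unsigned int headroom,
                                        unsigned int len)
{
        void *vaddr = phys_to_virt(addr);       /* direct-map buffers only */
        struct sk_buff *skb = build_skb(vaddr, frag_size);

        if (!skb) {
                free_pages((unsigned long)vaddr, 0);    /* error path frees */
                return NULL;
        }
        skb_reserve(skb, headroom);
        skb_put(skb, len);
        return skb;
}
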
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
113
void *vaddr,
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
122
TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
130
__field(void *, vaddr)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
142
__entry->vaddr = vaddr;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
155
__entry->vaddr,
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
165
void *vaddr,
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
171
TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
177
void *vaddr,
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
183
TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
141
void *vaddr)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
160
sgt = vaddr + dpaa2_fd_get_offset(fd);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
173
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
287
void *vaddr;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
291
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
296
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
299
(vaddr + DPAA2_ETH_RX_HWA_SIZE);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
428
struct dpaa2_fd *fd, void *vaddr)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
442
xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
448
dpaa2_fd_set_offset(fd, xdp.data - vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
455
dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
473
xdp.data_hard_start = vaddr;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
479
virt_to_page(vaddr), 0,
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
482
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
537
const struct dpaa2_fd *fd, void *vaddr,
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
545
fas = dpaa2_get_fas(vaddr, false);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
552
__le64 *ts = dpaa2_get_ts(vaddr, false);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
585
void *vaddr;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
597
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
601
buf_data = vaddr + dpaa2_fd_get_offset(fd);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
608
xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
615
skb = dpaa2_eth_copybreak(ch, fd, vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
619
skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
629
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
640
dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
647
dpaa2_eth_free_rx_fd(priv, fd, vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
668
void *vaddr;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
670
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
674
buf_data = vaddr + dpaa2_fd_get_offset(fd);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
679
skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
684
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
687
dpaa2_eth_free_rx_fd(priv, fd, vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
691
fapr = dpaa2_get_fapr(vaddr, false);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
822
const struct dpaa2_fd *fd, void *vaddr,
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2553
void *vaddr;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2557
vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2560
free_pages((unsigned long)vaddr, 0);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
113
void *vaddr;
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
118
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
127
xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
135
skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
143
dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
35
struct dpaa2_fd *fd, void *vaddr)
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
48
swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE +
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
52
xdp_buff->data_hard_start = vaddr;
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
53
xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
62
dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
82
dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
drivers/net/ethernet/freescale/enetc/enetc.h
261
void *vaddr;
drivers/net/ethernet/freescale/enetc/enetc_msg.c
100
dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma);
drivers/net/ethernet/freescale/enetc/enetc_msg.c
76
msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma,
drivers/net/ethernet/freescale/enetc/enetc_msg.c
78
if (!msg->vaddr) {
drivers/net/ethernet/freescale/enetc/enetc_pf.c
490
cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr;
drivers/net/ethernet/freescale/enetc/enetc_pf.c
513
cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr;
drivers/net/ethernet/freescale/enetc/enetc_vf.c
56
msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma,
drivers/net/ethernet/freescale/enetc/enetc_vf.c
58
if (!msg.vaddr) {
drivers/net/ethernet/freescale/enetc/enetc_vf.c
64
cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr;
drivers/net/ethernet/freescale/enetc/enetc_vf.c
72
dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma);
drivers/net/ethernet/freescale/fec_main.c
415
void *vaddr;
drivers/net/ethernet/freescale/fec_main.c
423
fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
drivers/net/ethernet/freescale/fec_main.c
430
void *vaddr;
drivers/net/ethernet/freescale/fec_main.c
435
vaddr = fec_dma_alloc(dev, size, handle, gfp);
drivers/net/ethernet/freescale/fec_main.c
436
if (!vaddr) {
drivers/net/ethernet/freescale/fec_main.c
440
dr->vaddr = vaddr;
drivers/net/ethernet/freescale/fec_main.c
444
return vaddr;
drivers/net/ethernet/freescale/fman/fman_dtsec.c
1365
dtsec->regs = mac_dev->vaddr;
drivers/net/ethernet/freescale/fman/fman_memac.c
1107
memac->regs = mac_dev->vaddr;
drivers/net/ethernet/freescale/fman/fman_muram.c
106
unsigned long vaddr;
drivers/net/ethernet/freescale/fman/fman_muram.c
108
vaddr = gen_pool_alloc(muram->pool, size);
drivers/net/ethernet/freescale/fman/fman_muram.c
109
if (!vaddr)
drivers/net/ethernet/freescale/fman/fman_muram.c
112
memset_io((void __iomem *)vaddr, 0, size);
drivers/net/ethernet/freescale/fman/fman_muram.c
114
return fman_muram_vbase_to_offset(muram, vaddr);
drivers/net/ethernet/freescale/fman/fman_muram.c
19
unsigned long vaddr)
drivers/net/ethernet/freescale/fman/fman_muram.c
21
return vaddr - (unsigned long)muram->vbase;
drivers/net/ethernet/freescale/fman/fman_muram.c
40
void __iomem *vaddr;
drivers/net/ethernet/freescale/fman/fman_muram.c
53
vaddr = ioremap(base, size);
drivers/net/ethernet/freescale/fman/fman_muram.c
54
if (!vaddr) {
drivers/net/ethernet/freescale/fman/fman_muram.c
59
ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
drivers/net/ethernet/freescale/fman/fman_muram.c
63
iounmap(vaddr);
drivers/net/ethernet/freescale/fman/fman_muram.c
67
memset_io(vaddr, 0, (int)size);
drivers/net/ethernet/freescale/fman/fman_muram.c
69
muram->vbase = vaddr;
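
Note: fman_muram manages device-local SRAM through genalloc: ioremap() the window once, seed a pool with gen_pool_add_virt(), hand out virtual addresses from gen_pool_alloc(), and convert them to device offsets by subtracting vbase. A sketch with an illustrative 64-byte minimum allocation order:

#include <linux/genalloc.h>
#include <linux/io.h>

static struct gen_pool *sketch_muram_init(phys_addr_t base, size_t size,
                                          void __iomem **vbase_out)
{
        struct gen_pool *pool = gen_pool_create(ilog2(64), -1);
        void __iomem *vaddr;

        if (!pool)
                return NULL;
        vaddr = ioremap(base, size);
        if (!vaddr || gen_pool_add_virt(pool, (unsigned long)vaddr,
                                        base, size, -1)) {
                if (vaddr)
                        iounmap(vaddr);
                gen_pool_destroy(pool);
                return NULL;
        }
        memset_io(vaddr, 0, size);
        *vbase_out = vaddr;
        return pool;
}

/* allocations come back as kernel vaddrs; the device wants offsets */
static unsigned long sketch_muram_offset(void __iomem *vbase,
                                         unsigned long vaddr)
{
        return vaddr - (unsigned long)vbase;
}
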
drivers/net/ethernet/freescale/fman/fman_tgec.c
730
tgec->regs = mac_dev->vaddr;
drivers/net/ethernet/freescale/fman/mac.c
198
mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
drivers/net/ethernet/freescale/fman/mac.c
200
if (!mac_dev->vaddr) {
drivers/net/ethernet/freescale/fman/mac.h
29
void __iomem *vaddr;
drivers/net/ethernet/freescale/gianfar.c
1325
void *vaddr;
drivers/net/ethernet/freescale/gianfar.c
1342
vaddr = dma_alloc_coherent(dev,
drivers/net/ethernet/freescale/gianfar.c
1348
if (!vaddr)
drivers/net/ethernet/freescale/gianfar.c
1353
tx_queue->tx_bd_base = vaddr;
drivers/net/ethernet/freescale/gianfar.c
1358
vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
drivers/net/ethernet/freescale/gianfar.c
1364
rx_queue->rx_bd_base = vaddr;
drivers/net/ethernet/freescale/gianfar.c
1369
vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
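
Note: gianfar makes one dma_alloc_coherent() call and carves it into consecutive TX and RX descriptor rings by walking a single running vaddr. Sketched with byte counts in place of the txbd8/rxbd8 sizeof arithmetic:

#include <linux/dma-mapping.h>

static void *sketch_carve_rings(struct device *dev, size_t tx_bytes,
                                size_t rx_bytes, void **tx_base,
                                void **rx_base, dma_addr_t *handle)
{
        void *vaddr = dma_alloc_coherent(dev, tx_bytes + rx_bytes,
                                         handle, GFP_KERNEL);

        if (!vaddr)
                return NULL;
        *tx_base = vaddr;       /* TX ring at the front */
        vaddr += tx_bytes;      /* advance past it */
        *rx_base = vaddr;       /* RX ring immediately after */
        return *tx_base;        /* free with the original pointer */
}
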
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
716
mac_drv->io_base = mac_param->vaddr;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1054
mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
398
param->vaddr = mac_cb->vaddr;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
186
u8 __iomem *vaddr; /*virtual address*/
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
308
u8 __iomem *vaddr;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
793
mac_drv->io_base = mac_param->vaddr;
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
103
static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
109
*vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
111
if (!*vaddr) {
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
126
dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
424
void **vaddr = &wq->shadow_block_vaddr[i];
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
429
dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
470
void **vaddr = &wq->shadow_block_vaddr[i];
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
474
*vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
476
if (!*vaddr) {
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
15
void *vaddr, *align_vaddr;
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
18
vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
19
if (!vaddr)
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
24
align_vaddr = vaddr;
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
28
dma_free_coherent(dev, real_size, vaddr, paddr);
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
32
vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
33
if (!vaddr)
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
37
align_vaddr = vaddr + (align_paddr - paddr);
drivers/net/ethernet/huawei/hinic3/hinic3_common.c
41
mem_align->ori_vaddr = vaddr;
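
Note: hinic3_common shows the classic coherent-DMA alignment trick: if the first allocation comes back unaligned, free it, over-allocate by the alignment, and shift the CPU pointer by exactly the same delta as the bus address, keeping the original pair for the eventual free. Condensed here to the padded retry path:

#include <linux/dma-mapping.h>

static void *sketch_alloc_aligned(struct device *dev, size_t size,
                                  size_t align, dma_addr_t *align_paddr,
                                  void **ori_vaddr, dma_addr_t *ori_paddr)
{
        size_t real_size = size + align;        /* room to slide forward */
        void *vaddr = dma_alloc_coherent(dev, real_size, ori_paddr,
                                         GFP_KERNEL);

        if (!vaddr)
                return NULL;
        *align_paddr = ALIGN(*ori_paddr, align);
        *ori_vaddr = vaddr;     /* keep originals for dma_free_coherent() */
        /* the CPU and bus views shift by the same delta */
        return vaddr + (*align_paddr - *ori_paddr);
}
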
drivers/net/ethernet/ibm/ehea/ehea.h
312
u64 vaddr;
drivers/net/ethernet/ibm/ehea/ehea_main.c
1631
sg1entry->vaddr =
drivers/net/ethernet/ibm/ehea/ehea_main.c
1664
sg1entry->vaddr =
drivers/net/ethernet/ibm/ehea/ehea_main.c
1677
sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
drivers/net/ethernet/ibm/ehea/ehea_main.c
2598
rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
drivers/net/ethernet/ibm/ehea/ehea_main.c
2607
rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
drivers/net/ethernet/ibm/ehea/ehea_main.c
493
rwqe->sg_list[0].vaddr = tmp_addr;
drivers/net/ethernet/ibm/ehea/ehea_phyp.c
477
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
drivers/net/ethernet/ibm/ehea/ehea_phyp.c
488
vaddr, /* R6 */
drivers/net/ethernet/ibm/ehea/ehea_phyp.h
392
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
571
u64 vaddr = EHEA_BUSMAP_START;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
593
ehea_dir->ent[idx] = vaddr;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
594
vaddr += EHEA_SECTSIZE;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
906
mr->vaddr = EHEA_BUSMAP_START;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
937
old_mr->vaddr, EHEA_MR_ACC_CTRL,
drivers/net/ethernet/ibm/ehea/ehea_qmr.h
59
u64 vaddr;
drivers/net/ethernet/intel/idpf/idpf.h
885
return adapter->hw.mbx.vaddr + reg_offset;
drivers/net/ethernet/intel/idpf/idpf.h
900
return adapter->hw.rstat.vaddr + reg_offset;
drivers/net/ethernet/intel/idpf/idpf.h
926
return region->vaddr + reg_offset;
drivers/net/ethernet/intel/idpf/idpf_controlq.h
103
void __iomem *vaddr;
drivers/net/ethernet/intel/idpf/idpf_idc.c
444
adapter->hw.lan_regs[i].vaddr;
drivers/net/ethernet/intel/idpf/idpf_main.c
202
hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len);
drivers/net/ethernet/intel/idpf/idpf_main.c
203
if (!hw->mbx.vaddr) {
drivers/net/ethernet/intel/idpf/idpf_main.c
214
hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len);
drivers/net/ethernet/intel/idpf/idpf_main.c
215
if (!hw->rstat.vaddr) {
drivers/net/ethernet/intel/idpf/idpf_mem.h
15
#define idpf_mbx_wr32(a, reg, value) writel((value), ((a)->mbx.vaddr + (reg)))
drivers/net/ethernet/intel/idpf/idpf_mem.h
16
#define idpf_mbx_rd32(a, reg) readl((a)->mbx.vaddr + (reg))
drivers/net/ethernet/intel/idpf/idpf_mem.h
17
#define idpf_mbx_wr64(a, reg, value) writeq((value), ((a)->mbx.vaddr + (reg)))
drivers/net/ethernet/intel/idpf/idpf_mem.h
18
#define idpf_mbx_rd64(a, reg) readq((a)->mbx.vaddr + (reg))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1138
hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1139
if (!hw->lan_regs[i].vaddr) {
drivers/net/ethernet/marvell/mvneta_bm.c
230
u32 *vaddr;
drivers/net/ethernet/marvell/mvneta_bm.c
241
vaddr = phys_to_virt(buf_phys_addr);
drivers/net/ethernet/marvell/mvneta_bm.c
242
if (!vaddr)
drivers/net/ethernet/marvell/mvneta_bm.c
247
hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
246
iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
250
iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
264
iq->vaddr = NULL;
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
71
u8 *vaddr;
drivers/net/ethernet/marvell/skge.c
915
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
drivers/net/ethernet/marvell/skge.c
925
for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
304
struct vport_addr *vaddr);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
306
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
308
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
309
u16 vport = vaddr->vport;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
325
vaddr->mpfs = true;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
330
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
333
vport, mac, vaddr->flow_rule);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
339
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
341
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
342
u16 vport = vaddr->vport;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
348
if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
356
vaddr->mpfs = false;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
359
if (vaddr->flow_rule)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
360
mlx5_del_flow_rules(vaddr->flow_rule);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
361
vaddr->flow_rule = NULL;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
367
struct vport_addr *vaddr,
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
370
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
383
vaddr->vport == vport_num)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
385
switch (vaddr->action) {
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
415
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
419
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
420
u16 vport = vaddr->vport;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
437
update_allmulti_vports(esw, vaddr, esw_mc);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
443
if (!vaddr->mc_promisc)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
447
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
450
vport, mac, vaddr->flow_rule,
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
455
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
459
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
460
u16 vport = vaddr->vport;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
474
vport, mac, vaddr->flow_rule, esw_mc->refcnt,
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
477
if (vaddr->flow_rule)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
478
mlx5_del_flow_rules(vaddr->flow_rule);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
479
vaddr->flow_rule = NULL;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
484
if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
488
update_allmulti_vports(esw, vaddr, esw_mc);
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
64
bars[j].vaddr = NULL;
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
66
bars[j].vaddr = pci_iomap(pdev, i, bars[j].len);
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
67
if (!bars[j].vaddr) {
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
90
if (bars[i].vaddr) {
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
91
iounmap(bars[i].vaddr);
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
93
bars[i].vaddr = NULL;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
412
idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
413
idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
414
idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
415
idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
438
idev->db_pages = bar->vaddr;
drivers/net/ethernet/pensando/ionic/ionic_dev.h
44
void __iomem *vaddr;
drivers/net/ethernet/qlogic/qed/qed_rdma.c
1557
DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
drivers/net/ethernet/sfc/tx.c
200
u8 *vaddr;
drivers/net/ethernet/sfc/tx.c
202
vaddr = kmap_local_page(skb_frag_page(f));
drivers/net/ethernet/sfc/tx.c
204
efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
drivers/net/ethernet/sfc/tx.c
206
kunmap_local(vaddr);
drivers/net/ethernet/socionext/netsec.c
1208
if (!dring->vaddr || !dring->desc)
drivers/net/ethernet/socionext/netsec.c
1234
memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
drivers/net/ethernet/socionext/netsec.c
1247
if (dring->vaddr) {
drivers/net/ethernet/socionext/netsec.c
1249
dring->vaddr, dring->desc_dma);
drivers/net/ethernet/socionext/netsec.c
1250
dring->vaddr = NULL;
drivers/net/ethernet/socionext/netsec.c
1261
dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
drivers/net/ethernet/socionext/netsec.c
1263
if (!dring->vaddr)
drivers/net/ethernet/socionext/netsec.c
1285
de = dring->vaddr + (DESC_SZ * i);
drivers/net/ethernet/socionext/netsec.c
283
void *vaddr;
drivers/net/ethernet/socionext/netsec.c
616
struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
drivers/net/ethernet/socionext/netsec.c
648
entry = dring->vaddr + DESC_SZ * tail;
drivers/net/ethernet/socionext/netsec.c
695
entry = dring->vaddr + DESC_SZ * tail;
drivers/net/ethernet/socionext/netsec.c
798
de = dring->vaddr + (DESC_SZ * idx);
drivers/net/ethernet/socionext/netsec.c
970
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
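
The netsec hits are a textbook coherent descriptor ring: one dma_alloc_coherent() call produces paired CPU (vaddr) and device (desc_dma) views of the same memory, and individual descriptors are addressed as vaddr + DESC_SZ * idx. A hedged sketch with made-up sizes:

    #include <linux/dma-mapping.h>

    #define MY_DESC_SZ   16         /* hypothetical descriptor size */
    #define MY_DESC_NUM  256        /* hypothetical ring depth */

    struct my_ring {
            void *vaddr;            /* CPU view of the ring */
            dma_addr_t desc_dma;    /* device view of the same memory */
    };

    static int my_ring_alloc(struct device *dev, struct my_ring *r)
    {
            r->vaddr = dma_alloc_coherent(dev, MY_DESC_SZ * MY_DESC_NUM,
                                          &r->desc_dma, GFP_KERNEL);
            return r->vaddr ? 0 : -ENOMEM;
    }

    static void *my_ring_entry(struct my_ring *r, unsigned int idx)
    {
            return r->vaddr + MY_DESC_SZ * idx;     /* descriptors back to back */
    }

    static void my_ring_free(struct device *dev, struct my_ring *r)
    {
            if (r->vaddr) {
                    dma_free_coherent(dev, MY_DESC_SZ * MY_DESC_NUM,
                                      r->vaddr, r->desc_dma);
                    r->vaddr = NULL;
            }
    }
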
drivers/net/ethernet/sun/sunvnet_common.c
1086
u8 *vaddr;
drivers/net/ethernet/sun/sunvnet_common.c
1089
vaddr = kmap_local_page(skb_frag_page(f));
drivers/net/ethernet/sun/sunvnet_common.c
1092
err = ldc_map_single(lp, vaddr + skb_frag_off(f),
drivers/net/ethernet/sun/sunvnet_common.c
1095
kunmap_local(vaddr);
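
Both the sfc and sunvnet hits use the highmem-safe idiom for touching skb fragment pages: kmap_local_page() the page, index with skb_frag_off(), then kunmap_local() the same pointer. For example (an illustrative helper, not driver code):

    #include <linux/skbuff.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    static void my_copy_frag(const skb_frag_t *f, u8 *dst)
    {
            u8 *vaddr;

            vaddr = kmap_local_page(skb_frag_page(f));      /* short-lived mapping */
            memcpy(dst, vaddr + skb_frag_off(f), skb_frag_size(f));
            kunmap_local(vaddr);    /* must pair with kmap_local_page() */
    }

The nvme/tcp.c lines later in this listing apply the same pattern to checksum a page with crc32c().
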
drivers/net/wireless/ath/ath10k/core.c
2905
void *vaddr = NULL;
drivers/net/wireless/ath/ath10k/core.c
2935
vaddr = ar->wmi.mem_chunks[i].vaddr;
drivers/net/wireless/ath/ath10k/core.c
2941
if (!vaddr || !len) {
drivers/net/wireless/ath/ath10k/core.c
2951
ret = ath10k_hif_diag_read(ar, paddr, vaddr,
drivers/net/wireless/ath/ath10k/core.c
2960
vaddr += TGT_IRAM_READ_PER_ITR;
drivers/net/wireless/ath/ath10k/core.c
2964
ret = ath10k_hif_diag_read(ar, paddr, vaddr, remaining_len);
drivers/net/wireless/ath/ath10k/core.h
1019
void *vaddr;
drivers/net/wireless/ath/ath10k/core.h
186
void *vaddr;
drivers/net/wireless/ath/ath10k/hif.h
24
void *vaddr; /* for debugging mostly */
drivers/net/wireless/ath/ath10k/htc.c
208
sg_item.vaddr = skb->data;
drivers/net/wireless/ath/ath10k/htc.c
660
sg_item.vaddr = bundle_skb->data;
drivers/net/wireless/ath/ath10k/htt.h
1848
__le32 *vaddr;
drivers/net/wireless/ath/ath10k/htt.h
1916
struct htt_q_state *vaddr;
drivers/net/wireless/ath/ath10k/htt.h
2021
void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
drivers/net/wireless/ath/ath10k/htt.h
2040
void *vaddr)
drivers/net/wireless/ath/ath10k/htt.h
2043
htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
drivers/net/wireless/ath/ath10k/htt_rx.c
101
htt->rx_ring.paddrs_ring_64 = vaddr;
drivers/net/wireless/ath/ath10k/htt_rx.c
152
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
drivers/net/wireless/ath/ath10k/htt_rx.c
210
*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
drivers/net/wireless/ath/ath10k/htt_rx.c
312
sizeof(*htt->rx_ring.alloc_idx.vaddr),
drivers/net/wireless/ath/ath10k/htt_rx.c
313
htt->rx_ring.alloc_idx.vaddr,
drivers/net/wireless/ath/ath10k/htt_rx.c
315
htt->rx_ring.alloc_idx.vaddr = NULL;
drivers/net/wireless/ath/ath10k/htt_rx.c
788
void *vaddr, *vaddr_ring;
drivers/net/wireless/ath/ath10k/htt_rx.c
823
vaddr = dma_alloc_coherent(htt->ar->dev,
drivers/net/wireless/ath/ath10k/htt_rx.c
824
sizeof(*htt->rx_ring.alloc_idx.vaddr),
drivers/net/wireless/ath/ath10k/htt_rx.c
826
if (!vaddr)
drivers/net/wireless/ath/ath10k/htt_rx.c
829
htt->rx_ring.alloc_idx.vaddr = vaddr;
drivers/net/wireless/ath/ath10k/htt_rx.c
832
*htt->rx_ring.alloc_idx.vaddr = 0;
drivers/net/wireless/ath/ath10k/htt_rx.c
93
void *vaddr)
drivers/net/wireless/ath/ath10k/htt_rx.c
95
htt->rx_ring.paddrs_ring_32 = vaddr;
drivers/net/wireless/ath/ath10k/htt_rx.c
99
void *vaddr)
drivers/net/wireless/ath/ath10k/htt_tx.c
103
seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
drivers/net/wireless/ath/ath10k/htt_tx.c
105
ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
drivers/net/wireless/ath/ath10k/htt_tx.c
110
size = sizeof(*ar->htt.tx_q_state.vaddr);
drivers/net/wireless/ath/ath10k/htt_tx.c
1559
sg_items[0].vaddr = &txbuf->htc_hdr;
drivers/net/wireless/ath/ath10k/htt_tx.c
1568
sg_items[1].vaddr = msdu->data;
drivers/net/wireless/ath/ath10k/htt_tx.c
1771
sg_items[0].vaddr = &txbuf->htc_hdr;
drivers/net/wireless/ath/ath10k/htt_tx.c
1780
sg_items[1].vaddr = msdu->data;
drivers/net/wireless/ath/ath10k/htt_tx.c
382
size = sizeof(*htt->tx_q_state.vaddr);
drivers/net/wireless/ath/ath10k/htt_tx.c
385
kfree(htt->tx_q_state.vaddr);
drivers/net/wireless/ath/ath10k/htt_tx.c
402
size = sizeof(*htt->tx_q_state.vaddr);
drivers/net/wireless/ath/ath10k/htt_tx.c
403
htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
drivers/net/wireless/ath/ath10k/htt_tx.c
404
if (!htt->tx_q_state.vaddr)
drivers/net/wireless/ath/ath10k/htt_tx.c
407
htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
drivers/net/wireless/ath/ath10k/htt_tx.c
412
kfree(htt->tx_q_state.vaddr);
drivers/net/wireless/ath/ath10k/htt_tx.c
82
ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
drivers/net/wireless/ath/ath10k/htt_tx.c
83
ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
drivers/net/wireless/ath/ath10k/htt_tx.c
84
ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
drivers/net/wireless/ath/ath10k/htt_tx.c
866
fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
drivers/net/wireless/ath/ath10k/htt_tx.c
937
fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
drivers/net/wireless/ath/ath10k/pci.c
1382
items[i].vaddr, items[i].len);
drivers/net/wireless/ath/ath10k/pci.c
1400
items[i].vaddr, items[i].len);
drivers/net/wireless/ath/ath10k/snoc.c
1471
hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
drivers/net/wireless/ath/ath10k/snoc.c
1475
memcpy(buf, ar->msa.vaddr, current_region->len);
drivers/net/wireless/ath/ath10k/snoc.c
1479
memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
drivers/net/wireless/ath/ath10k/snoc.c
1585
ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
drivers/net/wireless/ath/ath10k/snoc.c
1588
if (IS_ERR(ar->msa.vaddr)) {
drivers/net/wireless/ath/ath10k/snoc.c
1591
return PTR_ERR(ar->msa.vaddr);
drivers/net/wireless/ath/ath10k/snoc.c
1594
ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
drivers/net/wireless/ath/ath10k/snoc.c
1597
if (!ar->msa.vaddr) {
drivers/net/wireless/ath/ath10k/snoc.c
1606
ar->msa.vaddr);
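
The snoc.c lines show two acquisition paths for the MSA region with two different failure conventions: devm_memremap() of a firmware carveout reports errors via ERR_PTR/IS_ERR, while the dmam_alloc_coherent() fallback reports NULL. A minimal sketch under those assumptions (the carveout test and caching flag are placeholders):

    #include <linux/device.h>
    #include <linux/io.h>
    #include <linux/err.h>
    #include <linux/dma-mapping.h>

    struct my_msa {
            void *vaddr;
            dma_addr_t paddr;
            size_t mem_size;
    };

    static int my_msa_setup(struct device *dev, struct my_msa *msa,
                            bool have_carveout)
    {
            if (have_carveout) {
                    msa->vaddr = devm_memremap(dev, msa->paddr, msa->mem_size,
                                               MEMREMAP_WT);
                    if (IS_ERR(msa->vaddr))         /* ERR_PTR convention */
                            return PTR_ERR(msa->vaddr);
            } else {
                    msa->vaddr = dmam_alloc_coherent(dev, msa->mem_size,
                                                     &msa->paddr, GFP_KERNEL);
                    if (!msa->vaddr)                /* NULL convention */
                            return -ENOMEM;
            }
            return 0;
    }
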
drivers/net/wireless/ath/ath10k/wmi-tlv.c
3070
pkt_addr->vaddr = skb;
drivers/net/wireless/ath/ath10k/wmi-tlv.h
1647
void *vaddr;
drivers/net/wireless/ath/ath10k/wmi.c
2447
msdu = pkt_addr->vaddr;
drivers/net/wireless/ath/ath10k/wmi.c
5366
void *vaddr;
drivers/net/wireless/ath/ath10k/wmi.c
5369
vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
drivers/net/wireless/ath/ath10k/wmi.c
5371
if (!vaddr)
drivers/net/wireless/ath/ath10k/wmi.c
5374
ar->wmi.mem_chunks[idx].vaddr = vaddr;
drivers/net/wireless/ath/ath10k/wmi.c
9602
ar->wmi.mem_chunks[i].vaddr,
drivers/net/wireless/ath/ath10k/wmi.c
9619
msdu = pkt_addr->vaddr;
drivers/net/wireless/ath/ath11k/dp.c
267
ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
drivers/net/wireless/ath/ath11k/dp.c
268
ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
drivers/net/wireless/ath/ath11k/dp.c
271
params.ring_base_vaddr = ring->vaddr;
drivers/net/wireless/ath/ath11k/dp.c
497
if (!slist[i].vaddr)
drivers/net/wireless/ath/ath11k/dp.c
501
slist[i].vaddr, slist[i].paddr);
drivers/net/wireless/ath/ath11k/dp.c
502
slist[i].vaddr = NULL;
drivers/net/wireless/ath/ath11k/dp.c
533
slist[i].vaddr = dma_alloc_coherent(ab->dev,
drivers/net/wireless/ath/ath11k/dp.c
536
if (!slist[i].vaddr) {
drivers/net/wireless/ath/ath11k/dp.c
543
scatter_buf = slist[scatter_idx].vaddr;
drivers/net/wireless/ath/ath11k/dp.c
547
align_bytes = link_desc_banks[i].vaddr -
drivers/net/wireless/ath/ath11k/dp.c
564
scatter_buf = slist[scatter_idx].vaddr;
drivers/net/wireless/ath/ath11k/dp.c
568
end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
drivers/net/wireless/ath/ath11k/dp.c
621
desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
drivers/net/wireless/ath/ath11k/dp.c
624
((unsigned long)desc_bank[i].vaddr -
drivers/net/wireless/ath/ath11k/dp.c
748
align_bytes = link_desc_banks[i].vaddr -
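
ath11k/dp.c lines 267-268 (mirrored in ath12k) meet a hardware alignment requirement on top of dma_alloc_coherent() by over-allocating, PTR_ALIGN()-ing the CPU pointer, and shifting the DMA address by the same byte offset so both views stay in lockstep. Roughly, with a made-up alignment:

    #include <linux/dma-mapping.h>
    #include <linux/kernel.h>

    #define MY_RING_ALIGN 256       /* hypothetical HW requirement */

    struct my_srng {
            void *vaddr_unaligned, *vaddr;
            dma_addr_t paddr_unaligned, paddr;
    };

    static int my_srng_alloc(struct device *dev, struct my_srng *ring,
                             size_t size)
    {
            /* Over-allocate so an aligned sub-buffer always fits. */
            ring->vaddr_unaligned =
                    dma_alloc_coherent(dev, size + MY_RING_ALIGN - 1,
                                       &ring->paddr_unaligned, GFP_KERNEL);
            if (!ring->vaddr_unaligned)
                    return -ENOMEM;

            ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, MY_RING_ALIGN);
            /* CPU and DMA views must be offset by the same amount. */
            ring->paddr = ring->paddr_unaligned +
                          ((unsigned long)ring->vaddr -
                           (unsigned long)ring->vaddr_unaligned);
            return 0;
    }
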
drivers/net/wireless/ath/ath11k/dp.h
133
void *vaddr;
drivers/net/wireless/ath/ath11k/dp.h
65
u32 *vaddr;
drivers/net/wireless/ath/ath11k/dp_rx.c
1004
u32 hw_desc_sz, *vaddr;
drivers/net/wireless/ath/ath11k/dp_rx.c
1062
vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
drivers/net/wireless/ath/ath11k/dp_rx.c
1064
rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
drivers/net/wireless/ath/ath11k/dp_rx.c
1066
ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
drivers/net/wireless/ath/ath11k/dp_rx.c
3441
msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
drivers/net/wireless/ath/ath11k/dp_rx.c
3859
link_desc_va = link_desc_banks[desc_bank].vaddr +
drivers/net/wireless/ath/ath11k/dp_rx.c
4286
link_desc_va = link_desc_banks[desc_bank].vaddr +
drivers/net/wireless/ath/ath11k/dp_rx.c
4744
(void *)pmon->link_desc_banks[sw_cookie].vaddr +
drivers/net/wireless/ath/ath11k/dp_rx.c
4748
(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
drivers/net/wireless/ath/ath11k/dp_rx.c
5385
(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
drivers/net/wireless/ath/ath11k/hal.c
1052
srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
drivers/net/wireless/ath/ath11k/hal.c
1057
srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
drivers/net/wireless/ath/ath11k/hal.c
1084
srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
drivers/net/wireless/ath/ath11k/hal.c
1090
srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
drivers/net/wireless/ath/ath11k/hal.c
200
hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
drivers/net/wireless/ath/ath11k/hal.c
202
if (!hal->rdp.vaddr)
drivers/net/wireless/ath/ath11k/hal.c
213
if (!hal->rdp.vaddr)
drivers/net/wireless/ath/ath11k/hal.c
218
hal->rdp.vaddr, hal->rdp.paddr);
drivers/net/wireless/ath/ath11k/hal.c
219
hal->rdp.vaddr = NULL;
drivers/net/wireless/ath/ath11k/hal.c
228
hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
drivers/net/wireless/ath/ath11k/hal.c
230
if (!hal->wrp.vaddr)
drivers/net/wireless/ath/ath11k/hal.c
241
if (!hal->wrp.vaddr)
drivers/net/wireless/ath/ath11k/hal.c
246
hal->wrp.vaddr, hal->wrp.paddr);
drivers/net/wireless/ath/ath11k/hal.c
247
hal->wrp.vaddr = NULL;
drivers/net/wireless/ath/ath11k/hal.c
322
(unsigned long)hal->rdp.vaddr);
drivers/net/wireless/ath/ath11k/hal.c
424
(unsigned long)hal->rdp.vaddr);
drivers/net/wireless/ath/ath11k/hal.c
535
(unsigned long)ab->hal.wrp.vaddr);
drivers/net/wireless/ath/ath11k/hal.c
539
(unsigned long)ab->hal.rdp.vaddr);
drivers/net/wireless/ath/ath11k/hal.c
551
(unsigned long)ab->hal.rdp.vaddr);
drivers/net/wireless/ath/ath11k/hal.c
555
(unsigned long)ab->hal.wrp.vaddr);
drivers/net/wireless/ath/ath11k/hal.c
915
link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
drivers/net/wireless/ath/ath11k/hal.c
926
link_addr = (void *)sbuf[i].vaddr +
drivers/net/wireless/ath/ath11k/hal.h
495
struct hal_wbm_link_desc *vaddr;
drivers/net/wireless/ath/ath11k/hal.h
897
u32 *vaddr;
drivers/net/wireless/ath/ath11k/hal.h
903
u32 *vaddr;
drivers/net/wireless/ath/ath11k/hal.h
920
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
drivers/net/wireless/ath/ath11k/hal_rx.c
701
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
drivers/net/wireless/ath/ath11k/hal_rx.c
704
struct hal_rx_reo_queue *qdesc = vaddr;
drivers/net/wireless/ath/ath11k/qmi.c
1974
ab->qmi.target_mem[i].vaddr,
drivers/net/wireless/ath/ath11k/qmi.c
1976
ab->qmi.target_mem[i].vaddr = NULL;
drivers/net/wireless/ath/ath11k/qmi.c
1993
if (chunk->vaddr) {
drivers/net/wireless/ath/ath11k/qmi.c
2009
chunk->vaddr, chunk->paddr);
drivers/net/wireless/ath/ath11k/qmi.c
2010
chunk->vaddr = NULL;
drivers/net/wireless/ath/ath11k/qmi.c
2013
chunk->vaddr = dma_alloc_coherent(ab->dev,
drivers/net/wireless/ath/ath11k/qmi.c
2017
if (!chunk->vaddr) {
drivers/net/wireless/ath/ath11k/qmi.c
2521
if (m3_mem->vaddr)
drivers/net/wireless/ath/ath11k/qmi.c
2546
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
drivers/net/wireless/ath/ath11k/qmi.c
2549
if (!m3_mem->vaddr) {
drivers/net/wireless/ath/ath11k/qmi.c
2556
memcpy(m3_mem->vaddr, m3_data, m3_len);
drivers/net/wireless/ath/ath11k/qmi.c
2571
if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr)
drivers/net/wireless/ath/ath11k/qmi.c
2575
m3_mem->vaddr, m3_mem->paddr);
drivers/net/wireless/ath/ath11k/qmi.c
2576
m3_mem->vaddr = NULL;
drivers/net/wireless/ath/ath11k/qmi.h
106
u32 *vaddr;
drivers/net/wireless/ath/ath11k/qmi.h
127
void *vaddr;
drivers/net/wireless/ath/ath11k/wmi.h
2299
void *vaddr;
drivers/net/wireless/ath/ath11k/wmi.h
5324
void *vaddr;
drivers/net/wireless/ath/ath12k/dp.c
1082
if (!dp->spt_info[i].vaddr)
drivers/net/wireless/ath/ath12k/dp.c
1086
dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
drivers/net/wireless/ath/ath12k/dp.c
1087
dp->spt_info[i].vaddr = NULL;
drivers/net/wireless/ath/ath12k/dp.c
1155
return dp->spt_info[ppt_idx].vaddr + spt_idx;
drivers/net/wireless/ath/ath12k/dp.c
1375
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
drivers/net/wireless/ath/ath12k/dp.c
1380
if (!dp->spt_info[i].vaddr) {
drivers/net/wireless/ath/ath12k/dp.c
1426
lut->vaddr = PTR_ALIGN(lut->vaddr_unaligned, HAL_REO_QLUT_ADDR_ALIGN);
drivers/net/wireless/ath/ath12k/dp.c
1428
((unsigned long)lut->vaddr - (unsigned long)lut->vaddr_unaligned);
drivers/net/wireless/ath/ath12k/dp.c
257
ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
drivers/net/wireless/ath/ath12k/dp.c
258
ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
drivers/net/wireless/ath/ath12k/dp.c
261
params.ring_base_vaddr = ring->vaddr;
drivers/net/wireless/ath/ath12k/dp.c
555
if (!slist[i].vaddr)
drivers/net/wireless/ath/ath12k/dp.c
559
slist[i].vaddr, slist[i].paddr);
drivers/net/wireless/ath/ath12k/dp.c
560
slist[i].vaddr = NULL;
drivers/net/wireless/ath/ath12k/dp.c
592
slist[i].vaddr = dma_alloc_coherent(ab->dev,
drivers/net/wireless/ath/ath12k/dp.c
595
if (!slist[i].vaddr) {
drivers/net/wireless/ath/ath12k/dp.c
602
scatter_buf = slist[scatter_idx].vaddr;
drivers/net/wireless/ath/ath12k/dp.c
606
align_bytes = link_desc_banks[i].vaddr -
drivers/net/wireless/ath/ath12k/dp.c
625
scatter_buf = slist[scatter_idx].vaddr;
drivers/net/wireless/ath/ath12k/dp.c
629
end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
drivers/net/wireless/ath/ath12k/dp.c
682
desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
drivers/net/wireless/ath/ath12k/dp.c
685
((unsigned long)desc_bank[i].vaddr -
drivers/net/wireless/ath/ath12k/dp.c
812
align_bytes = link_desc_banks[i].vaddr -
drivers/net/wireless/ath/ath12k/dp.h
35
u32 *vaddr;
drivers/net/wireless/ath/ath12k/dp.h
360
u64 *vaddr;
drivers/net/wireless/ath/ath12k/dp.h
370
u32 *vaddr;
drivers/net/wireless/ath/ath12k/dp.h
93
void *vaddr;
drivers/net/wireless/ath/ath12k/dp_rx.c
361
if (tid_qbuf->vaddr) {
drivers/net/wireless/ath/ath12k/dp_rx.c
364
kfree(tid_qbuf->vaddr);
drivers/net/wireless/ath/ath12k/dp_rx.c
365
tid_qbuf->vaddr = NULL;
drivers/net/wireless/ath/ath12k/dp_rx.c
613
(!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
drivers/net/wireless/ath/ath12k/dp_rx.h
16
void *vaddr;
drivers/net/wireless/ath/ath12k/hal.c
159
hal->rdp.vaddr = dma_alloc_coherent(hal->dev, size, &hal->rdp.paddr,
drivers/net/wireless/ath/ath12k/hal.c
161
if (!hal->rdp.vaddr)
drivers/net/wireless/ath/ath12k/hal.c
171
if (!hal->rdp.vaddr)
drivers/net/wireless/ath/ath12k/hal.c
176
hal->rdp.vaddr, hal->rdp.paddr);
drivers/net/wireless/ath/ath12k/hal.c
177
hal->rdp.vaddr = NULL;
drivers/net/wireless/ath/ath12k/hal.c
185
hal->wrp.vaddr = dma_alloc_coherent(hal->dev, size, &hal->wrp.paddr,
drivers/net/wireless/ath/ath12k/hal.c
187
if (!hal->wrp.vaddr)
drivers/net/wireless/ath/ath12k/hal.c
197
if (!hal->wrp.vaddr)
drivers/net/wireless/ath/ath12k/hal.c
202
hal->wrp.vaddr, hal->wrp.paddr);
drivers/net/wireless/ath/ath12k/hal.c
203
hal->wrp.vaddr = NULL;
drivers/net/wireless/ath/ath12k/hal.c
267
(unsigned long)ab->hal.wrp.vaddr);
drivers/net/wireless/ath/ath12k/hal.c
271
(unsigned long)ab->hal.rdp.vaddr);
drivers/net/wireless/ath/ath12k/hal.c
283
(unsigned long)ab->hal.rdp.vaddr);
drivers/net/wireless/ath/ath12k/hal.c
287
(unsigned long)ab->hal.wrp.vaddr);
drivers/net/wireless/ath/ath12k/hal.c
623
srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
drivers/net/wireless/ath/ath12k/hal.c
630
srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
drivers/net/wireless/ath/ath12k/hal.c
646
srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
drivers/net/wireless/ath/ath12k/hal.c
654
srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
drivers/net/wireless/ath/ath12k/hal.h
1221
u32 *vaddr;
drivers/net/wireless/ath/ath12k/hal.h
1227
u32 *vaddr;
drivers/net/wireless/ath/ath12k/hal.h
477
struct hal_wbm_link_desc *vaddr;
drivers/net/wireless/ath/ath12k/qmi.c
3156
if (!m3_mem->vaddr)
drivers/net/wireless/ath/ath12k/qmi.c
3160
m3_mem->vaddr, m3_mem->paddr);
drivers/net/wireless/ath/ath12k/qmi.c
3161
m3_mem->vaddr = NULL;
drivers/net/wireless/ath/ath12k/qmi.c
3197
if (m3_mem->vaddr) {
drivers/net/wireless/ath/ath12k/qmi.c
3205
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
drivers/net/wireless/ath/ath12k/qmi.c
3208
if (!m3_mem->vaddr) {
drivers/net/wireless/ath/ath12k/qmi.c
3218
memcpy(m3_mem->vaddr, m3_data, m3_len);
drivers/net/wireless/ath/ath12k/qmi.c
3285
if (!aux_uc_mem->vaddr)
drivers/net/wireless/ath/ath12k/qmi.c
3289
aux_uc_mem->vaddr, aux_uc_mem->paddr);
drivers/net/wireless/ath/ath12k/qmi.c
3290
aux_uc_mem->vaddr = NULL;
drivers/net/wireless/ath/ath12k/qmi.c
3327
if (aux_uc_mem->vaddr) {
drivers/net/wireless/ath/ath12k/qmi.c
3335
aux_uc_mem->vaddr = dma_alloc_coherent(ab->dev, aux_uc_len,
drivers/net/wireless/ath/ath12k/qmi.c
3337
if (!aux_uc_mem->vaddr) {
drivers/net/wireless/ath/ath12k/qmi.c
3345
memcpy(aux_uc_mem->vaddr, aux_uc_data, aux_uc_len);
drivers/net/wireless/ath/ath12k/qmi.h
128
void *vaddr;
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
2644
dp->link_desc_banks[desc_bank].vaddr +
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
1473
link_desc_va = link_desc_banks[desc_bank].vaddr +
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
258
void *vaddr, *vaddr_aligned;
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
265
if (!buf->vaddr) {
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
275
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
276
if (!vaddr)
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
279
vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
288
kfree(vaddr);
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
292
buf->vaddr = vaddr;
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
53
qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
56
qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
84
qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
858
msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
87
qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
drivers/net/wireless/ath/ath12k/wifi7/hal.c
192
(unsigned long)hal->rdp.vaddr);
drivers/net/wireless/ath/ath12k/wifi7/hal.c
280
(unsigned long)hal->rdp.vaddr);
drivers/net/wireless/ath/ath12k/wifi7/hal.c
505
link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
drivers/net/wireless/ath/ath12k/wifi7/hal.c
516
link_addr = (void *)sbuf[i].vaddr +
drivers/net/wireless/ath/ath12k/wmi.h
2414
void *vaddr;
drivers/net/wireless/ath/ath6kl/target.h
333
#define AR6003_VTOP(vaddr) ((vaddr) & 0x001fffff)
drivers/net/wireless/ath/ath6kl/target.h
334
#define AR6004_VTOP(vaddr) (vaddr)
drivers/net/wireless/ath/ath6kl/target.h
336
#define TARG_VTOP(target_type, vaddr) \
drivers/net/wireless/ath/ath6kl/target.h
337
(((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
drivers/net/wireless/ath/ath6kl/target.h
338
(((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
213
void __iomem *vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
224
vaddr = pcim_iomap_table(pdev)[index];
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
225
if (!vaddr)
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
229
index, vaddr, &busaddr, (int)len);
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
231
return vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
67
struct sk_buff **vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
72
vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
74
if (!vaddr)
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
77
priv->tx_skb = vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
79
vaddr += priv->tx_bd_num;
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
80
priv->rx_skb = vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
236
void *vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
242
vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
243
if (!vaddr)
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
248
ps->bd_table_vaddr = vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
252
ps->tx_bd_vbase = vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
255
pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
262
vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
265
ps->rx_bd_vbase = vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
277
pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
189
void *vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
199
vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
200
if (!vaddr)
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
205
ts->tx_bd_vbase = vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
211
pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
218
vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num;
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
221
ts->rx_bd_vbase = vaddr;
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
224
pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
228
vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num;
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
231
extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr;
drivers/net/wireless/realtek/rtw89/pci.c
1527
txwp_info = txwd->vaddr + txwd_len;
drivers/net/wireless/realtek/rtw89/pci.c
1534
txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
drivers/net/wireless/realtek/rtw89/pci.c
1541
rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
drivers/net/wireless/realtek/rtw89/pci.c
3561
txwd->vaddr = cur_vaddr;
drivers/net/wireless/realtek/rtw89/pci.h
1537
void *vaddr;
drivers/net/wireless/realtek/rtw89/pci.h
1714
memset(txwd->vaddr, 0, wd_ring->page_size);
drivers/nvme/host/tcp.c
469
const void *vaddr = kmap_local_page(page);
drivers/nvme/host/tcp.c
472
*crcp = crc32c(*crcp, vaddr + off, n);
drivers/nvme/host/tcp.c
473
kunmap_local(vaddr);
drivers/parisc/iosapic.c
895
void *iosapic_register(unsigned long hpa, void __iomem *vaddr)
drivers/parisc/iosapic.c
924
isi->addr = vaddr;
drivers/parisc/sba_iommu.c
912
sba_free(struct device *hwdev, size_t size, void *vaddr,
drivers/parisc/sba_iommu.c
916
free_pages((unsigned long) vaddr, get_order(size));
drivers/pci/controller/dwc/pcie-designware.c
1165
ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
drivers/pci/controller/dwc/pcie-designware.c
1167
if (!ll->vaddr.mem)
drivers/pci/controller/dwc/pcie-designware.c
1176
ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
drivers/pci/controller/dwc/pcie-designware.c
1178
if (!ll->vaddr.mem)
drivers/pci/endpoint/functions/pci-epf-mhi.c
167
phys_addr_t *paddr, void __iomem **vaddr,
drivers/pci/endpoint/functions/pci-epf-mhi.c
175
*vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
drivers/pci/endpoint/functions/pci-epf-mhi.c
176
if (!*vaddr)
drivers/pci/endpoint/functions/pci-epf-mhi.c
182
pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
drivers/pci/endpoint/functions/pci-epf-mhi.c
187
*vaddr = *vaddr + offset;
drivers/pci/endpoint/functions/pci-epf-mhi.c
193
phys_addr_t *paddr, void __iomem **vaddr,
drivers/pci/endpoint/functions/pci-epf-mhi.c
199
return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
drivers/pci/endpoint/functions/pci-epf-mhi.c
205
void __iomem *vaddr, size_t offset,
drivers/pci/endpoint/functions/pci-epf-mhi.c
213
pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
drivers/pci/endpoint/functions/pci-epf-mhi.c
218
phys_addr_t paddr, void __iomem *vaddr,
drivers/pci/endpoint/functions/pci-epf-mhi.c
224
__pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
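
The pci-epf-mhi helpers account for host addresses that are not aligned to the endpoint's address window: they allocate size + offset from the window, advance both the physical and __iomem addresses by offset, and subtract it again on free. A sketch under those assumptions:

    #include <linux/pci-epc.h>

    static void __iomem *my_alloc_window(struct pci_epc *epc, size_t size,
                                         size_t offset, phys_addr_t *paddr)
    {
            void __iomem *vaddr;

            vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
            if (!vaddr)
                    return NULL;

            *paddr += offset;       /* both views point at the real start */
            return vaddr + offset;
    }

    static void my_free_window(struct pci_epc *epc, size_t size, size_t offset,
                               phys_addr_t paddr, void __iomem *vaddr)
    {
            /* Undo the offset before returning the window. */
            pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
                                  size + offset);
    }
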
drivers/pci/p2pdma.c
142
for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
drivers/pci/p2pdma.c
152
ret = vm_insert_page(vma, vaddr, page);
drivers/pci/p2pdma.c
99
unsigned long vaddr;
drivers/pci/pci.c
4040
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
drivers/pci/pci.c
4048
return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
drivers/pci/pci.c
4073
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
drivers/pci/pci.c
4075
vunmap_range(vaddr, vaddr + resource_size(res));
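
pci.c lines 4040-4075 are the core of pci_remap_iospace()/pci_unmap_iospace(): port I/O resources live at a fixed virtual window, PCI_IOBASE + res->start, and the backing physical range is installed or torn down with vmap_page_range()/vunmap_range(). In outline (the real functions also validate the resource, and the protection flag here is an assumption):

    #include <linux/pci.h>
    #include <linux/vmalloc.h>

    static int my_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
    {
            unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

            return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
                                   pgprot_device(PAGE_KERNEL));
    }

    static void my_unmap_iospace(const struct resource *res)
    {
            unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

            vunmap_range(vaddr, vaddr + resource_size(res));
    }
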
drivers/scsi/aic7xxx/aic79xx.h
543
uint8_t *vaddr;
drivers/scsi/aic7xxx/aic79xx_core.c
523
/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
drivers/scsi/aic7xxx/aic79xx_core.c
6510
sns_map->vaddr, sns_map->dmamap);
drivers/scsi/aic7xxx/aic79xx_core.c
6525
sg_map->vaddr, sg_map->dmamap);
drivers/scsi/aic7xxx/aic79xx_core.c
6540
hscb_map->vaddr, hscb_map->dmamap);
drivers/scsi/aic7xxx/aic79xx_core.c
6777
hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
drivers/scsi/aic7xxx/aic79xx_core.c
6787
(void **)&hscb_map->vaddr,
drivers/scsi/aic7xxx/aic79xx_core.c
6796
hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
drivers/scsi/aic7xxx/aic79xx_core.c
6799
hscb = (struct hardware_scb *)hscb_map->vaddr;
drivers/scsi/aic7xxx/aic79xx_core.c
6810
segs = sg_map->vaddr + offset;
drivers/scsi/aic7xxx/aic79xx_core.c
6820
(void **)&sg_map->vaddr,
drivers/scsi/aic7xxx/aic79xx_core.c
6829
sg_map->vaddr, ahd_sglist_allocsize(ahd),
drivers/scsi/aic7xxx/aic79xx_core.c
6832
segs = sg_map->vaddr;
drivers/scsi/aic7xxx/aic79xx_core.c
6847
sense_data = sense_map->vaddr + offset;
drivers/scsi/aic7xxx/aic79xx_core.c
6857
(void **)&sense_map->vaddr,
drivers/scsi/aic7xxx/aic79xx_core.c
6866
sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
drivers/scsi/aic7xxx/aic79xx_core.c
6869
sense_data = sense_map->vaddr;
drivers/scsi/aic7xxx/aic79xx_core.c
7060
(void **)&ahd->shared_data_map.vaddr,
drivers/scsi/aic7xxx/aic79xx_core.c
7070
ahd->shared_data_map.vaddr, driver_data_size,
drivers/scsi/aic7xxx/aic79xx_core.c
7073
ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr;
drivers/scsi/aic7xxx/aic79xx_osm.c
951
ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
drivers/scsi/aic7xxx/aic79xx_osm.c
954
*vaddr = dma_alloc_coherent(&ahd->dev_softc->dev, dmat->maxsize, mapp,
drivers/scsi/aic7xxx/aic79xx_osm.c
956
if (*vaddr == NULL)
drivers/scsi/aic7xxx/aic79xx_osm.c
963
void* vaddr, bus_dmamap_t map)
drivers/scsi/aic7xxx/aic79xx_osm.c
965
dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map);
drivers/scsi/aic7xxx/aic7xxx_osm.c
851
ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
drivers/scsi/aic7xxx/aic7xxx_osm.c
855
*vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
drivers/scsi/aic7xxx/aic7xxx_osm.c
856
if (*vaddr == NULL)
drivers/scsi/aic7xxx/aic7xxx_osm.c
863
void* vaddr, bus_dmamap_t map)
drivers/scsi/aic7xxx/aic7xxx_osm.c
865
dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
drivers/scsi/aic94xx/aic94xx_hwi.c
1038
ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool,
drivers/scsi/aic94xx/aic94xx_hwi.c
1041
if (!ascb->dma_scb.vaddr) {
drivers/scsi/aic94xx/aic94xx_hwi.c
1058
dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
drivers/scsi/aic94xx/aic94xx_hwi.c
1134
memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb));
drivers/scsi/aic94xx/aic94xx_hwi.c
1136
ascb->scb = ascb->dma_scb.vaddr;
drivers/scsi/aic94xx/aic94xx_hwi.c
221
seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
drivers/scsi/aic94xx/aic94xx_hwi.c
223
if (!seq->next_scb.vaddr) {
drivers/scsi/aic94xx/aic94xx_hwi.c
259
asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
drivers/scsi/aic94xx/aic94xx_hwi.c
284
memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
drivers/scsi/aic94xx/aic94xx_hwi.c
55
phy->identify_frame = phy->id_frm_tok->vaddr;
drivers/scsi/aic94xx/aic94xx_hwi.h
248
token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev,
drivers/scsi/aic94xx/aic94xx_hwi.h
252
if (!token->vaddr) {
drivers/scsi/aic94xx/aic94xx_hwi.h
265
token->vaddr, token->dma_handle);
drivers/scsi/aic94xx/aic94xx_hwi.h
274
ascb->scb = ascb->dma_scb.vaddr;
drivers/scsi/aic94xx/aic94xx_hwi.h
329
dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
drivers/scsi/aic94xx/aic94xx_hwi.h
80
void *vaddr;
drivers/scsi/aic94xx/aic94xx_init.c
586
if (asd_ha->seq.next_scb.vaddr) {
drivers/scsi/aic94xx/aic94xx_init.c
587
dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr,
drivers/scsi/aic94xx/aic94xx_init.c
589
asd_ha->seq.next_scb.vaddr = NULL;
drivers/scsi/aic94xx/aic94xx_scb.c
231
memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size);
drivers/scsi/aic94xx/aic94xx_scb.c
373
memset(edb->vaddr, 0, ASD_EDB_SIZE);
drivers/scsi/aic94xx/aic94xx_scb.c
418
ascb->dma_scb.vaddr,
drivers/scsi/aic94xx/aic94xx_scb.c
584
ascb->dma_scb.vaddr,
drivers/scsi/aic94xx/aic94xx_seq.c
309
memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
drivers/scsi/aic94xx/aic94xx_task.c
174
r = edb->vaddr;
drivers/scsi/aic94xx/aic94xx_task.c
84
&((struct sg_el *)ascb->sg_arr->vaddr)[i];
drivers/scsi/aic94xx/aic94xx_tmf.c
286
ascb->tag = *(__be16 *)(edb->vaddr+4);
drivers/scsi/aic94xx/aic94xx_tmf.c
287
fh = edb->vaddr + 16;
drivers/scsi/aic94xx/aic94xx_tmf.c
288
ru = edb->vaddr + 16 + sizeof(*fh);
drivers/scsi/csiostor/csio_hw.c
3856
fl_sg->flbufs[n].vaddr,
drivers/scsi/csiostor/csio_lnode.c
1774
csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
drivers/scsi/csiostor/csio_lnode.c
1850
dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
drivers/scsi/csiostor/csio_lnode.c
1852
if (!dma_buf->vaddr) {
drivers/scsi/csiostor/csio_lnode.c
1877
if (dma_buf->vaddr)
drivers/scsi/csiostor/csio_lnode.c
1878
dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
drivers/scsi/csiostor/csio_lnode.c
278
cmd = fdmi_req->dma_buf.vaddr;
drivers/scsi/csiostor/csio_lnode.c
312
cmd = fdmi_req->dma_buf.vaddr;
drivers/scsi/csiostor/csio_lnode.c
428
cmd = fdmi_req->dma_buf.vaddr;
drivers/scsi/csiostor/csio_lnode.c
529
cmd = fdmi_req->dma_buf.vaddr;
drivers/scsi/csiostor/csio_lnode.c
588
cmd = fdmi_req->dma_buf.vaddr;
drivers/scsi/csiostor/csio_scsi.c
1525
buf_addr = dma_buf->vaddr + buf_off;
drivers/scsi/csiostor/csio_scsi.c
1586
fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
drivers/scsi/csiostor/csio_scsi.c
2044
fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
drivers/scsi/csiostor/csio_scsi.c
2354
ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
drivers/scsi/csiostor/csio_scsi.c
2356
if (!ddp_desc->vaddr) {
drivers/scsi/csiostor/csio_scsi.c
2378
ddp_desc->vaddr, ddp_desc->paddr);
drivers/scsi/csiostor/csio_scsi.c
2405
ddp_desc->vaddr, ddp_desc->paddr);
drivers/scsi/csiostor/csio_scsi.c
2450
dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
drivers/scsi/csiostor/csio_scsi.c
2452
if (!dma_buf->vaddr) {
drivers/scsi/csiostor/csio_scsi.c
2490
dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
drivers/scsi/csiostor/csio_scsi.c
2521
dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
drivers/scsi/csiostor/csio_wr.c
1077
fbuf->vaddr = buf->vaddr;
drivers/scsi/csiostor/csio_wr.c
127
buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
drivers/scsi/csiostor/csio_wr.c
129
if (!buf->vaddr) {
drivers/scsi/csiostor/csio_wr.c
1701
if (!buf->vaddr)
drivers/scsi/csiostor/csio_wr.c
1704
buf->len, buf->vaddr,
drivers/scsi/csiostor/csio_wr.h
233
void *vaddr; /* Virtual address */
drivers/scsi/fnic/fnic_main.c
565
if (fnic->bar0.vaddr)
drivers/scsi/fnic/fnic_main.c
566
iounmap(fnic->bar0.vaddr);
drivers/scsi/fnic/fnic_main.c
788
fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
drivers/scsi/fnic/fnic_main.c
792
if (!fnic->bar0.vaddr) {
drivers/scsi/fnic/vnic_dev.c
143
vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
drivers/scsi/fnic/vnic_dev.c
162
if (!vdev->res[type].vaddr)
drivers/scsi/fnic/vnic_dev.c
170
return (char __iomem *)vdev->res[type].vaddr +
drivers/scsi/fnic/vnic_dev.c
173
return (char __iomem *)vdev->res[type].vaddr;
drivers/scsi/fnic/vnic_dev.c
38
void __iomem *vaddr;
drivers/scsi/fnic/vnic_dev.c
87
rh = bar->vaddr;
drivers/scsi/fnic/vnic_dev.h
79
void __iomem *vaddr;
drivers/scsi/hpsa.c
1161
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
drivers/scsi/hpsa.c
1165
writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
drivers/scsi/hpsa.c
1169
writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
drivers/scsi/hpsa.c
303
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
drivers/scsi/hpsa.c
313
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
drivers/scsi/hpsa.c
7087
void __iomem *vaddr;
drivers/scsi/hpsa.c
7090
vaddr = pci_ioremap_bar(pdev, 0);
drivers/scsi/hpsa.c
7091
if (vaddr == NULL)
drivers/scsi/hpsa.c
7100
iounmap(vaddr);
drivers/scsi/hpsa.c
7106
iounmap(vaddr);
drivers/scsi/hpsa.c
7133
writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
drivers/scsi/hpsa.c
7136
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
drivers/scsi/hpsa.c
7142
iounmap(vaddr);
drivers/scsi/hpsa.c
7169
void __iomem *vaddr, u32 use_doorbell)
drivers/scsi/hpsa.c
7178
writel(use_doorbell, vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.c
7280
void __iomem *vaddr;
drivers/scsi/hpsa.c
7318
vaddr = remap_pci_mem(paddr, 0x250);
drivers/scsi/hpsa.c
7319
if (!vaddr)
drivers/scsi/hpsa.c
7323
rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
drivers/scsi/hpsa.c
7354
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
drivers/scsi/hpsa.c
7365
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
drivers/scsi/hpsa.c
7372
rc = controller_reset_failed(vaddr);
drivers/scsi/hpsa.c
7387
iounmap(vaddr);
drivers/scsi/hpsa.c
7573
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
drivers/scsi/hpsa.c
7584
scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
drivers/scsi/hpsa.c
7598
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
drivers/scsi/hpsa.c
7602
*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
drivers/scsi/hpsa.c
7603
*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
drivers/scsi/hpsa.c
7636
rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
drivers/scsi/hpsa.c
7758
dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
drivers/scsi/hpsa.c
7760
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
drivers/scsi/hpsa.c
7771
doorbell_value = readl(h->vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.c
7797
doorbell_value = readl(h->vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.c
7823
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.c
7840
iounmap(h->vaddr); /* pci_init 3 */
drivers/scsi/hpsa.c
7841
h->vaddr = NULL;
drivers/scsi/hpsa.c
7893
h->vaddr = remap_pci_mem(h->paddr, 0x250);
drivers/scsi/hpsa.c
7894
if (!h->vaddr) {
drivers/scsi/hpsa.c
7899
err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
drivers/scsi/hpsa.c
7921
iounmap(h->vaddr);
drivers/scsi/hpsa.c
7922
h->vaddr = NULL;
drivers/scsi/hpsa.c
7954
void __iomem *vaddr;
drivers/scsi/hpsa.c
7978
vaddr = pci_ioremap_bar(pdev, 0);
drivers/scsi/hpsa.c
7979
if (vaddr == NULL) {
drivers/scsi/hpsa.c
7983
writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.c
7984
iounmap(vaddr);
drivers/scsi/hpsa.c
8155
rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
drivers/scsi/hpsa.c
8162
rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
drivers/scsi/hpsa.c
8258
lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
drivers/scsi/hpsa.c
8272
writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.c
8423
writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.c
8430
writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.c
9247
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.c
9270
writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
drivers/scsi/hpsa.c
9272
readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
drivers/scsi/hpsa.c
9309
hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
drivers/scsi/hpsa.c
9327
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
drivers/scsi/hpsa.h
172
void __iomem *vaddr;
drivers/scsi/hpsa.h
423
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
drivers/scsi/hpsa.h
424
(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
drivers/scsi/hpsa.h
430
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
drivers/scsi/hpsa.h
436
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
drivers/scsi/hpsa.h
448
writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
449
(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
453
h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
454
(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
465
writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
466
(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
470
h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
471
(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
479
writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
480
(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
484
h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
485
(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
drivers/scsi/hpsa.h
499
(void) readl(h->vaddr + SA5_OUTDB_STATUS);
drivers/scsi/hpsa.h
500
writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
drivers/scsi/hpsa.h
504
(void) readl(h->vaddr + SA5_OUTDB_STATUS);
drivers/scsi/hpsa.h
530
= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
drivers/scsi/hpsa.h
551
readl(h->vaddr + SA5_INTR_STATUS);
drivers/scsi/hpsa.h
557
unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
drivers/scsi/hpsa.h
563
register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
drivers/scsi/hpsa.h
571
unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
drivers/scsi/hpsa.h
582
return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
drivers/scsi/hpsa.h
609
writel((q << 24) | rq->current_entry, h->vaddr +
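
Nearly every hpsa hit is the same MMIO idiom: writel() a command or mask at a fixed offset from the remapped base h->vaddr and, where ordering matters, readl() something back to flush the posted write (the `(void) readl(...)` lines). Schematically, with invented offsets:

    #include <linux/io.h>

    #define MY_REQUEST_PORT 0x40    /* hypothetical register offsets */
    #define MY_SCRATCHPAD   0xb0

    struct my_ctlr {
            void __iomem *vaddr;    /* e.g. from pci_ioremap_bar() */
    };

    static void my_submit(struct my_ctlr *h, u32 busaddr)
    {
            writel(busaddr, h->vaddr + MY_REQUEST_PORT);
            /* Read back to flush the posted write before proceeding. */
            (void)readl(h->vaddr + MY_SCRATCHPAD);
    }
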
drivers/scsi/megaraid/mega_common.h
280
caddr_t vaddr;
drivers/scsi/megaraid/megaraid_ioctl.h
238
caddr_t vaddr;
drivers/scsi/megaraid/megaraid_mbox.c
1046
ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
drivers/scsi/megaraid/megaraid_mbox.c
1048
ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
drivers/scsi/megaraid/megaraid_mbox.c
1060
epthru_pci_blk[i].vaddr;
drivers/scsi/megaraid/megaraid_mbox.c
1066
ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr;
drivers/scsi/megaraid/megaraid_mbox.c
1157
mbox_pci_blk[i].vaddr = dma_pool_alloc(
drivers/scsi/megaraid/megaraid_mbox.c
1161
if (!mbox_pci_blk[i].vaddr) {
drivers/scsi/megaraid/megaraid_mbox.c
1183
epthru_pci_blk[i].vaddr = dma_pool_alloc(
drivers/scsi/megaraid/megaraid_mbox.c
1187
if (!epthru_pci_blk[i].vaddr) {
drivers/scsi/megaraid/megaraid_mbox.c
1206
sg_pci_blk[i].vaddr = dma_pool_alloc(
drivers/scsi/megaraid/megaraid_mbox.c
1210
if (!sg_pci_blk[i].vaddr) {
drivers/scsi/megaraid/megaraid_mbox.c
1241
for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
drivers/scsi/megaraid/megaraid_mbox.c
1242
dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
drivers/scsi/megaraid/megaraid_mbox.c
1249
for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
drivers/scsi/megaraid/megaraid_mbox.c
1251
epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
drivers/scsi/megaraid/megaraid_mbox.c
1257
for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
drivers/scsi/megaraid/megaraid_mbox.c
1259
mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
drivers/scsi/megaraid/megaraid_mbox.c
1544
caddr_t vaddr;
drivers/scsi/megaraid/megaraid_mbox.c
1548
vaddr = (caddr_t) sg_virt(&sgl[0]);
drivers/scsi/megaraid/megaraid_mbox.c
1550
memset(vaddr, 0, scp->cmnd[4]);
drivers/scsi/megaraid/megaraid_mm.c
1090
pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
drivers/scsi/megaraid/megaraid_mm.c
1093
if (!pool->vaddr)
drivers/scsi/megaraid/megaraid_mm.c
1190
if (pool->vaddr)
drivers/scsi/megaraid/megaraid_mm.c
1191
dma_pool_free(pool->handle, pool->vaddr,
drivers/scsi/megaraid/megaraid_mm.c
541
kioc->buf_vaddr = pool->vaddr;
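
The megaraid (and earlier aic94xx) entries manage many small same-sized DMA buffers through a dma_pool: each dma_pool_alloc() returns a vaddr and fills in a dma_addr_t, and both must be handed back to dma_pool_free(). For instance:

    #include <linux/dmapool.h>

    struct my_buf {
            void *vaddr;
            dma_addr_t dma_addr;
    };

    /* pool comes from dma_pool_create(name, dev, size, align, 0) */
    static int my_buf_get(struct dma_pool *pool, struct my_buf *buf)
    {
            buf->vaddr = dma_pool_alloc(pool, GFP_KERNEL, &buf->dma_addr);
            return buf->vaddr ? 0 : -ENOMEM;
    }

    static void my_buf_put(struct dma_pool *pool, struct my_buf *buf)
    {
            if (buf->vaddr) {
                    dma_pool_free(pool, buf->vaddr, buf->dma_addr);
                    buf->vaddr = NULL;
            }
    }
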
drivers/scsi/ncr53c8xx.c
1645
void __iomem *vaddr; /* Virtual and bus address of */
drivers/scsi/ncr53c8xx.c
208
m_addr_t vaddr;
drivers/scsi/ncr53c8xx.c
389
vbp->vaddr = vp;
drivers/scsi/ncr53c8xx.c
408
while (*vbpp && (*vbpp)->vaddr != m)
drivers/scsi/ncr53c8xx.c
414
(void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
drivers/scsi/ncr53c8xx.c
499
while (vp && (m_addr_t) vp->vaddr != a)
drivers/scsi/ncr53c8xx.c
8155
np->vaddr = device->slot.base_v;
drivers/scsi/ncr53c8xx.c
8157
np->vaddr = ioremap(device->slot.base_c, 128);
drivers/scsi/ncr53c8xx.c
8159
if (!np->vaddr) {
drivers/scsi/ncr53c8xx.c
8166
"%s: using memory mapped IO at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr);
drivers/scsi/ncr53c8xx.c
8173
np->reg = (struct ncr_reg __iomem *)np->vaddr;
drivers/scsi/snic/snic_main.c
260
if (snic->bar0.vaddr)
drivers/scsi/snic/snic_main.c
261
iounmap(snic->bar0.vaddr);
drivers/scsi/snic/snic_main.c
436
snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
drivers/scsi/snic/snic_main.c
437
if (!snic->bar0.vaddr) {
drivers/scsi/snic/vnic_dev.c
112
if (!bar[bar_num].len || !bar[bar_num].vaddr)
drivers/scsi/snic/vnic_dev.c
143
vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
drivers/scsi/snic/vnic_dev.c
158
if (!vdev->res[type].vaddr)
drivers/scsi/snic/vnic_dev.c
166
return (char __iomem *)vdev->res[type].vaddr +
drivers/scsi/snic/vnic_dev.c
170
return (char __iomem *)vdev->res[type].vaddr;
drivers/scsi/snic/vnic_dev.c
32
void __iomem *vaddr;
drivers/scsi/snic/vnic_dev.c
82
rh = bar->vaddr;
drivers/scsi/snic/vnic_dev.h
35
void __iomem *vaddr;
drivers/scsi/sun3_scsi.c
390
unsigned char *vaddr;
drivers/scsi/sun3_scsi.c
392
vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr);
drivers/scsi/sun3_scsi.c
394
vaddr += (sun3_dma_orig_count - fifo);
drivers/scsi/sun3_scsi.c
395
vaddr--;
drivers/scsi/sun3_scsi.c
399
*vaddr = (dregs->bpack_lo & 0xff00) >> 8;
drivers/scsi/sun3_scsi.c
400
vaddr--;
drivers/scsi/sun3_scsi.c
404
*vaddr = (dregs->bpack_hi & 0x00ff);
drivers/scsi/sun3_scsi.c
405
vaddr--;
drivers/scsi/sun3_scsi.c
409
*vaddr = (dregs->bpack_hi & 0xff00) >> 8;
drivers/scsi/sun3_scsi.c
441
unsigned char *vaddr;
drivers/scsi/sun3_scsi.c
444
vaddr = (unsigned char *)dvma_btov(sun3_dma_orig_addr);
drivers/scsi/sun3_scsi.c
446
vaddr += (sun3_dma_orig_count - fifo);
drivers/scsi/sun3_scsi.c
448
vaddr[-2] = (data & 0xff00) >> 8;
drivers/scsi/sun3_scsi.c
449
vaddr[-1] = (data & 0xff);
drivers/scsi/sym53c8xx_2/sym_hipd.h
1130
void *vaddr; /* Virtual address */
drivers/scsi/sym53c8xx_2/sym_hipd.h
1195
void *vaddr = NULL;
drivers/scsi/sym53c8xx_2/sym_hipd.h
1198
vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
drivers/scsi/sym53c8xx_2/sym_hipd.h
1200
if (vaddr) {
drivers/scsi/sym53c8xx_2/sym_hipd.h
1201
vbp->vaddr = vaddr;
drivers/scsi/sym53c8xx_2/sym_hipd.h
1204
return vaddr;
drivers/scsi/sym53c8xx_2/sym_hipd.h
1209
dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
drivers/scsi/sym53c8xx_2/sym_malloc.c
213
void *vaddr;
drivers/scsi/sym53c8xx_2/sym_malloc.c
219
vaddr = sym_m_get_dma_mem_cluster(mp, vbp);
drivers/scsi/sym53c8xx_2/sym_malloc.c
220
if (vaddr) {
drivers/scsi/sym53c8xx_2/sym_malloc.c
221
int hc = VTOB_HASH_CODE(vaddr);
drivers/scsi/sym53c8xx_2/sym_malloc.c
226
return vaddr;
drivers/scsi/sym53c8xx_2/sym_malloc.c
239
while (*vbpp && (*vbpp)->vaddr != m)
drivers/scsi/sym53c8xx_2/sym_malloc.c
357
while (vp && vp->vaddr != a)
drivers/soc/fsl/dpio/dpio-service.c
402
qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
drivers/soc/fsl/dpio/dpio-service.c
433
qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
drivers/soc/fsl/dpio/dpio-service.c
45
struct dpaa2_dq *vaddr;
drivers/soc/fsl/dpio/dpio-service.c
673
ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
drivers/soc/fsl/dpio/dpio-service.c
674
ret->paddr = dma_map_single(dev, ret->vaddr,
drivers/soc/fsl/dpio/dpio-service.c
724
struct dpaa2_dq *ret = &s->vaddr[s->idx];
drivers/soc/fsl/dpio/dpio-service.c
745
prefetch(&s->vaddr[s->idx]);
drivers/spi/spi-orion.c
473
void __iomem *vaddr;
drivers/spi/spi-orion.c
485
vaddr = orion_spi->child[cs].direct_access.vaddr;
drivers/spi/spi-orion.c
487
if (vaddr && xfer->tx_buf && word_len == 8 && (spi->mode & SPI_CS_WORD) == 0) {
drivers/spi/spi-orion.c
495
iowrite32_rep(vaddr, xfer->tx_buf, cnt);
drivers/spi/spi-orion.c
499
iowrite8_rep(vaddr, &buf[cnt], rem);
drivers/spi/spi-orion.c
763
dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
drivers/spi/spi-orion.c
764
if (!dir_acc->vaddr) {
drivers/spi/spi-orion.c
84
void __iomem *vaddr;
drivers/staging/media/atomisp/include/hmm/hmm_bo.h
243
struct hmm_bo_device *bdev, ia_css_ptr vaddr);
drivers/staging/media/atomisp/include/hmm/hmm_bo.h
252
struct hmm_bo_device *bdev, ia_css_ptr vaddr);
drivers/staging/media/atomisp/include/hmm/hmm_bo.h
259
struct hmm_bo_device *bdev, const void *vaddr);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
542
struct hmm_bo_device *bdev, ia_css_ptr vaddr)
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
549
bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
553
__func__, vaddr);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
562
struct hmm_bo_device *bdev, unsigned int vaddr)
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
569
bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
573
__func__, vaddr);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
582
struct hmm_bo_device *bdev, const void *vaddr)
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
596
if (bo->vmap_addr == vaddr)
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
645
void *vaddr = vmalloc_addr;
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
649
bo->pages[i] = vmalloc_to_page(vaddr);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
654
vaddr += PAGE_SIZE;
drivers/staging/media/ipu3/ipu3-css-fw.c
254
memcpy(css->binary[i].vaddr, blob, size);
drivers/staging/media/ipu3/ipu3-css-pool.c
13
if (map->size < size && map->vaddr) {
drivers/staging/media/ipu3/ipu3-css-pool.c
41
pool->entry[i].param.vaddr = NULL;
drivers/staging/media/ipu3/ipu3-css-pool.h
22
void *vaddr;
drivers/staging/media/ipu3/ipu3-css.c
1002
sp_group = css->xmem_sp_group_ptrs.vaddr;
drivers/staging/media/ipu3/ipu3-css.c
1912
abi_buf = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2135
0)->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2139
if (set_params || !map->vaddr) {
drivers/staging/media/ipu3/ipu3-css.c
2142
acc = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2148
if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params ||
drivers/staging/media/ipu3/ipu3-css.c
2153
vmem0 = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2159
if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params ||
drivers/staging/media/ipu3/ipu3-css.c
2163
dmem0 = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2171
r = imgu_css_cfg_acc(css, pipe, use, acc, map->vaddr,
drivers/staging/media/ipu3/ipu3-css.c
2182
map->vaddr, set_params);
drivers/staging/media/ipu3/ipu3-css.c
2191
map->vaddr, set_params);
drivers/staging/media/ipu3/ipu3-css.c
2203
if (!map->vaddr) {
drivers/staging/media/ipu3/ipu3-css.c
2206
gdc = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2207
imgu_css_cfg_gdc_table(map->vaddr,
drivers/staging/media/ipu3/ipu3-css.c
2220
if (!map->vaddr || (set_params && set_params->use.obgrid_param)) {
drivers/staging/media/ipu3/ipu3-css.c
2223
obgrid = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
560
memset(css->xmem_sp_group_ptrs.vaddr, 0,
drivers/staging/media/ipu3/ipu3-css.c
716
void *vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
drivers/staging/media/ipu3/ipu3-css.c
726
sizeof(*cfg_iter), vaddr);
drivers/staging/media/ipu3/ipu3-css.c
775
sizeof(*cfg_ref), vaddr);
drivers/staging/media/ipu3/ipu3-css.c
805
vaddr);
drivers/staging/media/ipu3/ipu3-css.c
822
vaddr);
drivers/staging/media/ipu3/ipu3-css.c
848
vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
drivers/staging/media/ipu3/ipu3-css.c
853
vaddr);
drivers/staging/media/ipu3/ipu3-css.c
866
vaddr);
drivers/staging/media/ipu3/ipu3-css.c
880
isp_stage = css_pipe->xmem_isp_stage_ptrs[pipe][stage].vaddr;
drivers/staging/media/ipu3/ipu3-css.c
895
sp_stage = css_pipe->xmem_sp_stage_ptrs[pipe][stage].vaddr;
drivers/staging/media/ipu3/ipu3-dmamap.c
127
map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
drivers/staging/media/ipu3/ipu3-dmamap.c
128
if (!map->vaddr)
drivers/staging/media/ipu3/ipu3-dmamap.c
136
size, &map->daddr, map->vaddr);
drivers/staging/media/ipu3/ipu3-dmamap.c
138
return map->vaddr;
drivers/staging/media/ipu3/ipu3-dmamap.c
172
__func__, map->size, &map->daddr, map->vaddr);
drivers/staging/media/ipu3/ipu3-dmamap.c
174
if (!map->vaddr)
drivers/staging/media/ipu3/ipu3-dmamap.c
179
vunmap(map->vaddr);
drivers/staging/media/ipu3/ipu3-dmamap.c
181
map->vaddr = NULL;
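
ipu3-dmamap.c builds its CPU view by vmap()-ing an array of individually allocated pages and tears it down with vunmap(), clearing vaddr afterwards. In miniature:

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Map 'count' discontiguous pages into one contiguous kernel range. */
    static void *my_map_pages(struct page **pages, unsigned int count)
    {
            /* VM_USERMAP permits a later remap of the area to userspace. */
            return vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
    }

    static void my_unmap_pages(void **vaddr)
    {
            vunmap(*vaddr);
            *vaddr = NULL;
    }
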
drivers/staging/media/ipu3/ipu3.c
163
if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
drivers/staging/media/ipu7/ipu7-dma.c
117
void *vaddr;
drivers/staging/media/ipu7/ipu7-dma.c
130
vaddr = info->vaddr + offset;
drivers/staging/media/ipu7/ipu7-dma.c
131
clflush_cache_range(vaddr, size);
drivers/staging/media/ipu7/ipu7-dma.c
209
info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
drivers/staging/media/ipu7/ipu7-dma.c
210
if (!info->vaddr)
drivers/staging/media/ipu7/ipu7-dma.c
220
return info->vaddr;
drivers/staging/media/ipu7/ipu7-dma.c
244
void ipu7_dma_free(struct ipu7_bus_device *sys, size_t size, void *vaddr,
drivers/staging/media/ipu7/ipu7-dma.c
25
void *vaddr;
drivers/staging/media/ipu7/ipu7-dma.c
262
if (WARN_ON(!info->vaddr))
drivers/staging/media/ipu7/ipu7-dma.c
274
vunmap(vaddr);
drivers/staging/media/ipu7/ipu7-dma.c
311
if (!info->vaddr)
drivers/staging/media/ipu7/ipu7-dma.h
31
void ipu7_dma_free(struct ipu7_bus_device *sys, size_t size, void *vaddr,
drivers/staging/media/meson/vdec/codec_hevc_common.c
181
void *vaddr = dma_alloc_coherent(dev, am21_size, &paddr,
drivers/staging/media/meson/vdec/codec_hevc_common.c
183
if (!vaddr) {
drivers/staging/media/meson/vdec/codec_hevc_common.c
188
comm->fbc_buffer_vaddr[idx] = vaddr;
drivers/staging/media/meson/vdec/codec_hevc_common.c
234
void *vaddr = dma_alloc_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
drivers/staging/media/meson/vdec/codec_hevc_common.c
236
if (!vaddr) {
drivers/staging/media/meson/vdec/codec_hevc_common.c
241
comm->mmu_header_vaddr[idx] = vaddr;
drivers/staging/media/meson/vdec/esparser.c
193
u8 *vaddr = vb2_plane_vaddr(vb, 0);
drivers/staging/media/meson/vdec/esparser.c
197
memset(vaddr + payload_size, 0, pad_size);
drivers/staging/media/meson/vdec/esparser.c
206
memset(vaddr + payload_size + pad_size, 0, SEARCH_PATTERN_LEN);
drivers/staging/media/meson/vdec/esparser.c
207
vaddr[payload_size + pad_size] = 0x00;
drivers/staging/media/meson/vdec/esparser.c
208
vaddr[payload_size + pad_size + 1] = 0x00;
drivers/staging/media/meson/vdec/esparser.c
209
vaddr[payload_size + pad_size + 2] = 0x01;
drivers/staging/media/meson/vdec/esparser.c
210
vaddr[payload_size + pad_size + 3] = 0xff;
drivers/staging/vme_user/vme.c
106
void *vaddr, dma_addr_t dma)
drivers/staging/vme_user/vme.c
117
bridge->free_consistent(bridge->parent, size, vaddr, dma);
drivers/staging/vme_user/vme.h
132
void vme_free_consistent(struct vme_resource *resource, size_t size, void *vaddr, dma_addr_t dma);
drivers/staging/vme_user/vme_bridge.h
177
void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma);
drivers/staging/vme_user/vme_fake.c
1007
void *vaddr, dma_addr_t dma)
drivers/staging/vme_user/vme_fake.c
1009
kfree(vaddr);
drivers/staging/vme_user/vme_tsi148.c
2140
void *vaddr, dma_addr_t dma)
drivers/staging/vme_user/vme_tsi148.c
2147
dma_free_coherent(&pdev->dev, size, vaddr, dma);
drivers/target/target_core_user.c
658
static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
drivers/target/target_core_user.c
660
unsigned long offset = offset_in_page(vaddr);
drivers/target/target_core_user.c
661
void *start = vaddr - offset;
drivers/tee/optee/smc_abi.c
1426
unsigned long vaddr;
drivers/tee/optee/smc_abi.c
1455
vaddr = (unsigned long)va;
drivers/tee/optee/smc_abi.c
1457
rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
drivers/tee/tee_shm_pool.c
56
struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
drivers/tee/tee_shm_pool.c
65
if (vaddr & page_mask || paddr & page_mask || size & page_mask)
drivers/tee/tee_shm_pool.c
78
rc = gen_pool_add_virt(pool->private_data, vaddr, paddr, size, -1);
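
tee_shm_pool_alloc_res_mem() first checks that vaddr, paddr, and size are all page aligned, then registers the virt/phys pair with gen_pool_add_virt() so later allocations can translate between the two views. Roughly:

    #include <linux/genalloc.h>
    #include <linux/mm.h>

    static int my_pool_add(struct gen_pool *pool, unsigned long vaddr,
                           phys_addr_t paddr, size_t size)
    {
            const unsigned long page_mask = PAGE_SIZE - 1;

            if ((vaddr & page_mask) || (paddr & page_mask) || (size & page_mask))
                    return -EINVAL; /* everything must be page aligned */

            return gen_pool_add_virt(pool, vaddr, paddr, size, -1 /* any node */);
    }
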
drivers/tty/serial/8250/8250_acorn.c
35
void __iomem *vaddr;
drivers/tty/serial/8250/8250_acorn.c
54
info->vaddr = ecardm_iomap(ec, type->type, 0, 0);
drivers/tty/serial/8250/8250_acorn.c
55
if (!info->vaddr) {
drivers/tty/serial/8250/8250_acorn.c
71
uart.port.membase = info->vaddr + type->offset[i];
drivers/tty/serial/jsm/jsm_tty.c
369
void __iomem *vaddr;
drivers/tty/serial/jsm/jsm_tty.c
404
vaddr = brd->re_map_membase;
drivers/tty/serial/jsm/jsm_tty.c
415
ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
drivers/tty/serial/jsm/jsm_tty.c
417
ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);
drivers/usb/core/hcd.c
1258
unsigned char *vaddr;
drivers/usb/core/hcd.c
1265
vaddr = hcd_buffer_alloc(bus, size + sizeof(unsigned long),
drivers/usb/core/hcd.c
1267
if (!vaddr)
drivers/usb/core/hcd.c
1279
(unsigned long *)(vaddr + size));
drivers/usb/core/hcd.c
1282
memcpy(vaddr, *vaddr_handle, size);
drivers/usb/core/hcd.c
1284
*vaddr_handle = vaddr;
drivers/usb/core/hcd.c
1292
unsigned char *vaddr = *vaddr_handle;
drivers/usb/core/hcd.c
1294
vaddr = (void *)get_unaligned((unsigned long *)(vaddr + size));
drivers/usb/core/hcd.c
1297
memcpy(vaddr, *vaddr_handle, size);
drivers/usb/core/hcd.c
1299
hcd_buffer_free(bus, size + sizeof(vaddr), *vaddr_handle, *dma_handle);
drivers/usb/core/hcd.c
1301
*vaddr_handle = vaddr;
drivers/usb/gadget/function/f_fs.c
808
void *vaddr, *ptr;
drivers/usb/gadget/function/f_fs.c
812
vaddr = vmalloc(sz);
drivers/usb/gadget/function/f_fs.c
813
if (!vaddr)
drivers/usb/gadget/function/f_fs.c
819
vfree(vaddr);
drivers/usb/gadget/function/f_fs.c
823
for (i = 0, ptr = vaddr; i < n_pages; ++i, ptr += PAGE_SIZE)
drivers/usb/gadget/function/f_fs.c
828
vfree(vaddr);
drivers/usb/gadget/function/f_fs.c
834
return vaddr;
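
f_fs.c (like hmm_bo.c earlier in this listing) recovers the struct page behind each PAGE_SIZE slice of a vmalloc() buffer with vmalloc_to_page(), typically to build a scatterlist or page array. A hedged sketch:

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Fill 'pages' with the backing pages of an n_pages-long vmalloc buffer. */
    static void my_collect_pages(void *vaddr, struct page **pages, int n_pages)
    {
            int i;

            for (i = 0; i < n_pages; i++) {
                    pages[i] = vmalloc_to_page(vaddr);
                    vaddr += PAGE_SIZE;
            }
    }
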
drivers/usb/gadget/udc/fsl_qe_udc.c
1476
u32 vaddr, fsize;
drivers/usb/gadget/udc/fsl_qe_udc.c
1497
vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
drivers/usb/gadget/udc/fsl_qe_udc.c
1498
frame_set_data(pframe, (u8 *)vaddr);
drivers/usb/gadget/udc/fsl_qe_udc.c
821
u32 vaddr;
drivers/usb/gadget/udc/fsl_qe_udc.c
843
vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
drivers/usb/gadget/udc/fsl_qe_udc.c
844
frame_set_data(pframe, (u8 *)vaddr);
drivers/usb/gadget/udc/fsl_qe_udc.c
941
u32 vaddr, i;
drivers/usb/gadget/udc/fsl_qe_udc.c
971
vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
drivers/usb/gadget/udc/fsl_qe_udc.c
972
frame_set_data(pframe, (u8 *)vaddr);
drivers/usb/host/xen-hcd.c
580
unsigned long vaddr = (unsigned long)addr;
drivers/usb/host/xen-hcd.c
582
return PFN_UP(vaddr + length) - PFN_DOWN(vaddr);
drivers/usb/mon/mon_bin.c
1340
unsigned long vaddr;
drivers/usb/mon/mon_bin.c
1343
vaddr = get_zeroed_page(GFP_KERNEL);
drivers/usb/mon/mon_bin.c
1344
if (vaddr == 0) {
drivers/usb/mon/mon_bin.c
1349
map[n].ptr = (unsigned char *) vaddr;
drivers/usb/mon/mon_bin.c
1350
map[n].pg = virt_to_page((void *) vaddr);
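
mon_bin.c grows its buffer one zeroed lowmem page at a time, keeping both the kernel pointer and the struct page (via virt_to_page()) so the pages can later be inserted into a user mapping. For example:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    struct my_pgmap {
            unsigned char *ptr;
            struct page *pg;
    };

    static int my_get_page(struct my_pgmap *map)
    {
            unsigned long vaddr = get_zeroed_page(GFP_KERNEL);

            if (vaddr == 0)
                    return -ENOMEM;

            map->ptr = (unsigned char *)vaddr;
            map->pg = virt_to_page((void *)vaddr);  /* valid for lowmem pages */
            return 0;
    }
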
drivers/vdpa/vdpa_user/vduse_dev.c
1001
void *vaddr, dma_addr_t dma_addr,
drivers/vdpa/vdpa_user/vduse_dev.c
1015
free_pages_exact(vaddr, size);
drivers/vfio/vfio_iommu_spapr_tce.c
1040
ret = tce_iommu_register_pages(container, param.vaddr,
drivers/vfio/vfio_iommu_spapr_tce.c
105
__u64 vaddr, __u64 size)
drivers/vfio/vfio_iommu_spapr_tce.c
1069
ret = tce_iommu_unregister_pages(container, param.vaddr,
drivers/vfio/vfio_iommu_spapr_tce.c
112
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
drivers/vfio/vfio_iommu_spapr_tce.c
115
mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
drivers/vfio/vfio_iommu_spapr_tce.c
137
__u64 vaddr, __u64 size)
drivers/vfio/vfio_iommu_spapr_tce.c
144
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
drivers/vfio/vfio_iommu_spapr_tce.c
145
((vaddr + size) < vaddr))
drivers/vfio/vfio_iommu_spapr_tce.c
148
mem = mm_iommu_get(container->mm, vaddr, entries);
drivers/vfio/vfio_iommu_spapr_tce.c
157
ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
drivers/vfio/vfio_iommu_spapr_tce.c
935
(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
drivers/vfio/vfio_iommu_spapr_tce.c
951
ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
drivers/vfio/vfio_iommu_spapr_tce.c
958
param.vaddr,
drivers/vfio/vfio_iommu_spapr_tce.c
964
param.vaddr,
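The spapr_tce checks above validate a user-supplied (vaddr, size) window twice over: both values must be page aligned, and vaddr + size must not wrap around the address space. A small runnable sketch of the same predicate, assuming 4 KiB pages:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Both ends page aligned, and the window must not wrap. */
static bool window_ok(uint64_t vaddr, uint64_t size)
{
	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
	    ((vaddr + size) < vaddr))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", window_ok(0x10000, 0x2000));                /* 1 */
	printf("%d\n", window_ok(0x10000, 0xFFFFFFFFFFFFF000ULL)); /* 0: wraps */
	return 0;
}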
drivers/vfio/vfio_iommu_type1.c
1585
unsigned long vaddr = dma->vaddr;
drivers/vfio/vfio_iommu_type1.c
1596
npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
drivers/vfio/vfio_iommu_type1.c
1687
unsigned long vaddr = map->vaddr;
drivers/vfio/vfio_iommu_type1.c
1695
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
drivers/vfio/vfio_iommu_type1.c
1702
check_add_overflow(vaddr, size - 1, &vaddr_end))
drivers/vfio/vfio_iommu_type1.c
1720
if ((size | iova | vaddr) & (pgsize - 1)) {
drivers/vfio/vfio_iommu_type1.c
1736
dma->vaddr = vaddr;
drivers/vfio/vfio_iommu_type1.c
1764
dma->vaddr = vaddr;
drivers/vfio/vfio_iommu_type1.c
1860
unsigned long vaddr = dma->vaddr + pos;
drivers/vfio/vfio_iommu_type1.c
1864
npage = vfio_pin_pages_remote(dma, vaddr,
drivers/vfio/vfio_iommu_type1.c
3145
unsigned long vaddr;
drivers/vfio/vfio_iommu_type1.c
3174
vaddr = dma->vaddr + offset;
drivers/vfio/vfio_iommu_type1.c
3177
*copied = copy_to_user((void __user *)vaddr, data,
drivers/vfio/vfio_iommu_type1.c
3190
*copied = copy_from_user(data, (void __user *)vaddr,
drivers/vfio/vfio_iommu_type1.c
542
unsigned long vaddr, unsigned long *pfn,
drivers/vfio/vfio_iommu_type1.c
545
struct follow_pfnmap_args args = { .vma = vma, .address = vaddr };
drivers/vfio/vfio_iommu_type1.c
552
ret = fixup_user_fault(mm, vaddr,
drivers/vfio/vfio_iommu_type1.c
585
static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
drivers/vfio/vfio_iommu_type1.c
598
ret = pin_user_pages_remote(mm, vaddr, pin_pages, flags | FOLL_LONGTERM,
drivers/vfio/vfio_iommu_type1.c
609
vaddr = untagged_addr_remote(mm, vaddr);
drivers/vfio/vfio_iommu_type1.c
612
vma = vma_lookup(mm, vaddr);
drivers/vfio/vfio_iommu_type1.c
617
ret = follow_fault_pfn(vma, mm, vaddr, pfn, &addr_mask,
drivers/vfio/vfio_iommu_type1.c
676
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
drivers/vfio/vfio_iommu_type1.c
684
dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
drivers/vfio/vfio_iommu_type1.c
712
ret = vaddr_get_pfns(mm, vaddr, npage, dma->prot,
drivers/vfio/vfio_iommu_type1.c
729
vaddr += (PAGE_SIZE * ret);
drivers/vfio/vfio_iommu_type1.c
781
vaddr += PAGE_SIZE * nr_pages;
drivers/vfio/vfio_iommu_type1.c
840
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
drivers/vfio/vfio_iommu_type1.c
853
ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, &batch);
drivers/vfio/vfio_iommu_type1.c
91
unsigned long vaddr; /* Process virtual addr */
drivers/vfio/vfio_iommu_type1.c
964
remote_vaddr = dma->vaddr + (iova - dma->iova);
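Throughout type1, a vfio_dma describes one contiguous window: [iova, iova + size) backed by the process range starting at vaddr. Every translation above (pinning, the dma_rw path) is the same linear offset move, sketched here with an illustrative struct standing in for the real vfio_dma:

#include <stdint.h>

/* Illustrative stand-in for the relevant vfio_dma fields. */
struct dma_window {
	uint64_t iova;   /* start of the IOVA window */
	uint64_t vaddr;  /* process address backing iova */
	uint64_t size;
};

/* An offset into the iova window is the same offset into the vaddr
 * range, so the backing address is a single add/subtract. */
static uint64_t iova_to_vaddr(const struct dma_window *dma, uint64_t iova)
{
	return dma->vaddr + (iova - dma->iova);
}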
drivers/video/fbdev/hpfb.c
332
unsigned long paddr, vaddr;
drivers/video/fbdev/hpfb.c
339
vaddr = (unsigned long)ioremap(paddr, resource_size(&d->resource));
drivers/video/fbdev/hpfb.c
341
vaddr = paddr + DIO_VIRADDRBASE;
drivers/video/fbdev/hpfb.c
345
if (hpfb_init_one(paddr, vaddr)) {
drivers/video/fbdev/hpfb.c
347
iounmap((void *)vaddr);
drivers/video/fbdev/matrox/matroxfb_accel.c
467
fb_writel((*chardata) << 24, mmio.vaddr);
drivers/video/fbdev/matrox/matroxfb_accel.c
469
fb_writel(*chardata, mmio.vaddr);
drivers/video/fbdev/matrox/matroxfb_accel.c
477
fb_writel((*(u_int16_t*)chardata) << 16, mmio.vaddr);
drivers/video/fbdev/matrox/matroxfb_accel.c
479
fb_writel(*(u_int16_t*)chardata, mmio.vaddr);
drivers/video/fbdev/matrox/matroxfb_accel.c
490
fb_writel(get_unaligned((u_int32_t*)(chardata + i)),mmio.vaddr);
drivers/video/fbdev/matrox/matroxfb_base.c
1730
minfo->mmio.vbase.vaddr = ioremap(ctrlptr_phys, 16384);
drivers/video/fbdev/matrox/matroxfb_base.c
1731
if (!minfo->mmio.vbase.vaddr) {
drivers/video/fbdev/matrox/matroxfb_base.c
1738
minfo->video.vbase.vaddr = ioremap_wc(video_base_phys, memsize);
drivers/video/fbdev/matrox/matroxfb_base.c
1739
if (!minfo->video.vbase.vaddr) {
drivers/video/fbdev/matrox/matroxfb_base.c
1957
iounmap(minfo->video.vbase.vaddr);
drivers/video/fbdev/matrox/matroxfb_base.c
1959
iounmap(minfo->mmio.vbase.vaddr);
drivers/video/fbdev/matrox/matroxfb_base.c
377
iounmap(minfo->mmio.vbase.vaddr);
drivers/video/fbdev/matrox/matroxfb_base.c
378
iounmap(minfo->video.vbase.vaddr);
drivers/video/fbdev/matrox/matroxfb_base.h
124
void __iomem* vaddr;
drivers/video/fbdev/matrox/matroxfb_base.h
128
return readb(va.vaddr + offs);
drivers/video/fbdev/matrox/matroxfb_base.h
132
writeb(value, va.vaddr + offs);
drivers/video/fbdev/matrox/matroxfb_base.h
136
writew(value, va.vaddr + offs);
drivers/video/fbdev/matrox/matroxfb_base.h
140
return readl(va.vaddr + offs);
drivers/video/fbdev/matrox/matroxfb_base.h
144
writel(value, va.vaddr + offs);
drivers/video/fbdev/matrox/matroxfb_base.h
156
iowrite32_rep(va.vaddr, src, len >> 2);
drivers/video/fbdev/matrox/matroxfb_base.h
158
u_int32_t __iomem* addr = va.vaddr;
drivers/video/fbdev/matrox/matroxfb_base.h
179
va->vaddr += offs;
drivers/video/fbdev/matrox/matroxfb_base.h
183
return va.vaddr;
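The matroxfb_base.h lines wrap a single stored __iomem base in trivial typed accessors so every register access goes through readl/writel-style helpers and the pointer arithmetic lives in one place. A sketch of that idiom; the struct name here is illustrative, and <linux/io.h> is assumed:

#include <linux/io.h>
#include <linux/types.h>

struct mmio_base {
	void __iomem *vaddr;
};

/* Pass the small wrapper by value, as matroxfb does, and keep all offset
 * arithmetic inside the accessors. */
static inline u32 mmio_readl(struct mmio_base va, unsigned int offs)
{
	return readl(va.vaddr + offs);
}

static inline void mmio_writel(struct mmio_base va, unsigned int offs,
			       u32 value)
{
	writel(value, va.vaddr + offs);
}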
drivers/video/fbdev/matrox/matroxfb_crtc2.c
625
m2info->video.vbase.vaddr = vaddr_va(minfo->video.vbase) + m2info->video.offbase;
drivers/video/fbdev/omap/lcdc.c
644
region->vaddr = lcdc.vram_virt;
drivers/video/fbdev/omap/omapfb.h
41
void __iomem *vaddr;
drivers/video/fbdev/omap/omapfb_main.c
1032
*(u16 *)fbdev->mem_desc.region[0].vaddr = pixval;
drivers/video/fbdev/omap/omapfb_main.c
164
fbdev->mem_desc.region[i].vaddr,
drivers/video/fbdev/omap/omapfb_main.c
377
fbi->screen_base = rg->vaddr;
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1313
if (rg->vrfb.vaddr[0]) {
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1314
iounmap(rg->vrfb.vaddr[0]);
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1315
rg->vrfb.vaddr[0] = NULL;
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1325
rg->vaddr = NULL;
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1367
rg->vaddr = NULL;
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1409
rg->vaddr = (void __iomem *)token;
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1560
rg->vaddr,
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
199
return ofbi->region->vrfb.vaddr[0];
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
201
return ofbi->region->vaddr;
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
531
if (vrfb->vaddr[0] && reconf) {
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
535
iounmap(vrfb->vaddr[0]);
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
536
vrfb->vaddr[0] = NULL;
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
540
if (vrfb->vaddr[0])
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
554
fbi->screen_base = ofbi->region->vrfb.vaddr[0];
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
505
return sysfs_emit(buf, "%p\n", ofbi->region->vaddr);
drivers/video/fbdev/omap2/omapfb/omapfb.h
46
void __iomem *vaddr;
drivers/video/fbdev/omap2/omapfb/vrfb.c
236
vrfb->vaddr[rot] = ioremap_wc(vrfb->paddr[rot], size);
drivers/video/fbdev/omap2/omapfb/vrfb.c
238
if (!vrfb->vaddr[rot]) {
drivers/video/fbdev/omap2/omapfb/vrfb.c
244
vrfb->vaddr[rot]);
drivers/virtio/virtio_ring.c
3707
union virtio_map map, size_t size, void *vaddr,
drivers/virtio/virtio_ring.c
3711
vdev->map->free(map, size, vaddr,
drivers/virtio/virtio_ring.c
3714
dma_free_coherent(map.dma_dev, size, vaddr, map_handle);
drivers/watchdog/rti_wdt.c
219
u32 *vaddr;
drivers/watchdog/rti_wdt.c
315
vaddr = memremap(paddr, reserved_mem_size, MEMREMAP_WB);
drivers/watchdog/rti_wdt.c
316
if (!vaddr) {
drivers/watchdog/rti_wdt.c
322
if (vaddr[0] == PON_REASON_SOF_NUM &&
drivers/watchdog/rti_wdt.c
323
vaddr[1] == PON_REASON_MAGIC_NUM &&
drivers/watchdog/rti_wdt.c
324
vaddr[2] == PON_REASON_EOF_NUM) {
drivers/watchdog/rti_wdt.c
327
memset(vaddr, 0, reserved_mem_size);
drivers/watchdog/rti_wdt.c
328
memunmap(vaddr);
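rti_wdt.c shows the memremap() lifecycle in miniature: map a reserved region write-back, test a magic-word signature, scrub it so the reason is reported only once, and unmap. A condensed sketch; the PON_REASON_* values below are illustrative stand-ins, not the driver's real constants:

#include <linux/io.h>
#include <linux/string.h>

/* illustrative values; the driver defines the real magic words */
#define PON_REASON_SOF_NUM   0x11111111
#define PON_REASON_MAGIC_NUM 0x22222222
#define PON_REASON_EOF_NUM   0x33333333

static bool pon_reason_present(phys_addr_t paddr, size_t reserved_mem_size)
{
	bool hit = false;
	u32 *vaddr;

	vaddr = memremap(paddr, reserved_mem_size, MEMREMAP_WB);
	if (!vaddr)
		return false;

	if (vaddr[0] == PON_REASON_SOF_NUM &&
	    vaddr[1] == PON_REASON_MAGIC_NUM &&
	    vaddr[2] == PON_REASON_EOF_NUM)
		hit = true;

	memset(vaddr, 0, reserved_mem_size); /* report only once */
	memunmap(vaddr);
	return hit;
}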
drivers/xen/gntdev.c
116
args.vaddr = map->dma_vaddr;
drivers/xen/gntdev.c
193
add->dma_vaddr = args.vaddr;
drivers/xen/gntdev.c
714
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
drivers/xen/gntdev.c
717
vma = find_vma(current->mm, op.vaddr);
drivers/xen/grant-dma-ops.c
116
static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
drivers/xen/grant-dma-ops.c
142
free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
drivers/xen/grant-dma-ops.c
150
void *vaddr;
drivers/xen/grant-dma-ops.c
152
vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
drivers/xen/grant-dma-ops.c
153
if (!vaddr)
drivers/xen/grant-dma-ops.c
156
return virt_to_page(vaddr);
drivers/xen/grant-dma-ops.c
160
struct page *vaddr, dma_addr_t dma_handle,
drivers/xen/grant-dma-ops.c
163
xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
drivers/xen/grant-table.c
1071
args->vaddr = dma_alloc_coherent(args->dev, size,
drivers/xen/grant-table.c
1075
args->vaddr = dma_alloc_wc(args->dev, size,
drivers/xen/grant-table.c
1078
if (!args->vaddr) {
drivers/xen/grant-table.c
1142
args->vaddr, args->dev_bus_addr);
drivers/xen/grant-table.c
1145
args->vaddr, args->dev_bus_addr);
drivers/xen/grant-table.c
1574
gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
drivers/xen/grant-table.c
823
void *vaddr;
drivers/xen/grant-table.c
828
vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
drivers/xen/grant-table.c
829
if (vaddr == NULL) {
drivers/xen/grant-table.c
836
memunmap(vaddr);
drivers/xen/grant-table.c
842
xen_auto_xlat_grant_frames.vaddr = vaddr;
drivers/xen/grant-table.c
855
memunmap(xen_auto_xlat_grant_frames.vaddr);
drivers/xen/grant-table.c
859
xen_auto_xlat_grant_frames.vaddr = NULL;
drivers/xen/swiotlb-xen.c
178
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
drivers/xen/swiotlb-xen.c
181
phys_addr_t phys = virt_to_phys(vaddr);
drivers/xen/swiotlb-xen.c
192
if (TestClearPageXenRemapped(virt_to_page(vaddr)))
drivers/xen/swiotlb-xen.c
194
free_pages((unsigned long)vaddr, get_order(size));
drivers/xen/unpopulated-alloc.c
124
vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
drivers/xen/unpopulated-alloc.c
125
if (IS_ERR(vaddr)) {
drivers/xen/unpopulated-alloc.c
127
ret = PTR_ERR(vaddr);
drivers/xen/unpopulated-alloc.c
132
struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
drivers/xen/unpopulated-alloc.c
41
void *vaddr;
drivers/xen/xen-pciback/xenbus.c
111
void *vaddr;
drivers/xen/xen-pciback/xenbus.c
117
err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr);
drivers/xen/xen-pciback/xenbus.c
124
pdev->sh_info = vaddr;
drivers/xen/xen-scsiback.c
248
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
drivers/xen/xen-scsiback.c
568
vaddr(pending_req, i) + ring_req->seg[i].offset);
drivers/xen/xen-scsiback.c
578
end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset;
drivers/xen/xen-scsiback.c
590
end_seg = vaddr(pending_req, i_seg) +
drivers/xen/xenbus/xenbus_client.c
395
int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
drivers/xen/xenbus/xenbus_client.c
404
addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
drivers/xen/xenbus/xenbus_client.c
405
if (!*vaddr) {
drivers/xen/xenbus/xenbus_client.c
420
if (is_vmalloc_addr(*vaddr))
drivers/xen/xenbus/xenbus_client.c
435
if (*vaddr)
drivers/xen/xenbus/xenbus_client.c
436
free_pages_exact(*vaddr, ring_size);
drivers/xen/xenbus/xenbus_client.c
439
*vaddr = NULL;
drivers/xen/xenbus/xenbus_client.c
454
void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
drivers/xen/xenbus/xenbus_client.c
466
if (*vaddr)
drivers/xen/xenbus/xenbus_client.c
467
free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);
drivers/xen/xenbus/xenbus_client.c
468
*vaddr = NULL;
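xenbus_setup_ring()/xenbus_teardown_ring() above keep a strict symmetry: alloc_pages_exact() with __GFP_ZERO on the way in, free_pages_exact() on the way out, and the caller's pointer is always cleared so a stale handle trips on NULL rather than on freed memory. The core of that contract, sketched:

#include <linux/errno.h>
#include <linux/gfp.h>

static int ring_setup(void **vaddr, size_t ring_size, gfp_t gfp)
{
	*vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
	return *vaddr ? 0 : -ENOMEM;
}

static void ring_teardown(void **vaddr, size_t ring_size)
{
	if (*vaddr)
		free_pages_exact(*vaddr, ring_size);
	*vaddr = NULL;   /* a second teardown is now a harmless no-op */
}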
drivers/xen/xenbus/xenbus_client.c
533
unsigned int nr_grefs, void **vaddr)
drivers/xen/xenbus/xenbus_client.c
538
*vaddr = NULL;
drivers/xen/xenbus/xenbus_client.c
551
err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
drivers/xen/xenbus/xenbus_client.c
666
unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
drivers/xen/xenbus/xenbus_client.c
668
info->phys_addrs[info->idx] = vaddr;
drivers/xen/xenbus/xenbus_client.c
669
info->addrs[info->idx] = vaddr;
drivers/xen/xenbus/xenbus_client.c
678
void **vaddr)
drivers/xen/xenbus/xenbus_client.c
714
*vaddr = addr;
drivers/xen/xenbus/xenbus_client.c
745
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
drivers/xen/xenbus/xenbus_client.c
747
return ring_ops->unmap(dev, vaddr);
drivers/xen/xenbus/xenbus_client.c
764
void **vaddr)
drivers/xen/xenbus/xenbus_client.c
790
*vaddr = area->addr;
drivers/xen/xenbus/xenbus_client.c
804
static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
drivers/xen/xenbus/xenbus_client.c
815
if (node->pv.area->addr == vaddr) {
drivers/xen/xenbus/xenbus_client.c
826
"can't find mapped virtual address %p", vaddr);
drivers/xen/xenbus/xenbus_client.c
834
addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
drivers/xen/xenbus/xenbus_client.c
890
static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
drivers/xen/xenbus/xenbus_client.c
903
if (addr == vaddr) {
drivers/xen/xenbus/xenbus_client.c
91
void **vaddr);
drivers/xen/xenbus/xenbus_client.c
914
"can't find mapped virtual address %p", vaddr);
drivers/xen/xenbus/xenbus_client.c
92
int (*unmap)(struct xenbus_device *dev, void *vaddr);
drivers/xen/xenbus/xenbus_client.c
927
vunmap(vaddr);
drivers/xen/xenbus/xenbus_client.c
931
WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
drivers/xen/xlate_mmu.c
219
void *vaddr;
drivers/xen/xlate_mmu.c
248
vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
drivers/xen/xlate_mmu.c
249
if (!vaddr) {
drivers/xen/xlate_mmu.c
260
*virt = vaddr;
fs/binfmt_elf.c
1046
unsigned long k, vaddr;
fs/binfmt_elf.c
1058
vaddr = elf_ppnt->p_vaddr;
fs/binfmt_elf.c
1186
load_bias = ELF_PAGESTART(load_bias - vaddr);
fs/binfmt_elf.c
1189
error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
fs/binfmt_elf.c
1201
ELF_PAGESTART(load_bias + vaddr);
fs/binfmt_elf.c
681
unsigned long vaddr = 0;
fs/binfmt_elf.c
684
vaddr = eppnt->p_vaddr;
fs/binfmt_elf.c
688
load_addr = -vaddr;
fs/binfmt_elf.c
690
map_addr = elf_load(interpreter, load_addr + vaddr,
fs/binfmt_elf.c
699
load_addr = map_addr - ELF_PAGESTART(vaddr);
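The interpreter path in binfmt_elf.c recovers the load slide from wherever the first PT_LOAD segment actually landed: map at load_addr + vaddr, then back-compute load_addr = map_addr - ELF_PAGESTART(vaddr). A runnable arithmetic demo, assuming a 4 KiB ELF_MIN_ALIGN; the addresses are made up:

#include <stdint.h>
#include <stdio.h>

#define ELF_MIN_ALIGN 0x1000UL                        /* assumed granule */
#define ELF_PAGESTART(v) ((v) & ~(ELF_MIN_ALIGN - 1))

int main(void)
{
	uint64_t vaddr = 0x1234;              /* p_vaddr of first PT_LOAD */
	uint64_t map_addr = 0x7f0000001000;   /* page-aligned mmap result */
	uint64_t load_addr = map_addr - ELF_PAGESTART(vaddr);

	/* every later segment then maps at load_addr + p_vaddr */
	printf("load_addr = %#lx\n", (unsigned long)load_addr);
	return 0;
}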
fs/dax.c
1362
unsigned long vaddr = vmf->address;
fs/dax.c
1363
unsigned long pfn = my_zero_pfn(vaddr);
fs/fuse/ioctl.c
348
void *vaddr;
fs/fuse/ioctl.c
368
vaddr = kmap_local_folio(ap.folios[0], 0);
fs/fuse/ioctl.c
369
err = fuse_copy_ioctl_iovec(fm->fc, iov_page, vaddr,
fs/fuse/ioctl.c
372
kunmap_local(vaddr);
fs/minix/minix.h
129
static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
fs/minix/minix.h
131
const unsigned short *p = vaddr, *addr = vaddr;
fs/minix/minix.h
154
static inline int minix_test_bit(int nr, const void *vaddr)
fs/minix/minix.h
156
const unsigned short *p = vaddr;
fs/pstore/ram_core.c
421
void *vaddr;
fs/pstore/ram_core.c
456
vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
fs/pstore/ram_core.c
463
if (!vaddr)
fs/pstore/ram_core.c
471
return vaddr + offset_in_page(start);
fs/pstore/ram_core.c
506
prz->vaddr = persistent_ram_vmap(start, size, memtype);
fs/pstore/ram_core.c
508
prz->vaddr = persistent_ram_iomap(start, size, memtype,
fs/pstore/ram_core.c
511
if (!prz->vaddr) {
fs/pstore/ram_core.c
517
prz->buffer = prz->vaddr;
fs/pstore/ram_core.c
578
if (prz->vaddr) {
fs/pstore/ram_core.c
581
vunmap(prz->vaddr - offset_in_page(prz->paddr));
fs/pstore/ram_core.c
583
iounmap(prz->vaddr);
fs/pstore/ram_core.c
586
prz->vaddr = NULL;
fs/pstore/ram_internal.h
62
void *vaddr;
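One subtlety in the ram_core.c lines: persistent_ram_vmap() returns vaddr + offset_in_page(start), so the stored prz->vaddr is deliberately not page aligned when the region isn't, and the free path must subtract the same offset before vunmap(). A sketch of that pairing, using the field names visible above:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void prz_unmap(struct persistent_ram_zone *prz)
{
	if (!prz->vaddr)
		return;
	/* step back to the page-aligned address vmap() actually returned */
	vunmap(prz->vaddr - offset_in_page(prz->paddr));
	prz->vaddr = NULL;
}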
include/asm-generic/cacheflush.h
107
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
include/asm-generic/cacheflush.h
111
flush_icache_user_page(vma, page, vaddr, len); \
include/asm-generic/cacheflush.h
117
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
include/asm-generic/fixmap.h
36
static inline unsigned long virt_to_fix(const unsigned long vaddr)
include/asm-generic/fixmap.h
38
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
include/asm-generic/fixmap.h
39
return __virt_to_fix(vaddr);
include/drm/drm_gem_dma_helper.h
27
void *vaddr;
include/drm/drm_gem_shmem_helper.h
77
void *vaddr;
include/linux/bio.h
445
void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len);
include/linux/bio.h
462
unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len);
include/linux/bio.h
463
bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len);
include/linux/bvec.h
71
static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr,
include/linux/bvec.h
74
bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr));
include/linux/dma-map-ops.h
161
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
include/linux/dma-map-ops.h
172
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
include/linux/dma-map-ops.h
173
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
include/linux/dma-map-ops.h
180
int dma_release_from_global_coherent(int order, void *vaddr);
include/linux/dma-map-ops.h
190
static inline int dma_release_from_global_coherent(int order, void *vaddr)
include/linux/dma-map-ops.h
20
void (*free)(struct device *dev, size_t size, void *vaddr,
include/linux/dma-map-ops.h
209
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
include/linux/dma-map-ops.h
25
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
include/linux/dma-map-ops.h
58
void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
include/linux/dma-mapping.h
177
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
include/linux/dma-mapping.h
200
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
include/linux/dma-mapping.h
268
void *vaddr, dma_addr_t dma_handle)
include/linux/dma-mapping.h
334
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
include/linux/dma-mapping.h
504
void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
include/linux/dma-mapping.h
506
dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
include/linux/dma/edma.h
25
} vaddr;
include/linux/dmapool.h
29
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
include/linux/dmapool.h
48
static inline void dma_pool_free(struct dma_pool *pool, void *vaddr,
include/linux/highmem-internal.h
100
static inline void __kunmap_local(const void *vaddr)
include/linux/highmem-internal.h
102
kunmap_local_indexed(vaddr);
include/linux/highmem-internal.h
11
void kunmap_local_indexed(const void *vaddr);
include/linux/highmem.h
192
static inline void flush_kernel_vmap_range(void *vaddr, int size)
include/linux/highmem.h
195
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
include/linux/highmem.h
215
static inline void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
include/linux/highmem.h
234
static inline void clear_user_pages(void *addr, unsigned long vaddr,
include/linux/highmem.h
240
clear_user_page(addr, vaddr, page);
include/linux/highmem.h
242
vaddr += PAGE_SIZE;
include/linux/highmem.h
262
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
include/linux/highmem.h
265
clear_user_page(addr, vaddr, page);
include/linux/highmem.h
279
static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
include/linux/highmem.h
292
clear_user_highpage(page, vaddr);
include/linux/highmem.h
293
vaddr += PAGE_SIZE;
include/linux/highmem.h
302
clear_user_pages(page_address(page), vaddr, page, npages);
include/linux/highmem.h
321
unsigned long vaddr)
include/linux/highmem.h
325
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
include/linux/highmem.h
327
clear_user_highpage(&folio->page, vaddr);
include/linux/highmem.h
396
unsigned long vaddr, struct vm_area_struct *vma)
include/linux/highmem.h
402
copy_user_page(vto, vfrom, vaddr, to);
include/linux/highmem.h
434
unsigned long vaddr, struct vm_area_struct *vma)
include/linux/highmem.h
473
unsigned long vaddr, struct vm_area_struct *vma)
include/linux/highmem.h
475
copy_user_highpage(to, from, vaddr, vma);
include/linux/iio/buffer-dma.h
62
void *vaddr;
include/linux/io-mapping.h
101
static inline void io_mapping_unmap_local(void __iomem *vaddr)
include/linux/io-mapping.h
103
kunmap_local_indexed((void __force *)vaddr);
include/linux/io-mapping.h
120
io_mapping_unmap(void __iomem *vaddr)
include/linux/io-mapping.h
122
iounmap(vaddr);
include/linux/io-mapping.h
162
io_mapping_unmap(void __iomem *vaddr)
include/linux/io-mapping.h
180
io_mapping_unmap_atomic(void __iomem *vaddr)
include/linux/io-mapping.h
182
io_mapping_unmap(vaddr);
include/linux/io-mapping.h
196
static inline void io_mapping_unmap_local(void __iomem *vaddr)
include/linux/io-mapping.h
198
io_mapping_unmap(vaddr);
include/linux/io-mapping.h
81
io_mapping_unmap_atomic(void __iomem *vaddr)
include/linux/io-mapping.h
83
kunmap_local_indexed((void __force *)vaddr);
include/linux/iommu-dma.h
51
#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
include/linux/iommu-dma.h
52
vunmap(vaddr);
include/linux/iosys-map.h
112
void *vaddr;
include/linux/iosys-map.h
123
.vaddr = (vaddr_), \
include/linux/iosys-map.h
183
static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
include/linux/iosys-map.h
185
map->vaddr = vaddr;
include/linux/iosys-map.h
222
return lhs->vaddr == rhs->vaddr;
include/linux/iosys-map.h
239
return !map->vaddr;
include/linux/iosys-map.h
287
memcpy(dst->vaddr + dst_offset, src, len);
include/linux/iosys-map.h
307
memcpy(dst, src->vaddr + src_offset, len);
include/linux/iosys-map.h
323
map->vaddr += incr;
include/linux/iosys-map.h
342
memset(dst->vaddr + offset, value, len);
include/linux/iosys-map.h
394
__iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \
include/linux/iosys-map.h
416
__iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \
include/linux/kexec.h
521
static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; }
include/linux/kexec.h
525
static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
include/linux/mtd/nand-qpic-common.h
191
#define reg_buf_dma_addr(chip, vaddr) \
include/linux/mtd/nand-qpic-common.h
193
((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
include/linux/mtd/nand-qpic-common.h
465
int reg_off, const void *vaddr, int size, unsigned int flags);
include/linux/mtd/nand-qpic-common.h
467
const void *vaddr, int size, unsigned int flags);
include/linux/mtd/nand-qpic-common.h
469
const void *vaddr, int size, bool flow_control);
include/linux/mtd/nand-qpic-common.h
472
int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
include/linux/mtd/nand-qpic-common.h
474
int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
include/linux/mtd/nand-qpic-common.h
476
int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
include/linux/net.h
168
struct sockaddr_unsized *vaddr,
include/linux/pgtable.h
175
static inline pte_t *virt_to_kpte(unsigned long vaddr)
include/linux/pgtable.h
177
pmd_t *pmd = pmd_off_k(vaddr);
include/linux/pgtable.h
179
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
include/linux/qed/qed_rdma_if.h
246
u64 vaddr;
include/linux/qed/qed_rdma_if.h
552
void *vaddr;
include/linux/swiotlb.h
73
void *vaddr;
include/linux/tee_core.h
300
struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
include/linux/uprobes.h
138
unsigned long vaddr;
include/linux/uprobes.h
194
typedef int (*uprobe_write_verify_t)(struct page *page, unsigned long vaddr,
include/linux/uprobes.h
198
extern int set_swbp(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
include/linux/uprobes.h
199
extern int set_orig_insn(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
include/linux/uprobes.h
204
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr, uprobe_opcode_t,
include/linux/uprobes.h
235
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
include/linux/uprobes.h
240
extern void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len);
include/linux/uprobes.h
244
extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
include/linux/virtio.h
291
size_t size, void *vaddr,
include/linux/virtio_config.h
207
void (*free)(union virtio_map map, size_t size, void *vaddr,
include/media/videobuf2-core.h
129
unsigned long vaddr,
include/media/videobuf2-core.h
144
void *(*vaddr)(struct vb2_buffer *vb, void *buf_priv);
include/net/ip_vs.h
1321
const union nf_inet_addr *vaddr,
include/net/ip_vs.h
1330
p->vaddr = vaddr;
include/net/ip_vs.h
1389
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
include/net/ip_vs.h
1408
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
include/net/ip_vs.h
1424
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
include/net/ip_vs.h
1541
const union nf_inet_addr *vaddr, __be16 vport);
include/net/ip_vs.h
1562
const union nf_inet_addr *vaddr, __be16 vport,
include/net/ip_vs.h
551
const union nf_inet_addr *vaddr;
include/net/ip_vs.h
571
union nf_inet_addr vaddr; /* virtual address */
include/net/ip_vs.h
751
union nf_inet_addr vaddr; /* virtual IP address */
include/rdma/ib_hdrs.h
122
return ib_u64_get(&reth->vaddr);
include/rdma/ib_hdrs.h
127
ib_u64_put(val, &reth->vaddr);
include/rdma/ib_hdrs.h
132
return ib_u64_get(&ateth->vaddr);
include/rdma/ib_hdrs.h
137
ib_u64_put(val, &ateth->vaddr);
include/rdma/ib_hdrs.h
49
__be64 vaddr; /* potentially unaligned */
include/rdma/ib_hdrs.h
55
__be64 vaddr; /* potentially unaligned */
include/rdma/rdma_vt.h
525
u32 len, u64 vaddr, u32 rkey, int acc);
include/rdma/rdmavt_mr.h
119
sge->vaddr += length;
include/rdma/rdmavt_mr.h
133
sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
include/rdma/rdmavt_mr.h
20
void *vaddr;
include/rdma/rdmavt_mr.h
69
void *vaddr; /* kernel virtual address of segment */
include/uapi/drm/amdxdna_accel.h
171
__u64 vaddr;
include/uapi/drm/amdxdna_accel.h
200
__u64 vaddr;
include/uapi/drm/amdxdna_accel.h
222
__u64 vaddr;
include/uapi/linux/netfilter/xt_ipvs.h
21
union nf_inet_addr vaddr, vmask;
include/uapi/linux/vfio.h
1615
__u64 vaddr; /* Process virtual address */
include/uapi/linux/vfio.h
1825
__u64 vaddr; /* Process virtual address */
include/uapi/misc/fastrpc.h
131
__u64 vaddr; /* remote process (dsp) virtual address */
include/uapi/rdma/hfi/hfi1_ioctl.h
101
__aligned_u64 vaddr;
include/uapi/xen/gntdev.h
106
__u64 vaddr;
include/video/omapvrfb.h
16
void __iomem *vaddr[4];
include/xen/arm/page.h
90
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
include/xen/grant_table.h
205
void *vaddr;
include/xen/grant_table.h
244
void *vaddr;
include/xen/xen-ops.h
188
int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
include/xen/xenbus.h
220
int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
include/xen/xenbus.h
222
void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
include/xen/xenbus.h
225
unsigned int nr_grefs, void **vaddr);
include/xen/xenbus.h
227
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
kernel/debug/kdb/kdb_support.c
348
void *vaddr;
kernel/debug/kdb/kdb_support.c
355
vaddr = kmap_local_page(page);
kernel/debug/kdb/kdb_support.c
356
memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
kernel/debug/kdb/kdb_support.c
357
kunmap_local(vaddr);
kernel/dma/coherent.c
200
int order, void *vaddr)
kernel/dma/coherent.c
202
if (mem && vaddr >= mem->virt_base && vaddr <
kernel/dma/coherent.c
204
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
kernel/dma/coherent.c
227
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
kernel/dma/coherent.c
231
return __dma_release_from_coherent(mem, order, vaddr);
kernel/dma/coherent.c
235
struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
kernel/dma/coherent.c
237
if (mem && vaddr >= mem->virt_base && vaddr + size <=
kernel/dma/coherent.c
240
int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
kernel/dma/coherent.c
272
void *vaddr, size_t size, int *ret)
kernel/dma/coherent.c
276
return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
kernel/dma/coherent.c
292
int dma_release_from_global_coherent(int order, void *vaddr)
kernel/dma/coherent.c
298
vaddr);
kernel/dma/coherent.c
301
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
kernel/dma/coherent.c
308
vaddr, size, ret);
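The coherent.c helpers decide ownership by a simple interval test and turn the virtual address into a page index with pointer arithmetic: a request is theirs only if vaddr lies inside the region, and the index is (vaddr - virt_base) >> PAGE_SHIFT. A userspace sketch with an illustrative struct (the kernel tracks the region size in pages; bytes are used here for brevity):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SHIFT 12                  /* assumed 4 KiB pages */

struct coherent_mem {
	char *virt_base;
	size_t size;                   /* bytes, for this sketch */
};

static bool coherent_page_index(const struct coherent_mem *mem,
				const void *vaddr, size_t *index)
{
	const char *p = vaddr;

	if (p < mem->virt_base || p >= mem->virt_base + mem->size)
		return false;   /* not ours; fall back to another allocator */
	*index = (size_t)(p - mem->virt_base) >> PAGE_SHIFT;
	return true;
}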
kernel/dma/direct.c
380
void *vaddr = page_address(page);
kernel/dma/direct.c
384
dma_free_from_pool(dev, vaddr, size))
kernel/dma/direct.c
387
if (dma_set_encrypted(dev, vaddr, size))
kernel/dma/direct.c
80
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
kernel/dma/direct.c
84
return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
kernel/dma/direct.c
87
static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
kernel/dma/direct.c
93
ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
kernel/dma/mapping.c
103
vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
kernel/dma/mapping.c
104
if (!vaddr) {
kernel/dma/mapping.c
109
dr->vaddr = vaddr;
kernel/dma/mapping.c
116
return vaddr;
kernel/dma/mapping.c
35
void *vaddr;
kernel/dma/mapping.c
44
dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
kernel/dma/mapping.c
52
if (this->vaddr == match->vaddr) {
kernel/dma/mapping.c
69
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
kernel/dma/mapping.c
72
struct dma_devres match_data = { size, vaddr, dma_handle };
kernel/dma/mapping.c
75
dma_free_coherent(dev, size, vaddr, dma_handle);
kernel/dma/mapping.c
854
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
kernel/dma/mapping.c
857
iommu_dma_vunmap_noncontiguous(dev, vaddr);
kernel/dma/mapping.c
97
void *vaddr;
kernel/dma/remap.c
27
void *vaddr;
kernel/dma/remap.c
29
vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
kernel/dma/remap.c
31
if (vaddr)
kernel/dma/remap.c
32
find_vm_area(vaddr)->pages = pages;
kernel/dma/remap.c
33
return vaddr;
kernel/dma/remap.c
45
void *vaddr;
kernel/dma/remap.c
53
vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
kernel/dma/remap.c
56
return vaddr;
kernel/dma/swiotlb.c
265
set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
kernel/dma/swiotlb.c
271
void *vaddr = phys_to_virt(start);
kernel/dma/swiotlb.c
295
memset(vaddr, 0, bytes);
kernel/dma/swiotlb.c
296
mem->vaddr = vaddr;
kernel/dma/swiotlb.c
578
void *vaddr;
kernel/dma/swiotlb.c
590
vaddr = phys_to_virt(paddr);
kernel/dma/swiotlb.c
591
if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
kernel/dma/swiotlb.c
597
if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
kernel/dma/swiotlb.c
621
void *vaddr;
kernel/dma/swiotlb.c
626
return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
kernel/dma/swiotlb.c
656
static void swiotlb_free_tlb(void *vaddr, size_t bytes)
kernel/dma/swiotlb.c
659
dma_free_from_pool(NULL, vaddr, bytes))
kernel/dma/swiotlb.c
663
if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
kernel/dma/swiotlb.c
664
__free_pages(virt_to_page(vaddr), get_order(bytes));
kernel/dma/swiotlb.c
760
swiotlb_free_tlb(pool->vaddr, tlb_size);
kernel/dma/swiotlb.c
865
unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
kernel/dma/swiotlb.c
913
memcpy_from_page(vaddr, page, offset, sz);
kernel/dma/swiotlb.c
915
kmsan_unpoison_memory(vaddr, sz);
kernel/dma/swiotlb.c
916
memcpy_to_page(page, offset, vaddr, sz);
kernel/dma/swiotlb.c
922
vaddr += sz;
kernel/dma/swiotlb.c
932
memcpy(vaddr, phys_to_virt(orig_addr), size);
kernel/dma/swiotlb.c
934
kmsan_unpoison_memory(vaddr, size);
kernel/dma/swiotlb.c
935
memcpy(phys_to_virt(orig_addr), vaddr, size);
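swiotlb's bounce step in the lines above is two translations and a copy: the slot address becomes a pointer inside the pool's kernel mapping (mem->vaddr + tlb_addr - mem->start), the original buffer is reached via phys_to_virt(), and data moves whichever way the DMA direction demands. A condensed sketch with the highmem and KMSAN handling stripped; field names follow the io_tlb usage quoted above:

#include <linux/string.h>
#include <linux/swiotlb.h>

static void bounce_sketch(struct io_tlb_pool *mem, phys_addr_t tlb_addr,
			  phys_addr_t orig_addr, size_t size, bool to_device)
{
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;

	if (to_device)
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	else
		memcpy(phys_to_virt(orig_addr), vaddr, size);
}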
kernel/events/uprobes.c
1097
struct mm_struct *mm, unsigned long vaddr)
kernel/events/uprobes.c
1117
ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
kernel/events/uprobes.c
1152
unsigned long vaddr)
kernel/events/uprobes.c
1158
ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
kernel/events/uprobes.c
1170
ret = set_swbp(&uprobe->arch, vma, vaddr);
kernel/events/uprobes.c
1180
unsigned long vaddr)
kernel/events/uprobes.c
1185
return set_orig_insn(&uprobe->arch, vma, vaddr);
kernel/events/uprobes.c
119
unsigned long vaddr; /* Page(s) of instruction slots */
kernel/events/uprobes.c
1191
unsigned long vaddr;
kernel/events/uprobes.c
1241
info->vaddr = offset_to_vaddr(vma, offset);
kernel/events/uprobes.c
1303
vma = find_vma(mm, info->vaddr);
kernel/events/uprobes.c
1308
if (vma->vm_start > info->vaddr ||
kernel/events/uprobes.c
1309
vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
kernel/events/uprobes.c
1315
err = install_breakpoint(uprobe, vma, info->vaddr);
kernel/events/uprobes.c
1318
err |= remove_breakpoint(uprobe, vma, info->vaddr);
kernel/events/uprobes.c
1478
unsigned long vaddr;
kernel/events/uprobes.c
1490
vaddr = offset_to_vaddr(vma, uprobe->offset);
kernel/events/uprobes.c
1491
err |= remove_breakpoint(uprobe, vma, vaddr);
kernel/events/uprobes.c
150
static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
kernel/events/uprobes.c
152
return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
kernel/events/uprobes.c
1567
unsigned long vaddr;
kernel/events/uprobes.c
1578
vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
kernel/events/uprobes.c
1579
ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
kernel/events/uprobes.c
1628
unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
kernel/events/uprobes.c
1629
install_breakpoint(uprobe, vma, vaddr);
kernel/events/uprobes.c
1717
if (!area->vaddr) {
kernel/events/uprobes.c
1718
area->vaddr = arch_uprobe_get_xol_area();
kernel/events/uprobes.c
1719
if (IS_ERR_VALUE(area->vaddr)) {
kernel/events/uprobes.c
1720
ret = area->vaddr;
kernel/events/uprobes.c
1725
vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
kernel/events/uprobes.c
1751
static struct xol_area *__create_xol_area(unsigned long vaddr)
kernel/events/uprobes.c
1771
area->vaddr = vaddr;
kernel/events/uprobes.c
180
void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
kernel/events/uprobes.c
183
memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
kernel/events/uprobes.c
187
static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
kernel/events/uprobes.c
1883
utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
kernel/events/uprobes.c
1895
unsigned long offset = utask->xol_vaddr - area->vaddr;
kernel/events/uprobes.c
190
memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
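uprobes always copies through a kmapped page at the offset the user address occupies within its page, i.e. kaddr + (vaddr & ~PAGE_MASK). That mask is the complement of the usual page rounding, which is easy to get backwards; a runnable check, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uintptr_t vaddr = 0x5555A123UL;

	/* ~PAGE_MASK keeps only the low PAGE_SHIFT bits of vaddr */
	printf("in-page offset = %#lx\n",
	       (unsigned long)(vaddr & ~PAGE_MASK));  /* prints 0x123 */
	return 0;
}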
kernel/events/uprobes.c
1910
void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
kernel/events/uprobes.c
1914
copy_to_page(page, vaddr, src, len);
kernel/events/uprobes.c
194
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *insn,
kernel/events/uprobes.c
1941
return utask->vaddr;
kernel/events/uprobes.c
209
uprobe_copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
kernel/events/uprobes.c
2212
t->utask->dup_xol_addr = area->vaddr;
kernel/events/uprobes.c
2231
trampoline_vaddr = area->vaddr;
kernel/events/uprobes.c
2332
utask->vaddr = bp_vaddr;
kernel/events/uprobes.c
2400
static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
kernel/events/uprobes.c
2406
if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
kernel/events/uprobes.c
2410
result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
kernel/events/uprobes.c
2416
result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
kernel/events/uprobes.c
2420
uprobe_copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
kernel/events/uprobes.c
2704
void __weak arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr)
kernel/events/uprobes.c
282
unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
kernel/events/uprobes.c
288
vma->vm_start <= vaddr &&
kernel/events/uprobes.c
289
vma->vm_end > vaddr;
kernel/events/uprobes.c
306
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
kernel/events/uprobes.c
313
if (!vaddr || !d)
kernel/events/uprobes.c
316
ret = get_user_pages_remote(mm, vaddr, 1,
kernel/events/uprobes.c
327
ptr = kaddr + (vaddr & ~PAGE_MASK);
kernel/events/uprobes.c
331
"curr val: %d, delta: %d\n", vaddr, *ptr, d);
kernel/events/uprobes.c
384
unsigned long vaddr, struct page *page, bool *pmd_mappable)
kernel/events/uprobes.c
386
const pgoff_t index = vaddr_to_offset(vma, vaddr) >> PAGE_SHIFT;
kernel/events/uprobes.c
408
const unsigned long vaddr = insn_vaddr & PAGE_MASK;
kernel/events/uprobes.c
431
flush_cache_page(vma, vaddr, pte_pfn(fw->pte));
kernel/events/uprobes.c
432
fw->pte = ptep_clear_flush(vma, vaddr, fw->ptep);
kernel/events/uprobes.c
447
if (!orig_page_is_identical(vma, vaddr, fw->page, &pmd_mappable))
kernel/events/uprobes.c
467
set_pte_at(vma->vm_mm, vaddr, fw->ptep, pte_mkdirty(fw->pte));
kernel/events/uprobes.c
502
const unsigned long vaddr = insn_vaddr & PAGE_MASK;
kernel/events/uprobes.c
529
ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, &page, NULL);
kernel/events/uprobes.c
565
vaddr, vaddr + PAGE_SIZE);
kernel/events/uprobes.c
571
if (folio_walk_start(&fw, vma, vaddr, 0)) {
kernel/events/uprobes.c
598
collapse_pte_mapped_thp(mm, vaddr, false);
kernel/events/uprobes.c
613
unsigned long vaddr)
kernel/events/uprobes.c
615
return uprobe_write_opcode(auprobe, vma, vaddr, UPROBE_SWBP_INSN, true);
kernel/events/uprobes.c
628
struct vm_area_struct *vma, unsigned long vaddr)
kernel/events/uprobes.c
630
return uprobe_write_opcode(auprobe, vma, vaddr,
kernel/kexec_core.c
1003
vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
kernel/kexec_core.c
1006
if (!vaddr)
kernel/kexec_core.c
1009
return vaddr;
kernel/kexec_core.c
964
void *vaddr = NULL;
kernel/trace/bpf_trace.c
1062
return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
kernel/trace/trace_uprobe.c
1016
entry->vaddr[0] = func;
kernel/trace/trace_uprobe.c
1017
entry->vaddr[1] = instruction_pointer(regs);
kernel/trace/trace_uprobe.c
1020
entry->vaddr[0] = instruction_pointer(regs);
kernel/trace/trace_uprobe.c
1082
entry->vaddr[1], entry->vaddr[0]);
kernel/trace/trace_uprobe.c
1087
entry->vaddr[0]);
kernel/trace/trace_uprobe.c
1232
DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
kernel/trace/trace_uprobe.c
1233
DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
kernel/trace/trace_uprobe.c
1236
DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
kernel/trace/trace_uprobe.c
131
void __user *vaddr = (void __force __user *)src;
kernel/trace/trace_uprobe.c
133
return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
kernel/trace/trace_uprobe.c
1430
entry->vaddr[0] = func;
kernel/trace/trace_uprobe.c
1431
entry->vaddr[1] = instruction_pointer(regs);
kernel/trace/trace_uprobe.c
1434
entry->vaddr[0] = instruction_pointer(regs);
kernel/trace/trace_uprobe.c
1542
current->utask->vaddr = (unsigned long) &udd;
kernel/trace/trace_uprobe.c
1573
current->utask->vaddr = (unsigned long) &udd;
kernel/trace/trace_uprobe.c
190
void __user *vaddr = (void __force __user *) addr;
kernel/trace/trace_uprobe.c
195
len = strnlen_user(vaddr, MAX_STRING_SIZE);
kernel/trace/trace_uprobe.c
211
udd = (void *) current->utask->vaddr;
kernel/trace/trace_uprobe.c
32
unsigned long vaddr[];
lib/genalloc.c
365
unsigned long vaddr;
lib/genalloc.c
370
vaddr = gen_pool_alloc_algo(pool, size, algo, data);
lib/genalloc.c
371
if (!vaddr)
lib/genalloc.c
375
*dma = gen_pool_virt_to_phys(pool, vaddr);
lib/genalloc.c
377
return (void *)vaddr;
lib/genalloc.c
443
void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
lib/genalloc.c
445
if (vaddr)
lib/genalloc.c
446
memset(vaddr, 0, size);
lib/genalloc.c
448
return vaddr;
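gen_pool_dma_alloc() composes two genalloc primitives: carve the sub-buffer, then translate its virtual address back to the bus/physical address recorded when the chunk was added (gen_pool_add_virt, earlier in this listing). The zeroing variant only adds a memset. Sketched, assuming <linux/genalloc.h>:

#include <linux/genalloc.h>

static void *pool_dma_alloc(struct gen_pool *pool, size_t size,
			    dma_addr_t *dma)
{
	unsigned long vaddr = gen_pool_alloc(pool, size);

	if (!vaddr)
		return NULL;
	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);
	return (void *)vaddr;
}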
mm/debug_vm_pgtable.c
1157
args->vaddr = get_random_vaddr();
mm/debug_vm_pgtable.c
1190
args->pgdp = pgd_offset(args->mm, args->vaddr);
mm/debug_vm_pgtable.c
1191
args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
mm/debug_vm_pgtable.c
1200
args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
mm/debug_vm_pgtable.c
1209
args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
mm/debug_vm_pgtable.c
1341
args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
mm/debug_vm_pgtable.c
139
set_pte_at(args->mm, args->vaddr, args->ptep, pte);
mm/debug_vm_pgtable.c
141
ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
mm/debug_vm_pgtable.c
144
ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
mm/debug_vm_pgtable.c
151
set_pte_at(args->mm, args->vaddr, args->ptep, pte);
mm/debug_vm_pgtable.c
155
ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
mm/debug_vm_pgtable.c
158
ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
mm/debug_vm_pgtable.c
164
set_pte_at(args->mm, args->vaddr, args->ptep, pte);
mm/debug_vm_pgtable.c
166
ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
mm/debug_vm_pgtable.c
170
ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
mm/debug_vm_pgtable.c
223
unsigned long vaddr = args->vaddr;
mm/debug_vm_pgtable.c
241
vaddr &= HPAGE_PMD_MASK;
mm/debug_vm_pgtable.c
246
set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
mm/debug_vm_pgtable.c
248
pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
mm/debug_vm_pgtable.c
251
pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
mm/debug_vm_pgtable.c
258
set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
mm/debug_vm_pgtable.c
262
pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
mm/debug_vm_pgtable.c
265
pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
mm/debug_vm_pgtable.c
271
set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
mm/debug_vm_pgtable.c
273
pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
mm/debug_vm_pgtable.c
278
pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
mm/debug_vm_pgtable.c
344
unsigned long vaddr = args->vaddr;
mm/debug_vm_pgtable.c
363
vaddr &= HPAGE_PUD_MASK;
mm/debug_vm_pgtable.c
366
set_pud_at(args->mm, vaddr, args->pudp, pud);
mm/debug_vm_pgtable.c
368
pudp_set_wrprotect(args->mm, vaddr, args->pudp);
mm/debug_vm_pgtable.c
373
pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
mm/debug_vm_pgtable.c
380
set_pud_at(args->mm, vaddr, args->pudp, pud);
mm/debug_vm_pgtable.c
384
pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
mm/debug_vm_pgtable.c
389
pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
mm/debug_vm_pgtable.c
396
set_pud_at(args->mm, vaddr, args->pudp, pud);
mm/debug_vm_pgtable.c
398
pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
mm/debug_vm_pgtable.c
402
pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
mm/debug_vm_pgtable.c
60
unsigned long vaddr;
mm/debug_vm_pgtable.c
626
set_pte_at(args->mm, args->vaddr, args->ptep, pte);
mm/debug_vm_pgtable.c
630
ptep_clear(args->mm, args->vaddr, args->ptep);
mm/dmapool.c
134
static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
mm/dmapool.c
142
__func__, pool->name, vaddr, &dma);
mm/dmapool.c
147
if (block != vaddr) {
mm/dmapool.c
156
memset(vaddr, POOL_POISON_FREED, pool->size);
mm/dmapool.c
162
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
mm/dmapool.c
170
static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
mm/dmapool.c
173
memset(vaddr, 0, pool->size);
mm/dmapool.c
316
block = page->vaddr + offset;
mm/dmapool.c
345
page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
mm/dmapool.c
347
if (!page->vaddr) {
mm/dmapool.c
388
page->vaddr, page->dma);
mm/dmapool.c
453
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
mm/dmapool.c
455
struct dma_block *block = vaddr;
mm/dmapool.c
459
if (!pool_block_err(pool, vaddr, dma)) {
mm/dmapool.c
66
void *vaddr;
mm/highmem.c
165
struct page *__kmap_to_page(void *vaddr)
mm/highmem.c
167
unsigned long base = (unsigned long) vaddr & PAGE_MASK;
mm/highmem.c
169
unsigned long addr = (unsigned long)vaddr;
mm/highmem.c
193
return virt_to_page(vaddr);
mm/highmem.c
248
unsigned long vaddr;
mm/highmem.c
290
vaddr = PKMAP_ADDR(last_pkmap_nr);
mm/highmem.c
291
set_pte_at(&init_mm, vaddr,
mm/highmem.c
295
set_page_address(page, (void *)vaddr);
mm/highmem.c
297
return vaddr;
mm/highmem.c
310
unsigned long vaddr;
mm/highmem.c
317
vaddr = (unsigned long)page_address(page);
mm/highmem.c
318
if (!vaddr)
mm/highmem.c
319
vaddr = map_new_virtual(page);
mm/highmem.c
320
pkmap_count[PKMAP_NR(vaddr)]++;
mm/highmem.c
321
BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
mm/highmem.c
323
return (void *) vaddr;
mm/highmem.c
340
unsigned long vaddr, flags;
mm/highmem.c
343
vaddr = (unsigned long)page_address(page);
mm/highmem.c
344
if (vaddr) {
mm/highmem.c
345
BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
mm/highmem.c
346
pkmap_count[PKMAP_NR(vaddr)]++;
mm/highmem.c
349
return (void *) vaddr;
mm/highmem.c
362
unsigned long vaddr;
mm/highmem.c
370
vaddr = (unsigned long)page_address(page);
mm/highmem.c
371
BUG_ON(!vaddr);
mm/highmem.c
372
nr = PKMAP_NR(vaddr);
mm/highmem.c
496
# define arch_kmap_local_post_map(vaddr, pteval) do { } while (0)
mm/highmem.c
500
# define arch_kmap_local_pre_unmap(vaddr) do { } while (0)
mm/highmem.c
504
# define arch_kmap_local_post_unmap(vaddr) do { } while (0)
mm/highmem.c
508
#define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx)
mm/highmem.c
519
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
mm/highmem.c
520
set_pte_at(mm, vaddr, ptep, ptev)
mm/highmem.c
524
static inline bool kmap_high_unmap_local(unsigned long vaddr)
mm/highmem.c
527
if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
mm/highmem.c
528
kunmap_high(pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(vaddr)])));
mm/highmem.c
537
static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
mm/highmem.c
544
return virt_to_kpte(vaddr);
mm/highmem.c
553
unsigned long vaddr;
mm/highmem.c
563
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
mm/highmem.c
564
kmap_pte = kmap_get_pte(vaddr, idx);
mm/highmem.c
567
arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
mm/highmem.c
568
arch_kmap_local_post_map(vaddr, pteval);
mm/highmem.c
572
return (void *)vaddr;
mm/highmem.c
597
void kunmap_local_indexed(const void *vaddr)
mm/highmem.c
599
unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
mm/ioremap.c
18
unsigned long offset, vaddr;
mm/ioremap.c
40
vaddr = (unsigned long)area->addr;
mm/ioremap.c
43
if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
mm/ioremap.c
48
return (void __iomem *)(vaddr + offset);
mm/ioremap.c
62
void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
mm/ioremap.c
64
if (is_ioremap_addr(vaddr))
mm/ioremap.c
65
vunmap(vaddr);
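mm/ioremap.c keeps sub-page offsets out of the mapping itself: the physical base is rounded down, the size rounded up, the VM area covers whole pages, and only the returned cookie carries base + offset; iounmap() masks the offset back off before vunmap(). A condensed sketch of the map side, assuming <linux/vmalloc.h> and <linux/io.h>:

#include <linux/io.h>
#include <linux/vmalloc.h>

static void __iomem *ioremap_sketch(phys_addr_t phys_addr, size_t size,
				    pgprot_t prot)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;

	/* split off the sub-page offset and page-align the request */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	/* only the cookie handed back carries the sub-page offset */
	return (void __iomem *)(vaddr + offset);
}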
mm/kmsan/core.c
239
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
mm/kmsan/core.c
243
if (!kmsan_internal_is_vmalloc_addr(vaddr) &&
mm/kmsan/core.c
244
!kmsan_internal_is_module_addr(vaddr))
mm/kmsan/core.c
246
page = vmalloc_to_page(vaddr);
mm/kmsan/kmsan.h
168
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr);
mm/kmsan/kmsan.h
177
static inline bool kmsan_internal_is_module_addr(void *vaddr)
mm/kmsan/kmsan.h
179
return ((u64)vaddr >= MODULES_VADDR) && ((u64)vaddr < MODULES_END);
mm/kmsan/shadow.c
73
static struct page *virt_to_page_or_null(void *vaddr)
mm/kmsan/shadow.c
75
if (kmsan_virt_addr_valid(vaddr))
mm/kmsan/shadow.c
76
return virt_to_page(vaddr);
mm/memblock.c
1488
void *vaddr = kzalloc_node(size, GFP_NOWAIT, nid);
mm/memblock.c
1490
return vaddr ? virt_to_phys(vaddr) : 0;
mm/vmalloc.c
2711
void *vaddr;
mm/vmalloc.c
2728
vaddr = vmap_block_vaddr(va->va_start, 0);
mm/vmalloc.c
2762
return vaddr;
mm/vmalloc.c
2855
void *vaddr = NULL;
mm/vmalloc.c
2885
vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
mm/vmalloc.c
2901
if (!vaddr)
mm/vmalloc.c
2902
vaddr = new_vmap_block(order, gfp_mask);
mm/vmalloc.c
2904
return vaddr;
mm/vmalloc.c
4600
char *vaddr;
mm/vmalloc.c
4643
vaddr = (char *) va->va_start;
mm/vmalloc.c
4646
if (addr >= vaddr + size)
mm/vmalloc.c
4649
if (addr < vaddr) {
mm/vmalloc.c
4650
size_t to_zero = min_t(size_t, vaddr - addr, remains);
mm/vmalloc.c
4660
n = vaddr + size - addr;
mm/zsmalloc.c
1254
void *vaddr;
mm/zsmalloc.c
1267
vaddr = kmap_local_zpdesc(m_zpdesc);
mm/zsmalloc.c
1268
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
mm/zsmalloc.c
1276
kunmap_local(vaddr);
mm/zsmalloc.c
1363
void *vaddr;
mm/zsmalloc.c
1370
vaddr = kmap_local_zpdesc(f_zpdesc);
mm/zsmalloc.c
1371
link = (struct link_free *)(vaddr + f_offset);
mm/zsmalloc.c
1380
kunmap_local(vaddr);
mm/zsmalloc.c
861
void *vaddr;
mm/zsmalloc.c
865
vaddr = kmap_local_zpdesc(zpdesc);
mm/zsmalloc.c
866
link = (struct link_free *)vaddr + off / sizeof(*link);
mm/zsmalloc.c
888
kunmap_local(vaddr);
net/appletalk/ddp.c
973
u8 *vaddr;
net/appletalk/ddp.c
977
vaddr = kmap_atomic(skb_frag_page(frag));
net/appletalk/ddp.c
978
sum = atalk_sum_partial(vaddr + skb_frag_off(frag) +
net/appletalk/ddp.c
980
kunmap_atomic(vaddr);
net/core/datagram.c
425
u8 *vaddr;
net/core/datagram.c
434
vaddr = kmap_local_page(p);
net/core/datagram.c
436
vaddr + p_off, p_len, data, to);
net/core/datagram.c
437
kunmap_local(vaddr);
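Nearly every net/ entry that follows is one idiom: map a (possibly highmem) page fragment with kmap_local_page() or kmap_atomic(), touch the bytes at the fragment's in-page offset, and unmap immediately. The shape, assuming <linux/highmem.h>; p, p_off, and p_len are a page fragment as in the skb walkers:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_frag(void *to, struct page *p, unsigned int p_off,
		      unsigned int p_len)
{
	u8 *vaddr = kmap_local_page(p);

	memcpy(to, vaddr + p_off, p_len);
	kunmap_local(vaddr);    /* keep the mapping window as short as possible */
}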
net/core/skbuff.c
1383
u8 *vaddr;
net/core/skbuff.c
1397
vaddr = kmap_atomic(p);
net/core/skbuff.c
1400
16, 1, vaddr + p_off, seg_len, false);
net/core/skbuff.c
1401
kunmap_atomic(vaddr);
net/core/skbuff.c
2043
u8 *vaddr;
net/core/skbuff.c
2048
vaddr = kmap_atomic(p);
net/core/skbuff.c
2057
vaddr + p_off + done, copy);
net/core/skbuff.c
2061
kunmap_atomic(vaddr);
net/core/skbuff.c
3043
u8 *vaddr;
net/core/skbuff.c
3051
vaddr = kmap_atomic(p);
net/core/skbuff.c
3052
memcpy(to + copied, vaddr + p_off, p_len);
net/core/skbuff.c
3053
kunmap_atomic(vaddr);
net/core/skbuff.c
3471
u8 *vaddr;
net/core/skbuff.c
3479
vaddr = kmap_atomic(p);
net/core/skbuff.c
3480
memcpy(vaddr + p_off, from + copied, p_len);
net/core/skbuff.c
3481
kunmap_atomic(vaddr);
net/core/skbuff.c
3552
u8 *vaddr;
net/core/skbuff.c
3560
vaddr = kmap_atomic(p);
net/core/skbuff.c
3561
csum2 = csum_partial(vaddr + p_off, p_len, 0);
net/core/skbuff.c
3562
kunmap_atomic(vaddr);
net/core/skbuff.c
3638
u8 *vaddr;
net/core/skbuff.c
3646
vaddr = kmap_atomic(p);
net/core/skbuff.c
3647
csum2 = csum_partial_copy_nocheck(vaddr + p_off,
net/core/skbuff.c
3650
kunmap_atomic(vaddr);
net/core/skbuff.c
3720
u8 *vaddr;
net/core/skbuff.c
3726
vaddr = kmap_atomic(p);
net/core/skbuff.c
3727
crc = crc32c(crc, vaddr + p_off, p_len);
net/core/skbuff.c
3728
kunmap_atomic(vaddr);
net/ipv4/tcp.c
4916
const void *vaddr;
net/ipv4/tcp.c
4921
vaddr = kmap_local_page(p);
net/ipv4/tcp.c
4922
md5_update(ctx, vaddr + p_off, p_len);
net/ipv4/tcp.c
4923
kunmap_local(vaddr);
net/netfilter/ipvs/ip_vs_conn.c
1163
&cp->vaddr.in6, ntohs(cp->vport),
net/netfilter/ipvs/ip_vs_conn.c
1176
ntohl(cp->vaddr.ip), ntohs(cp->vport),
net/netfilter/ipvs/ip_vs_conn.c
1229
&cp->vaddr.in6, ntohs(cp->vport),
net/netfilter/ipvs/ip_vs_conn.c
1242
ntohl(cp->vaddr.ip), ntohs(cp->vport),
net/netfilter/ipvs/ip_vs_conn.c
135
addr = p->vaddr;
net/netfilter/ipvs/ip_vs_conn.c
275
ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
net/netfilter/ipvs/ip_vs_conn.c
306
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
net/netfilter/ipvs/ip_vs_conn.c
373
p->af, p->vaddr, &cp->vaddr) &&
net/netfilter/ipvs/ip_vs_conn.c
390
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
net/netfilter/ipvs/ip_vs_conn.c
420
saddr = &cp->vaddr;
net/netfilter/ipvs/ip_vs_conn.c
427
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
net/netfilter/ipvs/ip_vs_conn.c
444
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
net/netfilter/ipvs/ip_vs_conn.c
618
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
net/netfilter/ipvs/ip_vs_conn.c
662
cp->dport, &cp->vaddr, cp->vport,
net/netfilter/ipvs/ip_vs_conn.c
715
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
net/netfilter/ipvs/ip_vs_conn.c
784
IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
net/netfilter/ipvs/ip_vs_conn.c
965
&cp->vaddr, p->vaddr);
net/netfilter/ipvs/ip_vs_core.c
1137
const union nf_inet_addr *vaddr, *daddr, *caddr;
net/netfilter/ipvs/ip_vs_core.c
1142
vaddr = &svc->addr;
net/netfilter/ipvs/ip_vs_core.c
1165
&snet, 0, vaddr,
net/netfilter/ipvs/ip_vs_core.c
1188
caddr, cport, vaddr, vport, &param);
net/netfilter/ipvs/ip_vs_core.c
1206
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
net/netfilter/ipvs/ip_vs_core.c
1278
ipv6_hdr(skb)->saddr = cp->vaddr.in6;
net/netfilter/ipvs/ip_vs_core.c
1282
ip_hdr(skb)->saddr = cp->vaddr.ip;
net/netfilter/ipvs/ip_vs_core.c
232
const union nf_inet_addr *vaddr, __be16 vport,
net/netfilter/ipvs/ip_vs_core.c
235
ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
net/netfilter/ipvs/ip_vs_core.c
306
const union nf_inet_addr *vaddr = dst_addr;
net/netfilter/ipvs/ip_vs_core.c
327
vaddr = &fwmark;
net/netfilter/ipvs/ip_vs_core.c
332
vaddr, vport, &param) < 0) {
net/netfilter/ipvs/ip_vs_core.c
449
const void *caddr, *vaddr;
net/netfilter/ipvs/ip_vs_core.c
464
vaddr = &iph->daddr;
net/netfilter/ipvs/ip_vs_core.c
469
vaddr = &iph->saddr;
net/netfilter/ipvs/ip_vs_core.c
547
caddr, cport, vaddr, vport, &p);
net/netfilter/ipvs/ip_vs_core.c
561
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
net/netfilter/ipvs/ip_vs_core.c
757
iph->saddr = cp->vaddr.ip;
net/netfilter/ipvs/ip_vs_core.c
759
ciph->daddr = cp->vaddr.ip;
net/netfilter/ipvs/ip_vs_core.c
812
iph->saddr = cp->vaddr.in6;
net/netfilter/ipvs/ip_vs_core.c
813
ciph->daddr = cp->vaddr.in6;
net/netfilter/ipvs/ip_vs_ctl.c
1096
dest->vaddr = svc->addr;
net/netfilter/ipvs/ip_vs_ctl.c
1175
IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
net/netfilter/ipvs/ip_vs_ctl.c
408
const union nf_inet_addr *vaddr, __be16 vport)
net/netfilter/ipvs/ip_vs_ctl.c
414
hash = ip_vs_svc_hashkey(ipvs, af, protocol, vaddr, vport);
net/netfilter/ipvs/ip_vs_ctl.c
418
&& ip_vs_addr_equal(af, &svc->addr, vaddr)
net/netfilter/ipvs/ip_vs_ctl.c
457
const union nf_inet_addr *vaddr, __be16 vport)
net/netfilter/ipvs/ip_vs_ctl.c
474
svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport);
net/netfilter/ipvs/ip_vs_ctl.c
483
svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, FTPPORT);
net/netfilter/ipvs/ip_vs_ctl.c
491
svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, 0);
net/netfilter/ipvs/ip_vs_ctl.c
497
IP_VS_DBG_ADDR(af, vaddr), ntohs(vport),
net/netfilter/ipvs/ip_vs_ctl.c
728
const union nf_inet_addr *vaddr,
net/netfilter/ipvs/ip_vs_ctl.c
736
svc = ip_vs_service_find(ipvs, svc_af, fwmark, protocol, vaddr, vport);
net/netfilter/ipvs/ip_vs_ctl.c
803
(ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) &&
net/netfilter/ipvs/ip_vs_ftp.c
330
0, &cp->vaddr, port, &p);
net/netfilter/ipvs/ip_vs_ftp.c
344
from.ip = n_cp->vaddr.ip;
net/netfilter/ipvs/ip_vs_ftp.c
354
from = n_cp->vaddr;
net/netfilter/ipvs/ip_vs_ftp.c
504
&to.ip, ntohs(port), &cp->vaddr.ip,
net/netfilter/ipvs/ip_vs_ftp.c
520
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
net/netfilter/ipvs/ip_vs_ftp.c
532
ipvsh->protocol, &to, port, &cp->vaddr,
net/netfilter/ipvs/ip_vs_nfct.c
113
new_tuple.dst.u3 = cp->vaddr;
net/netfilter/ipvs/ip_vs_nfct.c
170
new_reply.dst.u3 = cp->vaddr;
net/netfilter/ipvs/ip_vs_nfct.c
225
from_rs ? &cp->caddr : &cp->vaddr,
net/netfilter/ipvs/ip_vs_nfct.c
255
tuple.dst.u3 = cp->vaddr;
net/netfilter/ipvs/ip_vs_nfct.c
63
IP_VS_DBG_ADDR((C)->af, &((C)->vaddr)), \
net/netfilter/ipvs/ip_vs_pe_sip.c
117
p->vaddr, &ct->vaddr) &&
net/netfilter/ipvs/ip_vs_pe_sip.c
128
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
net/netfilter/ipvs/ip_vs_proto_sctp.c
538
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
net/netfilter/ipvs/ip_vs_proto_tcp.c
187
tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
net/netfilter/ipvs/ip_vs_proto_tcp.c
192
tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
net/netfilter/ipvs/ip_vs_proto_tcp.c
203
tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
net/netfilter/ipvs/ip_vs_proto_tcp.c
209
tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
net/netfilter/ipvs/ip_vs_proto_tcp.c
270
tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
net/netfilter/ipvs/ip_vs_proto_tcp.c
275
tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
net/netfilter/ipvs/ip_vs_proto_tcp.c
545
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
net/netfilter/ipvs/ip_vs_proto_tcp.c
667
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
net/netfilter/ipvs/ip_vs_proto_udp.c
180
udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
net/netfilter/ipvs/ip_vs_proto_udp.c
185
udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
net/netfilter/ipvs/ip_vs_proto_udp.c
196
udph->check = csum_ipv6_magic(&cp->vaddr.in6,
net/netfilter/ipvs/ip_vs_proto_udp.c
202
udph->check = csum_tcpudp_magic(cp->vaddr.ip,
net/netfilter/ipvs/ip_vs_proto_udp.c
264
udp_partial_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
net/netfilter/ipvs/ip_vs_proto_udp.c
269
udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
net/netfilter/ipvs/ip_vs_proto_udp.c
412
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
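Note: the csum_tcpudp_magic() calls above (ip_vs_proto_tcp.c:209, ip_vs_proto_udp.c:202) recompute a full L4 checksum after the vaddr rewrite. A sketch for IPv4 UDP, assuming the datagram is linear; the helper name is illustrative:

    #include <linux/udp.h>
    #include <net/checksum.h>

    static void udp_csum_v4(struct udphdr *udph, __be32 saddr, __be32 daddr,
                            int len)
    {
            udph->check = 0;
            udph->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP,
                                            csum_partial(udph, len, 0));
            if (udph->check == 0)       /* 0 means "no checksum" for UDP */
                    udph->check = CSUM_MANGLED_0;
    }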
net/netfilter/ipvs/ip_vs_sync.c
1014
(const union nf_inet_addr *)&s->vaddr,
net/netfilter/ipvs/ip_vs_sync.c
146
__be32 vaddr; /* virtual address */
net/netfilter/ipvs/ip_vs_sync.c
168
struct in6_addr vaddr; /* virtual address */
net/netfilter/ipvs/ip_vs_sync.c
598
s->vaddr = cp->vaddr.ip;
net/netfilter/ipvs/ip_vs_sync.c
737
s->v6.vaddr = cp->vaddr.in6;
net/netfilter/ipvs/ip_vs_sync.c
744
s->v4.vaddr = cp->vaddr.ip;
net/netfilter/ipvs/ip_vs_sync.c
79
__be32 vaddr; /* virtual address */
net/netfilter/ipvs/ip_vs_sync.c
798
(const union nf_inet_addr *)&sc->v6.vaddr,
net/netfilter/ipvs/ip_vs_sync.c
805
(const union nf_inet_addr *)&sc->v4.vaddr,
net/netfilter/ipvs/ip_vs_sync.c
908
param->vaddr, param->vport, protocol,
net/netfilter/ipvs/ip_vs_xmit.c
666
!ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
net/netfilter/xt_ipvs.c
140
if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr,
net/xdp/xsk.c
848
u8 *vaddr;
net/xdp/xsk.c
876
vaddr = kmap_local_page(page);
net/xdp/xsk.c
877
memcpy(vaddr, buffer, len);
net/xdp/xsk.c
878
kunmap_local(vaddr);
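Note: the net/xdp/xsk.c lines above are the standard short-lived highmem mapping pattern: kmap_local_page() yields a CPU-usable address for a page that may have no permanent kernel mapping, and kunmap_local() releases it. Reassembled as a self-contained helper:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_to_page(struct page *page, const void *src, size_t len)
    {
            u8 *vaddr = kmap_local_page(page);

            memcpy(vaddr, src, len);
            kunmap_local(vaddr);
    }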
sound/pci/asihpi/hpios.c
39
p_mem_area->vaddr =
sound/pci/asihpi/hpios.c
43
if (p_mem_area->vaddr) {
sound/pci/asihpi/hpios.c
46
p_mem_area->vaddr);
sound/pci/asihpi/hpios.c
62
p_mem_area->vaddr, p_mem_area->dma_handle);
sound/pci/asihpi/hpios.c
66
p_mem_area->vaddr);
sound/pci/asihpi/hpios.h
40
void *vaddr;
sound/pci/asihpi/hpios.h
54
*pp_virtual_addr = locked_mem_handle->vaddr;
sound/soc/intel/catpt/loader.c
548
void *vaddr;
sound/soc/intel/catpt/loader.c
562
vaddr = dma_alloc_coherent(cdev->dev, img->size, &paddr, GFP_KERNEL);
sound/soc/intel/catpt/loader.c
563
if (!vaddr) {
sound/soc/intel/catpt/loader.c
568
memcpy(vaddr, img->data, img->size);
sound/soc/intel/catpt/loader.c
569
fw = (struct catpt_fw_hdr *)vaddr;
sound/soc/intel/catpt/loader.c
575
dma_free_coherent(cdev->dev, img->size, vaddr, paddr);
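Note: the catpt/loader.c fragments above form a complete allocate/stage/free cycle for a coherent DMA bounce buffer. Reassembled below, with the actual DSP transfer and most error handling omitted; 'dev', 'data', and 'size' stand in for the driver's context:

    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    static int stage_image(struct device *dev, const void *data, size_t size)
    {
            dma_addr_t paddr;
            void *vaddr = dma_alloc_coherent(dev, size, &paddr, GFP_KERNEL);

            if (!vaddr)
                    return -ENOMEM;
            memcpy(vaddr, data, size);
            /* ... point the device at paddr and wait for completion ... */
            dma_free_coherent(dev, size, vaddr, paddr);
            return 0;
    }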
sound/usb/usx2y/us122l.c
107
vaddr = (char *)s + offset;
sound/usb/usx2y/us122l.c
113
vaddr = us122l->sk.write_page + offset;
sound/usb/usx2y/us122l.c
115
page = virt_to_page(vaddr);
sound/usb/usx2y/us122l.c
96
void *vaddr;
sound/usb/usx2y/usX2Yhwdep.c
25
void *vaddr;
sound/usb/usx2y/usX2Yhwdep.c
28
vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
sound/usb/usx2y/usX2Yhwdep.c
29
page = virt_to_page(vaddr);
sound/usb/usx2y/usx2yhwdeppcm.c
672
void *vaddr;
sound/usb/usx2y/usx2yhwdeppcm.c
675
vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
sound/usb/usx2y/usx2yhwdeppcm.c
676
vmf->page = virt_to_page(vaddr);
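Note: the three sound/usb fault handlers above share one shape: offset into a driver-owned buffer, then virt_to_page(). A sketch, assuming the buffer sits in the kernel direct map (a virt_to_page() precondition) and using a hypothetical device type:

    #include <linux/mm.h>

    static vm_fault_t shm_vm_fault(struct vm_fault *vmf)
    {
            struct my_dev *dev = vmf->vma->vm_private_data; /* hypothetical */
            void *vaddr = (char *)dev->shm + (vmf->pgoff << PAGE_SHIFT);
            struct page *page = virt_to_page(vaddr);

            get_page(page);         /* the core expects a referenced page */
            vmf->page = page;
            return 0;
    }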
tools/perf/util/symbol-elf.c
240
static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
tools/perf/util/symbol-elf.c
259
if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
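Note: the symbol-elf.c test above is a half-open interval containment check: a virtual address belongs to a program header iff it lies in [p_vaddr, p_vaddr + sz). The real sz computation is not shown in this listing; the sketch assumes the in-memory segment size:

    #include <gelf.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool phdr_contains(const GElf_Phdr *phdr, uint64_t vaddr)
    {
            return vaddr >= phdr->p_vaddr &&
                   vaddr < phdr->p_vaddr + phdr->p_memsz;
    }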
tools/testing/selftests/iommu/iommufd.c
2415
.vaddr = (__u64)buffer,
tools/testing/selftests/iommu/iommufd.c
2682
.vaddr = (uintptr_t)buffer,
tools/testing/selftests/iommu/iommufd.c
2715
map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
tools/testing/selftests/iommu/iommufd.c
2750
map_cmd.vaddr = (uintptr_t)buf;
tools/testing/selftests/kvm/arm64/page_fault_test.c
73
static inline void flush_tlb_page(uint64_t vaddr)
tools/testing/selftests/kvm/arm64/page_fault_test.c
75
uint64_t page = vaddr >> 12;
tools/testing/selftests/kvm/arm64/sea_to_user.c
59
static uint64_t translate_to_host_paddr(unsigned long vaddr)
tools/testing/selftests/kvm/arm64/sea_to_user.c
62
int64_t offset = vaddr / getpagesize() * sizeof(pinfo);
tools/testing/selftests/kvm/arm64/sea_to_user.c
81
paddr = page_addr + (vaddr & (getpagesize() - 1));
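Note: translate_to_host_paddr() above uses /proc/pid/pagemap, which stores one 64-bit entry per virtual page. A minimal self-translation sketch (error handling trimmed; layout per Documentation/admin-guide/mm/pagemap.rst: bits 0-54 hold the PFN, bit 63 is "page present"):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static uint64_t vaddr_to_paddr(unsigned long vaddr)
    {
            uint64_t entry, psize = getpagesize();
            int fd = open("/proc/self/pagemap", O_RDONLY);

            pread(fd, &entry, sizeof(entry), vaddr / psize * sizeof(entry));
            close(fd);
            if (!(entry & (1ULL << 63)))
                    return 0;           /* page not present */
            return (entry & ((1ULL << 55) - 1)) * psize +
                   (vaddr & (psize - 1));
    }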
tools/testing/selftests/kvm/include/kvm_util.h
1225
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
tools/testing/selftests/kvm/include/kvm_util.h
1227
static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
tools/testing/selftests/kvm/include/kvm_util.h
1229
virt_arch_pg_map(vm, vaddr, paddr);
tools/testing/selftests/kvm/include/kvm_util.h
1230
sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
tools/testing/selftests/kvm/include/kvm_util.h
731
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
tools/testing/selftests/kvm/include/x86/processor.h
1386
uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr);
tools/testing/selftests/kvm/include/x86/processor.h
1502
void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
tools/testing/selftests/kvm/include/x86/processor.h
1504
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
tools/testing/selftests/kvm/lib/arm64/processor.c
124
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
tools/testing/selftests/kvm/lib/arm64/processor.c
131
TEST_ASSERT((vaddr % vm->page_size) == 0,
tools/testing/selftests/kvm/lib/arm64/processor.c
133
" vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
tools/testing/selftests/kvm/lib/arm64/processor.c
135
(vaddr >> vm->page_shift)),
tools/testing/selftests/kvm/lib/arm64/processor.c
136
"Invalid virtual address, vaddr: 0x%lx", vaddr);
tools/testing/selftests/kvm/lib/arm64/processor.c
145
ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8;
tools/testing/selftests/kvm/lib/arm64/processor.c
152
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
tools/testing/selftests/kvm/lib/arm64/processor.c
158
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
tools/testing/selftests/kvm/lib/arm64/processor.c
164
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
tools/testing/selftests/kvm/lib/arm64/processor.c
177
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
tools/testing/selftests/kvm/lib/arm64/processor.c
181
_virt_pg_map(vm, vaddr, paddr, attr_idx);
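Note: read together, the arm64/processor.c fragments at source lines 145-164 are a four-level page-table walk. Reassembled into one helper, assuming every intermediate descriptor is already valid:

    static uint64_t *walk_to_pte(struct kvm_vm *vm, uint64_t vaddr)
    {
            uint64_t *ptep;

            /* Each level indexes an 8-byte descriptor table with that
             * level's index bits of vaddr; pte_addr() strips attribute
             * bits to recover the next table's guest-physical address. */
            ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8;
            ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
            ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
            ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
            return ptep;                /* leaf descriptor for vaddr */
    }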
tools/testing/selftests/kvm/lib/elf.c
165
vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
tools/testing/selftests/kvm/lib/elf.c
167
TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
tools/testing/selftests/kvm/lib/elf.c
172
n1, seg_vstart, vaddr);
tools/testing/selftests/kvm/lib/elf.c
173
memset(addr_gva2hva(vm, vaddr), 0, seg_size);
tools/testing/selftests/kvm/lib/kvm_util.c
1481
for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
tools/testing/selftests/kvm/lib/kvm_util.c
1482
pages--, vaddr += vm->page_size, paddr += vm->page_size) {
tools/testing/selftests/kvm/lib/kvm_util.c
1484
virt_pg_map(vm, vaddr, paddr);
tools/testing/selftests/kvm/lib/kvm_util.c
1587
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
tools/testing/selftests/kvm/lib/kvm_util.c
1593
TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
tools/testing/selftests/kvm/lib/kvm_util.c
1597
virt_pg_map(vm, vaddr, paddr);
tools/testing/selftests/kvm/lib/kvm_util.c
1599
vaddr += page_size;
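Note: the kvm_util.c fragments above (source lines 1587-1599) reassemble into a simple page-at-a-time range mapper with an overflow guard; a coherent reconstruction (details of the real loop may differ):

    void virt_map_sketch(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                         unsigned int npages)
    {
            size_t page_size = vm->page_size;
            size_t size = (size_t)npages * page_size;

            TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");

            while (npages--) {
                    virt_pg_map(vm, vaddr, paddr); /* arch walk + sparsebit */
                    vaddr += page_size;
                    paddr += page_size;
            }
    }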
tools/testing/selftests/kvm/lib/loongarch/processor.c
118
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
tools/testing/selftests/kvm/lib/loongarch/processor.c
123
TEST_ASSERT((vaddr % vm->page_size) == 0,
tools/testing/selftests/kvm/lib/loongarch/processor.c
125
"vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
tools/testing/selftests/kvm/lib/loongarch/processor.c
127
(vaddr >> vm->page_shift)),
tools/testing/selftests/kvm/lib/loongarch/processor.c
128
"Invalid virtual address, vaddr: 0x%lx", vaddr);
tools/testing/selftests/kvm/lib/loongarch/processor.c
137
ptep = virt_populate_pte(vm, vaddr, 1);
tools/testing/selftests/kvm/lib/riscv/processor.c
107
pte_index(vm, vaddr, level) * 8;
tools/testing/selftests/kvm/lib/riscv/processor.c
78
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
tools/testing/selftests/kvm/lib/riscv/processor.c
83
TEST_ASSERT((vaddr % vm->page_size) == 0,
tools/testing/selftests/kvm/lib/riscv/processor.c
85
" vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
tools/testing/selftests/kvm/lib/riscv/processor.c
87
(vaddr >> vm->page_shift)),
tools/testing/selftests/kvm/lib/riscv/processor.c
88
"Invalid virtual address, vaddr: 0x%lx", vaddr);
tools/testing/selftests/kvm/lib/riscv/processor.c
97
ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8;
tools/testing/selftests/kvm/lib/ucall_common.c
32
vm_vaddr_t vaddr;
tools/testing/selftests/kvm/lib/ucall_common.c
35
vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
tools/testing/selftests/kvm/lib/ucall_common.c
37
hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
tools/testing/selftests/kvm/lib/ucall_common.c
45
write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);
tools/testing/selftests/kvm/lib/x86/processor.c
208
uint64_t *parent_pte, uint64_t vaddr, int level)
tools/testing/selftests/kvm/lib/x86/processor.c
212
int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
tools/testing/selftests/kvm/lib/x86/processor.c
216
level + 1, vaddr);
tools/testing/selftests/kvm/lib/x86/processor.c
224
uint64_t vaddr,
tools/testing/selftests/kvm/lib/x86/processor.c
229
uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
tools/testing/selftests/kvm/lib/x86/processor.c
249
current_level, vaddr);
tools/testing/selftests/kvm/lib/x86/processor.c
252
current_level, vaddr);
tools/testing/selftests/kvm/lib/x86/processor.c
257
void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
tools/testing/selftests/kvm/lib/x86/processor.c
267
TEST_ASSERT((vaddr % pg_size) == 0,
tools/testing/selftests/kvm/lib/x86/processor.c
269
"vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
tools/testing/selftests/kvm/lib/x86/processor.c
270
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
tools/testing/selftests/kvm/lib/x86/processor.c
271
"Invalid virtual address, vaddr: 0x%lx", vaddr);
tools/testing/selftests/kvm/lib/x86/processor.c
292
pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
tools/testing/selftests/kvm/lib/x86/processor.c
299
pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
tools/testing/selftests/kvm/lib/x86/processor.c
301
"PTE already present for 4k page at vaddr: 0x%lx", vaddr);
tools/testing/selftests/kvm/lib/x86/processor.c
316
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
tools/testing/selftests/kvm/lib/x86/processor.c
318
__virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
tools/testing/selftests/kvm/lib/x86/processor.c
321
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
tools/testing/selftests/kvm/lib/x86/processor.c
333
__virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
tools/testing/selftests/kvm/lib/x86/processor.c
334
sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
tools/testing/selftests/kvm/lib/x86/processor.c
337
vaddr += pg_size;
tools/testing/selftests/kvm/lib/x86/processor.c
357
uint64_t vaddr,
tools/testing/selftests/kvm/lib/x86/processor.c
373
(vaddr >> vm->page_shift)),
tools/testing/selftests/kvm/lib/x86/processor.c
375
vaddr);
tools/testing/selftests/kvm/lib/x86/processor.c
379
TEST_ASSERT(vaddr ==
tools/testing/selftests/kvm/lib/x86/processor.c
380
(((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
tools/testing/selftests/kvm/lib/x86/processor.c
386
pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
tools/testing/selftests/kvm/lib/x86/processor.c
391
return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
tools/testing/selftests/kvm/lib/x86/processor.c
401
uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr)
tools/testing/selftests/kvm/lib/x86/processor.c
405
return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
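Note: the assertion at x86/processor.c source lines 379-380 above is a canonicality check in arithmetic form: an address is canonical for a va_width-bit MMU iff sign-extending its low va_width bits reproduces the original value. Isolated as a predicate:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_canonical(uint64_t vaddr, int va_width)
    {
            /* Shift the low va_width bits to the top, then arithmetic-
             * shift back; equality holds iff all bits above va_width-1
             * already matched the sign of bit va_width-1. */
            return vaddr == (uint64_t)((int64_t)vaddr << (64 - va_width) >>
                                       (64 - va_width));
    }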
tools/testing/selftests/mm/hugepage-mremap.c
150
void *vaddr =
tools/testing/selftests/mm/hugepage-mremap.c
152
ksft_print_msg("Map vaddr: Returned address is %p\n", vaddr);
tools/testing/selftests/mm/hugepage-mremap.c
153
if (vaddr == MAP_FAILED)
tools/testing/selftests/mm/hugepage-mremap.c
159
MREMAP_MAYMOVE | MREMAP_FIXED, vaddr);
tools/testing/selftests/mm/memory-failure.c
134
void *vaddr)
tools/testing/selftests/mm/memory-failure.c
136
self->pfn = pagemap_get_pfn(self->pagemap_fd, vaddr);
tools/testing/selftests/mm/memory-failure.c
142
static bool check_memory(void *vaddr, unsigned long size)
tools/testing/selftests/mm/memory-failure.c
148
if (memcmp(vaddr, buf, sizeof(buf)))
tools/testing/selftests/mm/memory-failure.c
151
vaddr += sizeof(buf);
tools/testing/selftests/mm/memory-failure.c
158
void *vaddr, enum result_type type, int setjmp)
tools/testing/selftests/mm/memory-failure.c
172
ASSERT_TRUE(check_memory(vaddr, self->page_size));
tools/testing/selftests/mm/memory-failure.c
175
ASSERT_NE(pagemap_get_pfn(self->pagemap_fd, vaddr), self->pfn);
tools/testing/selftests/mm/memory-failure.c
186
ASSERT_EQ(siginfo.si_addr, vaddr);
tools/testing/selftests/mm/memory-failure.c
189
ASSERT_TRUE(pagemap_is_swapped(self->pagemap_fd, vaddr));
tools/testing/selftests/mm/memory-failure.c
205
void *vaddr)
tools/testing/selftests/mm/memory-failure.c
56
int (*inject)(FIXTURE_DATA(memory_failure) * self, void *vaddr);
tools/testing/selftests/mm/memory-failure.c
59
static int madv_hard_inject(FIXTURE_DATA(memory_failure) * self, void *vaddr)
tools/testing/selftests/mm/memory-failure.c
61
return madvise(vaddr, self->page_size, MADV_HWPOISON);
tools/testing/selftests/mm/memory-failure.c
70
static int madv_soft_inject(FIXTURE_DATA(memory_failure) * self, void *vaddr)
tools/testing/selftests/mm/memory-failure.c
72
return madvise(vaddr, self->page_size, MADV_SOFT_OFFLINE);
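Note: the two memory-failure.c injectors above differ in severity. A sketch of the hard variant (per madvise(2): requires CAP_SYS_ADMIN, and a later access to the poisoned page is expected to raise SIGBUS; MADV_SOFT_OFFLINE instead migrates the contents away without poisoning live data):

    #include <sys/mman.h>
    #include <unistd.h>

    static int poison_page(void *vaddr)
    {
            /* Kernel treats the page as having a real memory error. */
            return madvise(vaddr, getpagesize(), MADV_HWPOISON);
    }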
tools/testing/selftests/mm/split_huge_page_test.c
106
static int vaddr_pageflags_get(char *vaddr, int pagemap_fd, int kpageflags_fd,
tools/testing/selftests/mm/split_huge_page_test.c
111
pfn = pagemap_get_pfn(pagemap_fd, vaddr);
tools/testing/selftests/mm/split_huge_page_test.c
149
char *vaddr;
tools/testing/selftests/mm/split_huge_page_test.c
158
for (vaddr = vaddr_start; vaddr < vaddr_start + len;) {
tools/testing/selftests/mm/split_huge_page_test.c
162
status = vaddr_pageflags_get(vaddr, pagemap_fd, kpageflags_fd,
tools/testing/selftests/mm/split_huge_page_test.c
169
vaddr += psize();
tools/testing/selftests/mm/split_huge_page_test.c
176
vaddr += psize();
tools/testing/selftests/mm/split_huge_page_test.c
182
vaddr += psize();
tools/testing/selftests/mm/split_huge_page_test.c
190
vaddr += psize();
tools/testing/selftests/mm/split_huge_page_test.c
194
next_folio_vaddr = vaddr + (1UL << (cur_order + pshift()));
tools/testing/selftests/mm/split_huge_page_test.c
212
vaddr = next_folio_vaddr;
tools/testing/selftests/mm/split_huge_page_test.c
218
next_folio_vaddr = vaddr + (1UL << (cur_order + pshift()));
tools/testing/selftests/mm/split_huge_page_test.c
44
static bool is_backed_by_folio(char *vaddr, int order, int pagemap_fd,
tools/testing/selftests/mm/split_huge_page_test.c
55
pfn = pagemap_get_pfn(pagemap_fd, vaddr);
tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
119
void *bar0 = device->bars[0].vaddr;
tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
187
void *bar0 = device->bars[0].vaddr;
tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
307
iosubmit_cmds512(device->bars[2].vaddr, desc, 1);
tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
49
return device->driver.region.vaddr;
tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
54
void *bar0 = device->bars[0].vaddr;
tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
82
void *reg = device->bars[0].vaddr + IDXD_SWERR_OFFSET;
tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
118
writeb(intrctrl, device->bars[0].vaddr + IOAT_INTRCTRL_OFFSET);
tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
124
1UL << readb(device->bars[0].vaddr + IOAT_XFERCAP_OFFSET);
tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
28
return device->driver.region.vaddr;
tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
33
return device->bars[0].vaddr + IOAT_CHANNEL_MMIO_SIZE;
tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
45
VFIO_ASSERT_NOT_NULL(device->bars[0].vaddr);
tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
47
version = readb(device->bars[0].vaddr + IOAT_VER_OFFSET);
tools/testing/selftests/vfio/lib/include/libvfio/iommu.h
22
void *vaddr;
tools/testing/selftests/vfio/lib/include/libvfio/iommu.h
59
int __iommu_hva2iova(struct iommu *iommu, void *vaddr, iova_t *iova);
tools/testing/selftests/vfio/lib/include/libvfio/iommu.h
60
iova_t iommu_hva2iova(struct iommu *iommu, void *vaddr);
tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h
106
static inline int __to_iova(struct vfio_pci_device *device, void *vaddr, iova_t *iova)
tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h
108
return __iommu_hva2iova(device->iommu, vaddr, iova);
tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h
111
static inline iova_t to_iova(struct vfio_pci_device *device, void *vaddr)
tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h
113
return iommu_hva2iova(device->iommu, vaddr);
tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h
15
void *vaddr;
tools/testing/selftests/vfio/lib/iommu.c
105
.vaddr = (u64)region->vaddr,
tools/testing/selftests/vfio/lib/iommu.c
123
.user_va = (u64)region->vaddr,
tools/testing/selftests/vfio/lib/iommu.c
69
int __iommu_hva2iova(struct iommu *iommu, void *vaddr, iova_t *iova)
tools/testing/selftests/vfio/lib/iommu.c
74
if (vaddr < region->vaddr)
tools/testing/selftests/vfio/lib/iommu.c
77
if (vaddr >= region->vaddr + region->size)
tools/testing/selftests/vfio/lib/iommu.c
81
*iova = region->iova + (vaddr - region->vaddr);
tools/testing/selftests/vfio/lib/iommu.c
89
iova_t iommu_hva2iova(struct iommu *iommu, void *vaddr)
tools/testing/selftests/vfio/lib/iommu.c
94
ret = __iommu_hva2iova(iommu, vaddr, &iova);
tools/testing/selftests/vfio/lib/iommu.c
95
VFIO_ASSERT_EQ(ret, 0, "%p is not mapped into the iommu\n", vaddr);
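Note: the iommu.c fragments above reduce to one piece of arithmetic: find the mapped region containing the host virtual address, then carry its intra-region offset over to the region's base IOVA. A reconstructed shape, where the region type, list fields, and iteration helper are assumptions:

    int hva2iova_sketch(struct iommu *iommu, void *vaddr, iova_t *iova)
    {
            struct dma_region *region;          /* hypothetical type */

            list_for_each_entry(region, &iommu->regions, link) {
                    if (vaddr < region->vaddr ||
                        vaddr >= region->vaddr + region->size)
                            continue;
                    *iova = region->iova + (vaddr - region->vaddr);
                    return 0;
            }
            return -ENOENT;                     /* vaddr is not mapped */
    }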
tools/testing/selftests/vfio/lib/vfio_pci_device.c
132
void *vaddr;
tools/testing/selftests/vfio/lib/vfio_pci_device.c
135
VFIO_ASSERT_NULL(bar->vaddr);
tools/testing/selftests/vfio/lib/vfio_pci_device.c
156
vaddr = mmap_reserve(size, align, 0);
tools/testing/selftests/vfio/lib/vfio_pci_device.c
157
bar->vaddr = mmap(vaddr, size, prot, MAP_SHARED | MAP_FIXED,
tools/testing/selftests/vfio/lib/vfio_pci_device.c
159
VFIO_ASSERT_NE(bar->vaddr, MAP_FAILED);
tools/testing/selftests/vfio/lib/vfio_pci_device.c
161
madvise(bar->vaddr, size, MADV_HUGEPAGE);
tools/testing/selftests/vfio/lib/vfio_pci_device.c
169
VFIO_ASSERT_NOT_NULL(bar->vaddr);
tools/testing/selftests/vfio/lib/vfio_pci_device.c
171
VFIO_ASSERT_EQ(munmap(bar->vaddr, bar->info.size), 0);
tools/testing/selftests/vfio/lib/vfio_pci_device.c
172
bar->vaddr = NULL;
tools/testing/selftests/vfio/lib/vfio_pci_device.c
180
if (device->bars[i].vaddr)
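Note: the BAR mapping lines above (vfio_pci_device.c source lines 156-157) use a reserve-then-overlay pattern: a PROT_NONE anonymous mapping pins a VA window, and MAP_FIXED atomically replaces it with the real object at the same address. The alignment handling inside mmap_reserve() is omitted in this sketch:

    #include <sys/mman.h>

    static void *map_at_reservation(int fd, size_t size, off_t offset)
    {
            void *vaddr = mmap(NULL, size, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (vaddr == MAP_FAILED)
                    return MAP_FAILED;
            /* MAP_FIXED replaces the reservation in place. */
            return mmap(vaddr, size, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_FIXED, fd, offset);
    }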
tools/testing/selftests/vfio/lib/vfio_pci_driver.c
52
VFIO_ASSERT_NOT_NULL(driver->region.vaddr);
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
110
self->bar->vaddr, self->bar->info.size);
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
119
self->bar->vaddr, getpagesize());
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
127
void *vaddr;
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
129
vaddr = mmap_reserve(size, SZ_1G, getpagesize());
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
130
vaddr = mmap(vaddr, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
132
VFIO_ASSERT_NE(vaddr, MAP_FAILED);
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
134
do_mmio_map_test(self->iommu, self->iova_allocator, vaddr, size);
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
136
VFIO_ASSERT_EQ(munmap(vaddr, size), 0);
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
26
if (!bar->vaddr)
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
85
void *vaddr, size_t size)
tools/testing/selftests/vfio/vfio_dma_mapping_mmio_test.c
88
.vaddr = vaddr,
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
144
region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
147
if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED)
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
150
ASSERT_NE(region.vaddr, MAP_FAILED);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
156
printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
158
ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
197
ASSERT_NE(0, __to_iova(self->device, region.vaddr, NULL));
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
200
ASSERT_TRUE(!munmap(region.vaddr, size));
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
239
region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
241
ASSERT_NE(region->vaddr, MAP_FAILED);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
257
ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
267
ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
281
ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
tools/testing/selftests/vfio/vfio_pci_device_test.c
100
printf("BAR %d mapped at %p (size 0x%llx)\n", i, bar->vaddr, bar->info.size);
tools/testing/selftests/vfio/vfio_pci_device_test.c
90
ASSERT_EQ(NULL, bar->vaddr);
tools/testing/selftests/vfio/vfio_pci_device_test.c
98
ASSERT_NE(NULL, bar->vaddr);
tools/testing/selftests/vfio/vfio_pci_driver_test.c
27
void *vaddr;
tools/testing/selftests/vfio/vfio_pci_driver_test.c
29
vaddr = mmap(NULL, size, prot, flags, -1, 0);
tools/testing/selftests/vfio/vfio_pci_driver_test.c
30
VFIO_ASSERT_NE(vaddr, MAP_FAILED);
tools/testing/selftests/vfio/vfio_pci_driver_test.c
32
region->vaddr = vaddr;
tools/testing/selftests/vfio/vfio_pci_driver_test.c
42
VFIO_ASSERT_EQ(munmap(region->vaddr, region->size), 0);
tools/testing/selftests/vfio/vfio_pci_driver_test.c
50
void *vaddr;
tools/testing/selftests/vfio/vfio_pci_driver_test.c
99
self->src = self->memcpy_region.vaddr;