pfn_to_kaddr
__clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn),
addr = pfn_to_kaddr(max_low_pfn);
hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
(unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
return pfn_to_kaddr(pte_pfn(entry));
vaddr = (unsigned long)pfn_to_kaddr(pfn);
sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
vaddr = (unsigned long)pfn_to_kaddr(pfn);
/*
 * vaddr(page) - kernel virtual address of @page, as an unsigned long.
 * Thin convenience wrapper: page -> pfn -> direct-map kernel address.
 * NOTE(review): pfn_to_kaddr goes through the linear/direct mapping, so
 * this presumably requires a lowmem (direct-mapped) page — confirm no
 * highmem pages are passed by callers.
 */
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
(unsigned long) pfn_to_kaddr(page_to_pfn(
return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
pfn_to_kaddr(page_to_pfn(map->pages[i]));
pfn_to_kaddr(page_to_pfn(map->pages[i]));
uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
return (unsigned long)pfn_to_kaddr(pfn);
start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);