/* Occurrences of kvm_pfn_t across the KVM tree, grouped roughly by area. */
kvm_pfn_t
static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
unsigned long hva, kvm_pfn_t *pfnp,
kvm_pfn_t pfn = *pfnp;
static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
kvm_pfn_t pfn;
kvm_pfn_t pfn;
kvm_pfn_t pfn;
kvm_pfn_t pfn;
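/*
 * The lines above are from the arm64 stage-2 fault path, where
 * user_mem_abort() resolves a host kvm_pfn_t and helpers such as
 * kvm_pte_to_pfn() convert between page-table entries and PFNs.
 * Below is a minimal, self-contained model of that conversion.  The
 * 4KiB granule and the bits-[47:12] output-address field are stated
 * assumptions (arm64 also supports 16K/64K granules and 52-bit
 * physical addresses), and the model_* names are hypothetical, not
 * the kernel's.
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t model_pte_t;
typedef uint64_t model_pfn_t;

#define MODEL_PAGE_SHIFT	12
#define MODEL_PTE_ADDR_MASK	0x0000fffffffff000ull	/* bits [47:12] */

static inline model_pfn_t model_pte_to_pfn(model_pte_t pte)
{
	/* Drop the attribute bits, keep the output address, shift to a PFN. */
	return (pte & MODEL_PTE_ADDR_MASK) >> MODEL_PAGE_SHIFT;
}

static inline model_pte_t model_pfn_to_pte(model_pfn_t pfn, uint64_t attrs)
{
	return ((pfn << MODEL_PAGE_SHIFT) & MODEL_PTE_ADDR_MASK) | attrs;
}

int main(void)
{
	model_pte_t pte = model_pfn_to_pte(0x89abcull, 0x3 /* valid page */);

	assert(model_pte_to_pfn(pte) == 0x89abcull);
	return 0;
}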
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
kvm_pfn_t pfn;
pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
kvm_pfn_t hpaddr;
kvm_pfn_t hpaddr;
kvm_pfn_t pfn;
kvm_pfn_t pfn; /* valid only for TLB0, except briefly */
kvm_pfn_t pfn;
pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
kvm_pfn_t pfn, unsigned int wimg,
kvm_pfn_t pfn = tlbe->pfn;
TP_PROTO(int rflags, ulong hpteg, ulong va, kvm_pfn_t hpaddr,
kvm_pfn_t *hfnp, gpa_t *gpa)
kvm_pfn_t hfn = *hfnp;
kvm_pfn_t hfn;
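/*
 * The PowerPC shared (magic) page lines above repeat the same idiom
 * twice: pfn = virt_to_phys(va) >> PAGE_SHIFT, i.e. translate a
 * kernel linear-map virtual address to a physical address, then shift
 * out the page offset to obtain a kvm_pfn_t.  A self-contained model
 * follows; the linear-map offset and the model_* names are
 * illustrative assumptions, not kernel values.
 */
#include <assert.h>
#include <stdint.h>

#define MODEL_PAGE_SHIFT	12
#define MODEL_LINEAR_BASE	0xc000000000000000ull	/* assumed VA of PA 0 */

typedef uint64_t model_pfn_t;

static inline uint64_t model_virt_to_phys(uint64_t va)
{
	/* Linear map: the physical address is a fixed offset from the VA. */
	return va - MODEL_LINEAR_BASE;
}

int main(void)
{
	uint64_t shared_page = MODEL_LINEAR_BASE + 0x123456000ull;
	model_pfn_t pfn = (model_pfn_t)model_virt_to_phys(shared_page)
			  >> MODEL_PAGE_SHIFT;

	assert(pfn == 0x123456ull);
	return 0;
}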
static inline union pte _pte(kvm_pfn_t pfn, bool writable, bool dirty, bool special)
static inline union crste _crste_fc0(kvm_pfn_t pfn, int tt)
static inline union crste _crste_fc1(kvm_pfn_t pfn, int tt, bool writable, bool dirty)
kvm_pfn_t pfn; /* Host PFN */
kvm_pfn_t pfn, int level, bool wr)
kvm_pfn_t pfn, int level, bool wr);
int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
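/*
 * The three .gmem_* members above are x86's vendor hooks for
 * guest_memfd-backed private memory; the SEV-SNP and TDX functions
 * further down supply the implementations, and the arch wrappers near
 * the end of this listing fan out through this table.  Schematically
 * (eliding the static_call machinery the kernel actually uses, and
 * note that the gfn/pfn argument order differs between wrapper and
 * hook):
 */
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
			  int max_order)
{
	/* Hand the freshly allocated private page to the vendor module. */
	if (kvm_x86_ops.gmem_prepare)
		return kvm_x86_ops.gmem_prepare(kvm, pfn, gfn, max_order);

	return 0;
}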
int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn);
kvm_pfn_t pfn, struct kvm_page_fault *fault)
kvm_pfn_t pfn;
kvm_pfn_t mask;
int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
kvm_pfn_t pfn;
static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
static inline kvm_pfn_t spte_to_pfn(u64 pte)
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
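/*
 * __kvm_is_mmio_pfn()/kvm_is_mmio_pfn() above decide whether a host
 * PFN should be treated as MMIO (and mapped uncached) rather than as
 * ordinary struct-page-backed RAM when an SPTE is built.  The real
 * test consults pfn_valid() and the kernel's memory map; what follows
 * is only a toy model of the idea with a hard-coded RAM map, and all
 * names and ranges are illustrative.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t model_pfn_t;

struct model_ram_range { model_pfn_t first, last; };

/* Assumed e820-style layout: two RAM banks, everything else is MMIO. */
static const struct model_ram_range model_ram[] = {
	{ 0x00000,  0x9ffff  },	/* 0 .. ~2.5GiB */
	{ 0x100000, 0x4fffff },	/* 4GiB .. ~20GiB */
};

static bool model_is_mmio_pfn(model_pfn_t pfn)
{
	for (size_t i = 0; i < sizeof(model_ram) / sizeof(model_ram[0]); i++)
		if (pfn >= model_ram[i].first && pfn <= model_ram[i].last)
			return false;	/* struct-page-backed RAM */

	return true;			/* device memory: map uncached */
}

int main(void)
{
	assert(!model_is_mmio_pfn(0x12345));	/* inside the first RAM bank */
	assert(model_is_mmio_pfn(0xc0000));	/* hole between the banks */
	return 0;
}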
static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
static int snp_rmptable_psmash(kvm_pfn_t pfn)
kvm_pfn_t pfn;
kvm_pfn_t pfn;
static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
kvm_pfn_t pfn = start;
static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
kvm_pfn_t pfn_aligned;
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
kvm_pfn_t pfn;
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
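/*
 * The prototypes and the empty static inline bodies above are the two
 * halves of the usual CONFIG-gated pattern: real sev_gmem_*()
 * declarations when SEV-SNP support is built in, no-op stubs
 * otherwise, so common code can call them without #ifdefs (TDX wires
 * up the same hooks in the lines that follow).  Reassembled, and
 * presumably keyed on CONFIG_KVM_AMD_SEV:
 */
#ifdef CONFIG_KVM_AMD_SEV
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
#else
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn,
				   gfn_t gfn, int max_order)
{
	return 0;
}
static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
#endif
/*
 * Also note ALIGN_DOWN(pfn, PTRS_PER_PMD) above: it rounds a PFN down
 * to a 512-page boundary (2MiB with 4KiB pages), the granule covered
 * by a large RMP entry, e.g. 0x12345 -> 0x12200.
 */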
static int vt_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
kvm_pfn_t pfn)
enum pg_level level, kvm_pfn_t pfn)
kvm_pfn_t pfn = spte_to_pfn(mirror_spte);
static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
kvm_pfn_t pfn;
int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
static inline bool is_error_pfn(kvm_pfn_t pfn)
static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
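/*
 * is_error_pfn() and friends above work because KVM encodes failures
 * in otherwise-unused high bits of kvm_pfn_t, so a gfn->pfn lookup can
 * return either a real frame number or an error code in the same
 * 64-bit value.  The model below mirrors that scheme; the exact masks
 * live in include/linux/kvm_host.h, so treat these values as
 * illustrative.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t model_pfn_t;

#define MODEL_PFN_ERR_MASK	  (0x7ull << 52)  /* in-slot lookup failed */
#define MODEL_PFN_ERR_NOSLOT_MASK (0xfull << 52)  /* ...or gfn not in a slot */
#define MODEL_PFN_NOSLOT	  (0x8ull << 52)
#define MODEL_PFN_ERR_FAULT	  (MODEL_PFN_ERR_MASK)
#define MODEL_PFN_ERR_HWPOISON	  (MODEL_PFN_ERR_MASK + 1)

static bool model_is_error_pfn(model_pfn_t pfn)
{
	return !!(pfn & MODEL_PFN_ERR_MASK);
}

static bool model_is_noslot_pfn(model_pfn_t pfn)
{
	return pfn == MODEL_PFN_NOSLOT;
}

static bool model_is_error_noslot_pfn(model_pfn_t pfn)
{
	return !!(pfn & MODEL_PFN_ERR_NOSLOT_MASK);
}

/* pfn_to_hpa(): a host physical address is just the PFN shifted up. */
static uint64_t model_pfn_to_hpa(model_pfn_t pfn)
{
	return pfn << 12;
}

int main(void)
{
	assert(!model_is_error_noslot_pfn(0x12345));	/* ordinary pfn */
	assert(model_is_error_pfn(MODEL_PFN_ERR_HWPOISON));
	assert(model_is_noslot_pfn(MODEL_PFN_NOSLOT));
	assert(!model_is_error_pfn(MODEL_PFN_NOSLOT));	/* noslot != error */
	assert(model_pfn_to_hpa(0x12345) == 0x12345000ull);
	return 0;
}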
gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
kvm_pfn_t *pfn, struct page **page,
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
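/*
 * kvm_gmem_populate_cb above is how kvm_gmem_populate() hands each
 * just-allocated private page back to the vendor module: the callback
 * receives the gfn, the backing kvm_pfn_t, optional source bytes to
 * encrypt into the page, and the mapping order.  A hedged sketch of a
 * callback follows; the body is invented (sev_gmem_post_populate() and
 * tdx_gmem_post_populate() above are the real examples), and the
 * trailing parameters reflect the upstream typedef as I understand the
 * SNP-era API.
 */
static int example_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				 void __user *src, int order, void *opaque)
{
	/*
	 * A real implementation would copy 'src' (when non-NULL) into
	 * the page backing 'pfn' and then convert the page to its
	 * private/encrypted state, failing the populate on error.
	 */
	return 0;
}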
kvm_pfn_t pfn;
kvm_pfn_t gfn;
kvm_pfn_t pfn;
static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
kvm_pfn_t pfn = page_to_pfn(page);
kvm_pfn_t pfn = folio_file_pfn(folio, index);
pgoff_t index, kvm_pfn_t *pfn,
gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
kvm_pfn_t pfn;
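/*
 * folio_file_pfn() above turns a file index into the PFN of the exact
 * page inside a (possibly multi-page) folio: start from the folio's
 * first PFN and add the index's offset within the folio.  A standalone
 * model, assuming power-of-two folio sizes naturally aligned in the
 * file, as in the page cache:
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t model_pfn_t;

struct model_folio {
	model_pfn_t first_pfn;	/* PFN of the folio's first page */
	uint64_t nr_pages;	/* folio size in pages, power of two */
};

static model_pfn_t model_folio_file_pfn(const struct model_folio *folio,
					uint64_t index)
{
	/* The low bits of the file index select the page in the folio. */
	return folio->first_pfn + (index & (folio->nr_pages - 1));
}

int main(void)
{
	struct model_folio folio = { .first_pfn = 0x4000, .nr_pages = 512 };

	/* File index 0x1203 lands on page 3 of this 2MiB folio. */
	assert(model_folio_file_pfn(&folio, 0x1203) == 0x4003);
	return 0;
}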
static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,
kvm_pfn_t pfn;
static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)
kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp)
kvm_pfn_t pfn;
static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)
kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);
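/*
 * hva_to_pfn_fast()/hva_to_pfn_slow() above form the classic two-tier
 * GUP pattern: first try a lockless get_user_page_fast_only() walk,
 * and only on a miss fall back to the sleeping get_user_pages path.
 * The control flow, heavily simplified (error handling and the
 * follow_pte() fallback for VM_IO/VM_PFNMAP mappings are omitted, and
 * example_hva_to_pfn() is an illustrative stand-in, not the kernel's
 * hva_to_pfn()):
 */
kvm_pfn_t example_hva_to_pfn(struct kvm_follow_pfn *kfp)
{
	kvm_pfn_t pfn;

	/* Lockless fast path: the page must already be mapped with
	 * suitable permissions. */
	if (hva_to_pfn_fast(kfp, &pfn))
		return pfn;

	/* Slow path: may fault the page in, and may sleep. */
	if (hva_to_pfn_slow(kfp, &pfn) == 1)
		return pfn;

	return KVM_PFN_ERR_FAULT;
}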
static void gpc_unmap(kvm_pfn_t pfn, void *khva)
static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
kvm_pfn_t old_pfn;
kvm_pfn_t old_pfn;
static void *gpc_map(kvm_pfn_t pfn)
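/*
 * gpc_map()/gpc_unmap() and hva_to_pfn_retry() above are internals of
 * the gfn_to_pfn_cache, which keeps one guest page both pinned and
 * kernel-mapped so it can be accessed from contexts that cannot fault.
 * Readers follow a check/refresh pattern; a hedged sketch (the
 * kvm_gpc_* names match the API of recent kernels, though details have
 * shifted across versions):
 */
static int example_read_cached_page(struct gfn_to_pfn_cache *gpc, u32 *out)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(*out))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-resolve the gfn->pfn mapping; may sleep. */
		if (kvm_gpc_refresh(gpc, sizeof(*out)))
			return -EFAULT;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* The mapping cannot change while the read lock is held. */
	*out = *(u32 *)gpc->khva;
	read_unlock_irqrestore(&gpc->lock, flags);

	return 0;
}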