hpa_t
hpa_t addr;
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
hpa_t hpa;
hpa_t hpa;
hpa_t hpa = virt_to_phys(vsie_page->scb_o);
hpa_t hpa;
hpa_t hv_root_tdp;
#define INVALID_PAGE (~(hpa_t)0)
hpa_t hv_root_tdp;
void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
hpa_t hpa;
hpa_t mirror_root_hpa;
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
hpa_t root;
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
static inline hpa_t hv_get_partition_assist_page(struct kvm_vcpu *vcpu)
static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
hpa_t shadow_addr;
struct kvm_vcpu *vcpu, hpa_t root,
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
hpa_t root_hpa;
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
hpa_t root;
hpa_t root;
static bool is_unsync_root(hpa_t root)
hpa_t root = vcpu->arch.mmu->root.hpa;
hpa_t root = vcpu->arch.mmu->pae_root[i];
static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
u64 addr, hpa_t root_hpa)
static inline hpa_t kvm_mmu_get_dummy_root(void)
static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
static void vt_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
static u64 construct_eptp(hpa_t root_hpa)
static void vmx_flush_tlb_ept_root(hpa_t root_hpa)
void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
/*
 * Convert a page frame number to a host physical address by shifting the
 * pfn left by PAGE_SHIFT (i.e. hpa = pfn * PAGE_SIZE).
 * NOTE(review): the brace lines of this definition are not visible in this
 * extract; code left byte-identical.
 */
static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
	return (hpa_t)pfn << PAGE_SHIFT;