typedef unsigned long gva_t;
gpa_t (*gva_to_gpa)(gva_t gva);
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
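/*
 * Hedged sketch of the MIPS callback pattern above: implementations fill a
 * table of function pointers, and gva_to_gpa is one slot in it. Standalone
 * model, not kernel code; the struct name, the demo_ helpers, and the
 * kseg0-style 0x1fffffff mask are assumptions (the in-tree VZ callback can
 * simply return the address, since a VZ guest already operates on GPAs).
 */
#include <stdio.h>

typedef unsigned long gva_t;
typedef unsigned long long gpa_t;

struct demo_mips_callbacks {
	gpa_t (*gva_to_gpa)(gva_t gva);
};

/* Strip the virtual-segment bits, kseg0 style; mask is illustrative. */
static gpa_t demo_gva_to_gpa(gva_t gva)
{
	return (gpa_t)(gva & 0x1fffffffUL);
}

static const struct demo_mips_callbacks demo_cb = {
	.gva_to_gpa = demo_gva_to_gpa,
};

int main(void)
{
	printf("gpa = 0x%llx\n", demo_cb.gva_to_gpa(0x80001000UL));
	return 0;
}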
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
				     unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
			gva_t eaddr, void *to, void *from, unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
				      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, u64 root,
				      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
					    struct kvmppc_pte *gpte, u64 table,
					    int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				  struct kvmppc_pte *gpte, bool data,
				  bool iswrite);
int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
	     struct kvmppc_pte *pte, bool data, bool iswrite);
u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
gva_t vaddr_accessed;
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
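/*
 * Hedged sketch of the kvmppc_mmu hook pattern above: Book3S keeps
 * per-MMU-flavour function pointers, and callers translate an effective
 * address by invoking mmu->xlate(). Standalone model; the demo_ structs
 * and the identity translation are assumptions, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long gva_t;
typedef unsigned long long gpa_t;

struct demo_pte {
	gpa_t raddr;		/* translated real (guest physical) address */
	bool may_write;
};

struct demo_mmu {
	int (*xlate)(gva_t eaddr, struct demo_pte *pte, bool data);
};

static int demo_xlate_identity(gva_t eaddr, struct demo_pte *pte, bool data)
{
	pte->raddr = (gpa_t)eaddr;	/* real-mode style: EA maps 1:1 */
	pte->may_write = data;
	return 0;			/* 0 = success, as a convention here */
}

int main(void)
{
	struct demo_mmu mmu = { .xlate = demo_xlate_identity };
	struct demo_pte pte;

	if (!mmu.xlate(0x2000UL, &pte, true))
		printf("raddr = 0x%llx\n", pte.raddr);
	return 0;
}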
static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
					   u32 sre, gva_t eaddr, bool primary)
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
					  struct kvmppc_pte *pte, bool data,
					  bool iswrite)
static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
					  struct kvmppc_pte *pte, bool data,
					  bool iswrite, bool primary)
static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *pte, bool data,
				      bool iswrite)
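/*
 * Hedged sketch of the classic 32-bit HTAB hash that get_pteg()-style
 * helpers above compute: the primary hash is the low 19 VSID bits XORed
 * with the 16-bit page index from the EA; the secondary hash is its
 * one's complement. Standalone model; the demo_ name is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_htab_hash(uint32_t vsid, uint32_t eaddr, int primary)
{
	uint32_t page_index = (eaddr >> 12) & 0xffff;	/* EA[4:19] */
	uint32_t hash = (vsid & 0x7ffff) ^ page_index;

	return primary ? hash : ~hash & 0x7ffff;
}

int main(void)
{
	printf("primary hash:   0x%05x\n",
	       (unsigned)demo_htab_hash(0x12345, 0x0fee1000, 1));
	printf("secondary hash: 0x%05x\n",
	       (unsigned)demo_htab_hash(0x12345, 0x0fee1000, 0));
	return 0;
}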
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
					   struct kvmppc_slb *slbe,
					   gva_t eaddr, bool second)
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data,
				      bool iswrite)
static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
				       ulong *ret_slb)
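/*
 * Hedged sketch of an ea_to_vp/kvmppc_slb_calc_vpn-style computation: a
 * virtual page number is formed from the SLB entry's VSID plus the page
 * offset of the EA within its segment. Standalone model using the classic
 * 256MB-segment, 4K-page case; the demo_ struct and field widths are
 * assumptions (1T segments and large pages change the shifts).
 */
#include <stdint.h>
#include <stdio.h>

struct demo_slb {
	uint64_t vsid;		/* virtual segment ID */
};

static uint64_t demo_calc_vpn(const struct demo_slb *slb, uint64_t eaddr)
{
	uint64_t seg_off = eaddr & ((1ULL << 28) - 1);	/* offset in 256MB */

	/* VA = (VSID << 28) | seg_off; VPN = VA >> 12 for 4K pages. */
	return (slb->vsid << (28 - 12)) | (seg_off >> 12);
}

int main(void)
{
	struct demo_slb slb = { .vsid = 0xabcdef };

	printf("vpn = 0x%llx\n",
	       (unsigned long long)demo_calc_vpn(&slb, 0x10003456000ULL));
	return 0;
}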
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
					 struct kvmppc_pte *gpte, bool data,
					 bool iswrite)
int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			   unsigned long gpa, gva_t ea, int is_store)
unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to,
					      void *from, unsigned long n)
static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from,
					  unsigned long n)
long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
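/*
 * Hedged sketch of the kvmhv_copy_{from,to}_guest_radix() split above:
 * both directions funnel into one tofrom worker, and which of to/from is
 * NULL selects the direction. Standalone model; the demo_ names, the
 * fake RAM array, and the 0-on-success return are assumptions, not the
 * kernel's exact contract.
 */
#include <stdio.h>
#include <string.h>

typedef unsigned long gva_t;

static char fake_guest_ram[32] = "guest-data";

/* One worker for both directions; exactly one of to/from is non-NULL. */
static long demo_copy_tofrom_guest(gva_t eaddr, void *to, void *from,
				   unsigned long n)
{
	unsigned long off = eaddr % sizeof(fake_guest_ram);

	if (n > sizeof(fake_guest_ram) - off)
		return -1;	/* stand-in for a failed translation */
	if (to)
		memcpy(to, fake_guest_ram + off, n);	/* read from guest */
	else
		memcpy(fake_guest_ram + off, from, n);	/* write to guest */
	return 0;
}

static long demo_copy_from_guest(gva_t eaddr, void *to, unsigned long n)
{
	return demo_copy_tofrom_guest(eaddr, to, NULL, n);
}

int main(void)
{
	char buf[16] = { 0 };

	if (!demo_copy_from_guest(0, buf, 10))
		printf("read: %s\n", buf);
	return 0;
}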
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
gva_t ea;
gva_t eaddr, int as)
gva_t eaddr;
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
		       gva_t ea)
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				  gva_t eaddr, unsigned int pid, int as)
gva_t eaddr;
static int tlb0_set_base(gva_t addr, int sets, int ways)
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
		       gva_t eaddr)
static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
				 gva_t eaddr, int tlbsel, unsigned int pid,
				 int as)
gva_t geaddr;
gva_t eaddr;
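/*
 * Hedged sketch of the tlb0_set_base()/gtlb0_set_base() arithmetic above:
 * a set-associative TLB0 is stored as a flat array, so the base index of
 * the set covering an address is (page number mod sets) * ways. Standalone
 * model; the demo_ name, PAGE_SHIFT of 12, and the 128x4 geometry are
 * assumptions.
 */
#include <stdio.h>

typedef unsigned long gva_t;

static int demo_tlb0_set_base(gva_t addr, int sets, int ways)
{
	return ((addr >> 12) & (sets - 1)) * ways;	/* sets is a power of 2 */
}

int main(void)
{
	/* e.g. a 512-entry TLB0 organized as 128 sets x 4 ways. */
	printf("set base = %d\n", demo_tlb0_set_base(0x10045000UL, 128, 4));
	return 0;
}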
void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr);
gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
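/*
 * Hedged usage sketch for the kvm_mmu_gva_to_gpa_*() helpers above: the
 * caller supplies a struct x86_exception and checks for a failed walk,
 * injecting the page fault back into the guest. Kernel-context pseudocode,
 * not a verbatim call site; demo_read_guest_u64 is a made-up wrapper, and
 * the failure sentinel is INVALID_GPA (UNMAPPED_GVA in older trees).
 */
static int demo_read_guest_u64(struct kvm_vcpu *vcpu, gva_t gva, u64 *val)
{
	struct x86_exception e;
	gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &e);

	if (gpa == INVALID_GPA) {
		kvm_inject_emulated_page_fault(vcpu, &e);  /* reflect the #PF */
		return 1;
	}
	return kvm_vcpu_read_guest(vcpu, gpa, val, sizeof(*val));
}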
gva_t cs_addr;
gva_t eip_addr;
gva_t gva;
gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
			   unsigned int flags);
bool (*is_canonical_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
			  unsigned int flags);
gva_t gva = fault->is_tdp ? 0 : fault->addr;
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
gva_t gva = kvm_rax_read(vcpu);
gva_t gva;
static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
__field(gva_t, gva)
static void vt_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
gva_t off;
off = (gva_t)sign_extend64(off, 31);
off = (gva_t)sign_extend64(off, 15);
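/*
 * Hedged sketch of the sign_extend64() casts above: a displacement decoded
 * in fewer bits than a gva_t (bit index 31 or 15 here) must be sign-extended
 * before it is added to the segment base. Standalone reimplementation of the
 * helper's semantics; the kernel's version lives in linux/bitops.h, and the
 * demo_ name is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t demo_sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;	/* index is the 0-based sign-bit position */

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* 0x8000 as a 16-bit displacement is -32768, not +32768. */
	printf("%lld\n", (long long)demo_sign_extend64(0x8000, 15));
	printf("%lld\n", (long long)demo_sign_extend64(0x80000000ULL, 31));
	return 0;
}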
gva_t gva;
gva_t gva = 0;
gva_t gva;
gva_t gva;
gva_t gva;
gva_t gva;
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr)
gva_t secs_gva)
gva_t pageinfo_gva, secs_gva;
gva_t metadata_gva, contents_gva;
static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
			     int size, int alignment, gva_t *gva)
gva_t sig_gva, secs_gva, token_gva;
static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write,
			  gpa_t *gpa)
void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
gva_t gva;
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr);
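/*
 * Hedged sketch of what __invvpid()/vpid_sync_vcpu_addr() above hand to
 * the hardware: INVVPID takes a 128-bit in-memory descriptor holding the
 * VPID and, for the address-specific invalidation type, the GVA to flush.
 * Layout mirrors the Intel SDM; the demo_ struct name is an assumption
 * and this is not the kernel's exact code.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_invvpid_desc {
	uint16_t vpid;
	uint16_t rsvd[3];	/* bits 63:16 must be zero */
	uint64_t gva;		/* linear address, used by the per-address type */
} __attribute__((packed));

int main(void)
{
	/* The descriptor must be exactly 16 bytes. */
	printf("descriptor size: %zu bytes\n",
	       sizeof(struct demo_invvpid_desc));
	return 0;
}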
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u64 access,
				      struct x86_exception *exception)
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception)
int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception)
static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
			     struct x86_exception *exception, bool system)
static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				       struct kvm_vcpu *vcpu, u64 access,
				       struct x86_exception *exception)
static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
			      unsigned int bytes, struct x86_exception *exception,
			      bool system)
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
				unsigned int bytes, struct x86_exception *exception)
static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
					gva_t addr, unsigned int flags)
static bool emulator_is_canonical_addr(struct x86_emulate_ctxt *ctxt,
				       gva_t addr, unsigned int flags)
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
#define MMIO_GVA_ANY (~(gva_t)0)
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
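/*
 * Hedged sketch of the MMIO_GVA_ANY convention above: the cached MMIO
 * translation is invalidated either for one page or wholesale, with the
 * all-ones gva_t acting as a wildcard. Standalone model of the page-match
 * test in vcpu_clear_mmio_info(); the demo_ struct and field names are
 * assumptions.
 */
#include <stdio.h>

typedef unsigned long gva_t;

#define DEMO_PAGE_MASK		(~((1UL << 12) - 1))
#define DEMO_MMIO_GVA_ANY	(~(gva_t)0)

struct demo_vcpu {
	gva_t mmio_gva;		/* page-aligned cached GVA, 0 = none */
};

static void demo_clear_mmio_info(struct demo_vcpu *vcpu, gva_t gva)
{
	if (gva != DEMO_MMIO_GVA_ANY &&
	    vcpu->mmio_gva != (gva & DEMO_PAGE_MASK))
		return;		/* a different page is cached: keep it */

	vcpu->mmio_gva = 0;
}

int main(void)
{
	struct demo_vcpu v = { .mmio_gva = 0x7f001000UL };

	demo_clear_mmio_info(&v, 0x12345678UL);		/* no-op: other page */
	printf("after miss: 0x%lx\n", v.mmio_gva);
	demo_clear_mmio_info(&v, DEMO_MMIO_GVA_ANY);	/* wildcard clears */
	printf("after ANY:  0x%lx\n", v.mmio_gva);
	return 0;
}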
int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception);
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
			sched_poll.nr_ports * sizeof(*ports), &e)) {
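/*
 * Hedged sketch of the cast idiom on the last line above: a guest pointer
 * arrives as a plain integer field and must be cast to gva_t before
 * kvm_read_guest_virt() can walk the guest page tables for it.
 * Kernel-context pseudocode; struct demo_guest_req and demo_fetch_words
 * are made up for illustration, and real code must bounds-check nr first.
 */
struct demo_guest_req {
	u64 uaddr;		/* guest virtual address of a u32 array */
	u32 nr;			/* element count, guest-controlled */
};

static int demo_fetch_words(struct kvm_vcpu *vcpu,
			    const struct demo_guest_req *req, u32 *buf)
{
	struct x86_exception e;

	/* Cast the raw guest pointer to gva_t for the page-table walk. */
	return kvm_read_guest_virt(vcpu, (gva_t)req->uaddr, buf,
				   req->nr * sizeof(u32), &e);
}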