kvm_pte_t
static inline bool kvm_pte_valid(kvm_pte_t pte)
static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
kvm_pte_t pte;
static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
kvm_pte_t *ptep;
kvm_pte_t old;
typedef kvm_pte_t *kvm_pteref_t;
static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
typedef kvm_pte_t __rcu *kvm_pteref_t;
static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
kvm_pte_t *ptep, s8 *level);
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
kvm_pte_t pte;
kvm_pte_t pte;
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
kvm_pte_t pte, *ptep = slot->ptep;
kvm_pte_t *ptep = slot->ptep;
kvm_pte_t *ptep;
kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
kvm_pte_t *childp, new;
static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level)
kvm_pte_t pte = kvm_phys_to_pte(pa);
kvm_pte_t *childp = NULL;
static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
kvm_pte_t attr_set;
kvm_pte_t attr_clr;
kvm_pte_t pte;
kvm_pte_t pte = ctx->old;
u64 size, kvm_pte_t attr_set,
kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
kvm_pte_t xn = 0, set = 0, clr = 0;
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
kvm_pte_t *pgtable;
kvm_pte_t pte = ctx->old, new, *childp;
kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
kvm_pte_t pte;
kvm_pte_t *ptep, s8 *level)
kvm_pte_t attr;
static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
kvm_pte_t new;
kvm_pte_t *childp, new;
childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
kvm_pte_t *childp = NULL;
kvm_pte_t attr;
kvm_pte_t *anchor;
kvm_pte_t *childp;
kvm_pte_t __attr; \
static int stage2_set_xn_attr(enum kvm_pgtable_prot prot, kvm_pte_t *attr)
kvm_pte_t *ptep)
kvm_pte_t attr;
static bool kvm_pte_table(kvm_pte_t pte, s8 level)
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
static bool stage2_pte_is_counted(kvm_pte_t pte)
static bool stage2_pte_is_locked(kvm_pte_t pte)
static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
static void kvm_clear_pte(kvm_pte_t *ptep)
static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
static bool stage2_pte_executable(kvm_pte_t pte)
static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
kvm_pte_t new;
kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
kvm_pte_t pte = 0; /* Keep GCC quiet... */
kvm_pte_t pte;
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
kvm_pte_t *pgd;
static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table,
static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx);
kvm_pte_t *kvm_pgd_alloc(void);
static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
/*
 * Test the Accessed ("young") bit of a guest PTE.
 *
 * Returns the raw masked bit, not a normalized 0/1 value, so callers
 * must only use the result as a boolean.
 * NOTE(review): assumes _PAGE_ACCESSED lies within the low 31 bits of
 * kvm_pte_t; a higher bit would be lost in the implicit narrowing to
 * int — confirm against the architecture's page-table flag layout.
 */
static inline int kvm_pte_young(kvm_pte_t pte)
{
	return pte & _PAGE_ACCESSED;
}
/*
 * Test whether a guest PTE maps a huge page.
 *
 * Returns the raw masked _PAGE_HUGE bit, not a normalized 0/1 value,
 * so callers must only use the result as a boolean.
 * NOTE(review): assumes _PAGE_HUGE lies within the low 31 bits of
 * kvm_pte_t; a higher bit would be lost in the implicit narrowing to
 * int — confirm against the architecture's page-table flag layout.
 */
static inline int kvm_pte_huge(kvm_pte_t pte)
{
	return pte & _PAGE_HUGE;
}
/*
 * Test whether a guest PTE is dirty (writable-and-modified).
 *
 * Masks with __WRITEABLE; presumably that macro combines the write and
 * modified/dirty hardware bits — verify against the arch definition.
 * Returns the raw masked bits, not a normalized 0/1 value, so callers
 * must only use the result as a boolean.
 * NOTE(review): assumes the __WRITEABLE bits lie within the low 31 bits
 * of kvm_pte_t; higher bits would be lost in the implicit narrowing to
 * int — confirm against the architecture's page-table flag layout.
 */
static inline int kvm_pte_dirty(kvm_pte_t pte)
{
	return pte & __WRITEABLE;
}
/*
 * Test the KVM software "writeable" bit of a guest PTE.
 *
 * Distinct from kvm_pte_dirty(): this checks KVM's own software flag
 * (KVM_PAGE_WRITEABLE) rather than the hardware write/dirty bits.
 * Returns the raw masked bit, not a normalized 0/1 value, so callers
 * must only use the result as a boolean.
 * NOTE(review): assumes KVM_PAGE_WRITEABLE lies within the low 31 bits
 * of kvm_pte_t; a higher bit would be lost in the implicit narrowing to
 * int — confirm against the flag's definition.
 */
static inline int kvm_pte_writeable(kvm_pte_t pte)
{
	return pte & KVM_PAGE_WRITEABLE;
}
static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte)
kvm_pte_t *kvm_pgd_alloc(void)
kvm_pte_t *pgd;
pgd = (kvm_pte_t *)__get_free_pages(GFP_KERNEL, 0);
static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
kvm_pte_t *entry, *child;
child = (kvm_pte_t *)__va(PHYSADDR(*entry));
static int kvm_ptw_leaf(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
kvm_pte_t *entry, *child;
child = (kvm_pte_t *)__va(PHYSADDR(*dir));
static int kvm_ptw_dir(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
kvm_pte_t *entry, *child;
child = (kvm_pte_t *)__va(PHYSADDR(*dir));
static int kvm_ptw_top(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
kvm_pte_t *entry;
static int kvm_mkold_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
static int kvm_mkclean_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
kvm_pte_t *ptep, changed, new;
kvm_pte_t val;
static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn)
kvm_pte_t val, *child;
kvm_pte_t *ptep, new_pte;
static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)