host_mmu

host_mmu is the single global instance that holds the host's stage-2 MMU state in the arm64 pKVM (nVHE) hypervisor. It is declared in arch/arm64/kvm/hyp/include/nvhe/mem_protect.h and defined in arch/arm64/kvm/hyp/nvhe/mem_protect.c:

        extern struct host_mmu host_mmu;        /* include/nvhe/mem_protect.h */
        struct host_mmu host_mmu;               /* nvhe/mem_protect.c */
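For orientation, the structure bundles the host's kvm_arch, its stage-2 page table, the memory-management callbacks and the serialising lock; the layout below follows the upstream header on recent kernels:

        /* Layout as in include/nvhe/mem_protect.h (recent kernels). */
        struct host_mmu {
                struct kvm_arch arch;
                struct kvm_pgtable pgt;
                struct kvm_pgtable_mm_ops mm_ops;
                hyp_spinlock_t lock;
        };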
Initialization happens once, during kvm_host_prepare_stage2() in mem_protect.c: the host VTCR is derived from the sanitised ID registers, the lock is initialised, and the stage-2 page table is built with host_mmu's own memory ops:

        struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;

        host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val, ...);
        hyp_spin_lock_init(&host_mmu.lock);
        mmu->arch = &host_mmu.arch;

        host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) { ... };

        ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
                                        &host_mmu.mm_ops, KVM_HOST_S2_FLAGS, ...);

        mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
        mmu->pgt = &host_mmu.pgt;

The same mm_ops are handed back to the page-table code when unlinked host stage-2 tables are freed, and a page-table selftest borrows the host VTCR for its scratch VM:

        kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
        selftest_vm.kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
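Filling in the elided arguments, the flow looks roughly like this; the callback set and the host_stage2_force_pte_cb argument follow upstream mem_protect.c and may differ between kernel versions:

        u32 parange, phys_shift;

        /* The host stage-2 is identity-mapped, so size it to the PA range. */
        parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
        phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

        host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
                                              id_aa64mmfr1_el1_sys_val,
                                              phys_shift);

        /* Table pages come from the hyp pool donated by the host. */
        host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_pages_exact     = host_s2_zalloc_pages_exact,
                .zalloc_page            = host_s2_zalloc_page,
                .phys_to_virt           = hyp_phys_to_virt,
                .virt_to_phys           = hyp_virt_to_phys,
                .page_count             = hyp_page_count,
                .get_page               = host_s2_get_page,
                .put_page               = host_s2_put_page,
        };

        ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu, &host_mmu.mm_ops,
                                        KVM_HOST_S2_FLAGS,
                                        host_stage2_force_pte_cb);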
The host runs under this stage-2 once protected mode is finalised. The __load_host_stage2() helper in mem_protect.h installs it whenever a CPU switches back to the host context, and the finalisation path (__pkvm_prot_finalize()) loads it with the same call:

        __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);

Helpers in mem_protect.c typically take local aliases instead of spelling out the full path each time:

        struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
        struct kvm_pgtable *pgt = &host_mmu.pgt;

and the size of the host's IPA space, in pages, is derived from the stage-2 configuration when range-checking addresses:

        u64 limit = BIT(kvm_phys_shift(&host_mmu.arch.mmu) - PAGE_SHIFT);
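The loading helper itself is small; a sketch following the upstream header, where the static branch keeps the host stage-2 disabled until protected mode is fully initialised:

        static __always_inline void __load_host_stage2(void)
        {
                if (static_branch_likely(&kvm_protected_mode_initialized))
                        __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
                else
                        write_sysreg(0, vttbr_el2);
        }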
Every update of the host stage-2 is serialised by host_mmu.lock:

        hyp_spin_lock(&host_mmu.lock);
        ...
        hyp_spin_unlock(&host_mmu.lock);

The *_locked helpers and the host_stage2_try() macro do not take the lock themselves; each asserts that the caller already holds it:

        hyp_assert_lock_held(&host_mmu.lock);
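In mem_protect.c the lock is wrapped in small helpers, and host_stage2_try() retries an operation after reclaiming pages when the allocator runs dry; a sketch along the lines of the upstream code (error-handling details vary by version):

        static void host_lock_component(void)
        {
                hyp_spin_lock(&host_mmu.lock);
        }

        static void host_unlock_component(void)
        {
                hyp_spin_unlock(&host_mmu.lock);
        }

        /*
         * Run fn(...), and on -ENOMEM reclaim device mappings from the
         * host stage-2 before retrying once.
         */
        #define host_stage2_try(fn, ...)                                \
                ({                                                      \
                        int __ret;                                      \
                        hyp_assert_lock_held(&host_mmu.lock);           \
                        __ret = fn(__VA_ARGS__);                        \
                        if (__ret == -ENOMEM) {                         \
                                __ret = host_stage2_unmap_dev_all();    \
                                if (!__ret)                             \
                                        __ret = fn(__VA_ARGS__);        \
                        }                                               \
                        __ret;                                          \
                })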
With the lock held, all page-table updates go through host_mmu.pgt. The host is identity-mapped, so a range is mapped with the same address as IPA and PA, ownership annotations are written into the same table, and helpers walk it to inspect the current mapping:

        return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start, ...);

        ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt, ...);

        ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
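The mapping call above sits inside a thin identity-map wrapper; a sketch assuming the upstream names __host_stage2_idmap and host_s2_pool (the trailing flags argument exists only on newer kernels):

        /* Identity-map [start, end) in the host stage-2 with the given prot. */
        static int __host_stage2_idmap(u64 start, u64 end,
                                       enum kvm_pgtable_prot prot)
        {
                return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start,
                                              start, prot, &host_s2_pool, 0);
        }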
Protected guests inherit their stage-2 configuration from the host: the EL2 VM-initialisation path in nvhe/pkvm.c copies the host VTCR into the guest MMU and sizes the page-table donation from it:

        mmu->vtcr = host_mmu.arch.mmu.vtcr;
        pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

Code that deals with both host and guest MMUs keeps an explicit alias for the host side:

        struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
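A condensed sketch of that inheritance; hyp_vm stands in for the real pkvm.c context, and the field names follow recent upstream kernels:

        /* Sketch: a pKVM guest's stage-2 is configured like the host's. */
        struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
        size_t pgd_size;

        mmu->vtcr = host_mmu.arch.mmu.vtcr;     /* same IPA size and levels */
        mmu->arch = &hyp_vm->kvm.arch;
        mmu->pgt = &hyp_vm->pgt;

        /* Bytes the host must donate to back the guest's PGD. */
        pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);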