#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

struct tlb_inv_context {
struct kvm_s2_mmu *mmu;
u64 tcr;
u64 sctlr;
};

static void enter_vmid_context(struct kvm_s2_mmu *mmu,
struct tlb_inv_context *cxt,
bool nsh)
{
struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
vcpu = host_ctxt->__hyp_running_vcpu;
cxt->mmu = NULL;
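
/*
 * Make earlier page-table updates visible to the relevant shareability
 * domain and complete any speculative walks started before the trap to
 * EL2: dsb(nsh) for CPU-local invalidations, dsb(ish) for broadcast ones.
 */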
if (nsh)
dsb(nsh);
else
dsb(ish);
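
/*
 * If the requested MMU is the one that is already loaded there is
 * nothing to do. Otherwise record the currently loaded MMU so that
 * exit_vmid_context() can restore it.
 */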
if (vcpu) {
if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
return;
cxt->mmu = vcpu->arch.hw_mmu;
} else {
if (mmu == host_s2_mmu)
return;
cxt->mmu = host_s2_mmu;
}
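
/*
 * With the ARM64_WORKAROUND_SPECULATIVE_AT erratum workaround, block
 * Stage-1 walks by setting TCR_EL1.EPD0/EPD1 and keep the Stage-1 MMU
 * enabled, so a speculative AT walk cannot pick up a stale translation
 * while the VMID is being switched.
 */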
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
val = cxt->tcr = read_sysreg_el1(SYS_TCR);
val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
write_sysreg_el1(val, SYS_TCR);
isb();
if (vcpu) {
val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
if (!(val & SCTLR_ELx_M)) {
val |= SCTLR_ELx_M;
write_sysreg_el1(val, SYS_SCTLR);
isb();
}
} else {
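/* The host Stage-1 MMU is always on. */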
cxt->sctlr = SCTLR_ELx_M;
}
}
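
/*
 * The __load_*stage2() helpers only end with an ISB when the
 * SPECULATIVE_AT workaround is applied; the ALTERNATIVE below provides
 * the ISB in the other case, so there is always exactly one ISB.
 */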
if (vcpu)
__load_host_stage2();
else
__load_stage2(mmu, kern_hyp_va(mmu->arch));
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void exit_vmid_context(struct tlb_inv_context *cxt)
{
struct kvm_s2_mmu *mmu = cxt->mmu;
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
vcpu = host_ctxt->__hyp_running_vcpu;
if (!mmu)
return;
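
/* Reload the MMU that was live before enter_vmid_context(). */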
if (vcpu)
__load_stage2(mmu, kern_hyp_va(mmu->arch));
else
__load_host_stage2();
isb();
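
/* Undo the TCR_EL1/SCTLR_EL1 changes made by enter_vmid_context(). */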
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
if (!(cxt->sctlr & SCTLR_ELx_M)) {
write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
isb();
}
write_sysreg_el1(cxt->tcr, SYS_TCR);
}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
phys_addr_t ipa, int level)
{
struct tlb_inv_context cxt;
enter_vmid_context(mmu, &cxt, false);
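
/*
 * Without the VA we can only invalidate the Stage-2 entries for this
 * IPA, and then the whole of Stage-1 for this VMID.
 */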
ipa >>= 12;
__tlbi_level(ipas2e1is, ipa, level);
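
/*
 * Make sure the Stage-2 invalidation has completed before the Stage-1
 * one, so that no walker on another CPU can refill a combined S1+S2
 * TLB entry from the old Stage-2 mapping.
 */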
dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
isb();
exit_vmid_context(&cxt);
}
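
/*
 * Same as __kvm_tlb_flush_vmid_ipa(), but non-shareable: only the TLBs
 * of the local CPU are invalidated.
 */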
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
phys_addr_t ipa, int level)
{
struct tlb_inv_context cxt;
enter_vmid_context(mmu, &cxt, true);
ipa >>= 12;
__tlbi_level(ipas2e1, ipa, level);
dsb(nsh);
__tlbi(vmalle1);
dsb(nsh);
isb();
exit_vmid_context(&cxt);
}

void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
phys_addr_t start, unsigned long pages)
{
struct tlb_inv_context cxt;
unsigned long stride;
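
/*
 * The range may not be mapped at a single level, so assume the
 * worst-case stride of PAGE_SIZE.
 */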
stride = PAGE_SIZE;
start = round_down(start, stride);
enter_vmid_context(mmu, &cxt, false);
__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
TLBI_TTL_UNKNOWN);
dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
isb();
exit_vmid_context(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
enter_vmid_context(mmu, &cxt, false);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
exit_vmid_context(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
enter_vmid_context(mmu, &cxt, false);
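
/*
 * Local (non-broadcast) invalidation of this CPU's TLB entries for the
 * current VMID, plus its instruction cache.
 */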
__tlbi(vmalle1);
asm volatile("ic iallu");
dsb(nsh);
isb();
exit_vmid_context(&cxt);
}

void __kvm_flush_vm_context(void)
{
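/*
 * Make prior page-table updates visible, then invalidate the TLB
 * entries of every VMID across the Inner Shareable domain.
 */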
dsb(ish);
__tlbi(alle1is);
dsb(ish);
}