#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__
#include <hyp/adjust_pc.h>
#include <hyp/fault.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
#include <kvm/arm_psci.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/traps.h>
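/*
 * Each entry is a pair of relative offsets: 'insn' points back at the hyp
 * instruction that may fault, and 'fixup' at the code that handles the
 * fault. See __kvm_unexpected_el2_exception() for the lookup.
 */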
struct kvm_exception_table_entry {
int insn, fixup;
};
extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;
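/* Save the 32-bit only FPSIMD system register state */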
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
if (!vcpu_el1_is_32bit(vcpu))
return;
__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
}
static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
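	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2. However, the ARM ARM is clear that traps
	 * are only taken to EL2 if the operation would not otherwise trap to
	 * EL1, so for a 32-bit guest we must set FPEXC.EN to prevent traps
	 * to EL1 first. FPEXC is UNDEFINED when FP/ASIMD is not implemented,
	 * hence the system_supports_fpsimd() check.
	 */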
if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd())
write_sysreg(1 << 30, fpexc32_el2);
}
static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;
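	/*
	 * Always trap SME since it's not supported in KVM.
	 * TSM is RES1 if SME isn't implemented.
	 */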
val |= CPTR_EL2_TSM;
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
val |= CPTR_EL2_TZ;
if (!guest_owns_fp_regs())
val |= CPTR_EL2_TFP;
write_sysreg(val, cptr_el2);
}
static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
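	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM. CPTR_EL2.TAM has
	 * the same bit position with or without VHE, which is why it can be
	 * used here to trap the AMU accesses.
	 */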
u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA;
u64 cptr;
if (guest_owns_fp_regs()) {
val |= CPACR_EL1_FPEN;
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
}
if (!vcpu_has_nv(vcpu))
goto write;
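	/*
	 * An EL2 guest writing to CPTR_EL2 via CPACR_EL1 can't set TCPAC or
	 * TTA, as those bits are RES0 in the guest's view. Work around it by
	 * trapping with the very bit the guest can't set.
	 */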
if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
val |= CPTR_EL2_TCPAC;
if (is_hyp_ctxt(vcpu))
goto write;
cptr = vcpu_sanitised_cptr_el2(vcpu);
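	/*
	 * The CPTR_EL2.xEN fields are 2 bits wide, but with HCR_EL2.TGE = 0
	 * only bit[0] is meaningful: x0 means accesses trap, x1 means they
	 * don't. Clear the whole field whenever the guest hypervisor has
	 * traps enabled, to keep things simple.
	 */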
if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
val &= ~CPACR_EL1_FPEN;
if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
val &= ~CPACR_EL1_ZEN;
if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
val |= cptr & CPACR_EL1_E0POE;
val |= cptr & CPTR_EL2_TCPAC;
write:
write_sysreg(val, cpacr_el1);
}
static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
if (!guest_owns_fp_regs())
__activate_traps_fpsimd32(vcpu);
if (has_vhe() || has_hvhe())
__activate_cptr_traps_vhe(vcpu);
else
__activate_cptr_traps_nvhe(vcpu);
}
static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_NVHE_EL2_RES1;
if (!cpus_have_final_cap(ARM64_SVE))
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
}
static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
u64 val = CPACR_EL1_FPEN;
if (cpus_have_final_cap(ARM64_SVE))
val |= CPACR_EL1_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN;
write_sysreg(val, cpacr_el1);
}
static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
if (has_vhe() || has_hvhe())
__deactivate_cptr_traps_vhe(vcpu);
else
__deactivate_cptr_traps_nvhe(vcpu);
}
static inline bool cpu_has_amu(void)
{
u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
return cpuid_feature_extract_unsigned_field(pfr0,
ID_AA64PFR0_EL1_AMU_SHIFT);
}
#define __activate_fgt(hctxt, vcpu, reg) \
do { \
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg); \
} while (0)
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
__activate_fgt(hctxt, vcpu, HFGRTR_EL2);
__activate_fgt(hctxt, vcpu, HFGWTR_EL2);
__activate_fgt(hctxt, vcpu, HFGITR_EL2);
__activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
__activate_fgt(hctxt, vcpu, HDFGWTR_EL2);
if (cpu_has_amu())
__activate_fgt(hctxt, vcpu, HAFGRTR_EL2);
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
return;
__activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
__activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
__activate_fgt(hctxt, vcpu, HFGITR2_EL2);
__activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
__activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}
#define __deactivate_fgt(hctxt, vcpu, reg)				\
	do {								\
		write_sysreg_s(ctxt_sys_reg(hctxt, reg),		\
			       SYS_ ## reg);				\
	} while (0)
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
__deactivate_fgt(hctxt, vcpu, HFGRTR_EL2);
__deactivate_fgt(hctxt, vcpu, HFGWTR_EL2);
__deactivate_fgt(hctxt, vcpu, HFGITR_EL2);
__deactivate_fgt(hctxt, vcpu, HDFGRTR_EL2);
__deactivate_fgt(hctxt, vcpu, HDFGWTR_EL2);
if (cpu_has_amu())
__deactivate_fgt(hctxt, vcpu, HAFGRTR_EL2);
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
return;
__deactivate_fgt(hctxt, vcpu, HFGRTR2_EL2);
__deactivate_fgt(hctxt, vcpu, HFGWTR2_EL2);
__deactivate_fgt(hctxt, vcpu, HFGITR2_EL2);
__deactivate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
__deactivate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}
static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
{
u64 r = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;
if (!system_supports_mpam())
return;
if (system_supports_mpam_hcr()) {
write_sysreg_s(MPAMHCR_EL2_TRAP_MPAMIDR_EL1, SYS_MPAMHCR_EL2);
} else {
r |= MPAM2_EL2_TIDR;
}
write_sysreg_s(r, SYS_MPAM2_EL2);
}
static inline void __deactivate_traps_mpam(void)
{
if (!system_supports_mpam())
return;
write_sysreg_s(0, SYS_MPAM2_EL2);
if (system_supports_mpam_hcr())
write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
}
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
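	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */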
write_sysreg(1 << 15, hstr_el2);
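	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle counter, which
	 * could make a PMXEVCNTR_EL0 access UNDEF at EL1 instead of being
	 * trapped to EL2.
	 */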
if (system_supports_pmuv3()) {
write_sysreg(0, pmselr_el0);
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
}
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
u64 hcrx = vcpu->arch.hcrx_el2;
if (is_nested_ctxt(vcpu)) {
u64 val = __vcpu_sys_reg(vcpu, HCRX_EL2);
hcrx |= val & __HCRX_EL2_MASK;
hcrx &= ~(~val & __HCRX_EL2_nMASK);
}
ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
write_sysreg_s(hcrx, SYS_HCRX_EL2);
}
__activate_traps_hfgxtr(vcpu);
__activate_traps_mpam(vcpu);
}
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
write_sysreg(0, hstr_el2);
if (system_supports_pmuv3()) {
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
if (cpus_have_final_cap(ARM64_HAS_HCX))
write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);
__deactivate_traps_hfgxtr(vcpu);
__deactivate_traps_mpam();
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
hcr |= HCR_TVM;
write_sysreg_hcr(hcr);
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) {
u64 vsesr;
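		/*
		 * Select between the host's and the guest hypervisor's
		 * VSESR_EL2: a vSError is only ever deliverable to one of
		 * them at a time. If the L1 guest hasn't been given FEAT_RAS,
		 * hardware propagates the ESR unconditionally, so hand it a
		 * benign ISV value instead.
		 */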
if (!vserror_state_is_nested(vcpu))
vsesr = vcpu->arch.vsesr_el2;
else if (kvm_has_ras(kern_hyp_va(vcpu->kvm)))
vsesr = __vcpu_sys_reg(vcpu, VSESR_EL2);
else
vsesr = ESR_ELx_ISV;
write_sysreg_s(vsesr, SYS_VSESR_EL2);
}
}
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
u64 *hcr;
if (vserror_state_is_nested(vcpu))
hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2);
else
hcr = &vcpu->arch.hcr_el2;
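	/*
	 * If we pended a virtual abort, preserve it until it gets cleared.
	 * See D1.14.3 (Virtual Interrupts) for details, but the crucial bit
	 * is "On taking a vSError interrupt, HCR_EL2.VSE is cleared to 0."
	 */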
if (*hcr & HCR_VSE) {
*hcr &= ~HCR_VSE;
*hcr |= read_sysreg(hcr_el2) & HCR_VSE;
}
}
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}
static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
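	/*
	 * Finish potential single step before executing the prologue
	 * instruction.
	 */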
*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
return true;
}
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
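	/*
	 * The vCPU's saved SVE state layout always matches the max VL of the
	 * vCPU. Start off with the max VL so we can load the SVE state.
	 */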
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
__sve_restore_state(vcpu_sve_pffr(vcpu),
&vcpu->arch.ctxt.fp_regs.fpsr,
true);
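	/*
	 * The effective VL for a VM could differ from the max VL when
	 * running a nested guest, as the guest hypervisor could select a
	 * smaller VL. Slap that into hardware before wrapping up.
	 */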
if (is_nested_ctxt(vcpu))
sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
}
static inline void __hyp_sve_save_host(void)
{
struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
&sve_state->fpsr,
true);
}
static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
{
u64 zcr_el1, zcr_el2;
if (!guest_owns_fp_regs())
return;
if (vcpu_has_sve(vcpu)) {
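		/* A guest hypervisor may restrict the effective max VL. */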
if (is_nested_ctxt(vcpu))
zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
else
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
write_sysreg_el1(zcr_el1, SYS_ZCR);
}
}
static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
{
u64 zcr_el1, zcr_el2;
if (!guest_owns_fp_regs())
return;
if (vcpu_has_sve(vcpu)) {
zcr_el1 = read_sysreg_el1(SYS_ZCR);
__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);
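		/*
		 * The guest's state is always saved using the guest's max VL.
		 * Ensure that the host has the guest's max VL active such
		 * that the host can save the guest's state lazily, but don't
		 * artificially restrict the host to the guest's max VL.
		 */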
if (has_vhe()) {
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
} else {
zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el1(zcr_el1, SYS_ZCR);
}
}
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
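	/*
	 * Non-protected KVM relies on the host restoring its sve state.
	 * Protected KVM restores the host's sve state as not to reveal that
	 * fpsimd was used by a guest nor leak upper sve bits.
	 */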
if (system_supports_sve()) {
__hyp_sve_save_host();
} else {
__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
}
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
}
static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest;
u8 esr_ec;
if (!system_supports_fpsimd())
return false;
sve_guest = vcpu_has_sve(vcpu);
esr_ec = kvm_vcpu_trap_get_class(vcpu);
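	/* Only handle traps the vCPU can support here: */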
switch (esr_ec) {
case ESR_ELx_EC_FP_ASIMD:
if (guest_hyp_fpsimd_traps_enabled(vcpu))
return false;
break;
case ESR_ELx_EC_SYS64:
if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
return false;
fallthrough;
case ESR_ELx_EC_SVE:
if (!sve_guest)
return false;
if (guest_hyp_sve_traps_enabled(vcpu))
return false;
break;
default:
return false;
}
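	/* Valid trap. Switch the context: */

	/* First disable enough traps to allow us to update the registers */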
__deactivate_cptr_traps(vcpu);
isb();
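	/* Write out the host state if it's in the registers */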
if (is_protected_kvm_enabled() && host_owns_fp_regs())
kvm_hyp_save_fpsimd_host(vcpu);
if (sve_guest)
__hyp_sve_restore_guest(vcpu);
else
__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
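	/* Skip restoring fpexc32 for AArch64 guests */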
if (!(read_sysreg(hcr_el2) & HCR_RW))
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
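	/*
	 * Re-enable traps necessary for the current state of the guest, e.g.
	 * those enabled by a guest hypervisor. The ERET to the guest will
	 * provide the necessary context synchronization.
	 */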
__activate_cptr_traps(vcpu);
return true;
}
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
int rt = kvm_vcpu_sys_get_rt(vcpu);
u64 val = vcpu_get_reg(vcpu, rt);
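	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */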
if (vcpu->arch.hcr_el2 & HCR_TVM)
return false;
switch (sysreg) {
case SYS_SCTLR_EL1:
write_sysreg_el1(val, SYS_SCTLR);
break;
case SYS_TTBR0_EL1:
write_sysreg_el1(val, SYS_TTBR0);
break;
case SYS_TTBR1_EL1:
write_sysreg_el1(val, SYS_TTBR1);
break;
case SYS_TCR_EL1:
write_sysreg_el1(val, SYS_TCR);
break;
case SYS_ESR_EL1:
write_sysreg_el1(val, SYS_ESR);
break;
case SYS_FAR_EL1:
write_sysreg_el1(val, SYS_FAR);
break;
case SYS_AFSR0_EL1:
write_sysreg_el1(val, SYS_AFSR0);
break;
case SYS_AFSR1_EL1:
write_sysreg_el1(val, SYS_AFSR1);
break;
case SYS_MAIR_EL1:
write_sysreg_el1(val, SYS_MAIR);
break;
case SYS_AMAIR_EL1:
write_sysreg_el1(val, SYS_AMAIR);
break;
case SYS_CONTEXTIDR_EL1:
write_sysreg_el1(val, SYS_CONTEXTIDR);
break;
default:
return false;
}
__kvm_skip_instr(vcpu);
return true;
}
static inline u64 hyp_timer_get_offset(struct arch_timer_context *ctxt)
{
u64 offset = 0;
if (ctxt->offset.vm_offset)
offset += *kern_hyp_va(ctxt->offset.vm_offset);
if (ctxt->offset.vcpu_offset)
offset += *kern_hyp_va(ctxt->offset.vcpu_offset);
return offset;
}
static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
{
return arch_timer_read_cntpct_el0() - hyp_timer_get_offset(ctxt);
}
static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *ctxt;
u32 sysreg;
u64 val;
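	/*
	 * We only get here for 64bit guests, 32bit guests will hit
	 * the long and winding road all the way to the standard
	 * handling. Yes, it sucks to be irrelevant.
	 *
	 * Also, we only deal with non-hypervisor context here (either
	 * an EL1 guest, or a non-HYP context of an EL2 guest).
	 */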
if (is_hyp_ctxt(vcpu))
return false;
sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
switch (sysreg) {
case SYS_CNTPCT_EL0:
case SYS_CNTPCTSS_EL0:
if (vcpu_has_nv(vcpu)) {
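			/* Check for guest hypervisor trapping */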
val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
if (!vcpu_el2_e2h_is_set(vcpu))
val = (val & CNTHCTL_EL1PCTEN) << 10;
if (!(val & (CNTHCTL_EL1PCTEN << 10)))
return false;
}
ctxt = vcpu_ptimer(vcpu);
break;
case SYS_CNTVCT_EL0:
case SYS_CNTVCTSS_EL0:
if (vcpu_has_nv(vcpu)) {
val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
if (val & CNTHCTL_EL1TVCT)
return false;
}
ctxt = vcpu_vtimer(vcpu);
break;
default:
return false;
}
val = compute_counter_value(ctxt);
vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
__kvm_skip_instr(vcpu);
return true;
}
static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
int rt = kvm_vcpu_sys_get_rt(vcpu);
u64 val = vcpu_get_reg(vcpu, rt);
if (sysreg != SYS_TCR_EL1)
return false;
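	/*
	 * Affected parts do not advertise support for hardware Access Flag /
	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the
	 * underlying control bits are still functional. The architecture
	 * requires these be RES0 on systems that do not implement
	 * FEAT_HAFDBS.
	 *
	 * Uphold the requirements of the architecture by masking guest
	 * writes to TCR_EL1.{HA,HD} here.
	 */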
val &= ~(TCR_HD | TCR_HA);
write_sysreg_el1(val, SYS_TCR);
__kvm_skip_instr(vcpu);
return true;
}
static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
handle_tx2_tvm(vcpu))
return true;
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
handle_ampere1_tcr(vcpu))
return true;
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
__vgic_v3_perform_cpuif_access(vcpu) == 1)
return true;
if (kvm_handle_cntxct(vcpu))
return true;
return false;
}
static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
__vgic_v3_perform_cpuif_access(vcpu) == 1)
return true;
return false;
}
static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
return true;
return false;
}
#define kvm_hyp_handle_iabt_low kvm_hyp_handle_memory_fault
#define kvm_hyp_handle_watchpt_low kvm_hyp_handle_memory_fault
static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
return true;
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
bool valid;
valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
kvm_vcpu_dabt_isvalid(vcpu) &&
!kvm_vcpu_abt_issea(vcpu) &&
!kvm_vcpu_abt_iss1tw(vcpu);
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
if (ret == 1)
return true;
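			/* Promote an illegal access to an SError. */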
if (ret == -1)
*exit_code = ARM_EXCEPTION_EL1_SERROR;
}
}
return false;
}
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
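/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */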
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
const exit_handler_fn *handlers)
{
exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
if (fn)
return fn(vcpu, exit_code);
return false;
}
static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu)
{
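	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these
	 * occur SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 * Are we single-stepping the guest, and took a PAC exception from
	 * the active-not-pending state?
	 */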
if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
*vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}
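/*
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */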
static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
const exit_handler_fn *handlers)
{
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
if (ARM_SERROR_PENDING(*exit_code) &&
ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
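		/*
		 * HVC already have an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 */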
if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
}
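	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */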
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
goto guest;
exit:
return false;
guest:
asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
return true;
}
static inline void __kvm_unexpected_el2_exception(void)
{
extern char __guest_exit_restore_elr_and_panic[];
unsigned long addr, fixup;
struct kvm_exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2);
entry = &__start___kvm_ex_table;
end = &__stop___kvm_ex_table;
while (entry < end) {
addr = (unsigned long)&entry->insn + entry->insn;
fixup = (unsigned long)&entry->fixup + entry->fixup;
if (addr != elr_el2) {
entry++;
continue;
}
write_sysreg(fixup, elr_el2);
return;
}
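	/* Trigger a panic after restoring the hyp context. */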
this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
}
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */