vmcb
/*
 * NOTE(review): fragment only -- the enclosing function header is not
 * visible in this chunk.  These lines copy guest state OUT of the VMCB
 * save area into caller-provided arrays (gprs/sregs/crs/msrs/drs);
 * presumably a vcpu_readregs-style routine.  Confirm against full file.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
/* General purpose / control-flow registers kept in the VMCB. */
gprs[VCPU_REGS_RAX] = vmcb->v_rax;
gprs[VCPU_REGS_RIP] = vmcb->v_rip;
gprs[VCPU_REGS_RSP] = vmcb->v_rsp;
gprs[VCPU_REGS_RFLAGS] = vmcb->v_rflags;
/*
 * Segment registers.  Each 'attr' read below is consumed by lines not
 * visible in this fragment (presumably unpacking the SVM 12-bit
 * attribute field into vsi_ar) -- TODO confirm against the elided code.
 */
sregs[VCPU_REGS_CS].vsi_sel = vmcb->v_cs.vs_sel;
sregs[VCPU_REGS_CS].vsi_limit = vmcb->v_cs.vs_lim;
attr = vmcb->v_cs.vs_attr;
sregs[VCPU_REGS_CS].vsi_base = vmcb->v_cs.vs_base;
sregs[VCPU_REGS_DS].vsi_sel = vmcb->v_ds.vs_sel;
sregs[VCPU_REGS_DS].vsi_limit = vmcb->v_ds.vs_lim;
attr = vmcb->v_ds.vs_attr;
sregs[VCPU_REGS_DS].vsi_base = vmcb->v_ds.vs_base;
sregs[VCPU_REGS_ES].vsi_sel = vmcb->v_es.vs_sel;
sregs[VCPU_REGS_ES].vsi_limit = vmcb->v_es.vs_lim;
attr = vmcb->v_es.vs_attr;
sregs[VCPU_REGS_ES].vsi_base = vmcb->v_es.vs_base;
sregs[VCPU_REGS_FS].vsi_sel = vmcb->v_fs.vs_sel;
sregs[VCPU_REGS_FS].vsi_limit = vmcb->v_fs.vs_lim;
attr = vmcb->v_fs.vs_attr;
sregs[VCPU_REGS_FS].vsi_base = vmcb->v_fs.vs_base;
sregs[VCPU_REGS_GS].vsi_sel = vmcb->v_gs.vs_sel;
sregs[VCPU_REGS_GS].vsi_limit = vmcb->v_gs.vs_lim;
attr = vmcb->v_gs.vs_attr;
sregs[VCPU_REGS_GS].vsi_base = vmcb->v_gs.vs_base;
sregs[VCPU_REGS_SS].vsi_sel = vmcb->v_ss.vs_sel;
sregs[VCPU_REGS_SS].vsi_limit = vmcb->v_ss.vs_lim;
attr = vmcb->v_ss.vs_attr;
sregs[VCPU_REGS_SS].vsi_base = vmcb->v_ss.vs_base;
sregs[VCPU_REGS_LDTR].vsi_sel = vmcb->v_ldtr.vs_sel;
sregs[VCPU_REGS_LDTR].vsi_limit = vmcb->v_ldtr.vs_lim;
attr = vmcb->v_ldtr.vs_attr;
sregs[VCPU_REGS_LDTR].vsi_base = vmcb->v_ldtr.vs_base;
sregs[VCPU_REGS_TR].vsi_sel = vmcb->v_tr.vs_sel;
sregs[VCPU_REGS_TR].vsi_limit = vmcb->v_tr.vs_lim;
attr = vmcb->v_tr.vs_attr;
sregs[VCPU_REGS_TR].vsi_base = vmcb->v_tr.vs_base;
/* Descriptor tables: only limit and base exist for GDTR/IDTR. */
vrs->vrs_gdtr.vsi_limit = vmcb->v_gdtr.vs_lim;
vrs->vrs_gdtr.vsi_base = vmcb->v_gdtr.vs_base;
vrs->vrs_idtr.vsi_limit = vmcb->v_idtr.vs_lim;
vrs->vrs_idtr.vsi_base = vmcb->v_idtr.vs_base;
/* Control registers held in the VMCB. */
crs[VCPU_REGS_CR0] = vmcb->v_cr0;
crs[VCPU_REGS_CR3] = vmcb->v_cr3;
crs[VCPU_REGS_CR4] = vmcb->v_cr4;
/* MSRs that SVM saves/loads via the VMCB. */
msrs[VCPU_REGS_EFER] = vmcb->v_efer;
msrs[VCPU_REGS_STAR] = vmcb->v_star;
msrs[VCPU_REGS_LSTAR] = vmcb->v_lstar;
msrs[VCPU_REGS_CSTAR] = vmcb->v_cstar;
msrs[VCPU_REGS_SFMASK] = vmcb->v_sfmask;
msrs[VCPU_REGS_KGSBASE] = vmcb->v_kgsbase;
/* Debug registers. */
drs[VCPU_REGS_DR6] = vmcb->v_dr6;
drs[VCPU_REGS_DR7] = vmcb->v_dr7;
int svm_get_guest_faulttype(struct vmcb *);
/*
 * NOTE(review): fragment only -- function header not visible.  Mirror of
 * the read path above: copies caller-provided register arrays INTO the
 * VMCB save area; presumably a vcpu_writeregs-style routine.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
vmcb->v_rax = gprs[VCPU_REGS_RAX];
vmcb->v_rip = gprs[VCPU_REGS_RIP];
vmcb->v_rsp = gprs[VCPU_REGS_RSP];
vmcb->v_rflags = gprs[VCPU_REGS_RFLAGS];
/*
 * Segment attributes: the expression below repacks bits of 'attr'
 * (low byte kept, a higher nibble shifted down into bits 8-11),
 * presumably converting a VMX-style access-rights word into the SVM
 * 12-bit attribute format.  'attr' itself is loaded by lines not
 * visible in this fragment -- TODO confirm where it is assigned.
 */
vmcb->v_cs.vs_sel = sregs[VCPU_REGS_CS].vsi_sel;
vmcb->v_cs.vs_lim = sregs[VCPU_REGS_CS].vsi_limit;
vmcb->v_cs.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00);
vmcb->v_cs.vs_base = sregs[VCPU_REGS_CS].vsi_base;
vmcb->v_ds.vs_sel = sregs[VCPU_REGS_DS].vsi_sel;
vmcb->v_ds.vs_lim = sregs[VCPU_REGS_DS].vsi_limit;
vmcb->v_ds.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00);
vmcb->v_ds.vs_base = sregs[VCPU_REGS_DS].vsi_base;
vmcb->v_es.vs_sel = sregs[VCPU_REGS_ES].vsi_sel;
vmcb->v_es.vs_lim = sregs[VCPU_REGS_ES].vsi_limit;
vmcb->v_es.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00);
vmcb->v_es.vs_base = sregs[VCPU_REGS_ES].vsi_base;
vmcb->v_fs.vs_sel = sregs[VCPU_REGS_FS].vsi_sel;
vmcb->v_fs.vs_lim = sregs[VCPU_REGS_FS].vsi_limit;
vmcb->v_fs.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00);
vmcb->v_fs.vs_base = sregs[VCPU_REGS_FS].vsi_base;
vmcb->v_gs.vs_sel = sregs[VCPU_REGS_GS].vsi_sel;
vmcb->v_gs.vs_lim = sregs[VCPU_REGS_GS].vsi_limit;
vmcb->v_gs.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00);
vmcb->v_gs.vs_base = sregs[VCPU_REGS_GS].vsi_base;
vmcb->v_ss.vs_sel = sregs[VCPU_REGS_SS].vsi_sel;
vmcb->v_ss.vs_lim = sregs[VCPU_REGS_SS].vsi_limit;
vmcb->v_ss.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00);
vmcb->v_ss.vs_base = sregs[VCPU_REGS_SS].vsi_base;
vmcb->v_ldtr.vs_sel = sregs[VCPU_REGS_LDTR].vsi_sel;
vmcb->v_ldtr.vs_lim = sregs[VCPU_REGS_LDTR].vsi_limit;
vmcb->v_ldtr.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00);
vmcb->v_ldtr.vs_base = sregs[VCPU_REGS_LDTR].vsi_base;
vmcb->v_tr.vs_sel = sregs[VCPU_REGS_TR].vsi_sel;
vmcb->v_tr.vs_lim = sregs[VCPU_REGS_TR].vsi_limit;
vmcb->v_tr.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00);
vmcb->v_tr.vs_base = sregs[VCPU_REGS_TR].vsi_base;
vmcb->v_gdtr.vs_lim = vrs->vrs_gdtr.vsi_limit;
vmcb->v_gdtr.vs_base = vrs->vrs_gdtr.vsi_base;
vmcb->v_idtr.vs_lim = vrs->vrs_idtr.vsi_limit;
vmcb->v_idtr.vs_base = vrs->vrs_idtr.vsi_base;
vmcb->v_cr0 = crs[VCPU_REGS_CR0];
vmcb->v_cr3 = crs[VCPU_REGS_CR3];
vmcb->v_cr4 = crs[VCPU_REGS_CR4];
/*
 * NOTE(review): v_efer uses |= while every neighbouring field uses
 * plain assignment -- presumably deliberate, to keep bits the host
 * forces on (e.g. EFER_SVME, see the '*rax | EFER_SVME' pattern later
 * in this file) from being cleared by the caller.  Verify intent.
 */
vmcb->v_efer |= msrs[VCPU_REGS_EFER];
vmcb->v_star = msrs[VCPU_REGS_STAR];
vmcb->v_lstar = msrs[VCPU_REGS_LSTAR];
vmcb->v_cstar = msrs[VCPU_REGS_CSTAR];
vmcb->v_sfmask = msrs[VCPU_REGS_SFMASK];
vmcb->v_kgsbase = msrs[VCPU_REGS_KGSBASE];
vmcb->v_dr6 = drs[VCPU_REGS_DR6];
vmcb->v_dr7 = drs[VCPU_REGS_DR7];
/*
 * NOTE(review): fragment only -- VMCB initialization/reset lines with
 * the enclosing function and several statement continuations elided
 * (the '|' at end of the intercept and PAT lines continues onto lines
 * not present in this chunk).
 */
struct vmcb *vmcb;

vmcb = (struct vmcb *)vcpu->vc_control_va;
/* Exit intercepts: external INTR/NMI, VMRUN/VMMCALL, etc. (list truncated). */
vmcb->v_intercept1 = SVM_INTERCEPT_INTR | SVM_INTERCEPT_NMI |
vmcb->v_intercept2 = SVM_INTERCEPT_VMRUN | SVM_INTERCEPT_VMMCALL |
vmcb->v_intercept2 |= SVM_INTERCEPT_XSETBV;
vmcb->v_intercept2 |= SVM_INTERCEPT_EFER_WRITE;
/* Trap CR0/CR4 writes after the hardware has applied them ("post"). */
vmcb->v_intercept2 |= SVM_INTERCEPT_CR0_WRITE_POST;
vmcb->v_intercept2 |= SVM_INTERCEPT_CR4_WRITE_POST;
/* Physical addresses of the I/O and MSR permission bitmaps. */
vmcb->v_iopm_pa = (uint64_t)(vcpu->vc_svm_ioio_pa);
vmcb->v_msrpm_pa = (uint64_t)(vcpu->vc_msr_bitmap_pa);
vmcb->v_asid = vcpu->vc_vpid;
/* Fresh vCPU: flush all TLB entries on the next VMRUN. */
vmcb->v_tlb_control = SVM_TLB_CONTROL_FLUSH_ALL;
/* Guest IF is virtualized; host interrupts are not masked by the guest. */
vmcb->v_intr_masking = 1;
/* Guest PAT (entries 2+ continued on elided lines). */
vmcb->v_g_pat = PATENTRY(0, PAT_WB) | PATENTRY(1, PAT_WC) |
/* Nested paging, rooted at the parent VM's pmap. */
vmcb->v_np_enable = SVM_ENABLE_NP;
vmcb->v_n_cr3 = vcpu->vc_parent->vm_pmap->pm_pdirpa;
/*
 * SEV / SEV-ES enables below are presumably conditional in the full
 * source (guards elided in this fragment) -- confirm before relying
 * on unconditional execution.
 */
vmcb->v_np_enable |= SVM_ENABLE_SEV;
vmcb->v_np_enable |= SVM_SEVES_ENABLE;
vmcb->v_lbr_virt_enable |= SVM_LBRVIRT_ENABLE;
vmcb->v_vmsa_pa = vcpu->vc_svm_vmsa_pa;
/* EFER.SVME must be set in the guest EFER image for VMRUN to succeed. */
vmcb->v_efer |= EFER_SVME;
/*
 * NOTE(review): fragment -- copies the VMCB's save-area layout into a
 * 'vmsa' buffer, presumably to seed an SEV-ES VMSA; confirm caller.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
memcpy(vmsa, &vmcb->vmcb_layout, sizeof(vmcb->vmcb_layout));
/*
 * NOTE(review): fragment -- marks VMCB state groups as clean (unchanged
 * since last VMRUN) so hardware may skip reloading them.
 */
struct vmcb *vmcb;
vmcb = (struct vmcb *)vcpu->vc_control_va;
vmcb->v_vmcb_clean_bits |= value;
/*
 * NOTE(review): fragment -- inverse of the clean-bits setter above:
 * clears clean bits so hardware reloads the corresponding VMCB state.
 */
struct vmcb *vmcb;
vmcb = (struct vmcb *)vcpu->vc_control_va;
vmcb->v_vmcb_clean_bits &= ~value;
/* NOTE(review): fragment -- reads guest RFLAGS (consumer not visible). */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
uint64_t rflags = vmcb->v_rflags;
/*
 * NOTE(review): second fragment (separate function -- note the fresh
 * vmcb declaration).  Tears down a virtual-interrupt window: clears the
 * pending V_IRQ and vector and stops intercepting VINTR, then resyncs
 * RIP and re-asserts EFER.SVME.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
vmcb->v_irq = 0;
vmcb->v_intr_vector = 0;
vmcb->v_intercept1 &= ~SVM_INTERCEPT_VINTR;
vmcb->v_rip = vcpu->vc_gueststate.vg_rip;
vmcb->v_efer |= EFER_SVME;
/*
 * NOTE(review): fragment -- propagates an SEV-ES guest's GHCB-requested
 * exit (sw_exitcode/info1/info2, continuation of the exitcode
 * assignment elided) and RAX into the VMCB/guest state.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
vmcb->v_exitcode = vcpu->vc_gueststate.vg_exit_reason =
vmcb->v_exitinfo1 = ghcb->v_sw_exitinfo1;
vmcb->v_exitinfo2 = ghcb->v_sw_exitinfo2;
vmcb->v_rax = vcpu->vc_gueststate.vg_rax = ghcb->v_rax;
/*
 * NOTE(review): fragment -- GHCB MSR-protocol handling with most of the
 * control flow elided.  v_ghcb_gpa is split per the GHCB spec:
 * PG_FRAME bits carry a page address (GHCB registration), non-frame
 * bits carry a protocol request with the request code in the low 32
 * bits and data in the high 32 bits -- verify against the GHCB spec.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
/* Page-aligned, nonzero value: the guest registered its GHCB page. */
if ((vmcb->v_ghcb_gpa & ~PG_FRAME) == 0 &&
    (vmcb->v_ghcb_gpa & PG_FRAME) != 0) {
ghcb_gpa = vmcb->v_ghcb_gpa & PG_FRAME;
} else if ((vmcb->v_ghcb_gpa & ~PG_FRAME) != 0) {
/* MSR-protocol request: code in low 32 bits. */
req = (vmcb->v_ghcb_gpa & 0xffffffff);
/* Presumably the CPUID-request case; guards elided in this fragment. */
vmcb->v_exitcode = SVM_VMEXIT_CPUID;
/* CPUID leaf travels in the high 32 bits of the GHCB MSR value. */
vmcb->v_rax = vmcb->v_ghcb_gpa >> 32;
result = vmcb->v_rax;
/* Response: result in high 32 bits, response code in low 32 bits. */
vmcb->v_ghcb_gpa = (result << 32) | resp;
switch (vmcb->v_exitcode) {
vmcb->v_rip = vcpu->vc_gueststate.vg_rip;
vcpu->vc_gueststate.vg_rax = vmcb->v_rax;
vmcb->v_rip = vcpu->vc_gueststate.vg_rip;
    vmcb->v_exitcode);
/*
 * NOTE(review): fragment -- handlers for the EFER/CR0/CR4 WRITE_POST
 * intercepts set up in the reset fragment above: for a post-write
 * intercept, EXITINFO1 holds the value the guest wrote, which is
 * committed to the corresponding VMCB field.  The switch/case glue
 * selecting among the three assignments is elided.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
vmcb->v_efer = vmcb->v_exitinfo1;
vmcb->v_cr0 = vmcb->v_exitinfo1;
vmcb->v_cr4 = vmcb->v_exitinfo1;
/*
 * NOTE(review): fragment -- tests the VMCB interrupt-shadow field.
 * The mask is spelled SMV_GUEST_INTR_MASK ("SMV", not "SVM") -- verify
 * this matches the macro's actual definition elsewhere in the tree.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
return (vmcb->v_intr_shadow & SMV_GUEST_INTR_MASK);
/*
 * NOTE(review): fragment -- start of svm_get_guest_faulttype(); body
 * truncated.  For a nested-page-fault exit, EXITINFO1 bit 0 indicates
 * the page was present -- confirm the elided return values.
 */
svm_get_guest_faulttype(struct vmcb *vmcb)
if (!(vmcb->v_exitinfo1 & 0x1))
/*
 * NOTE(review): fragment -- nested-page-fault exit handling: EXITINFO2
 * carries the faulting guest-physical address, and the decode-assist
 * fields supply the pre-fetched instruction bytes and their count
 * (memcpy length argument elided).
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
gpa = vmcb->v_exitinfo2;
vee->vee_insn_len = vmcb->v_n_bytes_fetched;
memcpy(&vee->vee_insn_bytes, vmcb->v_guest_ins_bytes,
/* NOTE(review): fragment -- returns the guest's current privilege level. */
struct vmcb *vmcb;
vmcb = (struct vmcb *)vcpu->vc_control_va;
return (vmcb->v_cpl);
/*
 * NOTE(review): fragment -- gathers CR0, EFER and the CS attributes,
 * presumably to classify the guest CPU mode (real/prot/long); the
 * classification logic itself is elided.
 */
struct vmcb *vmcb;
vmcb = (struct vmcb *)vcpu->vc_control_va;
cr0 = vmcb->v_cr0;
efer = vmcb->v_efer;
cs_ar = vmcb->v_cs.vs_attr;
/*
 * NOTE(review): fragment -- IN/OUT exit handling.  For IOIO exits
 * EXITINFO2 holds the RIP of the next instruction, so subtracting the
 * current RIP yields the instruction length; EXITINFO1 encodes the
 * port/size/direction qualifiers, and RAX carries the data.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
insn_length = vmcb->v_exitinfo2 - vmcb->v_rip;
exit_qual = vmcb->v_exitinfo1;
vcpu->vc_exit.vei.vei_data = vmcb->v_rax;
/*
 * NOTE(review): fragment -- takes a pointer to the VMCB RAX slot so the
 * (elided) MSR emulation code can read/write it in place.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
rax = &vmcb->v_rax;
/*
 * NOTE(review): fragment -- MSR exit handling: EXITINFO1 == 1 means the
 * exit was a WRMSR (0 would be RDMSR).  A guest EFER write is committed
 * with EFER_SVME forced on, since clearing SVME would break VMRUN.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
rax = &vmcb->v_rax;
if (vmcb->v_exitinfo1 == 1) {
vmcb->v_efer = *rax | EFER_SVME;
/*
 * NOTE(review): fragment -- reads CR4 and round-trips RAX through the
 * VMCB; presumably the XSETBV intercept handler (CR4.OSXSAVE check and
 * XCR0 validation elided) -- confirm against the full function.
 */
struct vmcb *vmcb;
vmcb = (struct vmcb *)vcpu->vc_control_va;
rax = &vmcb->v_rax;
cr4 = vmcb->v_cr4;
vmcb->v_rax = *rax;
/*
 * NOTE(review): fragment -- scattered lines from the vCPU run loop.
 * Most control flow (loop, branch guards, VMRUN itself) is elided;
 * statement order here does NOT reflect actual execution order.
 */
struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
/* Resync guest RAX/RIP into the VMCB before entry. */
vmcb->v_rax = vcpu->vc_gueststate.vg_rax;
vmcb->v_rip = vcpu->vc_gueststate.vg_rip;
/* TLB control values (right-hand sides elided). */
vmcb->v_tlb_control =
vmcb->v_tlb_control =
/*
 * Event injection.  The EVENTINJ encoding used below matches the AMD
 * APM format: vector in bits 0-7, type in bits 8-10 (4 = software
 * interrupt, 3 = exception), bit 11 = error code valid, bit 31 = valid.
 */
vmcb->v_eventinj = vcpu->vc_inject.vie_vector |
vmcb->v_eventinj = vcpu->vc_inject.vie_vector;
vmcb->v_eventinj |= (4ULL << 8);
vmcb->v_eventinj |= (3ULL << 8);
vmcb->v_eventinj |= (3ULL << 8);
/* Error codes are only delivered in protected mode. */
if (vmcb->v_cr0 & CR0_PE) {
vmcb->v_eventinj |= (1ULL << 11);
vmcb->v_eventinj |= (uint64_t)
vmcb->v_eventinj |= (1U << 31);
/* Sanity: external interrupts must still be intercepted. */
KASSERT(vmcb->v_intercept1 & SVM_INTERCEPT_INTR);
/* Post-exit: pull RIP/RFLAGS/exit code back out of the VMCB. */
vcpu->vc_gueststate.vg_rip = vmcb->v_rip;
/* TLB flush was consumed by this VMRUN; reset for the next entry. */
vmcb->v_tlb_control = SVM_TLB_CONTROL_FLUSH_NONE;
exit_reason = vmcb->v_exitcode;
vcpu->vc_gueststate.vg_rflags = vmcb->v_rflags;
/*
 * Open a virtual-interrupt window: intercept VINTR and assert a dummy
 * V_IRQ (vector 0, TPR ignored) so the guest exits as soon as it can
 * accept an interrupt.
 */
vmcb->v_intercept1 |= SVM_INTERCEPT_VINTR;
vmcb->v_irq = 1;
vmcb->v_intr_misc = SVM_INTR_MISC_V_IGN_TPR;
vmcb->v_intr_vector = 0;