#ifndef _VMM_H_
#define _VMM_H_
#include <sys/cpuset.h>
#include <sys/sdt.h>
#include <x86/segments.h>
/* Opaque types; defined by the VMM implementation, used here by pointer only. */
struct vcpu;
struct vm_snapshot_meta;
#ifdef _KERNEL
/* DTrace statically-defined-tracing provider for VMM probes (kernel only). */
SDT_PROVIDER_DECLARE(vmm);
#endif
/*
 * Reason a virtual machine is being suspended (see vm_exit's `suspended`
 * member and vm_exit_suspended() below).
 */
enum vm_suspend_how {
VM_SUSPEND_NONE,
VM_SUSPEND_RESET,
VM_SUSPEND_POWEROFF,
VM_SUSPEND_HALT,
VM_SUSPEND_TRIPLEFAULT,
VM_SUSPEND_DESTROY,
VM_SUSPEND_LAST		/* number of valid suspend reasons, not a reason */
};
/*
 * Identifiers for guest registers accessible through vm_get_register() /
 * vm_set_register() and vmmops_getreg/setreg.  Segment and descriptor-table
 * registers double as indices for vm_get_seg_desc() / vm_set_seg_desc().
 */
enum vm_reg_name {
/* General-purpose registers. */
VM_REG_GUEST_RAX,
VM_REG_GUEST_RBX,
VM_REG_GUEST_RCX,
VM_REG_GUEST_RDX,
VM_REG_GUEST_RSI,
VM_REG_GUEST_RDI,
VM_REG_GUEST_RBP,
VM_REG_GUEST_R8,
VM_REG_GUEST_R9,
VM_REG_GUEST_R10,
VM_REG_GUEST_R11,
VM_REG_GUEST_R12,
VM_REG_GUEST_R13,
VM_REG_GUEST_R14,
VM_REG_GUEST_R15,
/* Control and debug registers. */
VM_REG_GUEST_CR0,
VM_REG_GUEST_CR3,
VM_REG_GUEST_CR4,
VM_REG_GUEST_DR7,
VM_REG_GUEST_RSP,
VM_REG_GUEST_RIP,
VM_REG_GUEST_RFLAGS,
/* Segment registers (also usable with the seg_desc interfaces). */
VM_REG_GUEST_ES,
VM_REG_GUEST_CS,
VM_REG_GUEST_SS,
VM_REG_GUEST_DS,
VM_REG_GUEST_FS,
VM_REG_GUEST_GS,
VM_REG_GUEST_LDTR,
VM_REG_GUEST_TR,
VM_REG_GUEST_IDTR,
VM_REG_GUEST_GDTR,
VM_REG_GUEST_EFER,
VM_REG_GUEST_CR2,
/* PAE page-directory-pointer-table entries. */
VM_REG_GUEST_PDPTE0,
VM_REG_GUEST_PDPTE1,
VM_REG_GUEST_PDPTE2,
VM_REG_GUEST_PDPTE3,
VM_REG_GUEST_INTR_SHADOW,	/* interrupt-shadow (blocking-by-STI/MOV-SS) state */
VM_REG_GUEST_DR0,
VM_REG_GUEST_DR1,
VM_REG_GUEST_DR2,
VM_REG_GUEST_DR3,
VM_REG_GUEST_DR6,
VM_REG_GUEST_ENTRY_INST_LENGTH,
VM_REG_GUEST_FS_BASE,
VM_REG_GUEST_GS_BASE,
VM_REG_GUEST_KGS_BASE,		/* KERNEL_GS_BASE MSR shadow */
VM_REG_GUEST_TPR,
VM_REG_LAST			/* number of register identifiers */
};
/* Per-vcpu x2APIC mode, manipulated via vm_get/set_x2apic_state(). */
enum x2apic_state {
X2APIC_DISABLED,
X2APIC_ENABLED,
X2APIC_STATE_LAST
};
/*
 * Layout of the 64-bit interrupt-info word used by vm_exit_intinfo() /
 * vm_entry_intinfo(): vector in bits 7:0, event type in bits 10:8,
 * "deliver error code" in bit 11, bit 31 marks the word valid.
 */
#define VM_INTINFO_VECTOR(info) ((info) & 0xff)
#define VM_INTINFO_DEL_ERRCODE 0x800
#define VM_INTINFO_RSVD 0x7ffff000	/* reserved bits 30:12 */
#define VM_INTINFO_VALID 0x80000000
#define VM_INTINFO_TYPE 0x700
/* Event types (bits 10:8). */
#define VM_INTINFO_HWINTR (0 << 8)
#define VM_INTINFO_NMI (2 << 8)
#define VM_INTINFO_HWEXCEPTION (3 << 8)
#define VM_INTINFO_SWINTR (4 << 8)
#ifdef _KERNEL
/*
 * Machine-dependent fields spliced into the machine-independent per-vcpu
 * structure by the VMM core.
 */
#define VMM_VCPU_MD_FIELDS \
struct vlapic *vlapic; /* virtual local APIC */ \
enum x2apic_state x2apic_state; \
uint64_t exitintinfo; /* event that was being delivered at VM exit */ \
int nmi_pending; /* NMI queued for injection */ \
int extint_pending; /* legacy (ATPIC) interrupt queued */ \
int exception_pending; \
int exc_vector; \
int exc_errcode_valid; \
uint32_t exc_errcode; \
struct savefpu *guestfpu; /* guest FPU/SSE state save area */ \
uint64_t guest_xcr0; \
struct vm_exit exitinfo; /* exit information returned to userspace */ \
cpuset_t exitinfo_cpuset; \
uint64_t nextrip; /* instruction pointer after the current instruction */ \
uint64_t tsc_offset
/*
 * Machine-dependent fields spliced into the machine-independent per-VM
 * structure: virtual platform devices and IOMMU state.
 */
#define VMM_VM_MD_FIELDS \
cpuset_t startup_cpus; \
void *iommu; \
struct vioapic *vioapic; /* virtual I/O APIC */ \
struct vatpic *vatpic; /* virtual 8259 PIC pair */ \
struct vatpit *vatpit; /* virtual 8254 PIT */ \
struct vpmtmr *vpmtmr; /* virtual ACPI PM timer */ \
struct vrtc *vrtc; /* virtual real-time clock */ \
struct vhpet *vhpet	/* virtual HPET */
/* Opaque types referenced by the kernel-only interfaces below. */
struct vm;
struct vm_exception;
struct vm_mem;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_eventinfo;
struct vm_object;
struct vm_guest_paging;
struct pmap;
enum snapshot_req;
/*
 * For each backend operation `opname` this declares both a function-pointer
 * typedef `vmmops_<opname>_t` (used in struct vmm_ops below) and a prototype
 * `vmmops_<opname>` for the dispatch wrapper.
 */
#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
typedef ret_type (*vmmops_##opname##_t) args; \
ret_type vmmops_##opname args
/* Module lifecycle. */
DECLARE_VMMOPS_FUNC(int, modinit, (int ipinum));
DECLARE_VMMOPS_FUNC(int, modcleanup, (void));
DECLARE_VMMOPS_FUNC(void, modresume, (void));
DECLARE_VMMOPS_FUNC(void, modsuspend, (void));
/* Per-VM and per-vcpu lifecycle and execution. */
DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap));
DECLARE_VMMOPS_FUNC(int, run, (void *vcpui, register_t pc,
struct pmap *pmap, struct vm_eventinfo *info));
DECLARE_VMMOPS_FUNC(void, cleanup, (void *vmi));
DECLARE_VMMOPS_FUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
int vcpu_id));
DECLARE_VMMOPS_FUNC(void, vcpu_cleanup, (void *vcpui));
/* Register, descriptor, and capability accessors (`num` is a vm_reg_name
 * or vm_cap_type value). */
DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval));
DECLARE_VMMOPS_FUNC(int, setreg, (void *vcpui, int num, uint64_t val));
DECLARE_VMMOPS_FUNC(int, getdesc, (void *vcpui, int num,
struct seg_desc *desc));
DECLARE_VMMOPS_FUNC(int, setdesc, (void *vcpui, int num,
struct seg_desc *desc));
DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval));
DECLARE_VMMOPS_FUNC(int, setcap, (void *vcpui, int num, int val));
DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc,
(vm_offset_t min, vm_offset_t max));
DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace));
DECLARE_VMMOPS_FUNC(struct vlapic *, vlapic_init, (void *vcpui));
DECLARE_VMMOPS_FUNC(void, vlapic_cleanup, (struct vlapic *vlapic));
/* Snapshot/restore support. */
DECLARE_VMMOPS_FUNC(int, vcpu_snapshot, (void *vcpui,
struct vm_snapshot_meta *meta));
DECLARE_VMMOPS_FUNC(int, restore_tsc, (void *vcpui, uint64_t now));
struct vmm_ops {
vmmops_modinit_t modinit;
vmmops_modcleanup_t modcleanup;
vmmops_modresume_t modsuspend;
vmmops_modresume_t modresume;
vmmops_init_t init;
vmmops_run_t run;
vmmops_cleanup_t cleanup;
vmmops_vcpu_init_t vcpu_init;
vmmops_vcpu_cleanup_t vcpu_cleanup;
vmmops_getreg_t getreg;
vmmops_setreg_t setreg;
vmmops_getdesc_t getdesc;
vmmops_setdesc_t setdesc;
vmmops_getcap_t getcap;
vmmops_setcap_t setcap;
vmmops_vmspace_alloc_t vmspace_alloc;
vmmops_vmspace_free_t vmspace_free;
vmmops_vlapic_init_t vlapic_init;
vmmops_vlapic_cleanup_t vlapic_cleanup;
vmmops_vcpu_snapshot_t vcpu_snapshot;
vmmops_restore_tsc_t restore_tsc;
};
/* Backend operation tables registered by the Intel (VT-x) and AMD (SVM)
 * implementations. */
extern const struct vmm_ops vmm_ops_intel;
extern const struct vmm_ops vmm_ops_amd;
/* Guest-physical MMIO mapping and PCI passthrough device assignment. */
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
/* Guest register/descriptor access; `reg` is a vm_reg_name value. */
int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vcpu *vcpu, int reg,
struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vcpu *vcpu, int reg,
struct seg_desc *desc);
/* Run the vcpu until it exits; exit details are left in vm_exitinfo(). */
int vm_run(struct vcpu *vcpu);
/* NMI and legacy external-interrupt injection / pending-state queries. */
int vm_inject_nmi(struct vcpu *vcpu);
int vm_nmi_pending(struct vcpu *vcpu);
void vm_nmi_clear(struct vcpu *vcpu);
int vm_inject_extint(struct vcpu *vcpu);
int vm_extint_pending(struct vcpu *vcpu);
void vm_extint_clear(struct vcpu *vcpu);
/* Accessors for the virtual platform devices hanging off the VM/vcpu. */
struct vlapic *vm_lapic(struct vcpu *vcpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
/* Capability and x2APIC state; `type` is a vm_cap_type value. */
int vm_get_capability(struct vcpu *vcpu, int type, int *val);
int vm_set_capability(struct vcpu *vcpu, int type, int val);
int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_restart_instruction(struct vcpu *vcpu);
struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
cpuset_t *vm_exitinfo_cpuset(struct vcpu *vcpu);
/* Record a synthetic exit at guest `rip` for the given reason. */
void vm_exit_suspended(struct vcpu *vcpu, uint64_t rip);
void vm_exit_debug(struct vcpu *vcpu, uint64_t rip);
void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip);
void vm_exit_astpending(struct vcpu *vcpu, uint64_t rip);
void vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip);
/* Snapshot/restore entry points. */
int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta);
int vm_restore_time(struct vm *vm);
#ifdef _SYS__CPUSET_H_
/* vcpu startup coordination; only visible when cpuset_t is defined. */
cpuset_t vm_start_cpus(struct vm *vm, const cpuset_t *tostart);
void vm_await_start(struct vm *vm, const cpuset_t *waiting);
#endif
bool vmm_is_pptdev(int bus, int slot, int func);
void *vm_iommu_domain(struct vm *vm);
void vcpu_notify_lapic(struct vcpu *vcpu);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);
/* Exception injection and the pending-event (intinfo) interface. */
int vm_inject_exception(struct vcpu *vcpu, int vector, int err_valid,
uint32_t errcode, int restart_instruction);
int vm_exit_intinfo(struct vcpu *vcpu, uint64_t intinfo);
int vm_entry_intinfo(struct vcpu *vcpu, uint64_t *info);
int vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2);
void vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset);
/* Map a hardware segment encoding to the corresponding vm_reg_name. */
enum vm_reg_name vm_segment_name(int seg_encoding);
/*
 * One guest-virtual-to-host-virtual translation produced by
 * vm_copy_setup(); `cookie` is released by vm_copy_teardown().
 */
struct vm_copyinfo {
uint64_t gpa;
size_t len;
void *hva;
void *cookie;
};
/*
 * Set up/tear down and perform copies between kernel memory and a
 * guest-virtual range (which may span multiple discontiguous pages,
 * hence the copyinfo array).  On a guest page fault, *is_fault is set.
 */
int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
int num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo);
void vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len);
void vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len);
int vcpu_trace_exceptions(struct vcpu *vcpu);
int vcpu_trap_wbinvd(struct vcpu *vcpu);
#endif	/* _KERNEL */
/* Tunable per-vcpu capabilities, accessed via vm_get/set_capability(). */
enum vm_cap_type {
VM_CAP_HALT_EXIT,	/* exit to userspace on HLT */
VM_CAP_MTRAP_EXIT,
VM_CAP_PAUSE_EXIT,
VM_CAP_UNRESTRICTED_GUEST,
VM_CAP_ENABLE_INVPCID,
VM_CAP_BPT_EXIT,	/* exit on breakpoint (#BP) */
VM_CAP_RDPID,
VM_CAP_RDTSCP,
VM_CAP_IPI_EXIT,
VM_CAP_MASK_HWINTR,
VM_CAP_RFLAGS_TF,	/* single-step via RFLAGS.TF */
VM_CAP_MAX
};
/* Interrupt trigger mode. */
enum vm_intr_trigger {
EDGE_TRIGGER,
LEVEL_TRIGGER
};
/*
 * Guest segment descriptor in unpacked form; `access` holds the access
 * rights bits decoded by the SEG_DESC_* macros below.
 */
struct seg_desc {
uint64_t base;
uint32_t limit;
uint32_t access;
};
/* Field extractors for seg_desc.access. */
#define SEG_DESC_TYPE(access) ((access) & 0x001f)	/* type + S bit */
#define SEG_DESC_DPL(access) (((access) >> 5) & 0x3)
#define SEG_DESC_PRESENT(access) (((access) & 0x0080) ? 1 : 0)
#define SEG_DESC_DEF32(access) (((access) & 0x4000) ? 1 : 0)	/* D/B bit */
#define SEG_DESC_GRANULARITY(access) (((access) & 0x8000) ? 1 : 0)
#define SEG_DESC_UNUSABLE(access) (((access) & 0x10000) ? 1 : 0)
/* Guest CPU operating mode. */
enum vm_cpu_mode {
CPU_MODE_REAL,
CPU_MODE_PROTECTED,
CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};
/* Guest paging mode. */
enum vm_paging_mode {
PAGING_MODE_FLAT,		/* paging disabled */
PAGING_MODE_32,
PAGING_MODE_PAE,
PAGING_MODE_64,
PAGING_MODE_64_LA57,		/* 5-level paging */
};
/* Snapshot of the guest paging context used for address translation. */
struct vm_guest_paging {
uint64_t cr3;
int cpl;			/* current privilege level */
enum vm_cpu_mode cpu_mode;
enum vm_paging_mode paging_mode;
};
/*
 * Decoded opcode descriptor for the instruction-emulation path.  The
 * static asserts pin size and alignment because this layout is shared
 * with userspace (ABI).
 */
struct vie_op {
uint8_t op_byte;
uint8_t op_type;
uint16_t op_flags;
};
_Static_assert(sizeof(struct vie_op) == 4, "ABI");
_Static_assert(_Alignof(struct vie_op) == 2, "ABI");
/* Maximum length of an x86 instruction, in bytes. */
#define VIE_INST_SIZE 15
/*
 * State of a partially/fully decoded instruction for emulation.  The
 * layout is ABI (shared with userspace) — the _Static_asserts below pin
 * the size and key member offsets; do not reorder or resize fields.
 */
struct vie {
uint8_t inst[VIE_INST_SIZE];	/* fetched instruction bytes */
uint8_t num_valid;		/* bytes fetched into inst[] */
/* Decoder state below this point is zeroed before each decode. */
#define vie_startzero num_processed
uint8_t num_processed;		/* bytes consumed by the decoder */
uint8_t addrsize:4, opsize:4;	/* effective address/operand size */
/* Decoded prefix state. */
uint8_t rex_w:1,
rex_r:1,
rex_x:1,
rex_b:1,
rex_present:1,
repz_present:1,
repnz_present:1,
opsize_override:1,
addrsize_override:1,
segment_override:1;
/* ModRM fields. */
uint8_t mod:2,
reg:4,
rm:4;
/* SIB fields and VEX prefix state. */
uint8_t ss:2,
vex_present:1,
vex_l:1,
index:4,
base:4;
uint8_t disp_bytes;
uint8_t imm_bytes;
uint8_t scale;
uint8_t vex_reg:4,
vex_pp:2,
_sparebits:2;
uint8_t _sparebytes[2];		/* padding reserved for future use */
/* Resolved operands (vm_reg_name values and sign-extended constants). */
int base_register;
int index_register;
int segment_register;
int64_t displacement;
int64_t immediate;
uint8_t decoded;		/* nonzero once decode completed */
uint8_t _sparebyte;
struct vie_op op;		/* opcode descriptor */
};
_Static_assert(sizeof(struct vie) == 64, "ABI");
_Static_assert(__offsetof(struct vie, disp_bytes) == 22, "ABI");
_Static_assert(__offsetof(struct vie, scale) == 24, "ABI");
_Static_assert(__offsetof(struct vie, base_register) == 28, "ABI");
/*
 * Reason for a VM exit, stored in vm_exit.exitcode; the matching union
 * member of vm_exit.u carries the details.  Values are ABI — do not
 * renumber (note the retained DEPRECATED1 slot).
 */
enum vm_exitcode {
VM_EXITCODE_INOUT,		/* in/out port access */
VM_EXITCODE_VMX,		/* unhandled VT-x exit (see u.vmx) */
VM_EXITCODE_BOGUS,		/* spurious exit; just resume */
VM_EXITCODE_RDMSR,
VM_EXITCODE_WRMSR,
VM_EXITCODE_HLT,
VM_EXITCODE_MTRAP,
VM_EXITCODE_PAUSE,
VM_EXITCODE_PAGING,		/* nested page fault (see u.paging) */
VM_EXITCODE_INST_EMUL,		/* needs instruction emulation (u.inst_emul) */
VM_EXITCODE_SPINUP_AP,
VM_EXITCODE_DEPRECATED1,	/* retired; kept to preserve ABI numbering */
VM_EXITCODE_RENDEZVOUS,
VM_EXITCODE_IOAPIC_EOI,
VM_EXITCODE_SUSPENDED,
VM_EXITCODE_INOUT_STR,		/* string in/out (u.inout_str) */
VM_EXITCODE_TASK_SWITCH,
VM_EXITCODE_MONITOR,
VM_EXITCODE_MWAIT,
VM_EXITCODE_SVM,		/* unhandled SVM exit (see u.svm) */
VM_EXITCODE_REQIDLE,
VM_EXITCODE_DEBUG,
VM_EXITCODE_VMINSN,		/* guest executed a VMX instruction */
VM_EXITCODE_BPT,
VM_EXITCODE_IPI,
VM_EXITCODE_DB,
VM_EXITCODE_MAX
};
/* Decoded in/out port access. */
struct vm_inout {
uint16_t bytes:3;	/* access width: 1, 2, or 4 bytes */
uint16_t in:1;		/* 1 = in, 0 = out */
uint16_t string:1;	/* string variant (ins/outs) */
uint16_t rep:1;		/* has a rep prefix */
uint16_t port;
uint32_t eax;		/* value for out; updated with the result for in */
};
/* Additional state needed to emulate a string in/out instruction. */
struct vm_inout_str {
struct vm_inout inout;	/* must be the first field (aliases u.inout) */
struct vm_guest_paging paging;
uint64_t rflags;
uint64_t cr0;
uint64_t index;		/* source/destination index register value */
uint64_t count;		/* rep iteration count */
int addrsize;
enum vm_reg_name seg_name;	/* effective segment of the memory operand */
struct seg_desc seg_desc;
int cs_d;		/* CS.D (default operand size) */
uint64_t cs_base;
};
/* What triggered a hardware task switch. */
enum task_switch_reason {
TSR_CALL,
TSR_IRET,
TSR_JMP,
TSR_IDT_GATE,		/* task gate in IDT */
};
/* Details of a guest task switch to be completed in software. */
struct vm_task_switch {
uint16_t tsssel;	/* new TSS selector */
int ext;		/* task switch due to an external event */
uint32_t errcode;
int errcode_valid;	/* push errcode on the new stack */
enum task_switch_reason reason;
struct vm_guest_paging paging;
};
/*
 * Information about the most recent VM exit, filled in by the backend and
 * consumed by the VMM core/userspace.  `exitcode` selects which member of
 * the union `u` is valid.
 */
struct vm_exit {
enum vm_exitcode exitcode;
int inst_length;	/* length of the faulting instruction (0 if unknown) */
uint64_t rip;		/* guest rip at the time of the exit */
union {
struct vm_inout inout;			/* VM_EXITCODE_INOUT */
struct vm_inout_str inout_str;		/* VM_EXITCODE_INOUT_STR */
/* VM_EXITCODE_PAGING: nested page fault. */
struct {
uint64_t gpa;
int fault_type;
} paging;
/* VM_EXITCODE_INST_EMUL: memory access requiring emulation. */
struct {
uint64_t gpa;
uint64_t gla;
uint64_t cs_base;
int cs_d;		/* CS.D (default operand size) */
struct vm_guest_paging paging;
struct vie vie;		/* decoded instruction state */
} inst_emul;
/* VM_EXITCODE_VMX: raw VT-x exit information. */
struct {
int status;		/* vmx inst status */
uint32_t exit_reason;
uint64_t exit_qualification;
int inst_type;
int inst_error;
} vmx;
/* VM_EXITCODE_SVM: raw SVM exit information. */
struct {
uint64_t exitcode;
uint64_t exitinfo1;
uint64_t exitinfo2;
} svm;
/* VM_EXITCODE_BPT */
struct {
int inst_length;
} bpt;
/* VM_EXITCODE_DB: debug exception details. */
struct {
int trace_trap;
int pushf_intercept;
int tf_shadow_val;
struct vm_guest_paging paging;
} dbg;
/* VM_EXITCODE_RDMSR / VM_EXITCODE_WRMSR. */
struct {
uint32_t code;		/* MSR number */
uint64_t wval;		/* value being written (wrmsr only) */
} msr;
/* VM_EXITCODE_SPINUP_AP */
struct {
int vcpu;
uint64_t rip;
} spinup_ap;
/* VM_EXITCODE_HLT */
struct {
uint64_t rflags;
uint64_t intr_status;
} hlt;
/* VM_EXITCODE_IOAPIC_EOI */
struct {
int vector;
} ioapic_eoi;
/* VM_EXITCODE_SUSPENDED */
struct {
enum vm_suspend_how how;
} suspended;
/* VM_EXITCODE_IPI */
struct {
uint32_t mode;
uint8_t vector;
} ipi;
struct vm_task_switch task_switch;	/* VM_EXITCODE_TASK_SWITCH */
} u;
};
/*
 * Queue an exception `vector` for injection into the guest;
 * `errcode` is pushed only when `errcode_valid` is nonzero.
 */
void vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
int errcode);
/* Inject #UD (invalid opcode); #UD has no error code. */
static __inline void
vm_inject_ud(struct vcpu *vcpu)
{
vm_inject_fault(vcpu, IDT_UD, 0, 0);
}
/* Inject #GP (general protection) with error code 0. */
static __inline void
vm_inject_gp(struct vcpu *vcpu)
{
vm_inject_fault(vcpu, IDT_GP, 1, 0);
}
/* Inject #AC (alignment check) with the given error code. */
static __inline void
vm_inject_ac(struct vcpu *vcpu, int errcode)
{
vm_inject_fault(vcpu, IDT_AC, 1, errcode);
}
/* Inject #SS (stack-segment fault) with the given error code. */
static __inline void
vm_inject_ss(struct vcpu *vcpu, int errcode)
{
vm_inject_fault(vcpu, IDT_SS, 1, errcode);
}
/* Inject #PF with `error_code` and set the faulting address in cr2. */
void vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2);
#endif