#ifndef _DEV_VMM_VM_H_
#define _DEV_VMM_VM_H_
#ifdef _KERNEL
#include <sys/_cpuset.h>
#include <machine/vmm.h>
#include <dev/vmm/vmm_param.h>
#include <dev/vmm/vmm_mem.h>
struct vcpu;
/*
 * Scheduling state of a virtual CPU.  Transitions are made via
 * vcpu_set_state*() with the vcpu lock held.
 */
enum vcpu_state {
	VCPU_IDLE,	/* not being operated on by anyone */
	VCPU_FROZEN,	/* held quiescent, e.g. for ioctl access -- NOTE(review): confirm */
	VCPU_RUNNING,	/* executing guest code on a host cpu */
	VCPU_SLEEPING,	/* blocked waiting for an event -- NOTE(review): confirm */
};
/*
 * Machine-independent per-virtual-CPU state.  Machine-dependent fields are
 * appended via VMM_VCPU_MD_FIELDS (defined in <machine/vmm.h>).
 */
struct vcpu {
	struct mtx mtx;		/* spin lock; see vcpu_lock_init() below */
	enum vcpu_state state;	/* (protected by mtx) current scheduling state */
	int vcpuid;		/* index of this vcpu; see vcpu_vcpuid() */
	int hostcpu;		/* host cpu this vcpu runs on -- presumably valid
				   only while state == VCPU_RUNNING; confirm */
	int reqidle;		/* request this vcpu to go idle -- NOTE(review):
				   confirm semantics against vcpu_reqidle() users */
	struct vm *vm;		/* back-pointer to the owning VM */
	void *cookie;		/* opaque machine-dependent per-vcpu data */
	void *stats;		/* opaque statistics area; see vcpu_stats() */
	VMM_VCPU_MD_FIELDS;	/* machine-dependent fields */
};
/*
 * The per-vcpu lock is a spin mutex (MTX_SPIN), so it must be taken and
 * released with the _spin variants and holders must not sleep.
 */
#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
#define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
#define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
/* IPI vector used by vmm -- presumably to notify running vcpus; confirm. */
extern int vmm_ipinum;

/*
 * Transition a vcpu between the states in enum vcpu_state; returns 0 on
 * success or an errno value.  The _locked variant presumably requires the
 * vcpu lock to be held (naming convention; confirm at call sites).
 * vcpu_set_state_all() applies the transition to every vcpu of the VM.
 */
int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
int vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle);
int vcpu_set_state_all(struct vm *vm, enum vcpu_state state);
/*
 * Returns the current state; if hostcpu is non-NULL it presumably receives
 * the host cpu the vcpu is running on -- confirm against implementation.
 */
enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
/* Wake/notify a vcpu of a pending event; _locked expects the vcpu lock held. */
void vcpu_notify_event(struct vcpu *vcpu);
void vcpu_notify_event_locked(struct vcpu *vcpu);
int vcpu_debugged(struct vcpu *vcpu);
/* Accessor: opaque statistics area attached to the vcpu. */
static inline void *
vcpu_stats(struct vcpu *vcpu)
{
	return (vcpu->stats);
}
/* Accessor: the VM that owns this vcpu. */
static inline struct vm *
vcpu_vm(struct vcpu *vcpu)
{
	return (vcpu->vm);
}
/* Accessor: this vcpu's id (its index in the VM's vcpu array). */
static inline int
vcpu_vcpuid(struct vcpu *vcpu)
{
	return (vcpu->vcpuid);
}
/*
 * Returns non-zero if the vcpu is currently executing guest code.  If
 * hostcpu is non-NULL it is filled in by vcpu_get_state() -- presumably
 * with the host cpu running the vcpu; confirm against the implementation.
 *
 * Declaration specifiers normalized from "static int __inline" to the
 * "static inline" form used by every other inline in this header.
 */
static inline int
vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
{
	return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
}
#ifdef _SYS_PROC_H_
/*
 * Returns non-zero when the current host thread should leave the guest and
 * let the host scheduler run: an AST is pending or preemption is owed.
 * The vcpu argument is not used by this implementation.
 *
 * Declaration specifiers normalized from "static int __inline" to the
 * "static inline" form used by every other inline in this header.
 */
static inline int
vcpu_should_yield(struct vcpu *vcpu)
{
	struct thread *td;

	td = curthread;
	return (td->td_ast != 0 || td->td_owepreempt != 0);
}
#endif
/* Callback invoked on each vcpu participating in an SMP rendezvous. */
typedef void (*vm_rendezvous_func_t)(struct vcpu *vcpu, void *arg);
/* Service a rendezvous this vcpu has been asked to join. */
int vm_handle_rendezvous(struct vcpu *vcpu);
/*
 * Run func(vcpu, arg) on the set of vcpus in 'dest' -- presumably blocking
 * until all of them have rendezvoused; confirm against implementation.
 */
int vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);
/*
 * Machine-independent per-virtual-machine state.  Machine-dependent fields
 * are appended via VMM_VM_MD_FIELDS (defined in <machine/vmm.h>).
 */
struct vm {
	void *cookie;			/* opaque machine-dependent data */
	struct vcpu **vcpu;		/* array of vcpus -- presumably maxcpus
					   entries, lazily filled by
					   vm_alloc_vcpu(); confirm */
	struct vm_mem mem;		/* guest memory; see vm_mem() */
	char name[VM_MAX_NAMELEN + 1];	/* NUL-terminated VM name */
	struct sx vcpus_init_lock;	/* presumably serializes vcpu creation
					   (vm_alloc_vcpu); confirm */
	bool dying;			/* set when the VM is being torn down --
					   NOTE(review): confirm vs.
					   vm_disable_vcpu_creation() */
	int suspend;			/* non-zero once vm_suspend() has been
					   called -- presumably holds the
					   vm_suspend_how value; confirm */
	volatile cpuset_t active_cpus;		/* vcpus activated by vm_activate_cpu() */
	volatile cpuset_t debug_cpus;		/* vcpus stopped for debug */
	volatile cpuset_t suspended_cpus;	/* vcpus that have seen the suspend */
	volatile cpuset_t halted_cpus;		/* halted vcpus -- confirm semantics */
	cpuset_t rendezvous_req_cpus;	/* vcpus asked to rendezvous */
	cpuset_t rendezvous_done_cpus;	/* vcpus that have rendezvoused */
	void *rendezvous_arg;		/* arg passed to rendezvous_func */
	vm_rendezvous_func_t rendezvous_func;	/* active rendezvous callback */
	struct mtx rendezvous_mtx;	/* presumably protects the rendezvous_*
					   fields above; confirm */
	uint16_t sockets;		/* guest topology: sockets per system */
	uint16_t cores;			/* guest topology: cores per socket */
	uint16_t threads;		/* guest topology: threads per core */
	uint16_t maxcpus;		/* maximum number of vcpus */
	VMM_VM_MD_FIELDS;		/* machine-dependent fields */
};
/* Create a VM named 'name'; on success *retvm points to the new VM. */
int vm_create(const char *name, struct vm **retvm);
/* Return (presumably creating on first use; confirm) the vcpu for 'vcpuid'. */
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
void vm_reset(struct vm *vm);
/* Lock/unlock all vcpus of the VM -- confirm ordering rules at call sites. */
void vm_lock_vcpus(struct vm *vm);
void vm_unlock_vcpus(struct vm *vm);
/* Forbid creation of further vcpus -- presumably used during teardown. */
void vm_disable_vcpu_creation(struct vm *vm);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
/* Mark a vcpu active / suspended / resumed; returns 0 or an errno value. */
int vm_activate_cpu(struct vcpu *vcpu);
int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
/* Snapshots of the corresponding cpusets in struct vm. */
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
uint16_t vm_get_maxcpus(struct vm *vm);
/* Read/set the guest CPU topology (sockets x cores x threads, maxcpus). */
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);
/* Accessor: the VM's NUL-terminated name. */
static inline const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}
/* Accessor: the VM's guest-memory descriptor. */
static inline struct vm_mem *
vm_mem(struct vm *vm)
{
	return (&vm->mem);
}
/*
 * Accessor: the vcpu with the given id.  No bounds check here -- callers
 * are presumably responsible for 0 <= vcpuid < maxcpus; may also be NULL
 * if the vcpu was never allocated.  NOTE(review): confirm both.
 */
static inline struct vcpu *
vm_vcpu(struct vm *vm, int vcpuid)
{
	return (vm->vcpu[vcpuid]);
}
/*
 * Pointers into the owning VM's event state, checked from the vcpu run
 * loop by the vcpu_*_pending/suspended/reqidle helpers below.
 */
struct vm_eventinfo {
	cpuset_t *rptr;	/* rendezvous-requested cpuset -- presumably
			   &vm->rendezvous_req_cpus; confirm */
	int *sptr;	/* suspend flag -- presumably &vm->suspend; confirm */
	int *iptr;	/* reqidle flag -- presumably &vcpu->reqidle; confirm */
};
/* Non-zero if this vcpu has been asked to join a rendezvous. */
static inline int
vcpu_rendezvous_pending(struct vcpu *vcpu, struct vm_eventinfo *info)
{
	return (CPU_ISSET(vcpu_vcpuid(vcpu), info->rptr));
}
/* Non-zero if the VM has been suspended. */
static inline int
vcpu_suspended(struct vm_eventinfo *info)
{
	return (*info->sptr);
}
/* Non-zero if this vcpu has been asked to go idle. */
static inline int
vcpu_reqidle(struct vm_eventinfo *info)
{
	return (*info->iptr);
}
#endif
#endif