#include <sys/param.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <dev/vmm/vmm.h>
#include <errno.h>
#include <event.h>
#include <fcntl.h>
#include <imsg.h>
#include <poll.h>
#include <pthread.h>
#include <pthread_np.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <util.h>
#include "atomicio.h"
#include "pci.h"
#include "virtio.h"
#include "vmd.h"
#define MMIO_NOTYET 0
static int run_vm(struct vmd_vm *, struct vcpu_reg_state *);
static void vm_dispatch_vmm(int, short, void *);
static void *event_thread(void *);
static void *vcpu_run_loop(void *);
static int vmm_create_vm(struct vmd_vm *);
static void pause_vm(struct vmd_vm *);
static void unpause_vm(struct vmd_vm *);
static int start_vm(struct vmd_vm *, int);
int con_fd;
struct vmd_vm *current_vm;
extern struct vmd *env;
pthread_mutex_t threadmutex;
pthread_cond_t threadcond;
pthread_cond_t vcpu_run_cond[VMM_MAX_VCPUS_PER_VM];
pthread_mutex_t vcpu_run_mtx[VMM_MAX_VCPUS_PER_VM];
pthread_barrier_t vm_pause_barrier;
pthread_cond_t vcpu_unpause_cond[VMM_MAX_VCPUS_PER_VM];
pthread_mutex_t vcpu_unpause_mtx[VMM_MAX_VCPUS_PER_VM];
pthread_mutex_t vm_mtx;
uint8_t vcpu_hlt[VMM_MAX_VCPUS_PER_VM];
uint8_t vcpu_done[VMM_MAX_VCPUS_PER_VM];
/*
 * vm_main
 *
 * Entry point for a vm child process. Drops privileges with unveil/pledge,
 * reads the start message (a struct vmd_vm) and the local prefix config
 * from fd, validates the received descriptors, then starts the vm.
 * Never returns: _exit(2)s with start_vm()'s result or an errno-style
 * code on receive failure.
 *
 * fd: read/write fd for communicating with the parent vmm process
 * fd_vmm: open fd for the vmm(4) device node
 */
void
vm_main(int fd, int fd_vmm)
{
struct vmd_vm vm;
size_t sz = 0;
int ret = 0;
/* Keep the vmm(4) device fd for later ioctl(2) use. */
env->vmd_fd = fd_vmm;
/* Restrict filesystem visibility to re-exec'ing ourselves only. */
if (unveil(env->argv0, "x") == -1)
fatal("unveil %s", env->argv0);
if (unveil(NULL, NULL) == -1)
fatal("unveil lock");
if (pledge("stdio vmm proc exec", NULL) == -1)
fatal("pledge");
/* Receive the full vm description; atomicio loops over short reads. */
memset(&vm, 0, sizeof(vm));
sz = atomicio(read, fd, &vm, sizeof(vm));
if (sz != sizeof(vm)) {
log_warnx("failed to receive start message");
_exit(EIO);
}
setproctitle("%s", vm.vm_params.vmc_name);
log_procinit("vm/%s", vm.vm_params.vmc_name);
/* Receive the local address prefix configuration. */
sz = atomicio(read, fd, &env->vmd_cfg.cfg_localprefix,
sizeof(env->vmd_cfg.cfg_localprefix));
if (sz != sizeof(env->vmd_cfg.cfg_localprefix)) {
log_warnx("failed to receive local prefix");
_exit(EIO);
}
/* The kernel/boot fd must have been passed along with the message. */
if (vm.vm_kernel == -1) {
log_warnx("failed to receive boot fd");
_exit(EINVAL);
}
/* SEV guests require an open PSP device in the parent. */
if (vm.vm_params.vmc_sev && env->vmd_psp_fd < 0) {
log_warnx("%s not available", PSP_NODE);
_exit(EINVAL);
}
ret = start_vm(&vm, fd);
_exit(ret);
}
/*
 * start_vm
 *
 * Creates and starts the vm: registers it with vmm(4), initializes SEV,
 * loads the firmware/kernel, sets up thread-state synchronization
 * primitives, attaches the vmm pipe, initializes emulated hardware, and
 * finally runs the vcpu loop(s) via run_vm(). Reports the created vmm id
 * back to the parent over fd (0 on create failure).
 *
 * vm: the vm to start
 * fd: fd to the parent vmm process for imsg communication
 *
 * Returns 0 on clean exit of the vm, nonzero (errno-style or 1) on error.
 */
int
start_vm(struct vmd_vm *vm, int fd)
{
struct vcpu_reg_state vrs;
int ret, nicfds[VM_MAX_NICS_PER_VM];
size_t i;
create_memory_map(vm);
ret = vmm_create_vm(vm);
if (ret) {
struct rlimit lim;
char buf[FMT_SCALED_STRSIZE];
/* An ENOMEM here usually means the data rlimit is too low. */
if (ret == ENOMEM && getrlimit(RLIMIT_DATA, &lim) == 0) {
if (fmt_scaled(lim.rlim_cur, buf) == 0)
fatalx("could not allocate guest memory (data "
"limit is %s)", buf);
} else {
errno = ret;
log_warn("could not create vm");
}
/* Send a zero vm id so the parent knows creation failed. */
vm->vm_vmmid = 0;
atomicio(vwrite, fd, &vm->vm_vmmid, sizeof(vm->vm_vmmid));
return (ret);
}
ret = sev_init(vm);
if (ret) {
log_warnx("could not initialize SEV");
return (ret);
}
current_vm = vm;
con_fd = vm->vm_tty;
if (fcntl(con_fd, F_SETFL, O_NONBLOCK) == -1) {
log_warn("failed to set nonblocking mode on console");
return (1);
}
/* Report the vmm-assigned vm id back to the parent. */
if (atomicio(vwrite, fd, &vm->vm_vmmid, sizeof(vm->vm_vmmid)) !=
sizeof(vm->vm_vmmid)) {
log_warn("failed to send created vm id to vmm process");
return (1);
}
/* Populates vrs with the initial vcpu register state. */
if (load_firmware(vm, &vrs))
fatalx("failed to load kernel or firmware image");
if (vm->vm_kernel != -1)
close_fd(vm->vm_kernel);
ret = pthread_mutex_init(&threadmutex, NULL);
if (ret) {
log_warn("%s: could not initialize thread state mutex",
__func__);
return (ret);
}
ret = pthread_cond_init(&threadcond, NULL);
if (ret) {
log_warn("%s: could not initialize thread state "
"condition variable", __func__);
return (ret);
}
ret = pthread_mutex_init(&vm_mtx, NULL);
if (ret) {
log_warn("%s: could not initialize vm state mutex",
__func__);
return (ret);
}
/* Hold threadmutex so run_vm()'s cond_wait is race-free from the start. */
mutex_lock(&threadmutex);
event_init();
if (vmm_pipe(vm, fd, vm_dispatch_vmm) == -1)
fatal("setup vm pipe");
/*
 * NOTE(review): loop bound is VMM_MAX_NICS_PER_VM but nicfds is
 * sized VM_MAX_NICS_PER_VM — confirm both constants are equal, or
 * this can overrun nicfds.
 */
for (i = 0; i < VMM_MAX_NICS_PER_VM; i++)
nicfds[i] = vm->vm_ifs[i].vif_fd;
ret = init_emulated_hw(vm, vm->vm_cdrom, vm->vm_disks, nicfds);
if (ret) {
virtio_shutdown(vm);
return (ret);
}
/* Drop the proc/exec promises now that setup is complete. */
if (pledge("stdio vmm", NULL) == -1)
fatal("pledge");
/* Blocks until the vm terminates or errors out. */
ret = run_vm(vm, &vrs);
if (sev_shutdown(vm))
log_warnx("%s: could not shutdown SEV", __func__);
virtio_shutdown(vm);
return (ret);
}
/*
 * vm_dispatch_vmm
 *
 * Libevent callback for imsg traffic on the pipe to the parent vmm
 * process. Drains readable/writable buffer state, then handles each
 * queued imsg: verbosity changes, shutdown/reboot requests, pause and
 * unpause commands, and tap address results. Exits the process on pipe
 * EOF/EPIPE or on an unknown imsg type.
 *
 * fd: (unused here; the imsgbuf carries its own fd)
 * event: EV_READ and/or EV_WRITE
 * arg: the vm this pipe belongs to
 */
void
vm_dispatch_vmm(int fd, short event, void *arg)
{
struct vmd_vm *vm = arg;
struct vmop_result vmr;
struct vmop_addr_result var;
struct imsgev *iev = &vm->vm_iev;
struct imsgbuf *ibuf = &iev->ibuf;
struct imsg imsg;
uint32_t id, type;
pid_t pid;
ssize_t n;
int verbose;
if (event & EV_READ) {
if ((n = imsgbuf_read(ibuf)) == -1)
fatal("%s: imsgbuf_read", __func__);
/* n == 0 means the parent closed the pipe: exit quietly. */
if (n == 0)
_exit(0);
}
if (event & EV_WRITE) {
if (imsgbuf_write(ibuf) == -1) {
if (errno == EPIPE)
_exit(0);
fatal("%s: imsgbuf_write fd %d", __func__, ibuf->fd);
}
}
/* Process every complete imsg currently buffered. */
for (;;) {
if ((n = imsg_get(ibuf, &imsg)) == -1)
fatal("%s: imsg_get", __func__);
if (n == 0)
break;
type = imsg_get_type(&imsg);
id = imsg_get_id(&imsg);
pid = imsg_get_pid(&imsg);
#if DEBUG > 1
log_debug("%s: got imsg %d from %s", __func__, type,
vm->vm_params.vmc_params.vcp_name);
#endif
switch (type) {
case IMSG_CTL_VERBOSE:
/* Apply locally and fan out to the virtio devices. */
verbose = imsg_int_read(&imsg);
log_setverbose(verbose);
virtio_broadcast_imsg(vm, IMSG_CTL_VERBOSE, &verbose,
sizeof(verbose));
break;
case IMSG_VMDOP_VM_SHUTDOWN:
/* If the guest can't be asked nicely, just exit. */
if (vmmci_ctl(&vmmci, VMMCI_SHUTDOWN) == -1)
_exit(0);
break;
case IMSG_VMDOP_VM_REBOOT:
if (vmmci_ctl(&vmmci, VMMCI_REBOOT) == -1)
_exit(0);
break;
case IMSG_VMDOP_PAUSE_VM:
vmr.vmr_result = 0;
vmr.vmr_id = vm->vm_vmid;
pause_vm(vm);
imsg_compose_event(&vm->vm_iev,
IMSG_VMDOP_PAUSE_VM_RESPONSE, id, pid, -1, &vmr,
sizeof(vmr));
break;
case IMSG_VMDOP_UNPAUSE_VM:
vmr.vmr_result = 0;
vmr.vmr_id = vm->vm_vmid;
unpause_vm(vm);
imsg_compose_event(&vm->vm_iev,
IMSG_VMDOP_UNPAUSE_VM_RESPONSE, id, pid, -1, &vmr,
sizeof(vmr));
break;
case IMSG_VMDOP_PRIV_GET_ADDR_RESPONSE:
/* MAC address assigned by the priv process for a tap nic. */
vmop_addr_result_read(&imsg, &var);
log_debug("%s: received tap addr %s for nic %d",
vm->vm_params.vmc_name,
ether_ntoa((void *)var.var_addr), var.var_nic_idx);
vionet_set_hostmac(vm, var.var_nic_idx, var.var_addr);
break;
default:
fatalx("%s: got invalid imsg %d from %s", __func__,
type, vm->vm_params.vmc_name);
}
imsg_free(&imsg);
}
/* Re-arm the event with EV_WRITE if output is pending. */
imsg_event_add(iev);
}
/*
 * vm_shutdown
 *
 * Requests that the parent vmd process shut down or reboot this VM by
 * sending the appropriate imsg, flushes the pipe, tears down SEV state,
 * and exits the vm process. Never returns.
 *
 * cmd: VMMCI_NONE/VMMCI_SHUTDOWN for shutdown, VMMCI_REBOOT for reboot.
 *      Any other value is fatal.
 */
__dead void
vm_shutdown(unsigned int cmd)
{
	/*
	 * Fix: the address-of operator on current_vm had been corrupted
	 * into the mojibake sequence "¤t_vm" (an HTML-entity mangling of
	 * "&current_vm"), which does not compile.
	 */
	switch (cmd) {
	case VMMCI_NONE:
	case VMMCI_SHUTDOWN:
		(void)imsg_compose_event(&current_vm->vm_iev,
		    IMSG_VMDOP_VM_SHUTDOWN, 0, 0, -1, NULL, 0);
		break;
	case VMMCI_REBOOT:
		(void)imsg_compose_event(&current_vm->vm_iev,
		    IMSG_VMDOP_VM_REBOOT, 0, 0, -1, NULL, 0);
		break;
	default:
		fatalx("invalid vm ctl command: %d", cmd);
	}
	/* Make sure the message leaves before we exit. */
	imsgbuf_flush(&current_vm->vm_iev.ibuf);
	if (sev_shutdown(current_vm))
		log_warnx("%s: could not shutdown SEV", __func__);
	_exit(0);
}
/*
 * pause_vm
 *
 * Pauses a running VM. Marks the vm paused under vm_mtx (no-op if it
 * already is), wakes any halted vcpu threads so they observe the paused
 * state, then waits on the pause barrier until every vcpu thread has
 * parked, and finally pauses the machine-dependent devices.
 *
 * vm: the vm to pause (the same object current_vm points at)
 */
static void
pause_vm(struct vmd_vm *vm)
{
	unsigned int n;
	int ret;

	mutex_lock(&vm_mtx);
	if (vm->vm_state & VM_STATE_PAUSED) {
		mutex_unlock(&vm_mtx);
		return;
	}
	/*
	 * Consistency fix: operate on vm (as the guard above does) rather
	 * than indirecting through the current_vm global; both refer to
	 * the same object, so behavior is unchanged.
	 */
	vm->vm_state |= VM_STATE_PAUSED;
	mutex_unlock(&vm_mtx);

	/* Wake halted vcpus so every thread reaches the pause barrier. */
	for (n = 0; n < vm->vm_params.vmc_ncpus; n++) {
		ret = pthread_cond_broadcast(&vcpu_run_cond[n]);
		if (ret) {
			log_warnx("%s: can't broadcast vcpu run cond (%d)",
			    __func__, (int)ret);
			return;
		}
	}

	/* Barrier count is ncpus + 1, so this waits for all vcpu threads. */
	ret = pthread_barrier_wait(&vm_pause_barrier);
	if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD) {
		log_warnx("%s: could not wait on pause barrier (%d)",
		    __func__, (int)ret);
		return;
	}

	pause_vm_md(vm);
}
/*
 * unpause_vm
 *
 * Resumes a paused VM. Clears the paused flag under vm_mtx (no-op if the
 * vm is not paused), wakes every vcpu thread blocked on its unpause
 * condition variable, and unpauses the machine-dependent devices.
 *
 * vm: the vm to unpause (the same object current_vm points at)
 */
static void
unpause_vm(struct vmd_vm *vm)
{
	unsigned int n;
	int ret;

	mutex_lock(&vm_mtx);
	if (!(vm->vm_state & VM_STATE_PAUSED)) {
		mutex_unlock(&vm_mtx);
		return;
	}
	/*
	 * Consistency fix: operate on vm (as the guard above does) rather
	 * than indirecting through the current_vm global; both refer to
	 * the same object, so behavior is unchanged.
	 */
	vm->vm_state &= ~VM_STATE_PAUSED;
	mutex_unlock(&vm_mtx);

	/* Release every vcpu thread parked in its unpause cond wait. */
	for (n = 0; n < vm->vm_params.vmc_ncpus; n++) {
		ret = pthread_cond_broadcast(&vcpu_unpause_cond[n]);
		if (ret) {
			log_warnx("%s: can't broadcast vcpu unpause cond (%d)",
			    __func__, (int)ret);
			return;
		}
	}

	unpause_vm_md(vm);
}
int
vcpu_reset(uint32_t vmid, uint32_t vcpu_id, struct vcpu_reg_state *vrs)
{
struct vm_resetcpu_params vrp;
memset(&vrp, 0, sizeof(vrp));
vrp.vrp_vm_id = vmid;
vrp.vrp_vcpu_id = vcpu_id;
memcpy(&vrp.vrp_init_state, vrs, sizeof(struct vcpu_reg_state));
log_debug("%s: resetting vcpu %d for vm %d", __func__, vcpu_id, vmid);
if (ioctl(env->vmd_fd, VMM_IOC_RESETCPU, &vrp) == -1)
return (errno);
return (0);
}
/*
 * vmm_create_vm
 *
 * Validates the vm's creation parameters, marshals them into a
 * vm_create_params and registers the vm with vmm(4) via VMM_IOC_CREATE.
 * On success records the vmm-assigned vm id, the per-vcpu SEV ASIDs,
 * the kernel-chosen memory range VAs, and the position of the SEV
 * C-bit in the vm structure.
 *
 * vm: the vm being created
 *
 * Returns 0 on success, EINVAL on parameter validation failure, or the
 * errno from the ioctl.
 */
static int
vmm_create_vm(struct vmd_vm *vm)
{
struct vm_create_params vcp;
struct vmop_create_params *vmc = &vm->vm_params;
size_t i;
/* Bounds-check everything before handing it to the kernel. */
if (vmc->vmc_ncpus > VMM_MAX_VCPUS_PER_VM)
return (EINVAL);
if (vmc->vmc_nmemranges == 0 ||
vmc->vmc_nmemranges > VMM_MAX_MEM_RANGES)
return (EINVAL);
if (vmc->vmc_ndisks > VM_MAX_DISKS_PER_VM)
return (EINVAL);
if (vmc->vmc_nnics > VM_MAX_NICS_PER_VM)
return (EINVAL);
memset(&vcp, 0, sizeof(vcp));
vcp.vcp_nmemranges = vmc->vmc_nmemranges;
vcp.vcp_ncpus = vmc->vmc_ncpus;
memcpy(vcp.vcp_memranges, vmc->vmc_memranges,
sizeof(vcp.vcp_memranges));
memcpy(vcp.vcp_name, vmc->vmc_name, sizeof(vcp.vcp_name));
vcp.vcp_sev = vmc->vmc_sev;
vcp.vcp_seves = vmc->vmc_seves;
if (ioctl(env->vmd_fd, VMM_IOC_CREATE, &vcp) == -1)
return (errno);
/* The kernel filled in the id, ASIDs, and memory range VAs. */
vm->vm_vmmid = vcp.vcp_id;
for (i = 0; i < vcp.vcp_ncpus; i++)
vm->vm_sev_asid[i] = vcp.vcp_asid[i];
for (i = 0; i < vmc->vmc_nmemranges; i++)
vmc->vmc_memranges[i].vmr_va = vcp.vcp_memranges[i].vmr_va;
vm->vm_poscbit = vcp.vcp_poscbit;
return (0);
}
/*
 * run_vm
 *
 * Runs the vm: allocates per-vcpu run parameters, resets and (for SEV)
 * encrypts each vcpu, spawns one thread per vcpu plus an event thread,
 * then waits on threadcond (threadmutex is already held by the caller,
 * start_vm) joining vcpu threads as they finish. Returns once the event
 * thread exits or all vcpus are done.
 *
 * vm: the vm to run
 * vrs: initial register state for every vcpu
 *
 * Returns the last joined vcpu's exit status, an errno-style code, or
 * EIO on join/reset failure.
 */
static int
run_vm(struct vmd_vm *vm, struct vcpu_reg_state *vrs)
{
struct vmop_create_params *vmc;
uint8_t evdone = 0;
size_t i;
int ret;
pthread_t *tid, evtid;
char tname[MAXCOMLEN + 1];
struct vm_run_params **vrp;
void *exit_status;
vmc = &vm->vm_params;
if (vmc->vmc_nmemranges == 0 ||
vmc->vmc_nmemranges > VMM_MAX_MEM_RANGES)
return (EINVAL);
tid = calloc(vmc->vmc_ncpus, sizeof(pthread_t));
if (tid == NULL) {
log_warn("failed to allocate pthread structures");
return (ENOMEM);
}
vrp = calloc(vmc->vmc_ncpus, sizeof(struct vm_run_params *));
if (vrp == NULL) {
log_warn("failed to allocate vm run params array");
return (ENOMEM);
}
/* ncpus + 1: every vcpu thread plus pause_vm() itself. */
ret = pthread_barrier_init(&vm_pause_barrier, NULL, vmc->vmc_ncpus + 1);
if (ret) {
log_warnx("cannot initialize pause barrier (%d)", ret);
return (ret);
}
log_debug("%s: starting %zu vcpu thread(s) for vm %s", __func__,
vmc->vmc_ncpus, vmc->vmc_name);
/* Per-vcpu setup: run params, reset, SEV launch, sync primitives. */
for (i = 0 ; i < vmc->vmc_ncpus; i++) {
vrp[i] = malloc(sizeof(struct vm_run_params));
if (vrp[i] == NULL) {
log_warn("failed to allocate vm run parameters");
return (ENOMEM);
}
vrp[i]->vrp_exit = malloc(sizeof(struct vm_exit));
if (vrp[i]->vrp_exit == NULL) {
log_warn("failed to allocate vm exit area");
return (ENOMEM);
}
vrp[i]->vrp_vm_id = vm->vm_vmmid;
vrp[i]->vrp_vcpu_id = i;
if (vcpu_reset(vm->vm_vmmid, i, vrs)) {
log_warnx("cannot reset vcpu %zu", i);
return (EIO);
}
if (sev_activate(vm, i)) {
log_warnx("SEV activatation failed for vcpu %zu", i);
return (EIO);
}
if (sev_encrypt_memory(vm)) {
log_warnx("memory encryption failed for vcpu %zu", i);
return (EIO);
}
if (sev_encrypt_state(vm, i)) {
log_warnx("state encryption failed for vcpu %zu", i);
return (EIO);
}
if (sev_launch_finalize(vm)) {
log_warnx("encryption failed for vcpu %zu", i);
return (EIO);
}
ret = pthread_cond_init(&vcpu_run_cond[i], NULL);
if (ret) {
log_warnx("cannot initialize cond var (%d)", ret);
return (ret);
}
ret = pthread_mutex_init(&vcpu_run_mtx[i], NULL);
if (ret) {
log_warnx("cannot initialize mtx (%d)", ret);
return (ret);
}
ret = pthread_cond_init(&vcpu_unpause_cond[i], NULL);
if (ret) {
log_warnx("cannot initialize unpause var (%d)", ret);
return (ret);
}
ret = pthread_mutex_init(&vcpu_unpause_mtx[i], NULL);
if (ret) {
log_warnx("cannot initialize unpause mtx (%d)", ret);
return (ret);
}
vcpu_hlt[i] = 0;
ret = pthread_create(&tid[i], NULL, vcpu_run_loop, vrp[i]);
if (ret) {
/* NOTE(review): pthread_create returns the error; errno here
 * may be stale — presumably this should be ret itself. */
ret = errno;
log_warn("%s: could not create vcpu thread %zu",
__func__, i);
return (ret);
}
snprintf(tname, sizeof(tname), "vcpu-%zu", i);
pthread_set_name_np(tid[i], tname);
}
log_debug("%s: waiting on events for VM %s", __func__, vmc->vmc_name);
/* Event thread runs the libevent loop; evdone flags its exit. */
ret = pthread_create(&evtid, NULL, event_thread, &evdone);
if (ret) {
errno = ret;
log_warn("%s: could not create event thread", __func__);
return (ret);
}
pthread_set_name_np(evtid, "event");
for (;;) {
/* threadmutex was locked in start_vm, so this wait is safe. */
ret = pthread_cond_wait(&threadcond, &threadmutex);
if (ret) {
log_warn("%s: waiting on thread state condition "
"variable failed", __func__);
return (ret);
}
/*
 * Join finished vcpu threads; the last join's status becomes
 * this function's return value.
 */
mutex_lock(&vm_mtx);
for (i = 0; i < vmc->vmc_ncpus; i++) {
if (vcpu_done[i] == 0)
continue;
if (pthread_join(tid[i], &exit_status)) {
log_warn("failed to join thread %zd", i);
mutex_unlock(&vm_mtx);
return (EIO);
}
ret = (intptr_t)exit_status;
}
mutex_unlock(&vm_mtx);
if (evdone) {
if (pthread_join(evtid, &exit_status)) {
log_warn("failed to join event thread");
return (EIO);
}
/* The event loop should outlive the vcpus. */
log_warnx("event thread exited unexpectedly");
return (EIO);
}
/* Exit the wait loop once every vcpu has finished. */
mutex_lock(&vm_mtx);
for (i = 0; i < vmc->vmc_ncpus; i++) {
if (vcpu_done[i] == 0)
break;
}
mutex_unlock(&vm_mtx);
if (i == vmc->vmc_ncpus)
break;
}
if (pthread_barrier_destroy(&vm_pause_barrier))
log_warnx("could not destroy pause barrier");
return (ret);
}
/*
 * event_thread
 *
 * Thread body that runs the libevent dispatch loop. When the loop
 * returns, it sets the caller's done flag and signals threadcond so
 * run_vm() notices the exit.
 *
 * arg: pointer to run_vm()'s evdone byte
 *
 * Returns event_dispatch()'s result cast to a pointer.
 */
static void *
event_thread(void *arg)
{
	uint8_t *done_flag = arg;
	intptr_t rc = event_dispatch();

	*done_flag = 1;

	/* Wake run_vm() under threadmutex so the signal is not lost. */
	mutex_lock(&threadmutex);
	pthread_cond_signal(&threadcond);
	mutex_unlock(&threadmutex);

	return ((void *)rc);
}
/*
 * vcpu_run_loop
 *
 * Thread body for a single vcpu. Loops: under vcpu_run_mtx it checks the
 * paused and halted flags (snapshotted under vm_mtx), parks on the pause
 * barrier / unpause cond when paused, waits on the run cond while
 * halted, then injects any pending interrupt and enters the guest via
 * VMM_IOC_RUN. Handles the resulting vm exit until the vm terminates or
 * an error occurs, then marks itself done and signals run_vm().
 *
 * arg: this vcpu's struct vm_run_params
 *
 * Returns the exit status (0, an errno, or vcpu_exit()'s result) cast
 * to a pointer.
 */
static void *
vcpu_run_loop(void *arg)
{
struct vm_run_params *vrp = (struct vm_run_params *)arg;
intptr_t ret = 0;
uint32_t n = vrp->vrp_vcpu_id;
int paused = 0, halted = 0;
for (;;) {
ret = pthread_mutex_lock(&vcpu_run_mtx[n]);
if (ret) {
log_warnx("%s: can't lock vcpu run mtx (%d)",
__func__, (int)ret);
return ((void *)ret);
}
/* Snapshot shared state under vm_mtx before acting on it. */
mutex_lock(&vm_mtx);
paused = (current_vm->vm_state & VM_STATE_PAUSED) != 0;
halted = vcpu_hlt[n];
mutex_unlock(&vm_mtx);
if (paused) {
/* Rendezvous with pause_vm() — barrier is sized ncpus + 1. */
ret = pthread_barrier_wait(&vm_pause_barrier);
if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD) {
log_warnx("%s: could not wait on pause barrier (%d)",
__func__, (int)ret);
return ((void *)ret);
}
ret = pthread_mutex_lock(&vcpu_unpause_mtx[n]);
if (ret) {
log_warnx("%s: can't lock vcpu unpause mtx (%d)",
__func__, (int)ret);
return ((void *)ret);
}
/* Drop the run mtx while parked so pause/signal paths can take it. */
mutex_unlock(&vcpu_run_mtx[n]);
ret = pthread_cond_wait(&vcpu_unpause_cond[n],
&vcpu_unpause_mtx[n]);
if (ret) {
log_warnx(
"%s: can't wait on unpause cond (%d)",
__func__, (int)ret);
break;
}
mutex_lock(&vcpu_run_mtx[n]);
ret = pthread_mutex_unlock(&vcpu_unpause_mtx[n]);
if (ret) {
log_warnx("%s: can't unlock unpause mtx (%d)",
__func__, (int)ret);
break;
}
}
if (halted) {
/* Sleep until vcpu_signal_run()/pause_vm() broadcasts. */
ret = pthread_cond_wait(&vcpu_run_cond[n],
&vcpu_run_mtx[n]);
if (ret) {
log_warnx(
"%s: can't wait on cond (%d)",
__func__, (int)ret);
(void)pthread_mutex_unlock(
&vcpu_run_mtx[n]);
break;
}
}
ret = pthread_mutex_unlock(&vcpu_run_mtx[n]);
if (ret) {
log_warnx("%s: can't unlock mutex on cond (%d)",
__func__, (int)ret);
break;
}
/* Arrange interrupt injection for this entry into the guest. */
if (vrp->vrp_irqready && intr_pending(current_vm)) {
vrp->vrp_inject.vie_vector = intr_ack(current_vm);
vrp->vrp_inject.vie_type = VCPU_INJECT_INTR;
} else
vrp->vrp_inject.vie_type = VCPU_INJECT_NONE;
vrp->vrp_intr_pending = intr_pending(current_vm);
/* Enter the guest; blocks until the next vm exit. */
if (ioctl(env->vmd_fd, VMM_IOC_RUN, vrp) == -1) {
ret = errno;
log_warn("%s: vm %d / vcpu %d run ioctl failed",
__func__, current_vm->vm_vmid, n);
break;
}
if (vrp->vrp_exit_reason == VM_EXIT_TERMINATED) {
ret = (intptr_t)NULL;
break;
}
if (vrp->vrp_exit_reason != VM_EXIT_NONE) {
ret = vcpu_exit(vrp);
if (ret)
break;
}
}
/* Mark this vcpu finished and wake run_vm() to join us. */
mutex_lock(&vm_mtx);
vcpu_done[n] = 1;
mutex_unlock(&vm_mtx);
mutex_lock(&threadmutex);
pthread_cond_signal(&threadcond);
mutex_unlock(&threadmutex);
return ((void *)ret);
}
int
vcpu_intr(uint32_t vmm_id, uint32_t vcpu_id, uint8_t intr)
{
struct vm_intr_params vip;
memset(&vip, 0, sizeof(vip));
vip.vip_vm_id = vmm_id;
vip.vip_vcpu_id = vcpu_id;
vip.vip_intr = intr;
if (ioctl(env->vmd_fd, VMM_IOC_INTR, &vip) == -1)
return (errno);
return (0);
}
/*
 * fd_hasdata
 *
 * Non-blocking check (zero-timeout poll) whether fd has data to read.
 *
 * fd: descriptor to probe
 *
 * Returns 1 if data is available, 0 otherwise (including poll errors,
 * which are logged).
 */
int
fd_hasdata(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int nready;

	nready = poll(&pfd, 1, 0);
	if (nready == -1) {
		log_warn("checking file descriptor for data failed");
		return (0);
	}

	return ((nready == 1 && (pfd.revents & POLLIN)) ? 1 : 0);
}
/*
 * mutex_lock
 *
 * Locks the given mutex; any failure is fatal (errno is set from the
 * pthread return code so fatal() reports it).
 */
void
mutex_lock(pthread_mutex_t *m)
{
	int error;

	if ((error = pthread_mutex_lock(m)) != 0) {
		errno = error;
		fatal("could not acquire mutex");
	}
}
/*
 * mutex_unlock
 *
 * Unlocks the given mutex; any failure is fatal (errno is set from the
 * pthread return code so fatal() reports it).
 */
void
mutex_unlock(pthread_mutex_t *m)
{
	int error;

	if ((error = pthread_mutex_unlock(m)) != 0) {
		errno = error;
		fatal("could not release mutex");
	}
}
/*
 * vm_pipe_init
 *
 * Initializes a vm_dev_pipe with the given read callback and no
 * callback argument. Convenience wrapper around vm_pipe_init2().
 */
void
vm_pipe_init(struct vm_dev_pipe *p, void (*cb)(int, short, void *))
{
vm_pipe_init2(p, cb, NULL);
}
/*
 * vm_pipe_init2
 *
 * Initializes a vm_dev_pipe: creates a close-on-exec pipe pair and sets
 * up (but does not add) a persistent read event firing cb with arg.
 * Failure to create the pipe is fatal.
 *
 * p: pipe structure to initialize
 * cb: libevent callback for the read end
 * arg: opaque argument passed to cb
 */
void
vm_pipe_init2(struct vm_dev_pipe *p, void (*cb)(int, short, void *), void *arg)
{
	int fds[2];

	memset(p, 0, sizeof(*p));

	if (pipe2(fds, O_CLOEXEC) != 0)
		fatal("failed to create vm_dev_pipe pipe");

	p->read = fds[0];
	p->write = fds[1];

	event_set(&p->read_ev, p->read, EV_READ | EV_PERSIST, cb, arg);
}
/*
 * vm_pipe_send
 *
 * Writes a message to a vm_dev_pipe's write end. A short or failed
 * write is fatal; messages are enum-sized, well below PIPE_BUF, so a
 * successful write is never partial.
 *
 * p: pipe to write to
 * msg: message value to send
 */
void
vm_pipe_send(struct vm_dev_pipe *p, enum pipe_msg_type msg)
{
	ssize_t n;

	/*
	 * Fix: write(2) returns ssize_t; storing it in a size_t silently
	 * converted -1 to SIZE_MAX. The error was still caught, but keep
	 * the types honest.
	 */
	n = write(p->write, &msg, sizeof(msg));
	if (n != (ssize_t)sizeof(msg))
		fatal("failed to write to device pipe");
}
/*
 * vm_pipe_recv
 *
 * Reads one message from a vm_dev_pipe's read end. A short or failed
 * read is fatal; messages are enum-sized, well below PIPE_BUF, so a
 * successful read is never partial.
 *
 * p: pipe to read from
 *
 * Returns the message value read.
 */
enum pipe_msg_type
vm_pipe_recv(struct vm_dev_pipe *p)
{
	ssize_t n;
	enum pipe_msg_type msg;

	/*
	 * Fix: read(2) returns ssize_t; storing it in a size_t silently
	 * converted -1 to SIZE_MAX. The error was still caught, but keep
	 * the types honest.
	 */
	n = read(p->read, &msg, sizeof(msg));
	if (n != (ssize_t)sizeof(msg))
		fatal("failed to read from device pipe");
	return msg;
}
/*
 * remap_guest_mem
 *
 * Maps the guest's memory ranges into this process via the vmm(4)
 * VMM_IOC_SHAREMEM ioctl and records the resulting local VAs back
 * into the vm's memory range parameters.
 *
 * vm: the vm whose memory is being mapped (EINVAL if NULL)
 * vmm_fd: open fd for the vmm(4) device
 *
 * Returns 0 on success, EINVAL on a NULL vm, or the ioctl's errno.
 */
int
remap_guest_mem(struct vmd_vm *vm, int vmm_fd)
{
	struct vm_sharemem_params vsp;
	size_t idx;

	if (vm == NULL)
		return (EINVAL);

	/* Marshal the vm's memory layout for the kernel. */
	memset(&vsp, 0, sizeof(vsp));
	vsp.vsp_vm_id = vm->vm_vmmid;
	vsp.vsp_nmemranges = vm->vm_params.vmc_nmemranges;
	memcpy(&vsp.vsp_memranges, &vm->vm_params.vmc_memranges,
	    sizeof(vsp.vsp_memranges));

	if (ioctl(vmm_fd, VMM_IOC_SHAREMEM, &vsp) == -1)
		return (errno);

	/* Adopt the VAs the kernel chose for each shared range. */
	for (idx = 0; idx < vsp.vsp_nmemranges; idx++)
		vm->vm_params.vmc_memranges[idx].vmr_va = vsp.vsp_va[idx];

	return (0);
}
/*
 * vcpu_halt
 *
 * Marks the given vcpu halted (under vm_mtx); its run loop will then
 * block on its run condition variable until vcpu_signal_run().
 */
void
vcpu_halt(uint32_t vcpu_id)
{
mutex_lock(&vm_mtx);
vcpu_hlt[vcpu_id] = 1;
mutex_unlock(&vm_mtx);
}
/*
 * vcpu_unhalt
 *
 * Clears the given vcpu's halted flag (under vm_mtx) so its run loop
 * will not block on the next iteration.
 */
void
vcpu_unhalt(uint32_t vcpu_id)
{
mutex_lock(&vm_mtx);
vcpu_hlt[vcpu_id] = 0;
mutex_unlock(&vm_mtx);
}
void
vcpu_signal_run(uint32_t vcpu_id)
{
int ret;
mutex_lock(&vcpu_run_mtx[vcpu_id]);
ret = pthread_cond_signal(&vcpu_run_cond[vcpu_id]);
if (ret)
fatalx("%s: can't signal (%d)", __func__, ret);
mutex_unlock(&vcpu_run_mtx[vcpu_id]);
}