Symbol: vcpu
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
299
bhyve_init_vcpu(struct vcpu *vcpu)
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
305
err = vm_get_capability(vcpu, VM_CAP_HALT_EXIT, &tmp);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
310
vm_set_capability(vcpu, VM_CAP_HALT_EXIT, 1);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
319
err = vm_set_capability(vcpu, VM_CAP_HALT_EXIT, tmp);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
330
err = vm_get_capability(vcpu, VM_CAP_PAUSE_EXIT, &tmp);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
335
vm_set_capability(vcpu, VM_CAP_PAUSE_EXIT, 1);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
339
err = vm_set_x2apic_state(vcpu, X2APIC_ENABLED);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
341
err = vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
349
vm_set_capability(vcpu, VM_CAP_ENABLE_INVPCID, 1);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
351
err = vm_set_capability(vcpu, VM_CAP_IPI_EXIT, 1);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
357
bhyve_start_vcpu(struct vcpu *vcpu, bool bsp, bool suspend)
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
367
spinup_ap(vcpu, 0);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
370
bhyve_init_vcpu(vcpu);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
378
error = vm_set_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
390
error = vm_set_run_state(vcpu, bsp ? VRS_RUN : VRS_HALT, 0);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
394
fbsdrun_addcpu(vcpu_id(vcpu), suspend);
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
398
bhyve_init_platform(struct vmctx *ctx, struct vcpu *bsp __unused)
usr/src/cmd/bhyve/amd64/bhyverun_machdep.c
433
bhyve_init_platform_late(struct vmctx *ctx, struct vcpu *bsp __unused)
usr/src/cmd/bhyve/amd64/inout.c
116
emulate_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_inout *inout)
usr/src/cmd/bhyve/amd64/inout.h
46
struct vcpu;
usr/src/cmd/bhyve/amd64/inout.h
90
int emulate_inout(struct vmctx *, struct vcpu *vcpu, struct vm_exit *vmexit);
usr/src/cmd/bhyve/amd64/inout.h
92
int emulate_inout(struct vmctx *, struct vcpu *vcpu, struct vm_inout *inout);
usr/src/cmd/bhyve/amd64/kernemu_dev.c
47
apic_handler(struct vcpu *vcpu, int dir, uint64_t addr, int size,
usr/src/cmd/bhyve/amd64/kernemu_dev.c
50
if (vm_readwrite_kernemu_device(vcpu, addr, (dir == MEM_F_WRITE),
usr/src/cmd/bhyve/amd64/spinup_ap.c
57
spinup_ap_realmode(struct vcpu *newcpu, uint64_t rip)
usr/src/cmd/bhyve/amd64/spinup_ap.c
89
spinup_ap(struct vcpu *newcpu, uint64_t rip)
usr/src/cmd/bhyve/amd64/spinup_ap.h
32
void spinup_ap(struct vcpu *newcpu, uint64_t rip);
usr/src/cmd/bhyve/amd64/task_switch.c
1004
tss32_save(vcpu, task_switch, eip, &oldtss, ot_iov);
usr/src/cmd/bhyve/amd64/task_switch.c
1012
error = desc_table_write(vcpu, &sup_paging, nt_sel,
usr/src/cmd/bhyve/amd64/task_switch.c
1018
SETREG(vcpu, VM_REG_GUEST_TR, nt_sel);
usr/src/cmd/bhyve/amd64/task_switch.c
1022
update_seg_desc(vcpu, VM_REG_GUEST_TR, &nt);
usr/src/cmd/bhyve/amd64/task_switch.c
1025
cr0 = GETREG(vcpu, VM_REG_GUEST_CR0);
usr/src/cmd/bhyve/amd64/task_switch.c
1026
SETREG(vcpu, VM_REG_GUEST_CR0, cr0 | CR0_TS);
usr/src/cmd/bhyve/amd64/task_switch.c
1033
error = vm_set_register(vcpu, VM_REG_GUEST_RIP, newtss.tss_eip);
usr/src/cmd/bhyve/amd64/task_switch.c
1037
error = tss32_restore(ctx, vcpu, task_switch, ot_sel, &newtss, nt_iov,
usr/src/cmd/bhyve/amd64/task_switch.c
1049
error = push_errcode(vcpu, &task_switch->paging, nt_type,
usr/src/cmd/bhyve/amd64/task_switch.c
1085
error = vm_set_intinfo(vcpu, 0);
usr/src/cmd/bhyve/amd64/task_switch.c
113
GETREG(struct vcpu *vcpu, int reg)
usr/src/cmd/bhyve/amd64/task_switch.c
118
error = vm_get_register(vcpu, reg, &val);
usr/src/cmd/bhyve/amd64/task_switch.c
124
SETREG(struct vcpu *vcpu, int reg, uint64_t val)
usr/src/cmd/bhyve/amd64/task_switch.c
128
error = vm_set_register(vcpu, reg, val);
usr/src/cmd/bhyve/amd64/task_switch.c
164
sel_exception(struct vcpu *vcpu, int vector, uint16_t sel, int ext)
usr/src/cmd/bhyve/amd64/task_switch.c
178
vm_inject_fault(vcpu, vector, 1, sel);
usr/src/cmd/bhyve/amd64/task_switch.c
186
desc_table_limit_check(struct vcpu *vcpu, uint16_t sel)
usr/src/cmd/bhyve/amd64/task_switch.c
193
error = vm_get_desc(vcpu, reg, &base, &limit, &access);
usr/src/cmd/bhyve/amd64/task_switch.c
216
desc_table_rw(struct vcpu *vcpu, struct vm_guest_paging *paging,
usr/src/cmd/bhyve/amd64/task_switch.c
226
error = vm_get_desc(vcpu, reg, &base, &limit, &access);
usr/src/cmd/bhyve/amd64/task_switch.c
230
error = vm_copy_setup(vcpu, paging, base + SEL_START(sel),
usr/src/cmd/bhyve/amd64/task_switch.c
244
desc_table_read(struct vcpu *vcpu, struct vm_guest_paging *paging,
usr/src/cmd/bhyve/amd64/task_switch.c
247
return (desc_table_rw(vcpu, paging, sel, desc, true, faultptr));
usr/src/cmd/bhyve/amd64/task_switch.c
251
desc_table_write(struct vcpu *vcpu, struct vm_guest_paging *paging,
usr/src/cmd/bhyve/amd64/task_switch.c
254
return (desc_table_rw(vcpu, paging, sel, desc, false, faultptr));
usr/src/cmd/bhyve/amd64/task_switch.c
265
read_tss_descriptor(struct vcpu *vcpu, struct vm_task_switch *ts,
usr/src/cmd/bhyve/amd64/task_switch.c
275
if (desc_table_limit_check(vcpu, sel)) {
usr/src/cmd/bhyve/amd64/task_switch.c
277
sel_exception(vcpu, IDT_TS, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
279
sel_exception(vcpu, IDT_GP, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
285
error = desc_table_read(vcpu, &sup_paging, sel, desc, faultptr);
usr/src/cmd/bhyve/amd64/task_switch.c
321
validate_seg_desc(struct vcpu *vcpu, struct vm_task_switch *ts,
usr/src/cmd/bhyve/amd64/task_switch.c
353
sel = GETREG(vcpu, segment);
usr/src/cmd/bhyve/amd64/task_switch.c
357
sel_exception(vcpu, IDT_TS, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
362
if (desc_table_limit_check(vcpu, sel)) {
usr/src/cmd/bhyve/amd64/task_switch.c
363
sel_exception(vcpu, IDT_TS, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
371
sel_exception(vcpu, IDT_TS, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
383
error = desc_table_read(vcpu, &sup_paging, sel, &usd, faultptr);
usr/src/cmd/bhyve/amd64/task_switch.c
392
sel_exception(vcpu, IDT_TS, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
404
sel_exception(vcpu, idtvec, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
408
cs = GETREG(vcpu, VM_REG_GUEST_CS);
usr/src/cmd/bhyve/amd64/task_switch.c
414
sel_exception(vcpu, IDT_TS, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
422
sel_exception(vcpu, IDT_TS, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
438
sel_exception(vcpu, IDT_TS, sel, ts->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
447
tss32_save(struct vcpu *vcpu, struct vm_task_switch *task_switch,
usr/src/cmd/bhyve/amd64/task_switch.c
452
tss->tss_eax = GETREG(vcpu, VM_REG_GUEST_RAX);
usr/src/cmd/bhyve/amd64/task_switch.c
453
tss->tss_ecx = GETREG(vcpu, VM_REG_GUEST_RCX);
usr/src/cmd/bhyve/amd64/task_switch.c
454
tss->tss_edx = GETREG(vcpu, VM_REG_GUEST_RDX);
usr/src/cmd/bhyve/amd64/task_switch.c
455
tss->tss_ebx = GETREG(vcpu, VM_REG_GUEST_RBX);
usr/src/cmd/bhyve/amd64/task_switch.c
456
tss->tss_esp = GETREG(vcpu, VM_REG_GUEST_RSP);
usr/src/cmd/bhyve/amd64/task_switch.c
457
tss->tss_ebp = GETREG(vcpu, VM_REG_GUEST_RBP);
usr/src/cmd/bhyve/amd64/task_switch.c
458
tss->tss_esi = GETREG(vcpu, VM_REG_GUEST_RSI);
usr/src/cmd/bhyve/amd64/task_switch.c
459
tss->tss_edi = GETREG(vcpu, VM_REG_GUEST_RDI);
usr/src/cmd/bhyve/amd64/task_switch.c
462
tss->tss_es = GETREG(vcpu, VM_REG_GUEST_ES);
usr/src/cmd/bhyve/amd64/task_switch.c
463
tss->tss_cs = GETREG(vcpu, VM_REG_GUEST_CS);
usr/src/cmd/bhyve/amd64/task_switch.c
464
tss->tss_ss = GETREG(vcpu, VM_REG_GUEST_SS);
usr/src/cmd/bhyve/amd64/task_switch.c
465
tss->tss_ds = GETREG(vcpu, VM_REG_GUEST_DS);
usr/src/cmd/bhyve/amd64/task_switch.c
466
tss->tss_fs = GETREG(vcpu, VM_REG_GUEST_FS);
usr/src/cmd/bhyve/amd64/task_switch.c
467
tss->tss_gs = GETREG(vcpu, VM_REG_GUEST_GS);
usr/src/cmd/bhyve/amd64/task_switch.c
470
tss->tss_eflags = GETREG(vcpu, VM_REG_GUEST_RFLAGS);
usr/src/cmd/bhyve/amd64/task_switch.c
480
update_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *sd)
usr/src/cmd/bhyve/amd64/task_switch.c
484
error = vm_set_desc(vcpu, reg, sd->base, sd->limit, sd->access);
usr/src/cmd/bhyve/amd64/task_switch.c
492
tss32_restore(struct vmctx *ctx, struct vcpu *vcpu, struct vm_task_switch *ts,
usr/src/cmd/bhyve/amd64/task_switch.c
512
SETREG(vcpu, VM_REG_GUEST_LDTR, tss->tss_ldt);
usr/src/cmd/bhyve/amd64/task_switch.c
532
vm_inject_gp(vcpu);
usr/src/cmd/bhyve/amd64/task_switch.c
536
SETREG(vcpu, VM_REG_GUEST_PDPTE0, pdpte[0]);
usr/src/cmd/bhyve/amd64/task_switch.c
537
SETREG(vcpu, VM_REG_GUEST_PDPTE1, pdpte[1]);
usr/src/cmd/bhyve/amd64/task_switch.c
538
SETREG(vcpu, VM_REG_GUEST_PDPTE2, pdpte[2]);
usr/src/cmd/bhyve/amd64/task_switch.c
539
SETREG(vcpu, VM_REG_GUEST_PDPTE3, pdpte[3]);
usr/src/cmd/bhyve/amd64/task_switch.c
541
SETREG(vcpu, VM_REG_GUEST_CR3, tss->tss_cr3);
usr/src/cmd/bhyve/amd64/task_switch.c
546
SETREG(vcpu, VM_REG_GUEST_RFLAGS, eflags);
usr/src/cmd/bhyve/amd64/task_switch.c
547
SETREG(vcpu, VM_REG_GUEST_RIP, tss->tss_eip);
usr/src/cmd/bhyve/amd64/task_switch.c
550
SETREG(vcpu, VM_REG_GUEST_RAX, tss->tss_eax);
usr/src/cmd/bhyve/amd64/task_switch.c
551
SETREG(vcpu, VM_REG_GUEST_RCX, tss->tss_ecx);
usr/src/cmd/bhyve/amd64/task_switch.c
552
SETREG(vcpu, VM_REG_GUEST_RDX, tss->tss_edx);
usr/src/cmd/bhyve/amd64/task_switch.c
553
SETREG(vcpu, VM_REG_GUEST_RBX, tss->tss_ebx);
usr/src/cmd/bhyve/amd64/task_switch.c
554
SETREG(vcpu, VM_REG_GUEST_RSP, tss->tss_esp);
usr/src/cmd/bhyve/amd64/task_switch.c
555
SETREG(vcpu, VM_REG_GUEST_RBP, tss->tss_ebp);
usr/src/cmd/bhyve/amd64/task_switch.c
556
SETREG(vcpu, VM_REG_GUEST_RSI, tss->tss_esi);
usr/src/cmd/bhyve/amd64/task_switch.c
557
SETREG(vcpu, VM_REG_GUEST_RDI, tss->tss_edi);
usr/src/cmd/bhyve/amd64/task_switch.c
560
SETREG(vcpu, VM_REG_GUEST_ES, tss->tss_es);
usr/src/cmd/bhyve/amd64/task_switch.c
561
SETREG(vcpu, VM_REG_GUEST_CS, tss->tss_cs);
usr/src/cmd/bhyve/amd64/task_switch.c
562
SETREG(vcpu, VM_REG_GUEST_SS, tss->tss_ss);
usr/src/cmd/bhyve/amd64/task_switch.c
563
SETREG(vcpu, VM_REG_GUEST_DS, tss->tss_ds);
usr/src/cmd/bhyve/amd64/task_switch.c
564
SETREG(vcpu, VM_REG_GUEST_FS, tss->tss_fs);
usr/src/cmd/bhyve/amd64/task_switch.c
565
SETREG(vcpu, VM_REG_GUEST_GS, tss->tss_gs);
usr/src/cmd/bhyve/amd64/task_switch.c
575
error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_LDTR, &seg_desc,
usr/src/cmd/bhyve/amd64/task_switch.c
579
update_seg_desc(vcpu, VM_REG_GUEST_LDTR, &seg_desc);
usr/src/cmd/bhyve/amd64/task_switch.c
590
error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_CS, &seg_desc,
usr/src/cmd/bhyve/amd64/task_switch.c
595
error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_SS, &seg_desc2,
usr/src/cmd/bhyve/amd64/task_switch.c
599
update_seg_desc(vcpu, VM_REG_GUEST_CS, &seg_desc);
usr/src/cmd/bhyve/amd64/task_switch.c
600
update_seg_desc(vcpu, VM_REG_GUEST_SS, &seg_desc2);
usr/src/cmd/bhyve/amd64/task_switch.c
603
error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_DS, &seg_desc,
usr/src/cmd/bhyve/amd64/task_switch.c
607
update_seg_desc(vcpu, VM_REG_GUEST_DS, &seg_desc);
usr/src/cmd/bhyve/amd64/task_switch.c
609
error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_ES, &seg_desc,
usr/src/cmd/bhyve/amd64/task_switch.c
613
update_seg_desc(vcpu, VM_REG_GUEST_ES, &seg_desc);
usr/src/cmd/bhyve/amd64/task_switch.c
615
error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_FS, &seg_desc,
usr/src/cmd/bhyve/amd64/task_switch.c
619
update_seg_desc(vcpu, VM_REG_GUEST_FS, &seg_desc);
usr/src/cmd/bhyve/amd64/task_switch.c
621
error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_GS, &seg_desc,
usr/src/cmd/bhyve/amd64/task_switch.c
625
update_seg_desc(vcpu, VM_REG_GUEST_GS, &seg_desc);
usr/src/cmd/bhyve/amd64/task_switch.c
780
push_errcode(struct vcpu *vcpu, struct vm_guest_paging *paging,
usr/src/cmd/bhyve/amd64/task_switch.c
792
cr0 = GETREG(vcpu, VM_REG_GUEST_CR0);
usr/src/cmd/bhyve/amd64/task_switch.c
793
rflags = GETREG(vcpu, VM_REG_GUEST_RFLAGS);
usr/src/cmd/bhyve/amd64/task_switch.c
794
stacksel = GETREG(vcpu, VM_REG_GUEST_SS);
usr/src/cmd/bhyve/amd64/task_switch.c
796
error = vm_get_desc(vcpu, VM_REG_GUEST_SS, &seg_desc.base,
usr/src/cmd/bhyve/amd64/task_switch.c
820
esp = GETREG(vcpu, VM_REG_GUEST_RSP);
usr/src/cmd/bhyve/amd64/task_switch.c
825
sel_exception(vcpu, IDT_SS, stacksel, 1);
usr/src/cmd/bhyve/amd64/task_switch.c
831
vm_inject_ac(vcpu, 1);
usr/src/cmd/bhyve/amd64/task_switch.c
836
error = vm_copy_setup(vcpu, paging, gla, bytes, PROT_WRITE,
usr/src/cmd/bhyve/amd64/task_switch.c
842
SETREG(vcpu, VM_REG_GUEST_RSP, esp);
usr/src/cmd/bhyve/amd64/task_switch.c
860
vmexit_task_switch(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vmexit)
usr/src/cmd/bhyve/amd64/task_switch.c
897
error = read_tss_descriptor(vcpu, task_switch, nt_sel, &nt_desc,
usr/src/cmd/bhyve/amd64/task_switch.c
907
sel_exception(vcpu, IDT_TS, nt_sel, ext);
usr/src/cmd/bhyve/amd64/task_switch.c
913
sel_exception(vcpu, IDT_NP, nt_sel, ext);
usr/src/cmd/bhyve/amd64/task_switch.c
930
sel_exception(vcpu, IDT_TS, nt_sel, ext);
usr/src/cmd/bhyve/amd64/task_switch.c
936
sel_exception(vcpu, IDT_TS, nt_sel, ext);
usr/src/cmd/bhyve/amd64/task_switch.c
945
sel_exception(vcpu, IDT_GP, nt_sel, ext);
usr/src/cmd/bhyve/amd64/task_switch.c
950
error = vm_copy_setup(vcpu, &sup_paging, nt.base, minlimit + 1,
usr/src/cmd/bhyve/amd64/task_switch.c
956
ot_sel = GETREG(vcpu, VM_REG_GUEST_TR);
usr/src/cmd/bhyve/amd64/task_switch.c
964
sel_exception(vcpu, IDT_TS, ot_sel, task_switch->ext);
usr/src/cmd/bhyve/amd64/task_switch.c
969
error = vm_get_desc(vcpu, VM_REG_GUEST_TR, &ot_base, &ot_lim,
usr/src/cmd/bhyve/amd64/task_switch.c
977
error = read_tss_descriptor(vcpu, task_switch, ot_sel, &ot_desc,
usr/src/cmd/bhyve/amd64/task_switch.c
982
error = vm_copy_setup(vcpu, &sup_paging, ot_base, minlimit + 1,
usr/src/cmd/bhyve/amd64/task_switch.c
993
error = desc_table_write(vcpu, &sup_paging, ot_sel,
usr/src/cmd/bhyve/amd64/vga.c
666
vga_mem_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr, int size,
usr/src/cmd/bhyve/amd64/vmexit.c
100
vmentry_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint8_t bytes, uint64_t data)
usr/src/cmd/bhyve/amd64/vmexit.c
102
struct vm_entry *entry = &vmentry[vcpu_id(vcpu)];
usr/src/cmd/bhyve/amd64/vmexit.c
115
vmentry_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint8_t bytes)
usr/src/cmd/bhyve/amd64/vmexit.c
117
struct vm_entry *entry = &vmentry[vcpu_id(vcpu)];
usr/src/cmd/bhyve/amd64/vmexit.c
130
vmentry_inout_read(struct vcpu *vcpu, uint16_t port, uint8_t bytes,
usr/src/cmd/bhyve/amd64/vmexit.c
133
struct vm_entry *entry = &vmentry[vcpu_id(vcpu)];
usr/src/cmd/bhyve/amd64/vmexit.c
146
vmentry_inout_write(struct vcpu *vcpu, uint16_t port, uint8_t bytes)
usr/src/cmd/bhyve/amd64/vmexit.c
148
struct vm_entry *entry = &vmentry[vcpu_id(vcpu)];
usr/src/cmd/bhyve/amd64/vmexit.c
163
vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
usr/src/cmd/bhyve/amd64/vmexit.c
170
error = vm_inject_exception(vcpu, vector, errcode_valid, errcode,
usr/src/cmd/bhyve/amd64/vmexit.c
177
vmexit_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
usr/src/cmd/bhyve/amd64/vmexit.c
188
error = emulate_inout(ctx, vcpu, &inout);
usr/src/cmd/bhyve/amd64/vmexit.c
201
vmentry_inout_read(vcpu, inout.port, bytes, inout.eax);
usr/src/cmd/bhyve/amd64/vmexit.c
203
vmentry_inout_write(vcpu, inout.port, bytes);
usr/src/cmd/bhyve/amd64/vmexit.c
210
vmexit_rdmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
usr/src/cmd/bhyve/amd64/vmexit.c
217
error = emulate_rdmsr(vcpu, vme->u.msr.code, &val);
usr/src/cmd/bhyve/amd64/vmexit.c
220
vme->u.msr.code, vcpu_id(vcpu));
usr/src/cmd/bhyve/amd64/vmexit.c
222
vm_inject_gp(vcpu);
usr/src/cmd/bhyve/amd64/vmexit.c
228
error = vm_set_register(vcpu, VM_REG_GUEST_RAX, eax);
usr/src/cmd/bhyve/amd64/vmexit.c
232
error = vm_set_register(vcpu, VM_REG_GUEST_RDX, edx);
usr/src/cmd/bhyve/amd64/vmexit.c
239
vmexit_wrmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
usr/src/cmd/bhyve/amd64/vmexit.c
243
error = emulate_wrmsr(vcpu, vme->u.msr.code, vme->u.msr.wval);
usr/src/cmd/bhyve/amd64/vmexit.c
246
vme->u.msr.code, vme->u.msr.wval, vcpu_id(vcpu));
usr/src/cmd/bhyve/amd64/vmexit.c
248
vm_inject_gp(vcpu);
usr/src/cmd/bhyve/amd64/vmexit.c
324
vmexit_run_state(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
usr/src/cmd/bhyve/amd64/vmexit.c
336
vmexit_paging(struct vmctx *ctx __unused, struct vcpu *vcpu,
usr/src/cmd/bhyve/amd64/vmexit.c
339
fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
usr/src/cmd/bhyve/amd64/vmexit.c
373
vmexit_vmx(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
usr/src/cmd/bhyve/amd64/vmexit.c
376
EPRINTLN("vm exit[%d]", vcpu_id(vcpu));
usr/src/cmd/bhyve/amd64/vmexit.c
389
vm_get_register(vcpu,
usr/src/cmd/bhyve/amd64/vmexit.c
406
vmexit_svm(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
usr/src/cmd/bhyve/amd64/vmexit.c
408
EPRINTLN("vm exit[%d]", vcpu_id(vcpu));
usr/src/cmd/bhyve/amd64/vmexit.c
419
vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
usr/src/cmd/bhyve/amd64/vmexit.c
429
vmexit_hlt(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
usr/src/cmd/bhyve/amd64/vmexit.c
442
vmexit_pause(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
usr/src/cmd/bhyve/amd64/vmexit.c
449
vmexit_mtrap(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
usr/src/cmd/bhyve/amd64/vmexit.c
454
gdb_cpu_mtrap(vcpu);
usr/src/cmd/bhyve/amd64/vmexit.c
460
vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu,
usr/src/cmd/bhyve/amd64/vmexit.c
489
vmexit_mmio(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
usr/src/cmd/bhyve/amd64/vmexit.c
498
err = emulate_mem(vcpu, &mmio);
usr/src/cmd/bhyve/amd64/vmexit.c
514
vmentry_mmio_read(vcpu, mmio.gpa, mmio.bytes,
usr/src/cmd/bhyve/amd64/vmexit.c
517
vmentry_mmio_write(vcpu, mmio.gpa, mmio.bytes);
usr/src/cmd/bhyve/amd64/vmexit.c
528
vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
usr/src/cmd/bhyve/amd64/vmexit.c
531
int vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyve/amd64/vmexit.c
556
vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu,
usr/src/cmd/bhyve/amd64/vmexit.c
559
gdb_cpu_suspend(vcpu);
usr/src/cmd/bhyve/amd64/vmexit.c
569
vmexit_breakpoint(struct vmctx *ctx __unused, struct vcpu *vcpu,
usr/src/cmd/bhyve/amd64/vmexit.c
573
gdb_cpu_breakpoint(vcpu, vme);
usr/src/cmd/bhyve/amd64/vmexit.c
579
vmexit_ipi(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
usr/src/cmd/bhyve/amd64/vmexit.c
587
error = vm_suspend_cpu(vcpu_info[i].vcpu);
usr/src/cmd/bhyve/amd64/vmexit.c
597
spinup_ap(vcpu_info[i].vcpu,
usr/src/cmd/bhyve/amd64/xmsr.c
111
emulate_rdmsr(struct vcpu *vcpu __unused, uint32_t num, uint64_t *val)
usr/src/cmd/bhyve/amd64/xmsr.c
50
emulate_wrmsr(struct vcpu *vcpu __unused, uint32_t num, uint64_t val __unused)
usr/src/cmd/bhyve/amd64/xmsr.h
33
int emulate_wrmsr(struct vcpu *vcpu, uint32_t code, uint64_t val);
usr/src/cmd/bhyve/amd64/xmsr.h
34
int emulate_rdmsr(struct vcpu *vcpu, uint32_t code, uint64_t *val);
usr/src/cmd/bhyve/common/bhyverun.c
120
static void vm_loop(struct vmctx *ctx, struct vcpu *vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
124
struct vcpu *vcpu;
usr/src/cmd/bhyve/common/bhyverun.c
255
int vcpu, pcpu;
usr/src/cmd/bhyve/common/bhyverun.c
260
if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
usr/src/cmd/bhyve/common/bhyverun.c
265
if (vcpu < 0) {
usr/src/cmd/bhyve/common/bhyverun.c
266
fprintf(stderr, "invalid vcpu '%d'\n", vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
276
snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
291
parse_cpuset(int vcpu, const char *list, cpuset_t *set)
usr/src/cmd/bhyve/common/bhyverun.c
302
errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
usr/src/cmd/bhyve/common/bhyverun.c
324
vcpu, list);
usr/src/cmd/bhyve/common/bhyverun.c
328
errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
usr/src/cmd/bhyve/common/bhyverun.c
341
int vcpu;
usr/src/cmd/bhyve/common/bhyverun.c
344
for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
usr/src/cmd/bhyve/common/bhyverun.c
345
snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
349
vcpumap[vcpu] = malloc(sizeof(cpuset_t));
usr/src/cmd/bhyve/common/bhyverun.c
350
if (vcpumap[vcpu] == NULL)
usr/src/cmd/bhyve/common/bhyverun.c
351
err(4, "Failed to allocate cpuset for vcpu %d", vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
352
parse_cpuset(vcpu, value, vcpumap[vcpu]);
usr/src/cmd/bhyve/common/bhyverun.c
377
struct vcpu *
usr/src/cmd/bhyve/common/bhyverun.c
380
return (vcpu_info[vcpuid].vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
403
gdb_cpu_add(vi->vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
405
vm_loop(vi->ctx, vi->vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
421
error = vm_activate_cpu(vi->vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
428
error = vm_suspend_cpu(vi->vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
437
fbsdrun_deletecpu(int vcpu)
usr/src/cmd/bhyve/common/bhyverun.c
443
if (!CPU_ISSET(vcpu, &cpumask)) {
usr/src/cmd/bhyve/common/bhyverun.c
444
EPRINTLN("Attempting to delete unknown cpu %d", vcpu);
usr/src/cmd/bhyve/common/bhyverun.c
448
CPU_CLR(vcpu, &cpumask);
usr/src/cmd/bhyve/common/bhyverun.c
450
if (vcpu != BSP) {
usr/src/cmd/bhyve/common/bhyverun.c
464
vm_loop(struct vmctx *ctx, struct vcpu *vcpu)
usr/src/cmd/bhyve/common/bhyverun.c
473
assert(CPU_ISSET(vcpu_id(vcpu), &active_cpus));
usr/src/cmd/bhyve/common/bhyverun.c
475
ventry = vmentry_vcpu(vcpu_id(vcpu));
usr/src/cmd/bhyve/common/bhyverun.c
478
error = vm_run(vcpu, ventry, &vme);
usr/src/cmd/bhyve/common/bhyverun.c
498
rc = (*vmexit_handlers[exitcode])(ctx, vcpu, &vme);
usr/src/cmd/bhyve/common/bhyverun.c
513
num_vcpus_allowed(struct vmctx *ctx, struct vcpu *vcpu)
usr/src/cmd/bhyve/common/bhyverun.c
523
error = vm_get_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
usr/src/cmd/bhyve/common/bhyverun.c
693
struct vcpu *bsp;
usr/src/cmd/bhyve/common/bhyverun.c
752
vcpu_info[vcpuid].vcpu = bsp;
usr/src/cmd/bhyve/common/bhyverun.c
754
vcpu_info[vcpuid].vcpu = vm_vcpu_open(ctx, vcpuid);
usr/src/cmd/bhyve/common/bhyverun.c
884
bhyve_start_vcpu(vcpu_info[vcpuid].vcpu, vcpuid == BSP,
usr/src/cmd/bhyve/common/bhyverun.h
53
struct vcpu;
usr/src/cmd/bhyve/common/bhyverun.h
59
struct vcpu *fbsdrun_vcpu(int vcpuid);
usr/src/cmd/bhyve/common/bhyverun.h
65
typedef int (*vmexit_handler_t)(struct vmctx *, struct vcpu *,
usr/src/cmd/bhyve/common/bhyverun.h
68
extern int vmexit_task_switch(struct vmctx *, struct vcpu *, struct vm_exit *);
usr/src/cmd/bhyve/common/bhyverun.h
72
void bhyve_init_vcpu(struct vcpu *vcpu);
usr/src/cmd/bhyve/common/bhyverun.h
73
void bhyve_start_vcpu(struct vcpu *vcpu, bool bsp, bool suspend);
usr/src/cmd/bhyve/common/bhyverun.h
74
int bhyve_init_platform(struct vmctx *ctx, struct vcpu *bsp);
usr/src/cmd/bhyve/common/bhyverun.h
75
int bhyve_init_platform_late(struct vmctx *ctx, struct vcpu *bsp);
usr/src/cmd/bhyve/common/bootrom.c
86
bootrom_var_mem_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr,
usr/src/cmd/bhyve/common/gdb.c
1251
int vcpu;
usr/src/cmd/bhyve/common/gdb.c
1255
vcpu = CPU_FFS(&mask) - 1;
usr/src/cmd/bhyve/common/gdb.c
1256
CPU_CLR(vcpu, &mask);
usr/src/cmd/bhyve/common/gdb.c
1257
if (vm_set_capability(vcpus[vcpu], VM_CAP_BPT_EXIT,
usr/src/cmd/bhyve/common/gdb.c
1260
debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
usr/src/cmd/bhyve/common/gdb.c
145
static struct vcpu **vcpus;
usr/src/cmd/bhyve/common/gdb.c
1516
int vcpu;
usr/src/cmd/bhyve/common/gdb.c
1527
vcpu = CPU_FFS(&mask) - 1;
usr/src/cmd/bhyve/common/gdb.c
1528
CPU_CLR(vcpu, &mask);
usr/src/cmd/bhyve/common/gdb.c
1533
append_integer(vcpu + 1);
usr/src/cmd/bhyve/common/gdb.c
223
guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
usr/src/cmd/bhyve/common/gdb.c
233
if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
usr/src/cmd/bhyve/common/gdb.c
267
guest_vaddr2paddr(struct vcpu *vcpu, uint64_t vaddr, uint64_t *paddr)
usr/src/cmd/bhyve/common/gdb.c
272
if (guest_paging_info(vcpu, &paging) == -1)
usr/src/cmd/bhyve/common/gdb.c
279
if (vm_gla2gpa_nofault(vcpu, &paging, vaddr, PROT_READ, paddr,
usr/src/cmd/bhyve/common/gdb.c
747
_gdb_cpu_suspend(struct vcpu *vcpu, bool report_stop)
usr/src/cmd/bhyve/common/gdb.c
749
int vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyve/common/gdb.c
766
gdb_cpu_add(struct vcpu *vcpu)
usr/src/cmd/bhyve/common/gdb.c
772
vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyve/common/gdb.c
777
vcpus[vcpuid] = vcpu;
usr/src/cmd/bhyve/common/gdb.c
780
vm_set_capability(vcpu, VM_CAP_BPT_EXIT, 1);
usr/src/cmd/bhyve/common/gdb.c
791
_gdb_cpu_suspend(vcpu, false);
usr/src/cmd/bhyve/common/gdb.c
801
gdb_cpu_resume(struct vcpu *vcpu)
usr/src/cmd/bhyve/common/gdb.c
806
vs = &vcpu_state[vcpu_id(vcpu)];
usr/src/cmd/bhyve/common/gdb.c
815
error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 1);
usr/src/cmd/bhyve/common/gdb.c
826
gdb_cpu_suspend(struct vcpu *vcpu)
usr/src/cmd/bhyve/common/gdb.c
832
_gdb_cpu_suspend(vcpu, true);
usr/src/cmd/bhyve/common/gdb.c
833
gdb_cpu_resume(vcpu);
usr/src/cmd/bhyve/common/gdb.c
854
gdb_cpu_mtrap(struct vcpu *vcpu)
usr/src/cmd/bhyve/common/gdb.c
861
vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyve/common/gdb.c
868
vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 0);
usr/src/cmd/bhyve/common/gdb.c
875
_gdb_cpu_suspend(vcpu, true);
usr/src/cmd/bhyve/common/gdb.c
877
gdb_cpu_resume(vcpu);
usr/src/cmd/bhyve/common/gdb.c
895
gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit)
usr/src/cmd/bhyve/common/gdb.c
906
vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyve/common/gdb.c
908
error = guest_vaddr2paddr(vcpu, guest_pc(vmexit), &gpa);
usr/src/cmd/bhyve/common/gdb.c
917
vm_set_register(vcpu, GDB_PC_REGNAME, guest_pc(vmexit));
usr/src/cmd/bhyve/common/gdb.c
925
_gdb_cpu_suspend(vcpu, true);
usr/src/cmd/bhyve/common/gdb.c
937
gdb_cpu_resume(vcpu);
usr/src/cmd/bhyve/common/gdb.c
941
error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
usr/src/cmd/bhyve/common/gdb.c
944
error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
usr/src/cmd/bhyve/common/gdb.c
951
gdb_step_vcpu(struct vcpu *vcpu)
usr/src/cmd/bhyve/common/gdb.c
955
vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyve/common/gdb.c
957
error = vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val);
usr/src/cmd/bhyve/common/gdb.c
963
vm_resume_cpu(vcpu);
usr/src/cmd/bhyve/common/gdb.h
31
void gdb_cpu_add(struct vcpu *vcpu);
usr/src/cmd/bhyve/common/gdb.h
32
void gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit);
usr/src/cmd/bhyve/common/gdb.h
33
void gdb_cpu_mtrap(struct vcpu *vcpu);
usr/src/cmd/bhyve/common/gdb.h
34
void gdb_cpu_suspend(struct vcpu *vcpu);
usr/src/cmd/bhyve/common/mem.c
151
typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
usr/src/cmd/bhyve/common/mem.c
155
mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
usr/src/cmd/bhyve/common/mem.c
160
error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
usr/src/cmd/bhyve/common/mem.c
166
mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
usr/src/cmd/bhyve/common/mem.c
171
error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
usr/src/cmd/bhyve/common/mem.c
177
access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
usr/src/cmd/bhyve/common/mem.c
182
vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyve/common/mem.c
224
err = cb(vcpu, paddr, &entry->mr_param, arg);
usr/src/cmd/bhyve/common/mem.c
235
emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
usr/src/cmd/bhyve/common/mem.c
244
err = mem_read(vcpu, paddr, &mmio->data, mmio->bytes, mr);
usr/src/cmd/bhyve/common/mem.c
246
err = mem_write(vcpu, paddr, mmio->data, mmio->bytes, mr);
usr/src/cmd/bhyve/common/mem.c
253
emulate_mem(struct vcpu *vcpu, struct vm_mmio *mmio)
usr/src/cmd/bhyve/common/mem.c
255
return (access_memory(vcpu, mmio->gpa, emulate_mem_cb, mmio));
usr/src/cmd/bhyve/common/mem.c
265
rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
usr/src/cmd/bhyve/common/mem.c
271
return (mr->handler(vcpu, rma->operation, paddr, rma->size,
usr/src/cmd/bhyve/common/mem.c
276
read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
usr/src/cmd/bhyve/common/mem.c
283
return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
usr/src/cmd/bhyve/common/mem.c
287
write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
usr/src/cmd/bhyve/common/mem.c
294
return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
usr/src/cmd/bhyve/common/mem.h
34
struct vcpu;
usr/src/cmd/bhyve/common/mem.h
36
typedef int (*mem_func_t)(struct vcpu *vcpu, int dir, uint64_t addr,
usr/src/cmd/bhyve/common/mem.h
55
int emulate_mem(struct vcpu *vcpu, struct vm_mmio *mmio);
usr/src/cmd/bhyve/common/mem.h
57
int read_mem(struct vcpu *vpu, uint64_t gpa, uint64_t *rval, int size);
usr/src/cmd/bhyve/common/mem.h
61
int write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size);
usr/src/cmd/bhyve/common/pci_emul.c
1423
pci_emul_fallback_handler(struct vcpu *vcpu __unused, int dir,
usr/src/cmd/bhyve/common/pci_emul.c
1439
pci_emul_ecfg_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr,
usr/src/cmd/bhyve/common/pci_emul.c
542
pci_emul_mem_handler(struct vcpu *vcpu __unused, int dir,
usr/src/cmd/bhyve/common/pctestdev.c
191
pctestdev_iomem_io(struct vcpu *vcpu __unused, int dir,
usr/src/cmd/bhyve/common/pctestdev.c
75
static int pctestdev_iomem_io(struct vcpu *vcpu, int dir,
usr/src/cmd/bhyve/common/tpm_intf_crb.c
309
tpm_crb_mem_handler(struct vcpu *vcpu __unused, const int dir,
usr/src/cmd/bhyve/common/tpm_ppi_qemu.c
56
tpm_ppi_mem_handler(struct vcpu *const vcpu __unused, const int dir,
usr/src/cmd/bhyvectl/bhyvectl.c
1003
error = vm_get_vmcb_field(vcpu, VMCB_OFF_AVIC_BAR, 8,
usr/src/cmd/bhyvectl/bhyvectl.c
1010
error = vm_get_vmcb_field(vcpu, VMCB_OFF_AVIC_PAGE, 8,
usr/src/cmd/bhyvectl/bhyvectl.c
1017
error = vm_get_vmcb_field(vcpu, VMCB_OFF_AVIC_LT, 8,
usr/src/cmd/bhyvectl/bhyvectl.c
1022
error = vm_get_vmcb_field(vcpu, VMCB_OFF_AVIC_PT, 8,
usr/src/cmd/bhyvectl/bhyvectl.c
1442
show_fpu(struct vcpu *vcpu)
usr/src/cmd/bhyvectl/bhyvectl.c
1445
int vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1446
struct vmctx *ctx = vcpu_ctx(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1478
.vcpuid = vcpu_id(vcpu),
usr/src/cmd/bhyvectl/bhyvectl.c
1509
i, vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
1518
i, vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
1555
show_msrs(struct vcpu *vcpu)
usr/src/cmd/bhyvectl/bhyvectl.c
1559
.vdx_vcpuid = vcpu_id(vcpu),
usr/src/cmd/bhyvectl/bhyvectl.c
1565
struct vmctx *ctx = vcpu_ctx(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1621
struct vcpu *vcpu;
usr/src/cmd/bhyvectl/bhyvectl.c
171
dump_vm_run_exitcode(struct vm_exit *vmexit, int vcpu)
usr/src/cmd/bhyvectl/bhyvectl.c
173
printf("vm exit[%d]\n", vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1814
vcpu = vm_vcpu_open(ctx, vcpuid);
usr/src/cmd/bhyvectl/bhyvectl.c
1831
error = vm_set_register(vcpu, VM_REG_GUEST_EFER, efer);
usr/src/cmd/bhyvectl/bhyvectl.c
1834
error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0);
usr/src/cmd/bhyvectl/bhyvectl.c
1837
error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
usr/src/cmd/bhyvectl/bhyvectl.c
1840
error = vm_set_register(vcpu, VM_REG_GUEST_CR3, cr3);
usr/src/cmd/bhyvectl/bhyvectl.c
1843
error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4);
usr/src/cmd/bhyvectl/bhyvectl.c
1846
error = vm_set_register(vcpu, VM_REG_GUEST_DR0, dr0);
usr/src/cmd/bhyvectl/bhyvectl.c
1849
error = vm_set_register(vcpu, VM_REG_GUEST_DR1, dr1);
usr/src/cmd/bhyvectl/bhyvectl.c
1852
error = vm_set_register(vcpu, VM_REG_GUEST_DR2, dr2);
usr/src/cmd/bhyvectl/bhyvectl.c
1855
error = vm_set_register(vcpu, VM_REG_GUEST_DR3, dr3);
usr/src/cmd/bhyvectl/bhyvectl.c
1858
error = vm_set_register(vcpu, VM_REG_GUEST_DR6, dr6);
usr/src/cmd/bhyvectl/bhyvectl.c
1861
error = vm_set_register(vcpu, VM_REG_GUEST_DR7, dr7);
usr/src/cmd/bhyvectl/bhyvectl.c
1864
error = vm_set_register(vcpu, VM_REG_GUEST_RSP, rsp);
usr/src/cmd/bhyvectl/bhyvectl.c
1867
error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip);
usr/src/cmd/bhyvectl/bhyvectl.c
1870
error = vm_set_register(vcpu, VM_REG_GUEST_RAX, rax);
usr/src/cmd/bhyvectl/bhyvectl.c
1873
error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS,
usr/src/cmd/bhyvectl/bhyvectl.c
1878
error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
usr/src/cmd/bhyvectl/bhyvectl.c
1883
error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
usr/src/cmd/bhyvectl/bhyvectl.c
1888
error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
usr/src/cmd/bhyvectl/bhyvectl.c
1893
error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
usr/src/cmd/bhyvectl/bhyvectl.c
1898
error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
usr/src/cmd/bhyvectl/bhyvectl.c
1903
error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
usr/src/cmd/bhyvectl/bhyvectl.c
1908
error = vm_set_desc(vcpu, VM_REG_GUEST_TR,
usr/src/cmd/bhyvectl/bhyvectl.c
1913
error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR,
usr/src/cmd/bhyvectl/bhyvectl.c
1918
error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
usr/src/cmd/bhyvectl/bhyvectl.c
1923
error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR,
usr/src/cmd/bhyvectl/bhyvectl.c
1928
error = vm_set_register(vcpu, VM_REG_GUEST_CS, cs);
usr/src/cmd/bhyvectl/bhyvectl.c
1931
error = vm_set_register(vcpu, VM_REG_GUEST_DS, ds);
usr/src/cmd/bhyvectl/bhyvectl.c
1934
error = vm_set_register(vcpu, VM_REG_GUEST_ES, es);
usr/src/cmd/bhyvectl/bhyvectl.c
1937
error = vm_set_register(vcpu, VM_REG_GUEST_FS, fs);
usr/src/cmd/bhyvectl/bhyvectl.c
1940
error = vm_set_register(vcpu, VM_REG_GUEST_GS, gs);
usr/src/cmd/bhyvectl/bhyvectl.c
1943
error = vm_set_register(vcpu, VM_REG_GUEST_SS, ss);
usr/src/cmd/bhyvectl/bhyvectl.c
1946
error = vm_set_register(vcpu, VM_REG_GUEST_TR, tr);
usr/src/cmd/bhyvectl/bhyvectl.c
1949
error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, ldtr);
usr/src/cmd/bhyvectl/bhyvectl.c
1952
error = vm_set_x2apic_state(vcpu, x2apic_state);
usr/src/cmd/bhyvectl/bhyvectl.c
1956
error = vm_set_vmcs_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
1960
error = vm_set_vmcb_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
1966
error = vm_set_vmcs_field(vcpu, VMCS_ENTRY_INTR_INFO,
usr/src/cmd/bhyvectl/bhyvectl.c
1971
error = vm_inject_nmi(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1975
error = vm_lapic_local_irq(vcpu, assert_lapic_lvt);
usr/src/cmd/bhyvectl/bhyvectl.c
1985
error = get_all_registers(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1988
error = get_all_segments(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1991
error = show_fpu(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1996
error = get_misc_vmcs(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
1998
error = get_misc_vmcb(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
2002
error = vm_get_x2apic_state(vcpu, &x2apic_state);
usr/src/cmd/bhyvectl/bhyvectl.c
2009
error = vm_get_vmcs_field(vcpu, VMCS_EPTP, &eptp);
usr/src/cmd/bhyvectl/bhyvectl.c
2011
error = vm_get_vmcb_field(vcpu, VMCB_OFF_NPT_BASE,
usr/src/cmd/bhyvectl/bhyvectl.c
2015
cpu_intel ? "eptp" : "rvi/npt", vcpu, eptp);
usr/src/cmd/bhyvectl/bhyvectl.c
2020
error = vm_get_vmcs_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
2023
error = vm_get_vmcb_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
2032
error = vm_get_vmcs_field(vcpu, VMCS_IO_BITMAP_A,
usr/src/cmd/bhyvectl/bhyvectl.c
2036
error = vm_get_vmcs_field(vcpu, VMCS_IO_BITMAP_B,
usr/src/cmd/bhyvectl/bhyvectl.c
2041
error = vm_get_vmcb_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
2051
error = vm_get_vmcs_field(vcpu, VMCS_TSC_OFFSET,
usr/src/cmd/bhyvectl/bhyvectl.c
2054
error = vm_get_vmcb_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
2063
error = vm_get_vmcs_field(vcpu, VMCS_MSR_BITMAP,
usr/src/cmd/bhyvectl/bhyvectl.c
2066
error = vm_get_vmcb_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
2075
error = vm_get_vmcs_field(vcpu, VMCS_VPID, &vpid);
usr/src/cmd/bhyvectl/bhyvectl.c
2077
error = vm_get_vmcb_field(vcpu, VMCB_OFF_ASID,
usr/src/cmd/bhyvectl/bhyvectl.c
2081
cpu_intel ? "vpid" : "asid", vcpu, vpid);
usr/src/cmd/bhyvectl/bhyvectl.c
2085
error = show_msrs(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
2090
error = vm_get_vmcs_field(vcpu, VMCS_EXIT_REASON,
usr/src/cmd/bhyvectl/bhyvectl.c
2093
error = vm_get_vmcb_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
2103
error = vm_set_capability(vcpu, captype, capval);
usr/src/cmd/bhyvectl/bhyvectl.c
2163
error = vm_get_capability(vcpu, captype, &val);
usr/src/cmd/bhyvectl/bhyvectl.c
2167
val ? "set" : "not set", vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
2191
error = vm_get_intinfo(vcpu, &info[0], &info[1]);
usr/src/cmd/bhyvectl/bhyvectl.c
2204
stats = vm_get_stats(vcpu, &tv, &num_stats);
usr/src/cmd/bhyvectl/bhyvectl.c
2227
error = vm_run(vcpu, &entry, &vmexit);
usr/src/cmd/bhyvectl/bhyvectl.c
2235
error = ioctl(vm_get_device_fd(ctx), VM_PAUSE, vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
224
vm_get_vmcs_field(struct vcpu *vcpu, int field, uint64_t *ret_val)
usr/src/cmd/bhyvectl/bhyvectl.c
2242
error = ioctl(vm_get_device_fd(ctx), VM_RESUME, vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
231
vm_set_vmcs_field(struct vcpu *vcpu, int field, uint64_t val)
usr/src/cmd/bhyvectl/bhyvectl.c
238
vm_get_vmcb_field(struct vcpu *vcpu, int off, int bytes,
usr/src/cmd/bhyvectl/bhyvectl.c
246
vm_set_vmcb_field(struct vcpu *vcpu, int off, int bytes,
usr/src/cmd/bhyvectl/bhyvectl.c
365
get_all_registers(struct vcpu *vcpu)
usr/src/cmd/bhyvectl/bhyvectl.c
371
int vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
375
error = vm_get_register(vcpu, VM_REG_GUEST_EFER, &efer);
usr/src/cmd/bhyvectl/bhyvectl.c
381
error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &cr0);
usr/src/cmd/bhyvectl/bhyvectl.c
387
error = vm_get_register(vcpu, VM_REG_GUEST_CR2, &cr2);
usr/src/cmd/bhyvectl/bhyvectl.c
393
error = vm_get_register(vcpu, VM_REG_GUEST_CR3, &cr3);
usr/src/cmd/bhyvectl/bhyvectl.c
399
error = vm_get_register(vcpu, VM_REG_GUEST_CR4, &cr4);
usr/src/cmd/bhyvectl/bhyvectl.c
405
error = vm_get_register(vcpu, VM_REG_GUEST_DR0, &dr0);
usr/src/cmd/bhyvectl/bhyvectl.c
411
error = vm_get_register(vcpu, VM_REG_GUEST_DR1, &dr1);
usr/src/cmd/bhyvectl/bhyvectl.c
417
error = vm_get_register(vcpu, VM_REG_GUEST_DR2, &dr2);
usr/src/cmd/bhyvectl/bhyvectl.c
423
error = vm_get_register(vcpu, VM_REG_GUEST_DR3, &dr3);
usr/src/cmd/bhyvectl/bhyvectl.c
429
error = vm_get_register(vcpu, VM_REG_GUEST_DR6, &dr6);
usr/src/cmd/bhyvectl/bhyvectl.c
435
error = vm_get_register(vcpu, VM_REG_GUEST_DR7, &dr7);
usr/src/cmd/bhyvectl/bhyvectl.c
441
error = vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp);
usr/src/cmd/bhyvectl/bhyvectl.c
447
error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
usr/src/cmd/bhyvectl/bhyvectl.c
453
error = vm_get_register(vcpu, VM_REG_GUEST_RAX, &rax);
usr/src/cmd/bhyvectl/bhyvectl.c
459
error = vm_get_register(vcpu, VM_REG_GUEST_RBX, &rbx);
usr/src/cmd/bhyvectl/bhyvectl.c
465
error = vm_get_register(vcpu, VM_REG_GUEST_RCX, &rcx);
usr/src/cmd/bhyvectl/bhyvectl.c
471
error = vm_get_register(vcpu, VM_REG_GUEST_RDX, &rdx);
usr/src/cmd/bhyvectl/bhyvectl.c
477
error = vm_get_register(vcpu, VM_REG_GUEST_RSI, &rsi);
usr/src/cmd/bhyvectl/bhyvectl.c
483
error = vm_get_register(vcpu, VM_REG_GUEST_RDI, &rdi);
usr/src/cmd/bhyvectl/bhyvectl.c
489
error = vm_get_register(vcpu, VM_REG_GUEST_RBP, &rbp);
usr/src/cmd/bhyvectl/bhyvectl.c
495
error = vm_get_register(vcpu, VM_REG_GUEST_R8, &r8);
usr/src/cmd/bhyvectl/bhyvectl.c
501
error = vm_get_register(vcpu, VM_REG_GUEST_R9, &r9);
usr/src/cmd/bhyvectl/bhyvectl.c
507
error = vm_get_register(vcpu, VM_REG_GUEST_R10, &r10);
usr/src/cmd/bhyvectl/bhyvectl.c
513
error = vm_get_register(vcpu, VM_REG_GUEST_R11, &r11);
usr/src/cmd/bhyvectl/bhyvectl.c
519
error = vm_get_register(vcpu, VM_REG_GUEST_R12, &r12);
usr/src/cmd/bhyvectl/bhyvectl.c
525
error = vm_get_register(vcpu, VM_REG_GUEST_R13, &r13);
usr/src/cmd/bhyvectl/bhyvectl.c
531
error = vm_get_register(vcpu, VM_REG_GUEST_R14, &r14);
usr/src/cmd/bhyvectl/bhyvectl.c
537
error = vm_get_register(vcpu, VM_REG_GUEST_R15, &r15);
usr/src/cmd/bhyvectl/bhyvectl.c
543
error = vm_get_register(vcpu, VM_REG_GUEST_RFLAGS,
usr/src/cmd/bhyvectl/bhyvectl.c
553
get_all_segments(struct vcpu *vcpu)
usr/src/cmd/bhyvectl/bhyvectl.c
556
int vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
560
error = vm_get_desc(vcpu, VM_REG_GUEST_DS,
usr/src/cmd/bhyvectl/bhyvectl.c
569
error = vm_get_desc(vcpu, VM_REG_GUEST_ES,
usr/src/cmd/bhyvectl/bhyvectl.c
578
error = vm_get_desc(vcpu, VM_REG_GUEST_FS,
usr/src/cmd/bhyvectl/bhyvectl.c
587
error = vm_get_desc(vcpu, VM_REG_GUEST_GS,
usr/src/cmd/bhyvectl/bhyvectl.c
596
error = vm_get_desc(vcpu, VM_REG_GUEST_SS,
usr/src/cmd/bhyvectl/bhyvectl.c
605
error = vm_get_desc(vcpu, VM_REG_GUEST_CS,
usr/src/cmd/bhyvectl/bhyvectl.c
614
error = vm_get_desc(vcpu, VM_REG_GUEST_TR,
usr/src/cmd/bhyvectl/bhyvectl.c
623
error = vm_get_desc(vcpu, VM_REG_GUEST_LDTR,
usr/src/cmd/bhyvectl/bhyvectl.c
632
error = vm_get_desc(vcpu, VM_REG_GUEST_GDTR,
usr/src/cmd/bhyvectl/bhyvectl.c
641
error = vm_get_desc(vcpu, VM_REG_GUEST_IDTR,
usr/src/cmd/bhyvectl/bhyvectl.c
650
error = vm_get_register(vcpu, VM_REG_GUEST_CS, &cs);
usr/src/cmd/bhyvectl/bhyvectl.c
656
error = vm_get_register(vcpu, VM_REG_GUEST_DS, &ds);
usr/src/cmd/bhyvectl/bhyvectl.c
662
error = vm_get_register(vcpu, VM_REG_GUEST_ES, &es);
usr/src/cmd/bhyvectl/bhyvectl.c
668
error = vm_get_register(vcpu, VM_REG_GUEST_FS, &fs);
usr/src/cmd/bhyvectl/bhyvectl.c
674
error = vm_get_register(vcpu, VM_REG_GUEST_GS, &gs);
usr/src/cmd/bhyvectl/bhyvectl.c
680
error = vm_get_register(vcpu, VM_REG_GUEST_SS, &ss);
usr/src/cmd/bhyvectl/bhyvectl.c
686
error = vm_get_register(vcpu, VM_REG_GUEST_TR, &tr);
usr/src/cmd/bhyvectl/bhyvectl.c
692
error = vm_get_register(vcpu, VM_REG_GUEST_LDTR, &ldtr);
usr/src/cmd/bhyvectl/bhyvectl.c
701
get_misc_vmcs(struct vcpu *vcpu)
usr/src/cmd/bhyvectl/bhyvectl.c
704
int vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
709
error = vm_get_vmcs_field(vcpu, VMCS_CR0_MASK, &cr0mask);
usr/src/cmd/bhyvectl/bhyvectl.c
716
error = vm_get_vmcs_field(vcpu, VMCS_CR0_SHADOW,
usr/src/cmd/bhyvectl/bhyvectl.c
726
error = vm_get_vmcs_field(vcpu, VMCS_CR4_MASK, &cr4mask);
usr/src/cmd/bhyvectl/bhyvectl.c
735
error = vm_get_vmcs_field(vcpu, VMCS_CR4_SHADOW,
usr/src/cmd/bhyvectl/bhyvectl.c
743
error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET_COUNT,
usr/src/cmd/bhyvectl/bhyvectl.c
750
error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET0,
usr/src/cmd/bhyvectl/bhyvectl.c
757
error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET1,
usr/src/cmd/bhyvectl/bhyvectl.c
764
error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET2,
usr/src/cmd/bhyvectl/bhyvectl.c
771
error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET3,
usr/src/cmd/bhyvectl/bhyvectl.c
780
error = vm_get_vmcs_field(vcpu, VMCS_PIN_BASED_CTLS, &ctl);
usr/src/cmd/bhyvectl/bhyvectl.c
786
error = vm_get_vmcs_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
793
error = vm_get_vmcs_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
800
error = vm_get_vmcs_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
807
error = vm_get_vmcs_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
815
error = vm_get_vmcs_field(vcpu, VMCS_ENTRY_INTR_INFO,&u64);
usr/src/cmd/bhyvectl/bhyvectl.c
824
error = vm_get_vmcs_field(vcpu, VMCS_TPR_THRESHOLD,
usr/src/cmd/bhyvectl/bhyvectl.c
832
error = vm_get_vmcs_field(vcpu, VMCS_INSTRUCTION_ERROR,
usr/src/cmd/bhyvectl/bhyvectl.c
841
error = vm_get_vmcs_field(vcpu, VMCS_EXIT_CTLS, &ctl);
usr/src/cmd/bhyvectl/bhyvectl.c
847
error = vm_get_vmcs_field(vcpu, VMCS_ENTRY_CTLS, &ctl);
usr/src/cmd/bhyvectl/bhyvectl.c
853
error = vm_get_vmcs_field(vcpu, VMCS_HOST_IA32_PAT, &pat);
usr/src/cmd/bhyvectl/bhyvectl.c
859
error = vm_get_vmcs_field(vcpu, VMCS_HOST_CR0, &cr0);
usr/src/cmd/bhyvectl/bhyvectl.c
865
error = vm_get_vmcs_field(vcpu, VMCS_HOST_CR3, &cr3);
usr/src/cmd/bhyvectl/bhyvectl.c
871
error = vm_get_vmcs_field(vcpu, VMCS_HOST_CR4, &cr4);
usr/src/cmd/bhyvectl/bhyvectl.c
877
error = vm_get_vmcs_field(vcpu, VMCS_HOST_RIP, &rip);
usr/src/cmd/bhyvectl/bhyvectl.c
883
error = vm_get_vmcs_field(vcpu, VMCS_HOST_RSP, &rsp);
usr/src/cmd/bhyvectl/bhyvectl.c
889
error = vm_get_vmcs_field(vcpu, VMCS_LINK_POINTER, &addr);
usr/src/cmd/bhyvectl/bhyvectl.c
895
error = vm_get_vmcs_field(vcpu, VMCS_EXIT_INTR_INFO, &u64);
usr/src/cmd/bhyvectl/bhyvectl.c
903
error = vm_get_vmcs_field(vcpu, VMCS_EXIT_INTR_ERRCODE,
usr/src/cmd/bhyvectl/bhyvectl.c
912
error = vm_get_vmcs_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
921
error = vm_get_vmcs_field(vcpu,
usr/src/cmd/bhyvectl/bhyvectl.c
929
error = vm_get_vmcs_field(vcpu, VMCS_EXIT_QUALIFICATION,
usr/src/cmd/bhyvectl/bhyvectl.c
940
get_misc_vmcb(struct vcpu *vcpu)
usr/src/cmd/bhyvectl/bhyvectl.c
943
int vcpuid = vcpu_id(vcpu);
usr/src/cmd/bhyvectl/bhyvectl.c
947
error = vm_get_vmcb_field(vcpu, VMCB_OFF_CR_INTERCEPT, 4,
usr/src/cmd/bhyvectl/bhyvectl.c
952
error = vm_get_vmcb_field(vcpu, VMCB_OFF_DR_INTERCEPT, 4,
usr/src/cmd/bhyvectl/bhyvectl.c
957
error = vm_get_vmcb_field(vcpu, VMCB_OFF_EXC_INTERCEPT, 4,
usr/src/cmd/bhyvectl/bhyvectl.c
962
error = vm_get_vmcb_field(vcpu, VMCB_OFF_INST1_INTERCEPT,
usr/src/cmd/bhyvectl/bhyvectl.c
967
error = vm_get_vmcb_field(vcpu, VMCB_OFF_INST2_INTERCEPT,
usr/src/cmd/bhyvectl/bhyvectl.c
974
error = vm_get_vmcb_field(vcpu, VMCB_OFF_TLB_CTRL,
usr/src/cmd/bhyvectl/bhyvectl.c
981
error = vm_get_vmcb_field(vcpu, VMCB_OFF_EXITINFO1,
usr/src/cmd/bhyvectl/bhyvectl.c
985
error = vm_get_vmcb_field(vcpu, VMCB_OFF_EXITINFO2,
usr/src/cmd/bhyvectl/bhyvectl.c
989
error = vm_get_vmcb_field(vcpu, VMCB_OFF_EXITINTINFO,
usr/src/cmd/bhyvectl/bhyvectl.c
996
error = vm_get_vmcb_field(vcpu, VMCB_OFF_VIRQ,
usr/src/cmd/mdb/i86xpv/modules/xpv/xpv.c
62
struct vcpu *vcpu[MAX_VIRT_CPUS];
usr/src/cmd/mdb/intel/mdb/mdb_bhyve.c
354
const mdb_arg_t *argv, int vcpu, mdb_stack_frame_flags_t sflags)
usr/src/cmd/mdb/intel/mdb/mdb_bhyve.c
365
if (vcpu == -1) {
usr/src/cmd/mdb/intel/mdb/mdb_bhyve.c
366
vcpu = bd->bd_curcpu;
usr/src/cmd/mdb/intel/mdb/mdb_bhyve.c
367
} else if (vcpu >= vmm_ncpu(bd->bd_vmm)) {
usr/src/cmd/mdb/intel/mdb/mdb_bhyve.c
375
} else if (bhyve_get_gregset(bd, vcpu, &gregs) != 0)
usr/src/cmd/mdb/intel/mdb/mdb_bhyve.c
393
switch (vmm_vcpu_isa(bd->bd_vmm, vcpu)) {
usr/src/cmd/mdb/intel/mdb/mdb_bhyve.c
404
mdb_warn("CPU %d mode unknown", vcpu);
usr/src/cmd/mdb/intel/modules/mdb_kb/mdb_kb.c
1506
struct vcpu_guest_context *vcpu;
usr/src/cmd/mdb/intel/modules/mdb_kb/mdb_kb.c
1517
vcpu = xkb->xkb_vcpus[cpu];
usr/src/cmd/mdb/intel/modules/mdb_kb/mdb_kb.c
1518
ur = &vcpu->user_regs;
usr/src/cmd/mdb/intel/modules/mdb_kb/mdb_kb.c
1566
bcopy(&vcpu->ctrlreg, &mregs->pm_cr, 8 * sizeof (ulong_t));
usr/src/cmd/mdb/intel/modules/mdb_kb/mdb_kb.c
1567
bcopy(&vcpu->debugreg, &mregs->pm_dr, 8 * sizeof (ulong_t));
usr/src/cmd/psrinfo/psrinfo.c
246
struct vcpu *vcpu;
usr/src/cmd/psrinfo/psrinfo.c
256
vcpu = chip->p_vcpus->l_ptr;
usr/src/cmd/psrinfo/psrinfo.c
283
if (strlen(vcpu->v_impl)) {
usr/src/cmd/psrinfo/psrinfo.c
284
(void) printf(" %s\n", vcpu->v_impl);
usr/src/cmd/psrinfo/psrinfo.c
286
if (((len = strlen(vcpu->v_brand)) != 0) &&
usr/src/cmd/psrinfo/psrinfo.c
287
(strncmp(vcpu->v_brand, vcpu->v_impl, len) != 0))
usr/src/cmd/psrinfo/psrinfo.c
288
(void) printf("\t%s", vcpu->v_brand);
usr/src/cmd/psrinfo/psrinfo.c
300
if (strlen(vcpu->v_impl)) {
usr/src/cmd/psrinfo/psrinfo.c
301
(void) printf(" %s\n", vcpu->v_impl);
usr/src/cmd/psrinfo/psrinfo.c
303
if (((len = strlen(vcpu->v_brand)) != 0) &&
usr/src/cmd/psrinfo/psrinfo.c
304
(strncmp(vcpu->v_brand, vcpu->v_impl, len) != 0))
usr/src/cmd/psrinfo/psrinfo.c
305
(void) printf(" %s", vcpu->v_brand);
usr/src/cmd/psrinfo/psrinfo.c
307
if (strcmp(vcpu->v_socket, "Unknown") != 0) {
usr/src/cmd/psrinfo/psrinfo.c
309
vcpu->v_socket);
usr/src/cmd/psrinfo/psrinfo.c
320
struct vcpu *v;
usr/src/cmd/psrinfo/psrinfo.c
354
struct vcpu *v = l->l_ptr;
usr/src/cmd/psrinfo/psrinfo.c
378
struct vcpu *v = l2->l_ptr;
usr/src/cmd/psrinfo/psrinfo.c
395
struct vcpu *v = l->l_ptr;
usr/src/cmd/psrinfo/psrinfo.c
441
struct vcpu *v;
usr/src/cmd/psrinfo/psrinfo.c
530
struct vcpu *v = l->l_ptr;
usr/src/cmd/psrinfo/psrinfo.c
544
struct vcpu *vc;
usr/src/cmd/psrinfo/psrinfo.c
585
vc = zalloc(sizeof (struct vcpu));
usr/src/cmd/psrinfo/psrinfo.c
786
((struct vcpu *)l->l_ptr)->v_doit = 1;
usr/src/cmd/psrinfo/psrinfo.c
787
((struct vcpu *)l->l_ptr)->v_pchip->p_doit = 1;
usr/src/cmd/psrinfo/psrinfo.c
788
((struct vcpu *)l->l_ptr)->v_core->c_doit = 1;
usr/src/lib/libvmm/libvmm.c
126
vmm->vmm_vcpu = calloc(vmm->vmm_ncpu, sizeof (struct vcpu *));
usr/src/lib/libvmm/libvmm.c
65
struct vcpu **vmm_vcpu;
usr/src/lib/libvmmapi/common/vmmapi.c
1001
exc.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1006
error = vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc);
usr/src/lib/libvmmapi/common/vmmapi.c
1023
vm_lapic_irq(struct vcpu *vcpu, int vector)
usr/src/lib/libvmmapi/common/vmmapi.c
1028
vmirq.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1031
return (vcpu_ioctl(vcpu, VM_LAPIC_IRQ, &vmirq));
usr/src/lib/libvmmapi/common/vmmapi.c
1035
vm_lapic_local_irq(struct vcpu *vcpu, int vector)
usr/src/lib/libvmmapi/common/vmmapi.c
1040
vmirq.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1043
return (vcpu_ioctl(vcpu, VM_LAPIC_LOCAL_IRQ, &vmirq));
usr/src/lib/libvmmapi/common/vmmapi.c
1099
vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa,
usr/src/lib/libvmmapi/common/vmmapi.c
1103
.vcpuid = vcpu->vcpuid,
usr/src/lib/libvmmapi/common/vmmapi.c
1111
rc = vcpu_ioctl(vcpu, cmd, &irp);
usr/src/lib/libvmmapi/common/vmmapi.c
1167
vm_inject_nmi(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
1172
vmnmi.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1174
return (vcpu_ioctl(vcpu, VM_INJECT_NMI, &vmnmi));
usr/src/lib/libvmmapi/common/vmmapi.c
1211
vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap,
usr/src/lib/libvmmapi/common/vmmapi.c
1218
vmcap.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1221
error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap);
usr/src/lib/libvmmapi/common/vmmapi.c
1227
vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val)
usr/src/lib/libvmmapi/common/vmmapi.c
1232
vmcap.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1236
return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap));
usr/src/lib/libvmmapi/common/vmmapi.c
1447
vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
usr/src/lib/libvmmapi/common/vmmapi.c
1458
vmstats.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1462
if (vcpu_ioctl(vcpu, VM_STATS_IOC, &vmstats) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1506
vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
usr/src/lib/libvmmapi/common/vmmapi.c
1512
x2apic.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1514
error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic);
usr/src/lib/libvmmapi/common/vmmapi.c
1520
vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
usr/src/lib/libvmmapi/common/vmmapi.c
1526
x2apic.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1529
error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic);
usr/src/lib/libvmmapi/common/vmmapi.c
1536
vcpu_reset(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
1540
vvr.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1543
return (vcpu_ioctl(vcpu, VM_RESET_CPU, &vvr));
usr/src/lib/libvmmapi/common/vmmapi.c
1551
vcpu_reset(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
1561
error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags);
usr/src/lib/libvmmapi/common/vmmapi.c
1566
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1575
if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1578
if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR2, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1581
if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR3, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1585
if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1594
error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
usr/src/lib/libvmmapi/common/vmmapi.c
1600
if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, sel)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1609
error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
usr/src/lib/libvmmapi/common/vmmapi.c
1614
error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
usr/src/lib/libvmmapi/common/vmmapi.c
1619
error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
usr/src/lib/libvmmapi/common/vmmapi.c
1624
error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
usr/src/lib/libvmmapi/common/vmmapi.c
1629
error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
usr/src/lib/libvmmapi/common/vmmapi.c
1635
if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, sel)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1637
if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, sel)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1639
if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, sel)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1641
if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, sel)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1643
if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, sel)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1646
if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1651
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RAX, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1653
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBX, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1655
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RCX, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1657
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1659
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSI, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1661
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDI, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1663
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBP, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1665
if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1667
if ((error = vm_set_register(vcpu, VM_REG_GUEST_R8, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1669
if ((error = vm_set_register(vcpu, VM_REG_GUEST_R9, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1671
if ((error = vm_set_register(vcpu, VM_REG_GUEST_R10, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1673
if ((error = vm_set_register(vcpu, VM_REG_GUEST_R11, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1675
if ((error = vm_set_register(vcpu, VM_REG_GUEST_R12, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1677
if ((error = vm_set_register(vcpu, VM_REG_GUEST_R13, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1679
if ((error = vm_set_register(vcpu, VM_REG_GUEST_R14, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1681
if ((error = vm_set_register(vcpu, VM_REG_GUEST_R15, zero)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1688
error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
usr/src/lib/libvmmapi/common/vmmapi.c
1693
error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR,
usr/src/lib/libvmmapi/common/vmmapi.c
1702
error = vm_set_desc(vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
usr/src/lib/libvmmapi/common/vmmapi.c
1707
if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, sel)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1714
error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, desc_base,
usr/src/lib/libvmmapi/common/vmmapi.c
1720
if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
usr/src/lib/libvmmapi/common/vmmapi.c
1723
if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR6,
usr/src/lib/libvmmapi/common/vmmapi.c
1726
if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR7, 0x400)) !=
usr/src/lib/libvmmapi/common/vmmapi.c
1730
if ((error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW,
usr/src/lib/libvmmapi/common/vmmapi.c
1774
vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
usr/src/lib/libvmmapi/common/vmmapi.c
1781
gg.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1786
error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg);
usr/src/lib/libvmmapi/common/vmmapi.c
1795
vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
usr/src/lib/libvmmapi/common/vmmapi.c
1802
gg.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1807
error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg);
usr/src/lib/libvmmapi/common/vmmapi.c
1820
vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
usr/src/lib/libvmmapi/common/vmmapi.c
1835
error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
usr/src/lib/libvmmapi/common/vmmapi.c
1842
va = vm_map_gpa(vcpu->ctx, gpa, n);
usr/src/lib/libvmmapi/common/vmmapi.c
1946
vm_activate_cpu(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
1952
ac.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1953
error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac);
usr/src/lib/libvmmapi/common/vmmapi.c
1970
vm_suspend_cpu(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
1976
ac.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1977
error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac);
usr/src/lib/libvmmapi/common/vmmapi.c
1982
vm_resume_cpu(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
1988
ac.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
1989
error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac);
usr/src/lib/libvmmapi/common/vmmapi.c
2006
vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
usr/src/lib/libvmmapi/common/vmmapi.c
2012
vmii.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
2013
error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii);
usr/src/lib/libvmmapi/common/vmmapi.c
2022
vm_set_intinfo(struct vcpu *vcpu, uint64_t info1)
usr/src/lib/libvmmapi/common/vmmapi.c
2028
vmii.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
2030
error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii);
usr/src/lib/libvmmapi/common/vmmapi.c
2103
vm_restart_instruction(void *ctxp, int vcpu __unused)
usr/src/lib/libvmmapi/common/vmmapi.c
2213
vm_get_run_state(struct vcpu *vcpu, enum vcpu_run_state *state,
usr/src/lib/libvmmapi/common/vmmapi.c
2218
data.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
2219
if (vcpu_ioctl(vcpu, VM_GET_RUN_STATE, &data) != 0) {
usr/src/lib/libvmmapi/common/vmmapi.c
2229
vm_set_run_state(struct vcpu *vcpu, enum vcpu_run_state state,
usr/src/lib/libvmmapi/common/vmmapi.c
2234
data.vcpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
2237
if (vcpu_ioctl(vcpu, VM_SET_RUN_STATE, &data) != 0) {
usr/src/lib/libvmmapi/common/vmmapi.c
2245
vm_vcpu_barrier(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
2247
if (ioctl(vcpu->ctx->fd, VM_VCPU_BARRIER, vcpu->vcpuid) != 0) {
usr/src/lib/libvmmapi/common/vmmapi.c
249
struct vcpu *
usr/src/lib/libvmmapi/common/vmmapi.c
252
struct vcpu *vcpu;
usr/src/lib/libvmmapi/common/vmmapi.c
254
vcpu = malloc(sizeof(*vcpu));
usr/src/lib/libvmmapi/common/vmmapi.c
256
if (vcpu == NULL)
usr/src/lib/libvmmapi/common/vmmapi.c
257
return (vcpu);
usr/src/lib/libvmmapi/common/vmmapi.c
259
vcpu->ctx = ctx;
usr/src/lib/libvmmapi/common/vmmapi.c
260
vcpu->vcpuid = vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
261
return (vcpu);
usr/src/lib/libvmmapi/common/vmmapi.c
265
vm_vcpu_close(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
267
free(vcpu);
usr/src/lib/libvmmapi/common/vmmapi.c
271
vcpu_id(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
273
return (vcpu->vcpuid);
usr/src/lib/libvmmapi/common/vmmapi.c
277
vcpu_ctx(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.c
279
return (vcpu->ctx);
usr/src/lib/libvmmapi/common/vmmapi.c
779
vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
usr/src/lib/libvmmapi/common/vmmapi.c
786
*(int *)arg = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
787
return (ioctl(vcpu->ctx->fd, cmd, arg));
usr/src/lib/libvmmapi/common/vmmapi.c
796
vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
usr/src/lib/libvmmapi/common/vmmapi.c
798
return (ioctl(vcpu->ctx->fd, cmd, arg));
usr/src/lib/libvmmapi/common/vmmapi.c
803
vm_set_desc(struct vcpu *vcpu, int reg,
usr/src/lib/libvmmapi/common/vmmapi.c
810
vmsegdesc.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
816
error = vcpu_ioctl(vcpu, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
usr/src/lib/libvmmapi/common/vmmapi.c
821
vm_get_desc(struct vcpu *vcpu, int reg, uint64_t *base, uint32_t *limit,
usr/src/lib/libvmmapi/common/vmmapi.c
828
vmsegdesc.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
831
error = vcpu_ioctl(vcpu, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
usr/src/lib/libvmmapi/common/vmmapi.c
841
vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc)
usr/src/lib/libvmmapi/common/vmmapi.c
845
error = vm_get_desc(vcpu, reg, &seg_desc->base, &seg_desc->limit,
usr/src/lib/libvmmapi/common/vmmapi.c
851
vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
usr/src/lib/libvmmapi/common/vmmapi.c
857
vmreg.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
861
error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg);
usr/src/lib/libvmmapi/common/vmmapi.c
866
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val)
usr/src/lib/libvmmapi/common/vmmapi.c
872
vmreg.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
875
error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg);
usr/src/lib/libvmmapi/common/vmmapi.c
881
vm_set_register_set(struct vcpu *vcpu, unsigned int count,
usr/src/lib/libvmmapi/common/vmmapi.c
888
vmregset.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
893
error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset);
usr/src/lib/libvmmapi/common/vmmapi.c
898
vm_get_register_set(struct vcpu *vcpu, unsigned int count,
usr/src/lib/libvmmapi/common/vmmapi.c
905
vmregset.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
910
error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset);
usr/src/lib/libvmmapi/common/vmmapi.c
916
vm_run(struct vcpu *vcpu, struct vm_exit *vmexit)
usr/src/lib/libvmmapi/common/vmmapi.c
923
error = vcpu_ioctl(vcpu, VM_RUN, &vmrun);
usr/src/lib/libvmmapi/common/vmmapi.c
929
vm_run(struct vcpu *vcpu, const struct vm_entry *vm_entry,
usr/src/lib/libvmmapi/common/vmmapi.c
935
entry.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
938
return (vcpu_ioctl(vcpu, VM_RUN, &entry));
usr/src/lib/libvmmapi/common/vmmapi.c
979
vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
usr/src/lib/libvmmapi/common/vmmapi.c
984
exc.cpuid = vcpu->vcpuid;
usr/src/lib/libvmmapi/common/vmmapi.c
990
return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc));
usr/src/lib/libvmmapi/common/vmmapi.c
995
vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
usr/src/lib/libvmmapi/common/vmmapi.h
158
struct vcpu *vm_vcpu_open(struct vmctx *ctx, int vcpuid);
usr/src/lib/libvmmapi/common/vmmapi.h
159
void vm_vcpu_close(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
160
int vcpu_id(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
162
struct vmctx *vcpu_ctx(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
172
int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
usr/src/lib/libvmmapi/common/vmmapi.h
174
int vm_gla2gpa_nofault(struct vcpu *vcpu,
usr/src/lib/libvmmapi/common/vmmapi.h
185
int vm_set_desc(struct vcpu *vcpu, int reg,
usr/src/lib/libvmmapi/common/vmmapi.h
187
int vm_get_desc(struct vcpu *vcpu, int reg,
usr/src/lib/libvmmapi/common/vmmapi.h
189
int vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc);
usr/src/lib/libvmmapi/common/vmmapi.h
190
int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
usr/src/lib/libvmmapi/common/vmmapi.h
191
int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
usr/src/lib/libvmmapi/common/vmmapi.h
192
int vm_set_register_set(struct vcpu *vcpu, unsigned int count,
usr/src/lib/libvmmapi/common/vmmapi.h
194
int vm_get_register_set(struct vcpu *vcpu, unsigned int count,
usr/src/lib/libvmmapi/common/vmmapi.h
197
int vm_run(struct vcpu *vcpu, struct vm_exit *ret_vmexit);
usr/src/lib/libvmmapi/common/vmmapi.h
199
int vm_run(struct vcpu *vcpu, const struct vm_entry *vm_entry,
usr/src/lib/libvmmapi/common/vmmapi.h
209
int vm_inject_exception(struct vcpu *vcpu, int vector,
usr/src/lib/libvmmapi/common/vmmapi.h
212
void vm_inject_fault(struct vcpu *vcpu, int vector,
usr/src/lib/libvmmapi/common/vmmapi.h
216
vm_inject_gp(struct vcpu *vcpu)
usr/src/lib/libvmmapi/common/vmmapi.h
218
vm_inject_fault(vcpu, IDT_GP, 1, 0);
usr/src/lib/libvmmapi/common/vmmapi.h
222
vm_inject_ac(struct vcpu *vcpu, int errcode)
usr/src/lib/libvmmapi/common/vmmapi.h
224
vm_inject_fault(vcpu, IDT_AC, 1, errcode);
usr/src/lib/libvmmapi/common/vmmapi.h
227
vm_inject_ss(struct vcpu *vcpu, int errcode)
usr/src/lib/libvmmapi/common/vmmapi.h
229
vm_inject_fault(vcpu, IDT_SS, 1, errcode);
usr/src/lib/libvmmapi/common/vmmapi.h
232
int vm_lapic_irq(struct vcpu *vcpu, int vector);
usr/src/lib/libvmmapi/common/vmmapi.h
233
int vm_lapic_local_irq(struct vcpu *vcpu, int vector);
usr/src/lib/libvmmapi/common/vmmapi.h
239
int vm_readwrite_kernemu_device(struct vcpu *vcpu,
usr/src/lib/libvmmapi/common/vmmapi.h
246
int vm_inject_nmi(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
249
int vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap,
usr/src/lib/libvmmapi/common/vmmapi.h
251
int vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap,
usr/src/lib/libvmmapi/common/vmmapi.h
282
int vm_get_intinfo(struct vcpu *vcpu, uint64_t *i1, uint64_t *i2);
usr/src/lib/libvmmapi/common/vmmapi.h
283
int vm_set_intinfo(struct vcpu *vcpu, uint64_t exit_intinfo);
usr/src/lib/libvmmapi/common/vmmapi.h
288
uint64_t *vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
usr/src/lib/libvmmapi/common/vmmapi.h
292
int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *s);
usr/src/lib/libvmmapi/common/vmmapi.h
293
int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state s);
usr/src/lib/libvmmapi/common/vmmapi.h
306
int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *pg,
usr/src/lib/libvmmapi/common/vmmapi.h
325
int vcpu_reset(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
332
int vm_activate_cpu(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
334
int vm_suspend_cpu(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
336
int vm_resume_cpu(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
348
int vm_get_run_state(struct vcpu *vcpu, enum vcpu_run_state *state,
usr/src/lib/libvmmapi/common/vmmapi.h
350
int vm_set_run_state(struct vcpu *vcpu, enum vcpu_run_state state,
usr/src/lib/libvmmapi/common/vmmapi.h
352
int vm_vcpu_barrier(struct vcpu *vcpu);
usr/src/lib/libvmmapi/common/vmmapi.h
359
int vm_setup_freebsd_registers(struct vcpu *vcpu,
usr/src/lib/libvmmapi/common/vmmapi.h
362
int vm_setup_freebsd_registers_i386(struct vcpu *vcpu,
usr/src/lib/libvmmapi/common/vmmapi.h
59
struct vcpu;
usr/src/test/bhyve-tests/tests/common/in_guest.c
457
test_setup_vcpu(struct vcpu *vcpu, uint64_t rip, uint64_t rsp)
usr/src/test/bhyve-tests/tests/common/in_guest.c
461
err = vm_activate_cpu(vcpu);
usr/src/test/bhyve-tests/tests/common/in_guest.c
470
err = vm_set_desc(vcpu, VM_REG_GUEST_CS, 0, UINT32_MAX,
usr/src/test/bhyve-tests/tests/common/in_guest.c
476
err = vm_set_desc(vcpu, VM_REG_GUEST_SS, 0, UINT32_MAX,
usr/src/test/bhyve-tests/tests/common/in_guest.c
483
err = vm_set_desc(vcpu, VM_REG_GUEST_DS, 0, UINT32_MAX,
usr/src/test/bhyve-tests/tests/common/in_guest.c
493
err = vm_set_desc(vcpu, VM_REG_GUEST_TR, MEM_LOC_TSS, 0xff,
usr/src/test/bhyve-tests/tests/common/in_guest.c
498
err = vm_set_desc(vcpu, VM_REG_GUEST_GDTR, MEM_LOC_GDT, 0x1ff, 0);
usr/src/test/bhyve-tests/tests/common/in_guest.c
502
err = vm_set_desc(vcpu, VM_REG_GUEST_IDTR, MEM_LOC_IDT, 0xfff, 0);
usr/src/test/bhyve-tests/tests/common/in_guest.c
515
err = vm_set_desc(vcpu, unsable_segs[i], 0, 0,
usr/src/test/bhyve-tests/tests/common/in_guest.c
553
err = vm_set_register_set(vcpu, ARRAY_SIZE(regnums), regnums,
usr/src/test/bhyve-tests/tests/common/in_guest.c
559
err = vm_set_run_state(vcpu, VRS_RUN, 0);
usr/src/test/bhyve-tests/tests/common/in_guest.c
600
test_run_vcpu(struct vcpu *vcpu, struct vm_entry *ventry, struct vm_exit *vexit)
usr/src/test/bhyve-tests/tests/common/in_guest.c
604
err = vm_run(vcpu, ventry, vexit);
usr/src/test/bhyve-tests/tests/common/in_guest.h
34
int test_setup_vcpu(struct vcpu *, uint64_t, uint64_t);
usr/src/test/bhyve-tests/tests/common/in_guest.h
49
enum vm_exit_kind test_run_vcpu(struct vcpu *, struct vm_entry *,
usr/src/test/bhyve-tests/tests/inst_emul/cpuid.c
111
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/cpuid.c
116
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/cpuid.c
120
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/cpuid.c
146
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/cpuid_guest_state.c
147
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/cpuid_guest_state.c
151
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/cpuid_guest_state.c
155
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/cpuid_guest_state.c
181
kind = test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
100
if (vm_get_register(vcpu, VM_REG_GUEST_RCX, &rcx) != 0) {
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
103
if (vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip) != 0) {
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
115
if (vm_run(vcpu, &ventry, &vexit) != 0) {
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
142
repeat_consistent_exit(vcpu, &ventry, &vexit, vexit.rip);
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
146
run_until_unhandled(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
160
if (vm_run(vcpu, &ventry, &vexit) != 0) {
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
172
if (vm_run(vcpu, &ventry, &vexit) != 0) {
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
186
repeat_consistent_exit(vcpu, &ventry, &vexit, vexit.rip);
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
34
run_until_unhandled(struct vcpu *vcpu, struct vm_entry *ventry,
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
39
test_run_vcpu(vcpu, ventry, vexit);
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
57
repeat_consistent_exit(struct vcpu *vcpu, struct vm_entry *ventry,
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
61
if (vm_run(vcpu, ventry, vexit) != 0) {
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
75
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
80
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
83
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/exit_consistent.c
95
run_until_unhandled(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/exit_paging.c
39
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/exit_paging.c
44
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/exit_paging.c
48
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/exit_paging.c
61
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/imul.c
103
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/imul.c
84
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/imul.c
89
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/imul.c
93
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/inout.c
118
int err = vm_set_register(vcpu, 0, vexit->rip + 2);
usr/src/test/bhyve-tests/tests/inst_emul/inout.c
143
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/inout.c
176
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/inout.c
180
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/inout.c
192
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/inout.c
194
const bool exit_ok = advance_test_state(&vexit, &ventry, vcpu,
usr/src/test/bhyve-tests/tests/inst_emul/inout.c
43
struct vcpu *vcpu, enum vm_exit_kind kind, enum test_state *state,
usr/src/test/bhyve-tests/tests/inst_emul/inout.c
81
int err = vm_set_register(vcpu, 0, vexit->rip + 2);
usr/src/test/bhyve-tests/tests/inst_emul/page_dirty.c
112
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/page_dirty.c
125
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/page_dirty.c
129
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/page_dirty.c
151
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/page_dirty.c
162
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/page_dirty.c
165
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/page_dirty.c
211
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/rdmsr.c
38
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/rdmsr.c
43
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/rdmsr.c
47
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/rdmsr.c
58
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
114
test_plain_suspend(struct vmctx *ctx, struct vcpu *vcpu,
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
120
.vcpu = vcpu,
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
127
vcpu0_setup(vcpu);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
149
test_emitted_triplefault(struct vmctx *ctx, struct vcpu *vcpu)
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
154
.vcpu = vcpu,
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
158
struct vcpu *vcpu1;
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
162
vcpu0_setup(vcpu);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
232
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
236
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
244
test_plain_suspend(ctx, vcpu, VM_SUSPEND_RESET);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
245
test_plain_suspend(ctx, vcpu, VM_SUSPEND_POWEROFF);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
246
test_plain_suspend(ctx, vcpu, VM_SUSPEND_HALT);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
252
test_emitted_triplefault(ctx, vcpu);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
43
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
52
struct vcpu *vcpu = vtc->vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
60
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
78
vcpu0_setup(struct vcpu *vcpu)
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
82
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, VCPU0_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/suspend_info.c
86
err = vm_set_register(vcpu, VM_REG_GUEST_RDI, 0);
usr/src/test/bhyve-tests/tests/inst_emul/triple_fault.c
38
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/triple_fault.c
43
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/triple_fault.c
47
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/triple_fault.c
58
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/vcpu_barrier.c
103
err = vm_activate_cpu(vcpu);
usr/src/test/bhyve-tests/tests/inst_emul/vcpu_barrier.c
114
err = vm_set_run_state(vcpu, VRS_INIT, 0);
usr/src/test/bhyve-tests/tests/inst_emul/vcpu_barrier.c
120
if (pthread_create(&vcpu0_tid, NULL, vcpu0_thread, (void *)vcpu) != 0) {
usr/src/test/bhyve-tests/tests/inst_emul/vcpu_barrier.c
128
err = vm_vcpu_barrier(vcpu);
usr/src/test/bhyve-tests/tests/inst_emul/vcpu_barrier.c
44
struct vcpu *vcpu = arg;
usr/src/test/bhyve-tests/tests/inst_emul/vcpu_barrier.c
50
int err = vm_run(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/inst_emul/vcpu_barrier.c
92
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/vcpu_barrier.c
98
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/wrmsr.c
38
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/inst_emul/wrmsr.c
43
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/inst_emul/wrmsr.c
47
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/inst_emul/wrmsr.c
59
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/guest_tsc_adjust.c
40
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/guest_tsc_adjust.c
45
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/guest_tsc_adjust.c
49
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/guest_tsc_adjust.c
89
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/rdmsr_tsc.c
38
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/rdmsr_tsc.c
43
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/rdmsr_tsc.c
47
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/rdmsr_tsc.c
58
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
145
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
194
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
199
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
203
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
237
do_freq_test(guest_freq, per_sec, seconds, vmfd, vcpu, &time_info);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
240
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
243
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
246
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
256
do_freq_test(guest_freq, per_sec, seconds, vmfd, vcpu, &time_info);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
259
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
262
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
265
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
275
do_freq_test(guest_freq, per_sec, seconds, vmfd, vcpu, &time_info);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
278
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
281
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
284
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
294
do_freq_test(guest_freq, per_sec, seconds, vmfd, vcpu, &time_info);
usr/src/test/bhyve-tests/tests/kdev/tsc_freq_ctrl.c
80
const int vmfd, struct vcpu *vcpu, struct vdi_time_info_v1 *src)
usr/src/test/bhyve-tests/tests/kdev/vatpit_freq.c
112
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vatpit_freq.c
83
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/vatpit_freq.c
88
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/vatpit_freq.c
92
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/vhpet_freq.c
113
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vhpet_freq.c
88
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/vhpet_freq.c
93
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/vhpet_freq.c
97
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq.c
104
test_run_vcpu(vcpu, ventry, vexit);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq.c
153
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq.c
158
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq.c
162
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq.c
170
test_for_divisor(vcpu, 2, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq.c
171
test_for_divisor(vcpu, 4, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq.c
172
test_for_divisor(vcpu, 16, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq.c
92
test_for_divisor(struct vcpu *vcpu, uint_t divisor, struct vm_entry *ventry,
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq_periodic.c
109
test_run_vcpu(vcpu, ventry, vexit);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq_periodic.c
163
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq_periodic.c
168
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq_periodic.c
172
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq_periodic.c
180
run_test(vcpu, 4, 3, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq_periodic.c
181
run_test(vcpu, 2, 4, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vlapic_freq_periodic.c
97
run_test(struct vcpu *vcpu, uint_t divisor, uint_t loops,
usr/src/test/bhyve-tests/tests/kdev/vlapic_mmio_access.c
55
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/vlapic_mmio_access.c
60
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/vlapic_mmio_access.c
64
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/vlapic_mmio_access.c
73
err = vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
usr/src/test/bhyve-tests/tests/kdev/vlapic_mmio_access.c
83
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vlapic_msr_access.c
39
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/vlapic_msr_access.c
44
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/vlapic_msr_access.c
48
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/vlapic_msr_access.c
53
err = vm_set_x2apic_state(vcpu, X2APIC_ENABLED);
usr/src/test/bhyve-tests/tests/kdev/vlapic_msr_access.c
63
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vpmtmr_freq.c
102
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/vpmtmr_freq.c
118
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/vpmtmr_freq.c
88
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/vpmtmr_freq.c
93
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/vrtc_ops.c
62
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/vrtc_ops.c
67
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/vrtc_ops.c
71
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/vrtc_ops.c
90
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/kdev/wrmsr_tsc.c
38
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/kdev/wrmsr_tsc.c
43
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/kdev/wrmsr_tsc.c
47
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/kdev/wrmsr_tsc.c
58
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/perf/entry_exit.c
180
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/perf/entry_exit.c
187
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/perf/entry_exit.c
191
err = test_setup_vcpu(vcpu, MEM_LOC_PAYLOAD, MEM_LOC_STACK);
usr/src/test/bhyve-tests/tests/perf/entry_exit.c
202
test_run_vcpu(vcpu, &ventry, &vexit);
usr/src/test/bhyve-tests/tests/vmm/datarw_msrs.c
84
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/vmm/datarw_msrs.c
91
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/vmm/datarw_msrs.c
95
if (vm_activate_cpu(vcpu) != 0) {
usr/src/test/bhyve-tests/tests/vmm/datarw_vcpu.c
103
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/vmm/datarw_vcpu.c
107
if (vm_activate_cpu(vcpu) != 0) {
usr/src/test/bhyve-tests/tests/vmm/datarw_vcpu.c
193
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/vmm/datarw_vcpu.c
96
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/vmm/default_capabs.c
36
check_caps(struct vcpu *vcpu)
usr/src/test/bhyve-tests/tests/vmm/default_capabs.c
50
if (vm_get_capability(vcpu, checks[i].cap, &val) != 0) {
usr/src/test/bhyve-tests/tests/vmm/default_capabs.c
66
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/vmm/default_capabs.c
73
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/vmm/default_capabs.c
78
check_caps(vcpu);
usr/src/test/bhyve-tests/tests/vmm/default_capabs.c
84
check_caps(vcpu);
usr/src/test/bhyve-tests/tests/vmm/default_capabs.c
86
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
103
test_timer_icr_constraints(int vmfd, struct vcpu *vcpu)
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
112
int error = vm_readwrite_kernemu_device(vcpu, APIC_ADDR_TIMER_CCR,
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
140
if (vcpu_reset(vcpu) != 0) {
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
173
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
180
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
184
if (vm_activate_cpu(vcpu) != 0) {
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
190
test_ccr_clamp(vmfd, vcpu);
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
191
test_timer_icr_constraints(vmfd, vcpu);
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
41
test_ccr_clamp(int vmfd, struct vcpu *vcpu)
usr/src/test/bhyve-tests/tests/vmm/import_vlapic.c
91
int error = vm_readwrite_kernemu_device(vcpu, APIC_ADDR_TIMER_CCR,
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
54
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
57
if ((vcpu = vm_vcpu_open(ctx, i)) == NULL) {
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
62
if (vm_activate_cpu(vcpu) != 0) {
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
67
if (vm_get_register(vcpu, VM_REG_GUEST_RAX, &val) != 0) {
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
71
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
84
struct vcpu *vcpu = vm_vcpu_open(ctx, vcpuid);
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
85
if (vcpu == NULL) {
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
90
if (vm_activate_cpu(vcpu) == 0) {
usr/src/test/bhyve-tests/tests/vmm/maxcpu.c
95
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/vmm/pause_resume.c
108
if (vm_run(vcpu, &ventry, &vexit) == 0) {
usr/src/test/bhyve-tests/tests/vmm/pause_resume.c
136
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/vmm/pause_resume.c
63
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/vmm/pause_resume.c
70
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/vmm/pause_resume.c
74
if (vm_activate_cpu(vcpu) != 0) {
usr/src/test/bhyve-tests/tests/vmm/self_destruct.c
37
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/vmm/self_destruct.c
50
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/vmm/self_destruct.c
74
if (vm_get_register(vcpu, VM_REG_GUEST_RAX, &reg) == 0) {
usr/src/test/bhyve-tests/tests/vmm/self_destruct.c
85
vm_vcpu_close(vcpu);
usr/src/test/bhyve-tests/tests/vmm/time_data.c
526
struct vcpu *vcpu;
usr/src/test/bhyve-tests/tests/vmm/time_data.c
532
if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL) {
usr/src/test/bhyve-tests/tests/vmm/time_data.c
535
if (vm_activate_cpu(vcpu) != 0) {
usr/src/test/bhyve-tests/tests/vmm/time_data.c
601
vm_vcpu_close(vcpu);
usr/src/tools/smatch/src/validation/attr-optimize.c
6
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
usr/src/uts/common/vm/vpm.c
719
vpmd_cpu[CPU->cpu_seqid].vcpu.vcpu_misses++;
usr/src/uts/common/vm/vpm.c
721
vpmd_cpu[CPU->cpu_seqid].vcpu.vcpu_hits++;
usr/src/uts/common/vm/vpm.c
94
} vcpu;
usr/src/uts/common/vm/vpm.c
99
#define vfree_ndx vcpu.vcpu_free_ndx
usr/src/uts/common/xen/dtrace/xdt.c
1721
vcpu = data[1] >> 16;
usr/src/uts/common/xen/dtrace/xdt.c
1723
xdt_update_sched_context(cpuid, dom, vcpu);
usr/src/uts/common/xen/dtrace/xdt.c
1724
xdt_update_domain_context(dom, vcpu);
usr/src/uts/common/xen/dtrace/xdt.c
676
xdt_update_sched_context(uint_t cpuid, uint_t dom, uint_t vcpu)
usr/src/uts/common/xen/dtrace/xdt.c
681
sp->cur_vcpuid = vcpu;
usr/src/uts/common/xen/dtrace/xdt.c
686
xdt_update_domain_context(uint_t dom, uint_t vcpu)
usr/src/uts/common/xen/dtrace/xdt.c
689
xdt_curvcpu = vcpu;
usr/src/uts/common/xen/dtrace/xdt.c
696
uint_t dom, vcpu;
usr/src/uts/common/xen/public/arch-x86/xen.h
205
struct vcpu *pi_vcpu; /* Panicking vcpu */
usr/src/uts/common/xen/public/domctl.h
252
uint32_t vcpu; /* IN */
usr/src/uts/common/xen/public/domctl.h
262
uint32_t vcpu;
usr/src/uts/common/xen/public/domctl.h
278
uint32_t vcpu; /* IN */
usr/src/uts/common/xen/public/domctl.h
436
uint32_t vcpu; /* IN */
usr/src/uts/common/xen/public/domctl.h
547
uint32_t vcpu;
usr/src/uts/common/xen/public/domctl.h
630
uint32_t vcpu; /* IN */
usr/src/uts/common/xen/public/event_channel.h
123
uint32_t vcpu;
usr/src/uts/common/xen/public/event_channel.h
173
uint32_t vcpu; /* VCPU to which this channel is bound. */
usr/src/uts/common/xen/public/event_channel.h
204
uint32_t vcpu;
usr/src/uts/common/xen/public/event_channel.h
92
uint32_t vcpu;
usr/src/uts/i86xpv/os/evtchn.c
215
bind.vcpu = cpu;
usr/src/uts/i86xpv/os/evtchn.c
280
bind.vcpu = cpu;
usr/src/uts/i86xpv/os/evtchn.c
295
bind.vcpu = cpu;
usr/src/uts/i86xpv/os/xen_machdep.c
653
vcpu_info_t *vcpu = &si->vcpu_info[i];
usr/src/uts/i86xpv/os/xen_machdep.c
657
i, vcpu->evtchn_upcall_pending,
usr/src/uts/i86xpv/os/xen_machdep.c
658
vcpu->evtchn_upcall_mask,
usr/src/uts/i86xpv/os/xen_machdep.c
659
vcpu->evtchn_pending_sel);
usr/src/uts/intel/io/vmm/amd/svm.c
1006
svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
usr/src/uts/intel/io/vmm/amd/svm.c
1007
svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
usr/src/uts/intel/io/vmm/amd/svm.c
1016
svm_nmi_blocked(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
1018
return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
1023
svm_clear_nmi_blocking(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
1027
KASSERT(svm_nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
usr/src/uts/intel/io/vmm/amd/svm.c
1039
svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
usr/src/uts/intel/io/vmm/amd/svm.c
1045
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
108
static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
usr/src/uts/intel/io/vmm/amd/svm.c
109
static int svm_getreg(void *arg, int vcpu, int ident, uint64_t *val);
usr/src/uts/intel/io/vmm/amd/svm.c
1099
svm_inject_nmi(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
1101
struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1103
ASSERT(!svm_nmi_blocked(sc, vcpu));
usr/src/uts/intel/io/vmm/amd/svm.c
1106
vm_nmi_clear(sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1115
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
usr/src/uts/intel/io/vmm/amd/svm.c
1119
svm_inject_irq(struct svm_softc *sc, int vcpu, int vector)
usr/src/uts/intel/io/vmm/amd/svm.c
1121
struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1131
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval)
usr/src/uts/intel/io/vmm/amd/svm.c
1133
struct vmcb_state *state = svm_get_vmcb_state(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1162
!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1166
!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1170
!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1182
error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
usr/src/uts/intel/io/vmm/amd/svm.c
1188
svm_handle_msr(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit,
usr/src/uts/intel/io/vmm/amd/svm.c
1191
struct vmcb_state *state = svm_get_vmcb_state(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1192
struct svm_regctx *ctx = svm_get_guest_regctx(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1198
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1202
struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1206
res = svm_write_efer(svm_sc, vcpu, val);
usr/src/uts/intel/io/vmm/amd/svm.c
1208
res = svm_pmu_wrmsr(svm_sc, vcpu, ecx, val);
usr/src/uts/intel/io/vmm/amd/svm.c
1210
res = svm_wrmsr(svm_sc, vcpu, ecx, val);
usr/src/uts/intel/io/vmm/amd/svm.c
1213
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1216
struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1220
res = svm_pmu_rdmsr(svm_sc, vcpu, ecx, &val);
usr/src/uts/intel/io/vmm/amd/svm.c
1222
res = svm_rdmsr(svm_sc, vcpu, ecx, &val);
usr/src/uts/intel/io/vmm/amd/svm.c
1235
vm_inject_gp(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1249
svm_handle_rdpmc(struct svm_softc *svm_sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
1251
struct vmcb_state *state = svm_get_vmcb_state(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1252
struct svm_regctx *ctx = svm_get_guest_regctx(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1256
if (svm_pmu_rdpmc(svm_sc, vcpu, ecx, &val)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1260
vm_inject_gp(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1291
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
usr/src/uts/intel/io/vmm/amd/svm.c
1300
ctx = svm_get_guest_regctx(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1301
vmcb = svm_get_vmcb(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1314
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1333
svm_update_virqinfo(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1334
svm_save_exitintinfo(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1339
svm_handle_cr0_read(svm_sc, vcpu,
usr/src/uts/intel/io/vmm/amd/svm.c
1350
svm_inst_emul_other(svm_sc, vcpu, vmexit);
usr/src/uts/intel/io/vmm/amd/svm.c
1356
svm_handle_cr0_write(svm_sc, vcpu,
usr/src/uts/intel/io/vmm/amd/svm.c
1369
svm_inst_emul_other(svm_sc, vcpu, vmexit);
usr/src/uts/intel/io/vmm/amd/svm.c
1377
svm_clear_nmi_blocking(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1381
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1382
svm_disable_intr_window_exiting(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1386
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1400
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1414
VERIFY0(svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
usr/src/uts/intel/io/vmm/amd/svm.c
1452
VERIFY0(vm_inject_exception(svm_sc->vm, vcpu, idtvec,
usr/src/uts/intel/io/vmm/amd/svm.c
1459
handled = svm_handle_msr(svm_sc, vcpu, vmexit, info1 != 0);
usr/src/uts/intel/io/vmm/amd/svm.c
1462
svm_handle_rdpmc(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1466
handled = svm_handle_inout(svm_sc, vcpu, vmexit);
usr/src/uts/intel/io/vmm/amd/svm.c
1467
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1470
(void) vm_suspend(svm_sc->vm, VM_SUSPEND_TRIPLEFAULT, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1475
vm_inject_ud(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1485
vm_inject_ud(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1495
vm_inject_ud(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1499
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1500
vcpu_emulate_cpuid(svm_sc->vm, vcpu, &state->rax,
usr/src/uts/intel/io/vmm/amd/svm.c
1505
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1511
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1517
} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1521
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1523
svm_handle_mmio_emul(svm_sc, vcpu, vmexit, info2);
usr/src/uts/intel/io/vmm/amd/svm.c
1524
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1534
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1538
DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, vmexit->rip, uint32_t,
usr/src/uts/intel/io/vmm/amd/svm.c
1572
svm_inject_events(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
1580
state = svm_get_vmcb_state(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1581
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1582
vcpustate = svm_get_vcpu(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1607
if (vm_entry_intinfo(sc->vm, vcpu, &intinfo)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1609
vmm_stat_incr(sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1614
if (vm_nmi_pending(sc->vm, vcpu) && !svm_nmi_blocked(sc, vcpu)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1621
svm_inject_nmi(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1628
if (vm_extint_pending(sc->vm, vcpu)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1648
svm_inject_irq(sc, vcpu, vector);
usr/src/uts/intel/io/vmm/amd/svm.c
1649
vm_extint_clear(sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1664
svm_inject_vlapic(struct svm_softc *sc, int vcpu, struct vlapic *vlapic,
usr/src/uts/intel/io/vmm/amd/svm.c
1672
state = svm_get_vmcb_state(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1673
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1688
svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
usr/src/uts/intel/io/vmm/amd/svm.c
1710
svm_inject_irq(sc, vcpu, vector);
usr/src/uts/intel/io/vmm/amd/svm.c
1723
svm_inject_recheck(struct svm_softc *sc, int vcpu,
usr/src/uts/intel/io/vmm/amd/svm.c
1728
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1739
if (vm_nmi_pending(sc->vm, vcpu) &&
usr/src/uts/intel/io/vmm/amd/svm.c
1740
!svm_nmi_blocked(sc, vcpu)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1744
if (vm_extint_pending(sc->vm, vcpu)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1762
svm_enable_intr_window_exiting(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1884
svm_vmrun(void *arg, int vcpu, uint64_t rip)
usr/src/uts/intel/io/vmm/amd/svm.c
1901
vcpustate = svm_get_vcpu(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1902
state = svm_get_vmcb_state(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1903
vmexit = vm_exitinfo(vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1904
vlapic = vm_lapic(vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1905
vmc = vm_get_vmclient(vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1907
gctx = svm_get_guest_regctx(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1908
vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
usr/src/uts/intel/io/vmm/amd/svm.c
1919
svm_set_dirty(svm_sc, vcpu, 0xffffffff);
usr/src/uts/intel/io/vmm/amd/svm.c
1932
vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
1935
svm_apply_tsc_adjust(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1937
svm_msr_guest_enter(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1954
inject_state = svm_inject_events(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
1967
inject_state = svm_inject_vlapic(svm_sc, vcpu, vlapic,
usr/src/uts/intel/io/vmm/amd/svm.c
1974
if (vcpu_entry_bailout_checks(vm, vcpu, state->rip)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1979
if (vcpu_run_state_pending(vm, vcpu)) {
usr/src/uts/intel/io/vmm/amd/svm.c
1981
vm_exit_run_state(vm, vcpu, state->rip);
usr/src/uts/intel/io/vmm/amd/svm.c
1989
if (svm_inject_recheck(svm_sc, vcpu, inject_state)) {
usr/src/uts/intel/io/vmm/amd/svm.c
2009
check_asid(svm_sc, vcpu, curcpu, nptgen);
usr/src/uts/intel/io/vmm/amd/svm.c
2011
svm_pmu_enter(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2012
vcpu_ustate_change(vm, vcpu, VU_RUN);
usr/src/uts/intel/io/vmm/amd/svm.c
2014
svm_apply_dirty(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2027
vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);
usr/src/uts/intel/io/vmm/amd/svm.c
2028
svm_pmu_exit(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2045
handled = svm_vmexit(svm_sc, vcpu, vmexit);
usr/src/uts/intel/io/vmm/amd/svm.c
2048
svm_msr_guest_exit(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2113
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
usr/src/uts/intel/io/vmm/amd/svm.c
2122
vmcb = svm_get_vmcb(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2124
regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
usr/src/uts/intel/io/vmm/amd/svm.c
2136
svm_get_cr0(sc, vcpu, val);
usr/src/uts/intel/io/vmm/amd/svm.c
2194
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
usr/src/uts/intel/io/vmm/amd/svm.c
2204
vmcb = svm_get_vmcb(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2206
regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
usr/src/uts/intel/io/vmm/amd/svm.c
2226
svm_set_cr0(sc, vcpu, val, false);
usr/src/uts/intel/io/vmm/amd/svm.c
2275
svm_set_dirty(sc, vcpu, dirty);
usr/src/uts/intel/io/vmm/amd/svm.c
2288
svm_setdesc(void *arg, int vcpu, int reg, const struct seg_desc *desc)
usr/src/uts/intel/io/vmm/amd/svm.c
2295
vmcb = svm_get_vmcb(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2306
svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
usr/src/uts/intel/io/vmm/amd/svm.c
2333
svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
usr/src/uts/intel/io/vmm/amd/svm.c
2349
svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
usr/src/uts/intel/io/vmm/amd/svm.c
2356
vmcb = svm_get_vmcb(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2424
svm_get_msr(void *arg, int vcpu, uint32_t msr, uint64_t *valp)
usr/src/uts/intel/io/vmm/amd/svm.c
2427
struct vmcb *vmcb = svm_get_vmcb(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2439
svm_set_msr(void *arg, int vcpu, uint32_t msr, uint64_t val)
usr/src/uts/intel/io/vmm/amd/svm.c
2442
struct vmcb *vmcb = svm_get_vmcb(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
245
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
usr/src/uts/intel/io/vmm/amd/svm.c
2466
svm_set_dirty(sc, vcpu, dirty);
usr/src/uts/intel/io/vmm/amd/svm.c
2473
svm_setcap(void *arg, int vcpu, int type, int val)
usr/src/uts/intel/io/vmm/amd/svm.c
2482
svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
2486
svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
2497
svm_getcap(void *arg, int vcpu, int type, int *retval)
usr/src/uts/intel/io/vmm/amd/svm.c
2507
*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
251
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2511
*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
2546
svm_pause(void *arg, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
2549
struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2557
svm_stash_intinfo(sc, vcpu, intinfo);
usr/src/uts/intel/io/vmm/amd/svm.c
256
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
usr/src/uts/intel/io/vmm/amd/svm.c
2567
svm_disable_intr_window_exiting(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2568
svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
usr/src/uts/intel/io/vmm/amd/svm.c
2572
svm_savectx(void *arg, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
2579
if (sc->vcpu[vcpu].loaded) {
usr/src/uts/intel/io/vmm/amd/svm.c
2580
svm_msr_guest_exit(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
2585
svm_restorectx(void *arg, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
2589
if (sc->vcpu[vcpu].loaded) {
usr/src/uts/intel/io/vmm/amd/svm.c
2590
svm_msr_guest_enter(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
264
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
273
svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
usr/src/uts/intel/io/vmm/amd/svm.c
278
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
usr/src/uts/intel/io/vmm/amd/svm.c
286
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
287
state = svm_get_vmcb_state(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
303
svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
usr/src/uts/intel/io/vmm/amd/svm.c
305
svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
usr/src/uts/intel/io/vmm/amd/svm.c
312
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
319
if (vcpu_trace_exceptions(sc->vm, vcpu)) {
usr/src/uts/intel/io/vmm/amd/svm.c
327
svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
usr/src/uts/intel/io/vmm/amd/svm.c
330
svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
usr/src/uts/intel/io/vmm/amd/svm.c
334
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
usr/src/uts/intel/io/vmm/amd/svm.c
335
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
usr/src/uts/intel/io/vmm/amd/svm.c
336
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
usr/src/uts/intel/io/vmm/amd/svm.c
337
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
usr/src/uts/intel/io/vmm/amd/svm.c
338
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
usr/src/uts/intel/io/vmm/amd/svm.c
339
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
usr/src/uts/intel/io/vmm/amd/svm.c
340
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
usr/src/uts/intel/io/vmm/amd/svm.c
341
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_RDPMC);
usr/src/uts/intel/io/vmm/amd/svm.c
342
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
usr/src/uts/intel/io/vmm/amd/svm.c
343
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
347
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);
usr/src/uts/intel/io/vmm/amd/svm.c
349
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
usr/src/uts/intel/io/vmm/amd/svm.c
350
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
usr/src/uts/intel/io/vmm/amd/svm.c
353
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
usr/src/uts/intel/io/vmm/amd/svm.c
354
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
usr/src/uts/intel/io/vmm/amd/svm.c
362
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
usr/src/uts/intel/io/vmm/amd/svm.c
363
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMMCALL);
usr/src/uts/intel/io/vmm/amd/svm.c
364
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
usr/src/uts/intel/io/vmm/amd/svm.c
365
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
usr/src/uts/intel/io/vmm/amd/svm.c
366
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
usr/src/uts/intel/io/vmm/amd/svm.c
367
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
usr/src/uts/intel/io/vmm/amd/svm.c
368
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
usr/src/uts/intel/io/vmm/amd/svm.c
369
if (vcpu_trap_wbinvd(sc->vm, vcpu) != 0) {
usr/src/uts/intel/io/vmm/amd/svm.c
370
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
417
struct svm_vcpu *vcpu;
usr/src/uts/intel/io/vmm/amd/svm.c
473
vcpu = svm_get_vcpu(svm_sc, i);
usr/src/uts/intel/io/vmm/amd/svm.c
474
vcpu->nextrip = ~0;
usr/src/uts/intel/io/vmm/amd/svm.c
475
vcpu->lastcpu = NOCPU;
usr/src/uts/intel/io/vmm/amd/svm.c
476
vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
usr/src/uts/intel/io/vmm/amd/svm.c
558
svm_handle_inout(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
usr/src/uts/intel/io/vmm/amd/svm.c
567
state = svm_get_vmcb_state(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
568
ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
584
svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);
usr/src/uts/intel/io/vmm/amd/svm.c
585
vie = vm_vie_ctx(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
670
svm_handle_mmio_emul(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit,
usr/src/uts/intel/io/vmm/amd/svm.c
681
vmcb = svm_get_vmcb(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
719
vie = vm_vie_ctx(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
730
svm_set_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t val, bool guest_write)
usr/src/uts/intel/io/vmm/amd/svm.c
736
state = svm_get_vmcb_state(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
737
regctx = svm_get_guest_regctx(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
749
flush_asid(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
773
svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_CR);
usr/src/uts/intel/io/vmm/amd/svm.c
787
svm_enable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
799
svm_disable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
802
svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_I);
usr/src/uts/intel/io/vmm/amd/svm.c
806
svm_get_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t *val)
usr/src/uts/intel/io/vmm/amd/svm.c
811
vmcb = svm_get_vmcb(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
812
regctx = svm_get_guest_regctx(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
822
svm_handle_cr0_read(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg)
usr/src/uts/intel/io/vmm/amd/svm.c
827
svm_get_cr0(svm_sc, vcpu, &val);
usr/src/uts/intel/io/vmm/amd/svm.c
828
err = svm_setreg(svm_sc, vcpu, reg, val);
usr/src/uts/intel/io/vmm/amd/svm.c
833
svm_handle_cr0_write(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg)
usr/src/uts/intel/io/vmm/amd/svm.c
839
state = svm_get_vmcb_state(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
841
err = svm_getreg(svm_sc, vcpu, reg, &val);
usr/src/uts/intel/io/vmm/amd/svm.c
846
vm_inject_gp(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
851
vm_inject_gp(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
858
vm_inject_gp(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
863
svm_set_cr0(svm_sc, vcpu, val, true);
usr/src/uts/intel/io/vmm/amd/svm.c
867
svm_inst_emul_other(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
usr/src/uts/intel/io/vmm/amd/svm.c
875
vie = vm_vie_ctx(svm_sc->vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
876
svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);
usr/src/uts/intel/io/vmm/amd/svm.c
884
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
891
vlapic = vm_lapic(vm, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
892
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
915
svm_stash_intinfo(struct svm_softc *svm_sc, int vcpu, uint64_t intinfo)
usr/src/uts/intel/io/vmm/amd/svm.c
928
VERIFY0(vm_exit_intinfo(svm_sc->vm, vcpu, intinfo));
usr/src/uts/intel/io/vmm/amd/svm.c
932
svm_save_exitintinfo(struct svm_softc *svm_sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
934
struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
942
vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
usr/src/uts/intel/io/vmm/amd/svm.c
944
svm_stash_intinfo(svm_sc, vcpu, intinfo);
usr/src/uts/intel/io/vmm/amd/svm.c
949
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
952
return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm.c
957
svm_enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
962
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
963
state = svm_get_vmcb_state(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
968
KASSERT(vintr_intercept_enabled(sc, vcpu),
usr/src/uts/intel/io/vmm/amd/svm.c
987
svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
usr/src/uts/intel/io/vmm/amd/svm.c
988
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
usr/src/uts/intel/io/vmm/amd/svm.c
992
svm_disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm.c
996
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm.c
999
KASSERT(!vintr_intercept_enabled(sc, vcpu),
usr/src/uts/intel/io/vmm/amd/svm_msr.c
112
svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_msr.c
114
uint64_t *host_msrs = sc->host_msrs[vcpu];
usr/src/uts/intel/io/vmm/amd/svm_msr.c
133
svm_rdmsr(struct svm_softc *sc, int vcpu, uint32_t num, uint64_t *result)
usr/src/uts/intel/io/vmm/amd/svm_msr.c
160
svm_wrmsr(struct svm_softc *sc, int vcpu, uint32_t num, uint64_t val)
usr/src/uts/intel/io/vmm/amd/svm_msr.c
76
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_msr.c
89
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_msr.c
91
uint64_t *host_msrs = sc->host_msrs[vcpu];
usr/src/uts/intel/io/vmm/amd/svm_msr.h
34
void svm_msr_guest_init(struct svm_softc *sc, int vcpu);
usr/src/uts/intel/io/vmm/amd/svm_msr.h
35
void svm_msr_guest_enter(struct svm_softc *sc, int vcpu);
usr/src/uts/intel/io/vmm/amd/svm_msr.h
36
void svm_msr_guest_exit(struct svm_softc *sc, int vcpu);
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
228
svm_pmu_rdmsr(struct svm_softc *svm_sc, int vcpu, uint32_t msr, uint64_t *valp)
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
232
struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
255
svm_pmu_wrmsr(struct svm_softc *svm_sc, int vcpu, uint32_t msr, uint64_t val)
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
259
struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
298
svm_pmu_rdpmc(struct svm_softc *svm_sc, int vcpu, uint32_t ecx, uint64_t *valp)
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
300
struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
321
svm_pmu_enter(struct svm_softc *svm_sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
323
struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
340
svm_disable_intercept(svm_sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
343
svm_enable_intercept(svm_sc, vcpu, VMCB_CTRL1_INTCPT,
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
354
svm_pmu_exit(struct svm_softc *svm_sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_pmu.c
356
struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm_softc.h
100
svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
102
return (&(sc->vcpu[vcpu].vmcb.ctrl));
usr/src/uts/intel/io/vmm/amd/svm_softc.h
106
svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
108
return (&(sc->vcpu[vcpu].swctx));
usr/src/uts/intel/io/vmm/amd/svm_softc.h
112
svm_get_pmu(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
114
return (&(sc->vcpu[vcpu].pmu));
usr/src/uts/intel/io/vmm/amd/svm_softc.h
118
svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
120
struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm_softc.h
126
svm_apply_dirty(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
128
struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm_softc.h
129
struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
usr/src/uts/intel/io/vmm/amd/svm_softc.h
139
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
141
svm_set_intercept(sc, vcpu, off, bitmask, 0);
usr/src/uts/intel/io/vmm/amd/svm_softc.h
145
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
147
svm_set_intercept(sc, vcpu, off, bitmask, 1);
usr/src/uts/intel/io/vmm/amd/svm_softc.h
65
struct svm_vcpu vcpu[VM_MAXCPU];
usr/src/uts/intel/io/vmm/amd/svm_softc.h
82
svm_get_vcpu(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
84
return (&(sc->vcpu[vcpu]));
usr/src/uts/intel/io/vmm/amd/svm_softc.h
88
svm_get_vmcb(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
90
return (&(sc->vcpu[vcpu].vmcb));
usr/src/uts/intel/io/vmm/amd/svm_softc.h
94
svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
usr/src/uts/intel/io/vmm/amd/svm_softc.h
96
return (&(sc->vcpu[vcpu].vmcb.state));
usr/src/uts/intel/io/vmm/intel/vmx.c
1013
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1029
vmx_apply_tsc_adjust(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1031
vmxstate = &vmx->state[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
1037
vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
1044
vmx_invvpid(vmx, vcpu, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
1048
vmx_int_window_exiting(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1050
return ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0);
usr/src/uts/intel/io/vmm/intel/vmx.c
1054
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1056
if (!vmx_int_window_exiting(vmx, vcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1058
vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
usr/src/uts/intel/io/vmm/intel/vmx.c
1059
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
usr/src/uts/intel/io/vmm/intel/vmx.c
1064
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1067
vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
usr/src/uts/intel/io/vmm/intel/vmx.c
1068
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
usr/src/uts/intel/io/vmm/intel/vmx.c
1072
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1074
return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
usr/src/uts/intel/io/vmm/intel/vmx.c
1078
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1080
if (!vmx_nmi_window_exiting(vmx, vcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1081
vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
usr/src/uts/intel/io/vmm/intel/vmx.c
1082
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
usr/src/uts/intel/io/vmm/intel/vmx.c
1087
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1089
vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
usr/src/uts/intel/io/vmm/intel/vmx.c
1090
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
usr/src/uts/intel/io/vmm/intel/vmx.c
1101
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1103
const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);
usr/src/uts/intel/io/vmm/intel/vmx.c
1105
ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);
usr/src/uts/intel/io/vmm/intel/vmx.c
1107
if (vmx->tsc_offset_active[vcpu] != offset) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1109
vmx->tsc_offset_active[vcpu] = offset;
usr/src/uts/intel/io/vmm/intel/vmx.c
1159
vmx_stash_intinfo(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1169
VERIFY0(vm_exit_intinfo(vmx->vm, vcpu,
usr/src/uts/intel/io/vmm/intel/vmx.c
1215
vmx_inject_nmi(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
1228
vm_nmi_clear(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1241
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
usr/src/uts/intel/io/vmm/intel/vmx.c
1253
if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1267
if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1272
if (vm_nmi_pending(vmx->vm, vcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1286
vmx_inject_nmi(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1292
vmx_set_nmi_window_exiting(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1296
if (vm_extint_pending(vmx->vm, vcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1321
vm_extint_clear(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1337
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
usr/src/uts/intel/io/vmm/intel/vmx.c
1407
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
usr/src/uts/intel/io/vmm/intel/vmx.c
1410
if (vm_nmi_pending(vmx->vm, vcpu) &&
usr/src/uts/intel/io/vmm/intel/vmx.c
1411
!vmx_nmi_window_exiting(vmx, vcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1415
if (vm_extint_pending(vmx->vm, vcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
1433
vmx_set_int_window_exiting(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1479
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
usr/src/uts/intel/io/vmm/intel/vmx.c
1485
vmxctx = &vmx->ctx[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
1496
vm_inject_gp(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1503
vm_inject_ud(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1509
vm_inject_gp(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1514
vm_inject_gp(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1521
vm_inject_gp(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1532
vm_inject_gp(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1542
vm_inject_gp(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1556
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
usr/src/uts/intel/io/vmm/intel/vmx.c
1560
vmxctx = &vmx->ctx[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
1601
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
usr/src/uts/intel/io/vmm/intel/vmx.c
1605
vmxctx = &vmx->ctx[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
1662
vmx_sync_efer_state(struct vmx *vmx, int vcpu, uint64_t efer)
usr/src/uts/intel/io/vmm/intel/vmx.c
1681
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
usr/src/uts/intel/io/vmm/intel/vmx.c
1689
regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
usr/src/uts/intel/io/vmm/intel/vmx.c
1700
vmx_invvpid(vmx, vcpu, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
1713
vmx_sync_efer_state(vmx, vcpu, efer);
usr/src/uts/intel/io/vmm/intel/vmx.c
1721
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
usr/src/uts/intel/io/vmm/intel/vmx.c
1729
regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
usr/src/uts/intel/io/vmm/intel/vmx.c
1741
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
usr/src/uts/intel/io/vmm/intel/vmx.c
1752
vlapic = vm_lapic(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
1756
vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
usr/src/uts/intel/io/vmm/intel/vmx.c
1758
cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
usr/src/uts/intel/io/vmm/intel/vmx.c
2182
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
usr/src/uts/intel/io/vmm/intel/vmx.c
2197
vmxctx = &vmx->ctx[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
2203
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2204
SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2233
VERIFY0(vm_exit_intinfo(vmx->vm, vcpu,
usr/src/uts/intel/io/vmm/intel/vmx.c
2250
vmx_clear_nmi_blocking(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2252
vmx_assert_nmi_blocking(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2268
(void) vm_suspend(vmx->vm, VM_SUSPEND_TRIPLEFAULT, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2310
SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts);
usr/src/uts/intel/io/vmm/intel/vmx.c
2313
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2314
SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual);
usr/src/uts/intel/io/vmm/intel/vmx.c
2317
handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
usr/src/uts/intel/io/vmm/intel/vmx.c
2320
handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
usr/src/uts/intel/io/vmm/intel/vmx.c
2323
handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
usr/src/uts/intel/io/vmm/intel/vmx.c
2329
handled = vmx_handle_msr(vmx, vcpu, vmexit,
usr/src/uts/intel/io/vmm/intel/vmx.c
2333
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2334
SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2339
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2340
SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2345
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2346
SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2350
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2351
SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2352
ASSERT(vmx_int_window_exiting(vmx, vcpu));
usr/src/uts/intel/io/vmm/intel/vmx.c
2353
vmx_clear_int_window_exiting(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2367
vmx, vcpu, vmexit, intr_info);
usr/src/uts/intel/io/vmm/intel/vmx.c
2384
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2387
SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2389
if (vm_nmi_pending(vmx->vm, vcpu))
usr/src/uts/intel/io/vmm/intel/vmx.c
2390
vmx_inject_nmi(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2391
ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
usr/src/uts/intel/io/vmm/intel/vmx.c
2392
vmx_clear_nmi_window_exiting(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2393
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2396
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2397
vie = vm_vie_ctx(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2399
SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2402
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2403
SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2404
vcpu_emulate_cpuid(vmx->vm, vcpu,
usr/src/uts/intel/io/vmm/intel/vmx.c
2412
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2432
vmx_restore_nmi_blocking(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2455
(vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) {
usr/src/uts/intel/io/vmm/intel/vmx.c
2482
vmx, vcpu, vmexit, intr_vec, errcode);
usr/src/uts/intel/io/vmm/intel/vmx.c
2483
error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
usr/src/uts/intel/io/vmm/intel/vmx.c
2496
if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
usr/src/uts/intel/io/vmm/intel/vmx.c
2497
apic_access_fault(vmx, vcpu, gpa)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
2502
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2504
vmx, vcpu, vmexit, gpa, qual);
usr/src/uts/intel/io/vmm/intel/vmx.c
2506
vie = vm_vie_ctx(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2509
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2511
vmx, vcpu, vmexit, gpa);
usr/src/uts/intel/io/vmm/intel/vmx.c
2523
vmx_restore_nmi_blocking(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2528
SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2532
SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2533
handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2541
vlapic = vm_lapic(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2543
vmx, vcpu, vmexit, vlapic);
usr/src/uts/intel/io/vmm/intel/vmx.c
2544
handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
usr/src/uts/intel/io/vmm/intel/vmx.c
2547
SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2548
handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2551
SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2555
SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2559
vlapic = vm_lapic(vmx->vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2574
SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2584
vmx, vcpu, vmexit, reason);
usr/src/uts/intel/io/vmm/intel/vmx.c
2585
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
2622
vmx, vcpu, vmexit, handled);
usr/src/uts/intel/io/vmm/intel/vmx.c
2745
vmx_run(void *arg, int vcpu, uint64_t rip)
usr/src/uts/intel/io/vmm/intel/vmx.c
2760
vmcs_pa = vmx->vmcs_pa[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
2761
vmxctx = &vmx->ctx[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
2762
vlapic = vm_lapic(vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2763
vmexit = vm_exitinfo(vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2764
vmc = vm_get_vmclient(vm, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2768
(vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0;
usr/src/uts/intel/io/vmm/intel/vmx.c
2770
vmx_msr_guest_enter(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2774
VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0);
usr/src/uts/intel/io/vmm/intel/vmx.c
2775
vmx->vmcs_state[vcpu] = VS_LOADED;
usr/src/uts/intel/io/vmm/intel/vmx.c
2788
vmx_set_pcpu_defaults(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2801
inject_state = vmx_inject_events(vmx, vcpu, rip);
usr/src/uts/intel/io/vmm/intel/vmx.c
2829
inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic);
usr/src/uts/intel/io/vmm/intel/vmx.c
2836
if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
2841
if (vcpu_run_state_pending(vm, vcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
2843
vm_exit_run_state(vmx->vm, vcpu, rip);
usr/src/uts/intel/io/vmm/intel/vmx.c
2851
if (vmx_inject_recheck(vmx, vcpu, inject_state)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
2875
launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0;
usr/src/uts/intel/io/vmm/intel/vmx.c
2916
vcpu_ustate_change(vm, vcpu, VU_RUN);
usr/src/uts/intel/io/vmm/intel/vmx.c
2923
vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);
usr/src/uts/intel/io/vmm/intel/vmx.c
2925
vmx->vmcs_state[vcpu] |= VS_LAUNCHED;
usr/src/uts/intel/io/vmm/intel/vmx.c
2940
vmx->state[vcpu].nextrip = rip;
usr/src/uts/intel/io/vmm/intel/vmx.c
2949
handled = vmx_exit_process(vmx, vcpu, vmexit);
usr/src/uts/intel/io/vmm/intel/vmx.c
2953
DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip,
usr/src/uts/intel/io/vmm/intel/vmx.c
2965
vmx_msr_guest_exit(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
2967
VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0);
usr/src/uts/intel/io/vmm/intel/vmx.c
2968
vmx->vmcs_state[vcpu] = VS_NONE;
usr/src/uts/intel/io/vmm/intel/vmx.c
3001
vmx_vmcs_access_ensure(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
3005
if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
3013
vmcs_load(vmx->vmcs_pa[vcpu]);
usr/src/uts/intel/io/vmm/intel/vmx.c
3019
vmx_vmcs_access_done(struct vmx *vmx, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
3023
if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) {
usr/src/uts/intel/io/vmm/intel/vmx.c
3030
vmcs_clear(vmx->vmcs_pa[vcpu]);
usr/src/uts/intel/io/vmm/intel/vmx.c
305
static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
usr/src/uts/intel/io/vmm/intel/vmx.c
306
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
usr/src/uts/intel/io/vmm/intel/vmx.c
3087
vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
usr/src/uts/intel/io/vmm/intel/vmx.c
3093
if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) {
usr/src/uts/intel/io/vmm/intel/vmx.c
3098
bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3129
vmx_vmcs_access_done(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3135
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
usr/src/uts/intel/io/vmm/intel/vmx.c
3141
if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) {
usr/src/uts/intel/io/vmm/intel/vmx.c
3146
bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3172
vmx_sync_efer_state(vmx, vcpu, val);
usr/src/uts/intel/io/vmm/intel/vmx.c
3201
vmx_invvpid(vmx, vcpu,
usr/src/uts/intel/io/vmm/intel/vmx.c
3202
vcpu_is_running(vmx->vm, vcpu, NULL));
usr/src/uts/intel/io/vmm/intel/vmx.c
3214
vmx_vmcs_access_done(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3220
vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc)
usr/src/uts/intel/io/vmm/intel/vmx.c
3225
bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3237
vmx_vmcs_access_done(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3243
vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc)
usr/src/uts/intel/io/vmm/intel/vmx.c
3248
bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3258
vmx_vmcs_access_done(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3264
vmx_msr_ptr(struct vmx *vmx, int vcpu, uint32_t msr)
usr/src/uts/intel/io/vmm/intel/vmx.c
3266
uint64_t *guest_msrs = vmx->guest_msrs[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
3287
vmx_msr_get(void *arg, int vcpu, uint32_t msr, uint64_t *valp)
usr/src/uts/intel/io/vmm/intel/vmx.c
3293
const uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr);
usr/src/uts/intel/io/vmm/intel/vmx.c
3301
bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3306
vmx_vmcs_access_done(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3315
vmx_msr_set(void *arg, int vcpu, uint32_t msr, uint64_t val)
usr/src/uts/intel/io/vmm/intel/vmx.c
3321
uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr);
usr/src/uts/intel/io/vmm/intel/vmx.c
3329
bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3334
vmx_sync_efer_state(vmx, vcpu, val);
usr/src/uts/intel/io/vmm/intel/vmx.c
3338
vmx_vmcs_access_done(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3346
vmx_getcap(void *arg, int vcpu, int type, int *retval)
usr/src/uts/intel/io/vmm/intel/vmx.c
3354
vcap = vmx->cap[vcpu].set;
usr/src/uts/intel/io/vmm/intel/vmx.c
3386
vmx_setcap(void *arg, int vcpu, int type, int val)
usr/src/uts/intel/io/vmm/intel/vmx.c
3399
pptr = &vmx->cap[vcpu].proc_ctls;
usr/src/uts/intel/io/vmm/intel/vmx.c
3407
pptr = &vmx->cap[vcpu].proc_ctls;
usr/src/uts/intel/io/vmm/intel/vmx.c
3416
pptr = &vmx->cap[vcpu].proc_ctls;
usr/src/uts/intel/io/vmm/intel/vmx.c
3425
pptr = &vmx->cap[vcpu].proc_ctls2;
usr/src/uts/intel/io/vmm/intel/vmx.c
3435
if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) {
usr/src/uts/intel/io/vmm/intel/vmx.c
3436
pptr = &vmx->cap[vcpu].exc_bitmap;
usr/src/uts/intel/io/vmm/intel/vmx.c
3456
vmcs_load(vmx->vmcs_pa[vcpu]);
usr/src/uts/intel/io/vmm/intel/vmx.c
3458
vmcs_clear(vmx->vmcs_pa[vcpu]);
usr/src/uts/intel/io/vmm/intel/vmx.c
3468
vmx->cap[vcpu].set |= (1 << type);
usr/src/uts/intel/io/vmm/intel/vmx.c
3470
vmx->cap[vcpu].set &= ~(1 << type);
usr/src/uts/intel/io/vmm/intel/vmx.c
3820
vmx_savectx(void *arg, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
3824
if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
usr/src/uts/intel/io/vmm/intel/vmx.c
3825
vmcs_clear(vmx->vmcs_pa[vcpu]);
usr/src/uts/intel/io/vmm/intel/vmx.c
3826
vmx_msr_guest_exit(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3831
vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED;
usr/src/uts/intel/io/vmm/intel/vmx.c
3838
vmx_restorectx(void *arg, int vcpu)
usr/src/uts/intel/io/vmm/intel/vmx.c
3842
ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED);
usr/src/uts/intel/io/vmm/intel/vmx.c
3844
if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
usr/src/uts/intel/io/vmm/intel/vmx.c
3845
vmx_msr_guest_enter(vmx, vcpu);
usr/src/uts/intel/io/vmm/intel/vmx.c
3846
vmcs_load(vmx->vmcs_pa[vcpu]);
usr/src/uts/intel/io/vmm/intel/vmx.c
940
vmx_invvpid(struct vmx *vmx, int vcpu, int running)
usr/src/uts/intel/io/vmm/intel/vmx.c
945
vmxstate = &vmx->state[vcpu];
usr/src/uts/intel/io/vmm/intel/vmx.c
978
vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
usr/src/uts/intel/io/vmm/intel/vmx.c
986
vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
usr/src/uts/intel/io/vmm/intel/vmx.h
197
int vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
usr/src/uts/intel/io/vmm/io/ppt.c
1292
ppt_setup_msi(struct vm *vm, int vcpu, int pptfd, uint64_t addr, uint64_t msg,
usr/src/uts/intel/io/vmm/io/ppt.c
1396
ppt_setup_msix(struct vm *vm, int vcpu, int pptfd, int idx, uint64_t addr,
usr/src/uts/intel/io/vmm/io/ppt.h
36
int ppt_setup_msi(struct vm *vm, int vcpu, int pptfd, uint64_t addr,
usr/src/uts/intel/io/vmm/io/ppt.h
38
int ppt_setup_msix(struct vm *vm, int vcpu, int pptfd, int idx, uint64_t addr,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
100
typedef void (*vmi_pause_t)(void *vmi, int vcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
102
typedef int (*vmi_get_msr_t)(void *vmi, int vcpu, uint32_t msr,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
104
typedef int (*vmi_set_msr_t)(void *vmi, int vcpu, uint32_t msr,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
184
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
185
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
186
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
188
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
198
int vm_inject_nmi(struct vm *vm, int vcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
201
int vm_inject_extint(struct vm *vm, int vcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
209
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
210
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
211
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
212
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
214
int vm_activate_cpu(struct vm *vm, int vcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
215
int vm_suspend_cpu(struct vm *vm, int vcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
216
int vm_resume_cpu(struct vm *vm, int vcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
256
int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
258
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
268
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
270
return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
275
vcpu_should_yield(struct vm *vm, int vcpu)
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
293
void *vcpu_stats(struct vm *vm, int vcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
84
typedef int (*vmi_run_func_t)(void *vmi, int vcpu, uint64_t rip);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
86
typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
88
typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
90
typedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
92
typedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
94
typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
95
typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
98
typedef void (*vmi_savectx)(void *vmi, int vcpu);
usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
99
typedef void (*vmi_restorectx)(void *vmi, int vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1209
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1212
*retval = vcpu->guest_xcr0;
usr/src/uts/intel/io/vmm/vmm.c
1229
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1234
vcpu->nextrip = val;
usr/src/uts/intel/io/vmm/vmm.c
1241
vcpu->guest_xcr0 = val;
usr/src/uts/intel/io/vmm/vmm.c
1279
vm_get_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc)
usr/src/uts/intel/io/vmm/vmm.c
1282
if (vcpu < 0 || vcpu >= vm->maxcpus)
usr/src/uts/intel/io/vmm/vmm.c
1288
return (VMGETDESC(vm->cookie, vcpu, reg, desc));
usr/src/uts/intel/io/vmm/vmm.c
1292
vm_set_seg_desc(struct vm *vm, int vcpu, int reg, const struct seg_desc *desc)
usr/src/uts/intel/io/vmm/vmm.c
1294
if (vcpu < 0 || vcpu >= vm->maxcpus)
usr/src/uts/intel/io/vmm/vmm.c
1300
return (VMSETDESC(vm->cookie, vcpu, reg, desc));
usr/src/uts/intel/io/vmm/vmm.c
1327
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1330
res = hma_fpu_get_xsave_state(vcpu->guestfpu, buf, len);
usr/src/uts/intel/io/vmm/vmm.c
1340
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1343
res = hma_fpu_set_xsave_state(vcpu->guestfpu, buf, len);
usr/src/uts/intel/io/vmm/vmm.c
1350
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
1356
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1358
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1359
*state = vcpu->run_state;
usr/src/uts/intel/io/vmm/vmm.c
1360
*sipi_vec = vcpu->sipi_vector;
usr/src/uts/intel/io/vmm/vmm.c
1361
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1369
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
1378
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1380
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1381
vcpu->run_state = state;
usr/src/uts/intel/io/vmm/vmm.c
1382
vcpu->sipi_vector = sipi_vec;
usr/src/uts/intel/io/vmm/vmm.c
1383
vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
usr/src/uts/intel/io/vmm/vmm.c
1384
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1452
restore_guest_fpustate(struct vcpu *vcpu)
usr/src/uts/intel/io/vmm/vmm.c
1456
hma_fpu_start_guest(vcpu->guestfpu);
usr/src/uts/intel/io/vmm/vmm.c
1460
load_xcr(0, vcpu->guest_xcr0);
usr/src/uts/intel/io/vmm/vmm.c
1470
save_guest_fpustate(struct vcpu *vcpu)
usr/src/uts/intel/io/vmm/vmm.c
1478
vcpu->guest_xcr0 = rxcr(0);
usr/src/uts/intel/io/vmm/vmm.c
1484
hma_fpu_stop_guest(vcpu->guestfpu);
usr/src/uts/intel/io/vmm/vmm.c
1495
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
1498
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1499
vcpu_assert_locked(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1507
while (vcpu->state != VCPU_IDLE) {
usr/src/uts/intel/io/vmm/vmm.c
1508
vcpu->reqidle = true;
usr/src/uts/intel/io/vmm/vmm.c
1509
vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
usr/src/uts/intel/io/vmm/vmm.c
1510
cv_wait(&vcpu->state_cv, &vcpu->lock);
usr/src/uts/intel/io/vmm/vmm.c
1511
vcpu->reqidle = false;
usr/src/uts/intel/io/vmm/vmm.c
1514
KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
usr/src/uts/intel/io/vmm/vmm.c
1518
if (vcpu->state == VCPU_RUNNING) {
usr/src/uts/intel/io/vmm/vmm.c
1519
KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
usr/src/uts/intel/io/vmm/vmm.c
1520
"mismatch for running vcpu", curcpu, vcpu->hostcpu));
usr/src/uts/intel/io/vmm/vmm.c
1522
KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
usr/src/uts/intel/io/vmm/vmm.c
1523
"vcpu that is not running", vcpu->hostcpu));
usr/src/uts/intel/io/vmm/vmm.c
1532
switch (vcpu->state) {
usr/src/uts/intel/io/vmm/vmm.c
1549
vcpu->state = newstate;
usr/src/uts/intel/io/vmm/vmm.c
1551
vcpu->hostcpu = curcpu;
usr/src/uts/intel/io/vmm/vmm.c
1553
vcpu->hostcpu = NOCPU;
usr/src/uts/intel/io/vmm/vmm.c
1556
cv_broadcast(&vcpu->state_cv);
usr/src/uts/intel/io/vmm/vmm.c
1586
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
1592
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1596
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1608
vlapic_pending_intr(vcpu->vlapic, NULL)) {
usr/src/uts/intel/io/vmm/vmm.c
1642
(void) cv_wait_sig(&vcpu->vcpu_cv, &vcpu->lock);
usr/src/uts/intel/io/vmm/vmm.c
1650
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1662
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1663
vm_client_t *vmc = vcpu->vmclient;
usr/src/uts/intel/io/vmm/vmm.c
1664
struct vm_exit *vme = &vcpu->exitinfo;
usr/src/uts/intel/io/vmm/vmm.c
1745
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
1750
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1751
vme = &vcpu->exitinfo;
usr/src/uts/intel/io/vmm/vmm.c
1752
vie = vcpu->vie_ctx;
usr/src/uts/intel/io/vmm/vmm.c
1819
vie_advance_pc(vie, &vcpu->nextrip);
usr/src/uts/intel/io/vmm/vmm.c
1827
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
1831
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1832
vie = vcpu->vie_ctx;
usr/src/uts/intel/io/vmm/vmm.c
1871
vie_advance_pc(vie, &vcpu->nextrip);
usr/src/uts/intel/io/vmm/vmm.c
1879
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
1884
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1885
vme = &vcpu->exitinfo;
usr/src/uts/intel/io/vmm/vmm.c
1886
vie = vcpu->vie_ctx;
usr/src/uts/intel/io/vmm/vmm.c
1920
vie_advance_pc(vie, &vcpu->nextrip);
usr/src/uts/intel/io/vmm/vmm.c
1928
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
1931
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1933
if ((vcpu->run_state & VRS_PEND_INIT) != 0) {
usr/src/uts/intel/io/vmm/vmm.c
1934
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1936
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1938
vcpu->run_state &= ~(VRS_RUN | VRS_PEND_INIT);
usr/src/uts/intel/io/vmm/vmm.c
1939
vcpu->run_state |= VRS_INIT;
usr/src/uts/intel/io/vmm/vmm.c
1942
if ((vcpu->run_state & (VRS_INIT | VRS_RUN | VRS_PEND_SIPI)) ==
usr/src/uts/intel/io/vmm/vmm.c
1944
const uint8_t vector = vcpu->sipi_vector;
usr/src/uts/intel/io/vmm/vmm.c
1946
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1948
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
1950
vcpu->run_state &= ~VRS_PEND_SIPI;
usr/src/uts/intel/io/vmm/vmm.c
1951
vcpu->run_state |= VRS_RUN;
usr/src/uts/intel/io/vmm/vmm.c
1958
if ((vcpu->run_state & VRS_RUN) != 0) {
usr/src/uts/intel/io/vmm/vmm.c
1974
(void) cv_wait_sig(&vcpu->vcpu_cv, &vcpu->lock);
usr/src/uts/intel/io/vmm/vmm.c
1978
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
2084
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2100
if (vm_rdmtrr(&vcpu->mtrr, code, &val) != 0)
usr/src/uts/intel/io/vmm/vmm.c
2141
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2157
if (vm_wrmtrr(&vcpu->mtrr, code, val) != 0)
usr/src/uts/intel/io/vmm/vmm.c
2176
vcpu->tsc_offset = val - calc_guest_tsc(rdtsc_offset(),
usr/src/uts/intel/io/vmm/vmm.c
2252
struct vcpu *vcpu = &vm->vcpu[i];
usr/src/uts/intel/io/vmm/vmm.c
2254
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
2262
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
2266
switch (vcpu->state) {
usr/src/uts/intel/io/vmm/vmm.c
2287
vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
usr/src/uts/intel/io/vmm/vmm.c
2290
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
231
struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */
usr/src/uts/intel/io/vmm/vmm.c
2313
vm_localize_resources(struct vm *vm, struct vcpu *vcpu)
usr/src/uts/intel/io/vmm/vmm.c
2325
if (vcpu->lastloccpu == curcpu)
usr/src/uts/intel/io/vmm/vmm.c
2333
if (vcpu == &vm->vcpu[0]) {
usr/src/uts/intel/io/vmm/vmm.c
2339
vlapic_localize_resources(vcpu->vlapic);
usr/src/uts/intel/io/vmm/vmm.c
2341
vcpu->lastloccpu = curcpu;
usr/src/uts/intel/io/vmm/vmm.c
2359
if (vm->vcpu[vcpuid].ustate != VU_IDLE) {
usr/src/uts/intel/io/vmm/vmm.c
2360
vtc->vtc_ustate = vm->vcpu[vcpuid].ustate;
usr/src/uts/intel/io/vmm/vmm.c
2369
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2371
save_guest_fpustate(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
2384
if (vm->vcpu[vcpuid].ustate != VU_IDLE) {
usr/src/uts/intel/io/vmm/vmm.c
2400
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2402
restore_guest_fpustate(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
2420
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2421
struct vie *vie = vcpu->vie_ctx;
usr/src/uts/intel/io/vmm/vmm.c
2438
vie_advance_pc(vie, &vcpu->nextrip);
usr/src/uts/intel/io/vmm/vmm.c
2457
vie_advance_pc(vie, &vcpu->nextrip);
usr/src/uts/intel/io/vmm/vmm.c
2481
vcpu->reqconsist = true;
usr/src/uts/intel/io/vmm/vmm.c
2492
vie = vm->vcpu[vcpuid].vie_ctx;
usr/src/uts/intel/io/vmm/vmm.c
2510
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
2523
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2524
vme = &vcpu->exitinfo;
usr/src/uts/intel/io/vmm/vmm.c
2528
vcpu->vtc.vtc_status = 0;
usr/src/uts/intel/io/vmm/vmm.c
2529
ctxop_attach(curthread, vcpu->ctxop);
usr/src/uts/intel/io/vmm/vmm.c
2551
vm_localize_resources(vm, vcpu);
usr/src/uts/intel/io/vmm/vmm.c
2558
if ((vcpu->vtc.vtc_status & VTCS_FPU_RESTORED) == 0) {
usr/src/uts/intel/io/vmm/vmm.c
2559
restore_guest_fpustate(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
2560
vcpu->vtc.vtc_status |= VTCS_FPU_RESTORED;
usr/src/uts/intel/io/vmm/vmm.c
2562
vcpu->vtc.vtc_status |= VTCS_FPU_CTX_CRITICAL;
usr/src/uts/intel/io/vmm/vmm.c
2565
error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip);
usr/src/uts/intel/io/vmm/vmm.c
2572
vcpu->vtc.vtc_status &= ~VTCS_FPU_CTX_CRITICAL;
usr/src/uts/intel/io/vmm/vmm.c
2581
vcpu->nextrip = vme->rip + vme->inst_length;
usr/src/uts/intel/io/vmm/vmm.c
2637
ctxop_detach(curthread, vcpu->ctxop);
usr/src/uts/intel/io/vmm/vmm.c
2639
vmm_savectx(&vcpu->vtc);
usr/src/uts/intel/io/vmm/vmm.c
2656
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
2665
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2674
vcpu->exitinfo.inst_length = 0;
usr/src/uts/intel/io/vmm/vmm.c
2684
vcpu->nextrip = rip;
usr/src/uts/intel/io/vmm/vmm.c
2694
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
2699
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2714
vcpu->exit_intinfo = info;
usr/src/uts/intel/io/vmm/vmm.c
2776
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2777
const uint64_t info1 = vcpu->exit_intinfo;
usr/src/uts/intel/io/vmm/vmm.c
2778
vcpu->exit_intinfo = 0;
usr/src/uts/intel/io/vmm/vmm.c
2779
const uint64_t info2 = vcpu->exc_pending;
usr/src/uts/intel/io/vmm/vmm.c
2780
vcpu->exc_pending = 0;
usr/src/uts/intel/io/vmm/vmm.c
2810
vcpu->exit_intinfo = info1;
usr/src/uts/intel/io/vmm/vmm.c
2828
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
2833
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2834
*info1 = vcpu->exit_intinfo;
usr/src/uts/intel/io/vmm/vmm.c
2835
*info2 = vcpu->exc_pending;
usr/src/uts/intel/io/vmm/vmm.c
2843
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
2870
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2872
if (VM_INTINFO_PENDING(vcpu->exc_pending)) {
usr/src/uts/intel/io/vmm/vmm.c
2906
vcpu->exc_pending = val;
usr/src/uts/intel/io/vmm/vmm.c
293
#define VMRUN(vmi, vcpu, rip) ((*ops->vmrun)(vmi, vcpu, rip))
usr/src/uts/intel/io/vmm/vmm.c
2946
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
2951
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2953
vcpu->nmi_pending = true;
usr/src/uts/intel/io/vmm/vmm.c
296
#define VMGETREG(vmi, vcpu, num, rv) ((*ops->vmgetreg)(vmi, vcpu, num, rv))
usr/src/uts/intel/io/vmm/vmm.c
2961
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2963
return (vcpu->nmi_pending);
usr/src/uts/intel/io/vmm/vmm.c
2969
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
297
#define VMSETREG(vmi, vcpu, num, val) ((*ops->vmsetreg)(vmi, vcpu, num, val))
usr/src/uts/intel/io/vmm/vmm.c
2971
ASSERT(vcpu->nmi_pending);
usr/src/uts/intel/io/vmm/vmm.c
2973
vcpu->nmi_pending = false;
usr/src/uts/intel/io/vmm/vmm.c
298
#define VMGETDESC(vmi, vcpu, num, dsc) ((*ops->vmgetdesc)(vmi, vcpu, num, dsc))
usr/src/uts/intel/io/vmm/vmm.c
2982
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
2987
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2989
vcpu->extint_pending = true;
usr/src/uts/intel/io/vmm/vmm.c
299
#define VMSETDESC(vmi, vcpu, num, dsc) ((*ops->vmsetdesc)(vmi, vcpu, num, dsc))
usr/src/uts/intel/io/vmm/vmm.c
2997
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
2999
return (vcpu->extint_pending);
usr/src/uts/intel/io/vmm/vmm.c
300
#define VMGETCAP(vmi, vcpu, num, rv) ((*ops->vmgetcap)(vmi, vcpu, num, rv))
usr/src/uts/intel/io/vmm/vmm.c
3005
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3007
ASSERT(vcpu->extint_pending);
usr/src/uts/intel/io/vmm/vmm.c
3009
vcpu->extint_pending = false;
usr/src/uts/intel/io/vmm/vmm.c
301
#define VMSETCAP(vmi, vcpu, num, val) ((*ops->vmsetcap)(vmi, vcpu, num, val))
usr/src/uts/intel/io/vmm/vmm.c
3016
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
302
#define VLAPIC_INIT(vmi, vcpu) ((*ops->vlapic_init)(vmi, vcpu))
usr/src/uts/intel/io/vmm/vmm.c
3021
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3022
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3023
vcpu->run_state |= VRS_PEND_INIT;
usr/src/uts/intel/io/vmm/vmm.c
3030
vcpu->run_state &= ~VRS_PEND_SIPI;
usr/src/uts/intel/io/vmm/vmm.c
3031
vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
usr/src/uts/intel/io/vmm/vmm.c
3033
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3040
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
3045
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3046
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3047
vcpu->run_state |= VRS_PEND_SIPI;
usr/src/uts/intel/io/vmm/vmm.c
3048
vcpu->sipi_vector = vector;
usr/src/uts/intel/io/vmm/vmm.c
3050
if ((vcpu->run_state & (VRS_INIT | VRS_RUN)) == VRS_INIT) {
usr/src/uts/intel/io/vmm/vmm.c
3051
vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
usr/src/uts/intel/io/vmm/vmm.c
3053
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3060
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
3063
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3066
return ((vcpu->run_state & (VRS_RUN | VRS_PEND_INIT)) != VRS_RUN);
usr/src/uts/intel/io/vmm/vmm.c
3105
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3171
vcpu->exit_intinfo = 0;
usr/src/uts/intel/io/vmm/vmm.c
3172
vcpu->exc_pending = 0;
usr/src/uts/intel/io/vmm/vmm.c
3173
vcpu->nmi_pending = false;
usr/src/uts/intel/io/vmm/vmm.c
3174
vcpu->extint_pending = 0;
usr/src/uts/intel/io/vmm/vmm.c
3181
vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
usr/src/uts/intel/io/vmm/vmm.c
3182
(void) hma_fpu_init(vcpu->guestfpu);
usr/src/uts/intel/io/vmm/vmm.c
3185
bzero(&vcpu->mtrr, sizeof (vcpu->mtrr));
usr/src/uts/intel/io/vmm/vmm.c
3213
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
usr/src/uts/intel/io/vmm/vmm.c
3215
if (vcpu < 0 || vcpu >= vm->maxcpus)
usr/src/uts/intel/io/vmm/vmm.c
3221
return (VMGETCAP(vm->cookie, vcpu, type, retval));
usr/src/uts/intel/io/vmm/vmm.c
3225
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
usr/src/uts/intel/io/vmm/vmm.c
3227
if (vcpu < 0 || vcpu >= vm->maxcpus)
usr/src/uts/intel/io/vmm/vmm.c
3233
return (VMSETCAP(vm->cookie, vcpu, type, val));
usr/src/uts/intel/io/vmm/vmm.c
3242
return (&vm->vcpu[vcpuid].cpuid_cfg);
usr/src/uts/intel/io/vmm/vmm.c
3251
return (vm->vcpu[cpu].vlapic);
usr/src/uts/intel/io/vmm/vmm.c
327
static void vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t);
usr/src/uts/intel/io/vmm/vmm.c
3280
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
3285
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3287
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3289
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3297
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
3303
vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3305
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3306
state = vcpu->state;
usr/src/uts/intel/io/vmm/vmm.c
3308
*hostcpu = vcpu->hostcpu;
usr/src/uts/intel/io/vmm/vmm.c
3309
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3323
uint64_t vcpu_off = vm->tsc_offset + vm->vcpu[vcpuid].tsc_offset;
usr/src/uts/intel/io/vmm/vmm.c
3427
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3428
struct vm_exit *vme = &vcpu->exitinfo;
usr/src/uts/intel/io/vmm/vmm.c
3444
if (vcpu->reqidle) {
usr/src/uts/intel/io/vmm/vmm.c
3454
if (vcpu->reqbarrier) {
usr/src/uts/intel/io/vmm/vmm.c
3461
vcpu->reqbarrier = false;
usr/src/uts/intel/io/vmm/vmm.c
3464
if (vcpu->reqconsist) {
usr/src/uts/intel/io/vmm/vmm.c
3472
vcpu->reqconsist = false;
usr/src/uts/intel/io/vmm/vmm.c
3492
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3493
struct vm_exit *vme = &vcpu->exitinfo;
usr/src/uts/intel/io/vmm/vmm.c
3511
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3512
struct vm_exit *vme = &vcpu->exitinfo;
usr/src/uts/intel/io/vmm/vmm.c
3530
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3533
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3535
vcpu->reqbarrier = true;
usr/src/uts/intel/io/vmm/vmm.c
3536
vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
usr/src/uts/intel/io/vmm/vmm.c
3538
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3544
struct vcpu *vcpu = &vm->vcpu[i];
usr/src/uts/intel/io/vmm/vmm.c
3546
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3548
vcpu->reqbarrier = true;
usr/src/uts/intel/io/vmm/vmm.c
3549
vcpu_notify_event_locked(vcpu,
usr/src/uts/intel/io/vmm/vmm.c
3552
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3577
return (vm->vcpu[vcpuid].stats);
usr/src/uts/intel/io/vmm/vmm.c
3586
*state = vm->vcpu[vcpuid].x2apic_state;
usr/src/uts/intel/io/vmm/vmm.c
3600
vm->vcpu[vcpuid].x2apic_state = state;
usr/src/uts/intel/io/vmm/vmm.c
3615
vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t ntype)
usr/src/uts/intel/io/vmm/vmm.c
3621
hostcpu = vcpu->hostcpu;
usr/src/uts/intel/io/vmm/vmm.c
3622
if (vcpu->state == VCPU_RUNNING) {
usr/src/uts/intel/io/vmm/vmm.c
3626
vlapic_post_intr(vcpu->vlapic, hostcpu);
usr/src/uts/intel/io/vmm/vmm.c
3640
"with hostcpu %d", vcpu->state, hostcpu));
usr/src/uts/intel/io/vmm/vmm.c
3641
if (vcpu->state == VCPU_SLEEPING) {
usr/src/uts/intel/io/vmm/vmm.c
3642
cv_signal(&vcpu->vcpu_cv);
usr/src/uts/intel/io/vmm/vmm.c
3650
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3652
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3653
vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
usr/src/uts/intel/io/vmm/vmm.c
3654
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3660
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3666
vcpu_lock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3667
vcpu_notify_event_locked(vcpu, ntype);
usr/src/uts/intel/io/vmm/vmm.c
3668
vcpu_unlock(vcpu);
usr/src/uts/intel/io/vmm/vmm.c
3674
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3680
if (ustate == vcpu->ustate) {
usr/src/uts/intel/io/vmm/vmm.c
3684
const hrtime_t delta = now - vcpu->ustate_when;
usr/src/uts/intel/io/vmm/vmm.c
3685
vcpu->ustate_total[vcpu->ustate] += delta;
usr/src/uts/intel/io/vmm/vmm.c
3689
vcpu->ustate_when = now;
usr/src/uts/intel/io/vmm/vmm.c
3690
vcpu->ustate = ustate;
usr/src/uts/intel/io/vmm/vmm.c
3709
return (vm->vcpu[vcpuid].vmclient);
usr/src/uts/intel/io/vmm/vmm.c
375
struct vcpu *vcpu = &vm->vcpu[i];
usr/src/uts/intel/io/vmm/vmm.c
377
VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
usr/src/uts/intel/io/vmm/vmm.c
379
vmm_stat_free(vcpu->stats);
usr/src/uts/intel/io/vmm/vmm.c
381
vcpu_cpuid_cleanup(&vcpu->cpuid_cfg);
usr/src/uts/intel/io/vmm/vmm.c
383
hma_fpu_free(vcpu->guestfpu);
usr/src/uts/intel/io/vmm/vmm.c
384
vcpu->guestfpu = NULL;
usr/src/uts/intel/io/vmm/vmm.c
386
vie_free(vcpu->vie_ctx);
usr/src/uts/intel/io/vmm/vmm.c
387
vcpu->vie_ctx = NULL;
usr/src/uts/intel/io/vmm/vmm.c
3879
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
usr/src/uts/intel/io/vmm/vmm.c
3881
if (vcpu == 0) {
usr/src/uts/intel/io/vmm/vmm.c
3882
vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
usr/src/uts/intel/io/vmm/vmm.c
389
vmc_destroy(vcpu->vmclient);
usr/src/uts/intel/io/vmm/vmm.c
390
vcpu->vmclient = NULL;
usr/src/uts/intel/io/vmm/vmm.c
392
ctxop_free(vcpu->ctxop);
usr/src/uts/intel/io/vmm/vmm.c
393
mutex_destroy(&vcpu->lock);
usr/src/uts/intel/io/vmm/vmm.c
3968
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
3972
vvk->vvk_time_init.value.ui64 = vcpu->ustate_total[VU_INIT];
usr/src/uts/intel/io/vmm/vmm.c
3973
vvk->vvk_time_run.value.ui64 = vcpu->ustate_total[VU_RUN];
usr/src/uts/intel/io/vmm/vmm.c
3974
vvk->vvk_time_idle.value.ui64 = vcpu->ustate_total[VU_IDLE];
usr/src/uts/intel/io/vmm/vmm.c
3975
vvk->vvk_time_emu_kern.value.ui64 = vcpu->ustate_total[VU_EMU_KERN];
usr/src/uts/intel/io/vmm/vmm.c
3976
vvk->vvk_time_emu_user.value.ui64 = vcpu->ustate_total[VU_EMU_USER];
usr/src/uts/intel/io/vmm/vmm.c
3977
vvk->vvk_time_sched.value.ui64 = vcpu->ustate_total[VU_SCHED];
usr/src/uts/intel/io/vmm/vmm.c
400
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
405
vcpu = &vm->vcpu[vcpu_id];
usr/src/uts/intel/io/vmm/vmm.c
408
mutex_init(&vcpu->lock, NULL, MUTEX_ADAPTIVE, NULL);
usr/src/uts/intel/io/vmm/vmm.c
410
vcpu->state = VCPU_IDLE;
usr/src/uts/intel/io/vmm/vmm.c
411
vcpu->hostcpu = NOCPU;
usr/src/uts/intel/io/vmm/vmm.c
412
vcpu->lastloccpu = NOCPU;
usr/src/uts/intel/io/vmm/vmm.c
413
vcpu->guestfpu = hma_fpu_alloc(KM_SLEEP);
usr/src/uts/intel/io/vmm/vmm.c
414
vcpu->stats = vmm_stat_alloc();
usr/src/uts/intel/io/vmm/vmm.c
4141
*value = vm->vcpu[vcpuid].tsc_offset;
usr/src/uts/intel/io/vmm/vmm.c
4146
err = vm_rdmtrr(&vm->vcpu[vcpuid].mtrr, msr, value);
usr/src/uts/intel/io/vmm/vmm.c
415
vcpu->vie_ctx = vie_alloc();
usr/src/uts/intel/io/vmm/vmm.c
416
vcpu_cpuid_init(&vcpu->cpuid_cfg);
usr/src/uts/intel/io/vmm/vmm.c
4164
vm->vcpu[vcpuid].tsc_offset = value;
usr/src/uts/intel/io/vmm/vmm.c
4172
err = vm_rdmtrr(&vm->vcpu[vcpuid].mtrr, msr, &comp);
usr/src/uts/intel/io/vmm/vmm.c
418
vcpu->ustate = VU_INIT;
usr/src/uts/intel/io/vmm/vmm.c
4183
err = vm_wrmtrr(&vm->vcpu[vcpuid].mtrr, msr, value);
usr/src/uts/intel/io/vmm/vmm.c
419
vcpu->ustate_when = gethrtime();
usr/src/uts/intel/io/vmm/vmm.c
421
vcpu->vtc.vtc_vm = vm;
usr/src/uts/intel/io/vmm/vmm.c
422
vcpu->vtc.vtc_vcpuid = vcpu_id;
usr/src/uts/intel/io/vmm/vmm.c
423
vcpu->ctxop = ctxop_allocate(&vmm_ctxop_tpl, &vcpu->vtc);
usr/src/uts/intel/io/vmm/vmm.c
425
vie_reset(vcpu->vie_ctx);
usr/src/uts/intel/io/vmm/vmm.c
426
bzero(&vcpu->exitinfo, sizeof (vcpu->exitinfo));
usr/src/uts/intel/io/vmm/vmm.c
428
bzero(&vcpu->mtrr, sizeof (vcpu->mtrr));
usr/src/uts/intel/io/vmm/vmm.c
431
vcpu->run_state = VRS_HALT;
usr/src/uts/intel/io/vmm/vmm.c
432
vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
usr/src/uts/intel/io/vmm/vmm.c
4331
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
4334
*valp = vcpu->nmi_pending != 0 ? 1 : 0;
usr/src/uts/intel/io/vmm/vmm.c
4337
*valp = vcpu->extint_pending != 0 ? 1 : 0;
usr/src/uts/intel/io/vmm/vmm.c
434
vcpu->reqidle = false;
usr/src/uts/intel/io/vmm/vmm.c
4340
*valp = vcpu->exc_pending;
usr/src/uts/intel/io/vmm/vmm.c
4343
*valp = vcpu->exit_intinfo;
usr/src/uts/intel/io/vmm/vmm.c
435
vcpu->reqconsist = false;
usr/src/uts/intel/io/vmm/vmm.c
436
vcpu->reqbarrier = false;
usr/src/uts/intel/io/vmm/vmm.c
437
vcpu->exit_intinfo = 0;
usr/src/uts/intel/io/vmm/vmm.c
438
vcpu->nmi_pending = false;
usr/src/uts/intel/io/vmm/vmm.c
439
vcpu->extint_pending = false;
usr/src/uts/intel/io/vmm/vmm.c
440
vcpu->exc_pending = 0;
usr/src/uts/intel/io/vmm/vmm.c
441
vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
usr/src/uts/intel/io/vmm/vmm.c
442
(void) hma_fpu_init(vcpu->guestfpu);
usr/src/uts/intel/io/vmm/vmm.c
4422
struct vcpu *vcpu = &vm->vcpu[vcpuid];
usr/src/uts/intel/io/vmm/vmm.c
4429
vcpu->nmi_pending = (val != 0);
usr/src/uts/intel/io/vmm/vmm.c
443
vmm_stat_init(vcpu->stats);
usr/src/uts/intel/io/vmm/vmm.c
4432
vcpu->extint_pending = (val != 0);
usr/src/uts/intel/io/vmm/vmm.c
4436
vcpu->exc_pending = 0;
usr/src/uts/intel/io/vmm/vmm.c
444
vcpu->tsc_offset = 0;
usr/src/uts/intel/io/vmm/vmm.c
4442
vcpu->exc_pending = val;
usr/src/uts/intel/io/vmm/vmm.c
462
struct vcpu *vcpu;
usr/src/uts/intel/io/vmm/vmm.c
467
vcpu = &vm->vcpu[cpuid];
usr/src/uts/intel/io/vmm/vmm.c
469
return (&vcpu->exitinfo);
usr/src/uts/intel/io/vmm/vmm.c
478
return (vm->vcpu[cpuid].vie_ctx);
usr/src/uts/intel/io/vmm/vmm.c
644
vm->vcpu[i].vmclient = vmspace_client_alloc(vmspace);
usr/src/uts/intel/io/vmm/vmm.c
787
struct vcpu *vcpu = &vm->vcpu[i];
usr/src/uts/intel/io/vmm/vmm.c
792
vlapic_pause(vcpu->vlapic);
usr/src/uts/intel/io/vmm/vmm.c
820
struct vcpu *vcpu = &vm->vcpu[i];
usr/src/uts/intel/io/vmm/vmm.c
825
vlapic_resume(vcpu->vlapic);
usr/src/uts/intel/io/vmm/vmm_instruction_emul.c
3073
ptp_hold(struct vm *vm, int vcpu, uintptr_t gpa, size_t len, vm_page_t **vmp)
usr/src/uts/intel/io/vmm/vmm_instruction_emul.c
3075
vm_client_t *vmc = vm_get_vmclient(vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1048
error = vm_get_register(sc->vmm_vm, vcpu, vmreg.regnum,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1064
error = vm_set_register(sc->vmm_vm, vcpu, vmreg.regnum,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1075
error = vm_set_seg_desc(sc->vmm_vm, vcpu, vmsegd.regnum,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1086
error = vm_get_seg_desc(sc->vmm_vm, vcpu, vmsegd.regnum,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1120
error = vm_get_register(sc->vmm_vm, vcpu, regnums[i],
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1165
error = vm_set_register(sc->vmm_vm, vcpu, regnums[i],
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1181
error = vcpu_arch_reset(sc->vmm_vm, vcpu, vvr.kind == VRK_INIT);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1188
error = vm_get_run_state(sc->vmm_vm, vcpu, &vrs.state,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1205
error = vm_set_run_state(sc->vmm_vm, vcpu, vrs.state,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1223
error = vm_get_fpu(sc->vmm_vm, vcpu, kbuf, req.len);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1249
error = vm_set_fpu(sc->vmm_vm, vcpu, kbuf, req.len);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1277
error = vm_get_cpuid(sc->vmm_vm, vcpu, &vm_cfg);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1341
error = vm_set_cpuid(sc->vmm_vm, vcpu, &vm_cfg);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1354
vlc.vlc_vcpuid = vcpu;
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1356
legacy_emulate_cpuid(sc->vmm_vm, vcpu, &vlc.vlc_eax,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1384
error = vm_service_mmio_write(sc->vmm_vm, vcpu,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1387
error = vm_service_mmio_read(sc->vmm_vm, vcpu,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1407
error = vm_get_capability(sc->vmm_vm, vcpu, vmcap.captype,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1423
error = vm_set_capability(sc->vmm_vm, vcpu, vmcap.captype,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1434
error = vm_set_x2apic_state(sc->vmm_vm, vcpu, x2apic.state);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1479
gg.vcpuid = vcpu;
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1480
error = vm_gla2gpa(sc->vmm_vm, vcpu, &gg.paging, gg.gla,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1495
gg.vcpuid = vcpu;
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1496
error = vm_gla2gpa_nofault(sc->vmm_vm, vcpu, &gg.paging,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1506
error = vm_activate_cpu(sc->vmm_vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1510
if (ddi_copyin(datap, &vcpu, sizeof (vcpu), md)) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1513
error = vm_suspend_cpu(sc->vmm_vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1518
if (ddi_copyin(datap, &vcpu, sizeof (vcpu), md)) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1521
error = vm_resume_cpu(sc->vmm_vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1526
vcpu = arg;
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1527
error = vm_vcpu_barrier(sc->vmm_vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1577
error = vm_exit_intinfo(sc->vmm_vm, vcpu, vmii.info1);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1583
vmii.vcpuid = vcpu;
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1584
error = vm_get_intinfo(sc->vmm_vm, vcpu, &vmii.info1,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1648
error = vm_restart_instruction(sc->vmm_vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
1964
vcpu_unlock_one(sc, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
2706
for (int vcpu = 0; vcpu < maxcpus; vcpu++) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
2707
vcpu_lock_one(sc, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
2708
vcpu_unlock_one(sc, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
341
vcpu_lock_one(vmm_softc_t *sc, int vcpu)
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
343
ASSERT(vcpu >= 0 && vcpu < VM_MAXCPU);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
349
VERIFY0(vcpu_set_state(sc->vmm_vm, vcpu, VCPU_FROZEN, true));
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
353
vcpu_unlock_one(vmm_softc_t *sc, int vcpu)
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
355
ASSERT(vcpu >= 0 && vcpu < VM_MAXCPU);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
357
VERIFY3U(vcpu_get_state(sc->vmm_vm, vcpu, NULL), ==, VCPU_FROZEN);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
358
VERIFY0(vcpu_set_state(sc->vmm_vm, vcpu, VCPU_IDLE, false));
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
380
for (int vcpu = 0; vcpu < maxcpus; vcpu++) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
381
vcpu_lock_one(sc, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
417
for (int vcpu = 0; vcpu < maxcpus; vcpu++) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
418
vcpu_unlock_one(sc, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
426
int error = 0, vcpu = -1;
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
473
if (ddi_copyin(datap, &vcpu, sizeof (vcpu), md)) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
476
if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vmm_vm)) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
479
vcpu_lock_one(sc, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
523
if (ddi_copyin(datap, &vcpu, sizeof (vcpu), md)) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
526
if (vcpu == -1) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
530
} else if (vcpu >= 0 && vcpu < vm_get_maxcpus(sc->vmm_vm)) {
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
532
vcpu_lock_one(sc, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
564
error = vm_run(sc->vmm_vm, vcpu, &entry);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
579
vme = vm_exitinfo(sc->vmm_vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
656
error = ppt_setup_msi(sc->vmm_vm, pptmsi.vcpu, pptmsi.pptfd,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
667
error = ppt_setup_msix(sc->vmm_vm, pptmsix.vcpu, pptmsix.pptfd,
usr/src/uts/intel/io/vmm/vmm_sol_dev.c
746
error = vm_inject_exception(sc->vmm_vm, vcpu, vmexc.vector,
usr/src/uts/intel/io/vmm/vmm_stat.c
107
(*vst->func)(vm, vcpu, vst);
usr/src/uts/intel/io/vmm/vmm_stat.c
111
stats = vcpu_stats(vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_stat.c
80
vmm_stat_copy(struct vm *vm, int vcpu, int index, int count, int *num_stats,
usr/src/uts/intel/io/vmm/vmm_stat.c
87
if (vcpu < 0 || vcpu >= vm_get_maxcpus(vm))
usr/src/uts/intel/io/vmm/vmm_stat.h
102
vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
usr/src/uts/intel/io/vmm/vmm_stat.h
108
stats = vcpu_stats(vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_stat.h
116
vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
usr/src/uts/intel/io/vmm/vmm_stat.h
122
stats = vcpu_stats(vm, vcpu);
usr/src/uts/intel/io/vmm/vmm_stat.h
130
vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
usr/src/uts/intel/io/vmm/vmm_stat.h
134
vmm_stat_array_incr(vm, vcpu, vst, 0, x);
usr/src/uts/intel/io/vmm/vmm_stat.h
139
vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
usr/src/uts/intel/io/vmm/vmm_stat.h
143
vmm_stat_array_set(vm, vcpu, vst, 0, val);
usr/src/uts/intel/io/vmm/vmm_stat.h
55
typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
usr/src/uts/intel/io/vmm/vmm_stat.h
97
int vmm_stat_copy(struct vm *vm, int vcpu, int index, int count,
usr/src/uts/intel/sys/vmm_dev.h
155
int vcpu;
usr/src/uts/intel/sys/vmm_dev.h
163
int vcpu;