kvm_x86_ops
struct kvm_x86_ops *runtime_ops;
extern struct kvm_x86_ops kvm_x86_ops;
DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
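Every kvm_x86_ops member has a matching static call so common x86 code can dispatch to the vendor module without an indirect pointer load. The *(((struct kvm_x86_ops *)0)->func) expression is never evaluated; it only supplies the member's function type to DECLARE_STATIC_CALL. A minimal self-contained sketch of the same trick, using a hypothetical ops struct (my_ops and get_id are not kernel names):

#include <linux/static_call.h>

struct my_ops {
	int (*get_id)(void);
};

/*
 * The NULL-pointer member dereference is purely a type expression:
 * it names the function type of ->get_id for DECLARE_STATIC_CALL.
 */
#define MY_OP(func) \
	DECLARE_STATIC_CALL(my_##func, *(((struct my_ops *)0)->func));

MY_OP(get_id)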
return kvzalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT);
if (kvm_x86_ops.flush_remote_tlbs &&
if (!kvm_x86_ops.flush_remote_tlbs_range)
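flush_remote_tlbs and flush_remote_tlbs_range are optional hooks: callers probe the pointer and fall back when it is NULL. A sketch of the guard shape (the wrapper name is hypothetical, and the -EOPNOTSUPP convention is assumed from how optional flushes degrade to a full flush):

static int example_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
					   u64 nr_pages)
{
	/* Hook not implemented by this vendor: tell the caller to punt. */
	if (!kvm_x86_ops.flush_remote_tlbs_range)
		return -EOPNOTSUPP;

	return kvm_x86_ops.flush_remote_tlbs_range(kvm, gfn, nr_pages);
}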
kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu);
if (kvm_x86_ops.nested_ops->get_evmcs_version)
evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
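nested_ops is a second-level table reached through a plain pointer rather than a static call; nested-virtualization paths are cold enough that patched dispatch buys little. Its optional members are NULL-checked just like the eVMCS probe above. A sketch of the table's shape, inferred only from the uses in this listing (members abridged, exact signatures are assumptions):

struct kvm_x86_nested_ops_sketch {
	void (*leave_nested)(struct kvm_vcpu *vcpu);
	int (*check_events)(struct kvm_vcpu *vcpu);
	void (*triple_fault)(struct kvm_vcpu *vcpu);
	/* Optional members: callers test for NULL before the call. */
	bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
	uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
};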
if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
return kvm_x86_ops.set_hv_timer &&
       !(kvm_mwait_in_guest(vcpu->kvm) || kvm_can_post_timer_interrupt(vcpu));
if (kvm_x86_ops.x2apic_icr_is_split) {
if (kvm_x86_ops.x2apic_icr_is_split)
if (kvm_x86_ops.alloc_apic_backing_page)
if (!kvm_x86_ops.x2apic_icr_is_split) {
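x2apic_icr_is_split is not a hook at all but a plain bool: it records whether the vendor keeps the 64-bit x2APIC ICR split across the legacy ICR and ICR2 registers, and callers branch on it in both directions, as the four lines above show. A sketch of the read side, assuming the lapic register helpers (the function name is hypothetical):

static u64 example_x2apic_icr_read(struct kvm_lapic *apic)
{
	/* Vendor stores the ICR split: reassemble the 64-bit value. */
	if (kvm_x86_ops.x2apic_icr_is_split)
		return (u64)kvm_lapic_get_reg(apic, APIC_ICR) |
		       (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32;

	return kvm_lapic_get_reg64(apic, APIC_ICR);
}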
if (kvm_x86_ops.sync_pir_to_irr)
if (kvm_x86_ops.set_apic_access_page_addr &&
return kvm_x86_ops.flush_remote_tlbs_range;
if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
if (kvm_x86_ops.get_mt_mask)
struct kvm_x86_ops svm_x86_ops __initdata = {
extern struct kvm_x86_ops svm_x86_ops __initdata;
struct kvm_x86_ops vt_x86_ops __initdata = {
extern struct kvm_x86_ops vt_x86_ops __initdata;
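Each vendor module supplies its table as __initdata; it only needs to outlive the memcpy() in the update path below. A sketch of how such a table is populated, with a few illustrative initializers (abridged; the real tables set every mandatory hook):

struct kvm_x86_ops svm_x86_ops __initdata = {
	.name = KBUILD_MODNAME,
	.vm_size = sizeof(struct kvm_svm),
	.vcpu_create = svm_vcpu_create,
	/* ... every mandatory op, plus whichever optional ones SVM implements ... */
};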
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
static_call_update(kvm_x86_##func, kvm_x86_ops.func);
WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
				   (void *)__static_call_return0);
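The fragments above come from the same init-time routine: it copies the vendor table over the global and then re-points every static call by re-including the op list. Reconstructed in outline (hedged to the pattern the fragments show), mandatory ops WARN if left NULL, optional ops may stay NULL, and RET0 ops fall back to __static_call_return0 so callers need no guard:

static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
{
	memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));

#define __KVM_X86_OP(func) \
	static_call_update(kvm_x86_##func, kvm_x86_ops.func);
#define KVM_X86_OP(func) \
	WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
#define KVM_X86_OP_OPTIONAL __KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0(func) \
	static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
					   (void *)__static_call_return0);
#include <asm/kvm-x86-ops.h>
#undef __KVM_X86_OP
#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
#undef KVM_X86_OP_OPTIONAL_RET0
}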
if (kvm_x86_ops.enable_virtualization_cpu) {
pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
kvm_x86_ops.enable_virtualization_cpu = NULL;
kvm_x86_ops.enable_virtualization_cpu = NULL;
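enable_virtualization_cpu doubles as the "a vendor module is registered" flag: it becomes non-NULL via the memcpy() above and is cleared again both on unload and on a failed init, which is why the assignment appears twice. The load-time guard, roughly (the return value is an assumption):

	if (kvm_x86_ops.enable_virtualization_cpu) {
		pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
		return -EEXIST;
	}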
if (!kvm_x86_ops.update_cr8_intercept)
kvm_x86_ops.nested_ops->triple_fault(vcpu);
return kvm_x86_ops.nested_ops->check_events(vcpu);
kvm_x86_ops.nested_ops->has_events &&
kvm_x86_ops.nested_ops->has_events(vcpu, true))
kvm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization)
if (!(kvm_x86_ops.required_apicv_inhibits & BIT(reason)))
if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
kvm_x86_ops.nested_ops->triple_fault(vcpu);
if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL &&
kvm_x86_ops.nested_ops->has_events &&
kvm_x86_ops.nested_ops->has_events(vcpu, false))
kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
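The emergency-disable hook is consumed as a raw function pointer: it is handed to the reboot/crash machinery, which may invoke it from NMI-like context where the static-call plumbing offers nothing. A sketch of the registration pair, assuming the arch-level enable/disable wrappers:

void kvm_arch_enable_virtualization(void)
{
	cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
}

void kvm_arch_disable_virtualization(void)
{
	cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
}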
struct kvm_x86_ops kvm_x86_ops __read_mostly;
DEFINE_STATIC_CALL_NULL(kvm_x86_##func,
			*(((struct kvm_x86_ops *)0)->func));
r = kvm_x86_ops.nested_ops->get_state ?
kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
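In capability queries, the pointer itself is the answer: KVM_CHECK_EXTENSION reports a feature by testing whether the vendor filled in the op. A sketch of the switch arms the three fragments above belong to (the case grouping is assumed from the eVMCS and direct-TLB-flush context):

	case KVM_CAP_NESTED_STATE:
		r = kvm_x86_ops.nested_ops->get_state ?
			kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
		break;
	case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
		r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
		break;
	case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
		r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
		break;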
if (kvm_x86_ops.dev_get_attr)
if (!kvm_x86_ops.nested_ops->enable_evmcs)
r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
if (!kvm_x86_ops.enable_l2_tlb_flush)
if (!kvm_x86_ops.nested_ops->get_state)
r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
if (!kvm_x86_ops.nested_ops->set_state)
r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
if (!kvm_x86_ops.vcpu_mem_enc_ioctl)
r = kvm_x86_ops.vcpu_mem_enc_ioctl(vcpu, argp);
if (!kvm_x86_ops.vm_copy_enc_context_from)
if (!kvm_x86_ops.vm_move_enc_context_from)
kvm_x86_ops.vcpu_mem_enc_unlocked_ioctl)
if (!kvm_x86_ops.mem_enc_ioctl)
if (!kvm_x86_ops.mem_enc_register_region)
if (!kvm_x86_ops.mem_enc_unregister_region)
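The memory-encryption ioctls all share one guard shape: fail with -ENOTTY (or -EINVAL) when the vendor left the op NULL, otherwise forward the ioctl payload. One arm as a sketch, assuming the usual vm-ioctl switch structure:

	case KVM_MEMORY_ENCRYPT_OP:
		r = -ENOTTY;
		if (!kvm_x86_ops.mem_enc_ioctl)
			goto out;
		r = kvm_x86_ops.mem_enc_ioctl(kvm, argp);
		break;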
kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
if (!kvm_x86_ops.get_untagged_addr)
kvm_x86_ops.nested_ops->leave_nested(vcpu);