kvm_pmu_ops
struct kvm_pmu_ops *pmu_ops;
/* Forward declaration: callers only pass pointers here, full definition elsewhere. */
struct kvm_pmu_ops;
WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
void kvm_init_pmu_capability(struct kvm_pmu_ops *pmu_ops)
u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
return kvm_pmu_ops.FIXED_COUNTER_BASE + idx * kvm_pmu_ops.MSR_STRIDE;
return kvm_pmu_ops.GP_COUNTER_BASE + idx * kvm_pmu_ops.MSR_STRIDE;
return kvm_pmu_ops.GP_EVENTSEL_BASE + idx * kvm_pmu_ops.MSR_STRIDE;
wrmsrq(kvm_pmu_ops.PERF_GLOBAL_CTRL, 0);
u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
if (!kvm_pmu_ops.check_rdpmc_early)
/*
 * Runtime copy of the vendor (Intel/AMD) PMU ops table, populated once by
 * kvm_pmu_ops_update() via memcpy and then used to seed the kvm_x86_pmu_*
 * static calls.  __read_mostly: written at init, read on hot paths.
 */
static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;
*(((struct kvm_pmu_ops *)0)->func));
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));
static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
/* Apply vendor PMU ops to host PMU capability state at init time. */
void kvm_init_pmu_capability(struct kvm_pmu_ops *pmu_ops);
/*
 * Vendor op tables, defined in the Intel/AMD PMU code (as __initdata:
 * only valid during init — they are copied, not referenced, afterwards).
 */
extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
/* Copy the chosen vendor ops into kvm_pmu_ops and update the static calls. */
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
struct kvm_pmu_ops amd_pmu_ops __initdata = {
struct kvm_pmu_ops intel_pmu_ops __initdata = {