// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* Unadulterated PMU capabilities of the host, i.e. of hardware. */
static struct x86_pmu_capability __read_mostly kvm_host_pmu;

/* KVM's PMU capabilities, i.e. the intersection of KVM and hardware support. */
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_cap);

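/*
 * Event selector encodings, queried from perf at setup, for the architectural
 * events that KVM emulates in software, e.g. retired instructions and retired
 * branches.  The raw encodings are vendor specific, hence the runtime lookup.
 */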
struct kvm_pmu_emulated_event_selectors {
        u64 INSTRUCTIONS_RETIRED;
        u64 BRANCH_INSTRUCTIONS_RETIRED;
};
static struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;

/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
        X86_MATCH_VFM(INTEL_ICELAKE_D, NULL),
        X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
        /* Instruction-Accurate PDIR (PDIR++) */
        X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
        {}
};

/* Precise Distribution (PDist) */
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
        X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
        {}
};

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *        has MSR_K7_PERFCTRn and, for families 15H and later,
 *        MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 *        aliased to MSR_K7_PERFCTRn.
 *     2. MSR Index (named idx): This is normally used by the RDPMC
 *        instruction.  For instance, AMD's RDPMC instruction uses 0000_0003h
 *        in ECX to access C001_0007h (MSR_K7_PERFCTR3).  Intel has a similar
 *        mechanism, except that it also supports fixed counters.  idx can be
 *        used as an index into the gp and fixed counters.
 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *        code.  Each pmc, stored in the kvm_pmc.idx field, is unique across
 *        all perf counters (both gp and fixed).  The mapping relationship
 *        between pmc and perf counters is as follows:
 *        * Intel: [0 .. KVM_MAX_NR_INTEL_GP_COUNTERS-1] <=> gp counters
 *                 [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed
 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 */

static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

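/*
 * Define a NULL-initialized static call for each vendor PMU op.  The calls
 * are patched to the vendor implementation in kvm_pmu_ops_update(), avoiding
 * the overhead of an indirect call on every invocation.
 */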
#define KVM_X86_PMU_OP(func)                                         \
        DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,                          \
                                *(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
        memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
        static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
        WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}

void kvm_init_pmu_capability(struct kvm_pmu_ops *pmu_ops)
{
        bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
        int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

        /*
         * Hybrid PMUs don't play nice with virtualization without careful
         * configuration by userspace, and KVM's APIs for reporting supported
         * vPMU features do not account for hybrid PMUs.  Disable vPMU support
         * for hybrid PMUs until KVM gains a way to let userspace opt-in.
         */
        if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
                enable_pmu = false;
                memset(&kvm_host_pmu, 0, sizeof(kvm_host_pmu));
        } else {
                perf_get_x86_pmu_capability(&kvm_host_pmu);
        }

        if (enable_pmu) {
                /*
                 * WARN if perf did NOT disable the hardware PMU even though
                 * the architecturally required number of GP counters isn't
                 * present, i.e. if there is a non-zero number of counters,
                 * but fewer than what is architecturally required.
                 */
                if (!kvm_host_pmu.num_counters_gp ||
                    WARN_ON_ONCE(kvm_host_pmu.num_counters_gp < min_nr_gp_ctrs))
                        enable_pmu = false;
                else if (is_intel && !kvm_host_pmu.version)
                        enable_pmu = false;
        }

        if (!enable_pmu || !enable_mediated_pmu || !kvm_host_pmu.mediated ||
            !pmu_ops->is_mediated_pmu_supported(&kvm_host_pmu))
                enable_mediated_pmu = false;

        if (!enable_mediated_pmu)
                pmu_ops->write_global_ctrl = NULL;

        if (!enable_pmu) {
                memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
                return;
        }

        memcpy(&kvm_pmu_cap, &kvm_host_pmu, sizeof(kvm_host_pmu));
        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
        kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
                                          pmu_ops->MAX_NR_GP_COUNTERS);
        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
                                             KVM_MAX_NR_FIXED_COUNTERS);

        kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
                perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
        kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
                perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
}

void kvm_handle_guest_mediated_pmi(void)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

        if (WARN_ON_ONCE(!vcpu || !kvm_vcpu_has_mediated_pmu(vcpu)))
                return;

        kvm_make_request(KVM_REQ_PMI, vcpu);
}

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        bool skip_pmi = false;

        if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
                if (!in_pmi) {
                        /*
                         * TODO: KVM currently _chooses_ not to generate
                         * records for emulated instructions, which avoids a
                         * BUFFER_OVF PMI when there are no records.  Strictly
                         * speaking, records should also be generated in the
                         * right context to improve sampling accuracy.
                         */
                        skip_pmi = true;
                } else {
                        /* Indicate PEBS overflow PMI to guest. */
                        skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
                                                      (unsigned long *)&pmu->global_status);
                }
        } else {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
        }

        if (pmc->intr && !skip_pmi)
                kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

static void kvm_perf_overflow(struct perf_event *perf_event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;

        /*
         * Ignore asynchronous overflow events for counters that are scheduled
         * to be reprogrammed, e.g. if a PMI for the previous event races with
         * KVM's handling of a related guest WRMSR.
         */
        if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
                return;

        __kvm_perf_overflow(pmc, true);

        kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
{
        /*
         * For model-specific PEBS counters with special capabilities (PDIR,
         * PDIR++, PDIST), KVM needs to raise the event's precise level to the
         * maximum value (currently 3, backwards compatible) so that the perf
         * subsystem assigns a hardware counter with that capability to the
         * vPMC.
         */
        if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
            (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu)))
                return 3;

        /*
         * A non-zero precision level turns an ordinary guest event into a
         * guest PEBS event, and triggers the host PEBS PMI handler to
         * determine whether a PEBS overflow PMI comes from the host counters
         * or the guest.
         */
        return 1;
}

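/*
 * The sample period is the number of events remaining before the counter
 * overflows, i.e. the two's complement of the current counter value truncated
 * to the counter's width.  A counter value of '0' yields a full period of
 * pmc_bitmask(pmc) + 1 events.
 */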
static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
        u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

        if (!sample_period)
                sample_period = pmc_bitmask(pmc) + 1;
        return sample_period;
}

static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
                                 bool exclude_user, bool exclude_kernel,
                                 bool intr)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
                .size = sizeof(attr),
                .pinned = true,
                .exclude_idle = true,
                .exclude_host = 1,
                .exclude_user = exclude_user,
                .exclude_kernel = exclude_kernel,
                .config = config,
        };
        bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);

        attr.sample_period = get_sample_period(pmc, pmc->counter);

        if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
            (boot_cpu_has(X86_FEATURE_RTM) || boot_cpu_has(X86_FEATURE_HLE))) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period. Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
        }
        if (pebs) {
                /*
                 * For most PEBS hardware events, the difference in the software
                 * precision levels of guest and host PEBS events will not affect
                 * the accuracy of the PEBS profiling result, because the "event IP"
                 * in the PEBS record is calibrated on the guest side.
                 */
                attr.precise_ip = pmc_get_pebs_precise_level(pmc);
        }

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
                pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
                                     PTR_ERR(event), pmc->idx);
                return PTR_ERR(event);
        }

        pmc->perf_event = event;
        pmc_to_pmu(pmc)->event_count++;
        pmc->is_paused = false;
        pmc->intr = intr || pebs;
        return 0;
}

static bool pmc_pause_counter(struct kvm_pmc *pmc)
{
        u64 counter = pmc->counter;
        u64 prev_counter;

        /* update counter, reset event value to avoid redundant accumulation */
        if (pmc->perf_event && !pmc->is_paused)
                counter += perf_event_pause(pmc->perf_event, true);

        /*
         * Snapshot the previous counter *after* accumulating state from perf.
         * If overflow already happened, hardware (via perf) is responsible for
         * generating a PMI.  KVM just needs to detect overflow on emulated
         * counter events that haven't yet been processed.
         */
        prev_counter = counter & pmc_bitmask(pmc);

        counter += pmc->emulated_counter;
        pmc->counter = counter & pmc_bitmask(pmc);

        pmc->emulated_counter = 0;
        pmc->is_paused = true;

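        /*
         * The counter overflowed due to emulated events if the truncated sum
         * wrapped below the snapshot taken before the emulated count was
         * added.
         */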
        return pmc->counter < prev_counter;
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
        if (!pmc->perf_event)
                return false;

        /* recalibrate sample period and check if it's accepted by perf core */
        if (is_sampling_event(pmc->perf_event) &&
            perf_event_period(pmc->perf_event,
                              get_sample_period(pmc, pmc->counter)))
                return false;

        if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
            (!!pmc->perf_event->attr.precise_ip))
                return false;

        /* Reuse the existing perf_event, as pmc_reprogram_counter() would. */
        perf_event_enable(pmc->perf_event);
        pmc->is_paused = false;

        return true;
}

static void pmc_release_perf_event(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
                pmc->current_config = 0;
                pmc_to_pmu(pmc)->event_count--;
        }
}

static void pmc_stop_counter(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                pmc->counter = pmc_read_counter(pmc);
                pmc_release_perf_event(pmc);
        }
}

static void pmc_update_sample_period(struct kvm_pmc *pmc)
{
        if (!pmc->perf_event || pmc->is_paused ||
            !is_sampling_event(pmc->perf_event))
                return;

        perf_event_period(pmc->perf_event,
                          get_sample_period(pmc, pmc->counter));
}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
{
        if (kvm_vcpu_has_mediated_pmu(pmc->vcpu)) {
                pmc->counter = val & pmc_bitmask(pmc);
                return;
        }

        /*
         * Drop any unconsumed accumulated counts, the WRMSR is a write, not a
         * read-modify-write.  Adjust the counter value so that its value is
         * relative to the current count, as reading the current count from
         * perf is faster than pausing and reprogramming the event in order to
         * reset it to '0'.  Note, this very sneakily offsets the accumulated
         * emulated count too, by using pmc_read_counter()!
         */
        pmc->emulated_counter = 0;
        pmc->counter += val - pmc_read_counter(pmc);
        pmc->counter &= pmc_bitmask(pmc);
        pmc_update_sample_period(pmc);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(pmc_write_counter);

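/*
 * Three-way compare of two filter entries, considering only the bits set in
 * @mask, e.g. to ignore the EXCLUDE flag and/or unit mask when searching.
 */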
static int filter_cmp(const void *pa, const void *pb, u64 mask)
{
        u64 a = *(u64 *)pa & mask;
        u64 b = *(u64 *)pb & mask;

        return (a > b) - (a < b);
}

static int filter_sort_cmp(const void *pa, const void *pb)
{
        return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT |
                                   KVM_PMU_MASKED_ENTRY_EXCLUDE));
}

/*
 * For the event filter, searching is done on the 'includes' list and
 * 'excludes' list separately rather than on the 'events' list (which
 * has both).  As a result the exclude bit can be ignored.
 */
static int filter_event_cmp(const void *pa, const void *pb)
{
        return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT));
}

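/*
 * Binary search for an entry with a matching event select.  Note, bsearch()
 * may return any one of multiple adjacent matching entries; callers must walk
 * outwards from the returned index to visit all matches.
 */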
static int find_filter_index(u64 *events, u64 nevents, u64 key)
{
        u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
                          filter_event_cmp);

        if (!fe)
                return -1;

        return fe - events;
}

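/*
 * A masked filter entry matches if (umask & umask_mask) == umask_match.  The
 * entry's umask_mask is shifted down so that it overlays the umask field
 * (bits 15:8) of a raw event selector, as verified by the BUILD_BUG_ON below.
 */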
static bool is_filter_entry_match(u64 filter_event, u64 umask)
{
        u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8);
        u64 match = filter_event & KVM_PMU_MASKED_ENTRY_UMASK_MATCH;

        BUILD_BUG_ON((KVM_PMU_ENCODE_MASKED_ENTRY(0, 0xff, 0, false) >>
                     (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) !=
                     ARCH_PERFMON_EVENTSEL_UMASK);

        return (umask & mask) == match;
}

static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
{
        u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
        u64 umask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
        int i, index;

        index = find_filter_index(events, nevents, event_select);
        if (index < 0)
                return false;

        /*
         * Entries are sorted by the event select.  Walk the list in both
         * directions to process all entries with the targeted event select.
         */
        for (i = index; i < nevents; i++) {
                if (filter_event_cmp(&events[i], &event_select))
                        break;

                if (is_filter_entry_match(events[i], umask))
                        return true;
        }

        for (i = index - 1; i >= 0; i--) {
                if (filter_event_cmp(&events[i], &event_select))
                        break;

                if (is_filter_entry_match(events[i], umask))
                        return true;
        }

        return false;
}

static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
                                u64 eventsel)
{
        if (filter_contains_match(f->includes, f->nr_includes, eventsel) &&
            !filter_contains_match(f->excludes, f->nr_excludes, eventsel))
                return f->action == KVM_PMU_EVENT_ALLOW;

        return f->action == KVM_PMU_EVENT_DENY;
}

static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
                                   int idx)
{
        int fixed_idx = idx - KVM_FIXED_PMC_BASE_IDX;

        if (filter->action == KVM_PMU_EVENT_DENY &&
            test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
                return false;
        if (filter->action == KVM_PMU_EVENT_ALLOW &&
            !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
                return false;

        return true;
}

static bool pmc_is_event_allowed(struct kvm_pmc *pmc)
{
        struct kvm_x86_pmu_event_filter *filter;
        struct kvm *kvm = pmc->vcpu->kvm;

        filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
        if (!filter)
                return true;

        if (pmc_is_gp(pmc))
                return is_gp_event_allowed(filter, pmc->eventsel);

        return is_fixed_event_allowed(filter, pmc->idx);
}

static void kvm_mediated_pmu_refresh_event_filter(struct kvm_pmc *pmc)
{
        bool allowed = pmc_is_event_allowed(pmc);
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (pmc_is_gp(pmc)) {
                pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                if (allowed)
                        pmc->eventsel_hw |= pmc->eventsel &
                                            ARCH_PERFMON_EVENTSEL_ENABLE;
        } else {
                u64 mask = intel_fixed_bits_by_idx(pmc->idx - KVM_FIXED_PMC_BASE_IDX, 0xf);

                pmu->fixed_ctr_ctrl_hw &= ~mask;
                if (allowed)
                        pmu->fixed_ctr_ctrl_hw |= pmu->fixed_ctr_ctrl & mask;
        }
}

static int reprogram_counter(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        u64 eventsel = pmc->eventsel;
        u64 new_config = eventsel;
        bool emulate_overflow;
        u8 fixed_ctr_ctrl;

        if (kvm_vcpu_has_mediated_pmu(pmu_to_vcpu(pmu))) {
                kvm_mediated_pmu_refresh_event_filter(pmc);
                return 0;
        }

        emulate_overflow = pmc_pause_counter(pmc);

        if (!pmc_is_globally_enabled(pmc) || !pmc_is_locally_enabled(pmc) ||
            !pmc_is_event_allowed(pmc))
                return 0;

        if (emulate_overflow)
                __kvm_perf_overflow(pmc, false);

        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                pr_warn_once("pin control bit is ignored\n");

        if (pmc_is_fixed(pmc)) {
                fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
                                                  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
                if (fixed_ctr_ctrl & INTEL_FIXED_0_KERNEL)
                        eventsel |= ARCH_PERFMON_EVENTSEL_OS;
                if (fixed_ctr_ctrl & INTEL_FIXED_0_USER)
                        eventsel |= ARCH_PERFMON_EVENTSEL_USR;
                if (fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI)
                        eventsel |= ARCH_PERFMON_EVENTSEL_INT;
                new_config = (u64)fixed_ctr_ctrl;
        }

        if (pmc->current_config == new_config && pmc_resume_counter(pmc))
                return 0;

        pmc_release_perf_event(pmc);

        pmc->current_config = new_config;

        return pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
                                     (eventsel & pmu->raw_event_mask),
                                     !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                                     !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                                     eventsel & ARCH_PERFMON_EVENTSEL_INT);
}

static bool pmc_is_event_match(struct kvm_pmc *pmc, u64 eventsel)
{
        /*
         * Ignore checks for edge detect (all events currently emulated by KVM
         * are always rising edges), pin control (unsupported by modern CPUs),
         * and counter mask and its invert flag (KVM doesn't emulate multiple
         * events in a single clock cycle).
         *
         * Note, the uppermost nibble of AMD's mask overlaps Intel's IN_TX (bit
         * 32) and IN_TXCP (bit 33), as well as two reserved bits (bits 35:34).
         * Checking the "in HLE/RTM transaction" flags is correct as the vCPU
         * can't be in a transaction if KVM is emulating an instruction.
         *
         * Checking the reserved bits might be wrong if they are defined in the
         * future, but so could ignoring them, so do the simple thing for now.
         */
        return !((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB);
}

void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc)
{
        bitmap_clear(pmu->pmc_counting_instructions, pmc->idx, 1);
        bitmap_clear(pmu->pmc_counting_branches, pmc->idx, 1);

        /*
         * Do NOT consult the PMU event filters, as the filters must be checked
         * at the time of emulation to ensure KVM uses fresh information, e.g.
         * omitting a PMC from a bitmap could result in a missed event if the
         * filter is changed to allow counting the event.
         */
        if (!pmc_is_locally_enabled(pmc))
                return;

        if (pmc_is_event_match(pmc, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED))
                bitmap_set(pmu->pmc_counting_instructions, pmc->idx, 1);

        if (pmc_is_event_match(pmc, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED))
                bitmap_set(pmu->pmc_counting_branches, pmc->idx, 1);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_recalc_pmc_emulation);

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
        DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int bit;

        bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX);

        /*
         * The reprogramming bitmap can be written asynchronously by something
         * other than the task that holds vcpu->mutex; take care to clear only
         * the bits that will actually be processed.
         */
        BUILD_BUG_ON(sizeof(bitmap) != sizeof(atomic64_t));
        atomic64_andnot(*(s64 *)bitmap, &pmu->__reprogram_pmi);

        kvm_for_each_pmc(pmu, pmc, bit, bitmap) {
                /*
                 * If reprogramming fails, e.g. due to contention, re-set the
                 * reprogram bit, i.e. opportunistically try again on the next
                 * PMU refresh.  Don't make a new request as doing so can
                 * stall the guest if reprogramming repeatedly fails.
                 */
                if (reprogram_counter(pmc))
                        set_bit(pmc->idx, pmu->reprogram_pmi);
        }

        /*
         * Release unused perf_events if the corresponding guest MSRs weren't
         * accessed during the last vCPU time slice (need_cleanup is set when
         * the vCPU is scheduled back in).
         */
        if (unlikely(pmu->need_cleanup))
                kvm_pmu_cleanup(vcpu);

        kvm_for_each_pmc(pmu, pmc, bit, bitmap)
                kvm_pmu_recalc_pmc_emulation(pmu, pmc);
}

int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{
        /*
         * On Intel, VMX interception has priority over RDPMC exceptions that
         * aren't already handled by the emulator, i.e. no additional checks
         * are needed for Intel PMUs.
         *
         * On AMD, _all_ exceptions on RDPMC have priority over SVM intercepts,
         * i.e. an invalid PMC results in a #GP, not #VMEXIT.
         */
        if (!kvm_pmu_ops.check_rdpmc_early)
                return 0;

        return kvm_pmu_call(check_rdpmc_early)(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
        switch (pmc_idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                return true;
        }
        return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        u64 ctr_val;

        switch (idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
                ctr_val = rdtsc();
                break;
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
                ctr_val = ktime_get_boottime_ns();
                break;
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                ctr_val = ktime_get_boottime_ns() +
                        vcpu->kvm->arch.kvmclock_offset;
                break;
        default:
                return 1;
        }

        *data = ctr_val;
        return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 mask = ~0ull;

        if (!pmu->version)
                return 1;

        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

        pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
        if (!pmc)
                return 1;

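        /*
         * Per the SDM/APM, RDPMC at CPL > 0 in protected mode generates a #GP
         * unless CR4.PCE is set.
         */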
        if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
            (kvm_x86_call(get_cpl)(vcpu) != 0) &&
            kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
                return 1;

        *data = pmc_read_counter(pmc) & mask;
        return 0;
}

static bool kvm_need_any_pmc_intercept(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (!kvm_vcpu_has_mediated_pmu(vcpu))
                return true;

        /*
         * Note!  Check *host* PMU capabilities, not KVM's PMU capabilities, as
         * KVM's capabilities are constrained based on KVM support, i.e. KVM's
         * capabilities themselves may be a subset of hardware capabilities.
         */
        return pmu->nr_arch_gp_counters != kvm_host_pmu.num_counters_gp ||
               pmu->nr_arch_fixed_counters != kvm_host_pmu.num_counters_fixed;
}

bool kvm_need_perf_global_ctrl_intercept(struct kvm_vcpu *vcpu)
{
        return kvm_need_any_pmc_intercept(vcpu) ||
               !kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_need_perf_global_ctrl_intercept);

bool kvm_need_rdpmc_intercept(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        /*
         * VMware allows access to these pseudo-PMCs via RDPMC even in Ring3
         * and with CR4.PCE=0.
         */
        if (enable_vmware_backdoor)
                return true;

        return kvm_need_any_pmc_intercept(vcpu) ||
               pmu->counter_bitmask[KVM_PMC_GP] != (BIT_ULL(kvm_host_pmu.bit_width_gp) - 1) ||
               pmu->counter_bitmask[KVM_PMC_FIXED] != (BIT_ULL(kvm_host_pmu.bit_width_fixed) - 1);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_need_rdpmc_intercept);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu)) {
                kvm_pmu_call(deliver_pmi)(vcpu);
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
        }
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        switch (msr) {
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
        default:
                break;
        }
        return kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr) ||
               kvm_pmu_call(is_valid_msr)(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr);

        if (pmc)
                __set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        u32 msr = msr_info->index;

        switch (msr) {
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
                msr_info->data = pmu->global_status;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
        case MSR_CORE_PERF_GLOBAL_CTRL:
                msr_info->data = pmu->global_ctrl;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                msr_info->data = 0;
                break;
        default:
                return kvm_pmu_call(get_msr)(vcpu, msr_info);
        }

        return 0;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
        u64 diff;

        /*
         * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
         * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
         */
        switch (msr) {
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (!msr_info->host_initiated)
                        return 1; /* RO MSR */
                fallthrough;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
                /* Per PPR, Read-only MSR. Writes are ignored. */
                if (!msr_info->host_initiated)
                        break;

                if (data & pmu->global_status_rsvd)
                        return 1;

                pmu->global_status = data;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
                data &= ~pmu->global_ctrl_rsvd;
                fallthrough;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (!kvm_valid_perf_global_ctrl(pmu, data))
                        return 1;

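                /*
                 * Reprogram only the counters whose global enable bit is
                 * actually toggling, i.e. the bits set in "diff".
                 */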
                if (pmu->global_ctrl != data) {
                        diff = pmu->global_ctrl ^ data;
                        pmu->global_ctrl = data;
                        reprogram_counters(pmu, diff);
                }
                /*
                 * Forward writes to vendor code, i.e. to the VMC{B,S}, even
                 * if the value is unchanged, as pmu->global_ctrl is per-vCPU,
                 * not per-VMC{B,S}.
                 */
                if (kvm_vcpu_has_mediated_pmu(vcpu))
                        kvm_pmu_call(write_global_ctrl)(data);
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                /*
                 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL_STATUS_RESET, clears bits in
                 * GLOBAL_STATUS, and so the set of reserved bits is the same.
                 */
                if (data & pmu->global_status_rsvd)
                        return 1;
                fallthrough;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
                if (!msr_info->host_initiated)
                        pmu->global_status &= ~data;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
                if (!msr_info->host_initiated)
                        pmu->global_status |= data & ~pmu->global_status_rsvd;
                break;
        default:
                kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
                return kvm_pmu_call(set_msr)(vcpu, msr_info);
        }

        return 0;
}

static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int i;

        pmu->need_cleanup = false;

        bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);

        kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
                pmc_stop_counter(pmc);
                pmc->counter = 0;
                pmc->emulated_counter = 0;

                if (pmc_is_gp(pmc)) {
                        pmc->eventsel = 0;
                        pmc->eventsel_hw = 0;
                }
        }

        pmu->fixed_ctr_ctrl = pmu->fixed_ctr_ctrl_hw = 0;
        pmu->global_ctrl = pmu->global_status = 0;

        kvm_pmu_call(reset)(vcpu);
}

/*
 * Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
 * and/or PERF_CAPABILITIES.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (KVM_BUG_ON(!kvm_can_set_cpuid_and_feature_msrs(vcpu), vcpu->kvm))
                return;

        /*
         * Stop/release all existing counters/events before realizing the new
         * vPMU model.
         */
        kvm_pmu_reset(vcpu);

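        /*
         * Assume an "empty" vPMU with all features and counters disabled and
         * all reserved bits set; vendor code enables what's supported.
         */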
        pmu->version = 0;
        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
        pmu->raw_event_mask = X86_RAW_EVENT_MASK;
        pmu->global_ctrl_rsvd = ~0ull;
        pmu->global_status_rsvd = ~0ull;
        pmu->fixed_ctr_ctrl_rsvd = ~0ull;
        pmu->pebs_enable_rsvd = ~0ull;
        pmu->pebs_data_cfg_rsvd = ~0ull;
        bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);

        if (!vcpu->kvm->arch.enable_pmu)
                return;

        kvm_pmu_call(refresh)(vcpu);

        /*
         * At RESET, both Intel and AMD CPUs set all enable bits for general
         * purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
         * was written for v1 PMUs doesn't unknowingly leave GP counters
         * disabled in the global controls).  Emulate that behavior when
         * refreshing the PMU so that userspace doesn't need to manually set
         * PERF_GLOBAL_CTRL.
         */
        if (pmu->nr_arch_gp_counters &&
            (kvm_pmu_has_perf_global_ctrl(pmu) || kvm_vcpu_has_mediated_pmu(vcpu)))
                pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);

        if (kvm_vcpu_has_mediated_pmu(vcpu))
                kvm_pmu_call(write_global_ctrl)(pmu->global_ctrl);

        bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
        bitmap_set(pmu->all_valid_pmc_idx, KVM_FIXED_PMC_BASE_IDX,
                   pmu->nr_arch_fixed_counters);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        memset(pmu, 0, sizeof(*pmu));
        kvm_pmu_call(init)(vcpu);
}

/* Release perf_events for vPMCs that have been unused for a full time slice.  */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = NULL;
        DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
        int i;

        pmu->need_cleanup = false;

        bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
                      pmu->pmc_in_use, X86_PMC_IDX_MAX);

        kvm_for_each_pmc(pmu, pmc, i, bitmask) {
                if (pmc->perf_event && !pmc_is_locally_enabled(pmc))
                        pmc_stop_counter(pmc);
        }

        kvm_pmu_call(cleanup)(vcpu);

        bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_pmu_reset(vcpu);
}

static bool pmc_is_pmi_enabled(struct kvm_pmc *pmc)
{
        u8 fixed_ctr_ctrl;

        if (pmc_is_gp(pmc))
                return pmc->eventsel & ARCH_PERFMON_EVENTSEL_INT;

        fixed_ctr_ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
                                          pmc->idx - KVM_FIXED_PMC_BASE_IDX);
        return fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI;
}

static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
        struct kvm_vcpu *vcpu = pmc->vcpu;

        /*
         * For perf-based PMUs, accumulate software-emulated events separately
         * from pmc->counter, as pmc->counter is offset by the count of the
         * associated perf event. Request reprogramming, which will consult
         * both emulated and hardware-generated events to detect overflow.
         */
        if (!kvm_vcpu_has_mediated_pmu(vcpu)) {
                pmc->emulated_counter++;
                kvm_pmu_request_counter_reprogram(pmc);
                return;
        }

        /*
         * For mediated PMUs, pmc->counter is updated when the vCPU's PMU is
         * put, and will be loaded into hardware when the PMU is loaded. Simply
         * increment the counter and signal overflow if it wraps to zero.
         */
        pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
        if (!pmc->counter) {
                pmc_to_pmu(pmc)->global_status |= BIT_ULL(pmc->idx);
                if (pmc_is_pmi_enabled(pmc))
                        kvm_make_request(KVM_REQ_PMI, vcpu);
        }
}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
        bool select_os, select_user;
        u64 config;

        if (pmc_is_gp(pmc)) {
                config = pmc->eventsel;
                select_os = config & ARCH_PERFMON_EVENTSEL_OS;
                select_user = config & ARCH_PERFMON_EVENTSEL_USR;
        } else {
                config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
                                          pmc->idx - KVM_FIXED_PMC_BASE_IDX);
                select_os = config & INTEL_FIXED_0_KERNEL;
                select_user = config & INTEL_FIXED_0_USER;
        }

        /*
         * Skip the CPL lookup, which isn't free on Intel, if the result will
         * be the same regardless of the CPL.
         */
        if (select_os == select_user)
                return select_os;

        return (kvm_x86_call(get_cpl)(pmc->vcpu) == 0) ? select_os :
                                                         select_user;
}

static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu,
                                  const unsigned long *event_pmcs)
{
        DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int i, idx;

        BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX);

        if (bitmap_empty(event_pmcs, X86_PMC_IDX_MAX))
                return;

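        /*
         * Honor PERF_GLOBAL_CTRL where it exists: only globally enabled PMCs
         * can count.  PMUs without global control, e.g. PMU v1, enable and
         * disable PMCs purely via the per-PMC controls.
         */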
        if (!kvm_pmu_has_perf_global_ctrl(pmu))
                bitmap_copy(bitmap, event_pmcs, X86_PMC_IDX_MAX);
        else if (!bitmap_and(bitmap, event_pmcs,
                             (unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX))
                return;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        kvm_for_each_pmc(pmu, pmc, i, bitmap) {
                if (!pmc_is_event_allowed(pmc) || !cpl_is_matched(pmc))
                        continue;

                kvm_pmu_incr_counter(pmc);
        }
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu)
{
        kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_instructions);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_instruction_retired);

void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu)
{
        kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_branches);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_branch_retired);

static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{
        u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
                   KVM_PMU_MASKED_ENTRY_UMASK_MASK |
                   KVM_PMU_MASKED_ENTRY_UMASK_MATCH |
                   KVM_PMU_MASKED_ENTRY_EXCLUDE;
        int i;

        for (i = 0; i < filter->nevents; i++) {
                if (filter->events[i] & ~mask)
                        return false;
        }

        return true;
}

static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
{
        int i, j;

        for (i = 0, j = 0; i < filter->nevents; i++) {
                /*
                 * Skip events that are impossible to match against a guest
                 * event.  When filtering, only the event select + unit mask
                 * of the guest event is used.  To maintain backwards
                 * compatibility, impossible filters can't be rejected :-(
                 */
                if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
                                          ARCH_PERFMON_EVENTSEL_UMASK))
                        continue;
                /*
                 * Convert userspace events to a common in-kernel event so
                 * only one code path is needed to support both events.  For
                 * the in-kernel events use masked events because they are
                 * flexible enough to handle both cases.  To convert to masked
                 * events all that's needed is to add an "all ones" umask_mask,
                 * (unmasked filter events don't support EXCLUDE).
                 */
                filter->events[j++] = filter->events[i] |
                                      (0xFFULL << KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT);
        }

        filter->nevents = j;
}

static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
{
        int i;

        if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS))
                convert_to_masked_filter(filter);
        else if (!is_masked_filter_valid(filter))
                return -EINVAL;

        /*
         * Sort entries by event select and includes vs. excludes so that all
         * entries for a given event select can be processed efficiently during
         * filtering.  The EXCLUDE flag uses a more significant bit than the
         * event select, and so the sorted list is also effectively split into
         * includes and excludes sub-lists.
         */
        sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
             filter_sort_cmp, NULL);

        i = filter->nevents;
        /* Find the first EXCLUDE event (only supported for masked events). */
        if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) {
                for (i = 0; i < filter->nevents; i++) {
                        if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
                                break;
                }
        }

        filter->nr_includes = i;
        filter->nr_excludes = filter->nevents - filter->nr_includes;
        filter->includes = filter->events;
        filter->excludes = filter->events + filter->nr_includes;

        return 0;
}

int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
        struct kvm_pmu_event_filter __user *user_filter = argp;
        struct kvm_x86_pmu_event_filter *filter;
        struct kvm_pmu_event_filter tmp;
        struct kvm_vcpu *vcpu;
        unsigned long i;
        size_t size;
        int r;

        if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
                return -EFAULT;

        if (tmp.action != KVM_PMU_EVENT_ALLOW &&
            tmp.action != KVM_PMU_EVENT_DENY)
                return -EINVAL;

        if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
                return -EINVAL;

        if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
                return -E2BIG;

        size = struct_size(filter, events, tmp.nevents);
        filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
        if (!filter)
                return -ENOMEM;

        filter->action = tmp.action;
        filter->nevents = tmp.nevents;
        filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
        filter->flags = tmp.flags;

        r = -EFAULT;
        if (copy_from_user(filter->events, user_filter->events,
                           sizeof(filter->events[0]) * filter->nevents))
                goto cleanup;

        r = prepare_filter_lists(filter);
        if (r)
                goto cleanup;

        mutex_lock(&kvm->lock);
        filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
                                     mutex_is_locked(&kvm->lock));
        mutex_unlock(&kvm->lock);
        synchronize_srcu_expedited(&kvm->srcu);

        BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
                     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));

        kvm_for_each_vcpu(i, vcpu, kvm)
                atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);

        kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);

        r = 0;
cleanup:
        kfree(filter);
        return r;
}

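/*
 * Compute the MSR address of a PMC or event selector as "base + idx * stride",
 * with the vendor-specific bases and strides provided by kvm_pmu_ops, e.g. to
 * accommodate AMD's interleaved CTL/CTR MSR pairs.
 */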
static __always_inline u32 fixed_counter_msr(u32 idx)
{
        return kvm_pmu_ops.FIXED_COUNTER_BASE + idx * kvm_pmu_ops.MSR_STRIDE;
}

static __always_inline u32 gp_counter_msr(u32 idx)
{
        return kvm_pmu_ops.GP_COUNTER_BASE + idx * kvm_pmu_ops.MSR_STRIDE;
}

static __always_inline u32 gp_eventsel_msr(u32 idx)
{
        return kvm_pmu_ops.GP_EVENTSEL_BASE + idx * kvm_pmu_ops.MSR_STRIDE;
}

static void kvm_pmu_load_guest_pmcs(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 i;

        /*
         * No need to zero out unexposed GP/fixed counters/selectors since RDPMC
         * is intercepted if hardware has counters that aren't visible to the
         * guest (KVM will inject #GP as appropriate).
         */
        for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
                pmc = &pmu->gp_counters[i];

                if (pmc->counter != rdpmc(i))
                        wrmsrq(gp_counter_msr(i), pmc->counter);
                wrmsrq(gp_eventsel_msr(i), pmc->eventsel_hw);
        }
        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                pmc = &pmu->fixed_counters[i];

                if (pmc->counter != rdpmc(INTEL_PMC_FIXED_RDPMC_BASE | i))
                        wrmsrq(fixed_counter_msr(i), pmc->counter);
        }
}

void kvm_mediated_pmu_load(struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_has_mediated_pmu(vcpu) ||
            KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
                return;

        lockdep_assert_irqs_disabled();

        perf_load_guest_context();

        /*
         * Explicitly clear PERF_GLOBAL_CTRL, as "loading" the guest's context
         * disables all individual counters (if any were enabled), but doesn't
         * globally disable the entire PMU.  Loading event selectors and PMCs
         * with guest values while PERF_GLOBAL_CTRL is non-zero will generate
         * unexpected events and PMIs.
         *
         * VMX will enable/disable counters at VM-Enter/VM-Exit by atomically
         * loading PERF_GLOBAL_CTRL.  SVM effectively performs the switch by
         * configuring all events to be GUEST_ONLY.  Clear PERF_GLOBAL_CTRL
         * even for SVM to minimize the damage if a perf event is left enabled,
         * and to ensure a consistent starting state.
         */
        wrmsrq(kvm_pmu_ops.PERF_GLOBAL_CTRL, 0);

        perf_load_guest_lvtpc(kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVTPC));

        kvm_pmu_load_guest_pmcs(vcpu);

        kvm_pmu_call(mediated_load)(vcpu);
}

static void kvm_pmu_put_guest_pmcs(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 i;

        /*
         * Clear selectors and counters to ensure hardware doesn't count using
         * guest controls when the host (perf) restores its state.
         */
        for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
                pmc = &pmu->gp_counters[i];

                pmc->counter = rdpmc(i);
                if (pmc->counter)
                        wrmsrq(gp_counter_msr(i), 0);
                if (pmc->eventsel_hw)
                        wrmsrq(gp_eventsel_msr(i), 0);
        }

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                pmc = &pmu->fixed_counters[i];

                pmc->counter = rdpmc(INTEL_PMC_FIXED_RDPMC_BASE | i);
                if (pmc->counter)
                        wrmsrq(fixed_counter_msr(i), 0);
        }
}

void kvm_mediated_pmu_put(struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_has_mediated_pmu(vcpu) ||
            KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
                return;

        lockdep_assert_irqs_disabled();

        /*
         * Defer handling of PERF_GLOBAL_CTRL to vendor code.  On Intel, it's
         * atomically cleared on VM-Exit, i.e. doesn't need to be cleared
         * here.
         */
        kvm_pmu_call(mediated_put)(vcpu);

        kvm_pmu_put_guest_pmcs(vcpu);

        perf_put_guest_lvtpc();

        perf_put_guest_context();
}