INTEL_PMC_IDX_FIXED
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
hwc->event_base = x86_pmu_fixed_ctr_addr(idx - INTEL_PMC_IDX_FIXED);
hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
if (i >= INTEL_PMC_IDX_FIXED) {
if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
wrmsrq(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0);
if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
idx = INTEL_PMC_IDX_FIXED;
for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
if (idx < INTEL_PMC_IDX_FIXED) {
x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);
case 0 ... INTEL_PMC_IDX_FIXED - 1:
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
idx -= INTEL_PMC_IDX_FIXED;
if (idx < INTEL_PMC_IDX_FIXED) {
msr_offset = x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);
case 0 ... INTEL_PMC_IDX_FIXED - 1:
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
*intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
if (!new_weight && fls64(c1->idxmsk64) < INTEL_PMC_IDX_FIXED) {
/*
 * Build a combined 64-bit counter bitmask: general-purpose counter bits
 * occupy the low bits, fixed-counter bits are shifted up into the fixed
 * index region starting at INTEL_PMC_IDX_FIXED (bit 32).
 */
#define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED))
c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED);
if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
if (idx >= INTEL_PMC_IDX_FIXED)
*pebs_data_cfg |= PEBS_DATACFG_FIX_BIT(idx - INTEL_PMC_IDX_FIXED);
if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
idx = hwc->idx - INTEL_PMC_IDX_FIXED;
if (idx >= INTEL_PMC_IDX_FIXED) {
idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED);
idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
(bit + INTEL_PMC_IDX_FIXED == INTEL_PMC_IDX_FIXED_SLOTS)) {
intel_perf_event_update_pmc(cpuc->events[bit + INTEL_PMC_IDX_FIXED],
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
mask |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED;
size = INTEL_PMC_IDX_FIXED + x86_pmu_max_num_counters_fixed(NULL);
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS];
(hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS];
return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
x86_pmu.intel_ctrl |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED;
/*
 * Global perf-counter index space for Intel fixed-function counters.
 * Indices below INTEL_PMC_IDX_FIXED are general-purpose counters; the
 * fixed counters occupy INTEL_PMC_IDX_FIXED + n, where n is the
 * architectural fixed-counter number (FIXED_CTR0 = instructions retired,
 * FIXED_CTR1 = core cycles, FIXED_CTR2 = reference cycles,
 * FIXED_CTR3 = topdown slots).
 */
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
/* Pseudo-index for BTS (Branch Trace Store); not a real fixed counter. */
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 15)
/* First pseudo-index used for topdown perf-metrics events. */
#define INTEL_PMC_IDX_METRIC_BASE (INTEL_PMC_IDX_FIXED + 16)
/* KVM's alias for the base index of fixed-function PMCs in the global index space. */
#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED