Call sites of the hybrid() accessor macro in the x86 perf/events code (arch/x86/events/):
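For reference, the accessor itself lives in arch/x86/events/perf_event.h. Paraphrased from the upstream definition (check the header for the authoritative version), it resolves a field either in the global x86_pmu or, on hybrid parts, in the struct x86_hybrid_pmu wrapping the given struct pmu, and it yields an lvalue so the same expression works for both reads and assignments:

#define hybrid(_pmu, _field)				\
(*({							\
	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;	\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_field;	\
							\
	__Fp;						\
}))

hybrid_pmu() is a container_of() from the generic struct pmu back to struct x86_hybrid_pmu; passing a NULL pmu (or running on a non-hybrid part) falls through to the global x86_pmu fields.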
union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);
cntr_mask = hybrid(cpuc->pmu, cntr_mask);
fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);
struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
pr_info("... generic bitmap: %016llx\n", hybrid(pmu, cntr_mask64));
pr_info("... fixed-purpose bitmap: %016llx\n", hybrid(pmu, fixed_cntr_mask64));
pr_info("... global_ctrl mask: %016llx\n", hybrid(pmu, intel_ctrl));
if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
if (hybrid(pmu, arch_pebs_cap).pdists)
u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
cap = hybrid(cpuc->pmu, arch_pebs_cap);
unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
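Note the two views used above and below: cntr_mask/fixed_cntr_mask are bitmap (unsigned long *) views for the bitops helpers such as test_bit(), while cntr_mask64/fixed_cntr_mask64 are u64 views of the same storage. In struct x86_pmu they are declared as anonymous unions, roughly:

	union {
		u64		cntr_mask64;
		unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	};
	union {
		u64		fixed_cntr_mask64;
		unsigned long	fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	};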
status &= hybrid(cpuc->pmu, intel_ctrl);
struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
return !!hybrid(pmu, acr_cause_mask64);
caps = hybrid(pmu, arch_pebs_cap).caps;
event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap);
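The arch_pebs_cap accesses here and elsewhere in the listing (.caps, .counters, .pdists) imply a small capability struct; judging only from the fields used here, its minimal shape is:

	struct arch_pebs_cap {
		u64	caps;		/* ARCH_PEBS_* feature bits, e.g. ARCH_PEBS_VECR_XMM */
		u64	counters;	/* counters capable of arch-PEBS */
		u64	pdists;		/* counters capable of precise distribution */
	};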
u64 cntr_mask = hybrid(event->pmu, intel_ctrl) &
if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) ||
num > hweight64(hybrid(event->pmu, acr_cntr_mask64)))
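The dyn_constraint intersections and hweight64() comparisons above are auto-counter-reload (ACR) validation at event-configuration time: the event's dynamic counter constraint is narrowed to the counters that support reload (acr_cntr_mask64) or can act as a reload cause (acr_cause_mask64), and the request is rejected when the group needs more of either kind than the PMU advertises.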
u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2;
mask = hybrid(dev_get_drvdata(dev), config_mask);
union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap);
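The dev_get_drvdata(dev) calls above come from sysfs attribute callbacks, where the struct pmu is recovered from the device's driver data. A hedged sketch of the shape (the function name and output format are illustrative, not the upstream code):

static ssize_t umask2_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	/* dev_get_drvdata() yields the struct pmu this attribute hangs off */
	u64 mask = hybrid(dev_get_drvdata(dev), config_mask) &
		   ARCH_PERFMON_EVENTSEL_UMASK2;

	return sysfs_emit(buf, "%d\n", mask ? 1 : 0);
}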
mask = hybrid(pmu, acr_cntr_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
if (hybrid(pmu, acr_cntr_mask64) == hybrid(pmu, acr_cause_mask64))
mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
mask = hybrid(pmu, arch_pebs_cap).counters;
mask = hybrid(pmu, arch_pebs_cap).pdists;
struct event_constraint *event_constraints = hybrid(pmu, event_constraints);
struct event_constraint *pebs_constraints = hybrid(pmu, pebs_constraints);
u64 cntr_mask = hybrid(pmu, cntr_mask64);
u64 fixed_cntr_mask = hybrid(pmu, fixed_cntr_mask64);
u64 intel_ctrl = hybrid(pmu, intel_ctrl);
if (hybrid(pmu, arch_pebs_cap).caps & ARCH_PEBS_VECR_XMM)
u64 caps = hybrid(pmu, arch_pebs_cap).caps;
hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE;
hybrid(pmu, cntr_mask64) = eax;
hybrid(pmu, fixed_cntr_mask64) = ebx;
hybrid(pmu, acr_cntr_mask64) = counter_mask(eax, ebx);
hybrid(pmu, acr_cause_mask64) = counter_mask(ecx, edx);
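counter_mask() is not shown in this listing; given that the GP-counter mask arrives in one CPUID register and the fixed-counter mask in the other, a plausible shape (an assumption, not verified against the upstream helper) is:

static u64 counter_mask(u32 gp_cntr_mask, u32 fixed_cntr_mask)
{
	/* fixed counters live at INTEL_PMC_IDX_FIXED and up in the 64-bit view */
	return ((u64)fixed_cntr_mask << INTEL_PMC_IDX_FIXED) | gp_cntr_mask;
}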
hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32;
hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities);
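All of the assignments above, including using hybrid() as the destination operand of rdmsrq(), work because the macro expands to a dereferenced pointer expression and is therefore an lvalue; there is no separate setter.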
hybrid(pmu, event_constraints) = intel_glc_event_constraints;
hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
hybrid(pmu, event_constraints) = intel_grt_event_constraints;
hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
hybrid(pmu, extra_regs) = intel_grt_extra_regs;
hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
hybrid(pmu, event_constraints) = intel_pnc_event_constraints;
hybrid(pmu, pebs_constraints) = intel_pnc_pebs_event_constraints;
hybrid(pmu, extra_regs) = intel_pnc_extra_regs;
hybrid(pmu, event_constraints) = intel_skt_event_constraints;
hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
hybrid(pmu, event_constraints) = intel_arw_event_constraints;
hybrid(pmu, pebs_constraints) = intel_arw_pebs_event_constraints;
hybrid(pmu, extra_regs) = intel_arw_extra_regs;
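These assignments sit in small per-microarchitecture init helpers. Because hybrid() falls back to the global x86_pmu when the pmu argument is NULL, one helper serves both hybrid and non-hybrid parts. A simplified sketch in the style of the upstream helpers (the real ones also copy cache-event tables and set flags):

static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
{
	/* pmu == NULL writes the global x86_pmu fields on non-hybrid parts */
	hybrid(pmu, event_constraints) = intel_grt_event_constraints;
	hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
	hybrid(pmu, extra_regs) = intel_grt_extra_regs;
}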
struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
mask = hybrid(cpuc->pmu, pebs_events_mask) |
(hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
mask = hybrid(cpuc->pmu, arch_pebs_cap).counters & cpuc->pebs_enabled;
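The two mask computations above pick out the enabled counters that are PEBS-capable: the legacy path combines the GP pebs_events_mask with the fixed counters shifted up to INTEL_PMC_IDX_FIXED, while the arch-PEBS path intersects the advertised arch_pebs_cap.counters with the counters currently enabled for PEBS.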
return hweight64(hybrid(pmu, cntr_mask64));
return fls64(hybrid(pmu, cntr_mask64));
return hweight64(hybrid(pmu, fixed_cntr_mask64));
return fls64(hybrid(pmu, fixed_cntr_mask64));
return event->attr.config & hybrid(event->pmu, config_mask);
return !!(hybrid(pmu, config_mask) &
u64 intel_ctrl = hybrid(pmu, intel_ctrl);
return fls((u32)hybrid(pmu, pebs_events_mask));
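The hweight64()/fls64() returns above come from one-line helpers that report how many GP/fixed counters a (possibly hybrid) PMU exposes and the highest counter index in use, approximately as defined in arch/x86/events/perf_event.h:

static inline int x86_pmu_num_counters(struct pmu *pmu)
{
	return hweight64(hybrid(pmu, cntr_mask64));
}

static inline int x86_pmu_max_num_counters(struct pmu *pmu)
{
	return fls64(hybrid(pmu, cntr_mask64));
}

static inline int x86_pmu_num_counters_fixed(struct pmu *pmu)
{
	return hweight64(hybrid(pmu, fixed_cntr_mask64));
}

static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
{
	return fls64(hybrid(pmu, fixed_cntr_mask64));
}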