idxd_pmu
struct idxd_pmu *idxd_pmu;
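/* per-device perfmon state; the embedded struct pmu is what gets registered with the perf core */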
static int perfmon_assign_event(struct idxd_pmu *idxd_pmu,
if (!test_and_set_bit(i, idxd_pmu->used_mask))
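/*
 * A minimal sketch of the function these two lines come from,
 * perfmon_assign_event(): the first free counter is claimed atomically
 * through the used_mask bitmap. The IDXD_PMU_EVENT_MAX bound is an
 * assumed name from the driver's idxd.h.
 */
static int perfmon_assign_event(struct idxd_pmu *idxd_pmu,
                                struct perf_event *event)
{
        int i;

        /* test_and_set_bit() returns the old bit, so 0 means the slot was free */
        for (i = 0; i < IDXD_PMU_EVENT_MAX; i++)
                if (!test_and_set_bit(i, idxd_pmu->used_mask))
                        return i;

        return -EINVAL;
}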
static int perfmon_validate_group(struct idxd_pmu *pmu,
struct idxd_pmu *fake_pmu;
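/*
 * Sketch of how fake_pmu is used in perfmon_validate_group(): the
 * group is "scheduled" onto a throwaway copy so that capacity checks
 * never disturb live counter state. A condensed version, assuming
 * perfmon_collect_events() as listed further below:
 */
static int perfmon_validate_group(struct idxd_pmu *pmu,
                                  struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;
        struct idxd_pmu *fake_pmu;
        int n, ret = 0;

        fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL);
        if (!fake_pmu)
                return -ENOMEM;

        fake_pmu->pmu.name = pmu->pmu.name;
        fake_pmu->n_counters = pmu->n_counters;

        /* first the leader and its siblings, then the candidate event itself */
        n = perfmon_collect_events(fake_pmu, leader, true);
        if (n < 0) {
                ret = n;
                goto out;
        }

        fake_pmu->n_events = n;
        n = perfmon_collect_events(fake_pmu, event, false);
        if (n < 0)
                ret = n;

out:
        kfree(fake_pmu);
        return ret;
}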
if (event->pmu != &idxd->idxd_pmu->pmu)
ret = perfmon_validate_group(idxd->idxd_pmu, event);
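/*
 * Condensed sketch of perfmon_pmu_event_init() around the two checks
 * above: events targeting another PMU instance are rejected, and group
 * validation runs only for events that actually belong to a group.
 */
static int perfmon_pmu_event_init(struct perf_event *event)
{
        struct idxd_device *idxd = event_to_idxd(event);
        int ret = 0;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* the event must target this device's PMU instance */
        if (event->pmu != &idxd->idxd_pmu->pmu)
                return -EINVAL;

        event->hw.config = event->attr.config;

        /* ungrouped events are their own leader; nothing to cross-check */
        if (event->group_leader != event)
                ret = perfmon_validate_group(idxd->idxd_pmu, event);

        return ret;
}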
int shift = 64 - idxd->idxd_pmu->counter_width;
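/*
 * The shift above adapts a hardware counter narrower than 64 bits:
 * shifting raw values up by (64 - counter_width) makes wraparound fall
 * out of ordinary 64-bit arithmetic, and the delta is shifted back
 * down. Sketch of the update path, with CNTRDATA_REG() assumed from
 * the driver's perfmon.h:
 */
static void perfmon_pmu_event_update(struct perf_event *event)
{
        struct idxd_device *idxd = event_to_idxd(event);
        int shift = 64 - idxd->idxd_pmu->counter_width;
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, now, delta;

        do {
                prev = local64_read(&hwc->prev_count);
                now = ioread64(CNTRDATA_REG(idxd, hwc->idx));
        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        /* difference in the counter's own width, scaled back to a count */
        delta = ((now << shift) - (prev << shift)) >> shift;
        local64_add(delta, &event->count);
}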
n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);
event = idxd->idxd_pmu->event_list[i];
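/*
 * Sketch of the overflow handler around the two lines above: each set
 * bit in the overflow status register maps 1:1 onto a slot in
 * event_list, so handling an overflow is a count update plus an
 * acknowledge. OVFSTATUS_REG() is an assumed perfmon.h macro, and the
 * write-1-to-clear semantics are assumed from the hardware spec.
 */
void perfmon_counter_overflow(struct idxd_device *idxd)
{
        int i, n_counters, max_loop = OVERFLOW_SIZE;
        struct perf_event *event;
        unsigned long ovfstatus;

        n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);
        ovfstatus = ioread32(OVFSTATUS_REG(idxd));

        /*
         * Counters can wrap again while earlier ones are serviced, so
         * re-read until clear; max_loop bounds the pathological case.
         */
        while (ovfstatus && max_loop--) {
                for_each_set_bit(i, &ovfstatus, n_counters) {
                        event = idxd->idxd_pmu->event_list[i];
                        perfmon_pmu_event_update(event);
                        iowrite32(BIT(i), OVFSTATUS_REG(idxd));
                }
                ovfstatus = ioread32(OVFSTATUS_REG(idxd));
        }
}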
if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters))
if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters))
if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters))
if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters))
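/*
 * The five tests above sit in the event-start path: each filter the
 * user requested is programmed only if the hardware advertised it in
 * supported_filters. Sketch of where the flt_* values come from; the
 * union filter_cfg layout and FLTCFG_REG() are assumed from the
 * driver's perfmon.h:
 */
        union filter_cfg flt_cfg;
        u32 flt_wq, flt_tc;

        /* filters arrive from user space packed into attr.config1 */
        flt_cfg.val = event->attr.config1;
        flt_wq = flt_cfg.wq;
        flt_tc = flt_cfg.tc;

        if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
                iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));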
for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
if (event != idxd->idxd_pmu->event_list[i])
for (++i; i < idxd->idxd_pmu->n_events; i++)
idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
--idxd->idxd_pmu->n_events;
clear_bit(cntr, idxd->idxd_pmu->used_mask);
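/*
 * Assembled sketch of perfmon_pmu_event_stop() from the fragments
 * above: the event is compacted out of event_list, its counter's
 * enable bit is cleared, and the used_mask slot is released so
 * perfmon_assign_event() can hand it out again. CNTRCFG_REG() and
 * CNTRCFG_ENABLE are assumed from perfmon.h.
 */
static void perfmon_pmu_event_stop(struct perf_event *event, int mode)
{
        struct idxd_device *idxd = event_to_idxd(event);
        struct hw_perf_event *hwc = &event->hw;
        int i, cntr = hwc->idx;
        u64 cntr_cfg;

        /* slide later entries down over the removed event */
        for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
                if (event != idxd->idxd_pmu->event_list[i])
                        continue;
                for (++i; i < idxd->idxd_pmu->n_events; i++)
                        idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
                --idxd->idxd_pmu->n_events;
                break;
        }

        cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
        cntr_cfg &= ~CNTRCFG_ENABLE;
        iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));

        if (mode == PERF_EF_UPDATE)
                perfmon_pmu_event_update(event);

        event->hw.idx = -1;
        clear_bit(cntr, idxd->idxd_pmu->used_mask);
}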
struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
n = perfmon_collect_events(idxd_pmu, event, false);
idx = perfmon_assign_event(idxd_pmu, event);
perfmon_assign_hw_event(idxd_pmu, event, idx);
idxd_pmu->n_events = n;
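/*
 * Sketch of perfmon_pmu_event_add() around the four lines above:
 * collect validates capacity, assign claims a free counter, the hw
 * event is bound to it, and only then is n_events committed.
 */
static int perfmon_pmu_event_add(struct perf_event *event, int flags)
{
        struct idxd_device *idxd = event_to_idxd(event);
        struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
        struct hw_perf_event *hwc = &event->hw;
        int idx, n;

        n = perfmon_collect_events(idxd_pmu, event, false);
        if (n < 0)
                return n;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        idx = perfmon_assign_event(idxd_pmu, event);
        if (idx < 0)
                return idx;

        perfmon_assign_hw_event(idxd_pmu, event, idx);

        if (flags & PERF_EF_START)
                perfmon_pmu_event_start(event, 0);

        idxd_pmu->n_events = n;

        return 0;
}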
static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
if (!test_bit(i, &idxd_pmu->supported_filters))
idxd_pmu->pmu.name = idxd_pmu->name;
idxd_pmu->pmu.attr_groups = perfmon_attr_groups;
idxd_pmu->pmu.task_ctx_nr = perf_invalid_context;
idxd_pmu->pmu.event_init = perfmon_pmu_event_init;
idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable;
idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;
idxd_pmu->pmu.add = perfmon_pmu_event_add;
idxd_pmu->pmu.del = perfmon_pmu_event_del;
idxd_pmu->pmu.start = perfmon_pmu_event_start;
idxd_pmu->pmu.stop = perfmon_pmu_event_stop;
idxd_pmu->pmu.read = perfmon_pmu_event_update;
idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
idxd_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE;
idxd_pmu->pmu.module = THIS_MODULE;
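/*
 * With task_ctx_nr = perf_invalid_context and a system-wide scope,
 * these counters are only usable as system-wide perf events. An
 * illustrative invocation (event encodings vary by device and are not
 * taken from this listing):
 *
 *   perf stat -e dsa0/event_category=0x1,event=0x1/ -a -- sleep 1
 */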
if (!idxd->idxd_pmu)
perf_pmu_unregister(&idxd->idxd_pmu->pmu);
kfree(idxd->idxd_pmu);
idxd->idxd_pmu = NULL;
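/*
 * Assembled from the four lines above, perfmon_pmu_remove() in full:
 * the NULL check makes teardown safe even if perfmon_pmu_init() failed
 * or never ran, and clearing the pointer keeps the unwind idempotent.
 */
void perfmon_pmu_remove(struct idxd_device *idxd)
{
        if (!idxd->idxd_pmu)
                return;

        perf_pmu_unregister(&idxd->idxd_pmu->pmu);
        kfree(idxd->idxd_pmu);
        idxd->idxd_pmu = NULL;
}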
struct idxd_pmu *idxd_pmu;
idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL);
if (!idxd_pmu)
idxd_pmu->idxd = idxd;
idxd->idxd_pmu = idxd_pmu;
rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
return &idxd_pmu->pmu == event->pmu;
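/*
 * is_idxd_event() lets perfmon_collect_events() skip group siblings
 * that belong to other PMUs, e.g. software events grouped with these
 * counting events.
 */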
idxd_pmu->n_event_categories = perfcap.num_event_category;
idxd_pmu->supported_event_categories = perfcap.global_event_category;
idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter;
idxd_pmu->supported_filters = perfcap.filter;
idxd_pmu->n_filters = hweight8(perfcap.filter);
idxd_pmu->n_counters = perfcap.num_perf_counter;
idxd_pmu->counter_width = perfcap.counter_width;
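/*
 * The assignments above copy capability fields out of the PERFCAP
 * register into software state. Minimal sketch of the read plus the
 * kind of sanity checks that precede it; union idxd_perfcap and
 * PERFCAP_REG() are assumed from the driver's headers:
 */
        union idxd_perfcap perfcap;

        perfcap.bits = ioread64(PERFCAP_REG(idxd));

        /* a device with no counters, or zero-width counters, can't count */
        if (perfcap.num_perf_counter == 0 || perfcap.counter_width == 0)
                goto free;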
idxd_pmu_init(idxd_pmu);
static int perfmon_collect_events(struct idxd_pmu *idxd_pmu,
rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1);
kfree(idxd_pmu);
idxd->idxd_pmu = NULL;
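/*
 * Condensed sketch of the registration tail of perfmon_pmu_init():
 * every failure funnels through the same free path shown in the two
 * lines above, so a half-initialized PMU is never left behind.
 */
        idxd_pmu_init(idxd_pmu);

        rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1);
        if (rc)
                goto free;

        return 0;

free:
        kfree(idxd_pmu);
        idxd->idxd_pmu = NULL;
        return rc;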
max_count = idxd_pmu->n_counters;
n = idxd_pmu->n_events;
if (is_idxd_event(idxd_pmu, leader)) {
idxd_pmu->event_list[n] = leader;
idxd_pmu->event_list[n]->hw.idx = n;
if (!is_idxd_event(idxd_pmu, event) ||
idxd_pmu->event_list[n] = event;
idxd_pmu->event_list[n]->hw.idx = n;
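/*
 * Assembled sketch of perfmon_collect_events() from the fragments
 * above: the leader (and, when do_grp is set, its siblings) is
 * appended to event_list, bailing out with -EINVAL once n_counters
 * would be exceeded.
 */
static int perfmon_collect_events(struct idxd_pmu *idxd_pmu,
                                  struct perf_event *leader, bool do_grp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = idxd_pmu->n_counters;
        n = idxd_pmu->n_events;

        if (n >= max_count)
                return -EINVAL;

        if (is_idxd_event(idxd_pmu, leader)) {
                idxd_pmu->event_list[n] = leader;
                idxd_pmu->event_list[n]->hw.idx = n;
                n++;
        }

        if (!do_grp)
                return n;

        for_each_sibling_event(event, leader) {
                if (!is_idxd_event(idxd_pmu, event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -EINVAL;

                idxd_pmu->event_list[n] = event;
                idxd_pmu->event_list[n]->hw.idx = n;
                n++;
        }

        return n;
}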
static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
struct idxd_device *idxd = idxd_pmu->idxd;
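/*
 * Sketch of perfmon_assign_hw_event(): the claimed index is recorded
 * in the hw_perf_event along with the counter's MMIO locations.
 * Caching the register addresses in config_base/event_base is an
 * assumption for illustration; CNTRCFG_REG()/CNTRDATA_REG() are
 * assumed from perfmon.h.
 */
static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
                                    struct perf_event *event, int idx)
{
        struct idxd_device *idxd = idxd_pmu->idxd;
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = idx;
        hwc->config_base = (unsigned long)CNTRCFG_REG(idxd, idx);
        hwc->event_base = (unsigned long)CNTRDATA_REG(idxd, idx);
}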
static inline struct idxd_pmu *event_to_pmu(struct perf_event *event)
struct idxd_pmu *idxd_pmu;
idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
return idxd_pmu;
struct idxd_pmu *idxd_pmu;
idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
return idxd_pmu->idxd;
struct idxd_pmu *idxd_pmu;
idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
return idxd_pmu->idxd;
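/*
 * The two near-identical fragments above are separate helpers: one
 * starts from a struct perf_event (via event->pmu), the other directly
 * from a struct pmu. Both work because struct pmu is embedded inside
 * struct idxd_pmu, so container_of() recovers the wrapper. Assembled
 * sketch of the pmu-based variant (helper name assumed):
 */
static inline struct idxd_device *pmu_to_idxd(struct pmu *pmu)
{
        struct idxd_pmu *idxd_pmu;

        idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);

        return idxd_pmu->idxd;
}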