dsu_pmu
/*
 * Recover the driver-private struct dsu_pmu from its embedded struct pmu.
 * NOTE(review): SOURCE is a line-extraction; the function's braces are
 * missing here. The visible body is a single container_of() return.
 */
static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
return container_of(pmu, struct dsu_pmu, pmu);
/*
 * Fragment of a sysfs cpumask show routine (surrounding function and the
 * selector between the two assignments are not visible here).
 * It picks one of the two per-PMU masks:
 *   - active_cpu:      the CPU currently servicing this PMU
 *   - associated_cpus: all CPUs attached to this DSU instance
 * NOTE(review): which attribute selects which mask cannot be confirmed
 * from this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
cpumask = &dsu_pmu->active_cpu;
cpumask = &dsu_pmu->associated_cpus;
/*
 * Fragment of an event-attribute visibility callback: an event attribute
 * is shown only if the hardware advertises the event id (evt) in the
 * CPMCEID bitmap probed from the DSU; otherwise mode 0 hides it.
 * NOTE(review): derivation of `evt` and `attr` is outside this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? attr->mode : 0;
/*
 * Validate a counter index against the probed counter count.
 * NOTE(review): the second operand of the `||` is missing from this
 * extract (truncated line) — presumably a special-case index such as a
 * dedicated cycle counter. Confirm against the full source.
 */
static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
return (idx < dsu_pmu->num_counters) ||
/*
 * Fragment of a counter-access path (likely the read side). Visible
 * structure: (1) refuse to run on a CPU outside associated_cpus,
 * (2) validate the counter index, (3) perform the hardware access under
 * pmu_lock with interrupts disabled (system registers are shared across
 * the cluster, hence the raw spinlock).
 * NOTE(review): the actual register access between lock/unlock is not
 * visible in this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
&dsu_pmu->associated_cpus)))
if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
/*
 * Fragment of the complementary counter-access path (likely the write
 * side) — identical guard structure to the block above: CPU-affinity
 * check, index validation, then hardware access under pmu_lock.
 * NOTE(review): body between lock/unlock not visible in this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
&dsu_pmu->associated_cpus)))
if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
/*
 * Fragment of counter allocation: a CYCLES event gets the dedicated
 * cycle counter when the hardware has one (has_pmccntr); any other event
 * claims the first free general-purpose counter from used_mask, failing
 * when all num_counters are busy.
 * NOTE(review): the returns/else branches are elided in this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
if (evtype == DSU_PMU_EVT_CYCLES && dsu_pmu->has_pmccntr) {
idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters);
if (idx >= dsu_pmu->num_counters)
/*
 * Signatures of the low-level counter-control helpers (bodies elided):
 * enable/disable a single counter and program an event code into one.
 * The trailing lines appear to belong to dsu_pmu_set_event's body —
 * validate the index, then program the hardware under pmu_lock.
 */
static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx)
static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx)
static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu,
if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
/*
 * Fragment of the overflow IRQ handler: the dev cookie registered with
 * the IRQ is the dsu_pmu itself; the handler then walks the per-PMU
 * hw_events state. Overflow scanning/handling is elided in this extract.
 */
struct dsu_pmu *dsu_pmu = dev;
struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
/*
 * Fragments of the perf start/stop callbacks:
 *  - start: program the event code, then enable its counter (hw.idx).
 *  - stop:  disable the counter.
 * Period setup / event_update calls are elided in this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
dsu_pmu_set_event(dsu_pmu, event);
dsu_pmu_enable_counter(dsu_pmu, event->hw.idx);
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
dsu_pmu_disable_counter(dsu_pmu, event->hw.idx);
/*
 * Fragments of the perf add/del callbacks: both resolve the PMU and its
 * hw_events bookkeeping; add additionally rejects CPUs outside
 * associated_cpus (truncated condition on the third line).
 * Counter claim/release logic is elided in this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
&dsu_pmu->associated_cpus)))
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
/*
 * Fragment of pmu_enable: skip touching hardware when no counters are in
 * use (used_mask empty over the fixed maximum), otherwise flip the global
 * enable under pmu_lock. The register write itself is elided here.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS))
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
/*
 * Fragment of pmu_disable: global disable performed under pmu_lock
 * (no empty-mask shortcut visible here, unlike the enable path).
 * The register write itself is elided in this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
/*
 * Fragment of event_init. Visible policy:
 *  - sampling events, per-task counting, and filtering are all rejected
 *    (the DSU PMU counts cluster-wide, not per context);
 *  - the requested CPU must belong to this DSU's associated_cpus;
 *  - the event is then retargeted onto the single active CPU so all
 *    events for one DSU are managed from one place;
 *  - hw.flags records whether counters are 32-bit (has_32b_pmevcntr).
 * NOTE(review): the early `return -E...` statements paired with each
 * dev_dbg are elided in this extract.
 */
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
dev_dbg(dsu_pmu->pmu.dev,
event->cpu = cpumask_first(&dsu_pmu->active_cpu);
event->hw.flags = dsu_pmu->has_32b_pmevcntr;
/*
 * Allocate and minimally initialise a dsu_pmu (device-managed, so no
 * explicit free path is needed). num_counters is set to -1 as a
 * "not probed yet" sentinel — see dsu_pmu_init_pmu, which probes lazily.
 * NOTE(review): the error-return after the NULL check is elided here.
 */
static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
struct dsu_pmu *dsu_pmu;
dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL);
if (!dsu_pmu)
raw_spin_lock_init(&dsu_pmu->pmu_lock);
/* sentinel: counters not yet probed from hardware */
dsu_pmu->num_counters = -1;
return dsu_pmu;
/*
 * Fragment of the hardware probe: reads the counter count, imports the
 * supported-event id bitmap (cpmceid) from 32-bit register words, and
 * latches feature flags (32-bit event counters, dedicated cycle counter).
 * NOTE(review): the register reads producing num_counters/cpmceid and
 * the conditions guarding the two feature flags are elided here.
 */
static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
dsu_pmu->num_counters = num_counters;
if (!dsu_pmu->num_counters)
bitmap_from_arr32(dsu_pmu->cpmceid_bitmap, cpmceid,
dsu_pmu->has_32b_pmevcntr = true;
dsu_pmu->has_pmccntr = true;
/*
 * Mark `cpu` as the CPU that services this PMU and steer the overflow
 * IRQ to it, so interrupts arrive where the events are managed.
 * NOTE(review): the warning/error path of the irq_set_affinity failure
 * branch is elided in this extract.
 */
static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
if (irq_set_affinity(dsu_pmu->irq, &dsu_pmu->active_cpu))
/*
 * Lazy hardware probe: num_counters == -1 is the sentinel set by
 * dsu_pmu_alloc meaning the DSU registers have not been read yet
 * (probing must happen on an associated CPU, hence the deferral).
 * NOTE(review): any further init after the probe call is elided here.
 */
static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu)
if (dsu_pmu->num_counters == -1)
dsu_pmu_probe_pmu(dsu_pmu);
/*
 * Fragment of the platform-driver probe. Visible sequence:
 *  1. allocate the dsu_pmu (IS_ERR-checked);
 *  2. discover associated CPUs via DT or ACPI (both call sites visible;
 *     the firmware-type selector between them is elided);
 *  3. request the overflow IRQ (IRQF_NOBALANCING: affinity is managed
 *     by dsu_pmu_set_active_cpu, not the balancer) and record it;
 *  4. stash drvdata, register on the cpuhp multi-instance state;
 *  5. fill the struct pmu ops and register with perf; the second
 *     cpuhp_node line is presumably the unwind path on registration
 *     failure — TODO confirm against the full source.
 */
struct dsu_pmu *dsu_pmu;
dsu_pmu = dsu_pmu_alloc(pdev);
if (IS_ERR(dsu_pmu))
return PTR_ERR(dsu_pmu);
rc = dsu_pmu_dt_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
rc = dsu_pmu_acpi_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
IRQF_NOBALANCING, name, dsu_pmu);
dsu_pmu->irq = irq;
platform_set_drvdata(pdev, dsu_pmu);
&dsu_pmu->cpuhp_node);
dsu_pmu->pmu = (struct pmu) {
rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
&dsu_pmu->cpuhp_node);
/*
 * Fragment of driver remove: tear down in reverse of probe —
 * unregister from perf, then drop off the cpuhp multi-instance state.
 */
struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&dsu_pmu->pmu);
cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
/*
 * Fragment of the cpuhp online callback: ignore CPUs not associated with
 * this DSU; if an active CPU already exists, nothing to do (early return
 * elided); otherwise this is the first associated CPU up — lazily probe
 * the hardware and make this CPU the active one.
 */
struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
if (!cpumask_empty(&dsu_pmu->active_cpu))
dsu_pmu_init_pmu(dsu_pmu);
dsu_pmu_set_active_cpu(cpu, dsu_pmu);
/*
 * Fragment of the cpuhp teardown callback: only act when the CPU going
 * down is the active one (test_and_clear also clears it). Pick another
 * associated CPU — cpumask_any_and_but excludes the dying CPU itself —
 * migrate the perf context to it, and make it the new active CPU.
 * NOTE(review): the "no remaining CPU" bail-out between the dst lookup
 * and the migrate call is elided in this extract.
 */
struct dsu_pmu *dsu_pmu;
dsu_pmu = hlist_entry_safe(node, struct dsu_pmu, cpuhp_node);
if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
dst = cpumask_any_and_but(&dsu_pmu->associated_cpus,
perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
dsu_pmu_set_active_cpu(dst, dsu_pmu);