smmu_pmu
/*
 * Recover the driver-private struct smmu_pmu from the embedded
 * struct pmu handed to us by the perf core.
 * NOTE(review): the bare "smmu_pmu" token above looks like
 * extraction residue (struct name stripped of its definition) --
 * confirm against the full file.
 */
#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
/*
 * NOTE(review): the first half of this writel() call was lost in
 * extraction; presumably it programs SMMU_PMCG_IRQ_CTRL (IRQ
 * enable) before the global enable below -- confirm in full file.
 */
smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
/* Set CR.ENABLE to start the counter group. */
writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
/* Forward declaration; full definition appears later in the file. */
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
/*
 * Quirk-enable fragment: before (re-)enabling, walk every in-use
 * counter and re-apply its event filter. NOTE(review): enclosing
 * function header elided; presumably the HiSilicon
 * SMMU_PMCG_HARDEN_DISABLE enable hook -- confirm in full file.
 */
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
/* Clear CR (global disable), then mask the PMCG interrupt. */
writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
/*
 * Quirk-disable fragment: write an (architecturally unassigned)
 * event type 0xffff to every in-use counter's EVTYPER.
 * NOTE(review): presumably this stops counting on hardware where
 * clearing CR alone is insufficient -- confirm in full file.
 */
for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
/*
 * Write a counter value: use a single 64-bit access when the
 * counters are 64 bits wide (counter_mask has bit 32 set),
 * otherwise a 32-bit access. EVCNTR takes the register stride
 * (8 vs 4) as its second argument.
 */
static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
if (smmu_pmu->counter_mask & BIT(32))
writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
/* Read back a counter value; mirrors the width selection above. */
static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
if (smmu_pmu->counter_mask & BIT(32))
value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
/*
 * Per-counter enable/disable and overflow-interrupt enable/disable
 * helpers. Each writes a single counter's bit to the relevant
 * set/clear register, so no read-modify-write is needed.
 */
static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
/* Program the event type register for counter @idx. */
static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
/* Program the stream match register (StreamID filter) for @idx. */
static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
/*
 * Event-update fragment: read the current hardware count and mask
 * the delta to the counter width so wrap-around is handled.
 * NOTE(review): the prev_count compare-exchange loop usual for
 * perf drivers is elided here -- confirm in full file.
 */
now = smmu_pmu_counter_get_value(smmu_pmu, idx);
delta &= smmu_pmu->counter_mask;
/*
 * Set the sampling period for a counter (fragment).
 * On hardware with read-only event counters
 * (SMMU_PMCG_EVCNTR_RDONLY) the counter cannot be written, so the
 * current value is read back instead; otherwise the counter is
 * preloaded to half its range (counter_mask >> 1) so an overflow
 * interrupt fires at the midpoint.
 */
static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
new = smmu_pmu_counter_get_value(smmu_pmu, idx);
new = smmu_pmu->counter_mask >> 1;
smmu_pmu_counter_set_value(smmu_pmu, idx, new);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
/* Program both the event type and the StreamID match for @idx. */
smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
smmu_pmu_set_smr(smmu_pmu, idx, sid);
/*
 * Apply the StreamID filter for @event on counter @idx (fragment).
 * With per-counter filtering (or no counters yet in use) the
 * filter is programmed directly; with a global filter
 * (smmu_pmu->global_filter) the new event must match the filter
 * already installed by the first active event.
 */
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
/* Global filter compatible: only the event type needs setting. */
smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
/*
 * Claim a free hardware counter for @event (fragment): find the
 * first unused counter, apply the event filter, and mark the
 * counter in-use. Error paths between these lines are elided.
 */
static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
unsigned int num_ctrs = smmu_pmu->num_counters;
idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
set_bit(idx, smmu_pmu->used_counters);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
struct device *dev = smmu_pmu->dev;
/* Reject event IDs the hardware does not advertise in CEID. */
(!test_bit(event_id, smmu_pmu->supported_events))) {
/*
 * Reject groups with more members than there are counters
 * (checked once for the leader's siblings and once including
 * this event -- the two loops are elided between these lines).
 */
if (++group_num_events > smmu_pmu->num_counters)
/* Uncore PMU: force all events onto the designated CPU. */
event->cpu = smmu_pmu->on_cpu;
if (++group_num_events > smmu_pmu->num_counters)
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
/* Reload the period, then enable the counter. */
smmu_pmu_set_period(smmu_pmu, hwc);
smmu_pmu_counter_enable(smmu_pmu, idx);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
/* Stop counting; the final event_update is elided in this view. */
smmu_pmu_counter_disable(smmu_pmu, idx);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
/* Allocate a counter, record the event, enable its overflow IRQ. */
idx = smmu_pmu_get_event_idx(smmu_pmu, event);
smmu_pmu->events[idx] = event;
smmu_pmu_interrupt_enable(smmu_pmu, idx);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
/* Reverse of event_add: mask the IRQ and release the counter. */
smmu_pmu_interrupt_disable(smmu_pmu, idx);
smmu_pmu->events[idx] = NULL;
clear_bit(idx, smmu_pmu->used_counters);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
/* sysfs "cpumask": the single CPU all events are bound to. */
return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
/* Only expose event sysfs entries the hardware supports (CEID). */
if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
/* sysfs "identifier": the IIDR value, as a 32-bit hex string. */
return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
/* Hide the attribute entirely when no IIDR value is available. */
if (!smmu_pmu->iidr)
struct smmu_pmu *smmu_pmu;
smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
/* Nothing to do unless the outgoing CPU is the one we live on. */
if (cpu != smmu_pmu->on_cpu)
/*
 * Migrate the perf context and the IRQ affinity to the chosen
 * target CPU (target selection is elided in this view).
 */
perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
smmu_pmu->on_cpu = target;
WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
struct smmu_pmu *smmu_pmu = data;
/* Snapshot and acknowledge all pending overflow bits. */
ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
/* For each overflowed counter, update its event and restart it. */
for_each_set_bit(idx, ovs, smmu_pmu->num_counters) {
struct perf_event *event = smmu_pmu->events[idx];
smmu_pmu_set_period(smmu_pmu, hwc);
/* MSI write callback fragment: recover the PMU from drvdata. */
struct smmu_pmu *pmu = dev_get_drvdata(dev);
/* Configure MSI delivery for the overflow interrupt (body elided). */
static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
/* Request the overflow interrupt, MSI or wired (body elided). */
static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
/*
 * Put the PMCG into a known-quiescent state (fragment): globally
 * disable it, then clear every counter's enable, interrupt-enable
 * and overflow-status bit via the *CLR0 registers. The mask
 * covers exactly the implemented counters.
 */
static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);
smmu_pmu_disable(&smmu_pmu->pmu);
smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
/*
 * Pick up model-specific quirk flags (fragment). The model id is
 * passed in via platform data; the match arms selecting which
 * options apply to which model are elided between these lines.
 */
static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
model = *(u32 *)dev_get_platdata(smmu_pmu->dev);
smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
/*
 * True when this device's DT compatible indicates it exposes
 * CoreSight-style PIDR identification registers (fragment; the
 * compatible string itself is elided in this view).
 */
static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu)
return of_device_is_compatible(smmu_pmu->dev->of_node,
/*
 * Determine the implementation identifier (fragment). Prefer the
 * architectural IIDR register; if it reads as zero and the device
 * advertises CoreSight ID registers, synthesise an IIDR value
 * from PIDR0..PIDR4 (the field assembly is elided here).
 */
static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu)
u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);
if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) {
u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0);
u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1);
u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2);
u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3);
u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4);
smmu_pmu->iidr = iidr;
/*
 * Probe fragment: allocate and initialise the PMU, map registers,
 * read hardware capabilities, set up the IRQ, and register with
 * the perf core and the CPU-hotplug framework. Many intervening
 * lines (pmu ops table, error handling, name allocation) are
 * elided in this view.
 */
struct smmu_pmu *smmu_pmu;
smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
if (!smmu_pmu)
smmu_pmu->dev = dev;
platform_set_drvdata(pdev, smmu_pmu);
smmu_pmu->pmu = (struct pmu) {
/* Map the main register page (resource 0). */
smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
if (IS_ERR(smmu_pmu->reg_base))
return PTR_ERR(smmu_pmu->reg_base);
cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
/*
 * Counter pages may be relocated to a second resource; when they
 * are not, reloc_base aliases reg_base (the selecting condition
 * is elided between these lines).
 */
smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(smmu_pmu->reloc_base))
return PTR_ERR(smmu_pmu->reloc_base);
smmu_pmu->reloc_base = smmu_pmu->reg_base;
smmu_pmu->irq = irq;
/* Read the two 64-bit event-ID capability words into a bitmap. */
ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
/* Decode CFGR: counter count, filter type, counter width. */
smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;
smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
smmu_pmu_reset(smmu_pmu);
err = smmu_pmu_setup_irq(smmu_pmu);
smmu_pmu_get_iidr(smmu_pmu);
smmu_pmu_get_acpi_options(smmu_pmu);
/* Substitute quirk enable/disable hooks on affected hardware. */
if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
/* Bind events and the IRQ to the probing CPU initially. */
smmu_pmu->on_cpu = raw_smp_processor_id();
WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
&smmu_pmu->node);
err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
&res_0->start, smmu_pmu->num_counters,
smmu_pmu->global_filter ? "Global(Counter0)" :
/* Error path: back out the hotplug instance registration. */
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
/* Unregister from perf, then drop the CPU-hotplug instance. */
perf_pmu_unregister(&smmu_pmu->pmu);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
/* Quiesce the hardware on shutdown; no teardown of software state. */
smmu_pmu_disable(&smmu_pmu->pmu);