arc_pmu
/*
 * Singleton descriptor for the ARC performance counter unit (PCT).
 * File-scope and static: there is one PMU instance per system, allocated
 * with devm_kzalloc() during device probe and referenced by all event
 * init/start/stop/read paths in this driver.
 */
static struct arc_pmu *arc_pmu;
hwc->sample_period = arc_pmu->max_period;
if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
hwc->config |= arc_pmu->ev_hw_idx[ret];
if (event->attr.config >= arc_pmu->n_events)
arc_pmu->raw_entry[event->attr.config].name);
if (left > arc_pmu->max_period)
left = arc_pmu->max_period;
value = arc_pmu->max_period - left;
if (idx == arc_pmu->n_counters)
lower_32_bits(arc_pmu->max_period));
upper_32_bits(arc_pmu->max_period));
arc_pmu_disable(&arc_pmu->pmu);
arc_pmu_enable(&arc_pmu->pmu);
memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
arc_pmu->attr[j].id = j;
arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->attr)
arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->attrs)
arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->raw_entry)
arc_pmu->ev_hw_idx[i] = j;
arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
if (!arc_pmu)
arc_pmu->n_events = cc_bcr.c;
arc_pmu->n_counters = pct_bcr.c;
arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;
arc_pmu->n_counters, counter_size, cc_bcr.c,
arc_pmu->ev_hw_idx[i] = -1;
arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
arc_pmu->pmu = (struct pmu) {
.attr_groups = arc_pmu->attr_groups,
arc_pmu->irq = irq;
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);