/*
 * NOTE(review): This is not compilable C.  Every line contains the token
 * "iommu_pmu", so this reads as a line-filtered (grep-style) extract of a
 * larger file — apparently an Intel VT-d style IOMMU perfmon driver
 * (dmar_readq/dmar_writel, DMAR_PERF*_REG, pcap_* accessors), with L2-L3
 * seemingly from a different PMU driver.  No definition is complete, so
 * only comments are added below; no code token has been changed.
 */
iommu_pmu
/* Presumably a template "struct pmu" copied into each instance — distinct
 * style from the rest of this extract; verify which driver these two lines
 * belong to. */
static const struct pmu iommu_pmu __initconst = {
perf_iommu->pmu = iommu_pmu;
struct iommu_pmu *pmu;
/* Fragments of filter-programming macros: the register write is guarded by
 * the hardware filter-capability bit in iommu_pmu->filter. */
if ((iommu_pmu->filter & _filter) && iommu_pmu_en_##_name(_econfig)) { \
dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
if (iommu_pmu->filter & _filter) { \
dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
/* Fragment of a sysfs attribute is_visible macro: an event attribute is
 * shown only when its group capability bit is set in evcap[]. */
struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev); \
if (!iommu_pmu) \
return (iommu_pmu->evcap[_g_idx] & _event) ? attr->mode : 0; \
/* Counter MMIO address helper: base + idx * per-counter stride. */
iommu_event_base(struct iommu_pmu *iommu_pmu, int idx)
return iommu_pmu->cntr_reg + idx * iommu_pmu->cntr_stride;
/* Config MMIO address helper: base + idx * fixed config-block size. */
iommu_config_base(struct iommu_pmu *iommu_pmu, int idx)
return iommu_pmu->cfg_reg + idx * IOMMU_PMU_CFG_OFFSET;
/* Map a perf_event back to its owning iommu_pmu via the embedded pmu. */
static inline struct iommu_pmu *iommu_event_to_pmu(struct perf_event *event)
return container_of(event->pmu, struct iommu_pmu, pmu);
/* True when the event belongs to this PMU instance (pointer identity). */
static inline bool is_iommu_pmu_event(struct iommu_pmu *iommu_pmu,
return event->pmu == &iommu_pmu->pmu;
/* event_init fragment: reject event groups beyond the advertised count. */
struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
if (event_group >= iommu_pmu->num_eg)
/* group-validation fragment: count siblings on this PMU; a group larger
 * than num_cntr can never be scheduled. */
struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
if (!is_iommu_pmu_event(iommu_pmu, sibling) ||
if (++nr > iommu_pmu->num_cntr)
/* event-update fragment: shift = 64 - cntr_width sign-extends/truncates the
 * raw count to the hardware counter width. */
struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
int shift = 64 - iommu_pmu->cntr_width;
new_count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
struct intel_iommu *iommu = iommu_pmu->iommu;
count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
struct intel_iommu *iommu = iommu_pmu->iommu;
/* Per-counter capability check: the counter must support this event
 * group/select combination (cntr_evcap is per-counter, per-group). */
iommu_pmu_validate_per_cntr_event(struct iommu_pmu *iommu_pmu,
if (!(iommu_pmu->cntr_evcap[idx][event_group] & select))
/* Counter assignment: scan from the highest index down, claim a free
 * counter bit, back off if it cannot host this event, then program it. */
static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
for (idx = iommu_pmu->num_cntr - 1; idx >= 0; idx--) {
if (test_and_set_bit(idx, iommu_pmu->used_mask))
if (!iommu_pmu_validate_per_cntr_event(iommu_pmu, idx, event))
clear_bit(idx, iommu_pmu->used_mask);
iommu_pmu->event_list[idx] = event;
dmar_writeq(iommu_config_base(iommu_pmu, idx), hwc->config);
/* dev_to_iommu_pmu declaration line; its body line appears further below
 * (L53) — another sign these lines are out of original order. */
static inline struct iommu_pmu *dev_to_iommu_pmu(struct device *dev)
/* pmu->add fragment. */
struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
ret = iommu_pmu_assign_event(iommu_pmu, event);
/* pmu->del fragment: release the counter slot and its used_mask bit. */
struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
iommu_pmu->event_list[idx] = NULL;
clear_bit(idx, iommu_pmu->used_mask);
/* pmu_enable/pmu_disable fragments. */
struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
struct intel_iommu *iommu = iommu_pmu->iommu;
struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
struct intel_iommu *iommu = iommu_pmu->iommu;
/* Overflow handler: drain the overflow status register, updating each
 * event whose bit is set, then write the bits back to acknowledge. */
static void iommu_pmu_counter_overflow(struct iommu_pmu *iommu_pmu)
/* NOTE(review): this line belongs to dev_to_iommu_pmu() above, not to the
 * overflow handler — further evidence of line reordering. */
return container_of(dev_get_drvdata(dev), struct iommu_pmu, pmu);
while ((status = dmar_readq(iommu_pmu->overflow))) {
for_each_set_bit(i, (unsigned long *)&status, iommu_pmu->num_cntr) {
event = iommu_pmu->event_list[i];
dmar_writeq(iommu_pmu->overflow, status);
/* PMU registration fragment: fill in the perf callbacks and register a
 * system-wide, no-exclude PMU named after the IOMMU unit. */
struct iommu_pmu *iommu_pmu = iommu->pmu;
iommu_pmu->pmu.name = iommu->name;
iommu_pmu->pmu.task_ctx_nr = perf_invalid_context;
iommu_pmu->pmu.event_init = iommu_pmu_event_init;
iommu_pmu->pmu.pmu_enable = iommu_pmu_enable;
iommu_pmu->pmu.pmu_disable = iommu_pmu_disable;
iommu_pmu->pmu.add = iommu_pmu_add;
iommu_pmu->pmu.del = iommu_pmu_del;
iommu_pmu->pmu.start = iommu_pmu_start;
iommu_pmu->pmu.stop = iommu_pmu_stop;
iommu_pmu->pmu.read = iommu_pmu_event_update;
iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups;
iommu_pmu->pmu.attr_update = iommu_pmu_attr_update;
iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
iommu_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE;
iommu_pmu->pmu.module = THIS_MODULE;
return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
/* Allocation fragment: read capability register fields, then allocate the
 * per-group evcap[] and per-counter cntr_evcap[][] tables. */
struct iommu_pmu *iommu_pmu;
/*
 * NOTE(review): kzalloc_obj() is not a mainline kernel allocation helper —
 * confirm it exists in this tree.  The upstream equivalent of this
 * allocation is kzalloc(sizeof(*iommu_pmu), GFP_KERNEL); if kzalloc_obj is
 * a local wrapper, verify it zero-initializes and takes GFP_KERNEL.
 */
iommu_pmu = kzalloc_obj(*iommu_pmu);
if (!iommu_pmu)
iommu_pmu->num_cntr = pcap_num_cntr(perfcap);
/* Clamp the counter count to the driver's index limit (used_mask size). */
if (iommu_pmu->num_cntr > IOMMU_PMU_IDX_MAX) {
iommu_pmu->num_cntr, IOMMU_PMU_IDX_MAX);
iommu_pmu->num_cntr = IOMMU_PMU_IDX_MAX;
iommu_pmu->cntr_width = pcap_cntr_width(perfcap);
iommu_pmu->filter = pcap_filters_mask(perfcap);
iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap);
iommu_pmu->num_eg = pcap_num_event_group(perfcap);
iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL);
if (!iommu_pmu->evcap) {
for (i = 0; i < iommu_pmu->num_eg; i++) {
iommu_pmu->evcap[i] = pecap_es(pcap);
iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL);
if (!iommu_pmu->cntr_evcap) {
for (i = 0; i < iommu_pmu->num_cntr; i++) {
iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL);
if (!iommu_pmu->cntr_evcap[i]) {
/* NOTE(review): the next two \-continued lines (and L103/L110 below) are
 * sysfs-macro fragments interleaved into the allocation path — they do not
 * belong here in the original file. */
struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev); \
for (j = 0; j < iommu_pmu->num_eg; j++)
/* Default: every counter inherits the group-wide event capabilities. */
iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j];
iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG);
iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG);
iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG);
/* Probe each counter's capability register; a width mismatch truncates the
 * usable counter count to the counters probed so far. */
for (i = 0; i < iommu_pmu->num_cntr; i++) {
cap = dmar_readl(iommu_pmu->cfg_reg +
if (!iommu_pmu) \
if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) ||
iommu_pmu->num_cntr = i;
iommu_pmu->num_cntr);
/* Reset this counter's per-group capabilities before reading the real
 * per-counter event select from hardware. */
for (j = 0; j < iommu_pmu->num_eg; j++)
iommu_pmu->cntr_evcap[i][j] = 0;
cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET +
return (iommu_pmu->filter & _filter) ? attr->mode : 0; \
iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap);
/* Aggregate per-counter selects back into the group-wide evcap mask. */
iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap);
iommu_pmu->iommu = iommu;
iommu->pmu = iommu_pmu;
/* Error-unwind fragment: free cntr_evcap rows, the row table, evcap, and
 * the iommu_pmu itself — reverse order of allocation. */
for (i = 0; i < iommu_pmu->num_cntr; i++)
kfree(iommu_pmu->cntr_evcap[i]);
kfree(iommu_pmu->cntr_evcap);
kfree(iommu_pmu->evcap);
kfree(iommu_pmu);
/* Teardown fragment (free path), mirroring the unwind above. */
struct iommu_pmu *iommu_pmu = iommu->pmu;
if (!iommu_pmu)
if (iommu_pmu->evcap) {
for (i = 0; i < iommu_pmu->num_cntr; i++)
kfree(iommu_pmu->cntr_evcap[i]);
kfree(iommu_pmu->cntr_evcap);
kfree(iommu_pmu->evcap);
kfree(iommu_pmu);
/* IRQ setup fragment: per-unit threaded-IRQ name "dmar<seq_id>-perf". */
struct iommu_pmu *iommu_pmu = iommu->pmu;
snprintf(iommu_pmu->irq_name, sizeof(iommu_pmu->irq_name), "dmar%d-perf", iommu->seq_id);
IRQF_ONESHOT, iommu_pmu->irq_name, iommu);
/* Two unregister fragments follow; presumably from two distinct callers
 * (e.g. error path and removal path) in the original file — confirm. */
struct iommu_pmu *iommu_pmu = iommu->pmu;
if (!iommu_pmu)
perf_pmu_unregister(&iommu_pmu->pmu);
struct iommu_pmu *iommu_pmu = iommu->pmu;
if (!iommu_pmu)
perf_pmu_unregister(&iommu_pmu->pmu);