event_map
.event_map = ev67_perfmon_event_map,
ev = alpha_pmu->event_map[attr->config];
const int *event_map;
int (*event_map)(int);
.event_map = sh7750_event_map,
.event_map = sh4a_event_map,
config = sh_pmu->event_map(attr->config);
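/*
 * The alpha and sh hits above show the two basic shapes an event_map
 * takes in PMU drivers: a bare const table indexed by the generic perf
 * event id (alpha), or a lookup callback hung off the pmu struct (sh).
 * A minimal standalone sketch of both; all demo_* names and event
 * encodings are hypothetical.
 */
#include <stdio.h>

#define DEMO_COUNT_HW_MAX 10
#define DEMO_OP_UNSUPPORTED (-1)

/* Style 1: direct table, as with ev67_perfmon_event_map. */
static const int demo_perfmon_event_map[DEMO_COUNT_HW_MAX] = {
	[0] = 0x01,	/* cycles */
	[1] = 0x02,	/* instructions */
};

/* Style 2: callback, as with sh_pmu->event_map(attr->config). */
static int demo_event_map(int config)
{
	if (config < 0 || config >= DEMO_COUNT_HW_MAX)
		return DEMO_OP_UNSUPPORTED;
	return demo_perfmon_event_map[config] ? demo_perfmon_event_map[config]
					      : DEMO_OP_UNSUPPORTED;
}

int main(void)
{
	printf("cycles -> %#x\n", demo_event_map(0));
	return 0;
}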
pmap = sparc_pmu->event_map(attr->config);
const struct perf_event_map *(*event_map)(int);
.event_map = ultra3_event_map,
.event_map = niagara1_event_map,
.event_map = niagara2_event_map,
.event_map = niagara4_event_map,
.event_map = niagara4_event_map,
.event_map = amd_pmu_event_map,
config = x86_pmu.event_map(pmu_attr->id);
return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
return x86_pmu.event_map(array_index_nospec(hw_event, max));
config = x86_pmu.event_map(attr->config);
.event_map = intel_pmu_event_map,
.event_map = intel_pmu_event_map,
.event_map = knc_pmu_event_map,
.event_map = p4_pmu_event_map,
.event_map = p6_pmu_event_map,
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
u64 (*event_map)(int);
.event_map = zhaoxin_pmu_event_map,
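/*
 * The x86 hits add one refinement: the generic id is validated and
 * clamped with array_index_nospec() before indexing the table
 * (x86_pmu.event_map(array_index_nospec(hw_event, max))), and a map
 * value of 0 means "unsupported". array_index_nospec() is kernel-only,
 * so a plain bounds check stands in for it in this sketch; the demo_*
 * names and encodings are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_EVENTS 8

static const uint64_t demo_hw_event_ids[DEMO_MAX_EVENTS] = {
	0x003c,		/* cycles, illustrative encoding only */
	0x00c0,		/* instructions retired */
};

static uint64_t demo_x86_event_map(unsigned int hw_event)
{
	if (hw_event >= DEMO_MAX_EVENTS)
		return 0;	/* 0 doubles as "no such event" */
	/* the kernel clamps the index with array_index_nospec() here */
	return demo_hw_event_ids[hw_event];
}

int main(void)
{
	printf("event 1 -> %#llx\n",
	       (unsigned long long)demo_x86_event_map(1));
	return 0;
}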
struct event_lpi_map event_map;
prev_cpu = its_dev->event_map.col_map[id];
its_dev->event_map.col_map[id] = cpu;
if (!its_dev->event_map.vm) {
maps = kzalloc_objs(*maps, its_dev->event_map.nr_lpis,
its_dev->event_map.vm = info->map->vm;
its_dev->event_map.vlpi_maps = maps;
} else if (its_dev->event_map.vm != info->map->vm) {
its_dev->event_map.vlpi_maps[event] = *info->map;
its_dev->event_map.nr_vlpis++;
if (!its_dev->event_map.vm || !map)
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
its_unmap_vm(its_dev->its, its_dev->event_map.vm);
if (!--its_dev->event_map.nr_vlpis) {
its_dev->event_map.vm = NULL;
kfree(its_dev->event_map.vlpi_maps);
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
return d->hwirq - its_dev->event_map.lpi_base;
return its->collections + its_dev->event_map.col_map[event];
if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
return &its_dev->event_map.vlpi_maps[event];
dev->event_map.lpi_map = lpi_map;
dev->event_map.col_map = col_map;
dev->event_map.lpi_base = lpi_base;
dev->event_map.nr_lpis = nr_lpis;
raw_spin_lock_init(&dev->event_map.vlpi_lock);
kfree(its_dev->event_map.col_map);
idx = bitmap_find_free_region(dev->event_map.lpi_map,
dev->event_map.nr_lpis,
*hwirq = dev->event_map.lpi_base + idx;
if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)))
its_lpi_free(its_dev->event_map.lpi_map,
its_dev->event_map.lpi_base,
its_dev->event_map.nr_lpis);
(int)(hwirq + i - its_dev->event_map.lpi_base),
its_dev->event_map.col_map[event] = cpu;
its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
bitmap_release_region(its_dev->event_map.lpi_map,
vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
cpu = its_dev->event_map.col_map[its_get_event_id(d)];
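/*
 * The GICv3 ITS hits above all revolve around one per-device struct: a
 * contiguous LPI range starting at lpi_base, a bitmap of allocated
 * events, a per-event target-collection table, and the VLPI state. A
 * trimmed sketch of the core fields and the two lookups that recur in
 * the lines above (event id from hwirq, collection from event id); the
 * struct is reduced and the demo_* names are not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_event_lpi_map {
	unsigned long *lpi_map;	/* one bit per allocated event */
	uint16_t *col_map;	/* event id -> target collection */
	uint32_t lpi_base;	/* first hwirq of the device's range */
	int nr_lpis;		/* size of the range */
};

/* mirrors: return d->hwirq - its_dev->event_map.lpi_base; */
static uint32_t demo_get_event_id(uint32_t hwirq,
				  const struct demo_event_lpi_map *map)
{
	return hwirq - map->lpi_base;
}

int main(void)
{
	uint16_t cols[4] = { 0, 1, 0, 1 };
	struct demo_event_lpi_map map = {
		.col_map = cols, .lpi_base = 8192, .nr_lpis = 4,
	};
	uint32_t event = demo_get_event_id(8194, &map);

	/* mirrors: its->collections + event_map.col_map[event] */
	printf("hwirq 8194 -> event %u -> collection %u\n",
	       event, map.col_map[event]);
	return 0;
}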
bitmap_release_region(its_dev->event_map, event_id_base,
unsigned long *event_map;
its_dev->event_map = bitmap_zalloc(its_dev->num_events, GFP_KERNEL);
if (!its_dev->event_map) {
bitmap_free(its_dev->event_map);
if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map, its_dev->num_events)))
bitmap_free(its_dev->event_map);
event_id_base = bitmap_find_free_region(its_dev->event_map,
if (test_and_set_bit(event_id_base, its_dev->event_map)) {
bitmap_release_region(its_dev->event_map, event_id_base,
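/*
 * The hits above (a second ITS flavour) use a much simpler map: a bare
 * bitmap of event ids, sized by num_events, allocated with
 * bitmap_zalloc() and carved up with bitmap_find_free_region(). The
 * kernel bitmap API is not available in userspace, so this sketch
 * substitutes open-coded equivalents; the demo_* names are invented.
 */
#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long *demo_bitmap_zalloc(unsigned int nbits)
{
	size_t longs = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
	return calloc(longs, sizeof(unsigned long));
}

/* find and claim the first clear bit, or return -1 when full */
static int demo_alloc_event(unsigned long *map, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++) {
		unsigned long bit = 1UL << (i % BITS_PER_LONG);

		if (!(map[i / BITS_PER_LONG] & bit)) {
			map[i / BITS_PER_LONG] |= bit;
			return (int)i;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long *map = demo_bitmap_zalloc(32);
	int event = demo_alloc_event(map, 32);	/* first call yields 0 */

	if (event >= 0)	/* release, as bitmap_release_region() would */
		map[event / BITS_PER_LONG] &= ~(1UL << (event % BITS_PER_LONG));
	free(map);
	return 0;
}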
free_bit = find_first_zero_bit(vint_desc->event_map,
set_bit(free_bit, vint_desc->event_map);
free_bit = find_first_zero_bit(vint_desc->event_map,
set_bit(free_bit, vint_desc->event_map);
clear_bit(free_bit, vint_desc->event_map);
if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) {
clear_bit(event_desc->vint_bit, vint_desc->event_map);
DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT);
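/*
 * The TI SCI INTA hits aggregate several events onto one VINT, each
 * VINT owning a fixed-size event_map bitmap. The distinctive step is
 * teardown: find_first_bit() returning MAX_EVENTS_PER_VINT means the
 * bitmap is empty and the whole VINT can be released. A compact sketch
 * of that emptiness test, assuming the map fits in one 64-bit word.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_vint_desc {
	uint64_t event_map;	/* DECLARE_BITMAP(event_map, 64) equivalent */
};

/* mirrors: find_first_bit(map, MAX) == MAX  =>  no events left */
static bool demo_vint_is_empty(const struct demo_vint_desc *vint)
{
	return vint->event_map == 0;
}

int main(void)
{
	struct demo_vint_desc vint = { .event_map = 0 };

	vint.event_map |= 1ULL << 3;	/* set_bit(free_bit, event_map) */
	vint.event_map &= ~(1ULL << 3);	/* clear_bit() on event teardown */

	/* empty again, so the VINT itself could now be freed */
	return demo_vint_is_empty(&vint) ? 0 : 1;
}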
u8 event_map[IQS62X_EVENT_SIZE];
ret = regmap_raw_read(iqs62x->regmap, IQS62X_SYS_FLAGS, event_map,
sizeof(event_map));
for (i = 0; i < sizeof(event_map); i++) {
event_data.ui_data = get_unaligned_le16(&event_map[i]);
event_data.als_flags = event_map[i];
event_data.ir_flags = event_map[i];
event_data.interval = event_map[i];
event_map[i] <<= iqs62x->dev_desc->hyst_shift;
if ((event_map[i] & event_desc.mask) == event_desc.val)
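/*
 * The IQS62x hits show a different use of the name: event_map is a raw
 * byte buffer filled by one bulk regmap_raw_read() of the system flag
 * registers, then each byte is matched against per-event mask/value
 * pairs. This sketch reproduces only the matching scheme; the buffer
 * contents and the demo_* event table are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_EVENT_SIZE 3

struct demo_event_desc {
	uint8_t reg;	/* which byte of the buffer to inspect */
	uint8_t mask;
	uint8_t val;
	const char *name;
};

static const struct demo_event_desc demo_events[] = {
	{ 0, 0x01, 0x01, "prox" },
	{ 1, 0xc0, 0x40, "als" },
};

static void demo_decode(const uint8_t event_map[DEMO_EVENT_SIZE])
{
	for (size_t i = 0; i < sizeof(demo_events) / sizeof(demo_events[0]); i++) {
		const struct demo_event_desc *d = &demo_events[i];

		/* mirrors: (event_map[i] & event_desc.mask) == event_desc.val */
		if ((event_map[d->reg] & d->mask) == d->val)
			printf("event: %s\n", d->name);
	}
}

int main(void)
{
	const uint8_t buf[DEMO_EVENT_SIZE] = { 0x01, 0x40, 0x00 };

	demo_decode(buf);	/* stands in for the regmap_raw_read() result */
	return 0;
}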
fweh->event_map = &brcmf_cyw_event_map;
if (fweh->event_map) {
for (i = 0; i < fweh->event_map->n_items; i++) {
if (fweh->event_map->items[i].code == code) {
*fw_code = fweh->event_map->items[i].fwevt_code;
if (fweh->event_map) {
for (i = 0; i < fweh->event_map->n_items; i++) {
if (fweh->event_map->items[i].fwevt_code == fw_code) {
*code = fweh->event_map->items[i].code;
const struct brcmf_fweh_event_map *event_map;
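/*
 * The brcmfmac hits translate between driver-internal event codes and
 * the codes a particular firmware branch uses, via an optional table
 * searched linearly in both directions (code -> fwevt_code and back).
 * A sketch of the forward lookup; the entry values are invented and
 * the demo_* names are not the driver's.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_fweh_event_map_item {
	uint32_t code;		/* driver-internal event code */
	uint32_t fwevt_code;	/* code used by this firmware branch */
};

struct demo_fweh_event_map {
	const struct demo_fweh_event_map_item *items;
	unsigned int n_items;
};

static bool demo_code_to_fw(const struct demo_fweh_event_map *map,
			    uint32_t code, uint32_t *fw_code)
{
	for (unsigned int i = 0; i < map->n_items; i++) {
		if (map->items[i].code == code) {
			*fw_code = map->items[i].fwevt_code;
			return true;
		}
	}
	return false;	/* no remap entry: codes are taken to match 1:1 */
}

int main(void)
{
	static const struct demo_fweh_event_map_item items[] = {
		{ .code = 5, .fwevt_code = 73 },
	};
	const struct demo_fweh_event_map map = { items, 1 };
	uint32_t fw;

	return demo_code_to_fw(&map, 5, &fw) && fw == 73 ? 0 : 1;
}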
static struct event_map pcie_event_to_event[] = {
static struct event_map sec_error_to_event[] = {
static struct event_map ded_error_to_event[] = {
static struct event_map local_status_to_event[] = {
static inline u32 reg_to_event(u32 reg, struct event_map field)
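/*
 * The PCIe controller hits use struct event_map as a table entry tying
 * a status-register bit to a flat event number, with reg_to_event()
 * doing the translation. The field layout below is inferred from the
 * lines above (a register mask plus an event bit) and is an
 * assumption, not the driver's verbatim definition.
 */
#include <stdint.h>

struct event_map {
	uint32_t reg_mask;	/* bit(s) in the hardware status register */
	uint32_t event_bit;	/* corresponding flat event number */
};

static inline uint32_t reg_to_event(uint32_t reg, struct event_map field)
{
	return (reg & field.reg_mask) ? (1U << field.event_bit) : 0;
}

int main(void)
{
	const struct event_map sec_err = { .reg_mask = 1U << 4, .event_bit = 9 };

	/* a set status bit maps to its event's position in the flat space */
	return reg_to_event(1U << 4, sec_err) == (1U << 9) ? 0 : 1;
}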
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
if (!event_map)
mapping = (*event_map)[config];
const unsigned (*event_map)[PERF_COUNT_HW_MAX],
return armpmu_map_hw_event(event_map, config);
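/*
 * The ARM PMU hits pass the map as a pointer to a fixed-size array
 * indexed by the generic event id, with a sentinel for holes. This is
 * a reconstruction of armpmu_map_hw_event() from the lines above, not
 * a verbatim quote; the sentinel and error values follow the usual
 * kernel conventions.
 */
#include <errno.h>
#include <stdint.h>

#define DEMO_COUNT_HW_MAX 10
#define DEMO_HW_OP_UNSUPPORTED 0xffff

static int demo_map_hw_event(const unsigned int (*event_map)[DEMO_COUNT_HW_MAX],
			     uint64_t config)
{
	unsigned int mapping;

	if (config >= DEMO_COUNT_HW_MAX)
		return -EINVAL;
	if (!event_map)	/* mirrors: if (!event_map) */
		return -ENOENT;

	mapping = (*event_map)[config];	/* mirrors: (*event_map)[config] */
	return mapping == DEMO_HW_OP_UNSUPPORTED ? -ENOENT : (int)mapping;
}

int main(void)
{
	static const unsigned int map[DEMO_COUNT_HW_MAX] = {
		[0] = 0x11, [1] = DEMO_HW_OP_UNSUPPORTED,
	};

	return demo_map_hw_event(&map, 0) == 0x11 ? 0 : 1;
}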
mapped_event = rvpmu->event_map(event, &event_config);
pmu->event_map = pmu_legacy_event_map;
pmu->event_map = pmu_sbi_event_map;
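/*
 * The RISC-V hits select the mapping callback once at probe time, so
 * the same pmu->event_map slot serves both the legacy counter
 * interface and the SBI PMU extension. A minimal sketch of that
 * runtime selection; the demo_* bodies and the has_sbi flag are
 * hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_pmu {
	int (*event_map)(int event, uint64_t *config);
};

/* legacy interface: only a couple of fixed counters are usable */
static int demo_legacy_event_map(int event, uint64_t *config)
{
	if (event > 1)
		return -1;
	*config = (uint64_t)event;
	return event;
}

/* SBI PMU extension: the full generic set can be translated */
static int demo_sbi_event_map(int event, uint64_t *config)
{
	*config = (uint64_t)event;
	return event;
}

static void demo_pmu_init(struct demo_pmu *pmu, bool has_sbi)
{
	pmu->event_map = has_sbi ? demo_sbi_event_map
				 : demo_legacy_event_map;
}

int main(void)
{
	struct demo_pmu pmu;
	uint64_t config;

	demo_pmu_init(&pmu, true);	/* probe decided SBI is available */
	return pmu.event_map(1, &config) == 1 ? 0 : 1;
}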
unsigned long event_map;
requested_events = bitmap_weight(&lpc18xx_pwm->event_map,
event = find_first_zero_bit(&lpc18xx_pwm->event_map,
set_bit(event, &lpc18xx_pwm->event_map);
clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
set_bit(LPC18XX_PWM_EVENT_PERIOD, &lpc18xx_pwm->event_map);
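/*
 * The LPC18xx hits keep the whole event map in a single unsigned long
 * and use bitmap_weight() to count claimed match events before
 * allocating another one. A sketch of that capacity check using a
 * popcount builtin in place of bitmap_weight(); the event count is an
 * assumed value, not the hardware's.
 */

#define DEMO_NUM_EVENTS 16	/* assumed number of hardware match events */

static int demo_claim_event(unsigned long *event_map)
{
	/* mirrors: bitmap_weight(&lpc18xx_pwm->event_map, ...) */
	if (__builtin_popcountl(*event_map) >= DEMO_NUM_EVENTS)
		return -1;	/* every match event is already in use */

	for (int i = 0; i < DEMO_NUM_EVENTS; i++) {
		if (!(*event_map & (1UL << i))) {
			*event_map |= 1UL << i;	/* set_bit() */
			return i;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long map = 0;

	return demo_claim_event(&map) == 0 ? 0 : 1;
}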
const unsigned (*event_map)[PERF_COUNT_HW_MAX],
int (*event_map)(struct perf_event *event, u64 *config);
ASSERT_EQ(expected_events, result->event_map, "event_map");
global.event_map |= (1 << op);
__u32 event_map;
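/*
 * The final hits are a BPF selftest idiom: each observed operation
 * sets one bit in a u32 event_map, and the test compares the
 * accumulated mask against the expected set with a single assertion.
 * The demo_* op names are invented; only the bookkeeping matches.
 */
#include <assert.h>
#include <stdint.h>

enum demo_op { DEMO_OP_OPEN, DEMO_OP_READ, DEMO_OP_CLOSE };

static uint32_t demo_event_map;

static void demo_record(enum demo_op op)
{
	demo_event_map |= 1U << op;	/* mirrors: global.event_map |= (1 << op) */
}

int main(void)
{
	demo_record(DEMO_OP_OPEN);
	demo_record(DEMO_OP_CLOSE);

	/* mirrors: ASSERT_EQ(expected_events, result->event_map, "event_map") */
	assert(demo_event_map ==
	       ((1U << DEMO_OP_OPEN) | (1U << DEMO_OP_CLOSE)));
	return 0;
}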