Symbol: perf_ibs
File: arch/x86/events/amd/ibs.c

1140: static bool perf_ibs_is_mem_sample_type(struct perf_ibs *perf_ibs,
1145: return perf_ibs == &perf_ibs_op &&
1152: static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs,
1157: perf_ibs_is_mem_sample_type(perf_ibs, event) ||
1158: perf_ibs_ldlat_event(perf_ibs, event))
1159: return perf_ibs->offset_max;
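
These two helpers decide how many IBS sample MSRs get read per NMI. A minimal sketch of how they compose, reconstructed from lines 1157-1159; the non-full-sample return values (3 when RIP checking is enabled, 1 otherwise) and the PERF_SAMPLE_RAW condition follow the upstream file and should be treated as assumptions here:

static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs,
                                   struct perf_event *event, int check_rip)
{
        if (event->attr.sample_type & PERF_SAMPLE_RAW ||
            perf_ibs_is_mem_sample_type(perf_ibs, event) ||
            perf_ibs_ldlat_event(perf_ibs, event))
                return perf_ibs->offset_max;    /* read every MSR in the group */
        else if (check_rip)
                return 3;                       /* enough to reach the sampled RIP */
        return 1;
}
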
1193: static bool perf_ibs_swfilt_discard(struct perf_ibs *perf_ibs, struct perf_event *event,
1200: if (perf_ibs != &perf_ibs_op || !event->attr.exclude_kernel)
1213: static void perf_ibs_phyaddr_clear(struct perf_ibs *perf_ibs,
1216: if (perf_ibs == &perf_ibs_op) {
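
Only the signature (1193, completed by the call at 1337) and the early-out (1200) are visible here, so this skeleton is a guess at the shape: software filtering applies only to op samples whose event asked to exclude kernel samples.

static bool perf_ibs_swfilt_discard(struct perf_ibs *perf_ibs, struct perf_event *event,
                                    struct pt_regs *regs, struct perf_ibs_data *ibs_data,
                                    int br_target_idx)
{
        /* Only op samples with exclude_kernel set need software
         * filtering; everything else is kept (line 1200). */
        if (perf_ibs != &perf_ibs_op || !event->attr.exclude_kernel)
                return false;

        /* Hypothetical check: drop samples whose interrupted state is
         * kernel mode. The real test inspects the sampled data. */
        return !user_mode(regs);
}
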
1226: static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
1228: struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
1261: if (!(*buf++ & perf_ibs->valid_mask))
1265: perf_ibs_event_update(perf_ibs, event, config);
1267: if (!perf_ibs_set_period(perf_ibs, hwc, &period))
1273: check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
1275: offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
1280: offset = find_next_bit(perf_ibs->offset_mask,
1281: perf_ibs->offset_max,
1285: if (perf_ibs_ldlat_event(perf_ibs, event)) {
1306: if (perf_ibs == &perf_ibs_op) {
1317: if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
1329: if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
1337: perf_ibs_swfilt_discard(perf_ibs, event, &regs, &ibs_data, br_target_idx)) {
1348: perf_ibs_phyaddr_clear(perf_ibs, &ibs_data);
1361: if (perf_ibs == &perf_ibs_op)
1373: if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
1374: hwc->sample_period = perf_ibs->min_period;
1378: if (perf_ibs == &perf_ibs_op) {
1388: perf_ibs_enable_event(perf_ibs, hwc, new_config);
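
Lines 1273-1281 are the sample-collection loop in the NMI handler: the per-PMU offset bitmap enumerates the readable MSR offsets and offset_max bounds how far to read. A sketch of the loop's shape; the msr, buf, size, event, and hwc locals are assumed from context:

        offset = 1;
        check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
        offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
        do {
                rdmsrq(msr + offset, *buf++);   /* msr: base from perf_ibs->msr */
                size++;
                offset = find_next_bit(perf_ibs->offset_mask,
                                       perf_ibs->offset_max,
                                       offset + 1);
        } while (offset < offset_max);
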
1414: static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
1423: perf_ibs->pcpu = pcpu;
1425: ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
1427: perf_ibs->pcpu = NULL;
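
The registration helper pairs a per-CPU state allocation with perf_pmu_register() and unwinds on failure, which is why line 1427 nulls pcpu again. A plausible reconstruction, assuming alloc_percpu()/free_percpu() as in the upstream file:

static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
        struct cpu_perf_ibs __percpu *pcpu;
        int ret;

        pcpu = alloc_percpu(struct cpu_perf_ibs);
        if (!pcpu)
                return -ENOMEM;

        perf_ibs->pcpu = pcpu;

        ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
        if (ret) {
                perf_ibs->pcpu = NULL;          /* line 1427: undo on failure */
                free_percpu(pcpu);
        }

        return ret;
}
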
181: static struct perf_ibs perf_ibs_fetch;
182: static struct perf_ibs perf_ibs_op;
184: static struct perf_ibs *get_ibs_pmu(int type)
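
The two static instances at 181-182 back the fetch and op PMUs; get_ibs_pmu() maps a perf event type back to one of them. A minimal sketch of the dispatch, assuming the dynamic pmu.type assigned at registration:

static struct perf_ibs *get_ibs_pmu(int type)
{
        if (perf_ibs_fetch.pmu.type == type)
                return &perf_ibs_fetch;
        if (perf_ibs_op.pmu.type == type)
                return &perf_ibs_op;
        return NULL;
}
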
272: static bool perf_ibs_ldlat_event(struct perf_ibs *perf_ibs,
275: return perf_ibs == &perf_ibs_op &&
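
perf_ibs_ldlat_event() (272-275) gates load-latency filtering to the op PMU. A sketch completing the condition; the capability bit name and the config1 latency-threshold field are assumptions taken from the upstream load-latency support:

static bool perf_ibs_ldlat_event(struct perf_ibs *perf_ibs,
                                 struct perf_event *event)
{
        return perf_ibs == &perf_ibs_op &&
               (ibs_caps & IBS_CAPS_OPLDLAT) &&  /* assumed capability bit */
               (event->attr.config1 & 0xFFF);    /* assumed latency threshold */
}
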
283: struct perf_ibs *perf_ibs;
287: perf_ibs = get_ibs_pmu(event->attr.type);
288: if (!perf_ibs)
293: if (event->pmu != &perf_ibs->pmu)
296: if (config & ~perf_ibs->config_mask)
317: if (config & perf_ibs->cnt_mask)
322: hwc->sample_period = perf_ibs->min_period;
326: if (hwc->sample_period < perf_ibs->min_period)
335: if (perf_ibs == &perf_ibs_op) {
343: config &= ~perf_ibs->cnt_mask;
347: if (hwc->sample_period < perf_ibs->min_period)
351: if (perf_ibs_ldlat_event(perf_ibs, event)) {
369: hwc->config_base = perf_ibs->msr;
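
Lines 283-369 fall in the event-init path (perf_ibs_init() in the upstream file): resolve the PMU, validate the config against the per-PMU masks, and enforce the minimum period. A condensed sketch; the error codes and the freq/fixed-period split follow upstream, but the body is simplified and omits the op-specific and load-latency branches at 335 and 351:

static int perf_ibs_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs;
        u64 config;

        perf_ibs = get_ibs_pmu(event->attr.type);
        if (!perf_ibs)
                return -ENOENT;

        config = event->attr.config;

        if (event->pmu != &perf_ibs->pmu)
                return -ENOENT;

        if (config & ~perf_ibs->config_mask)
                return -EINVAL;

        /* A raw current-count in the config is rejected (line 317). */
        if (config & perf_ibs->cnt_mask)
                return -EINVAL;

        if (event->attr.freq) {
                hwc->sample_period = perf_ibs->min_period;      /* line 322 */
        } else if (hwc->sample_period < perf_ibs->min_period) {
                return -EINVAL;                                 /* line 326 */
        }

        config &= ~perf_ibs->cnt_mask;                          /* line 343 */

        hwc->config_base = perf_ibs->msr;                       /* line 369 */
        hwc->config = config;

        return 0;
}
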
375: static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
381: overflow = perf_event_set_period(hwc, perf_ibs->min_period,
382: perf_ibs->max_period, period);
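
The period setter delegates clamping to perf_event_set_period() with the per-PMU bounds (381-382). A sketch; masking off the low nibble afterwards mirrors the upstream file, since IBS hardware counts in units of 16:

static int
perf_ibs_set_period(struct perf_ibs *perf_ibs,
                    struct hw_perf_event *hwc, u64 *period)
{
        int overflow;

        /* Clamp into [min_period, max_period] and report overflow. */
        overflow = perf_event_set_period(hwc, perf_ibs->min_period,
                                         perf_ibs->max_period, period);
        *period &= ~0xFULL;     /* low 4 bits are ignored by the hardware */

        return overflow;
}
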
417: perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
420: u64 count = perf_ibs->get_count(*config);
429: count = perf_ibs->get_count(*config);
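
perf_ibs->get_count is the per-PMU hook that extracts the current count from a control-register value (420, 429), hiding the different fetch and op register layouts. A sketch of the fetch-side callback, assuming the union ibs_fetch_ctl layout from asm/perf_event.h:

static u64 get_ibs_fetch_count(u64 config)
{
        union ibs_fetch_ctl fetch_ctl = (union ibs_fetch_ctl)config;

        /* The hardware counts in 16-instruction units; shift the
         * stored field back up to an instruction count. */
        return fetch_ctl.fetch_cnt << 4;
}
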
433: static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
438: if (perf_ibs->fetch_count_reset_broken)
439: wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask);
441: wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask);
451: static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
454: config &= ~perf_ibs->cnt_mask;
457: config &= ~perf_ibs->enable_mask;
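
perf_ibs_enable_event() carries an erratum workaround: on parts where the fetch counter does not reset when the enable bit is set, the control MSR is written once with the enable bit clear before the enabling write (438-441). A sketch, with the tmp local assumed from context:

static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
                                         struct hw_perf_event *hwc, u64 config)
{
        u64 tmp = hwc->config | config;

        /* Workaround: a plain enabling write would not reset the fetch
         * counter on affected parts, so clear the enable bit first. */
        if (perf_ibs->fetch_count_reset_broken)
                wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask);

        wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask);
}
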
470: struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
471: struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
480: if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
481: hwc->sample_period = perf_ibs->min_period;
483: perf_ibs_set_period(perf_ibs, hwc, &period);
484: if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
496: perf_ibs_enable_event(perf_ibs, hwc, config);
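
The start path (470-496) shows the recurring recovery pattern: pmu callbacks get the enclosing struct perf_ibs back via container_of() on the embedded struct pmu, then the per-CPU state via this_cpu_ptr(). The same two lines open the stop, add, del, and period-check paths below (504-505, 557-558, 575-576, 598). A sketch of start; the IBS_STARTED state bit and the period-to-config packing are assumptions, and the OPCNTEXT widening at line 484 is omitted:

static void perf_ibs_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        u64 period, config = 0;

        /* Re-clamp: the core may have decayed a freq-mode period below
         * the hardware minimum (lines 480-481). */
        if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
                hwc->sample_period = perf_ibs->min_period;

        perf_ibs_set_period(perf_ibs, hwc, &period);
        config |= period >> 4;  /* period is programmed in units of 16 */

        set_bit(IBS_STARTED, pcpu->state);      /* assumed state bookkeeping */
        perf_ibs_enable_event(perf_ibs, hwc, config);
}
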
504: struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
505: struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
527: perf_ibs_disable_event(perf_ibs, hwc, config);
549: config &= ~perf_ibs->valid_mask;
551: perf_ibs_event_update(perf_ibs, event, &config);
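
The stop path first disables the counter, then clears the hardware valid bit before folding in the final count (527-551), so a pending sample cannot be double counted. A greatly simplified sketch; the upstream function also manages the per-CPU started/stopping state:

static void perf_ibs_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        u64 config;

        rdmsrq(hwc->config_base, config);       /* assumed: re-read control MSR */
        perf_ibs_disable_event(perf_ibs, hwc, config);

        /* Clear the valid bit so the readout below is not mistaken for
         * a fresh sample (line 549). */
        config &= ~perf_ibs->valid_mask;
        perf_ibs_event_update(perf_ibs, event, &config);

        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
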
557: struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
558: struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
575: struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
576: struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
592: struct perf_ibs *perf_ibs;
598: perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
606: if (low_nibble || value < perf_ibs->min_period)
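
Line 606 is the period-validation check: a new period must have a zero low nibble (16-instruction granularity) and meet the hardware minimum. A sketch of the surrounding function; the name (perf_ibs_check_period() upstream) and the -EINVAL return are assumptions:

static int perf_ibs_check_period(struct perf_event *event, u64 value)
{
        struct perf_ibs *perf_ibs;
        u64 low_nibble;

        if (event->attr.freq)
                return 0;       /* freq mode picks its own period */

        perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        low_nibble = value & 0xFULL;

        if (low_nibble || value < perf_ibs->min_period)
                return -EINVAL;

        return 0;
}
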
785: static struct perf_ibs perf_ibs_fetch = {
810: static struct perf_ibs perf_ibs_op = {
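
Taken together, the references above pin down most of struct perf_ibs, the per-PMU descriptor that both initializers at 785 and 810 fill in. A sketch assembled from the fields used in this listing; ordering, exact types, and the bitfield flags are assumptions:

struct perf_ibs {
        struct pmu                      pmu;            /* 293, 425: embedded, recovered via container_of() */
        unsigned int                    msr;            /* 369: control MSR, becomes hwc->config_base */
        u64                             config_mask;    /* 296: valid config bits */
        u64                             cnt_mask;       /* 317, 343, 454: current-count bits */
        u64                             enable_mask;    /* 439, 441, 457: enable bit */
        u64                             valid_mask;     /* 549, 1261: sample-valid bit */
        u64                             min_period;     /* 322, 326, 381, 606 */
        u64                             max_period;     /* 382 */
        unsigned long                   offset_mask[1]; /* 1280: readable MSR offsets */
        int                             offset_max;     /* 1159, 1281 */
        unsigned int                    fetch_count_reset_broken : 1;   /* 438 */
        unsigned int                    fetch_ignore_if_zero_rip : 1;   /* 1329 */
        struct cpu_perf_ibs __percpu    *pcpu;          /* 471, 1423 */
        u64                             (*get_count)(u64 config);       /* 420 */
};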