perf_ibs
static bool perf_ibs_is_mem_sample_type(struct perf_ibs *perf_ibs,
return perf_ibs == &perf_ibs_op &&
static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs,
perf_ibs_is_mem_sample_type(perf_ibs, event) ||
perf_ibs_ldlat_event(perf_ibs, event))
return perf_ibs->offset_max;
static bool perf_ibs_swfilt_discard(struct perf_ibs *perf_ibs, struct perf_event *event,
if (perf_ibs != &perf_ibs_op || !event->attr.exclude_kernel)
static void perf_ibs_phyaddr_clear(struct perf_ibs *perf_ibs,
if (perf_ibs == &perf_ibs_op) {
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
if (!(*buf++ & perf_ibs->valid_mask))
perf_ibs_event_update(perf_ibs, event, config);
if (!perf_ibs_set_period(perf_ibs, hwc, &period))
check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
offset = find_next_bit(perf_ibs->offset_mask,
perf_ibs->offset_max,
if (perf_ibs_ldlat_event(perf_ibs, event)) {
if (perf_ibs == &perf_ibs_op) {
if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
perf_ibs_swfilt_discard(perf_ibs, event, &regs, &ibs_data, br_target_idx)) {
perf_ibs_phyaddr_clear(perf_ibs, &ibs_data);
if (perf_ibs == &perf_ibs_op)
if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
hwc->sample_period = perf_ibs->min_period;
if (perf_ibs == &perf_ibs_op) {
perf_ibs_enable_event(perf_ibs, hwc, new_config);
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
perf_ibs->pcpu = pcpu;
ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
perf_ibs->pcpu = NULL;
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;
static struct perf_ibs *get_ibs_pmu(int type)
static bool perf_ibs_ldlat_event(struct perf_ibs *perf_ibs,
return perf_ibs == &perf_ibs_op &&
struct perf_ibs *perf_ibs;
perf_ibs = get_ibs_pmu(event->attr.type);
if (!perf_ibs)
if (event->pmu != &perf_ibs->pmu)
if (config & ~perf_ibs->config_mask)
if (config & perf_ibs->cnt_mask)
hwc->sample_period = perf_ibs->min_period;
if (hwc->sample_period < perf_ibs->min_period)
if (perf_ibs == &perf_ibs_op) {
config &= ~perf_ibs->cnt_mask;
if (hwc->sample_period < perf_ibs->min_period)
if (perf_ibs_ldlat_event(perf_ibs, event)) {
hwc->config_base = perf_ibs->msr;
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
overflow = perf_event_set_period(hwc, perf_ibs->min_period,
perf_ibs->max_period, period);
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
u64 count = perf_ibs->get_count(*config);
count = perf_ibs->get_count(*config);
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
if (perf_ibs->fetch_count_reset_broken)
wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask);
wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask);
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
config &= ~perf_ibs->cnt_mask;
config &= ~perf_ibs->enable_mask;
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
hwc->sample_period = perf_ibs->min_period;
perf_ibs_set_period(perf_ibs, hwc, &period);
if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
perf_ibs_enable_event(perf_ibs, hwc, config);
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
perf_ibs_disable_event(perf_ibs, hwc, config);
config &= ~perf_ibs->valid_mask;
perf_ibs_event_update(perf_ibs, event, &config);
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
struct perf_ibs *perf_ibs;
perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
if (low_nibble || value < perf_ibs->min_period)
static struct perf_ibs perf_ibs_fetch = {
static struct perf_ibs perf_ibs_op = {