ring_buffer_per_cpu

Every line below is a use of struct ring_buffer_per_cpu in kernel/trace/ring_buffer.c, the per-CPU slice of the ftrace ring buffer.
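For orientation, here is an abridged, standalone sketch of the structure itself. Field names follow the kernel source, but the real structure carries many more fields (locks, timestamp state, statistics) and varies by kernel version; kernel types are stubbed so the sketch compiles on its own.

/* Abridged sketch of struct ring_buffer_per_cpu; not the full definition. */
struct trace_buffer;            /* the owning buffer, one per trace instance */
struct buffer_page;             /* one sub-buffer (page) of the ring */
struct list_head;               /* kernel circular-list node, stubbed */

struct ring_buffer_per_cpu {
        int                     cpu;            /* CPU this slice belongs to */
        struct trace_buffer     *buffer;        /* back-pointer to the owner */
        unsigned long           nr_pages;       /* sub-buffers in the ring */
        struct list_head        *pages;         /* circular list of sub-buffers */
        struct buffer_page      *head_page;     /* oldest data: readers start here */
        struct buffer_page      *tail_page;     /* newest data: writers append here */
        struct buffer_page      *commit_page;   /* last fully committed position */
        struct buffer_page      *reader_page;   /* spare page readers swap in */
};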
struct ring_buffer_per_cpu *cpu_buffer;
if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
struct ring_buffer_per_cpu *__b = \
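The two fragments above are from RB_WARN_ON(), which can be handed either a per-CPU buffer or the whole trace_buffer: __same_type() decides, at compile time, how to reach the buffer whose recording must be disabled. A minimal standalone model of that dispatch; buffer_of() is a hypothetical name, and the kernel's __same_type() is the same __builtin_types_compatible_p() wrapper (GCC/Clang only).

#include <stdio.h>

/* The kernel's __same_type(): compile-time type equality (GCC/Clang). */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))

struct trace_buffer        { int record_disabled; };
struct ring_buffer_per_cpu { struct trace_buffer *buffer; };

/* Hypothetical stand-in for RB_WARN_ON()'s dispatch: find the trace_buffer
 * to disable regardless of which pointer type we were given. */
#define buffer_of(b)                                                    \
        (__same_type(*(b), struct ring_buffer_per_cpu) ?                \
                ((struct ring_buffer_per_cpu *)(void *)(b))->buffer :   \
                (struct trace_buffer *)(void *)(b))

int main(void)
{
        struct trace_buffer tb = { 0 };
        struct ring_buffer_per_cpu cb = { .buffer = &tb };

        buffer_of(&cb)->record_disabled = 1;    /* reached via the per-CPU slice */
        printf("disabled=%d\n", buffer_of(&tb)->record_disabled);       /* 1 */
        return 0;
}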
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
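The rb_head_page_*() helpers above implement the lockless head-page protocol. There is no separate head pointer: the HEAD flag lives in the low bits of the list pointer that leads to the head page, so a writer can claim or move the head with a single cmpxchg of that one word. A standalone model of just the tagging (the cmpxchg choreography is omitted); RB_PAGE_HEAD, RB_PAGE_UPDATE, RB_FLAG_MASK and rb_list_head() mirror the kernel names.

#include <stdint.h>
#include <stdio.h>

/* Flag bits stored in the low bits of the pointer that points AT the head
 * page; pages are aligned, so those bits are otherwise always zero. */
#define RB_PAGE_HEAD    1UL     /* the pointed-to page is the head */
#define RB_PAGE_UPDATE  2UL     /* a writer is currently moving the head */
#define RB_FLAG_MASK    3UL

struct bpage { struct bpage *next; };

/* Strip the flag bits: the kernel's rb_list_head(). */
static struct bpage *rb_list_head(struct bpage *p)
{
        return (struct bpage *)((uintptr_t)p & ~RB_FLAG_MASK);
}

/* Mark or unmark the page that *link points to as head: the idea behind
 * rb_head_page_activate()/rb_head_page_deactivate(). */
static void set_head_flag(struct bpage **link, int on)
{
        uintptr_t v = (uintptr_t)rb_list_head(*link);

        *link = (struct bpage *)(on ? v | RB_PAGE_HEAD : v);
}

int main(void)
{
        struct bpage b = { 0 }, a = { .next = &b };

        set_head_flag(&a.next, 1);
        printf("is_head=%lu target=%p\n",
               (unsigned long)((uintptr_t)a.next & RB_PAGE_HEAD),
               (void *)rb_list_head(a.next));
        set_head_flag(&a.next, 0);
        printf("is_head=%lu\n", (unsigned long)((uintptr_t)a.next & RB_PAGE_HEAD));
        return 0;
}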
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer,
static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
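rb_check_bpage(), rb_check_links() and rb_check_pages() above are sanity checks run over the page list: every next/prev pair must still agree once the head-page flag bits are masked off. The invariant itself, checked standalone (this sketch has no flag bits to mask):

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next, *prev; };

/* The invariant rb_check_links() verifies for each page: my next's prev
 * is me, and my prev's next is me. */
static bool check_links(struct node *head)
{
        struct node *n = head;

        do {
                if (n->next->prev != n || n->prev->next != n)
                        return false;
                n = n->next;
        } while (n != head);
        return true;
}

int main(void)
{
        struct node a, b, c;

        a.next = &b; b.next = &c; c.next = &a;  /* circular list a->b->c */
        a.prev = &c; b.prev = &a; c.prev = &b;
        printf("ok=%d\n", check_links(&a));     /* ok=1 */

        b.prev = &c;                            /* corrupt one back-link */
        printf("ok=%d\n", check_links(&a));     /* ok=0 */
        return 0;
}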
static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
struct ring_buffer_per_cpu *cpu_buffer = m->private;
struct ring_buffer_per_cpu *cpu_buffer = m->private;
static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
static struct ring_buffer_per_cpu *
struct ring_buffer_per_cpu *cpu_buffer __free(kfree) =
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
struct ring_buffer_per_cpu, update_pages_work);
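The group above, __rb_allocate_pages() through the update_pages_work callback, builds and resizes a slice. Each slice is allocated on its own CPU's memory node and padded to a cache line, as the kzalloc_node(ALIGN(sizeof(struct ring_buffer_per_cpu), cache_line_size())) call further down the listing shows. A user-space model of that layout decision; CACHE_LINE is an assumption standing in for the kernel's run-time cache_line_size().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Assumed constant: the kernel queries cache_line_size() at run time. */
#define CACHE_LINE      64
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct per_cpu_buffer {         /* stand-in for ring_buffer_per_cpu */
        int             cpu;
        unsigned long   nr_pages;
};

int main(void)
{
        int nr_cpus = 4;
        size_t sz = ALIGN_UP(sizeof(struct per_cpu_buffer), CACHE_LINE);
        struct per_cpu_buffer **buffers = calloc(nr_cpus, sizeof(*buffers));

        /* One cache-line-aligned slice per CPU, so hot writer fields on
         * different CPUs never share a line (no false sharing). */
        for (int cpu = 0; cpu < nr_cpus; cpu++) {
                buffers[cpu] = aligned_alloc(CACHE_LINE, sz);
                memset(buffers[cpu], 0, sz);
                buffers[cpu]->cpu = cpu;
        }
        printf("%d slices, %zu bytes each\n", nr_cpus, sz);

        for (int cpu = 0; cpu < nr_cpus; cpu++)
                free(buffers[cpu]);
        free(buffers);
        return 0;
}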
struct ring_buffer_per_cpu *cpu_buffer;
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
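rb_event_index() above recovers an event's offset within its sub-buffer from the event's address alone: sub-buffers are power-of-two sized and suitably aligned, so masking the address yields the offset, minus the header at the start of each sub-buffer. A standalone model; SUBBUF_SIZE and the 16-byte header are assumptions (the kernel derives both from the configured sub-buffer order).

#include <stdio.h>
#include <stdlib.h>

#define SUBBUF_SIZE     4096    /* assumed: one page, power of two */
#define BUF_HDR_SIZE    16      /* assumed header preceding the data area */

/* Model of rb_event_index(): mask the address to the sub-buffer, then
 * subtract the header to get the index into the data area. */
static unsigned long event_index(void *event)
{
        unsigned long addr = (unsigned long)event;

        addr &= SUBBUF_SIZE - 1;
        return addr - BUF_HDR_SIZE;
}

int main(void)
{
        char *subbuf = aligned_alloc(SUBBUF_SIZE, SUBBUF_SIZE);
        void *event = subbuf + BUF_HDR_SIZE + 40;       /* some event in the page */

        printf("index=%lu\n", event_index(event));      /* 40 */
        free(subbuf);
        return 0;
}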
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer,
static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer,
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
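rb_start_commit() through rb_commit() above handle writers that nest on one CPU (an IRQ or NMI interrupting a writer mid-reserve). Each nested writer bumps a counter; only when the outermost writer finishes is the commit position published, covering everything reserved in between. A standalone model of that counting (the real rb_end_commit() also re-checks for commits that raced in while it was publishing; that loop is omitted):

#include <stdatomic.h>
#include <stdio.h>

/* Per-CPU commit bookkeeping, modelled after ring_buffer_per_cpu's
 * "committing" counter. */
struct cb_model {
        atomic_uint     committing;     /* nesting depth of writers on this CPU */
        unsigned long   commit_pos;     /* last published (committed) position */
        unsigned long   write_pos;      /* last reserved position */
};

/* Model of rb_start_commit(): every writer, nested or not, records that a
 * commit is in flight. */
static void start_commit(struct cb_model *cb)
{
        atomic_fetch_add(&cb->committing, 1);
}

/* Model of rb_end_commit(): only the outermost writer publishes, covering
 * everything nested writers reserved in between. */
static void end_commit(struct cb_model *cb)
{
        if (atomic_fetch_sub(&cb->committing, 1) == 1)
                cb->commit_pos = cb->write_pos;
}

int main(void)
{
        struct cb_model cb = {0};

        start_commit(&cb);              /* normal context reserves an event */
        cb.write_pos += 32;
        start_commit(&cb);              /* an "interrupt" nests a second event */
        cb.write_pos += 16;
        end_commit(&cb);                /* nested end: nothing published yet */
        printf("after nested end: commit=%lu\n", cb.commit_pos);        /* 0 */
        end_commit(&cb);                /* outermost end publishes both */
        printf("after outer end:  commit=%lu\n", cb.commit_pos);        /* 48 */
        return 0;
}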
#define alloc_cpu_buffer(cpu) \
kzalloc_node(ALIGN(sizeof(struct ring_buffer_per_cpu), \
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
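trace_recursive_lock()/trace_recursive_unlock() above stop the same context from re-entering the writer: current_context keeps one bit per context level (NMI, IRQ, softirq, normal), and a write is refused if its own bit is already set, while a higher context interrupting a lower one may proceed. A standalone model (the kernel derives the context from preempt_count and allows one extra nested level; both are simplified away here):

#include <stdio.h>

enum rb_ctx { RB_CTX_NMI, RB_CTX_IRQ, RB_CTX_SOFTIRQ, RB_CTX_NORMAL };

struct cb_model { unsigned int current_context; };

/* Model of trace_recursive_lock(): refuse a write if this context level
 * already holds its bit; a different level nesting in is fine. */
static int recursive_lock(struct cb_model *cb, enum rb_ctx ctx)
{
        unsigned int bit = 1U << ctx;

        if (cb->current_context & bit)
                return -1;              /* same context re-entered */
        cb->current_context |= bit;
        return 0;
}

static void recursive_unlock(struct cb_model *cb, enum rb_ctx ctx)
{
        cb->current_context &= ~(1U << ctx);
}

int main(void)
{
        struct cb_model cb = {0};

        printf("normal:    %d\n", recursive_lock(&cb, RB_CTX_NORMAL)); /* 0 */
        printf("irq:       %d\n", recursive_lock(&cb, RB_CTX_IRQ));    /* 0: nests */
        printf("irq again: %d\n", recursive_lock(&cb, RB_CTX_IRQ));    /* -1: denied */
        recursive_unlock(&cb, RB_CTX_IRQ);
        recursive_unlock(&cb, RB_CTX_NORMAL);
        return 0;
}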
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_per_cpu *cpu_buffer;
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
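rb_num_of_entries() above never walks the buffer; the unread count falls out of three monotonic counters. A sketch of the arithmetic (field names follow the kernel; rb_per_cpu_empty() reports the same condition but tests page pointers rather than counters):

#include <stdio.h>

/* Counters the kernel keeps per CPU buffer, all monotonically increasing. */
struct counters {
        unsigned long entries;  /* events ever written */
        unsigned long overrun;  /* events overwritten before being read */
        unsigned long read;     /* events already consumed by readers */
};

/* Model of rb_num_of_entries(): unread = written - (lost + consumed). */
static unsigned long num_of_entries(const struct counters *c)
{
        return c->entries - (c->overrun + c->read);
}

int main(void)
{
        struct counters c = { .entries = 100, .overrun = 12, .read = 80 };

        printf("unread=%lu\n", num_of_entries(&c));     /* 8 */
        return 0;
}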
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
struct ring_buffer_per_cpu *cpu_buffer;
static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
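The reader path above (rb_update_read_stamp() through rb_buffer_peek()) is built around rb_get_reader_page(): the reader owns one spare page and swaps it with the head page, taking a whole page of events out of the ring with no writer contention, while the writer keeps the same number of pages to cycle through. A standalone model of that swap (NR_PAGES and the array layout are simplifications of the kernel's linked list):

#include <stdio.h>

#define NR_PAGES 4

struct page { int id; int entries; };

struct ring_model {
        struct page *ring[NR_PAGES];    /* pages the writer cycles through */
        int head;                       /* slot holding the oldest page */
        struct page *reader_page;       /* spare page owned by the reader */
};

/* Model of rb_get_reader_page(): instead of reading in place, swap the
 * reader's private spare page with the head page. */
static struct page *get_reader_page(struct ring_model *rb)
{
        struct page *head = rb->ring[rb->head];

        if (!head->entries)
                return NULL;                    /* nothing to read */

        rb->ring[rb->head] = rb->reader_page;   /* spare joins the ring */
        rb->reader_page = head;                 /* head page leaves it */
        rb->head = (rb->head + 1) % NR_PAGES;   /* next-oldest becomes head */
        return head;
}

int main(void)
{
        struct page p0 = {0, 3}, p1 = {1, 2}, p2 = {2, 0}, p3 = {3, 0};
        struct page spare = {99, 0};
        struct ring_model rb = { {&p0, &p1, &p2, &p3}, 0, &spare };

        struct page *r = get_reader_page(&rb);
        printf("reader got page %d (%d entries); slot 0 now holds page %d\n",
               r->id, r->entries, rb.ring[0]->id);      /* page 0, 3, 99 */
        return 0;
}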
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu **buffers;
static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
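rb_reader_lock()/rb_reader_unlock() above return and consume a "locked" flag because some callers must never wait: in NMI context the kernel only try-locks the reader lock and carries on (with recording disabled) if that fails. A pthread stand-in for the pattern; can_block models the in_nmi() test, compile with -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;

/* Model of rb_reader_lock(): blockable contexts take the lock; NMI-like
 * contexts only try, and report back whether they got it. */
static bool reader_lock(bool can_block)
{
        if (can_block) {
                pthread_mutex_lock(&reader_lock);
                return true;
        }
        return pthread_mutex_trylock(&reader_lock) == 0;
}

/* Model of rb_reader_unlock(): only unlock what was actually locked. */
static void reader_unlock(bool locked)
{
        if (locked)
                pthread_mutex_unlock(&reader_lock);
}

int main(void)
{
        bool locked = reader_lock(true);
        bool nested = reader_lock(false);       /* would deadlock if it blocked */

        printf("outer=%d nested=%d\n", locked, nested); /* outer=1 nested=0 */
        reader_unlock(nested);
        reader_unlock(locked);
        return 0;
}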
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
static int rb_page_id(struct ring_buffer_per_cpu *cpu_buffer,
static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer_a;
struct ring_buffer_per_cpu *cpu_buffer_b;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_per_cpu *cpu_buffer;
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
static struct ring_buffer_per_cpu *
struct ring_buffer_per_cpu *cpu_buffer;
static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer)
static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_per_cpu *cpu_buffer =
container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *cpu_buffer;