#include <linux/bits.h>
#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/static_call.h>
#include <linux/slab.h>
#include <trace/events/sched.h>
#include "ftrace_internal.h"
#include "trace.h"
#define FGRAPH_FRAME_SIZE sizeof(struct ftrace_ret_stack)
#define FGRAPH_FRAME_OFFSET DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))
#define FGRAPH_FRAME_OFFSET_BITS 10
#define FGRAPH_FRAME_OFFSET_MASK GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)
#define FGRAPH_TYPE_BITS 2
#define FGRAPH_TYPE_MASK GENMASK(FGRAPH_TYPE_BITS - 1, 0)
#define FGRAPH_TYPE_SHIFT FGRAPH_FRAME_OFFSET_BITS
enum {
FGRAPH_TYPE_RESERVED = 0,
FGRAPH_TYPE_BITMAP = 1,
FGRAPH_TYPE_DATA = 2,
};
#define FGRAPH_INDEX_BITS 16
#define FGRAPH_INDEX_MASK GENMASK(FGRAPH_INDEX_BITS - 1, 0)
#define FGRAPH_INDEX_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
#define FGRAPH_DATA_BITS 5
#define FGRAPH_DATA_MASK GENMASK(FGRAPH_DATA_BITS - 1, 0)
#define FGRAPH_DATA_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
#define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS))
#define FGRAPH_DATA_INDEX_BITS 4
#define FGRAPH_DATA_INDEX_MASK GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
#define FGRAPH_DATA_INDEX_SHIFT (FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)
/*
 * Note: FGRAPH_MAX_INDEX is never expanded in this file, and
 * FGRAPH_INDEX_SIZE and FGRAPH_RET_INDEX are not defined here;
 * it is kept only as a leftover of the old frame layout.
 */
#define FGRAPH_MAX_INDEX \
	((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)
#define FGRAPH_ARRAY_SIZE FGRAPH_INDEX_BITS
#define SHADOW_STACK_SIZE (4096)
#define SHADOW_STACK_OFFSET (SHADOW_STACK_SIZE / sizeof(long))
#define SHADOW_STACK_MAX_OFFSET \
(SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))
#define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))
#define SHADOW_STACK_TASK_VARS(ret_stack) \
((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))
DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;
static struct kmem_cache *fgraph_stack_cachep;
static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
static unsigned long fgraph_array_bitmask;
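/*
 * Released fgraph_array indexes are recycled through a FIFO table so
 * that an index is not reused while stale bitmap words naming it may
 * still be live on some task's shadow stack.
 */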
static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
static int fgraph_lru_next;
static int fgraph_lru_last;
static void fgraph_lru_init(void)
{
int i;
for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
fgraph_lru_table[i] = i;
}
static int fgraph_lru_release_index(int idx)
{
if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
return -1;
fgraph_lru_table[fgraph_lru_last] = idx;
fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;
clear_bit(idx, &fgraph_array_bitmask);
return 0;
}
static int fgraph_lru_alloc_index(void)
{
int idx = fgraph_lru_table[fgraph_lru_next];
if (idx == -1)
return -1;
fgraph_lru_table[fgraph_lru_next] = -1;
fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;
set_bit(idx, &fgraph_array_bitmask);
return idx;
}
static inline int __get_offset(unsigned long val)
{
return val & FGRAPH_FRAME_OFFSET_MASK;
}
static inline int __get_type(unsigned long val)
{
return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
}
static inline int __get_data_index(unsigned long val)
{
return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
}
static inline int __get_data_size(unsigned long val)
{
return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
}
static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
{
return t->ret_stack[offset];
}
static inline int get_frame_offset(struct task_struct *t, int offset)
{
return __get_offset(t->ret_stack[offset]);
}
static inline unsigned long
get_bitmap_bits(struct task_struct *t, int offset)
{
return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
}
static inline void
set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
{
t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
(FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
}
static inline void *get_data_type_data(struct task_struct *t, int offset)
{
unsigned long val = t->ret_stack[offset];
if (__get_type(val) != FGRAPH_TYPE_DATA)
return NULL;
offset -= __get_data_size(val);
return (void *)&t->ret_stack[offset];
}
static inline unsigned long make_data_type_val(int idx, int size, int offset)
{
return (idx << FGRAPH_DATA_INDEX_SHIFT) |
((size - 1) << FGRAPH_DATA_SHIFT) |
(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
}
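/*
 * For illustration: make_data_type_val(1, 2, 10) encodes data index 1
 * of two words at frame offset 10:
 *   (1 << 17) | (1 << 12) | (FGRAPH_TYPE_DATA << 10) | 10 == 0x2180a
 */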
static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops,
struct ftrace_regs *fregs)
{
return 0;
}
static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops,
struct ftrace_regs *fregs)
{
}
static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
{
unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
gvals[idx] = val;
}
static unsigned long *
ret_stack_get_task_var(struct task_struct *t, int idx)
{
unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
return &gvals[idx];
}
static void ret_stack_init_task_vars(unsigned long *ret_stack)
{
unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);
memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
}
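/**
 * fgraph_reserve_data - Reserve storage on the task's ret_stack
 * @idx:	The index of fgraph_array
 * @size_bytes:	The size in bytes to reserve
 *
 * Reserves storage of up to FGRAPH_MAX_DATA_SIZE on the task's
 * ret_stack shadow stack, for a given fgraph_ops during the
 * entryfunc() call. If the entryfunc() returns zero, the storage is
 * discarded. The fgraph_ops retfunc() can retrieve this stored data
 * with fgraph_retrieve_data().
 *
 * Returns: On success, a pointer to the data on the stack.
 *   Otherwise, NULL if there's not enough space left on the ret_stack.
 */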
void *fgraph_reserve_data(int idx, int size_bytes)
{
unsigned long val;
void *data;
int curr_ret_stack = current->curr_ret_stack;
int data_size;
if (size_bytes > FGRAPH_MAX_DATA_SIZE)
return NULL;
data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);
val = get_fgraph_entry(current, curr_ret_stack - 1);
	data = &current->ret_stack[curr_ret_stack];
curr_ret_stack += data_size + 1;
if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
return NULL;
val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);
current->ret_stack[curr_ret_stack - 1] = val;
barrier();
current->curr_ret_stack = curr_ret_stack;
current->ret_stack[curr_ret_stack - 1] = val;
return data;
}
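/**
 * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
 * @idx:	The index of fgraph_array
 * @size_bytes:	pointer to the retrieved data size
 *
 * This is to be called by a fgraph_ops retfunc(), to retrieve data
 * that was stored by the fgraph_ops entryfunc() on function entry.
 * That is, this will find the corresponding entry of the function
 * that the retfunc() is called for.
 *
 * Returns: The stored data on success, NULL otherwise.
 */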
void *fgraph_retrieve_data(int idx, int *size_bytes)
{
return fgraph_retrieve_parent_data(idx, size_bytes, 0);
}
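/**
 * fgraph_get_task_var - retrieve a task specific state variable
 * @gops: The fgraph_ops that owns the task specific variable
 *
 * Every registered fgraph_ops has a task state variable reserved on
 * the task's ret_stack. This returns the address of that variable for
 * the current task.
 */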
unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
{
return ret_stack_get_task_var(current, gops->idx);
}
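/*
 * Walk down from @offset to the previous fgraph frame on @t's shadow
 * stack. On return, *@frame_offset holds the offset of that frame.
 * Calling this with offset == task->curr_ret_stack gives the topmost
 * frame; NULL is returned at the bottom of the stack.
 */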
static inline struct ftrace_ret_stack *
get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
{
int offs;
BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));
if (unlikely(offset <= 0))
return NULL;
offs = get_frame_offset(t, --offset);
if (WARN_ON_ONCE(offs <= 0 || offs > offset))
return NULL;
offset -= offs;
*frame_offset = offset;
return RET_STACK(t, offset);
}
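/**
 * fgraph_retrieve_parent_data - get data from a parent function
 * @idx:	The index into the fgraph_array (fgraph_ops::idx)
 * @size_bytes:	A pointer to the retrieved data size
 * @depth:	The depth to find the parent (0 is the current function)
 *
 * This is similar to fgraph_retrieve_data() but can be used to
 * retrieve data stored by an entryfunc() @depth callers up the
 * shadow stack.
 *
 * Returns: The stored data on success, NULL otherwise.
 */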
void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
{
struct ftrace_ret_stack *ret_stack = NULL;
int offset = current->curr_ret_stack;
unsigned long val;
if (offset <= 0)
return NULL;
for (;;) {
int next_offset;
ret_stack = get_ret_stack(current, offset, &next_offset);
if (!ret_stack || --depth < 0)
break;
offset = next_offset;
}
if (!ret_stack)
return NULL;
offset--;
val = get_fgraph_entry(current, offset);
while (__get_type(val) == FGRAPH_TYPE_DATA) {
if (__get_data_index(val) == idx)
goto found;
offset -= __get_data_size(val) + 1;
val = get_fgraph_entry(current, offset);
}
return NULL;
found:
if (size_bytes)
*size_bytes = __get_data_size(val) * sizeof(long);
return get_data_type_data(current, offset);
}
#ifdef CONFIG_DYNAMIC_FTRACE
int __weak ftrace_enable_ftrace_graph_caller(void)
{
return 0;
}
int __weak ftrace_disable_ftrace_graph_caller(void)
{
return 0;
}
#endif
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
struct fgraph_ops *gops,
struct ftrace_regs *fregs)
{
return 0;
}
static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops,
struct ftrace_regs *fregs)
{
}
static struct fgraph_ops fgraph_stub = {
.entryfunc = ftrace_graph_entry_stub,
.retfunc = ftrace_graph_ret_stub,
};
static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
/*
 * FGRAPH_NO_DIRECT is used below but defined nowhere else in this
 * file; assume the single-user "direct" fast path requires static
 * call support.
 */
#ifdef CONFIG_HAVE_STATIC_CALL
# define FGRAPH_NO_DIRECT 0
#else
# define FGRAPH_NO_DIRECT 1
#endif

#if FGRAPH_NO_DIRECT
static DEFINE_STATIC_KEY_FALSE(fgraph_do_direct);
#else
static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
#endif
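/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called to
 * try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */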
void ftrace_graph_stop(void)
{
static_branch_enable(&kill_ftrace_graph);
}
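/* Add a function return address to the trace stack on thread info. */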
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp,
int fgraph_idx)
{
struct ftrace_ret_stack *ret_stack;
unsigned long val;
int offset;
if (unlikely(ftrace_graph_is_dead()))
return -EBUSY;
if (!current->ret_stack)
return -EBUSY;
BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));
	/* Set val to "reserved" with the delta to the new fgraph frame */
	val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;

	/* Make sure ret_stack is tested before anything else is read */
	smp_rmb();

	/* Check for room of both a fgraph frame and its bitmap word */
	if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	offset = READ_ONCE(current->curr_ret_stack);
	ret_stack = RET_STACK(current, offset);
	offset += FGRAPH_FRAME_OFFSET;

	/* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
	current->ret_stack[offset] = val;
	ret_stack->ret = ret;
	/*
	 * The unwinders expect curr_ret_stack to point to either zero
	 * or an offset where to find the next ret_stack. Even though the
	 * ret stack might be bogus, if the values are off, it does not
	 * matter because the unwinder would see a dereference of
	 * return_to_handler and it would fail.
	 */
	barrier();
	WRITE_ONCE(current->curr_ret_stack, offset + 1);
	/*
	 * This next barrier is to ensure that an interrupt coming in
	 * will not corrupt what is about to be written.
	 */
	barrier();

	/* Still keep it reserved even if an interrupt came in */
	current->ret_stack[offset] = val;
ret_stack->ret = ret;
ret_stack->func = func;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ret_stack->fp = frame_pointer;
#endif
ret_stack->retp = retp;
return offset;
}
#ifndef MCOUNT_INSN_SIZE
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
# error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif
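/*
 * Called from the architecture entry trampoline. Pushes a fgraph frame
 * for @func, calls the entryfunc() of every registered fgraph_ops that
 * traces @func, and records in the frame's bitmap word which of them
 * want the return callback. Returns 0 if the frame was kept, -EBUSY if
 * every entryfunc() declined or the frame could not be pushed.
 */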
int function_graph_enter_regs(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp,
struct ftrace_regs *fregs)
{
struct ftrace_graph_ent trace;
unsigned long bitmap = 0;
int offset;
int bit;
int i;
bit = ftrace_test_recursion_trylock(func, ret);
if (bit < 0)
return -EBUSY;
trace.func = func;
trace.depth = ++current->curr_ret_depth;
offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
if (offset < 0)
goto out;
#ifdef CONFIG_HAVE_STATIC_CALL
if (static_branch_likely(&fgraph_do_direct)) {
int save_curr_ret_stack = current->curr_ret_stack;
if (static_call(fgraph_func)(&trace, fgraph_direct_gops, fregs))
bitmap |= BIT(fgraph_direct_gops->idx);
else
current->curr_ret_stack = save_curr_ret_stack;
} else
#endif
{
for_each_set_bit(i, &fgraph_array_bitmask,
sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
int save_curr_ret_stack;
if (gops == &fgraph_stub)
continue;
save_curr_ret_stack = current->curr_ret_stack;
if (ftrace_ops_test(&gops->ops, func, NULL) &&
gops->entryfunc(&trace, gops, fregs))
bitmap |= BIT(i);
else
current->curr_ret_stack = save_curr_ret_stack;
}
}
if (!bitmap)
goto out_ret;
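	/*
	 * Bit zero of the bitmap word is used as a tail-call checking
	 * flag; keep it set regardless of which fgraph_array indexes
	 * fired.
	 */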
set_bitmap(current, offset, bitmap | BIT(0));
ftrace_test_recursion_unlock(bit);
return 0;
out_ret:
current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
out:
current->curr_ret_depth--;
ftrace_test_recursion_unlock(bit);
return -EBUSY;
}
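/*
 * Retrieve a function return address from the shadow stack and locate
 * the fgraph frame that goes with it.
 */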
static struct ftrace_ret_stack *
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
unsigned long frame_pointer, int *offset)
{
struct ftrace_ret_stack *ret_stack;
ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);
if (unlikely(!ret_stack)) {
ftrace_graph_stop();
WARN(1, "Bad function graph ret_stack pointer: %d",
current->curr_ret_stack);
*ret = (unsigned long)panic;
return NULL;
}
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
if (unlikely(ret_stack->fp != frame_pointer)) {
ftrace_graph_stop();
WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
" from func %ps return to %lx\n",
ret_stack->fp,
frame_pointer,
(void *)ret_stack->func,
ret_stack->ret);
*ret = (unsigned long)panic;
return NULL;
}
#endif
*offset += FGRAPH_FRAME_OFFSET;
*ret = ret_stack->ret;
trace->func = ret_stack->func;
	trace->overrun = atomic_read(&current->trace_overrun);
trace->depth = current->curr_ret_depth;
barrier();
return ret_stack;
}
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
void *unused)
{
switch (state) {
case PM_HIBERNATION_PREPARE:
pause_graph_tracing();
break;
case PM_POST_HIBERNATION:
unpause_graph_tracing();
break;
}
return NOTIFY_DONE;
}
static struct notifier_block ftrace_suspend_notifier = {
.notifier_call = ftrace_suspend_notifier_call,
};
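/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */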
static inline unsigned long
__ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointer)
{
struct ftrace_ret_stack *ret_stack;
struct ftrace_graph_ret trace;
unsigned long bitmap;
unsigned long ret;
int offset;
int bit;
int i;
ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);
if (unlikely(!ret_stack)) {
ftrace_graph_stop();
WARN_ON(1);
return (unsigned long)panic;
}
if (fregs)
ftrace_regs_set_instruction_pointer(fregs, ret);
bit = ftrace_test_recursion_trylock(trace.func, ret);
if (unlikely(bit < 0))
goto out;
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
trace.retval = ftrace_regs_get_return_value(fregs);
#endif
bitmap = get_bitmap_bits(current, offset);
#ifdef CONFIG_HAVE_STATIC_CALL
if (!FGRAPH_NO_DIRECT && static_branch_likely(&fgraph_do_direct)) {
if (test_bit(fgraph_direct_gops->idx, &bitmap))
static_call(fgraph_retfunc)(&trace, fgraph_direct_gops, fregs);
} else
#endif
{
for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
if (gops == &fgraph_stub)
continue;
gops->retfunc(&trace, gops, fregs);
}
}
ftrace_test_recursion_unlock(bit);
out:
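	/*
	 * The retfunc() callbacks may still access the current
	 * ret_stack frame; make sure the update of curr_ret_stack
	 * happens after they are done.
	 */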
barrier();
current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;
current->curr_ret_depth--;
return ret;
}
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
{
return __ftrace_return_to_handler(fregs,
ftrace_regs_get_frame_pointer(fregs));
}
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
return __ftrace_return_to_handler(NULL, frame_pointer);
}
#endif
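/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from.
 * @idx: Index down the shadow stack
 *
 * Return the ret_stack entry of @task at call graph depth @idx down
 * the shadow stack. A zero @idx is the first (most recent) caller.
 */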
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
struct ftrace_ret_stack *ret_stack = NULL;
int offset = task->curr_ret_stack;
if (offset < 0)
return NULL;
do {
ret_stack = get_ret_stack(task, offset, &offset);
} while (ret_stack && --idx >= 0);
return ret_stack;
}
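/**
 * ftrace_graph_top_ret_addr - return the top return address in the shadow stack
 * @task: The task to read the shadow stack from.
 *
 * Return the first return address on the shadow stack of @task that is
 * not the fgraph's return_to_handler, or zero if the stack is empty.
 */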
unsigned long ftrace_graph_top_ret_addr(struct task_struct *task)
{
unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
struct ftrace_ret_stack *ret_stack = NULL;
int offset = task->curr_ret_stack;
if (offset < 0)
return 0;
do {
ret_stack = get_ret_stack(task, offset, &offset);
} while (ret_stack && ret_stack->ret == return_handler);
return ret_stack ? ret_stack->ret : 0;
}
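/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 * @task: The task the unwinder is being run on
 * @idx: A state variable, initialized to zero by the caller before the
 *	 first call
 * @ret: The found stack return address
 * @retp: A pointer to the return address on the stack
 *
 * This is called by stack unwinding code to convert a found stack
 * return address (@ret) to its original value, in case the function
 * graph tracer has modified it to be 'return_to_handler'. If the
 * address has not been modified, the unchanged value of @ret is
 * returned.
 */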
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp)
{
struct ftrace_ret_stack *ret_stack;
unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
int i;
if (ret != return_handler)
return ret;
if (!idx)
return ret;
i = *idx ? : task->curr_ret_stack;
while (i > 0) {
ret_stack = get_ret_stack(task, i, &i);
if (!ret_stack)
break;
if (ret_stack->retp == retp &&
ret_stack->ret != return_handler) {
*idx = i;
return ret_stack->ret;
}
}
return ret;
}
static struct ftrace_ops graph_ops = {
.func = ftrace_graph_func,
.flags = FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
#endif
};
void fgraph_init_ops(struct ftrace_ops *dst_ops,
struct ftrace_ops *src_ops)
{
dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;
#ifdef CONFIG_DYNAMIC_FTRACE
if (src_ops) {
dst_ops->func_hash = &src_ops->local_hash;
mutex_init(&dst_ops->local_hash.regex_lock);
INIT_LIST_HEAD(&dst_ops->subop_list);
dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
dst_ops->private = src_ops->private;
}
#endif
}
void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
struct ftrace_regs *fregs);
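/* The callbacks that hook a function */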
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
{
int i;
int ret = 0;
int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
struct task_struct *g, *t;
if (WARN_ON_ONCE(!fgraph_stack_cachep))
return -ENOMEM;
for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
if (!ret_stack_list[i]) {
start = 0;
end = i;
ret = -ENOMEM;
goto free;
}
}
rcu_read_lock();
for_each_process_thread(g, t) {
if (start == end) {
ret = -EAGAIN;
goto unlock;
}
if (t->ret_stack == NULL) {
atomic_set(&t->trace_overrun, 0);
ret_stack_init_task_vars(ret_stack_list[start]);
t->curr_ret_stack = 0;
t->curr_ret_depth = -1;
smp_wmb();
t->ret_stack = ret_stack_list[start++];
}
}
unlock:
rcu_read_unlock();
free:
for (i = start; i < end; i++)
kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
return ret;
}
/*
 * fgraph_no_sleep_time is not defined elsewhere in this file; assume
 * it is the flag (set by the graph tracer's sleep-time option) telling
 * the profiler not to count the time a task spends sleeping.
 */
static bool fgraph_no_sleep_time;

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
struct task_struct *prev,
struct task_struct *next,
unsigned int prev_state)
{
unsigned long long timestamp;
if (!fgraph_no_sleep_time)
return;
timestamp = trace_clock_local();
prev->ftrace_timestamp = timestamp;
if (!next->ftrace_timestamp)
return;
next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
}
static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);
static void
graph_init_task(struct task_struct *t, unsigned long *ret_stack)
{
atomic_set(&t->trace_overrun, 0);
ret_stack_init_task_vars(ret_stack);
t->ftrace_timestamp = 0;
t->curr_ret_stack = 0;
t->curr_ret_depth = -1;
smp_wmb();
t->ret_stack = ret_stack;
}
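/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */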
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
t->curr_ret_stack = 0;
t->curr_ret_depth = -1;
if (t->ret_stack)
WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
if (ftrace_graph_active) {
unsigned long *ret_stack;
if (WARN_ON_ONCE(!fgraph_stack_cachep))
return;
ret_stack = per_cpu(idle_ret_stack, cpu);
if (!ret_stack) {
ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
if (!ret_stack)
return;
per_cpu(idle_ret_stack, cpu) = ret_stack;
}
graph_init_task(t, ret_stack);
}
}
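/* Allocate a return stack for a newly created task */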
void ftrace_graph_init_task(struct task_struct *t)
{
t->ret_stack = NULL;
t->curr_ret_stack = 0;
t->curr_ret_depth = -1;
if (ftrace_graph_active) {
unsigned long *ret_stack;
if (WARN_ON_ONCE(!fgraph_stack_cachep))
return;
ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
if (!ret_stack)
return;
graph_init_task(t, ret_stack);
}
}
void ftrace_graph_exit_task(struct task_struct *t)
{
unsigned long *ret_stack = t->ret_stack;
t->ret_stack = NULL;
barrier();
if (ret_stack) {
if (WARN_ON_ONCE(!fgraph_stack_cachep))
return;
kmem_cache_free(fgraph_stack_cachep, ret_stack);
}
}
#ifdef CONFIG_DYNAMIC_FTRACE
static int fgraph_pid_func(struct ftrace_graph_ent *trace,
struct fgraph_ops *gops,
struct ftrace_regs *fregs)
{
struct trace_array *tr = gops->ops.private;
int pid;
if (tr) {
pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
if (pid == FTRACE_PID_IGNORE)
return 0;
if (pid != FTRACE_PID_TRACE &&
pid != current->pid)
return 0;
}
return gops->saved_func(trace, gops, fregs);
}
void fgraph_update_pid_func(void)
{
struct fgraph_ops *gops;
struct ftrace_ops *op;
if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
return;
list_for_each_entry(op, &graph_ops.subop_list, list) {
if (op->flags & FTRACE_OPS_FL_PID) {
gops = container_of(op, struct fgraph_ops, ops);
gops->entryfunc = ftrace_pids_enabled(op) ?
fgraph_pid_func : gops->saved_func;
if (ftrace_graph_active == 1)
static_call_update(fgraph_func, gops->entryfunc);
}
}
}
#endif
static int start_graph_tracing(void)
{
unsigned long **ret_stack_list;
int ret, cpu;
ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
sizeof(*ret_stack_list), GFP_KERNEL);
if (!ret_stack_list)
return -ENOMEM;
for_each_online_cpu(cpu) {
if (!idle_task(cpu)->ret_stack)
ftrace_graph_init_idle_task(idle_task(cpu), cpu);
}
do {
ret = alloc_retstack_tasklist(ret_stack_list);
} while (ret == -EAGAIN);
if (!ret) {
ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
if (ret)
pr_info("ftrace_graph: Couldn't activate tracepoint"
" probe to kernel_sched_switch\n");
}
kfree(ret_stack_list);
return ret;
}
static void init_task_vars(int idx)
{
struct task_struct *g, *t;
int cpu;
for_each_online_cpu(cpu) {
if (idle_task(cpu)->ret_stack)
ret_stack_set_task_var(idle_task(cpu), idx, 0);
}
read_lock(&tasklist_lock);
for_each_process_thread(g, t) {
if (t->ret_stack)
ret_stack_set_task_var(t, idx, 0);
}
read_unlock(&tasklist_lock);
}
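/*
 * The "direct" path dispatches through a static call instead of
 * iterating fgraph_array; it is only usable while exactly one
 * fgraph_ops is registered.
 */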
static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
{
trace_func_graph_ent_t func = NULL;
trace_func_graph_ret_t retfunc = NULL;
int i;
if (FGRAPH_NO_DIRECT)
return;
if (gops) {
func = gops->entryfunc;
retfunc = gops->retfunc;
fgraph_direct_gops = gops;
} else {
for_each_set_bit(i, &fgraph_array_bitmask,
sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
func = fgraph_array[i]->entryfunc;
retfunc = fgraph_array[i]->retfunc;
fgraph_direct_gops = fgraph_array[i];
}
}
if (WARN_ON_ONCE(!func))
return;
static_call_update(fgraph_func, func);
static_call_update(fgraph_retfunc, retfunc);
if (enable_branch)
static_branch_enable(&fgraph_do_direct);
}
static void ftrace_graph_disable_direct(bool disable_branch)
{
if (FGRAPH_NO_DIRECT)
return;
if (disable_branch)
static_branch_disable(&fgraph_do_direct);
static_call_update(fgraph_func, ftrace_graph_entry_stub);
static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
fgraph_direct_gops = &fgraph_stub;
}
static int fgraph_cpu_init(unsigned int cpu)
{
if (!idle_task(cpu)->ret_stack)
ftrace_graph_init_idle_task(idle_task(cpu), cpu);
return 0;
}
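/*
 * register_ftrace_graph - register an fgraph_ops with the function
 * graph tracer. A minimal usage sketch (my_entry/my_return are
 * hypothetical callers, not part of this file):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops, struct ftrace_regs *fregs)
 *	{
 *		u64 *start = fgraph_reserve_data(gops->idx, sizeof(*start));
 *
 *		if (!start)
 *			return 0;	// no room: skip this function
 *		*start = trace_clock_local();
 *		return 1;		// request the return callback
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace,
 *			      struct fgraph_ops *gops, struct ftrace_regs *fregs)
 *	{
 *		int size;
 *		u64 *start = fgraph_retrieve_data(gops->idx, &size);
 *
 *		if (start)
 *			pr_info("%ps took %llu ns\n", (void *)trace->func,
 *				trace_clock_local() - *start);
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 * register_ftrace_graph(&my_gops) attaches it; unregister_ftrace_graph()
 * detaches it and recycles its fgraph_array index.
 */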
int register_ftrace_graph(struct fgraph_ops *gops)
{
static bool fgraph_initialized;
int command = 0;
int ret = 0;
int i = -1;
if (WARN_ONCE(gops->ops.flags & FTRACE_OPS_FL_GRAPH,
"function graph ops registered again"))
return -EBUSY;
guard(mutex)(&ftrace_lock);
if (!fgraph_stack_cachep) {
fgraph_stack_cachep = kmem_cache_create("fgraph_stack",
SHADOW_STACK_SIZE,
SHADOW_STACK_SIZE, 0, NULL);
if (!fgraph_stack_cachep)
return -ENOMEM;
}
if (!fgraph_initialized) {
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
fgraph_cpu_init, NULL);
if (ret < 0) {
pr_warn("fgraph: Error to init cpu hotplug support\n");
return ret;
}
fgraph_initialized = true;
ret = 0;
}
if (!fgraph_array[0]) {
for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
fgraph_array[i] = &fgraph_stub;
fgraph_lru_init();
}
i = fgraph_lru_alloc_index();
if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub))
return -ENOSPC;
gops->idx = i;
ftrace_graph_active++;
gops->saved_func = gops->entryfunc;
#ifdef CONFIG_DYNAMIC_FTRACE
if (ftrace_pids_enabled(&gops->ops))
gops->entryfunc = fgraph_pid_func;
#endif
if (ftrace_graph_active == 2)
ftrace_graph_disable_direct(true);
if (ftrace_graph_active == 1) {
ftrace_graph_enable_direct(false, gops);
register_pm_notifier(&ftrace_suspend_notifier);
ret = start_graph_tracing();
if (ret)
goto error;
ftrace_graph_return = return_run;
ftrace_graph_entry = entry_run;
command = FTRACE_START_FUNC_RET;
} else {
init_task_vars(gops->idx);
}
gops->ops.flags |= FTRACE_OPS_FL_GRAPH;
ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
if (!ret)
fgraph_array[i] = gops;
error:
if (ret) {
ftrace_graph_active--;
gops->saved_func = NULL;
fgraph_lru_release_index(i);
if (!ftrace_graph_active)
unregister_pm_notifier(&ftrace_suspend_notifier);
}
return ret;
}
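/**
 * unregister_ftrace_graph - unregister a function graph tracer
 * @gops: the fgraph_ops that was previously registered
 */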
void unregister_ftrace_graph(struct fgraph_ops *gops)
{
int command = 0;
if (WARN_ONCE(!(gops->ops.flags & FTRACE_OPS_FL_GRAPH),
"function graph ops unregistered without registering"))
return;
guard(mutex)(&ftrace_lock);
if (unlikely(!ftrace_graph_active))
goto out;
if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
fgraph_array[gops->idx] != gops))
goto out;
if (fgraph_lru_release_index(gops->idx) < 0)
goto out;
fgraph_array[gops->idx] = &fgraph_stub;
ftrace_graph_active--;
if (!ftrace_graph_active)
command = FTRACE_STOP_FUNC_RET;
ftrace_shutdown_subops(&graph_ops, &gops->ops, command);
if (ftrace_graph_active == 1)
ftrace_graph_enable_direct(true, NULL);
else if (!ftrace_graph_active)
ftrace_graph_disable_direct(false);
if (!ftrace_graph_active) {
ftrace_graph_return = ftrace_stub_graph;
ftrace_graph_entry = ftrace_graph_entry_stub;
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
}
out:
	/* Always reset saved_func, even when bailing out early */
	gops->saved_func = NULL;
	gops->ops.flags &= ~FTRACE_OPS_FL_GRAPH;
}