context_tracking — per-CPU state behind the kernel's context-tracking subsystem: the RCU watching bit plus user/kernel/guest/idle transition accounting. The lines below are its main use sites.
return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
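This is the fast-path "is RCU watching this CPU?" test; the raw_ read keeps it usable from noinstr code. A sketch of the containing helper, matching the upstream rcu_is_watching_curr_cpu() shape (treat the exact name as an assumption):

static __always_inline bool rcu_is_watching_curr_cpu(void)
{
        /* CT_RCU_WATCHING is the first bit above the context-state bits. */
        return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
}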
return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
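Keeping the RCU-watching counter and the context state in one atomic word lets a single add_return publish both updates. Sketch of the wrapper, modeled on the upstream ct_state_inc():

static __always_inline int ct_state_inc(int incby)
{
        /* Returns the new ->state so callers can sanity-check the transition. */
        return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}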
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
return __this_cpu_read(context_tracking.nesting);
return __this_cpu_read(context_tracking.nmi_nesting);
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
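The per_cpu_ptr() prologue above belongs to the cross-CPU accessors; the __this_cpu_read() lines are their current-CPU counterparts. Reconstructed shapes, following the upstream ct_nesting()/ct_nmi_nesting() family (a sketch; return types per mainline):

static __always_inline long ct_nesting_cpu(int cpu)
{
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

        return ct->nesting;             /* process-level idle entry/exit depth */
}

static __always_inline long ct_nmi_nesting_cpu(int cpu)
{
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

        return ct->nmi_nesting;         /* irq/NMI nesting depth */
}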
#define CT_SIZE (sizeof(((struct context_tracking *)0)->state) * BITS_PER_BYTE)
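CT_SIZE uses the sizeof-through-a-null-pointer idiom to measure the ->state member at compile time. An illustrative check (not from the source), assuming state is an atomic_t:

/* The (struct context_tracking *)0 is never dereferenced at run time; sizeof
 * is compile-time, so CT_SIZE is just the bit width of ->state (32 for atomic_t). */
static_assert(CT_SIZE == sizeof(atomic_t) * BITS_PER_BYTE);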
DECLARE_PER_CPU(struct context_tracking, context_tracking);
return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
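These two masks split the shared atomic word: CT_STATE_MASK extracts the context (kernel/idle/user/guest), CT_RCU_WATCHING_MASK the watching counter above it (note the raw_atomic_read here, consistent with the other noinstr-safe reads of ->state). Sketches of the containing helpers, modeled on the upstream __ct_state() and ct_rcu_watching():

static __always_inline int __ct_state(void)
{
        /* Low bits: CT_STATE_KERNEL / CT_STATE_IDLE / CT_STATE_USER / CT_STATE_GUEST. */
        return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
}

static __always_inline int ct_rcu_watching(void)
{
        /* High bits: the RCU-watching counter. */
        return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
}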
#define TRACE_SYSTEM context_tracking
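TRACE_SYSTEM names the tracepoint subsystem; this define lives in the events header, which follows the standard include/trace/events wrapper (boilerplate sketched below; upstream defines the user_enter/user_exit events inside it):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM context_tracking

#if !defined(_TRACE_CONTEXT_TRACKING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CONTEXT_TRACKING_H

#include <linux/tracepoint.h>

/* DECLARE_EVENT_CLASS()/DEFINE_EVENT() bodies go here (user_enter, user_exit). */

#endif /* _TRACE_CONTEXT_TRACKING_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>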
struct context_tracking *ct = this_cpu_ptr(&context_tracking);
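This local pointer is the shared prologue of the entry/exit helpers in kernel/context_tracking.c, which is why the hit repeats. A deliberately simplified sketch in the style of the upstream ct_nmi_enter(), showing how the pointer is used (the real function also handles task-RCU hooks, tracing, and store tearing; treat this as approximate):

noinstr void ct_nmi_enter_sketch(void)
{
        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
        long incby = 2;

        /* First interrupt taken while RCU was not watching: flip the bit. */
        if (!rcu_is_watching_curr_cpu()) {
                ct_state_inc(CT_RCU_WATCHING);
                incby = 1;      /* odd nmi_nesting records that we flipped it */
        }

        WRITE_ONCE(ct->nmi_nesting, ct->nmi_nesting + incby);
}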
DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
        .state = ATOMIC_INIT(CT_RCU_WATCHING),  /* boot CPUs start with RCU watching */
};
EXPORT_SYMBOL_GPL(context_tracking);
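Per the upstream definition, the initializer also seeds the idle-tracking fields (.nesting = 1 and .nmi_nesting = CT_NESTING_IRQ_NONIDLE) under CONFIG_CONTEXT_TRACKING_IDLE; they are omitted above for brevity.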
recursion = __this_cpu_inc_return(context_tracking.recursion);
__this_cpu_dec(context_tracking.recursion);
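The inc/dec pair above is a per-CPU recursion guard around the user enter/exit paths; one decrement sits on the guard's own error path, the other in the exit helper. Reconstructed pair, modeled on the upstream context_tracking_recursion_enter()/_exit():

static noinstr bool context_tracking_recursion_enter(void)
{
        int recursion = __this_cpu_inc_return(context_tracking.recursion);

        if (recursion == 1)
                return true;    /* sole owner of the path on this CPU */

        WARN_ONCE(recursion < 1, "Invalid context tracking recursion value %d\n", recursion);
        __this_cpu_dec(context_tracking.recursion);     /* back out our increment */
        return false;
}

static __always_inline void context_tracking_recursion_exit(void)
{
        __this_cpu_dec(context_tracking.recursion);
}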
if (!per_cpu(context_tracking.active, cpu)) {
per_cpu(context_tracking.active, cpu) = true;
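The active flag gates per-CPU user-context tracking; flipping it also bumps the static key that context_tracking_enabled() tests. A trimmed sketch following the upstream ct_cpu_track_user() (the real __init function also performs one-time setup not shown here):

void __init ct_cpu_track_user(int cpu)
{
        if (!per_cpu(context_tracking.active, cpu)) {
                per_cpu(context_tracking.active, cpu) = true;
                /* Key behind the static branch in context_tracking_enabled(). */
                static_branch_inc(&context_tracking_key);
        }
}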