preempt_count
int preempt_count; /* 0 => preemptable, <0 => BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
int preempt_count; /* 0 => preemptible, <0 => BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
offsetof(struct thread_info, preempt_count));
int preempt_count; /* 0 => preemptable, <0 => bug */
.preempt_count = INIT_PREEMPT_COUNT, \
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
u64 pc = READ_ONCE(ti->preempt_count);
return !pc || !READ_ONCE(ti->preempt_count);
u64 pc = READ_ONCE(current_thread_info()->preempt_count);
.preempt_count = INIT_PREEMPT_COUNT, \
u64 preempt_count; /* 0 => preemptible, <0 => bug */
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
int preempt_count;
.preempt_count = INIT_PREEMPT_COUNT, \
DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
int preempt_count; /* 0=>preemptible,<0=>BUG */
.preempt_count = 1, \
int preempt_count; /* 0 => preemptible, <0 => BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
int preempt_count; /* 0 => preemptable, <0 => BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
__s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
.preempt_count = INIT_PREEMPT_COUNT, \
DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
int preempt_count; /* 0 => preemptible, <0 => BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
int preempt_count; /* 0 => preemptable,<0 => BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
OFFSET(TI_PREEMPT_COUNT, thread_info, preempt_count);
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
int preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */
.preempt_count = INIT_PREEMPT_COUNT, \
DEFINE(TI_PRE_COUNT, offsetof(struct task_struct, thread_info.preempt_count));
int preempt_count; /* 0 => preemptable,
.preempt_count = INIT_PREEMPT_COUNT, \
if (preempt_count() == 0)
current_thread_info()->preempt_count = HARDIRQ_OFFSET;
int preempt_count; /* 0=>preemptible, <0=>BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
__s32 preempt_count; /* 0x03a8 */
__atomic_add(val, &get_lowcore()->preempt_count);
lc_preempt = offsetof(struct lowcore, preempt_count);
: "=@cc" (cc), "+m" (((struct lowcore *)0)->preempt_count)
return __atomic_add_const_and_test(-1, &get_lowcore()->preempt_count);
return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
BUILD_BUG_ON(sizeof_field(struct lowcore, preempt_count) != sizeof(int));
lc_preempt = offsetof(struct lowcore, preempt_count);
"m" (((struct lowcore *)0)->preempt_count));
old = READ_ONCE(get_lowcore()->preempt_count);
} while (!arch_try_cmpxchg(&get_lowcore()->preempt_count, &old, new));
__atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
__atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED);
lc_preempt = offsetof(struct lowcore, preempt_count);
: "+m" (((struct lowcore *)0)->preempt_count)
lc->preempt_count = INIT_PREEMPT_COUNT;
lc->preempt_count = get_lowcore()->preempt_count;
lc->preempt_count = PREEMPT_DISABLED;
lc->preempt_count = PREEMPT_DISABLED;
int preempt_count; /* 0 => preemptable, <0 => BUG */
.preempt_count = INIT_PREEMPT_COUNT, \
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
irqctx->tinfo.preempt_count = 0;
irqctx->tinfo.preempt_count =
(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
int preempt_count; /* 0 => preemptable,
.preempt_count = INIT_PREEMPT_COUNT, \
.preempt_count = INIT_PREEMPT_COUNT, \
int preempt_count; /* 0 => preemptable, <0 => BUG */
TI_PREEMPT != offsetof(struct thread_info, preempt_count) ||
preempt_count) ||
int preempt_count; /* 0 => preemptable,
.preempt_count = INIT_PREEMPT_COUNT, \
__s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
.preempt_count = INIT_PREEMPT_COUNT, \
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
return READ_ONCE(current_thread_info()->preempt_count);
return &current_thread_info()->preempt_count;
task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
return unlikely(preempt_count() == preempt_offset &&
return preempt_count();
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
(preempt_count() != 0 || \
(preempt_count() == 0 && \
#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
#define in_atomic() (preempt_count() != 0)
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
unsigned long pc = preempt_count();
entry->preempt_count = trace_ctx & 0xff;
unsigned char preempt_count;
int count = preempt_count();
if (preempt_count() != count) {
return preempt_count() == 0 && !irqs_disabled() &&
if (!preempt_count()) {
preempt_count());
#define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count()
!(preempt_count() & PREEMPT_MASK), ROEC_ARGS);
(preempt_count() & PREEMPT_MASK), ROEC_ARGS);
if ((preempt_count() & HARDIRQ_MASK) || softirq_count())
if (!preempt_count() &&
return (preempt_count() & PREEMPT_MASK);
if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
(preempt_count() == HARDIRQ_OFFSET))) {
!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
current->comm, current->pid, preempt_count()))
if (preempt_count() == val) {
if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
if (preempt_count() == val)
if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
!(preempt_count() & PREEMPT_MASK)))
prev->comm, prev->pid, preempt_count());
BUG_ON(preempt_count() || !irqs_disabled());
if (preempt_count() == preempt_offset)
unsigned int nested = preempt_count();
pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
if (preempt_count() > preempt_offset)
if (preempt_count() > 0)
if (preempt_count() == cnt) {
if (preempt_count() == cnt)
prev_count = preempt_count();
if (unlikely(prev_count != preempt_count())) {
prev_count, preempt_count());
WARN_ONCE(preempt_count(),
int count = preempt_count();
if (count != preempt_count()) {
fn, count, preempt_count());
if (preempt_count() != 0 || irqs_disabled()) {
unsigned long pc = preempt_count();
pc = preempt_count();
__common_field(unsigned char, preempt_count);
if (!irqs_disabled_flags(*flags) && !preempt_count())
if (preempt_trace(preempt_count()) || irq_trace())
if (preempt_trace(preempt_count()) || irq_trace())
if (!preempt_trace(preempt_count()) && irq_trace())
if (!preempt_trace(preempt_count()) && irq_trace())
if (preempt_trace(preempt_count()) && !irq_trace())
if (preempt_trace(preempt_count()) && !irq_trace())
if (entry->preempt_count & 0xf)
trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
if (entry->preempt_count & 0xf0)
trace_seq_printf(s, "%x", entry->preempt_count >> 4);
entry->preempt_count & 0xf, iter->idx);
if (preempt_count()) {
if (preempt_count()) {
current->comm, task_pid_nr(current), preempt_count(),
int saved_preempt_count = preempt_count();
if (likely(preempt_count()))
what1, what2, preempt_count() - 1, current->comm, current->pid);
unsigned char preempt_count;
nr_allocated, preempt_count);
nr_allocated, preempt_count);
nr_allocated, preempt_count);
nr_allocated, preempt_count);
nr_allocated, preempt_count);
nr_allocated, preempt_count);
nr_allocated, preempt_count);
nr_allocated, preempt_count);
nr_allocated, preempt_count);
nr_allocated, preempt_count);
int preempt_count;
if (bpf_core_field_exists(pcpu_hot.preempt_count))
bpf_this_cpu_ptr(&pcpu_hot))->preempt_count;
if (skel->bss->preempt_count)
int preempt_count;
preempt_count = get_preempt_count();
int preempt_count;
extern int preempt_count;
#define preempt_disable() uatomic_inc(&preempt_count)
#define preempt_enable() uatomic_dec(&preempt_count)