Symbol: vcpu_is_preempted
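vcpu_is_preempted(cpu) is the kernel's paravirt hint that the virtual CPU backing cpu is currently descheduled by the hypervisor. Architectures opt in by defining the macro to their own implementation; everywhere else the generic fallback in include/linux/sched.h (listed below) reports false. The references group naturally by architecture, followed by the generic locking and scheduler call sites that consume the hint.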
arch/arm64/include/asm/spinlock.h:21: #define vcpu_is_preempted vcpu_is_preempted
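As far as mainline goes, the arm64 definition appears to be a constant-false stub: the macro is kept defined for the generic code's benefit (osq_lock() calls it inside smp_cond_load_relaxed()), but arm64 never reports preemption. A sketch of that header:

	#define vcpu_is_preempted vcpu_is_preempted
	static inline bool vcpu_is_preempted(int cpu)
	{
		return false;	/* arm64: never claims the vCPU was preempted */
	}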
arch/loongarch/include/asm/qspinlock.h:37: #define vcpu_is_preempted vcpu_is_preempted
arch/loongarch/include/asm/qspinlock.h:39: bool vcpu_is_preempted(int cpu);
arch/loongarch/kernel/paravirt.c:274: EXPORT_SYMBOL(vcpu_is_preempted);
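LoongArch declares the hook in its qspinlock header and implements and exports it from kernel/paravirt.c. The implementation is presumably the usual steal-time pattern; the sketch below is modeled on the x86/KVM variant, and the steal_time/KVM_VCPU_PREEMPTED names are assumptions rather than verified LoongArch code:

	/* Sketch: the hypervisor flags a vCPU as preempted in the per-CPU
	 * steal-time area it shares with the guest (names assumed). */
	bool vcpu_is_preempted(int cpu)
	{
		struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

		return !!(src->preempted & KVM_VCPU_PREEMPTED);
	}
	EXPORT_SYMBOL(vcpu_is_preempted);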
arch/powerpc/include/asm/paravirt.h:126: #define vcpu_is_preempted vcpu_is_preempted
arch/powerpc/lib/qspinlock.c:372: if (vcpu_is_preempted(owner))
arch/powerpc/lib/qspinlock.c:392: if (node->sleepy || vcpu_is_preempted(prev_cpu)) {
arch/powerpc/lib/qspinlock.c:404: if (vcpu_is_preempted(get_owner_cpu(val)))
arch/powerpc/lib/qspinlock.c:692: if (vcpu_is_preempted(next_cpu))
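All four powerpc qspinlock call sites apply the same heuristic: spinning on (or queueing behind) a vCPU that the hypervisor has descheduled cannot make progress, so the slow path stops busy-waiting and yields instead. A minimal illustration of the pattern, with hypothetical helpers rather than the actual powerpc code:

	/* Illustrative only: spin while the owner runs, give up once its
	 * vCPU is preempted, since it cannot release the lock meanwhile. */
	static bool spin_on_owner(int owner_cpu, bool (*still_held)(void))
	{
		while (still_held()) {
			if (vcpu_is_preempted(owner_cpu))
				return false;	/* caller should yield/sleep */
			cpu_relax();
		}
		return true;			/* lock was released */
	}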
arch/s390/kvm/diag.c:211: if (!vcpu_is_preempted(tcpu_cpu))
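The s390 site is KVM's handler for the guest's directed-yield diagnose (diag 0x9c): the yield is only forwarded when the CPU running the target vCPU is itself seen as preempted, since yielding to a vCPU that is already running would be pointless.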
arch/x86/hyperv/hv_spinlock.c:85: pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
arch/x86/include/asm/paravirt-spinlock.h:20: struct paravirt_callee_save vcpu_is_preempted;
arch/x86/include/asm/paravirt-spinlock.h:47: return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
arch/x86/include/asm/paravirt-spinlock.h:75: #define vcpu_is_preempted vcpu_is_preempted
arch/x86/kernel/kvm.c:658: if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
arch/x86/kernel/kvm.c:846: pv_ops_lock.vcpu_is_preempted =
arch/x86/kernel/paravirt-spinlocks.c:42: return pv_ops_lock.vcpu_is_preempted.func ==
arch/x86/kernel/paravirt-spinlocks.c:60: .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
arch/x86/xen/spinlock.c:142: pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
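On x86 the hook is a callee-save slot in the paravirt lock ops: the native default (__native_vcpu_is_preempted, which returns false) is installed in paravirt-spinlocks.c, and KVM, Xen, and Hyper-V each overwrite it at boot, as the three assignment sites above show. The KVM probe checks the preempted flag the host sets in the guest's per-CPU steal-time record; roughly (treat this as a sketch of arch/x86/kernel/kvm.c, not verified line for line):

	/* True when the host has marked this vCPU preempted in its
	 * guest-visible steal-time record. */
	__visible bool __kvm_vcpu_is_preempted(long cpu)
	{
		struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

		return !!(src->preempted & KVM_VCPU_PREEMPTED);
	}

	/* installed at boot when steal time is available: */
	pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);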
include/linux/sched.h:2294: #ifndef vcpu_is_preempted
include/linux/sched.h:2314: return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
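The sched.h pair is the heart of the interface. Line 2294 opens the generic fallback (returning false, so bare metal never reports a preempted vCPU), and line 2314 sits inside owner_on_cpu(), the helper that mutex and rwsem optimistic spinning use to decide whether a lock owner is genuinely running. Both, essentially as in mainline:

	#ifndef vcpu_is_preempted
	static inline bool vcpu_is_preempted(int cpu)
	{
		return false;	/* generic: never preempted */
	}
	#endif

	/* The owner only counts as running if it is on a CPU and that
	 * CPU's vCPU has not been preempted by the hypervisor. */
	static inline bool owner_on_cpu(struct task_struct *owner)
	{
		return READ_ONCE(owner->on_cpu) &&
		       !vcpu_is_preempted(task_cpu(owner));
	}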
kernel/locking/osq_lock.c:147: vcpu_is_preempted(node_cpu(node->prev))))
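osq_lock(), the MCS-style queue behind that optimistic spinning, applies the same test one step earlier: a waiter bails out and unqueues itself when the CPU of the previous node in the queue is preempted, rather than spin behind a vCPU that is not running.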
kernel/sched/sched.h:1412: if (vcpu_is_preempted(cpu))
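Finally, the scheduler's idle-CPU availability check declines to treat a CPU whose vCPU is preempted as a useful wakeup target: the CPU may look idle to the guest, but it cannot actually run anything until the hypervisor reschedules it.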