READ_ONCE
return READ_ONCE(v->counter);
__ret.val = READ_ONCE(*ptr); \
__ret.val = READ_ONCE(*ptr); \
return READ_ONCE(v->counter);
return READ_ONCE(v->counter);
/*
 * smp_load_acquire(p): read *(p) with acquire ordering — the load completes
 * before any memory access that program-order follows this macro, enforced
 * here with a full barrier (smp_mb()) after the READ_ONCE() load.
 *
 * Fix: parenthesize every use of the macro argument. The original expanded
 * `typeof(*p)` / `READ_ONCE(*p)`, so a call like smp_load_acquire(a + 1)
 * would dereference as `*a + 1` instead of `*(a + 1)`.
 *
 * Uses GNU statement-expression and typeof extensions, matching the original.
 */
#define smp_load_acquire(p) ({ typeof(*(p)) _v = READ_ONCE(*(p)); smp_mb(); _v; })
!!(READ_ONCE(((volatile const unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
/*
 * lockless_dereference(p): fetch a pointer exactly once for lockless use,
 * implemented here as a plain READ_ONCE() wrapper (no explicit barrier).
 * NOTE(review): assumes READ_ONCE() parenthesizes its argument internally
 * and provides any dependency ordering callers need — confirm against the
 * READ_ONCE() definition in this tree before relying on ordering guarantees.
 */
#define lockless_dereference(p) READ_ONCE(p)
val = READ_ONCE(*(_type *)((_ism)->vaddr + (_off))); \
return !READ_ONCE(h->first);
/*
 * rcu_dereference_rtnl(x): single tearing-free read of an RTNL-managed
 * pointer, implemented here as a bare READ_ONCE().
 * NOTE(review): presumably valid only while the RTNL lock or an RCU read
 * section is held; this stripped form performs no lockdep/held-lock check —
 * verify callers satisfy that requirement.
 */
#define rcu_dereference_rtnl(x) READ_ONCE(x)
container_of(READ_ONCE(ptr), type, member)
((__typeof(*p) *)READ_ONCE(p))
((__typeof(*p) *)READ_ONCE(p))
((__typeof(*p) *)READ_ONCE(p))
return (READ_ONCE(q->next) == (const struct sk_buff *)q);
skb = READ_ONCE(q->prev);
return (READ_ONCE(q->qlen));
((__typeof(*(p)) *)READ_ONCE(p))
file1 = READ_ONCE(*f);
file2 = READ_ONCE(*f);
old = READ_ONCE(napi->state);
new = old = READ_ONCE(napi->state);
new = old = READ_ONCE(napi->state);
READ_ONCE(*(volatile u_int *)&(ts)->tasklet_state)
((READ_ONCE(IRDMA_RING_CURRENT_HEAD(_ring)) + IRDMA_RING_SIZE(_ring) - READ_ONCE(IRDMA_RING_CURRENT_TAIL(_ring))) % IRDMA_RING_SIZE(_ring)) \
} while (!READ_ONCE(cqp_request->request_done) && --cnt);
while (!READ_ONCE(cqp_request->request_done)) {
READ_ONCE(cqp_request->request_done),
watchdog_time = READ_ONCE(ring->watchdog_time);
packets += READ_ONCE(ring->packets);
bytes += READ_ONCE(ring->bytes);
packets += READ_ONCE(ring->packets);
bytes += READ_ONCE(ring->bytes);
READ_ONCE(priv->port_up) == 0)) {
if (READ_ONCE(iq->running) == 0)
if (READ_ONCE(sq->running) == 0)
if (READ_ONCE(sq->running) != 0)
while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) {
if (sq != NULL && READ_ONCE(sq->running) != 0) {
READ_ONCE(sq->running) == 0)) {
if (sq != NULL && READ_ONCE(sq->running) != 0)
if (likely(READ_ONCE(sq->running) != 0))
ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
READ_ONCE(uobj->uobject.ufile->async_file);
ib_uverbs_async_handler(READ_ONCE(eobj->uobject.ufile->async_file),
struct ib_ucontext *ucontext = READ_ONCE(ufile->ucontext);