raw_cpu_ptr
clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
struct clock_event_device *clk = raw_cpu_ptr(twd_evt);
struct clock_event_device *clk = raw_cpu_ptr(twd_evt);
op(raw_cpu_ptr(&(pcp)), __VA_ARGS__); \
__retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args); \
ptr__ = raw_cpu_ptr(&(pcp)); \
/*
 * Alias this_cpu_ptr directly to raw_cpu_ptr, dropping the usual
 * preemption-state checking of the this_cpu_* form.
 * NOTE(review): presumably guarded by a !SMP (or arch-specific) #ifdef
 * where the two are equivalent — confirm the surrounding conditional.
 */
#define this_cpu_ptr raw_cpu_ptr
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
typeof(*raw_cpu_ptr(&(pcp))) __ret; \
__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
ptr__ = raw_cpu_ptr(&(pcp)); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ptr__ = raw_cpu_ptr(&(pcp)); \
desc = raw_cpu_ptr(gdt_page.gdt) + idx;
rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
if (!mce_available(raw_cpu_ptr(&cpu_info)))
if (!mce_available(raw_cpu_ptr(&cpu_info)))
if (!mce_available(raw_cpu_ptr(&cpu_info)))
if (!mce_available(raw_cpu_ptr(&cpu_info)))
if (!mce_available(raw_cpu_ptr(&cpu_info)))
if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);
crng = raw_cpu_ptr(&crngs);
batch = raw_cpu_ptr(&batched_entropy_##type); \
ppriv = raw_cpu_ptr(priv->ppriv);
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
fq = raw_cpu_ptr(cookie->percpu_fq);
cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
/*
 * Return this CPU's GIC redistributor entry from the per-CPU rdist array.
 * Uses raw_cpu_ptr (no preemption check), so callers are presumably in a
 * context where CPU migration cannot occur (e.g. IRQ handling or with
 * preemption disabled) — NOTE(review): confirm at call sites.
 */
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
ptr = raw_cpu_ptr(gic->saved_ppi_enable);
ptr = raw_cpu_ptr(gic->saved_ppi_active);
ptr = raw_cpu_ptr(gic->saved_ppi_conf);
ptr = raw_cpu_ptr(gic->saved_ppi_enable);
ptr = raw_cpu_ptr(gic->saved_ppi_active);
ptr = raw_cpu_ptr(gic->saved_ppi_conf);
last = raw_cpu_ptr(stats->last);
ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
/*
 * Pointer to this CPU's mmiowb ordering state. Raw per-CPU access with no
 * preemption-safety check — NOTE(review): callers presumably already run
 * with preemption disabled (e.g. under a spinlock); confirm.
 */
#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state)
TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
*raw_cpu_ptr(&(pcp)); \
*raw_cpu_ptr(&(pcp)) op val; \
TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
/*
 * Function-like alias: this_cpu_ptr(ptr) == raw_cpu_ptr(ptr); the
 * preemption-state sanity checking normally implied by this_cpu_* is
 * not performed here. NOTE(review): presumably this configuration
 * guarantees the raw form is safe — verify the enclosing #ifdef.
 */
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
/*
 * Map this_cpu_ptr(ptr) straight onto raw_cpu_ptr(ptr) — no debug check
 * that preemption is disabled. NOTE(review): second definition of the
 * same alias seen in this listing; presumably from a different arch or
 * config header — confirm which one is active for a given build.
 */
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
*raw_cpu_ptr(sb->alloc_hint) = bitnr;
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \
TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
return in_task() ? ¤t->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
return raw_cpu_ptr(&nbcon_pcpu_emergency_nesting);
sdp = raw_cpu_ptr(ssp->sda);
sdp = raw_cpu_ptr(ssp->sda);
sdp = raw_cpu_ptr(ssp->sda);
struct srcu_ctr *scp = raw_cpu_ptr(scpp);
atomic_long_inc(&raw_cpu_ptr(__srcu_ctr_to_ptr(ssp, idx))->srcu_unlocks);
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
/*
 * This CPU's runqueue via raw per-CPU access (no preemption check).
 * The "raw_" name flags that the caller must either run with preemption
 * disabled or tolerate reading a stale CPU's runqueue after migration —
 * NOTE(review): confirm expectations at each call site.
 */
#define raw_rq() raw_cpu_ptr(&runqueues)
listeners = raw_cpu_ptr(&listener_array);
cpu_base = raw_cpu_ptr(&hrtimer_bases);
struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases);
return in_task() ? ¤t->kmsan_ctx : raw_cpu_ptr(&kmsan_percpu_ctx);
vbq = raw_cpu_ptr(&vmap_block_queue);
struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
raw_cpu_ptr((mdev)->stats); \
raw_cpu_ptr((mdev)->stats); \
this_cpu = raw_cpu_ptr(cpu_stats);
this_cpu = raw_cpu_ptr(cpu_stats);
scratch = *raw_cpu_ptr(m->scratch);
scratch = *raw_cpu_ptr(m->scratch);
state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);