softnet_data: cross-reference of uses of the per-CPU softnet_data struct/variable in the Linux networking core
struct softnet_data *rps_ipi_list;
struct softnet_data *rps_ipi_next;
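
These two members chain per-CPU instances into a singly linked list of remote CPUs that still owe an RPS kick (built in napi_schedule_rps() and drained in net_rps_send_ipi(), both further down). For orientation, an abridged sketch of the structure they live in; field order and most members are elided here, see include/linux/netdevice.h for the real definition:

struct softnet_data {
	struct list_head	poll_list;	/* NAPI instances awaiting poll */
	struct sk_buff_head	process_queue;	/* backlog packets being drained */
	/* ... stats, flow limit, completion/output queues, xmit state ... */
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;	/* remote CPUs this CPU must IPI */
	call_single_data_t	csd;		/* the IPI vehicle itself */
	struct softnet_data	*rps_ipi_next;	/* our link on another CPU's list */
	unsigned int		cpu;		/* CPU this instance belongs to */
#endif
	struct sk_buff_head	input_pkt_queue; /* raw backlog from netif_rx() etc. */
	struct napi_struct	backlog;	/* pseudo-NAPI draining the backlog */
};
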
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
return this_cpu_read(softnet_data.xmit.recursion);
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
__this_cpu_inc(softnet_data.xmit.recursion);
__this_cpu_dec(softnet_data.xmit.recursion);
__this_cpu_write(softnet_data.xmit.more, more);
return __this_cpu_read(softnet_data.xmit.more);
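
The six accessor fragments above belong to the per-CPU transmit-state helpers declared next to this DECLARE_PER_CPU_ALIGNED() in include/linux/netdevice.h, on kernels that keep the xmit state in softnet_data (non-PREEMPT_RT). Reconstructed from the fragments, with the kernel's cap of eight nested dev_queue_xmit() calls:

#define XMIT_RECURSION_LIMIT	8

static inline int dev_recursion_level(void)
{
	return this_cpu_read(softnet_data.xmit.recursion);
}

/* True once a packet has re-entered dev_queue_xmit() too many times
 * (e.g. tunnels stacked on tunnels); the caller drops it instead of
 * overflowing the stack. */
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}

/* xmit.more tells the driver whether the stack will immediately hand it
 * another skb for the same queue, letting it batch doorbell writes. */
static inline void netdev_xmit_set_more(bool more)
{
	__this_cpu_write(softnet_data.xmit.more, more);
}

static inline bool netdev_xmit_more(void)
{
	return __this_cpu_read(softnet_data.xmit.more);
}
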
static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd)
static inline void rps_input_queue_head_add(struct softnet_data *sd, int val)
static inline void rps_input_queue_head_incr(struct softnet_data *sd)
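
These three helpers (net/core/dev.h in recent trees) maintain the backlog queue's tail/head packet counters. The head counter is read locklessly from other CPUs by RFS (see the READ_ONCE() lines further down), so the write side must use WRITE_ONCE(). A sketch of plausible bodies; the exact kernel source may differ in detail:

static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return ++sd->input_queue_tail;	/* only the enqueuing side touches tail */
#else
	return 0;
#endif
}

static inline void rps_input_queue_head_add(struct softnet_data *sd, int val)
{
#ifdef CONFIG_RPS
	/* Paired with the READ_ONCE() in get_rps_cpu()/set_rps_cpu(). */
	WRITE_ONCE(sd->input_queue_head, sd->input_queue_head + val);
#endif
}

static inline void rps_input_queue_head_incr(struct softnet_data *sd)
{
	rps_input_queue_head_add(sd, 1);
}
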
void xfrm_dev_backlog(struct softnet_data *sd);
static inline void xfrm_dev_backlog(struct softnet_data *sd)
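
The declaration and the empty static inline above are the two halves of the usual config-gating idiom, here keyed off the IPsec hardware-offload option so that callers in net/core/dev.c need no #ifdefs:

#ifdef CONFIG_XFRM_OFFLOAD
void xfrm_dev_backlog(struct softnet_data *sd);
#else
static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}
#endif
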
struct softnet_data *sd, *oldsd, *remsd = NULL;
sd = &per_cpu(softnet_data, cpu);
oldsd = &per_cpu(softnet_data, oldcpu);
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
struct softnet_data *sd = &per_cpu(softnet_data, i);
static inline void backlog_lock_irq_save(struct softnet_data *sd,
static inline void backlog_lock_irq_disable(struct softnet_data *sd)
static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
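
These four wrappers arrived with the threaded-backlog/PREEMPT_RT rework. When another execution context can actually touch the queue (RPS steering compiled in, or the backlog NAPI running in a kthread) they take the input_pkt_queue spinlock; otherwise masking local interrupts is already sufficient. A sketch consistent with net/core/dev.c, showing the save/restore pair (the disable/enable pair is analogous):

static inline void backlog_lock_irq_save(struct softnet_data *sd,
					 unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_save(*flags);
}

static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
					      unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_restore(*flags);
}
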
struct softnet_data *sd;
sd = this_cpu_ptr(&softnet_data);
skb->next = __this_cpu_read(softnet_data.completion_queue);
__this_cpu_write(softnet_data.completion_queue, skb);
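
The completion_queue lines sit in the dev_kfree_skb_irq() path in net/core/dev.c: an skb freed from hard-IRQ context is pushed onto a per-CPU singly linked list using plain (non-atomic) per-CPU ops, which is safe because interrupts are disabled and the list is never touched cross-CPU. net_tx_action() later frees the accumulated skbs in softirq context. Roughly (the exact signature has changed across kernel versions):

void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	unsigned long flags;

	/* ... refcount and drop-reason bookkeeping elided ... */
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
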
return __this_cpu_read(softnet_data.xmit.skip_txqueue);
__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
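
The skip_txqueue flag is the same trick as xmit.more: a bit of cross-layer state parked in per-CPU storage because it only matters within a single dev_queue_xmit() invocation. The accessors pair up as:

static inline void netdev_xmit_skip_txqueue(bool skip)
{
	__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
}

static inline bool netdev_xmit_txqueue_skipped(void)
{
	return __this_cpu_read(softnet_data.xmit.skip_txqueue);
}
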
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
EXPORT_PER_CPU_SYMBOL(softnet_data);
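
The definition and export side, from net/core/dev.c. The trailing `= {` above shows there is a static initializer; in trees that have the nested-BH lock seen further down, it would look something like this, and the export is what lets modules use the inline accessors above:

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
	.process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
};
EXPORT_PER_CPU_SYMBOL(softnet_data);
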
static inline void ____napi_schedule(struct softnet_data *sd,
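
____napi_schedule() is the scheduling primitive everything funnels into. Its essential effect, abridged (callers must have interrupts disabled):

static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	/* ... threaded-NAPI wakeup path elided ... */
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
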
sd_input_head = READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head);
head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
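
These three READ_ONCE() lines are the RFS out-of-order-avoidance check in get_rps_cpu()/set_rps_cpu() (net/core/dev.c). A flow may migrate to a different CPU only once the old CPU has worked through everything that flow had queued there, i.e. once the per-CPU head counter has advanced past the flow's recorded tail; the signed subtraction makes the comparison wrap-safe. A sketch consistent with the fragments:

	/* Move the flow to next_cpu only if the current steering CPU is
	 * gone, offline, or has consumed all of this flow's packets. */
	if (unlikely(tcpu != next_cpu) &&
	    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
	     ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
		    rflow->last_qtail)) >= 0)) {
		tcpu = next_cpu;
		rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
	}
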
struct softnet_data *sd = data;
struct softnet_data *sd = data;
static void napi_schedule_rps(struct softnet_data *sd)
struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
const struct softnet_data *sd;
sd = this_cpu_ptr(&softnet_data);
struct softnet_data *sd;
sd = &per_cpu(softnet_data, cpu);
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
__this_cpu_inc(softnet_data.processed);
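
The bare __this_cpu_inc() on `processed` is safe without atomics because __netif_receive_skb_core() runs with bottom halves disabled, so the task can neither migrate nor be re-entered by the same softirq on this CPU. The counter surfaces as the first column of /proc/net/softnet_stat.
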
struct softnet_data *sd;
sd = this_cpu_ptr(&softnet_data);
local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
static void net_rps_send_ipi(struct softnet_data *remsd)
struct softnet_data *next = remsd->rps_ipi_next;
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
struct softnet_data *remsd = sd->rps_ipi_list;
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
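
Together these three form the deferred-IPI machinery: sd_has_rps_ipi_waiting() is the cheap check, net_rps_action_and_irq_enable() detaches the list built by napi_schedule_rps(), and net_rps_send_ipi() walks it firing one async IPI per remote CPU (the `struct softnet_data *sd = data;` lines earlier are the matching csd callbacks recovering their instance). A reconstruction consistent with the fragments:

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}

static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/* Called with IRQs disabled; re-enables them and kicks remote CPUs. */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}
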
struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
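
process_backlog() recovers its softnet_data from the embedded NAPI context via container_of(), then drains process_queue under the nested-BH local lock. On !PREEMPT_RT the lock compiles away, since softirq execution already serializes per CPU; on PREEMPT_RT, where softirq-context code is preemptible, it provides real mutual exclusion plus lockdep coverage. The recurring usage shape behind these lock/unlock pairs:

	struct sk_buff *skb;

	local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
	skb = __skb_dequeue(&sd->process_queue);
	local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
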
____napi_schedule(this_cpu_ptr(&softnet_data), n);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
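
Both call sites are the public wrappers, which differ only in their interrupt-state contract:

void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/* For callers that already run with hard IRQs off; on PREEMPT_RT that
 * guarantee does not hold, so it falls back to the saving variant. */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
	else
		__napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
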
struct softnet_data *sd;
sd = this_cpu_ptr(&softnet_data);
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
sd = &per_cpu(softnet_data, *pos);
struct softnet_data *sd = v;
static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
static u32 softnet_process_queue_len(struct softnet_data *sd)
static struct softnet_data *softnet_get_online(loff_t *pos)
struct softnet_data *sd = NULL;
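
This cluster is the /proc/net/softnet_stat seq_file in net/core/net-procfs.c. The iterator scans for online CPUs, and the two length helpers read the queue depths locklessly; the values are best-effort snapshots for the extended softnet_stat columns. Reconstructed:

static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue);
}

static u32 softnet_process_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->process_queue);
}

/* Advance *pos to the next online CPU, returning its softnet_data,
 * or NULL once all CPUs have been visited. */
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}
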
struct softnet_data *sd = &get_cpu_var(softnet_data);
put_cpu_var(softnet_data);
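
get_cpu_var()/put_cpu_var() is the older preemption-safe bracket: the getter disables preemption and yields this CPU's instance as an lvalue, the putter re-enables preemption. Generic shape (the field touched below is purely illustrative, not what this call site does):

	struct softnet_data *sd = &get_cpu_var(softnet_data); /* preempt_disable() */

	sd->time_squeeze++;	/* illustrative access to a CPU-local field */

	put_cpu_var(softnet_data);	/* preempt_enable() */
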
struct softnet_data *sd;
sd = &per_cpu(softnet_data, i);
sd = &per_cpu(softnet_data, i);
return this_cpu_ptr(&softnet_data.xmit.nf_dup_skb_recursion);
xmit = this_cpu_ptr(&softnet_data.xmit);
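
A caveat on these two xmit references: they only take this per-CPU form on !PREEMPT_RT configurations. The PREEMPT_RT rework moved the struct netdev_xmit block (recursion, more, skip_txqueue, nf_dup_skb_recursion) out of softnet_data into task_struct, because preemptible softirq sections make per-CPU storage unsafe for state that must survive across a preemption point.
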
struct softnet_data *sd;
sd = this_cpu_ptr(&softnet_data);
struct softnet_data *sd;
sd = this_cpu_ptr(&softnet_data);
void xfrm_dev_backlog(struct softnet_data *sd)
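
And the CONFIG_XFRM_OFFLOAD=y definition from net/xfrm/xfrm_device.c, called from net_tx_action(): it splices the per-CPU xfrm_backlog queue onto a private list under the queue lock, then resumes each crypto-offloaded skb outside it. A reconstruction:

void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
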