Symbol: sched_domain

Note that two unrelated definitions share this identifier. In block/kyber-iosched.c, sched_domain is a plain unsigned int that indexes the Kyber I/O scheduler's per-operation domains (KYBER_READ, KYBER_WRITE, KYBER_DISCARD, KYBER_OTHER). Everywhere else in this listing, struct sched_domain is the CPU scheduler's load-balancing topology node. References are grouped by file below, with the line number preceding each matching source line.
block/kyber-iosched.c
  205: unsigned int sched_domain, unsigned int type)
  207: unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
  208: atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
  220: unsigned int sched_domain, unsigned int type,
  223: unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
  236: if (!kqd->latency_timeout[sched_domain])
  237: kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
  239: time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
  242: kqd->latency_timeout[sched_domain] = 0;
  250: memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
  252: trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
  260: unsigned int sched_domain, unsigned int depth)
  262: depth = clamp(depth, 1U, kyber_depth[sched_domain]);
  263: if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
  264: sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
  265: trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
  273: unsigned int sched_domain;
  282: for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
  283: flush_latency_buckets(kqd, cpu_latency, sched_domain,
  285: flush_latency_buckets(kqd, cpu_latency, sched_domain,
  295: for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
  298: p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
  309: for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
  313: p99 = calculate_percentile(kqd, sched_domain,
  325: p99 = kqd->domain_p99[sched_domain];
  326: kqd->domain_p99[sched_domain] = -1;
  328: kqd->domain_p99[sched_domain] = p99;
  343: orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
  345: kyber_resize_domain(kqd, sched_domain, depth);
  536: unsigned int sched_domain;
  541: sched_domain = kyber_sched_domain(rq->cmd_flags);
  542: sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
  560: unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
  561: struct list_head *rq_list = &kcq->rq_list[sched_domain];
  584: unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
  586: struct list_head *head = &kcq->rq_list[sched_domain];
  594: sbitmap_set_bit(&khd->kcq_map[sched_domain],
  608: unsigned int sched_domain, unsigned int type,
  622: atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
  629: unsigned int sched_domain;
  632: sched_domain = kyber_sched_domain(rq->cmd_flags);
  633: if (sched_domain == KYBER_OTHER)
  637: target = kqd->latency_targets[sched_domain];
  638: add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
  640: add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
  649: unsigned int sched_domain;
  659: list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
  668: unsigned int sched_domain,
  673: .sched_domain = sched_domain,
  677: sbitmap_for_each_set(&khd->kcq_map[sched_domain],
  696: unsigned int sched_domain = khd->cur_domain;
  697: struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
  698: struct sbq_wait *wait = &khd->domain_wait[sched_domain];
  711: &khd->wait_index[sched_domain]);
  712: khd->domain_ws[sched_domain] = ws;
  730: ws = khd->domain_ws[sched_domain];
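Every kyber-iosched.c hit above uses sched_domain as an array index: kyber_sched_domain() maps a request's operation flags to one of the KYBER_* domains, all per-domain state (domain_tokens, latency buckets, rq_list) is indexed by it, and the tuning loops stop at KYBER_OTHER, which the latency heuristics skip (line 633). A minimal userspace sketch of that pattern follows; the op enum and function names are invented stand-ins, not the kernel's REQ_OP_* values or the real Kyber code.

/*
 * Toy model of Kyber's per-domain bookkeeping, for illustration only.
 * Each request is classified into a scheduling domain by operation
 * type, and all per-domain state is indexed by that unsigned int.
 */
#include <stdio.h>

enum kyber_domain {
    KYBER_READ,
    KYBER_WRITE,
    KYBER_DISCARD,
    KYBER_OTHER,        /* no latency target; skipped by the heuristics */
    KYBER_NUM_DOMAINS,
};

static const char *domain_names[KYBER_NUM_DOMAINS] = {
    "read", "write", "discard", "other",
};

/* Hypothetical op codes standing in for the kernel's REQ_OP_* values. */
enum op { OP_READ, OP_WRITE, OP_DISCARD, OP_FLUSH };

/* Analogue of kyber_sched_domain(): map an operation to a domain. */
static enum kyber_domain sched_domain_of(enum op op)
{
    switch (op) {
    case OP_READ:    return KYBER_READ;
    case OP_WRITE:   return KYBER_WRITE;
    case OP_DISCARD: return KYBER_DISCARD;
    default:         return KYBER_OTHER;
    }
}

int main(void)
{
    /* Per-domain counters, indexed like kqd->domain_tokens[] above. */
    unsigned long samples[KYBER_NUM_DOMAINS] = { 0 };
    enum op trace[] = { OP_READ, OP_READ, OP_WRITE, OP_FLUSH, OP_DISCARD };

    for (unsigned i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
        samples[sched_domain_of(trace[i])]++;

    /* As in the timer callback's loops, KYBER_OTHER is excluded. */
    for (unsigned d = 0; d < KYBER_OTHER; d++)
        printf("%-8s %lu samples\n", domain_names[d], samples[d]);
    return 0;
}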
include/linux/sched/topology.h
  75: struct sched_domain __rcu *parent; /* top domain must be null terminated */
  76: struct sched_domain __rcu *child; /* bottom domain must be null terminated */
  153: static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
  173: struct sched_domain *__percpu *sd;
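In the scheduler proper, struct sched_domain nodes form a per-CPU, NULL-terminated chain through the parent and child pointers quoted above (topology.h lines 75-76), running from the smallest sharing level (e.g. SMT siblings) up toward the whole system; balancing code walks it bottom-up. A self-contained toy of that chain follows; the SMT/MC/PKG levels and span counts are assumptions for illustration, though name and span_weight mirror real struct sched_domain members.

/*
 * Toy model of a CPU's sched_domain chain, for illustration only.
 * The kernel builds one such NULL-terminated list per CPU and walks
 * it from the bottom up (for_each_domain()) under RCU.
 */
#include <stdio.h>
#include <stddef.h>

struct sched_domain_model {
    struct sched_domain_model *parent;  /* NULL at the top level */
    struct sched_domain_model *child;   /* NULL at the bottom level */
    const char *name;
    int span_weight;                    /* number of CPUs the domain covers */
};

int main(void)
{
    struct sched_domain_model smt = { .name = "SMT", .span_weight = 2 };
    struct sched_domain_model mc  = { .name = "MC",  .span_weight = 8 };
    struct sched_domain_model pkg = { .name = "PKG", .span_weight = 32 };

    smt.parent = &mc;   mc.child  = &smt;
    mc.parent  = &pkg;  pkg.child = &mc;

    /* Bottom-up walk, as per-CPU load balancing does. */
    for (struct sched_domain_model *sd = &smt; sd; sd = sd->parent)
        printf("%s spans %d CPUs\n", sd->name, sd->span_weight);
    return 0;
}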
kernel/sched/core.c
  1208: struct sched_domain *sd;
  3602: struct sched_domain *sd;
  6301: static bool steal_cookie_task(int cpu, struct sched_domain *sd)
  6322: struct sched_domain *sd;
kernel/sched/deadline.c
  2717: struct sched_domain *sd;
kernel/sched/debug.c
  667: static void register_sd(struct sched_domain *sd, struct dentry *parent)
  720: struct sched_domain *sd;
kernel/sched/ext_idle.c
  233: struct sched_domain *sd;
  248: struct sched_domain *sd;
  263: struct sched_domain *sd;
  282: struct sched_domain *sd;
kernel/sched/fair.c
  2577: struct sched_domain *sd;
  7377: wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
  7419: static int wake_affine(struct sched_domain *sd, struct task_struct *p,
  7440: sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
  7502: static inline int sched_balance_find_dst_cpu(struct sched_domain *sd, struct task_struct *p,
  7519: struct sched_domain *tmp;
  7653: static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
  7689: static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
  7701: static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
  7771: select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
  7839: struct sched_domain *sd;
  8395: struct sched_domain *sd;
  8583: struct sched_domain *tmp, *sd = NULL;
  9326: struct sched_domain *sd;
  10115: static void update_cpu_capacity(struct sched_domain *sd, int cpu)
  10131: void update_group_capacity(struct sched_domain *sd, int cpu)
  10133: struct sched_domain *child = sd->child;
  10192: check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
  10329: static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
  10340: static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
  10442: sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
  10783: static inline void update_sg_wakeup_stats(struct sched_domain *sd,
  10898: sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  11713: struct sched_domain *sd = env->sd;
  11729: struct sched_domain *sd = env->sd;
  11824: static void update_lb_imbalance_stat(struct lb_env *env, struct sched_domain *sd,
  11866: struct sched_domain *sd, enum cpu_idle_type idle,
  11870: struct sched_domain *sd_parent = sd->parent;
  12153: get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
  12177: update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
  12201: struct sched_domain *sd;
  12282: static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
  12295: update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
  12337: struct sched_domain *sd;
  12488: struct sched_domain *sd;
  12611: struct sched_domain *sd;
  12640: struct sched_domain *sd;
  12929: struct sched_domain *sd;
kernel/sched/rt.c
  1765: struct sched_domain *sd;
kernel/sched/sched.h
  1234: struct sched_domain __rcu *sd;
  2117: static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
  2119: struct sched_domain *sd, *hsd = NULL;
  2138: static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
  2140: struct sched_domain *sd;
  2150: DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
  2155: DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
  2156: DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
  2157: DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
  2794: extern void update_group_capacity(struct sched_domain *sd, int cpu);
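The sched.h hits include helpers that select a level of the chain by flag, highest_flag_domain() and lowest_flag_domain(), whose results are cached in the per-CPU pointers declared at lines 2150-2157 (sd_llc, sd_numa, ...). A self-contained toy of the highest-flag walk follows; the SDF_SHARE_CACHE flag and sd_model type are invented, and the real helper uses SD_* flags with extra early-exit rules for child-shared flags.

/*
 * Toy model of highest_flag_domain(): walk a CPU's domain chain
 * bottom-up and return the highest level that still has a given
 * flag set. For a cache-sharing flag this yields the LLC domain,
 * which the kernel caches per CPU as sd_llc.
 */
#include <stdio.h>
#include <stddef.h>

struct sd_model {
    struct sd_model *parent;
    const char *name;
    unsigned int flags;
};

#define SDF_SHARE_CACHE 0x1  /* invented stand-in for the SD_* cache flag */

static struct sd_model *highest_flag_domain_model(struct sd_model *sd,
                                                  unsigned int flag)
{
    struct sd_model *hsd = NULL;

    for (; sd; sd = sd->parent) {
        if (!(sd->flags & flag))
            break;      /* levels above this one don't share the resource */
        hsd = sd;
    }
    return hsd;
}

int main(void)
{
    struct sd_model pkg = { NULL, "PKG", 0 };
    struct sd_model mc  = { &pkg, "MC",  SDF_SHARE_CACHE };
    struct sd_model smt = { &mc,  "SMT", SDF_SHARE_CACHE };

    /* Highest cache-sharing level above SMT is the MC domain. */
    struct sd_model *llc = highest_flag_domain_model(&smt, SDF_SHARE_CACHE);
    printf("LLC domain: %s\n", llc ? llc->name : "none");
    return 0;
}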
kernel/sched/stats.c
  118: struct sched_domain *sd;
kernel/sched/topology.c
  43: static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
  139: static void sched_domain_debug(struct sched_domain *sd, int cpu)
  170: static int sd_degenerate(struct sched_domain *sd)
  188: sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
  624: static void destroy_sched_domain(struct sched_domain *sd)
  640: struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
  643: struct sched_domain *parent = sd->parent;
  649: static void destroy_sched_domains(struct sched_domain *sd)
  664: DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
  669: DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
  670: DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
  671: DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
  679: struct sched_domain *sd;
  721: cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
  724: struct sched_domain *tmp;
  728: struct sched_domain *parent = tmp->parent;
  784: struct sched_domain * __percpu *sd;
  916: build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
  920: struct sched_domain *sibling;
  953: build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
  976: static void init_overlap_sched_group(struct sched_domain *sd,
  1004: static struct sched_domain *
  1005: find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
  1030: build_overlap_sched_groups(struct sched_domain *sd, int cpu)
  1036: struct sched_domain *sibling;
  1197: struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
  1198: struct sched_domain *child = sd->child;
  1241: build_sched_groups(struct sched_domain *sd, int cpu)
  1286: static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
  1332: struct sched_domain *sd;
  1516: static void set_domain_attribute(struct sched_domain *sd,
  1563: d->sd = alloc_percpu(struct sched_domain *);
  1578: static void claim_allocations(int cpu, struct sched_domain *sd)
  1636: static struct sched_domain *
  1639: struct sched_domain *child, int cpu)
  1642: struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
  1654: *sd = (struct sched_domain){
  2371: sdd->sd = alloc_percpu(struct sched_domain *);
  2388: struct sched_domain *sd;
  2393: sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
  2439: struct sched_domain *sd;
  2466: static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
  2468: struct sched_domain *child, int cpu)
  2470: struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
  2559: struct sched_domain *sd;
  2617: struct sched_domain *child = sd->child;
  2621: struct sched_domain __rcu *top_p;
tools/testing/selftests/bpf/progs/test_access_variable_array.c
  12: struct sched_domain *sd)