sched_domain

Two unrelated kernel symbols share this name. In the Kyber I/O scheduler, sched_domain is a plain unsigned int index into per-operation scheduling domains (KYBER_READ, KYBER_WRITE, KYBER_DISCARD, KYBER_OTHER), each with its own token pool and latency accounting. In the CPU scheduler, struct sched_domain is a node in the per-CPU load-balancing topology hierarchy, linked through its parent and child pointers. The matches below are grouped accordingly.
Kyber I/O scheduler (block/kyber-iosched.c):

unsigned int sched_domain, unsigned int type)
unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
unsigned int sched_domain, unsigned int type,
unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
if (!kqd->latency_timeout[sched_domain])
kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
kqd->latency_timeout[sched_domain] = 0;
memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
unsigned int sched_domain, unsigned int depth)
depth = clamp(depth, 1U, kyber_depth[sched_domain]);
if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
unsigned int sched_domain;
for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
flush_latency_buckets(kqd, cpu_latency, sched_domain,
flush_latency_buckets(kqd, cpu_latency, sched_domain,
for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
p99 = calculate_percentile(kqd, sched_domain,
p99 = kqd->domain_p99[sched_domain];
kqd->domain_p99[sched_domain] = -1;
kqd->domain_p99[sched_domain] = p99;
orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
kyber_resize_domain(kqd, sched_domain, depth);
unsigned int sched_domain;
sched_domain = kyber_sched_domain(rq->cmd_flags);
sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
struct list_head *rq_list = &kcq->rq_list[sched_domain];
unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
struct list_head *head = &kcq->rq_list[sched_domain];
sbitmap_set_bit(&khd->kcq_map[sched_domain],
unsigned int sched_domain, unsigned int type,
atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
unsigned int sched_domain;
sched_domain = kyber_sched_domain(rq->cmd_flags);
if (sched_domain == KYBER_OTHER)
target = kqd->latency_targets[sched_domain];
add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
unsigned int sched_domain;
list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
unsigned int sched_domain,
.sched_domain = sched_domain,
sbitmap_for_each_set(&khd->kcq_map[sched_domain],
unsigned int sched_domain = khd->cur_domain;
struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
struct sbq_wait *wait = &khd->domain_wait[sched_domain];
&khd->wait_index[sched_domain]);
khd->domain_ws[sched_domain] = ws;
ws = khd->domain_ws[sched_domain];
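In all of the Kyber lines above, sched_domain is an index: each request is classified into one of four domains, each with its own token pool (domain_tokens) and latency buckets, and the timer-driven loops stop at KYBER_OTHER because that domain carries no latency target (hence the early bail-out when sched_domain == KYBER_OTHER). A minimal, self-contained sketch of that classification follows; op_to_domain() and the OP_* values are hypothetical stand-ins for the kernel's kyber_sched_domain() and REQ_OP_* flags, while the KYBER_* names mirror the real enum.

#include <stdio.h>

/* Mirrors Kyber's domain enum: one token pool / latency target per domain. */
enum kyber_domain {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,		/* no latency target; also the loop bound above */
	KYBER_NUM_DOMAINS,
};

/* Hypothetical stand-ins for the kernel's REQ_OP_* operation codes. */
enum demo_op { OP_READ, OP_WRITE, OP_DISCARD, OP_FLUSH };

/* Sketch of kyber_sched_domain(): classify an operation into a domain. */
static enum kyber_domain op_to_domain(enum demo_op op)
{
	switch (op) {
	case OP_READ:
		return KYBER_READ;
	case OP_WRITE:
		return KYBER_WRITE;
	case OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}

int main(void)
{
	static const char *names[] = { "read", "write", "discard", "other" };
	enum demo_op ops[] = { OP_READ, OP_WRITE, OP_DISCARD, OP_FLUSH };

	for (unsigned int i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		printf("op %u -> domain %s\n", i, names[op_to_domain(ops[i])]);
	return 0;
}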
CPU scheduler (include/linux/sched/topology.h, kernel/sched/):

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
struct sched_domain *__percpu *sd;
struct sched_domain __rcu *parent; /* top domain must be null terminated */
struct sched_domain __rcu *child; /* bottom domain must be null terminated */
struct sched_domain *sd;
struct sched_domain *sd;
static bool steal_cookie_task(int cpu, struct sched_domain *sd)
struct sched_domain *sd;
struct sched_domain *sd;
static void register_sd(struct sched_domain *sd, struct dentry *parent)
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain *sd;
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
void update_group_capacity(struct sched_domain *sd, int cpu)
struct sched_domain *child = sd->child;
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
static inline void update_sg_wakeup_stats(struct sched_domain *sd,
sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
struct sched_domain *sd = env->sd;
struct sched_domain *sd = env->sd;
static void update_lb_imbalance_stat(struct lb_env *env, struct sched_domain *sd,
struct sched_domain *sd, enum cpu_idle_type idle,
struct sched_domain *sd_parent = sd->parent;
get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
struct sched_domain *sd;
static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain *sd;
wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
static int wake_affine(struct sched_domain *sd, struct task_struct *p,
sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
static inline int sched_balance_find_dst_cpu(struct sched_domain *sd, struct task_struct *p,
struct sched_domain *tmp;
static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain *tmp, *sd = NULL;
struct sched_domain *sd;
struct sched_domain *sd;
struct sched_domain __rcu *sd;
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
struct sched_domain *sd, *hsd = NULL;
static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
struct sched_domain *sd;
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern void update_group_capacity(struct sched_domain *sd, int cpu);
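The helpers declared above (sd_llc, highest_flag_domain(), lowest_flag_domain()) all perform the same walk: start at a CPU's lowest domain and follow the NULL-terminated parent chain upward. Here is a minimal, self-contained sketch of the highest_flag_domain() pattern; struct demo_domain is a toy reduction of struct sched_domain, and SD_SHARE_LLC_DEMO is a hypothetical flag bit standing in for the kernel's SD_* topology flags.

#include <stdio.h>

#define SD_SHARE_LLC_DEMO	0x1	/* hypothetical demo flag */

/* Toy reduction of struct sched_domain: linkage plus a flags word. */
struct demo_domain {
	struct demo_domain *parent;	/* NULL-terminated, per the comments above */
	struct demo_domain *child;
	int flags;
	const char *name;
};

/*
 * Sketch of highest_flag_domain(): climb toward the root, remembering the
 * last domain that still had the flag set, and stop at the first that lacks it.
 */
static struct demo_domain *highest_flag_domain_sketch(struct demo_domain *sd,
						      int flag)
{
	struct demo_domain *hsd = NULL;

	for (; sd; sd = sd->parent) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}
	return hsd;
}

int main(void)
{
	/* Three levels, SMT -> MC -> NUMA; only SMT and MC share an LLC. */
	struct demo_domain numa = { .name = "NUMA" };
	struct demo_domain mc = { .parent = &numa, .flags = SD_SHARE_LLC_DEMO, .name = "MC" };
	struct demo_domain smt = { .parent = &mc, .flags = SD_SHARE_LLC_DEMO, .name = "SMT" };
	struct demo_domain *hsd;

	numa.child = &mc;
	mc.child = &smt;

	hsd = highest_flag_domain_sketch(&smt, SD_SHARE_LLC_DEMO);
	printf("highest domain with flag: %s\n", hsd ? hsd->name : "(none)");
	return 0;
}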
struct sched_domain *sd;
Domain construction and teardown (kernel/sched/topology.c):

static struct sched_domain *
find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
struct sched_domain *sibling;
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
struct sched_domain *child = sd->child;
build_sched_groups(struct sched_domain *sd, int cpu)
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
struct sched_domain *sd;
static void sched_domain_debug(struct sched_domain *sd, int cpu)
static void set_domain_attribute(struct sched_domain *sd,
d->sd = alloc_percpu(struct sched_domain *);
static void claim_allocations(int cpu, struct sched_domain *sd)
static struct sched_domain *
struct sched_domain *child, int cpu)
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
*sd = (struct sched_domain){
static int sd_degenerate(struct sched_domain *sd)
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
sdd->sd = alloc_percpu(struct sched_domain *);
struct sched_domain *sd;
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
struct sched_domain *sd;
static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
struct sched_domain *child, int cpu)
struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
struct sched_domain *sd;
struct sched_domain *child = sd->child;
struct sched_domain __rcu *top_p;
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
static void destroy_sched_domain(struct sched_domain *sd)
struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
struct sched_domain *parent = sd->parent;
static void destroy_sched_domains(struct sched_domain *sd)
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
struct sched_domain *sd;
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
struct sched_domain *tmp;
struct sched_domain *parent = tmp->parent;
struct sched_domain * __percpu *sd;
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
struct sched_domain *sibling;
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
static void init_overlap_sched_group(struct sched_domain *sd,
struct sched_domain *sd)
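The construction and teardown fragments share one invariant: each newly built, wider level is linked in as the parent of the one built just before it, and teardown walks the parent chain from the lowest attached domain (the container_of(rcu, ...) line shows the kernel deferring each free through RCU). A self-contained sketch of that linking and teardown, with link_level() and destroy_chain() as hypothetical stand-ins for build_sched_domain() and destroy_sched_domains():

#include <stdio.h>
#include <stdlib.h>

/* Toy reduction of struct sched_domain: just the hierarchy linkage. */
struct demo_domain {
	struct demo_domain *parent;
	struct demo_domain *child;
	const char *name;
};

/* The new, wider level becomes the parent of the previously built child. */
static struct demo_domain *link_level(const char *name,
				      struct demo_domain *child)
{
	struct demo_domain *sd = calloc(1, sizeof(*sd));

	if (!sd)
		exit(1);
	sd->name = name;
	sd->child = child;
	if (child)
		child->parent = sd;
	return sd;
}

/* Free the whole chain by walking ->parent from the lowest domain.
 * (The kernel defers each free via call_rcu; this demo frees directly.) */
static void destroy_chain(struct demo_domain *sd)
{
	while (sd) {
		struct demo_domain *parent = sd->parent;

		free(sd);
		sd = parent;
	}
}

int main(void)
{
	struct demo_domain *lowest = NULL, *sd = NULL;
	const char *levels[] = { "SMT", "MC", "NUMA" };

	/* Build the lowest (narrowest) level first, as the topology walk does. */
	for (int i = 0; i < 3; i++) {
		sd = link_level(levels[i], sd);
		if (!lowest)
			lowest = sd;
	}

	/* Climb the NULL-terminated parent chain, as for_each_domain() does. */
	for (sd = lowest; sd; sd = sd->parent)
		printf("%s%s", sd->name, sd->parent ? " -> " : "\n");

	destroy_chain(lowest);
	return 0;
}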