sched_group_span

sched_group_span() is the accessor through which the Linux scheduler reads
(and, during topology construction, writes) the cpumask of CPUs covered by a
struct sched_group:

	static inline struct cpumask *sched_group_span(struct sched_group *sg);

Callers never touch the group's cpumask storage directly; everything goes
through this helper. The call sites collected below come from
kernel/sched/fair.c and kernel/sched/topology.c and fall into a few
recurring patterns.
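The helper itself is a one-line wrapper: struct sched_group ends in a
flexible unsigned long cpumask[] array, and sched_group_span() converts that
storage with to_cpumask(). A minimal sketch of the definition as found in
kernel/sched/sched.h (modulo tree version):

	static inline struct cpumask *sched_group_span(struct sched_group *sg)
	{
		/* sg->cpumask is the group's trailing cpumask storage */
		return to_cpumask(sg->cpumask);
	}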
Iterating the CPUs of a group. The most common pattern drives a cpumask
iterator with the span, either plain or AND-ed with a limiting mask such as
the CPUs the load balancer may use (env->cpus) or a task's affinity mask
(p->cpus_ptr):

	for_each_cpu(cpu, sched_group_span(sdg)) {
	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
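As a worked example of the AND-ed form, a small helper could count how many
CPUs of a group a given task is allowed on. This is a sketch, not kernel
code; group_allowed_cpus() is an invented name:

	/* Hypothetical helper: count CPUs of @group that @p may run on. */
	static int group_allowed_cpus(struct sched_group *group,
				      struct task_struct *p)
	{
		int i, n = 0;

		for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr)
			n++;

		return n;
	}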
Feasibility tests in the wake-up slow path. find_idlest_group() skips any
group whose span does not intersect the waking task's affinity mask, and
once the idlest group has been chosen a candidate CPU is taken from its
span:

	if (!cpumask_intersects(sched_group_span(group),
				p->cpus_ptr))
		continue;

	idlest_cpu = cpumask_first(sched_group_span(idlest));

(The same intersection and membership tests are applied against the local
group's span, sched_group_span(local), along this path.)
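A condensed sketch of that selection step under a task's affinity
constraint; group_pick_cpu() is an invented name, the cpumask calls are
real:

	/* Hypothetical: first CPU of @idlest that @p may use, else first CPU. */
	static int group_pick_cpu(struct sched_group *idlest,
				  struct task_struct *p)
	{
		int cpu = cpumask_first_and(sched_group_span(idlest),
					    p->cpus_ptr);

		return cpu < nr_cpu_ids ? cpu
					: cpumask_first(sched_group_span(idlest));
	}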
Load-balance bookkeeping. update_sd_lb_stats() recognizes the local group by
testing whether the balancing destination CPU lies in a group's span,
find_idlest_group_cpu() short-circuits a single-CPU group by returning the
first (only) CPU of its span, and the idle-CPU search walks a span starting
just past a target CPU so repeated searches rotate through the group:

	local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));

	return cpumask_first(sched_group_span(group));

	for_each_cpu_wrap(cpu, sched_group_span(sg), target + 1) {
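The wrapped iteration is worth a closer look: starting at target + 1 means
the first candidate examined changes as the target moves, spreading work
across the group instead of always hammering the first CPU of the span. A
sketch with an invented name, group_next_cpu():

	/* Hypothetical: next CPU in @sg after @target, wrapping around. */
	static int group_next_cpu(struct sched_group *sg, int target)
	{
		int cpu;

		for_each_cpu_wrap(cpu, sched_group_span(sg), target + 1) {
			if (cpu != target)
				return cpu;
		}

		return -1;
	}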
A scan can also prune a finished group's CPUs out of a working candidate
mask, so the next round only considers CPUs that have not been visited yet:

	cpumask_andnot(cpus, cpus, sched_group_span(sg));
Debug output and consistency checks. The sched_domain debug code prints a
group's span as a CPU list and flags a non-overlapping domain whose balance
mask disagrees with its span:

	printk(KERN_CONT " %d:{ span=%*pbl",
	       group->sgc->id,
	       cpumask_pr_args(sched_group_span(group)));

	if (!(sd->flags & SD_OVERLAP) &&
	    !cpumask_equal(group_balance_mask(group),
			   sched_group_span(group))) {
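cpumask_pr_args() expands to the "field width, bitmap pointer" argument pair
that the %*pbl printk format specifier consumes, so any span can be printed
as a CPU list. A minimal, self-contained use (the message text is
hypothetical):

	printk(KERN_DEBUG "group spans %*pbl\n",
	       cpumask_pr_args(sched_group_span(sg)));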
Building and sizing groups (kernel/sched/topology.c). During topology
construction the span is written as well as read: helpers cache it in a
local, a fresh group inherits the span of its child domain (or is seeded
with a single CPU at the lowest level), the balance mask starts out as a
copy of the span, and a group's initial capacity and weight are both derived
from the span's weight. build_sched_groups() also ORs each new span into a
"covered" mask so no CPU ends up in two groups of the same domain:

	sg_span = sched_group_span(sg);

	cpumask_copy(sched_group_span(sg), sched_domain_span(child));
	cpumask_set_cpu(cpu, sched_group_span(sg));
	cpumask_copy(group_balance_mask(sg), sched_group_span(sg));

	sg->sgc->capacity = SCHED_CAPACITY_SCALE *
			    cpumask_weight(sched_group_span(sg));
	sg->group_weight = cpumask_weight(sched_group_span(sg));

	cpumask_or(covered, covered, sched_group_span(sg));
	cpumask_copy(mask, sched_group_span(sg));
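How those pieces fit together, as a condensed sketch modeled on get_group()
in kernel/sched/topology.c (simplified, with an invented name; not the
literal kernel function):

	/* Sketch: initialize a group's span and the fields derived from it. */
	static void init_group_span(struct sched_group *sg,
				    struct sched_domain *child, int cpu)
	{
		if (child)
			/* The group covers the CPUs of the child domain. */
			cpumask_copy(sched_group_span(sg),
				     sched_domain_span(child));
		else
			/* Lowest level: the group is the CPU itself. */
			cpumask_set_cpu(cpu, sched_group_span(sg));

		/* Balance mask and sizing fall out of the span. */
		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
		sg->sgc->capacity = SCHED_CAPACITY_SCALE *
				    cpumask_weight(sched_group_span(sg));
		sg->group_weight = cpumask_weight(sched_group_span(sg));
	}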
Per-CPU walks and topology validation. topology.c iterates a span CPU by CPU
when wiring up per-CPU group state, checks that a CPU really belongs to the
group it is attached to, and sched_domain_debug_one() uses the span to catch
malformed topologies: an empty group, or a group repeating CPUs already seen
in an earlier group of the same domain (tracked by OR-ing every visited span
into groupmask). Read-only users keep the span in a const-qualified local:

	for_each_cpu(cpu, sched_group_span(sg)) {
	for_each_cpu(group_cpu, sched_group_span(sg)) {

	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {

	if (cpumask_empty(sched_group_span(group))) {	/* "ERROR: empty group" */

	if (cpumask_intersects(groupmask,
			       sched_group_span(group))) {	/* "ERROR: repeated CPUs" */

	cpumask_or(groupmask, groupmask, sched_group_span(group));

	const struct cpumask *sg_span = sched_group_span(sg);
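The validation pattern is compact enough to show whole. A sketch modeled on
sched_domain_debug_one(), with an invented name, sched_debug_groups(); it
walks a domain's circular group list and verifies that the spans partition
the domain:

	/* Sketch: do the groups of @sd cover it exactly once? */
	static bool sched_debug_groups(struct sched_domain *sd,
				       struct cpumask *seen)
	{
		struct sched_group *group = sd->groups;

		cpumask_clear(seen);
		do {
			if (cpumask_empty(sched_group_span(group)))
				return false;	/* empty group */
			if (cpumask_intersects(seen, sched_group_span(group)))
				return false;	/* repeated CPUs */
			cpumask_or(seen, seen, sched_group_span(group));
			group = group->next;
		} while (group != sd->groups);

		/* Together, the groups must span the whole domain. */
		return cpumask_equal(seen, sched_domain_span(sd));
	}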