cpumask_subset: call sites across the kernel tree
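
cpumask_subset(src1p, src2p) returns true when every CPU set in *src1p is
also set in *src2p, i.e. when *src1p is a subset of *src2p. It is declared
in include/linux/cpumask.h as a thin wrapper around bitmap_subset(). Most
of the call sites below follow one of two idioms: validating a requested
mask against an allowed one, or asserting a containment invariant (see the
sketch after the list). A minimal sketch of the validation idiom, assuming
a kernel-module context; check_requested_mask and its parameter names are
illustrative, not kernel symbols:

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /* Reject a requested mask that strays outside the allowed set. */
    static int check_requested_mask(const struct cpumask *requested,
                                    const struct cpumask *allowed)
    {
            /* True iff every CPU in *requested is also in *allowed. */
            if (!cpumask_subset(requested, allowed))
                    return -EINVAL;
            return 0;
    }

Note that cpumask_subset(a, b) together with cpumask_subset(b, a) implies
the two masks are equal; callers that need equality use cpumask_equal()
instead. The call sites:
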
unlikely(cpumask_subset(cpus, amu_fie_cpus)))
if (unlikely(!cpumask_subset(policy->cpus, amu_fie_cpus)))
if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
if (!cpumask_subset(effective_mask, cpus_allowed)) {
return cpumask_subset(mm_cpumask(mm),
!cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
if (cpumask_subset(*cpu_mask, candidate_mask))
if (!cpumask_subset(cpu_possible_mask, cpumask)) {
if (cpumask_subset(cpu_online_mask, affinity) || cpu >= nr_cpu_ids)
if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
if (cpumask_subset(cpu_coregroup_mask(cpu),
if (!cpumask_subset(mask, cpu_online_mask)) {
if (!cpumask_subset(mask, &ioapic_max_cpumask))
if (!cpumask_subset(iter_mask, req_mask))
if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) {
if (cpumask_subset(mask, allowed_mask))
return cpumask_subset(src1, src2);
return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
if (WARN_ON_ONCE(!cpumask_subset(doms[i], cpu_active_mask)))
return (cpumask_subset(parent->effective_cpus, xcpus) &&
if (cpumask_subset(xcpus, isolated_cpus))
!cpumask_subset(new_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT)))
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
cpumask_subset(xcpus, parent->effective_xcpus)) {
if (!cpumask_subset(child->effective_xcpus,
cpumask_subset(new_ecpus, child->effective_xcpus))
if (!cpumask_subset(out_mask, top_cpuset.cpus_allowed))
if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
cpumask_subset(sibling->cpus_allowed, trial->exclusive_cpus))
if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
if (!cpumask_subset(dest, ipimask))
if (!cpumask_subset(dest, cpu_possible_mask)) {
if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
if (!cpumask_subset(smt, idle_cpus))
if (!cpumask_subset(cpus, env.dst_grpmask)) {
if (!cpumask_subset(task_rq(p)->rd->span, mask))
if (!cpumask_subset(new_mask, cpus_allowed)) {
if (!cpumask_subset(span, p->cpus_ptr) ||
!cpumask_subset(sched_domain_span(sibling->child),
!cpumask_subset(sched_domain_span(sibling->child), span))
!cpumask_subset(groupmask, sched_domain_span(sd->parent)))
if (!cpumask_subset(sched_domain_span(child),
if (!cpumask_subset(mask, cpu_possible_mask))
if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
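
Several of the sites above (the WARN_ON_ONCE and BUG_ON lines) use
cpumask_subset() defensively: the subset relation is an invariant being
asserted, not an input being rejected. A sketch of that pattern, with
clamp_child_cpus and both parameters invented for illustration:

    #include <linux/cpumask.h>
    #include <linux/bug.h>

    /* Invariant: a child's CPUs never exceed its parent's. */
    static void clamp_child_cpus(struct cpumask *child_cpus,
                                 const struct cpumask *parent_cpus)
    {
            /* Warn once, without crashing, if the invariant breaks... */
            WARN_ON_ONCE(!cpumask_subset(child_cpus, parent_cpus));
            /* ...then clamp so later code sees a consistent state. */
            cpumask_and(child_cpus, child_cpus, parent_cpus);
    }

The choice of assertion reflects severity: WARN_ON_ONCE logs and continues,
while BUG_ON (as in the cpu_present_mask/cpu_possible_mask check above)
halts the machine and is reserved for states where continuing is unsafe.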