cpu_possible_mask
num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
init_cpu_present(cpu_possible_mask);
*((u32 *)cpu_possible_mask);
return __task_cpu_possible_mask(p, cpu_possible_mask);
return cpu_possible_mask;
cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
init_cpu_present(cpu_possible_mask);
.cpumask = cpu_possible_mask,
mips_smp_ipi_allocate(cpu_possible_mask);
init_cpu_present(cpu_possible_mask);
cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
init_cpu_present(cpu_possible_mask);
fdh->cpu_mask = *cpu_possible_mask;
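
The cluster above is early-boot code: several arches seed cpu_present_mask straight from cpu_possible_mask, and drivers snapshot the whole mask. A minimal sketch of the relationship between the two masks, written as a hypothetical module (the maskdump name is made up):

#include <linux/module.h>
#include <linux/cpumask.h>

static int __init maskdump_init(void)
{
	/* possible: CPUs this boot could ever bring up; present: plugged in now */
	pr_info("possible: %*pbl\n", cpumask_pr_args(cpu_possible_mask));
	pr_info("present:  %*pbl\n", cpumask_pr_args(cpu_present_mask));
	return 0;
}
module_init(maskdump_init);

static void __exit maskdump_exit(void) { }
module_exit(maskdump_exit);

MODULE_LICENSE("GPL");
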
BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
if (!cpumask_subset(cpu_possible_mask, cpumask)) {
return cpu_possible_mask;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
init_cpu_present(cpu_possible_mask);
return *cpu_possible_mask;
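
The subset/xor lines above rest on the invariant that the present mask is a subset of the possible mask, so XOR-ing the two yields exactly the CPUs that could still be hot-added. A hedged sketch of that derivation (the function name is hypothetical):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>

static int list_hot_addable(void)
{
	cpumask_var_t candidates;
	unsigned int cpu;

	if (!zalloc_cpumask_var(&candidates, GFP_KERNEL))
		return -ENOMEM;

	/* present is always a subset of possible, so xor == andnot here */
	cpumask_xor(candidates, cpu_possible_mask, cpu_present_mask);

	for_each_cpu(cpu, candidates)
		pr_info("CPU%u is possible but not present\n", cpu);

	free_cpumask_var(candidates);
	return 0;
}
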
ce->cpumask = cpu_possible_mask;
return blk_mq_num_queues(cpu_possible_mask, max_queues);
i = cpumask_first(cpu_possible_mask);
ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
unsigned int i = cpumask_first(cpu_possible_mask);
scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
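
The per-CPU lookups above (crypto scratch buffers, stream state) key off cpumask_first(cpu_possible_mask) because the first possible CPU is fixed for the lifetime of the boot, unlike the first online CPU. A sketch under that assumption; struct scratch and the allocation are made up:

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct scratch {
	void *buf;
	size_t len;
};

static struct scratch __percpu *scratches;	/* from alloc_percpu() */

/* Stable designated instance: the first possible CPU never changes at runtime */
static struct scratch *first_possible_scratch(void)
{
	return per_cpu_ptr(scratches, cpumask_first(cpu_possible_mask));
}
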
cpumask_copy(cpus_to_visit, cpu_possible_mask);
cpu_node, cpumask_pr_args(cpu_possible_mask));
cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
cpumask_andnot(isolated, cpu_possible_mask,
return mask ?: cpu_possible_mask;
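
The andnot calls above derive "possible but offline" and "possible but not housekeeping" sets. A sketch of the first form, with the output mask supplied by the caller:

#include <linux/cpumask.h>

/* Fill @offline with CPUs the config allows but that are not running */
static void possible_but_offline(struct cpumask *offline)
{
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
}
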
.cpumask = cpu_possible_mask,
dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
ced->cpumask = cpu_possible_mask;
ce->clkevt.cpumask = cpu_possible_mask;
ced->cpumask = cpu_possible_mask;
ced->cpumask = cpu_possible_mask;
ced->cpumask = cpu_possible_mask;
ttcce->ce.cpumask = cpu_possible_mask;
dc_timer_dev.ce.cpumask = cpu_possible_mask;
.cpumask = cpu_possible_mask,
to_sysctr.clkevt.cpumask = cpu_possible_mask;
.cpumask = cpu_possible_mask,
event_dev->cpumask = cpu_possible_mask;
const unsigned long *irq_mask = cpumask_bits(cpu_possible_mask);
.cpumask = cpu_possible_mask,
.cpumask = cpu_possible_mask,
meson6_clockevent.cpumask = cpu_possible_mask;
.cpumask = cpu_possible_mask,
msc313e_clkevt.cpumask = cpu_possible_mask;
.cpumask = cpu_possible_mask,
ce->cpumask = cpu_possible_mask;
evt->cpumask = cpu_possible_mask;
.cpumask = cpu_possible_mask,
priv->clkevt.cpumask = cpu_possible_mask;
.cpumask = cpu_possible_mask,
ce->clkevt.cpumask = cpu_possible_mask;
cpu_possible_mask, "clockevent",
timer->clkevt.cpumask = cpu_possible_mask;
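
The long run of .cpumask = cpu_possible_mask assignments above is the standard way a single SoC-global timer says it can deliver events to any CPU; genuinely per-CPU timers use cpumask_of(cpu) instead, as the dw_apb line shows by choosing between the two. A hedged registration sketch; every demo_* name and the frequency plumbing are hypothetical:

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/types.h>

static int demo_set_next_event(unsigned long delta,
			       struct clock_event_device *ced)
{
	/* program the (hypothetical) hardware comparator here */
	return 0;
}

static struct clock_event_device demo_ced = {
	.name		= "demo-global-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_next_event	= demo_set_next_event,
	.cpumask	= cpu_possible_mask,	/* not tied to one CPU */
};

static void demo_register(u32 hz)
{
	clockevents_config_and_register(&demo_ced, hz, 0xf, 0xffffffff);
}
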
cpumask_copy(&cpus, cpu_possible_mask);
cpu_possible_mask);
of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
for_each_cpu(lcpu, cpu_possible_mask) {
drv->cpumask = (struct cpumask *)cpu_possible_mask;
cpumask = drv->cpumask ? : cpu_possible_mask;
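
In the cpuidle lines above, a driver that sets no mask of its own falls back to cpu_possible_mask, and the cast on the assignment exists because the global mask is const. A sketch of the fallback idiom (the struct name is hypothetical):

#include <linux/cpumask.h>

struct demo_driver {
	struct cpumask *cpumask;	/* NULL means "no restriction" */
};

static const struct cpumask *demo_effective_mask(struct demo_driver *drv)
{
	/* GNU ?: shorthand, same as the cpuidle hit above */
	return drv->cpumask ?: cpu_possible_mask;
}
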
mask = cpu_possible_mask;
cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
mask = cpu_possible_mask;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
return cpu_possible_mask;
cpumask_copy(&cspmu->associated_cpus, cpu_possible_mask);
cpu_possible_mask; /* ACPI */
if (!cpumask_equal(&msc->accessibility, cpu_possible_mask)) {
cpumask_copy(&msc->accessibility, cpu_possible_mask);
cpumask_copy(&fake_msc.accessibility, cpu_possible_mask);
phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
*cpu = cpumask_next(*cpu, cpu_possible_mask)) {
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
for_each_cpu_wrap((cpu), cpu_possible_mask, (start))
return cpumask_test_cpu(cpu, cpu_possible_mask);
# define task_cpu_possible_mask(p) cpu_possible_mask
return cpu_possible_mask;
return cpu_possible_mask;
steal = cpumask_next_wrap(steal, cpu_possible_mask);
for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
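
for_each_possible_cpu() is plain for_each_cpu() over this mask, and for_each_cpu_wrap() walks the same set starting at an arbitrary CPU with wrap-around, which the stealing loops above use to avoid always hammering CPU 0. Sketch (function name hypothetical):

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/printk.h>

static void demo_walk(void)
{
	unsigned int cpu;

	/* every CPU this boot could ever bring up, in id order */
	for_each_possible_cpu(cpu)
		pr_info("possible CPU%u\n", cpu);

	/* same set, but starting at the current CPU and wrapping */
	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id())
		pr_info("wrap-order CPU%u\n", cpu);
}
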
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
cpumask_andnot(isolated_cpus, cpu_possible_mask,
return cpu_possible_mask;
set_vecs = cpumask_weight(cpu_possible_mask);
if (!cpumask_subset(dest, cpu_possible_mask)) {
return kstat_irqs_desc(desc, cpu_possible_mask);
kthread_bind_mask(t, cpu_possible_mask);
affinity = cpu_possible_mask;
if (!cpumask_equal(affinity, cpu_possible_mask))
(!affinity || cpumask_equal(affinity, cpu_possible_mask)))
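
The IRQ-side checks above reject affinity masks that name impossible CPUs and treat "equal to all possible CPUs" as "no affinity requested". A hedged validation sketch (name and return convention made up):

#include <linux/cpumask.h>
#include <linux/errno.h>

static int demo_check_affinity(const struct cpumask *dest)
{
	/* naming CPUs that can never exist is a caller bug */
	if (!cpumask_subset(dest, cpu_possible_mask))
		return -EINVAL;

	/* "all possible CPUs" carries no placement information */
	if (cpumask_equal(dest, cpu_possible_mask))
		return 1;	/* treat as unset */

	return 0;
}
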
cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);
(cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
(cpu) = cpumask_next((cpu), cpu_possible_mask))
chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
cpumask = cpu_possible_mask;
if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
cpumask_and(rcu_nocb_mask, cpu_possible_mask,
for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
cpu_possible_mask)) {
return cpu_possible_mask;
cpu_possible_mask, non_housekeeping_mask);
mask = cpu_possible_mask;
return cpu_possible_mask; /* &init_task.cpus_mask */
for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
if (!cpumask_subset(mask, cpu_possible_mask))
dev->cpumask = cpu_possible_mask;
.cpumask = cpu_possible_mask,
cpumask_andnot(cpumask, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
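
Scheduler and housekeeping code above keeps computing "possible minus housekeeping" to find the isolated CPUs. A sketch assuming <linux/sched/isolation.h> and a caller-provided result mask:

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

/* CPUs excluded from scheduler domains (isolcpus=, nohz_full=, ...) */
static void demo_isolated(struct cpumask *out)
{
	cpumask_andnot(out, cpu_possible_mask,
		       housekeeping_cpumask(HK_TYPE_DOMAIN));
}
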
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
cpumask_copy(attrs->cpumask, cpu_possible_mask);
cpumask_copy(attrs->cpumask, cpu_possible_mask);
cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
kthread_bind_mask(rescuer->task, cpu_possible_mask);
cpumask_andnot(wq_isolated_cpumask, cpu_possible_mask, hk);
cpumask_and(cpumask, cpumask, cpu_possible_mask);
cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
cpumask_andnot(wq_isolated_cpumask, cpu_possible_mask,
cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
cpumask_copy(&masks[0], cpu_possible_mask);
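
The workqueue lines above always clamp caller-supplied attrs with cpumask_and() against the possible mask so user input cannot name nonexistent CPUs, falling back to the full mask when nothing survives. Sketch (name hypothetical):

#include <linux/cpumask.h>

static void demo_sanitize_mask(struct cpumask *mask)
{
	cpumask_and(mask, mask, cpu_possible_mask);
	if (cpumask_empty(mask))
		cpumask_copy(mask, cpu_possible_mask);
}
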
MASK_MSG(cpu_possible_mask));
EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask), MASK_MSG(cpu_possible_mask));
KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
cpumask_pr_args(cpu_possible_mask)); \
cpumask_copy(&mask, cpu_possible_mask);
index = cpumask_next(index, cpu_possible_mask);
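
Finally, the lpfc and set_nr_cpu_ids() lines above size per-CPU arrays from the highest possible CPU id rather than the CPU count, since possible ids may be sparse. Sketch:

#include <linux/cpumask.h>

/* ids can be sparse: want last id + 1, not num_possible_cpus() */
static unsigned int demo_array_slots(void)
{
	return cpumask_last(cpu_possible_mask) + 1;	/* == nr_cpu_ids */
}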