Symbol: nr_node_ids
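nr_node_ids is the number of NUMA node IDs the running kernel has to account for: it is declared in include/linux/nodemask.h, defined in mm/page_alloc.c with the compile-time ceiling MAX_NUMNODES as its initial value, exported to modules, and narrowed during early boot in mm/mm_init.c (all visible in the entries below). Nearly every reference in this index follows one of three patterns: sizing a per-node array, iterating node IDs from 0 to nr_node_ids - 1, or bounds-checking an externally supplied node ID. A minimal sketch of the sizing and indexing patterns, assuming kernel context; per_node_count, alloc_per_node_counters() and count_event_on_node() are made-up names for illustration:

#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

/* Hypothetical example of the dominant pattern in this index: size an
 * array with one slot per possible node ID, then index it by node ID. */
static unsigned long *per_node_count;

static int alloc_per_node_counters(void)
{
        per_node_count = kcalloc(nr_node_ids, sizeof(*per_node_count),
                                 GFP_KERNEL);
        return per_node_count ? 0 : -ENOMEM;
}

static void count_event_on_node(int node)
{
        /* Any valid node ID is strictly below nr_node_ids. */
        if ((unsigned int)node < nr_node_ids)
                per_node_count[node]++;
}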
arch/loongarch/kernel/numa.c:72: if (nr_node_ids >= 8)
arch/loongarch/kernel/numa.c:255: loongson_sysconf.nr_nodes = nr_node_ids;
arch/powerpc/mm/numa.c:78: if (nr_node_ids == MAX_NUMNODES)
arch/powerpc/mm/numa.c:86: pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
arch/powerpc/mm/numa.c:183: if (nid == 0xffff || nid >= nr_node_ids)
arch/powerpc/platforms/pseries/hotplug-cpu.c:244: if (rc && nr_node_ids > 1) {
arch/powerpc/sysdev/xive/common.c:1142: ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
arch/powerpc/sysdev/xive/common.c:1147: xive_ipis = kzalloc_objs(*xive_ipis, nr_node_ids,
arch/x86/events/intel/uncore_snbep.c:1434: if (nr_node_ids <= 8) {
arch/x86/kernel/setup_percpu.c:118: NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
arch/x86/mm/numa.c:115: if (nr_node_ids == MAX_NUMNODES)
arch/x86/mm/numa.c:119: for (node = 0; node < nr_node_ids; node++)
arch/x86/mm/numa.c:123: pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
arch/x86/mm/numa.c:419: if ((unsigned)node >= nr_node_ids) {
arch/x86/mm/numa.c:422: node, nr_node_ids);
drivers/base/arch_numa.c:50: if (WARN_ON(node < 0 || node >= nr_node_ids))
drivers/base/arch_numa.c:105: if (nr_node_ids == MAX_NUMNODES)
drivers/base/arch_numa.c:109: for (node = 0; node < nr_node_ids; node++) {
drivers/base/arch_numa.c:115: pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
drivers/hv/channel_mgmt.c:771: if (numa_node == nr_node_ids) {
drivers/hv/hv.c:186: hv_context.hv_numa_map = kzalloc_objs(struct cpumask, nr_node_ids);
drivers/infiniband/sw/siw/siw_main.c:124: int i, num_nodes = nr_node_ids;
drivers/net/ethernet/cavium/thunder/nic_main.c:1244: if (nr_node_ids > 1)
drivers/nvme/host/pci.c:621: for (i = 0; i < nr_node_ids; i++) {
drivers/nvme/host/pci.c:3647: dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids),
drivers/pci/probe.c:1079: if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
include/linux/nodemask.h:441: extern unsigned int nr_node_ids;
include/net/udp.h:297: up->udp_prod_queue = kzalloc_objs(*up->udp_prod_queue, nr_node_ids);
include/net/udp.h:300: for (int i = 0; i < nr_node_ids; i++)
kernel/bpf/syscall.c:1408: ((unsigned int)numa_node >= nr_node_ids ||
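The kernel/bpf/syscall.c entry above shows the recurring validation idiom for externally supplied node IDs (it reappears later in this index at kernel/sched/ext_idle.c:833, mm/mempolicy.c:3748, and mm/shrinker_debug.c:126): a node ID is usable only if it lies in [0, nr_node_ids). A small sketch of the pattern; check_node_id() is a hypothetical helper, not a kernel API:

#include <linux/errno.h>
#include <linux/nodemask.h>

/* Hypothetical helper: casting a possibly-negative int to unsigned folds
 * the "nid < 0" test into a single comparison against nr_node_ids. */
static int check_node_id(int nid)
{
        if ((unsigned int)nid >= nr_node_ids)
                return -EINVAL;
        if (!node_online(nid))  /* many callers also require an online node */
                return -ENODEV;
        return 0;
}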
kernel/futex/core.c:442: node = (hash >> futex_hashshift) % nr_node_ids;
kernel/futex/core.c:445: nr_node_ids, node);
kernel/futex/futex.h:103: if (nr_node_ids >= max)
kernel/sched/ext.c:4936: sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids);
kernel/sched/ext.c:6500: if (unlikely(node >= (int)nr_node_ids ||
kernel/sched/ext.c:7144: return nr_node_ids;
kernel/sched/ext_idle.c:667: scx_idle_node_masks = kzalloc_objs(*scx_idle_node_masks, nr_node_ids);
kernel/sched/ext_idle.c:833: if (node < 0 || node >= nr_node_ids) {
kernel/sched/fair.c:1692: return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
kernel/sched/fair.c:3089: nr_node_ids * sizeof(unsigned long);
kernel/sched/fair.c:3101: for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
kernel/sched/fair.c:3159: for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
kernel/sched/fair.c:3202: for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
kernel/sched/fair.c:3217: for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
kernel/sched/fair.c:3253: NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
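kernel/sched/fair.c:1692 above is the indexing rule for the NUMA-balancing fault statistics: a three-dimensional table [stat][node][priv] flattened into one linear array whose node dimension is nr_node_ids. A minimal model of that layout; the real constants live in kernel/sched/sched.h, and the value shown here is only illustrative:

#include <linux/nodemask.h>

#define NR_NUMA_HINT_FAULT_TYPES 2      /* e.g. private vs. shared faults */

/* Hypothetical re-statement of the flattened [stat][node][priv] layout:
 * node-major within each statistic, priv selecting the final slot. */
static inline int faults_idx(int stat, int nid, int priv)
{
        return NR_NUMA_HINT_FAULT_TYPES * (stat * nr_node_ids + nid) + priv;
}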
kernel/sched/topology.c:1844: for (i = 0; i < nr_node_ids; i++) {
kernel/sched/topology.c:1846: for (j = 0; j < nr_node_ids; j++) {
kernel/sched/topology.c:2072: masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
kernel/sched/topology.c:2201: for (j = 0; j < nr_node_ids; j++) {
kernel/sched/topology.c:2218: for (j = 0; j < nr_node_ids; j++) {
kernel/sched/topology.c:2350: if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
kernel/workqueue.c:1578: node = nr_node_ids;
kernel/workqueue.c:4937: kfree(nna_ar[nr_node_ids]);
kernel/workqueue.c:4938: nna_ar[nr_node_ids] = NULL;
kernel/workqueue.c:4971: nna_ar[nr_node_ids] = nna;
kernel/workqueue.c:5777: wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1);
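The kernel/workqueue.c entries show a variant of the sizing pattern: node_nr_active is allocated with nr_node_ids + 1 entries (workqueue.c:5777), and index nr_node_ids itself appears to serve as the catch-all slot for work with no node affinity (workqueue.c:1578 steers such lookups there). A hedged sketch of the idiom with made-up names:

#include <linux/atomic.h>
#include <linux/nodemask.h>
#include <linux/numa.h>

/* Hypothetical illustration of the "nr_node_ids + 1" trailing-slot idiom:
 * one counter per node, plus a final slot for node-agnostic items. */
struct per_node_active {
        unsigned int nr_slots;  /* nr_node_ids + 1 */
        atomic_t counters[];    /* counters[nr_node_ids] is the catch-all */
};

static unsigned int active_slot(int node)
{
        /* NUMA_NO_NODE and any other off-map value map to the last slot. */
        return (node >= 0 && node < (int)nr_node_ids) ? node : nr_node_ids;
}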
lib/group_cpus.c:50: masks = kzalloc_objs(cpumask_var_t, nr_node_ids);
lib/group_cpus.c:54: for (node = 0; node < nr_node_ids; node++) {
lib/group_cpus.c:72: for (node = 0; node < nr_node_ids; node++)
lib/group_cpus.c:237: for (n = 0; n < nr_node_ids; n++) {
lib/group_cpus.c:255: alloc_groups_to_nodes(numgrps, numcpus, node_groups, nr_node_ids);
lib/group_cpus.c:435: node_groups = kzalloc_objs(struct node_groups, nr_node_ids);
lib/group_cpus.c:442: for (i = 0; i < nr_node_ids; i++) {
mm/compaction.c:3001: if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
mm/hugetlb.c:3412: .size = nr_node_ids,
mm/hugetlb_cgroup.c:142: h_cgroup = kzalloc_flex(*h_cgroup, nodeinfo, nr_node_ids);
mm/hugetlb_sysfs.c:377: for (nid = 0; nid < nr_node_ids; nid++) {
mm/ksm.c:3589: buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids);
mm/ksm.c:3595: root_unstable_tree = buf + nr_node_ids;
mm/ksm.c:3602: ksm_nr_node_ids = knob ? 1 : nr_node_ids;
mm/list_lru.c:410: mlru = kmalloc_flex(*mlru, node, nr_node_ids, gfp);
mm/list_lru.c:588: lru->node = kzalloc_objs(*lru->node, nr_node_ids);
mm/memcontrol-v1.c:1769: VM_BUG_ON((unsigned int)nid >= nr_node_ids);
mm/memcontrol.c:5188: memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
mm/memory-tiers.c:915: node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids);
mm/mempolicy.c:228: new_bw = kcalloc(nr_node_ids, sizeof(unsigned int), GFP_KERNEL);
mm/mempolicy.c:232: new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids);
mm/mempolicy.c:238: for (i = 0; i < nr_node_ids; i++)
mm/mempolicy.c:250: memcpy(new_bw, old_bw, nr_node_ids * sizeof(*old_bw));
mm/mempolicy.c:1698: unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
mm/mempolicy.c:1702: nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
mm/mempolicy.c:1710: maxnode = nr_node_ids;
mm/mempolicy.c:1964: if (nmask != NULL && maxnode < nr_node_ids)
mm/mempolicy.c:2686: weights = kzalloc(nr_node_ids, GFP_KERNEL);
mm/mempolicy.c:2693: memcpy(weights, state->iw_table, nr_node_ids * sizeof(u8));
mm/mempolicy.c:2697: for (i = 0; i < nr_node_ids; i++)
mm/mempolicy.c:3650: new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
mm/mempolicy.c:3659: nr_node_ids * sizeof(u8));
mm/mempolicy.c:3661: for (i = 0; i < nr_node_ids; i++)
mm/mempolicy.c:3702: new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
mm/mempolicy.c:3705: for (i = 0; i < nr_node_ids; i++)
mm/mempolicy.c:3720: nr_node_ids * sizeof(u8));
mm/mempolicy.c:3748: if (nid < 0 || nid >= nr_node_ids)
mm/mempolicy.c:3770: for (nid = 0; nid < nr_node_ids; nid++)
mm/mempolicy.c:3816: if (nid < 0 || nid >= nr_node_ids) {
mm/mempolicy.c:3886: wi_group = kzalloc_flex(*wi_group, nattrs, nr_node_ids);
mm/mm_init.c:1774: nr_node_ids = highest + 1;
mm/page_alloc.c:313: unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
mm/page_alloc.c:315: EXPORT_SYMBOL(nr_node_ids);
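The mm/page_alloc.c and mm/mm_init.c entries just above are the symbol's life cycle: it is defined with the pessimistic default MAX_NUMNODES, exported, and narrowed during boot to one past the highest possible node ID. A reconstruction of that boot-time step, modeled on setup_nr_node_ids() in mm/mm_init.c; treat the body as illustrative rather than verbatim:

#include <linux/init.h>
#include <linux/nodemask.h>

/* Illustrative reconstruction: nr_node_ids becomes one past the highest
 * bit set in node_possible_map, so it bounds every valid node ID. */
static void __init compute_nr_node_ids(void)
{
        unsigned int highest;

        highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
        nr_node_ids = highest + 1;
}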
mm/shrinker.c:722: size *= nr_node_ids;
mm/shrinker_debug.c:56: count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
mm/shrinker_debug.c:126: if (nid < 0 || nid >= nr_node_ids)
mm/slub.c:466: for (__node = 0; __node < nr_node_ids; __node++) \
mm/slub.c:8017: if (nr_node_ids > 1) {
mm/slub.c:8395: nr_node_ids * sizeof(struct kmem_cache_node *),
mm/slub.c:8416: nr_cpu_ids, nr_node_ids);
mm/slub.c:8815: nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
mm/slub.c:8865: for (node = 0; node < nr_node_ids; node++) {
mm/vmalloc.c:5191: memset(counters, 0, nr_node_ids * sizeof(unsigned int));
mm/vmalloc.c:5224: counters = kmalloc_array(nr_node_ids, sizeof(unsigned int), GFP_KERNEL);
net/core/dev.c:13274: __alloc_percpu(sizeof(struct skb_defer_node) * nr_node_ids,
net/sunrpc/svc.c:261: unsigned int maxpools = nr_node_ids;