nr_node_ids: usage sites across the Linux kernel tree
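One source line per occurrence, with file context omitted. Three shapes recur throughout the listing: sizing a per-node allocation by nr_node_ids, bounds-checking an incoming node id against it, and iterating nodes over [0, nr_node_ids) rather than [0, MAX_NUMNODES). A hedged sketch combining all three follows the listing.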
loongson_sysconf.nr_nodes = nr_node_ids;
if (nr_node_ids >= 8)
if (nid == 0xffff || nid >= nr_node_ids)
if (nr_node_ids == MAX_NUMNODES)
pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
if (rc && nr_node_ids > 1) {
ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
xive_ipis = kzalloc_objs(*xive_ipis, nr_node_ids,
if (nr_node_ids <= 8) {
NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
if (nr_node_ids == MAX_NUMNODES)
for (node = 0; node < nr_node_ids; node++)
pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
if ((unsigned)node >= nr_node_ids) {
node, nr_node_ids);
if (nr_node_ids == MAX_NUMNODES)
for (node = 0; node < nr_node_ids; node++) {
pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
if (WARN_ON(node < 0 || node >= nr_node_ids))
if (numa_node == nr_node_ids) {
hv_context.hv_numa_map = kzalloc_objs(struct cpumask, nr_node_ids);
int i, num_nodes = nr_node_ids;
if (nr_node_ids > 1)
dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids),
for (i = 0; i < nr_node_ids; i++) {
if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
extern unsigned int nr_node_ids;
up->udp_prod_queue = kzalloc_objs(*up->udp_prod_queue, nr_node_ids);
for (int i = 0; i < nr_node_ids; i++)
((unsigned int)numa_node >= nr_node_ids ||
node = (hash >> futex_hashshift) % nr_node_ids;
nr_node_ids, node);
if (nr_node_ids >= max)
sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids);
if (unlikely(node >= (int)nr_node_ids ||
return nr_node_ids;
scx_idle_node_masks = kzalloc_objs(*scx_idle_node_masks, nr_node_ids);
if (node < 0 || node >= nr_node_ids) {
return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
nr_node_ids * sizeof(unsigned long);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
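The scheduler hits just above pack the per-task NUMA hinting fault counters into one flat unsigned long array, indexed by the formula in the task_faults_idx() hit: NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv. A minimal sketch of the inverse mapping, assuming only that formula (the decoder itself is hypothetical, not in kernel/sched/fair.c):

/*
 * Hypothetical decoder: inverts the flat index
 * idx = NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv
 * back into its (stat, node, priv) coordinates.
 */
static void foo_decode_fault_idx(unsigned int idx, unsigned int *stat,
				 unsigned int *node, unsigned int *priv)
{
	*priv = idx % NR_NUMA_HINT_FAULT_TYPES;	/* priv < NR_NUMA_HINT_FAULT_TYPES */
	idx /= NR_NUMA_HINT_FAULT_TYPES;
	*node = idx % nr_node_ids;		/* nid < nr_node_ids */
	*stat = idx / nr_node_ids;
}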
for (i = 0; i < nr_node_ids; i++) {
for (j = 0; j < nr_node_ids; j++) {
masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
for (j = 0; j < nr_node_ids; j++) {
for (j = 0; j < nr_node_ids; j++) {
if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
node = nr_node_ids;
kfree(nna_ar[nr_node_ids]);
nna_ar[nr_node_ids] = NULL;
nna_ar[nr_node_ids] = nna;
wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1);
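The four hits above size a per-node flexible array with nr_node_ids + 1 entries and then address slot nr_node_ids directly: the extra slot serves as the fallback for work without a node. A minimal sketch of that lookup idiom, with hypothetical foo_* names standing in for the workqueue structures (NUMA_NO_NODE and nr_node_ids are real kernel symbols):

#include <linux/numa.h>		/* NUMA_NO_NODE */
#include <linux/nodemask.h>	/* nr_node_ids */

struct foo_nr_active { int nr; };

struct foo_queue {
	unsigned int flags;
	struct foo_nr_active *slots[];	/* allocated with nr_node_ids + 1 entries */
};

static struct foo_nr_active *foo_node_slot(struct foo_queue *q, int node)
{
	/* the extra slot at index nr_node_ids backs NUMA_NO_NODE */
	if (node == NUMA_NO_NODE)
		node = nr_node_ids;
	return q->slots[node];
}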
for (n = 0; n < nr_node_ids; n++) {
alloc_groups_to_nodes(numgrps, numcpus, node_groups, nr_node_ids);
node_groups = kzalloc_objs(struct node_groups, nr_node_ids);
for (i = 0; i < nr_node_ids; i++) {
masks = kzalloc_objs(cpumask_var_t, nr_node_ids);
for (node = 0; node < nr_node_ids; node++) {
for (node = 0; node < nr_node_ids; node++)
if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
.size = nr_node_ids,
h_cgroup = kzalloc_flex(*h_cgroup, nodeinfo, nr_node_ids);
for (nid = 0; nid < nr_node_ids; nid++) {
buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids);
root_unstable_tree = buf + nr_node_ids;
ksm_nr_node_ids = knob ? 1 : nr_node_ids;
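The KSM hits above show a related space-saving idiom: one allocation of 2 * nr_node_ids tree roots, split into a stable half and an unstable half at buf + nr_node_ids (and collapsed to a single node's worth when merging across nodes, per the knob line). A sketch of the split under those assumptions, with hypothetical foo_* names:

#include <linux/errno.h>
#include <linux/nodemask.h>	/* nr_node_ids */
#include <linux/rbtree.h>
#include <linux/slab.h>

static struct rb_root *foo_stable_tree;
static struct rb_root *foo_unstable_tree;

static int foo_alloc_trees(void)
{
	struct rb_root *buf;

	/* one allocation, two per-node arrays of nr_node_ids roots each */
	buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	foo_stable_tree = buf;			/* buf[0 .. nr_node_ids - 1] */
	foo_unstable_tree = buf + nr_node_ids;	/* second half */
	return 0;
}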
mlru = kmalloc_flex(*mlru, node, nr_node_ids, gfp);
lru->node = kzalloc_objs(*lru->node, nr_node_ids);
VM_BUG_ON((unsigned int)nid >= nr_node_ids);
memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids);
unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
maxnode = nr_node_ids;
if (nmask != NULL && maxnode < nr_node_ids)
new_bw = kcalloc(nr_node_ids, sizeof(unsigned int), GFP_KERNEL);
new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids);
for (i = 0; i < nr_node_ids; i++)
memcpy(new_bw, old_bw, nr_node_ids * sizeof(*old_bw));
weights = kzalloc(nr_node_ids, GFP_KERNEL);
memcpy(weights, state->iw_table, nr_node_ids * sizeof(u8));
for (i = 0; i < nr_node_ids; i++)
new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
nr_node_ids * sizeof(u8));
for (i = 0; i < nr_node_ids; i++)
new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
for (i = 0; i < nr_node_ids; i++)
nr_node_ids * sizeof(u8));
if (nid < 0 || nid >= nr_node_ids)
for (nid = 0; nid < nr_node_ids; nid++)
if (nid < 0 || nid >= nr_node_ids) {
wi_group = kzalloc_flex(*wi_group, nattrs, nr_node_ids);
nr_node_ids = highest + 1;
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
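These two hits are the variable itself: defined with an initial value of MAX_NUMNODES and exported for modules. The `nr_node_ids = highest + 1;` hit just above is setup_nr_node_ids() trimming it to the highest possible node id plus one, and the repeated `if (nr_node_ids == MAX_NUMNODES)` checks in the arch setup hits near the top of the listing rely on the untouched initial value to detect that this trimming has not happened yet and trigger it on demand.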
size *= nr_node_ids;
if (nid < 0 || nid >= nr_node_ids)
count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
for (__node = 0; __node < nr_node_ids; __node++) \
if (nr_node_ids > 1) {
nr_node_ids * sizeof(struct kmem_cache_node *),
nr_cpu_ids, nr_node_ids);
nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
for (node = 0; node < nr_node_ids; node++) {
memset(counters, 0, nr_node_ids * sizeof(unsigned int));
counters = kmalloc_array(nr_node_ids, sizeof(unsigned int), GFP_KERNEL);
__alloc_percpu(sizeof(struct skb_defer_node) * nr_node_ids,
unsigned int maxpools = nr_node_ids;
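Taken together, the dominant pattern in this listing is: allocate one slot per possible node, reject node ids outside that range, and walk [0, nr_node_ids). A minimal kernel-style sketch of the combined pattern; every foo_* name is hypothetical, while nr_node_ids, kcalloc() and the error codes are real kernel API:

#include <linux/errno.h>
#include <linux/nodemask.h>	/* nr_node_ids, MAX_NUMNODES */
#include <linux/slab.h>		/* kcalloc */

struct foo_node_stats {
	unsigned long count;
};

static struct foo_node_stats *foo_stats;	/* nr_node_ids entries */

static int foo_init(void)
{
	/* size by nr_node_ids (possible nodes), not MAX_NUMNODES (ceiling) */
	foo_stats = kcalloc(nr_node_ids, sizeof(*foo_stats), GFP_KERNEL);
	if (!foo_stats)
		return -ENOMEM;
	return 0;
}

static int foo_record(int nid)
{
	/* same bounds-check shape as the hits throughout the listing */
	if (nid < 0 || nid >= nr_node_ids)
		return -EINVAL;
	foo_stats[nid].count++;
	return 0;
}

static unsigned long foo_total(void)
{
	unsigned long total = 0;
	int node;

	/* iterate [0, nr_node_ids), matching the loops above */
	for (node = 0; node < nr_node_ids; node++)
		total += foo_stats[node].count;
	return total;
}

Sizing by nr_node_ids rather than MAX_NUMNODES matters because MAX_NUMNODES is a compile-time ceiling (1 << CONFIG_NODES_SHIFT), while nr_node_ids reflects the machine that actually booted, so per-node tables shrink to what the hardware can use.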