NODE_DATA
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = spanned_pages;
NODE_DATA(nid) = memblock_alloc_or_panic(sizeof(pg_data_t), 8);
NODE_DATA(0)->node_spanned_pages = memblock_end_of_DRAM() >> PAGE_SHIFT;
NODE_DATA(0)->node_id = 0;
return NODE_DATA(pfn_to_nid(pfn));
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
if (!NODE_DATA(nid))
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
NODE_DATA(nid)->node_id = nid;
p = NODE_DATA(nid);
if (NODE_DATA(i)->node_spanned_pages)
register_page_bootmem_info_node(NODE_DATA(i));
if (node_online(node) && NODE_DATA(node) &&
last && last != NODE_DATA(node))
last = NODE_DATA(node);
register_page_bootmem_info_node(NODE_DATA(i));
struct zone *zone = NODE_DATA(nid)->node_zones + z;
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
pg_data_t *pgdat = NODE_DATA(nid);
struct pglist_data *pgdat = NODE_DATA(nid);
struct pglist_data *pgdat = NODE_DATA(nid);
pg_data_t *pgdat = NODE_DATA(nid);
pgdat = NODE_DATA(numa_node_id);
return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
return NODE_DATA(page_to_nid(page));
return NODE_DATA(folio_nid(folio));
/*
 * Per-node accessor macros: read memory-layout fields through the node's
 * pg_data_t.  NODE_DATA(nid) is defined elsewhere; it appears to yield a
 * pointer to the struct pglist_data for node `nid` -- NOTE(review):
 * confirm against the enclosing header.
 */
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
/* End pfn is computed via pgdat_end_pfn(), not stored as a field. */
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
pgdat = NODE_DATA(dst_nid);
ret = compact_node(NODE_DATA(nid), false);
pg_data_t *pgdat = NODE_DATA(nid);
compact_node(NODE_DATA(nid), false);
pg_data_t *pgdat = NODE_DATA(nid);
struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
NODE_DATA(nid)->kcompactd = NULL;
lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
NODE_DATA(nid),
NODE_DATA(nid),
struct pglist_data *pgdata = NODE_DATA(nid);
struct pglist_data *pgdata = NODE_DATA(sc->nid);
NODE_DATA(nid), (char *)NODE_DATA(nid) + nd_size);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
&NODE_DATA(dev->id)->mf_stats; \
mf_stats = &NODE_DATA(nid)->mf_stats;
pgdat = NODE_DATA(node);
pgdat = NODE_DATA(node);
pg_data_t *pgdat = NODE_DATA(node);
pgdat = NODE_DATA(node);
return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
pgdat = NODE_DATA(nid);
pg_data_t *pgdat = NODE_DATA(nid);
struct pglist_data *pgdat = NODE_DATA(nid);
zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
pg_data_t *pgdat = NODE_DATA(node);
pg_data_t *pgdat = NODE_DATA(node);
zone->zone_pgdat = NODE_DATA(nid);
WARN_ON(pgdat != NODE_DATA(0));
pg_data_t *pgdat = NODE_DATA(nid);
if (!NODE_DATA(nid))
pgdat = NODE_DATA(nid);
kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
shuffle_free_memory(NODE_DATA(nid));
pgdat = NODE_DATA(nid);
pg_data_t *pgdat = NODE_DATA(nid);
if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
NODE_DATA(nid)->first_deferred_pfn = pfn;
struct pglist_data *node = NODE_DATA(nid);
return NODE_DATA(first_online_node);
return NODE_DATA(nid);
memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
reclaim_throttle(NODE_DATA(numa_node_id()),
z = &NODE_DATA(node)->node_zones[i];
pg_data_t *node = NODE_DATA(node_order[i]);
pg_data_t *pgdat = NODE_DATA(nid);
base = NODE_DATA(page_to_nid(page))->node_page_ext;
nr_pages = NODE_DATA(nid)->node_spanned_pages;
NODE_DATA(nid)->node_page_ext = base;
if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
pg_data_t *pgdat = NODE_DATA(nid);
return NODE_DATA(slab_nid(slab));
struct pglist_data *pgdat = NODE_DATA(nid);
NODE_DATA(nid), size);
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
struct pglist_data *pgdat = NODE_DATA(nid);
struct pglist_data *pgdat = NODE_DATA(nid);
struct pglist_data *pgdat = NODE_DATA(nid);
struct pglist_data *pgdat = NODE_DATA(nid);
pg_data_t *pgdat = NODE_DATA(nid);
pg_data_t *pgdat = NODE_DATA(nid);
ret = user_proactive_reclaim((char *)buf, NULL, NODE_DATA(nid));
struct zone *zones = NODE_DATA(node)->node_zones;
struct zone *zones = NODE_DATA(node)->node_zones;
*pgdat = NODE_DATA(nid);
lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));