PAGES_PER_SECTION: usage sites across the kernel tree (one source line per hit; continuation lines of multi-line statements appear on their own, as reported).
WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page));
(end - start < PAGES_PER_SECTION * sizeof(struct page)))
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
if (end - start < PAGES_PER_SECTION * sizeof(struct page))
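The WARN_ON_ONCE above asserts that a vmemmap populate call spans at most one section's worth of memmap, while the two "<" comparisons pick base-page mappings when the range is smaller than a full section's memmap (the shape of the arm64 and x86 vmemmap_populate() paths). A minimal userspace sketch of the arithmetic, assuming x86_64 defaults (4 KiB pages, 128 MiB sections, 64-byte struct page):

	#include <stdio.h>

	#define PAGE_SHIFT        12
	#define SECTION_SIZE_BITS 27	/* assumed: 128 MiB sections */
	#define PAGES_PER_SECTION (1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
	#define STRUCT_PAGE_SIZE  64UL	/* assumed sizeof(struct page) */

	int main(void)
	{
		/* One section's memmap: 32768 pages * 64 bytes = 2 MiB. */
		unsigned long section_memmap = PAGES_PER_SECTION * STRUCT_PAGE_SIZE;
		/* A range smaller than that cannot use a full huge mapping,
		 * so the populate path falls back to base pages. */
		unsigned long start = 0, end = 1UL << 20;

		printf("section memmap: %lu bytes\n", section_memmap);
		printf("use basepages:  %d\n", end - start < section_memmap);
		return 0;
	}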
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
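The repeated nr_pages = PAGES_PER_SECTION * sections_per_block hits are the memory-block sizing in the memory sysfs driver: a hotpluggable block is always a whole multiple of sections. A hedged sketch of that arithmetic (the 2 GiB block size and section constants are illustrative assumptions, not values from this listing):

	#define PAGE_SHIFT        12
	#define SECTION_SIZE_BITS 27	/* assumed: 128 MiB sections */
	#define PAGES_PER_SECTION (1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))

	/* In the real driver this comes from memory_block_size_bytes(). */
	static const unsigned long sections_per_block = 16;	/* 2 GiB blocks */

	unsigned long memory_block_nr_pages(void)
	{
		/* 16 sections * 32768 pages = 524288 pages per block */
		return PAGES_PER_SECTION * sections_per_block;
	}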
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION);
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
pfn += PAGES_PER_SECTION) {
pfn += PAGES_PER_SECTION) {
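The virtio_mem loops above walk a range one section at a time and unwind on failure. A self-contained sketch of that shape; fake_offline() and fake_online() are stand-ins for the driver's helpers, and the section size is an assumed x86_64 value:

	#define PAGES_PER_SECTION (1UL << 15)	/* assumed */

	int fake_offline(unsigned long pfn, unsigned long nr_pages);
	void fake_online(unsigned long pfn, unsigned long nr_pages);

	int offline_range(unsigned long start_pfn, unsigned long end_pfn)
	{
		unsigned long pfn;
		int rc = 0;

		for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
			rc = fake_offline(pfn, PAGES_PER_SECTION);
			if (rc)
				break;
		}
		/* On failure, re-online exactly the sections already taken. */
		if (rc)
			for (; start_pfn < pfn; start_pfn += PAGES_PER_SECTION)
				fake_online(start_pfn, PAGES_PER_SECTION);
		return rc;
	}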
chunk_size = max_t(uint64_t, PFN_PHYS(PAGES_PER_SECTION),
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
balloon_hotplug = round_up(credit, PAGES_PER_SECTION);
unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
return round_up(max_pfn, PAGES_PER_SECTION);
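The balloon hits round requests up to whole sections before hotplugging, because memory can only be added with section granularity. On a power-of-two boundary the kernel's round_up() is pure mask arithmetic; a runnable sketch with an assumed section size:

	#include <stdio.h>

	#define PAGES_PER_SECTION 32768UL	/* assumed x86_64 value */
	/* Equivalent to the kernel's round_up() for power-of-two y. */
	#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

	int main(void)
	{
		unsigned long credit = 1000;	/* pages the balloon wants */

		/* 1000 pages round up to one full section: 32768. */
		printf("%lu\n", round_up(credit, PAGES_PER_SECTION));
		return 0;
	}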
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
return next_pfn % PAGES_PER_SECTION;
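Those two macros, together with the modulo above, are the complete section-alignment toolkit: round down, round up, and offset within a section. A runnable demonstration, assuming the x86_64 default section size (SECTION_ALIGN_DOWN is the sibling macro from the same header):

	#include <stdio.h>

	#define PAGES_PER_SECTION (1UL << (27 - 12))	/* 32768, assumed */
	#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
	#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
	#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)

	int main(void)
	{
		unsigned long pfn = 40000;	/* inside the second section */

		printf("up:     %lu\n", SECTION_ALIGN_UP(pfn));		/* 65536 */
		printf("down:   %lu\n", SECTION_ALIGN_DOWN(pfn));	/* 32768 */
		printf("offset: %lu\n", pfn % PAGES_PER_SECTION);	/* 7232 */
		return 0;
	}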
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
mapsize = sizeof(struct page) * PAGES_PER_SECTION;
PAGES_PER_SECTION);
return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION;
start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
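These memblock lines trim unused memmap between banks on classic SPARSEMEM, where the memmap for a hole may only be freed up to a section boundary: pfn_valid() assumes every present section has a complete memmap, and non-present sections never had one allocated. A worked example with assumed 32768-page sections: if the previous bank ends at pfn 40000 and the next starts at 200000, ALIGN(40000, 32768) = 65536, so the min() clamps the free to [40000, 65536), the tail of the section that is actually present.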
online_pages += PAGES_PER_SECTION;
if (nr_pages >= PAGES_PER_SECTION)
online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
if (nr_pages >= PAGES_PER_SECTION)
offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
!IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
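The paired checks above enforce the contract that online/offline operates on whole sections: the section bitmaps are only updated for fully covered sections, and misaligned spans trip the WARN. A runnable sketch of the alignment test with an assumed section size:

	#include <stdio.h>
	#include <stdbool.h>

	#define PAGES_PER_SECTION 32768UL	/* assumed x86_64 value */
	#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

	static bool span_ok(unsigned long start_pfn, unsigned long nr_pages)
	{
		return nr_pages &&
		       IS_ALIGNED(start_pfn, PAGES_PER_SECTION) &&
		       IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION);
	}

	int main(void)
	{
		printf("%d\n", span_ok(32768, 65536));	/* 1: two whole sections */
		printf("%d\n", span_ok(32768, 1000));	/* 0: partial section */
		return 0;
	}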
min_align = PAGES_PER_SECTION;
if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
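min_align captures the same rule one level down: classic SPARSEMEM can only hot(un)plug whole sections, while SPARSEMEM_VMEMMAP relaxes the unit to a 2 MiB subsection. A sketch of that span check, with assumed x86_64 constants and a plain flag standing in for the config option:

	#include <errno.h>

	#define PAGES_PER_SECTION    32768UL	/* assumed */
	#define PAGES_PER_SUBSECTION 512UL	/* 2 MiB / 4 KiB, assumed */
	#define IS_ALIGNED(x, a)     (((x) & ((a) - 1)) == 0)

	int check_span(unsigned long pfn, unsigned long nr_pages, int vmemmap)
	{
		unsigned long min_align = vmemmap ? PAGES_PER_SUBSECTION
						  : PAGES_PER_SECTION;

		if (!nr_pages || !IS_ALIGNED(pfn, min_align) ||
		    !IS_ALIGNED(nr_pages, min_align))
			return -EINVAL;
		return 0;
	}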
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
.align = PAGES_PER_SECTION,
.min_chunk = PAGES_PER_SECTION,
spfn = epfn, epfn += PAGES_PER_SECTION) {
if ((nr_initialised > PAGES_PER_SECTION) &&
(pfn & (PAGES_PER_SECTION - 1)) == 0) {
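The deferred-init hits use the constant for two rules: the padata job's align/min_chunk keep worker threads from splitting a section between them, and early boot stops initializing struct pages at the first section boundary once one section's worth is done. A sketch of the latter test (section size assumed):

	#include <stdbool.h>

	#define PAGES_PER_SECTION 32768UL	/* assumed x86_64 value */

	/* Defer the rest of the node once a full section is initialised
	 * and pfn sits on a section boundary. */
	bool defer_init(unsigned long pfn, unsigned long nr_initialised)
	{
		return nr_initialised > PAGES_PER_SECTION &&
		       (pfn & (PAGES_PER_SECTION - 1)) == 0;
	}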
end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
if (pfn_align && pfn_align < PAGES_PER_SECTION) {
unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) / SZ_1M;
pfn &= (PAGES_PER_SECTION-1);
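With the assumed 4 KiB pages and 32768-page sections, PFN_PHYS(PAGES_PER_SECTION) is 32768 * 4096 bytes = 128 MiB, so sect_align_mb evaluates to 128: a NUMA layout whose node boundaries are not 128 MiB aligned cannot keep the node id in the section array and gets rejected. The pfn &= (PAGES_PER_SECTION-1) hit is the complementary operation, reducing a pfn to its offset within its section.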
table_size = page_ext_size * PAGES_PER_SECTION;
table_size = page_ext_size * PAGES_PER_SECTION;
for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
end = pfn - PAGES_PER_SECTION;
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
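The page_ext loops above allocate one table per section and roll back on failure; note that after the first loop exits, pfn sits one full section past the section that failed. A self-contained sketch of that shape, with init_section()/free_section() as stand-ins for the per-section helpers:

	#define PAGES_PER_SECTION 32768UL	/* assumed x86_64 value */

	int init_section(unsigned long pfn);	/* stand-in */
	void free_section(unsigned long pfn);	/* stand-in */

	int online_range(unsigned long start, unsigned long end)
	{
		unsigned long pfn;
		int fail = 0;

		for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
			fail = init_section(pfn);
		if (!fail)
			return 0;

		/* Free only the sections that fully succeeded: the failed
		 * section starts at pfn - PAGES_PER_SECTION. */
		end = pfn - PAGES_PER_SECTION;
		for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
			free_section(pfn);
		return -1;
	}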
pfns = min(nr_pages, PAGES_PER_SECTION
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
memmap_boot_pages_add(DIV_ROUND_UP(PAGES_PER_SECTION * sizeof(struct page),
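The two section_map_size() variants explain themselves with a little arithmetic: on x86_64 one section's memmap is exactly one PMD, so the vmemmap flavor's ALIGN to PMD_SIZE wastes nothing. A runnable check under those assumptions (64-byte struct page, 4 KiB pages, 128 MiB sections, 2 MiB PMDs):

	#include <stdio.h>

	int main(void)
	{
		unsigned long pages_per_section = 1UL << (27 - 12);	/* assumed */
		unsigned long pmd_size = 2UL << 20;			/* assumed */
		unsigned long memmap = pages_per_section * 64;		/* bytes */

		/* 32768 * 64 = 2097152 = exactly one 2 MiB PMD. */
		printf("memmap %lu, PMD %lu, waste %lu\n",
		       memmap, pmd_size, memmap % pmd_size);
		return 0;
	}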
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
PAGES_PER_SECTION), GFP_KERNEL, nid);
nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
if (nr_pages < PAGES_PER_SECTION && early_section(ms))
for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
pfn < end_pfn; pfn += PAGES_PER_SECTION)
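This last group pairs populate and depopulate: a section's memmap is created PAGES_PER_SECTION pages at a time, and tearing it down hands vmemmap_free() the virtual span of PAGES_PER_SECTION struct pages, while the early_section() test keeps boot-time memmaps out of sub-section hot-remove. A sketch of the span computation; the struct page layout and vmemmap_free() declaration are stand-ins:

	#define PAGES_PER_SECTION 32768UL	/* assumed x86_64 value */

	struct page { unsigned long pad[8]; };	/* stand-in, 64 bytes */

	void vmemmap_free(unsigned long start, unsigned long end);	/* stand-in */

	void depopulate_section(struct page *memmap)
	{
		/* One section's memmap span is plain pointer arithmetic. */
		unsigned long start = (unsigned long)memmap;
		unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

		vmemmap_free(start, end);
	}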