SECTION_SIZE
/* Size of the fixed virtual window reserved for the flattened device tree:
 * two MMU sections (SECTION_SIZE is defined elsewhere). */
#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
/*
 * Virtual address at which a physical FDT at 'physbase' is mapped: the
 * section-aligned part of the address is replaced by FDT_FIXED_BASE
 * (defined elsewhere) while the offset within the section is preserved.
 * NOTE(review): correctness relies on FDT_FIXED_BASE being section-aligned
 * and on SECTION_SIZE being a power of two — defined elsewhere, confirm.
 */
#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
/* Mask selecting the section-aligned part of an address (clears the
 * low bits); assumes SECTION_SIZE is a power of two. */
#define SECTION_MASK (~(SECTION_SIZE-1))
if (round_down(base + size, SECTION_SIZE) <
round_up(base, SECTION_SIZE) + SECTION_SIZE)
start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
size = ALIGN(size, SECTION_SIZE);
omap_secure_memblock_base = arm_memblock_steal(size, SECTION_SIZE);
/* Static I/O mapping window sizes: one MMU section each.
 * NOTE(review): PPSB/APB presumably refer to SoC peripheral buses —
 * confirm against the platform's memory map documentation. */
#define IO_PPSB_SIZE SECTION_SIZE
#define IO_APB_SIZE SECTION_SIZE
if (SECTION_SIZE < PMD_SIZE && pmd_leaf(pmd[1])) {
addr += SECTION_SIZE;
addr += SECTION_SIZE;
if (addr & SECTION_SIZE)
local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
SECTION_SIZE);
addr += SECTION_SIZE)
vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
if (addr & SECTION_SIZE)
phys += SECTION_SIZE;
} while (pmd++, addr += SECTION_SIZE, addr != end);