SZ_4G
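/*
 * The lines below are kernel-tree usages of the SZ_4G size constant.
 * For reference, SZ_4G is defined in include/linux/sizes.h; the _AC()
 * macro (from <uapi/linux/const.h>) keeps the literal usable from both
 * C and assembly:
 */
#define SZ_4G		_AC(0x100000000, ULL)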
#define CRASH_ADDR_LOW_MAX SZ_4G
end = SZ_4G - 1;
if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
mce_limit = SZ_4G;
res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
return PAGE_ALIGN(min_t(unsigned long long, rlimit(RLIMIT_STACK), SZ_4G));
# define CRASH_ADDR_LOW_MAX SZ_4G
#define MAX_GAP_END SZ_4G
.mask_lo = (u32)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V,
return PAGE_ALIGN(min_t(unsigned long long, rlimit(RLIMIT_STACK), SZ_4G));
if (addr && addr < SZ_4G)
info.low_limit = SZ_4G;
return memblock_start_of_DRAM() + SZ_4G;
return (sys < SZ_4G) ? sys : sys - (SZ_4G - top_lm);
(addr >= top_lm && addr < SZ_4G) || addr >= top_hm) {
#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
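/* Assuming i915's usual GUC_GGTT_TOP of 0xFEE00000, this works out to
 * the top 18 MiB of the 4 GiB GGTT, a range the GuC cannot address. */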
size = (SZ_4G - 1) & PAGE_MASK;
#define ROGUE_PDSCODEDATA_HEAP_SIZE SZ_4G
#define ROGUE_USCCODE_HEAP_SIZE SZ_4G
return SZ_4G;
return SZ_4G;
#define PFN_4G (SZ_4G >> PAGE_SHIFT)
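/* With 4 KiB pages (PAGE_SHIFT == 12), PFN_4G above evaluates to
 * 0x100000: the page-frame number of the first page at the 4 GiB line. */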
drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
0, SZ_4G,
user_va_range = full_va_range > SZ_4G ?
va_range = SZ_4G;
u64 mm_size = SZ_4G;
ggtt_size = SZ_4G - ggtt_start;
#define MTK_IOMMU_IOVA_SZ_4G (SZ_4G - SZ_8M) /* 8M as gap */
{ .iova_base = SZ_4G, .size = SZ_4G * 3}, /* APU VPU */
{ .iova_base = SZ_4G, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 4G ~ 8G */
{ .iova_base = SZ_4G * 2, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 8G ~ 12G */
{ .iova_base = SZ_4G * 3, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 12G ~ 16G */
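/*
 * The entries above are MediaTek IOMMU IOVA region tables: each pairs
 * an IOVA base with a window size, carving the space into 4 GiB windows
 * (minus the 8 MiB gap, per MTK_IOMMU_IOVA_SZ_4G above). A minimal
 * sketch of the entry type, assuming the layout used by
 * drivers/iommu/mtk_iommu.c in recent kernels:
 */
struct mtk_iommu_iova_region {
	dma_addr_t		iova_base;
	unsigned long long	size;
};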
case SZ_4G:
if (size + cpu_addr >= SZ_4G) {
if (size + cpu_addr >= SZ_4G) {
pci->region_limit = (max << 32) | (SZ_4G - 1);
(pci_offset < SZ_4G && pci_offset > SZ_2G)) {
if (inbound_wins[2].pci_offset >= SZ_4G ||
(inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
ret = dma_direct_set_offset(dev, PHYS_OFFSET, 0, SZ_4G);
cs1_top_address = SZ_4G - 1;
cs0_top_address = SZ_4G - 1;
cs1_top_address = SZ_4G - 1;
map->start = SZ_4G;
.mapped_logical = {SZ_4G + SZ_4M}
.physical_start = SZ_4G,
#define CRASH_ADDR_LOW_MAX SZ_4G
if (vm_range > SZ_4G)
if (len > SZ_4G)
return round_up(ret, SZ_4G);
#define KERN_VM_SZ (SZ_4G + GUARD_SZ)
if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > SZ_4G || vma->vm_pgoff))
if (!memblock_bottom_up() && limit >= SZ_4G + size) {
addr = memblock_alloc_range_nid(size, align, SZ_4G, limit,
if (usable_startpfn < PHYS_PFN(SZ_4G)) {
if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
const uint64_t gpa = SZ_4G;
const uint64_t start_gpa = SZ_4G;
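/*
 * Taken together, most of these hits follow one pattern: clamping an
 * address, size, or limit to the 4 GiB boundary. A minimal sketch of
 * that pattern; clamp_to_low_4g() is a hypothetical helper for
 * illustration, not a kernel API:
 */
#include <linux/minmax.h>
#include <linux/sizes.h>
#include <linux/types.h>

static inline phys_addr_t clamp_to_low_4g(phys_addr_t limit)
{
	/* Cap the limit at the last byte below 4 GiB. */
	return min_t(phys_addr_t, limit, SZ_4G - 1);
}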