SZ_4M
return round_down(image_addr, SZ_4M) + SZ_512M;
#define L4_34XX_SIZE SZ_4M /* 1MB of 128MB used, want 1MB sect */
#define L4_WK_AM33XX_SIZE SZ_4M /* 1MB of 128MB used, want 1MB sect */
#define L4_44XX_SIZE SZ_4M
#define L4_PER_44XX_SIZE SZ_4M
#define L4_54XX_SIZE SZ_4M
#define L4_PER_54XX_SIZE SZ_4M
.size = SZ_4M,
.offset = SZ_4M,
.offset = SZ_4M + SZ_64M,
.size = SZ_256M - (SZ_4M + SZ_64M),
.size = SZ_4M,
.size = SZ_4M,
BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
return SZ_4M / SZ_4K;
#define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_4M) - 1))
#define USER_TOP (MODULES_BASE - SZ_4M)
#define USER_TOP (MODULES_BASE - SZ_4M)
for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++, block += SZ_4M) {
__set_huge_pte_at(pmdp, pte_offset_kernel(pmdp + 1, 0), pte_val(pte) + SZ_4M);
return IS_ENABLED(CONFIG_64BIT) ? SZ_2M : SZ_4M;
.size = SZ_4M,
.end = SZ_4M - 1,
.end = SZ_4M - 1,
.end = 0xfc800000 + SZ_4M - 1,
CACHE_ENTRY(0x29, CACHE_L3, SZ_4M),	/* 8-way set assoc, sectored cache, 64 byte line size */
CACHE_ENTRY(0x46, CACHE_L3, SZ_4M),	/* 4-way set assoc, 64 byte line size */
CACHE_ENTRY(0x49, CACHE_L3, SZ_4M),	/* 16-way set assoc, 64 byte line size */
CACHE_ENTRY(0xd8, CACHE_L3, SZ_4M),	/* 12-way set assoc, 64 byte line size */
CACHE_ENTRY(0xdd, CACHE_L3, SZ_4M),	/* 12-way set assoc, 64 byte line size */
CACHE_ENTRY(0xe3, CACHE_L3, SZ_4M),	/* 16-way set assoc, 64 byte line size */
max_gap_size = SZ_4M;
#define BLK_DEF_MAX_SECTORS_CAP (SZ_4M >> SECTOR_SHIFT)
if (telemetry_data_sz > SZ_4M) {
size_t pool_chunk_size = SZ_4M;
SZ_4M);
#define SE_MAX_MEM_ALLOC SZ_4M
if (d->residue >= SZ_4M) {
if (period_len >= SZ_4M)
#define MAX_SHM_MEM_SZ SZ_4M
vram_size -= SZ_4M;
#define ETNAVIV_SOFTPIN_START_ADDRESS SZ_4M /* must be >= SUBALLOC_SIZE */
obj = i915_gem_object_create_lmem(i915, SZ_4M, 0);
sz = i915_prandom_u32_max_state(SZ_4M, &prng);
SZ_4M,
return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
.lmem_size = SZ_4M,
return i915_gem_object_create_internal(gt->i915, SZ_4M);
err = gsc_allocate_and_map_vma(gsc, SZ_4M);
SZ_4M,
.gmem = SZ_4M,
#define XE_REG_ADDR_MAX SZ_4M
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
sys_bo = xe_bo_create_user(xe, NULL, SZ_4M,
ccs_bo = xe_bo_create_user(xe, NULL, SZ_4M,
vram_bo = xe_bo_create_user(xe, NULL, SZ_4M,
bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4M,
#define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);
xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
#define DGFX_WOPCM_SIZE SZ_4M
#define MTL_WOPCM_SIZE SZ_4M
#define HISI_PTT_TRACE_BUF_SIZE SZ_4M
caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
#define M2701_IOMMU_PGT_SIZE SZ_4M
#define VPU_EXT_D_SIZE SZ_4M
#define FIMC_IS_FW_SIZE_MAX (SZ_4M)
unsigned long mem_size = SZ_4M;
ret = aperture_remove_conflicting_devices(base, SZ_4M, "simple-framebuffer");
DEFINE_RES_MEM_NAMED(LS2K_DISPLAY_RES_START, SZ_4M, "simpledrm-res"),
max_buf = SZ_4M;
SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
SZ_16K, SZ_8K, SZ_4M, NAND_NEED_SCRAMBLING, 6, 1664,
.size = SZ_4M,
#define HINIC_DB_SIZE SZ_4M
case SZ_4M:
case SZ_4M:
ispi->chip0_size = SZ_4M;
#define NXP_FSPI_MIN_IOMAP SZ_4M
#define NXP_XSPI_MIN_IOMAP SZ_4M
#define MAX_INBOUND_BUFFER_SIZE SZ_4M
musb_dma->max_len = SZ_4M;
(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
!size || size & 2047 || size > SZ_4M)
if (len > SZ_4M / sizeof(struct adfs_bigdirentry) ||
sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
thresh = SZ_4M;
btrfs_set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
if (start != SZ_4M || end != SZ_32M - 1) {
if (start != SZ_4M || end != SZ_32M - 1) {
.physical_start = SZ_64M - SZ_4M,
{SZ_64M - SZ_4M, SZ_64M - SZ_4M + SZ_256M},
.mapped_logical = {SZ_4G + SZ_4M}
ret = btrfs_remove_free_space(cache, 0, SZ_4M);
if (test_check_exists(cache, 0, SZ_4M)) {
ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
SZ_4M, 1);
ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 1);
ret = btrfs_remove_free_space(cache, SZ_4M, SZ_1M);
if (test_check_exists(cache, SZ_4M, SZ_1M)) {
ret = test_add_free_space_entry(cache, SZ_1M, SZ_4M, 1);
ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1);
ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_4M);
if (test_check_exists(cache, 3 * SZ_1M, SZ_4M)) {
ret = btrfs_add_free_space(cache, 0, SZ_4M);
ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1);
ret = btrfs_remove_free_space(cache, 0, SZ_4M);
if (test_check_exists(cache, 0, SZ_4M)) {
ret = btrfs_add_free_space(cache, 0, SZ_4M);
ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, SZ_4M);
#define BTRFS_MIN_ZONE_SIZE SZ_4M
const size_t read_size = SZ_2K, bkt_size = 256, max = SZ_4M;
ND_IOCTL_MAX_BUFLEN = SZ_4M,
.size = SZ_4M
.size = SZ_4M
.base = SZ_4M,
.base = SZ_4M,
.size = SZ_4M
ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_4M, SZ_1M));
ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G, SZ_4M));
.size = SZ_4M
ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
SPA_VCD_SIZE = SZ_4M,
nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);