SZ_32K
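SZ_32K is the kernel's 32 KiB size constant from include/linux/sizes.h; the lines below are in-tree uses of it. Its definition, verbatim from sizes.h:

#define SZ_32K				0x00008000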
vectors_base = ioremap(VECTORS_BASE, SZ_32K);
da8xx_ddr2_ctlr_base = ioremap(DA8XX_DDR2_CTL_BASE, SZ_32K);
if (imm < 0 || imm >= SZ_32K) {
int_vector_size = max_t(phys_addr_t, SZ_32K, int_vector_size);
int_vector_size = SZ_32K;
CACHE_ENTRY(0x09, CACHE_L1_INST, SZ_32K ), /* 4-way set assoc, 64 byte line size */
CACHE_ENTRY(0x2c, CACHE_L1_DATA, SZ_32K ), /* 8-way set assoc, 64 byte line size */
CACHE_ENTRY(0x30, CACHE_L1_INST, SZ_32K ), /* 8-way set assoc, 64 byte line size */
CACHE_ENTRY(0x68, CACHE_L1_DATA, SZ_32K ), /* 4-way set assoc, sectored cache, 64 byte line size */
if (abo->mem.size > SZ_32K) {
#define BAM_DESC_FIFO_SIZE SZ_32K
#define BAM_FIFO_SIZE (SZ_32K - 8)
array_size = SZ_32K - 1;
array_size = SZ_32K - 2;
array_size = SZ_32K - 4;
s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */
ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL);
if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_32K }, .v = { 32, SZ_32K }) },
#define ICL_WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K)
#define GSC_PROXY_BUFFER_SIZE SZ_32K
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
KUNIT_ASSERT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, num_vfs));
KUNIT_EXPECT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, 1));
#define GSC_PROXY_BUFFER_SIZE SZ_32K
#define WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K)
(SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K | \
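Every SZ_* constant is a distinct power of two, so OR-ing them as above yields a bitmap in which a set bit marks one supported size. A minimal sketch of testing such a mask (hypothetical helper, not from the listing; is_power_of_2() comes from <linux/log2.h>):

static inline bool size_in_mask(unsigned long mask, unsigned long size)
{
	/* only an exact power-of-two size maps to a single bit in the mask */
	return is_power_of_2(size) && (mask & size);
}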
mb->channel.regs = hsp->regs + SZ_64K + i * SZ_32K;
#define SXE_BUFFER SZ_32K
#define FIMC_IS_FW_SIZE_MIN (SZ_32K)
#define DRAIN_BUFFER_SIZE SZ_32K
dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
.size = SZ_32K,
.sector_size = SZ_32K,
.size = SZ_32K,
.sector_size = SZ_32K,
.sector_size = SZ_32K,
.sector_size = SZ_32K,
.sector_size = SZ_32K,
.sector_size = SZ_32K,
.sector_size = SZ_32K,
.sector_size = SZ_32K,
.sector_size = SZ_32K,
#define MSMC_RAM_SIZE_SR1 (SZ_64K + SZ_32K + SZ_2K) /* 0x1880 x 8 x 2 */
resource_size_t mw_size = SZ_32K;
static unsigned int sgl_threshold = SZ_32K;
msi_addr &= ~(SZ_32K - 1);
entry.res->end = msi_addr + SZ_32K - 1;
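The two lines above rely on SZ_32K being a power of two: masking with ~(SZ_32K - 1) rounds msi_addr down to a 32 KiB boundary, and adding SZ_32K - 1 gives the inclusive end of that window. An equivalent spelling with the kernel's ALIGN_DOWN() helper from <linux/align.h> (names mirror the listing):

msi_addr = ALIGN_DOWN(msi_addr, SZ_32K);	/* round down to 32 KiB */
entry.res->end = msi_addr + SZ_32K - 1;		/* inclusive end of the window */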
#define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K
page_size = SZ_32K;
#define NVM_MIN_SIZE SZ_32K
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), SZ_32K - 1);
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), SZ_32K - 1);
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), SZ_32K - 1);
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), SZ_32K - 1);
if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
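The two branches above form one symmetric test: two blocks count as close when the gap between them is under 32 KiB, whichever one comes first. A hedged restatement as a hypothetical helper (same unsigned arithmetic as the original, so an overlapping pair underflows to a huge gap and fails the test):

static bool close_within_32k(u64 a, u64 b, u32 blocksize)
{
	/* gap from the end of the lower block to the start of the higher one */
	return (a < b && b - (a + blocksize) < SZ_32K) ||
	       (b < a && a - (b + blocksize) < SZ_32K);
}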
#define BTRFS_ASYNC_DISCARD_MIN_FILTER (SZ_32K)
ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
em->disk_bytenr = SZ_32K; /* avoid merging */
em->len = SZ_32K;
em->disk_num_bytes = SZ_32K;
em->ram_bytes = SZ_32K;
{ .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
{ .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
{ .start = SZ_32K, .len = SZ_4K}, /* [32k, 36k) */
{ .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
ret = add_compressed_extent(inode, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
start = SZ_32K - SZ_4K;
end = SZ_32K - 1;
start = SZ_32K;
em->start = SZ_32K;
em->disk_bytenr = SZ_32K;
em = btrfs_lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
if (btrfs_extent_map_block_start(em) != SZ_32K + SZ_4K) {
#define SZ_48K (SZ_32K + SZ_16K)
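A worked check of the composition above, since sizes.h defines only power-of-two constants and 48 KiB must be built locally:

/* SZ_32K + SZ_16K = 0x8000 + 0x4000 = 0xC000 = 49152 bytes = 48 KiB */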
u64 hole_start = logical1 + SZ_32K;
u64 len1 = SZ_32K;
if (len1 != SZ_32K) {
(u64)SZ_32K, len1);
#define XFS_HEALTHMON_MAX_EVENTS (SZ_32K / \
#define PREALLOC_BUFFER (SZ_32K)
#define PREALLOC_BUFFER_MAX (SZ_32K)
#define SPRD_COMPR_IRAM_BUF_SIZE SZ_32K
const unsigned int default_size = SZ_32K;
MEMORY_BASE_OFFSET(0, offset) + SZ_32K);
#define MEM_SIZE SZ_32K