SLAB_AREA_SIZE
&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
SLAB_AREA_SIZE);
SLAB_AREA_SIZE, B_ALREADY_WIRED,
+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
areaCount * SLAB_AREA_SIZE / 1024);
((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
return key / SLAB_AREA_SIZE;
return ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);
/* Number of large-chunk-sized meta chunks that fit in one slab area
   (integer division of the area size by the large chunk size). */
#define SLAB_META_CHUNKS_PER_AREA (SLAB_AREA_SIZE / SLAB_CHUNK_SIZE_LARGE)