MALLOC_PAGESIZE
count = MALLOC_PAGESIZE / B2ALLOC(bucket);
count = MALLOC_PAGESIZE / size;
q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag);
STATS_ADD(d->malloc_used, MALLOC_PAGESIZE *
MALLOC_PAGESIZE;
pp = map(d, MALLOC_PAGESIZE, 0);
ff = map(d, MALLOC_PAGESIZE, 0);
memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE /
if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
memset(pp, SOME_FREEJUNK, MALLOC_PAGESIZE);
unmap(d, pp, MALLOC_PAGESIZE, 0);
unmap(d, ff, MALLOC_PAGESIZE, 0);
mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
unmap(d, info->page, MALLOC_PAGESIZE, 0);
unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
if ((p = MMAPNONE(roundup_sz + 2 * MALLOC_PAGESIZE, 0)) ==
if (mprotect(p + MALLOC_PAGESIZE, roundup_sz,
if (mimmutable(p, roundup_sz + 2 * MALLOC_PAGESIZE))
d = (struct dir_info *)(p + MALLOC_PAGESIZE +
STATS_ADD(d[1].malloc_used, roundup_sz + 2 * MALLOC_PAGESIZE);
if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
sz = MALLOC_PAGESIZE;
if (alignment <= MALLOC_PAGESIZE) {
if (sz < MALLOC_PAGESIZE) {
if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
if (sz < MALLOC_PAGESIZE)
sz = MALLOC_PAGESIZE;
d->btnodesused >= MALLOC_PAGESIZE / sizeof(struct btnode)) {
d->btnodes = map(d, MALLOC_PAGESIZE, 0);
used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
page = MMAP(MALLOC_PAGESIZE, 0);
u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)))
mopts.malloc_guard = MALLOC_PAGESIZE;
/*
 * Mask for the offset-within-page bits of an address/size.
 * Assumes MALLOC_PAGESIZE is a power of two — TODO confirm at its definition.
 */
#define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1)
/*
 * Initial capacity of the region table: as many struct region_info
 * slots as fit in a single malloc page (integer division truncates).
 */
#define MALLOC_INITIAL_REGIONS (MALLOC_PAGESIZE / sizeof(struct region_info))
if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
sz = MALLOC_PAGESIZE / sizeof(uint64_t);
if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
sz = MALLOC_PAGESIZE / sizeof(uint64_t);
MALLOC_PAGESIZE - MALLOC_LEEWAY)
((MALLOC_PAGESIZE - MALLOC_LEEWAY - \
p->total = p->free = MALLOC_PAGESIZE / B2ALLOC(bucket);
if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
sz = MALLOC_PAGESIZE;
if (alignment <= MALLOC_PAGESIZE) {
if (sz >= SIZE_MAX - MALLOC_GUARD - MALLOC_PAGESIZE) {
/*
 * Size of the guard region: one page, cast to size_t so arithmetic
 * with other size_t quantities does not promote unexpectedly.
 */
#define MALLOC_GUARD ((size_t)MALLOC_PAGESIZE)
p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2));
_dl_mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
d = (struct dir_info *)(p + MALLOC_PAGESIZE +
/*
 * Mask for the offset-within-page bits of an address/size.
 * Assumes MALLOC_PAGESIZE is a power of two — TODO confirm at its definition.
 */
#define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1)
/*
 * Initial capacity of the region table: as many struct region_info
 * slots as fit in a single malloc page (integer division truncates).
 */
#define MALLOC_INITIAL_REGIONS (MALLOC_PAGESIZE / sizeof(struct region_info))
p->total = p->free = MALLOC_PAGESIZE >> p->shift;
p->total = p->free = MALLOC_PAGESIZE >> p->shift;
count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
count = MALLOC_PAGESIZE >> bits;
q = MMAP(MALLOC_PAGESIZE);
count = MALLOC_PAGESIZE / size;
pp = map(d, MALLOC_PAGESIZE, 0);
if (bits == 0 && _dl_mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0)
unmap(d, pp, MALLOC_PAGESIZE, MALLOC_JUNK);
_dl_mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
unmap(d, info->page, MALLOC_PAGESIZE, 0);
if (sz >= SIZE_MAX - MALLOC_GUARD - MALLOC_PAGESIZE) {
if (sz - MALLOC_GUARD < MALLOC_PAGESIZE - MALLOC_LEEWAY) {
p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
if (sz - MALLOC_GUARD >= MALLOC_PAGESIZE -
if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -