#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/mutex.h>
#include <sys/bitmap.h>
#include <sys/atomic.h>
#include <sys/kobj.h>
#include <sys/disp.h>
#include <vm/seg_kmem.h>
#include <sys/log.h>
#include <sys/callb.h>
#include <sys/taskq.h>
#include <sys/modctl.h>
#include <sys/reboot.h>
#include <sys/id32.h>
#include <sys/zone.h>
#include <sys/netstack.h>
#ifdef DEBUG
#include <sys/random.h>
#endif
extern void streams_msg_init(void);
extern int segkp_fromheap;
extern void segkp_cache_free(void);
extern int callout_init_done;
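/*
 * Statistics exported for each cache via kstat.  A single template is
 * shared by all caches and protected by kmem_cache_kstat_lock; see
 * kmem_cache_kstat_update() below.
 */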
struct kmem_cache_kstat {
kstat_named_t kmc_buf_size;
kstat_named_t kmc_align;
kstat_named_t kmc_chunk_size;
kstat_named_t kmc_slab_size;
kstat_named_t kmc_alloc;
kstat_named_t kmc_alloc_fail;
kstat_named_t kmc_free;
kstat_named_t kmc_depot_alloc;
kstat_named_t kmc_depot_free;
kstat_named_t kmc_depot_contention;
kstat_named_t kmc_slab_alloc;
kstat_named_t kmc_slab_free;
kstat_named_t kmc_buf_constructed;
kstat_named_t kmc_buf_avail;
kstat_named_t kmc_buf_inuse;
kstat_named_t kmc_buf_total;
kstat_named_t kmc_buf_max;
kstat_named_t kmc_slab_create;
kstat_named_t kmc_slab_destroy;
kstat_named_t kmc_vmem_source;
kstat_named_t kmc_hash_size;
kstat_named_t kmc_hash_lookup_depth;
kstat_named_t kmc_hash_rescale;
kstat_named_t kmc_full_magazines;
kstat_named_t kmc_empty_magazines;
kstat_named_t kmc_magazine_size;
kstat_named_t kmc_reap;
kstat_named_t kmc_defrag;
kstat_named_t kmc_scan;
kstat_named_t kmc_move_callbacks;
kstat_named_t kmc_move_yes;
kstat_named_t kmc_move_no;
kstat_named_t kmc_move_later;
kstat_named_t kmc_move_dont_need;
kstat_named_t kmc_move_dont_know;
kstat_named_t kmc_move_hunt_found;
kstat_named_t kmc_move_slabs_freed;
kstat_named_t kmc_move_reclaimable;
} kmem_cache_kstat = {
{ "buf_size", KSTAT_DATA_UINT64 },
{ "align", KSTAT_DATA_UINT64 },
{ "chunk_size", KSTAT_DATA_UINT64 },
{ "slab_size", KSTAT_DATA_UINT64 },
{ "alloc", KSTAT_DATA_UINT64 },
{ "alloc_fail", KSTAT_DATA_UINT64 },
{ "free", KSTAT_DATA_UINT64 },
{ "depot_alloc", KSTAT_DATA_UINT64 },
{ "depot_free", KSTAT_DATA_UINT64 },
{ "depot_contention", KSTAT_DATA_UINT64 },
{ "slab_alloc", KSTAT_DATA_UINT64 },
{ "slab_free", KSTAT_DATA_UINT64 },
{ "buf_constructed", KSTAT_DATA_UINT64 },
{ "buf_avail", KSTAT_DATA_UINT64 },
{ "buf_inuse", KSTAT_DATA_UINT64 },
{ "buf_total", KSTAT_DATA_UINT64 },
{ "buf_max", KSTAT_DATA_UINT64 },
{ "slab_create", KSTAT_DATA_UINT64 },
{ "slab_destroy", KSTAT_DATA_UINT64 },
{ "vmem_source", KSTAT_DATA_UINT64 },
{ "hash_size", KSTAT_DATA_UINT64 },
{ "hash_lookup_depth", KSTAT_DATA_UINT64 },
{ "hash_rescale", KSTAT_DATA_UINT64 },
{ "full_magazines", KSTAT_DATA_UINT64 },
{ "empty_magazines", KSTAT_DATA_UINT64 },
{ "magazine_size", KSTAT_DATA_UINT64 },
{ "reap", KSTAT_DATA_UINT64 },
{ "defrag", KSTAT_DATA_UINT64 },
{ "scan", KSTAT_DATA_UINT64 },
{ "move_callbacks", KSTAT_DATA_UINT64 },
{ "move_yes", KSTAT_DATA_UINT64 },
{ "move_no", KSTAT_DATA_UINT64 },
{ "move_later", KSTAT_DATA_UINT64 },
{ "move_dont_need", KSTAT_DATA_UINT64 },
{ "move_dont_know", KSTAT_DATA_UINT64 },
{ "move_hunt_found", KSTAT_DATA_UINT64 },
{ "move_slabs_freed", KSTAT_DATA_UINT64 },
{ "move_reclaimable", KSTAT_DATA_UINT64 },
};
static kmutex_t kmem_cache_kstat_lock;
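/*
 * The default set of caches backing kmem_alloc().  The small sizes are
 * multiples of 8, 16, 32, 64, and 128 bytes so that aligned requests are
 * served from caches with at least that alignment; the P2ALIGN() entries
 * are the largest 64-byte multiples that still fit 7, 6, 5, 4, 3, and 2
 * buffers per 8K slab, keeping per-slab waste small.
 */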
static const int kmem_alloc_sizes[] = {
1 * 8,
2 * 8,
3 * 8,
4 * 8, 5 * 8, 6 * 8, 7 * 8,
4 * 16, 5 * 16, 6 * 16, 7 * 16,
4 * 32, 5 * 32, 6 * 32, 7 * 32,
4 * 64, 5 * 64, 6 * 64, 7 * 64,
4 * 128, 5 * 128, 6 * 128, 7 * 128,
P2ALIGN(8192 / 7, 64),
P2ALIGN(8192 / 6, 64),
P2ALIGN(8192 / 5, 64),
P2ALIGN(8192 / 4, 64),
P2ALIGN(8192 / 3, 64),
P2ALIGN(8192 / 2, 64),
};
static const int kmem_big_alloc_sizes[] = {
2 * 4096, 3 * 4096,
2 * 8192, 3 * 8192,
4 * 8192, 5 * 8192, 6 * 8192, 7 * 8192,
8 * 8192, 9 * 8192, 10 * 8192, 11 * 8192,
12 * 8192, 13 * 8192, 14 * 8192, 15 * 8192,
16 * 8192
};
#define KMEM_MAXBUF 4096
#define KMEM_BIG_MAXBUF_32BIT 32768
#define KMEM_BIG_MAXBUF 131072
#define KMEM_BIG_MULTIPLE 4096
#define KMEM_BIG_SHIFT 12
static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
#define KMEM_ALLOC_TABLE_MAX (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
static size_t kmem_big_alloc_table_max = 0;
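/*
 * The available magazine types.  Each entry gives the magazine size in
 * rounds, its alignment, and the minimum and maximum buffer sizes it
 * serves: larger buffers get smaller magazines, and a contended cache may
 * be resized up to the next larger type (see
 * kmem_cache_magazine_resize()).
 */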
static kmem_magtype_t kmem_magtype[] = {
{ 1, 8, 3200, 65536 },
{ 3, 16, 256, 32768 },
{ 7, 32, 64, 16384 },
{ 15, 64, 0, 8192 },
{ 31, 64, 0, 4096 },
{ 47, 64, 0, 2048 },
{ 63, 64, 0, 1024 },
{ 95, 64, 0, 512 },
{ 143, 64, 0, 0 },
};
static uint32_t kmem_reaping;
static uint32_t kmem_reaping_idspace;
clock_t kmem_reap_interval;
int kmem_depot_contention = 3;
pgcnt_t kmem_reapahead = 0;
int kmem_panic = 1;
int kmem_logging = 1;
uint32_t kmem_mtbf = 0;
size_t kmem_transaction_log_size;
size_t kmem_content_log_size;
size_t kmem_failure_log_size;
size_t kmem_slab_log_size;
size_t kmem_zerosized_log_size;
size_t kmem_content_maxsave = 256;
size_t kmem_lite_minsize = 0;
size_t kmem_lite_maxalign = 1024;
int kmem_lite_pcs = 4;
size_t kmem_maxverify;
size_t kmem_minfirewall;
#ifdef DEBUG
int kmem_warn_zerosized = 1;
#else
int kmem_warn_zerosized = 0;
#endif
int kmem_panic_zerosized = 0;
#ifdef _LP64
size_t kmem_max_cached = KMEM_BIG_MAXBUF;
#else
size_t kmem_max_cached = KMEM_BIG_MAXBUF_32BIT;
#endif
#ifdef DEBUG
int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
#else
int kmem_flags = 0;
#endif
int kmem_ready;
static kmem_cache_t *kmem_slab_cache;
static kmem_cache_t *kmem_bufctl_cache;
static kmem_cache_t *kmem_bufctl_audit_cache;
static kmutex_t kmem_cache_lock;
static list_t kmem_caches;
static taskq_t *kmem_taskq;
static kmutex_t kmem_flags_lock;
static vmem_t *kmem_metadata_arena;
static vmem_t *kmem_msb_arena;
static vmem_t *kmem_cache_arena;
static vmem_t *kmem_hash_arena;
static vmem_t *kmem_log_arena;
static vmem_t *kmem_oversize_arena;
static vmem_t *kmem_va_arena;
static vmem_t *kmem_default_arena;
static vmem_t *kmem_firewall_va_arena;
static vmem_t *kmem_firewall_arena;
static int kmem_zerosized;
size_t kmem_frag_minslabs = 101;
size_t kmem_frag_numer = 1;
size_t kmem_frag_denom = KMEM_VOID_FRACTION;
size_t kmem_reclaim_max_slabs = 1;
size_t kmem_reclaim_scan_range = 12;
boolean_t kmem_move_noreap;
boolean_t kmem_move_blocked;
boolean_t kmem_move_fulltilt;
boolean_t kmem_move_any_partial;
#ifdef DEBUG
uint32_t kmem_mtb_move = 60;
uint32_t kmem_mtb_reap = 1800;
#endif
static kmem_cache_t *kmem_defrag_cache;
static kmem_cache_t *kmem_move_cache;
static taskq_t *kmem_move_taskq;
static void kmem_cache_scan(kmem_cache_t *);
static void kmem_cache_defrag(kmem_cache_t *);
static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
kmem_log_header_t *kmem_transaction_log;
kmem_log_header_t *kmem_content_log;
kmem_log_header_t *kmem_failure_log;
kmem_log_header_t *kmem_slab_log;
kmem_log_header_t *kmem_zerosized_log;
static int kmem_lite_count;
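/*
 * KMEM_BUFTAG_LITE_ENTER() logs the caller's PC in the lite buftag's
 * bt_history array, shifting the previous entries down by one so the most
 * recent caller is always first.
 */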
#define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \
if ((count) > 0) { \
pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
pc_t *_e; \
\
for (_e = &_s[(count) - 1]; _e > _s; _e--) \
*_e = *(_e - 1); \
*_s = (uintptr_t)(caller); \
}
#define KMERR_MODIFIED 0
#define KMERR_REDZONE 1
#define KMERR_DUPFREE 2
#define KMERR_BADADDR 3
#define KMERR_BADBUFTAG 4
#define KMERR_BADBUFCTL 5
#define KMERR_BADCACHE 6
#define KMERR_BADSIZE 7
#define KMERR_BADBASE 8
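/*
 * kmem_panic_info records the state of the most recent kmem error; it is
 * filled in by kmem_error() below for post-mortem inspection.
 */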
struct {
hrtime_t kmp_timestamp;
int kmp_error;
void *kmp_buffer;
void *kmp_realbuf;
kmem_cache_t *kmp_cache;
kmem_cache_t *kmp_realcache;
kmem_slab_t *kmp_slab;
kmem_bufctl_t *kmp_bufctl;
} kmem_panic_info;
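/*
 * Debugging support: fill a buffer with a 64-bit pattern, verify it, or
 * verify one pattern while writing another.  The verify routines return
 * the address of the first mismatch, or NULL if the buffer matches
 * throughout; verify_and_copy_pattern() restores the old pattern before
 * returning on a mismatch.
 */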
static void
copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
uint64_t *buf = buf_arg;
while (buf < bufend)
*buf++ = pattern;
}
static void *
verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
uint64_t *buf;
for (buf = buf_arg; buf < bufend; buf++)
if (*buf != pattern)
return (buf);
return (NULL);
}
static void *
verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
{
uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
uint64_t *buf;
for (buf = buf_arg; buf < bufend; buf++) {
if (*buf != old) {
copy_pattern(old, buf_arg,
(char *)buf - (char *)buf_arg);
return (buf);
}
*buf = new;
}
return (NULL);
}
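/*
 * Apply func to every cache, either directly or by dispatching it to the
 * given taskq.  kmem_cache_applyall_id() is the same but applies only to
 * KMC_IDENTIFIER caches.
 */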
static void
kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
{
kmem_cache_t *cp;
mutex_enter(&kmem_cache_lock);
for (cp = list_head(&kmem_caches); cp != NULL;
cp = list_next(&kmem_caches, cp))
if (tq != NULL)
(void) taskq_dispatch(tq, (task_func_t *)func, cp,
tqflag);
else
func(cp);
mutex_exit(&kmem_cache_lock);
}
static void
kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
{
kmem_cache_t *cp;
mutex_enter(&kmem_cache_lock);
for (cp = list_head(&kmem_caches); cp != NULL;
cp = list_next(&kmem_caches, cp)) {
if (!(cp->cache_cflags & KMC_IDENTIFIER))
continue;
if (tq != NULL)
(void) taskq_dispatch(tq, (task_func_t *)func, cp,
tqflag);
else
func(cp);
}
mutex_exit(&kmem_cache_lock);
}
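/*
 * Find the slab in cache cp that contains buf, searching the complete
 * slab list and then the partial slab tree; returns NULL if buf does not
 * belong to cp.
 */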
static kmem_slab_t *
kmem_findslab(kmem_cache_t *cp, void *buf)
{
kmem_slab_t *sp;
mutex_enter(&cp->cache_lock);
for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
sp = list_next(&cp->cache_complete_slabs, sp)) {
if (KMEM_SLAB_MEMBER(sp, buf)) {
mutex_exit(&cp->cache_lock);
return (sp);
}
}
for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
if (KMEM_SLAB_MEMBER(sp, buf)) {
mutex_exit(&cp->cache_lock);
return (sp);
}
}
mutex_exit(&cp->cache_lock);
return (NULL);
}
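/*
 * Diagnose and report a kmem error.  We first locate the buffer's real
 * slab and cache (the caller may have passed the wrong cache), capture
 * everything in kmem_panic_info, describe the problem on the console
 * along with the buffer's previous transaction if auditing is enabled,
 * and then either panic (kmem_panic > 0) or enter the debugger
 * (kmem_panic == 0).
 */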
static void
kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
{
kmem_buftag_t *btp = NULL;
kmem_bufctl_t *bcp = NULL;
kmem_cache_t *cp = cparg;
kmem_slab_t *sp;
uint64_t *off;
void *buf = bufarg;
kmem_logging = 0;
kmem_panic_info.kmp_timestamp = gethrtime();
sp = kmem_findslab(cp, buf);
if (sp == NULL) {
for (cp = list_tail(&kmem_caches); cp != NULL;
cp = list_prev(&kmem_caches, cp)) {
if ((sp = kmem_findslab(cp, buf)) != NULL)
break;
}
}
if (sp == NULL) {
cp = NULL;
error = KMERR_BADADDR;
} else {
if (cp != cparg)
error = KMERR_BADCACHE;
else
buf = (char *)bufarg - ((uintptr_t)bufarg -
(uintptr_t)sp->slab_base) % cp->cache_chunksize;
if (buf != bufarg)
error = KMERR_BADBASE;
if (cp->cache_flags & KMF_BUFTAG)
btp = KMEM_BUFTAG(cp, buf);
if (cp->cache_flags & KMF_HASH) {
mutex_enter(&cp->cache_lock);
for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
if (bcp->bc_addr == buf)
break;
mutex_exit(&cp->cache_lock);
if (bcp == NULL && btp != NULL)
bcp = btp->bt_bufctl;
if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
bcp->bc_addr != buf) {
error = KMERR_BADBUFCTL;
bcp = NULL;
}
}
}
kmem_panic_info.kmp_error = error;
kmem_panic_info.kmp_buffer = bufarg;
kmem_panic_info.kmp_realbuf = buf;
kmem_panic_info.kmp_cache = cparg;
kmem_panic_info.kmp_realcache = cp;
kmem_panic_info.kmp_slab = sp;
kmem_panic_info.kmp_bufctl = bcp;
printf("kernel memory allocator: ");
switch (error) {
case KMERR_MODIFIED:
printf("buffer modified after being freed\n");
off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
if (off == NULL)
off = buf;
printf("modification occurred at offset 0x%lx "
"(0x%llx replaced by 0x%llx)\n",
(uintptr_t)off - (uintptr_t)buf,
(longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
break;
case KMERR_REDZONE:
printf("redzone violation: write past end of buffer\n");
break;
case KMERR_BADADDR:
printf("invalid free: buffer not in cache\n");
break;
case KMERR_DUPFREE:
printf("duplicate free: buffer freed twice\n");
break;
case KMERR_BADBUFTAG:
printf("boundary tag corrupted\n");
printf("bcp ^ bxstat = %lx, should be %lx\n",
(intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
KMEM_BUFTAG_FREE);
break;
case KMERR_BADBUFCTL:
printf("bufctl corrupted\n");
break;
case KMERR_BADCACHE:
printf("buffer freed to wrong cache\n");
printf("buffer was allocated from %s,\n", cp->cache_name);
printf("caller attempting free to %s.\n", cparg->cache_name);
break;
case KMERR_BADSIZE:
printf("bad free: free size (%u) != alloc size (%u)\n",
KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
break;
case KMERR_BADBASE:
printf("bad free: free address (%p) != alloc address (%p)\n",
bufarg, buf);
break;
}
printf("buffer=%p bufctl=%p cache: %s\n",
bufarg, (void *)bcp, cparg->cache_name);
if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
error != KMERR_BADBUFCTL) {
int d;
timestruc_t ts;
kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
printf("previous transaction on buffer %p:\n", buf);
printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
(void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
(void *)sp, cp->cache_name);
for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
ulong_t off;
char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
printf("%s+%lx\n", sym ? sym : "?", off);
}
}
if (kmem_panic > 0)
panic("kernel heap corruption detected");
if (kmem_panic == 0)
debug_enter(NULL);
kmem_logging = 1;
}
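/*
 * Create a log of the given size.  The log is carved into 4 * max_ncpus
 * chunks; each CPU owns one chunk at a time and trades it, under lh_lock,
 * for a fresh one when it fills up.
 */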
static kmem_log_header_t *
kmem_log_init(size_t logsize)
{
kmem_log_header_t *lhp;
int nchunks = 4 * max_ncpus;
size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
int i;
lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
NULL, NULL, VM_SLEEP);
bzero(lhp, lhsize);
mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
lhp->lh_nchunks = nchunks;
lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
lhp->lh_base = vmem_alloc(kmem_log_arena,
lhp->lh_chunksize * nchunks, VM_SLEEP);
lhp->lh_free = vmem_alloc(kmem_log_arena,
nchunks * sizeof (int), VM_SLEEP);
bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
for (i = 0; i < max_ncpus; i++) {
kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
clhp->clh_chunk = i;
}
for (i = max_ncpus; i < nchunks; i++)
lhp->lh_free[i] = i;
lhp->lh_head = max_ncpus;
lhp->lh_tail = 0;
return (lhp);
}
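/*
 * Record size bytes of data in the log.  Entries are copied into the
 * calling CPU's current chunk under its clh_lock; a full chunk is
 * exchanged for the oldest chunk on the free list, so only chunk
 * exchanges contend on the log-wide lock.
 */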
static void *
kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
{
void *logspace;
kmem_cpu_log_header_t *clhp;
if (lhp == NULL || kmem_logging == 0 || panicstr)
return (NULL);
clhp = &lhp->lh_cpu[CPU->cpu_seqid];
mutex_enter(&clhp->clh_lock);
clhp->clh_hits++;
if (size > clhp->clh_avail) {
mutex_enter(&lhp->lh_lock);
lhp->lh_hits++;
lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
clhp->clh_current = lhp->lh_base +
clhp->clh_chunk * lhp->lh_chunksize;
clhp->clh_avail = lhp->lh_chunksize;
if (size > lhp->lh_chunksize)
size = lhp->lh_chunksize;
mutex_exit(&lhp->lh_lock);
}
logspace = clhp->clh_current;
clhp->clh_current += size;
clhp->clh_avail -= size;
bcopy(data, logspace, size);
mutex_exit(&clhp->clh_lock);
return (logspace);
}
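/*
 * KMEM_AUDIT() fills in a bufctl's audit record: timestamp, current
 * thread, stack trace, and a pointer to the corresponding log entry.
 */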
#define KMEM_AUDIT(lp, cp, bcp) \
{ \
kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \
_bcp->bc_timestamp = gethrtime(); \
_bcp->bc_thread = curthread; \
_bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \
_bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \
}
static void
kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
kmem_slab_t *sp, void *addr)
{
kmem_bufctl_audit_t bca;
bzero(&bca, sizeof (kmem_bufctl_audit_t));
bca.bc_addr = addr;
bca.bc_slab = sp;
bca.bc_cache = cp;
KMEM_AUDIT(lp, cp, &bca);
}
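/*
 * Create a new slab for cache cp: allocate cache_slabsize bytes from the
 * cache's vmem arena, apply the next slab color, and thread every buffer
 * (and, for KMF_HASH caches, its externally allocated bufctl) onto the
 * slab's freelist.  All allocations are unwound on failure.
 */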
static kmem_slab_t *
kmem_slab_create(kmem_cache_t *cp, int kmflag)
{
size_t slabsize = cp->cache_slabsize;
size_t chunksize = cp->cache_chunksize;
int cache_flags = cp->cache_flags;
size_t color, chunks;
char *buf, *slab;
kmem_slab_t *sp;
kmem_bufctl_t *bcp;
vmem_t *vmp = cp->cache_arena;
ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
color = cp->cache_color + cp->cache_align;
if (color > cp->cache_maxcolor)
color = cp->cache_mincolor;
cp->cache_color = color;
slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
if (slab == NULL)
goto vmem_alloc_failure;
ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
if (!(cp->cache_cflags & KMC_NOTOUCH))
copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
if (cache_flags & KMF_HASH) {
if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
goto slab_alloc_failure;
chunks = (slabsize - color) / chunksize;
} else {
sp = KMEM_SLAB(cp, slab);
chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
}
sp->slab_cache = cp;
sp->slab_head = NULL;
sp->slab_refcnt = 0;
sp->slab_base = buf = slab + color;
sp->slab_chunks = chunks;
sp->slab_stuck_offset = (uint32_t)-1;
sp->slab_later_count = 0;
sp->slab_flags = 0;
ASSERT(chunks > 0);
while (chunks-- != 0) {
if (cache_flags & KMF_HASH) {
bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
if (bcp == NULL)
goto bufctl_alloc_failure;
if (cache_flags & KMF_AUDIT) {
kmem_bufctl_audit_t *bcap =
(kmem_bufctl_audit_t *)bcp;
bzero(bcap, sizeof (kmem_bufctl_audit_t));
bcap->bc_cache = cp;
}
bcp->bc_addr = buf;
bcp->bc_slab = sp;
} else {
bcp = KMEM_BUFCTL(cp, buf);
}
if (cache_flags & KMF_BUFTAG) {
kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
btp->bt_redzone = KMEM_REDZONE_PATTERN;
btp->bt_bufctl = bcp;
btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
if (cache_flags & KMF_DEADBEEF) {
copy_pattern(KMEM_FREE_PATTERN, buf,
cp->cache_verify);
}
}
bcp->bc_next = sp->slab_head;
sp->slab_head = bcp;
buf += chunksize;
}
kmem_log_event(kmem_slab_log, cp, sp, slab);
return (sp);
bufctl_alloc_failure:
while ((bcp = sp->slab_head) != NULL) {
sp->slab_head = bcp->bc_next;
kmem_cache_free(cp->cache_bufctl_cache, bcp);
}
kmem_cache_free(kmem_slab_cache, sp);
slab_alloc_failure:
vmem_free(vmp, slab, slabsize);
vmem_alloc_failure:
kmem_log_event(kmem_failure_log, cp, NULL, NULL);
atomic_inc_64(&cp->cache_alloc_fail);
return (NULL);
}
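/*
 * Destroy a slab: for KMF_HASH caches free the external bufctls and the
 * slab header, then return the slab's memory to the cache's vmem arena.
 */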
static void
kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
{
vmem_t *vmp = cp->cache_arena;
void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
ASSERT(sp->slab_refcnt == 0);
if (cp->cache_flags & KMF_HASH) {
kmem_bufctl_t *bcp;
while ((bcp = sp->slab_head) != NULL) {
sp->slab_head = bcp->bc_next;
kmem_cache_free(cp->cache_bufctl_cache, bcp);
}
kmem_cache_free(kmem_slab_cache, sp);
}
vmem_free(vmp, slab, cp->cache_slabsize);
}
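/*
 * Allocate a raw (unconstructed) buffer from slab sp, which must be the
 * first partial slab or a brand-new one.  Moves the slab to the complete
 * list when its last buffer is taken, and inserts new slabs into the
 * partial tree otherwise.  Called with the cache lock held.
 */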
static void *
kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
{
kmem_bufctl_t *bcp, **hash_bucket;
void *buf;
boolean_t new_slab = (sp->slab_refcnt == 0);
ASSERT(MUTEX_HELD(&cp->cache_lock));
ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
(sp == avl_first(&cp->cache_partial_slabs))));
ASSERT(sp->slab_cache == cp);
cp->cache_slab_alloc++;
cp->cache_bufslab--;
sp->slab_refcnt++;
bcp = sp->slab_head;
sp->slab_head = bcp->bc_next;
if (cp->cache_flags & KMF_HASH) {
buf = bcp->bc_addr;
hash_bucket = KMEM_HASH(cp, buf);
bcp->bc_next = *hash_bucket;
*hash_bucket = bcp;
if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
KMEM_AUDIT(kmem_transaction_log, cp, bcp);
}
} else {
buf = KMEM_BUF(cp, bcp);
}
ASSERT(KMEM_SLAB_MEMBER(sp, buf));
if (sp->slab_head == NULL) {
ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
if (new_slab) {
ASSERT(sp->slab_chunks == 1);
} else {
ASSERT(sp->slab_chunks > 1);
avl_remove(&cp->cache_partial_slabs, sp);
sp->slab_later_count = 0;
sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
sp->slab_stuck_offset = (uint32_t)-1;
}
list_insert_head(&cp->cache_complete_slabs, sp);
cp->cache_complete_slab_count++;
return (buf);
}
ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
(KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
kmem_slab_prefill(cp, sp);
return (buf);
}
if (new_slab) {
avl_add(&cp->cache_partial_slabs, sp);
return (buf);
}
ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
return (buf);
}
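/*
 * Allocate a raw (unconstructed) buffer from the slab layer, creating a
 * new slab if no partial slab is available.  On the cache's very first
 * slab allocation we run the constructor and then immediately the
 * destructor on the buffer, verifying that a newly constructed object
 * can be destroyed without intervening use.
 */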
static void *
kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
{
kmem_slab_t *sp;
void *buf;
boolean_t test_destructor;
mutex_enter(&cp->cache_lock);
test_destructor = (cp->cache_slab_alloc == 0);
sp = avl_first(&cp->cache_partial_slabs);
if (sp == NULL) {
ASSERT(cp->cache_bufslab == 0);
mutex_exit(&cp->cache_lock);
if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
return (NULL);
}
mutex_enter(&cp->cache_lock);
cp->cache_slab_create++;
if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
cp->cache_bufmax = cp->cache_buftotal;
cp->cache_bufslab += sp->slab_chunks;
}
buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
(cp->cache_complete_slab_count +
avl_numnodes(&cp->cache_partial_slabs) +
(cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
mutex_exit(&cp->cache_lock);
if (test_destructor && cp->cache_destructor != NULL) {
if ((cp->cache_constructor == NULL) ||
cp->cache_constructor(buf, cp->cache_private,
kmflag) == 0) {
cp->cache_destructor(buf, cp->cache_private);
}
copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
cp->cache_bufsize);
if (cp->cache_flags & KMF_DEADBEEF) {
copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
}
}
return (buf);
}
static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
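/*
 * Free a raw (unconstructed) buffer to the slab layer.  When the slab's
 * last allocated buffer comes back the slab is destroyed, unless a move
 * is pending, in which case it is parked on the defrag dead list.
 */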
static void
kmem_slab_free(kmem_cache_t *cp, void *buf)
{
kmem_slab_t *sp;
kmem_bufctl_t *bcp, **prev_bcpp;
ASSERT(buf != NULL);
mutex_enter(&cp->cache_lock);
cp->cache_slab_free++;
if (cp->cache_flags & KMF_HASH) {
prev_bcpp = KMEM_HASH(cp, buf);
while ((bcp = *prev_bcpp) != NULL) {
if (bcp->bc_addr == buf) {
*prev_bcpp = bcp->bc_next;
sp = bcp->bc_slab;
break;
}
cp->cache_lookup_depth++;
prev_bcpp = &bcp->bc_next;
}
} else {
bcp = KMEM_BUFCTL(cp, buf);
sp = KMEM_SLAB(cp, buf);
}
if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
mutex_exit(&cp->cache_lock);
kmem_error(KMERR_BADADDR, cp, buf);
return;
}
if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
kmem_slab_move_yes(cp, sp, buf);
}
if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
if (cp->cache_flags & KMF_CONTENTS)
((kmem_bufctl_audit_t *)bcp)->bc_contents =
kmem_log_enter(kmem_content_log, buf,
cp->cache_contents);
KMEM_AUDIT(kmem_transaction_log, cp, bcp);
}
bcp->bc_next = sp->slab_head;
sp->slab_head = bcp;
cp->cache_bufslab++;
ASSERT(sp->slab_refcnt >= 1);
if (--sp->slab_refcnt == 0) {
if (sp->slab_chunks == 1) {
list_remove(&cp->cache_complete_slabs, sp);
cp->cache_complete_slab_count--;
} else {
avl_remove(&cp->cache_partial_slabs, sp);
}
cp->cache_buftotal -= sp->slab_chunks;
cp->cache_bufslab -= sp->slab_chunks;
if (cp->cache_defrag == NULL ||
(avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
cp->cache_slab_destroy++;
mutex_exit(&cp->cache_lock);
kmem_slab_destroy(cp, sp);
} else {
list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
list_insert_tail(deadlist, sp);
} else {
list_insert_head(deadlist, sp);
}
cp->cache_defrag->kmd_deadcount++;
mutex_exit(&cp->cache_lock);
}
return;
}
if (bcp->bc_next == NULL) {
ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
ASSERT(sp->slab_chunks > 1);
list_remove(&cp->cache_complete_slabs, sp);
cp->cache_complete_slab_count--;
avl_add(&cp->cache_partial_slabs, sp);
} else {
(void) avl_update_gt(&cp->cache_partial_slabs, sp);
}
ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
(cp->cache_complete_slab_count +
avl_numnodes(&cp->cache_partial_slabs) +
(cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
mutex_exit(&cp->cache_lock);
}
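/*
 * Debug-time allocation processing: validate and update the buftag,
 * verify the free pattern (KMF_DEADBEEF), optionally inject an allocation
 * failure (kmem_mtbf), apply the constructor, and audit the transaction.
 * Returns 0 on success, 1 on an induced failure, and -1 if an error was
 * detected and reported.
 */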
static int
kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
caddr_t caller)
{
kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
uint32_t mtbf;
if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
kmem_error(KMERR_BADBUFTAG, cp, buf);
return (-1);
}
btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
kmem_error(KMERR_BADBUFCTL, cp, buf);
return (-1);
}
if (cp->cache_flags & KMF_DEADBEEF) {
if (!construct && (cp->cache_flags & KMF_LITE)) {
if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
kmem_error(KMERR_MODIFIED, cp, buf);
return (-1);
}
if (cp->cache_constructor != NULL)
*(uint64_t *)buf = btp->bt_redzone;
else
*(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
} else {
construct = 1;
if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
KMEM_UNINITIALIZED_PATTERN, buf,
cp->cache_verify)) {
kmem_error(KMERR_MODIFIED, cp, buf);
return (-1);
}
}
}
btp->bt_redzone = KMEM_REDZONE_PATTERN;
if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
gethrtime() % mtbf == 0 &&
(kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
kmem_log_event(kmem_failure_log, cp, NULL, NULL);
if (!construct && cp->cache_destructor != NULL)
cp->cache_destructor(buf, cp->cache_private);
} else {
mtbf = 0;
}
if (mtbf || (construct && cp->cache_constructor != NULL &&
cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
atomic_inc_64(&cp->cache_alloc_fail);
btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
if (cp->cache_flags & KMF_DEADBEEF)
copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
kmem_slab_free(cp, buf);
return (1);
}
if (cp->cache_flags & KMF_AUDIT) {
KMEM_AUDIT(kmem_transaction_log, cp, bcp);
}
if ((cp->cache_flags & KMF_LITE) &&
!(cp->cache_cflags & KMC_KMEM_ALLOC)) {
KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
}
return (0);
}
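/*
 * Debug-time free processing: detect duplicate frees, redzone violations,
 * and corrupt buftags; audit the transaction; and rewrite the free
 * pattern, applying the destructor first for non-lite caches.  Returns
 * -1 if an error was detected and reported, 0 otherwise.
 */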
static int
kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
{
kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
kmem_slab_t *sp;
if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
kmem_error(KMERR_DUPFREE, cp, buf);
return (-1);
}
sp = kmem_findslab(cp, buf);
if (sp == NULL || sp->slab_cache != cp)
kmem_error(KMERR_BADADDR, cp, buf);
else
kmem_error(KMERR_REDZONE, cp, buf);
return (-1);
}
btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
kmem_error(KMERR_BADBUFCTL, cp, buf);
return (-1);
}
if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
kmem_error(KMERR_REDZONE, cp, buf);
return (-1);
}
if (cp->cache_flags & KMF_AUDIT) {
if (cp->cache_flags & KMF_CONTENTS)
bcp->bc_contents = kmem_log_enter(kmem_content_log,
buf, cp->cache_contents);
KMEM_AUDIT(kmem_transaction_log, cp, bcp);
}
if ((cp->cache_flags & KMF_LITE) &&
!(cp->cache_cflags & KMC_KMEM_ALLOC)) {
KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
}
if (cp->cache_flags & KMF_DEADBEEF) {
if (cp->cache_flags & KMF_LITE)
btp->bt_redzone = *(uint64_t *)buf;
else if (cp->cache_destructor != NULL)
cp->cache_destructor(buf, cp->cache_private);
copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
}
return (0);
}
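/*
 * Free each of the first nrounds buffers in magazine mp to the slab
 * layer, verifying free patterns and applying destructors as the cache's
 * flags require, then free the magazine itself.
 */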
static void
kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
{
int round;
ASSERT(!list_link_active(&cp->cache_link) ||
taskq_member(kmem_taskq, curthread));
for (round = 0; round < nrounds; round++) {
void *buf = mp->mag_round[round];
if (cp->cache_flags & KMF_DEADBEEF) {
if (verify_pattern(KMEM_FREE_PATTERN, buf,
cp->cache_verify) != NULL) {
kmem_error(KMERR_MODIFIED, cp, buf);
continue;
}
if ((cp->cache_flags & KMF_LITE) &&
cp->cache_destructor != NULL) {
kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
*(uint64_t *)buf = btp->bt_redzone;
cp->cache_destructor(buf, cp->cache_private);
*(uint64_t *)buf = KMEM_FREE_PATTERN;
}
} else if (cp->cache_destructor != NULL) {
cp->cache_destructor(buf, cp->cache_private);
}
kmem_slab_free(cp, buf);
}
ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
kmem_cache_free(cp->cache_magtype->mt_cache, mp);
}
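/*
 * Allocate a magazine from depot list mlp.  We use mutex_tryenter() first
 * so that contention on the depot lock can be counted (it drives magazine
 * resizing), and we track the list's low-water mark for the working-set
 * reaper.
 */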
static kmem_magazine_t *
kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
{
kmem_magazine_t *mp;
if (!mutex_tryenter(&cp->cache_depot_lock)) {
mutex_enter(&cp->cache_depot_lock);
cp->cache_depot_contention++;
}
if ((mp = mlp->ml_list) != NULL) {
ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
mlp->ml_list = mp->mag_next;
if (--mlp->ml_total < mlp->ml_min)
mlp->ml_min = mlp->ml_total;
mlp->ml_alloc++;
}
mutex_exit(&cp->cache_depot_lock);
return (mp);
}
static void
kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
{
mutex_enter(&cp->cache_depot_lock);
ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
mp->mag_next = mlp->ml_list;
mlp->ml_list = mp;
mlp->ml_total++;
mutex_exit(&cp->cache_depot_lock);
}
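/*
 * Update the depot's working-set statistics: the low-water mark observed
 * over the previous interval becomes the reap limit (magazines in excess
 * of the working set), and the mark is reset to the current level.
 */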
static void
kmem_depot_ws_update(kmem_cache_t *cp)
{
mutex_enter(&cp->cache_depot_lock);
cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
cp->cache_full.ml_min = cp->cache_full.ml_total;
cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
cp->cache_empty.ml_min = cp->cache_empty.ml_total;
mutex_exit(&cp->cache_depot_lock);
}
static void
kmem_depot_ws_zero(kmem_cache_t *cp)
{
mutex_enter(&cp->cache_depot_lock);
cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
cp->cache_full.ml_min = cp->cache_full.ml_total;
cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
cp->cache_empty.ml_min = cp->cache_empty.ml_total;
mutex_exit(&cp->cache_depot_lock);
}
size_t kmem_reap_preempt_bytes = 1024 * 1024;
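/*
 * Reap magazines that have fallen out of the depot's working set,
 * yielding the CPU after every kmem_reap_preempt_bytes worth of buffers
 * so a large reap cannot hog the processor.
 */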
static void
kmem_depot_ws_reap(kmem_cache_t *cp)
{
size_t bytes = 0;
long reap;
kmem_magazine_t *mp;
ASSERT(!list_link_active(&cp->cache_link) ||
taskq_member(kmem_taskq, curthread));
reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
while (reap-- &&
(mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
if (bytes > kmem_reap_preempt_bytes) {
kpreempt(KPREEMPT_SYNC);
bytes = 0;
}
}
reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
while (reap-- &&
(mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
kmem_magazine_destroy(cp, mp, 0);
bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
if (bytes > kmem_reap_preempt_bytes) {
kpreempt(KPREEMPT_SYNC);
bytes = 0;
}
}
}
static void
kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
{
ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
(ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
ASSERT(ccp->cc_magsize > 0);
ccp->cc_ploaded = ccp->cc_loaded;
ccp->cc_prounds = ccp->cc_rounds;
ccp->cc_loaded = mp;
ccp->cc_rounds = rounds;
}
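/*
 * Crash-dump support.  While a dump is in progress, allocations from
 * caches whose arenas are VMC_DUMPSAFE are diverted into a preallocated
 * region (kmem_dump_start .. kmem_dump_end) so that the allocator's own
 * state is not perturbed while it is being dumped.
 */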
static void *kmem_dump_start;
static void *kmem_dump_end;
static void *kmem_dump_curr;
static size_t kmem_dump_size;
typedef struct kmem_dumpctl {
void *kdc_next;
} kmem_dumpctl_t;
#define KMEM_DUMPCTL(cp, buf) \
((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
sizeof (void *)))
uint_t kmem_dump_verbose = 0;
uint_t kmem_dump_oversize_allocs = 0;
uint_t kmem_dump_oversize_max = 0;
static void
kmem_dumppr(char **pp, char *e, const char *format, ...)
{
char *p = *pp;
if (p < e) {
int n;
va_list ap;
va_start(ap, format);
n = vsnprintf(p, e - p, format, ap);
va_end(ap);
*pp = p + n;
}
}
void
kmem_dump_init(size_t size)
{
ASSERT3U(size, >, 0);
if (kmem_dump_start != NULL)
kmem_free(kmem_dump_start, kmem_dump_size);
kmem_dump_start = kmem_alloc(size, KM_SLEEP);
kmem_dump_size = size;
kmem_dump_curr = kmem_dump_start;
kmem_dump_end = (void *)((char *)kmem_dump_start + size);
copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
}
void
kmem_dump_begin(void)
{
kmem_cache_t *cp;
ASSERT(panicstr != NULL);
for (cp = list_head(&kmem_caches); cp != NULL;
cp = list_next(&kmem_caches, cp)) {
kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
cp->cache_flags |= KMF_DUMPDIVERT;
ccp->cc_flags |= KMF_DUMPDIVERT;
ccp->cc_dump_rounds = ccp->cc_rounds;
ccp->cc_dump_prounds = ccp->cc_prounds;
ccp->cc_rounds = ccp->cc_prounds = -1;
} else {
cp->cache_flags |= KMF_DUMPUNSAFE;
ccp->cc_flags |= KMF_DUMPUNSAFE;
}
}
}
size_t
kmem_dump_finish(char *buf, size_t size)
{
int percent = 0;
size_t used;
char *e = buf + size;
char *p = buf;
if (kmem_dump_curr == kmem_dump_end) {
cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
"bytes: kmem state in dump may be inconsistent",
kmem_dump_size);
}
if (kmem_dump_verbose == 0)
return (0);
used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
percent = (used * 100) / kmem_dump_size;
kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
kmem_dumppr(&p, e, "used bytes,%ld\n", used);
kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
kmem_dumppr(&p, e, "Oversize allocs,%d\n",
kmem_dump_oversize_allocs);
kmem_dumppr(&p, e, "Oversize max size,%ld\n",
kmem_dump_oversize_max);
if (p < e)
bzero(p, e - p);
return (p - buf);
}
void *
kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
{
void *buf;
void *curr;
char *bufend;
if ((buf = cp->cache_dump.kd_freelist) != NULL) {
cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
return (buf);
}
curr = kmem_dump_curr;
buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
if (cp->cache_align < PAGESIZE) {
char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
if (bufend > page) {
bufend += page - (char *)buf;
buf = (void *)page;
}
}
if (bufend > (char *)kmem_dump_end) {
kmem_dump_curr = kmem_dump_end;
cp->cache_dump.kd_alloc_fails++;
return (NULL);
}
kmem_dump_curr = bufend;
if (cp->cache_constructor != NULL &&
cp->cache_constructor(buf, cp->cache_private, kmflag)
!= 0) {
#ifdef DEBUG
printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
cp->cache_name, (void *)cp);
#endif
if (kmem_dump_curr == bufend)
kmem_dump_curr = curr;
cp->cache_dump.kd_alloc_fails++;
return (NULL);
}
return (buf);
}
int
kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
{
if ((char *)buf >= (char *)kmem_dump_start &&
(char *)buf < (char *)kmem_dump_end) {
KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
cp->cache_dump.kd_freelist = buf;
return (0);
}
if (kmem_dump_curr < kmem_dump_end)
return (0);
return (1);
}
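/*
 * Allocate a constructed object from cache cp.  The hot path takes a
 * round from the CPU's loaded magazine under only the cc_lock; failing
 * that we swap in the previously loaded magazine, then a full magazine
 * from the depot, and finally fall back to the slab layer and construct
 * the object ourselves.
 */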
void *
kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
{
kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
kmem_magazine_t *fmp;
void *buf;
mutex_enter(&ccp->cc_lock);
for (;;) {
if (ccp->cc_rounds > 0) {
buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
ccp->cc_alloc++;
mutex_exit(&ccp->cc_lock);
if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
if (ccp->cc_flags & KMF_DUMPUNSAFE) {
ASSERT(!(ccp->cc_flags &
KMF_DUMPDIVERT));
cp->cache_dump.kd_unsafe++;
}
if ((ccp->cc_flags & KMF_BUFTAG) &&
kmem_cache_alloc_debug(cp, buf, kmflag, 0,
caller()) != 0) {
if (kmflag & KM_NOSLEEP)
return (NULL);
mutex_enter(&ccp->cc_lock);
continue;
}
}
return (buf);
}
if (ccp->cc_prounds > 0) {
kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
continue;
}
if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
if (ccp->cc_flags & KMF_DUMPUNSAFE) {
ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
cp->cache_dump.kd_unsafe++;
} else {
if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
NULL) {
mutex_exit(&ccp->cc_lock);
return (buf);
}
break;
}
}
if (ccp->cc_magsize == 0)
break;
fmp = kmem_depot_alloc(cp, &cp->cache_full);
if (fmp != NULL) {
if (ccp->cc_ploaded != NULL)
kmem_depot_free(cp, &cp->cache_empty,
ccp->cc_ploaded);
kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
continue;
}
break;
}
mutex_exit(&ccp->cc_lock);
buf = kmem_slab_alloc(cp, kmflag);
if (buf == NULL)
return (NULL);
if (cp->cache_flags & KMF_BUFTAG) {
int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
if (rc != 0) {
if (kmflag & KM_NOSLEEP)
return (NULL);
ASSERT(rc == -1);
return (kmem_cache_alloc(cp, kmflag));
}
return (buf);
}
if (cp->cache_constructor != NULL &&
cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
atomic_inc_64(&cp->cache_alloc_fail);
kmem_slab_free(cp, buf);
return (NULL);
}
return (buf);
}
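/*
 * Free a constructed buffer directly to the slab layer, applying the
 * destructor unless the cache is KMF_DEADBEEF without KMF_LITE, in which
 * case kmem_cache_free_debug() has already applied it.
 */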
static void
kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
{
if (!freed && (cp->cache_flags & KMF_BUFTAG))
if (kmem_cache_free_debug(cp, buf, caller()) == -1)
return;
if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
cp->cache_destructor != NULL) {
if (cp->cache_flags & KMF_DEADBEEF) {
kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
*(uint64_t *)buf = btp->bt_redzone;
cp->cache_destructor(buf, cp->cache_private);
*(uint64_t *)buf = KMEM_FREE_PATTERN;
} else {
cp->cache_destructor(buf, cp->cache_private);
}
}
kmem_slab_free(cp, buf);
}
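/*
 * Provide the CPU cache with an empty magazine to free into, taking one
 * from the depot or allocating a new one.  Returns 1 if the caller's
 * free can be retried, 0 if it must fall back to the slab layer.
 */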
static int
kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
{
kmem_magazine_t *emp;
kmem_magtype_t *mtp;
ASSERT(MUTEX_HELD(&ccp->cc_lock));
ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
((uint_t)ccp->cc_rounds == -1)) &&
((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
((uint_t)ccp->cc_prounds == -1)));
emp = kmem_depot_alloc(cp, &cp->cache_empty);
if (emp != NULL) {
if (ccp->cc_ploaded != NULL)
kmem_depot_free(cp, &cp->cache_full,
ccp->cc_ploaded);
kmem_cpu_reload(ccp, emp, 0);
return (1);
}
mtp = cp->cache_magtype;
mutex_exit(&ccp->cc_lock);
emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
mutex_enter(&ccp->cc_lock);
if (emp != NULL) {
if (ccp->cc_magsize != mtp->mt_magsize) {
mutex_exit(&ccp->cc_lock);
kmem_cache_free(mtp->mt_cache, emp);
mutex_enter(&ccp->cc_lock);
return (1);
}
kmem_depot_free(cp, &cp->cache_empty, emp);
return (1);
}
return (0);
}
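/*
 * Free a constructed object to cache cp.  The hot path places the buffer
 * in the CPU's loaded magazine; when both local magazines are full we
 * install an empty one (see kmem_cpucache_magazine_alloc()), and only if
 * no magazine can be found do we free to the slab layer.
 */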
void
kmem_cache_free(kmem_cache_t *cp, void *buf)
{
kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
ASSERT(cp->cache_defrag == NULL ||
cp->cache_defrag->kmd_thread != curthread ||
(buf != cp->cache_defrag->kmd_from_buf &&
buf != cp->cache_defrag->kmd_to_buf));
if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
if (ccp->cc_flags & KMF_DUMPUNSAFE) {
ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
cp->cache_dump.kd_unsafe++;
} else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
return;
}
if (ccp->cc_flags & KMF_BUFTAG) {
if (kmem_cache_free_debug(cp, buf, caller()) == -1)
return;
}
}
mutex_enter(&ccp->cc_lock);
for (;;) {
if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
ccp->cc_free++;
mutex_exit(&ccp->cc_lock);
return;
}
if (ccp->cc_prounds == 0) {
kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
continue;
}
if (ccp->cc_magsize == 0)
break;
if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
break;
}
}
mutex_exit(&ccp->cc_lock);
kmem_slab_free_constructed(cp, buf, B_TRUE);
}
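/*
 * Move the remaining buffers of a freshly created slab directly into the
 * CPU's magazine layer, sparing each one a later trip through the slab
 * layer.  Called with the cache lock held; drops and reacquires it.
 */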
static void
kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
{
kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
int cache_flags = cp->cache_flags;
kmem_bufctl_t *next, *head;
size_t nbufs;
ASSERT(MUTEX_HELD(&cp->cache_lock));
ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
ASSERT(cp->cache_constructor == NULL);
ASSERT(sp->slab_cache == cp);
ASSERT(sp->slab_refcnt == 1);
ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
head = sp->slab_head;
nbufs = (sp->slab_chunks - sp->slab_refcnt);
sp->slab_head = NULL;
sp->slab_refcnt += nbufs;
cp->cache_bufslab -= nbufs;
cp->cache_slab_alloc += nbufs;
list_insert_head(&cp->cache_complete_slabs, sp);
cp->cache_complete_slab_count++;
mutex_exit(&cp->cache_lock);
mutex_enter(&ccp->cc_lock);
while (head != NULL) {
void *buf = KMEM_BUF(cp, head);
if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
buf;
ccp->cc_free++;
nbufs--;
head = head->bc_next;
continue;
}
if (ccp->cc_prounds == 0) {
kmem_cpu_reload(ccp, ccp->cc_ploaded,
ccp->cc_prounds);
continue;
}
if (ccp->cc_magsize == 0) {
break;
}
if (!kmem_cpucache_magazine_alloc(ccp, cp))
break;
}
mutex_exit(&ccp->cc_lock);
if (nbufs != 0) {
ASSERT(head != NULL);
while (head != NULL) {
ASSERT(nbufs != 0);
next = head->bc_next;
head->bc_next = NULL;
kmem_slab_free(cp, KMEM_BUF(cp, head));
head = next;
nbufs--;
}
}
ASSERT(head == NULL);
ASSERT(nbufs == 0);
mutex_enter(&cp->cache_lock);
}
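/*
 * Reallocate oldbuf from oldsize to newsize bytes, copying the common
 * prefix and zeroing any newly added space.
 */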
void *
kmem_rezalloc(void *oldbuf, size_t oldsize, size_t newsize, int kmflag)
{
void *newbuf = kmem_alloc(newsize, kmflag);
if (newbuf == NULL) {
return (NULL);
}
bcopy(oldbuf, newbuf, MIN(oldsize, newsize));
if (newsize > oldsize) {
void *start = (void *)((uintptr_t)newbuf + oldsize);
bzero(start, newsize - oldsize);
}
if (oldbuf != NULL) {
ASSERT3U(oldsize, !=, 0);
kmem_free(oldbuf, oldsize);
}
return (newbuf);
}
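/*
 * Allocate size bytes of zeroed memory.  Small requests come from the
 * kmem_alloc_table caches directly; everything else goes through
 * kmem_alloc() and is then zeroed.
 */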
void *
kmem_zalloc(size_t size, int kmflag)
{
size_t index;
void *buf;
if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
kmem_cache_t *cp = kmem_alloc_table[index];
buf = kmem_cache_alloc(cp, kmflag);
if (buf != NULL) {
if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
if (cp->cache_flags & KMF_LITE) {
KMEM_BUFTAG_LITE_ENTER(btp,
kmem_lite_count, caller());
}
}
bzero(buf, size);
}
} else {
buf = kmem_alloc(size, kmflag);
if (buf != NULL)
bzero(buf, size);
}
return (buf);
}
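/*
 * Allocate size bytes.  Requests up to KMEM_MAXBUF are served by
 * kmem_alloc_table, larger cached sizes by kmem_big_alloc_table, and
 * anything bigger by the oversize arena.  Zero-sized allocations return
 * NULL; sleeping ones are logged to kmem_zerosized_log (and may warn or
 * panic, per kmem_warn_zerosized and kmem_panic_zerosized).
 */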
void *
kmem_alloc(size_t size, int kmflag)
{
size_t index;
kmem_cache_t *cp;
void *buf;
if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
cp = kmem_alloc_table[index];
} else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
kmem_big_alloc_table_max) {
cp = kmem_big_alloc_table[index];
} else {
if (size == 0) {
if (kmflag != KM_SLEEP && !(kmflag & KM_PANIC))
return (NULL);
if (kmem_panic && kmem_panic_zerosized)
panic("attempted to kmem_alloc() size of 0");
if (kmem_warn_zerosized) {
cmn_err(CE_WARN, "kmem_alloc(): sleeping "
"allocation with size of 0; "
"see kmem_zerosized_log for details");
}
kmem_log_event(kmem_zerosized_log, NULL, NULL, NULL);
return (NULL);
}
buf = vmem_alloc(kmem_oversize_arena, size,
kmflag & KM_VMFLAGS);
if (buf == NULL)
kmem_log_event(kmem_failure_log, NULL, NULL,
(void *)size);
else if (KMEM_DUMP(kmem_slab_cache)) {
kmem_dump_oversize_allocs++;
if (size > kmem_dump_oversize_max)
kmem_dump_oversize_max = size;
}
return (buf);
}
buf = kmem_cache_alloc(cp, kmflag);
if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
if (cp->cache_flags & KMF_LITE) {
KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
}
}
return (buf);
}
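/*
 * Free buf, which must be exactly the size originally allocated.  Debug
 * caches cross-check the encoded allocation size and trailing redzone
 * byte to catch wrong-size frees, duplicate frees, and writes past the
 * end of the buffer.
 */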
void
kmem_free(void *buf, size_t size)
{
size_t index;
kmem_cache_t *cp;
if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
cp = kmem_alloc_table[index];
} else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
kmem_big_alloc_table_max) {
cp = kmem_big_alloc_table[index];
} else {
EQUIV(buf == NULL, size == 0);
if (buf == NULL && size == 0)
return;
vmem_free(kmem_oversize_arena, buf, size);
return;
}
if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
uint32_t *ip = (uint32_t *)btp;
if (ip[1] != KMEM_SIZE_ENCODE(size)) {
if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
kmem_error(KMERR_DUPFREE, cp, buf);
return;
}
if (KMEM_SIZE_VALID(ip[1])) {
ip[0] = KMEM_SIZE_ENCODE(size);
kmem_error(KMERR_BADSIZE, cp, buf);
} else {
kmem_error(KMERR_REDZONE, cp, buf);
}
return;
}
if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
kmem_error(KMERR_REDZONE, cp, buf);
return;
}
btp->bt_redzone = KMEM_REDZONE_PATTERN;
if (cp->cache_flags & KMF_LITE) {
KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
caller());
}
}
kmem_cache_free(cp, buf);
}
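/*
 * Firewall VA support: allocate size bytes plus one extra quantum of
 * virtual address space that is left unmapped, so that any access past
 * the end of the allocation faults immediately (kmem_firewall_va_free()
 * asserts that the guard region is unmapped).
 */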
void *
kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
{
size_t realsize = size + vmp->vm_quantum;
void *addr;
if (realsize < size)
realsize = size;
addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
if (addr != NULL && kvseg.s_base == NULL && realsize != size)
(void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
return (addr);
}
void
kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
{
ASSERT((kvseg.s_base == NULL ?
va_to_pfn((char *)addr + size) :
hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
vmem_free(vmp, addr, size + vmp->vm_quantum);
}
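/*
 * Try hard to satisfy a no-sleep allocation of at least size bytes:
 * retry at progressively larger sizes (a larger cache may be less
 * depleted) up to a page, then fall back to the caller's original flags.
 * The size actually allocated is returned in *asize.
 */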
void *
kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
{
void *p;
*asize = P2ROUNDUP(size, KMEM_ALIGN);
do {
p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
if (p != NULL)
return (p);
*asize += KMEM_ALIGN;
} while (*asize <= PAGESIZE);
*asize = P2ROUNDUP(size, KMEM_ALIGN);
return (kmem_alloc(*asize, kmflag));
}
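/*
 * Reap a cache: call its reclaim callback, if any, crediting magazines
 * it returns against the reap limit so they are reaped too; reap the
 * depot's working-set excess; and run defragmentation if the cache has
 * a move callback registered.
 */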
static void
kmem_cache_reap(kmem_cache_t *cp)
{
ASSERT(taskq_member(kmem_taskq, curthread));
cp->cache_reap++;
if (cp->cache_reclaim != NULL) {
long delta;
delta = cp->cache_full.ml_total;
cp->cache_reclaim(cp->cache_private);
delta = cp->cache_full.ml_total - delta;
if (delta > 0) {
mutex_enter(&cp->cache_depot_lock);
cp->cache_full.ml_reaplimit += delta;
cp->cache_full.ml_min += delta;
mutex_exit(&cp->cache_depot_lock);
}
}
kmem_depot_ws_reap(cp);
if (cp->cache_defrag != NULL && !kmem_move_noreap) {
kmem_cache_defrag(cp);
}
}
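/*
 * Reaping machinery.  kmem_reap() and kmem_reap_idspace() are cheap and
 * may be called often: they atomically set an in-progress flag and
 * dispatch kmem_reap_start() to the kmem taskq.  The flag is not cleared
 * until kmem_reap_interval ticks after the reap completes, so at most
 * one reap of each kind runs per interval.
 */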
static void
kmem_reap_timeout(void *flag_arg)
{
uint32_t *flag = (uint32_t *)flag_arg;
ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
*flag = 0;
}
static void
kmem_reap_done(void *flag)
{
if (!callout_init_done) {
kmem_reap_timeout(flag);
} else {
(void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
}
}
static void
kmem_reap_start(void *flag)
{
ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
if (flag == &kmem_reaping) {
kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
if (segkp_fromheap)
segkp_cache_free();
	} else {
		kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq,
		    TQ_NOSLEEP);
	}
if (taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP) ==
TASKQID_INVALID)
kmem_reap_done(flag);
}
static void
kmem_reap_common(void *flag_arg)
{
uint32_t *flag = (uint32_t *)flag_arg;
if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
atomic_cas_32(flag, 0, 1) != 0)
return;
if (taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC) ==
TASKQID_INVALID)
*flag = 0;
}
void
kmem_reap(void)
{
kmem_reap_common(&kmem_reaping);
}
void
kmem_reap_idspace(void)
{
kmem_reap_common(&kmem_reaping_idspace);
}
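/*
 * Purge all magazines from a cache: disable each CPU's magazine layer,
 * destroy its loaded and previously loaded magazines, then zero the
 * depot's working set and reap the entire depot.
 */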
static void
kmem_cache_magazine_purge(kmem_cache_t *cp)
{
kmem_cpu_cache_t *ccp;
kmem_magazine_t *mp, *pmp;
int rounds, prounds, cpu_seqid;
ASSERT(!list_link_active(&cp->cache_link) ||
taskq_member(kmem_taskq, curthread));
ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
ccp = &cp->cache_cpu[cpu_seqid];
mutex_enter(&ccp->cc_lock);
mp = ccp->cc_loaded;
pmp = ccp->cc_ploaded;
rounds = ccp->cc_rounds;
prounds = ccp->cc_prounds;
ccp->cc_loaded = NULL;
ccp->cc_ploaded = NULL;
ccp->cc_rounds = -1;
ccp->cc_prounds = -1;
ccp->cc_magsize = 0;
mutex_exit(&ccp->cc_lock);
if (mp)
kmem_magazine_destroy(cp, mp, rounds);
if (pmp)
kmem_magazine_destroy(cp, pmp, prounds);
}
kmem_depot_ws_zero(cp);
kmem_depot_ws_reap(cp);
}
static void
kmem_cache_magazine_enable(kmem_cache_t *cp)
{
int cpu_seqid;
if (cp->cache_flags & KMF_NOMAGAZINE)
return;
for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
mutex_enter(&ccp->cc_lock);
ccp->cc_magsize = cp->cache_magtype->mt_magsize;
mutex_exit(&ccp->cc_lock);
}
}
boolean_t
kmem_cache_reap_active(void)
{
return (!taskq_empty(kmem_taskq));
}
void
kmem_cache_reap_soon(kmem_cache_t *cp)
{
ASSERT(list_link_active(&cp->cache_link));
kmem_depot_ws_zero(cp);
(void) taskq_dispatch(kmem_taskq,
(task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
}
static void
kmem_cache_magazine_resize(kmem_cache_t *cp)
{
kmem_magtype_t *mtp = cp->cache_magtype;
ASSERT(taskq_member(kmem_taskq, curthread));
if (cp->cache_chunksize < mtp->mt_maxbuf) {
kmem_cache_magazine_purge(cp);
mutex_enter(&cp->cache_depot_lock);
cp->cache_magtype = ++mtp;
cp->cache_depot_contention_prev =
cp->cache_depot_contention + INT_MAX;
mutex_exit(&cp->cache_depot_lock);
kmem_cache_magazine_enable(cp);
}
}
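/*
 * Resize a hashed cache's hash table to roughly one bucket per allocated
 * buffer (within a factor of two), rehashing every bufctl into the new
 * table.
 */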
static void
kmem_hash_rescale(kmem_cache_t *cp)
{
kmem_bufctl_t **old_table, **new_table, *bcp;
size_t old_size, new_size, h;
ASSERT(taskq_member(kmem_taskq, curthread));
new_size = MAX(KMEM_HASH_INITIAL,
1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
old_size = cp->cache_hash_mask + 1;
if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
return;
new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
VM_NOSLEEP);
if (new_table == NULL)
return;
bzero(new_table, new_size * sizeof (void *));
mutex_enter(&cp->cache_lock);
old_size = cp->cache_hash_mask + 1;
old_table = cp->cache_hash_table;
cp->cache_hash_mask = new_size - 1;
cp->cache_hash_table = new_table;
cp->cache_rescale++;
for (h = 0; h < old_size; h++) {
bcp = old_table[h];
while (bcp != NULL) {
void *addr = bcp->bc_addr;
kmem_bufctl_t *next_bcp = bcp->bc_next;
kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
bcp->bc_next = *hash_bucket;
*hash_bucket = bcp;
bcp = next_bcp;
}
}
mutex_exit(&cp->cache_lock);
vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
}
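/*
 * Periodic maintenance, applied to every cache once per
 * kmem_reap_interval by kmem_update(): rescale the hash table if it is
 * badly sized, update the depot working set, grow the magazine size if
 * the depot lock has been contended, and scan for defragmentation
 * candidates.
 */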
static void
kmem_cache_update(kmem_cache_t *cp)
{
int need_hash_rescale = 0;
int need_magazine_resize = 0;
ASSERT(MUTEX_HELD(&kmem_cache_lock));
mutex_enter(&cp->cache_lock);
if ((cp->cache_flags & KMF_HASH) &&
(cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
(cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
cp->cache_hash_mask > KMEM_HASH_INITIAL)))
need_hash_rescale = 1;
mutex_exit(&cp->cache_lock);
kmem_depot_ws_update(cp);
mutex_enter(&cp->cache_depot_lock);
if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
(int)(cp->cache_depot_contention -
cp->cache_depot_contention_prev) > kmem_depot_contention)
need_magazine_resize = 1;
cp->cache_depot_contention_prev = cp->cache_depot_contention;
mutex_exit(&cp->cache_depot_lock);
if (need_hash_rescale)
(void) taskq_dispatch(kmem_taskq,
(task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
if (need_magazine_resize)
(void) taskq_dispatch(kmem_taskq,
(task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
if (cp->cache_defrag != NULL)
(void) taskq_dispatch(kmem_taskq,
(task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
}
static void kmem_update(void *);
static void
kmem_update_timeout(void *dummy)
{
(void) timeout(kmem_update, dummy, kmem_reap_interval);
}
static void
kmem_update(void *dummy)
{
kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
if (taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP)
== TASKQID_INVALID)
kmem_update_timeout(NULL);
}
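/*
 * Gather the statistics exported through the cache's kstat, summing
 * counters from the CPU layer, the depot, and the slab layer under their
 * respective locks.
 */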
static int
kmem_cache_kstat_update(kstat_t *ksp, int rw)
{
struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
kmem_cache_t *cp = ksp->ks_private;
uint64_t cpu_buf_avail;
uint64_t buf_avail = 0;
int cpu_seqid;
long reap;
ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
if (rw == KSTAT_WRITE)
return (EACCES);
mutex_enter(&cp->cache_lock);
kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
mutex_enter(&ccp->cc_lock);
cpu_buf_avail = 0;
if (ccp->cc_rounds > 0)
cpu_buf_avail += ccp->cc_rounds;
if (ccp->cc_prounds > 0)
cpu_buf_avail += ccp->cc_prounds;
kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc;
kmcp->kmc_free.value.ui64 += ccp->cc_free;
buf_avail += cpu_buf_avail;
mutex_exit(&ccp->cc_lock);
}
mutex_enter(&cp->cache_depot_lock);
kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
kmcp->kmc_magazine_size.value.ui64 =
(cp->cache_flags & KMF_NOMAGAZINE) ?
0 : cp->cache_magtype->mt_magsize;
kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
reap = MIN(reap, cp->cache_full.ml_total);
mutex_exit(&cp->cache_depot_lock);
kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
kmcp->kmc_align.value.ui64 = cp->cache_align;
kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
buf_avail += cp->cache_bufslab;
kmcp->kmc_buf_avail.value.ui64 = buf_avail;
kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
cp->cache_hash_mask + 1 : 0;
kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
kmcp->kmc_reap.value.ui64 = cp->cache_reap;
if (cp->cache_defrag == NULL) {
kmcp->kmc_move_callbacks.value.ui64 = 0;
kmcp->kmc_move_yes.value.ui64 = 0;
kmcp->kmc_move_no.value.ui64 = 0;
kmcp->kmc_move_later.value.ui64 = 0;
kmcp->kmc_move_dont_need.value.ui64 = 0;
kmcp->kmc_move_dont_know.value.ui64 = 0;
kmcp->kmc_move_hunt_found.value.ui64 = 0;
kmcp->kmc_move_slabs_freed.value.ui64 = 0;
kmcp->kmc_defrag.value.ui64 = 0;
kmcp->kmc_scan.value.ui64 = 0;
kmcp->kmc_move_reclaimable.value.ui64 = 0;
} else {
int64_t reclaimable;
kmem_defrag_t *kd = cp->cache_defrag;
kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks;
kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes;
kmcp->kmc_move_no.value.ui64 = kd->kmd_no;
kmcp->kmc_move_later.value.ui64 = kd->kmd_later;
kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need;
kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know;
kmcp->kmc_move_hunt_found.value.ui64 = 0;
kmcp->kmc_move_slabs_freed.value.ui64 = kd->kmd_slabs_freed;
kmcp->kmc_defrag.value.ui64 = kd->kmd_defrags;
kmcp->kmc_scan.value.ui64 = kd->kmd_scans;
reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
reclaimable = MAX(reclaimable, 0);
reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
kmcp->kmc_move_reclaimable.value.ui64 = reclaimable;
}
mutex_exit(&cp->cache_lock);
return (0);
}
uint64_t
kmem_cache_stat(kmem_cache_t *cp, char *name)
{
int i;
kstat_t *ksp = cp->cache_kstat;
kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
uint64_t value = 0;
if (ksp != NULL) {
mutex_enter(&kmem_cache_kstat_lock);
(void) kmem_cache_kstat_update(ksp, KSTAT_READ);
for (i = 0; i < ksp->ks_ndata; i++) {
if (strcmp(knp[i].name, name) == 0) {
value = knp[i].value.ui64;
break;
}
}
mutex_exit(&kmem_cache_kstat_lock);
}
return (value);
}
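/*
 * kmem_avail() estimates the memory currently available for kmem
 * allocation, capped at 1GB; kmem_maxavail() estimates the most that
 * could ever be allocated, limited by both physical memory and free heap
 * virtual address space.
 */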
size_t
kmem_avail(void)
{
spgcnt_t rmem = availrmem - tune.t_minarmem;
spgcnt_t fmem = freemem - minfree;
return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
1 << (30 - PAGESHIFT))));
}
size_t
kmem_maxavail(void)
{
spgcnt_t pmem = availrmem - tune.t_minarmem;
spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
}
int
kmem_debugging(void)
{
return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
}
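/*
 * Weight function ordering the partial-slab AVL tree: more heavily used
 * slabs (and slabs marked KMEM_SLAB_NOMOVE) get smaller weights and sort
 * first, so allocation packs them tighter while the emptiest movable
 * slabs accumulate at the far end as defragmentation candidates.  Ties
 * break on slab address.
 */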
#define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift) \
((((sp)->slab_refcnt <= (binshift)) || \
(((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift))) \
? -(sp)->slab_refcnt \
: -((binshift) + ((sp)->slab_refcnt >> (binshift))))
static int
kmem_partial_slab_cmp(const void *p0, const void *p1)
{
const kmem_cache_t *cp;
const kmem_slab_t *s0 = p0;
const kmem_slab_t *s1 = p1;
int w0, w1;
size_t binshift;
ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
ASSERT(s0->slab_cache == s1->slab_cache);
cp = s1->slab_cache;
ASSERT(MUTEX_HELD(&cp->cache_lock));
binshift = cp->cache_partial_binshift;
w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
w0 -= cp->cache_maxchunks;
}
w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
w1 -= cp->cache_maxchunks;
}
if (w0 < w1)
return (-1);
if (w0 > w1)
return (1);
if ((uintptr_t)s0 < (uintptr_t)s1)
return (-1);
if ((uintptr_t)s0 > (uintptr_t)s1)
return (1);
return (0);
}
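/*
 * Create a cache of objects of the given size and alignment, with
 * optional constructor, destructor, and reclaim callbacks, backed by
 * vmem arena vmp (kmem_default_arena if NULL).  Debugging flags are
 * derived from the global kmem_flags combined with the KMC_* cflags.
 *
 * A typical (hypothetical) client looks like:
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t),
 *	    0, foo_construct, foo_destruct, NULL, NULL, NULL, 0);
 *	foo_t *fp = kmem_cache_alloc(foo_cache, KM_SLEEP);
 *	...
 *	kmem_cache_free(foo_cache, fp);
 */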
kmem_cache_t *
kmem_cache_create(
char *name,
size_t bufsize,
size_t align,
int (*constructor)(void *, void *, int),
void (*destructor)(void *, void *),
void (*reclaim)(void *),
void *private,
vmem_t *vmp,
int cflags)
{
int cpu_seqid;
size_t chunksize;
kmem_cache_t *cp;
kmem_magtype_t *mtp;
size_t csize = KMEM_CACHE_SIZE(max_ncpus);
#ifdef DEBUG
if (!strident_valid(name)) {
cmn_err(CE_CONT,
"kmem_cache_create: '%s' is an invalid cache name\n"
"cache names must conform to the rules for "
"C identifiers\n", name);
}
#endif
if (vmp == NULL)
vmp = kmem_default_arena;
ASSERT(!(cflags & KMC_IDENTIFIER));
if (vmp->vm_cflags & VMC_IDENTIFIER)
cflags |= KMC_IDENTIFIER;
cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
bzero(cp, csize);
list_link_init(&cp->cache_link);
if (align == 0)
align = KMEM_ALIGN;
if (align < KMEM_ALIGN)
cflags |= KMC_NOTOUCH;
if (!ISP2(align) || align > vmp->vm_quantum)
panic("kmem_cache_create: bad alignment %lu", align);
mutex_enter(&kmem_flags_lock);
if (kmem_flags & KMF_RANDOMIZE)
kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
KMF_RANDOMIZE;
cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
mutex_exit(&kmem_flags_lock);
ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
if (cp->cache_flags & KMF_LITE) {
if (bufsize >= kmem_lite_minsize &&
align <= kmem_lite_maxalign &&
P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
cp->cache_flags |= KMF_BUFTAG;
cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
} else {
cp->cache_flags &= ~KMF_DEBUG;
}
}
if (cp->cache_flags & KMF_DEADBEEF)
cp->cache_flags |= KMF_REDZONE;
if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
cp->cache_flags |= KMF_NOMAGAZINE;
if (cflags & KMC_NODEBUG)
cp->cache_flags &= ~KMF_DEBUG;
if (cflags & KMC_NOTOUCH)
cp->cache_flags &= ~KMF_TOUCH;
if (cflags & KMC_PREFILL)
cp->cache_flags |= KMF_PREFILL;
if (cflags & KMC_NOHASH)
cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
if (cflags & KMC_NOMAGAZINE)
cp->cache_flags |= KMF_NOMAGAZINE;
if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
cp->cache_flags |= KMF_REDZONE;
if (!(cp->cache_flags & KMF_AUDIT))
cp->cache_flags &= ~KMF_CONTENTS;
if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
!(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
cp->cache_flags |= KMF_FIREWALL;
if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
cp->cache_flags &= ~KMF_FIREWALL;
if (cp->cache_flags & KMF_FIREWALL) {
cp->cache_flags &= ~KMF_BUFTAG;
cp->cache_flags |= KMF_NOMAGAZINE;
ASSERT(vmp == kmem_default_arena);
vmp = kmem_firewall_arena;
}
(void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
cp->cache_bufsize = bufsize;
cp->cache_align = align;
cp->cache_constructor = constructor;
cp->cache_destructor = destructor;
cp->cache_reclaim = reclaim;
cp->cache_private = private;
cp->cache_arena = vmp;
cp->cache_cflags = cflags;
chunksize = bufsize;
if (align >= KMEM_ALIGN) {
chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
cp->cache_bufctl = chunksize - KMEM_ALIGN;
}
if (cp->cache_flags & KMF_BUFTAG) {
cp->cache_bufctl = chunksize;
cp->cache_buftag = chunksize;
if (cp->cache_flags & KMF_LITE)
chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
else
chunksize += sizeof (kmem_buftag_t);
}
if (cp->cache_flags & KMF_DEADBEEF) {
cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
if (cp->cache_flags & KMF_LITE)
cp->cache_verify = sizeof (uint64_t);
}
cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
if (vmp == kmem_firewall_arena) {
cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
cp->cache_mincolor = cp->cache_slabsize - chunksize;
cp->cache_maxcolor = cp->cache_mincolor;
cp->cache_flags |= KMF_HASH;
ASSERT(!(cp->cache_flags & KMF_BUFTAG));
} else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
!(cp->cache_flags & KMF_AUDIT) &&
chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
cp->cache_slabsize = vmp->vm_quantum;
cp->cache_mincolor = 0;
cp->cache_maxcolor =
(cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
ASSERT(!(cp->cache_flags & KMF_AUDIT));
} else {
size_t chunks, bestfit, waste, slabsize;
size_t minwaste = LONG_MAX;
bestfit = 0;
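		/*
		 * Choose the slab size that wastes the least space per
		 * chunk; note that 'chunks' is re-derived from the
		 * rounded-up slabsize on each iteration.
		 */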
for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
slabsize = P2ROUNDUP(chunksize * chunks,
vmp->vm_quantum);
chunks = slabsize / chunksize;
waste = (slabsize % chunksize) / chunks;
if (waste < minwaste) {
minwaste = waste;
bestfit = slabsize;
}
}
if (cflags & KMC_QCACHE)
bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
cp->cache_slabsize = bestfit;
cp->cache_mincolor = 0;
cp->cache_maxcolor = bestfit % chunksize;
cp->cache_flags |= KMF_HASH;
}
cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
if (vmp == kmem_msb_arena ||
cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
cp->cache_constructor != NULL)
cp->cache_flags &= ~KMF_PREFILL;
if (cp->cache_flags & KMF_HASH) {
ASSERT(!(cflags & KMC_NOHASH));
cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
kmem_bufctl_audit_cache : kmem_bufctl_cache;
}
if (cp->cache_maxcolor >= vmp->vm_quantum)
cp->cache_maxcolor = vmp->vm_quantum - 1;
cp->cache_color = cp->cache_mincolor;
mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
list_create(&cp->cache_complete_slabs,
sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
if (cp->cache_flags & KMF_HASH) {
cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
bzero(cp->cache_hash_table,
KMEM_HASH_INITIAL * sizeof (void *));
cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
}
mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
continue;
cp->cache_magtype = mtp;
for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
ccp->cc_flags = cp->cache_flags;
ccp->cc_rounds = -1;
ccp->cc_prounds = -1;
}
if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
"kmem_cache", KSTAT_TYPE_NAMED,
sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL)) != NULL) {
cp->cache_kstat->ks_data = &kmem_cache_kstat;
cp->cache_kstat->ks_update = kmem_cache_kstat_update;
cp->cache_kstat->ks_private = cp;
cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
kstat_install(cp->cache_kstat);
}
mutex_enter(&kmem_cache_lock);
list_insert_tail(&kmem_caches, cp);
mutex_exit(&kmem_cache_lock);
if (kmem_ready)
kmem_cache_magazine_enable(cp);
return (cp);
}
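/*
 * AVL comparator for the moves-pending tree, keyed by each pending move's
 * 'from' buffer address.
 */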
static int
kmem_move_cmp(const void *buf, const void *p)
{
const kmem_move_t *kmm = p;
uintptr_t v1 = (uintptr_t)buf;
uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
}
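/*
 * The reclaim threshold (kmd_reclaim_numer / KMEM_VOID_FRACTION) bounds
 * the fraction of allocated buffers a partial slab may have and still be
 * considered reclaimable; kmem_cache_scan() nudges it up or down based on
 * how successful recent scans have been.
 */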
static void
kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
{
kmd->kmd_reclaim_numer = 1;
}
static void
kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
{
if (direction > 0) {
if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
kmd->kmd_reclaim_numer++;
}
} else {
if (kmd->kmd_reclaim_numer > 1) {
kmd->kmd_reclaim_numer--;
}
}
}
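/*
 * Register a move callback, making the cache a candidate for
 * defragmentation.  The callback runs on the kmem_move taskq with the old
 * and new buffer addresses and must answer with one of the kmem_cbrc_t
 * responses: YES (moved), NO (refused), LATER (try again), DONT_NEED
 * (buffer is about to be freed), or DONT_KNOW (ownership could not be
 * established).
 *
 * A minimal sketch of a client callback; object_t and its helpers are
 * hypothetical, not defined in this file:
 *
 *	static kmem_cbrc_t
 *	object_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		object_t *op = old, *np = new;
 *
 *		if (!object_is_valid(op))
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (!object_try_lock(op))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(op, np, size);
 *		object_fixup_references(np);
 *		object_unlock(op);
 *		return (KMEM_CBRC_YES);
 *	}
 */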
void
kmem_cache_set_move(kmem_cache_t *cp,
kmem_cbrc_t (*move)(void *, void *, size_t, void *))
{
kmem_defrag_t *defrag;
ASSERT(move != NULL);
ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
mutex_enter(&cp->cache_lock);
if (KMEM_IS_MOVABLE(cp)) {
if (cp->cache_move == NULL) {
ASSERT(cp->cache_slab_alloc == 0);
cp->cache_defrag = defrag;
defrag = NULL;
bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
avl_create(&cp->cache_defrag->kmd_moves_pending,
kmem_move_cmp, sizeof (kmem_move_t),
offsetof(kmem_move_t, kmm_entry));
ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
list_create(&cp->cache_defrag->kmd_deadlist,
sizeof (kmem_slab_t),
offsetof(kmem_slab_t, slab_link));
kmem_reset_reclaim_threshold(cp->cache_defrag);
}
cp->cache_move = move;
}
mutex_exit(&cp->cache_lock);
if (defrag != NULL) {
kmem_cache_free(kmem_defrag_cache, defrag);
}
}
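/*
 * Remove the cache from the global cache list, drain any taskq work still
 * referencing it, purge its magazine layer, and tear it down.  A cache
 * destroyed with buffers still outstanding draws a warning; the poisoned
 * function pointers below turn any later use of the dead cache into a
 * kernel text fault.
 */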
void
kmem_cache_destroy(kmem_cache_t *cp)
{
int cpu_seqid;
mutex_enter(&kmem_cache_lock);
list_remove(&kmem_caches, cp);
mutex_exit(&kmem_cache_lock);
if (kmem_taskq != NULL)
taskq_wait(kmem_taskq);
if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
taskq_wait(kmem_move_taskq);
kmem_cache_magazine_purge(cp);
mutex_enter(&cp->cache_lock);
if (cp->cache_buftotal != 0)
cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
cp->cache_name, (void *)cp);
if (cp->cache_defrag != NULL) {
avl_destroy(&cp->cache_defrag->kmd_moves_pending);
list_destroy(&cp->cache_defrag->kmd_deadlist);
kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
cp->cache_defrag = NULL;
}
cp->cache_constructor = (int (*)(void *, void *, int))1;
cp->cache_destructor = (void (*)(void *, void *))2;
cp->cache_reclaim = (void (*)(void *))3;
cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
mutex_exit(&cp->cache_lock);
kstat_delete(cp->cache_kstat);
if (cp->cache_hash_table != NULL)
vmem_free(kmem_hash_arena, cp->cache_hash_table,
(cp->cache_hash_mask + 1) * sizeof (void *));
for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
mutex_destroy(&cp->cache_depot_lock);
mutex_destroy(&cp->cache_lock);
vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
}
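/*
 * CPU hotplug callback: on CPU_UNCONFIG, purge and then re-enable every
 * cache's magazine layer so that rounds held on behalf of the departing
 * CPU drain back to the depot.
 */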
static int
kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
{
ASSERT(MUTEX_HELD(&cpu_lock));
if (what == CPU_UNCONFIG) {
kmem_cache_applyall(kmem_cache_magazine_purge,
kmem_taskq, TQ_SLEEP);
kmem_cache_applyall(kmem_cache_magazine_enable,
kmem_taskq, TQ_SLEEP);
}
return (0);
}
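/*
 * Create the kmem_alloc_<n> caches named by 'array' and populate the
 * lookup table mapping allocation sizes (in units of 1 << shift) to the
 * smallest cache that satisfies them.
 */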
static void
kmem_alloc_caches_create(const int *array, size_t count,
kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
{
char name[KMEM_CACHE_NAMELEN + 1];
size_t table_unit = (1 << shift);
size_t size = table_unit;
int i;
for (i = 0; i < count; i++) {
size_t cache_size = array[i];
size_t align = KMEM_ALIGN;
kmem_cache_t *cp;
if (size > maxbuf)
break;
ASSERT(P2PHASE(cache_size, table_unit) == 0);
if (IS_P2ALIGNED(cache_size, 64))
align = 64;
if (IS_P2ALIGNED(cache_size, PAGESIZE))
align = PAGESIZE;
(void) snprintf(name, sizeof (name),
"kmem_alloc_%lu", cache_size);
cp = kmem_cache_create(name, cache_size, align,
NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
while (size <= cache_size) {
alloc_table[(size - 1) >> shift] = cp;
size += table_unit;
}
}
ASSERT(size > maxbuf);
}
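/*
 * Create the magazine, slab, and bufctl metadata caches, then the caches
 * backing kmem_alloc().  Runs twice during boot: on pass 1 the kmem_alloc
 * caches are treated as metadata so that /etc/system can be read; pass 2
 * rebuilds them against the final kmem_default_arena, optionally backed
 * by large pages.
 */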
static void
kmem_cache_init(int pass, int use_large_pages)
{
int i;
size_t maxbuf;
kmem_magtype_t *mtp;
for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
char name[KMEM_CACHE_NAMELEN + 1];
mtp = &kmem_magtype[i];
(void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
mtp->mt_cache = kmem_cache_create(name,
(mtp->mt_magsize + 1) * sizeof (void *),
mtp->mt_align, NULL, NULL, NULL, NULL,
kmem_msb_arena, KMC_NOHASH);
}
kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
kmem_msb_arena, KMC_NOHASH);
kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
kmem_msb_arena, KMC_NOHASH);
kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
kmem_msb_arena, KMC_NOHASH);
if (pass == 2) {
kmem_va_arena = vmem_create("kmem_va",
NULL, 0, PAGESIZE,
vmem_alloc, vmem_free, heap_arena,
8 * PAGESIZE, VM_SLEEP);
if (use_large_pages) {
kmem_default_arena = vmem_xcreate("kmem_default",
NULL, 0, PAGESIZE,
segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
0, VMC_DUMPSAFE | VM_SLEEP);
} else {
kmem_default_arena = vmem_create("kmem_default",
NULL, 0, PAGESIZE,
segkmem_alloc, segkmem_free, kmem_va_arena,
0, VMC_DUMPSAFE | VM_SLEEP);
}
maxbuf = kmem_max_cached;
if (maxbuf <= KMEM_MAXBUF) {
maxbuf = 0;
kmem_max_cached = KMEM_MAXBUF;
} else {
size_t size = 0;
size_t max =
sizeof (kmem_big_alloc_sizes) / sizeof (int);
for (i = 0; i < max; i++) {
size = kmem_big_alloc_sizes[i];
if (maxbuf <= size)
break;
}
kmem_max_cached = maxbuf = size;
}
bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
} else {
kmem_default_arena = kmem_msb_arena;
maxbuf = KMEM_BIG_MAXBUF_32BIT;
}
kmem_alloc_caches_create(
kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
kmem_alloc_caches_create(
kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
}
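/*
 * Bootstrap the allocator.  kmem_flags may be set in /etc/system, but
 * mod_read_system_file() itself needs the allocator, so we create all the
 * standard caches, read /etc/system, destroy the caches, and then create
 * them all again in light of the (possibly) new kmem_flags and other
 * kmem tunables.
 */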
void
kmem_init(void)
{
kmem_cache_t *cp;
int old_kmem_flags = kmem_flags;
int use_large_pages = 0;
size_t maxverify, minfirewall;
kstat_init();
#if defined(_LP64)
maxverify = minfirewall = PAGESIZE / 2;
#else
maxverify = minfirewall = ULONG_MAX;
#endif
ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
list_create(&kmem_caches, sizeof (kmem_cache_t),
offsetof(kmem_cache_t, cache_link));
kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
VM_SLEEP | VMC_NO_QCACHE);
kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
VMC_DUMPSAFE | VM_SLEEP);
kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
NULL, 0, PAGESIZE,
kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
0, VM_SLEEP);
kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
VMC_DUMPSAFE | VM_SLEEP);
kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
kmem_reap_interval = 15 * hz;
kmem_cache_init(1, 0);
mod_read_system_file(boothowto & RB_ASKNAME);
while ((cp = list_tail(&kmem_caches)) != NULL)
kmem_cache_destroy(cp);
vmem_destroy(kmem_oversize_arena);
if (old_kmem_flags & KMF_STICKY)
kmem_flags = old_kmem_flags;
if (!(kmem_flags & KMF_AUDIT))
vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
if (kmem_maxverify == 0)
kmem_maxverify = maxverify;
if (kmem_minfirewall == 0)
kmem_minfirewall = minfirewall;
use_large_pages = segkmem_lpsetup();
kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
kmem_lite_pcs = kmem_lite_count;
if (use_large_pages &&
((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
0, VMC_DUMPSAFE | VM_SLEEP);
} else {
kmem_oversize_arena = vmem_create("kmem_oversize",
NULL, 0, PAGESIZE,
segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
VM_SLEEP);
}
kmem_cache_init(2, use_large_pages);
if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
if (kmem_transaction_log_size == 0)
kmem_transaction_log_size = kmem_maxavail() / 50;
kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
}
if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
if (kmem_content_log_size == 0)
kmem_content_log_size = kmem_maxavail() / 50;
kmem_content_log = kmem_log_init(kmem_content_log_size);
}
kmem_failure_log = kmem_log_init(kmem_failure_log_size);
kmem_slab_log = kmem_log_init(kmem_slab_log_size);
kmem_zerosized_log = kmem_log_init(kmem_zerosized_log_size);
streams_msg_init();
zone_zsd_init();
log_init();
taskq_init();
if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
KMF_CONTENTS | KMF_LITE)) != 0) ||
((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x.",
kmem_flags);
#ifdef DEBUG
if ((kmem_flags & KMF_DEBUG) == 0)
cmn_err(CE_NOTE, "kmem debugging disabled.");
#else
if (!(kmem_flags & KMF_LITE) &&
(kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
cmn_err(CE_WARN, "High-overhead kmem debugging features "
"enabled (kmem_flags = 0x%x). Performance degradation "
"and large memory overhead possible.", kmem_flags);
#endif
kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
kmem_ready = 1;
ka_init();
id32_init();
netstack_init();
}
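/*
 * Create the caches and single-threaded taskq used by the
 * defragmentation (move) machinery.
 */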
static void
kmem_move_init(void)
{
kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
kmem_msb_arena, KMC_NOHASH);
kmem_move_cache = kmem_cache_create("kmem_move_cache",
sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
kmem_msb_arena, KMC_NOHASH);
kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
}
void
kmem_thread_init(void)
{
kmem_move_init();
kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
600, INT_MAX, TASKQ_PREPOPULATE);
}
void
kmem_mp_init(void)
{
mutex_enter(&cpu_lock);
register_cpu_setup_func(kmem_cpu_setup, NULL);
mutex_exit(&cpu_lock);
kmem_update_timeout(NULL);
taskq_mp_init();
}
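/*
 * Return the slab of the allocated buffer, or NULL if the buffer is not
 * allocated.  May be called with a known slab address to determine
 * whether the buffer is allocated, or with a NULL slab address to obtain
 * an allocated buffer's slab.
 */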
static kmem_slab_t *
kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
{
kmem_bufctl_t *bcp, *bufbcp;
ASSERT(MUTEX_HELD(&cp->cache_lock));
ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
if (cp->cache_flags & KMF_HASH) {
for (bcp = *KMEM_HASH(cp, buf);
(bcp != NULL) && (bcp->bc_addr != buf);
bcp = bcp->bc_next) {
continue;
}
ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
return (bcp == NULL ? NULL : bcp->bc_slab);
}
if (sp == NULL) {
sp = KMEM_SLAB(cp, buf);
}
bufbcp = KMEM_BUFCTL(cp, buf);
for (bcp = sp->slab_head;
(bcp != NULL) && (bcp != bufbcp);
bcp = bcp->bc_next) {
continue;
}
return (bcp == NULL ? sp : NULL);
}
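/*
 * Decide whether a partial slab is a candidate for reclamation.
 * Desperate callers accept any partial slab; otherwise slabs flagged
 * NOMOVE are rejected, a slab with a single allocated buffer (or any
 * partial slab, when kmem_move_any_partial is set) qualifies outright,
 * and the rest must fall below the cache's current reclaim threshold.
 */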
static boolean_t
kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
{
long refcnt = sp->slab_refcnt;
ASSERT(cp->cache_defrag != NULL);
if (flags & KMM_DEBUG) {
return ((flags & KMM_DESPERATE) ||
((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
}
if (flags & KMM_DESPERATE) {
return (refcnt < sp->slab_chunks);
}
if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
return (B_FALSE);
}
if ((refcnt == 1) || kmem_move_any_partial) {
return (refcnt < sp->slab_chunks);
}
return ((refcnt * KMEM_VOID_FRACTION) <
(sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
}
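/*
 * A buffer on this slab moved (or was freed out of the way): reset the
 * slab's sticky state.  If the buffer at the previously stuck offset is
 * the one that moved, lift the NOMOVE flag and re-insert the slab so it
 * is re-weighted in the partial tree.
 */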
static void
kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
{
ASSERT(MUTEX_HELD(&cp->cache_lock));
ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
if (!KMEM_SLAB_IS_PARTIAL(sp)) {
return;
}
if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
avl_remove(&cp->cache_partial_slabs, sp);
sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
sp->slab_stuck_offset = (uint32_t)-1;
avl_add(&cp->cache_partial_slabs, sp);
}
} else {
sp->slab_later_count = 0;
sp->slab_stuck_offset = (uint32_t)-1;
}
}
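/*
 * The client refused to move a buffer on this slab: flag the slab NOMOVE,
 * remember the stuck buffer's offset, and re-insert the slab so the
 * comparator pushes it away from the reclaim end of the partial tree.
 */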
static void
kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
{
ASSERT(taskq_member(kmem_move_taskq, curthread));
ASSERT(MUTEX_HELD(&cp->cache_lock));
ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
if (!KMEM_SLAB_IS_PARTIAL(sp)) {
return;
}
avl_remove(&cp->cache_partial_slabs, sp);
sp->slab_later_count = 0;
sp->slab_flags |= KMEM_SLAB_NOMOVE;
sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
avl_add(&cp->cache_partial_slabs, sp);
}
static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
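/*
 * Move the allocated buffer named by the callback to its preallocated
 * destination, running on the kmem_move taskq.  The slab's reclaimability
 * and the buffer's allocated state are re-checked first, since either may
 * have changed while the request was queued; the client's move callback
 * is then invoked and its response folded into the defrag statistics.
 */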
static void
kmem_move_buffer(kmem_move_t *callback)
{
kmem_cbrc_t response;
kmem_slab_t *sp = callback->kmm_from_slab;
kmem_cache_t *cp = sp->slab_cache;
boolean_t free_on_slab;
ASSERT(taskq_member(kmem_move_taskq, curthread));
ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
kmem_slab_free(cp, callback->kmm_to_buf);
kmem_move_end(cp, callback);
return;
}
mutex_enter(&cp->cache_lock);
free_on_slab = (kmem_slab_allocated(cp, sp,
callback->kmm_from_buf) == NULL);
mutex_exit(&cp->cache_lock);
if (free_on_slab) {
kmem_slab_free(cp, callback->kmm_to_buf);
kmem_move_end(cp, callback);
return;
}
if (cp->cache_flags & KMF_BUFTAG) {
if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
KM_NOSLEEP, 1, caller()) != 0) {
kmem_move_end(cp, callback);
return;
}
} else if (cp->cache_constructor != NULL &&
cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
KM_NOSLEEP) != 0) {
atomic_inc_64(&cp->cache_alloc_fail);
kmem_slab_free(cp, callback->kmm_to_buf);
kmem_move_end(cp, callback);
return;
}
cp->cache_defrag->kmd_callbacks++;
cp->cache_defrag->kmd_thread = curthread;
cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
callback);
response = cp->cache_move(callback->kmm_from_buf,
callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
callback, kmem_cbrc_t, response);
cp->cache_defrag->kmd_thread = NULL;
cp->cache_defrag->kmd_from_buf = NULL;
cp->cache_defrag->kmd_to_buf = NULL;
if (response == KMEM_CBRC_YES) {
cp->cache_defrag->kmd_yes++;
kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
if (sp->slab_refcnt == 0)
cp->cache_defrag->kmd_slabs_freed++;
mutex_enter(&cp->cache_lock);
kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
mutex_exit(&cp->cache_lock);
kmem_move_end(cp, callback);
return;
}
switch (response) {
case KMEM_CBRC_NO:
cp->cache_defrag->kmd_no++;
mutex_enter(&cp->cache_lock);
kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
mutex_exit(&cp->cache_lock);
break;
case KMEM_CBRC_LATER:
cp->cache_defrag->kmd_later++;
mutex_enter(&cp->cache_lock);
if (!KMEM_SLAB_IS_PARTIAL(sp)) {
mutex_exit(&cp->cache_lock);
break;
}
if (++sp->slab_later_count >= KMEM_DISBELIEF) {
kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
} else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
callback->kmm_from_buf);
}
mutex_exit(&cp->cache_lock);
break;
case KMEM_CBRC_DONT_NEED:
cp->cache_defrag->kmd_dont_need++;
kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
if (sp->slab_refcnt == 0)
cp->cache_defrag->kmd_slabs_freed++;
mutex_enter(&cp->cache_lock);
kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
mutex_exit(&cp->cache_lock);
break;
case KMEM_CBRC_DONT_KNOW:
cp->cache_defrag->kmd_dont_know++;
break;
default:
panic("'%s' (%p) unexpected move callback response %d\n",
cp->cache_name, (void *)cp, response);
}
kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
kmem_move_end(cp, callback);
}
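/*
 * Allocate a destination buffer and dispatch a move request for the given
 * allocated buffer.  Returns B_FALSE only when there is insufficient
 * memory for the request; a redundant or no-longer-needed request returns
 * B_TRUE without dispatching anything.
 */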
static boolean_t
kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
{
void *to_buf;
avl_index_t index;
kmem_move_t *callback, *pending;
ulong_t n;
ASSERT(taskq_member(kmem_taskq, curthread));
ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
if (callback == NULL)
return (B_FALSE);
callback->kmm_from_slab = sp;
callback->kmm_from_buf = buf;
callback->kmm_flags = flags;
mutex_enter(&cp->cache_lock);
n = avl_numnodes(&cp->cache_partial_slabs);
if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
mutex_exit(&cp->cache_lock);
kmem_cache_free(kmem_move_cache, callback);
return (B_TRUE);
}
pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
if (pending != NULL) {
if (flags & KMM_DESPERATE) {
pending->kmm_flags |= KMM_DESPERATE;
}
mutex_exit(&cp->cache_lock);
kmem_cache_free(kmem_move_cache, callback);
return (B_TRUE);
}
to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
B_FALSE);
callback->kmm_to_buf = to_buf;
avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
mutex_exit(&cp->cache_lock);
if (taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
callback, TQ_NOSLEEP) == TASKQID_INVALID) {
mutex_enter(&cp->cache_lock);
avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
mutex_exit(&cp->cache_lock);
kmem_slab_free(cp, to_buf);
kmem_cache_free(kmem_move_cache, callback);
return (B_FALSE);
}
return (B_TRUE);
}
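/*
 * Complete a move request: remove it from the moves-pending tree and,
 * once the last pending move drains, destroy any fully-freed slabs parked
 * on the deadlist.
 */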
static void
kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
{
avl_index_t index;
ASSERT(cp->cache_defrag != NULL);
ASSERT(taskq_member(kmem_move_taskq, curthread));
ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
mutex_enter(&cp->cache_lock);
VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
callback->kmm_from_buf, &index) != NULL);
avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
kmem_slab_t *sp;
while ((sp = list_remove_head(deadlist)) != NULL) {
if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
list_insert_tail(deadlist, sp);
break;
}
cp->cache_defrag->kmd_deadcount--;
cp->cache_slab_destroy++;
mutex_exit(&cp->cache_lock);
kmem_slab_destroy(cp, sp);
mutex_enter(&cp->cache_lock);
}
}
mutex_exit(&cp->cache_lock);
kmem_cache_free(kmem_move_cache, callback);
}
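/*
 * Scan the cache's partial slabs from the emptiest end of the tree,
 * dispatching move requests for the allocated buffers on each reclaimable
 * slab.  Returns the number of candidate slabs found, or -1 if the scan
 * aborted because memory ran out or the slab under scan was freed or
 * changed beneath us.  Called with cache_lock held; the lock is dropped
 * and reacquired around each move request.
 */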
static int
kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
int flags)
{
kmem_slab_t *sp;
void *buf;
int i, j;
int s;
int b;
boolean_t success;
int refcnt;
int nomove;
ASSERT(taskq_member(kmem_taskq, curthread));
ASSERT(MUTEX_HELD(&cp->cache_lock));
ASSERT(kmem_move_cache != NULL);
ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
avl_numnodes(&cp->cache_partial_slabs) > 1);
if (kmem_move_blocked) {
return (0);
}
if (kmem_move_fulltilt) {
flags |= KMM_DESPERATE;
}
if (max_scan == 0 || (flags & KMM_DESPERATE)) {
max_scan = (size_t)-1;
}
if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
max_slabs = (size_t)-1;
}
sp = avl_last(&cp->cache_partial_slabs);
ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
((sp != avl_first(&cp->cache_partial_slabs)) ||
(flags & KMM_DEBUG));
sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
continue;
}
s++;
for (j = 0, b = 0, buf = sp->slab_base;
(j < sp->slab_chunks) && (b < sp->slab_refcnt);
buf = (((char *)buf) + cp->cache_chunksize), j++) {
if (kmem_slab_allocated(cp, sp, buf) == NULL) {
continue;
}
b++;
ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
refcnt = sp->slab_refcnt;
nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
mutex_exit(&cp->cache_lock);
success = kmem_move_begin(cp, sp, buf, flags);
mutex_enter(&cp->cache_lock);
ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
if (sp->slab_refcnt == 0) {
list_t *deadlist =
&cp->cache_defrag->kmd_deadlist;
list_remove(deadlist, sp);
if (!avl_is_empty(
&cp->cache_defrag->kmd_moves_pending)) {
list_insert_head(deadlist, sp);
return (-1);
}
cp->cache_defrag->kmd_deadcount--;
cp->cache_slab_destroy++;
mutex_exit(&cp->cache_lock);
kmem_slab_destroy(cp, sp);
mutex_enter(&cp->cache_lock);
return (-1);
}
if (!success) {
return (-1);
}
if (sp->slab_refcnt != refcnt) {
return (-1);
}
if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove)
return (-1);
ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
if (sp == avl_first(&cp->cache_partial_slabs)) {
goto end_scan;
}
}
}
end_scan:
return (s);
}
typedef struct kmem_move_notify_args {
kmem_cache_t *kmna_cache;
void *kmna_buf;
} kmem_move_notify_args_t;
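/*
 * Taskq callback for kmem_cache_move_notify(): if the named buffer is
 * still allocated, clear its slab's sticky state and, when the slab had
 * previously been flagged NOMOVE or deferred with LATER, dispatch an
 * immediate move request for it.
 */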
static void
kmem_cache_move_notify_task(void *arg)
{
kmem_move_notify_args_t *args = arg;
kmem_cache_t *cp = args->kmna_cache;
void *buf = args->kmna_buf;
kmem_slab_t *sp;
ASSERT(taskq_member(kmem_taskq, curthread));
ASSERT(list_link_active(&cp->cache_link));
kmem_free(args, sizeof (kmem_move_notify_args_t));
mutex_enter(&cp->cache_lock);
sp = kmem_slab_allocated(cp, NULL, buf);
if (sp == NULL) {
mutex_exit(&cp->cache_lock);
return;
}
if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
(sp->slab_later_count == 0)) {
mutex_exit(&cp->cache_lock);
return;
}
kmem_slab_move_yes(cp, sp, buf);
ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
mutex_exit(&cp->cache_lock);
(void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
mutex_enter(&cp->cache_lock);
ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
if (sp->slab_refcnt == 0) {
list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
list_remove(deadlist, sp);
if (!avl_is_empty(
&cp->cache_defrag->kmd_moves_pending)) {
list_insert_head(deadlist, sp);
mutex_exit(&cp->cache_lock);
return;
}
cp->cache_defrag->kmd_deadcount--;
cp->cache_slab_destroy++;
mutex_exit(&cp->cache_lock);
kmem_slab_destroy(cp, sp);
return;
}
} else {
kmem_slab_move_yes(cp, sp, buf);
}
mutex_exit(&cp->cache_lock);
}
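/*
 * Clients call this when a buffer they previously could not move (NO or
 * LATER) becomes movable, so reclamation need not wait for the next scan.
 */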
void
kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
{
kmem_move_notify_args_t *args;
args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
if (args != NULL) {
args->kmna_cache = cp;
args->kmna_buf = buf;
if (taskq_dispatch(kmem_taskq,
(task_func_t *)kmem_cache_move_notify_task, args,
TQ_NOSLEEP) == TASKQID_INVALID)
kmem_free(args, sizeof (kmem_move_notify_args_t));
}
}
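/*
 * Aggressively move buffers out of all of the cache's partial slabs
 * (KMM_DESPERATE) in an attempt to consolidate the cache.
 */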
static void
kmem_cache_defrag(kmem_cache_t *cp)
{
size_t n;
ASSERT(cp->cache_defrag != NULL);
mutex_enter(&cp->cache_lock);
n = avl_numnodes(&cp->cache_partial_slabs);
if (n > 1) {
cp->cache_defrag->kmd_defrags++;
(void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
}
mutex_exit(&cp->cache_lock);
}
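/*
 * Is this cache above the fragmentation threshold?  True when
 *
 *	      nfree            kmem_frag_numer
 *	------------------  >  ---------------
 *	cp->cache_buftotal     kmem_frag_denom
 */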
static boolean_t
kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
{
return ((nfree * kmem_frag_denom) >
(cp->cache_buftotal * kmem_frag_numer));
}
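/*
 * Decide whether the cache is fragmented enough to justify a defrag scan.
 * If it is not, but counting the reapable full magazines in the depot
 * would push it over the threshold, *doreap is set so that the caller
 * reaps the depot working set instead.
 */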
static boolean_t
kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
{
boolean_t fragmented;
uint64_t nfree;
ASSERT(MUTEX_HELD(&cp->cache_lock));
*doreap = B_FALSE;
if (kmem_move_fulltilt) {
if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
return (B_TRUE);
}
} else {
if ((cp->cache_complete_slab_count + avl_numnodes(
&cp->cache_partial_slabs)) < kmem_frag_minslabs) {
return (B_FALSE);
}
}
nfree = cp->cache_bufslab;
fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
kmem_cache_frag_threshold(cp, nfree));
if (!fragmented) {
long reap;
mutex_enter(&cp->cache_depot_lock);
reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
reap = MIN(reap, cp->cache_full.ml_total);
mutex_exit(&cp->cache_depot_lock);
nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
if (kmem_cache_frag_threshold(cp, nfree)) {
*doreap = B_TRUE;
}
}
return (fragmented);
}
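/*
 * Periodic consolidator, called from the kmem taskq for each cache with a
 * registered move callback.  A pending consolidation request triggers a
 * reap; otherwise a fragmented cache is scanned for move candidates, and
 * the reclaim threshold is lowered when scans consistently find the slabs
 * they sought and raised (admitting progressively fuller slabs) when they
 * fall short.  On DEBUG kernels, healthy caches are occasionally reaped
 * or scanned anyway to exercise the move code.
 */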
static void
kmem_cache_scan(kmem_cache_t *cp)
{
boolean_t reap = B_FALSE;
kmem_defrag_t *kmd;
ASSERT(taskq_member(kmem_taskq, curthread));
mutex_enter(&cp->cache_lock);
kmd = cp->cache_defrag;
if (kmd->kmd_consolidate > 0) {
kmd->kmd_consolidate--;
mutex_exit(&cp->cache_lock);
kmem_cache_reap(cp);
return;
}
if (kmem_cache_is_fragmented(cp, &reap)) {
int slabs_found;
kmd->kmd_scans++;
slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
kmem_reclaim_max_slabs, 0);
if (slabs_found >= 0) {
kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
kmd->kmd_slabs_found += slabs_found;
}
if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
kmd->kmd_tries = 0;
if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
kmem_adjust_reclaim_threshold(kmd, -1);
} else if ((kmd->kmd_slabs_found * 2) <
kmd->kmd_slabs_sought) {
kmem_adjust_reclaim_threshold(kmd, 1);
}
kmd->kmd_slabs_sought = 0;
kmd->kmd_slabs_found = 0;
}
} else {
kmem_reset_reclaim_threshold(cp->cache_defrag);
#ifdef DEBUG
if (!avl_is_empty(&cp->cache_partial_slabs)) {
uint16_t debug_rand;
(void) random_get_bytes((uint8_t *)&debug_rand, 2);
if (!kmem_move_noreap &&
((debug_rand % kmem_mtb_reap) == 0)) {
mutex_exit(&cp->cache_lock);
kmem_cache_reap(cp);
return;
} else if ((debug_rand % kmem_mtb_move) == 0) {
kmd->kmd_scans++;
(void) kmem_move_buffers(cp,
kmem_reclaim_scan_range, 1, KMM_DEBUG);
}
}
#endif
}
mutex_exit(&cp->cache_lock);
if (reap)
kmem_depot_ws_reap(cp);
}