#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/panic.h>
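
/*
 * Segment-count accounting for vmem_populate(): creating a span consumes
 * two vmem_seg_t's (the VMEM_SPAN plus its initial VMEM_FREE segment),
 * and a single allocation consumes at most two more (a middle-of-segment
 * allocation leaves both a left and a right remainder).  Each arena
 * therefore keeps at least VMEM_MINFREE segments on hand, with
 * VMEM_POPULATE_RESERVE extra per populator arena.
 */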
#define VMEM_INITIAL 10
#define VMEM_SEG_INITIAL 200
#define VMEM_SEGS_PER_SPAN_CREATE 2
#define VMEM_SEGS_PER_EXACT_ALLOC 0
#define VMEM_SEGS_PER_LEFT_ALLOC 1
#define VMEM_SEGS_PER_RIGHT_ALLOC 1
#define VMEM_SEGS_PER_MIDDLE_ALLOC 2
#define VMEM_SEGS_PER_ALLOC_MAX \
(VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
#define VMEM_POPULATE_RESERVE 12
#define VMEM_MINFREE (VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
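
/*
 * Static bootstrap state: vmem0[] and vmem_seg0[] allow the first
 * VMEM_INITIAL arenas and VMEM_SEG_INITIAL segment structures to exist
 * before the metadata arenas are created.  The four populate locks
 * (sleep, nosleep, pushpage, panic) serialize segment population by
 * allocation class; vmem_is_populator() checks their ownership so that
 * recursive population attempts tap the reserve instead of deadlocking.
 */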
static vmem_t vmem0[VMEM_INITIAL];
static vmem_t *vmem_populator[VMEM_INITIAL];
static uint32_t vmem_id;
static uint32_t vmem_populators;
static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
static vmem_seg_t *vmem_segfree;
static kmutex_t vmem_list_lock;
static kmutex_t vmem_segfree_lock;
static kmutex_t vmem_sleep_lock;
static kmutex_t vmem_nosleep_lock;
static kmutex_t vmem_pushpage_lock;
static kmutex_t vmem_panic_lock;
static vmem_t *vmem_list;
static vmem_t *vmem_metadata_arena;
static vmem_t *vmem_seg_arena;
static vmem_t *vmem_hash_arena;
static vmem_t *vmem_vmem_arena;
static long vmem_update_interval = 15;	/* vmem_update() every 15 seconds */
uint32_t vmem_mtbf;		/* mean time between failures [default: off] */
size_t vmem_seg_size = sizeof (vmem_seg_t);
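
/*
 * Template for each arena's kstats; copied into the arena at creation.
 */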
static vmem_kstat_t vmem_kstat_template = {
{ "mem_inuse", KSTAT_DATA_UINT64 },
{ "mem_import", KSTAT_DATA_UINT64 },
{ "mem_total", KSTAT_DATA_UINT64 },
{ "vmem_source", KSTAT_DATA_UINT32 },
{ "alloc", KSTAT_DATA_UINT64 },
{ "free", KSTAT_DATA_UINT64 },
{ "wait", KSTAT_DATA_UINT64 },
{ "fail", KSTAT_DATA_UINT64 },
{ "lookup", KSTAT_DATA_UINT64 },
{ "search", KSTAT_DATA_UINT64 },
{ "populate_wait", KSTAT_DATA_UINT64 },
{ "populate_fail", KSTAT_DATA_UINT64 },
{ "contains", KSTAT_DATA_UINT64 },
{ "contains_search", KSTAT_DATA_UINT64 },
};
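
/*
 * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
 */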
#define VMEM_INSERT(vprev, vsp, type) \
{ \
vmem_seg_t *vnext = (vprev)->vs_##type##next; \
(vsp)->vs_##type##next = (vnext); \
(vsp)->vs_##type##prev = (vprev); \
(vprev)->vs_##type##next = (vsp); \
(vnext)->vs_##type##prev = (vsp); \
}
#define VMEM_DELETE(vsp, type) \
{ \
vmem_seg_t *vprev = (vsp)->vs_##type##prev; \
vmem_seg_t *vnext = (vsp)->vs_##type##next; \
(vprev)->vs_##type##next = (vnext); \
(vnext)->vs_##type##prev = (vprev); \
}
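
/*
 * Get a vmem_seg_t from the global segfree list.
 */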
static vmem_seg_t *
vmem_getseg_global(void)
{
vmem_seg_t *vsp;
mutex_enter(&vmem_segfree_lock);
if ((vsp = vmem_segfree) != NULL)
vmem_segfree = vsp->vs_knext;
mutex_exit(&vmem_segfree_lock);
return (vsp);
}
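
/*
 * Put a vmem_seg_t on the global segfree list.
 */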
static void
vmem_putseg_global(vmem_seg_t *vsp)
{
mutex_enter(&vmem_segfree_lock);
vsp->vs_knext = vmem_segfree;
vmem_segfree = vsp;
mutex_exit(&vmem_segfree_lock);
}
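
/*
 * Get a vmem_seg_t from vmp's segfree list.
 */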
static vmem_seg_t *
vmem_getseg(vmem_t *vmp)
{
vmem_seg_t *vsp;
ASSERT(vmp->vm_nsegfree > 0);
vsp = vmp->vm_segfree;
vmp->vm_segfree = vsp->vs_knext;
vmp->vm_nsegfree--;
return (vsp);
}
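
/*
 * Put a vmem_seg_t on vmp's segfree list.
 */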
static void
vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
{
vsp->vs_knext = vmp->vm_segfree;
vmp->vm_segfree = vsp;
vmp->vm_nsegfree++;
}
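
/*
 * Add vsp to the appropriate freelist, set the corresponding freemap
 * bit, and wake any threads waiting for space.
 */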
static void
vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
vmem_seg_t *vprev;
ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
vsp->vs_type = VMEM_FREE;
vmp->vm_freemap |= VS_SIZE(vprev);
VMEM_INSERT(vprev, vsp, k);
cv_broadcast(&vmp->vm_cv);
}
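
/*
 * Take vsp from the freelist.  If both of vsp's knext-list neighbors
 * are freelist heads, the list for vsp's size class is now empty, so
 * clear its freemap bit.
 */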
static void
vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
{
ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
ASSERT(vsp->vs_type == VMEM_FREE);
if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
}
VMEM_DELETE(vsp, k);
}
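
/*
 * Add vsp to the allocated-segment hash table and update kstats.  If
 * the full debug-sized vmem_seg_t is in use, also record the stack,
 * owning thread, and timestamp of the allocation.
 */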
static void
vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
vmem_seg_t **bucket;
vsp->vs_type = VMEM_ALLOC;
bucket = VMEM_HASH(vmp, vsp->vs_start);
vsp->vs_knext = *bucket;
*bucket = vsp;
if (vmem_seg_size == sizeof (vmem_seg_t)) {
vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
VMEM_STACK_DEPTH);
vsp->vs_thread = curthread;
vsp->vs_timestamp = gethrtime();
} else {
vsp->vs_depth = 0;
}
vmp->vm_kstat.vk_alloc.value.ui64++;
vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
}
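
/*
 * Remove segment [addr, addr + size) from the hash table, panicking if
 * the segment was never allocated or was allocated with a different
 * size.
 */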
static vmem_seg_t *
vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
{
vmem_seg_t *vsp, **prev_vspp;
prev_vspp = VMEM_HASH(vmp, addr);
while ((vsp = *prev_vspp) != NULL) {
if (vsp->vs_start == addr) {
*prev_vspp = vsp->vs_knext;
break;
}
vmp->vm_kstat.vk_lookup.value.ui64++;
prev_vspp = &vsp->vs_knext;
}
if (vsp == NULL)
panic("vmem_hash_delete(%p, %lx, %lu): bad free",
(void *)vmp, addr, size);
if (VS_SIZE(vsp) != size)
panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
(void *)vmp, addr, size, VS_SIZE(vsp));
vmp->vm_kstat.vk_free.value.ui64++;
vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;
return (vsp);
}
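
/*
 * Create a segment spanning the range [start, end) and add it to the
 * arena list after vprev.
 */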
static vmem_seg_t *
vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
{
vmem_seg_t *newseg = vmem_getseg(vmp);
newseg->vs_start = start;
newseg->vs_end = end;
newseg->vs_type = 0;
newseg->vs_import = 0;
VMEM_INSERT(vprev, newseg, a);
return (newseg);
}
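
/*
 * Remove segment vsp from the arena.
 */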
static void
vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
ASSERT(vsp->vs_type != VMEM_ROTOR);
VMEM_DELETE(vsp, a);
vmem_putseg(vmp, vsp);
}
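
/*
 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
 */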
static vmem_seg_t *
vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
{
vmem_seg_t *newseg, *span;
uintptr_t start = (uintptr_t)vaddr;
uintptr_t end = start + size;
ASSERT(MUTEX_HELD(&vmp->vm_lock));
if ((start | end) & (vmp->vm_quantum - 1))
panic("vmem_span_create(%p, %p, %lu): misaligned",
(void *)vmp, vaddr, size);
span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
span->vs_type = VMEM_SPAN;
span->vs_import = import;
VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);
newseg = vmem_seg_create(vmp, span, start, end);
vmem_freelist_insert(vmp, newseg);
if (import)
vmp->vm_kstat.vk_mem_import.value.ui64 += size;
vmp->vm_kstat.vk_mem_total.value.ui64 += size;
return (newseg);
}
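
/*
 * Remove span vsp from vmp and update kstats.
 */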
static void
vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
vmem_seg_t *span = vsp->vs_aprev;
size_t size = VS_SIZE(vsp);
ASSERT(MUTEX_HELD(&vmp->vm_lock));
ASSERT(span->vs_type == VMEM_SPAN);
if (span->vs_import)
vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
vmp->vm_kstat.vk_mem_total.value.ui64 -= size;
VMEM_DELETE(span, k);
vmem_seg_destroy(vmp, vsp);
vmem_seg_destroy(vmp, span);
}
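
/*
 * Allocate the subrange [addr, addr + size) from segment vsp.
 * If there are leftovers on either side, place them on the freelist.
 * Returns a pointer to the segment representing [addr, addr + size).
 * The fast path applies when we take the front of the segment and the
 * remainder stays on the same freelist, so vsp need not move.
 */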
static vmem_seg_t *
vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
{
uintptr_t vs_start = vsp->vs_start;
uintptr_t vs_end = vsp->vs_end;
size_t vs_size = vs_end - vs_start;
size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
uintptr_t addr_end = addr + realsize;
ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
ASSERT(vsp->vs_type == VMEM_FREE);
ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
ASSERT(addr - 1 <= addr_end - 1);
if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
vsp->vs_start = addr_end;
vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
vmem_hash_insert(vmp, vsp);
return (vsp);
}
vmem_freelist_delete(vmp, vsp);
if (vs_end != addr_end)
vmem_freelist_insert(vmp,
vmem_seg_create(vmp, vsp, addr_end, vs_end));
if (vs_start != addr)
vmem_freelist_insert(vmp,
vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
vsp->vs_start = addr;
vsp->vs_end = addr + size;
vmem_hash_insert(vmp, vsp);
return (vsp);
}
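
/*
 * Returns 1 if we are populating, 0 otherwise.
 */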
int
vmem_is_populator()
{
return (mutex_owner(&vmem_sleep_lock) == curthread ||
mutex_owner(&vmem_nosleep_lock) == curthread ||
mutex_owner(&vmem_pushpage_lock) == curthread ||
mutex_owner(&vmem_panic_lock) == curthread);
}
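
/*
 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
 * If the global pool cannot supply enough, grab a batch from
 * vmem_seg_arena under the per-class populate lock, top up every
 * populator arena's reserve, satisfy vmp, and return the remainder to
 * the global pool.
 */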
static int
vmem_populate(vmem_t *vmp, int vmflag)
{
char *p;
vmem_seg_t *vsp;
ssize_t nseg;
size_t size;
kmutex_t *lp;
int i;
while (vmp->vm_nsegfree < VMEM_MINFREE &&
(vsp = vmem_getseg_global()) != NULL)
vmem_putseg(vmp, vsp);
if (vmp->vm_nsegfree >= VMEM_MINFREE)
return (1);
if (vmem_is_populator()) {
ASSERT(vmp->vm_cflags & VMC_POPULATOR);
return (1);
}
mutex_exit(&vmp->vm_lock);
if (panic_thread == curthread)
lp = &vmem_panic_lock;
else if (vmflag & VM_NOSLEEP)
lp = &vmem_nosleep_lock;
else if (vmflag & VM_PUSHPAGE)
lp = &vmem_pushpage_lock;
else
lp = &vmem_sleep_lock;
mutex_enter(lp);
nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
nseg = size / vmem_seg_size;
p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_KMFLAGS);
if (p == NULL) {
mutex_exit(lp);
mutex_enter(&vmp->vm_lock);
vmp->vm_kstat.vk_populate_fail.value.ui64++;
return (0);
}
for (i = 0; i < vmem_populators; i++) {
mutex_enter(&vmem_populator[i]->vm_lock);
while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
vmem_putseg(vmem_populator[i],
(vmem_seg_t *)(p + --nseg * vmem_seg_size));
mutex_exit(&vmem_populator[i]->vm_lock);
}
mutex_exit(lp);
mutex_enter(&vmp->vm_lock);
ASSERT(nseg >= VMEM_MINFREE);
while (vmp->vm_nsegfree < VMEM_MINFREE)
vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
while (nseg > 0)
vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));
return (1);
}
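
/*
 * Advance a walker from its previous position to 'afterme'.
 * Note: may drop and reacquire vmp->vm_lock, so the caller must not
 * assume the arena is unchanged across the call.  Any free segments the
 * walker had been separating are coalesced, and a span that becomes
 * entirely free is returned to its source arena.
 */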
static void
vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
{
vmem_seg_t *vprev = walker->vs_aprev;
vmem_seg_t *vnext = walker->vs_anext;
vmem_seg_t *vsp = NULL;
VMEM_DELETE(walker, a);
if (afterme != NULL)
VMEM_INSERT(afterme, walker, a);
if (vprev->vs_type == VMEM_FREE) {
if (vnext->vs_type == VMEM_FREE) {
ASSERT(vprev->vs_end == vnext->vs_start);
vmem_freelist_delete(vmp, vnext);
vmem_freelist_delete(vmp, vprev);
vprev->vs_end = vnext->vs_end;
vmem_freelist_insert(vmp, vprev);
vmem_seg_destroy(vmp, vnext);
}
vsp = vprev;
} else if (vnext->vs_type == VMEM_FREE) {
vsp = vnext;
}
if (vsp != NULL && vsp->vs_aprev->vs_import &&
vmp->vm_source_free != NULL &&
vsp->vs_aprev->vs_type == VMEM_SPAN &&
vsp->vs_anext->vs_type == VMEM_SPAN) {
void *vaddr = (void *)vsp->vs_start;
size_t size = VS_SIZE(vsp);
ASSERT(size == VS_SIZE(vsp->vs_aprev));
vmem_freelist_delete(vmp, vsp);
vmem_span_destroy(vmp, vsp);
mutex_exit(&vmp->vm_lock);
vmp->vm_source_free(vmp->vm_source, vaddr, size);
mutex_enter(&vmp->vm_lock);
}
}
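
/*
 * VM_NEXTFIT allocations deliberately cycle through all addresses in
 * the arena, using a rotor segment as the clock hand.  The common case,
 * in which the segment after the rotor is free and large enough that
 * taking 'size' bytes won't change its freelist, avoids touching the
 * freelists entirely.
 */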
static void *
vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
{
vmem_seg_t *vsp, *rotor;
uintptr_t addr;
size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
size_t vs_size;
mutex_enter(&vmp->vm_lock);
if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
mutex_exit(&vmp->vm_lock);
return (NULL);
}
rotor = &vmp->vm_rotor;
vsp = rotor->vs_anext;
if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
addr = vsp->vs_start;
vsp->vs_start = addr + realsize;
vmem_hash_insert(vmp,
vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
mutex_exit(&vmp->vm_lock);
return ((void *)addr);
}
for (;;) {
vmp->vm_kstat.vk_search.value.ui64++;
if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
break;
vsp = vsp->vs_anext;
if (vsp == rotor) {
vmem_advance(vmp, rotor, rotor->vs_anext);
vsp = rotor->vs_aprev;
if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
break;
if (vmp->vm_source_alloc != NULL ||
(vmflag & VM_NOSLEEP)) {
mutex_exit(&vmp->vm_lock);
return (vmem_xalloc(vmp, size, vmp->vm_quantum,
0, 0, NULL, NULL, vmflag & VM_KMFLAGS));
}
vmp->vm_kstat.vk_wait.value.ui64++;
cv_wait(&vmp->vm_cv, &vmp->vm_lock);
vsp = rotor->vs_anext;
}
}
addr = vsp->vs_start;
vsp = vmem_seg_alloc(vmp, vsp, addr, size);
ASSERT(vsp->vs_type == VMEM_ALLOC &&
vsp->vs_start == addr && vsp->vs_end == addr + size);
vmem_advance(vmp, rotor, vsp);
mutex_exit(&vmp->vm_lock);
return ((void *)addr);
}
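
/*
 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on
 * its freelist.  Returns nonzero (a freelist index) if so, 0 if not.
 * Used after re-acquiring the arena lock to decide whether a concurrent
 * free has made an import unnecessary.
 */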
static int
vmem_canalloc(vmem_t *vmp, size_t size)
{
int hb;
int flist = 0;
ASSERT(MUTEX_HELD(&vmp->vm_lock));
if (ISP2(size))
flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
else if ((hb = highbit(size)) < VMEM_FREELISTS)
flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
return (flist);
}
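
/*
 * Allocate size bytes at offset phase from an align boundary such that
 * the resulting segment [addr, addr + size) is a subset of [minaddr,
 * maxaddr) that does not straddle a nocross-aligned boundary.  If the
 * freelists cannot satisfy the constraints, import a span from the
 * source arena (when there is one), reap, or wait as vmflag allows.
 *
 * A hypothetical sketch (assuming an arena 'foo' whose quantum divides
 * these parameters): to get 1K of 256-aligned space that does not cross
 * a 4K boundary,
 *
 *	p = vmem_xalloc(foo, 1024, 256, 0, 4096, NULL, NULL, VM_SLEEP);
 */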
void *
vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
size_t nocross, void *minaddr, void *maxaddr, int vmflag)
{
vmem_seg_t *vsp;
vmem_seg_t *vbest = NULL;
uintptr_t addr, taddr, start, end;
uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
void *vaddr, *xvaddr = NULL;
size_t xsize;
int hb, flist, resv;
uint32_t mtbf;
if ((align | phase | nocross) & (vmp->vm_quantum - 1))
panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
"parameters not vm_quantum aligned",
(void *)vmp, size, align_arg, phase, nocross,
minaddr, maxaddr, vmflag);
if (nocross != 0 &&
(align > nocross || P2ROUNDUP(phase + size, align) > nocross))
panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
"overconstrained allocation",
(void *)vmp, size, align_arg, phase, nocross,
minaddr, maxaddr, vmflag);
if (phase >= align || !ISP2(align) || !ISP2(nocross))
panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
"parameters inconsistent or invalid",
(void *)vmp, size, align_arg, phase, nocross,
minaddr, maxaddr, vmflag);
if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
(vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
return (NULL);
addr = 0;
xsize = 0;
mutex_enter(&vmp->vm_lock);
for (;;) {
if (vmp->vm_nsegfree < VMEM_MINFREE &&
!vmem_populate(vmp, vmflag))
break;
do_alloc:
if (ISP2(size)) {
flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
} else {
hb = highbit(size);
if ((vmp->vm_freemap >> hb) == 0 ||
hb == VMEM_FREELISTS ||
(vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
hb--;
flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
}
for (vbest = NULL, vsp = (flist == 0) ? NULL :
vmp->vm_freelist[flist - 1].vs_knext;
vsp != NULL; vsp = vsp->vs_knext) {
vmp->vm_kstat.vk_search.value.ui64++;
if (vsp->vs_start == 0) {
if (vbest != NULL)
break;
flist = lowbit(P2ALIGN(vmp->vm_freemap,
VS_SIZE(vsp)));
if (flist-- == 0)
break;
vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
continue;
}
if (vsp->vs_end - 1 < (uintptr_t)minaddr)
continue;
if (vsp->vs_start > (uintptr_t)maxaddr - 1)
continue;
start = MAX(vsp->vs_start, (uintptr_t)minaddr);
end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
taddr = P2PHASEUP(start, align, phase);
if (P2BOUNDARY(taddr, size, nocross))
taddr +=
P2ROUNDUP(P2NPHASE(taddr, nocross), align);
if ((taddr - start) + size > end - start ||
(vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
continue;
vbest = vsp;
addr = taddr;
if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
break;
}
if (vbest != NULL)
break;
ASSERT(xvaddr == NULL);
if (size == 0)
panic("vmem_xalloc(): size == 0");
if (vmp->vm_source_alloc != NULL && nocross == 0 &&
minaddr == NULL && maxaddr == NULL) {
size_t aneeded, asize;
size_t aquantum = MAX(vmp->vm_quantum,
vmp->vm_source->vm_quantum);
size_t aphase = phase;
if ((align > aquantum) &&
!(vmp->vm_cflags & VMC_XALIGN)) {
aphase = (P2PHASE(phase, aquantum) != 0) ?
align - vmp->vm_quantum : align - aquantum;
ASSERT(aphase >= phase);
}
aneeded = MAX(size + aphase, vmp->vm_min_import);
asize = P2ROUNDUP(aneeded, aquantum);
if (asize < size) {
if ((vmflag & VM_NOSLEEP) &&
!(vmflag & VM_PANIC)) {
mutex_exit(&vmp->vm_lock);
return (NULL);
}
panic("vmem_xalloc(): size overflow");
}
if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
resv = VMEM_SEGS_PER_SPAN_CREATE +
VMEM_SEGS_PER_EXACT_ALLOC;
else if (phase == 0 &&
align <= vmp->vm_source->vm_quantum)
resv = VMEM_SEGS_PER_SPAN_CREATE +
VMEM_SEGS_PER_LEFT_ALLOC;
else
resv = VMEM_SEGS_PER_ALLOC_MAX;
ASSERT(vmp->vm_nsegfree >= resv);
vmp->vm_nsegfree -= resv;
mutex_exit(&vmp->vm_lock);
if (vmp->vm_cflags & VMC_XALLOC) {
size_t oasize = asize;
vmem_ximport_t *vmem_ximport;
vmem_ximport = (vmem_ximport_t *)
(uintptr_t)vmp->vm_source_alloc;
vaddr = vmem_ximport(vmp->vm_source,
&asize, align, vmflag & VM_KMFLAGS);
ASSERT(asize >= oasize);
ASSERT(P2PHASE(asize,
vmp->vm_source->vm_quantum) == 0);
ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
IS_P2ALIGNED(vaddr, align));
} else {
vaddr = vmp->vm_source_alloc(vmp->vm_source,
asize, vmflag & VM_KMFLAGS);
}
mutex_enter(&vmp->vm_lock);
vmp->vm_nsegfree += resv;
aneeded = size + align - vmp->vm_quantum;
aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
if (vaddr != NULL) {
if (asize > aneeded &&
vmp->vm_source_free != NULL &&
vmem_canalloc(vmp, aneeded)) {
ASSERT(resv >=
VMEM_SEGS_PER_MIDDLE_ALLOC);
xvaddr = vaddr;
xsize = asize;
goto do_alloc;
}
vbest = vmem_span_create(vmp, vaddr, asize, 1);
addr = P2PHASEUP(vbest->vs_start, align, phase);
break;
} else if (vmem_canalloc(vmp, aneeded)) {
ASSERT(resv >= VMEM_SEGS_PER_MIDDLE_ALLOC);
goto do_alloc;
}
}
if (vmflag & VM_ABORT)
break;
mutex_exit(&vmp->vm_lock);
if (vmp->vm_cflags & VMC_IDENTIFIER)
kmem_reap_idspace();
else
kmem_reap();
mutex_enter(&vmp->vm_lock);
if (vmflag & VM_NOSLEEP)
break;
vmp->vm_kstat.vk_wait.value.ui64++;
cv_wait(&vmp->vm_cv, &vmp->vm_lock);
}
if (vbest != NULL) {
ASSERT(vbest->vs_type == VMEM_FREE);
ASSERT(vbest->vs_knext != vbest);
if (vmflag & VM_ENDALLOC) {
addr += ((vbest->vs_end - (addr + size)) / align) *
align;
}
(void) vmem_seg_alloc(vmp, vbest, addr, size);
mutex_exit(&vmp->vm_lock);
if (xvaddr)
vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
ASSERT(P2PHASE(addr, align) == phase);
ASSERT(!P2BOUNDARY(addr, size, nocross));
ASSERT(addr >= (uintptr_t)minaddr);
ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
return ((void *)addr);
}
vmp->vm_kstat.vk_fail.value.ui64++;
mutex_exit(&vmp->vm_lock);
if (vmflag & VM_PANIC)
panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
"cannot satisfy mandatory allocation",
(void *)vmp, size, align_arg, phase, nocross,
minaddr, maxaddr, vmflag);
ASSERT(xvaddr == NULL);
return (NULL);
}
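
/*
 * Free the segment [vaddr, vaddr + size), coalescing it with adjacent
 * free segments and returning the enclosing span to the source arena
 * if the span becomes entirely free.
 */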
void
vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
{
vmem_seg_t *vsp, *vnext, *vprev;
mutex_enter(&vmp->vm_lock);
vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
vnext = vsp->vs_anext;
if (vnext->vs_type == VMEM_FREE) {
ASSERT(vsp->vs_end == vnext->vs_start);
vmem_freelist_delete(vmp, vnext);
vsp->vs_end = vnext->vs_end;
vmem_seg_destroy(vmp, vnext);
}
vprev = vsp->vs_aprev;
if (vprev->vs_type == VMEM_FREE) {
ASSERT(vprev->vs_end == vsp->vs_start);
vmem_freelist_delete(vmp, vprev);
vprev->vs_end = vsp->vs_end;
vmem_seg_destroy(vmp, vsp);
vsp = vprev;
}
if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
vsp->vs_aprev->vs_type == VMEM_SPAN &&
vsp->vs_anext->vs_type == VMEM_SPAN) {
vaddr = (void *)vsp->vs_start;
size = VS_SIZE(vsp);
ASSERT(size == VS_SIZE(vsp->vs_aprev));
vmem_span_destroy(vmp, vsp);
mutex_exit(&vmp->vm_lock);
vmp->vm_source_free(vmp->vm_source, vaddr, size);
} else {
vmem_freelist_insert(vmp, vsp);
mutex_exit(&vmp->vm_lock);
}
}
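
/*
 * Allocate size bytes from vmp.  Returns the allocated address on
 * success, NULL on failure.  VM_SLEEP allocations never fail.
 * Sizes up to vm_qcache_max are served from the quantum caches; the
 * instant-fit path below handles the common unconstrained case without
 * the full generality (and cost) of vmem_xalloc().
 */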
void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
vmem_seg_t *vsp;
uintptr_t addr;
int hb;
int flist = 0;
uint32_t mtbf;
if (size - 1 < vmp->vm_qcache_max)
return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
vmp->vm_qshift], vmflag & VM_KMFLAGS));
if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
(vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
return (NULL);
if (vmflag & VM_NEXTFIT)
return (vmem_nextfit_alloc(vmp, size, vmflag));
if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
NULL, NULL, vmflag));
mutex_enter(&vmp->vm_lock);
if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
if (ISP2(size))
flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
else if ((hb = highbit(size)) < VMEM_FREELISTS)
flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
}
if (flist-- == 0) {
mutex_exit(&vmp->vm_lock);
return (vmem_xalloc(vmp, size, vmp->vm_quantum,
0, 0, NULL, NULL, vmflag));
}
ASSERT(size <= (1UL << flist));
vsp = vmp->vm_freelist[flist].vs_knext;
addr = vsp->vs_start;
if (vmflag & VM_ENDALLOC) {
addr += vsp->vs_end - (addr + size);
}
(void) vmem_seg_alloc(vmp, vsp, addr, size);
mutex_exit(&vmp->vm_lock);
return ((void *)addr);
}
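
/*
 * Free the segment [vaddr, vaddr + size), routing quantum-cached sizes
 * back through their kmem cache.
 */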
void
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
if (size - 1 < vmp->vm_qcache_max)
kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
vaddr);
else
vmem_xfree(vmp, vaddr, size);
}
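
/*
 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
 */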
int
vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
{
uintptr_t start = (uintptr_t)vaddr;
uintptr_t end = start + size;
vmem_seg_t *vsp;
vmem_seg_t *seg0 = &vmp->vm_seg0;
mutex_enter(&vmp->vm_lock);
vmp->vm_kstat.vk_contains.value.ui64++;
for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
vmp->vm_kstat.vk_contains_search.value.ui64++;
ASSERT(vsp->vs_type == VMEM_SPAN);
if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
break;
}
mutex_exit(&vmp->vm_lock);
return (vsp != seg0);
}
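
/*
 * Add the span [vaddr, vaddr + size) to arena vmp.
 */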
void *
vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
{
if (vaddr == NULL || size == 0)
panic("vmem_add(%p, %p, %lu): bad arguments",
(void *)vmp, vaddr, size);
ASSERT(!vmem_contains(vmp, vaddr, size));
mutex_enter(&vmp->vm_lock);
if (vmem_populate(vmp, vmflag))
(void) vmem_span_create(vmp, vaddr, size, 0);
else
vaddr = NULL;
mutex_exit(&vmp->vm_lock);
return (vaddr);
}
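
/*
 * Walk the vmp arena, applying func to each segment matching typemask.
 * If VMEM_REENTRANT is specified, the arena lock is dropped across each
 * callback; a temporary walker segment marks the current position so
 * the walk can resume correctly even if segments come and go while the
 * lock is dropped.
 */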
void
vmem_walk(vmem_t *vmp, int typemask,
void (*func)(void *, void *, size_t), void *arg)
{
vmem_seg_t *vsp;
vmem_seg_t *seg0 = &vmp->vm_seg0;
vmem_seg_t walker;
if (typemask & VMEM_WALKER)
return;
bzero(&walker, sizeof (walker));
walker.vs_type = VMEM_WALKER;
mutex_enter(&vmp->vm_lock);
VMEM_INSERT(seg0, &walker, a);
for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
if (vsp->vs_type & typemask) {
void *start = (void *)vsp->vs_start;
size_t size = VS_SIZE(vsp);
if (typemask & VMEM_REENTRANT) {
vmem_advance(vmp, &walker, vsp);
mutex_exit(&vmp->vm_lock);
func(arg, start, size);
mutex_enter(&vmp->vm_lock);
vsp = &walker;
} else {
func(arg, start, size);
}
}
}
vmem_advance(vmp, &walker, NULL);
mutex_exit(&vmp->vm_lock);
}
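
/*
 * Return the total amount of memory whose type matches typemask.  Thus:
 * typemask VMEM_ALLOC yields total memory allocated (in use);
 * typemask VMEM_FREE yields total memory free (available);
 * typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
 */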
size_t
vmem_size(vmem_t *vmp, int typemask)
{
uint64_t size = 0;
if (typemask & VMEM_ALLOC)
size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
if (typemask & VMEM_FREE)
size += vmp->vm_kstat.vk_mem_total.value.ui64 -
vmp->vm_kstat.vk_mem_inuse.value.ui64;
return ((size_t)size);
}
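
/*
 * Create an arena called name whose initial span is [base, base + size).
 * The arena's natural unit of currency is quantum, so vmem_alloc()
 * guarantees quantum-aligned results.  The arena may import new spans
 * by invoking afunc() on source, and may return those spans by invoking
 * ffunc() on source.  Small allocations (up to qcache_max) are served
 * from per-multiple-of-quantum kmem caches for speed and scalability.
 */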
static vmem_t *
vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
void *(*afunc)(vmem_t *, size_t, int),
void (*ffunc)(vmem_t *, void *, size_t),
vmem_t *source, size_t qcache_max, int vmflag)
{
int i;
size_t nqcache;
vmem_t *vmp, *cur, **vmpp;
vmem_seg_t *vsp;
vmem_freelist_t *vfp;
uint32_t id = atomic_inc_32_nv(&vmem_id);
if (vmem_vmem_arena != NULL) {
vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
vmflag & VM_KMFLAGS);
} else {
ASSERT(id <= VMEM_INITIAL);
vmp = &vmem0[id - 1];
}
ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
(vmflag & VMC_IDENTIFIER)));
if (vmp == NULL)
return (NULL);
bzero(vmp, sizeof (vmem_t));
(void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
vmp->vm_cflags = vmflag;
vmflag &= VM_KMFLAGS;
vmp->vm_quantum = quantum;
vmp->vm_qshift = highbit(quantum) - 1;
nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
for (i = 0; i <= VMEM_FREELISTS; i++) {
vfp = &vmp->vm_freelist[i];
vfp->vs_end = 1UL << i;
vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
}
vmp->vm_freelist[0].vs_kprev = NULL;
vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
vmp->vm_hash_table = vmp->vm_hash0;
vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
vsp = &vmp->vm_seg0;
vsp->vs_anext = vsp;
vsp->vs_aprev = vsp;
vsp->vs_knext = vsp;
vsp->vs_kprev = vsp;
vsp->vs_type = VMEM_SPAN;
vsp = &vmp->vm_rotor;
vsp->vs_type = VMEM_ROTOR;
VMEM_INSERT(&vmp->vm_seg0, vsp, a);
bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));
vmp->vm_id = id;
if (source != NULL)
vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
vmp->vm_source = source;
vmp->vm_source_alloc = afunc;
vmp->vm_source_free = ffunc;
if (vmp->vm_cflags & VMC_NO_QCACHE) {
vmp->vm_min_import =
VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
nqcache = 0;
}
if (nqcache != 0) {
ASSERT(!(vmflag & VM_NOSLEEP));
vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
for (i = 0; i < nqcache; i++) {
char buf[VMEM_NAMELEN + 21];
(void) sprintf(buf, "%s_%lu", vmp->vm_name,
(i + 1) * quantum);
vmp->vm_qcache[i] = kmem_cache_create(buf,
(i + 1) * quantum, quantum, NULL, NULL, NULL,
NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
}
}
if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
"vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
vmp->vm_ksp->ks_data = &vmp->vm_kstat;
kstat_install(vmp->vm_ksp);
}
mutex_enter(&vmem_list_lock);
vmpp = &vmem_list;
while ((cur = *vmpp) != NULL)
vmpp = &cur->vm_next;
*vmpp = vmp;
mutex_exit(&vmem_list_lock);
if (vmp->vm_cflags & VMC_POPULATOR) {
ASSERT(vmem_populators < VMEM_INITIAL);
vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
mutex_enter(&vmp->vm_lock);
(void) vmem_populate(vmp, vmflag | VM_PANIC);
mutex_exit(&vmp->vm_lock);
}
if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
vmem_destroy(vmp);
return (NULL);
}
return (vmp);
}
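
/*
 * Like vmem_create(), but the import function is a vmem_ximport_t,
 * which may round the requested span size up and report the actual
 * size back through its size argument (the VMC_XALLOC protocol).
 */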
vmem_t *
vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
size_t qcache_max, int vmflag)
{
vmem_alloc_t *af;
af = (vmem_alloc_t *)(uintptr_t)afunc;
ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);
return (vmem_create_common(name, base, size, quantum,
af, ffunc, source, qcache_max, vmflag | VMC_XALLOC));
}
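
/*
 * Create a vmem arena.  A minimal usage sketch (the arena name, base
 * address, and sizes here are hypothetical, purely for illustration):
 *
 *	vmem_t *foo = vmem_create("foo", (void *)0x20000, 0x10000, 64,
 *	    NULL, NULL, NULL, 0, VM_SLEEP);
 *	void *p = vmem_alloc(foo, 300, VM_SLEEP);	(64-aligned result)
 *	vmem_free(foo, p, 300);		(size must match the allocation)
 */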
vmem_t *
vmem_create(const char *name, void *base, size_t size, size_t quantum,
vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
size_t qcache_max, int vmflag)
{
ASSERT(!(vmflag & (VMC_XALLOC | VMC_XALIGN)));
vmflag &= ~(VMC_XALLOC | VMC_XALIGN);
return (vmem_create_common(name, base, size, quantum,
afunc, ffunc, source, qcache_max, vmflag));
}
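
/*
 * Destroy arena vmp, warning about any memory (or identifiers) still
 * outstanding.
 */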
void
vmem_destroy(vmem_t *vmp)
{
vmem_t *cur, **vmpp;
vmem_seg_t *seg0 = &vmp->vm_seg0;
vmem_seg_t *vsp, *anext;
size_t leaked;
int i;
mutex_enter(&vmem_list_lock);
vmpp = &vmem_list;
while ((cur = *vmpp) != vmp)
vmpp = &cur->vm_next;
*vmpp = vmp->vm_next;
mutex_exit(&vmem_list_lock);
for (i = 0; i < VMEM_NQCACHE_MAX; i++)
if (vmp->vm_qcache[i])
kmem_cache_destroy(vmp->vm_qcache[i]);
leaked = vmem_size(vmp, VMEM_ALLOC);
if (leaked != 0)
cmn_err(CE_WARN, "vmem_destroy('%s'): leaked %lu %s",
vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
"identifiers" : "bytes");
if (vmp->vm_hash_table != vmp->vm_hash0)
vmem_free(vmem_hash_arena, vmp->vm_hash_table,
(vmp->vm_hash_mask + 1) * sizeof (void *));
VMEM_DELETE(&vmp->vm_rotor, a);
for (vsp = seg0->vs_anext; vsp != seg0; vsp = anext) {
anext = vsp->vs_anext;
vmem_putseg_global(vsp);
}
while (vmp->vm_nsegfree > 0)
vmem_putseg_global(vmem_getseg(vmp));
kstat_delete(vmp->vm_ksp);
mutex_destroy(&vmp->vm_lock);
cv_destroy(&vmp->vm_cv);
vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
}
int vmem_rescale_minshift = 3;
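
/*
 * Resize vmp's hash table to keep the average lookup depth near 1.0.
 * Rescale only when the required size is well off the current one:
 * more than twice as large, or smaller than the current size shifted
 * right by vmem_rescale_minshift.
 */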
static void
vmem_hash_rescale(vmem_t *vmp)
{
vmem_seg_t **old_table, **new_table, *vsp;
size_t old_size, new_size, h, nseg;
nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
vmp->vm_kstat.vk_free.value.ui64);
new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
old_size = vmp->vm_hash_mask + 1;
if ((old_size >> vmem_rescale_minshift) <= new_size &&
new_size <= (old_size << 1))
return;
new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
VM_NOSLEEP);
if (new_table == NULL)
return;
bzero(new_table, new_size * sizeof (void *));
mutex_enter(&vmp->vm_lock);
old_size = vmp->vm_hash_mask + 1;
old_table = vmp->vm_hash_table;
vmp->vm_hash_mask = new_size - 1;
vmp->vm_hash_table = new_table;
vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
for (h = 0; h < old_size; h++) {
vsp = old_table[h];
while (vsp != NULL) {
uintptr_t addr = vsp->vs_start;
vmem_seg_t *next_vsp = vsp->vs_knext;
vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
vsp->vs_knext = *hash_bucket;
*hash_bucket = vsp;
vsp = next_vsp;
}
}
mutex_exit(&vmp->vm_lock);
if (old_table != vmp->vm_hash0)
vmem_free(vmem_hash_arena, old_table,
old_size * sizeof (void *));
}
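
/*
 * Periodically wake any threads waiting in each arena, so they can
 * retry their allocations (and issue another reap), and rescale each
 * arena's hash table; then reschedule ourselves.
 */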
void
vmem_update(void *dummy)
{
vmem_t *vmp;
mutex_enter(&vmem_list_lock);
for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
cv_broadcast(&vmp->vm_cv);
vmem_hash_rescale(vmp);
}
mutex_exit(&vmem_list_lock);
(void) timeout(vmem_update, dummy, vmem_update_interval * hz);
}
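
/*
 * Reap any quantum caches that may be part of this vmem arena.
 */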
void
vmem_qcache_reap(vmem_t *vmp)
{
int i;
for (i = 0; i < VMEM_NQCACHE_MAX; i++)
if (vmp->vm_qcache[i])
kmem_cache_reap_soon(vmp->vm_qcache[i]);
}
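
/*
 * Prime vmem for use: seed the global segment pool from vmem_seg0[],
 * create the heap arena, then build the metadata arenas (vmem_metadata,
 * vmem_seg, vmem_hash, vmem_vmem) on top of it.  The final loop retires
 * the statically allocated vmem0[] slots by allocating each slot's
 * address range from vmem_vmem_arena.
 */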
vmem_t *
vmem_init(const char *heap_name,
void *heap_start, size_t heap_size, size_t heap_quantum,
void *(*heap_alloc)(vmem_t *, size_t, int),
void (*heap_free)(vmem_t *, void *, size_t))
{
uint32_t id;
int nseg = VMEM_SEG_INITIAL;
vmem_t *heap;
while (--nseg >= 0)
vmem_putseg_global(&vmem_seg0[nseg]);
heap = vmem_create(heap_name,
heap_start, heap_size, heap_quantum,
NULL, NULL, NULL, 0,
VM_SLEEP | VMC_POPULATOR);
vmem_metadata_arena = vmem_create("vmem_metadata",
NULL, 0, heap_quantum,
vmem_alloc, vmem_free, heap, 8 * heap_quantum,
VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);
vmem_seg_arena = vmem_create("vmem_seg",
NULL, 0, heap_quantum,
heap_alloc, heap_free, vmem_metadata_arena, 0,
VM_SLEEP | VMC_POPULATOR);
vmem_hash_arena = vmem_create("vmem_hash",
NULL, 0, 8,
heap_alloc, heap_free, vmem_metadata_arena, 0,
VM_SLEEP);
vmem_vmem_arena = vmem_create("vmem_vmem",
vmem0, sizeof (vmem0), 1,
heap_alloc, heap_free, vmem_metadata_arena, 0,
VM_SLEEP);
for (id = 0; id < vmem_id; id++)
(void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
1, 0, 0, &vmem0[id], &vmem0[id + 1],
VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
return (heap);
}