#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <uvm/uvm.h>
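
/*
 * Global state: kernel_map manages the kernel's virtual address space and
 * points at the statically allocated kernel_map_store once uvm_km_init()
 * has run; no_constraint is a physical address range that accepts any page.
 */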
struct vm_map *kernel_map = NULL;
struct uvm_constraint_range no_constraint = { 0x0, (paddr_t)-1 };
static struct vm_map kernel_map_store;
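
/*
 * uvm_km_init: set up the kernel object and the kernel map.
 *
 * => the map manages the range [base, end); [base, start) is already in
 *    use at boot, so it is reserved with a fixed mapping to keep it from
 *    being handed out again.
 */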
void
uvm_km_init(vaddr_t base, vaddr_t start, vaddr_t end)
{
uao_init();
uvm.kernel_object = uao_create(VM_KERNEL_SPACE_SIZE, UAO_FLAG_KERNOBJ);
uvm_map_setup(&kernel_map_store, pmap_kernel(), base, end,
#ifdef KVA_GUARDPAGES
VM_MAP_PAGEABLE | VM_MAP_GUARDPAGES
#else
VM_MAP_PAGEABLE
#endif
);
if (base != start && uvm_map(&kernel_map_store, &base, start - base,
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_INHERIT_NONE, MADV_RANDOM, UVM_FLAG_FIXED)) != 0)
panic("uvm_km_init: could not reserve space for kernel");
kernel_map = &kernel_map_store;
#ifndef __HAVE_PMAP_DIRECT
mtx_init(&uvm_km_pages.mtx, IPL_VM);
#endif
}
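
/*
 * uvm_km_suballoc: allocate virtual space in "map" (typically kernel_map)
 * and turn it into a submap.
 *
 * => if "fixed" is true, *min specifies where the submap must start
 * => if "submap" is non-NULL it is initialized in place, otherwise a new
 *    map structure is allocated
 * => returns the submap; panics on failure
 */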
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
int flags, boolean_t fixed, struct vm_map *submap)
{
int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
size = round_page(size);
if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_INHERIT_NONE, MADV_RANDOM, mapflags)) != 0) {
panic("uvm_km_suballoc: unable to allocate space in parent map");
}
*max = *min + size;
pmap_reference(vm_map_pmap(map));
if (submap == NULL) {
submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
if (submap == NULL)
panic("uvm_km_suballoc: unable to create submap");
} else {
uvm_map_setup(submap, vm_map_pmap(map), *min, *max, flags);
}
if (uvm_map_submap(map, *min, *max, submap) != 0)
panic("uvm_km_suballoc: submap allocation failed");
return (submap);
}
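
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object (aobj).
 *
 * => caller must hold the object's lock for writing
 * => used when unmapping anonymous kernel memory: the backing pages and
 *    any swap slots are thrown away
 * => busy pages are waited on and their offset retried
 */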
void
uvm_km_pgremove(struct uvm_object *uobj, vaddr_t startva, vaddr_t endva)
{
const voff_t start = startva - vm_map_min(kernel_map);
const voff_t end = endva - vm_map_min(kernel_map);
struct vm_page *pp;
voff_t curoff;
int slot;
int swpgonlydelta = 0;
KASSERT(UVM_OBJ_IS_AOBJ(uobj));
KASSERT(rw_write_held(uobj->vmobjlock));
pmap_remove(pmap_kernel(), startva, endva);
for (curoff = start; curoff < end; curoff += PAGE_SIZE) {
pp = uvm_pagelookup(uobj, curoff);
if (pp && pp->pg_flags & PG_BUSY) {
uvm_pagewait(pp, uobj->vmobjlock, "km_pgrm");
rw_enter(uobj->vmobjlock, RW_WRITE);
curoff -= PAGE_SIZE;
continue;
}
slot = uao_dropswap(uobj, curoff >> PAGE_SHIFT);
if (pp != NULL) {
uvm_pagefree(pp);
} else if (slot != 0) {
swpgonlydelta++;
}
}
if (swpgonlydelta > 0) {
KASSERT(uvmexp.swpgonly >= swpgonlydelta);
atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
}
}
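
/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for wired
 * mappings entered with pmap_kenter_pa(): the physical pages are found
 * through pmap_extract() and freed directly, then the mappings are
 * removed with pmap_kremove().
 */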
void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
struct vm_page *pg;
vaddr_t va;
paddr_t pa;
for (va = start; va < end; va += PAGE_SIZE) {
if (!pmap_extract(pmap_kernel(), va, &pa))
continue;
pg = PHYS_TO_VM_PAGE(pa);
if (pg == NULL)
panic("uvm_km_pgremove_intrsafe: no page");
uvm_pagefree(pg);
}
pmap_kremove(start, end - start);
}
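
/*
 * With a direct map the kernel can reach any physical page without
 * setting up a kernel virtual mapping, so no pre-mapped page pool or
 * refill thread is needed and the init hooks below are empty.
 */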
#if defined(__HAVE_PMAP_DIRECT)
void
uvm_km_page_init(void)
{
}
void
uvm_km_page_lateinit(void)
{
}
#else
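
/*
 * Without a direct map, single-page km_alloc() requests are served from a
 * pool of pre-mapped kernel virtual pages (uvm_km_pages).  A kernel thread
 * refills the pool when it drops below the low watermark and recycles
 * pages that km_free() queues on the freelist.
 */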
struct uvm_km_pages uvm_km_pages;
void uvm_km_createthread(void *);
void uvm_km_thread(void *);
struct uvm_km_free_page *uvm_km_doputpage(struct uvm_km_free_page *);
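
/*
 * uvm_km_page_init: compute the pool watermarks from physical memory size
 * and fill the pool with an initial batch of mapped pages at boot; the low
 * watermark is toned down afterwards if it ended up very high.
 */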
void
uvm_km_page_init(void)
{
int lowat_min;
int i;
int len, bulk;
vaddr_t addr;
if (!uvm_km_pages.lowat) {
uvm_km_pages.lowat = physmem / 256;
lowat_min = physmem < atop(16 * 1024 * 1024) ? 32 : 128;
if (uvm_km_pages.lowat < lowat_min)
uvm_km_pages.lowat = lowat_min;
}
if (uvm_km_pages.lowat > UVM_KM_PAGES_LOWAT_MAX)
uvm_km_pages.lowat = UVM_KM_PAGES_LOWAT_MAX;
uvm_km_pages.hiwat = 4 * uvm_km_pages.lowat;
if (uvm_km_pages.hiwat > UVM_KM_PAGES_HIWAT_MAX)
uvm_km_pages.hiwat = UVM_KM_PAGES_HIWAT_MAX;
len = 0;
bulk = uvm_km_pages.hiwat;
while (len < uvm_km_pages.hiwat && bulk > 0) {
bulk = MIN(bulk, uvm_km_pages.hiwat - len);
addr = vm_map_min(kernel_map);
if (uvm_map(kernel_map, &addr, (vsize_t)bulk << PAGE_SHIFT,
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(PROT_READ | PROT_WRITE,
PROT_READ | PROT_WRITE, MAP_INHERIT_NONE,
MADV_RANDOM, UVM_FLAG_TRYLOCK)) != 0) {
bulk /= 2;
continue;
}
for (i = len; i < len + bulk; i++, addr += PAGE_SIZE)
uvm_km_pages.page[i] = addr;
len += bulk;
}
uvm_km_pages.free = len;
for (i = len; i < UVM_KM_PAGES_HIWAT_MAX; i++)
uvm_km_pages.page[i] = 0;
if (uvm_km_pages.lowat > 512)
uvm_km_pages.lowat = 512;
}
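
/*
 * uvm_km_page_lateinit: defer creation of the pool refill thread until
 * kernel threads can be created.
 */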
void
uvm_km_page_lateinit(void)
{
kthread_create_deferred(uvm_km_createthread, NULL);
}
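
/*
 * uvm_km_createthread: kthread_create_deferred() callback that starts the
 * refill thread.
 */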
void
uvm_km_createthread(void *arg)
{
kthread_create(uvm_km_thread, NULL, &uvm_km_pages.km_proc, "kmthread");
}
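
/*
 * uvm_km_thread: the pool refill thread.  It sleeps until the pool drops
 * below the low watermark or km_free() queues pages on the freelist, then
 * maps fresh pages in small batches and returns the queued free pages to
 * the system.
 */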
void
uvm_km_thread(void *arg)
{
vaddr_t pg[16];
int i;
int allocmore = 0;
int flags;
struct uvm_km_free_page *fp = NULL;
KERNEL_UNLOCK();
for (;;) {
mtx_enter(&uvm_km_pages.mtx);
if (uvm_km_pages.free >= uvm_km_pages.lowat &&
uvm_km_pages.freelist == NULL) {
msleep_nsec(&uvm_km_pages.km_proc, &uvm_km_pages.mtx,
PVM, "kmalloc", INFSLP);
}
allocmore = uvm_km_pages.free < uvm_km_pages.lowat;
fp = uvm_km_pages.freelist;
uvm_km_pages.freelist = NULL;
uvm_km_pages.freelistlen = 0;
mtx_leave(&uvm_km_pages.mtx);
if (allocmore) {
flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
PROT_READ | PROT_WRITE, MAP_INHERIT_NONE,
MADV_RANDOM, fp != NULL ? UVM_FLAG_TRYLOCK : 0);
memset(pg, 0, sizeof(pg));
for (i = 0; i < nitems(pg); i++) {
pg[i] = vm_map_min(kernel_map);
if (uvm_map(kernel_map, &pg[i], PAGE_SIZE,
NULL, UVM_UNKNOWN_OFFSET, 0, flags) != 0) {
pg[i] = 0;
break;
}
flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
PROT_READ | PROT_WRITE, MAP_INHERIT_NONE,
MADV_RANDOM, UVM_FLAG_TRYLOCK);
}
mtx_enter(&uvm_km_pages.mtx);
for (i = 0; i < nitems(pg); i++) {
if (uvm_km_pages.free ==
nitems(uvm_km_pages.page))
break;
else if (pg[i] != 0)
uvm_km_pages.page[uvm_km_pages.free++]
= pg[i];
}
wakeup(&uvm_km_pages.free);
mtx_leave(&uvm_km_pages.mtx);
for (; i < nitems(pg); i++) {
if (pg[i] != 0) {
uvm_unmap(kernel_map,
pg[i], pg[i] + PAGE_SIZE);
}
}
}
while (fp) {
fp = uvm_km_doputpage(fp);
}
}
}
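
/*
 * uvm_km_doputpage: dispose of one freelist entry.  The physical page is
 * always freed; its virtual address is put back into the pool if the pool
 * is below the high watermark, otherwise the mapping is torn down.
 * Returns the next entry on the freelist.
 */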
struct uvm_km_free_page *
uvm_km_doputpage(struct uvm_km_free_page *fp)
{
vaddr_t va = (vaddr_t)fp;
struct vm_page *pg;
int freeva = 1;
struct uvm_km_free_page *nextfp = fp->next;
pg = uvm_atopg(va);
pmap_kremove(va, PAGE_SIZE);
pmap_update(kernel_map->pmap);
mtx_enter(&uvm_km_pages.mtx);
if (uvm_km_pages.free < uvm_km_pages.hiwat) {
uvm_km_pages.page[uvm_km_pages.free++] = va;
freeva = 0;
}
mtx_leave(&uvm_km_pages.mtx);
if (freeva)
uvm_unmap(kernel_map, va, va + PAGE_SIZE);
uvm_pagefree(pg);
return (nextfp);
}
#endif
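
/*
 * km_alloc: allocate kernel memory.
 *
 * => sz must be a multiple of PAGE_SIZE
 * => kv selects the virtual address behaviour (which map, single page,
 *    alignment, whether to wait for va)
 * => kp selects the physical pages (constraint, alignment, zeroing,
 *    pageable vs. wired, or no pages at all)
 * => kd selects dynamic behaviour (waiting, trylock, preferred offset)
 * => returns NULL on failure when not waiting
 *
 * Illustrative use, with the modes defined at the bottom of this file:
 *
 *	void *buf = km_alloc(PAGE_SIZE, &kv_any, &kp_dma_zero, &kd_waitok);
 *	...
 *	km_free(buf, PAGE_SIZE, &kv_any, &kp_dma_zero);
 */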
void *
km_alloc(size_t sz, const struct kmem_va_mode *kv,
const struct kmem_pa_mode *kp, const struct kmem_dyn_mode *kd)
{
struct vm_map *map;
struct vm_page *pg;
struct pglist pgl;
int mapflags = 0;
vm_prot_t prot;
paddr_t pla_align;
int pla_flags;
int pla_maxseg;
vaddr_t va, sva = 0;
KASSERT(sz == round_page(sz));
TAILQ_INIT(&pgl);
if (kp->kp_nomem || kp->kp_pageable)
goto alloc_va;
pla_flags = kd->kd_waitok ? UVM_PLA_WAITOK : UVM_PLA_NOWAIT;
pla_flags |= UVM_PLA_TRYCONTIG;
if (kp->kp_zero)
pla_flags |= UVM_PLA_ZERO;
pla_align = kp->kp_align;
#ifdef __HAVE_PMAP_DIRECT
if (pla_align < kv->kv_align)
pla_align = kv->kv_align;
#endif
pla_maxseg = kp->kp_maxseg;
if (pla_maxseg == 0)
pla_maxseg = sz / PAGE_SIZE;
if (uvm_pglistalloc(sz, kp->kp_constraint->ucr_low,
kp->kp_constraint->ucr_high, pla_align, kp->kp_boundary,
&pgl, pla_maxseg, pla_flags)) {
return (NULL);
}
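/*
 * With a direct map, single-page or single-segment wired allocations can
 * be returned through the direct mapping without consuming kernel virtual
 * address space.
 */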
#ifdef __HAVE_PMAP_DIRECT
if (kv->kv_singlepage || kp->kp_maxseg == 1) {
while ((pg = TAILQ_FIRST(&pgl)) != NULL) {
TAILQ_REMOVE(&pgl, pg, pageq);
va = pmap_map_direct(pg);
if (sva == 0)
sva = va;
}
return ((void *)sva);
}
#endif
alloc_va:
prot = PROT_READ | PROT_WRITE;
if (kp->kp_pageable) {
KASSERT(kp->kp_object);
KASSERT(!kv->kv_singlepage);
} else {
KASSERT(kp->kp_object == NULL);
}
if (kv->kv_singlepage) {
KASSERT(sz == PAGE_SIZE);
#ifdef __HAVE_PMAP_DIRECT
panic("km_alloc: DIRECT single page");
#else
mtx_enter(&uvm_km_pages.mtx);
while (uvm_km_pages.free == 0) {
if (kd->kd_waitok == 0) {
mtx_leave(&uvm_km_pages.mtx);
uvm_pglistfree(&pgl);
return (NULL);
}
msleep_nsec(&uvm_km_pages.free, &uvm_km_pages.mtx,
PVM, "getpage", INFSLP);
}
va = uvm_km_pages.page[--uvm_km_pages.free];
if (uvm_km_pages.free < uvm_km_pages.lowat &&
curproc != uvm_km_pages.km_proc) {
if (kd->kd_slowdown)
*kd->kd_slowdown = 1;
wakeup(&uvm_km_pages.km_proc);
}
mtx_leave(&uvm_km_pages.mtx);
#endif
} else {
struct uvm_object *uobj = NULL;
if (kd->kd_trylock)
mapflags |= UVM_FLAG_TRYLOCK;
if (kp->kp_object)
uobj = *kp->kp_object;
try_map:
map = *kv->kv_map;
va = vm_map_min(map);
if (uvm_map(map, &va, sz, uobj, kd->kd_prefer,
kv->kv_align, UVM_MAPFLAG(prot, prot, MAP_INHERIT_NONE,
MADV_RANDOM, mapflags))) {
if (kv->kv_wait && kd->kd_waitok) {
tsleep_nsec(map, PVM, "km_allocva", INFSLP);
goto try_map;
}
uvm_pglistfree(&pgl);
return (NULL);
}
}
sva = va;
while ((pg = TAILQ_FIRST(&pgl)) != NULL) {
TAILQ_REMOVE(&pgl, pg, pageq);
if (kp->kp_pageable)
pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pg),
prot, prot | PMAP_WIRED);
else
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), prot);
va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
return ((void *)sva);
}
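
/*
 * km_free: release memory obtained from km_alloc().  The same kv/kp modes
 * that were used for the allocation must be passed in so the pages and the
 * virtual address range are released the way they were obtained.
 */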
void
km_free(void *v, size_t sz, const struct kmem_va_mode *kv,
const struct kmem_pa_mode *kp)
{
vaddr_t sva, eva, va;
struct vm_page *pg;
struct pglist pgl;
sva = (vaddr_t)v;
eva = sva + sz;
if (kp->kp_nomem)
goto free_va;
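/*
 * Direct-mapped and single-page allocations are handed back without
 * touching the kernel map: direct mappings are simply undone, and pool
 * pages are queued on the freelist for the refill thread to recycle.
 */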
#ifdef __HAVE_PMAP_DIRECT
if (kv->kv_singlepage || kp->kp_maxseg == 1) {
TAILQ_INIT(&pgl);
for (va = sva; va < eva; va += PAGE_SIZE) {
pg = pmap_unmap_direct(va);
TAILQ_INSERT_TAIL(&pgl, pg, pageq);
}
uvm_pglistfree(&pgl);
return;
}
#else
if (kv->kv_singlepage) {
struct uvm_km_free_page *fp = v;
mtx_enter(&uvm_km_pages.mtx);
fp->next = uvm_km_pages.freelist;
uvm_km_pages.freelist = fp;
if (uvm_km_pages.freelistlen++ > 16)
wakeup(&uvm_km_pages.km_proc);
mtx_leave(&uvm_km_pages.mtx);
return;
}
#endif
if (kp->kp_pageable) {
pmap_remove(pmap_kernel(), sva, eva);
pmap_update(pmap_kernel());
} else {
TAILQ_INIT(&pgl);
for (va = sva; va < eva; va += PAGE_SIZE) {
paddr_t pa;
if (!pmap_extract(pmap_kernel(), va, &pa))
continue;
pg = PHYS_TO_VM_PAGE(pa);
if (pg == NULL) {
panic("km_free: unmanaged page 0x%lx", pa);
}
TAILQ_INSERT_TAIL(&pgl, pg, pageq);
}
pmap_kremove(sva, sz);
pmap_update(pmap_kernel());
uvm_pglistfree(&pgl);
}
free_va:
uvm_unmap(*kv->kv_map, sva, eva);
if (kv->kv_wait)
wakeup(*kv->kv_map);
}
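
/*
 * Commonly used allocation modes exported to km_alloc()/km_free() callers:
 * kv_* describe the virtual address behaviour, kp_* the physical page
 * constraints, and kd_* the dynamic (wait/trylock) behaviour.
 */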
const struct kmem_va_mode kv_any = {
.kv_map = &kernel_map,
};
const struct kmem_va_mode kv_intrsafe = {
.kv_map = &kmem_map,
};
const struct kmem_va_mode kv_page = {
.kv_singlepage = 1
};
const struct kmem_pa_mode kp_dirty = {
.kp_constraint = &no_constraint
};
const struct kmem_pa_mode kp_dma = {
.kp_constraint = &dma_constraint
};
const struct kmem_pa_mode kp_dma_contig = {
.kp_constraint = &dma_constraint,
.kp_maxseg = 1
};
const struct kmem_pa_mode kp_dma_zero = {
.kp_constraint = &dma_constraint,
.kp_zero = 1
};
const struct kmem_pa_mode kp_zero = {
.kp_constraint = &no_constraint,
.kp_zero = 1
};
const struct kmem_pa_mode kp_pageable = {
.kp_object = &uvm.kernel_object,
.kp_pageable = 1
};
const struct kmem_pa_mode kp_none = {
.kp_nomem = 1
};
const struct kmem_dyn_mode kd_waitok = {
.kd_waitok = 1,
.kd_prefer = UVM_UNKNOWN_OFFSET
};
const struct kmem_dyn_mode kd_nowait = {
.kd_prefer = UVM_UNKNOWN_OFFSET
};
const struct kmem_dyn_mode kd_trylock = {
.kd_trylock = 1,
.kd_prefer = UVM_UNKNOWN_OFFSET
};