#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
/* Pool from which all vm_anon structures are allocated. */
struct pool uvm_anon_pool;
/*
 * uvm_anon_init: set up the anon pool at boot time.
 */
void
uvm_anon_init(void)
{
	int hiwat;

	pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, IPL_MPFLOOR,
	    PR_WAITOK, "anonpl", NULL);

	/* Cap idle anons kept in the pool at 1/16 of the free page count. */
	hiwat = atomic_load_sint(&uvmexp.free) / 16;
	pool_sethiwat(&uvm_anon_pool, hiwat);
}
/*
 * uvm_anon_init_percpu: enable per-CPU caching for the anon pool on
 * multiprocessor kernels; a no-op on uniprocessor builds.
 */
void
uvm_anon_init_percpu(void)
{
#if defined(MULTIPROCESSOR)
	pool_cache_init(&uvm_anon_pool);
#endif
}
/*
 * uvm_analloc: allocate a fresh anon with a single reference and no
 * lock, page, or swap slot attached.
 *
 * => the allocation does not sleep; returns NULL if the pool cannot
 *    satisfy it immediately.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon = pool_get(&uvm_anon_pool, PR_NOWAIT);

	if (anon == NULL)
		return NULL;

	anon->an_lock = NULL;
	anon->an_ref = 1;
	anon->an_page = NULL;
	anon->an_swslot = 0;
	return anon;
}
/*
 * uvm_anfree: free a single anon whose reference count has dropped to
 * zero, disposing of its resident page or swap slot as appropriate.
 *
 * => caller holds the anon's lock exclusively, or the anon was never
 *    locked (an_lock == NULL).
 * => if the resident page is busy, the anon is not freed here; it is
 *    handed off to uvm_anon_release() via PG_RELEASED instead.
 */
void
uvm_anfree(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	KASSERT(anon->an_lock == NULL || rw_write_held(anon->an_lock));
	KASSERT(anon->an_ref == 0);

	/*
	 * Dispose of the page, if the anon has one resident.
	 */
	if (pg != NULL) {
		/* A resident page implies the anon has a lock. */
		KASSERT(anon->an_lock != NULL);

		/*
		 * If the page is busy, mark it PG_RELEASED so that the
		 * current owner of the busy page finishes the teardown
		 * via uvm_anon_release().  Take an extra reference on
		 * the lock object so it stays alive until then.
		 */
		if ((pg->pg_flags & PG_BUSY) != 0) {
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			rw_obj_hold(anon->an_lock);
			return;
		}
		pmap_page_protect(pg, PROT_NONE);
		uvm_pagefree(pg);		/* bye bye */
	} else {
		if (anon->an_swslot != 0 && anon->an_swslot != SWSLOT_BAD) {
			/* This data is no longer only in swap. */
			KASSERT(atomic_load_sint(&uvmexp.swpgonly) > 0);
			atomic_dec_int(&uvmexp.swpgonly);
		}
	}

	/*
	 * Clear the lock pointer before dropping the swap slot;
	 * uvm_anon_dropswap()'s lock assertion is satisfied here by
	 * an_ref == 0 rather than by lock ownership.
	 */
	anon->an_lock = NULL;
	uvm_anon_dropswap(anon);

	KASSERT(anon->an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	pool_put(&uvm_anon_pool, anon);
}
/*
 * uvm_anwait: block until the anon pool can satisfy an allocation.
 * Implemented by performing a sleeping allocation and immediately
 * returning the anon to the pool.
 */
void
uvm_anwait(void)
{
	struct vm_anon *tmp;

	/* Sleep until an anon becomes available, then give it back. */
	tmp = pool_get(&uvm_anon_pool, PR_WAITOK);
	pool_put(&uvm_anon_pool, tmp);
}
/*
 * uvm_anon_pagein: fetch an anon's page from backing store, free the
 * swap slot, and deactivate the page so the pagedaemon can reclaim it.
 *
 * => caller holds the amap lock (== anon->an_lock) exclusively; the
 *    lock is released before returning on the success path, and is
 *    already released by uvmfault_anonget() on its error paths.
 * => always returns FALSE.  NOTE(review): the boolean_t return looks
 *    vestigial — confirm callers ignore the value.
 */
boolean_t
uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
{
	struct vm_page *pg;
	int rv;

	KASSERT(rw_write_held(anon->an_lock));
	KASSERT(anon->an_lock == amap->am_lock);

	rv = uvmfault_anonget(NULL, amap, anon);

	switch (rv) {
	case 0:
		/* Success: the anon is still locked. */
		KASSERT(rw_write_held(anon->an_lock));
		break;
	case EACCES:
	case ERESTART:
		/*
		 * Nothing more to do on these errors; the lock was
		 * dropped by uvmfault_anonget().  NOTE(review): verify
		 * this error-code set against what uvmfault_anonget()
		 * can actually return.
		 */
		return FALSE;
	case ENOLCK:
	default:
#ifdef DIAGNOSTIC
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#else
		return FALSE;
#endif
	}

	/*
	 * The page is now resident: release the swap slot so the swap
	 * space can be reclaimed.
	 */
	pg = anon->an_page;
	if (anon->an_swslot > 0) {
		uvm_swap_free(anon->an_swslot, 1);
	}
	anon->an_swslot = 0;

	/*
	 * Mark the page dirty (the swap copy is gone) and deactivate
	 * it so the pagedaemon can reclaim the memory.
	 */
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	uvm_pagedeactivate(pg);

	rw_exit(anon->an_lock);
	return FALSE;
}
/*
 * uvm_anon_dropswap: release the swap slot held by an anon, if any.
 *
 * => caller holds the anon's lock, unless the anon is already dead
 *    (an_ref == 0).
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	KASSERT(anon->an_ref == 0 || rw_lock_held(anon->an_lock));

	if (anon->an_swslot != 0) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
	}
}
/*
 * uvm_anon_release: finish freeing a dead anon whose page was busy
 * (and marked PG_RELEASED by uvm_anfree()) when its last reference
 * went away.  Called by the owner of the busy page once it is done.
 *
 * => caller holds the anon's lock exclusively; the lock is released
 *    and the extra lock reference taken in uvm_anfree() is dropped
 *    here.
 */
void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;
	struct rwlock *lock;

	KASSERT(rw_write_held(anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->pg_flags & PG_RELEASED) != 0);
	KASSERT((pg->pg_flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(anon->an_ref == 0);

	pmap_page_protect(pg, PROT_NONE);
	uvm_pagefree(pg);
	KASSERT(anon->an_page == NULL);

	/*
	 * Save the lock pointer: the anon is returned to the pool
	 * below, but the lock object must outlive it so we can unlock
	 * and drop the reference taken in uvm_anfree().
	 */
	lock = anon->an_lock;
	uvm_anon_dropswap(anon);
	pool_put(&uvm_anon_pool, anon);
	rw_exit(lock);
	rw_obj_free(lock);
}