#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/atomic.h>

#ifdef HIBERNATE
#include <sys/hibernate.h>
#endif

#include <uvm/uvm.h>

#include "drm.h"

#if NDRM > 0
extern unsigned long drmbackoff(long);
#endif
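
/*
 * UVMPD_NUMDIRTYREACTS: number of dirty pages the pagedaemon will
 * reactivate in one inactive-queue scan before giving up, e.g. when
 * swap space is exhausted.
 */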
#define UVMPD_NUMDIRTYREACTS 16

/*
 * local prototypes
 */
struct rwlock	*uvmpd_trylockowner(struct vm_page *);
void		 uvmpd_scan(struct uvm_constraint_range *, int, int);
int		 uvmpd_scan_inactive(struct uvm_constraint_range *, int);
void		 uvmpd_tune(void);
void		 uvmpd_drop(struct pglist *);
int		 uvmpd_dropswap(struct vm_page *);

/* number of processes currently sleeping in uvm_wait() */
volatile int uvm_wait_counter;
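
/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages.
 *
 * => should be called with all locks released.
 */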
void
uvm_wait(const char *wmsg)
{
	uint64_t timo = INFSLP;

#ifdef DIAGNOSTIC
	if (curproc == &proc0)
		panic("%s: cannot sleep for memory during boot", __func__);
#endif

	/*
	 * check for the page daemon trying to sleep (waiting for itself).
	 */
	if (curproc == uvm.pagedaemon_proc) {
		printf("uvm_wait emergency bufbackoff\n");
		if (bufbackoff(NULL, 4) >= 4)
			return;
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory, but nobody else can
		 * free memory for it.  sleep briefly and hope for the best.
		 */
		printf("pagedaemon: deadlock detected!\n");
		timo = MSEC_TO_NSEC(125);	/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_lock_fpageq();
	atomic_inc_int(&uvm_wait_counter);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	msleep_nsec(&uvmexp.free, &uvm.fpageqlock, PVM | PNORELOCK, wmsg, timo);
	atomic_dec_int(&uvm_wait_counter);
}
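
/*
 * uvmpd_tune: tune paging parameters (freemin, freetarg, wiredmax)
 * based on the number of physical pages.
 */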
void
uvmpd_tune(void)
{
	int val;

	val = uvmexp.npages / 30;

	/* at least 16 KB worth of pages */
	val = max(val, (16*1024) >> PAGE_SHIFT);

	/* make sure there is always more than the kernel reserve free */
	if (val < uvmexp.reserve_kernel + 1)
		val = uvmexp.reserve_kernel + 1;
	uvmexp.freemin = val;

	/* the free target is a third larger than freemin */
	val = (uvmexp.freemin * 4) / 3;
	if (val <= uvmexp.freemin)
		val = uvmexp.freemin + 1;
	uvmexp.freetarg = val;

	uvmexp.wiredmax = uvmexp.npages / 3;
}
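
/*
 * nowait_pma: dummy pmalloc request initialized in uvm_pageout();
 * presumably stands in for failed UVM_PLA_NOWAIT allocations, asking
 * the daemon for a small amount of DMA-reachable memory.
 */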
struct uvm_pmalloc nowait_pma;
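
/*
 * uvm_pageout: the main loop for the pagedaemon.
 *
 * => wakes up when the free page count drops below the target or a
 *    constrained allocation is queued, pushes back the buffer cache
 *    and DRM, then scans the page queues for pages to reclaim.
 */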
void
uvm_pageout(void *arg)
{
	struct uvm_constraint_range constraint;
	struct uvm_pmalloc *pma;
	int shortage, inactive_shortage;

	/* ensure correct priority and set paging parameters */
	uvm.pagedaemon_proc = curproc;
	(void) spl0();
	uvmpd_tune();

	nowait_pma.pm_constraint = dma_constraint;
	nowait_pma.pm_size = (16 << PAGE_SHIFT);	/* XXX */
	nowait_pma.pm_flags = 0;

	for (;;) {
		long size = 0;

		uvm_lock_fpageq();
		/*
		 * sleep until we are needed: a constrained allocation is
		 * queued or somebody is waiting in uvm_wait().
		 */
		if (TAILQ_EMPTY(&uvm.pmr_control.allocs) &&
		    uvm_wait_counter == 0) {
			msleep_nsec(&uvm.pagedaemon, &uvm.fpageqlock, PVM,
			    "pgdaemon", INFSLP);
			atomic_inc_int(&uvmexp.pdwoke);
		}
		if ((pma = TAILQ_FIRST(&uvm.pmr_control.allocs)) != NULL) {
			size = pma->pm_size >> PAGE_SHIFT;
			constraint = pma->pm_constraint;
		} else {
			constraint = no_constraint;
		}
		/* how many pages do we need to free during this round? */
		shortage = uvmexp.freetarg - atomic_load_sint(&uvmexp.free) +
		    BUFPAGES_DEFICIT;
		uvm_unlock_fpageq();

		/*
		 * aim for a third of all pages on the inactive queue, but
		 * always keep the inactive target above the free target.
		 */
		uvm_lock_pageq();
		atomic_store_int(&uvmexp.inactarg,
		    (atomic_load_sint(&uvmexp.active) +
		    atomic_load_sint(&uvmexp.inactive)) / 3);
		if (atomic_load_sint(&uvmexp.inactarg) <= uvmexp.freetarg) {
			atomic_store_int(&uvmexp.inactarg, uvmexp.freetarg + 1);
		}
		inactive_shortage =
		    atomic_load_sint(&uvmexp.inactarg) -
		    atomic_load_sint(&uvmexp.inactive) - BUFPAGES_INACT;
		uvm_unlock_pageq();

		if (shortage > 0)
			size += shortage;
		if (size == 0)
			size = 16;	/* XXX arbitrary minimum */

		/*
		 * reclaim from the buffer cache, the DRM subsystem and the
		 * per-CPU page caches before touching managed pages.
		 */
		shortage -= bufbackoff(&constraint, size * 2);
#if NDRM > 0
		if (shortage > 0)
			shortage -= drmbackoff(size * 2);
#endif
		if (shortage > 0)
			shortage -= uvm_pmr_cache_drain();
		if (shortage < 0)
			shortage = 0;

		/* scan if needed */
		uvm_lock_pageq();
		if (pma || shortage > 0 || inactive_shortage > 0)
			uvmpd_scan(&constraint, shortage, inactive_shortage);

		/*
		 * if there is any free memory to be had, wake up any
		 * waiters.
		 */
		uvm_lock_fpageq();
		if (atomic_load_sint(&uvmexp.free) > uvmexp.reserve_kernel ||
		    atomic_load_sint(&uvmexp.paging) == 0)
			wakeup(&uvmexp.free);
		uvm_unlock_fpageq();
		uvm_unlock_pageq();

		sched_pause(yield);
	}
	/*NOTREACHED*/
}
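
/*
 * uvm_aiodone_daemon: main loop for the aiodone daemon, which finishes
 * asynchronous page i/o by calling each buffer's b_iodone routine with
 * the kernel lock held.
 */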
void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;

	uvm.aiodoned_proc = curproc;
	KERNEL_UNLOCK();

	for (;;) {
		/* wait for a batch of completed i/o */
		mtx_enter(&uvm.aiodoned_lock);
		while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL)
			msleep_nsec(&uvm.aiodoned, &uvm.aiodoned_lock,
			    PVM, "aiodoned", INFSLP);

		/* take the whole list and process each buffer */
		TAILQ_INIT(&uvm.aio_done);
		mtx_leave(&uvm.aiodoned_lock);
		KERNEL_LOCK();
		free = atomic_load_sint(&uvmexp.free);
		while (bp != NULL) {
			if (bp->b_flags & B_PDAEMON) {
				atomic_sub_int(&uvmexp.paging,
				    bp->b_bufsize >> PAGE_SHIFT);
			}
			nbp = TAILQ_NEXT(bp, b_freelist);
			s = splbio();	/* b_iodone must be called at splbio */
			(*bp->b_iodone)(bp);
			splx(s);
			bp = nbp;

			sched_pause(yield);
		}
		KERNEL_UNLOCK();

		/* wake up the pagedaemon if memory is still low */
		uvm_lock_fpageq();
		wakeup(free <= uvmexp.reserve_kernel ? &uvm.pagedaemon :
		    &uvmexp.free);
		uvm_unlock_fpageq();
	}
}
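
/*
 * uvmpd_trylockowner: grab the lock of the page's owner (object or
 * anon) without sleeping.
 *
 * => returns the lock on success, NULL if it is currently contended.
 */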
struct rwlock *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	struct rwlock *slock;

	if (uobj != NULL) {
		slock = uobj->vmobjlock;
	} else {
		struct vm_anon *anon = pg->uanon;

		KASSERT(anon != NULL);
		slock = anon->an_lock;
	}

	/* do not sleep: the page queues are locked by our caller */
	if (rw_enter(slock, RW_WRITE|RW_NOSLEEP)) {
		return NULL;
	}

	return slock;
}
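
/*
 * struct swapcluster: batches swap-backed pages so that they can be
 * written to contiguous swap slots in a single i/o.
 *
 * Sketch of the intended use, as in uvmpd_scan_inactive() below:
 *
 *	swapcluster_init(&swc);
 *	...
 *	if (swapcluster_allocslots(&swc) == 0 &&
 *	    swapcluster_add(&swc, pg) == 0 &&
 *	    swc.swc_nused == swc.swc_nallocated)
 *		(void)swapcluster_flush(&swc);
 */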
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[SWCLUSTPAGES];
};
void
swapcluster_init(struct swapcluster *swc)
{
	swc->swc_slot = 0;
	swc->swc_nused = 0;
}
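
/*
 * swapcluster_allocslots: reserve a run of contiguous swap slots for
 * the cluster; does nothing if slots are already allocated.
 */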
int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot, npages;

	if (swc->swc_slot != 0)
		return 0;

	/*
	 * allocate a cluster of contiguous swap slots, accepting a
	 * shorter run if a full one is not available (lessok == TRUE).
	 */
	npages = SWCLUSTPAGES;
	slot = uvm_swap_alloc(&npages, TRUE);
	if (slot == 0)
		return ENOMEM;
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}
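
/*
 * swapcluster_add: assign the next free slot of the cluster to `pg'
 * and record that slot in the page's anon or object.
 *
 * => the owner of `pg' must be write-locked by the caller.
 */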
int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pg_flags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		/* anonymous memory: record the slot in the anon */
		KASSERT(rw_write_held(pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		/* aobj memory: record the slot in the object's swap map */
		KASSERT(rw_write_held(uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1)
			return ENOMEM;
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}
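
/*
 * swapcluster_flush: free any unused slots at the tail of the cluster,
 * start the pageout of the clustered pages and reset the cluster.
 */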
int
swapcluster_flush(struct swapcluster *swc)
{
	int slot, nused, nallocated;
	int result;

	if (swc->swc_slot == 0)
		return 0;
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few unused swap
	 * blocks.  if so, free them now.
	 */
	if (nused < nallocated)
		uvm_swap_free(slot + nused, nallocated - nused);

	/* now start the pageout */
	atomic_inc_int(&uvmexp.pdpageouts);
	result = uvm_swap_put(slot, swc->swc_pages, nused, 0);

	/* if the i/o was not started, release the pages and slots */
	if (result != VM_PAGER_PEND) {
		KASSERT(result == VM_PAGER_AGAIN);
		uvm_swap_dropcluster(swc->swc_pages, nused, ENOMEM);
		uvm_swap_free(slot, nused);
	}

	/* zero swapcluster */
	swapcluster_init(swc);

	return result;
}
static inline int
swapcluster_nused(struct swapcluster *swc)
{
	return swc->swc_nused;
}
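
/*
 * uvmpd_dropswap: free any swap space allocated to this page.
 *
 * => called with the owner locked.
 * => returns 1 if some swap was freed, 0 otherwise.
 */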
int
uvmpd_dropswap(struct vm_page *pg)
{
	struct vm_anon *anon = pg->uanon;
	int slot, result = 0;

	if ((pg->pg_flags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		result = 1;
	} else if (pg->pg_flags & PQ_AOBJ) {
		slot = uao_dropswap(pg->uobject, pg->offset >> PAGE_SHIFT);
		if (slot)
			result = 1;
	}

	return result;
}
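
/*
 * uvmpd_match_constraint: does the physical address of the page fall
 * inside the given constraint range?
 */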
static inline int
uvmpd_match_constraint(struct vm_page *p,
    struct uvm_constraint_range *constraint)
{
	paddr_t paddr;

	paddr = atop(VM_PAGE_TO_PHYS(p));
	if (paddr >= constraint->ucr_low && paddr < constraint->ucr_high)
		return 1;

	return 0;
}
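
/*
 * uvmpd_iterator: return the page following the PQ_ITER marker `iter'
 * and move the marker right after that page.  The marker keeps our
 * position on the queue stable across the points where the page queue
 * lock is dropped; other markers are skipped.
 *
 * Usage pattern (see uvmpd_scan_inactive() and uvmpd_scan()):
 *
 *	TAILQ_INSERT_AFTER(pglst, p, &iter, pageq);
 *	for (; p != NULL; p = uvmpd_iterator(pglst, p, &iter))
 *		...
 *	TAILQ_REMOVE(pglst, &iter, pageq);
 */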
struct vm_page *
uvmpd_iterator(struct pglist *pglst, struct vm_page *p, struct vm_page *iter)
{
	struct vm_page *nextpg = NULL;

	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);

	if (p == NULL)
		return NULL;

	/*
	 * skip over other iteration markers: advance from the page found
	 * so far, not from our own marker, otherwise a neighboring
	 * PQ_ITER page would make this loop spin forever.
	 */
	nextpg = iter;
	do {
		nextpg = TAILQ_NEXT(nextpg, pageq);
	} while (nextpg && (nextpg->pg_flags & PQ_ITER));

	/* move our marker right after the page we are returning */
	if (nextpg) {
		TAILQ_REMOVE(pglst, iter, pageq);
		TAILQ_INSERT_AFTER(pglst, nextpg, iter, pageq);
	}

	return nextpg;
}
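
/*
 * uvmpd_scan_inactive: scan the inactive queue for pages to reclaim.
 *
 * => called with the page queues locked; the lock may be dropped and
 *    reacquired during the scan.
 * => clean pages are freed immediately; dirty swap-backed pages are
 *    clustered and written to swap, other dirty pages are pushed
 *    through their pager.
 * => returns the number of pages freed.
 */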
int
uvmpd_scan_inactive(struct uvm_constraint_range *constraint, int shortage)
{
	struct pglist *pglst = &uvm.page_inactive;
	int result, freed = 0;
	struct vm_page *p, iter = { .pg_flags = PQ_ITER };
	struct uvm_object *uobj;
	struct vm_page *pps[SWCLUSTPAGES], **ppsp;
	int npages;
	struct swapcluster swc;
	struct rwlock *slock;
	struct vm_anon *anon;
	boolean_t swap_backed;
	int dirtyreacts;

	swapcluster_init(&swc);
	dirtyreacts = 0;

	/*
	 * find the first page of the inactive queue that matches the
	 * constraint and put our iteration marker right after it.
	 */
	p = NULL;
	TAILQ_FOREACH(p, pglst, pageq) {
		if (uvmpd_match_constraint(p, constraint))
			break;
	}
	if (p == NULL)
		return 0;
	TAILQ_INSERT_AFTER(pglst, p, &iter, pageq);

	for (; p != NULL; p = uvmpd_iterator(pglst, p, &iter)) {
		/*
		 * stop once enough pageouts are in flight (or freed) to
		 * cover the shortage, or once we keep hitting dirty pages
		 * while swap is full.
		 */
		if (atomic_load_sint(&uvmexp.paging) +
		    swapcluster_nused(&swc) >= (shortage - freed) ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			break;
		}
		atomic_inc_int(&uvmexp.pdscans);

		/*
		 * if enough pageouts are already pending, only bother
		 * with pages inside the requested physical range.
		 */
		if (atomic_load_sint(&uvmexp.paging) >= (shortage - freed) &&
		    !uvmpd_match_constraint(p, constraint))
			continue;

		anon = p->uanon;
		uobj = p->uobject;

		/*
		 * lock the page's owner without sleeping and skip the
		 * page if somebody else holds the lock.
		 */
		slock = uvmpd_trylockowner(p);
		if (slock == NULL) {
			continue;
		}

		/* move referenced pages back to the active queue */
		if (pmap_is_referenced(p)) {
			uvm_unlock_pageq();
			uvm_pageactivate(p);
			rw_exit(slock);
			uvm_lock_pageq();
			atomic_inc_int(&uvmexp.pdreact);
			continue;
		}

		/* skip pages that are busy with another operation */
		if (p->pg_flags & PG_BUSY) {
			rw_exit(slock);
			atomic_inc_int(&uvmexp.pdbusy);
			continue;
		}

		if (uobj != NULL) {
			atomic_inc_int(&uvmexp.pdobscan);
		} else {
			KASSERT(anon != NULL);
			atomic_inc_int(&uvmexp.pdanscan);
		}

		/*
		 * the page is clean: reclaim it without doing any i/o.
		 */
		if (p->pg_flags & PG_CLEAN) {
			if (p->pg_flags & PQ_SWAPBACKED) {
				/* this page now lives only in swap */
				atomic_inc_int(&uvmexp.swpgonly);
			}

			/* zap all mappings before freeing the page */
			pmap_page_protect(p, PROT_NONE);
			if (p->pg_flags & (PQ_ACTIVE|PQ_INACTIVE))
				uvm_pagedequeue(p);
			uvm_pagefree(p);
			freed++;

			if (anon) {
				/*
				 * an anonymous page can only be clean
				 * if it has backing store assigned.
				 */
				KASSERT(anon->an_swslot != 0);
				/* remove from object */
				anon->an_page = NULL;
			}
			rw_exit(slock);
			continue;
		}

		/*
		 * the page is dirty: do not start more pageouts than
		 * needed to cover the remaining shortage.
		 */
		if (atomic_load_sint(&uvmexp.paging) > (shortage - freed)) {
			rw_exit(slock);
			continue;
		}

		/*
		 * if swap is completely full, reactivate the page so that
		 * all pages eventually cycle through the inactive queue,
		 * and remember that we could not clean it.
		 */
		if ((p->pg_flags & PQ_SWAPBACKED) && uvm_swapisfull()) {
			dirtyreacts++;
			uvm_unlock_pageq();
			uvm_pageactivate(p);
			rw_exit(slock);
			uvm_lock_pageq();
			continue;
		}

		/*
		 * free any swap space already allocated to the page since
		 * it will be rewritten with its new contents.
		 */
		if ((p->pg_flags & PQ_SWAPBACKED) && uvm_swapisfilled())
			uvmpd_dropswap(p);

		/*
		 * mark the dirty page busy (it now belongs to the
		 * pagedaemon) and write-protect it so it cannot be
		 * modified during the pageout.
		 */
		swap_backed = ((p->pg_flags & PQ_SWAPBACKED) != 0);
		atomic_setbits_int(&p->pg_flags, PG_BUSY);
		UVM_PAGE_OWN(p, "scan_inactive");
		pmap_page_protect(p, PROT_READ);
		atomic_inc_int(&uvmexp.pgswapout);

		/*
		 * for swap-backed pages, add the page to the cluster; if
		 * no swap slots are available, reactivate it instead.
		 */
		if (swap_backed) {
			uvmpd_dropswap(p);
			if (swapcluster_allocslots(&swc)) {
				atomic_clearbits_int(&p->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(p, NULL);
				dirtyreacts++;
				uvm_unlock_pageq();
				uvm_pageactivate(p);
				rw_exit(slock);
				uvm_lock_pageq();
				continue;
			}
			if (swapcluster_add(&swc, p)) {
				atomic_clearbits_int(&p->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(p, NULL);
				dirtyreacts++;
				uvm_unlock_pageq();
				uvm_pageactivate(p);
				rw_exit(slock);
				uvm_lock_pageq();
				continue;
			}
			rw_exit(slock);

			/* cluster not full yet: keep gathering pages */
			if (swc.swc_nused < swc.swc_nallocated)
				continue;
		}

		/*
		 * start pageout: flush the full swap cluster, or push the
		 * object page through its pager.
		 */
		atomic_inc_int(&uvmexp.pdpageouts);
		if (swap_backed) {
			uvm_unlock_pageq();
			npages = swc.swc_nused;
			result = swapcluster_flush(&swc);
		} else {
			ppsp = pps;
			npages = nitems(pps);
			/* uvm_pager_put drops the page queue lock itself */
			result = uvm_pager_put(uobj, p, &ppsp, &npages,
			    PGO_ALLPAGES|PGO_PDFREECLUST, 0, 0);
			rw_exit(slock);
		}
		uvm_lock_pageq();

		/* the i/o was started asynchronously: account for it */
		if (result == VM_PAGER_PEND) {
			atomic_add_int(&uvmexp.paging, npages);
			atomic_inc_int(&uvmexp.pdpending);
		}
	}
	TAILQ_REMOVE(pglst, &iter, pageq);

	/* flush whatever is left in the swap cluster */
	if (swc.swc_slot > 0) {
		uvm_unlock_pageq();
		npages = swc.swc_nused;
		result = swapcluster_flush(&swc);
		uvm_lock_pageq();

		if (result == VM_PAGER_PEND) {
			atomic_add_int(&uvmexp.paging, npages);
			atomic_inc_int(&uvmexp.pdpending);
		}
	}

	return freed;
}
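
/*
 * uvmpd_scan: scan the page queues to meet the free page target.
 *
 * => called with the page queues locked.
 * => scans the inactive queue first, then reclaims swap space and
 *    deactivates pages from the active queue as needed.
 */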
void
uvmpd_scan(struct uvm_constraint_range *constraint, int shortage,
    int inactive_shortage)
{
	int swap_shortage, pages_freed;
	struct pglist *pglst = &uvm.page_active;
	struct vm_page *p, iter = { .pg_flags = PQ_ITER };
	struct rwlock *slock;

	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);

	atomic_inc_int(&uvmexp.pdrevs);

	/*
	 * scan the inactive queue first, it is the cheapest place to
	 * free pages from.
	 */
	pages_freed = uvmpd_scan_inactive(constraint, shortage);
	atomic_add_int(&uvmexp.pdfreed, pages_freed);
	shortage -= pages_freed;

	/*
	 * detect if we are not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */
	swap_shortage = 0;
	if ((shortage > 0) && uvm_swapisfilled() && !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = shortage;
	}

	if ((p = TAILQ_FIRST(pglst)) == NULL)
		return;
	TAILQ_INSERT_AFTER(pglst, p, &iter, pageq);
	for (; p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	    p = uvmpd_iterator(pglst, p, &iter)) {
		/* quick check without the owner lock */
		if (p->pg_flags & PG_BUSY) {
			continue;
		}

		/*
		 * when deactivating pages for memory (not swap) only
		 * bother with pages inside the requested physical range.
		 */
		if (inactive_shortage > 0 && swap_shortage == 0 &&
		    !uvmpd_match_constraint(p, constraint))
			continue;

		slock = uvmpd_trylockowner(p);
		if (slock == NULL) {
			continue;
		}

		/* recheck now that the owner is locked */
		if ((p->pg_flags & PG_BUSY) != 0) {
			rw_exit(slock);
			continue;
		}

		/*
		 * if there is a swap shortage, free the swap space kept
		 * for this page and mark it dirty so it is paged out
		 * again if needed.
		 */
		if (swap_shortage > 0) {
			if (uvmpd_dropswap(p)) {
				atomic_clearbits_int(&p->pg_flags, PG_CLEAN);
				swap_shortage--;
			}
		}

		/*
		 * if there is an inactive shortage, deactivate the page.
		 */
		if (inactive_shortage > 0) {
			uvm_unlock_pageq();
			uvm_pagedeactivate(p);
			uvm_lock_pageq();
			atomic_inc_int(&uvmexp.pddeact);
			inactive_shortage--;
		}
		rw_exit(slock);
	}
	TAILQ_REMOVE(pglst, &iter, pageq);
}

#ifdef HIBERNATE
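
/*
 * uvmpd_drop: drop clean pages from a page queue so that as much
 * memory as possible is free when entering hibernation.
 */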
void
uvmpd_drop(struct pglist *pglst)
{
	struct vm_page *p, *nextpg;

	for (p = TAILQ_FIRST(pglst); p != NULL; p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);

		/* only consider clean, unbusy object pages */
		if (p->pg_flags & PQ_ANON || p->uobject == NULL)
			continue;
		if (p->pg_flags & PG_BUSY)
			continue;

		if (p->pg_flags & PG_CLEAN) {
			struct uvm_object *uobj = p->uobject;

			rw_enter(uobj->vmobjlock, RW_WRITE);

			/*
			 * we now have the object locked.  if the page is
			 * still clean we can free it and continue.
			 */
			if (p->pg_flags & PG_CLEAN) {
				if (p->pg_flags & PQ_SWAPBACKED) {
					/* this page now lives only in swap */
					atomic_inc_int(&uvmexp.swpgonly);
				}

				/* zap all mappings before freeing */
				pmap_page_protect(p, PROT_NONE);
				uvm_pagefree(p);
			}
			rw_exit(uobj->vmobjlock);
		}
	}
}
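
/*
 * uvmpd_hibernate: free clean pages from both page queues.
 */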
void
uvmpd_hibernate(void)
{
	uvmpd_drop(&uvm.page_inactive);
	uvmpd_drop(&uvm.page_active);
}

#endif /* HIBERNATE */