#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/stdint.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
#define UAO_SWHASH_ELT_TAG(idx) ((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))
#define UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])
#define UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)
#define UAO_SWHASH_HASH(aobj, idx) \
(&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
& (aobj)->u_swhashmask)])
#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(aobj) \
((aobj)->u_pages > UAO_SWHASH_THRESHOLD)
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(pages) \
(min((pages) >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
/*
 * uao_swhash_elt: when the hash table is in use, this structure
 * describes one cluster of UAO_SWHASH_CLUSTER_SIZE swap slots.
 */
struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* bucket chain linkage */
	voff_t tag;				/* cluster tag (page index base) */
	int count;				/* number of non-zero slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the swap slots */
};

/* uao_swhash: a bucket of the swap slot hash table. */
LIST_HEAD(uao_swhash, uao_swhash_elt);

/* Pool from which uao_swhash_elt structures are allocated. */
struct pool uao_swhash_elt_pool;
/*
 * uvm_aobj: the anonymous UVM object.  Swap slots live either in a
 * flat array (u_swslots) or in a hash table (u_swhash); which member
 * of the union is active is decided by UAO_USES_SWHASH() on u_pages.
 */
struct uvm_aobj {
	struct uvm_object u_obj;	/* the base UVM object */
	int u_pages;			/* number of pages in entire object */
	int u_flags;			/* object flags (e.g. UAO_FLAG_NOSWAP) */
	/* Convenience accessors for the two members of u_swap. */
#define u_swslots u_swap.slot_array
#define u_swhash u_swap.slot_hash
	union swslots {
		int *slot_array;		/* flat array of swap slots */
		struct uao_swhash *slot_hash;	/* hash of slot clusters */
	} u_swap;
	u_long u_swhashmask;		/* hash mask, filled in by hashinit() */
	LIST_ENTRY(uvm_aobj) u_list;	/* linkage on the global aobj list */
};

/* Pool from which uvm_aobj structures are allocated. */
struct pool uvm_aobj_pool;
/*
 * Local functions.
 */
static struct uao_swhash_elt *uao_find_swhash_elt(struct uvm_aobj *, int,
    boolean_t, boolean_t);
static boolean_t uao_flush(struct uvm_object *, voff_t,
    voff_t, int);
static void uao_free(struct uvm_aobj *);
static int uao_get(struct uvm_object *, voff_t,
    vm_page_t *, int *, int, vm_prot_t,
    int, int);
static boolean_t uao_pagein(struct uvm_aobj *, int, int);
static boolean_t uao_pagein_page(struct uvm_aobj *, int);

void uao_dropswap_range(struct uvm_object *, voff_t, voff_t);

/* Resize helpers; their definitions are under #ifdef TMPFS below. */
void uao_shrink_flush(struct uvm_object *, int, int);
int uao_shrink_hash(struct uvm_object *, int);
int uao_shrink_array(struct uvm_object *, int);
int uao_shrink_convert(struct uvm_object *, int);
int uao_grow_hash(struct uvm_object *, int);
int uao_grow_array(struct uvm_object *, int);
int uao_grow_convert(struct uvm_object *, int);
/*
 * aobj_pager: pager operations for anonymous memory objects.
 */
const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_flush = uao_flush,
	.pgo_get = uao_get,
};

/*
 * uao_list: global list of active aobjs, protected by uao_list_lock.
 * Walked by uao_swap_off() to visit every anonymous object.
 */
static LIST_HEAD(aobjlist, uvm_aobj) uao_list = LIST_HEAD_INITIALIZER(uao_list);
static struct mutex uao_list_lock = MUTEX_INITIALIZER(IPL_MPFLOOR);
/*
 * uao_find_swhash_elt: look up (and optionally create) the hash table
 * element for the cluster containing "pageidx".
 *
 * => returns NULL when the element is absent and "create" is FALSE,
 *    or when element allocation fails ("wait" false makes the pool
 *    allocation non-blocking).
 */
static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create,
    boolean_t wait)
{
	struct uao_swhash *bucket;
	struct uao_swhash_elt *e;
	voff_t tag;

	bucket = UAO_SWHASH_HASH(aobj, pageidx);
	tag = UAO_SWHASH_ELT_TAG(pageidx);

	/* Fast path: the cluster already has an element in the bucket. */
	LIST_FOREACH(e, bucket, list) {
		if (e->tag == tag)
			return e;
	}

	if (!create)
		return NULL;

	/* Allocate a zeroed element and link it into the bucket. */
	e = pool_get(&uao_swhash_elt_pool,
	    (wait ? PR_WAITOK : PR_NOWAIT) | PR_ZERO);
	if (e == NULL)
		return NULL;
	e->tag = tag;
	LIST_INSERT_HEAD(bucket, e, list);
	return e;
}
int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
KASSERT(UVM_OBJ_IS_AOBJ(uobj));
if (aobj->u_flags & UAO_FLAG_NOSWAP)
return 0;
if (UAO_USES_SWHASH(aobj)) {
struct uao_swhash_elt *elt =
uao_find_swhash_elt(aobj, pageidx, FALSE, FALSE);
if (elt)
return UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
else
return 0;
}
return aobj->u_swslots[pageidx];
}
/*
 * uao_set_swslot: set the swap slot for the given page index.
 *
 * => caller must hold the object lock write-locked, unless the object
 *    has no references left.
 * => returns the previous slot number, or -1 when a hash element
 *    could not be allocated for a non-zero slot.
 * => clearing a slot releases the hash element once its cluster has
 *    no active slots remaining.
 */
int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *e;
	int prev;

	KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * A NOSWAP object may only ever have its slots cleared.
	 */
	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return 0;

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	if (!UAO_USES_SWHASH(aobj)) {
		/* Flat array: swap the value in place. */
		prev = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
		return prev;
	}

	/*
	 * Hash table: only create a new element when storing a non-zero
	 * slot (clearing an absent slot is a no-op).
	 */
	e = uao_find_swhash_elt(aobj, pageidx, slot != 0, FALSE);
	if (e == NULL)
		return (slot != 0) ? -1 : 0;

	prev = UAO_SWHASH_ELT_PAGESLOT(e, pageidx);
	UAO_SWHASH_ELT_PAGESLOT(e, pageidx) = slot;

	if (slot != 0) {
		if (prev == 0)
			e->count++;
	} else {
		if (prev != 0)
			e->count--;
		/* Drop the element once the cluster is empty. */
		if (e->count == 0) {
			LIST_REMOVE(e, list);
			pool_put(&uao_swhash_elt_pool, e);
		}
	}
	return prev;
}
/*
 * uao_free: free all resources held by an aobj, then the aobj itself.
 *
 * => called from uao_detach() once the last reference is gone.
 * => caller holds the object lock write-locked; it is released here
 *    before the backing storage is freed.
 */
static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	/* Release every swap slot still held by the object. */
	uao_dropswap_range(uobj, 0, 0);
	rw_exit(uobj->vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Free the hash table; the elements themselves were
		 * already freed by uao_dropswap_range() above.
		 */
		hashfree(aobj->u_swhash, UAO_SWHASH_BUCKETS(aobj->u_pages), M_UVMAOBJ);
	} else {
		/* Free the flat slot array. */
		free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
	}

	/* Finally, destroy the object and return it to its pool. */
	uvm_obj_destroy(uobj);
	pool_put(&uvm_aobj_pool, aobj);
}
#ifdef TMPFS
/*
 * uao_shrink_flush: free the resident pages and swap slots in the
 * page range [startpg, endpg) prior to shrinking the object.
 *
 * => object must have exactly one reference (caller-owned).
 */
void
uao_shrink_flush(struct uvm_object *uobj, int startpg, int endpg)
{
	KASSERT(startpg < endpg);
	KASSERT(uobj->uo_refs == 1);

	/* Toss the resident pages in the range, then their swap slots. */
	uao_flush(uobj, (voff_t)startpg << PAGE_SHIFT,
	    (voff_t)endpg << PAGE_SHIFT, PGO_FREE);
	uao_dropswap_range(uobj, startpg, endpg);
}
/*
 * uao_shrink_hash: shrink an aobj that keeps using the swap hash.
 *
 * => returns 0 on success, ENOMEM if a smaller hash table cannot be
 *    allocated (the object is left untouched in that case).
 */
int
uao_shrink_hash(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash *new_swhash;
	struct uao_swhash_elt *elt;
	unsigned long new_hashmask;
	int i;

	KASSERT(UAO_USES_SWHASH(aobj));

	/*
	 * If the bucket count does not change, all we need to do is
	 * flush the truncated range and adjust the page count.
	 */
	if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
		uao_shrink_flush(uobj, pages, aobj->u_pages);
		aobj->u_pages = pages;
		return 0;
	}

	new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL, &new_hashmask);
	if (new_swhash == NULL)
		return ENOMEM;

	/* Toss the pages/slots beyond the new size. */
	uao_shrink_flush(uobj, pages, aobj->u_pages);

	/*
	 * Move the surviving elements over, keeping each element's
	 * bucket index; after the flush the remaining elements all fall
	 * inside the new, smaller table.
	 */
	for (i = 0; i < UAO_SWHASH_BUCKETS(aobj->u_pages); i++) {
		while (LIST_EMPTY(&aobj->u_swhash[i]) == 0) {
			elt = LIST_FIRST(&aobj->u_swhash[i]);
			LIST_REMOVE(elt, list);
			LIST_INSERT_HEAD(&new_swhash[i], elt, list);
		}
	}

	hashfree(aobj->u_swhash, UAO_SWHASH_BUCKETS(aobj->u_pages), M_UVMAOBJ);

	/* Install the new table and the new size. */
	aobj->u_swhash = new_swhash;
	aobj->u_pages = pages;
	aobj->u_swhashmask = new_hashmask;

	return 0;
}
/*
 * uao_shrink_convert: shrink an aobj below the hash threshold,
 * converting its swap hash table into a flat slot array.
 *
 * => returns 0 on success, ENOMEM if the array cannot be allocated
 *    (the object is left untouched in that case).
 */
int
uao_shrink_convert(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int i, *new_swslots;

	new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL | M_ZERO);
	if (new_swslots == NULL)
		return ENOMEM;

	/* Toss the pages/slots beyond the new size. */
	uao_shrink_flush(uobj, pages, aobj->u_pages);

	/*
	 * Move the slots of the surviving pages from the hash to the
	 * array, freeing each hash element once its cluster no longer
	 * holds any active slots.
	 */
	for (i = 0; i < pages; i++) {
		elt = uao_find_swhash_elt(aobj, i, FALSE, FALSE);
		if (elt != NULL) {
			new_swslots[i] = UAO_SWHASH_ELT_PAGESLOT(elt, i);
			if (new_swslots[i] != 0)
				elt->count--;
			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	}

	hashfree(aobj->u_swhash, UAO_SWHASH_BUCKETS(aobj->u_pages), M_UVMAOBJ);

	/* Install the array and the new size. */
	aobj->u_swslots = new_swslots;
	aobj->u_pages = pages;

	return 0;
}
/*
 * uao_shrink_array: shrink an aobj that uses (and keeps using) the
 * flat slot array.
 *
 * => returns 0 on success, ENOMEM if the smaller array cannot be
 *    allocated (the object is left untouched in that case).
 */
int
uao_shrink_array(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int *new_swslots;

	new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL | M_ZERO);
	if (new_swslots == NULL)
		return ENOMEM;

	/* Toss the pages/slots beyond the new size, then copy the rest. */
	uao_shrink_flush(uobj, pages, aobj->u_pages);
	memcpy(new_swslots, aobj->u_swslots, pages * sizeof(int));

	free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
	aobj->u_swslots = new_swslots;
	aobj->u_pages = pages;

	return 0;
}
/*
 * uao_shrink: reduce the size of an aobj to "pages".
 *
 * Dispatches on the storage scheme in use before and after the
 * resize: hash stays hash, hash converts down to array, or array
 * stays array.
 */
int
uao_shrink(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERT(pages < aobj->u_pages);

	/* Still above the threshold: resize the hash table. */
	if (pages > UAO_SWHASH_THRESHOLD)
		return uao_shrink_hash(uobj, pages);

	/* Crossing the threshold downwards: convert hash to array. */
	if (aobj->u_pages > UAO_SWHASH_THRESHOLD)
		return uao_shrink_convert(uobj, pages);

	/* Still below the threshold: resize the array. */
	return uao_shrink_array(uobj, pages);
}
/*
 * uao_grow_array: grow an aobj that stays at or below the hash
 * threshold.
 *
 * => returns 0 on success, ENOMEM if the larger array cannot be
 *    allocated (the object is left untouched in that case).
 */
int
uao_grow_array(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int *new_swslots;

	KASSERT(aobj->u_pages <= UAO_SWHASH_THRESHOLD);

	new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL | M_ZERO);
	if (new_swslots == NULL)
		return ENOMEM;

	/* Copy the existing slots; the new tail stays zero-filled. */
	memcpy(new_swslots, aobj->u_swslots, aobj->u_pages * sizeof(int));

	free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
	aobj->u_swslots = new_swslots;
	aobj->u_pages = pages;

	return 0;
}
/*
 * uao_grow_hash: grow an aobj that already uses (and keeps using)
 * the swap hash table.
 *
 * => returns 0 on success, ENOMEM if a larger hash table cannot be
 *    allocated (the object is left untouched in that case).
 */
int
uao_grow_hash(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash *new_swhash;
	struct uao_swhash_elt *elt;
	unsigned long new_hashmask;
	int i;

	KASSERT(pages > UAO_SWHASH_THRESHOLD);

	/*
	 * If the bucket count does not change, all we need to do is
	 * adjust the page count.
	 */
	if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
		aobj->u_pages = pages;
		return 0;
	}

	KASSERT(UAO_SWHASH_BUCKETS(aobj->u_pages) < UAO_SWHASH_BUCKETS(pages));

	new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL, &new_hashmask);
	if (new_swhash == NULL)
		return ENOMEM;

	/*
	 * Move the existing elements over, keeping each element's
	 * bucket index in the new (larger) table.
	 */
	for (i = 0; i < UAO_SWHASH_BUCKETS(aobj->u_pages); i++) {
		while (LIST_EMPTY(&aobj->u_swhash[i]) == 0) {
			elt = LIST_FIRST(&aobj->u_swhash[i]);
			LIST_REMOVE(elt, list);
			LIST_INSERT_HEAD(&new_swhash[i], elt, list);
		}
	}

	hashfree(aobj->u_swhash, UAO_SWHASH_BUCKETS(aobj->u_pages), M_UVMAOBJ);

	/* Install the new table and the new size. */
	aobj->u_swhash = new_swhash;
	aobj->u_pages = pages;
	aobj->u_swhashmask = new_hashmask;

	return 0;
}
/*
 * uao_grow_convert: grow an aobj past the hash threshold, converting
 * its flat slot array into a swap hash table.
 *
 * => returns 0 on success, ENOMEM if the hash table cannot be
 *    allocated (the object is left untouched in that case).
 */
int
uao_grow_convert(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash *new_swhash;
	struct uao_swhash_elt *elt;
	unsigned long new_hashmask;
	int i, *old_swslots;

	new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL, &new_hashmask);
	if (new_swhash == NULL)
		return ENOMEM;

	/*
	 * Set the hash members now so uao_find_swhash_elt() can be
	 * used while repopulating; keep the old array aside.
	 */
	old_swslots = aobj->u_swslots;
	aobj->u_swhash = new_swhash;
	aobj->u_swhashmask = new_hashmask;

	/* Insert each non-zero slot into a (possibly new) hash element. */
	for (i = 0; i < aobj->u_pages; i++) {
		if (old_swslots[i] != 0) {
			elt = uao_find_swhash_elt(aobj, i, TRUE, TRUE);
			elt->count++;
			UAO_SWHASH_ELT_PAGESLOT(elt, i) = old_swslots[i];
		}
	}

	free(old_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
	aobj->u_pages = pages;

	return 0;
}
/*
 * uao_grow: increase the size of an aobj to "pages".
 *
 * Dispatches on the storage scheme in use before and after the
 * resize: array stays array, hash stays hash, or array converts up
 * to hash when the new size crosses the threshold.
 */
int
uao_grow(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERT(pages > aobj->u_pages);

	/* Still at or below the threshold: resize the array. */
	if (pages <= UAO_SWHASH_THRESHOLD)
		return uao_grow_array(uobj, pages);

	/* Already above the threshold: resize the hash table. */
	if (aobj->u_pages > UAO_SWHASH_THRESHOLD)
		return uao_grow_hash(uobj, pages);

	/* Crossing the threshold upwards: convert array to hash. */
	return uao_grow_convert(uobj, pages);
}
#endif
/*
 * uao_create: create an aobj of the given size and return its
 * uvm_object pointer.
 *
 * => flags: UAO_FLAG_KERNOBJ - allocate the (one-time) kernel object,
 *           UAO_FLAG_KERNSWAP - enable swap for the kernel object,
 *           UAO_FLAG_CANFAIL - allow the swap-state allocation to fail
 *           (returns NULL instead of panicking).
 */
struct uvm_object *
uao_create(vsize_t size, int flags)
{
	/* The kernel object lives in static storage, allocated once. */
	static struct uvm_aobj kernel_object_store;
	static struct rwlock bootstrap_kernel_object_lock;
	static int kobj_alloced = 0;
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Malloc a new aobj unless one of the kernel-object flags was
	 * given; those reuse the static store.
	 */
	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		/* No swap until KERNSWAP setup happens later. */
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * Allocate the swap-slot hash or array if necessary (skipped
	 * entirely for the initial KERNOBJ bootstrap, which is NOSWAP).
	 */
	if (flags == 0 || (flags & (UAO_FLAG_KERNSWAP | UAO_FLAG_CANFAIL))) {
		int mflags;

		/* M_NOWAIT for KERNSWAP/CANFAIL callers; M_WAITOK otherwise. */
		if (flags)
			mflags = M_NOWAIT;
		else
			mflags = M_WAITOK;

		/* Allocate hash table or array depending on object size. */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(pages),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL) {
				if (flags & UAO_FLAG_CANFAIL) {
					pool_put(&uvm_aobj_pool, aobj);
					return NULL;
				}
				panic("uao_create: hashinit swhash failed");
			}
		} else {
			aobj->u_swslots = mallocarray(pages, sizeof(int),
			    M_UVMAOBJ, mflags|M_ZERO);
			if (aobj->u_swslots == NULL) {
				if (flags & UAO_FLAG_CANFAIL) {
					pool_put(&uvm_aobj_pool, aobj);
					return NULL;
				}
				panic("uao_create: malloc swslots failed");
			}
		}

		if (flags & UAO_FLAG_KERNSWAP) {
			/* The kernel object can now use swap; done. */
			aobj->u_flags &= ~UAO_FLAG_NOSWAP;
			return &aobj->u_obj;
		}
	}

	/* Initialize the base UVM object. */
	uvm_obj_init(&aobj->u_obj, &aobj_pager, refs);
	if (flags & UAO_FLAG_KERNOBJ) {
		/* Use a temporary static lock for the kernel object. */
		rw_init(&bootstrap_kernel_object_lock, "kobjlk");
		uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
	}

	/*
	 * Now that the aobj is ready, add it to the global list.
	 */
	mtx_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mtx_leave(&uao_list_lock);

	return &aobj->u_obj;
}
/*
 * uao_init: initialize the aobj pager pools.
 */
void
uao_init(void)
{
	/* Elements for the swap slot hash table. */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt), 0,
	    IPL_NONE, PR_WAITOK, "uaoeltpl", NULL);
	/* The anonymous objects themselves. */
	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0,
	    IPL_NONE, PR_WAITOK, "aobjpl", NULL);
}
/*
 * uao_reference: add a reference to an aobj.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel objects are not reference counted; nothing to do. */
	if (!UVM_OBJ_IS_KERN_OBJECT(uobj))
		atomic_inc_int(&uobj->uo_refs);
}
/*
 * uao_detach: drop a reference to an aobj.
 *
 * When the last reference goes away the object is removed from the
 * global aobj list, its resident pages are freed (waiting out busy
 * pages as needed), and the object itself is freed via uao_free().
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;

	/*
	 * Detaching from the kernel object is a NOP.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was not the last one, we are done.
	 */
	if (atomic_dec_int_nv(&uobj->uo_refs) > 0) {
		return;
	}

	/* Remove the object from the global list before tearing it down. */
	mtx_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mtx_leave(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when it
	 * is no longer busy, drop its swap slot and free the page.
	 */
	rw_enter(uobj->vmobjlock, RW_WRITE);
	while ((pg = RBT_ROOT(uvm_objtree, &uobj->memt)) != NULL) {
		pmap_page_protect(pg, PROT_NONE);
		if (pg->pg_flags & PG_BUSY) {
			/* uvm_pagewait() releases the lock; re-take it. */
			uvm_pagewait(pg, uobj->vmobjlock, "uao_det");
			rw_enter(uobj->vmobjlock, RW_WRITE);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}

	/*
	 * Finally, free the anonymous UVM object itself.
	 */
	uao_free(aobj);
}
/*
 * uao_flush: "flush" pages out of an aobj.
 *
 * => this pager has no backing store, so only PGO_DEACTIVATE and
 *    PGO_FREE do real work; other flag combinations are no-ops.
 * => always returns TRUE (no I/O errors can occur here).
 * => caller must hold the object lock write-locked.
 */
boolean_t
uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pg;
	voff_t curoff;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	/* Establish the page-aligned range, clipped to the object. */
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = (voff_t)aobj->u_pages << PAGE_SHIFT;
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > ((voff_t)aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = (voff_t)aobj->u_pages << PAGE_SHIFT;
		}
	}

	/*
	 * Nothing to do unless we are freeing or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		return TRUE;
	}

	curoff = start;
	for (;;) {
		if (curoff < stop) {
			pg = uvm_pagelookup(uobj, curoff);
			curoff += PAGE_SIZE;
			if (pg == NULL)
				continue;
		} else {
			break;
		}

		/* Make sure the page is not busy, else wait for it. */
		if (pg->pg_flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uaoflsh");
			rw_enter(uobj->vmobjlock, RW_WRITE);
			/* Back up so this page is examined again. */
			curoff -= PAGE_SIZE;
			continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * The CLEANIT combinations just deactivate: this pager
		 * implements no cleaning (there is no backing store).
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			uvm_pagedeactivate(pg);
			continue;
		case PGO_FREE:
			/*
			 * If there are multiple references to the
			 * object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* Skip the page if it is wired. */
			if (pg->wire_count != 0)
				continue;

			pmap_page_protect(pg, PROT_NONE);
			/* Release the swap slot, then free the page. */
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			continue;
		default:
			panic("uao_flush: weird flags");
		}
	}

	return TRUE;
}
/*
 * uao_get: fetch pages from an aobj.
 *
 * There are three cases for each requested page:
 *  1: page is resident     -> just return the page.
 *  2: page is zero-fill    -> allocate a new page and zero it.
 *  3: page is swapped out  -> fetch the page from swap.
 *
 * Case 1 can be handled with PGO_LOCKED, cases 2 and 3 cannot; if the
 * "center" page needs 2/3 under PGO_LOCKED we return VM_PAGER_UNLOCK
 * so the caller can retry unlocked.
 *
 * => flags: PGO_LOCKED (fault data structures are locked),
 *    PGO_ALLPAGES (all pages, not just the center one, are wanted).
 * => the object lock is held on entry and released before returning
 *    in the non-PGO_LOCKED path.
 */
static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT(rw_write_held(uobj->vmobjlock) ||
	    ((flags & PGO_LOCKED) != 0 && (access_type & PROT_WRITE) == 0));

	/* Get the number of pages requested. */
	maxpages = *npagesp;

	if (flags & PGO_LOCKED) {
		/*
		 * Step 1: collect only the pages that are already
		 * resident and not busy; no allocation, no sleeping.
		 */
		done = TRUE;
		gotpages = 0;
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* Skip slots the caller does not care about. */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/* To be useful we must get a non-busy page. */
			if (ptmp == NULL || (ptmp->pg_flags & PG_BUSY) != 0) {
				if (lcv == centeridx) {
					done = FALSE;
				}
				if ((flags & PGO_ALLPAGES) != 0) {
					done = FALSE;
					break;
				}
				continue;
			}

			/* Useful page: plug it into the result array. */
			pps[lcv] = ptmp;
			gotpages++;
		}

		*npagesp = gotpages;
		return done ? VM_PAGER_OK : VM_PAGER_UNLOCK;
	}

	/*
	 * Step 2: get non-resident or busy pages.  We may sleep and
	 * drop/re-take the object lock below.
	 */
	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/* Skip pages we already have or do not want. */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * Loop until we either own the page (busy, in pps[lcv])
		 * or leave the loop with a freshly allocated fake page.
		 */
		while (pps[lcv] == NULL) {
			ptmp = uvm_pagelookup(uobj, current_offset);
			if (ptmp == NULL) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);
				if (ptmp == NULL) {
					/* Out of RAM: wait, then retry. */
					rw_exit(uobj->vmobjlock);
					uvm_wait("uao_getpage");
					rw_enter(uobj->vmobjlock, RW_WRITE);
					continue;
				}
				atomic_setbits_int(&ptmp->pg_flags, PQ_AOBJ);
				/* New page is fake/busy; fill it below. */
				break;
			}

			/* Page exists but is busy: wait and re-check. */
			if ((ptmp->pg_flags & PG_BUSY) != 0) {
				uvm_pagewait(ptmp, uobj->vmobjlock, "uao_get");
				rw_enter(uobj->vmobjlock, RW_WRITE);
				continue;
			}

			/* Page is present and unbusy: claim it. */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/* If we claimed a resident page above, we are done. */
		if (pps[lcv])
			continue;

		/*
		 * We have a fake/busy page: zero-fill it or page it in
		 * from swap, depending on whether it has a swap slot.
		 */
		swslot = uao_find_swslot(uobj, pageidx);
		if (swslot == 0) {
			/* Page never existed: just zero it. */
			uvm_pagezero(ptmp);
		} else {
			/* Page is in swap: drop the lock for the I/O. */
			rw_exit(uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			rw_enter(uobj->vmobjlock, RW_WRITE);

			if (rv != VM_PAGER_OK) {
				/*
				 * I/O error: mark the aobj's slot as bad
				 * so it is never reused.  uao_set_swslot()
				 * returns 0 (no previous slot) or -1 (hash
				 * element allocation failed); only a valid
				 * positive slot may be marked bad.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0)
					uvm_swap_markbad(swslot, 1);

				/* Release and free the broken page. */
				if (ptmp->pg_flags & PG_WANTED)
					wakeup(ptmp);
				atomic_clearbits_int(&ptmp->pg_flags,
				    PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_pagefree(ptmp);
				rw_exit(uobj->vmobjlock);
				return rv;
			}
		}

		/* Page is valid now; note it stays busy for the caller. */
		atomic_clearbits_int(&ptmp->pg_flags, PG_FAKE);
		pmap_clear_modify(ptmp);
		pps[lcv] = ptmp;
	}

	rw_exit(uobj->vmobjlock);
	return VM_PAGER_OK;
}
/*
 * uao_dropswap: release any swap resources for the given page.
 *
 * => returns the swap slot that was cleared (0 if there was none).
 */
int
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int swslot;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/* Clear the slot, then free the swap space it referenced. */
	swslot = uao_set_swslot(uobj, pageidx, 0);
	if (swslot != 0)
		uvm_swap_free(swslot, 1);

	return swslot;
}
/*
 * uao_swap_off: page in every aobj page whose swap slot lies in the
 * range [startslot, endslot).
 *
 * => walks the global aobj list, holding an extra reference on the
 *    current and next objects so neither can disappear while the
 *    list lock is dropped for the pagein.
 * => returns TRUE if the walk was aborted early, FALSE when the
 *    whole list was processed.
 */
boolean_t
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj;

	/*
	 * Walk the list of all anonymous UVM objects.  Grab the first.
	 */
	mtx_enter(&uao_list_lock);
	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
		mtx_leave(&uao_list_lock);
		return FALSE;
	}
	uao_reference(&aobj->u_obj);

	do {
		struct uvm_aobj *nextaobj;
		boolean_t rv;

		/*
		 * Prefetch the next object and immediately hold a
		 * reference on it, so neither the current nor the next
		 * entry can disappear while we drop the list lock.
		 */
		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
			uao_reference(&nextaobj->u_obj);
		}
		mtx_leave(&uao_list_lock);

		/*
		 * Page in all pages of the current object that fall in
		 * the slot range.
		 */
		rw_enter(aobj->u_obj.vmobjlock, RW_WRITE);
		rv = uao_pagein(aobj, startslot, endslot);
		rw_exit(aobj->u_obj.vmobjlock);

		/* Drop our reference on the current object. */
		uao_detach(&aobj->u_obj);
		if (rv) {
			/* Aborting: also drop the prefetched reference. */
			if (nextaobj) {
				uao_detach(&nextaobj->u_obj);
			}
			return rv;
		}

		aobj = nextaobj;
		mtx_enter(&uao_list_lock);
	} while (aobj);

	/*
	 * Done with the traversal, unlock the list.
	 */
	mtx_leave(&uao_list_lock);
	return FALSE;
}
/*
 * uao_pagein: page in all pages of the aobj whose swap slots fall in
 * the range [startslot, endslot).
 *
 * => aobj must be locked; uao_pagein_page() may drop and re-take the
 *    lock internally.
 * => returns TRUE to make the caller abort its scan of this object.
 */
static boolean_t
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			    elt != NULL;
			    elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/* If the slot isn't in range, skip it. */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * Process the page.  The pagein
					 * drops locks and clears the slot,
					 * which can free hash elements, so
					 * rescan the table from the start.
					 */
					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/* If the slot isn't in range, skip it. */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/* Process the page. */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}
/*
 * uao_pagein_page: page in a single page of an aobj, drop its swap
 * slot, and deactivate the page.
 *
 * => aobj must be write-locked; the lock is dropped and re-taken
 *    around the swap I/O performed by uao_get().
 * => returns TRUE to make the caller abort its scan (all current
 *    paths return FALSE).
 */
static boolean_t
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct uvm_object *uobj = &aobj->u_obj;
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;

	KASSERT(rw_write_held(uobj->vmobjlock));
	/* uao_get() releases the object lock before returning. */
	rv = uao_get(&aobj->u_obj, (voff_t)pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, PROT_READ | PROT_WRITE, 0, 0);

	/*
	 * Relock and finish up.
	 */
	rw_enter(uobj->vmobjlock, RW_WRITE);
	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/* Nothing more to do on errors. */
		return FALSE;
	}

	/*
	 * We have the page: clear its swap slot, un-busy it, and put
	 * it on a page queue.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);
	uvm_pagedeactivate(pg);

	return FALSE;
}
/*
 * uao_dropswap_range: free the swap slots in the page-index range
 * [start, end).
 *
 * => object must be write-locked, and is returned locked.
 * => "end" is exclusive; end == 0 means "to the end of the object".
 * => the pages covered must no longer be resident (asserted below).
 */
void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int swpgonlydelta = 0;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		/* Only clusters whose tag is in [taglo, taghi] can overlap. */
		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			    elt != NULL;
			    elt = next) {
				int startidx, endidx;
				int j;

				/* "elt" may be freed below; save the link. */
				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				/* Clip the slot range to this cluster. */
				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					/* The page must not be resident. */
					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (voff_t)(UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);

					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				/* Free the element once its cluster is empty. */
				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		/* Clamp the range to the size of the slot array. */
		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * Adjust the counter of pages only in swap for all the swap
	 * slots we have freed.
	 */
	if (swpgonlydelta > 0) {
		KASSERT(atomic_load_sint(&uvmexp.swpgonly) >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}