#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/specdev.h>
#include <sys/tracepoint.h>
#include <uvm/uvm_extern.h>
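/*
 * The buffer cache couples buffers to two memory ranges: dma_constraint
 * covers pages that devices can DMA to and from, and high_constraint
 * covers whatever physical memory lies above it.  When the machine has
 * enough high memory to be worth the effort ("fliphigh"), clean buffers
 * are migrated out of DMA-able memory instead of being freed.
 */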
struct uvm_constraint_range high_constraint;	/* memory above dma_constraint */
int fliphigh;		/* flip clean buffers to high memory when set */
int nobuffers;		/* sleep point: out of buffer pages entirely */
int needbuffer;		/* count of sleepers waiting for cache pages */
void bufcache_init(void);
void bufcache_adjust(void);
struct buf *bufcache_gethighcleanbuf(void);
struct pool bufpool;
struct bufhead bufhead = LIST_HEAD_INITIALIZER(bufhead);
void buf_put(struct buf *);
struct buf *bio_doread(struct vnode *, daddr_t, int, int);
struct buf *buf_get(struct vnode *, daddr_t, size_t);
void bread_cluster_callback(struct buf *);
int64_t bufcache_recover_dmapages(int discard, int64_t howmany);
static struct buf *incore_locked(struct vnode *vp, daddr_t blkno);
struct bcachestats bcstats;	/* counters */
long lodirtypages;		/* dirty page count low water mark */
long hidirtypages;		/* dirty page count high water mark */
long targetpages;		/* target number of pages for cache size */
long buflowpages;		/* smallest size cache allowed */
long bufhighpages;		/* largest size cache allowed */
long bufbackpages;		/* minimum number of pages to shrink by when asked */
vsize_t bufkvm;			/* kva allotted for mapping buffers */
struct proc *cleanerproc;	/* the buffer cleaner kernel thread */
int bd_req;			/* sleep point for cleaner daemon */
#define NUM_CACHES 2		/* DMA-reachable cache plus high-memory cache */
#define DMA_CACHE 0		/* index of the DMA-reachable cache */
struct bufcache cleancache[NUM_CACHES];
struct bufqueue dirtyqueue;
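/*
 * Free a buffer: release its backing memory and return the buf itself
 * to the pool.  The caller must already have taken it off the cache
 * queues and off its vnode.  Must be called at IPL_BIO.
 */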
void
buf_put(struct buf *bp)
{
splassert(IPL_BIO);
#ifdef DIAGNOSTIC
if (bp->b_pobj != NULL)
KASSERT(bp->b_bufsize > 0);
if (ISSET(bp->b_flags, B_DELWRI))
panic("buf_put: releasing dirty buffer");
if (bp->b_freelist.tqe_next != NOLIST &&
bp->b_freelist.tqe_next != (void *)-1)
panic("buf_put: still on the free list");
if (bp->b_vnbufs.le_next != NOLIST &&
bp->b_vnbufs.le_next != (void *)-1)
panic("buf_put: still on the vnode list");
#endif
LIST_REMOVE(bp, b_list);
bcstats.numbufs--;
if (buf_dealloc_mem(bp) != 0)
return;
pool_put(&bufpool, bp);
}
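/*
 * Initialize the buffer cache at boot: size it as a percentage
 * (bufcachepercent, default 10%) of DMA-able memory, derive the dirty
 * page watermarks from the cache floor, and set up the buf pool and
 * the kva allocator used to map buffers.
 */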
void
bufinit(void)
{
u_int64_t dmapages;
u_int64_t highpages;
dmapages = uvm_pagecount(&dma_constraint);
/* take away a guess at how much of this the kernel will consume */
dmapages -= (atop(physmem) - atop(atomic_load_int(&uvmexp.free)));
high_constraint.ucr_low = dma_constraint.ucr_high;
high_constraint.ucr_high = no_constraint.ucr_high;
if (high_constraint.ucr_low != high_constraint.ucr_high)
high_constraint.ucr_low++;
highpages = uvm_pagecount(&high_constraint);
if (highpages > dmapages / 4)
fliphigh = 1;
else
fliphigh = 0;
if (bufcachepercent == 0)
bufcachepercent = 10;
KASSERT(bufcachepercent <= 90);
KASSERT(bufcachepercent >= 5);
if (bufpages == 0)
bufpages = dmapages * bufcachepercent / 100;
if (bufpages < BCACHE_MIN)
bufpages = BCACHE_MIN;
KASSERT(bufpages < dmapages);
bufhighpages = bufpages;
buflowpages = dmapages * 5 / 100;
if (buflowpages < BCACHE_MIN)
buflowpages = BCACHE_MIN;
bufbackpages = buflowpages * 10 / 100;
if (bufbackpages > 100)
bufbackpages = 100;
if (bufkvm == 0)
bufkvm = VM_KERNEL_SPACE_SIZE / 10;
if (bufkvm > bufpages * PAGE_SIZE)
bufkvm = bufpages * PAGE_SIZE;
bufkvm &= ~(MAXPHYS - 1);
pool_init(&bufpool, sizeof(struct buf), 0, IPL_BIO, 0, "bufpl", NULL);
bufcache_init();
buf_mem_init(bufkvm);
hidirtypages = (buflowpages / 4) * 3;
lodirtypages = buflowpages / 2;
targetpages = bufpages - RESERVE_PAGES;
}
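/*
 * Resize the cache to newbufpages (never below buflowpages).  If the
 * cache is now over target, immediately recover clean DMA pages, and
 * wake the cleaner if dirty pages or kva slots are past their limits.
 */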
void
bufadjust(int newbufpages)
{
int s;
int64_t npages;
if (newbufpages < buflowpages)
newbufpages = buflowpages;
s = splbio();
bufpages = newbufpages;
targetpages = bufpages - RESERVE_PAGES;
if (bcstats.dmapages > targetpages) {
npages = bcstats.dmapages - targetpages;
(void) bufcache_recover_dmapages(0, npages);
}
bufcache_adjust();
if ((UNCLEAN_PAGES >= hidirtypages) ||
bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
wakeup(&bd_req);
splx(s);
}
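/*
 * Back off "size" buffer cache pages on behalf of the page daemon.
 * If the request reaches into high memory, free clean high-memory
 * buffers first; any remainder comes from shrinking the cache itself
 * with bufadjust().  Returns the number of pages freed.
 */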
unsigned long
bufbackoff(struct uvm_constraint_range *range, long size)
{
long pdelta, oldbufpages;
int64_t dmarecovered, recovered = 0;
if (range != NULL && range->ucr_high > dma_constraint.ucr_high) {
struct buf *bp;
int64_t start;
int s;
start = bcstats.numbufpages;
s = splbio();
while (recovered < size && (bp = bufcache_gethighcleanbuf())) {
bufcache_take(bp);
if (bp->b_vp) {
RBT_REMOVE(buf_rb_bufs,
&bp->b_vp->v_bufs_tree, bp);
brelvp(bp);
}
buf_put(bp);
recovered = start - bcstats.numbufpages;
}
bufcache_adjust();
splx(s);
if (recovered >= size)
return recovered;
if (range->ucr_low > dma_constraint.ucr_high)
return recovered;
size -= recovered;
}
pdelta = (size > bufbackpages) ? size : bufbackpages;
if (bufpages <= buflowpages)
return recovered;
if (bufpages - pdelta < buflowpages)
pdelta = bufpages - buflowpages;
oldbufpages = bufpages;
bufadjust(bufpages - pdelta);
dmarecovered = oldbufpages - bufpages;
return recovered + dmarecovered;
}
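/*
 * Attempt to flip a clean DMA_CACHE buffer's pages into high memory.
 * Returns 0 on success, -1 if no high pages were available.
 */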
int
buf_flip_high(struct buf *bp)
{
int ret = -1;
KASSERT(ISSET(bp->b_flags, B_BC));
KASSERT(ISSET(bp->b_flags, B_DMA));
KASSERT(bp->cache == DMA_CACHE);
KASSERT(fliphigh);
splassert(IPL_BIO);
if (buf_realloc_pages(bp, &high_constraint, UVM_PLA_NOWAIT) == 0) {
KASSERT(!ISSET(bp->b_flags, B_DMA));
bcstats.highflips++;
ret = 0;
} else
bcstats.highflops++;
return ret;
}
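/*
 * Ensure a busy buffer is backed by DMA-reachable memory, sleeping
 * for pages if necessary, and move it back to the hot end of
 * DMA_CACHE so it is not immediately flipped out again.
 */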
void
buf_flip_dma(struct buf *bp)
{
KASSERT(ISSET(bp->b_flags, B_BC));
KASSERT(ISSET(bp->b_flags, B_BUSY));
KASSERT(bp->cache < NUM_CACHES);
splassert(IPL_BIO);
if (!ISSET(bp->b_flags, B_DMA)) {
(void) buf_realloc_pages(bp, &dma_constraint, UVM_PLA_WAITOK);
KASSERT(ISSET(bp->b_flags, B_DMA));
bcstats.dmaflips++;
}
if (bp->cache > DMA_CACHE) {
CLR(bp->b_flags, B_COLD);
CLR(bp->b_flags, B_WARM);
bp->cache = DMA_CACHE;
}
}
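/*
 * Start a read on a block.  If the buffer does not already hold valid
 * data, fire off the I/O with VOP_STRATEGY(); if it does and the
 * request is asynchronous, release it again since nobody will wait on
 * it.  Per-mount read statistics are updated either way.
 */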
struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, int async)
{
struct buf *bp;
struct mount *mp;
bp = getblk(vp, blkno, size, 0, INFSLP);
if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
SET(bp->b_flags, B_READ | async);
bcstats.pendingreads++;
bcstats.numreads++;
VOP_STRATEGY(bp->b_vp, bp);
curproc->p_ru.ru_inblock++;
} else if (async) {
brelse(bp);
}
mp = vp->v_type == VBLK ? vp->v_specmountpoint : vp->v_mount;
if (mp != NULL) {
if (async == 0)
mp->mnt_stat.f_syncreads++;
else
mp->mnt_stat.f_asyncreads++;
}
return (bp);
}
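/* Read a disk block synchronously; the buffer is returned in *bpp. */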
int
bread(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
{
struct buf *bp;
bp = *bpp = bio_doread(vp, blkno, size, 0);
return (biowait(bp));
}
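/*
 * Read a disk block, then start asynchronous read-ahead on the
 * nrablks blocks in rablks[] that are not already in the cache.
 */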
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t rablks[],
int rasizes[], int nrablks, struct buf **bpp)
{
struct buf *bp;
int i;
bp = *bpp = bio_doread(vp, blkno, size, 0);
for (i = 0; i < nrablks; i++) {
if (incore(vp, rablks[i]))
continue;
(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
}
return (biowait(bp));
}
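/*
 * Cluster read completion.  Shrink the master buffer's mapping to
 * just its own part of the transfer, invalidate the member buffers a
 * short read (b_resid) did not cover, then move each member's pages
 * into its own uvm object and finish it with biodone().
 */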
void
bread_cluster_callback(struct buf *bp)
{
struct buf **xbpp = bp->b_saveaddr;
int i;
if (xbpp[1] != NULL) {
size_t newsize = xbpp[1]->b_bufsize;
buf_fix_mapping(bp, newsize);
bp->b_bcount = newsize;
}
if (bp->b_resid > 0) {
for (i = 1; xbpp[i] != NULL; i++)
continue;
for (i = i - 1; i != 0; i--) {
if (xbpp[i]->b_bufsize <= bp->b_resid) {
bp->b_resid -= xbpp[i]->b_bufsize;
SET(xbpp[i]->b_flags, B_INVAL);
} else if (bp->b_resid > 0) {
bp->b_resid = 0;
SET(xbpp[i]->b_flags, B_INVAL);
} else
break;
}
}
for (i = 1; xbpp[i] != NULL; i++) {
if (ISSET(bp->b_flags, B_ERROR))
SET(xbpp[i]->b_flags, B_INVAL | B_ERROR);
struct uvm_object *newobj = &xbpp[i]->b_uobj;
struct uvm_object *oldobj = &bp->b_uobj;
int page;
uvm_obj_init(newobj, &bufcache_pager, 1);
for (page = 0; page < atop(xbpp[i]->b_bufsize); page++) {
struct vm_page *pg = uvm_pagelookup(oldobj,
xbpp[i]->b_poffs + ptoa(page));
KASSERT(pg != NULL);
KASSERT(pg->wire_count == 1);
uvm_pagerealloc(pg, newobj, xbpp[i]->b_poffs + ptoa(page));
}
xbpp[i]->b_pobj = newobj;
biodone(xbpp[i]);
}
free(xbpp, M_TEMP, (i + 1) * sizeof(*xbpp));
if (ISSET(bp->b_flags, B_ASYNC)) {
brelse(bp);
} else {
CLR(bp->b_flags, B_WANTED);
wakeup(bp);
}
}
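/*
 * Read the block at blkno and, if it missed the cache and the file is
 * physically contiguous beyond it (per VOP_BMAP), pull in up to
 * MAXPHYS worth of the following blocks with one I/O.  The first
 * extra buffer carries pages for the whole cluster; its completion
 * callback parcels them out to the others.
 */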
int
bread_cluster(struct vnode *vp, daddr_t blkno, int size, struct buf **rbpp)
{
struct buf *bp, **xbpp;
int howmany, maxra, i, inc;
daddr_t sblkno;
*rbpp = bio_doread(vp, blkno, size, 0);
if (ISSET((*rbpp)->b_flags, B_CACHE))
goto out;
if (size != round_page(size))
goto out;
if (VOP_BMAP(vp, blkno + 1, NULL, &sblkno, &maxra))
goto out;
maxra++;
if (sblkno == -1 || maxra < 2)
goto out;
howmany = MAXPHYS / size;
if (howmany > maxra)
howmany = maxra;
xbpp = mallocarray(howmany + 1, sizeof(*xbpp), M_TEMP, M_NOWAIT);
if (xbpp == NULL)
goto out;
for (i = howmany - 1; i >= 0; i--) {
size_t sz;
sz = i == 0 ? howmany * size : 0;
xbpp[i] = buf_get(vp, blkno + i + 1, sz);
if (xbpp[i] == NULL) {
for (++i; i < howmany; i++) {
SET(xbpp[i]->b_flags, B_INVAL);
brelse(xbpp[i]);
}
free(xbpp, M_TEMP, (howmany + 1) * sizeof(*xbpp));
goto out;
}
}
bp = xbpp[0];
xbpp[howmany] = NULL;
inc = btodb(size);
for (i = 1; i < howmany; i++) {
bcstats.pendingreads++;
bcstats.numreads++;
SET(xbpp[i]->b_flags, B_DMA | B_READ | B_ASYNC);
xbpp[i]->b_blkno = sblkno + (i * inc);
xbpp[i]->b_bufsize = xbpp[i]->b_bcount = size;
xbpp[i]->b_data = NULL;
xbpp[i]->b_pobj = bp->b_pobj;
xbpp[i]->b_poffs = bp->b_poffs + (i * size);
}
KASSERT(bp->b_lblkno == blkno + 1);
KASSERT(bp->b_vp == vp);
bp->b_blkno = sblkno;
SET(bp->b_flags, B_READ | B_ASYNC | B_CALL);
bp->b_saveaddr = (void *)xbpp;
bp->b_iodone = bread_cluster_callback;
bcstats.pendingreads++;
bcstats.numreads++;
VOP_STRATEGY(bp->b_vp, bp);
curproc->p_ru.ru_inblock++;
out:
return (biowait(*rbpp));
}
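/*
 * Block write.  A synchronous write on an async-mounted filesystem is
 * converted into a delayed write unless B_NOCACHE forces it out.
 * Otherwise the I/O is started; synchronous callers then wait for it
 * and release the buffer, getting back any error.
 */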
int
bwrite(struct buf *bp)
{
int rv, async, wasdelayed, s;
struct vnode *vp;
struct mount *mp;
vp = bp->b_vp;
if (vp != NULL)
mp = vp->v_type == VBLK ? vp->v_specmountpoint : vp->v_mount;
else
mp = NULL;
async = ISSET(bp->b_flags, B_ASYNC);
if (!async && mp && ISSET(mp->mnt_flag, MNT_ASYNC)) {
if (!ISSET(bp->b_flags, B_NOCACHE)) {
bdwrite(bp);
return (0);
}
}
if (mp != NULL) {
if (async)
mp->mnt_stat.f_asyncwrites++;
else
mp->mnt_stat.f_syncwrites++;
}
bcstats.pendingwrites++;
bcstats.numwrites++;
wasdelayed = ISSET(bp->b_flags, B_DELWRI);
CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
s = splbio();
if (wasdelayed) {
reassignbuf(bp);
} else
curproc->p_ru.ru_oublock++;
bp->b_vp->v_numoutput++;
buf_flip_dma(bp);
SET(bp->b_flags, B_WRITEINPROG);
splx(s);
VOP_STRATEGY(bp->b_vp, bp);
if (bp->b_bq)
bufq_wait(bp->b_bq);
if (async)
return (0);
rv = biowait(bp);
brelse(bp);
return (rv);
}
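/*
 * Delayed write: mark the buffer dirty, ensure it sits in DMA-able
 * memory on its vnode's dirty list, and charge the write to the
 * process now, but start no I/O; the data goes to disk when the
 * cleaner, the syncer, or buffer reuse gets to it.
 */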
void
bdwrite(struct buf *bp)
{
int s;
if (!ISSET(bp->b_flags, B_DELWRI)) {
SET(bp->b_flags, B_DELWRI);
s = splbio();
buf_flip_dma(bp);
reassignbuf(bp);
splx(s);
curproc->p_ru.ru_oublock++;
}
CLR(bp->b_flags, B_NEEDCOMMIT);
CLR(bp->b_flags, B_NOCACHE);
SET(bp->b_flags, B_DONE);
brelse(bp);
}
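/* Asynchronous block write: start the write but do not wait on it. */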
void
bawrite(struct buf *bp)
{
SET(bp->b_flags, B_ASYNC);
VOP_BWRITE(bp);
}
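/*
 * Mark a busy buffer dirty and put it on its vnode's dirty list.
 * Must be called at IPL_BIO.
 */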
void
buf_dirty(struct buf *bp)
{
splassert(IPL_BIO);
#ifdef DIAGNOSTIC
if (!ISSET(bp->b_flags, B_BUSY))
panic("Trying to dirty buffer on freelist!");
#endif
if (ISSET(bp->b_flags, B_DELWRI) == 0) {
SET(bp->b_flags, B_DELWRI);
buf_flip_dma(bp);
reassignbuf(bp);
}
}
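/* Undo a delayed write on a busy buffer.  Must be called at IPL_BIO. */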
void
buf_undirty(struct buf *bp)
{
splassert(IPL_BIO);
#ifdef DIAGNOSTIC
if (!ISSET(bp->b_flags, B_BUSY))
panic("Trying to undirty buffer on freelist!");
#endif
if (ISSET(bp->b_flags, B_DELWRI)) {
CLR(bp->b_flags, B_DELWRI);
reassignbuf(bp);
}
}
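/*
 * Release a buffer.  Invalid buffers are torn down and freed on the
 * spot; valid ones go back on the cache queues.  Wake up any waiters
 * for this buffer, and any sleepers waiting for buffer memory if the
 * cache has room again.
 */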
void
brelse(struct buf *bp)
{
int s;
s = splbio();
if (bp->b_data != NULL)
KASSERT(bp->b_bufsize > 0);
if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
SET(bp->b_flags, B_INVAL);
if (ISSET(bp->b_flags, B_ERROR) && !ISSET(bp->b_flags, B_READ)) {
if (bp->b_vp && bp->b_vp->v_type == VREG)
SET(bp->b_vp->v_bioflag, VBIOERROR);
}
if (ISSET(bp->b_flags, B_INVAL)) {
if (ISSET(bp->b_flags, B_DELWRI)) {
CLR(bp->b_flags, B_DELWRI);
}
if (bp->b_vp) {
RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp);
brelvp(bp);
}
bp->b_vp = NULL;
if (ISSET(bp->b_flags, B_WANTED)) {
CLR(bp->b_flags, B_WANTED);
wakeup(bp);
}
buf_put(bp);
} else {
bufcache_release(bp);
CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED));
buf_release(bp);
if (ISSET(bp->b_flags, B_WANTED)) {
CLR(bp->b_flags, B_WANTED);
wakeup(bp);
}
if (bcstats.dmapages > targetpages)
(void) bufcache_recover_dmapages(0,
bcstats.dmapages - targetpages);
bufcache_adjust();
}
if (nobuffers) {
nobuffers = 0;
wakeup(&nobuffers);
}
if (needbuffer && bcstats.dmapages < targetpages &&
bcstats.kvaslots_avail > RESERVE_SLOTS) {
needbuffer = 0;
wakeup(&needbuffer);
}
splx(s);
}
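/*
 * Determine if a block is in the cache: look it up in the vnode's
 * buffer tree, ignoring buffers marked invalid.  incore_locked()
 * requires IPL_BIO; incore() is the unlocked wrapper.
 */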
static struct buf *
incore_locked(struct vnode *vp, daddr_t blkno)
{
struct buf *bp;
struct buf b;
splassert(IPL_BIO);
b.b_lblkno = blkno;
bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
if (bp != NULL && ISSET(bp->b_flags, B_INVAL))
bp = NULL;
return (bp);
}
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
struct buf *bp;
int s;
s = splbio();
bp = incore_locked(vp, blkno);
splx(s);
return (bp);
}
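/*
 * Get a buffer for the given vnode and block.  On a cache hit, wait
 * out any other holder (B_BUSY) and return the buffer with B_CACHE
 * set; on a miss, allocate a fresh one.  The caller must fill a
 * non-B_CACHE buffer itself, by reading it or clearing it.
 */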
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag,
uint64_t slptimeo)
{
struct buf *bp;
struct buf b;
int s, error;
start:
s = splbio();
b.b_lblkno = blkno;
bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
if (bp != NULL) {
if (ISSET(bp->b_flags, B_BUSY)) {
SET(bp->b_flags, B_WANTED);
error = tsleep_nsec(bp, slpflag | (PRIBIO + 1),
"getblk", slptimeo);
splx(s);
if (error)
return (NULL);
goto start;
}
if (!ISSET(bp->b_flags, B_INVAL)) {
bcstats.cachehits++;
SET(bp->b_flags, B_CACHE);
bufcache_take(bp);
buf_acquire(bp);
splx(s);
return (bp);
}
}
splx(s);
if ((bp = buf_get(vp, blkno, size)) == NULL)
goto start;
return (bp);
}
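/* Get a buffer that is not associated with any vnode. */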
struct buf *
geteblk(size_t size)
{
struct buf *bp;
while ((bp = buf_get(NULL, 0, size)) == NULL)
continue;
return (bp);
}
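/*
 * Allocate a buffer, with "size" bytes of pages behind it when size
 * is nonzero.  This is where cache pressure is enforced: the cleaner
 * is kicked when dirty pages or kva run high, clean DMA pages are
 * recovered to make room, and callers other than the syncer and
 * cleaner sleep and get back NULL (to retry) when the cache is full.
 */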
struct buf *
buf_get(struct vnode *vp, daddr_t blkno, size_t size)
{
struct buf *bp;
int poolwait = size == 0 ? PR_NOWAIT : PR_WAITOK;
int npages;
int s;
s = splbio();
if (size) {
if (UNCLEAN_PAGES >= hidirtypages ||
bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
wakeup(&bd_req);
npages = atop(round_page(size));
if (bufpages < bufhighpages)
bufadjust(bufhighpages);
if (bcstats.dmapages + npages > targetpages) {
(void) bufcache_recover_dmapages(0, npages);
bufcache_adjust();
}
if ((bcstats.dmapages + npages > targetpages ||
bcstats.kvaslots_avail <= RESERVE_SLOTS) &&
curproc != syncerproc && curproc != cleanerproc) {
wakeup(&bd_req);
needbuffer++;
tsleep_nsec(&needbuffer, PRIBIO, "needbuffer", INFSLP);
splx(s);
return (NULL);
}
if (bcstats.dmapages + npages > bufpages) {
nobuffers = 1;
tsleep_nsec(&nobuffers, PRIBIO, "nobuffers", INFSLP);
splx(s);
return (NULL);
}
}
bp = pool_get(&bufpool, poolwait|PR_ZERO);
if (bp == NULL) {
splx(s);
return (NULL);
}
bp->b_freelist.tqe_next = NOLIST;
bp->b_dev = NODEV;
bp->b_bcount = size;
buf_acquire_nomap(bp);
if (vp != NULL) {
if (incore_locked(vp, blkno)) {
pool_put(&bufpool, bp);
splx(s);
return (NULL);
}
bp->b_blkno = bp->b_lblkno = blkno;
bgetvp(vp, bp);
if (RBT_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
panic("buf_get: dup lblk vp %p bp %p", vp, bp);
} else {
bp->b_vnbufs.le_next = NOLIST;
SET(bp->b_flags, B_INVAL);
bp->b_vp = NULL;
}
LIST_INSERT_HEAD(&bufhead, bp, b_list);
bcstats.numbufs++;
if (size) {
buf_alloc_pages(bp, round_page(size));
KASSERT(ISSET(bp->b_flags, B_DMA));
buf_map(bp);
}
SET(bp->b_flags, B_BC);
splx(s);
return (bp);
}
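/*
 * The cleaner kernel thread.  Sleeps on bd_req until woken, then
 * pushes out dirty buffers with bawrite() until dirty-page and kva
 * pressure fall back below the low watermarks.
 */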
void
buf_daemon(void *arg)
{
struct buf *bp = NULL;
int s, pushed = 0;
s = splbio();
for (;;) {
if (bp == NULL || (pushed >= 16 &&
UNCLEAN_PAGES < hidirtypages &&
bcstats.kvaslots_avail > 2 * RESERVE_SLOTS)){
pushed = 0;
if (needbuffer) {
needbuffer = 0;
wakeup(&needbuffer);
}
tsleep_nsec(&bd_req, PRIBIO - 7, "cleaner", INFSLP);
}
while ((bp = bufcache_getdirtybuf())) {
TRACEPOINT(vfs, cleaner, bp->b_flags, pushed,
lodirtypages, hidirtypages);
if (UNCLEAN_PAGES < lodirtypages &&
bcstats.kvaslots_avail > 2 * RESERVE_SLOTS &&
pushed >= 16)
break;
bufcache_take(bp);
buf_acquire(bp);
splx(s);
if (ISSET(bp->b_flags, B_INVAL)) {
brelse(bp);
s = splbio();
continue;
}
#ifdef DIAGNOSTIC
if (!ISSET(bp->b_flags, B_DELWRI))
panic("Clean buffer on dirty queue");
#endif
bawrite(bp);
pushed++;
sched_pause(yield);
s = splbio();
}
}
}
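/*
 * Wait for I/O on a buffer to complete.  Returns EINTR for an
 * interrupted transfer, the buffer's error (or EIO) on failure,
 * otherwise 0.
 */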
int
biowait(struct buf *bp)
{
int s;
KASSERT(!(bp->b_flags & B_ASYNC));
s = splbio();
while (!ISSET(bp->b_flags, B_DONE))
tsleep_nsec(bp, PRIBIO + 1, "biowait", INFSLP);
splx(s);
if (ISSET(bp->b_flags, B_EINTR)) {
CLR(bp->b_flags, B_EINTR);
return (EINTR);
}
if (ISSET(bp->b_flags, B_ERROR))
return (bp->b_error ? bp->b_error : EIO);
else
return (0);
}
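/*
 * Mark I/O on a buffer complete: update the accounting, run the
 * B_CALL completion hook if one is set, and otherwise release (async)
 * or wake up (sync) the owner.  Must be called at IPL_BIO.
 */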
void
biodone(struct buf *bp)
{
splassert(IPL_BIO);
if (ISSET(bp->b_flags, B_DONE))
panic("biodone already");
SET(bp->b_flags, B_DONE);
if (bp->b_bq)
bufq_done(bp->b_bq, bp);
if (!ISSET(bp->b_flags, B_READ)) {
CLR(bp->b_flags, B_WRITEINPROG);
vwakeup(bp->b_vp);
}
if (bcstats.numbufs &&
(!(ISSET(bp->b_flags, B_RAW) || ISSET(bp->b_flags, B_PHYS)))) {
if (!ISSET(bp->b_flags, B_READ)) {
bcstats.pendingwrites--;
} else
bcstats.pendingreads--;
}
if (ISSET(bp->b_flags, B_CALL)) {
CLR(bp->b_flags, B_CALL);
(*bp->b_iodone)(bp);
} else {
if (ISSET(bp->b_flags, B_ASYNC)) {
brelse(bp);
} else {
CLR(bp->b_flags, B_WANTED);
wakeup(bp);
}
}
}
#ifdef DDB
void bcstats_print(int (*)(const char *, ...)
__attribute__((__format__(__kprintf__,1,2))));
void
bcstats_print(
int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
(*pr)("Current Buffer Cache status:\n");
(*pr)("numbufs %lld busymapped %lld, delwri %lld\n",
bcstats.numbufs, bcstats.busymapped, bcstats.delwribufs);
(*pr)("kvaslots %lld avail kva slots %lld\n",
bcstats.kvaslots, bcstats.kvaslots_avail);
(*pr)("bufpages %lld, dmapages %lld, dirtypages %lld\n",
bcstats.numbufpages, bcstats.dmapages, bcstats.numdirtypages);
(*pr)("pendingreads %lld, pendingwrites %lld\n",
bcstats.pendingreads, bcstats.pendingwrites);
(*pr)("highflips %lld, highflops %lld, dmaflips %lld\n",
bcstats.highflips, bcstats.highflops, bcstats.dmaflips);
}
#endif
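/* Set the buffer's valid byte count; it may not exceed the allocation. */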
void
buf_adjcnt(struct buf *bp, long ncount)
{
KASSERT(ncount <= bp->b_bufsize);
bp->b_bcount = ncount;
}
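/*
 * The clean caches are 2Q inspired: each has a hot, a warm and a cold
 * queue.  New buffers enter the hot queue; chillbufs() demotes the
 * oldest buffers of an oversized hot or warm queue to the cold queue,
 * which is where victims are taken from first.  A cold buffer that is
 * referenced again returns to the warm queue on release.  Cache 0
 * (DMA_CACHE) holds DMA-reachable buffers; when fliphigh is set,
 * clean buffers reclaimed from it are flipped into the high-memory
 * cache rather than freed outright.
 */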
int chillbufs(struct bufcache *cache, struct bufqueue *queue,
int64_t *queuepages);
void
bufcache_init(void)
{
int i;
for (i = 0; i < NUM_CACHES; i++) {
TAILQ_INIT(&cleancache[i].hotqueue);
TAILQ_INIT(&cleancache[i].coldqueue);
TAILQ_INIT(&cleancache[i].warmqueue);
}
TAILQ_INIT(&dirtyqueue);
}
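/* Rebalance every cache's hot and warm queues against their limits. */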
void
bufcache_adjust(void)
{
int i;
for (i = 0; i < NUM_CACHES; i++) {
while (chillbufs(&cleancache[i], &cleancache[i].warmqueue,
&cleancache[i].warmbufpages) ||
chillbufs(&cleancache[i], &cleancache[i].hotqueue,
&cleancache[i].hotbufpages))
continue;
}
}
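/*
 * Pull the next victim from cache "cachenum", coldest queue first.
 * Unless the caller is discarding, buffers from a lower cache are
 * flipped up into the next cache instead of being handed out, so only
 * buffers from the last cache (or flip failures) are returned.
 */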
struct buf *
bufcache_getcleanbuf(int cachenum, int discard)
{
struct buf *bp = NULL;
struct bufcache *cache = &cleancache[cachenum];
struct bufqueue *queue;
splassert(IPL_BIO);
while ((bp = TAILQ_FIRST(&cache->coldqueue)) ||
(bp = TAILQ_FIRST(&cache->warmqueue)) ||
(bp = TAILQ_FIRST(&cache->hotqueue))) {
int64_t pages = atop(bp->b_bufsize);
struct bufcache *newcache;
if (discard || cachenum >= NUM_CACHES - 1) {
return bp;
}
KASSERT(bp->cache == cachenum);
if (fliphigh) {
SET(bp->b_flags, B_BUSY);
if (bp->cache == 0 && buf_flip_high(bp) == -1) {
CLR(bp->b_flags, B_BUSY);
return bp;
}
CLR(bp->b_flags, B_BUSY);
}
if (ISSET(bp->b_flags, B_COLD)) {
queue = &cache->coldqueue;
} else if (ISSET(bp->b_flags, B_WARM)) {
queue = &cache->warmqueue;
cache->warmbufpages -= pages;
} else {
queue = &cache->hotqueue;
cache->hotbufpages -= pages;
}
TAILQ_REMOVE(queue, bp, b_freelist);
cache->cachepages -= pages;
CLR(bp->b_flags, B_WARM);
CLR(bp->b_flags, B_COLD);
bp->cache++;
newcache = &cleancache[bp->cache];
newcache->cachepages += pages;
newcache->hotbufpages += pages;
chillbufs(newcache, &newcache->hotqueue,
&newcache->hotbufpages);
TAILQ_INSERT_TAIL(&newcache->hotqueue, bp, b_freelist);
}
return bp;
}
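/* Take a buffer out of the cache, off its vnode, and free it. */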
void
discard_buffer(struct buf *bp)
{
splassert(IPL_BIO);
bufcache_take(bp);
if (bp->b_vp) {
RBT_REMOVE(buf_rb_bufs,
&bp->b_vp->v_bufs_tree, bp);
brelvp(bp);
}
buf_put(bp);
}
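/*
 * Recover up to "howmany" pages of DMA-reachable memory from the
 * DMA_CACHE queues, coldest buffers first, by discarding buffers or,
 * when fliphigh is set, by flipping them into high memory.  Returns
 * the number of pages recovered.  Must be called at IPL_BIO.
 */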
int64_t
bufcache_recover_dmapages(int discard, int64_t howmany)
{
struct buf *bp = NULL;
struct bufcache *cache = &cleancache[DMA_CACHE];
struct bufqueue *queue;
int64_t recovered = 0;
splassert(IPL_BIO);
while ((recovered < howmany) &&
((bp = TAILQ_FIRST(&cache->coldqueue)) ||
(bp = TAILQ_FIRST(&cache->warmqueue)) ||
(bp = TAILQ_FIRST(&cache->hotqueue)))) {
int64_t pages = atop(bp->b_bufsize);
struct bufcache *newcache;
if (discard || DMA_CACHE >= NUM_CACHES - 1) {
/* victim selected, count it and give it up */
recovered += pages;
discard_buffer(bp);
continue;
}
KASSERT(bp->cache == DMA_CACHE);
recovered += pages;
if (!fliphigh) {
discard_buffer(bp);
continue;
}
SET(bp->b_flags, B_BUSY);
if (bp->cache == 0 && buf_flip_high(bp) == -1) {
CLR(bp->b_flags, B_BUSY);
discard_buffer(bp);
continue;
}
CLR(bp->b_flags, B_BUSY);
if (ISSET(bp->b_flags, B_COLD)) {
queue = &cache->coldqueue;
} else if (ISSET(bp->b_flags, B_WARM)) {
queue = &cache->warmqueue;
cache->warmbufpages -= pages;
} else {
queue = &cache->hotqueue;
cache->hotbufpages -= pages;
}
TAILQ_REMOVE(queue, bp, b_freelist);
cache->cachepages -= pages;
CLR(bp->b_flags, B_WARM);
CLR(bp->b_flags, B_COLD);
bp->cache++;
newcache = &cleancache[bp->cache];
newcache->cachepages += pages;
newcache->hotbufpages += pages;
chillbufs(newcache, &newcache->hotqueue,
&newcache->hotbufpages);
TAILQ_INSERT_TAIL(&newcache->hotqueue, bp, b_freelist);
}
return recovered;
}
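/*
 * Scan caches "end" down to "start" for a clean buffer, retrying with
 * a narrower range until one is found or the whole range is empty.
 */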
struct buf *
bufcache_getcleanbuf_range(int start, int end, int discard)
{
int i, j = start, q = end;
struct buf *bp = NULL;
while (j <= q) {
for (i = q; i >= j; i--)
if ((bp = bufcache_getcleanbuf(i, discard)))
return (bp);
j++;
}
return bp;
}
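/* Get a clean high-memory buffer, if buffers are flipped high at all. */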
struct buf *
bufcache_gethighcleanbuf(void)
{
if (!fliphigh)
return NULL;
return bufcache_getcleanbuf_range(DMA_CACHE + 1, NUM_CACHES - 1, 0);
}
struct buf *
bufcache_getdirtybuf(void)
{
return TAILQ_FIRST(&dirtyqueue);
}
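/*
 * Remove a buffer from its cache or dirty queue and adjust the page
 * accounting.  Must be called at IPL_BIO.
 */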
void
bufcache_take(struct buf *bp)
{
struct bufqueue *queue;
int64_t pages;
splassert(IPL_BIO);
KASSERT(ISSET(bp->b_flags, B_BC));
KASSERT(bp->cache >= DMA_CACHE);
KASSERT((bp->cache < NUM_CACHES));
pages = atop(bp->b_bufsize);
TRACEPOINT(vfs, bufcache_take, bp->b_flags, bp->cache, pages);
struct bufcache *cache = &cleancache[bp->cache];
if (!ISSET(bp->b_flags, B_DELWRI)) {
if (ISSET(bp->b_flags, B_COLD)) {
queue = &cache->coldqueue;
} else if (ISSET(bp->b_flags, B_WARM)) {
queue = &cache->warmqueue;
cache->warmbufpages -= pages;
} else {
queue = &cache->hotqueue;
cache->hotbufpages -= pages;
}
bcstats.numcleanpages -= pages;
cache->cachepages -= pages;
} else {
queue = &dirtyqueue;
bcstats.numdirtypages -= pages;
bcstats.delwribufs--;
}
TAILQ_REMOVE(queue, bp, b_freelist);
}
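/*
 * If "queue" (a hot or warm queue of "cache") is over its limit,
 * demote its oldest buffer to the cold queue.  The hot queue is
 * capped at 5% of the cache (at most 4096 pages), the warm queue at
 * half of it; queues of 96 pages or fewer are left alone.  Returns 1
 * if a buffer was moved, so callers can loop until balanced.
 */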
int
chillbufs(struct bufcache *cache, struct bufqueue *queue, int64_t *queuepages)
{
struct buf *bp;
int64_t limit, pages;
if (queue == &cache->hotqueue)
limit = min(cache->cachepages / 20, 4096);
else if (queue == &cache->warmqueue)
limit = (cache->cachepages / 2);
else
panic("chillbufs: invalid queue");
if (*queuepages > 96 && *queuepages > limit) {
bp = TAILQ_FIRST(queue);
if (!bp)
panic("inconsistent bufpage counts");
pages = atop(bp->b_bufsize);
*queuepages -= pages;
TAILQ_REMOVE(queue, bp, b_freelist);
SET(bp->b_flags, B_COLD);
TAILQ_INSERT_TAIL(&cache->coldqueue, bp, b_freelist);
return 1;
}
return 0;
}
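/*
 * Put a buffer back on the queues: dirty buffers go on the dirty
 * queue; clean ones go on their cache's hot queue, or on the warm
 * queue if they have already aged once, and the queues are then
 * rebalanced with chillbufs().
 */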
void
bufcache_release(struct buf *bp)
{
struct bufqueue *queue;
int64_t pages;
struct bufcache *cache = &cleancache[bp->cache];
splassert(IPL_BIO);
KASSERT(ISSET(bp->b_flags, B_BC));
pages = atop(bp->b_bufsize);
TRACEPOINT(vfs, bufcache_rel, bp->b_flags, bp->cache, pages);
if (fliphigh) {
if (ISSET(bp->b_flags, B_DMA) && bp->cache > 0)
panic("B_DMA buffer release from cache %d",
bp->cache);
else if ((!ISSET(bp->b_flags, B_DMA)) && bp->cache == 0)
panic("Non B_DMA buffer release from cache %d",
bp->cache);
}
if (!ISSET(bp->b_flags, B_DELWRI)) {
int64_t *queuepages;
if (ISSET(bp->b_flags, B_WARM | B_COLD)) {
SET(bp->b_flags, B_WARM);
CLR(bp->b_flags, B_COLD);
queue = &cache->warmqueue;
queuepages = &cache->warmbufpages;
} else {
queue = &cache->hotqueue;
queuepages = &cache->hotbufpages;
}
*queuepages += pages;
bcstats.numcleanpages += pages;
cache->cachepages += pages;
chillbufs(cache, queue, queuepages);
} else {
queue = &dirtyqueue;
bcstats.numdirtypages += pages;
bcstats.delwribufs++;
}
TAILQ_INSERT_TAIL(queue, bp, b_freelist);
}
#ifdef HIBERNATE
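/*
 * Nuke the entire clean cache when hibernating: these pages need not
 * be saved to the hibernate image since the data can simply be read
 * back from disk on resume.
 */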
void
hibernate_suspend_bufcache(void)
{
struct buf *bp;
int s;
s = splbio();
while ((bp = bufcache_getcleanbuf_range(DMA_CACHE, NUM_CACHES - 1, 1))) {
bufcache_take(bp);
if (bp->b_vp) {
RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp);
brelvp(bp);
}
buf_put(bp);
}
splx(s);
}
void
hibernate_resume_bufcache(void)
{
}
#endif