#ifndef _SYS_BUF_H_
#define _SYS_BUF_H_
#include <sys/_exterr.h>
#include <sys/bufobj.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <vm/uma.h>
struct bio;
struct buf;
struct bufobj;
struct mount;
struct vnode;
struct uio;
LIST_HEAD(workhead, worklist);
/*
 * Hooks allowing a filesystem to intercept buffer I/O events.  Each
 * pointer may be NULL; callers (buf_start(), buf_complete(), etc. below)
 * check before invoking.
 */
extern struct bio_ops {
void (*io_start)(struct buf *);	/* called as buffer I/O is started */
void (*io_complete)(struct buf *);	/* called as buffer I/O completes */
void (*io_deallocate)(struct buf *);	/* called when buffer is deallocated */
int (*io_countdeps)(struct buf *, int);	/* count dependencies on a buffer */
} bioops;
struct vm_object;
struct vm_page;
typedef uint32_t b_xflags_t;
/*
 * The buffer header: describes a kernel I/O operation on a (vnode,
 * logical block) pair.  The trailing b_pages[] flexible array holds the
 * VM pages backing the data.
 * NOTE(review): field summaries below are inferred from names and usage
 * in this header -- confirm details against vfs_bio.c.
 */
struct buf {
struct bufobj *b_bufobj;	/* buffer object this buf belongs to */
long b_bcount;	/* valid bytes in the buffer */
void *b_caller1;	/* opaque pointer for the caller's use */
caddr_t b_data;	/* kernel address of the data (or unmapped_buf) */
uint16_t b_iocmd;	/* I/O command (presumably BIO_*) */
uint16_t b_ioflags;	/* I/O status flags (presumably BIO_*) */
off_t b_iooffset;	/* byte offset of the I/O */
long b_resid;	/* bytes remaining after I/O */
void (*b_iodone)(struct buf *);	/* I/O completion callback */
void (*b_ckhashcalc)(struct buf *);	/* check-hash calculation callback */
uint64_t b_ckhash;	/* check-hash computed when B_CKHASH is set */
daddr_t b_blkno;	/* underlying physical block number */
off_t b_offset;	/* byte offset within the object */
TAILQ_ENTRY(buf) b_bobufs;	/* linkage on the bufobj's buf list */
uint32_t b_vflags;	/* BV_* flags */
uint8_t b_qindex;	/* buffer queue index */
uint8_t b_domain;	/* buf domain this buf belongs to */
uint16_t b_subqueue;	/* sub-queue index within the queue */
uint32_t b_flags;	/* B_* flags */
b_xflags_t b_xflags;	/* BX_* extended flags */
struct lock b_lock;	/* the buffer's lock (see BUF_LOCK() et al.) */
long b_bufsize;	/* allocated buffer size */
int b_runningbufspace;	/* space reserved while I/O is in flight */
int b_kvasize;	/* size of the buffer's KVA */
int b_dirtyoff;	/* offset of the start of the dirty region */
int b_dirtyend;	/* offset of the end of the dirty region */
caddr_t b_kvabase;	/* base KVA of the buffer */
daddr_t b_lblkno;	/* logical block number */
struct vnode *b_vp;	/* vnode the buffer belongs to */
struct ucred *b_rcred;	/* credentials used for reads */
struct ucred *b_wcred;	/* credentials used for writes */
union {
TAILQ_ENTRY(buf) b_freelist;	/* linkage on a buffer free list */
struct {
/* Page-I/O completion callback and page-run bounds. */
void (*b_pgiodone)(void *, struct vm_page **,
int, int);
int b_pgbefore;
int b_pgafter;
};
};
union cluster_info {
TAILQ_HEAD(cluster_list_head, buf) cluster_head;	/* head of cluster */
TAILQ_ENTRY(buf) cluster_entry;	/* member of cluster */
} b_cluster;
int b_npages;	/* number of pages in b_pages[] */
struct workhead b_dep;	/* list of filesystem dependencies */
void *b_fsprivate1;	/* filesystem-private pointers */
void *b_fsprivate2;
void *b_fsprivate3;
#if defined(FULL_BUF_TRACKING)
#define BUF_TRACKING_SIZE 32
#define BUF_TRACKING_ENTRY(x) ((x) & (BUF_TRACKING_SIZE - 1))
const char *b_io_tracking[BUF_TRACKING_SIZE];	/* ring of location strings */
uint32_t b_io_tcnt;	/* ring insertion counter (see buf_track()) */
#elif defined(BUF_TRACKING)
const char *b_io_tracking;	/* most recent location string */
#endif
struct kexterr b_exterr;	/* extended error state (see b_error) */
struct vm_page *b_pages[];	/* pages backing the buffer's data */
};
#define b_object b_bufobj->bo_object
#define b_error b_exterr.error
#define B_AGE 0x00000001
#define B_NEEDCOMMIT 0x00000002
#define B_ASYNC 0x00000004
#define B_DIRECT 0x00000008
#define B_DEFERRED 0x00000010
#define B_CACHE 0x00000020
#define B_VALIDSUSPWRT 0x00000040
#define B_DELWRI 0x00000080
#define B_CKHASH 0x00000100
#define B_DONE 0x00000200
#define B_EINTR 0x00000400
#define B_NOREUSE 0x00000800
#define B_REUSE 0x00001000
#define B_INVAL 0x00002000
#define B_BARRIER 0x00004000
#define B_NOCACHE 0x00008000
#define B_MALLOC 0x00010000
#define B_CLUSTEROK 0x00020000
#define B_INVALONERR 0x00040000
#define B_IOSTARTED 0x00080000
#define B_00100000 0x00100000
#define B_MAXPHYS 0x00200000
#define B_RELBUF 0x00400000
#define B_FS_FLAG1 0x00800000
#define B_NOCOPY 0x01000000
#define B_INFREECNT 0x02000000
#define B_PAGING 0x04000000
#define B_MANAGED 0x08000000
#define B_RAM 0x10000000
#define B_VMIO 0x20000000
#define B_CLUSTER 0x40000000
#define B_REMFREE 0x80000000
#define PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34managed" \
"\33paging\32infreecnt\31nocopy\30b23\27relbuf\26maxphys\25b20" \
"\24iostarted\23invalonerr\22clusterok\21malloc\20nocache\17b14" \
"\16inval\15reuse\14noreuse\13eintr\12done\11b8\10delwri" \
"\7validsuspwrt\6cache\5deferred\4direct\3async\2needcommit\1age"
/*
 * Extended buffer flags, kept in b_xflags.
 * NOTE(review): summaries inferred from names; confirm in-tree usage.
 */
#define BX_VNDIRTY 0x00000001	/* buffer is on the vnode's dirty list */
#define BX_VNCLEAN 0x00000002	/* buffer is on the vnode's clean list */
#define BX_CVTENXIO 0x00000004	/* convert I/O errors to ENXIO */
#define BX_BKGRDWRITE 0x00000010	/* write this buffer in the background */
#define BX_BKGRDMARKER 0x00000020	/* placeholder for a background write */
#define BX_ALTDATA 0x00000040	/* buffer holds alternate data */
#define BX_FSPRIV 0x00FF0000	/* bits reserved for filesystem use */
/* Bit-name string for %b-style printing of b_xflags. */
#define PRINT_BUF_XFLAGS "\20\7altdata\6bkgrdmarker\5bkgrdwrite\3cvtenxio" \
"\2clean\1dirty"
/* Sentinel: no byte offset has been computed for the buffer yet. */
#define NOOFFSET (-1LL)
/*
 * Flags kept in b_vflags.
 */
#define BV_SCANNED 0x00000001	/* buffer visited by an fsync-style scan */
#define BV_BKGRDINPROG 0x00000002	/* background write in progress */
#define BV_BKGRDWAIT 0x00000004	/* thread waiting for background write */
#define BV_BKGRDERR 0x00000008	/* error from a background write */
/* Bit-name string for %b-style printing of b_vflags. */
#define PRINT_BUF_VFLAGS "\20\4bkgrderr\3bkgrdwait\2bkgrdinprog\1scanned"
#ifdef _KERNEL
#ifndef NSWBUF_MIN
#define NSWBUF_MIN 16
#endif
#include <sys/proc.h>
#include <sys/mutex.h>
/* Initialize the buffer's lock (b_lock) with the given wait message. */
#define BUF_LOCKINIT(bp, wmesg) \
lockinit(&(bp)->b_lock, PVFS, wmesg, 0, LK_NEW)
/* Acquire b_lock; `interlock' (may be NULL) is released during the wait. */
#define BUF_LOCK(bp, locktype, interlock) \
_lockmgr_args_rw(&(bp)->b_lock, (locktype), (interlock), \
LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, \
LOCK_FILE, LOCK_LINE)
/* As BUF_LOCK, but bounded by `timo' and with priority flags `catch'. */
#define BUF_TIMELOCK(bp, locktype, interlock, wmesg, catch, timo) \
_lockmgr_args_rw(&(bp)->b_lock, (locktype) | LK_TIMELOCK, \
(interlock), (wmesg), PVFS | (catch), (timo), \
LOCK_FILE, LOCK_LINE)
/*
 * Release b_lock, asserting that B_REMFREE has been processed unless the
 * lock is recursively held.
 */
#define BUF_UNLOCK(bp) do { \
KASSERT(((bp)->b_flags & B_REMFREE) == 0 || BUF_LOCKRECURSED(bp), \
("BUF_UNLOCK %p while B_REMFREE is still set.", (bp))); \
\
BUF_UNLOCK_RAW((bp)); \
} while (0)
/* Release b_lock without the B_REMFREE sanity check. */
#define BUF_UNLOCK_RAW(bp) do { \
(void)_lockmgr_args(&(bp)->b_lock, LK_RELEASE, NULL, \
LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, \
LOCK_FILE, LOCK_LINE); \
} while (0)
/* True if b_lock is recursively held. */
#define BUF_LOCKRECURSED(bp) \
lockmgr_recursed(&(bp)->b_lock)
/* Current lock status of b_lock. */
#define BUF_ISLOCKED(bp) \
lockstatus(&(bp)->b_lock)
/* True if b_lock has been disowned. */
#define BUF_DISOWNED(bp) \
lockmgr_disowned(&(bp)->b_lock)
/* Destroy b_lock before the buffer itself is freed. */
#define BUF_LOCKFREE(bp) \
lockdestroy(&(bp)->b_lock)
/* Print information about b_lock (debugging support). */
#define BUF_LOCKPRINTINFO(bp) \
lockmgr_printinfo(&(bp)->b_lock)
#if defined(INVARIANTS) && defined(INVARIANT_SUPPORT)
/* Lock-state assertions; compiled away unless INVARIANTS is enabled. */
#define BUF_ASSERT_LOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_LOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_SLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_SLOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_XLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_XLOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_UNLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_UNLOCKED, LOCK_FILE, LOCK_LINE)
#else
#define BUF_ASSERT_LOCKED(bp)
#define BUF_ASSERT_SLOCKED(bp)
#define BUF_ASSERT_XLOCKED(bp)
#define BUF_ASSERT_UNLOCKED(bp)
#endif
#ifdef _SYS_PROC_H_
/* Disown b_lock so that it can be released from a different context. */
#define BUF_KERNPROC(bp) \
_lockmgr_disown(&(bp)->b_lock, LOCK_FILE, LOCK_LINE)
#endif
/*
 * Copy extended error state between a buffer and the current thread.
 * The macro argument is parenthesized for macro hygiene (the original
 * expanded `bp' unparenthesized, which misbehaves for non-trivial
 * arguments); the expansion stays a single assignment expression.
 */
#define BUF_EXTERR_FROM_CURTHR(bp) \
	((bp)->b_exterr = curthread->td_kexterr)
#define BUF_EXTERR_TO_CURTHR(bp) \
	(curthread->td_kexterr = (bp)->b_exterr)
#endif
/*
 * Head of a buffer queue ordered by physical block number
 * (NOTE(review): disksort-style structure inferred from fields --
 * confirm current users).
 */
struct buf_queue_head {
TAILQ_HEAD(buf_queue, buf) queue;	/* the queue of buffers */
daddr_t last_pblkno;	/* physical block number of last insertion */
struct buf *insert_point;	/* insertion hint */
struct buf *switch_point;	/* wrap-around hint */
};
/*
 * Saved state for the component buffers of a cluster I/O, so the
 * operation can be accounted for and undone.
 */
struct cluster_save {
long bs_bcount;	/* total byte count of the cluster */
long bs_bufsize;	/* total buffer size of the cluster */
int bs_nchildren;	/* number of buffers in bs_children */
struct buf **bs_children;	/* array of the component buffers */
};
/*
 * Per-vnode sequential-write clustering state, used by
 * cluster_init_vn()/cluster_write() declared later in this header.
 */
struct vn_clusterw {
daddr_t v_cstart;	/* starting block of the current cluster */
daddr_t v_lasta;	/* last allocated block */
daddr_t v_lastw;	/* last written block */
int v_clen;	/* length of the current cluster */
};
#ifdef _KERNEL
static __inline int
bwrite(struct buf *bp)
{
KASSERT(bp->b_bufobj != NULL, ("bwrite: no bufobj bp=%p", bp));
KASSERT(bp->b_bufobj->bo_ops != NULL, ("bwrite: no bo_ops bp=%p", bp));
KASSERT(bp->b_bufobj->bo_ops->bop_write != NULL,
("bwrite: no bop_write bp=%p", bp));
return (BO_WRITE(bp->b_bufobj, bp));
}
static __inline void
bstrategy(struct buf *bp)
{
KASSERT(bp->b_bufobj != NULL, ("bstrategy: no bufobj bp=%p", bp));
KASSERT(bp->b_bufobj->bo_ops != NULL,
("bstrategy: no bo_ops bp=%p", bp));
KASSERT(bp->b_bufobj->bo_ops->bop_strategy != NULL,
("bstrategy: no bop_strategy bp=%p", bp));
BO_STRATEGY(bp->b_bufobj, bp);
}
static __inline void
buf_start(struct buf *bp)
{
KASSERT((bp->b_flags & B_IOSTARTED) == 0,
("recursed buf_start %p", bp));
bp->b_flags |= B_IOSTARTED;
if (bioops.io_start)
(*bioops.io_start)(bp);
}
static __inline void
buf_complete(struct buf *bp)
{
if ((bp->b_flags & B_IOSTARTED) != 0) {
bp->b_flags &= ~B_IOSTARTED;
if (bioops.io_complete)
(*bioops.io_complete)(bp);
}
}
static __inline void
buf_deallocate(struct buf *bp)
{
if (bioops.io_deallocate)
(*bioops.io_deallocate)(bp);
}
static __inline int
buf_countdeps(struct buf *bp, int i)
{
if (bioops.io_countdeps)
return ((*bioops.io_countdeps)(bp, i));
else
return (0);
}
/*
 * Record `location' in the buffer's I/O tracking state.  A no-op unless
 * the kernel is built with FULL_BUF_TRACKING or BUF_TRACKING.
 */
static __inline void
buf_track(struct buf *bp __unused, const char *location __unused)
{
#if defined(FULL_BUF_TRACKING)
/* Ring buffer of the last BUF_TRACKING_SIZE location strings. */
bp->b_io_tracking[BUF_TRACKING_ENTRY(bp->b_io_tcnt++)] = location;
#elif defined(BUF_TRACKING)
/* Only the most recent location is kept. */
bp->b_io_tracking = location;
#endif
}
#endif
#define clrbuf(bp) { \
bzero((bp)->b_data, (u_int)(bp)->b_bcount); \
(bp)->b_resid = 0; \
}
/*
 * Flags for getblk() and friends ("gbflags").
 * NOTE(review): summaries inferred from names; confirm against vfs_bio.c.
 */
#define GB_LOCK_NOWAIT 0x0001	/* fail if the buffer lock can't be taken */
#define GB_NOCREAT 0x0002	/* don't create a buffer if none exists */
#define GB_NOWAIT_BD 0x0004	/* don't wait on buf-daemon resources */
#define GB_UNMAPPED 0x0008	/* an unmapped buffer may be returned */
#define GB_KVAALLOC 0x0010	/* allocate KVA even when GB_UNMAPPED */
#define GB_CKHASH 0x0020	/* calculate a check-hash during the read */
#define GB_NOSPARSE 0x0040	/* don't instantiate sparse (hole) blocks */
#define GB_CVTENXIO 0x0080	/* convert errors to ENXIO */
#define GB_NOWITNESS 0x0100	/* skip WITNESS lock checking */
#ifdef _KERNEL
extern int nbuf;
extern u_long maxswzone;
extern u_long maxbcache;
extern int maxbcachebuf;
extern long hibufspace;
extern int dirtybufthresh;
extern int bdwriteskip;
extern int dirtybufferflushes;
extern int altbufferflushes;
extern int nswbuf;
extern caddr_t __read_mostly unmapped_buf;
/*
 * Return non-zero iff the buffer's data is mapped into kernel address
 * space, i.e. b_data does not point at the shared unmapped_buf
 * sentinel.
 */
static inline int
buf_mapped(struct buf *bp)
{
	if (bp->b_data == unmapped_buf)
		return (0);
	return (1);
}
long runningbufclaim(struct buf *, int);
void runningbufwakeup(struct buf *);
void waitrunningbufspace(void);
caddr_t kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est);
void bufinit(void);
void bufshutdown(int);
void bdata2bio(struct buf *bp, struct bio *bip);
void bwillwrite(void);
int buf_dirty_count_severe(void);
void bremfree(struct buf *);
void bremfreef(struct buf *);
/*
 * Convenience wrappers around breadn_flags(): a plain read (bread), a
 * read with getblk flags (bread_gb), and a read with read-ahead of the
 * given blocks (breadn).
 */
#define bread(vp, blkno, size, cred, bpp) \
breadn_flags(vp, blkno, blkno, size, NULL, NULL, 0, cred, 0, \
NULL, bpp)
#define bread_gb(vp, blkno, size, cred, gbflags, bpp) \
breadn_flags(vp, blkno, blkno, size, NULL, NULL, 0, cred, \
gbflags, NULL, bpp)
#define breadn(vp, blkno, size, rablkno, rabsize, cnt, cred, bpp) \
breadn_flags(vp, blkno, blkno, size, rablkno, rabsize, cnt, cred, \
0, NULL, bpp)
int breadn_flags(struct vnode *, daddr_t, daddr_t, int, daddr_t *, int *,
int, struct ucred *, int, void (*)(struct buf *), struct buf **);
void bdwrite(struct buf *);
void bawrite(struct buf *);
void babarrierwrite(struct buf *);
int bbarrierwrite(struct buf *);
void bdirty(struct buf *);
void bundirty(struct buf *);
void bufstrategy(struct bufobj *, struct buf *);
void brelse(struct buf *);
void bqrelse(struct buf *);
int vfs_bio_awrite(struct buf *);
void vfs_busy_pages_acquire(struct buf *bp);
void vfs_busy_pages_release(struct buf *bp);
struct buf *incore(struct bufobj *, daddr_t);
bool inmem(struct vnode *, daddr_t);
struct buf *gbincore(struct bufobj *, daddr_t);
struct buf *gbincore_unlocked(struct bufobj *, daddr_t);
struct buf *getblk(struct vnode *, daddr_t, int, int, int, int);
int getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size,
int slpflag, int slptimeo, int flags, struct buf **bpp);
struct buf *geteblk(int, int);
int bufwait(struct buf *);
int bufwrite(struct buf *);
void bufdone(struct buf *);
void bd_speedup(void);
extern uma_zone_t pbuf_zone;
uma_zone_t pbuf_zsecond_create(const char *name, int max);
struct vn_clusterw;
void cluster_init_vn(struct vn_clusterw *vnc);
int cluster_read(struct vnode *, u_quad_t, daddr_t, long,
struct ucred *, long, int, int, struct buf **);
int cluster_wbuild(struct vnode *, long, daddr_t, int, int);
void cluster_write(struct vnode *, struct vn_clusterw *, struct buf *,
u_quad_t, int, int);
void vfs_bio_brelse(struct buf *bp, int ioflags);
void vfs_bio_bzero_buf(struct buf *bp, int base, int size);
void vfs_bio_clrbuf(struct buf *);
void vfs_bio_set_flags(struct buf *bp, int ioflags);
void vfs_bio_set_valid(struct buf *, int base, int size);
void vfs_busy_pages(struct buf *, int clear_modify);
void vfs_unbusy_pages(struct buf *);
int vmapbuf(struct buf *, void *, size_t, int);
void vunmapbuf(struct buf *);
void brelvp(struct buf *);
int bgetvp(struct vnode *, struct buf *) __result_use_check;
void pbgetbo(struct bufobj *bo, struct buf *bp);
void pbgetvp(struct vnode *, struct buf *);
void pbrelbo(struct buf *);
void pbrelvp(struct buf *);
int allocbuf(struct buf *bp, int size);
void reassignbuf(struct buf *);
void bwait(struct buf *, u_char, const char *);
void bdone(struct buf *);
typedef daddr_t (vbg_get_lblkno_t)(struct vnode *, vm_ooffset_t);
typedef int (vbg_get_blksize_t)(struct vnode *, daddr_t, long *);
int vfs_bio_getpages(struct vnode *vp, struct vm_page **ma, int count,
int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
vbg_get_blksize_t get_blksize);
#endif
#endif