#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/fbuf.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/dkio.h>
#include <sys/cmn_err.h>
#include <sys/atomic.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <vm/page.h>
#include <sys/swap.h>
#include <sys/avl.h>
#include <sys/sunldi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sdt.h>
#include <sys/modctl.h>
#include <sys/fs/hsfs_spec.h>
#include <sys/fs/hsfs_node.h>
#include <sys/fs/hsfs_impl.h>
#include <sys/fs/hsfs_susp.h>
#include <sys/fs/hsfs_rrip.h>
#include <fs/fs_subr.h>
/*
 * Tunables and state for the hsfs I/O scheduler / read-ahead machinery.
 */
static int seq_contig_requests = 2;	/* sequential getpage calls before read-ahead ramps (see hsfs_getpage) */
static int hsfs_taskq_nthreads = 8;	/* threads in the per-fs read-ahead taskq */
static int hsched_coalesce_min = 2;	/* more than this many adjacent bufs get coalesced into one I/O */

/* kmem caches for scheduler descriptors; created in hsched_init_caches() */
struct kmem_cache *hio_cache;
struct kmem_cache *hio_info_cache;

extern int use_rrip_inodes;

static int hsched_deadline_compare(const void *x1, const void *x2);
static int hsched_offset_compare(const void *x1, const void *x2);
static void hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra);
int hsched_invoke_strategy(struct hsfs *fsp);
/*
 * VOP_FSYNC: hsfs is read-only, so there is never anything to flush.
 */
static int
hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred, caller_context_t *ct)
{
	return (0);
}
/*
 * VOP_READ: copy file data into the caller's uio one MAXBSIZE segmap
 * window at a time.  hsfs is read-only, so no write path exists here.
 * Returns 0 on success (including reads at/past EOF) or an errno from
 * uiomove()/segmap_release().
 */
static int
hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
	struct caller_context *ct)
{
	caddr_t base;
	offset_t diff;		/* bytes between current offset and EOF */
	int error;
	struct hsnode *hp;
	uint_t filesize;

	hp = VTOH(vp);

	/*
	 * A directory's extent size may not have been filled in yet;
	 * do it now so the EOF checks below are meaningful.
	 */
	if (vp->v_type == VDIR) {
		if (hp->hs_dirent.ext_size == 0)
			hs_filldirent(vp, &hp->hs_dirent);
	}
	filesize = hp->hs_dirent.ext_size;

	/* Nothing to do for empty requests or reads at/past EOF. */
	if (uiop->uio_resid == 0 ||
	    uiop->uio_loffset > HS_MAXFILEOFF ||
	    uiop->uio_loffset >= filesize)
		return (0);

	do {
		size_t nbytes;
		offset_t mapon;		/* offset within the segmap window */
		size_t n;		/* bytes to move this iteration */
		uint_t flags;

		mapon = uiop->uio_loffset & MAXBOFFSET;
		diff = filesize - uiop->uio_loffset;
		nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
		n = MIN(diff, nbytes);
		if (n <= 0) {
			/* EOF reached exactly on a window boundary. */
			return (0);
		}

		base = segmap_getmapflt(segkmap, vp,
		    (u_offset_t)uiop->uio_loffset, n, 1, S_READ);

		error = uiomove(base + mapon, n, UIO_READ, uiop);

		if (error == 0) {
			/*
			 * Drop the pages eagerly (SM_DONTNEED) when the
			 * whole window was consumed or EOF was reached;
			 * they are unlikely to be re-read soon.
			 */
			if (n + mapon == MAXBSIZE ||
			    uiop->uio_loffset == filesize)
				flags = SM_DONTNEED;
			else
				flags = 0;
			error = segmap_release(segkmap, base, flags);
		} else
			(void) segmap_release(segkmap, base, 0);
	} while (error == 0 && uiop->uio_resid > 0);

	return (error);
}
/*
 * VOP_GETATTR: translate the cached hs_direntry of this node into the
 * caller's vattr.  On-disk timestamps carry microseconds; vattr wants
 * nanoseconds, hence the *1000 conversions.  Always succeeds.
 */
static int
hsfs_getattr(struct vnode *vp, struct vattr *vap, int flags, struct cred *cred,
	caller_context_t *ct)
{
	struct hsnode *hp = VTOH(vp);
	struct hsfs *fsp = VFS_TO_HSFS(vp->v_vfsp);
	struct vfs *vfsp = vp->v_vfsp;
	struct hs_direntry *dp;

	/* Directory extents are filled lazily; make sure ours is valid. */
	if (vp->v_type == VDIR && hp->hs_dirent.ext_size == 0)
		hs_filldirent(vp, &hp->hs_dirent);
	dp = &hp->hs_dirent;

	vap->va_type = IFTOVT(dp->mode);
	vap->va_mode = dp->mode;
	vap->va_uid = dp->uid;
	vap->va_gid = dp->gid;

	vap->va_fsid = vfsp->vfs_dev;
	vap->va_nodeid = (ino64_t)hp->hs_nodeid;
	vap->va_nlink = dp->nlink;
	vap->va_size = (offset_t)dp->ext_size;

	/* usec -> nsec for each of the three timestamps */
	vap->va_atime.tv_sec = dp->adate.tv_sec;
	vap->va_atime.tv_nsec = dp->adate.tv_usec * 1000;
	vap->va_mtime.tv_sec = dp->mdate.tv_sec;
	vap->va_mtime.tv_nsec = dp->mdate.tv_usec * 1000;
	vap->va_ctime.tv_sec = dp->cdate.tv_sec;
	vap->va_ctime.tv_nsec = dp->cdate.tv_usec * 1000;

	/* Only device nodes carry a meaningful rdev. */
	vap->va_rdev = (vp->v_type == VCHR || vp->v_type == VBLK) ?
	    dp->r_dev : 0;

	vap->va_blksize = vfsp->vfs_bsize;
	/* Block count includes the extended attribute record, if any. */
	vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
	    (dp->xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
	vap->va_seq = hp->hs_seq;
	return (0);
}
/*
 * VOP_READLINK: copy the cached symlink target into the caller's uio.
 * Fails with EINVAL on non-symlinks and ENOENT when no target text was
 * recorded for this node.
 */
static int
hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred,
	caller_context_t *ct)
{
	struct hsnode *hp;
	size_t amt;

	if (vp->v_type != VLNK)
		return (EINVAL);

	hp = VTOH(vp);
	if (hp->hs_dirent.sym_link == NULL)
		return (ENOENT);

	/* Copy no more than the link holds or the caller asked for. */
	amt = (size_t)MIN(hp->hs_dirent.ext_size, uiop->uio_resid);
	return (uiomove(hp->hs_dirent.sym_link, amt, UIO_READ, uiop));
}
/*
 * VOP_INACTIVE: called when a vnode's reference is being released.
 * If this was the last reference and the node is hashed (HREF), free it;
 * otherwise just drop the count and leave the node cached.
 *
 * Lock ordering here is: hsfs_hash_lock (writer) -> hs_contents_lock
 * -> v_lock.
 */
static void
hsfs_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
{
	struct hsnode *hp;
	struct hsfs *fsp;
	int nopage;	/* true if no pages remain cached for this vnode */

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);

	rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
	mutex_enter(&hp->hs_contents_lock);
	mutex_enter(&vp->v_lock);

	if (vp->v_count < 1) {
		panic("hsfs_inactive: v_count < 1");
	}

	VN_RELE_LOCKED(vp);
	if (vp->v_count > 0 || (hp->hs_flags & HREF) == 0) {
		/* Re-referenced meanwhile, or not hashed: keep the node. */
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
		rw_exit(&fsp->hsfs_hash_lock);
		return;
	}

	if (vp->v_count == 0) {
		/*
		 * Truly unreferenced: clear the flags and free the node.
		 * hs_freenode() needs to know whether pages remain.
		 */
		nopage = !vn_has_cached_data(vp);
		hp->hs_flags = 0;
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
		hs_freenode(vp, fsp, nopage);
	} else {
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
	}
	rw_exit(&fsp->hsfs_hash_lock);
}
/*
 * VOP_LOOKUP: resolve name 'nm' in directory 'dvp'.
 * "" and "." both resolve to the directory itself; "." additionally
 * requires search (VEXEC) permission.  Everything else is handed to
 * hs_dirlook().
 */
static int
hsfs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
	struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cred,
	caller_context_t *ct, int *direntflags, pathname_t *realpnp)
{
	int namelen = (int)strlen(nm);
	int error;

	/* An empty name means the directory itself. */
	if (*nm == '\0') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/* "." also means the directory, but needs search permission. */
	if (namelen == 1 && *nm == '.') {
		if ((error = hs_access(dvp, (mode_t)VEXEC, cred)) != 0)
			return (error);
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	return (hs_dirlook(dvp, nm, namelen, vpp, cred));
}
/*
 * VOP_READDIR: walk the on-disk directory extent, parse each raw
 * directory record with hs_parsedir(), and emit dirent64 entries into
 * a staging buffer that is finally uiomove()d to the caller.
 *
 * The uio offset is a byte offset into the raw directory extent, not
 * an entry index; it is advanced past each consumed on-disk record.
 */
static int
hsfs_readdir(struct vnode *vp, struct uio *uiop, struct cred *cred, int *eofp,
	caller_context_t *ct, int flags)
{
	struct hsnode *dhp;
	struct hsfs *fsp;
	struct hs_direntry hd;	/* parsed form of one on-disk record */
	struct dirent64 *nd;	/* next output slot in the staging buffer */
	int error;
	uint_t offset;		/* real offset in directory */
	uint_t dirsiz;		/* real size of directory */
	uchar_t *blkp;
	int hdlen;		/* length of on-disk dir entry */
	long ndlen;		/* length of dirent entry for user land */
	int bytes_wanted;
	size_t bufsize;		/* size of staging buffer */
	char *outbuf;		/* staging buffer for output */
	char *dname;
	int dnamelen;
	size_t dname_size;
	struct fbuf *fbp;
	uint_t last_offset;	/* last valid offset in the dir */
	ino64_t dirino;
	off_t diroff;

	dhp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	if (dhp->hs_dirent.ext_size == 0)
		hs_filldirent(vp, &dhp->hs_dirent);
	dirsiz = dhp->hs_dirent.ext_size;
	if (uiop->uio_loffset >= dirsiz) {	/* at or beyond EOF */
		if (eofp)
			*eofp = 1;
		return (0);
	}
	ASSERT(uiop->uio_loffset <= HS_MAXFILEOFF);
	offset = uiop->uio_loffset;

	dname_size = fsp->hsfs_namemax + 1;	/* 1 for the ending NUL */
	dname = kmem_alloc(dname_size, KM_SLEEP);
	/* Extra dirent64 of slack so the overflow check below is safe. */
	bufsize = uiop->uio_resid + sizeof (struct dirent64);
	outbuf = kmem_alloc(bufsize, KM_SLEEP);
	nd = (struct dirent64 *)outbuf;

	while (offset < dirsiz) {
		bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));
		error = fbread(vp, (offset_t)(offset & MAXBMASK),
		    (unsigned int)bytes_wanted, S_READ, &fbp);
		if (error)
			goto done;

		blkp = (uchar_t *)fbp->fb_addr;
		last_offset = (offset & MAXBMASK) + fbp->fb_count;

#define	rel_offset(offset) ((offset) & MAXBOFFSET)

		while (offset < last_offset) {
			hdlen = (int)((uchar_t)
			    HDE_DIR_LEN(&blkp[rel_offset(offset)]));
			if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
			    offset + hdlen > last_offset) {
				/*
				 * Bogus or truncated record; skip to the
				 * next sector and warn unless this looks
				 * like ordinary end-of-sector padding
				 * (hdlen == 0).
				 */
				offset = roundup(offset + 1, HS_SECTOR_SIZE);
				if (hdlen)
					hs_log_bogus_disk_warning(fsp,
					    HSFS_ERR_TRAILING_JUNK, 0);
				continue;
			}
			bzero(&hd, sizeof (hd));
			if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
			    &hd, dname, &dnamelen, last_offset - offset)) {
				ndlen = (long)DIRENT64_RECLEN((dnamelen));
				/* Stop if the caller's buffer is full. */
				if ((ndlen + ((char *)nd - outbuf)) >
				    uiop->uio_resid) {
					fbrelse(fbp, S_READ);
					goto done;
				}
				diroff = offset + hdlen;
				/*
				 * Inode selection: prefer a Rock Ridge
				 * inode when present and enabled; use
				 * the shared dummy inode for zero-length
				 * files and symlinks (unless the mount
				 * provides real inode numbers); fall
				 * back to the extent LBN.
				 */
				if (hd.inode != 0 && use_rrip_inodes) {
					dirino = hd.inode;
				} else if ((hd.ext_size == 0 ||
				    hd.sym_link != (char *)NULL) &&
				    (fsp->hsfs_flags & HSFSMNT_INODE) == 0) {
					dirino = HS_DUMMY_INO;
				} else {
					dirino = hd.ext_lbn;
				}
				ASSERT(strlen(dname) + 1 <=
				    DIRENT64_NAMELEN(ndlen));
				(void) strncpy(nd->d_name, dname,
				    DIRENT64_NAMELEN(ndlen));
				nd->d_reclen = (ushort_t)ndlen;
				nd->d_off = (offset_t)diroff;
				nd->d_ino = dirino;
				nd = (struct dirent64 *)((char *)nd + ndlen);

				/* hs_parsedir() may allocate sym_link. */
				if (hd.sym_link != (char *)NULL) {
					kmem_free(hd.sym_link,
					    (size_t)(hd.ext_size+1));
					hd.sym_link = (char *)NULL;
				}
			}
			offset += hdlen;
		}
		fbrelse(fbp, S_READ);
	}

done:
	/* Flush whatever was staged, then record the new offset. */
	ndlen = ((char *)nd - outbuf);
	if (ndlen != 0) {
		error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
		uiop->uio_loffset = offset;
	}
	kmem_free(dname, dname_size);
	kmem_free(outbuf, bufsize);
	if (eofp && error == 0)
		*eofp = (uiop->uio_loffset >= dirsiz);
	return (error);
}
/*
 * VOP_FID: produce an NFS-style file identifier for this node.
 * The identifier records the directory LBN/offset and the node id.
 * Returns ENOSPC (reporting the needed length) if the caller's fid
 * buffer is too small.
 */
static int
hsfs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
{
	struct hsfid *fid = (struct hsfid *)fidp;
	struct hsnode *hp = VTOH(vp);

	if (fidp->fid_len < sizeof (*fid) - sizeof (fid->hf_len)) {
		/* Tell the caller how much room a hsfs fid requires. */
		fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
		return (ENOSPC);
	}

	fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
	/* Snapshot the location fields consistently under the lock. */
	mutex_enter(&hp->hs_contents_lock);
	fid->hf_dir_lbn = hp->hs_dir_lbn;
	fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
	fid->hf_ino = hp->hs_nodeid;
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}
/*
 * VOP_OPEN: nothing to do on a read-only filesystem; always succeeds.
 */
static int
hsfs_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
{
	return (0);
}
/*
 * VOP_CLOSE: release any record locks and share reservations that the
 * closing process held on this vnode.  Always succeeds.
 */
static int
hsfs_close(struct vnode *vp, int flag, int count, offset_t offset,
	struct cred *cred, caller_context_t *ct)
{
	pid_t pid = ttoproc(curthread)->p_pid;

	(void) cleanlocks(vp, pid, 0);
	cleanshares(vp, pid);
	return (0);
}
/*
 * VOP_ACCESS: thin wrapper around hs_access(), which does the actual
 * permission check against the node's mode bits.
 */
static int
hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred,
	caller_context_t *ct)
{
	return (hs_access(vp, (mode_t)mode, cred));
}
/* Klustering size; not referenced in this portion of the file. */
#define	KLUSTSIZE	(56 * 1024)
/* Count of pages that vanished between I/O completion and page_lookup(). */
int hsfs_lostpage;
/*
 * b_iodone handler for scheduler-issued bufs: wake whoever is waiting
 * on the buf's built-in I/O semaphore.
 */
int
hsfs_iodone(struct buf *bp)
{
	sema_v(&bp->b_io);
	return (0);
}
/*
 * Taskq worker that completes a read-ahead batch issued by
 * hsfs_getpage_ra().  It waits for each buf's completion semaphore —
 * pumping the scheduler via hsched_invoke_strategy() while waiting so
 * queued I/O actually makes progress — then unmaps the pages, frees
 * all per-batch resources and finishes the page list.
 */
void
hsfs_ra_task(void *arg)
{
	struct hio_info *info = arg;
	uint_t count;
	struct buf *wbuf;

	ASSERT(info->pp != NULL);

	for (count = 0; count < info->bufsused; count++) {
		wbuf = &(info->bufs[count]);

		DTRACE_PROBE1(hsfs_io_wait_ra, struct buf *, wbuf);
		while (sema_tryp(&(info->sema[count])) == 0) {
			/*
			 * Not done yet; drive the scheduler.  A nonzero
			 * return means the queue drained, so block until
			 * this buf completes.
			 */
			if (hsched_invoke_strategy(info->fsp)) {
				sema_p(&(info->sema[count]));
				break;
			}
		}
		sema_destroy(&(info->sema[count]));
		DTRACE_PROBE1(hsfs_io_done_ra, struct buf *, wbuf);
		biofini(&(info->bufs[count]));
	}
	/* Unmap every page that was ppmapin()ed for this batch. */
	for (count = 0; count < info->bufsused; count++) {
		if (info->vas[count] != NULL) {
			ppmapout(info->vas[count]);
		}
	}
	kmem_free(info->vas, info->bufcnt * sizeof (caddr_t));
	kmem_free(info->bufs, info->bufcnt * sizeof (struct buf));
	kmem_free(info->sema, info->bufcnt * sizeof (ksema_t));
	pvn_read_done(info->pp, 0);
	kmem_cache_free(hio_info_cache, info);
}
/*
 * Issue an asynchronous read-ahead cluster starting at 'off'.
 * Builds one buf per logical-block-sized slice of the kluster, maps
 * each target page writable with ppmapin(), enqueues the bufs on the
 * I/O scheduler, and hands completion off to hsfs_ra_task() on the
 * per-fs taskq.  Returns 0 when the read-ahead was dispatched, -1 when
 * it could not be (out of range, nothing to read, kluster busy).
 *
 * The chunk_* parameters describe the interleaving layout computed by
 * hsfs_getapage(); xarsiz/bof locate the extent on the device.
 */
static int
hsfs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg,
	caddr_t addr, struct hsnode *hp, struct hsfs *fsp, int xarsiz,
	offset_t bof, int chunk_lbn_count, int chunk_data_bytes)
{
	struct buf *bufs;
	caddr_t *vas;
	caddr_t va;
	struct page *pp, *searchp, *lastp;
	struct vnode *devvp;
	ulong_t byte_offset;
	size_t io_len_tmp;
	uint_t io_off, io_len;
	uint_t xlen;
	uint_t filsiz;
	uint_t secsize;
	uint_t bufcnt;
	uint_t bufsused;
	uint_t count;
	uint_t io_end;
	uint_t which_chunk_lbn;
	uint_t offset_lbn;
	uint_t offset_extra;
	offset_t offset_bytes;
	uint_t remaining_bytes;
	uint_t extension;
	int remainder;
	diskaddr_t driver_block;
	u_offset_t io_off_tmp;
	ksema_t *fio_done;
	struct hio_info *info;
	size_t len;

	ASSERT(fsp->hqueue != NULL);

	/* Don't read ahead past the end of the mapping segment. */
	if (addr >= seg->s_base + seg->s_size) {
		return (-1);
	}

	devvp = fsp->hsfs_devvp;
	secsize = fsp->hsfs_vol.lbn_size;
	filsiz = hp->hs_dirent.ext_size;
	if (off >= filsiz)
		return (0);

	extension = 0;
	pp = NULL;

	extension += hp->hs_ra_bytes;

	/*
	 * Clamp the read-ahead span to what remains of the file.
	 * NOTE(review): len is a size_t, so the <= 0 test below only
	 * catches the len == 0 case.
	 */
	len = MIN(extension ? extension : PAGESIZE, filsiz - off);
	if (len <= 0)
		return (-1);

	len = roundup(len, DEV_BSIZE);

	pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
	    &io_len_tmp, off, len, 1);

	if (pp == NULL) {
		/* Kluster unavailable; reset the read-ahead state. */
		hp->hs_num_contig = 0;
		hp->hs_ra_bytes = 0;
		hp->hs_prev_offset = 0;
		return (-1);
	}

	io_off = (uint_t)io_off_tmp;
	io_len = (uint_t)io_len_tmp;
	ASSERT(io_off == io_off_tmp);

	/* One buf per logical block covered by this kluster. */
	bufcnt = (len + secsize - 1) / secsize;

	bufs = kmem_alloc(bufcnt * sizeof (struct buf), KM_SLEEP);
	vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
	fio_done = kmem_alloc(bufcnt * sizeof (ksema_t), KM_SLEEP);

	/* Zero-fill the tail of the last page beyond the I/O length. */
	xlen = io_len & PAGEOFFSET;
	if (xlen != 0)
		pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

	DTRACE_PROBE2(hsfs_readahead, struct vnode *, vp, uint_t, io_len);

	va = NULL;
	lastp = NULL;
	searchp = pp;
	io_end = io_off + io_len;
	for (count = 0, byte_offset = io_off;
	    byte_offset < io_end;
	    count++) {
		ASSERT(count < bufcnt);

		bioinit(&bufs[count]);
		bufs[count].b_edev = devvp->v_rdev;
		bufs[count].b_dev = cmpdev(devvp->v_rdev);
		bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
		bufs[count].b_iodone = hsfs_iodone;
		bufs[count].b_vp = vp;
		bufs[count].b_file = vp;

		/*
		 * Map the file byte offset through the interleave layout
		 * to a device block address.
		 */
		which_chunk_lbn = byte_offset / chunk_data_bytes;
		offset_lbn = which_chunk_lbn * chunk_lbn_count;
		offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
		offset_extra = byte_offset % chunk_data_bytes;
		driver_block = lbtodb(bof + xarsiz
		    + offset_bytes + offset_extra);

		/* Map each page only once; reuse the mapping otherwise. */
		if (lastp != searchp) {
			va = vas[count] = ppmapin(searchp, PROT_WRITE,
			    (caddr_t)-1);
		} else {
			vas[count] = NULL;
		}

		bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
		bufs[count].b_offset =
		    (offset_t)(byte_offset - io_off + off);
		bufs[count].b_lblkno = driver_block;

		/*
		 * b_bcount is bounded by the chunk, the request tail,
		 * and the page boundary — whichever comes first.
		 */
		remaining_bytes = ((which_chunk_lbn + 1) * chunk_data_bytes)
		    - byte_offset;
		if ((remaining_bytes + byte_offset) < (off + len)) {
			bufs[count].b_bcount = remaining_bytes;
		} else {
			bufs[count].b_bcount = off + len - byte_offset;
		}

		remainder = PAGESIZE - (byte_offset % PAGESIZE);
		if (bufs[count].b_bcount > remainder) {
			bufs[count].b_bcount = remainder;
		}

		bufs[count].b_bufsize = bufs[count].b_bcount;
		if (((offset_t)byte_offset + bufs[count].b_bcount) >
		    HS_MAXFILEOFF) {
			break;
		}
		byte_offset += bufs[count].b_bcount;

		/* Hand the buf to the scheduler; hsfs_ra_task() waits. */
		struct hio *hsio = kmem_cache_alloc(hio_cache,
		    KM_SLEEP);
		sema_init(&fio_done[count], 0, NULL,
		    SEMA_DEFAULT, NULL);
		hsio->bp = &bufs[count];
		hsio->sema = &fio_done[count];
		hsio->io_lblkno = bufs[count].b_lblkno;
		hsio->nblocks = howmany(hsio->bp->b_bcount,
		    DEV_BSIZE);
		hsio->io_timestamp = drv_hztousec(ddi_get_lbolt());
		hsio->contig_chain = NULL;
		hsched_enqueue_io(fsp, hsio, 1);
		lwp_stat_update(LWP_STAT_INBLK, 1);
		lastp = searchp;
		/* Advance to the next page once this one is consumed. */
		if ((remainder - bufs[count].b_bcount) < 1) {
			searchp = searchp->p_next;
		}
	}

	bufsused = count;
	/* Package everything up for the asynchronous completion task. */
	info = kmem_cache_alloc(hio_info_cache, KM_SLEEP);
	info->bufs = bufs;
	info->vas = vas;
	info->sema = fio_done;
	info->bufsused = bufsused;
	info->bufcnt = bufcnt;
	info->fsp = fsp;
	info->pp = pp;

	(void) taskq_dispatch(fsp->hqueue->ra_task,
	    hsfs_ra_task, info, KM_SLEEP);
	return (0);
}
/*
 * Read a cluster of pages for this vnode at 'off' into the page list
 * 'pl'.  The heart of VOP_GETPAGE (called via pvn_getpages()):
 *
 *  - computes the ISO interleave layout (chunk_lbn_count /
 *    chunk_data_bytes) so file offsets can be mapped to device blocks;
 *  - if the page is not already cached, klusters up a read (optionally
 *    extended by the node's read-ahead budget) and issues one buf per
 *    logical block, either directly via bdev_strategy() or through the
 *    I/O scheduler when fsp->hqueue exists;
 *  - if the page was cached, gathers it (and following cached pages)
 *    into 'pl' and possibly fires an asynchronous read-ahead.
 */
static int
hsfs_getapage(struct vnode *vp, u_offset_t off, size_t len, uint_t *protp,
	struct page *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, struct cred *cred)
{
	struct hsnode *hp;
	struct hsfs *fsp;
	int err;
	struct buf *bufs;
	caddr_t *vas;
	caddr_t va;
	struct page *pp, *searchp, *lastp;
	page_t *pagefound;
	offset_t bof;
	struct vnode *devvp;
	ulong_t byte_offset;
	size_t io_len_tmp;
	uint_t io_off, io_len;
	uint_t xlen;
	uint_t filsiz;
	uint_t secsize;
	uint_t bufcnt;
	uint_t bufsused;
	uint_t count;
	uint_t io_end;
	uint_t which_chunk_lbn;
	uint_t offset_lbn;
	uint_t offset_extra;
	offset_t offset_bytes;
	uint_t remaining_bytes;
	uint_t extension;
	int remainder;
	int chunk_lbn_count;
	int chunk_data_bytes;
	int xarsiz;
	diskaddr_t driver_block;
	u_offset_t io_off_tmp;
	ksema_t *fio_done;
	int calcdone;

	/* Caller only wants the pages created, not returned. */
	if (pl == NULL)
		return (0);

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	devvp = fsp->hsfs_devvp;
	secsize = fsp->hsfs_vol.lbn_size;
	filsiz = hp->hs_dirent.ext_size;
	bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);
	xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;

	/*
	 * Interleave layout: a "chunk" is intlf_sz data blocks followed
	 * by intlf_sk skipped blocks.  Non-interleaved files use a
	 * PAGESIZE-aligned chunk when the block size divides PAGESIZE.
	 */
	chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;
	if (chunk_lbn_count == 0) {
		chunk_lbn_count = 1;
	}
	if (hp->hs_dirent.intlf_sz == 0) {
		chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
		if (PAGESIZE % chunk_data_bytes == 0) {
			chunk_lbn_count = BYTE_TO_LBN(PAGESIZE, vp->v_vfsp);
			chunk_data_bytes = PAGESIZE;
		}
	} else {
		chunk_data_bytes =
		    LBN_TO_BYTE(hp->hs_dirent.intlf_sz, vp->v_vfsp);
	}

reread:
	err = 0;
	pagefound = 0;
	calcdone = 0;

	/*
	 * Pre-extend the request to a chunk boundary (rounded down to a
	 * page) when read-ahead is active on an interleaved layout.
	 */
	if (hp->hs_ra_bytes > 0 && chunk_data_bytes != PAGESIZE) {
		which_chunk_lbn = (off + len) / chunk_data_bytes;
		extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
		extension -= (extension % PAGESIZE);
	} else {
		extension = roundup(len, PAGESIZE);
	}

	atomic_inc_64(&fsp->total_pages_requested);

	pp = NULL;
again:
	/* Search for the page; if not found, kluster up an I/O. */
	if ((pagefound = page_exists(vp, off)) == 0) {
		if (!calcdone) {
			extension += hp->hs_ra_bytes;
			/* Clamp the I/O to what remains of the file. */
			len = MIN(extension ? extension : PAGESIZE,
			    filsiz - off);
			ASSERT(len > 0);
			len = roundup(len, DEV_BSIZE);
			calcdone = 1;
		}

		pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
		    &io_len_tmp, off, len, 0);

		if (pp == NULL) {
			/*
			 * Someone raced us for the page; drop the
			 * read-ahead state and retry the lookup.
			 */
			hp->hs_num_contig = 0;
			hp->hs_ra_bytes = 0;
			hp->hs_prev_offset = 0;
			goto again;
		}

		io_off = (uint_t)io_off_tmp;
		io_len = (uint_t)io_len_tmp;
		ASSERT(io_off == io_off_tmp);

		/* One buf per logical block covered by the kluster. */
		bufcnt = (len + secsize - 1) / secsize;

		bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
		vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
		if (fsp->hqueue != NULL)
			fio_done = kmem_alloc(bufcnt * sizeof (ksema_t),
			    KM_SLEEP);
		for (count = 0; count < bufcnt; count++) {
			bioinit(&bufs[count]);
			bufs[count].b_edev = devvp->v_rdev;
			bufs[count].b_dev = cmpdev(devvp->v_rdev);
			bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
			bufs[count].b_iodone = hsfs_iodone;
			bufs[count].b_vp = vp;
			bufs[count].b_file = vp;
		}

		/* Zero-fill the unread tail of the last page. */
		xlen = io_len & PAGEOFFSET;
		if (xlen != 0)
			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

		va = NULL;
		lastp = NULL;
		searchp = pp;
		io_end = io_off + io_len;
		for (count = 0, byte_offset = io_off;
		    byte_offset < io_end; count++) {
			ASSERT(count < bufcnt);

			/* File byte offset -> device block, through the
			 * interleave layout. */
			which_chunk_lbn = byte_offset / chunk_data_bytes;
			offset_lbn = which_chunk_lbn * chunk_lbn_count;
			offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
			offset_extra = byte_offset % chunk_data_bytes;
			driver_block =
			    lbtodb(bof + xarsiz + offset_bytes + offset_extra);

			/* Map each page only once. */
			if (lastp != searchp) {
				va = vas[count] =
				    ppmapin(searchp, PROT_WRITE, (caddr_t)-1);
			} else {
				vas[count] = NULL;
			}

			bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
			bufs[count].b_offset =
			    (offset_t)(byte_offset - io_off + off);
			bufs[count].b_lblkno = driver_block;

			/*
			 * b_bcount is bounded by the chunk, the request
			 * tail, and the page boundary.
			 */
			remaining_bytes =
			    ((which_chunk_lbn + 1) * chunk_data_bytes)
			    - byte_offset;
			if ((remaining_bytes + byte_offset) < (off + len)) {
				bufs[count].b_bcount = remaining_bytes;
			} else {
				bufs[count].b_bcount = off + len - byte_offset;
			}

			remainder = PAGESIZE - (byte_offset % PAGESIZE);
			if (bufs[count].b_bcount > remainder) {
				bufs[count].b_bcount = remainder;
			}

			bufs[count].b_bufsize = bufs[count].b_bcount;
			if (((offset_t)byte_offset + bufs[count].b_bcount) >
			    HS_MAXFILEOFF) {
				break;
			}
			byte_offset += bufs[count].b_bcount;

			/* Issue directly or queue on the scheduler. */
			if (fsp->hqueue == NULL) {
				(void) bdev_strategy(&bufs[count]);
			} else {
				struct hio *hsio = kmem_cache_alloc(hio_cache,
				    KM_SLEEP);
				sema_init(&fio_done[count], 0, NULL,
				    SEMA_DEFAULT, NULL);
				hsio->bp = &bufs[count];
				hsio->sema = &fio_done[count];
				hsio->io_lblkno = bufs[count].b_lblkno;
				hsio->nblocks = howmany(hsio->bp->b_bcount,
				    DEV_BSIZE);
				hsio->io_timestamp =
				    drv_hztousec(ddi_get_lbolt());
				hsio->contig_chain = NULL;
				hsched_enqueue_io(fsp, hsio, 0);
			}
			lwp_stat_update(LWP_STAT_INBLK, 1);
			lastp = searchp;
			if ((remainder - bufs[count].b_bcount) < 1) {
				searchp = searchp->p_next;
			}
		}

		bufsused = count;
		/* Wait for everything that was actually issued. */
		if (fsp->hqueue == NULL) {
			for (count = 0; count < bufsused; count++) {
				if (err == 0) {
					err = biowait(&bufs[count]);
				} else
					(void) biowait(&bufs[count]);
			}
		} else {
			for (count = 0; count < bufsused; count++) {
				struct buf *wbuf;

				wbuf = &bufs[count];
				DTRACE_PROBE1(hsfs_io_wait, struct buf *, wbuf);
				/*
				 * Drive the scheduler while this buf is
				 * pending; block only once the queue has
				 * drained.
				 */
				while (sema_tryp(&fio_done[count]) == 0) {
					if (hsched_invoke_strategy(fsp)) {
						sema_p(&fio_done[count]);
						break;
					}
				}
				sema_destroy(&fio_done[count]);
				DTRACE_PROBE1(hsfs_io_done, struct buf *, wbuf);
				if (err == 0) {
					err = geterror(wbuf);
				}
			}
			kmem_free(fio_done, bufcnt * sizeof (ksema_t));
		}

		/* Tear down bufs and mappings. */
		for (count = 0; count < bufcnt; count++) {
			biofini(&bufs[count]);
			if (count < bufsused && vas[count] != NULL) {
				ppmapout(vas[count]);
			}
		}

		kmem_free(vas, bufcnt * sizeof (caddr_t));
		kmem_free(bufs, bufcnt * sizeof (struct buf));
	}

	if (err) {
		pvn_read_done(pp, B_ERROR);
		return (err);
	}

	/*
	 * The page existed when we looked: lock it down now and collect
	 * as many following cached pages as fit in the caller's list.
	 */
	if (pagefound) {
		int index;
		ulong_t soff;

		if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
			/* Page disappeared in the window; start over. */
			hsfs_lostpage++;
			goto reread;
		}
		pl[0] = pp;
		index = 1;
		atomic_inc_64(&fsp->cache_read_pages);

		plsz -= PAGESIZE;
		for (soff = off + PAGESIZE; plsz > 0;
		    soff += PAGESIZE, plsz -= PAGESIZE) {
			pp = page_lookup_nowait(vp, (u_offset_t)soff,
			    SE_SHARED);
			if (pp == NULL)
				break;
			pl[index++] = pp;
		}
		pl[index] = NULL;

		/*
		 * Cache hit on a sequential stream: fire asynchronous
		 * read-ahead for the next page if it isn't cached yet.
		 */
		if (fsp->hqueue != NULL &&
		    hp->hs_prev_offset - off == PAGESIZE &&
		    hp->hs_prev_offset < filsiz &&
		    hp->hs_ra_bytes > 0 &&
		    !page_exists(vp, hp->hs_prev_offset)) {
			(void) hsfs_getpage_ra(vp, hp->hs_prev_offset, seg,
			    addr + PAGESIZE, hp, fsp, xarsiz, bof,
			    chunk_lbn_count, chunk_data_bytes);
		}

		return (0);
	}

	if (pp != NULL) {
		pvn_plist_init(pp, pl, plsz, off, io_len, rw);
	}

	return (err);
}
/*
 * VOP_GETPAGE: validate the request, update the per-node sequential
 * access heuristic that sizes read-ahead (hs_num_contig/hs_ra_bytes),
 * and hand the real work to hsfs_getapage() via pvn_getpages().
 *
 * Writes are rejected (read-only fs); requests beyond EOF fault unless
 * they come through segkmap.
 */
static int
hsfs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
	struct page *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, struct cred *cred, caller_context_t *ct)
{
	uint_t filsiz;
	struct hsfs *fsp;
	struct hsnode *hp;

	fsp = VFS_TO_HSFS(vp->v_vfsp);
	hp = VTOH(vp);

	/* hsfs doesn't support write pages; read-only filesystem. */
	if (rw == S_WRITE) {
		return (EROFS);
	}

	if (vp->v_flag & VNOMAP) {
		return (ENOSYS);
	}

	ASSERT(off <= HS_MAXFILEOFF);

	filsiz = hp->hs_dirent.ext_size;
	if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
		return (EFAULT);	/* beyond EOF */

	/*
	 * Sequential-access detection: if this request starts at (or
	 * overlaps) where the previous one ended, count it as contiguous
	 * and — after seq_contig_requests of them — grow the read-ahead
	 * budget one page at a time up to the per-fs maximum.  A seek
	 * shrinks the budget again.
	 */
	if (fsp->hqueue != NULL) {
		if (hp->hs_prev_offset == off || (off <
		    hp->hs_prev_offset && off + MAX(len, PAGESIZE)
		    >= hp->hs_prev_offset)) {
			if (hp->hs_num_contig <
			    (seq_contig_requests - 1)) {
				hp->hs_num_contig++;
			} else {
				if (hp->hs_ra_bytes <
				    fsp->hqueue->max_ra_bytes) {
					hp->hs_ra_bytes += PAGESIZE;
				}
			}
		} else {
			/* Not contiguous: wind the heuristic back down. */
			if (hp->hs_ra_bytes > 0)
				hp->hs_ra_bytes -= PAGESIZE;

			if (hp->hs_ra_bytes <= 0) {
				hp->hs_ra_bytes = 0;
				if (hp->hs_num_contig > 0)
					hp->hs_num_contig--;
			}
		}
		/* Remember where this request ends for the next call. */
		hp->hs_prev_offset = off + roundup(len, PAGESIZE);
		DTRACE_PROBE1(hsfs_compute_ra, struct hsnode *, hp);
	}
	if (protp != NULL)
		*protp = PROT_ALL;

	return (pvn_getpages(hsfs_getapage, vp, off, len, protp, pl, plsz,
	    seg, addr, rw, cred));
}
/*
 * Putapage callback: hsfs is read-only, so a dirty page indicates a
 * bug elsewhere.  Log it and discard the page with an error.
 */
int
hsfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
	int flags, cred_t *cr)
{
	/* should never happen - just destroy it */
	cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
	pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
	return (0);
}
/*
 * VOP_PUTPAGE: flush/invalidate cached pages in [off, off+len).
 * Since hsfs is read-only, nothing should ever actually be dirty;
 * any dirty page found is destroyed via hsfs_putapage() with a
 * warning.  len == 0 means "the whole vnode".
 */
static int
hsfs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
	struct cred *cr, caller_context_t *ct)
{
	int error = 0;

	if (vp->v_count == 0) {
		panic("hsfs_putpage: bad v_count");
	}

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	ASSERT(off <= HS_MAXFILEOFF);

	if (!vn_has_cached_data(vp))	/* no pages mapped */
		return (0);

	if (len == 0) {		/* from 'off' to EOF */
		error = pvn_vplist_dirty(vp, off, hsfs_putapage, flags, cr);
	} else {
		offset_t end_off = off + len;
		offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
		offset_t io_off;

		file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
		if (end_off > file_size)
			end_off = file_size;

		for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
			page_t *pp;

			/*
			 * Invalidation and synchronous requests must
			 * wait for the page lock; async flushes may
			 * skip busy pages.  B_INVAL/B_FREE need the
			 * page exclusively.
			 */
			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
				pp = page_lookup(vp, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL)
				continue;

			/*
			 * Normally pvn_getdirty() should return 0, which
			 * impies that it has done the job for us.
			 * The shouldn't-happen scenario is when it returns
			 * 1, which means that the page has been modified
			 * and needs to be put back.
			 */
			if (pvn_getdirty(pp, flags) == 1) {
				cmn_err(CE_NOTE,
				    "hsfs_putpage: dirty HSFS page");
				pvn_write_done(pp, flags |
				    B_ERROR | B_WRITE | B_INVAL | B_FORCE);
			}
		}
	}
	return (error);
}
/*
 * VOP_MAP: validate the mapping request (read-only fs: no shared
 * writable mappings), reserve an address range, and create a segvn
 * segment backed by this vnode.
 */
static int
hsfs_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cred,
	caller_context_t *ct)
{
	struct segvn_crargs crargs;
	int error;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/* Writable shared mappings are impossible on a read-only fs. */
	if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
		return (ENOSYS);

	/* The whole range must lie within the supported file offsets. */
	if (off > HS_MAXFILEOFF || off < 0 ||
	    (off + len) < 0 || (off + len) > HS_MAXFILEOFF)
		return (ENXIO);

	if (vp->v_type != VREG)
		return (ENODEV);

	/* Mandatory locking and mmap are mutually exclusive. */
	if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
		return (EAGAIN);

	as_rangelock(as);
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		return (error);
	}

	crargs.vp = vp;
	crargs.offset = off;
	crargs.cred = cred;
	crargs.type = flags & MAP_TYPE;
	crargs.flags = flags & ~MAP_TYPE;
	crargs.prot = prot;
	crargs.maxprot = maxprot;
	crargs.amp = NULL;
	crargs.szc = 0;
	crargs.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &crargs);
	as_rangeunlock(as);
	return (error);
}
/*
 * VOP_ADDMAP: account for pages added to a mapping of this vnode.
 */
static int
hsfs_addmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
	caller_context_t *ct)
{
	struct hsnode *hp;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	hp->hs_mapcnt += btopr(len);	/* pages, rounded up */
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}
/*
 * VOP_DELMAP: account for pages removed from a mapping of this vnode.
 * The count must never go negative.
 */
static int
hsfs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cr,
	caller_context_t *ct)
{
	struct hsnode *hp;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	hp->hs_mapcnt -= btopr(len);	/* pages, rounded up */
	ASSERT(hp->hs_mapcnt >= 0);
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}
/*
 * VOP_SEEK: any non-negative offset is acceptable on hsfs.
 */
static int
hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
	caller_context_t *ct)
{
	if (*noffp < 0)
		return (EINVAL);
	return (0);
}
/*
 * VOP_FRLOCK: record locking.  Mandatory locking is incompatible with
 * an active mmap of the file, so refuse with EAGAIN in that case;
 * otherwise defer to the generic fs_frlock().
 */
static int
hsfs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
	offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
	caller_context_t *ct)
{
	struct hsnode *hp = VTOH(vp);

	if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
		return (EAGAIN);

	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
}
/*
 * AVL comparator for the deadline tree: order by enqueue timestamp,
 * then block number, then descriptor address to break remaining ties
 * (AVL requires a total order).
 */
static int
hsched_deadline_compare(const void *x1, const void *x2)
{
	const struct hio *a = x1;
	const struct hio *b = x2;

	if (a->io_timestamp != b->io_timestamp)
		return (a->io_timestamp < b->io_timestamp ? -1 : 1);
	if (a->io_lblkno != b->io_lblkno)
		return (a->io_lblkno < b->io_lblkno ? -1 : 1);
	if (a != b)
		return (a < b ? -1 : 1);
	return (0);
}
/*
 * AVL comparator for the read tree: order by starting block number,
 * with descriptor address as the final tiebreaker.
 */
static int
hsched_offset_compare(const void *x1, const void *x2)
{
	const struct hio *a = x1;
	const struct hio *b = x2;

	if (a->io_lblkno != b->io_lblkno)
		return (a->io_lblkno < b->io_lblkno ? -1 : 1);
	if (a != b)
		return (a < b ? -1 : 1);
	return (0);
}
/*
 * Create the kmem caches for scheduler I/O descriptors.  Called once
 * at module setup, paired with hsched_fini_caches().
 */
void
hsched_init_caches(void)
{
	hio_cache = kmem_cache_create("hsfs_hio_cache",
	    sizeof (struct hio), 0, NULL,
	    NULL, NULL, NULL, NULL, 0);

	hio_info_cache = kmem_cache_create("hsfs_hio_info_cache",
	    sizeof (struct hio_info), 0, NULL,
	    NULL, NULL, NULL, NULL, 0);
}
/*
 * Destroy the scheduler kmem caches created by hsched_init_caches().
 */
void
hsched_fini_caches(void)
{
	kmem_cache_destroy(hio_cache);
	kmem_cache_destroy(hio_info_cache);
}
/*
 * Initialize the per-filesystem I/O scheduler queue: query the device's
 * maximum transfer size through LDI (falling back to a 16 KB default on
 * any failure), then set up the locks, AVL trees, read-ahead taskq and
 * the coalescing buf.
 */
void
hsched_init(struct hsfs *fsp, int fsid, struct modlinkage *modlinkage)
{
	struct hsfs_queue *hqueue = fsp->hqueue;
	struct vnode *vp = fsp->hsfs_devvp;
	char namebuf[23];
	int error, err;
	struct dk_cinfo info;
	ldi_handle_t lh;
	ldi_ident_t li;

	/* Default used when the device cannot be queried. */
	hqueue->dev_maxtransfer = 16384;

	err = ldi_ident_from_mod(modlinkage, &li);
	if (err) {
		cmn_err(CE_NOTE, "hsched_init: Querying device failed");
		cmn_err(CE_NOTE, "hsched_init: ldi_ident_from_mod err=%d\n",
		    err);
		goto set_ra;
	}

	err = ldi_open_by_dev(&(vp->v_rdev), OTYP_CHR, FREAD, CRED(), &lh, li);
	ldi_ident_release(li);
	if (err) {
		cmn_err(CE_NOTE, "hsched_init: Querying device failed");
		cmn_err(CE_NOTE, "hsched_init: ldi_open err=%d\n", err);
		goto set_ra;
	}

	/* Ask the driver for its controller info (max transfer). */
	error = ldi_ioctl(lh, DKIOCINFO, (intptr_t)&info, FKIOCTL,
	    CRED(), &err);
	err = ldi_close(lh, FREAD, CRED());
	if (err) {
		cmn_err(CE_NOTE, "hsched_init: Querying device failed");
		cmn_err(CE_NOTE, "hsched_init: ldi_close err=%d\n", err);
	}

	if (error == 0) {
		hqueue->dev_maxtransfer = ldbtob(info.dki_maxtransfer);
	}

set_ra:
	/* Per-node read-ahead budget cap. */
	hqueue->max_ra_bytes = PAGESIZE * 8;
	mutex_init(&(hqueue->hsfs_queue_lock), NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&(hqueue->strategy_lock), NULL, MUTEX_DEFAULT, NULL);

	/* I/O requests are indexed both by offset and by age. */
	avl_create(&(hqueue->read_tree), hsched_offset_compare,
	    sizeof (struct hio), offsetof(struct hio, io_offset_node));
	avl_create(&(hqueue->deadline_tree), hsched_deadline_compare,
	    sizeof (struct hio), offsetof(struct hio, io_deadline_node));

	(void) snprintf(namebuf, sizeof (namebuf), "hsched_task_%d", fsid);
	hqueue->ra_task = taskq_create(namebuf, hsfs_taskq_nthreads,
	    minclsyspri + 2, 1, 104857600 / PAGESIZE, TASKQ_DYNAMIC);

	hqueue->next = NULL;
	hqueue->nbuf = kmem_zalloc(sizeof (struct buf), KM_SLEEP);
}
/*
 * Tear down a per-filesystem scheduler queue created by hsched_init().
 * Safe to call with NULL.
 */
void
hsched_fini(struct hsfs_queue *hqueue)
{
	if (hqueue == NULL)
		return;

	/*
	 * A leftover "next" hint still lives in the read tree;
	 * remove and free it before destroying the trees.
	 */
	if (hqueue->next != NULL) {
		avl_remove(&hqueue->read_tree, hqueue->next);
		kmem_cache_free(hio_cache, hqueue->next);
	}

	avl_destroy(&hqueue->read_tree);
	avl_destroy(&hqueue->deadline_tree);
	mutex_destroy(&hqueue->hsfs_queue_lock);
	mutex_destroy(&hqueue->strategy_lock);
	taskq_destroy(hqueue->ra_task);
	kmem_free(hqueue->nbuf, sizeof (struct buf));
}
/*
 * Two queued I/Os are adjacent when the second starts at the block
 * immediately after the first ends, on the same device.
 */
#define	IS_ADJACENT(io, nio) \
	(((io)->io_lblkno + (io)->nblocks == (nio)->io_lblkno) && \
	(io)->bp->b_edev == (nio)->bp->b_edev)
/*
 * Dequeue and issue queued I/O for this filesystem.
 *
 * Picks the next request: normally the offset-sorted successor of the
 * last issued request ("next" hint, elevator style), but the oldest
 * request wins once its age exceeds HSFS_READ_DEADLINE.  Adjacent
 * requests (IS_ADJACENT) are chained together up to the device's max
 * transfer size; chains longer than hsched_coalesce_min are issued as
 * one large read into a bounce buffer and scattered back to the
 * member bufs, otherwise each buf is issued individually.
 *
 * Returns 1 when the queue was empty (nothing to do), 0 otherwise.
 * Callers use the return value to decide whether to block on their
 * completion semaphores.
 */
int
hsched_invoke_strategy(struct hsfs *fsp)
{
	struct hsfs_queue *hqueue;
	struct buf *nbuf;
	struct hio *fio, *nio, *tio, *prev, *last;
	size_t bsize, soffset, offset, data;
	int bioret, bufcount;
	struct vnode *fvp;
	ksema_t *io_done;
	caddr_t iodata;

	hqueue = fsp->hqueue;
	mutex_enter(&hqueue->strategy_lock);
	mutex_enter(&hqueue->hsfs_queue_lock);

	/*
	 * Check for Deadline expiration first
	 */
	fio = avl_first(&hqueue->deadline_tree);

	/*
	 * Paranoid check for empty I/O queue. Both deadline
	 * and read trees contain same data sorted in different
	 * ways. So empty deadline tree = empty read tree.
	 */
	if (fio == NULL) {
		/* Drop any leftover "next" hint before returning. */
		if (hqueue->next != NULL) {
			avl_remove(&hqueue->read_tree, hqueue->next);
			kmem_cache_free(hio_cache, hqueue->next);
			hqueue->next = NULL;
		}
		mutex_exit(&hqueue->hsfs_queue_lock);
		mutex_exit(&hqueue->strategy_lock);
		return (1);
	}

	if (drv_hztousec(ddi_get_lbolt()) - fio->io_timestamp
	    < HSFS_READ_DEADLINE) {
		/*
		 * No deadline expired: continue the elevator sweep from
		 * the last issued position if we have one, else start
		 * from the lowest offset.
		 */
		fio = NULL;
		if (hqueue->next != NULL) {
			fio = AVL_NEXT(&hqueue->read_tree, hqueue->next);
			avl_remove(&hqueue->read_tree, hqueue->next);
			kmem_cache_free(hio_cache, hqueue->next);
			hqueue->next = NULL;
		}
		if (fio == NULL) {
			fio = avl_first(&hqueue->read_tree);
		}
	} else if (hqueue->next != NULL) {
		/*
		 * Deadline expired: serve the oldest request, but first
		 * retire the stale hint.
		 */
		DTRACE_PROBE1(hsfs_deadline_expiry, struct hio *, fio);

		avl_remove(&hqueue->read_tree, hqueue->next);
		kmem_cache_free(hio_cache, hqueue->next);
		hqueue->next = NULL;
	}

	/*
	 * Walk the contiguous successors of fio, chaining them via
	 * contig_chain and pulling them off both trees, until the run
	 * breaks or the device's max transfer size is reached.
	 */
	bufcount = 1;
	bsize = ldbtob(fio->nblocks);
	fvp = fio->bp->b_file;
	nio = AVL_NEXT(&hqueue->read_tree, fio);
	tio = fio;
	while (nio != NULL && IS_ADJACENT(tio, nio) &&
	    bsize < hqueue->dev_maxtransfer) {
		avl_remove(&hqueue->deadline_tree, tio);
		avl_remove(&hqueue->read_tree, tio);
		tio->contig_chain = nio;
		bsize += ldbtob(nio->nblocks);
		prev = tio;
		tio = nio;

		/*
		 * This check is required to detect the case where
		 * we are subsuming bufs of different files here.
		 */
		if (fvp && tio->bp->b_file != fvp) {
			fvp = NULL;
		}

		nio = AVL_NEXT(&hqueue->read_tree, nio);
		bufcount++;
	}

	/*
	 * tio becomes the elevator hint for the next sweep; it stays in
	 * the read tree (removed only from the deadline tree).
	 */
	hqueue->next = tio;
	avl_remove(&hqueue->deadline_tree, tio);
	mutex_exit(&hqueue->hsfs_queue_lock);

	DTRACE_PROBE3(hsfs_io_dequeued, struct hio *, fio, int, bufcount,
	    size_t, bsize);

	if (bufcount > hsched_coalesce_min) {
		/*
		 * Coalesced path: read the whole run into one bounce
		 * buffer with a single device transfer, then copy each
		 * member buf's slice back out.
		 */
		fsp->coalesced_bytes += bsize;
		nbuf = hqueue->nbuf;
		bioinit(nbuf);
		nbuf->b_edev = fio->bp->b_edev;
		nbuf->b_dev = fio->bp->b_dev;
		nbuf->b_flags = fio->bp->b_flags;
		nbuf->b_iodone = fio->bp->b_iodone;
		iodata = kmem_alloc(bsize, KM_SLEEP);
		nbuf->b_un.b_addr = iodata;
		nbuf->b_lblkno = fio->bp->b_lblkno;
		nbuf->b_vp = fvp;
		nbuf->b_file = fvp;
		nbuf->b_bcount = bsize;
		nbuf->b_bufsize = bsize;

		DTRACE_PROBE3(hsfs_coalesced_io_start, struct hio *, fio, int,
		    bufcount, size_t, bsize);

		(void) bdev_strategy(nbuf);

		/*
		 * The last chain member is the elevator hint and must
		 * survive; replace it in the chain with a copy so the
		 * completion loop below can free every element.
		 */
		prev->contig_chain = kmem_cache_alloc(hio_cache, KM_SLEEP);
		prev->contig_chain->bp = tio->bp;
		prev->contig_chain->sema = tio->sema;
		tio = prev->contig_chain;
		tio->contig_chain = NULL;

		soffset = ldbtob(fio->bp->b_lblkno);
		nio = fio;

		bioret = biowait(nbuf);
		data = bsize - nbuf->b_resid;	/* bytes actually read */
		biofini(nbuf);
		mutex_exit(&hqueue->strategy_lock);

		/*
		 * Distribute the bounce buffer to each member buf that
		 * was fully covered by the data actually transferred.
		 */
		tio = nio;
		DTRACE_PROBE3(hsfs_coalesced_io_done, struct hio *, nio,
		    int, bioret, size_t, data);

		while (nio != NULL && data >= nio->bp->b_bcount) {
			offset = ldbtob(nio->bp->b_lblkno) - soffset;
			bcopy(iodata + offset, nio->bp->b_un.b_addr,
			    nio->bp->b_bcount);
			data -= nio->bp->b_bcount;
			bioerror(nio->bp, 0);
			biodone(nio->bp);
			sema_v(nio->sema);
			tio = nio;
			nio = nio->contig_chain;
			kmem_cache_free(hio_cache, tio);
		}

		/*
		 * Bufs beyond the transferred data: zero-fill their
		 * uncovered tails and complete them with the device's
		 * error status.
		 */
		while (nio != NULL) {
			nio->bp->b_resid = nio->bp->b_bcount - data;
			bzero(nio->bp->b_un.b_addr + data, nio->bp->b_resid);
			bioerror(nio->bp, bioret);
			biodone(nio->bp);
			sema_v(nio->sema);
			tio = nio;
			nio = nio->contig_chain;
			kmem_cache_free(hio_cache, tio);
			data = 0;
		}
		kmem_free(iodata, bsize);
	} else {
		/*
		 * Short run: issue the chain members individually.  The
		 * last one (the elevator hint) must not be freed here.
		 */
		nbuf = tio->bp;
		io_done = tio->sema;
		nio = fio;
		last = tio;

		while (nio != NULL) {
			(void) bdev_strategy(nio->bp);
			nio = nio->contig_chain;
		}
		nio = fio;
		mutex_exit(&hqueue->strategy_lock);

		while (nio != NULL) {
			if (nio == last) {
				(void) biowait(nbuf);
				sema_v(io_done);
				break;
				/* sentinel last not freed. See above. */
			} else {
				(void) biowait(nio->bp);
				sema_v(nio->sema);
			}
			tio = nio;
			nio = nio->contig_chain;
			kmem_cache_free(hio_cache, tio);
		}
	}
	return (0);
}
/*
 * Queue an I/O request on the scheduler, indexed both by age (deadline
 * tree) and by disk offset (read tree), and account for the bytes it
 * will read.  'ra' marks read-ahead requests for separate accounting.
 */
static void
hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra)
{
	struct hsfs_queue *hq = fsp->hqueue;

	mutex_enter(&hq->hsfs_queue_lock);

	fsp->physical_read_bytes += hsio->bp->b_bcount;
	if (ra)
		fsp->readahead_bytes += hsio->bp->b_bcount;

	avl_add(&hq->deadline_tree, hsio);
	avl_add(&hq->read_tree, hsio);

	DTRACE_PROBE3(hsfs_io_enqueued, struct hio *, hsio,
	    struct hsfs_queue *, hq, int, ra);

	mutex_exit(&hq->hsfs_queue_lock);
}
/*
 * VOP_PATHCONF: report filesystem limits; anything not handled here
 * falls through to the generic fs_pathconf().
 */
static int
hsfs_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
	caller_context_t *ct)
{
	struct hsfs *fsp;

	int error = 0;

	switch (cmd) {

	case _PC_NAME_MAX:
		fsp = VFS_TO_HSFS(vp->v_vfsp);
		*valp = fsp->hsfs_namemax;
		break;

	case _PC_FILESIZEBITS:
		/* 33 bits matches HS_MAXFILEOFF used elsewhere here. */
		*valp = 33;
		break;

	case _PC_TIMESTAMP_RESOLUTION:
		/* 10 ms, expressed in nanoseconds. */
		*valp = 10000000L;
		break;

	default:
		error = fs_pathconf(vp, cmd, valp, cr, ct);
		break;
	}

	return (error);
}
/*
 * The hsfs vnode operations vector: maps each VOP name to the local
 * implementation above.  Installed into hsfs_vnodeops at mount/module
 * initialization (outside this file's view).
 */
const fs_operation_def_t hsfs_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = hsfs_open },
	VOPNAME_CLOSE,		{ .vop_close = hsfs_close },
	VOPNAME_READ,		{ .vop_read = hsfs_read },
	VOPNAME_GETATTR,	{ .vop_getattr = hsfs_getattr },
	VOPNAME_ACCESS,		{ .vop_access = hsfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = hsfs_lookup },
	VOPNAME_READDIR,	{ .vop_readdir = hsfs_readdir },
	VOPNAME_READLINK,	{ .vop_readlink = hsfs_readlink },
	VOPNAME_FSYNC,		{ .vop_fsync = hsfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = hsfs_inactive },
	VOPNAME_FID,		{ .vop_fid = hsfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = hsfs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = hsfs_frlock },
	VOPNAME_GETPAGE,	{ .vop_getpage = hsfs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = hsfs_putpage },
	VOPNAME_MAP,		{ .vop_map = hsfs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = hsfs_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = hsfs_delmap },
	VOPNAME_PATHCONF,	{ .vop_pathconf = hsfs_pathconf },
	NULL,			NULL
};

/* Filled in from the template at initialization. */
struct vnodeops *hsfs_vnodeops;