#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/dirent.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/swap.h>
#include <sys/errno.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/pathconf.h>
#include <sys/utsname.h>
#include <sys/dnlc.h>
#include <sys/acl.h>
#include <sys/atomic.h>
#include <sys/policy.h>
#include <sys/sdt.h>
#include <rpc/types.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/rnode.h>
#include <nfs/nfs_acl.h>
#include <nfs/lm.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kpm.h>
#include <vm/seg_vn.h>
#include <fs/fs_subr.h>
#include <sys/ddi.h>
/*
 * Local helpers: over-the-wire RPC wrappers, page-cache I/O, and
 * lookup/readdir plumbing used by the VOP entry points below.
 */
static int	nfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
			cred_t *);
static int	nfswrite(vnode_t *, caddr_t, uint_t, int, cred_t *);
static int	nfsread(vnode_t *, caddr_t, uint_t, int, size_t *, cred_t *);
static int	nfssetattr(vnode_t *, struct vattr *, int, cred_t *);
static int	nfslookup_dnlc(vnode_t *, char *, vnode_t **, cred_t *);
static int	nfslookup_otw(vnode_t *, char *, vnode_t **, cred_t *, int);
static int	nfsrename(vnode_t *, char *, vnode_t *, char *, cred_t *,
			caller_context_t *);
static int	nfsreaddir(vnode_t *, rddir_cache *, cred_t *);
static int	nfs_bio(struct buf *, cred_t *);
static int	nfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
			page_t *[], size_t, struct seg *, caddr_t,
			enum seg_rw, cred_t *);
static void	nfs_readahead(vnode_t *, u_offset_t, caddr_t, struct seg *,
			cred_t *);
static int	nfs_sync_putapage(vnode_t *, page_t *, u_offset_t, size_t,
			int, cred_t *);
static int	nfs_sync_pageio(vnode_t *, page_t *, u_offset_t, size_t,
			int, cred_t *);
static void	nfs_delmap_callback(struct as *, void *, uint_t);

/*
 * Internal end-of-file sentinel used by the I/O paths; value is chosen
 * so it cannot collide with a real errno (presumably — TODO confirm it
 * never leaks to callers).
 */
#define	NFS_EOF			-98

/*
 * VOP entry points for NFS version 2 (installed via
 * nfs_vnodeops_template below).
 */
static int	nfs_open(vnode_t **, int, cred_t *, caller_context_t *);
static int	nfs_close(vnode_t *, int, int, offset_t, cred_t *,
			caller_context_t *);
static int	nfs_read(vnode_t *, struct uio *, int, cred_t *,
			caller_context_t *);
static int	nfs_write(vnode_t *, struct uio *, int, cred_t *,
			caller_context_t *);
static int	nfs_ioctl(vnode_t *, int, intptr_t, int, cred_t *, int *,
			caller_context_t *);
static int	nfs_getattr(vnode_t *, struct vattr *, int, cred_t *,
			caller_context_t *);
static int	nfs_setattr(vnode_t *, struct vattr *, int, cred_t *,
			caller_context_t *);
static int	nfs_access(vnode_t *, int, int, cred_t *, caller_context_t *);
static int	nfs_accessx(void *, int, cred_t *);
static int	nfs_readlink(vnode_t *, struct uio *, cred_t *,
			caller_context_t *);
static int	nfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
static void	nfs_inactive(vnode_t *, cred_t *, caller_context_t *);
static int	nfs_lookup(vnode_t *, char *, vnode_t **, struct pathname *,
			int, vnode_t *, cred_t *, caller_context_t *,
			int *, pathname_t *);
static int	nfs_create(vnode_t *, char *, struct vattr *, enum vcexcl,
			int, vnode_t **, cred_t *, int, caller_context_t *,
			vsecattr_t *);
static int	nfs_remove(vnode_t *, char *, cred_t *, caller_context_t *,
			int);
static int	nfs_link(vnode_t *, vnode_t *, char *, cred_t *,
			caller_context_t *, int);
static int	nfs_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
			caller_context_t *, int);
static int	nfs_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
			cred_t *, caller_context_t *, int, vsecattr_t *);
static int	nfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
			caller_context_t *, int);
static int	nfs_symlink(vnode_t *, char *, struct vattr *, char *,
			cred_t *, caller_context_t *, int);
static int	nfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
			caller_context_t *, int);
static int	nfs_fid(vnode_t *, fid_t *, caller_context_t *);
static int	nfs_rwlock(vnode_t *, int, caller_context_t *);
static void	nfs_rwunlock(vnode_t *, int, caller_context_t *);
static int	nfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
static int	nfs_getpage(vnode_t *, offset_t, size_t, uint_t *,
			page_t *[], size_t, struct seg *, caddr_t,
			enum seg_rw, cred_t *, caller_context_t *);
static int	nfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
			caller_context_t *);
static int	nfs_map(vnode_t *, offset_t, struct as *, caddr_t *, size_t,
			uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
static int	nfs_addmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
			uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
static int	nfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
			struct flk_callback *, cred_t *, caller_context_t *);
static int	nfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
			cred_t *, caller_context_t *);
static int	nfs_realvp(vnode_t *, vnode_t **, caller_context_t *);
static int	nfs_delmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
			uint_t, uint_t, uint_t, cred_t *, caller_context_t *);
static int	nfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
			caller_context_t *);
static int	nfs_pageio(vnode_t *, page_t *, u_offset_t, size_t, int,
			cred_t *, caller_context_t *);
static int	nfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
			caller_context_t *);
static int	nfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
			caller_context_t *);
static int	nfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
			caller_context_t *);
/*
 * The NFS version 2 vnode operations vector; filled in at module load
 * time from the template below (presumably via vn_make_ops() in the
 * VFS initialization path — confirm against the nfs vfsops code).
 */
struct vnodeops *nfs_vnodeops;

const fs_operation_def_t nfs_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = nfs_open },
	VOPNAME_CLOSE,		{ .vop_close = nfs_close },
	VOPNAME_READ,		{ .vop_read = nfs_read },
	VOPNAME_WRITE,		{ .vop_write = nfs_write },
	VOPNAME_IOCTL,		{ .vop_ioctl = nfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = nfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = nfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = nfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = nfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = nfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = nfs_remove },
	VOPNAME_LINK,		{ .vop_link = nfs_link },
	VOPNAME_RENAME,		{ .vop_rename = nfs_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = nfs_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = nfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = nfs_readdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = nfs_symlink },
	VOPNAME_READLINK,	{ .vop_readlink = nfs_readlink },
	VOPNAME_FSYNC,		{ .vop_fsync = nfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = nfs_inactive },
	VOPNAME_FID,		{ .vop_fid = nfs_fid },
	VOPNAME_RWLOCK,		{ .vop_rwlock = nfs_rwlock },
	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = nfs_rwunlock },
	VOPNAME_SEEK,		{ .vop_seek = nfs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = nfs_frlock },
	VOPNAME_SPACE,		{ .vop_space = nfs_space },
	VOPNAME_REALVP,		{ .vop_realvp = nfs_realvp },
	VOPNAME_GETPAGE,	{ .vop_getpage = nfs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = nfs_putpage },
	VOPNAME_MAP,		{ .vop_map = nfs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = nfs_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = nfs_delmap },
	VOPNAME_DUMP,		{ .vop_dump = nfs_dump },
	VOPNAME_PATHCONF,	{ .vop_pathconf = nfs_pathconf },
	VOPNAME_PAGEIO,		{ .vop_pageio = nfs_pageio },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = nfs_setsecattr },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = nfs_getsecattr },
	VOPNAME_SHRLOCK,	{ .vop_shrlock = nfs_shrlock },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};
/*
 * Return the vnode operations vector used for NFS version 2 vnodes.
 */
struct vnodeops *
nfs_getvnodeops(void)
{
	return (nfs_vnodeops);
}
/*
 * VOP_OPEN for NFSv2: record the opener's credentials on the rnode and
 * enforce close-to-open consistency by validating or purging cached
 * data/attributes when the file may be shared.
 */
static int
nfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	int error;
	struct vattr va;
	rnode_t *rp;
	vnode_t *vp;

	vp = *vpp;
	rp = VTOR(vp);
	/* Cross-zone access to an NFS mount is not permitted. */
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	/*
	 * Stash the first opener's credentials in the rnode (presumably
	 * for later I/O issued without a caller cred — TODO confirm
	 * against the async I/O paths).
	 */
	mutex_enter(&rp->r_statelock);
	if (rp->r_cred == NULL) {
		crhold(cr);
		rp->r_cred = cr;
	}
	mutex_exit(&rp->r_statelock);

	/*
	 * If there may be stale cached state (other holds on the vnode,
	 * cached pages, or a readdir cache) and the mount did not ask
	 * for "no close-to-open" semantics, revalidate before returning.
	 */
	if (vp->v_count > 1 ||
	    ((vn_has_cached_data(vp) || HAVE_RDDIR_CACHE(rp)) &&
	    !(VTOMI(vp)->mi_flags & MI_NOCTO))) {
		if (vn_is_readonly(vp))
			error = nfs_validate_caches(vp, cr);
		else if (rp->r_mapcnt == 0 && vp->v_count == 1) {
			/* Sole opener, not mmapped: cheap to just purge. */
			PURGE_ATTRCACHE(vp);
			error = 0;
		} else {
			/* Force fresh attributes over the wire. */
			va.va_mask = AT_ALL;
			error = nfs_getattr_otw(vp, &va, cr);
		}
	} else
		error = 0;

	return (error);
}
/*
 * VOP_CLOSE for NFSv2: release record locks and shares held by the
 * closing process, and on the final close of a written file flush
 * dirty pages so write errors can be reported here (close-to-open).
 */
static int
nfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
	caller_context_t *ct)
{
	rnode_t *rp;
	int error;
	struct vattr va;

	/* Nothing can be done over the wire from the wrong zone. */
	if (VTOMI(vp)->mi_zone != nfs_zone()) {
		return (EIO);
	}

	/*
	 * Local-locking mounts (MI_LLOCK) clean up locks and shares
	 * locally; otherwise tell the network lock manager that this
	 * process's locks on the file are going away.
	 */
	if (VTOMI(vp)->mi_flags & MI_LLOCK) {
		cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
		cleanshares(vp, ttoproc(curthread)->p_pid);
	} else
		nfs_lockrelease(vp, flag, offset, cr);

	/* Only the last close of this open does the flush work. */
	if (count > 1)
		return (0);

	/*
	 * If the file was removed while open ("silly renamed",
	 * r_unldvp set), purge its DNLC entries so the vnode can be
	 * recycled sooner and the server-side placeholder removed.
	 */
	rp = VTOR(vp);
	if (rp->r_unldvp != NULL)
		dnlc_purge_vp(vp);

	/*
	 * Written file with cached pages: flush.  MI_NOCTO mounts get
	 * an asynchronous flush (EAGAIN means the async queue was
	 * busy and is not an error); otherwise flush synchronously.
	 * Either way, report and clear any latched rnode error.
	 */
	if ((flag & FWRITE) && vn_has_cached_data(vp)) {
		if ((VTOMI(vp)->mi_flags & MI_NOCTO)) {
			error = nfs_putpage(vp, (offset_t)0, 0, B_ASYNC,
			    cr, ct);
			if (error == EAGAIN)
				error = 0;
		} else
			error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
		if (!error) {
			mutex_enter(&rp->r_statelock);
			error = rp->r_error;
			rp->r_error = 0;
			mutex_exit(&rp->r_statelock);
		}
	} else {
		mutex_enter(&rp->r_statelock);
		error = rp->r_error;
		rp->r_error = 0;
		mutex_exit(&rp->r_statelock);
	}

	/*
	 * If attributes were dirtied by WRITEs (RWRITEATTR), fetch a
	 * fresh set now so subsequent close-to-open checks see the
	 * post-write attributes.
	 */
	if (rp->r_flags & RWRITEATTR)
		(void) nfs_getattr_otw(vp, &va, cr);

	return (error);
}
/*
 * VOP_READ for NFSv2.  Two paths: a direct path that bypasses the page
 * cache when the vnode is uncacheable or direct I/O is in effect and
 * nothing is mapped/cached, and the normal cached path that maps file
 * windows via segmap/vpm and copies with uiomove().  Caller must hold
 * r_rwlock as reader (asserted).
 */
static int
nfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	rnode_t *rp;
	u_offset_t off;
	offset_t diff;
	int on;
	size_t n;
	caddr_t base;
	uint_t flags;
	int error;
	mntinfo_t *mi;

	rp = VTOR(vp);
	mi = VTOMI(vp);

	if (nfs_zone() != mi->mi_zone)
		return (EIO);

	ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/* NFSv2 file offsets are 32 bits; reject anything beyond. */
	if (uiop->uio_loffset > MAXOFF32_T)
		return (EFBIG);

	if (uiop->uio_loffset < 0 ||
	    uiop->uio_loffset + uiop->uio_resid > MAXOFF32_T)
		return (EINVAL);

	/*
	 * Direct path: read through a kernel bounce buffer, one
	 * mi_curread-sized chunk at a time.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
	    rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {
		size_t bufsize;
		size_t resid = 0;

		bufsize = MIN(uiop->uio_resid, VTOMI(vp)->mi_curread);
		base = kmem_alloc(bufsize, KM_SLEEP);
		do {
			n = MIN(uiop->uio_resid, bufsize);
			error = nfsread(vp, base, uiop->uio_offset, n,
			    &resid, cr);
			if (!error) {
				n -= resid;	/* bytes actually read */
				error = uiomove(base, n, UIO_READ, uiop);
			}
		} while (!error && uiop->uio_resid > 0 && n > 0);
		kmem_free(base, bufsize);
		return (error);
	}

	error = 0;

	do {
		off = uiop->uio_loffset & MAXBMASK;	/* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET;	/* offset in block */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		error = nfs_validate_caches(vp, cr);
		if (error)
			break;

		/* Wait out any in-progress cache purge before reading. */
		mutex_enter(&rp->r_statelock);
		while (rp->r_flags & RINCACHEPURGE) {
			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
				mutex_exit(&rp->r_statelock);
				return (EINTR);
			}
		}
		diff = rp->r_size - uiop->uio_loffset;
		mutex_exit(&rp->r_statelock);
		if (diff <= 0)
			break;		/* at or beyond EOF */
		if (diff < n)
			n = (size_t)diff;

		if (vpm_enable) {
			/* Copy through the vpm page-mapping interface. */
			error = vpm_data_copy(vp, off + on, n, uiop,
			    1, NULL, 0, S_READ);
		} else {
			base = segmap_getmapflt(segkmap, vp, off + on, n,
			    1, S_READ);
			error = uiomove(base + on, n, UIO_READ, uiop);
		}

		if (!error) {
			/*
			 * If we read a whole block or up to EOF, this
			 * window won't be needed again soon.
			 */
			mutex_enter(&rp->r_statelock);
			if (n + on == MAXBSIZE ||
			    uiop->uio_loffset == rp->r_size)
				flags = SM_DONTNEED;
			else
				flags = 0;
			mutex_exit(&rp->r_statelock);
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			/* Error: release the mapping, ignore sync errors. */
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
		}
	} while (!error && uiop->uio_resid > 0);

	return (error);
}
/*
 * VOP_WRITE for NFSv2.  Handles O_APPEND serialization, RLIMIT_FSIZE
 * clipping, a direct (uncached) write path, and the normal cached path
 * through segmap/vpm with async-write throttling.  On error the uio is
 * restored to the state before the failing chunk.
 */
static int
nfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	rnode_t *rp;
	u_offset_t off;
	caddr_t base;
	uint_t flags;
	int remainder;
	size_t n;
	int on;
	int error;
	int resid;
	offset_t offset;
	rlim_t limit;
	mntinfo_t *mi;

	rp = VTOR(vp);
	mi = VTOMI(vp);
	if (nfs_zone() != mi->mi_zone)
		return (EIO);
	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	if (ioflag & FAPPEND) {
		struct vattr va;

		/*
		 * Appending: upgrade r_rwlock from reader to writer
		 * (drop, then reacquire) so the size fetched below
		 * cannot change underneath us.
		 */
		if (nfs_rw_lock_held(&rp->r_rwlock, RW_READER)) {
			nfs_rw_exit(&rp->r_rwlock);
			if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER,
			    INTR(vp)))
				return (EINTR);
		}
		va.va_mask = AT_SIZE;
		error = nfsgetattr(vp, &va, cr);
		if (error)
			return (error);
		uiop->uio_loffset = va.va_size;
	}

	/* NFSv2 offsets are limited to 32 bits. */
	if (uiop->uio_loffset > MAXOFF32_T)
		return (EFBIG);

	offset = uiop->uio_loffset + uiop->uio_resid;

	if (uiop->uio_loffset < 0 || offset > MAXOFF32_T)
		return (EINVAL);

	if (uiop->uio_llimit > (rlim64_t)MAXOFF32_T) {
		limit = MAXOFF32_T;
	} else {
		limit = (rlim_t)uiop->uio_llimit;
	}

	/*
	 * Clip the write to the file-size resource limit; if nothing
	 * survives the clip, raise the RLIMIT_FSIZE rctl action
	 * (SIGXFSZ) and fail with EFBIG.
	 */
	remainder = 0;
	if (offset > limit) {
		remainder = offset - limit;
		uiop->uio_resid = limit - uiop->uio_offset;
		if (uiop->uio_resid <= 0) {
			proc_t *p = ttoproc(curthread);

			uiop->uio_resid += remainder;
			mutex_enter(&p->p_lock);
			(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
			    p->p_rctls, p, RCA_UNSAFE_SIGINFO);
			mutex_exit(&p->p_lock);
			return (EFBIG);
		}
	}

	/* Serialize against lock/unlock-triggered cache operations. */
	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp)))
		return (EINTR);

	/*
	 * Direct (uncached) path: copy user data into a bounce buffer
	 * and write it synchronously over the wire.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
	    rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {
		size_t bufsize;
		int count;
		uint_t org_offset;

nfs_fwrite:
		if (rp->r_flags & RSTALE) {
			resid = uiop->uio_resid;
			offset = uiop->uio_loffset;
			error = rp->r_error;
			/*
			 * A close may have cleared r_error; still report
			 * ESTALE for a stale filehandle.
			 */
			if (error == 0)
				error = ESTALE;
			goto bottom;
		}
		bufsize = MIN(uiop->uio_resid, mi->mi_curwrite);
		base = kmem_alloc(bufsize, KM_SLEEP);
		do {
			/* Snapshot uio state in case this chunk fails. */
			resid = uiop->uio_resid;
			offset = uiop->uio_loffset;
			count = MIN(uiop->uio_resid, bufsize);
			org_offset = uiop->uio_offset;
			error = uiomove(base, count, UIO_WRITE, uiop);
			if (!error) {
				error = nfswrite(vp, base, org_offset,
				    count, cr);
			}
		} while (!error && uiop->uio_resid > 0);
		kmem_free(base, bufsize);
		goto bottom;
	}

	do {
		off = uiop->uio_loffset & MAXBMASK;	/* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET;	/* offset in block */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		/* Snapshot uio state for restoration on error. */
		resid = uiop->uio_resid;
		offset = uiop->uio_loffset;

		if (rp->r_flags & RSTALE) {
			error = rp->r_error;
			if (error == 0)
				error = ESTALE;
			break;
		}

		/*
		 * Throttle: don't create dirty pages faster than the
		 * async write threads can retire them, and stall while
		 * a getattr-initiated flush (r_gcount) is in progress.
		 * lwp_nostop bracketing keeps the interruptible wait
		 * from being stopped by /proc (presumably — confirm
		 * against cv_wait_sig semantics).
		 */
		mutex_enter(&rp->r_statelock);
		while ((mi->mi_max_threads != 0 &&
		    rp->r_awcount > 2 * mi->mi_max_threads) ||
		    rp->r_gcount > 0) {
			if (INTR(vp)) {
				klwp_t *lwp = ttolwp(curthread);

				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
					mutex_exit(&rp->r_statelock);
					if (lwp != NULL)
						lwp->lwp_nostop--;
					error = EINTR;
					goto bottom;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&rp->r_cv, &rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);

		/*
		 * Fault the user pages in before taking segmap/vpm
		 * locks, presumably to avoid a deadlock when the source
		 * buffer is an mmap of this same file — TODO confirm
		 * against uio_prefaultpages().
		 */
		uio_prefaultpages((long)n, uiop);

		if (vpm_enable) {
			error = writerp(rp, NULL, n, uiop, 0);
		} else {
			if (segmap_kpm) {
				int pon = uiop->uio_loffset & PAGEOFFSET;
				size_t pn = MIN(PAGESIZE - pon,
				    uiop->uio_resid);
				int pagecreate;

				/*
				 * A page can be created (not faulted in)
				 * only when the write starts on the page
				 * boundary and either covers the whole
				 * page or extends past current EOF.
				 */
				mutex_enter(&rp->r_statelock);
				pagecreate = (pon == 0) && (pn == PAGESIZE ||
				    uiop->uio_loffset + pn >= rp->r_size);
				mutex_exit(&rp->r_statelock);

				base = segmap_getmapflt(segkmap, vp, off + on,
				    pn, !pagecreate, S_WRITE);

				error = writerp(rp, base + pon, n, uiop,
				    pagecreate);
			} else {
				base = segmap_getmapflt(segkmap, vp, off + on,
				    n, 0, S_READ);
				error = writerp(rp, base + on, n, uiop, 0);
			}
		}

		if (!error) {
			if (mi->mi_flags & MI_NOAC)
				flags = SM_WRITE;
			else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
				/*
				 * A whole block was written: start an
				 * async write and mark the window as
				 * not needed again soon.
				 */
				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
			} else
				flags = 0;
			/* Synchronous semantics override async flags. */
			if ((ioflag & (FSYNC|FDSYNC)) ||
			    (rp->r_flags & ROUTOFSPACE)) {
				flags &= ~SM_ASYNC;
				flags |= SM_WRITE;
			}
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
			/*
			 * EACCES while faulting in a page of a
			 * write-only file: fall back to a direct write.
			 */
			if (error == EACCES)
				goto nfs_fwrite;
		}
	} while (!error && uiop->uio_resid > 0);

bottom:
	if (error) {
		/* Rewind the uio to the state before the failed chunk. */
		uiop->uio_resid = resid + remainder;
		uiop->uio_loffset = offset;
	} else
		uiop->uio_resid += remainder;

	nfs_rw_exit(&rp->r_lkserlock);

	return (error);
}
/*
 * Perform the I/O described by the page list 'pp' ('len' bytes at file
 * offset 'off') by wrapping it in a temporary buf and handing it to
 * nfs_bio(), which issues the actual READ/WRITE RPCs.  'flags' are the
 * B_* buf flags.
 */
static int
nfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
	int flags, cred_t *cr)
{
	struct buf *bp;
	int error;

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
	bp = pageio_setup(pp, len, vp, flags);
	ASSERT(bp != NULL);

	/*
	 * pageio_setup is expected to leave b_addr zeroed; bp_mapin()
	 * below supplies the kernel virtual address for the pages.
	 */
	ASSERT(bp->b_un.b_addr == 0);

	bp->b_edev = 0;
	bp->b_dev = 0;
	bp->b_lblkno = lbtodb(off);
	bp->b_file = vp;
	bp->b_offset = (offset_t)off;
	bp_mapin(bp);

	error = nfs_bio(bp, cr);

	bp_mapout(bp);
	pageio_done(bp);

	return (error);
}
/*
 * Write 'count' bytes from 'base' at file offset 'offset' using
 * synchronous RFS_WRITE calls of at most mi_curwrite bytes each.
 * ENFS_TRYAGAIN responses are retried.  After each successful chunk
 * the attribute cache is purged and RWRITEATTR set so fresh attributes
 * are fetched later (e.g. at close).  Returns 0 or an errno.
 */
static int
nfswrite(vnode_t *vp, caddr_t base, uint_t offset, int count, cred_t *cr)
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfswriteargs wa;
	struct nfsattrstat ns;
	int error;
	int tsize;
	int douprintf;

	douprintf = 1;

	rp = VTOR(vp);
	mi = VTOMI(vp);

	ASSERT(nfs_zone() == mi->mi_zone);

	wa.wa_args = &wa.wa_args_buf;
	wa.wa_fhandle = *VTOFH(vp);

	do {
		/* Clamp each transfer to the current write size. */
		tsize = MIN(mi->mi_curwrite, count);
		wa.wa_data = base;
		wa.wa_begoff = offset;
		wa.wa_totcount = tsize;
		wa.wa_count = tsize;
		wa.wa_offset = offset;

		/* Bracket the RPC with kstat run-queue accounting. */
		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}
		wa.wa_mblk = NULL;
		do {
			error = rfs2call(mi, RFS_WRITE,
			    xdr_writeargs, (caddr_t)&wa,
			    xdr_attrstat, (caddr_t)&ns, cr,
			    &douprintf, &ns.ns_status, 0, NULL);
		} while (error == ENFS_TRYAGAIN);
		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		if (!error) {
			error = geterrno(ns.ns_status);
			if (!error) {
				/* Advance past the bytes just written. */
				count -= tsize;
				base += tsize;
				offset += tsize;
				if (mi->mi_io_kstats) {
					mutex_enter(&mi->mi_lock);
					KSTAT_IO_PTR(mi->mi_io_kstats)->
					    writes++;
					KSTAT_IO_PTR(mi->mi_io_kstats)->
					    nwritten += tsize;
					mutex_exit(&mi->mi_lock);
				}
				lwp_stat_update(LWP_STAT_OUBLK, 1);
				/*
				 * Attributes changed on the server;
				 * flag so they get refetched.
				 */
				mutex_enter(&rp->r_statelock);
				PURGE_ATTRCACHE_LOCKED(rp);
				rp->r_flags |= RWRITEATTR;
				mutex_exit(&rp->r_statelock);
			}
		}
	} while (!error && count);

	return (error);
}
/*
 * Read up to 'count' bytes at 'offset' into 'base' using RFS_READ calls
 * of at most mi_curread bytes each, stopping early on a short read
 * (EOF).  *residp returns how many of the requested bytes were NOT
 * read.  On success the attributes returned with the last reply may be
 * cached, but only if they are still consistent (mtime/size unchanged
 * and no RPC raced past us) and the ACL protocol is not in use.
 */
static int
nfsread(vnode_t *vp, caddr_t base, uint_t offset,
	int count, size_t *residp, cred_t *cr)
{
	mntinfo_t *mi;
	struct nfsreadargs ra;
	struct nfsrdresult rr;
	int tsize;
	int error;
	int douprintf;
	failinfo_t fi;
	rnode_t *rp;
	struct vattr va;
	hrtime_t t;

	rp = VTOR(vp);
	mi = VTOMI(vp);

	ASSERT(nfs_zone() == mi->mi_zone);

	douprintf = 1;

	ra.ra_fhandle = *VTOFH(vp);

	/* Failover information, used if the filehandle goes stale. */
	fi.vp = vp;
	fi.fhp = (caddr_t)&ra.ra_fhandle;
	fi.copyproc = nfscopyfh;
	fi.lookupproc = nfslookup;
	fi.xattrdirproc = acl_getxattrdir2;

	do {
		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		do {
			/* Clamp each transfer to the current read size. */
			tsize = MIN(mi->mi_curread, count);
			rr.rr_data = base;
			ra.ra_offset = offset;
			ra.ra_totcount = tsize;
			ra.ra_count = tsize;
			ra.ra_data = base;
			t = gethrtime();
			error = rfs2call(mi, RFS_READ,
			    xdr_readargs, (caddr_t)&ra,
			    xdr_rdresult, (caddr_t)&rr, cr,
			    &douprintf, &rr.rr_status, 0, &fi);
		} while (error == ENFS_TRYAGAIN);

		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		if (!error) {
			error = geterrno(rr.rr_status);
			if (!error) {
				/* Advance by what the server returned. */
				count -= rr.rr_count;
				base += rr.rr_count;
				offset += rr.rr_count;
				if (mi->mi_io_kstats) {
					mutex_enter(&mi->mi_lock);
					KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
					KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
					    rr.rr_count;
					mutex_exit(&mi->mi_lock);
				}
				lwp_stat_update(LWP_STAT_INBLK, 1);
			}
		}
	/* A short reply (rr_count < tsize) means EOF: stop looping. */
	} while (!error && count && rr.rr_count == tsize);

	*residp = count;
	if (!error) {
		/*
		 * Cache the piggybacked attributes only when safe: the
		 * cache must still match (mtime/size) and no newer
		 * attributes may have arrived (r_mtime <= t).  With the
		 * ACL protocol, attributes may carry minimal permissions,
		 * so just purge instead.
		 */
		error = nattr_to_vattr(vp, &rr.rr_attr, &va);
		mutex_enter(&rp->r_statelock);
		if (error || !CACHE_VALID(rp, va.va_mtime, va.va_size) ||
		    (mi->mi_flags & MI_ACL)) {
			mutex_exit(&rp->r_statelock);
			PURGE_ATTRCACHE(vp);
		} else {
			if (rp->r_mtime <= t) {
				nfs_attrcache_va(vp, &va);
			}
			mutex_exit(&rp->r_statelock);
		}
	}

	return (error);
}
/*
 * VOP_IOCTL for NFSv2.  The only ioctl supported over NFS is
 * _FIODIRECTIO, which enables or disables direct I/O on the file;
 * everything else gets ENOTTY.
 */
static int
nfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp,
	caller_context_t *ct)
{
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	if (cmd == _FIODIRECTIO)
		return (nfs_directio(vp, (int)arg, cr));

	return (ENOTTY);
}
/*
 * VOP_GETATTR for NFSv2.
 *
 * ATTR_HINT callers need only a subset of attributes and tolerate
 * slightly stale values, so AT_SIZE/AT_FSID/AT_RDEV are served straight
 * from the rnode without going over the wire.  If mtime is requested
 * and there may be dirty cached data, the pages are flushed first so
 * the server's mtime reflects our writes.
 */
static int
nfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
	caller_context_t *ct)
{
	int error;
	rnode_t *rp;

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	rp = VTOR(vp);

	if (flags & ATTR_HINT) {
		if (vap->va_mask ==
		    (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
			mutex_enter(&rp->r_statelock);
			/*
			 * FIX: these mask tests previously used bitwise
			 * OR ('|'), which is always non-zero, so all
			 * three fields were filled regardless of the
			 * requested mask.  Use AND to honor the mask.
			 */
			if (vap->va_mask & AT_SIZE)
				vap->va_size = rp->r_size;
			if (vap->va_mask & AT_FSID)
				vap->va_fsid = rp->r_attr.va_fsid;
			if (vap->va_mask & AT_RDEV)
				vap->va_rdev = rp->r_attr.va_rdev;
			mutex_exit(&rp->r_statelock);
			return (0);
		}
	}

	if (vap->va_mask & AT_MTIME) {
		if (vn_has_cached_data(vp) &&
		    ((rp->r_flags & RDIRTY) || rp->r_awcount > 0)) {
			/*
			 * r_gcount stalls concurrent writers (see the
			 * throttle loop in nfs_write) while the flush
			 * is in progress.
			 */
			mutex_enter(&rp->r_statelock);
			rp->r_gcount++;
			mutex_exit(&rp->r_statelock);
			error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
			mutex_enter(&rp->r_statelock);
			if (error && (error == ENOSPC || error == EDQUOT)) {
				/* Latch the first hard error on the rnode. */
				if (!rp->r_error)
					rp->r_error = error;
			}
			if (--rp->r_gcount == 0)
				cv_broadcast(&rp->r_cv);
			mutex_exit(&rp->r_statelock);
		}
	}

	return (nfsgetattr(vp, vap, cr));
}
/*
 * VOP_SETATTR for NFSv2: run the local permission policy via
 * secpolicy_vnode_setattr() (using nfs_accessx as the access callback),
 * then perform the change over the wire through nfssetattr().
 */
static int
nfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
	caller_context_t *ct)
{
	int error;
	uint_t mask;
	struct vattr va;

	mask = vap->va_mask;

	/* Attributes in AT_NOSET can never be set explicitly. */
	if (mask & AT_NOSET)
		return (EINVAL);

	/* NFSv2 cannot represent sizes beyond 32 bits. */
	if ((mask & AT_SIZE) &&
	    vap->va_type == VREG &&
	    vap->va_size > MAXOFF32_T)
		return (EFBIG);

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	/* Fetch current owner/mode for the policy check below. */
	va.va_mask = AT_UID | AT_MODE;
	error = nfsgetattr(vp, &va, cr);
	if (error)
		return (error);

	error = secpolicy_vnode_setattr(cr, vp, vap, &va, flags, nfs_accessx,
	    vp);
	if (error)
		return (error);

	error = nfssetattr(vp, vap, flags, cr);

	/* Notify vnevent watchers of a truncate-to-zero. */
	if (error == 0 && (mask & AT_SIZE) && vap->va_size == 0)
		vnevent_truncate(vp, ct);

	return (error);
}
/*
 * Perform a SETATTR over the wire (RFS_SETATTR), handling NFSv2
 * protocol quirks: dirty pages are flushed before a size change so the
 * post-SETATTR size is accurate; time values outside the protocol's
 * range are substituted; ACL/access caches are invalidated on
 * ownership or mode changes; files are extended via a zero-byte write
 * when the server did not extend them; and mode bits a server cleared
 * on chown are restored with a recursive call.
 */
static int
nfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
{
	int error;
	uint_t mask;
	struct nfssaargs args;
	struct nfsattrstat ns;
	int douprintf;
	rnode_t *rp;
	struct vattr va;
	mode_t omode;
	mntinfo_t *mi;
	vsecattr_t *vsp;
	hrtime_t t;

	mask = vap->va_mask;

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

	rp = VTOR(vp);

	/*
	 * Flush first if the file has pages and is dirty in any
	 * fashion (RDIRTY, outstanding I/O, or mmapped — in the mmap
	 * case we can't tell, so flush to be safe), so size and cached
	 * data are accurate after the SETATTR.
	 */
	if (vn_has_cached_data(vp) &&
	    ((rp->r_flags & RDIRTY) ||
	    rp->r_count > 0 ||
	    rp->r_mapcnt > 0)) {
		ASSERT(vp->v_type != VCHR);
		error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
		if (error && (error == ENOSPC || error == EDQUOT)) {
			mutex_enter(&rp->r_statelock);
			if (!rp->r_error)
				rp->r_error = error;
			mutex_exit(&rp->r_statelock);
		}
	}

	/*
	 * tv_nsec of 1000000000 appears to be an over-the-wire marker
	 * meaning "set to the server's time" — NOTE(review): inferred;
	 * confirm against vattr_to_sattr().
	 */
	if ((mask & AT_MTIME) && !(flags & ATTR_UTIME)) {
		vap->va_mtime.tv_nsec = 1000000000;
		if (NFS_TIME_T_OK(vap->va_mtime.tv_sec) &&
		    NFS_TIME_T_OK(vap->va_atime.tv_sec)) {
			error = vattr_to_sattr(vap, &args.saa_sa);
		} else {
			/*
			 * Client times don't fit the protocol; encode
			 * safe substitutes, then restore the caller's
			 * original values afterwards.
			 */
			timestruc_t mtime = vap->va_mtime;
			timestruc_t atime = vap->va_atime;
			time_t now;

			now = gethrestime_sec();
			if (NFS_TIME_T_OK(now)) {
				vap->va_mtime.tv_sec = now;
				vap->va_atime.tv_sec = now;
			} else {
				vap->va_mtime.tv_sec = 0;
				vap->va_atime.tv_sec = 0;
			}
			error = vattr_to_sattr(vap, &args.saa_sa);
			/* Restore the original caller-supplied times. */
			vap->va_mtime = mtime;
			vap->va_atime = atime;
		}
	} else {
		error = vattr_to_sattr(vap, &args.saa_sa);
	}
	if (error) {
		/* Time field(s) overflowed the wire format. */
		return (error);
	}
	args.saa_fh = *VTOFH(vp);

	/* Remember the pre-call mode to restore after a chown. */
	va.va_mask = AT_MODE;
	error = nfsgetattr(vp, &va, cr);
	if (error)
		return (error);
	omode = va.va_mode;

	mi = VTOMI(vp);

	douprintf = 1;

	t = gethrtime();

	error = rfs2call(mi, RFS_SETATTR,
	    xdr_saargs, (caddr_t)&args,
	    xdr_attrstat, (caddr_t)&ns, cr,
	    &douprintf, &ns.ns_status, 0, NULL);

	/*
	 * An ownership or mode change invalidates cached access
	 * answers and any cached ACL.
	 */
	if ((mask & (AT_UID | AT_GID | AT_MODE)) && (mi->mi_flags & MI_ACL)) {
		(void) nfs_access_purge_rp(rp);
		if (rp->r_secattr != NULL) {
			mutex_enter(&rp->r_statelock);
			vsp = rp->r_secattr;
			rp->r_secattr = NULL;
			mutex_exit(&rp->r_statelock);
			if (vsp != NULL)
				nfs_acl_free(vsp);
		}
	}

	if (!error) {
		error = geterrno(ns.ns_status);
		if (!error) {
			/* Toss cached pages beyond the new size. */
			if (mask & AT_SIZE) {
				nfs_invalidate_pages(vp,
				    (vap->va_size & PAGEMASK), cr);
			}
			(void) nfs_cache_fattr(vp, &ns.ns_attr, &va, t, cr);
			/*
			 * With the ACL protocol the returned attributes
			 * may carry minimal permissions; force a refetch.
			 */
			if (mi->mi_flags & MI_ACL) {
				PURGE_ATTRCACHE(vp);
			}
			/*
			 * Server reports a size smaller than requested
			 * (workaround for servers that don't extend a
			 * file on SETATTR): extend it by writing a zero
			 * byte at the last requested offset.
			 */
			if ((mask & AT_SIZE) &&
			    ns.ns_attr.na_size < (uint32_t)vap->va_size) {
				char zb = '\0';

				error = nfswrite(vp, &zb,
				    vap->va_size - sizeof (zb),
				    sizeof (zb), cr);
			}
			/*
			 * Some servers clear setuid/setgid when changing
			 * uid/gid; if the mode came back different from
			 * what was intended, put it back.
			 */
			if (mask & (AT_UID | AT_GID)) {
				int terror;

				va.va_mask = AT_MODE;
				terror = nfsgetattr(vp, &va, cr);
				if (!terror &&
				    (((mask & AT_MODE) &&
				    va.va_mode != vap->va_mode) ||
				    (!(mask & AT_MODE) &&
				    va.va_mode != omode))) {
					va.va_mask = AT_MODE;
					if (mask & AT_MODE)
						va.va_mode = vap->va_mode;
					else
						va.va_mode = omode;
					(void) nfssetattr(vp, &va, 0, cr);
				}
			}
		} else {
			PURGE_ATTRCACHE(vp);
			PURGE_STALE_FH(error, vp, cr);
		}
	} else {
		PURGE_ATTRCACHE(vp);
	}

	return (error);
}
/*
 * Access-check callback (see secpolicy_vnode_setattr() usage in
 * nfs_setattr): adapts the generic (void *) argument back to a vnode
 * and defers to nfs_access().
 */
static int
nfs_accessx(void *vp, int mode, cred_t *cr)
{
	vnode_t *vnp = vp;

	ASSERT(nfs_zone() == VTOMI(vnp)->mi_zone);
	return (nfs_access(vnp, mode, 0, cr, NULL));
}
/*
 * VOP_ACCESS for NFSv2.  If the mount supports the ACL side protocol,
 * ask the server via acl_access2(); otherwise approximate the check
 * locally from cached mode/owner/group, classic UNIX style.
 */
static int
nfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
{
	struct vattr va;
	int error;
	mntinfo_t *mi;
	int shift = 0;

	mi = VTOMI(vp);

	if (nfs_zone() != mi->mi_zone)
		return (EIO);
	if (mi->mi_flags & MI_ACL) {
		error = acl_access2(vp, mode, flags, cr);
		/*
		 * MI_ACL is deliberately re-tested: acl_access2() can
		 * apparently clear it (e.g. server doesn't really speak
		 * the ACL protocol), in which case we fall through to
		 * the local mode-bit check below.  NOTE(review):
		 * inferred — confirm against acl_access2().
		 */
		if (mi->mi_flags & MI_ACL)
			return (error);
	}

	va.va_mask = AT_MODE | AT_UID | AT_GID;
	error = nfsgetattr(vp, &va, cr);
	if (error)
		return (error);

	/* Writes to a read-only mount fail unless vp is a device node. */
	if ((mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp))
		return (EROFS);

	/* Deny read/write/exec when mandatory locking is in effect. */
	if ((mode & (VWRITE | VREAD | VEXEC)) &&
	    MANDLOCK(vp, va.va_mode))
		return (EACCES);

	/*
	 * Shift the group or other permission triad into the owner
	 * position depending on the caller's relation to the file.
	 */
	if (crgetuid(cr) != va.va_uid) {
		shift += 3;
		if (!groupmember(va.va_gid, cr))
			shift += 3;
	}

	return (secpolicy_vnode_access2(cr, vp, va.va_uid,
	    va.va_mode << shift, mode));
}
/* Tunable: cache symlink contents in the rnode (see nfs_readlink()). */
static int nfs_do_symlink_cache = 1;
/*
 * VOP_READLINK: return the contents of a symbolic link.  Contents are
 * cached in the rnode (r_symlink) when nfs_do_symlink_cache is set;
 * on a miss an RFS_READLINK call fills a NFS_MAXPATHLEN buffer, which
 * may then be donated to the cache.  ENXIO from the server is mapped
 * to EINVAL for callers.
 */
static int
nfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
{
	int error;
	struct nfsrdlnres rl;
	rnode_t *rp;
	int douprintf;
	failinfo_t fi;

	/* Only symbolic links can be read this way. */
	if (vp->v_type != VLNK)
		return (EINVAL);

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	rp = VTOR(vp);
	if (nfs_do_symlink_cache && rp->r_symlink.contents != NULL) {
		error = nfs_validate_caches(vp, cr);
		if (error)
			return (error);
		mutex_enter(&rp->r_statelock);
		/* Recheck: validation may have purged the cached copy. */
		if (rp->r_symlink.contents != NULL) {
			error = uiomove(rp->r_symlink.contents,
			    rp->r_symlink.len, UIO_READ, uiop);
			mutex_exit(&rp->r_statelock);
			return (error);
		}
		mutex_exit(&rp->r_statelock);
	}

	rl.rl_data = kmem_alloc(NFS_MAXPATHLEN, KM_SLEEP);

	/* Failover information, used if the filehandle goes stale. */
	fi.vp = vp;
	fi.fhp = NULL;		/* no need to update, filehandle not copied */
	fi.copyproc = nfscopyfh;
	fi.lookupproc = nfslookup;
	fi.xattrdirproc = acl_getxattrdir2;

	douprintf = 1;

	error = rfs2call(VTOMI(vp), RFS_READLINK,
	    xdr_readlink, (caddr_t)VTOFH(vp),
	    xdr_rdlnres, (caddr_t)&rl, cr,
	    &douprintf, &rl.rl_status, 0, &fi);

	if (error) {
		kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
		return (error);
	}

	error = geterrno(rl.rl_status);
	if (!error) {
		error = uiomove(rl.rl_data, (int)rl.rl_count, UIO_READ, uiop);
		if (nfs_do_symlink_cache && rp->r_symlink.contents == NULL) {
			mutex_enter(&rp->r_statelock);
			if (rp->r_symlink.contents == NULL) {
				/* Donate the buffer to the rnode cache. */
				rp->r_symlink.contents = rl.rl_data;
				rp->r_symlink.len = (int)rl.rl_count;
				rp->r_symlink.size = NFS_MAXPATHLEN;
				mutex_exit(&rp->r_statelock);
			} else {
				/* Another thread cached first; drop ours. */
				mutex_exit(&rp->r_statelock);
				kmem_free((void *)rl.rl_data,
				    NFS_MAXPATHLEN);
			}
		} else {
			kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
		}
	} else {
		PURGE_STALE_FH(error, vp, cr);
		kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
	}

	/* Map server ENXIO to the documented readlink errno. */
	return (error == ENXIO ? EINVAL : error);
}
/*
 * VOP_FSYNC for NFSv2: flush all dirty pages synchronously.  Swap
 * vnodes and FNODSYNC ("no data sync") requests are no-ops.  Reports
 * any error previously latched on the rnode if the flush itself
 * succeeded.
 */
static int
nfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
	int error;

	if (IS_SWAPVP(vp) || (syncflag & FNODSYNC))
		return (0);

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
	if (error == 0)
		error = VTOR(vp)->r_error;

	return (error);
}
/*
 * VOP_INACTIVE: last reference to the vnode is going away.  If the
 * file was removed while open ("silly renamed"; r_unldvp/r_unlname/
 * r_unlcred hold the deferred remove), flush any dirty data and send
 * the deferred REMOVE now, then put the rnode on the free list.
 * From the wrong zone the work is handed to an async worker instead.
 */
static void
nfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	rnode_t *rp;

	/* The DNLC negative-cache sentinel must never reach here. */
	ASSERT(vp != DNLC_NO_VNODE);

	if (nfs_zone() != VTOMI(vp)->mi_zone) {
		/* Wrong zone: punt to an async thread in the right one. */
		nfs_async_inactive(vp, cr, nfs_inactive);
		return;
	}

	rp = VTOR(vp);
redo:
	if (rp->r_unldvp != NULL) {
		/*
		 * Recheck under r_statelock: another thread may have
		 * claimed the deferred remove already.
		 */
		mutex_enter(&rp->r_statelock);
		if (rp->r_unldvp != NULL) {
			vnode_t *unldvp;
			char *unlname;
			cred_t *unlcred;
			struct nfsdiropargs da;
			enum nfsstat status;
			int douprintf;
			int error;

			/* Claim the pending-unlink state for ourselves. */
			unldvp = rp->r_unldvp;
			rp->r_unldvp = NULL;
			unlname = rp->r_unlname;
			rp->r_unlname = NULL;
			unlcred = rp->r_unlcred;
			rp->r_unlcred = NULL;
			mutex_exit(&rp->r_statelock);

			/*
			 * Flush dirty pages first so the data isn't
			 * lost when the file is removed; latch any
			 * error on the rnode.
			 */
			if (vn_has_cached_data(vp) &&
			    ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
				ASSERT(vp->v_type != VCHR);
				error = nfs_putpage(vp, (offset_t)0, 0, 0,
				    cr, ct);
				if (error) {
					mutex_enter(&rp->r_statelock);
					if (!rp->r_error)
						rp->r_error = error;
					mutex_exit(&rp->r_statelock);
				}
			}

			/* Send the deferred REMOVE; errors are ignored. */
			setdiropargs(&da, unlname, unldvp);

			douprintf = 1;

			(void) rfs2call(VTOMI(unldvp), RFS_REMOVE,
			    xdr_diropargs, (caddr_t)&da,
			    xdr_enum, (caddr_t)&status, unlcred,
			    &douprintf, &status, 0, NULL);

			/* The parent directory changed underneath us. */
			if (HAVE_RDDIR_CACHE(VTOR(unldvp)))
				nfs_purge_rddir_cache(unldvp);
			PURGE_ATTRCACHE(unldvp);

			/* Release what the silly rename was holding. */
			VN_RELE(unldvp);
			kmem_free(unlname, MAXNAMELEN);
			crfree(unlcred);

			/* A new silly rename may have appeared meanwhile. */
			goto redo;
		}
		mutex_exit(&rp->r_statelock);
	}

	rp_addfree(rp, cr);
}
/*
 * VOP_LOOKUP for NFSv2.  For LOOKUP_XATTR, first resolve the hidden
 * extended-attribute directory and continue the lookup there.  The
 * actual name lookup runs under the directory's r_rwlock.  Device
 * nodes are wrapped in specvp() before being returned.
 */
static int
nfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
	int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
	int *direntflags, pathname_t *realpnp)
{
	int error;
	vnode_t *vp;
	vnode_t *avp = NULL;
	rnode_t *drp;

	if (nfs_zone() != VTOMI(dvp)->mi_zone)
		return (EPERM);

	drp = VTOR(dvp);

	/*
	 * Extended attributes: "dvp" is the object whose attribute
	 * directory we must find first; the rest of the lookup then
	 * proceeds inside that directory.
	 */
	if (flags & LOOKUP_XATTR) {
		bool_t cflag = ((flags & CREATE_XATTR_DIR) != 0);
		mntinfo_t *mi;

		mi = VTOMI(dvp);
		if (!(mi->mi_flags & MI_EXTATTR))
			return (EINVAL);

		if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp)))
			return (EINTR);

		/* Try the DNLC first; fall back to the ACL OTW call. */
		(void) nfslookup_dnlc(dvp, XATTR_DIR_NAME, &avp, cr);
		if (avp == NULL)
			error = acl_getxattrdir2(dvp, &avp, cflag, cr, 0);
		else
			error = 0;

		nfs_rw_exit(&drp->r_rwlock);

		if (error) {
			/*
			 * If the mount still advertises extended
			 * attributes, report the real error; otherwise
			 * MI_EXTATTR was cleared meanwhile and EINVAL
			 * matches the check above.
			 */
			if (mi->mi_flags & MI_EXTATTR)
				return (error);
			return (EINVAL);
		}
		/* Continue the lookup inside the attribute directory. */
		dvp = avp;
		drp = VTOR(dvp);
	}

	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp))) {
		error = EINTR;
		goto out;
	}

	error = nfslookup(dvp, nm, vpp, pnp, flags, rdir, cr, 0);

	nfs_rw_exit(&drp->r_rwlock);

	/* Wrap device nodes in a special vnode. */
	if (!error && IS_DEVVP(*vpp)) {
		vp = *vpp;
		*vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
		VN_RELE(vp);
	}

out:
	if (avp != NULL)
		VN_RELE(avp);

	return (error);
}
/* Tunable: cache negative lookup results (ENOENT) in the DNLC. */
static int nfs_lookup_neg_cache = 1;

#ifdef DEBUG
/* DNLC lookup statistics, maintained only on DEBUG kernels. */
static int nfs_lookup_dnlc_hits = 0;
static int nfs_lookup_dnlc_misses = 0;
static int nfs_lookup_dnlc_neg_hits = 0;
static int nfs_lookup_dnlc_disappears = 0;
static int nfs_lookup_dnlc_lookups = 0;
#endif
/*
 * Look up 'nm' in directory dvp.  The empty name means the directory
 * itself, "." is handled locally after an access check, and other
 * names are tried in the DNLC before going over the wire.  A "soft"
 * call (RFSCALL_SOFT) skips the "." shortcut and the DNLC and goes
 * straight over the wire.
 */
int
nfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
	int flags, vnode_t *rdir, cred_t *cr, int rfscall_flags)
{
	int error;

	ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);

	/* An empty component names the directory itself. */
	if (*nm == '\0') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if (!(rfscall_flags & RFSCALL_SOFT)) {
		if (strcmp(nm, ".") == 0) {
			/* "." needs search permission on dvp. */
			error = nfs_access(dvp, VEXEC, 0, cr, NULL);
			if (error)
				return (error);
			VN_HOLD(dvp);
			*vpp = dvp;
			return (0);
		}

		/* Try the name cache; a hit or hard error ends here. */
		error = nfslookup_dnlc(dvp, nm, vpp, cr);
		if (error || *vpp != NULL)
			return (error);
	}

	return (nfslookup_otw(dvp, nm, vpp, cr, rfscall_flags));
}
/*
 * Look up 'nm' in directory dvp via the directory name lookup cache.
 * Returns 0 with *vpp == NULL on a miss; 0 with a held vnode in *vpp
 * on a hit; ENOENT for a cached negative entry; or an error from cache
 * validation / access checking.
 */
static int
nfslookup_dnlc(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr)
{
	int error;
	vnode_t *vp;

	ASSERT(*nm != '\0');
	ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);

	#ifdef DEBUG
	nfs_lookup_dnlc_lookups++;
	#endif
	vp = dnlc_lookup(dvp, nm);
	if (vp != NULL) {
		/*
		 * Drop the hold from this first probe immediately; it
		 * only tells us whether the caches need revalidating.
		 * 'vp' is compared against the DNLC_NO_VNODE sentinel
		 * below but never dereferenced.
		 */
		VN_RELE(vp);
		if (vp == DNLC_NO_VNODE && !vn_is_readonly(dvp)) {
			/* Negative entry in a writable dir: recheck attrs. */
			PURGE_ATTRCACHE(dvp);
		}

		/*
		 * Validate the caches, then look again: the entry may
		 * have been purged by the validation.
		 */
		error = nfs_validate_caches(dvp, cr);
		if (error)
			return (error);
		vp = dnlc_lookup(dvp, nm);
		if (vp != NULL) {
			error = nfs_access(dvp, VEXEC, 0, cr, NULL);
			if (error) {
				VN_RELE(vp);
				return (error);
			}
			if (vp == DNLC_NO_VNODE) {
				VN_RELE(vp);
	#ifdef DEBUG
				nfs_lookup_dnlc_neg_hits++;
	#endif
				return (ENOENT);
			}
			/* Positive hit: hand the held vnode to the caller. */
			*vpp = vp;
	#ifdef DEBUG
			nfs_lookup_dnlc_hits++;
	#endif
			return (0);
		}
	#ifdef DEBUG
		nfs_lookup_dnlc_disappears++;
	#endif
	}
	#ifdef DEBUG
	else
		nfs_lookup_dnlc_misses++;
	#endif

	/* Cache miss: caller must go over the wire. */
	*vpp = NULL;

	return (0);
}
/*
 * Look up 'nm' in dvp over the wire (RFS_LOOKUP) and build or locate
 * the corresponding rnode.  On success the DNLC is primed (unless the
 * call was "soft"); ENOENT may be cached negatively.  Stale
 * filehandles purge the directory's caches via PURGE_STALE_FH.
 */
static int
nfslookup_otw(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
	int rfscall_flags)
{
	int error;
	struct nfsdiropargs da;
	struct nfsdiropres dr;
	int douprintf;
	failinfo_t fi;
	hrtime_t t;

	ASSERT(*nm != '\0');
	ASSERT(dvp->v_type == VDIR);
	ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);

	setdiropargs(&da, nm, dvp);

	/* Failover information, used if the filehandle goes stale. */
	fi.vp = dvp;
	fi.fhp = NULL;		/* no need to update, filehandle not copied */
	fi.copyproc = nfscopyfh;
	fi.lookupproc = nfslookup;
	fi.xattrdirproc = acl_getxattrdir2;

	douprintf = 1;

	t = gethrtime();

	error = rfs2call(VTOMI(dvp), RFS_LOOKUP,
	    xdr_diropargs, (caddr_t)&da,
	    xdr_diropres, (caddr_t)&dr, cr,
	    &douprintf, &dr.dr_status, rfscall_flags, &fi);

	if (!error) {
		error = geterrno(dr.dr_status);
		if (!error) {
			*vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
			    dvp->v_vfsp, t, cr, VTOR(dvp)->r_path, nm);
			/*
			 * With the ACL protocol the attributes in the
			 * reply may carry minimal permissions; purge so
			 * proper ones are fetched via the ACL procedure.
			 */
			if (VTOMI(*vpp)->mi_flags & MI_ACL) {
				PURGE_ATTRCACHE(*vpp);
			}
			if (!(rfscall_flags & RFSCALL_SOFT))
				dnlc_update(dvp, nm, *vpp);
		} else {
			PURGE_STALE_FH(error, dvp, cr);
			/* Optionally cache the negative result. */
			if (error == ENOENT && nfs_lookup_neg_cache)
				dnlc_enter(dvp, nm, DNLC_NO_VNODE);
		}
	}

	return (error);
}
/*
 * nfs_create: VOP_CREATE for NFS version 2.
 *
 * If nm already names an existing object the exclusive-create and
 * truncate-on-open semantics are applied locally; otherwise an
 * RFS_CREATE RPC is sent.  On success a held vnode for the object is
 * returned through *vpp.  The parent's r_rwlock is held for the
 * duration to serialize directory modifications.
 */
static int
nfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
	int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
	vsecattr_t *vsecp)
{
	int error;
	struct nfscreatargs args;
	struct nfsdiropres dr;
	int douprintf;
	vnode_t *vp;
	rnode_t *rp;
	struct vattr vattr;
	rnode_t *drp;
	vnode_t *tempvp;
	hrtime_t t;

	drp = VTOR(dvp);

	if (nfs_zone() != VTOMI(dvp)->mi_zone)
		return (EPERM);
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
		return (EINTR);

	/* Work on a private copy so the caller's vattr is left intact. */
	vattr = *va;

	/*
	 * Resolve the name.  "" and "." denote the directory itself;
	 * any other name is looked up over the wire (nfslookup_otw),
	 * bypassing the DNLC.
	 */
	if (*nm == '\0') {
		error = 0;
		VN_HOLD(dvp);
		vp = dvp;
	} else if (strcmp(nm, ".") == 0) {
		error = nfs_access(dvp, VEXEC, 0, cr, ct);
		if (error) {
			nfs_rw_exit(&drp->r_rwlock);
			return (error);
		}
		VN_HOLD(dvp);
		vp = dvp;
	} else {
		error = nfslookup_otw(dvp, nm, &vp, cr, 0);
	}

	if (!error) {
		/*
		 * The object already exists: fail EXCL creates, refuse
		 * write-mode opens of directories, otherwise check access
		 * and honor a requested truncation.
		 */
		if (exclusive == EXCL)
			error = EEXIST;
		else if (vp->v_type == VDIR && (mode & VWRITE))
			error = EISDIR;
		else {
			if (IS_DEVVP(vp)) {
				/* Substitute the spec shadow vnode. */
				tempvp = vp;
				vp = specvp(vp, vp->v_rdev, vp->v_type, cr);
				VN_RELE(tempvp);
			}
			if (!(error = VOP_ACCESS(vp, mode, 0, cr, ct))) {
				if ((vattr.va_mask & AT_SIZE) &&
				    vp->v_type == VREG) {
					/* Truncate: set only the size. */
					vattr.va_mask = AT_SIZE;
					error = nfssetattr(vp, &vattr, 0, cr);
					if (!error) {
						vnevent_create(vp, ct);
					}
				}
			}
		}
		nfs_rw_exit(&drp->r_rwlock);
		if (error) {
			VN_RELE(vp);
		} else {
			*vpp = vp;
		}
		return (error);
	}

	ASSERT(vattr.va_mask & AT_TYPE);
	if (vattr.va_type == VREG) {
		ASSERT(vattr.va_mask & AT_MODE);
		/* Mandatory-locking file modes are rejected. */
		if (MANDMODE(vattr.va_mode)) {
			nfs_rw_exit(&drp->r_rwlock);
			return (EACCES);
		}
	}

	dnlc_remove(dvp, nm);

	setdiropargs(&args.ca_da, nm, dvp);

	/* Pick the new object's group id locally and send it explicitly. */
	error = setdirgid(dvp, &vattr.va_gid, cr);
	if (error) {
		nfs_rw_exit(&drp->r_rwlock);
		return (error);
	}
	vattr.va_mask |= AT_GID;

	/*
	 * NFSv2 has no dedicated mknod: special-file types are encoded
	 * into the mode bits and the device number (or a sentinel for
	 * fifos/sockets) is carried in the size attribute.
	 */
#define	IFCHR		0020000
#define	IFBLK		0060000
#define	IFSOCK		0140000
	if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
		dev_t d = vattr.va_rdev;
		dev32_t dev32;

		if (vattr.va_type == VCHR)
			vattr.va_mode |= IFCHR;
		else
			vattr.va_mode |= IFBLK;

		/*
		 * Use the 32-bit compressed dev form when the device does
		 * not fit the old SunOS4 major/minor limits; otherwise the
		 * historical NFSv2 encoding.
		 */
		(void) cmpldev(&dev32, d);
		if (dev32 & ~((SO4_MAXMAJ << L_BITSMINOR32) | SO4_MAXMIN))
			vattr.va_size = (u_offset_t)dev32;
		else
			vattr.va_size = (u_offset_t)nfsv2_cmpdev(d);

		vattr.va_mask |= AT_MODE|AT_SIZE;
	} else if (vattr.va_type == VFIFO) {
		/* Fifos are encoded as a character device with a magic dev. */
		vattr.va_mode |= IFCHR;
		vattr.va_size = (u_offset_t)NFS_FIFO_DEV;
		vattr.va_mask |= AT_MODE|AT_SIZE;
	} else if (vattr.va_type == VSOCK) {
		vattr.va_mode |= IFSOCK;
		vattr.va_size = 0;
		vattr.va_mask |= AT_MODE|AT_SIZE;
	}

	args.ca_sa = &args.ca_sa_buf;
	error = vattr_to_sattr(&vattr, args.ca_sa);
	if (error) {
		/* Attributes not representable over the wire. */
		nfs_rw_exit(&drp->r_rwlock);
		return (error);
	}

	douprintf = 1;

	t = gethrtime();

	error = rfs2call(VTOMI(dvp), RFS_CREATE,
	    xdr_creatargs, (caddr_t)&args,
	    xdr_diropres, (caddr_t)&dr, cr,
	    &douprintf, &dr.dr_status, 0, NULL);

	/* The directory has (possibly) changed; refetch its attributes. */
	PURGE_ATTRCACHE(dvp);

	if (!error) {
		error = geterrno(dr.dr_status);
		if (!error) {
			if (HAVE_RDDIR_CACHE(drp))
				nfs_purge_rddir_cache(dvp);
			vp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
			    dvp->v_vfsp, t, cr, NULL, NULL);
			/*
			 * ACL-capable server: purge so attributes are
			 * refetched before use (see nfslookup_otw).
			 */
			if (VTOMI(vp)->mi_flags & MI_ACL) {
				PURGE_ATTRCACHE(vp);
			}
			dnlc_update(dvp, nm, vp);
			rp = VTOR(vp);
			if (vattr.va_size == 0) {
				/* Created/truncated to zero: drop pages. */
				mutex_enter(&rp->r_statelock);
				rp->r_size = 0;
				mutex_exit(&rp->r_statelock);
				if (vn_has_cached_data(vp)) {
					ASSERT(vp->v_type != VCHR);
					nfs_invalidate_pages(vp,
					    (u_offset_t)0, cr);
				}
			}

			/*
			 * If the server chose a different group, reset it
			 * to match the local setdirgid() decision.
			 */
			if (vattr.va_gid != rp->r_attr.va_gid) {
				vattr.va_mask = AT_GID;
				(void) nfssetattr(vp, &vattr, 0, cr);
			}

			if (IS_DEVVP(vp)) {
				*vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
				VN_RELE(vp);
			} else
				*vpp = vp;
		} else {
			PURGE_STALE_FH(error, dvp, cr);
		}
	}

	nfs_rw_exit(&drp->r_rwlock);

	return (error);
}
/*
 * nfs_remove: VOP_REMOVE for NFSv2.
 *
 * If the file is still referenced elsewhere (v_count > 1) it is not
 * actually removed but renamed to a generated ".nfsXXXX" temporary name
 * ("silly rename"); the rnode remembers the temporary name and parent so
 * the real remove can happen at last close.  Otherwise dirty pages are
 * flushed and an RFS_REMOVE RPC is sent.
 */
static int
nfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, int flags)
{
	int error;
	struct nfsdiropargs da;
	enum nfsstat status;
	vnode_t *vp;
	char *tmpname;
	int douprintf;
	rnode_t *rp;
	rnode_t *drp;

	if (nfs_zone() != VTOMI(dvp)->mi_zone)
		return (EPERM);
	drp = VTOR(dvp);
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
		return (EINTR);

	error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
	if (error) {
		nfs_rw_exit(&drp->r_rwlock);
		return (error);
	}

	/* Only suitably privileged callers may unlink a directory. */
	if (vp->v_type == VDIR && secpolicy_fs_linkdir(cr, dvp->v_vfsp)) {
		VN_RELE(vp);
		nfs_rw_exit(&drp->r_rwlock);
		return (EPERM);
	}

	/*
	 * Remove the cached name; if others still reference the vnode,
	 * purge all of its DNLC entries.
	 */
	dnlc_remove(dvp, nm);

	if (vp->v_count > 1)
		dnlc_purge_vp(vp);

	rp = VTOR(vp);

	/*
	 * Busy file (extra holds) that is not already silly-renamed under
	 * this name: rename it to a temporary name instead of removing it.
	 */
	mutex_enter(&rp->r_statelock);
	if (vp->v_count > 1 &&
	    (rp->r_unldvp == NULL || strcmp(nm, rp->r_unlname) == 0)) {
		mutex_exit(&rp->r_statelock);
		tmpname = newname();
		error = nfsrename(dvp, nm, dvp, tmpname, cr, ct);
		if (error)
			kmem_free(tmpname, MAXNAMELEN);
		else {
			/* Record the pending unlink on the rnode. */
			mutex_enter(&rp->r_statelock);
			if (rp->r_unldvp == NULL) {
				VN_HOLD(dvp);
				rp->r_unldvp = dvp;
				if (rp->r_unlcred != NULL)
					crfree(rp->r_unlcred);
				crhold(cr);
				rp->r_unlcred = cr;
				rp->r_unlname = tmpname;
			} else {
				kmem_free(rp->r_unlname, MAXNAMELEN);
				rp->r_unlname = tmpname;
			}
			mutex_exit(&rp->r_statelock);
		}
	} else {
		mutex_exit(&rp->r_statelock);
		/*
		 * Flush dirty pages first so write errors surface before
		 * the file disappears; remember ENOSPC/EDQUOT in r_error.
		 */
		if (vn_has_cached_data(vp) &&
		    ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
			error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
			if (error && (error == ENOSPC || error == EDQUOT)) {
				mutex_enter(&rp->r_statelock);
				if (!rp->r_error)
					rp->r_error = error;
				mutex_exit(&rp->r_statelock);
			}
		}

		setdiropargs(&da, nm, dvp);

		douprintf = 1;

		error = rfs2call(VTOMI(dvp), RFS_REMOVE,
		    xdr_diropargs, (caddr_t)&da,
		    xdr_enum, (caddr_t)&status, cr,
		    &douprintf, &status, 0, NULL);

		/*
		 * Extended-attribute directories may be cached by name in
		 * the DNLC; drop those entries too.
		 */
		if (dvp->v_flag & V_XATTRDIR)
			dnlc_purge_vp(dvp);

		PURGE_ATTRCACHE(dvp);
		PURGE_ATTRCACHE(vp);

		if (!error) {
			error = geterrno(status);
			if (!error) {
				if (HAVE_RDDIR_CACHE(drp))
					nfs_purge_rddir_cache(dvp);
			} else {
				PURGE_STALE_FH(error, dvp, cr);
			}
		}
	}

	if (error == 0) {
		vnevent_remove(vp, dvp, nm, ct);
	}
	VN_RELE(vp);

	nfs_rw_exit(&drp->r_rwlock);

	return (error);
}
/*
 * nfs_link: VOP_LINK for NFSv2 — create hard link tnm in directory tdvp
 * referring to svp, via an RFS_LINK RPC.  Attribute caches on both the
 * source and the target directory are purged since link counts and
 * directory contents change.
 */
static int
nfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
	caller_context_t *ct, int flags)
{
	int error;
	struct nfslinkargs args;
	enum nfsstat status;
	vnode_t *realvp;
	int douprintf;
	rnode_t *tdrp;

	if (nfs_zone() != VTOMI(tdvp)->mi_zone)
		return (EPERM);
	/* Strip any shadow vnode layered over the source. */
	if (VOP_REALVP(svp, &realvp, ct) == 0)
		svp = realvp;

	args.la_from = VTOFH(svp);
	setdiropargs(&args.la_to, tnm, tdvp);

	tdrp = VTOR(tdvp);
	/* Serialize modifications of the target directory. */
	if (nfs_rw_enter_sig(&tdrp->r_rwlock, RW_WRITER, INTR(tdvp)))
		return (EINTR);

	dnlc_remove(tdvp, tnm);

	douprintf = 1;

	error = rfs2call(VTOMI(svp), RFS_LINK,
	    xdr_linkargs, (caddr_t)&args,
	    xdr_enum, (caddr_t)&status, cr,
	    &douprintf, &status, 0, NULL);

	PURGE_ATTRCACHE(tdvp);
	PURGE_ATTRCACHE(svp);

	if (!error) {
		error = geterrno(status);
		if (!error) {
			if (HAVE_RDDIR_CACHE(tdrp))
				nfs_purge_rddir_cache(tdvp);
		}
	}

	nfs_rw_exit(&tdrp->r_rwlock);

	if (!error) {
		/* Notify interested parties (e.g. file event monitors). */
		vnevent_link(svp, ct);
	}
	return (error);
}
/*
 * nfs_rename: VOP_RENAME entry point.  Unwraps a possible shadow vnode
 * over the target directory and defers to nfsrename() for the real work.
 */
static int
nfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
	caller_context_t *ct, int flags)
{
	vnode_t *realvp;

	if (nfs_zone() != VTOMI(odvp)->mi_zone)
		return (EPERM);
	if (VOP_REALVP(ndvp, &realvp, ct) == 0)
		ndvp = realvp;

	return (nfsrename(odvp, onm, ndvp, nnm, cr, ct));
}
/*
 * nfsrename: rename onm in odvp to nnm in ndvp via RFS_RENAME.
 *
 * Both directory rwlocks are taken in address order to avoid deadlock
 * with a concurrent rename in the opposite direction.  If the target
 * name exists and is still referenced, it is first preserved via the
 * "silly rename" mechanism (link to a temporary name) so open holders
 * do not lose the file.
 */
static int
nfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
	caller_context_t *ct)
{
	int error;
	enum nfsstat status;
	struct nfsrnmargs args;
	int douprintf;
	vnode_t *nvp = NULL;
	vnode_t *ovp = NULL;
	char *tmpname;
	rnode_t *rp;
	rnode_t *odrp;
	rnode_t *ndrp;

	ASSERT(nfs_zone() == VTOMI(odvp)->mi_zone);
	/* "." and ".." may never be renamed. */
	if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
	    strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
		return (EINVAL);

	odrp = VTOR(odvp);
	ndrp = VTOR(ndvp);

	/* Lock both directories, always in ascending rnode address order. */
	if ((intptr_t)odrp < (intptr_t)ndrp) {
		if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp)))
			return (EINTR);
		if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp))) {
			nfs_rw_exit(&odrp->r_rwlock);
			return (EINTR);
		}
	} else {
		if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp)))
			return (EINTR);
		if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp))) {
			nfs_rw_exit(&ndrp->r_rwlock);
			return (EINTR);
		}
	}

	/*
	 * Does the target name already exist?  If so we may need to
	 * preserve it for other holders before the server clobbers it.
	 */
	error = nfslookup(ndvp, nnm, &nvp, NULL, 0, NULL, cr, 0);
	if (!error) {
		/* Never rename over a mount point. */
		if (vn_mountedvfs(nvp) != NULL) {
			VN_RELE(nvp);
			nfs_rw_exit(&odrp->r_rwlock);
			nfs_rw_exit(&ndrp->r_rwlock);
			return (EBUSY);
		}

		dnlc_remove(ndvp, nnm);

		if (nvp->v_count > 1)
			dnlc_purge_vp(nvp);

		/*
		 * Busy non-directory target: silly-rename it so open file
		 * descriptors keep working after it is replaced.
		 */
		if (nvp->v_count > 1 && nvp->v_type != VDIR) {
			error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL,
			    cr, 0);
			if (error) {
				VN_RELE(nvp);
				nfs_rw_exit(&odrp->r_rwlock);
				nfs_rw_exit(&ndrp->r_rwlock);
				return (error);
			}
			/* Renaming a file onto itself is a no-op. */
			if (ovp == nvp) {
				VN_RELE(ovp);
				VN_RELE(nvp);
				nfs_rw_exit(&odrp->r_rwlock);
				nfs_rw_exit(&ndrp->r_rwlock);
				return (0);
			}
			/* A directory cannot replace a non-directory. */
			if (ovp->v_type == VDIR) {
				VN_RELE(ovp);
				VN_RELE(nvp);
				nfs_rw_exit(&odrp->r_rwlock);
				nfs_rw_exit(&ndrp->r_rwlock);
				return (ENOTDIR);
			}

			/*
			 * Preserve the target under a temporary name.
			 * Servers that refuse LINK get a RENAME instead.
			 */
			tmpname = newname();
			error = nfs_link(ndvp, nvp, tmpname, cr, NULL, 0);
			if (error == EOPNOTSUPP) {
				error = nfs_rename(ndvp, nnm, ndvp, tmpname,
				    cr, NULL, 0);
			}
			if (error) {
				kmem_free(tmpname, MAXNAMELEN);
				VN_RELE(ovp);
				VN_RELE(nvp);
				nfs_rw_exit(&odrp->r_rwlock);
				nfs_rw_exit(&ndrp->r_rwlock);
				return (error);
			}

			/* Record the deferred unlink on the target rnode. */
			rp = VTOR(nvp);
			mutex_enter(&rp->r_statelock);
			if (rp->r_unldvp == NULL) {
				VN_HOLD(ndvp);
				rp->r_unldvp = ndvp;
				if (rp->r_unlcred != NULL)
					crfree(rp->r_unlcred);
				crhold(cr);
				rp->r_unlcred = cr;
				rp->r_unlname = tmpname;
			} else {
				kmem_free(rp->r_unlname, MAXNAMELEN);
				rp->r_unlname = tmpname;
			}
			mutex_exit(&rp->r_statelock);
		}
	}

	/* Make sure we hold the source vnode for the post-RPC bookkeeping. */
	if (ovp == NULL) {
		error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL, cr, 0);
		if (error) {
			nfs_rw_exit(&odrp->r_rwlock);
			nfs_rw_exit(&ndrp->r_rwlock);
			if (nvp) {
				VN_RELE(nvp);
			}
			return (error);
		}
		ASSERT(ovp != NULL);
	}

	dnlc_remove(odvp, onm);
	dnlc_remove(ndvp, nnm);

	setdiropargs(&args.rna_from, onm, odvp);
	setdiropargs(&args.rna_to, nnm, ndvp);

	douprintf = 1;

	error = rfs2call(VTOMI(odvp), RFS_RENAME,
	    xdr_rnmargs, (caddr_t)&args,
	    xdr_enum, (caddr_t)&status, cr,
	    &douprintf, &status, 0, NULL);

	/* Both directories changed (or may have). */
	PURGE_ATTRCACHE(odvp);
	PURGE_ATTRCACHE(ndvp);

	if (!error) {
		error = geterrno(status);
		if (!error) {
			if (HAVE_RDDIR_CACHE(odrp))
				nfs_purge_rddir_cache(odvp);
			if (HAVE_RDDIR_CACHE(ndrp))
				nfs_purge_rddir_cache(ndvp);
			/*
			 * A moved directory's ".." entry now points to a
			 * different parent; drop the stale cached state.
			 */
			rp = VTOR(ovp);
			if (ndvp != odvp) {
				if (ovp->v_type == VDIR) {
					dnlc_remove(ovp, "..");
					if (HAVE_RDDIR_CACHE(rp))
						nfs_purge_rddir_cache(ovp);
				}
			}
			/*
			 * If the renamed file was itself silly-renamed
			 * (pending unlink), track it under its new name
			 * and parent.
			 */
			mutex_enter(&rp->r_statelock);
			if (rp->r_unldvp != NULL) {
				if (strcmp(rp->r_unlname, onm) == 0) {
					(void) strncpy(rp->r_unlname,
					    nnm, MAXNAMELEN);
					rp->r_unlname[MAXNAMELEN - 1] = '\0';

					if (ndvp != rp->r_unldvp) {
						VN_RELE(rp->r_unldvp);
						rp->r_unldvp = ndvp;
						VN_HOLD(ndvp);
					}
				}
			}
			mutex_exit(&rp->r_statelock);
		} else {
			/* System V expects EEXIST for a non-empty target. */
			if (error == ENOTEMPTY)
				error = EEXIST;
		}
	}

	if (error == 0) {
		if (nvp)
			vnevent_rename_dest(nvp, ndvp, nnm, ct);

		if (odvp != ndvp)
			vnevent_rename_dest_dir(ndvp, ct);

		ASSERT(ovp != NULL);
		vnevent_rename_src(ovp, odvp, onm, ct);
	}

	if (nvp) {
		VN_RELE(nvp);
	}
	VN_RELE(ovp);

	nfs_rw_exit(&odrp->r_rwlock);
	nfs_rw_exit(&ndrp->r_rwlock);

	return (error);
}
/*
 * nfs_mkdir: VOP_MKDIR for NFSv2 — create directory nm in dvp via
 * RFS_MKDIR.  Group and mode are chosen locally (setdirgid/setdirmode)
 * and, if the server assigned a different group, corrected afterwards
 * with a setattr.  The new held vnode is returned through *vpp.
 */
static int
nfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp, cred_t *cr,
	caller_context_t *ct, int flags, vsecattr_t *vsecp)
{
	int error;
	struct nfscreatargs args;
	struct nfsdiropres dr;
	int douprintf;
	rnode_t *drp;
	hrtime_t t;

	if (nfs_zone() != VTOMI(dvp)->mi_zone)
		return (EPERM);
	setdiropargs(&args.ca_da, nm, dvp);

	/* Decide group and mode locally rather than trusting the server. */
	error = setdirgid(dvp, &va->va_gid, cr);
	if (error)
		return (error);
	error = setdirmode(dvp, &va->va_mode, cr);
	if (error)
		return (error);
	va->va_mask |= AT_MODE|AT_GID;

	args.ca_sa = &args.ca_sa_buf;
	error = vattr_to_sattr(va, args.ca_sa);
	if (error) {
		/* Attributes not representable over the wire. */
		return (error);
	}

	drp = VTOR(dvp);
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
		return (EINTR);

	dnlc_remove(dvp, nm);

	douprintf = 1;

	t = gethrtime();

	error = rfs2call(VTOMI(dvp), RFS_MKDIR,
	    xdr_creatargs, (caddr_t)&args,
	    xdr_diropres, (caddr_t)&dr, cr,
	    &douprintf, &dr.dr_status, 0, NULL);

	/* The parent directory changed (or may have). */
	PURGE_ATTRCACHE(dvp);

	if (!error) {
		error = geterrno(dr.dr_status);
		if (!error) {
			if (HAVE_RDDIR_CACHE(drp))
				nfs_purge_rddir_cache(dvp);

			*vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
			    dvp->v_vfsp, t, cr, NULL, NULL);
			/* Force a fresh getattr on the new directory. */
			PURGE_ATTRCACHE(*vpp);
			dnlc_update(dvp, nm, *vpp);

			/* Correct the group if the server chose differently. */
			if (va->va_gid != VTOR(*vpp)->r_attr.va_gid) {
				va->va_mask = AT_GID;
				(void) nfssetattr(*vpp, va, 0, cr);
			}
		} else {
			PURGE_STALE_FH(error, dvp, cr);
		}
	}

	nfs_rw_exit(&drp->r_rwlock);

	return (error);
}
/*
 * nfs_rmdir: VOP_RMDIR for NFSv2 — remove directory nm from dvp via
 * RFS_RMDIR.  Removing the current directory (cdir) is rejected with
 * EINVAL, and a non-empty directory maps ENOTEMPTY to EEXIST for
 * System V compatibility.
 */
static int
nfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
	caller_context_t *ct, int flags)
{
	int error;
	enum nfsstat status;
	struct nfsdiropargs da;
	vnode_t *vp;
	int douprintf;
	rnode_t *drp;

	if (nfs_zone() != VTOMI(dvp)->mi_zone)
		return (EPERM);
	drp = VTOR(dvp);
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
		return (EINTR);

	/*
	 * Attempt to prevent a rmdir(".") from succeeding.
	 */
	error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
	if (error) {
		nfs_rw_exit(&drp->r_rwlock);
		return (error);
	}

	if (vp == cdir) {
		VN_RELE(vp);
		nfs_rw_exit(&drp->r_rwlock);
		return (EINVAL);
	}

	setdiropargs(&da, nm, dvp);

	/*
	 * Drop cached names referring to the doomed directory; other
	 * holders may still have it cached via "..".
	 */
	dnlc_remove(dvp, nm);

	if (vp->v_count > 1) {
		dnlc_remove(vp, "..");
		if (vp->v_count > 1)
			dnlc_purge_vp(vp);
	}

	douprintf = 1;

	error = rfs2call(VTOMI(dvp), RFS_RMDIR,
	    xdr_diropargs, (caddr_t)&da,
	    xdr_enum, (caddr_t)&status, cr,
	    &douprintf, &status, 0, NULL);

	/* The parent directory changed (or may have). */
	PURGE_ATTRCACHE(dvp);

	if (error) {
		VN_RELE(vp);
		nfs_rw_exit(&drp->r_rwlock);
		return (error);
	}

	error = geterrno(status);
	if (!error) {
		if (HAVE_RDDIR_CACHE(drp))
			nfs_purge_rddir_cache(dvp);
		if (HAVE_RDDIR_CACHE(VTOR(vp)))
			nfs_purge_rddir_cache(vp);
	} else {
		PURGE_STALE_FH(error, dvp, cr);
		/* System V expects EEXIST for a non-empty directory. */
		if (error == ENOTEMPTY)
			error = EEXIST;
	}

	if (error == 0) {
		vnevent_rmdir(vp, dvp, nm, ct);
	}
	VN_RELE(vp);

	nfs_rw_exit(&drp->r_rwlock);

	return (error);
}
/*
 * nfs_symlink: VOP_SYMLINK for NFSv2 — create symbolic link lnm in dvp
 * with target text tnm and initial attributes tva, via RFS_SYMLINK.
 */
static int
nfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
	caller_context_t *ct, int flags)
{
	int error;
	struct nfsslargs args;
	enum nfsstat status;
	int douprintf;
	rnode_t *drp;

	if (nfs_zone() != VTOMI(dvp)->mi_zone)
		return (EPERM);
	setdiropargs(&args.sla_from, lnm, dvp);
	args.sla_sa = &args.sla_sa_buf;
	error = vattr_to_sattr(tva, args.sla_sa);
	if (error) {
		/* Attributes not representable over the wire. */
		return (error);
	}
	args.sla_tnm = tnm;

	drp = VTOR(dvp);
	/* Serialize modifications of the parent directory. */
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
		return (EINTR);

	dnlc_remove(dvp, lnm);

	douprintf = 1;

	error = rfs2call(VTOMI(dvp), RFS_SYMLINK,
	    xdr_slargs, (caddr_t)&args,
	    xdr_enum, (caddr_t)&status, cr,
	    &douprintf, &status, 0, NULL);

	/* The parent directory changed (or may have). */
	PURGE_ATTRCACHE(dvp);

	if (!error) {
		error = geterrno(status);
		if (!error) {
			if (HAVE_RDDIR_CACHE(drp))
				nfs_purge_rddir_cache(dvp);
		} else {
			PURGE_STALE_FH(error, dvp, cr);
		}
	}

	nfs_rw_exit(&drp->r_rwlock);

	return (error);
}
#ifdef DEBUG
/* Readdir-cache statistics, maintained only on DEBUG kernels. */
static int nfs_readdir_cache_hits = 0;
static int nfs_readdir_cache_shorts = 0;
static int nfs_readdir_cache_waits = 0;
static int nfs_readdir_cache_misses = 0;
static int nfs_readdir_readahead = 0;
#endif

/*
 * Non-zero caps readdir transfer size at 1KB instead of NFS_MAXDATA;
 * a workaround knob for servers with small readdir limits.
 * NOTE(review): rationale inferred from usage in nfs_readdir — confirm.
 */
static int nfs_shrinkreaddir = 0;
/*
 * nfs_readdir: VOP_READDIR for NFSv2.
 *
 * Directory contents are kept in per-rnode rddir_cache entries keyed by
 * (cookie, buflen) in the AVL tree r_dir.  This routine either satisfies
 * the request from a complete cache entry, waits for another thread that
 * is filling the needed entry, or fills it itself via nfsreaddir().
 * After a successful copyout it may also kick off an async readahead of
 * the next chunk.  Caller holds r_rwlock as reader (asserted below).
 */
static int
nfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
	caller_context_t *ct, int flags)
{
	int error;
	size_t count;
	rnode_t *rp;
	rddir_cache *rdc;
	rddir_cache *nrdc;
	rddir_cache *rrdc;
#ifdef DEBUG
	int missed;
#endif
	rddir_cache srdc;
	avl_index_t where;

	rp = VTOR(vp);

	ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	/*
	 * Make sure the directory's cached contents are still valid
	 * before serving from them (or drop them entirely when the
	 * cache is administratively disabled).
	 */
	if (HAVE_RDDIR_CACHE(rp)) {
		if (nfs_disable_rddir_cache) {
			nfs_purge_rddir_cache(vp);
		} else {
			error = nfs_validate_caches(vp, cr);
			if (error)
				return (error);
		}
	}

	/* Per-RPC transfer size, optionally shrunk to 1KB (see knob above). */
	count = MIN(uiop->uio_iov->iov_len,
	    nfs_shrinkreaddir ? 0x400 : NFS_MAXDATA);

	nrdc = NULL;
#ifdef DEBUG
	missed = 0;
#endif
top:
	mutex_enter(&rp->r_statelock);

	/*
	 * Short-circuit: a read starting at the cookie just past the
	 * last (EOF) cached chunk returns EOF immediately.
	 */
	if (rp->r_direof != NULL &&
	    uiop->uio_offset == rp->r_direof->nfs_ncookie) {
		mutex_exit(&rp->r_statelock);
#ifdef DEBUG
		nfs_readdir_cache_shorts++;
#endif
		if (eofp)
			*eofp = 1;
		if (nrdc != NULL)
			rddir_cache_rele(nrdc);
		return (0);
	}

	/* Look for a cache entry covering this cookie/size. */
	srdc.nfs_cookie = uiop->uio_offset;
	srdc.buflen = count;
	rdc = avl_find(&rp->r_dir, &srdc, &where);
	if (rdc != NULL) {
		rddir_cache_hold(rdc);
		/*
		 * Another thread is filling this entry: drop r_rwlock
		 * (so other directory ops are not blocked) and wait on
		 * the entry's condition variable; retry from the top.
		 */
		if (rdc->flags & RDDIR) {
			nfs_rw_exit(&rp->r_rwlock);
			rdc->flags |= RDDIRWAIT;
#ifdef DEBUG
			nfs_readdir_cache_waits++;
#endif
			if (!cv_wait_sig(&rdc->cv, &rp->r_statelock)) {
				/* Interrupted by a signal. */
				mutex_exit(&rp->r_statelock);
				(void) nfs_rw_enter_sig(&rp->r_rwlock,
				    RW_READER, FALSE);
				rddir_cache_rele(rdc);
				if (nrdc != NULL)
					rddir_cache_rele(nrdc);
				return (EINTR);
			}
			mutex_exit(&rp->r_statelock);
			(void) nfs_rw_enter_sig(&rp->r_rwlock,
			    RW_READER, FALSE);
			rddir_cache_rele(rdc);
			goto top;
		}
		/*
		 * A previous fill failed and must be retried: claim the
		 * entry (RDDIR) and go fill it ourselves.
		 */
		if (rdc->flags & RDDIRREQ) {
			rdc->flags &= ~RDDIRREQ;
			rdc->flags |= RDDIR;
			if (nrdc != NULL)
				rddir_cache_rele(nrdc);
			nrdc = rdc;
			mutex_exit(&rp->r_statelock);
			goto bottom;
		}
#ifdef DEBUG
		if (!missed)
			nfs_readdir_cache_hits++;
#endif
		/* The entry recorded an error when it was filled. */
		if (rdc->error) {
			error = rdc->error;
			mutex_exit(&rp->r_statelock);
			rddir_cache_rele(rdc);
			if (nrdc != NULL)
				rddir_cache_rele(nrdc);
			return (error);
		}

		/* Complete entry: copy the dirents out to the caller. */
		error = uiomove(rdc->entries, rdc->entlen, UIO_READ, uiop);

		if (!error) {
			uiop->uio_offset = rdc->nfs_ncookie;
			if (eofp)
				*eofp = rdc->eof;
		}

		if (rdc->eof) {
			/* Remember the EOF entry for the short-circuit above. */
			rp->r_direof = rdc;
			mutex_exit(&rp->r_statelock);
			rddir_cache_rele(rdc);
			if (nrdc != NULL)
				rddir_cache_rele(nrdc);
			return (error);
		}

		/*
		 * Not yet at EOF: if the next chunk is not already cached,
		 * insert an entry for it and start an async readahead.
		 */
		srdc.nfs_cookie = rdc->nfs_ncookie;
		srdc.buflen = count;
		rrdc = avl_find(&rp->r_dir, &srdc, &where);
		if (rrdc != NULL) {
			if (nrdc != NULL)
				rddir_cache_rele(nrdc);
		} else {
			/* Reuse the spare entry if we allocated one. */
			if (nrdc != NULL)
				rrdc = nrdc;
			else {
				rrdc = rddir_cache_alloc(KM_NOSLEEP);
			}
			if (rrdc != NULL) {
				rrdc->nfs_cookie = rdc->nfs_ncookie;
				rrdc->buflen = count;
				avl_insert(&rp->r_dir, rrdc, where);
				rddir_cache_hold(rrdc);
				mutex_exit(&rp->r_statelock);
				rddir_cache_rele(rdc);
#ifdef DEBUG
				nfs_readdir_readahead++;
#endif
				nfs_async_readdir(vp, rrdc, cr, nfsreaddir);
				return (error);
			}
		}

		mutex_exit(&rp->r_statelock);
		rddir_cache_rele(rdc);
		return (error);
	}

	/*
	 * Cache miss.  Allocate an entry outside the mutex (KM_SLEEP can
	 * block) and retry the lookup from the top in case another thread
	 * raced us in.
	 */
	if (nrdc == NULL) {
		mutex_exit(&rp->r_statelock);
		nrdc = rddir_cache_alloc(KM_SLEEP);
		nrdc->nfs_cookie = uiop->uio_offset;
		nrdc->buflen = count;
		goto top;
	}

	avl_insert(&rp->r_dir, nrdc, where);
	rddir_cache_hold(nrdc);
	mutex_exit(&rp->r_statelock);

bottom:
#ifdef DEBUG
	missed = 1;
	nfs_readdir_cache_misses++;
#endif
	/* Fill the entry synchronously, then serve it from the top. */
	error = nfsreaddir(vp, nrdc, cr);
	if (error != 0)
		return (error);

	nrdc = NULL;

	goto top;
}
/*
 * nfsreaddir: fill one rddir_cache entry with an RFS_READDIR RPC.
 *
 * Caller has marked rdc with RDDIR (asserted below); on completion the
 * flag is cleared and any RDDIRWAIT waiters are woken.  On failure the
 * error is recorded in the entry and RDDIRREQ is set so the next reader
 * retries the fill.  Drops the entry hold the caller transferred to us.
 */
static int
nfsreaddir(vnode_t *vp, rddir_cache *rdc, cred_t *cr)
{
	int error;
	struct nfsrddirargs rda;
	struct nfsrddirres rd;
	rnode_t *rp;
	mntinfo_t *mi;
	uint_t count;
	int douprintf;
	failinfo_t fi, *fip;

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
	count = rdc->buflen;

	rp = VTOR(vp);
	mi = VTOMI(vp);

	rda.rda_fh = *VTOFH(vp);
	rda.rda_offset = rdc->nfs_cookie;

	/*
	 * NFS client failover support: only the request at cookie 0 is
	 * retriable across a failover, since cookies are server-specific.
	 */
	if (rdc->nfs_cookie == (off_t)0) {
		fi.vp = vp;
		fi.fhp = (caddr_t)&rda.rda_fh;
		fi.copyproc = nfscopyfh;
		fi.lookupproc = nfslookup;
		fi.xattrdirproc = acl_getxattrdir2;
		fip = &fi;
	} else {
		fip = NULL;
	}

	rd.rd_entries = kmem_alloc(rdc->buflen, KM_SLEEP);
	rd.rd_size = count;
	rd.rd_offset = rda.rda_offset;

	douprintf = 1;

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	/* Retry with the (possibly reduced) current read size. */
	do {
		rda.rda_count = MIN(count, mi->mi_curread);
		error = rfs2call(mi, RFS_READDIR,
		    xdr_rddirargs, (caddr_t)&rda,
		    xdr_getrddirres, (caddr_t)&rd, cr,
		    &douprintf, &rd.rd_status, 0, fip);
	} while (error == ENFS_TRYAGAIN);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	ASSERT(rdc->flags & RDDIR);
	if (!error) {
		error = geterrno(rd.rd_status);
		if (!error) {
			/* Stash results and dirents into the cache entry. */
			rdc->nfs_ncookie = rd.rd_offset;
			rdc->eof = rd.rd_eof ? 1 : 0;
			rdc->entlen = rd.rd_size;
			ASSERT(rdc->entlen <= rdc->buflen);
#ifdef DEBUG
			rdc->entries = rddir_cache_buf_alloc(rdc->buflen,
			    KM_SLEEP);
#else
			rdc->entries = kmem_alloc(rdc->buflen, KM_SLEEP);
#endif
			bcopy(rd.rd_entries, rdc->entries, rdc->entlen);
			rdc->error = 0;
			if (mi->mi_io_kstats) {
				mutex_enter(&mi->mi_lock);
				KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
				KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
				    rd.rd_size;
				mutex_exit(&mi->mi_lock);
			}
		} else {
			PURGE_STALE_FH(error, vp, cr);
		}
	}
	if (error) {
		rdc->entries = NULL;
		rdc->error = error;
	}
	kmem_free(rd.rd_entries, rdc->buflen);

	/* Release the entry: clear RDDIR, wake waiters, flag retry on error. */
	mutex_enter(&rp->r_statelock);
	rdc->flags &= ~RDDIR;
	if (rdc->flags & RDDIRWAIT) {
		rdc->flags &= ~RDDIRWAIT;
		cv_broadcast(&rdc->cv);
	}
	if (error)
		rdc->flags |= RDDIRREQ;
	mutex_exit(&rp->r_statelock);

	rddir_cache_rele(rdc);

	return (error);
}
#ifdef DEBUG
/* When set, drop into the kernel debugger on a zero-length write in nfs_bio. */
static int nfs_bio_do_stop = 0;
#endif
/*
 * nfs_bio: perform the I/O described by buf bp via nfsread/nfswrite.
 *
 * The cached credential r_cred is tried first; on EACCES the operation
 * is retried once with the caller's credential, which then replaces the
 * cached one.  Reads past EOF return NFS_EOF; write errors may be
 * latched into r_error or reported via nfs_write_error().
 */
static int
nfs_bio(struct buf *bp, cred_t *cr)
{
	rnode_t *rp = VTOR(bp->b_vp);
	int count;
	int error;
	cred_t *cred;
	uint_t offset;

	DTRACE_IO1(start, struct buf *, bp);

	ASSERT(nfs_zone() == VTOMI(bp->b_vp)->mi_zone);
	offset = dbtob(bp->b_blkno);

	if (bp->b_flags & B_READ) {
		/* Prefer the credential cached on the rnode, else cache cr. */
		mutex_enter(&rp->r_statelock);
		if (rp->r_cred != NULL) {
			cred = rp->r_cred;
			crhold(cred);
		} else {
			rp->r_cred = cr;
			crhold(cr);
			cred = cr;
			crhold(cred);
		}
		mutex_exit(&rp->r_statelock);
	read_again:
		error = bp->b_error = nfsread(bp->b_vp, bp->b_un.b_addr,
		    offset, bp->b_bcount, &bp->b_resid, cred);

		crfree(cred);
		if (!error) {
			/* Zero-fill the part the server did not return. */
			if (bp->b_resid) {
				bzero(bp->b_un.b_addr +
				    bp->b_bcount - bp->b_resid, bp->b_resid);
			}
			/* A completely empty read at/past EOF is NFS_EOF. */
			mutex_enter(&rp->r_statelock);
			if (bp->b_resid == bp->b_bcount &&
			    offset >= rp->r_size) {
				error = NFS_EOF;
			}
			mutex_exit(&rp->r_statelock);
		} else if (error == EACCES) {
			/* Retry once with the caller's credential. */
			mutex_enter(&rp->r_statelock);
			if (cred != cr) {
				if (rp->r_cred != NULL)
					crfree(rp->r_cred);
				rp->r_cred = cr;
				crhold(cr);
				cred = cr;
				crhold(cred);
				mutex_exit(&rp->r_statelock);
				goto read_again;
			}
			mutex_exit(&rp->r_statelock);
		}
	} else {
		/* Once the rnode is stale, fail writes with the saved error. */
		if (!(rp->r_flags & RSTALE)) {
			mutex_enter(&rp->r_statelock);
			if (rp->r_cred != NULL) {
				cred = rp->r_cred;
				crhold(cred);
			} else {
				rp->r_cred = cr;
				crhold(cr);
				cred = cr;
				crhold(cred);
			}
			mutex_exit(&rp->r_statelock);
		write_again:
			/* Clamp the write to the current file size. */
			mutex_enter(&rp->r_statelock);
			count = MIN(bp->b_bcount, rp->r_size - offset);
			mutex_exit(&rp->r_statelock);
			if (count < 0)
				cmn_err(CE_PANIC, "nfs_bio: write count < 0");
#ifdef DEBUG
			if (count == 0) {
				zcmn_err(getzoneid(), CE_WARN,
				    "nfs_bio: zero length write at %d",
				    offset);
				nfs_printfhandle(&rp->r_fh);
				if (nfs_bio_do_stop)
					debug_enter("nfs_bio");
			}
#endif
			error = nfswrite(bp->b_vp, bp->b_un.b_addr, offset,
			    count, cred);
			if (error == EACCES) {
				/* Retry once with the caller's credential. */
				mutex_enter(&rp->r_statelock);
				if (cred != cr) {
					if (rp->r_cred != NULL)
						crfree(rp->r_cred);
					rp->r_cred = cr;
					crhold(cr);
					crfree(cred);
					cred = cr;
					crhold(cred);
					mutex_exit(&rp->r_statelock);
					goto write_again;
				}
				mutex_exit(&rp->r_statelock);
			}
			bp->b_error = error;
			if (error && error != EINTR) {
				/*
				 * Don't print EDQUOT/EFBIG errors (the
				 * application sees those directly), and
				 * don't print async EACCES either.
				 */
				if (error != EDQUOT && error != EFBIG &&
				    (error != EACCES ||
				    !(bp->b_flags & B_ASYNC)))
					nfs_write_error(bp->b_vp, error, cred);
				/* Latch serious errors on the rnode. */
				mutex_enter(&rp->r_statelock);
				if (error == ESTALE) {
					rp->r_flags |= RSTALE;
					if (!rp->r_error)
						rp->r_error = error;
				} else if (!rp->r_error &&
				    (bp->b_flags &
				    (B_INVAL|B_FORCE|B_ASYNC)) ==
				    (B_INVAL|B_FORCE|B_ASYNC)) {
					rp->r_error = error;
				}
				mutex_exit(&rp->r_statelock);
			}
			crfree(cred);
		} else {
			error = rp->r_error;
			if (error == 0)
				error = ESTALE;
		}
	}

	if (error != 0 && error != NFS_EOF)
		bp->b_flags |= B_ERROR;

	DTRACE_IO1(done, struct buf *, bp);

	return (error);
}
/*
 * nfs_fid: VOP_FID — encode this rnode's NFS file handle into the
 * caller-supplied fid buffer.  Returns ENOSPC (with the required length
 * stored in fid_len) when the buffer is too small.
 */
static int
nfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	rnode_t *rnp = VTOR(vp);
	struct nfs_fid *nfp;
	uint_t reqlen = sizeof (struct nfs_fid) - sizeof (short);

	/* The caller's buffer must hold the handle portion of nfs_fid. */
	if (fidp->fid_len < reqlen) {
		fidp->fid_len = reqlen;
		return (ENOSPC);
	}

	nfp = (struct nfs_fid *)fidp;
	nfp->nf_pad = 0;
	nfp->nf_len = reqlen;
	bcopy(rnp->r_fh.fh_buf, nfp->nf_data, NFS_FHSIZE);
	return (0);
}
/*
 * nfs_rwlock: VOP_RWLOCK — take the rnode's r_rwlock on behalf of a
 * read or write operation.
 *
 * Writers on directio files are allowed to proceed under a shared lock
 * when there are no mapped or cached pages; otherwise a writer lock is
 * taken.  The return value tells the caller which mode was granted.
 */
static int
nfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
{
	rnode_t *rnp = VTOR(vp);

	/* Readers always share the lock. */
	if (!write_lock) {
		(void) nfs_rw_enter_sig(&rnp->r_rwlock, RW_READER, FALSE);
		return (V_WRITELOCK_FALSE);
	}

	/*
	 * Directio write: probe under a shared lock first; it suffices
	 * while nothing is mapped and no pages are cached.
	 */
	if ((rnp->r_flags & RDIRECTIO) ||
	    (VTOMI(vp)->mi_flags & MI_DIRECTIO)) {
		(void) nfs_rw_enter_sig(&rnp->r_rwlock, RW_READER, FALSE);
		if (rnp->r_mapcnt == 0 && !vn_has_cached_data(vp))
			return (V_WRITELOCK_FALSE);
		nfs_rw_exit(&rnp->r_rwlock);
	}

	(void) nfs_rw_enter_sig(&rnp->r_rwlock, RW_WRITER, FALSE);
	return (V_WRITELOCK_TRUE);
}
/*
 * nfs_rwunlock: VOP_RWUNLOCK — release the r_rwlock taken by nfs_rwlock().
 */
static void
nfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
{
	rnode_t *rp = VTOR(vp);

	nfs_rw_exit(&rp->r_rwlock);
}
/*
 * nfs_seek: VOP_SEEK — validate a proposed new file offset.
 * Directory cookies are opaque and accepted as-is; regular objects are
 * limited to non-negative 32-bit offsets (NFSv2).
 */
static int
nfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
{
	if (vp->v_type == VDIR)
		return (0);

	return ((*noffp < 0 || *noffp > MAXOFF32_T) ? EINVAL : 0);
}
/* Number of read-ahead blocks issued by nfs_getapage(). */
static int nfs_nra = 4;

#ifdef DEBUG
/* Count of pages that vanished between page_exists() and page_lookup(). */
static int nfs_lostpage = 0;
#endif
/*
 * nfs_getpage: VOP_GETPAGE — return pages covering [off, off+len) via
 * pvn_getpages()/nfs_getapage().  Revalidates caches first; NFS_EOF
 * from the page path purges caches (keeping DNLC) and retries, since
 * the file may have shrunk on the server.
 */
static int
nfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
	rnode_t *rp;
	int error;
	mntinfo_t *mi;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	ASSERT(off <= MAXOFF32_T);
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	if (protp != NULL)
		*protp = PROT_ALL;

	/*
	 * Now valididate that the caches are up to date.
	 */
	error = nfs_validate_caches(vp, cr);
	if (error)
		return (error);

	rp = VTOR(vp);
	mi = VTOMI(vp);
retry:
	mutex_enter(&rp->r_statelock);

	/*
	 * Throttle S_CREATE faults (page creation for writes) while the
	 * async write queue is deep or a getattr-flush is in progress.
	 */
	if (rw == S_CREATE) {
		while ((mi->mi_max_threads != 0 &&
		    rp->r_awcount > 2 * mi->mi_max_threads) ||
		    rp->r_gcount > 0)
			cv_wait(&rp->r_cv, &rp->r_statelock);
	}

	/*
	 * Faults beyond EOF fail with EFAULT, except through segkmap,
	 * where zero-filled pages must be producible (e.g. for writes
	 * that have not extended r_size yet).
	 */
	if (off + len > rp->r_size + PAGEOFFSET && seg != segkmap) {
		mutex_exit(&rp->r_statelock);
		return (EFAULT);
	}
	mutex_exit(&rp->r_statelock);

	error = pvn_getpages(nfs_getapage, vp, off, len, protp, pl, plsz,
	    seg, addr, rw, cr);

	switch (error) {
	case NFS_EOF:
		/* File shrank on the server: purge and retry. */
		nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);
		goto retry;
	case ESTALE:
		PURGE_STALE_FH(error, vp, cr);
	}

	return (error);
}
/*
 * nfs_getapage: fetch the single page at off (plus any klustered
 * neighbors) for vp.
 *
 * Issues asynchronous readaheads when the access pattern looks
 * sequential (tracked via r_nextr), klusters adjacent pages into one
 * I/O, and falls back to per-page reads for random access.  With
 * pl == NULL only readahead is triggered.  Called through pvn_getpages.
 */
static int
nfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr)
{
	rnode_t *rp;
	uint_t bsize;
	struct buf *bp;
	page_t *pp;
	u_offset_t lbn;
	u_offset_t io_off;
	u_offset_t blkoff;
	u_offset_t rablkoff;
	size_t io_len;
	uint_t blksize;
	int error;
	int readahead;
	int readahead_issued = 0;
	int ra_window;
	page_t *pagefound;

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	rp = VTOR(vp);
	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);

reread:
	bp = NULL;
	pp = NULL;
	pagefound = NULL;

	if (pl != NULL)
		pl[0] = NULL;

	error = 0;
	lbn = off / bsize;
	blkoff = lbn * bsize;

	/*
	 * Queueing up the readahead before doing the synchronous read
	 * increases parallelism between the async threads and this
	 * context.  Only block-aligned, cacheable, non-create faults
	 * trigger readahead.
	 */
	if ((off & ((vp->v_vfsp->vfs_bsize) - 1)) == 0 &&
	    rw != S_CREATE &&
	    !(vp->v_flag & VNOCACHE)) {
		mutex_enter(&rp->r_statelock);

		/*
		 * Compute how many blocks to read ahead: the full window
		 * for an exactly sequential access, a partial window when
		 * we are within nfs_nra blocks behind r_nextr, else none.
		 */
		if (off == 0)
			readahead = 0;
		else if (blkoff == rp->r_nextr)
			readahead = nfs_nra;
		else if (rp->r_nextr > blkoff &&
		    ((ra_window = (rp->r_nextr - blkoff) / bsize)
		    <= (nfs_nra - 1)))
			readahead = nfs_nra - ra_window;
		else
			readahead = 0;

		rablkoff = rp->r_nextr;
		while (readahead > 0 && rablkoff + bsize < rp->r_size) {
			mutex_exit(&rp->r_statelock);
			if (nfs_async_readahead(vp, rablkoff + bsize,
			    addr + (rablkoff + bsize - off), seg, cr,
			    nfs_readahead) < 0) {
				mutex_enter(&rp->r_statelock);
				break;
			}
			readahead--;
			rablkoff += bsize;
			/*
			 * Indicate that we did a readahead so
			 * readahead offset is not updated
			 * by the synchronous read below.
			 */
			readahead_issued = 1;
			mutex_enter(&rp->r_statelock);
			/*
			 * set readahead offset to
			 * offset of last async readahead
			 * request.
			 */
			rp->r_nextr = rablkoff;
		}
		mutex_exit(&rp->r_statelock);
	}

again:
	if ((pagefound = page_exists(vp, off)) == NULL) {
		if (pl == NULL) {
			/* Readahead-only call. */
			(void) nfs_async_readahead(vp, blkoff, addr, seg, cr,
			    nfs_readahead);
		} else if (rw == S_CREATE) {
			/*
			 * Block for this page is not allocated, or the offset
			 * is beyond the current allocation size, or we're
			 * allocating a swap slot and the page was not found,
			 * so allocate it and return a zero page.
			 */
			if ((pp = page_create_va(vp, off,
			    PAGESIZE, PG_WAIT, seg, addr)) == NULL)
				cmn_err(CE_PANIC, "nfs_getapage: page_create");
			io_len = PAGESIZE;
			mutex_enter(&rp->r_statelock);
			rp->r_nextr = off + PAGESIZE;
			mutex_exit(&rp->r_statelock);
		} else {
			/*
			 * Need to go to server to get a BLOCK, exception to
			 * that being while reading at offset = 0 or doing
			 * random i/o, in that case read only a PAGE.
			 */
			mutex_enter(&rp->r_statelock);
			if (blkoff < rp->r_size &&
			    blkoff + bsize >= rp->r_size) {
				/*
				 * If only a block or less is left in
				 * the file, read all that is remaining.
				 */
				if (rp->r_size <= off) {
					/*
					 * Trying to access beyond EOF,
					 * set up to get at least one page.
					 */
					blksize = off + PAGESIZE - blkoff;
				} else
					blksize = rp->r_size - blkoff;
			} else if ((off == 0) ||
			    (off != rp->r_nextr && !readahead_issued)) {
				blksize = PAGESIZE;
				blkoff = off; /* block = page here */
			} else
				blksize = bsize;
			mutex_exit(&rp->r_statelock);

			pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
			    &io_len, blkoff, blksize, 0);

			/*
			 * Some other thread has entered the page,
			 * so just use it.
			 */
			if (pp == NULL)
				goto again;

			/*
			 * Now round the request size up to page boundaries.
			 * This ensures that the entire page will be
			 * initialized to zeroes if EOF is encountered.
			 */
			io_len = ptob(btopr(io_len));

			bp = pageio_setup(pp, io_len, vp, B_READ);
			ASSERT(bp != NULL);

			/*
			 * pageio_setup should have set b_addr to 0.  This
			 * is correct since we want to do I/O on a page
			 * boundary.  bp_mapin will use this addr to calculate
			 * an offset, and then set b_addr to the kernel virtual
			 * address it allocated for us.
			 */
			ASSERT(bp->b_un.b_addr == 0);

			bp->b_edev = 0;
			bp->b_dev = 0;
			bp->b_lblkno = lbtodb(io_off);
			bp->b_file = vp;
			bp->b_offset = (offset_t)off;
			bp_mapin(bp);

			/*
			 * If doing a write beyond what we believe is EOF,
			 * don't bother trying to read the pages from the
			 * server, we'll just zero the pages here.  We
			 * don't check that the rw flag is S_WRITE here
			 * because some implementations may attempt a
			 * read access to the buffer before copying data.
			 */
			mutex_enter(&rp->r_statelock);
			if (io_off >= rp->r_size && seg == segkmap) {
				mutex_exit(&rp->r_statelock);
				bzero(bp->b_un.b_addr, io_len);
			} else {
				mutex_exit(&rp->r_statelock);
				error = nfs_bio(bp, cr);
			}

			/*
			 * Unmap the buffer before freeing it.
			 */
			bp_mapout(bp);
			pageio_done(bp);

			if (error == NFS_EOF) {
				/*
				 * If doing a write system call just return
				 * zeroed pages, else user tried to get pages
				 * beyond EOF, return error.  We don't check
				 * that the rw flag is S_WRITE here because
				 * some implementations may attempt a read
				 * access to the buffer before copying data.
				 */
				if (seg == segkmap)
					error = 0;
				else
					error = EFAULT;
			}

			if (!readahead_issued && !error) {
				mutex_enter(&rp->r_statelock);
				rp->r_nextr = io_off + io_len;
				mutex_exit(&rp->r_statelock);
			}
		}
	}

	if (pl == NULL)
		return (error);

	if (error) {
		if (pp != NULL)
			pvn_read_done(pp, B_ERROR);
		return (error);
	}

	if (pagefound) {
		se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);

		/*
		 * Page exists in the cache, acquire the appropriate lock.
		 * If this fails, start all over again.
		 */
		if ((pp = page_lookup(vp, off, se)) == NULL) {
#ifdef DEBUG
			nfs_lostpage++;
#endif
			goto reread;
		}
		pl[0] = pp;
		pl[1] = NULL;
		return (0);
	}

	if (pp != NULL)
		pvn_plist_init(pp, pl, plsz, off, io_len, rw);

	return (error);
}
/*
 * nfs_readahead: asynchronously read the block at blkoff into the page
 * cache.  Invoked from an async worker thread via nfs_async_readahead().
 * Errors are not reported to anyone; the only side effect of a failure
 * is pulling r_nextr back so the block is retried synchronously later.
 */
static void
nfs_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr, struct seg *seg,
	cred_t *cr)
{
	int error;
	page_t *pp;
	u_offset_t io_off;
	size_t io_len;
	struct buf *bp;
	uint_t bsize, blksize;
	rnode_t *rp = VTOR(vp);

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);

	/* Shrink the request if less than a block remains in the file. */
	mutex_enter(&rp->r_statelock);
	if (blkoff < rp->r_size && blkoff + bsize > rp->r_size) {
		blksize = rp->r_size - blkoff;
	} else
		blksize = bsize;
	mutex_exit(&rp->r_statelock);

	/* isra == 1: NULL just means punt the readahead. */
	pp = pvn_read_kluster(vp, blkoff, segkmap, addr,
	    &io_off, &io_len, blkoff, blksize, 1);
	if (pp == NULL)
		return;

	/* Round up so EOF pages get fully zero-initialized. */
	io_len = ptob(btopr(io_len));

	bp = pageio_setup(pp, io_len, vp, B_READ);
	ASSERT(bp != NULL);

	/* pageio_setup leaves b_addr 0: I/O is on a page boundary. */
	ASSERT(bp->b_un.b_addr == 0);

	bp->b_edev = 0;
	bp->b_dev = 0;
	bp->b_lblkno = lbtodb(io_off);
	bp->b_file = vp;
	bp->b_offset = (offset_t)blkoff;
	bp_mapin(bp);

	/*
	 * Beyond believed EOF through segkmap: zero the pages locally
	 * instead of asking the server (mirrors nfs_getapage()).
	 */
	mutex_enter(&rp->r_statelock);
	if (io_off >= rp->r_size && seg == segkmap) {
		mutex_exit(&rp->r_statelock);
		bzero(bp->b_un.b_addr, io_len);
		error = 0;
	} else {
		mutex_exit(&rp->r_statelock);
		error = nfs_bio(bp, cr);
		if (error == NFS_EOF)
			error = 0;
	}

	bp_mapout(bp);
	pageio_done(bp);

	pvn_read_done(pp, error ? B_READ | B_ERROR : B_READ);

	/*
	 * On error, pull the readahead offset back so the failed block
	 * is retried by the next synchronous read.  (Unlocked pre-check,
	 * rechecked under the statelock.)
	 */
	if (error && rp->r_nextr > io_off) {
		mutex_enter(&rp->r_statelock);
		if (rp->r_nextr > io_off)
			rp->r_nextr = io_off;
		mutex_exit(&rp->r_statelock);
	}
}
/*
 * nfs_putpage: VOP_PUTPAGE — flush dirty pages in [off, off+len) to the
 * server via nfs_putpages().  The rnode's r_count is raised around the
 * flush so other operations know page I/O is in flight, and r_cv is
 * broadcast when it drops.
 */
static int
nfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
	caller_context_t *ct)
{
	rnode_t *rnp;
	int error;

	ASSERT(cr != NULL);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/* Flush-everything on a read-only vnode has nothing to do. */
	if (len == 0 && !(flags & B_INVAL) && vn_is_readonly(vp))
		return (0);

	/* Synchronous flushes must come from the owning zone. */
	if (!(flags & B_ASYNC) && nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	ASSERT(off <= MAXOFF32_T);

	rnp = VTOR(vp);

	mutex_enter(&rnp->r_statelock);
	rnp->r_count++;
	mutex_exit(&rnp->r_statelock);

	error = nfs_putpages(vp, off, len, flags, cr);

	mutex_enter(&rnp->r_statelock);
	rnp->r_count--;
	cv_broadcast(&rnp->r_cv);
	mutex_exit(&rnp->r_statelock);

	return (error);
}
/*
 * nfs_putapage: write out the dirty page pp (klustered with dirty
 * neighbors in the same block) either synchronously or via the async
 * queue.  If the pages overlap a region an nfs_write() is currently
 * modifying (RMODINPROGRESS), they are re-dirtied and skipped to avoid
 * writing a partially-updated block.  The flushed range is returned
 * through *offp/*lenp when non-NULL.
 */
int
nfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
	int flags, cred_t *cr)
{
	u_offset_t io_off;
	u_offset_t lbn_off;
	u_offset_t lbn;
	size_t io_len;
	uint_t bsize;
	int error;
	rnode_t *rp;

	ASSERT(!vn_is_readonly(vp));
	ASSERT(pp != NULL);
	ASSERT(cr != NULL);
	ASSERT((flags & B_ASYNC) || nfs_zone() == VTOMI(vp)->mi_zone);

	rp = VTOR(vp);
	ASSERT(rp->r_count > 0);

	ASSERT(pp->p_offset <= MAXOFF32_T);

	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
	lbn = pp->p_offset / bsize;
	lbn_off = lbn * bsize;

	/*
	 * Find a kluster of dirty pages within the same block, then trim
	 * the I/O so it never crosses the block boundary.
	 */
	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
	    roundup(bsize, PAGESIZE), flags);
	ASSERT((pp->p_offset / bsize) >= lbn);

	if (io_off + io_len > lbn_off + bsize) {
		ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
		io_len = lbn_off + bsize - io_off;
	}

	/*
	 * If an nfs_write() is in progress over this range, writing now
	 * could push a half-modified block to the server.  Re-dirty the
	 * pages, release them, and report success; the writer will flush
	 * them later.  (Unlocked pre-check, rechecked under the lock.)
	 */
	if (rp->r_flags & RMODINPROGRESS) {
		mutex_enter(&rp->r_statelock);
		if ((rp->r_flags & RMODINPROGRESS) &&
		    rp->r_modaddr + MAXBSIZE > io_off &&
		    rp->r_modaddr < io_off + io_len) {
			page_t *plist;
			/*
			 * A write is in progress for this region of the file.
			 * If we did not detect RMODINPROGRESS here then this
			 * path through nfs_putapage() would eventually do the
			 * right thing, but that would be slower.
			 */
			plist = pp;
			while (plist != NULL) {
				pp = plist;
				page_sub(&plist, pp);
				hat_setmod(pp);
				page_io_unlock(pp);
				page_unlock(pp);
			}
			rp->r_flags |= RDIRTY;
			mutex_exit(&rp->r_statelock);
			if (offp)
				*offp = io_off;
			if (lenp)
				*lenp = io_len;
			return (0);
		}
		mutex_exit(&rp->r_statelock);
	}

	if (flags & B_ASYNC) {
		error = nfs_async_putapage(vp, pp, io_off, io_len, flags, cr,
		    nfs_sync_putapage);
	} else
		error = nfs_sync_putapage(vp, pp, io_off, io_len, flags, cr);

	if (offp)
		*offp = io_off;
	if (lenp)
		*lenp = io_len;
	return (error);
}
/*
 * Synchronously write the klustered pages pp to the server.
 *
 * Out-of-space style failures (ENOSPC, EDQUOT, EACCES) get special
 * handling unless the pages were already being discarded
 * (B_INVAL|B_FORCE): ROUTOFSPACE is latched on the rnode, the pages
 * are marked in error, and for synchronous callers the range is
 * re-flushed with B_INVAL|B_FORCE so the uncommittable pages are
 * thrown away rather than retried forever.  A later successful write
 * clears ROUTOFSPACE.
 */
static int
nfs_sync_putapage(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
	int flags, cred_t *cr)
{
	int error;
	rnode_t *rp;

	flags |= B_WRITE;

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
	error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);

	rp = VTOR(vp);

	if ((error == ENOSPC || error == EDQUOT || error == EACCES) &&
	    (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
		if (!(rp->r_flags & ROUTOFSPACE)) {
			mutex_enter(&rp->r_statelock);
			rp->r_flags |= ROUTOFSPACE;
			mutex_exit(&rp->r_statelock);
		}
		flags |= B_ERROR;
		pvn_write_done(pp, flags);
		/*
		 * For synchronous callers, invalidate the failed range
		 * so it is not retried.  NOTE(review): the retried
		 * putpage's result replaces the original error code.
		 */
		if (!(flags & B_ASYNC)) {
			error = nfs_putpage(vp, io_off, io_len,
			    B_INVAL | B_FORCE, cr, NULL);
		}
	} else {
		if (error)
			flags |= B_ERROR;
		else if (rp->r_flags & ROUTOFSPACE) {
			/* Space is available again; clear the latch. */
			mutex_enter(&rp->r_statelock);
			rp->r_flags &= ~ROUTOFSPACE;
			mutex_exit(&rp->r_statelock);
		}
		pvn_write_done(pp, flags);
	}

	return (error);
}
/*
 * Establish a memory mapping (VOP_MAP) of a regular NFS file by
 * creating a segvn segment backed by vp.
 *
 * Fails with EFBIG/ENXIO for offsets outside the NFSv2 32-bit range,
 * ENODEV for non-regular files, and EAGAIN when caching is disabled
 * (VNOCACHE) or mandatory locking is in effect alongside remote or
 * sleeping lock-manager locks.
 */
static int
nfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
	caller_context_t *ct)
{
	struct segvn_crargs vn_a;
	int error;
	rnode_t *rp;
	struct vattr va;

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off > MAXOFF32_T)
		return (EFBIG);

	if (off < 0 || off + len < 0)
		return (ENXIO);

	if (vp->v_type != VREG)
		return (ENODEV);

	/*
	 * If there is cached data and close-to-open consistency is in
	 * effect on a writable mount, force a fresh over-the-wire
	 * GETATTR so stale cached pages are detected before being
	 * mapped; otherwise possibly-cached attributes suffice.
	 */
	va.va_mask = AT_ALL;
	if (vn_has_cached_data(vp) &&
	    !(VTOMI(vp)->mi_flags & MI_NOCTO) && !vn_is_readonly(vp))
		error = nfs_getattr_otw(vp, &va, cr);
	else
		error = nfsgetattr(vp, &va, cr);
	if (error)
		return (error);

	/*
	 * Briefly take r_rwlock as writer to serialize with I/O, then
	 * record the in-progress mapping in r_inmap and drop the lock:
	 * the mapping is tracked with r_inmap across the address-space
	 * calls instead of holding r_rwlock — presumably to avoid lock
	 * ordering problems between the as layer and the rnode rwlock
	 * (TODO confirm against the other r_inmap consumers).
	 */
	rp = VTOR(vp);
	if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
		return (EINTR);
	atomic_inc_uint(&rp->r_inmap);
	nfs_rw_exit(&rp->r_rwlock);

	/* Exclude file locking operations while the mapping is created. */
	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
		atomic_dec_uint(&rp->r_inmap);
		return (EINTR);
	}

	if (vp->v_flag & VNOCACHE) {
		error = EAGAIN;
		goto done;
	}

	/*
	 * Mappings do not mix with mandatory locking when remote or
	 * blocked lock-manager locks exist on the file.
	 */
	if ((flk_has_remote_locks(vp) || lm_has_sleep(vp)) &&
	    MANDLOCK(vp, va.va_mode)) {
		error = EAGAIN;
		goto done;
	}

	as_rangelock(as);
	/* Pick (or validate) the user address for the mapping. */
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto done;
	}

	/* Build the segvn creation arguments and map the segment. */
	vn_a.vp = vp;
	vn_a.offset = off;
	vn_a.type = (flags & MAP_TYPE);
	vn_a.prot = (uchar_t)prot;
	vn_a.maxprot = (uchar_t)maxprot;
	vn_a.flags = (flags & ~MAP_TYPE);
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);

done:
	nfs_rw_exit(&rp->r_lkserlock);
	atomic_dec_uint(&rp->r_inmap);
	return (error);
}
/*
 * VOP_ADDMAP: a mapping over this vnode has been established.
 * Account for the newly mapped pages in the rnode's mapping count.
 */
/* ARGSUSED */
static int
nfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
	caller_context_t *ct)
{
	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	/* Track how many pages of this file are currently mapped. */
	atomic_add_long((ulong_t *)&VTOR(vp)->r_mapcnt, btopr(len));

	return (0);
}
/*
 * Advisory byte-range locking (F_GETLK/F_SETLK/F_SETLKW).
 *
 * On a mount with local locking (MI_LLOCK) the request is handled
 * entirely by the common code (fs_frlock).  Otherwise it is forwarded
 * to the network lock manager (lm_frlock); before a remote set/unset
 * is issued, all in-flight page I/O is drained and the page cache is
 * flushed and invalidated so the locked region is re-read from the
 * server.
 */
static int
nfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, offset_t offset,
	struct flk_callback *flk_cbp, cred_t *cr, caller_context_t *ct)
{
	netobj lm_fh;
	int rc;
	u_offset_t start, end;
	rnode_t *rp;
	int error = 0, intr = INTR(vp);

	if (cmd != F_GETLK && cmd != F_SETLK && cmd != F_SETLKW)
		return (EINVAL);
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	/* Check that the open mode permits the requested lock type. */
	switch (bfp->l_type) {
	case F_RDLCK:
		if (cmd != F_GETLK && !(flag & FREAD))
			return (EBADF);
		break;
	case F_WRLCK:
		if (cmd != F_GETLK && !(flag & FWRITE))
			return (EBADF);
		break;
	case F_UNLCK:
		intr = 0;	/* never interrupt an unlock */
		break;
	default:
		return (EINVAL);
	}

	/*
	 * Normalize the lock range and verify it fits within the
	 * NFSv2 32-bit offset limit.
	 */
	if (rc = flk_convert_lock_data(vp, bfp, &start, &end, offset))
		return (rc);

	if (rc = flk_check_lock_data(start, end, MAXOFF32_T))
		return (rc);

	/* Local ("llock") locking: never goes over the wire. */
	if (VTOMI(vp)->mi_flags & MI_LLOCK) {
		if (offset > MAXOFF32_T)
			return (EFBIG);

		if (cmd == F_SETLK || cmd == F_SETLKW) {
			/* Refuse locks that conflict with mappings. */
			if (!lm_safelock(vp, bfp, cr))
				return (EAGAIN);
		}
		return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
	}

	rp = VTOR(vp);

	/* Serialize all locking operations on this vnode. */
	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_WRITER, intr))
		return (EINTR);

	if (cmd == F_SETLK || cmd == F_SETLKW) {
		if (!lm_safelock(vp, bfp, cr)) {
			rc = EAGAIN;
			goto done;
		}
	}

	/*
	 * For set/unset requests (not F_GETLK): wait until all page
	 * I/O on the rnode has drained (r_count reaches zero), then
	 * flush and invalidate the page cache so data in the locked
	 * range is fetched fresh from the server.
	 */
	if (cmd != F_GETLK) {
		mutex_enter(&rp->r_statelock);
		while (rp->r_count > 0) {
			if (intr) {
				klwp_t *lwp = ttolwp(curthread);

				/* keep /proc from stopping us mid-wait */
				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (cv_wait_sig(&rp->r_cv, &rp->r_statelock)
				    == 0) {
					if (lwp != NULL)
						lwp->lwp_nostop--;
					rc = EINTR;
					break;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&rp->r_cv, &rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);
		if (rc != 0)
			goto done;

		error = nfs_putpage(vp, (offset_t)0, 0, B_INVAL, cr, ct);
		if (error) {
			if (error == ENOSPC || error == EDQUOT) {
				/* latch for a later close()/fsync() */
				mutex_enter(&rp->r_statelock);
				if (!rp->r_error)
					rp->r_error = error;
				mutex_exit(&rp->r_statelock);
			}
			/* Unlocks proceed despite the flush failure. */
			if (bfp->l_type != F_UNLCK) {
				rc = ENOLCK;
				goto done;
			}
		}
	}

	/* Hand the request to the network lock manager. */
	lm_fh.n_len = sizeof (fhandle_t);
	lm_fh.n_bytes = (char *)VTOFH(vp);

	rc = lm_frlock(vp, cmd, bfp, flag, offset, cr, &lm_fh, flk_cbp);
	if (rc == 0)
		nfs_lockcompletion(vp, cmd);

done:
	nfs_rw_exit(&rp->r_lkserlock);
	return (rc);
}
/*
 * VOP_SPACE: free storage space within the file.
 *
 * Only F_FREESP with l_len == 0 is supported, i.e. truncation of the
 * file at l_start (implemented as a size-only setattr).  All offsets
 * must fit in the NFSv2 32-bit range.
 */
static int
nfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
	offset_t offset, cred_t *cr, caller_context_t *ct)
{
	struct vattr va;
	int error;

	ASSERT(vp->v_type == VREG);

	if (cmd != F_FREESP)
		return (EINVAL);

	if (offset > MAXOFF32_T)
		return (EFBIG);

	if (bfp->l_start > MAXOFF32_T || bfp->l_end > MAXOFF32_T ||
	    bfp->l_len > MAXOFF32_T)
		return (EFBIG);

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	/* Normalize l_start relative to the given whence/offset. */
	error = convoff(vp, bfp, 0, offset);
	if (error != 0)
		return (error);
	ASSERT(bfp->l_start >= 0);

	/* Only "free to end of file" (truncate) is supported. */
	if (bfp->l_len != 0)
		return (EINVAL);

	/* Skip the over-the-wire setattr if the size is unchanged. */
	va.va_mask = AT_SIZE;
	error = nfsgetattr(vp, &va, cr);
	if (error != 0 || va.va_size == bfp->l_start)
		return (error);

	va.va_mask = AT_SIZE;
	va.va_size = bfp->l_start;
	error = nfssetattr(vp, &va, 0, cr);

	if (error == 0 && bfp->l_start == 0)
		vnevent_truncate(vp, ct);

	return (error);
}
/*
 * VOP_REALVP: an NFS vnode has no underlying "real" vnode, so fail
 * and let callers use vp itself.
 */
/* ARGSUSED */
static int
nfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
{
	return (EINVAL);
}
/*
 * VOP_DELMAP: a mapping over this vnode is being torn down.
 *
 * The real work (mapcnt accounting and page flush) must not run in
 * the unmap path itself, so it is registered as an address-space
 * callback (nfs_delmap_callback) and EAGAIN is returned to make the
 * as layer invoke the callback and retry.  When the same caller
 * re-enters after the callback has completed, the recorded result is
 * found via nfs_find_and_delete_delmapcall() and returned (a recorded
 * EAGAIN is mapped to success).
 */
static int
nfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
	caller_context_t *ct)
{
	int caller_found;
	int error;
	rnode_t *rp;
	nfs_delmap_args_t *dmapp;
	nfs_delmapcall_t *delmap_call;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);
	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

	/*
	 * Second pass: if this caller already queued a callback, pick
	 * up and return its result.
	 */
	rp = VTOR(vp);
	caller_found = nfs_find_and_delete_delmapcall(rp, &error);
	if (caller_found) {
		/*
		 * If the callback stashed EAGAIN, no real error
		 * occurred; report success.
		 */
		if (error == EAGAIN)
			return (0);
		else
			return (error);
	}

	/* First pass: record this call on the rnode's delmap list. */
	delmap_call = nfs_init_delmapcall();

	mutex_enter(&rp->r_statelock);
	list_insert_tail(&rp->r_indelmap, delmap_call);
	mutex_exit(&rp->r_statelock);

	/* Package the arguments for the deferred callback. */
	dmapp = kmem_alloc(sizeof (nfs_delmap_args_t), KM_SLEEP);

	dmapp->vp = vp;
	dmapp->off = off;
	dmapp->addr = addr;
	dmapp->len = len;
	dmapp->prot = prot;
	dmapp->maxprot = maxprot;
	dmapp->flags = flags;
	dmapp->cr = cr;
	dmapp->caller = delmap_call;

	error = as_add_callback(as, nfs_delmap_callback, dmapp,
	    AS_UNMAP_EVENT, addr, len, KM_SLEEP);

	/* EAGAIN tells the as layer to run the callback and retry. */
	return (error ? error : EAGAIN);
}
/*
 * Address-space callback run once the unmap has completed (registered
 * by nfs_delmap()).
 *
 * Drops the mapped-page count and, for writable MAP_SHARED mappings
 * with cached data, flushes dirty pages back to the server —
 * asynchronously when close-to-open consistency is disabled or we are
 * not in the mount's zone, synchronously otherwise.  With direct I/O
 * in effect the pages are additionally invalidated.  The result is
 * stored for the retrying nfs_delmap() caller, the callback is
 * unregistered, and the argument block is freed.
 */
/* ARGSUSED */
static void
nfs_delmap_callback(struct as *as, void *arg, uint_t event)
{
	int error;
	rnode_t *rp;
	mntinfo_t *mi;
	nfs_delmap_args_t *dmapp = (nfs_delmap_args_t *)arg;

	rp = VTOR(dmapp->vp);
	mi = VTOMI(dmapp->vp);

	/* The mapping is gone; drop the page accounting. */
	atomic_add_long((ulong_t *)&rp->r_mapcnt, -btopr(dmapp->len));
	ASSERT(rp->r_mapcnt >= 0);

	/*
	 * Writable shared mappings may have dirtied pages without
	 * going through VOP_WRITE; push them to the server now.
	 */
	if (vn_has_cached_data(dmapp->vp) && !vn_is_readonly(dmapp->vp) &&
	    dmapp->flags == MAP_SHARED && (dmapp->maxprot & PROT_WRITE)) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= RDIRTY;
		mutex_exit(&rp->r_statelock);
		/*
		 * Without close-to-open (or from a foreign zone) the
		 * flush need not complete before we return.
		 */
		if ((mi->mi_flags & MI_NOCTO) ||
		    nfs_zone() != mi->mi_zone)
			error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
			    B_ASYNC, dmapp->cr, NULL);
		else
			error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
			    0, dmapp->cr, NULL);
		if (!error) {
			/* Surface any error latched on the rnode. */
			mutex_enter(&rp->r_statelock);
			error = rp->r_error;
			rp->r_error = 0;
			mutex_exit(&rp->r_statelock);
		}
	} else
		error = 0;

	/* Direct I/O: don't leave stale pages in the cache. */
	if ((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO))
		(void) nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
		    B_INVAL, dmapp->cr, NULL);

	dmapp->caller->error = error;	/* for the retrying nfs_delmap() */
	(void) as_delete_callback(as, arg);
	kmem_free(dmapp, sizeof (nfs_delmap_args_t));
}
/*
 * VOP_PATHCONF: report filesystem limits and options.
 *
 * Most values come from the pathconf information cached at mount time
 * (mi_pathconf); _PC_FILESIZEBITS is a constant 32 since NFSv2 file
 * offsets are 32 bits.
 */
/* ARGSUSED */
static int
nfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
	caller_context_t *ct)
{
	int error = 0;

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	switch (cmd) {
	case _PC_FILESIZEBITS:
		*valp = 32;	/* NFSv2: 32-bit file offsets */
		return (0);

	case _PC_LINK_MAX:
	case _PC_NAME_MAX:
	case _PC_PATH_MAX:
	case _PC_SYMLINK_MAX:
	case _PC_CHOWN_RESTRICTED:
	case _PC_NO_TRUNC: {
		mntinfo_t *mi;
		struct pathcnf *pc;

		/* No cached pathconf info means we can't answer. */
		if ((mi = VTOMI(vp)) == NULL || (pc = mi->mi_pathconf) == NULL)
			return (EINVAL);
		/*
		 * The pc_mask bit is overloaded: for the numeric
		 * queries a set bit marks the value invalid (error),
		 * while for the boolean queries it IS the answer.
		 */
		error = _PC_ISSET(cmd, pc->pc_mask);
		switch (cmd) {
		case _PC_LINK_MAX:
			*valp = pc->pc_link_max;
			break;
		case _PC_NAME_MAX:
			*valp = pc->pc_name_max;
			break;
		case _PC_PATH_MAX:
		case _PC_SYMLINK_MAX:
			*valp = pc->pc_path_max;
			break;
		case _PC_CHOWN_RESTRICTED:
			*valp = error ? 1 : 0;	/* bit is the boolean */
			error = 0;
			break;
		case _PC_NO_TRUNC:
			*valp = error ? 1 : 0;	/* bit is the boolean */
			error = 0;
			break;
		}
		return (error ? EINVAL : 0);
	}

	case _PC_XATTR_EXISTS:
		*valp = 0;
		if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
			vnode_t *avp;
			rnode_t *rp;
			mntinfo_t *mi = VTOMI(vp);

			if (!(mi->mi_flags & MI_EXTATTR))
				return (0);

			rp = VTOR(vp);
			if (nfs_rw_enter_sig(&rp->r_rwlock, RW_READER,
			    INTR(vp)))
				return (EINTR);

			/*
			 * Try the DNLC first; fall back to fetching
			 * the hidden xattr directory over the wire.
			 */
			error = nfslookup_dnlc(vp, XATTR_DIR_NAME, &avp, cr);
			if (error || avp == NULL)
				error = acl_getxattrdir2(vp, &avp, 0, cr, 0);

			nfs_rw_exit(&rp->r_rwlock);

			if (error == 0 && avp != NULL) {
				error = do_xattr_exists_check(avp, valp, cr);
				VN_RELE(avp);
			}
		}
		return (error ? EINVAL : 0);

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACLENT_ENABLED;
		return (0);

	default:
		return (EINVAL);
	}
}
/*
 * Worker for synchronous (or queued-async) page I/O: perform the
 * transfer via nfs_rdwrlbn() and then complete the pages with the
 * appropriate pvn routine, tagging them B_ERROR on failure.
 */
static int
nfs_sync_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
	int flags, cred_t *cr)
{
	int error;

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

	error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
	if (error != 0)
		flags |= B_ERROR;

	if (flags & B_READ)
		pvn_read_done(pp, flags);
	else
		pvn_write_done(pp, flags);

	return (error);
}
/*
 * VOP_PAGEIO: raw page I/O on behalf of the VM system.
 *
 * Asynchronous requests are queued via nfs_async_pageio() (completed
 * by nfs_sync_pageio); synchronous ones go straight to nfs_rdwrlbn().
 * r_count is raised around the operation so lock requests and other
 * waiters can drain outstanding page I/O via r_cv.
 */
static int
nfs_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
	int flags, cred_t *cr, caller_context_t *ct)
{
	int error;
	rnode_t *rp;

	if (pp == NULL)
		return (EINVAL);

	if (io_off > MAXOFF32_T)
		return (EFBIG);	/* beyond the NFSv2 32-bit offset range */
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	rp = VTOR(vp);
	mutex_enter(&rp->r_statelock);
	rp->r_count++;		/* mark page I/O in progress */
	mutex_exit(&rp->r_statelock);

	if (flags & B_ASYNC) {
		error = nfs_async_pageio(vp, pp, io_off, io_len, flags, cr,
		    nfs_sync_pageio);
	} else
		error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
	mutex_enter(&rp->r_statelock);
	rp->r_count--;
	cv_broadcast(&rp->r_cv);	/* wake waiters for r_count == 0 */
	mutex_exit(&rp->r_statelock);
	return (error);
}
/*
 * VOP_SETSECATTR: set the file's ACL via the NFS_ACL side protocol.
 *
 * Returns ENOSYS when the server offers no ACL support.  MI_ACL is
 * tested again after the call: presumably the flag can be cleared
 * while acl_setacl2() runs if the server turns out not to support the
 * protocol, in which case we fall through to ENOSYS — TODO confirm
 * against the MI_ACL maintenance code.
 */
static int
nfs_setsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
	caller_context_t *ct)
{
	int error;
	mntinfo_t *mi;

	mi = VTOMI(vp);
	if (nfs_zone() != mi->mi_zone)
		return (EIO);
	if (mi->mi_flags & MI_ACL) {
		error = acl_setacl2(vp, vsecattr, flag, cr);
		if (mi->mi_flags & MI_ACL)
			return (error);
	}

	return (ENOSYS);
}
/*
 * VOP_GETSECATTR: fetch the file's ACL.
 *
 * Uses the NFS_ACL side protocol when the server supports it
 * (MI_ACL); MI_ACL is re-tested after the call, and if it was cleared
 * meanwhile — or was never set — a fake ACL is fabricated from the
 * mode bits via fs_fab_acl().
 */
static int
nfs_getsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
	caller_context_t *ct)
{
	mntinfo_t *mi = VTOMI(vp);

	if (nfs_zone() != mi->mi_zone)
		return (EIO);

	if (mi->mi_flags & MI_ACL) {
		int error = acl_getacl2(vp, vsecattr, flag, cr);

		if (mi->mi_flags & MI_ACL)
			return (error);
	}

	/* No ACL support: synthesize an ACL from the permission bits. */
	return (fs_fab_acl(vp, vsecattr, flag, cr, ct));
}
/*
 * VOP_SHRLOCK: share reservations (F_SHARE/F_UNSHARE) and the
 * F_HASREMOTELOCKS query.
 *
 * With local locking (MI_LLOCK) the request is handled by the common
 * fs_shrlock().  Otherwise share requests are forwarded to the
 * network lock manager with the owner wrapped in an nfs_owner
 * structure; an owner that is already an nfs_owner (magic matches) is
 * passed straight through, which handles recursive calls from the
 * lock manager client itself.
 */
static int
nfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
	caller_context_t *ct)
{
	int error;
	struct shrlock nshr;
	struct nfs_owner nfs_owner;
	netobj lm_fh;

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	if (cmd != F_SHARE && cmd != F_UNSHARE && cmd != F_HASREMOTELOCKS)
		return (EINVAL);

	/* The requested access must be allowed by the open mode. */
	if (cmd == F_SHARE &&
	    (((shr->s_access & F_RDACC) && !(flag & FREAD)) ||
	    ((shr->s_access & F_WRACC) && !(flag & FWRITE))))
		return (EBADF);

	/* Local locking: never goes over the wire. */
	if (VTOMI(vp)->mi_flags & MI_LLOCK)
		return (fs_shrlock(vp, cmd, shr, flag, cr, ct));

	switch (cmd) {
	case F_SHARE:
	case F_UNSHARE:
		lm_fh.n_len = sizeof (fhandle_t);
		lm_fh.n_bytes = (char *)VTOFH(vp);

		/*
		 * An owner too large to fit in nfs_owner.lowner is
		 * expected to already be an nfs_owner (a recursive
		 * call from the lock manager client); verify its magic
		 * and pass it through unchanged.
		 */
		if (shr->s_own_len > sizeof (nfs_owner.lowner)) {
			if (((struct nfs_owner *)shr->s_owner)->magic !=
			    NFS_OWNER_MAGIC)
				return (EINVAL);

			if (error = lm_shrlock(vp, cmd, shr, flag, &lm_fh)) {
				error = set_errno(error);
			}
			return (error);
		}

		/*
		 * Wrap the caller's owner in an nfs_owner tagged with
		 * our node name so the server can identify this client.
		 */
		bzero(&nfs_owner, sizeof (nfs_owner));
		nfs_owner.magic = NFS_OWNER_MAGIC;
		(void) strncpy(nfs_owner.hname, uts_nodename(),
		    sizeof (nfs_owner.hname));
		bcopy(shr->s_owner, nfs_owner.lowner, shr->s_own_len);
		nshr.s_access = shr->s_access;
		nshr.s_deny = shr->s_deny;
		nshr.s_sysid = 0;
		nshr.s_pid = ttoproc(curthread)->p_pid;
		nshr.s_own_len = sizeof (nfs_owner);
		nshr.s_owner = (caddr_t)&nfs_owner;

		if (error = lm_shrlock(vp, cmd, &nshr, flag, &lm_fh)) {
			error = set_errno(error);
		}

		break;

	case F_HASREMOTELOCKS:
		/*
		 * NFS client can't store remote locks itself.
		 */
		shr->s_access = 0;
		error = 0;
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}