#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/t_lock.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/stat.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/dnlc.h>
#include <sys/vmsystm.h>
#include <sys/flock.h>
#include <sys/share.h>
#include <sys/cmn_err.h>
#include <sys/tiuser.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/acl.h>
#include <sys/kstat.h>
#include <sys/signal.h>
#include <sys/list.h>
#include <sys/zone.h>
#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/nfs_cmd.h>
#include <nfs/rnode.h>
#include <nfs/nfs_acl.h>
#include <nfs/lm.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
/* Forward declarations for file-local helpers defined below. */
static void nfs3_attr_cache(vnode_t *, vattr_t *, vattr_t *, hrtime_t,
	cred_t *);
static int nfs_getattr_cache(vnode_t *, struct vattr *);
static int nfs_remove_locking_id(vnode_t *, int, char *, char *, int *);

/*
 * Per-zone bookkeeping for mntinfo structures: a lock-protected list
 * plus a flag recording whether the zone destructor has already run.
 */
struct mi_globals {
	kmutex_t	mig_lock;	/* protects mig_list */
	list_t		mig_list;	/* list of mntinfo_t in this zone */
	boolean_t	mig_destructor_called;
};

/* Zone key used to look up the per-zone struct mi_globals. */
static zone_key_t mi_list_key;

/* Debug knob defined elsewhere; enables share/lock debugging output. */
extern int share_debug;
/*
 * Wait until any cache-purge in progress on this vnode (marked by
 * another thread holding the r_serial pseudo-lock) has completed.
 *
 * The wait is interruptible only if the filesystem was mounted with
 * the "intr" option (MI_INT); sigintr()/sigunintr() adjust the signal
 * mask accordingly around the cv_wait_sig() loop.
 *
 * Returns 0 once no purge is in progress, or EINTR if the wait was
 * interrupted by a signal.
 */
int
nfs_waitfor_purge_complete(vnode_t *vp)
{
	rnode_t *rp;
	k_sigset_t smask;

	rp = VTOR(vp);
	/*
	 * If this thread itself holds r_serial, don't wait — that would
	 * deadlock against ourselves.
	 */
	if (rp->r_serial != NULL && rp->r_serial != curthread) {
		mutex_enter(&rp->r_statelock);
		sigintr(&smask, VTOMI(vp)->mi_flags & MI_INT);
		while (rp->r_serial != NULL) {
			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
				/* Interrupted: restore mask, bail out. */
				sigunintr(&smask);
				mutex_exit(&rp->r_statelock);
				return (EINTR);
			}
		}
		sigunintr(&smask);
		mutex_exit(&rp->r_statelock);
	}
	return (0);
}
/*
 * Validate the caches for an NFS v2 vnode.
 *
 * If the cached attributes have timed out, fetch fresh attributes
 * from the server (which, as a side effect, invalidates stale
 * caches).  Otherwise just wait for any purge already in progress
 * so the caller observes fully consistent caches.
 *
 * Returns 0 on success or an errno value.
 */
int
nfs_validate_caches(vnode_t *vp, cred_t *cr)
{
	struct vattr va;

	if (!ATTRCACHE_VALID(vp)) {
		/* Cache has expired; go over the wire. */
		va.va_mask = AT_ALL;
		return (nfs_getattr_otw(vp, &va, cr));
	}

	/* Attributes still fresh; just drain any in-flight purge. */
	return (nfs_waitfor_purge_complete(vp));
}
/*
 * Validate the caches for an NFS v3 vnode.
 *
 * Mirrors nfs_validate_caches() but uses the v3 over-the-wire
 * GETATTR when the attribute cache has timed out.
 *
 * Returns 0 on success or an errno value.
 */
int
nfs3_validate_caches(vnode_t *vp, cred_t *cr)
{
	struct vattr va;

	if (!ATTRCACHE_VALID(vp)) {
		/* Cache has expired; go over the wire. */
		va.va_mask = AT_ALL;
		return (nfs3_getattr_otw(vp, &va, cr));
	}

	/* Attributes still fresh; just drain any in-flight purge. */
	return (nfs_waitfor_purge_complete(vp));
}
/*
 * Purge all of the various data caches associated with a vnode:
 * the DNLC entries that reference it, its cached symlink contents,
 * any dirty/cached pages, and its readdir cache.
 *
 * purge_dnlc is NFS_PURGE_DNLC to force a DNLC purge even for
 * non-directories; directories are always purged.
 */
void
nfs_purge_caches(vnode_t *vp, int purge_dnlc, cred_t *cr)
{
	rnode_t *rp;
	char *contents;
	int size;
	int error;

	/*
	 * Purge the DNLC for any entries which refer to this file.
	 * Avoid recursive entry into dnlc_purge_vp() for directories:
	 * RINDNLCPURGE marks a purge already in progress on this rnode
	 * (dnlc_purge_vp can trigger VOP_INACTIVE and re-enter here).
	 */
	rp = VTOR(vp);
	mutex_enter(&rp->r_statelock);
	if (vp->v_count > 1 &&
	    (vp->v_type == VDIR || purge_dnlc == NFS_PURGE_DNLC) &&
	    !(rp->r_flags & RINDNLCPURGE)) {
		if (vp->v_type == VDIR)
			rp->r_flags |= RINDNLCPURGE;
		/* dnlc_purge_vp() may block; drop the state lock around it. */
		mutex_exit(&rp->r_statelock);
		dnlc_purge_vp(vp);
		mutex_enter(&rp->r_statelock);
		if (rp->r_flags & RINDNLCPURGE)
			rp->r_flags &= ~RINDNLCPURGE;
	}

	/*
	 * Detach any cached symlink contents under the lock, then free
	 * them after dropping it (kmem_free may not need the lock, and
	 * we must not hold it across the free anyway).
	 */
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	mutex_exit(&rp->r_statelock);

	if (contents != NULL) {
		kmem_free((void *)contents, size);
	}

	/*
	 * Flush and invalidate all cached pages.  Record only "hard"
	 * space errors (ENOSPC/EDQUOT) in r_error so a later close()
	 * can report that dirty data was lost.
	 */
	if (vn_has_cached_data(vp)) {
		error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_INVAL, cr, NULL);
		if (error && (error == ENOSPC || error == EDQUOT)) {
			mutex_enter(&rp->r_statelock);
			if (!rp->r_error)
				rp->r_error = error;
			mutex_exit(&rp->r_statelock);
		}
	}

	/* Finally, toss any cached readdir responses. */
	if (HAVE_RDDIR_CACHE(rp))
		nfs_purge_rddir_cache(vp);
}
/*
 * Discard every cached readdir response for the given directory
 * vnode and reset the lookup/readdirplus hints so the next readdir
 * refills the cache from the server.
 */
void
nfs_purge_rddir_cache(vnode_t *vp)
{
	rnode_t *rp = VTOR(vp);
	rddir_cache *entry;
	rddir_cache *next;

	mutex_enter(&rp->r_statelock);
	rp->r_direof = NULL;
	rp->r_flags &= ~RLOOKUP;
	rp->r_flags |= RREADDIRPLUS;
	/* Walk the AVL tree, unlinking and releasing each entry. */
	for (entry = avl_first(&rp->r_dir); entry != NULL; entry = next) {
		next = AVL_NEXT(&rp->r_dir, entry);
		avl_remove(&rp->r_dir, entry);
		rddir_cache_rele(entry);
	}
	mutex_exit(&rp->r_statelock);
}
/*
 * Cache the post-operation attributes returned by an NFS v3 call.
 * If the server supplied no attributes, the local attribute cache
 * is purged instead, forcing a fresh GETATTR later.
 */
void
nfs3_cache_post_op_attr(vnode_t *vp, post_op_attr *poap, hrtime_t t, cred_t *cr)
{
	vattr_t attr;

	if (poap->attributes) {
		(void) nfs3_cache_fattr3(vp, &poap->attr, &attr, t, cr);
	} else {
		PURGE_ATTRCACHE(vp);
	}
}
/*
 * Cache post-operation attributes that have already been converted
 * to vattr form (poap->fres.vap).  If the server supplied no
 * attributes, purge the attribute cache instead.
 */
void
nfs3_cache_post_op_vattr(vnode_t *vp, post_op_vattr *poap, hrtime_t t,
    cred_t *cr)
{
	if (poap->attributes) {
		nfs_attr_cache(vp, poap->fres.vap, t, cr);
	} else {
		PURGE_ATTRCACHE(vp);
	}
}
/*
 * Cache weak cache consistency (wcc) data from an NFS v3 reply.
 *
 * If post-op attributes are present and convert cleanly, cache them;
 * when pre-op attributes are also present, hand both sets to
 * nfs3_attr_cache() so it can use the pre-op ctime/mtime/size for
 * cache-validity checks.  On any missing/bad attributes, purge the
 * attribute cache.
 */
void
nfs3_cache_wcc_data(vnode_t *vp, wcc_data *wccp, hrtime_t t, cred_t *cr)
{
	vattr_t bva;
	vattr_t ava;

	if (!wccp->after.attributes) {
		/* No post-op attributes at all. */
		PURGE_ATTRCACHE(vp);
		return;
	}
	if (fattr3_to_vattr(vp, &wccp->after.attr, &ava)) {
		/* Post-op attributes present but unusable. */
		PURGE_ATTRCACHE(vp);
		return;
	}
	if (!wccp->before.attributes) {
		/* Only post-op attributes; cache them directly. */
		nfs_attr_cache(vp, &ava, t, cr);
		return;
	}

	/*
	 * Pre-op attributes carry only ctime, mtime, and size — the
	 * fields nfs3_attr_cache() consults for validity checks.
	 */
	bva.va_ctime.tv_sec = wccp->before.attr.ctime.seconds;
	bva.va_ctime.tv_nsec = wccp->before.attr.ctime.nseconds;
	bva.va_mtime.tv_sec = wccp->before.attr.mtime.seconds;
	bva.va_mtime.tv_nsec = wccp->before.attr.mtime.nseconds;
	bva.va_size = wccp->before.attr.size;
	nfs3_attr_cache(vp, &bva, &ava, t, cr);
}
/*
 * Set the attribute cache from raw NFS v2 attributes, but only if no
 * newer attributes (per r_mtime vs. the request timestamp t) have
 * been cached in the meantime.  Unconvertible attributes purge the
 * cache instead.
 */
void
nfs_attrcache(vnode_t *vp, struct nfsfattr *na, hrtime_t t)
{
	struct vattr va;
	rnode_t *rp;

	if (nattr_to_vattr(vp, na, &va) != 0) {
		/* Conversion failed (e.g. time overflow); toss the cache. */
		PURGE_ATTRCACHE(vp);
		return;
	}

	rp = VTOR(vp);
	mutex_enter(&rp->r_statelock);
	/* Only install if nothing fresher arrived while we were away. */
	if (rp->r_mtime <= t)
		nfs_attrcache_va(vp, &va);
	mutex_exit(&rp->r_statelock);
}
/*
 * Set the attribute cache from raw NFS v3 attributes, but only if no
 * newer attributes (per r_mtime vs. the request timestamp t) have
 * been cached in the meantime.  Unconvertible attributes purge the
 * cache instead.
 */
void
nfs3_attrcache(vnode_t *vp, fattr3 *na, hrtime_t t)
{
	struct vattr va;
	rnode_t *rp;

	if (fattr3_to_vattr(vp, na, &va) != 0) {
		/* Conversion failed (overflow/too large); toss the cache. */
		PURGE_ATTRCACHE(vp);
		return;
	}

	rp = VTOR(vp);
	mutex_enter(&rp->r_statelock);
	/* Only install if nothing fresher arrived while we were away. */
	if (rp->r_mtime <= t)
		nfs_attrcache_va(vp, &va);
	mutex_exit(&rp->r_statelock);
}
/*
 * Convert NFS v2 attributes to vattr form in *vap and, on success,
 * cache them (performing any required cache invalidation).
 *
 * Returns 0 on success or the conversion error.
 */
int
nfs_cache_fattr(vnode_t *vp, struct nfsfattr *na, vattr_t *vap, hrtime_t t,
    cred_t *cr)
{
	int error = nattr_to_vattr(vp, na, vap);

	if (error == 0)
		nfs_attr_cache(vp, vap, t, cr);
	return (error);
}
/*
 * Convert NFS v3 attributes to vattr form in *vap and, on success,
 * cache them (performing any required cache invalidation).
 *
 * Returns 0 on success or the conversion error.
 */
int
nfs3_cache_fattr3(vnode_t *vp, fattr3 *na, vattr_t *vap, hrtime_t t, cred_t *cr)
{
	int error = fattr3_to_vattr(vp, na, vap);

	if (error == 0)
		nfs_attr_cache(vp, vap, t, cr);
	return (error);
}
/*
 * Use the passed-in attributes to check whether the data and metadata
 * caches are still valid, cache the new attributes, and then perform
 * any required cache invalidation.
 *
 * Validity checking and installation of the new attributes are done
 * atomically under r_statelock.  The (potentially blocking) cache
 * invalidation that follows is serialized with respect to other
 * updaters via the r_serial "pseudo lock": the thread doing
 * invalidation records itself in r_serial, and other threads wait
 * on r_cv until it is clear.
 */
void
nfs_attr_cache(vnode_t *vp, vattr_t *vap, hrtime_t t, cred_t *cr)
{
	rnode_t *rp;
	int mtime_changed = 0;
	int ctime_changed = 0;
	vsecattr_t *vsp;
	int was_serial;
	len_t preattr_rsize;
	boolean_t writeattr_set = B_FALSE;
	boolean_t cachepurge_set = B_FALSE;

	rp = VTOR(vp);

	mutex_enter(&rp->r_statelock);

	/*
	 * Acquire the r_serial pseudo lock unless this thread already
	 * holds it (recursive entry, e.g. from a purge it initiated).
	 * lwp_nostop is bumped so the thread is not stopped (e.g. by
	 * /proc) while waiting with a signal-interruptible cv_wait.
	 */
	if (rp->r_serial != curthread) {
		klwp_t *lwp = ttolwp(curthread);

		was_serial = 0;
		if (lwp != NULL)
			lwp->lwp_nostop++;
		while (rp->r_serial != NULL) {
			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
				/* Interrupted: give up without caching. */
				mutex_exit(&rp->r_statelock);
				if (lwp != NULL)
					lwp->lwp_nostop--;
				return;
			}
		}
		if (lwp != NULL)
			lwp->lwp_nostop--;
	} else
		was_serial = 1;

	/*
	 * If newer attributes have already been cached (r_mtime past
	 * our request timestamp), ours are stale: don't install them,
	 * but purge the cache if they disagree with what is cached.
	 */
	if (rp->r_mtime > t) {
		if (!CACHE_VALID(rp, vap->va_mtime, vap->va_size))
			PURGE_ATTRCACHE_LOCKED(rp);
		mutex_exit(&rp->r_statelock);
		return;
	}

	/*
	 * Decide what changed.  While RWRITEATTR is set, this client
	 * is the only writer, so apparent mtime/ctime changes are its
	 * own doing and need no invalidation; just note the state.
	 */
	if (!(rp->r_flags & RWRITEATTR)) {
		if (!CACHE_VALID(rp, vap->va_mtime, vap->va_size))
			mtime_changed = 1;

		if (rp->r_attr.va_ctime.tv_sec != vap->va_ctime.tv_sec ||
		    rp->r_attr.va_ctime.tv_nsec != vap->va_ctime.tv_nsec)
			ctime_changed = 1;
	} else {
		writeattr_set = B_TRUE;
	}

	preattr_rsize = rp->r_size;

	nfs_attrcache_va(vp, vap);

	/*
	 * If a regular file's size changed across the update while we
	 * were in write-attr mode, force a purge; RINCACHEPURGE marks
	 * the purge in progress so readers can wait for it.
	 */
	if ((vp->v_type == VREG) && (rp->r_size != preattr_rsize)) {
		if (writeattr_set)
			mtime_changed = 1;

		if (mtime_changed && !(rp->r_flags & RINCACHEPURGE)) {
			rp->r_flags |= RINCACHEPURGE;
			cachepurge_set = B_TRUE;
		}
	}

	if (!mtime_changed && !ctime_changed) {
		mutex_exit(&rp->r_statelock);
		return;
	}

	/* Take the pseudo lock for the blocking invalidation work. */
	rp->r_serial = curthread;

	mutex_exit(&rp->r_statelock);

	/* mtime change => file data may have changed: purge data caches. */
	if (mtime_changed)
		nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);

	if ((rp->r_flags & RINCACHEPURGE) && cachepurge_set) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags &= ~RINCACHEPURGE;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
		cachepurge_set = B_FALSE;
	}

	/* ctime change => permissions/ACLs may have changed. */
	if (ctime_changed) {
		(void) nfs_access_purge_rp(rp);
		if (rp->r_secattr != NULL) {
			mutex_enter(&rp->r_statelock);
			vsp = rp->r_secattr;
			rp->r_secattr = NULL;
			mutex_exit(&rp->r_statelock);
			if (vsp != NULL)
				nfs_acl_free(vsp);
		}
	}

	/* Release the pseudo lock and wake any waiters. */
	if (!was_serial) {
		mutex_enter(&rp->r_statelock);
		rp->r_serial = NULL;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
	}
}
/*
 * Like nfs_attr_cache(), but driven by NFS v3 weak cache consistency
 * data: bvap holds the pre-operation attributes (ctime/mtime/size
 * only) used for the cache validity checks, while avap holds the
 * post-operation attributes that actually get cached.
 *
 * Serialization is identical to nfs_attr_cache(): installation is
 * atomic under r_statelock, and the blocking invalidation is guarded
 * by the r_serial pseudo lock.
 */
static void
nfs3_attr_cache(vnode_t *vp, vattr_t *bvap, vattr_t *avap, hrtime_t t,
    cred_t *cr)
{
	rnode_t *rp;
	int mtime_changed = 0;
	int ctime_changed = 0;
	vsecattr_t *vsp;
	int was_serial;
	len_t preattr_rsize;
	boolean_t writeattr_set = B_FALSE;
	boolean_t cachepurge_set = B_FALSE;

	rp = VTOR(vp);

	mutex_enter(&rp->r_statelock);

	/*
	 * Acquire the r_serial pseudo lock unless already held by this
	 * thread; see nfs_attr_cache() for the lwp_nostop rationale.
	 */
	if (rp->r_serial != curthread) {
		klwp_t *lwp = ttolwp(curthread);

		was_serial = 0;
		if (lwp != NULL)
			lwp->lwp_nostop++;
		while (rp->r_serial != NULL) {
			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
				/* Interrupted: give up without caching. */
				mutex_exit(&rp->r_statelock);
				if (lwp != NULL)
					lwp->lwp_nostop--;
				return;
			}
		}
		if (lwp != NULL)
			lwp->lwp_nostop--;
	} else
		was_serial = 1;

	/*
	 * Stale reply: newer attributes were cached while this call
	 * was in flight.  Validate against the post-op attributes and
	 * purge on mismatch, but do not install.
	 */
	if (rp->r_mtime > t) {
		if (!CACHE_VALID(rp, avap->va_mtime, avap->va_size))
			PURGE_ATTRCACHE_LOCKED(rp);
		mutex_exit(&rp->r_statelock);
		return;
	}

	/*
	 * Compare the cached attributes against the PRE-op attributes:
	 * if they match, any change between pre- and post-op was made
	 * by this very operation and the caches are still coherent.
	 */
	if (!(rp->r_flags & RWRITEATTR)) {
		if (!CACHE_VALID(rp, bvap->va_mtime, bvap->va_size))
			mtime_changed = 1;

		if (rp->r_attr.va_ctime.tv_sec != bvap->va_ctime.tv_sec ||
		    rp->r_attr.va_ctime.tv_nsec != bvap->va_ctime.tv_nsec)
			ctime_changed = 1;
	} else {
		writeattr_set = B_TRUE;
	}

	preattr_rsize = rp->r_size;

	nfs_attrcache_va(vp, avap);

	/*
	 * Size change on a regular file while in write-attr mode
	 * forces a purge; RINCACHEPURGE marks it in progress.
	 */
	if ((vp->v_type == VREG) && (rp->r_size != preattr_rsize)) {
		if (writeattr_set)
			mtime_changed = 1;

		if (mtime_changed && !(rp->r_flags & RINCACHEPURGE)) {
			rp->r_flags |= RINCACHEPURGE;
			cachepurge_set = B_TRUE;
		}
	}

	if (!mtime_changed && !ctime_changed) {
		mutex_exit(&rp->r_statelock);
		return;
	}

	/* Take the pseudo lock for the blocking invalidation work. */
	rp->r_serial = curthread;

	mutex_exit(&rp->r_statelock);

	/* mtime change => file data may have changed: purge data caches. */
	if (mtime_changed)
		nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);

	if ((rp->r_flags & RINCACHEPURGE) && cachepurge_set) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags &= ~RINCACHEPURGE;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
		cachepurge_set = B_FALSE;
	}

	/* ctime change => permissions/ACLs may have changed. */
	if (ctime_changed) {
		(void) nfs_access_purge_rp(rp);
		if (rp->r_secattr != NULL) {
			mutex_enter(&rp->r_statelock);
			vsp = rp->r_secattr;
			rp->r_secattr = NULL;
			mutex_exit(&rp->r_statelock);
			if (vsp != NULL)
				nfs_acl_free(vsp);
		}
	}

	/* Release the pseudo lock and wake any waiters. */
	if (!was_serial) {
		mutex_enter(&rp->r_statelock);
		rp->r_serial = NULL;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
	}
}
/*
 * Fill the attribute cache for a vnode from the given vattr and set
 * its expiration time.
 *
 * The timeout (delta) is scaled to how recently the file changed:
 * recently-modified files get a short timeout, quiet files a longer
 * one, clamped to the mount's acregmin/max or acdirmin/max range.
 * Caching is disabled entirely (delta = 0) for "noac" mounts or
 * VNOCACHE vnodes.
 *
 * The caller must hold r_statelock.
 */
void
nfs_attrcache_va(vnode_t *vp, struct vattr *va)
{
	rnode_t *rp;
	mntinfo_t *mi;
	hrtime_t delta;
	hrtime_t now;

	rp = VTOR(vp);

	ASSERT(MUTEX_HELD(&rp->r_statelock));

	now = gethrtime();

	mi = VTOMI(vp);

	/*
	 * Note the modification timestamp BEFORE computing delta below,
	 * so a just-detected change yields the minimum cache time.
	 */
	if (va->va_mtime.tv_sec != rp->r_attr.va_mtime.tv_sec ||
	    va->va_mtime.tv_nsec != rp->r_attr.va_mtime.tv_nsec ||
	    va->va_size != rp->r_attr.va_size)
		rp->r_mtime = now;

	if ((mi->mi_flags & MI_NOAC) || (vp->v_flag & VNOCACHE))
		delta = 0;
	else {
		/*
		 * Timeout grows with time-since-last-change, clamped
		 * to the per-mount dir or reg file min/max bounds.
		 */
		delta = now - rp->r_mtime;
		if (vp->v_type == VDIR) {
			if (delta < mi->mi_acdirmin)
				delta = mi->mi_acdirmin;
			else if (delta > mi->mi_acdirmax)
				delta = mi->mi_acdirmax;
		} else {
			if (delta < mi->mi_acregmin)
				delta = mi->mi_acregmin;
			else if (delta > mi->mi_acregmax)
				delta = mi->mi_acregmax;
		}
	}
	rp->r_attrtime = now + delta;
	rp->r_attr = *va;
	/*
	 * Only track the server's notion of the size if there are no
	 * cached pages, or if no writes are outstanding (not RDIRTY
	 * and r_count == 0) — otherwise the client's view wins.
	 */
	if (rp->r_size != va->va_size &&
	    (!vn_has_cached_data(vp) ||
	    (!(rp->r_flags & RDIRTY) && rp->r_count == 0)))
		rp->r_size = va->va_size;
	nfs_setswaplike(vp, va);
	rp->r_flags &= ~RWRITEATTR;
}
/*
 * Try to satisfy a getattr request from the attribute cache.
 *
 * On a cache hit, copy the cached attributes into *vap (masked to the
 * attributes the caller asked for) and return 0.  Return 1 on a miss
 * so the caller knows to go over the wire.
 */
static int
nfs_getattr_cache(vnode_t *vp, struct vattr *vap)
{
	rnode_t *rp = VTOR(vp);
	uint_t wanted = vap->va_mask;
	int hit = 0;

	mutex_enter(&rp->r_statelock);
	if (ATTRCACHE_VALID(vp)) {
		/* Cache hit: hand back only the requested attributes. */
		*vap = rp->r_attr;
		vap->va_mask &= wanted;
		hit = 1;
	}
	mutex_exit(&rp->r_statelock);

	return (hit ? 0 : 1);
}
/*
 * Fetch attributes for a vnode from the NFS v2 server and cache them.
 * Returns 0 on success or an errno value.
 */
int
nfs_getattr_otw(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
	int error;
	struct nfsattrstat ns;
	int douprintf;
	mntinfo_t *mi;
	failinfo_t fi;
	hrtime_t t;

	mi = VTOMI(vp);

	/* Failover info so the RPC layer can retry on another server. */
	fi.vp = vp;
	fi.fhp = NULL;		/* no need to update, filehandle not copied */
	fi.copyproc = nfscopyfh;
	fi.lookupproc = nfslookup;
	fi.xattrdirproc = acl_getxattrdir2;

	if (mi->mi_flags & MI_ACL) {
		error = acl_getattr2_otw(vp, vap, cr);
		/*
		 * Re-check MI_ACL: presumably acl_getattr2_otw() can
		 * clear the flag (e.g. server lacks the ACL protocol),
		 * in which case we fall through to a plain GETATTR.
		 * NOTE(review): confirm against acl_getattr2_otw().
		 */
		if (mi->mi_flags & MI_ACL)
			return (error);
	}

	douprintf = 1;

	/* Timestamp before the call, for attribute-cache staleness checks. */
	t = gethrtime();

	error = rfs2call(mi, RFS_GETATTR,
	    xdr_fhandle, (caddr_t)VTOFH(vp),
	    xdr_attrstat, (caddr_t)&ns, cr,
	    &douprintf, &ns.ns_status, 0, &fi);

	if (!error) {
		error = geterrno(ns.ns_status);
		if (!error)
			error = nfs_cache_fattr(vp, &ns.ns_attr, vap, t, cr);
		else {
			/* e.g. ESTALE: invalidate the stale filehandle. */
			PURGE_STALE_FH(error, vp, cr);
		}
	}

	return (error);
}
/*
 * Return attributes for an NFS v2 vnode, from the cache when valid,
 * otherwise from the server.  The returned size is always overridden
 * with the client's view of the file size (r_size), which may be
 * ahead of the server's while writes are outstanding.
 */
int
nfsgetattr(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
	rnode_t *rp = VTOR(vp);
	int error;

	error = nfs_getattr_cache(vp, vap);
	if (error != 0)
		error = nfs_getattr_otw(vp, vap, cr);

	/* Always report the client's notion of the file size. */
	mutex_enter(&rp->r_statelock);
	vap->va_size = rp->r_size;
	mutex_exit(&rp->r_statelock);

	return (error);
}
/*
 * Fetch attributes for a vnode from the NFS v3 server and cache them.
 * The XDR decode routine (xdr_GETATTR3vres) converts the wire
 * attributes directly into *vap via res.fres; fres.status carries
 * any conversion error.  Returns 0 on success or an errno value.
 */
int
nfs3_getattr_otw(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
	int error;
	GETATTR3args args;
	GETATTR3vres res;
	int douprintf;
	failinfo_t fi;
	hrtime_t t;

	args.object = *VTOFH3(vp);

	/* Failover info so the RPC layer can retry on another server. */
	fi.vp = vp;
	fi.fhp = (caddr_t)&args.object;
	fi.copyproc = nfs3copyfh;
	fi.lookupproc = nfs3lookup;
	fi.xattrdirproc = acl_getxattrdir3;
	res.fres.vp = vp;
	res.fres.vap = vap;

	douprintf = 1;

	/* Timestamp before the call, for attribute-cache staleness checks. */
	t = gethrtime();

	error = rfs3call(VTOMI(vp), NFSPROC3_GETATTR,
	    xdr_nfs_fh3, (caddr_t)&args,
	    xdr_GETATTR3vres, (caddr_t)&res, cr,
	    &douprintf, &res.status, 0, &fi);

	if (error)
		return (error);

	error = geterrno3(res.status);
	if (error) {
		/* e.g. ESTALE: invalidate the stale filehandle. */
		PURGE_STALE_FH(error, vp, cr);
		return (error);
	}

	/*
	 * Catch status codes that indicate fattr3 to vattr translation
	 * failure (set during XDR decode).
	 */
	if (res.fres.status)
		return (res.fres.status);

	nfs_attr_cache(vp, vap, t, cr);
	return (0);
}
/*
 * Return attributes for an NFS v3 vnode, from the cache when valid,
 * otherwise from the server.  The returned size is always overridden
 * with the client's view of the file size (r_size), which may be
 * ahead of the server's while writes are outstanding.
 */
int
nfs3getattr(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
	rnode_t *rp = VTOR(vp);
	int error;

	error = nfs_getattr_cache(vp, vap);
	if (error != 0)
		error = nfs3_getattr_otw(vp, vap, cr);

	/* Always report the client's notion of the file size. */
	mutex_enter(&rp->r_statelock);
	vap->va_size = rp->r_size;
	mutex_exit(&rp->r_statelock);

	return (error);
}
/*
 * Translation table: NFS v2 file type (nfsftype, NFNON..NFSOC) to
 * vnode type.  Indexed by na_type in nattr_to_vattr().
 */
vtype_t nf_to_vt[] = {
	VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK
};
/*
 * Convert NFS v2 over-the-wire attributes (struct nfsfattr) into a
 * vattr.  Returns 0 on success, or EOVERFLOW on 32-bit kernels when
 * a timestamp does not fit in time_t.
 */
int
nattr_to_vattr(vnode_t *vp, struct nfsfattr *na, struct vattr *vap)
{
#ifndef _LP64
	/* 32-bit kernel: reject timestamps that overflow time_t. */
	if (!NFS2_FATTR_TIME_OK(na))
		return (EOVERFLOW);
#endif

	vap->va_mask = AT_ALL;

	/* Out-of-range wire type maps to VBAD rather than indexing OOB. */
	if (na->na_type < NFNON || na->na_type > NFSOC)
		vap->va_type = VBAD;
	else
		vap->va_type = nf_to_vt[na->na_type];
	vap->va_mode = na->na_mode;
	/* Map the wire "nobody" ids to the local nobody ids. */
	vap->va_uid = (na->na_uid == NFS_UID_NOBODY) ? UID_NOBODY : na->na_uid;
	vap->va_gid = (na->na_gid == NFS_GID_NOBODY) ? GID_NOBODY : na->na_gid;
	vap->va_fsid = vp->v_vfsp->vfs_dev;
	vap->va_nodeid = na->na_nodeid;
	vap->va_nlink = na->na_nlink;
	vap->va_size = na->na_size;
	/* NFS v2 times are sec/usec; vattr wants sec/nsec. */
	NFS_TIME_T_CONVERT(vap->va_atime.tv_sec, na->na_atime.tv_sec);
	vap->va_atime.tv_nsec = (uint32_t)(na->na_atime.tv_usec * 1000);
	NFS_TIME_T_CONVERT(vap->va_mtime.tv_sec, na->na_mtime.tv_sec);
	vap->va_mtime.tv_nsec = (uint32_t)(na->na_mtime.tv_usec * 1000);
	NFS_TIME_T_CONVERT(vap->va_ctime.tv_sec, na->na_ctime.tv_sec);
	vap->va_ctime.tv_nsec = (uint32_t)(na->na_ctime.tv_usec * 1000);
	/*
	 * Expand the compressed 16-bit wire device number, or the
	 * full 32-bit form if the high bits are in use.
	 */
	if ((na->na_rdev & 0xffff0000) == 0)
		vap->va_rdev = nfsv2_expdev(na->na_rdev);
	else
		vap->va_rdev = expldev(na->na_rdev);
	vap->va_nblocks = na->na_blocks;
	switch (na->na_type) {
	case NFBLK:
		vap->va_blksize = DEV_BSIZE;
		break;

	case NFCHR:
		vap->va_blksize = MAXBSIZE;
		break;

	case NFSOC:
	default:
		vap->va_blksize = na->na_blocksize;
		break;
	}
	/*
	 * NFS v2 has no FIFO type on the wire: servers encode FIFOs
	 * as a special device node, detected here and fixed up.
	 */
	if (NA_ISFIFO(na)) {
		vap->va_type = VFIFO;
		vap->va_mode = (vap->va_mode & ~S_IFMT) | S_IFIFO;
		vap->va_rdev = 0;
		vap->va_blksize = na->na_blocksize;
	}
	vap->va_seq = 0;
	return (0);
}
/*
 * Translation table: NFS v3 file type (NF3REG..NF3FIFO, 1-based) to
 * vnode type.  Index 0 is unused on the wire and maps to VBAD.
 */
vtype_t nf3_to_vt[] = {
	VBAD, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO
};
/*
 * Convert NFS v3 over-the-wire attributes (fattr3) into a vattr.
 * Returns 0 on success, EOVERFLOW on 32-bit kernels when a timestamp
 * does not fit in time_t, or EFBIG when the file size cannot be
 * represented locally.
 */
int
fattr3_to_vattr(vnode_t *vp, fattr3 *na, struct vattr *vap)
{
#ifndef _LP64
	/* 32-bit kernel: reject timestamps that overflow time_t. */
	if (!NFS3_FATTR_TIME_OK(na))
		return (EOVERFLOW);
#endif
	if (!NFS3_SIZE_OK(na->size))
		/* file too big */
		return (EFBIG);

	vap->va_mask = AT_ALL;

	/* Out-of-range wire type maps to VBAD rather than indexing OOB. */
	if (na->type < NF3REG || na->type > NF3FIFO)
		vap->va_type = VBAD;
	else
		vap->va_type = nf3_to_vt[na->type];
	vap->va_mode = na->mode;
	/* Map the wire "nobody" ids to the local nobody ids. */
	vap->va_uid = (na->uid == NFS_UID_NOBODY) ? UID_NOBODY : (uid_t)na->uid;
	vap->va_gid = (na->gid == NFS_GID_NOBODY) ? GID_NOBODY : (gid_t)na->gid;
	vap->va_fsid = vp->v_vfsp->vfs_dev;
	vap->va_nodeid = na->fileid;
	vap->va_nlink = na->nlink;
	vap->va_size = na->size;

	/*
	 * NFS v3 times are already sec/nsec; only the seconds may need
	 * a width conversion.
	 */
	NFS_TIME_T_CONVERT(vap->va_atime.tv_sec, na->atime.seconds);
	vap->va_atime.tv_nsec = (uint32_t)na->atime.nseconds;
	NFS_TIME_T_CONVERT(vap->va_mtime.tv_sec, na->mtime.seconds);
	vap->va_mtime.tv_nsec = (uint32_t)na->mtime.nseconds;
	NFS_TIME_T_CONVERT(vap->va_ctime.tv_sec, na->ctime.seconds);
	vap->va_ctime.tv_nsec = (uint32_t)na->ctime.nseconds;

	/*
	 * Per-type fixups: device nodes carry rdev from the wire
	 * specdata pair; only regular files/dirs/symlinks report a
	 * block count (rounded up from bytes "used").
	 */
	switch (na->type) {
	case NF3BLK:
		vap->va_rdev = makedevice(na->rdev.specdata1,
		    na->rdev.specdata2);
		vap->va_blksize = DEV_BSIZE;
		vap->va_nblocks = 0;
		break;
	case NF3CHR:
		vap->va_rdev = makedevice(na->rdev.specdata1,
		    na->rdev.specdata2);
		vap->va_blksize = MAXBSIZE;
		vap->va_nblocks = 0;
		break;
	case NF3REG:
	case NF3DIR:
	case NF3LNK:
		vap->va_rdev = 0;
		vap->va_blksize = MAXBSIZE;
		vap->va_nblocks = (u_longlong_t)
		    ((na->used + (size3)DEV_BSIZE - (size3)1) /
		    (size3)DEV_BSIZE);
		break;
	case NF3SOCK:
	case NF3FIFO:
	default:
		vap->va_rdev = 0;
		vap->va_blksize = MAXBSIZE;
		vap->va_nblocks = 0;
		break;
	}
	vap->va_seq = 0;
	return (0);
}
/*
 * Idle timeout (in ticks) before an async worker thread exits; -1
 * means "not yet initialized" and is replaced with NFS_ASYNC_TIMEOUT
 * on first use in nfs_async_common_start().
 */
int nfs_async_timeout = -1;

/* Worker-thread entry points for the two async request queues. */
static void	nfs_async_start(struct vfs *);
static void	nfs_async_pgops_start(struct vfs *);
static void	nfs_async_common_start(struct vfs *, int);
/*
 * Release all resources attached to an async request structure once
 * the request has been processed: drop the rnode reference counts
 * taken at queueing time (except for NFS_INACTIVE requests, which
 * hold none), release the vnode and credential holds, and free the
 * structure itself.
 */
static void
free_async_args(struct nfs_async_reqs *args)
{
	if (args->a_io != NFS_INACTIVE) {
		rnode_t *rp = VTOR(args->a_vp);

		mutex_enter(&rp->r_statelock);
		rp->r_count--;
		/* Page-out style requests also bumped the async-write count. */
		if (args->a_io == NFS_PUTAPAGE || args->a_io == NFS_PAGEIO)
			rp->r_awcount--;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
		VN_RELE(args->a_vp);
	}
	crfree(args->a_cred);
	kmem_free(args, sizeof (*args));
}
/*
 * Async manager thread for a mount.  It watches mi_async_req_count
 * and, for each pending request, either spawns a new worker thread
 * (up to the per-queue limits) or wakes an existing one.  It runs
 * until MI_ASYNC_MGR_STOP is set, then announces its exit on
 * mi_async_cv and releases its vfs hold.
 */
void
nfs_async_manager(vfs_t *vfsp)
{
	callb_cpr_t cprinfo;
	mntinfo_t *mi;
	uint_t max_threads;

	mi = VFTOMI(vfsp);

	CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
	    "nfs_async_manager");

	mutex_enter(&mi->mi_async_lock);
	/*
	 * Snapshot a floor of at least one worker thread, so requests
	 * still drain even if mi_max_threads is later dropped to 0
	 * (e.g. by nfs_async_stop()).
	 */
	max_threads = MAX(mi->mi_max_threads, 1);

	for (;;) {
		while (mi->mi_async_req_count > 0) {
			/*
			 * Spawn a worker if below the limit for the main
			 * queue; otherwise try the smaller pgops queue.
			 * Thread creation drops mi_async_lock (zthread_create
			 * can block), so the counts are bumped first.
			 */
			if (mi->mi_threads[NFS_ASYNC_QUEUE] <
			    MAX(mi->mi_max_threads, max_threads)) {
				mi->mi_threads[NFS_ASYNC_QUEUE]++;
				mutex_exit(&mi->mi_async_lock);
				VFS_HOLD(vfsp);	/* hold for new thread */
				(void) zthread_create(NULL, 0, nfs_async_start,
				    vfsp, 0, minclsyspri);
				mutex_enter(&mi->mi_async_lock);
			} else if (mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE] <
			    NUM_ASYNC_PGOPS_THREADS) {
				mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE]++;
				mutex_exit(&mi->mi_async_lock);
				VFS_HOLD(vfsp);	/* hold for new thread */
				(void) zthread_create(NULL, 0,
				    nfs_async_pgops_start, vfsp, 0,
				    minclsyspri);
				mutex_enter(&mi->mi_async_lock);
			}
			NFS_WAKE_ASYNC_WORKER(mi->mi_async_work_cv);
			ASSERT(mi->mi_async_req_count != 0);
			mi->mi_async_req_count--;
		}

		mutex_enter(&mi->mi_lock);
		if (mi->mi_flags & MI_ASYNC_MGR_STOP) {
			mutex_exit(&mi->mi_lock);
			break;
		}
		mutex_exit(&mi->mi_lock);

		/* Sleep (CPR-safe) until new requests are queued. */
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		cv_wait(&mi->mi_async_reqs_cv, &mi->mi_async_lock);
		CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
	}
	/*
	 * Let any waiters (nfs_async_manager_stop) know the manager is
	 * gone.  CALLB_CPR_EXIT drops mi_async_lock.
	 */
	mi->mi_manager_thread = NULL;
	cv_broadcast(&mi->mi_async_cv);
	CALLB_CPR_EXIT(&cprinfo);
	VFS_RELE(vfsp);	/* release thread's hold */
	zthread_exit();
}
/*
 * Signal the async manager thread for this mount to exit and wait
 * until it has done so.  Lock order: mi_async_lock before mi_lock.
 */
void
nfs_async_manager_stop(vfs_t *vfsp)
{
	mntinfo_t *mi = VFTOMI(vfsp);

	mutex_enter(&mi->mi_async_lock);
	mutex_enter(&mi->mi_lock);
	mi->mi_flags |= MI_ASYNC_MGR_STOP;
	mutex_exit(&mi->mi_lock);
	cv_broadcast(&mi->mi_async_reqs_cv);
	/* The manager clears mi_manager_thread and signals on exit. */
	while (mi->mi_manager_thread != NULL)
		cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
	mutex_exit(&mi->mi_async_lock);
}
/*
 * Queue an asynchronous readahead request for the given block offset.
 * Returns 0 if the request was queued, -1 if readahead could not be
 * arranged (caller simply skips the readahead — it is best-effort).
 */
int
nfs_async_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr,
    struct seg *seg, cred_t *cr, void (*readahead)(vnode_t *,
    u_offset_t, caddr_t, struct seg *, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	rp = VTOR(vp);
	ASSERT(rp->r_freef == NULL);

	mi = VTOMI(vp);

	/* Don't read ahead past the end of the mapped segment. */
	if (addr >= seg->s_base + seg->s_size)
		return (-1);

	/* Best-effort: no sleeping for memory. */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		return (-1);

	/*
	 * A readahead must not proceed while a lock-serialized
	 * operation is in progress; tryenter (no blocking) keeps
	 * readahead cheap.
	 */
	if (!nfs_rw_tryenter(&rp->r_lkserlock, RW_READER)) {
		kmem_free(args, sizeof (*args));
		return (-1);
	}
	/* Bump r_count while holding the lock, then release it. */
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	mutex_exit(&rp->r_statelock);
	nfs_rw_exit(&rp->r_lkserlock);

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credential for the worker thread. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_READ_AHEAD;
	args->a_nfs_readahead = readahead;
	args->a_nfs_blkoff = blkoff;
	args->a_nfs_seg = seg;
	args->a_nfs_addr = addr;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * mi_max_threads == 0 means async I/O was disabled (e.g. the
	 * mount is being torn down); fall back to no readahead.
	 */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/* Append to the tail of the NFS_READ_AHEAD request list. */
	if (mi->mi_async_reqs[NFS_READ_AHEAD] == NULL) {
		mi->mi_async_reqs[NFS_READ_AHEAD] = args;
		mi->mi_async_tail[NFS_READ_AHEAD] = args;
	} else {
		mi->mi_async_tail[NFS_READ_AHEAD]->a_next = args;
		mi->mi_async_tail[NFS_READ_AHEAD] = args;
	}

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	/* Wake the async manager to dispatch the request. */
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return (0);

noasync:
	/* Undo the r_count bump and the holds taken above. */
	mutex_enter(&rp->r_statelock);
	rp->r_count--;
	cv_broadcast(&rp->r_cv);
	mutex_exit(&rp->r_statelock);
	VN_RELE(vp);
	crfree(cr);
	kmem_free(args, sizeof (*args));
	return (-1);
}
/*
 * Queue an asynchronous putapage (page write-back) request.  If the
 * request cannot be queued, the operation is performed synchronously
 * by the caller's putapage routine — except for the pageout/fsflush
 * daemons and foreign-zone callers, where the pages are failed with
 * B_ERROR instead (those threads must never block on NFS I/O).
 */
int
nfs_async_putapage(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
    int flags, cred_t *cr, int (*putapage)(vnode_t *, page_t *,
    u_offset_t, size_t, int, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	ASSERT(flags & B_ASYNC);
	ASSERT(vp->v_vfsp != NULL);

	rp = VTOR(vp);
	ASSERT(rp->r_count > 0);

	mi = VTOMI(vp);

	/* Best-effort allocation; fall back to synchronous on failure. */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credential for the worker thread. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_PUTAPAGE;
	args->a_nfs_putapage = putapage;
	args->a_nfs_pp = pp;
	args->a_nfs_off = off;
	args->a_nfs_len = (uint_t)len;
	args->a_nfs_flags = flags;

	mutex_enter(&mi->mi_async_lock);

	/* Async I/O disabled (mount teardown): do it synchronously. */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/* Append to the tail of the NFS_PUTAPAGE request list. */
	if (mi->mi_async_reqs[NFS_PUTAPAGE] == NULL) {
		mi->mi_async_reqs[NFS_PUTAPAGE] = args;
		mi->mi_async_tail[NFS_PUTAPAGE] = args;
	} else {
		mi->mi_async_tail[NFS_PUTAPAGE]->a_next = args;
		mi->mi_async_tail[NFS_PUTAPAGE] = args;
	}

	/* Account the outstanding async write on the rnode. */
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	rp->r_awcount++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	/* Wake the async manager to dispatch the request. */
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return (0);

noasync:
	if (args != NULL) {
		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
	}

	/*
	 * pageout/fsflush must never block on NFS: fail the pages with
	 * B_ERROR (dropping B_INVAL|B_FORCE so they stay reclaimable).
	 */
	if (curproc == proc_pageout || curproc == proc_fsflush) {
		if (flags & B_FORCE)
			flags &= ~(B_INVAL | B_FORCE);
		pvn_write_done(pp, flags | B_ERROR);
		return (0);
	}
	/* Cross-zone callers may not touch this mount's server. */
	if (nfs_zone() != mi->mi_zone) {
		pvn_write_done(pp, flags | B_ERROR);
		return (EPERM);
	}
	/* Ordinary caller: just do the putapage synchronously. */
	return ((*putapage)(vp, pp, off, len, flags, cr));
}
/*
 * Queue an asynchronous pageio request.  Fallback behavior when the
 * request cannot be queued mirrors nfs_async_putapage(), with the
 * extra case that read requests are failed via pvn_read_done().
 */
int
nfs_async_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
    int flags, cred_t *cr, int (*pageio)(vnode_t *, page_t *, u_offset_t,
    size_t, int, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	ASSERT(flags & B_ASYNC);
	ASSERT(vp->v_vfsp != NULL);

	rp = VTOR(vp);
	ASSERT(rp->r_count > 0);

	mi = VTOMI(vp);

	/* Best-effort allocation; fall back to synchronous on failure. */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credential for the worker thread. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_PAGEIO;
	args->a_nfs_pageio = pageio;
	args->a_nfs_pp = pp;
	args->a_nfs_off = io_off;
	args->a_nfs_len = (uint_t)io_len;
	args->a_nfs_flags = flags;

	mutex_enter(&mi->mi_async_lock);

	/* Async I/O disabled (mount teardown): do it synchronously. */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/* Append to the tail of the NFS_PAGEIO request list. */
	if (mi->mi_async_reqs[NFS_PAGEIO] == NULL) {
		mi->mi_async_reqs[NFS_PAGEIO] = args;
		mi->mi_async_tail[NFS_PAGEIO] = args;
	} else {
		mi->mi_async_tail[NFS_PAGEIO]->a_next = args;
		mi->mi_async_tail[NFS_PAGEIO] = args;
	}

	/* Account the outstanding async operation on the rnode. */
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	rp->r_awcount++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	/* Wake the async manager to dispatch the request. */
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return (0);

noasync:
	if (args != NULL) {
		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
	}

	/*
	 * A read pageio that cannot be queued is simply failed; the
	 * faulting path will retry synchronously.
	 */
	if (flags & B_READ) {
		pvn_read_done(pp, flags | B_ERROR);
		return (0);
	}
	/*
	 * pageout/fsflush must never block on NFS: fail the pages with
	 * B_ERROR (dropping B_INVAL|B_FORCE so they stay reclaimable).
	 */
	if (curproc == proc_pageout || curproc == proc_fsflush) {
		if (flags & B_FORCE)
			flags &= ~(B_INVAL | B_FORCE);
		pvn_write_done(pp, flags | B_ERROR);
		return (0);
	}
	/* Cross-zone callers may not touch this mount's server. */
	if (nfs_zone() != mi->mi_zone) {
		pvn_write_done(pp, flags | B_ERROR);
		return (EPERM);
	}
	/* Ordinary caller: just do the pageio synchronously. */
	return ((*pageio)(vp, pp, io_off, io_len, flags, cr));
}
/*
 * Queue an asynchronous readdir(-ahead) request to fill the given
 * rddir_cache entry.  If the request cannot be queued, the cache
 * entry is reset to "request again" state (RDDIRREQ), any waiters
 * are woken, and the entry's reference is released — the readdir
 * will be retried synchronously by the waiter.
 */
void
nfs_async_readdir(vnode_t *vp, rddir_cache *rdc, cred_t *cr,
    int (*readdir)(vnode_t *, rddir_cache *, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	rp = VTOR(vp);
	ASSERT(rp->r_freef == NULL);

	mi = VTOMI(vp);

	/* Best-effort allocation; fall back to giving up the readahead. */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credential for the worker thread. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_READDIR;
	args->a_nfs_readdir = readdir;
	args->a_nfs_rdc = rdc;

	mutex_enter(&mi->mi_async_lock);

	/* Async I/O disabled (mount teardown): give up. */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/* Append to the tail of the NFS_READDIR request list. */
	if (mi->mi_async_reqs[NFS_READDIR] == NULL) {
		mi->mi_async_reqs[NFS_READDIR] = args;
		mi->mi_async_tail[NFS_READDIR] = args;
	} else {
		mi->mi_async_tail[NFS_READDIR]->a_next = args;
		mi->mi_async_tail[NFS_READDIR] = args;
	}

	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	/* Wake the async manager to dispatch the request. */
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return;

noasync:
	if (args != NULL) {
		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
	}

	/*
	 * Reset the cache entry so the next reader re-issues the
	 * request, and wake anyone already waiting on it.
	 */
	rdc->entries = NULL;
	mutex_enter(&rp->r_statelock);
	ASSERT(rdc->flags & RDDIR);
	rdc->flags &= ~RDDIR;
	rdc->flags |= RDDIRREQ;
	if (rdc->flags & RDDIRWAIT) {
		rdc->flags &= ~RDDIRWAIT;
		cv_broadcast(&rdc->cv);
	}
	mutex_exit(&rp->r_statelock);
	rddir_cache_rele(rdc);
}
/*
 * Queue an asynchronous COMMIT for the given list of pages.  If the
 * request cannot be queued, the commit is performed synchronously —
 * except for pageout/fsflush and foreign-zone callers, where each
 * page is instead marked C_COMMIT (still needing commit) and
 * unlocked, deferring the commit to a later, safer context.
 */
void
nfs_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
    cred_t *cr, void (*commit)(vnode_t *, page_t *, offset3, count3, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;
	page_t *pp;

	rp = VTOR(vp);
	mi = VTOMI(vp);

	/* Best-effort allocation; fall back to the noasync path. */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credential for the worker thread. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_COMMIT;
	args->a_nfs_commit = commit;
	args->a_nfs_plist = plist;
	args->a_nfs_offset = offset;
	args->a_nfs_count = count;

	mutex_enter(&mi->mi_async_lock);

	/* Async I/O disabled (mount teardown): handle synchronously. */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/* Append to the tail of the NFS_COMMIT request list. */
	if (mi->mi_async_reqs[NFS_COMMIT] == NULL) {
		mi->mi_async_reqs[NFS_COMMIT] = args;
		mi->mi_async_tail[NFS_COMMIT] = args;
	} else {
		mi->mi_async_tail[NFS_COMMIT]->a_next = args;
		mi->mi_async_tail[NFS_COMMIT] = args;
	}

	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	/* Wake the async manager to dispatch the request. */
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return;

noasync:
	if (args != NULL) {
		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
	}

	/*
	 * pageout/fsflush and cross-zone callers must not issue the
	 * RPC here: re-mark each page as still needing commit and
	 * unlock it, leaving the commit for later.
	 */
	if (curproc == proc_pageout || curproc == proc_fsflush ||
	    nfs_zone() != mi->mi_zone) {
		while (plist != NULL) {
			pp = plist;
			page_sub(&plist, pp);
			pp->p_fsdata = C_COMMIT;
			page_unlock(pp);
		}
		return;
	}
	/* Ordinary caller: just do the commit synchronously. */
	(*commit)(vp, plist, offset, count, cr);
}
/*
 * Queue an asynchronous VOP_INACTIVE for the vnode so the (possibly
 * blocking) inactive processing happens off the caller's thread.
 *
 * If the mount's async manager is gone (forced unmount in progress),
 * the deferred work is done inline instead: any pending "silly
 * rename" unlink state is torn down here — skipping the actual
 * remove RPC, which would be pointless on a dead mount — and the
 * rnode is put directly on the freelist.
 */
void
nfs_async_inactive(vnode_t *vp, cred_t *cr,
    void (*inactive)(vnode_t *, cred_t *, caller_context_t *))
{
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	mi = VTOMI(vp);

	args = kmem_alloc(sizeof (*args), KM_SLEEP);
	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/*
	 * Note: no VN_HOLD here — inactive processing consumes the
	 * vnode's final reference (free_async_args skips the release
	 * for NFS_INACTIVE for the same reason).
	 */
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_INACTIVE;
	args->a_nfs_inactive = inactive;

	/*
	 * Note that we don't check mi->mi_max_threads here: even with
	 * async I/O disabled, inactive requests still queue, because
	 * a manager thread exists to drain them until it too is gone.
	 */
	mutex_enter(&mi->mi_async_lock);
	if (mi->mi_manager_thread == NULL) {
		rnode_t *rp = VTOR(vp);

		/* No manager: handle the teardown inline. */
		mutex_exit(&mi->mi_async_lock);
		crfree(cr);	/* drop our reference */
		kmem_free(args, sizeof (*args));
		/*
		 * Release any unlinked-but-open ("silly rename") state
		 * without issuing the remove RPC.
		 */
		mutex_enter(&rp->r_statelock);
		if (rp->r_unldvp != NULL) {
			vnode_t *unldvp;
			char *unlname;
			cred_t *unlcred;

			unldvp = rp->r_unldvp;
			rp->r_unldvp = NULL;
			unlname = rp->r_unlname;
			rp->r_unlname = NULL;
			unlcred = rp->r_unlcred;
			rp->r_unlcred = NULL;
			mutex_exit(&rp->r_statelock);

			VN_RELE(unldvp);
			kmem_free(unlname, MAXNAMELEN);
			crfree(unlcred);
		} else {
			mutex_exit(&rp->r_statelock);
		}
		rp_addfree(VTOR(vp), cr);
		return;
	}

	/* Append to the tail of the NFS_INACTIVE request list. */
	if (mi->mi_async_reqs[NFS_INACTIVE] == NULL) {
		mi->mi_async_reqs[NFS_INACTIVE] = args;
	} else {
		mi->mi_async_tail[NFS_INACTIVE]->a_next = args;
	}
	mi->mi_async_tail[NFS_INACTIVE] = args;

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	/* Wake the async manager to dispatch the request. */
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
}
/*
 * Entry point for a worker thread servicing the main async queue
 * (readahead, putapage, pageio, readdir, commit, inactive).
 */
static void
nfs_async_start(struct vfs *vfsp)
{
	nfs_async_common_start(vfsp, NFS_ASYNC_QUEUE);
}
/*
 * Entry point for a worker thread servicing the dedicated page-ops
 * async queue (putapage/pageio only).
 */
static void
nfs_async_pgops_start(struct vfs *vfsp)
{
	nfs_async_common_start(vfsp, NFS_ASYNC_PGOPS_QUEUE);
}
/*
 * Main loop for an async worker thread.  Requests are kept in
 * per-type singly linked lists in mi_async_reqs[]; mi_async_curr[]
 * for each queue points at the list currently being serviced, and
 * the scan treats those lists as a circular sequence.  To keep one
 * busy request type from starving the others, at most
 * mi_async_init_clusters requests of one type are run before the
 * scan pointer advances to the next type.
 *
 * The thread exits after nfs_async_timeout ticks of idleness, or as
 * soon as mi_max_threads drops to 0 (mount teardown).
 */
static void
nfs_async_common_start(struct vfs *vfsp, int async_queue)
{
	struct nfs_async_reqs *args;
	mntinfo_t *mi = VFTOMI(vfsp);
	clock_t time_left = 1;
	callb_cpr_t cprinfo;
	int i;
	int async_types;
	kcondvar_t *async_work_cv;

	/* Select the type range and wakeup cv for this queue. */
	if (async_queue == NFS_ASYNC_QUEUE) {
		async_types = NFS_ASYNC_TYPES;
		async_work_cv = &mi->mi_async_work_cv[NFS_ASYNC_QUEUE];
	} else {
		async_types = NFS_ASYNC_PGOPS_TYPES;
		async_work_cv = &mi->mi_async_work_cv[NFS_ASYNC_PGOPS_QUEUE];
	}

	/* Lazily initialize the idle timeout on first worker startup. */
	if (nfs_async_timeout == -1)
		nfs_async_timeout = NFS_ASYNC_TIMEOUT;

	CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr, "nas");

	mutex_enter(&mi->mi_async_lock);
	for (;;) {
		/*
		 * Scan up to async_types lists, starting from the
		 * current position, for a non-empty request list;
		 * wrap the scan pointer when it passes the end.
		 */
		for (i = 0; i < async_types; i++) {
			args = *mi->mi_async_curr[async_queue];
			if (args != NULL)
				break;
			mi->mi_async_curr[async_queue]++;
			if (mi->mi_async_curr[async_queue] ==
			    &mi->mi_async_reqs[async_types]) {
				mi->mi_async_curr[async_queue] =
				    &mi->mi_async_reqs[0];
			}
		}

		if (args == NULL) {
			/*
			 * Nothing to do: exit if async I/O has been
			 * disabled or we already idled out; otherwise
			 * wait (CPR-safe) for work or the timeout.
			 */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			if (mi->mi_max_threads == 0 || time_left <= 0) {
				--mi->mi_threads[async_queue];

				/* Last worker out signals nfs_async_stop. */
				if (mi->mi_threads[NFS_ASYNC_QUEUE] == 0 &&
				    mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE] == 0)
					cv_signal(&mi->mi_async_cv);
				CALLB_CPR_EXIT(&cprinfo);
				VFS_RELE(vfsp);	/* release thread's hold */
				zthread_exit();
				/* NOTREACHED */
			}
			time_left = cv_reltimedwait(async_work_cv,
			    &mi->mi_async_lock, nfs_async_timeout,
			    TR_CLOCK_TICK);

			CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);

			continue;
		}
		time_left = 1;

		/* Unlink the request from the head of its list. */
		*mi->mi_async_curr[async_queue] = args->a_next;

		/*
		 * If this list is empty, or this type's cluster quota
		 * is used up, move on to the next request type.
		 */
		if (*mi->mi_async_curr[async_queue] == NULL ||
		    --mi->mi_async_clusters[args->a_io] == 0) {
			mi->mi_async_clusters[args->a_io] =
			    mi->mi_async_init_clusters;
			mi->mi_async_curr[async_queue]++;
			if (mi->mi_async_curr[async_queue] ==
			    &mi->mi_async_reqs[async_types]) {
				mi->mi_async_curr[async_queue] =
				    &mi->mi_async_reqs[0];
			}
		}

		if (args->a_io != NFS_INACTIVE && mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_waitq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		/* Run the request without holding the queue lock. */
		mutex_exit(&mi->mi_async_lock);

		/*
		 * Readahead is skipped (silently dropped) once async
		 * I/O is being shut down (mi_max_threads == 0); the
		 * other types must still be completed.
		 */
		if (args->a_io == NFS_READ_AHEAD && mi->mi_max_threads > 0) {
			(*args->a_nfs_readahead)(args->a_vp, args->a_nfs_blkoff,
			    args->a_nfs_addr, args->a_nfs_seg,
			    args->a_cred);
		} else if (args->a_io == NFS_PUTAPAGE) {
			(void) (*args->a_nfs_putapage)(args->a_vp,
			    args->a_nfs_pp, args->a_nfs_off,
			    args->a_nfs_len, args->a_nfs_flags,
			    args->a_cred);
		} else if (args->a_io == NFS_PAGEIO) {
			(void) (*args->a_nfs_pageio)(args->a_vp,
			    args->a_nfs_pp, args->a_nfs_off,
			    args->a_nfs_len, args->a_nfs_flags,
			    args->a_cred);
		} else if (args->a_io == NFS_READDIR) {
			(void) ((*args->a_nfs_readdir)(args->a_vp,
			    args->a_nfs_rdc, args->a_cred));
		} else if (args->a_io == NFS_COMMIT) {
			(*args->a_nfs_commit)(args->a_vp, args->a_nfs_plist,
			    args->a_nfs_offset, args->a_nfs_count,
			    args->a_cred);
		} else if (args->a_io == NFS_INACTIVE) {
			(*args->a_nfs_inactive)(args->a_vp, args->a_cred, NULL);
		}

		/* Drop refs/holds taken at queueing time, free args. */
		free_async_args(args);
		mutex_enter(&mi->mi_async_lock);
	}
}
/*
 * Disable async I/O for the mount (mi_max_threads = 0), wake every
 * worker so it notices, and wait until all workers on both queues
 * have exited.
 */
void
nfs_async_stop(struct vfs *vfsp)
{
	mntinfo_t *mi = VFTOMI(vfsp);

	mutex_enter(&mi->mi_async_lock);
	mi->mi_max_threads = 0;
	NFS_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
	/* The last exiting worker signals mi_async_cv. */
	while (mi->mi_threads[NFS_ASYNC_QUEUE] != 0 ||
	    mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE] != 0)
		cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
	mutex_exit(&mi->mi_async_lock);
}
/*
 * Interruptible variant of nfs_async_stop(): disable async I/O and
 * wait for the workers to exit, but give up if a signal arrives.
 * On interruption, the previous mi_max_threads value is restored so
 * async I/O keeps working.
 *
 * Returns non-zero if the wait was interrupted while workers were
 * still running, 0 if all workers exited.
 */
int
nfs_async_stop_sig(struct vfs *vfsp)
{
	mntinfo_t *mi = VFTOMI(vfsp);
	ushort_t omax;
	int rval;

	/*
	 * Disable async I/O, remembering the old limit for rollback.
	 */
	mutex_enter(&mi->mi_async_lock);
	omax = mi->mi_max_threads;
	mi->mi_max_threads = 0;
	NFS_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
	while (mi->mi_threads[NFS_ASYNC_QUEUE] != 0 ||
	    mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE] != 0) {
		if (!cv_wait_sig(&mi->mi_async_cv, &mi->mi_async_lock))
			break;	/* interrupted by a signal */
	}
	rval = (mi->mi_threads[NFS_ASYNC_QUEUE] != 0 ||
	    mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE] != 0);	/* Interrupted */
	if (rval)
		mi->mi_max_threads = omax;	/* roll back the disable */
	mutex_exit(&mi->mi_async_lock);

	return (rval);
}
/*
 * Copy tcount bytes of write data described by uio into the file's
 * page cache.  `base' is the segkmap window address for the target
 * block (unused when vpm_enable is set, in which case vpm_data_copy()
 * is used instead of uiomove()).  `pgcreated' is non-zero when the
 * caller has already created the first page, so this routine must not
 * create (or unlock) it again.  Caller must hold r_rwlock as writer.
 * Returns 0 on success or an errno.
 */
int
writerp(rnode_t *rp, caddr_t base, int tcount, struct uio *uio, int pgcreated)
{
	int pagecreate;
	int n;
	int saved_n;
	caddr_t saved_base;
	u_offset_t offset;
	int error;
	int sm_error;
	vnode_t *vp = RTOV(rp);

	ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
	ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_WRITER));
	if (!vpm_enable) {
		/* base+tcount must stay within a single MAXBSIZE block */
		ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
	}

	/* Move the data one page (at most) per iteration. */
	do {
		offset = uio->uio_loffset;
		pagecreate = 0;

		/* n = # of bytes that fit in this page */
		n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);

		/*
		 * The page can be created without first reading it from
		 * the server if the write is page-aligned and either
		 * covers the whole page or extends to (or past) the
		 * current end of file.
		 */
		mutex_enter(&rp->r_statelock);
		pagecreate = pgcreated ||
		    ((offset & PAGEOFFSET) == 0 &&
		    (n == PAGESIZE || ((offset + n) >= rp->r_size)));
		mutex_exit(&rp->r_statelock);
		if (!vpm_enable && pagecreate) {
			/*
			 * Create the page (unless the caller already
			 * did), and remember the mapping so it can be
			 * soft-unlocked after the copy below.
			 */
			if (pgcreated == 0)
				(void) segmap_pagecreate(segkmap, base,
				    (uint_t)n, 1);
			saved_base = base;
			saved_n = n;
		}

		/*
		 * Flag that a page modification is in progress and
		 * record which block is being modified (r_modaddr), so
		 * concurrent page flushing can tell this block is busy.
		 */
		ASSERT(!(rp->r_flags & RMODINPROGRESS));
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= RMODINPROGRESS;
		rp->r_modaddr = (offset & MAXBMASK);
		mutex_exit(&rp->r_statelock);

		if (vpm_enable) {
			/*
			 * Copy the data via the vnode page mapping
			 * interface instead of the segkmap window.
			 */
			error = vpm_data_copy(vp, offset, n, uio,
			    !pagecreate, NULL, 0, S_WRITE);
		} else {
			error = uiomove(base, n, UIO_WRITE, uio);
		}

		/*
		 * uio_loffset may have advanced even on a partial copy;
		 * grow r_size if the file got longer, and clear the
		 * modification-in-progress state.
		 */
		mutex_enter(&rp->r_statelock);
		if (rp->r_size < uio->uio_loffset)
			rp->r_size = uio->uio_loffset;
		rp->r_flags &= ~RMODINPROGRESS;
		rp->r_flags |= RDIRTY;
		mutex_exit(&rp->r_statelock);

		/* n = # of bytes actually copied this iteration */
		n = (int)(uio->uio_loffset - offset);
		if (!vpm_enable) {
			base += n;
		}
		tcount -= n;

		if (!vpm_enable && pagecreate) {
			/*
			 * If a page was created but the copy ended
			 * mid-page (or copied nothing), zero the rest of
			 * the page so no uninitialized data is exposed.
			 */
			if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
				(void) kzero(base, PAGESIZE - n);

			if (pgcreated) {
				/*
				 * Caller created this page and is
				 * responsible for unlocking it.
				 */
				pgcreated = 0;
			} else {
				/*
				 * Release the soft lock taken by
				 * segmap_pagecreate() above.
				 */
				sm_error = segmap_fault(kas.a_hat, segkmap,
				    saved_base, saved_n,
				    F_SOFTUNLOCK, S_WRITE);
				if (error == 0)
					error = sm_error;
			}
		}
	} while (tcount > 0 && error == 0);

	return (error);
}
/*
 * Flush (and possibly invalidate) the file's pages in the range
 * [off, off + len).  len == 0 means the entire file.  `flags' are the
 * B_* flags handed through to the rnode's putapage routine.  Returns 0
 * or an errno.
 *
 * Fix: `rdirty' was read uninitialized when the async whole-file
 * branch found the file not dirty (or with async writes outstanding);
 * it is now initialized on every path through the len == 0 case.
 */
int
nfs_putpages(vnode_t *vp, u_offset_t off, size_t len, int flags, cred_t *cr)
{
	rnode_t *rp;
	page_t *pp;
	u_offset_t eoff;
	u_offset_t io_off;
	size_t io_len;
	int error;
	int rdirty;
	int err;

	rp = VTOR(vp);
	ASSERT(rp->r_count > 0);

	/* Nothing to do if there are no cached pages. */
	if (!vn_has_cached_data(vp))
		return (0);

	ASSERT(vp->v_type != VCHR);

	/*
	 * When the file system is out of space or has been unmounted,
	 * turn the request into a forced, invalidating flush.
	 */
	if ((rp->r_flags & ROUTOFSPACE) ||
	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
		flags = (flags & ~B_FREE) | B_INVAL | B_FORCE;

	if (len == 0) {
		/*
		 * Whole-file flush.  Clear RDIRTY before pushing pages
		 * and remember whether it was set, so the flag can be
		 * restored should the flush fail without invalidating
		 * the pages.
		 */
		rdirty = 0;
		if (off == (u_offset_t)0 &&
		    !(flags & B_ASYNC) &&
		    (rp->r_flags & RDIRTY)) {
			mutex_enter(&rp->r_statelock);
			rdirty = (rp->r_flags & RDIRTY);
			rp->r_flags &= ~RDIRTY;
			mutex_exit(&rp->r_statelock);
		} else if (flags & B_ASYNC && off == (u_offset_t)0) {
			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & RDIRTY && rp->r_awcount == 0) {
				/*
				 * Only clear RDIRTY for an async flush
				 * when no other async writes are still
				 * in flight (r_awcount == 0).
				 */
				rdirty = (rp->r_flags & RDIRTY);
				rp->r_flags &= ~RDIRTY;
			}
			mutex_exit(&rp->r_statelock);
		}

		/* Push every dirty page at or beyond `off'. */
		error = pvn_vplist_dirty(vp, off, rp->r_putapage,
		    flags, cr);

		/*
		 * If the flush failed and the pages were not being
		 * destroyed, re-mark the file dirty so a later flush
		 * will retry them.
		 */
		if (error && rdirty &&
		    (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) {
			mutex_enter(&rp->r_statelock);
			rp->r_flags |= RDIRTY;
			mutex_exit(&rp->r_statelock);
		}
	} else {
		error = 0;
#ifdef lint
		io_len = 0;
#endif

		/*
		 * Range flush: walk the range one cluster at a time
		 * (putapage reports how much it handled via io_len),
		 * stopping at end of file.  r_statelock is held only
		 * while the loop bound reads r_size.
		 */
		eoff = off + len;
		mutex_enter(&rp->r_statelock);
		for (io_off = off; io_off < eoff && io_off < rp->r_size;
		    io_off += io_len) {
			mutex_exit(&rp->r_statelock);
			/*
			 * For invalidations and synchronous flushes,
			 * block on the page lock; for async non-inval
			 * requests use the nowait lookup so we never
			 * stall behind a busy page.
			 */
			if ((flags & B_INVAL) || !(flags & B_ASYNC)) {
				pp = page_lookup(vp, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL || !pvn_getdirty(pp, flags))
				io_len = PAGESIZE;
			else {
				/*
				 * io_off/io_len come back as the range
				 * actually written; the first error seen
				 * is the one reported.
				 */
				err = (*rp->r_putapage)(vp, pp, &io_off,
				    &io_len, flags, cr);
				if (!error)
					error = err;
			}
			mutex_enter(&rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);
	}

	return (error);
}
/*
 * Invalidate (destroy) all cached pages of the file from offset `off'
 * to end of file.  The RTRUNCATE flag serializes concurrent
 * invalidations on the same rnode.
 */
void
nfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr)
{
	rnode_t *rp;

	rp = VTOR(vp);

	/* Wait for any invalidation already in progress, then claim it. */
	mutex_enter(&rp->r_statelock);
	while (rp->r_flags & RTRUNCATE)
		cv_wait(&rp->r_cv, &rp->r_statelock);
	rp->r_flags |= RTRUNCATE;
	if (off == (u_offset_t)0) {
		/*
		 * Invalidating the whole file: it can no longer be
		 * dirty, and any recorded write error is cleared unless
		 * the rnode has gone stale.
		 */
		rp->r_flags &= ~RDIRTY;
		if (!(rp->r_flags & RSTALE))
			rp->r_error = 0;
	}
	/*
	 * NOTE(review): r_truncaddr records the lowest offset being
	 * invalidated — presumably consulted by the putapage path;
	 * confirm against the rnode users.
	 */
	rp->r_truncaddr = off;
	mutex_exit(&rp->r_statelock);

	/* Destroy the pages in the range (B_INVAL | B_TRUNC). */
	(void) pvn_vplist_dirty(vp, off, rp->r_putapage,
	    B_INVAL | B_TRUNC, cr);

	/* Release the truncation claim and wake any waiters. */
	mutex_enter(&rp->r_statelock);
	rp->r_flags &= ~RTRUNCATE;
	cv_broadcast(&rp->r_cv);
	mutex_exit(&rp->r_statelock);
}
/*
 * When non-zero, write-error messages go to the console only; by
 * default they go to both the console and the system log.
 */
static int nfs_write_error_to_cons_only = 0;

/*
 * Messages below are formatted with a leading "^" (cmn_err's
 * console-only marker).  MSG() keeps that marker when console-only
 * output was requested, and otherwise skips past it so the message
 * also reaches the system log.
 */
#define	MSG(x)	(nfs_write_error_to_cons_only ? (x) : (x) + 1)
/*
 * Print an NFS file handle, one word at a time in hex, to the console
 * and/or system log (subject to the MSG() routing).  Allocation is
 * KM_NOSLEEP since this runs on error paths; if no memory is available
 * the message is silently skipped.
 */
void
nfs_printfhandle(nfs_fhandle *fhp)
{
	int *ip;
	char *buf;
	size_t bufsize;
	char *cp;

	/*
	 * 13 bytes for "(file handle:", up to 1 + 8 bytes (" %x") per
	 * word of handle, and 3 bytes for ")\n" plus the terminator.
	 */
	bufsize = 13 + ((NFS_FHANDLE_LEN / sizeof (*ip)) * (1 + 8)) + 3;
	buf = kmem_alloc(bufsize, KM_NOSLEEP);
	if (buf == NULL)
		return;

	cp = buf;
	(void) strcpy(cp, "(file handle:");
	cp += strlen(cp);

	/* Append each word of the handle in hex. */
	for (ip = (int *)fhp->fh_buf;
	    ip < (int *)&fhp->fh_buf[fhp->fh_len];
	    ip++) {
		(void) sprintf(cp, " %x", *ip);
		cp += strlen(cp);
	}
	(void) strcpy(cp, ")\n");

	zcmn_err(getzoneid(), CE_CONT, MSG("^%s"), buf);

	kmem_free(buf, bufsize);
}
/* Minimum interval, in seconds, between ENOSPC/EDQUOT error messages. */
clock_t nfs_write_error_interval = 5;
/*
 * Print a message describing an NFS write error.  ENOSPC and EDQUOT
 * messages are rate-limited via mi_printftime so a stream of failing
 * writes cannot flood the console; other errors are always printed.
 */
void
nfs_write_error(vnode_t *vp, int error, cred_t *cr)
{
	mntinfo_t *mi;
	clock_t now;

	mi = VTOMI(vp);

	/*
	 * Say nothing if the file system (or its zone) is going away;
	 * a forced unmount can otherwise generate a blizzard of these.
	 */
	if (FS_OR_ZONE_GONE(mi->mi_vfsp))
		return;

	/*
	 * Print the message unless it is a throttled ENOSPC/EDQUOT
	 * repeat (mi_printftime still in the future).
	 */
	now = ddi_get_lbolt();
	if ((error != ENOSPC && error != EDQUOT) ||
	    now - mi->mi_printftime > 0) {
		zoneid_t zoneid = mi->mi_zone->zone_id;

#ifdef DEBUG
		nfs_perror(error, "NFS%ld write error on host %s: %m.\n",
		    mi->mi_vers, VTOR(vp)->r_server->sv_hostname, NULL);
#else
		nfs_perror(error, "NFS write error on host %s: %m.\n",
		    VTOR(vp)->r_server->sv_hostname, NULL);
#endif
		if (error == ENOSPC || error == EDQUOT) {
			/*
			 * Identify the file owner; if the writing
			 * credential differs, print it too.
			 */
			zcmn_err(zoneid, CE_CONT,
			    MSG("^File: userid=%d, groupid=%d\n"),
			    crgetuid(cr), crgetgid(cr));
			if (crgetuid(CRED()) != crgetuid(cr) ||
			    crgetgid(CRED()) != crgetgid(cr)) {
				zcmn_err(zoneid, CE_CONT,
				    MSG("^User: userid=%d, groupid=%d\n"),
				    crgetuid(CRED()), crgetgid(CRED()));
			}
			/* Arm the rate limiter for the next message. */
			mi->mi_printftime = now +
			    nfs_write_error_interval * hz;
		}
		nfs_printfhandle(&VTOR(vp)->r_fh);
#ifdef DEBUG
		if (error == EACCES) {
			zcmn_err(zoneid, CE_CONT,
			    MSG("^nfs_bio: cred is%s kcred\n"),
			    cr == kcred ? "" : " not");
		}
#endif
	}
}
/*
 * Zone-create callback: allocate the per-zone list of NFS mounts and
 * the lock protecting it.  The zoneid argument is unused.
 */
/* ARGSUSED */
static void *
nfs_mi_init(zoneid_t zoneid)
{
	struct mi_globals *mig;

	mig = kmem_alloc(sizeof (*mig), KM_SLEEP);
	mig->mig_destructor_called = B_FALSE;
	mutex_init(&mig->mig_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&mig->mig_list, sizeof (mntinfo_t),
	    offsetof(mntinfo_t, mi_zone_node));
	return (mig);
}
/*
 * Zone-shutdown callback: mark every NFS mount in the zone dead and
 * stop its async machinery.  Because mig_lock must be dropped before
 * VFS_RELE() (the release can tear the mount down), the list scan is
 * restarted from the head after every processed entry — already-dead
 * mounts are skipped so the scan makes progress.
 */
static void
nfs_mi_shutdown(zoneid_t zoneid, void *data)
{
	struct mi_globals *mig = data;
	mntinfo_t *mi;

	ASSERT(mig != NULL);
again:
	mutex_enter(&mig->mig_lock);
	for (mi = list_head(&mig->mig_list); mi != NULL;
	    mi = list_next(&mig->mig_list, mi)) {
		/* Skip mounts this routine has already processed. */
		if (mi->mi_flags & MI_DEAD)
			continue;

		/*
		 * Hold the vfs so it cannot disappear once mig_lock is
		 * dropped below, and purge its name-cache entries.
		 */
		VFS_HOLD(mi->mi_vfsp);
		(void) dnlc_purge_vfsp(mi->mi_vfsp, 0);

		/*
		 * Shut down the async subsystem: forbid new worker
		 * threads, wake the existing workers, mark the mount
		 * dead (and the async manager stopped), and wake
		 * anything waiting for async requests.
		 */
		mutex_enter(&mi->mi_async_lock);
		mi->mi_max_threads = 0;
		NFS_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
		mutex_enter(&mi->mi_lock);
		mi->mi_flags |= (MI_ASYNC_MGR_STOP|MI_DEAD);
		mutex_exit(&mi->mi_lock);
		cv_broadcast(&mi->mi_async_reqs_cv);
		mutex_exit(&mi->mi_async_lock);

		/*
		 * Drop mig_lock before releasing the hold (the release
		 * may dismantle the mount), then rescan from the top.
		 */
		mutex_exit(&mig->mig_lock);
		VFS_RELE(mi->mi_vfsp);
		goto again;
	}
	mutex_exit(&mig->mig_lock);
}
/*
 * Tear down and free the per-zone NFS globals allocated by
 * nfs_mi_init().  The mount list must be empty by now.
 */
static void
nfs_mi_free_globals(struct mi_globals *mig)
{
	mutex_destroy(&mig->mig_lock);
	list_destroy(&mig->mig_list);
	kmem_free(mig, sizeof (*mig));
}
/*
 * Zone-destroy callback.  If no NFS mounts remain in the zone, the
 * globals are freed immediately (while still holding mig_lock — the
 * mutex dies with the structure).  Otherwise destruction is deferred:
 * the mig_destructor_called flag tells nfs_mi_zonelist_remove() to
 * free the globals when the last mount goes away.
 */
/* ARGSUSED */
static void
nfs_mi_destroy(zoneid_t zoneid, void *data)
{
	struct mi_globals *mig = data;

	ASSERT(mig != NULL);
	mutex_enter(&mig->mig_lock);
	if (list_head(&mig->mig_list) == NULL) {
		/* Last mount already gone: tear everything down now. */
		nfs_mi_free_globals(mig);
		return;
	}
	/* Mounts remain; defer the teardown to the list-remove path. */
	mig->mig_destructor_called = B_TRUE;
	mutex_exit(&mig->mig_lock);
}
/*
 * Add an NFS mount to its zone's list of NFS mounts.
 */
void
nfs_mi_zonelist_add(mntinfo_t *mi)
{
	struct mi_globals *globals;

	globals = zone_getspecific(mi_list_key, mi->mi_zone);
	mutex_enter(&globals->mig_lock);
	list_insert_head(&globals->mig_list, mi);
	mutex_exit(&globals->mig_lock);
}
/*
 * Remove an NFS mount from its zone's list of NFS mounts.  If this was
 * the last entry and the zone destructor has already run (it deferred
 * to us via mig_destructor_called), free the zone globals here.  In
 * that case mig — including its mutex — no longer exists, so we must
 * return WITHOUT calling mutex_exit().
 */
static void
nfs_mi_zonelist_remove(mntinfo_t *mi)
{
	struct mi_globals *mig;

	mig = zone_getspecific(mi_list_key, mi->mi_zone);
	mutex_enter(&mig->mig_lock);
	list_remove(&mig->mig_list, mi);
	/*
	 * This can happen late in unmount, after the zone destructor
	 * has already fired; finish the deferred teardown now.
	 */
	if (list_head(&mig->mig_list) == NULL &&
	    mig->mig_destructor_called == B_TRUE) {
		nfs_mi_free_globals(mig);
		return;
	}
	mutex_exit(&mig->mig_lock);
}
/*
 * NFS client module initialization: bring up the subr and vfs
 * subsystems, register the per-zone mount-list key, and initialize the
 * NFSv4 client and nfscmd subsystems.  Returns 0 on success or the
 * errno from a failed subsystem init (after unwinding earlier ones).
 */
int
nfs_clntinit(void)
{
#ifdef DEBUG
	static boolean_t nfs_clntup = B_FALSE;
#endif
	int error;

#ifdef DEBUG
	/* Guard against double initialization. */
	ASSERT(nfs_clntup == B_FALSE);
#endif

	error = nfs_subrinit();
	if (error != 0)
		return (error);

	error = nfs_vfsinit();
	if (error != 0) {
		/* Unwind the successful nfs_subrinit(). */
		nfs_subrfini();
		return (error);
	}

	zone_key_create(&mi_list_key, nfs_mi_init, nfs_mi_shutdown,
	    nfs_mi_destroy);

	nfs4_clnt_init();
	nfscmd_init();

#ifdef DEBUG
	nfs_clntup = B_TRUE;
#endif

	return (0);
}
/*
 * NFS client module teardown: undo nfs_clntinit().  The zone key is
 * deleted first so no new per-zone state is created while the
 * subsystems below are being dismantled.
 */
void
nfs_clntfini(void)
{
	(void) zone_key_delete(mi_list_key);
	nfs_subrfini();
	nfs_vfsfini();
	nfs4_clnt_fini();
	nfscmd_fini();
}
/*
 * Release any file locks and share reservations the current process
 * may hold on this vnode.  The r_lmpl locking-id list (maintained by
 * nfs_add_locking_id()) records which processes registered locks or
 * shares, so that an over-the-wire unlock is only sent when this
 * process could actually hold one.
 */
void
nfs_lockrelease(vnode_t *vp, int flag, offset_t offset, cred_t *cr)
{
	flock64_t ld;
	struct shrlock shr;
	char *buf;
	int remote_lock_possible;
	int ret;

	ASSERT((uintptr_t)vp > KERNELBASE);

	/*
	 * If this process registered a lock (its pid is on the r_lmpl
	 * list) or the lock manager still reports remote locks on the
	 * vnode, send an unlock for the entire file.
	 */
	remote_lock_possible = nfs_remove_locking_id(vp, RLMPL_PID,
	    (char *)&(ttoproc(curthread)->p_pid), NULL, NULL);
	if (remote_lock_possible || flk_has_remote_locks(vp)) {
		ld.l_type = F_UNLCK;	/* set to unlock entire file */
		ld.l_whence = 0;	/* unlock from start of file */
		ld.l_start = 0;
		ld.l_len = 0;		/* do entire file */
		ret = VOP_FRLOCK(vp, F_SETLK, &ld, flag, offset, NULL, cr,
		    NULL);
		if (ret != 0) {
			/*
			 * The unlock request failed (e.g. server
			 * unreachable); at least record the unlock with
			 * the local lock manager so local state does
			 * not retain the stale lock.
			 */
			ld.l_pid = ttoproc(curthread)->p_pid;
			lm_register_lock_locally(vp, NULL, &ld, flag, offset);
#ifdef DEBUG
			nfs_perror(ret,
			    "NFS lock release error on vp %p: %m.\n",
			    (void *)vp, NULL);
#endif
		}
		/*
		 * The VOP_FRLOCK() call may have re-registered this
		 * pid on the list; remove it again.
		 */
		(void) nfs_remove_locking_id(vp, RLMPL_PID,
		    (char *)&(ttoproc(curthread)->p_pid), NULL, NULL);
	}

	/*
	 * Release every share reservation this process registered on
	 * the vnode, one recorded owner at a time.
	 */
	buf = kmem_alloc(MAX_SHR_OWNER_LEN, KM_SLEEP);
	while (nfs_remove_locking_id(vp, RLMPL_OWNER,
	    (char *)NULL, buf, &shr.s_own_len)) {
		shr.s_owner = buf;
		shr.s_access = 0;
		shr.s_deny = 0;
		shr.s_sysid = 0;
		shr.s_pid = curproc->p_pid;
		ret = VOP_SHRLOCK(vp, F_UNSHARE, &shr, flag, cr, NULL);
#ifdef DEBUG
		if (ret != 0) {
			nfs_perror(ret,
			    "NFS share release error on vp %p: %m.\n",
			    (void *)vp, NULL);
		}
#endif
	}
	kmem_free(buf, MAX_SHR_OWNER_LEN);
}
/*
 * Called after a lock request completes (caller holds r_lkserlock as
 * writer).  For F_SETLK/F_SETLKW, update VNOCACHE according to whether
 * the lock manager says the file may still be safely mapped/cached;
 * then purge the attribute cache unconditionally so the next reference
 * revalidates against the server.
 */
void
nfs_lockcompletion(vnode_t *vp, int cmd)
{
#ifdef DEBUG
	rnode_t *rp = VTOR(vp);

	ASSERT(nfs_rw_lock_held(&rp->r_lkserlock, RW_WRITER));
#endif

	if (cmd == F_SETLK || cmd == F_SETLKW) {
		int safe = lm_safemap(vp);

		mutex_enter(&vp->v_lock);
		if (safe)
			vp->v_flag &= ~VNOCACHE;
		else
			vp->v_flag |= VNOCACHE;
		mutex_exit(&vp->v_lock);
	}

	/* Force attribute revalidation on the next access. */
	PURGE_ATTRCACHE(vp);
}
#ifdef DEBUG
/* Warn when a vnode's r_lmpl locking-id list grows beyond this. */
int nfs_lmpl_high_water = 128;
/* Number of calls to nfs_add_locking_id(). */
int nfs_cnt_add_locking_id = 0;
/* Longest r_lmpl list observed so far. */
int nfs_len_add_locking_id = 0;
#endif
/*
 * Record a lock or share owner on the rnode's r_lmpl list so that
 * nfs_lockrelease() later knows this process may hold locks or shares
 * on the vnode.  `type' is RLMPL_PID (id is a pid) or RLMPL_OWNER (id
 * is an opaque owner string); `len' bytes of `id' are copied.
 * Duplicate entries are not added.
 */
void
nfs_add_locking_id(vnode_t *vp, pid_t pid, int type, char *id, int len)
{
	rnode_t *rp;
	lmpl_t *new;
	lmpl_t *cur;
	lmpl_t **lmplp;
#ifdef DEBUG
	int list_len = 1;
#endif

#ifdef DEBUG
	++nfs_cnt_add_locking_id;
#endif
	/*
	 * Build the candidate entry before taking r_statelock, since
	 * kmem_alloc(KM_SLEEP) may block.
	 */
	ASSERT(len < MAX_SHR_OWNER_LEN);
	new = kmem_alloc(sizeof (*new), KM_SLEEP);
	new->lmpl_type = type;
	new->lmpl_pid = pid;
	new->lmpl_owner = kmem_alloc(len, KM_SLEEP);
	bcopy(id, new->lmpl_owner, len);
	new->lmpl_own_len = len;
	new->lmpl_next = (lmpl_t *)NULL;
#ifdef DEBUG
	if (type == RLMPL_PID) {
		/* For pid entries, the owner bytes are the pid itself. */
		ASSERT(len == sizeof (pid_t));
		ASSERT(pid == *(pid_t *)new->lmpl_owner);
	} else {
		ASSERT(type == RLMPL_OWNER);
	}
#endif

	rp = VTOR(vp);
	mutex_enter(&rp->r_statelock);

	/* The rnode must still be hashed in (active). */
	ASSERT(rp->r_flags & RHASHED);

	/*
	 * Walk the list looking for a duplicate; lmplp trails the scan
	 * so it ends up pointing at the tail link for insertion.
	 */
	lmplp = &(rp->r_lmpl);
	for (cur = rp->r_lmpl; cur != (lmpl_t *)NULL; cur = cur->lmpl_next) {
		if (cur->lmpl_pid == pid &&
		    cur->lmpl_type == type &&
		    cur->lmpl_own_len == len &&
		    bcmp(cur->lmpl_owner, new->lmpl_owner, len) == 0) {
			/* Already recorded: discard the new entry. */
			kmem_free(new->lmpl_owner, len);
			kmem_free(new, sizeof (*new));
			break;
		}
		lmplp = &cur->lmpl_next;
#ifdef DEBUG
		++list_len;
#endif
	}
	if (cur == (lmpl_t *)NULL) {
		/* No duplicate found: append at the tail. */
		*lmplp = new;
#ifdef DEBUG
		if (list_len > nfs_len_add_locking_id) {
			nfs_len_add_locking_id = list_len;
		}
		if (list_len > nfs_lmpl_high_water) {
			cmn_err(CE_WARN, "nfs_add_locking_id: long list "
			    "vp=%p is %d", (void *)vp, list_len);
		}
#endif
	}

#ifdef DEBUG
	if (share_debug) {
		int nitems = 0;
		int npids = 0;
		int nowners = 0;

		/*
		 * Count the number of things left on r_lmpl after the
		 * operation.
		 */
		for (cur = rp->r_lmpl; cur != (lmpl_t *)NULL;
		    cur = cur->lmpl_next) {
			nitems++;
			if (cur->lmpl_type == RLMPL_PID) {
				npids++;
			} else if (cur->lmpl_type == RLMPL_OWNER) {
				nowners++;
			} else {
				cmn_err(CE_PANIC, "nfs_add_locking_id: "
				    "unrecognized lmpl_type %d",
				    cur->lmpl_type);
			}
		}
		cmn_err(CE_CONT, "nfs_add_locking_id(%s): %d PIDs + %d "
		    "OWNs = %d items left on r_lmpl\n",
		    (type == RLMPL_PID) ? "P" : "O", npids, nowners, nitems);
	}
#endif
	mutex_exit(&rp->r_statelock);
}
/*
 * Remove the first r_lmpl entry of the given type that belongs to the
 * current process (and, if `id' is non-NULL, whose owner bytes match
 * `id').  When `rid' is non-NULL the removed entry's owner bytes are
 * copied out to it and their length stored in *rlen.  Returns 1 if an
 * entry was found and removed, 0 otherwise.
 */
static int
nfs_remove_locking_id(vnode_t *vp, int type, char *id, char *rid, int *rlen)
{
	lmpl_t *cur;
	lmpl_t **lmplp;
	rnode_t *rp;
	int rv = 0;

	ASSERT(type == RLMPL_PID || type == RLMPL_OWNER);

	rp = VTOR(vp);

	mutex_enter(&rp->r_statelock);
	/* The rnode must still be hashed in (active). */
	ASSERT(rp->r_flags & RHASHED);

	/*
	 * Walk the list; lmplp trails cur so a matching entry can be
	 * unlinked in place.
	 */
	lmplp = &(rp->r_lmpl);
	for (cur = rp->r_lmpl; cur != (lmpl_t *)NULL; cur = cur->lmpl_next) {
		if (cur->lmpl_type == type &&
		    cur->lmpl_pid == curproc->p_pid &&
		    (id == (char *)NULL ||
		    bcmp(cur->lmpl_owner, id, cur->lmpl_own_len) == 0)) {
			*lmplp = cur->lmpl_next;
			ASSERT(cur->lmpl_own_len < MAX_SHR_OWNER_LEN);
			if (rid != NULL) {
				/* Hand the owner bytes back to caller. */
				bcopy(cur->lmpl_owner, rid, cur->lmpl_own_len);
				*rlen = cur->lmpl_own_len;
			}
			kmem_free(cur->lmpl_owner, cur->lmpl_own_len);
			kmem_free(cur, sizeof (*cur));
			rv = 1;
			break;
		}
		lmplp = &cur->lmpl_next;
	}

#ifdef DEBUG
	if (share_debug) {
		int nitems = 0;
		int npids = 0;
		int nowners = 0;

		/*
		 * Count the number of things left on r_lmpl after the
		 * removal.
		 */
		for (cur = rp->r_lmpl; cur != (lmpl_t *)NULL;
		    cur = cur->lmpl_next) {
			nitems++;
			if (cur->lmpl_type == RLMPL_PID) {
				npids++;
			} else if (cur->lmpl_type == RLMPL_OWNER) {
				nowners++;
			} else {
				cmn_err(CE_PANIC,
				    "nrli: unrecognized lmpl_type %d",
				    cur->lmpl_type);
			}
		}
		cmn_err(CE_CONT,
		    "nrli(%s): %d PIDs + %d OWNs = %d items left on r_lmpl\n",
		    (type == RLMPL_PID) ? "P" : "O",
		    npids,
		    nowners,
		    nitems);
	}
#endif
	mutex_exit(&rp->r_statelock);
	return (rv);
}
/*
 * Free the resources held by an mntinfo_t.  The caller must already
 * have stopped the async manager and all async worker threads (see the
 * asserts).  The mount is unlinked from its zone's list before any of
 * its locks and condition variables are destroyed.
 */
void
nfs_free_mi(mntinfo_t *mi)
{
	ASSERT(mi->mi_flags & MI_ASYNC_MGR_STOP);
	ASSERT(mi->mi_manager_thread == NULL);
	ASSERT(mi->mi_threads[NFS_ASYNC_QUEUE] == 0 &&
	    mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE] == 0);

	/* Unlink from the per-zone list (may free the zone globals). */
	nfs_mi_zonelist_remove(mi);
	if (mi->mi_klmconfig) {
		lm_free_config(mi->mi_klmconfig);
		kmem_free(mi->mi_klmconfig, sizeof (struct knetconfig));
	}
	mutex_destroy(&mi->mi_lock);
	mutex_destroy(&mi->mi_remap_lock);
	mutex_destroy(&mi->mi_async_lock);
	mutex_destroy(&mi->mi_rnodes_lock);
	cv_destroy(&mi->mi_failover_cv);
	cv_destroy(&mi->mi_async_work_cv[NFS_ASYNC_QUEUE]);
	cv_destroy(&mi->mi_async_work_cv[NFS_ASYNC_PGOPS_QUEUE]);
	cv_destroy(&mi->mi_async_reqs_cv);
	cv_destroy(&mi->mi_async_cv);
	list_destroy(&mi->mi_rnodes);
	/* Drop this mount's reference on its zone, then free. */
	zone_rele_ref(&mi->mi_zone_ref, ZONE_REF_NFS);
	kmem_free(mi, sizeof (*mi));
}
/*
 * kstat update callback for the per-mount "mntinfo" raw kstat: take a
 * snapshot of interesting mntinfo_t fields into the kstat data area.
 * The kstat is read-only; write attempts fail with EACCES.
 */
static int
mnt_kstat_update(kstat_t *ksp, int rw)
{
	mntinfo_t *mi;
	struct mntinfo_kstat *mik;
	vfs_t *vfsp;
	int i;

	/* This is a read-only kstat. */
	if (rw == KSTAT_WRITE)
		return (EACCES);

	vfsp = (struct vfs *)ksp->ks_private;
	mi = VFTOMI(vfsp);
	mik = (struct mntinfo_kstat *)ksp->ks_data;

	/* Identity: transport, current server, version, flags. */
	(void) strcpy(mik->mik_proto, mi->mi_curr_serv->sv_knconf->knc_proto);
	(void) strcpy(mik->mik_curserver, mi->mi_curr_serv->sv_hostname);
	mik->mik_vers = (uint32_t)mi->mi_vers;
	mik->mik_flags = mi->mi_flags;
	mik->mik_secmod = mi->mi_curr_serv->sv_secdata->secmod;

	/* Current transfer sizes and RPC retry tunables. */
	mik->mik_curread = (uint32_t)mi->mi_curread;
	mik->mik_curwrite = (uint32_t)mi->mi_curwrite;
	mik->mik_retrans = mi->mi_retrans;
	mik->mik_timeo = mi->mi_timeo;

	/* Attribute-cache timeouts, converted to seconds. */
	mik->mik_acregmin = HR2SEC(mi->mi_acregmin);
	mik->mik_acregmax = HR2SEC(mi->mi_acregmax);
	mik->mik_acdirmin = HR2SEC(mi->mi_acdirmin);
	mik->mik_acdirmax = HR2SEC(mi->mi_acdirmax);

	/* Per-calltype RPC round-trip timers. */
	for (i = 0; i < NFS_CALLTYPES + 1; i++) {
		mik->mik_timers[i].srtt = (uint32_t)mi->mi_timers[i].rt_srtt;
		mik->mik_timers[i].deviate =
		    (uint32_t)mi->mi_timers[i].rt_deviate;
		mik->mik_timers[i].rtxcur =
		    (uint32_t)mi->mi_timers[i].rt_rtxcur;
	}

	/* Failover counters. */
	mik->mik_noresponse = (uint32_t)mi->mi_noresponse;
	mik->mik_failover = (uint32_t)mi->mi_failover;
	mik->mik_remap = (uint32_t)mi->mi_remap;

	return (0);
}
/*
 * Create and install the per-mount kstats: an I/O kstat and the raw
 * "mntinfo" kstat.  For mounts in a non-global zone, each kstat is
 * also made visible to the global zone.  Creation failures are simply
 * tolerated (the kstat pointer stays NULL).
 */
void
nfs_mnt_kstat_init(struct vfs *vfsp)
{
	mntinfo_t *mi = VFTOMI(vfsp);

	mi->mi_io_kstats = kstat_create_zone("nfs", getminor(vfsp->vfs_dev),
	    NULL, "nfs", KSTAT_TYPE_IO, 1, 0, mi->mi_zone->zone_id);
	if (mi->mi_io_kstats != NULL) {
		if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
			kstat_zone_add(mi->mi_io_kstats, GLOBAL_ZONEID);
		/* mi_lock protects the I/O statistics. */
		mi->mi_io_kstats->ks_lock = &mi->mi_lock;
		kstat_install(mi->mi_io_kstats);
	}

	mi->mi_ro_kstats = kstat_create_zone("nfs",
	    getminor(vfsp->vfs_dev), "mntinfo", "misc", KSTAT_TYPE_RAW,
	    sizeof (struct mntinfo_kstat), 0, mi->mi_zone->zone_id);
	if (mi->mi_ro_kstats != NULL) {
		if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
			kstat_zone_add(mi->mi_ro_kstats, GLOBAL_ZONEID);
		/* Snapshot is produced on demand by mnt_kstat_update(). */
		mi->mi_ro_kstats->ks_update = mnt_kstat_update;
		mi->mi_ro_kstats->ks_private = (void *)vfsp;
		kstat_install(mi->mi_ro_kstats);
	}
}
/*
 * Allocate and initialize a delmap call-tracking structure identifying
 * the current thread.  The caller links it onto the rnode's r_indelmap
 * list; it is later matched, removed and freed by
 * nfs_find_and_delete_delmapcall().
 *
 * Fix: the empty (unprototyped) parameter list is now an explicit
 * (void) prototype.
 */
nfs_delmapcall_t *
nfs_init_delmapcall(void)
{
	nfs_delmapcall_t *delmap_call;

	delmap_call = kmem_alloc(sizeof (nfs_delmapcall_t), KM_SLEEP);
	delmap_call->call_id = curthread;
	delmap_call->error = 0;

	return (delmap_call);
}
/*
 * Free a delmap call-tracking structure allocated by
 * nfs_init_delmapcall().
 */
void
nfs_free_delmapcall(nfs_delmapcall_t *delmap_call)
{
	kmem_free(delmap_call, sizeof (nfs_delmapcall_t));
}
/*
 * Look for a delmap call entry recorded by the current thread on this
 * rnode (i.e. a recursive delmap call).  If one is found it is removed
 * from the r_indelmap list and freed, its saved error is returned in
 * *errp, and 1 is returned.  Returns 0 when no matching entry exists;
 * on first use the r_indelmap list is created and RDELMAPLIST set.
 */
int
nfs_find_and_delete_delmapcall(rnode_t *rp, int *errp)
{
	nfs_delmapcall_t *delmap_call;

	mutex_enter(&rp->r_statelock);
	if (!(rp->r_flags & RDELMAPLIST)) {
		/* The list does not exist yet; create it. */
		list_create(&rp->r_indelmap, sizeof (nfs_delmapcall_t),
		    offsetof(nfs_delmapcall_t, call_node));
		rp->r_flags |= RDELMAPLIST;
		mutex_exit(&rp->r_statelock);
		return (0);
	} else {
		/* Search the list for an entry made by curthread. */
		for (delmap_call = list_head(&rp->r_indelmap);
		    delmap_call != NULL;
		    delmap_call = list_next(&rp->r_indelmap, delmap_call)) {
			if (delmap_call->call_id == curthread) {
				/* Found a match: consume and free it. */
				*errp = delmap_call->error;
				list_remove(&rp->r_indelmap, delmap_call);
				mutex_exit(&rp->r_statelock);
				nfs_free_delmapcall(delmap_call);
				return (1);
			}
		}
	}
	mutex_exit(&rp->r_statelock);
	return (0);
}