#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/smr.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <fs/nullfs/null.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
/* SMR section used for lockless null-node lookups (see null_lock_prep_with_smr). */
VFS_SMR_DECLARE;

/* Debug knob: when non-zero, log every operation routed through null_bypass(). */
static int null_bug_bypass = 0;
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");
/*
 * Keep the given VIRF_INOTIFY* flag on the upper vnode vp in sync with
 * the lower vnode lvp, so watches placed on either layer are visible on
 * both.  The flag reads are lockless; the inner re-read (marked
 * __predict_false) narrows the race window before flipping vp's flag.
 */
static void
null_copy_inotify(struct vnode *vp, struct vnode *lvp, short flag)
{
	if ((vn_irflag_read(vp) & flag) != 0) {
		/* vp has the flag but the lower vnode lost it: clear on vp. */
		if (__predict_false((vn_irflag_read(lvp) & flag) == 0))
			vn_irflag_unset(vp, flag);
	} else if ((vn_irflag_read(lvp) & flag) != 0) {
		/* Lower vnode gained the flag: mirror it onto vp. */
		if (__predict_false((vn_irflag_read(vp) & flag) == 0))
			vn_irflag_set(vp, flag);
	}
}
/*
 * Generic VOP bypass: substitute the lower vnode for each nullfs vnode
 * argument, invoke the same operation on the lower layer, restore the
 * upper vnodes afterwards, and wrap any vnode returned through *vpp in
 * a nullfs node.
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnode *lvp;
	struct vnodeop_desc *descp = ap->a_desc;
	int error, i, reles;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/* The descriptor must map at least one vnode argument. */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Walk the vnode arguments described by the op descriptor.  Each
	 * nullfs vnode is remembered in old_vps[] and replaced in the
	 * argument block by its lower vnode.  The lower vnode gets a hold
	 * reference (dropped after the call below); for WILLRELE args it
	 * also gets a use reference since the lower VOP will consume one.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;
		vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode **,
		    descp->vdesc_vp_offsets[i], ap);
		/*
		 * Arguments past the first may be NULL or belong to a
		 * different filesystem; leave those untouched.
		 */
		if (i != 0 && (*this_vp_p == NULL ||
		    !null_is_nullfs_vnode(*this_vp_p))) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/* *this_vp_p is now the lower vnode. */
			vhold(*this_vp_p);
			if (reles & VDESC_VP0_WILLRELE)
				vref(*this_vp_p);
		}
	}

	/* Call the identical operation on the lower layer. */
	if (vps_p[0] != NULL && *vps_p[0] != NULL) {
		error = ap->a_desc->vdesc_call(ap);
	} else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Put the upper vnodes back into the argument block, sync the
	 * inotify flags from the lower vnode, and re-lock the upper
	 * vnode if the lower VOP left a different exclusive lock held.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;
		if (old_vps[i] != NULL) {
			lvp = *(vps_p[i]);
			if (lvp != NULL) {
				null_copy_inotify(old_vps[i], lvp,
				    VIRF_INOTIFY);
				null_copy_inotify(old_vps[i], lvp,
				    VIRF_INOTIFY_PARENT);
				/*
				 * The upper vnode normally shares its lock
				 * with the lower one; when it does not
				 * (reclaim race), move the lock back to
				 * the upper vnode.
				 */
				if (VOP_ISLOCKED(lvp) == LK_EXCLUSIVE &&
				    old_vps[i]->v_vnlock != lvp->v_vnlock) {
					VOP_UNLOCK(lvp);
					VOP_LOCK(old_vps[i], LK_EXCLUSIVE |
					    LK_RETRY);
				}
				vdrop(lvp);
			}
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			/* Consume the caller-supplied upper reference. */
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * If the operation returned a vnode (e.g. create, mkdir), wrap
	 * the lower result in a nullfs node on the upper mount.
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && error == 0) {
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp != NULL)
			error = null_nodeget(old_vps[0]->v_mount, **vppp,
			    *vppp);
	}
	return (error);
}
/*
 * Forward a writecount adjustment to the lower vnode and, only when the
 * lower layer accepts it, mirror the change on the nullfs vnode.  Both
 * the assertions and the update run under the upper vnode's interlock.
 */
static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp, *lvp;
	int rc;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	VI_LOCK(vp);
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	rc = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	if (rc == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (rc);
}
/*
 * Lookup a component in a nullfs directory by performing the lookup on
 * the lower directory vnode and wrapping the result.  Handles the
 * read-only mount checks up front and the race where the upper dvp is
 * reclaimed while the lower lookup runs unlocked.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	uint64_t flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	/* Refuse modifying lookups on a read-only nullfs mount. */
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Do not allow ".." to escape the nullfs mount: if the lower
	 * directory is the root of the lower filesystem, or the root of
	 * the current lookup, the parent is outside this mount.
	 */
	if ((flags & ISDOTDOT) != 0) {
		struct nameidata *ndp;

		if ((ldvp->v_vflag & VV_ROOT) != 0) {
			KASSERT((dvp->v_vflag & VV_ROOT) == 0,
			    ("ldvp %p fl %#x dvp %p fl %#x flags %#jx",
			    ldvp, ldvp->v_vflag, dvp, dvp->v_vflag,
			    (uintmax_t)flags));
			return (ENOENT);
		}
		ndp = vfs_lookup_nameidata(cnp);
		if (ndp != NULL && vfs_lookup_isroot(ndp, ldvp))
			return (ENOENT);
	}

	/* Hold ldvp so it survives if dvp is reclaimed during the call. */
	vhold(ldvp);
	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * If dvp was doomed while we ran the lower lookup, discard any
	 * result and fail; also re-lock dvp (its lock no longer shares
	 * with ldvp's) as the caller expects it locked.
	 */
	if (VN_IS_DOOMED(dvp)) {
		if (error == 0 || error == EJUSTRETURN) {
			if (lvp != NULL)
				vput(lvp);
			error = ENOENT;
		}
		VOP_UNLOCK(ldvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	/* CREATE/RENAME of the last component needs a writable mount. */
	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			/* "." lookup: return dvp itself. */
			*ap->a_vpp = dvp;
			vref(dvp);
			vrele(lvp);
		} else {
			/* Wrap the lower result in a nullfs vnode. */
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}
static int
null_open(struct vop_open_args *ap)
{
int retval;
struct vnode *vp, *ldvp;
vp = ap->a_vp;
ldvp = NULLVPTOLOWERVP(vp);
retval = null_bypass(&ap->a_gen);
if (retval == 0) {
vp->v_object = ldvp->v_object;
if ((vn_irflag_read(ldvp) & VIRF_PGREAD) != 0) {
MPASS(vp->v_object != NULL);
if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
vn_irflag_set_cond(vp, VIRF_PGREAD);
}
}
}
return (retval);
}
/*
 * Setattr: perform read-only mount and file-type checks locally, then
 * bypass the actual attribute change to the lower layer.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	/* Reject any attribute modification on a read-only mount. */
	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	if (vap->va_size != VNOVAL) {
		if (vp->v_type == VDIR)
			return (EISDIR);
		if (vp->v_type == VCHR || vp->v_type == VBLK ||
		    vp->v_type == VSOCK || vp->v_type == VFIFO) {
			/* Size changes are meaningless for these types. */
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		}
		/* VREG, VLNK and anything else: need a writable mount. */
		if (vp->v_mount->mnt_flag & MNT_RDONLY)
			return (EROFS);
	}

	return (null_bypass(&ap->a_gen));
}
static int
null_stat(struct vop_stat_args *ap)
{
int error;
if ((error = null_bypass(&ap->a_gen)) != 0)
return (error);
ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
return (0);
}
static int
null_getattr(struct vop_getattr_args *ap)
{
int error;
if ((error = null_bypass(&ap->a_gen)) != 0)
return (error);
ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
return (0);
}
/*
 * Access check: refuse write access on a read-only mount for the file
 * types stored in the filesystem (directories, links, regular files);
 * everything else is decided by the lower layer.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if ((ap->a_accmode & VWRITE) != 0 &&
	    (vp->v_type == VDIR || vp->v_type == VLNK ||
	    vp->v_type == VREG) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
		return (EROFS);
	return (null_bypass(&ap->a_gen));
}
/*
 * Extended access check: same read-only mount policy as null_access(),
 * with the rest of the decision bypassed to the lower layer.
 */
static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if ((ap->a_accmode & VWRITE) != 0 &&
	    (vp->v_type == VDIR || vp->v_type == VLNK ||
	    vp->v_type == VREG) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
		return (EROFS);
	return (null_bypass(&ap->a_gen));
}
static int
null_remove(struct vop_remove_args *ap)
{
int retval, vreleit;
struct vnode *lvp, *vp;
vp = ap->a_vp;
if (vrefcnt(vp) > 1) {
lvp = NULLVPTOLOWERVP(vp);
vref(lvp);
vreleit = 1;
} else
vreleit = 0;
VTONULL(vp)->null_flags |= NULLV_DROP;
retval = null_bypass(&ap->a_gen);
if (vreleit != 0)
vrele(lvp);
return (retval);
}
/*
 * Rename: translate all four vnode arguments to their lower vnodes and
 * perform the rename on the lower layer directly (not via the bypass,
 * because the reference/lock protocol of VOP_RENAME is special: it
 * consumes a reference on each vnode argument).
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *fdvp, *fvp, *tdvp, *tvp;
	struct vnode *lfdvp, *lfvp, *ltdvp, *ltvp;
	struct null_node *fdnn, *fnn, *tdnn, *tnn;
	int error;

	tdvp = ap->a_tdvp;
	fvp = ap->a_fvp;
	fdvp = ap->a_fdvp;
	tvp = ap->a_tvp;
	lfdvp = NULL;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto upper_err;
	}

	/*
	 * fdvp and fvp are only referenced, not locked, so their null
	 * nodes may have been reclaimed; check under the interlock and
	 * take a use reference on each lower vnode for VOP_RENAME to
	 * consume.
	 */
	VI_LOCK(fdvp);
	fdnn = VTONULL(fdvp);
	if (fdnn == NULL) {	/* fdvp was reclaimed */
		VI_UNLOCK(fdvp);
		error = ENOENT;
		goto upper_err;
	}
	lfdvp = fdnn->null_lowervp;
	vref(lfdvp);
	VI_UNLOCK(fdvp);

	VI_LOCK(fvp);
	fnn = VTONULL(fvp);
	if (fnn == NULL) {	/* fvp was reclaimed */
		VI_UNLOCK(fvp);
		error = ENOENT;
		goto upper_err;
	}
	lfvp = fnn->null_lowervp;
	vref(lfvp);
	VI_UNLOCK(fvp);

	/* tdvp (and tvp, if any) are locked, so no reclaim race here. */
	tdnn = VTONULL(tdvp);
	ltdvp = tdnn->null_lowervp;
	vref(ltdvp);
	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		ltvp = tnn->null_lowervp;
		vref(ltvp);
		/* The target is going away; recycle its upper vnode. */
		tnn->null_flags |= NULLV_DROP;
	} else {
		ltvp = NULL;
	}

	/* VOP_RENAME consumes the lower references and unlocks. */
	error = VOP_RENAME(lfdvp, lfvp, ap->a_fcnp, ltdvp, ltvp, ap->a_tcnp,
	    ap->a_flags);
	/* Drop the upper references the caller handed us. */
	vrele(fdvp);
	vrele(fvp);
	vrele(tdvp);
	if (tvp != NULL)
		vrele(tvp);
	return (error);

upper_err:
	/*
	 * Error before the lower rename: follow the VOP_RENAME contract
	 * by releasing (and unlocking, where locked) the upper vnodes
	 * ourselves, plus any lower reference already taken.
	 */
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	if (lfdvp != NULL)
		vrele(lfdvp);
	vrele(fdvp);
	vrele(fvp);
	return (error);
}
static int
null_rmdir(struct vop_rmdir_args *ap)
{
VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
return (null_bypass(&ap->a_gen));
}
/*
 * Lockless (SMR-protected) fetch of the lower vnode for null_lock():
 * returns the lower vnode with a hold reference, or NULL if the null
 * node is gone or the lower vnode could not be safely held.
 */
static struct vnode *
null_lock_prep_with_smr(struct vop_lock1_args *ap)
{
	struct null_node *nn;
	struct vnode *lvp;

	vfs_smr_enter();
	nn = VTONULL_SMR(ap->a_vp);
	lvp = (nn != NULL) ? nn->null_lowervp : NULL;
	if (lvp != NULL && !vhold_smr(lvp))
		lvp = NULL;
	vfs_smr_exit();
	return (lvp);
}
/*
 * Interlock-protected fetch of the lower vnode for null_lock(): the
 * caller holds vp's interlock, which we consume (clearing LK_INTERLOCK
 * from the request).  Returns the held lower vnode, or NULL if the
 * null node was reclaimed.
 */
static struct vnode *
null_lock_prep_with_interlock(struct vop_lock1_args *ap)
{
	struct null_node *nn;
	struct vnode *lvp;

	ASSERT_VI_LOCKED(ap->a_vp, __func__);
	ap->a_flags &= ~LK_INTERLOCK;
	nn = VTONULL(ap->a_vp);
	lvp = (nn != NULL) ? nn->null_lowervp : NULL;
	if (lvp != NULL)
		vholdnz(lvp);
	VI_UNLOCK(ap->a_vp);
	return (lvp);
}
/*
 * Lock a nullfs vnode by locking its lower vnode (nullfs vnodes share
 * the lower vnode's lock).  Falls back to the vnode's own standard lock
 * when the null node has been reclaimed, including the race where the
 * reclaim happens while we sleep on the lower lock.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *lvp;
	int error, flags;

	/* Fast path: find and hold the lower vnode without the interlock. */
	if (__predict_true((ap->a_flags & LK_INTERLOCK) == 0)) {
		lvp = null_lock_prep_with_smr(ap);
		if (__predict_false(lvp == NULL)) {
			VI_LOCK(ap->a_vp);
			lvp = null_lock_prep_with_interlock(ap);
		}
	} else {
		lvp = null_lock_prep_with_interlock(ap);
	}
	ASSERT_VI_UNLOCKED(ap->a_vp, __func__);
	/* Reclaimed null node: use the vnode's private lock. */
	if (__predict_false(lvp == NULL))
		return (vop_stdlock(ap));
	VNPASS(lvp->v_holdcnt > 0, lvp);
	error = VOP_LOCK(lvp, ap->a_flags);
	/*
	 * If the null node was reclaimed while we slept on the lower
	 * lock, the upper vnode no longer shares that lock: undo the
	 * lower lock and take the upper private lock instead, mapping
	 * the request to a plain shared/exclusive acquisition (upgrades
	 * become exclusive since the private lock was never held).
	 */
	if (VTONULL(ap->a_vp) == NULL && error == 0) {
		VOP_UNLOCK(lvp);
		flags = ap->a_flags;
		ap->a_flags &= ~LK_TYPE_MASK;
		switch (flags & LK_TYPE_MASK) {
		case LK_SHARED:
			ap->a_flags |= LK_SHARED;
			break;
		case LK_UPGRADE:
		case LK_EXCLUSIVE:
			ap->a_flags |= LK_EXCLUSIVE;
			break;
		default:
			panic("Unsupported lock request %d\n",
			    flags);
		}
		error = vop_stdlock(ap);
	}
	vdrop(lvp);
	return (error);
}
/*
 * Unlock a nullfs vnode by unlocking the shared lower vnode lock; if
 * the null node is gone, release the vnode's own standard lock.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;
	struct vnode *lvp;

	nn = VTONULL(vp);
	if (nn == NULL || (lvp = NULLVPTOLOWERVP(vp)) == NULL)
		return (vop_stdunlock(ap));
	return (VOP_UNLOCK(lvp));
}
/*
 * Decide whether an inactive nullfs vnode should be recycled now:
 * either the mount does not cache nodes (no NULLM_CACHE), the node was
 * explicitly marked for dropping (NULLV_DROP), or the lower vnode opted
 * out of syncing (VV_NOSYNC).  Returns 1 to recycle, 0 to keep cached.
 */
static int
null_want_recycle(struct vnode *vp)
{
	struct null_mount *xmp;
	struct null_node *xp;
	struct vnode *lvp;

	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	xmp = MOUNTTONULLMOUNT(vp->v_mount);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0)
		return (1);
	if ((xp->null_flags & NULLV_DROP) != 0)
		return (1);
	if ((lvp->v_vflag & VV_NOSYNC) != 0)
		return (1);
	return (0);
}
/*
 * Inactive: recycle the vnode immediately when caching is not wanted,
 * detaching the shared VM object first.
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if (null_want_recycle(vp)) {
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}
/*
 * Report whether VOP_INACTIVE processing is needed: either the vnode
 * should be recycled, or its page queues need flushing.
 */
static int
null_need_inactive(struct vop_need_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if (null_want_recycle(vp))
		return (1);
	return (vn_need_pageq_flush(vp) ? 1 : 0);
}
/*
 * Reclaim a nullfs vnode: detach it from the hash and from its lower
 * vnode, switch it back to its private lock, settle any outstanding
 * writecount on the lower vnode, and free the null node.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Take the private lock before publishing it as v_vnlock so the
	 * vnode is never observed unlocked mid-switch; v_data is cleared
	 * under the interlock so racing lockless readers see NULL.
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;

	/*
	 * Return any writecount this vnode charged against the lower
	 * vnode; a negative count (text mapping bookkeeping) is simply
	 * reset.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
	else if (vp->v_writecount < 0)
		vp->v_writecount = 0;

	VI_UNLOCK(vp);

	/* NULLV_NOUNLOCK: the lower vnode must stay locked for the caller. */
	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	uma_zfree_smr(null_node_zone, xp);

	return (0);
}
/*
 * Print: show the upper/lower vnode pairing for debugging output.
 */
static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}
/*
 * Getwritemount: delegate to the lower vnode, which knows the real
 * mount that writes land on.  The null node may already be reclaimed;
 * in that case report no write mount.
 */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *vp, *lowervp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	lowervp = (xp != NULL) ? xp->null_lowervp : NULL;
	if (lowervp == NULL) {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
		return (0);
	}
	/* Hold the lower vnode across dropping the interlock. */
	vholdnz(lowervp);
	VI_UNLOCK(vp);
	VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
	vdrop(lowervp);
	return (0);
}
/*
 * Vptofh: file handles are produced by the lower filesystem, so simply
 * forward the request to the lower vnode.
 */
static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_VPTOFH(lvp, ap->a_fhp));
}
/*
 * Vnode-to-name translation: resolve the name of vp by running
 * vn_vptocnp() on the lower vnode, then wrap the resulting lower parent
 * directory in a nullfs vnode.  vp's lock is dropped around the lower
 * work and restored (at its original level) before returning; the mount
 * is busied to keep it from being unmounted meanwhile.
 */
static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct mount *mp;
	int error, locked;

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0)
		return (error);
	/* Hold lvp: unlocking vp may allow it to be reclaimed. */
	vhold(lvp);
	VOP_UNLOCK(vp); /* vp is held by vn_vptocnp caller (lookup) */
	ldvp = lvp;
	vref(lvp);
	/* On success ldvp is replaced by the referenced lower parent. */
	error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		return (ENOENT);
	}

	error = vn_lock(ldvp, LK_SHARED);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		return (ENOENT);
	}
	/* Wrap the lower parent; null_nodeget consumes the ldvp reference. */
	error = null_nodeget(mp, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp); /* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	vfs_unbusy(mp);
	return (error);
}
/*
 * Unlocked page-cache read: forward to the lower vnode while holding a
 * use reference on it.  If the null node has been reclaimed, return
 * EJUSTRETURN so the caller retries through the locked read path.
 */
static int
null_read_pgcache(struct vop_read_pgcache_args *ap)
{
	struct null_node *nn;
	struct vnode *vp, *lvp;
	int rc;

	vp = ap->a_vp;
	VI_LOCK(vp);
	nn = VTONULL(vp);
	if (nn == NULL) {
		VI_UNLOCK(vp);
		return (EJUSTRETURN);
	}
	lvp = nn->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	rc = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	vrele(lvp);
	return (rc);
}
/*
 * Advisory locking is delegated to the lower vnode, which owns the
 * actual lock state.  A reclaimed null node yields EBADF.
 */
static int
null_advlock(struct vop_advlock_args *ap)
{
	struct null_node *nn;
	struct vnode *vp, *lvp;
	int rc;

	vp = ap->a_vp;
	VI_LOCK(vp);
	nn = VTONULL(vp);
	if (nn == NULL) {
		VI_UNLOCK(vp);
		return (EBADF);
	}
	lvp = nn->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	rc = VOP_ADVLOCK(lvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags);
	vrele(lvp);
	return (rc);
}
/*
 * Vput-pair: forward VOP_VPUT_PAIR to the lower vnodes.  When the
 * caller wants *vpp kept (unlock_vp false) we must guarantee that the
 * vnode returned in *vpp is still usable even if the lower call doomed
 * it, by re-wrapping the (possibly reallocated) lower vnode.
 */
static int
null_vput_pair(struct vop_vput_pair_args *ap)
{
	struct mount *mp;
	struct vnode *dvp, *ldvp, *lvp, *vp, *vp1, **vpp;
	int error, res;

	dvp = ap->a_dvp;
	ldvp = NULLVPTOLOWERVP(dvp);
	vref(ldvp);
	vpp = ap->a_vpp;
	vp = NULL;
	lvp = NULL;
	mp = NULL;
	if (vpp != NULL)
		vp = *vpp;
	if (vp != NULL) {
		lvp = NULLVPTOLOWERVP(vp);
		vref(lvp);
		if (!ap->a_unlock_vp) {
			/*
			 * Keep vp and lvp alive (and the mount
			 * referenced) past the lower call so we can
			 * recover if vp is reclaimed by it.
			 */
			vhold(vp);
			vhold(lvp);
			mp = vp->v_mount;
			vfs_ref(mp);
		}
	}

	/* The lower call consumes the ldvp and lvp use references. */
	res = VOP_VPUT_PAIR(ldvp, lvp != NULL ? &lvp : NULL, true);
	if (vp != NULL && ap->a_unlock_vp)
		vrele(vp);
	vrele(dvp);

	if (vp == NULL || ap->a_unlock_vp)
		return (res);

	/* lvp has been unlocked by the lower VOP_VPUT_PAIR. */
	VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
	/*
	 * If vp was reclaimed (v_data == NULL) during the lower call,
	 * hand the caller a fresh nullfs vnode over the same lower
	 * vnode, unless the lower vnode itself was doomed too.
	 */
	if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) {
		vput(vp);
		vget(lvp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(lvp)) {
			/* Nothing to re-wrap; give back the doomed vp. */
			vput(lvp);
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
		} else {
			error = null_nodeget(mp, lvp, &vp1);
			if (error == 0) {
				*vpp = vp1;
			} else {
				vget(vp, LK_EXCLUSIVE | LK_RETRY);
			}
		}
		vfs_unbusy(mp);
	}
	vdrop(lvp);
	vdrop(vp);
	vfs_rel(mp);
	return (res);
}
/*
 * Getlowvnode: expose the lower vnode backing this nullfs vnode,
 * delegating through VOP_GETLOWVNODE so stacked mounts unwind fully.
 */
static int
null_getlowvnode(struct vop_getlowvnode_args *ap)
{
	struct vnode *vp, *lvp;

	vp = ap->a_vp;
	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);
	lvp = NULLVPTOLOWERVP(vp);
	/* Hold lvp so it survives unlocking vp. */
	vhold(lvp);
	VOP_UNLOCK(vp);
	VOP_GETLOWVNODE(lvp, ap->a_vplp, ap->a_flags);
	vdrop(lvp);
	return (0);
}
/*
 * Vnode operations vector for nullfs vnodes.  Any operation without an
 * explicit entry falls through to null_bypass(), which forwards it to
 * the lower vnode.
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlock =		null_advlock,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_stat =		null_stat,
	.vop_getattr =		null_getattr,
	.vop_getlowvnode =	null_getlowvnode,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_need_inactive =	null_need_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_read_pgcache =	null_read_pgcache,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_rmdir =		null_rmdir,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
	.vop_add_writecount =	null_add_writecount,
	.vop_vput_pair =	null_vput_pair,
	.vop_copy_file_range =	VOP_PANIC,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);
/*
 * Variant of the nullfs vector that keeps unix-domain socket state on
 * the nullfs vnode itself: the vop_unp_* operations use the standard
 * implementations instead of being bypassed to the lower vnode.
 */
struct vop_vector null_vnodeops_no_unp_bypass = {
	.vop_default =		&null_vnodeops,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops_no_unp_bypass);