#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/filio.h>
#include <sys/uio.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vfs_opreg.h>
#include <sys/policy.h>
#include <sys/sdt.h>
#include <sys/taskq_impl.h>
#include <sys/zone.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kpm.h>
#include <vm/seg_vn.h>
#endif
#include <netsmb/smb_osdep.h>
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>
#include <sys/fs/smbfs_ioctl.h>
#include <fs/fs_subr.h>
#ifndef MAXOFF32_T
#define MAXOFF32_T 0x7fffffff
#endif
/*
 * Directory offsets 0 and 1 are reserved for the synthetic
 * "." and ".." entries; real wire entries start here.
 */
#define FIRST_DIROFS 2
/*
 * Characters not allowed in an SMB file name component.
 * Note: the list deliberately begins with ':' — for extended
 * attribute (xattr) lookups the caller skips the first element
 * so that ':' remains legal in stream names.
 */
static const char illegal_chars[] = {
	':',
	'\\',
	'/',
	'*',
	'?',
	'"',
	'<',
	'>',
	'|',
	0
};
/* Tunable: allow lookup to use the attribute/node cache fast path. */
int smbfs_fastlookup = 1;
/* Filled in at module init with this file system's vnode ops vector. */
struct vnodeops *smbfs_vnodeops = NULL;
/* Forward declarations for local (static) helpers. */
static int smbfslookup_cache(vnode_t *, char *, int, vnode_t **,
	cred_t *);
static int smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
	int cache_ok, caller_context_t *);
static int smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
	int flags);
static int smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp,
	char *nnm, struct smb_cred *scred, int flags);
static int smbfssetattr(vnode_t *, struct vattr *, int, cred_t *);
static int smbfs_accessx(void *, int, cred_t *);
static int smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
	caller_context_t *);
static int smbfsflush(smbnode_t *, struct smb_cred *);
static void smbfs_rele_fid(smbnode_t *, struct smb_cred *);
static uint32_t xvattr_to_dosattr(smbnode_t *, struct vattr *);
static int smbfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
static int smbfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
	caller_context_t *);
#ifdef _KERNEL
static int smbfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
	page_t *[], size_t, struct seg *, caddr_t,
	enum seg_rw, cred_t *);
static int smbfs_putapage(vnode_t *, page_t *, u_offset_t *, size_t *,
	int, cred_t *);
static void smbfs_delmap_async(void *);
static int smbfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
	cred_t *);
static int smbfs_bio(struct buf *, int, cred_t *);
static int smbfs_writenp(smbnode_t *np, caddr_t base, int tcount,
	struct uio *uiop, int pgcreated);
#endif
/* Private sentinel: read started at/after EOF (never leaves this file). */
#define SMBFS_EOF -98
/* smbfs never sleeps in the lock manager; keep the macro for symmetry. */
#define smbfs_lm_has_sleep(vp) 0
/*
 * smbfs_open: VOP_OPEN for smbfs.
 *
 * Directories get (or reuse) a find handle (n_dirseq); regular files
 * get (or reuse) an SMB FID (n_fid).  An existing FID is reused only
 * when it belongs to the current session (fh_vcgenid matches) and
 * already grants the access rights this open needs; otherwise a new
 * open is done with the union of the old and newly required rights.
 *
 * Returns 0 or an errno (EIO, EACCES, EINTR, or a wire error).
 */
static int
smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	smbnode_t *np;
	vnode_t *vp;
	smbfattr_t fa;
	smb_fh_t *fid = NULL;
	smb_fh_t *oldfid;
	uint32_t rights;
	struct smb_cred scred;
	smbmntinfo_t *smi;
	smb_share_t *ssp;
	cred_t *oldcr;
	int error = 0;

	vp = *vpp;
	np = VTOSMB(vp);
	smi = VTOSMI(vp);
	ssp = smi->smi_share;

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Only plain files and directories can be opened over SMB. */
	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBVDEBUG("open eacces vtype=%d\n", vp->v_type);
		return (EACCES);
	}

	/* Serialize open/close state changes on this node. */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/*
	 * n_ovtype records the vnode type this node was opened as.
	 * VNON means "not open"; a mismatch with v_type means the
	 * object changed type on the server out from under us.
	 */
	if (np->n_ovtype == VNON) {
		ASSERT(np->n_dirrefs == 0);
		ASSERT(np->n_fidrefs == 0);
	} else if (np->n_ovtype != vp->v_type) {
		SMBVDEBUG("open n_ovtype=%d v_type=%d\n",
		    np->n_ovtype, vp->v_type);
		error = EACCES;
		goto out;
	}

	if (vp->v_type == VDIR) {
		/* Directory: open (or reuse) a wildcard find handle. */
		if (np->n_dirseq == NULL) {
			error = smbfs_smb_findopen(np, "*", 1,
			    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
			    &scred, &np->n_dirseq);
			if (error != 0)
				goto out;
		}
		np->n_dirofs = FIRST_DIROFS;
		np->n_dirrefs++;
		goto have_fid;
	}

	/* Truncation implies we will write. */
	if (flag & FTRUNC)
		flag |= FWRITE;

	/*
	 * Reuse the existing FID if it survives the current session
	 * and already carries the rights this open requires.
	 */
	if (np->n_fidrefs > 0 &&
	    (fid = np->n_fid) != NULL &&
	    fid->fh_vcgenid == ssp->ss_vcgenid) {
		int upgrade = 0;
		if ((flag & FWRITE) &&
		    !(fid->fh_rights & SA_RIGHT_FILE_WRITE_DATA))
			upgrade = 1;
		if ((flag & FREAD) &&
		    !(fid->fh_rights & SA_RIGHT_FILE_READ_DATA))
			upgrade = 1;
		if (!upgrade) {
			np->n_fidrefs++;
			goto have_fid;
		}
		fid = NULL;
	}

	/* Build the access mask: keep old rights, add what we need now. */
	rights = (fid != NULL) ? fid->fh_rights : 0;
	rights |= (STD_RIGHT_READ_CONTROL_ACCESS |
	    SA_RIGHT_FILE_READ_ATTRIBUTES);
	if ((flag & FREAD))
		rights |= SA_RIGHT_FILE_READ_DATA;
	if ((flag & FWRITE))
		rights |= SA_RIGHT_FILE_WRITE_DATA |
		    SA_RIGHT_FILE_APPEND_DATA |
		    SA_RIGHT_FILE_WRITE_ATTRIBUTES;

	bzero(&fa, sizeof (fa));
	error = smbfs_smb_open(np,
	    NULL, 0, 0,
	    rights, &scred,
	    &fid, &fa);
	if (error)
		goto out;
	/* The open returned fresh attributes; update the cache. */
	smbfs_attrcache_fa(vp, &fa);

	/* Install the new FID, dropping our hold on any old one. */
	VERIFY(fid != NULL);
	oldfid = np->n_fid;
	np->n_fid = fid;
	np->n_fidrefs++;
	if (oldfid != NULL)
		smb_fh_rele(oldfid);

	/* Remember the opener's credentials for later async I/O. */
	mutex_enter(&np->r_statelock);
	oldcr = np->r_cred;
	np->r_cred = cr;
	crhold(cr);
	if (oldcr)
		crfree(oldcr);
	mutex_exit(&np->r_statelock);

have_fid:
	/* Record what type this node is open as (checked above on reopen). */
	if (np->n_ovtype == VNON)
		np->n_ovtype = vp->v_type;

out:
	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);
	return (error);
}
/*
 * smbfs_close: VOP_CLOSE for smbfs.
 *
 * Cleans up per-process locks/shares, and on the last close
 * (count == 1) flushes dirty pages and releases this node's
 * open handle via smbfs_rele_fid().
 *
 * Note: like other file systems, close always returns 0; a flush
 * failure only clears the stale state rather than failing close.
 */
static int
smbfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	smbnode_t *np;
	smbmntinfo_t *smi;
	struct smb_cred scred;
	int error = 0;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (smi->smi_zone_ref.zref_zone != curproc->p_zone) {
		return (EIO);
	}

	/* With local locking, drop this process's locks and shares. */
	if (smi->smi_flags & SMI_LLOCK) {
		pid_t pid = ddi_get_pid();
		cleanlocks(vp, pid, 0);
		cleanshares(vp, pid);
	}

	/* Not the last close of this open: nothing more to do. */
	if (count > 1)
		return (0);

	/* Last close after writing: push dirty pages to the server. */
	if ((flag & FWRITE) && vn_has_cached_data(vp)) {
		error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
		if (error == EAGAIN)
			error = 0;
	}
	if (error == 0) {
		/* I/O went through; clear any recorded stale state. */
		mutex_enter(&np->r_statelock);
		np->r_flags &= ~RSTALE;
		np->r_error = 0;
		mutex_exit(&np->r_statelock);
	}

	/* Drop our reference on the open FID / find handle. */
	(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
	smb_credinit(&scred, cr);
	smbfs_rele_fid(np, &scred);
	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);
	return (0);
}
/*
 * smbfs_rele_fid: drop one reference on this node's open handle.
 *
 * When the last reference goes away, close the wire object
 * (find handle for directories, FID for files), reset n_ovtype
 * to VNON, invalidate changed attributes, and release the
 * credentials saved at open time.
 *
 * Caller must hold r_lkserlock as writer.
 */
static void
smbfs_rele_fid(smbnode_t *np, struct smb_cred *scred)
{
	cred_t *oldcr;
	struct smbfs_fctx *fctx;
	int error;
	smb_fh_t *ofid;

	error = 0;
	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));

	switch (np->n_ovtype) {
	case VDIR:
		ASSERT(np->n_dirrefs > 0);
		/* Not the last reference: leave the find handle open. */
		if (--np->n_dirrefs)
			return;
		if ((fctx = np->n_dirseq) != NULL) {
			np->n_dirseq = NULL;
			np->n_dirofs = 0;
			error = smbfs_smb_findclose(fctx, scred);
		}
		break;
	case VREG:
		ASSERT(np->n_fidrefs > 0);
		/* Not the last reference: leave the FID open. */
		if (--np->n_fidrefs)
			return;
		if ((ofid = np->n_fid) != NULL) {
			np->n_fid = NULL;
			smb_fh_rele(ofid);
		}
		break;
	default:
		SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
		break;
	}
	if (error) {
		/* Close errors are only logged; nothing else we can do. */
		SMBVDEBUG("error %d closing %s\n",
		    error, np->n_rpath);
	}

	/* The node is fully closed now. */
	np->n_ovtype = VNON;

	mutex_enter(&np->r_statelock);
	/* Our writes changed attributes; force a refetch next time. */
	if (np->n_flag & NATTRCHANGED)
		smbfs_attrcache_rm_locked(np);
	oldcr = np->r_cred;
	np->r_cred = NULL;
	mutex_exit(&np->r_statelock);
	if (oldcr != NULL)
		crfree(oldcr);
}
/*
 * smbfs_read: VOP_READ for smbfs.
 *
 * Clamps the request at the (cached) file size, then either reads
 * directly over the wire (VNOCACHE / direct I/O with no mappings)
 * or goes through the page cache via vpm/segmap.  uio_resid is
 * temporarily reduced by the amount past EOF and restored before
 * returning so the caller sees the correct residual.
 */
static int
smbfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	struct smb_cred scred;
	struct vattr va;
	smbnode_t *np;
	smbmntinfo_t *smi;
	offset_t endoff;
	ssize_t past_eof;
	int error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);
	/* Must have been opened (have an FID) to read. */
	if (np->n_fid == NULL)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));

	if (vp->v_type != VREG)
		return (EISDIR);
	if (uiop->uio_resid == 0)
		return (0);

	/* Reject negative offsets and offset+len overflow. */
	endoff = uiop->uio_loffset + uiop->uio_resid;
	if (uiop->uio_loffset < 0 || endoff < 0)
		return (EINVAL);

	/* Get fresh attributes so the EOF clamp below is accurate. */
	va.va_mask = AT_SIZE | AT_MTIME;
	if (error = smbfsgetattr(vp, &va, cr))
		return (error);

	/* Starting at or beyond EOF: nothing to read. */
	if (uiop->uio_loffset >= va.va_size)
		return (0);

	/*
	 * Trim the request at EOF; the trimmed amount is added back
	 * to uio_resid on all return paths below.
	 */
	if (endoff > va.va_size) {
		past_eof = (ssize_t)(endoff - va.va_size);
		uiop->uio_resid -= past_eof;
	} else
		past_eof = 0;

	/*
	 * Bypass the page cache when caching is disabled and there
	 * are no mappings or cached pages that must stay coherent.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {

		/* Shared lock for the duration of the wire I/O. */
		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
		    SMBINTR(vp)))
			return (EINTR);
		smb_credinit(&scred, cr);

		error = smb_rwuio(np->n_fid, UIO_READ,
		    uiop, &scred, smb_timo_read);

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);

		uiop->uio_resid += past_eof;
		return (error);
	}

#ifdef _KERNEL
	/* Cached path: copy through vpm or segmap, one MAXBSIZE at a time. */
	do {
		caddr_t base;
		u_offset_t off;
		size_t n;
		int on;
		uint_t flags;

		off = uiop->uio_loffset & MAXBMASK;
		on = uiop->uio_loffset & MAXBOFFSET;
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		error = smbfs_validate_caches(vp, cr);
		if (error)
			break;

		if (vpm_enable) {
			error = vpm_data_copy(vp, off + on, n, uiop,
			    1, NULL, 0, S_READ);
		} else {
			base = segmap_getmapflt(segkmap, vp, off + on, n, 1,
			    S_READ);
			error = uiomove(base + on, n, UIO_READ, uiop);
		}

		if (!error) {
			mutex_enter(&np->r_statelock);
			/*
			 * Sequential read of a full block, or we just
			 * hit EOF: unlikely to be needed again soon.
			 */
			if (n + on == MAXBSIZE ||
			    uiop->uio_loffset == np->r_size)
				flags = SM_DONTNEED;
			else
				flags = 0;
			mutex_exit(&np->r_statelock);
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
		}
	} while (!error && uiop->uio_resid > 0);
#else
	error = ENOSYS;
#endif

	/* Restore the residual we trimmed at EOF. */
	uiop->uio_resid += past_eof;
	return (error);
}
/*
 * smbfs_write: VOP_WRITE for smbfs.
 *
 * Handles append mode, enforces the resource-control file-size
 * limit (raising SIGXFSZ via rctl_action when exceeded), and then
 * writes either directly over the wire (VNOCACHE / direct I/O, or
 * as a fallback when a cached write hits EACCES) or through the
 * page cache via vpm/segmap.  On error, uio state is rewound to
 * the start of the failed chunk.
 */
static int
smbfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	struct smb_cred scred;
	struct vattr va;
	smbnode_t *np;
	smbmntinfo_t *smi;
	offset_t endoff, limit;
	ssize_t past_limit;
	int error, timo;
	u_offset_t last_off;
	size_t last_resid;
#ifdef _KERNEL
	uint_t bsize;
#endif

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);
	/* Must have been opened (have an FID) to write. */
	if (np->n_fid == NULL)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));

	if (vp->v_type != VREG)
		return (EISDIR);
	if (uiop->uio_resid == 0)
		return (0);

	/*
	 * For append or sync writes, drop cached attributes first so
	 * the size/attrs used below come fresh from the server.
	 */
	if (ioflag & (FAPPEND | FSYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attrcache_remove(np);
		}
	}
	if (ioflag & FAPPEND) {
		/* Appending: start the write at the current EOF. */
		va.va_mask = AT_SIZE;
		if (error = smbfsgetattr(vp, &va, cr))
			return (error);
		uiop->uio_loffset = va.va_size;
	}

	/* Reject negative offsets and offset+len overflow. */
	endoff = uiop->uio_loffset + uiop->uio_resid;
	if (uiop->uio_loffset < 0 || endoff < 0)
		return (EINVAL);

	limit = uiop->uio_llimit;
	if (limit == RLIM64_INFINITY)
		limit = MAXOFFSET_T;

	if (uiop->uio_loffset >= limit) {
#ifdef _KERNEL
		/* Over the RLIMIT_FSIZE rctl: deliver SIGXFSZ as needed. */
		proc_t *p = ttoproc(curthread);

		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
		    p->p_rctls, p, RCA_UNSAFE_SIGINFO);
		mutex_exit(&p->p_lock);
#endif
		return (EFBIG);
	}

	/*
	 * Trim the request at the file-size limit; the trimmed
	 * amount is added back to uio_resid before returning.
	 */
	if (endoff > limit) {
		past_limit = (ssize_t)(endoff - limit);
		uiop->uio_resid -= past_limit;
	} else
		past_limit = 0;

	/*
	 * Bypass the page cache when caching is disabled and there
	 * are no mappings or cached pages that must stay coherent.
	 * Cached writes also jump here (smbfs_fwrite) on EACCES.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {

#ifdef _KERNEL
smbfs_fwrite:
#endif
		if (np->r_flags & RSTALE) {
			last_resid = uiop->uio_resid;
			last_off = uiop->uio_loffset;
			error = np->r_error;
			/*
			 * A close may have cleared r_error; if so,
			 * propagate ESTALE instead of success.
			 */
			if (error == 0)
				error = ESTALE;
			goto bottom;
		}

		/* Writes that extend the file may take longer. */
		timo = smb_timo_write;
		if (endoff > np->r_size)
			timo = smb_timo_append;

		/* Shared lock for the duration of the wire I/O. */
		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
		    SMBINTR(vp)))
			return (EINTR);
		smb_credinit(&scred, cr);

		error = smb_rwuio(np->n_fid, UIO_WRITE,
		    uiop, &scred, timo);

		if (error == 0) {
			mutex_enter(&np->r_statelock);
			np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
			if (uiop->uio_loffset > (offset_t)np->r_size)
				np->r_size = (len_t)uiop->uio_loffset;
			mutex_exit(&np->r_statelock);
			if (ioflag & (FSYNC | FDSYNC)) {
				/* Flush errors are best-effort here. */
				(void) smbfsflush(np, &scred);
			}
		}

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);

		uiop->uio_resid += past_limit;
		return (error);
	}

#ifdef _KERNEL
	/* Cached path: copy through vpm or segmap, one MAXBSIZE at a time. */
	bsize = vp->v_vfsp->vfs_bsize;

	do {
		caddr_t base;
		u_offset_t off;
		size_t n;
		int on;
		uint_t flags;

		off = uiop->uio_loffset & MAXBMASK;
		on = uiop->uio_loffset & MAXBOFFSET;
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		/* Remember uio state so we can rewind this chunk on error. */
		last_resid = uiop->uio_resid;
		last_off = uiop->uio_loffset;

		if (np->r_flags & RSTALE) {
			error = np->r_error;
			/* See the RSTALE comment in the direct path above. */
			if (error == 0)
				error = ESTALE;
			break;
		}

		/* Wait out anyone flushing pages for getattr (r_gcount). */
		mutex_enter(&np->r_statelock);
		while (np->r_gcount > 0) {
			if (SMBINTR(vp)) {
				klwp_t *lwp = ttolwp(curthread);

				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (!cv_wait_sig(&np->r_cv, &np->r_statelock)) {
					mutex_exit(&np->r_statelock);
					if (lwp != NULL)
						lwp->lwp_nostop--;
					error = EINTR;
					goto bottom;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&np->r_cv, &np->r_statelock);
		}
		mutex_exit(&np->r_statelock);

		/* Fault the user pages in now to avoid deadlock later. */
		uio_prefaultpages((long)n, uiop);

		if (vpm_enable) {
			error = smbfs_writenp(np, NULL, n, uiop, 0);
		} else {
			if (segmap_kpm) {
				int pon = uiop->uio_loffset & PAGEOFFSET;
				size_t pn = MIN(PAGESIZE - pon,
				    uiop->uio_resid);
				int pagecreate;

				/*
				 * We can create (rather than fault in) the
				 * page if we will fill it completely.
				 */
				mutex_enter(&np->r_statelock);
				pagecreate = (pon == 0) && (pn == PAGESIZE ||
				    uiop->uio_loffset + pn >= np->r_size);
				mutex_exit(&np->r_statelock);

				base = segmap_getmapflt(segkmap, vp, off + on,
				    pn, !pagecreate, S_WRITE);

				error = smbfs_writenp(np, base + pon, n, uiop,
				    pagecreate);

			} else {
				base = segmap_getmapflt(segkmap, vp, off + on,
				    n, 0, S_READ);
				error = smbfs_writenp(np, base + on, n, uiop,
				    0);
			}
		}

		if (!error) {
			/* Decide how eagerly to push the pages out. */
			if (smi->smi_flags & SMI_NOAC)
				flags = SM_WRITE;
			else if ((uiop->uio_loffset % bsize) == 0 ||
			    IS_SWAPVP(vp)) {
				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
			} else
				flags = 0;
			if ((ioflag & (FSYNC|FDSYNC)) ||
			    (np->r_flags & ROUTOFSPACE)) {
				flags &= ~SM_ASYNC;
				flags |= SM_WRITE;
			}
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
			/*
			 * EACCES from the cached path: retry this
			 * chunk as a direct (uncached) write.
			 */
			if (error == EACCES)
				goto smbfs_fwrite;
		}
	} while (!error && uiop->uio_resid > 0);
#else
	last_resid = uiop->uio_resid;
	last_off = uiop->uio_loffset;
	error = ENOSYS;
#endif

bottom:
	if (error) {
		/* Rewind uio to the start of the failed chunk. */
		uiop->uio_resid = last_resid + past_limit;
		uiop->uio_loffset = last_off;
	} else {
		uiop->uio_resid += past_limit;
	}

	return (error);
}
#ifdef _KERNEL
/*
 * smbfs_writenp: copy up to tcount bytes from uio into the page
 * cache for np, one page at a time (like nfs_writenp).
 *
 * base/pgcreated are used only for the segmap (non-vpm) path:
 * base is the mapped kernel address for the data, and pgcreated
 * says the caller already created the first page.  Pages that
 * will be completely overwritten (or extend past EOF) are created
 * rather than faulted in; any unwritten tail of a created page is
 * zeroed.  RMODINPROGRESS marks the block so putpage won't push a
 * half-modified page.
 */
int
smbfs_writenp(smbnode_t *np, caddr_t base, int tcount, struct uio *uio,
    int pgcreated)
{
	int pagecreate;
	int n;
	int saved_n;
	caddr_t saved_base;
	u_offset_t offset;
	int error;
	int sm_error;
	vnode_t *vp = SMBTOV(np);

	ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));
	if (!vpm_enable) {
		ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
	}

	do {
		offset = uio->uio_loffset;
		pagecreate = 0;

		/* Bytes remaining in the current page. */
		n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);

		/*
		 * Create (don't fault in) the page when we start at a
		 * page boundary and will fill it or extend past EOF.
		 */
		mutex_enter(&np->r_statelock);
		pagecreate = pgcreated ||
		    ((offset & PAGEOFFSET) == 0 &&
		    (n == PAGESIZE || ((offset + n) >= np->r_size)));
		mutex_exit(&np->r_statelock);

		if (!vpm_enable && pagecreate) {
			if (pgcreated == 0)
				(void) segmap_pagecreate(segkmap, base,
				    (uint_t)n, 1);
			/* Remember where to unlock after the copy. */
			saved_base = base;
			saved_n = n;
		}

		/*
		 * Flag the block as modify-in-progress so putpage
		 * won't write out a partially-updated page.
		 */
		ASSERT(!(np->r_flags & RMODINPROGRESS));
		mutex_enter(&np->r_statelock);
		np->r_flags |= RMODINPROGRESS;
		np->r_modaddr = (offset & MAXBMASK);
		mutex_exit(&np->r_statelock);

		if (vpm_enable) {
			error = vpm_data_copy(vp, offset, n, uio,
			    !pagecreate, NULL, 0, S_WRITE);
		} else {
			error = uiomove(base, n, UIO_WRITE, uio);
		}

		/*
		 * Even on a partial copy, mark the page dirty and
		 * extend r_size; uio reflects what actually moved.
		 */
		mutex_enter(&np->r_statelock);
		if (np->r_size < uio->uio_loffset)
			np->r_size = uio->uio_loffset;
		np->r_flags &= ~RMODINPROGRESS;
		np->r_flags |= RDIRTY;
		mutex_exit(&np->r_statelock);

		/* n becomes the number of bytes actually copied. */
		n = (int)(uio->uio_loffset - offset);
		if (!vpm_enable) {
			base += n;
		}
		tcount -= n;

		if (!vpm_enable && pagecreate) {
			/*
			 * A created page we didn't fill completely
			 * must have its tail zeroed.
			 */
			if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
				(void) kzero(base, PAGESIZE - n);

			if (pgcreated) {
				/* Caller created the page; caller unlocks. */
				pgcreated = 0;
			} else {
				/* Unlock the page segmap_pagecreate locked. */
				sm_error = segmap_fault(kas.a_hat, segkmap,
				    saved_base, saved_n,
				    F_SOFTUNLOCK, S_WRITE);
				if (error == 0)
					error = sm_error;
			}
		}
	} while (tcount > 0 && error == 0);

	return (error);
}
/*
 * smbfs_rdwrlbn: do block I/O on a list of pages.
 *
 * Wraps the pages in a buf via pageio_setup, maps them into
 * kernel address space, and hands the buf to smbfs_bio.
 * Async writes are done asynchronously only while free memory
 * is plentiful (freemem > desfree); otherwise they are forced
 * synchronous to apply back-pressure.
 */
static int
smbfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
    int flags, cred_t *cr)
{
	smbmntinfo_t *smi = VTOSMI(vp);
	struct buf *bp;
	int error;
	int sync;

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	bp = pageio_setup(pp, len, vp, flags);
	ASSERT(bp != NULL);

	/* pageio bufs carry no device; fill in file identity instead. */
	ASSERT(bp->b_un.b_addr == 0);
	bp->b_edev = 0;
	bp->b_dev = 0;
	bp->b_lblkno = lbtodb(off);
	bp->b_file = vp;
	bp->b_offset = (offset_t)off;
	bp_mapin(bp);

	/* Async writes only while memory pressure is low. */
	if ((flags & (B_WRITE|B_ASYNC)) == (B_WRITE|B_ASYNC) &&
	    freemem > desfree) {
		sync = 0;
	} else {
		sync = 1;
	}

	error = smbfs_bio(bp, sync, cr);

	bp_mapout(bp);
	pageio_done(bp);

	return (error);
}
/*
 * smbfs_bio: perform the wire I/O described by a (mapped-in) buf.
 *
 * Builds a uio over the buf's kernel address and calls smb_rwuio.
 * Reads entirely beyond r_size return the private SMBFS_EOF
 * sentinel; reads straddling EOF are clamped and the tail of the
 * buffer is zeroed.  Write errors are recorded in r_error/RSTALE
 * so later operations can report them.
 */
static int
smbfs_bio(struct buf *bp, int sync, cred_t *cr)
{
	struct iovec aiov[1];
	struct uio auio;
	struct smb_cred scred;
	smbnode_t *np = VTOSMB(bp->b_vp);
	smbmntinfo_t *smi = np->n_mount;
	offset_t offset;
	offset_t endoff;
	size_t count;
	size_t past_eof;
	int error;

	ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);

	offset = ldbtob(bp->b_lblkno);
	count = bp->b_bcount;
	endoff = offset + count;
	if (offset < 0 || endoff < 0)
		return (EINVAL);

	/*
	 * Clamp the request at the cached file size; fully past-EOF
	 * reads get the sentinel, past-EOF writes are invalid.
	 */
	mutex_enter(&np->r_statelock);
	if (offset >= np->r_size) {
		mutex_exit(&np->r_statelock);
		if (bp->b_flags & B_READ) {
			return (SMBFS_EOF);
		} else {
			return (EINVAL);
		}
	}
	if (endoff > np->r_size) {
		past_eof = (size_t)(endoff - np->r_size);
		count -= past_eof;
	} else
		past_eof = 0;
	mutex_exit(&np->r_statelock);
	ASSERT(count > 0);

	/* Build a single-iovec uio over the buf's mapped address. */
	aiov[0].iov_base = bp->b_un.b_addr;
	aiov[0].iov_len = count;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_loffset = offset;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_fmode = 0;
	auio.uio_resid = count;

	/* Shared lock for the duration of the wire I/O. */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
	    smi->smi_flags & SMI_INT))
		return (EINTR);
	smb_credinit(&scred, cr);

	DTRACE_IO1(start, struct buf *, bp);

	if (bp->b_flags & B_READ) {

		error = smb_rwuio(np->n_fid, UIO_READ,
		    &auio, &scred, smb_timo_read);

		/* A short read with no error still counts as EIO. */
		bp->b_error = error;
		bp->b_resid = auio.uio_resid;
		if (!error && auio.uio_resid != 0)
			error = EIO;
		/* Zero the part of the buffer beyond EOF. */
		if (!error && past_eof != 0) {
			bzero(bp->b_un.b_addr + count, past_eof);
		}
	} else {

		error = smb_rwuio(np->n_fid, UIO_WRITE,
		    &auio, &scred, smb_timo_write);

		/* A short write with no error still counts as EIO. */
		bp->b_error = error;
		bp->b_resid = auio.uio_resid;
		if (!error && auio.uio_resid != 0)
			error = EIO;
		if (!error && sync) {
			(void) smbfsflush(np, &scred);
		}
	}

	/*
	 * Record failures in the node so later calls can report
	 * them; ESTALE additionally marks the node stale.
	 */
	if (error != 0) {
		mutex_enter(&np->r_statelock);
		if (error == ESTALE)
			np->r_flags |= RSTALE;
		if (!np->r_error)
			np->r_error = error;
		mutex_exit(&np->r_statelock);
		bp->b_flags |= B_ERROR;
	}

	DTRACE_IO1(done, struct buf *, bp);

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	if (error == ESTALE)
		smbfs_attrcache_remove(np);

	return (error);
}
#endif
/*
 * smbfs_ioctl: VOP_IOCTL for smbfs.
 *
 * Supports flush (_FIOFFS), direct-I/O control (_FIODIRECTIO),
 * and the smbfs security-descriptor get/set ioctls.  The get/set
 * direct-I/O info ioctls (_FIOGDIO/_FIOSDIO) are accepted as
 * no-ops.  Everything else returns ENOTTY.
 */
static int
smbfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag,
    cred_t *cr, int *rvalp, caller_context_t *ct)
{
	int error;
	smbmntinfo_t *smi;

	smi = VTOSMI(vp);

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	switch (cmd) {

	case _FIOFFS:
		/* Flush file system: push everything for this node. */
		error = smbfs_fsync(vp, 0, cr, ct);
		break;

	case _FIOGDIO:
	case _FIOSDIO:
		/* Accepted but nothing to do. */
		error = 0;
		break;

#if 0	/* Todo - SMB2 FSCTL_SRV_REQUEST_RESUME_KEY */
	case _FIO_SEEK_DATA:
	case _FIO_SEEK_HOLE:
#endif

	case _FIODIRECTIO:
		error = smbfs_directio(vp, (int)arg, cr);
		break;

	case SMBFSIO_GETSD:
		/* Get the Windows security descriptor. */
		error = smbfs_acl_iocget(vp, arg, flag, cr);
		break;

	case SMBFSIO_SETSD:
		/* Set the Windows security descriptor. */
		error = smbfs_acl_iocset(vp, arg, flag, cr);
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
/*
 * smbfs_getattr: VOP_GETATTR for smbfs.
 *
 * With ATTR_HINT and a mask restricted to AT_SIZE/AT_FSID/AT_RDEV,
 * answer from cached node state without going to the server.
 * When mtime is wanted and there are dirty cached pages, flush
 * them first (under r_gcount so writers wait) so the server-side
 * mtime reflects our writes.  Everything else is delegated to
 * smbfsgetattr().
 *
 * Fix: the ATTR_HINT fast path tested the mask bits with bitwise
 * OR ("va_mask | AT_SIZE"), which is always true, so all three
 * fields were filled regardless of what was requested.  Use
 * bitwise AND so only the requested attributes are set.
 */
static int
smbfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	smbnode_t *np;
	smbmntinfo_t *smi;
	int error;

	smi = VTOSMI(vp);

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	np = VTOSMB(vp);

	/*
	 * ATTR_HINT fast path: if only size/fsid/rdev are wanted,
	 * answer from local state without a wire round-trip.
	 */
	if (flags & ATTR_HINT) {
		if (vap->va_mask ==
		    (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
			mutex_enter(&np->r_statelock);
			if (vap->va_mask & AT_SIZE)
				vap->va_size = np->r_size;
			if (vap->va_mask & AT_FSID)
				vap->va_fsid = vp->v_vfsp->vfs_dev;
			if (vap->va_mask & AT_RDEV)
				vap->va_rdev = vp->v_rdev;
			mutex_exit(&np->r_statelock);
			return (0);
		}
	}

	/*
	 * If asked for mtime and we have dirty pages, push them so
	 * the mtime we fetch reflects our cached writes.  r_gcount
	 * makes concurrent writers wait while we flush.
	 */
	if (vap->va_mask & AT_MTIME) {
		if (vn_has_cached_data(vp) &&
		    ((np->r_flags & RDIRTY) != 0)) {
			mutex_enter(&np->r_statelock);
			np->r_gcount++;
			mutex_exit(&np->r_statelock);
			error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
			mutex_enter(&np->r_statelock);
			/* Only out-of-space errors are worth recording. */
			if (error && (error == ENOSPC || error == EDQUOT)) {
				if (!np->r_error)
					np->r_error = error;
			}
			if (--np->r_gcount == 0)
				cv_broadcast(&np->r_cv);
			mutex_exit(&np->r_statelock);
		}
	}

	return (smbfsgetattr(vp, vap, cr));
}
/*
 * smbfs_setattr: VOP_SETATTR for smbfs.
 *
 * Validates the request (no AT_NOSET bits, not read-only),
 * builds the "old" attributes the policy check needs (using the
 * mount's fixed uid/gid), runs secpolicy_vnode_setattr, applies
 * uid/gid changes via the ACL code when enabled, and finally
 * does the real work in smbfssetattr().
 */
static int
smbfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	vfs_t *vfsp;
	smbmntinfo_t *smi;
	int error;
	uint_t mask;
	struct vattr oldva;

	vfsp = vp->v_vfsp;
	smi = VFTOSMI(vfsp);

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	mask = vap->va_mask;
	/* Attributes that can never be set explicitly. */
	if (mask & AT_NOSET)
		return (EINVAL);

	if (vfsp->vfs_flag & VFS_RDONLY)
		return (EROFS);

	/*
	 * Build the current attributes for the policy check.
	 * Owner/group come from the mount options, not the server.
	 */
	bzero(&oldva, sizeof (oldva));
	oldva.va_mask = AT_TYPE | AT_MODE;
	error = smbfsgetattr(vp, &oldva, cr);
	if (error)
		return (error);

	oldva.va_mask |= AT_UID | AT_GID;
	oldva.va_uid = smi->smi_uid;
	oldva.va_gid = smi->smi_gid;

	error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
	    smbfs_accessx, vp);
	if (error)
		return (error);

	/* Owner/group changes go through the ACL code when enabled. */
	if (mask & (AT_UID | AT_GID)) {
		if (smi->smi_flags & SMI_ACL)
			error = smbfs_acl_setids(vp, vap, cr);
		else
			error = ENOSYS;
		if (error != 0) {
			/* Log and continue; size/time changes still apply. */
			SMBVDEBUG("error %d seting UID/GID on %s",
			    error, VTOSMB(vp)->n_rpath);
		}
	}

	error = smbfssetattr(vp, vap, flags, cr);

#ifdef SMBFS_VNEVENT
	if (error == 0 && (vap->va_mask & AT_SIZE) && vap->va_size == 0)
		vnevent_truncate(vp, ct);
#endif

	return (error);
}
/*
 * smbfssetattr: common attribute-setting work for smbfs.
 *
 * Flushes dirty pages if needed, opens a temporary FID with just
 * the rights required by the requested changes, then sets the
 * size (smbfs_smb_setfsize) and/or DOS attributes and times
 * (smbfs_smb_setfattr).  On any successful change the attribute
 * cache is invalidated, and a size change also invalidates pages
 * beyond the new EOF.
 *
 * Policy checks are the caller's job (see smbfs_setattr).
 */
static int
smbfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
{
	int error = 0;
	smbnode_t *np = VTOSMB(vp);
	smbmntinfo_t *smi = np->n_mount;
	uint_t mask = vap->va_mask;
	struct timespec *mtime, *atime;
	struct smb_cred scred;
	int modified = 0;
	smb_fh_t *fid = NULL;
	uint32_t rights = 0;
	uint32_t dosattr = 0;

	ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);

	/* The xattr directory itself has no settable attributes. */
	if (vp->v_flag & V_XATTRDIR)
		return (0);

	/* Named streams: only size can be set; times are ignored. */
	if (np->n_flag & N_XATTR) {
		if (mask & AT_TIMES)
			SMBVDEBUG("ignore set time on xattr\n");
		mask &= AT_SIZE;
	}

	/*
	 * Push dirty pages first so a server-side mtime update
	 * isn't immediately clobbered by our later writes.
	 */
	if (vn_has_cached_data(vp) &&
	    ((np->r_flags & RDIRTY) ||
	    np->r_count > 0 ||
	    np->r_mapcnt > 0)) {
		ASSERT(vp->v_type != VCHR);
		error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
		if (error && (error == ENOSPC || error == EDQUOT)) {
			mutex_enter(&np->r_statelock);
			if (!np->r_error)
				np->r_error = error;
			mutex_exit(&np->r_statelock);
		}
	}

	smb_credinit(&scred, cr);

	/* Translate extended (system) attributes to DOS attr bits. */
	if (mask & AT_XVATTR)
		dosattr = xvattr_to_dosattr(np, vap);

	/* Request only the access rights the changes below need. */
	if (dosattr || (mask & (AT_ATIME | AT_MTIME))) {
		rights |=
		    SA_RIGHT_FILE_WRITE_ATTRIBUTES;
	}
	if (mask & AT_SIZE) {
		rights |=
		    SA_RIGHT_FILE_WRITE_DATA |
		    SA_RIGHT_FILE_APPEND_DATA;
	}
	if (rights != 0) {
		error = smbfs_smb_tmpopen(np, rights, &scred, &fid);
		if (error) {
			SMBVDEBUG("error %d opening %s\n",
			    error, np->n_rpath);
			goto out;
		}
		ASSERT(fid != NULL);
	}

	if (mask & AT_SIZE) {
		ASSERT(fid != NULL);
		error = smbfs_smb_setfsize(smi->smi_share, fid,
		    vap->va_size, &scred);
		if (error) {
			SMBVDEBUG("setsize error %d file %s\n",
			    error, np->n_rpath);
		} else {
			/* Keep the cached size in sync immediately. */
			mutex_enter(&np->r_statelock);
			np->r_size = vap->va_size;
			np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
			mutex_exit(&np->r_statelock);
			modified = 1;
		}
	}

	mtime = ((mask & AT_MTIME) ? &vap->va_mtime : 0);
	atime = ((mask & AT_ATIME) ? &vap->va_atime : 0);

	if (dosattr || mtime || atime) {
		ASSERT(fid != NULL);
		error = smbfs_smb_setfattr(smi->smi_share, fid,
		    dosattr, mtime, atime, &scred);
		if (error) {
			SMBVDEBUG("set times error %d file %s\n",
			    error, np->n_rpath);
		} else {
			modified = 1;
		}
	}

out:
	if (fid != NULL)
		smbfs_smb_tmpclose(np, fid);

	smb_credrele(&scred);

	if (modified) {
		/*
		 * Invalidate cached attributes, and on truncation
		 * also the pages beyond the new end of file.
		 */
		smbfs_attrcache_remove(np);
		if (mask & AT_SIZE) {
			smbfs_invalidate_pages(vp,
			    (vap->va_size & PAGEMASK), cr);
		}
	}

	return (error);
}
/*
 * xvattr_to_dosattr: merge any requested system attributes from an
 * xvattr into this node's current DOS attribute bits.
 *
 * Returns the new DOS attribute word, or 0 when the caller did not
 * request any of the four supported bits (archive, system,
 * read-only, hidden).  Each bit actually applied is acknowledged
 * via XVA_SET_RTN.  An all-zero result is mapped to SMB_EFA_NORMAL,
 * which is how "no attributes" is expressed on the wire.
 */
static uint32_t
xvattr_to_dosattr(smbnode_t *np, struct vattr *vap)
{
	xvattr_t *xvap = (xvattr_t *)vap;
	xoptattr_t *xoap;
	uint32_t dos = np->r_attr.fa_attr;
	boolean_t changed = B_FALSE;

	if ((xoap = xva_getxoptattr(xvap)) == NULL)
		return (0);

	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
		dos = xoap->xoa_archive ?
		    (dos | SMB_FA_ARCHIVE) : (dos & ~SMB_FA_ARCHIVE);
		XVA_SET_RTN(xvap, XAT_ARCHIVE);
		changed = B_TRUE;
	}
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
		dos = xoap->xoa_system ?
		    (dos | SMB_FA_SYSTEM) : (dos & ~SMB_FA_SYSTEM);
		XVA_SET_RTN(xvap, XAT_SYSTEM);
		changed = B_TRUE;
	}
	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
		dos = xoap->xoa_readonly ?
		    (dos | SMB_FA_RDONLY) : (dos & ~SMB_FA_RDONLY);
		XVA_SET_RTN(xvap, XAT_READONLY);
		changed = B_TRUE;
	}
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
		dos = xoap->xoa_hidden ?
		    (dos | SMB_FA_HIDDEN) : (dos & ~SMB_FA_HIDDEN);
		XVA_SET_RTN(xvap, XAT_HIDDEN);
		changed = B_TRUE;
	}

	if (changed == B_FALSE)
		return (0);

	/* All bits clear is expressed as "normal" on the wire. */
	if (dos == 0)
		dos = SMB_EFA_NORMAL;

	return (dos);
}
/*
 * smbfs_access_rwx: common UNIX-style access check.
 *
 * smbfs has no server-side mode bits, so access is judged against
 * the synthetic owner/group/mode from the mount options (smi_uid,
 * smi_gid, smi_fmode/smi_dmode).  The caller's uid/gid selects the
 * owner/group/other triplet via the usual 3-bit shift, and the
 * final decision is delegated to secpolicy_vnode_access2 using a
 * static template vnode of the right type (it only looks at
 * v_type, for the VDIR execute-override).
 */
static int
smbfs_access_rwx(vfs_t *vfsp, int vtype, int mode, cred_t *cr)
{
	/* Templates: secpolicy_vnode_access2 reads only v_type. */
	static const vnode_t tmpl_vdir = { .v_type = VDIR };
	static const vnode_t tmpl_vreg = { .v_type = VREG };
	vattr_t va;
	vnode_t *tvp;
	struct smbmntinfo *smi = VFTOSMI(vfsp);
	int shift = 0;

	/* Synthesize attributes from the mount options. */
	bzero(&va, sizeof (va));
	va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID;
	va.va_type = vtype;
	va.va_mode = (vtype == VDIR) ?
	    smi->smi_dmode : smi->smi_fmode;
	va.va_uid = smi->smi_uid;
	va.va_gid = smi->smi_gid;

	/*
	 * Writes to a read-only FS are refused, except for device
	 * nodes whose "write" doesn't modify the file system.
	 */
	if ((mode & VWRITE) &&
	    (vfsp->vfs_flag & VFS_RDONLY) &&
	    !(vtype == VCHR || vtype == VBLK || vtype == VFIFO))
		return (EROFS);

	/* Mandatory-locking files are not accessible this way. */
	if ((mode & (VWRITE | VREAD | VEXEC)) &&
	    va.va_type == VREG && MANDMODE(va.va_mode))
		return (EACCES);

	/* Select owner (0), group (3) or other (6) permission bits. */
	if (crgetuid(cr) != va.va_uid) {
		shift += 3;
		if (!groupmember(va.va_gid, cr))
			shift += 3;
	}

	tvp = (va.va_type == VDIR) ?
	    (vnode_t *)&tmpl_vdir :
	    (vnode_t *)&tmpl_vreg;

	return (secpolicy_vnode_access2(cr, tvp, va.va_uid,
	    va.va_mode << shift, mode));
}
/*
 * smbfs_accessx: access-check callback for secpolicy_vnode_setattr.
 *
 * The opaque argument is really a vnode pointer; forward to the
 * common check with that vnode's file system and type.
 */
static int
smbfs_accessx(void *arg, int mode, cred_t *cr)
{
	vnode_t *avp = arg;

	return (smbfs_access_rwx(avp->v_vfsp, avp->v_type, mode, cr));
}
/*
 * smbfs_access: VOP_ACCESS for smbfs.
 *
 * After the usual zone and liveness guards, the real decision is
 * made by the common check smbfs_access_rwx().
 */
static int
smbfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
{
	vfs_t *vfs = vp->v_vfsp;
	smbmntinfo_t *mi = VFTOSMI(vfs);

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != mi->smi_zone_ref.zref_zone ||
	    (mi->smi_flags & SMI_DEAD) != 0 ||
	    (vfs->vfs_flag & VFS_UNMOUNTED) != 0)
		return (EIO);

	return (smbfs_access_rwx(vfs, vp->v_type, mode, cr));
}
/*
 * smbfs_readlink: VOP_READLINK for smbfs.
 * SMB has no symbolic links, so this is always unsupported.
 */
static int
smbfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
{
	return (ENOSYS);
}
/*
 * smbfs_fsync: VOP_FSYNC for smbfs.
 *
 * Pushes dirty pages via putpage, then asks the server to flush
 * its write-behind data with smbfsflush().  FNODSYNC (and swap
 * vnodes) are no-ops, as is a call with neither FSYNC nor FDSYNC.
 */
static int
smbfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
	int error = 0;
	smbmntinfo_t *smi;
	smbnode_t *np;
	struct smb_cred scred;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Refuse cross-zone access and dead/unmounted file systems. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* FNODSYNC means "don't sync data"; swap files are never synced. */
	if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
		return (0);

	if ((syncflag & (FSYNC|FDSYNC)) == 0)
		return (0);

	/* Push dirty pages to the server first. */
	error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
	if (error)
		return (error);

	/* Then ask the server to commit its cached writes. */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, cr);
	error = smbfsflush(np, &scred);
	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	return (error);
}
/*
 * smbfsflush: send an SMB flush for this node if needed.
 *
 * A flush is sent only when a write went over the wire since the
 * last flush (NFLUSHWIRE) and we still hold an open FID.  A FID
 * from a previous session (vcgenid mismatch) yields ESTALE.
 * On success the NFLUSHWIRE flag is cleared.
 *
 * Caller must hold r_lkserlock (reader is sufficient).
 */
static int
smbfsflush(smbnode_t *np, struct smb_cred *scrp)
{
	struct smb_share *ssp = np->n_mount->smi_share;
	smb_fh_t *fhp;
	int error;

	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_READER));

	/* Nothing was written over the wire since the last flush. */
	if ((np->n_flag & NFLUSHWIRE) == 0)
		return (0);

	/* Need an open FID to flush through. */
	if (np->n_fidrefs == 0 || (fhp = np->n_fid) == NULL)
		return (0);

	/* The FID must belong to the current session. */
	if (fhp->fh_vcgenid != ssp->ss_vcgenid)
		return (ESTALE);

	error = smbfs_smb_flush(ssp, fhp, scrp);
	if (error == 0) {
		mutex_enter(&np->r_statelock);
		np->n_flag &= ~NFLUSHWIRE;
		mutex_exit(&np->r_statelock);
	}
	return (error);
}
/*
 * smbfs_inactive: VOP_INACTIVE for smbfs.
 *
 * Last reference to the vnode went away: wait for in-flight page
 * I/O (r_count), push any dirty pages, drop all cached pages, and
 * hand the node to the free list via smbfs_addfree().
 */
static void
smbfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	smbnode_t *np = VTOSMB(vp);
	int error;

	/* Wait for outstanding async page operations to finish. */
	mutex_enter(&np->r_statelock);
	while (np->r_count > 0)
		cv_wait(&np->r_cv, &np->r_statelock);
	mutex_exit(&np->r_statelock);

	if (vn_has_cached_data(vp)) {
		/* Push dirty pages unless a prior error makes it moot. */
		if ((np->r_flags & RDIRTY) && !np->r_error) {
			error = smbfs_putpage(vp, (u_offset_t)0, 0, 0, cr, ct);
			if (error && (error == ENOSPC || error == EDQUOT)) {
				mutex_enter(&np->r_statelock);
				if (!np->r_error)
					np->r_error = error;
				mutex_exit(&np->r_statelock);
			}
		}
		/* Toss all remaining cached pages. */
		smbfs_invalidate_pages(vp, (u_offset_t)0, cr);
	}

	smbfs_addfree(np);
}
/*
 * smbfs_lookup: VOP_LOOKUP for smbfs.
 *
 * Routes xattr lookups to smbfs_get_xattrdir, otherwise takes the
 * directory's rwlock and delegates to smbfslookup() with caching
 * allowed.  EINVAL from the name checks is reported as ENOENT,
 * which is what path-walking callers expect.
 */
static int
smbfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	vfs_t *vfs;
	smbmntinfo_t *smi;
	smbnode_t *dnp;
	int error;

	vfs = dvp->v_vfsp;
	smi = VFTOSMI(vfs);

	/* Cross-zone lookups are a permission problem, not I/O. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);

	if (smi->smi_flags & SMI_DEAD || vfs->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	dnp = VTOSMB(dvp);

	/* Extended attribute lookup goes to the xattr directory. */
	if (flags & LOOKUP_XATTR) {
		if ((vfs->vfs_flag & VFS_XATTR) == 0)
			return (EINVAL);
		error = smbfs_get_xattrdir(dvp, vpp, cr, flags);
		return (error);
	}

	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_READER, SMBINTR(dvp)))
		return (EINTR);

	error = smbfslookup(dvp, nm, vpp, cr, 1, ct);

	smbfs_rw_exit(&dnp->r_rwlock);

	/* Illegal names simply don't exist from the caller's view. */
	if (error == EINVAL)
		error = ENOENT;

	return (error);
}
/*
 * smbfslookup: the real lookup work.
 *
 * Handles "", "." and ".." locally (".." by trimming the last
 * component off the parent's remote path), rejects names that are
 * too long or contain illegal SMB characters, optionally tries
 * the node/attribute cache (cache_ok, see smbfs_fastlookup), and
 * finally asks the server via smbfs_smb_lookup and materializes
 * the result with smbfs_nget.
 *
 * Caller must hold dnp->r_rwlock (reader or writer).
 */
static int
smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
    int cache_ok, caller_context_t *ct)
{
	int error;
	int supplen;
	vnode_t *vp;
	smbnode_t *np;
	smbnode_t *dnp;
	smbmntinfo_t *smi;
	const char *ill;
	const char *name = (const char *)nm;
	int nmlen = strlen(nm);
	int rplen;
	struct smb_cred scred;
	struct smbfattr fa;

	smi = VTOSMI(dvp);
	dnp = VTOSMB(dvp);

	ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);

	/* Maximum component length we support. */
	supplen = 255;

	ASSERT(dnp->r_rwlock.count != 0);

	/* Empty name means the directory itself. */
	if (nmlen == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/* Need execute (search) permission on the directory. */
	error = smbfs_access(dvp, VEXEC, 0, cr, ct);
	if (error)
		return (error);

	/* "." also resolves to the directory itself. */
	if (nmlen == 1 && name[0] == '.') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	if (nmlen > supplen)
		return (ENAMETOOLONG);

	/*
	 * Reject names with characters SMB forbids.  Inside an
	 * xattr directory, ':' is legal, so skip the first entry
	 * of illegal_chars (which is ':').
	 */
	ill = illegal_chars;
	if (dnp->n_flag & N_XATTR)
		ill++;
	if (strpbrk(nm, ill))
		return (EINVAL);

	/*
	 * ".." is resolved locally by trimming the last component
	 * from this directory's remote path; no wire round-trip.
	 */
	if (nmlen == 2 && name[0] == '.' && name[1] == '.') {
		/* At the root, ".." is the root itself. */
		if (dvp->v_flag & VROOT) {
			VN_HOLD(dvp);
			*vpp = dvp;
			return (0);
		}
		/* Parent of an xattr dir is its owning file. */
		if (dvp->v_flag & V_XATTRDIR) {
			error = smbfs_xa_parent(dvp, vpp);
			return (error);
		}

		/* Find the last '\\' separator in the remote path. */
		rplen = dnp->n_rplen;
		ASSERT(rplen > 0);
		while (--rplen >= 0) {
			if (dnp->n_rpath[rplen] == '\\')
				break;
		}
		/* No separator left: the parent is the share root. */
		if (rplen <= 0) {
			vp = SMBTOV(smi->smi_root);
			VN_HOLD(vp);
			*vpp = vp;
			return (0);
		}
		np = smbfs_node_findcreate(smi,
		    dnp->n_rpath, rplen, NULL, 0, 0,
		    &smbfs_fattr0);
		ASSERT(np != NULL);
		vp = SMBTOV(np);
		vp->v_type = VDIR;

		*vpp = vp;
		return (0);
	}

	/* Try the node/attribute cache first when allowed. */
	if (cache_ok) {
		error = smbfslookup_cache(dvp, nm, nmlen, &vp, cr);
		if (error)
			return (error);
		if (vp != NULL) {
			*vpp = vp;
			return (0);
		}
	}

	/* Cache miss: ask the server. */
	smb_credinit(&scred, cr);
	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fa, &scred);
	smb_credrele(&scred);

	/*
	 * The server says dvp is no longer a directory: purge its
	 * cached attributes and any cached children.
	 */
	if (error == ENOTDIR) {
		smbfs_attrcache_remove(dnp);
		smbfs_attrcache_prune(dnp);
	}
	if (error)
		goto out;

	error = smbfs_nget(dvp, name, nmlen, &fa, &vp);
	if (error)
		goto out;

	*vpp = vp;

out:
	/* smbfs_smb_lookup may have allocated a new (canonical) name. */
	if (name != nm)
		smbfs_name_free(name, nmlen);

	return (error);
}
#ifdef DEBUG
/* Statistics for the lookup cache (DEBUG kernels only). */
int smbfs_lookup_cache_calls = 0;
int smbfs_lookup_cache_error = 0;
int smbfs_lookup_cache_miss = 0;
int smbfs_lookup_cache_stale = 0;
int smbfs_lookup_cache_hits = 0;
#endif
/*
 * smbfslookup_cache: try to satisfy a lookup from cached nodes.
 *
 * First refreshes the directory's attributes (smbfsgetattr), which
 * as a side effect purges this directory's cached children if the
 * directory changed on the server.  Then looks for an existing
 * node (without creating one: NULL fap) and returns it only if its
 * attribute cache is still valid.
 *
 * *vpp is set to a held vnode on a hit, or NULL on a miss; the
 * return value is nonzero only on a real error.
 */
static int
smbfslookup_cache(vnode_t *dvp, char *nm, int nmlen,
    vnode_t **vpp, cred_t *cr)
{
	struct vattr va;
	smbnode_t *dnp;
	smbnode_t *np;
	vnode_t *vp;
	int error;
	char sep;

	dnp = VTOSMB(dvp);
	*vpp = NULL;

#ifdef DEBUG
	smbfs_lookup_cache_calls++;
#endif

	/*
	 * Refresh the directory's attributes; if it changed, this
	 * invalidates its cached children, so a stale hit below
	 * becomes impossible.
	 */
	va.va_mask = AT_TYPE | AT_MODE;
	error = smbfsgetattr(dvp, &va, cr);
	if (error) {
#ifdef DEBUG
		smbfs_lookup_cache_error++;
#endif
		return (error);
	}

	/* Find (never create: fap == NULL) the child node. */
	sep = SMBFS_DNP_SEP(dnp);
	np = smbfs_node_findcreate(dnp->n_mount,
	    dnp->n_rpath, dnp->n_rplen,
	    nm, nmlen, sep, NULL);
	if (np == NULL) {
#ifdef DEBUG
		smbfs_lookup_cache_miss++;
#endif
		return (0);
	}

	/* Found a node, but only use it while its attrs are fresh. */
	vp = SMBTOV(np);
	if (np->r_attrtime <= gethrtime()) {
#ifdef DEBUG
		smbfs_lookup_cache_stale++;
#endif
		VN_RELE(vp);
		return (0);
	}

#ifdef DEBUG
	smbfs_lookup_cache_hits++;
#endif
	*vpp = vp;
	return (0);
}
/*
 * Create the regular file "nm" in directory dvp, or (when it already
 * exists and exclusive != EXCL) open and possibly truncate it.  On
 * success a held vnode for the file is returned in *vpp.
 */
static int
smbfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
	int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
	vsecattr_t *vsecp)
{
	int error;
	vfs_t *vfsp;
	vnode_t *vp;
	smbnode_t *np;
	smbnode_t *dnp;
	smbmntinfo_t *smi;
	struct vattr vattr;
	struct smbfattr fattr;
	struct smb_cred scred;
	const char *name = (const char *)nm;
	int nmlen = strlen(nm);
	uint32_t disp;
	smb_fh_t *fid = NULL;
	int xattr;

	vfsp = dvp->v_vfsp;
	smi = VFTOSMI(vfsp);
	dnp = VTOSMB(dvp);
	vp = NULL;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);
	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Only regular files; no mknod-style special files here. */
	if (va->va_type != VREG)
		return (EINVAL);

	/* An empty name refers to the directory itself. */
	if (nmlen == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/* Don't allow creating "." or "..". */
	if ((nmlen == 1 && name[0] == '.') ||
	    (nmlen == 2 && name[0] == '.' && name[1] == '.'))
		return (EISDIR);

	/*
	 * Take a private copy of the attributes; the caller does not
	 * expect us to modify what va points to (we change va_mask
	 * below when truncating).
	 */
	vattr = *va;

	/* Serialize updates of this directory. */
	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/*
	 * Uncached lookup (cache_ok == 0) to find out whether the
	 * file exists, which decides between the "open existing"
	 * path and the "create" path below.
	 */
	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
	if (error == 0) {
		/*
		 * The file already exists.  Error if called with EXCL.
		 */
		if (exclusive == EXCL) {
			error = EEXIST;
			VN_RELE(vp);
			goto out;
		}

		/* Verify the requested access against the file. */
		error = smbfs_access(vp, mode, 0, cr, ct);
		if (error) {
			VN_RELE(vp);
			goto out;
		}

		/*
		 * Truncate if requested.  A caller without FOFFMAX
		 * (32-bit offsets) may not truncate a file that is
		 * already larger than MAXOFF32_T.
		 */
		if ((vattr.va_mask & AT_SIZE) && vp->v_type == VREG) {
			np = VTOSMB(vp);
			if (!(lfaware & FOFFMAX)) {
				mutex_enter(&np->r_statelock);
				if (np->r_size > MAXOFF32_T)
					error = EOVERFLOW;
				mutex_exit(&np->r_statelock);
			}
			if (error) {
				VN_RELE(vp);
				goto out;
			}
			vattr.va_mask = AT_SIZE;
			error = smbfssetattr(vp, &vattr, 0, cr);
			if (error) {
				VN_RELE(vp);
				goto out;
			}
#ifdef	SMBFS_VNEVENT
			/* Existing file was "created" (truncated). */
			vnevent_create(vp, ct);
#endif
		}
		/* Success: return the (held) existing file. */
		*vpp = vp;
		goto out;
	}

	/*
	 * The file did not exist.  Need write access in the parent,
	 * and check the new file's mode against the mount rules.
	 */
	error = smbfs_access(dvp, VWRITE, 0, cr, ct);
	if (error)
		goto out;
	error = smbfs_access_rwx(vfsp, VREG, mode, cr);
	if (error)
		goto out;

	/*
	 * Map the caller's exclusive/truncate intent onto an NT
	 * create disposition.
	 */
	if (exclusive == EXCL)
		disp = NTCREATEX_DISP_CREATE;
	else {
		/* va_size == 0 asks for truncation of existing data. */
		if ((va->va_type == VREG) &&
		    (va->va_mask & AT_SIZE) &&
		    (va->va_size == 0))
			disp = NTCREATEX_DISP_OVERWRITE_IF;
		else
			disp = NTCREATEX_DISP_OPEN_IF;
	}
	xattr = (dnp->n_flag & N_XATTR) ? 1 : 0;
	error = smbfs_smb_create(dnp,
	    name, nmlen, xattr,
	    disp, &scred, &fid);
	if (error)
		goto out;

	/*
	 * We don't keep the handle; the create itself was the point.
	 * Close it, then look the new file up for its attributes.
	 */
	smbfs_smb_close(fid);

	/*
	 * NOTE: smbfs_smb_lookup may replace "name" with an allocated
	 * copy, which is freed at "out" below (name != nm check).
	 */
	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
	if (error)
		goto out;

	/* The parent directory changed; refresh it on next use. */
	smbfs_attr_touchdir(dnp);

	error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
	if (error)
		goto out;

	/* Success: return the (held) new file. */
	*vpp = vp;
	error = 0;

out:
	smb_credrele(&scred);
	smbfs_rw_exit(&dnp->r_rwlock);
	if (name != nm)
		smbfs_name_free(name, nmlen);
	return (error);
}
/*
 * Remove the file "nm" from directory dvp.  Directories are refused
 * with EPERM (VOP_RMDIR handles those).  The wire work, including
 * the "silly rename" of busy files, lives in smbfsremove().
 */
static int
smbfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct,
	int flags)
{
	struct smb_cred scred;
	vnode_t *vp = NULL;
	smbnode_t *dnp = VTOSMB(dvp);
	smbmntinfo_t *smi = VTOSMI(dvp);
	int error;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);
	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Need write and search access in the parent directory. */
	error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct);
	if (error)
		return (error);

	/* Serialize changes to this directory. */
	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/* Uncached lookup of the file to be removed. */
	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
	if (error != 0)
		goto out;

	/* Directories must go through VOP_RMDIR instead. */
	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}

	error = smbfsremove(dvp, vp, &scred, flags);
	if (error != 0)
		goto out;

#ifdef	SMBFS_VNEVENT
	vnevent_remove(vp, dvp, nm, ct);
#endif

out:
	if (vp != NULL)
		VN_RELE(vp);
	smb_credrele(&scred);
	smbfs_rw_exit(&dnp->r_rwlock);
	return (error);
}
/*
 * Do the real work of a remove.  The caller holds the parent's
 * r_rwlock (asserted below) and has done the access checks.
 *
 * Deletion over SMB works by opening the file with DELETE access,
 * setting the delete-on-close disposition, and closing the handle.
 * If the file looks busy locally (extra vnode hold plus an open
 * FID), it is first renamed to a temporary name so the original
 * name becomes free immediately ("silly rename"); if setting
 * delete-on-close then fails, we try to rename it back.
 */
static int
smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
	int flags)
{
	smbnode_t *dnp = VTOSMB(dvp);
	smbnode_t *np = VTOSMB(vp);
	smbmntinfo_t *smi = np->n_mount;
	char *tmpname = NULL;
	int tnlen;
	int error;
	smb_fh_t *fid = NULL;
	boolean_t renamed = B_FALSE;

	/* The caller passes the parent directory lock held. */
	ASSERT(dnp->r_rwlock.owner == curthread);

	/*
	 * Push dirty pages first.  Out-of-space style errors are
	 * stashed in r_error rather than aborting the remove.
	 */
	if (vn_has_cached_data(vp) &&
	    ((np->r_flags & RDIRTY) || np->r_count > 0)) {
		error = smbfs_putpage(vp, (offset_t)0, 0, 0,
		    scred->scr_cred, NULL);
		if (error && (error == ENOSPC || error == EDQUOT)) {
			mutex_enter(&np->r_statelock);
			if (!np->r_error)
				np->r_error = error;
			mutex_exit(&np->r_statelock);
		}
	}

	/* Open (or reuse an open handle on) the file, DELETE access. */
	error = smbfs_smb_tmpopen(np, STD_RIGHT_DELETE_ACCESS,
	    scred, &fid);
	if (error) {
		SMBVDEBUG("error %d opening %s\n",
		    error, np->n_rpath);
		goto out;
	}
	ASSERT(fid != NULL);

	/*
	 * If the file appears busy locally, rename it out of the way
	 * first so the name is immediately reusable.  A failure of
	 * this rename is not fatal -- just logged.
	 */
	if (vp->v_type != VDIR && vp->v_count > 1 && np->n_fidrefs > 0) {
		tmpname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		tnlen = smbfs_newname(tmpname, MAXNAMELEN);
		error = smbfs_smb_rename(dnp, np, dnp, tmpname, tnlen,
		    fid, scred);
		if (error != 0) {
			SMBVDEBUG("error %d renaming %s -> %s\n",
			    error, np->n_rpath, tmpname);
			/* Keep going without the rename. */
		} else {
			renamed = B_TRUE;
		}
	}

	/*
	 * Mark the handle delete-on-close.  If that fails, undo the
	 * silly rename (best effort) and report EBUSY.
	 */
	error = smbfs_smb_setdisp(smi->smi_share, fid, 1, scred);
	if (error != 0) {
		SMBVDEBUG("error %d setting DoC on %s\n",
		    error, np->n_rpath);
		if (renamed) {
			char *oldname;
			int oldnlen;
			int err2;

			/* Recover the original name from n_rpath. */
			oldname = np->n_rpath + (dnp->n_rplen + 1);
			oldnlen = np->n_rplen - (dnp->n_rplen + 1);
			err2 = smbfs_smb_rename(dnp, np, dnp, oldname, oldnlen,
			    fid, scred);
			SMBVDEBUG("error %d un-renaming %s -> %s\n",
			    err2, tmpname, np->n_rpath);
		}
		error = EBUSY;
		goto out;
	}
	/* Done!  Discard this node's cached attributes. */
	smbfs_attrcache_remove(np);
	smbfs_attrcache_prune(np);

out:
	if (tmpname != NULL)
		kmem_free(tmpname, MAXNAMELEN);

	/* The delete happens when the (last) handle closes. */
	if (fid != NULL)
		smbfs_smb_tmpclose(np, fid);

	if (error == 0) {
		/* Keep lookups from finding this node again. */
		smbfs_rmhash(np);
	}

	return (error);
}
/*
 * Hard links are not supported; always fails with ENOSYS.
 * (See also _PC_LINK_MAX == 1 in smbfs_pathconf.)
 */
/* ARGSUSED */
static int
smbfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
	caller_context_t *ct, int flags)
{
	return (ENOSYS);
}
/*
 * Rename "onm" in odvp to "nnm" in ndvp.  Both directories must be
 * on the same smbfs mount.  The two directory r_rwlocks are taken
 * in node-address order so that two concurrent renames between the
 * same pair of directories cannot deadlock.  The real work is in
 * smbfsrename().
 */
static int
smbfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
	caller_context_t *ct, int flags)
{
	struct smb_cred scred;
	smbnode_t *odnp = VTOSMB(odvp);
	smbnode_t *ndnp = VTOSMB(ndvp);
	vnode_t *ovp;
	int error;

	if (curproc->p_zone != VTOSMI(odvp)->smi_zone_ref.zref_zone ||
	    curproc->p_zone != VTOSMI(ndvp)->smi_zone_ref.zref_zone)
		return (EPERM);

	if (VTOSMI(odvp)->smi_flags & SMI_DEAD ||
	    VTOSMI(ndvp)->smi_flags & SMI_DEAD ||
	    odvp->v_vfsp->vfs_flag & VFS_UNMOUNTED ||
	    ndvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Never rename "." or ".." in either role. */
	if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
	    strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
		return (EINVAL);

	/* No cross-filesystem renames. */
	if (odvp->v_vfsp != ndvp->v_vfsp)
		return (EXDEV);

	/* Write+search in the source dir; write in the target dir. */
	error = smbfs_access(odvp, VWRITE|VEXEC, 0, cr, ct);
	if (error)
		return (error);
	if (odvp != ndvp) {
		error = smbfs_access(ndvp, VWRITE, 0, cr, ct);
		if (error)
			return (error);
	}

	/*
	 * Lock both directories, lower node address first, to avoid
	 * deadlock with a concurrent rename in the other direction.
	 */
	if (odnp < ndnp) {
		if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
		    SMBINTR(odvp)))
			return (EINTR);
		if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
		    SMBINTR(ndvp))) {
			smbfs_rw_exit(&odnp->r_rwlock);
			return (EINTR);
		}
	} else {
		if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
		    SMBINTR(ndvp)))
			return (EINTR);
		if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
		    SMBINTR(odvp))) {
			smbfs_rw_exit(&ndnp->r_rwlock);
			return (EINTR);
		}
	}
	smb_credinit(&scred, cr);

	/* Lookup the object to be renamed, then do the work. */
	error = smbfslookup(odvp, onm, &ovp, cr, 0, ct);
	if (error == 0) {
		error = smbfsrename(odvp, ovp, ndvp, nnm, &scred, flags);
		VN_RELE(ovp);
	}

	smb_credrele(&scred);
	smbfs_rw_exit(&odnp->r_rwlock);
	smbfs_rw_exit(&ndnp->r_rwlock);
	return (error);
}
/*
 * Do the real work of a rename.  Both directory r_rwlocks are held
 * by the caller (asserted).  If the new name already exists and is
 * a compatible, non-busy file, it is removed first; then the source
 * is renamed using a handle opened with DELETE access.
 */
static int
smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp, char *nnm,
	struct smb_cred *scred, int flags)
{
	smbnode_t *odnp = VTOSMB(odvp);
	smbnode_t *onp = VTOSMB(ovp);
	smbnode_t *ndnp = VTOSMB(ndvp);
	vnode_t *nvp = NULL;
	int error;
	int nvp_locked = 0;
	smb_fh_t *fid = NULL;

	/* Things our caller should have already checked. */
	ASSERT(curproc->p_zone == VTOSMI(odvp)->smi_zone_ref.zref_zone);
	ASSERT(odvp->v_vfsp == ndvp->v_vfsp);
	ASSERT(odnp->r_rwlock.owner == curthread);
	ASSERT(ndnp->r_rwlock.owner == curthread);

	/*
	 * Lookup the target name.  If it exists, the source and
	 * target types must match (dir-over-dir, file-over-file).
	 */
	error = smbfslookup(ndvp, nnm, &nvp, scred->scr_cred, 0, NULL);
	if (!error) {
		if (ovp->v_type == VDIR) {
			if (nvp->v_type != VDIR) {
				error = ENOTDIR;
				goto out;
			}
		} else {
			if (nvp->v_type == VDIR) {
				error = EISDIR;
				goto out;
			}
		}

		/* Renaming a name onto itself is a no-op. */
		if (ovp == nvp) {
			error = 0;
			goto out;
		}

		/*
		 * The target must not be a mount point; keep it
		 * vfs-locked so nothing mounts on it meanwhile.
		 */
		if (vn_vfsrlock(nvp)) {
			error = EBUSY;
			goto out;
		}
		nvp_locked = 1;
		if (vn_mountedvfs(nvp) != NULL) {
			error = EBUSY;
			goto out;
		}

		/* An existing directory is never replaced. */
		if (nvp->v_type == VDIR) {
			error = EEXIST;
			goto out;
		}

		/* Remove the existing destination file. */
		error = smbfsremove(ndvp, nvp, scred, flags);
		if (error != 0)
			goto out;

		vn_vfsunlock(nvp);
		nvp_locked = 0;
		VN_RELE(nvp);
		nvp = NULL;
	}

	/*
	 * Rename the source.  The rename call takes an open handle
	 * with DELETE access on the object being renamed.
	 */
	error = smbfs_smb_tmpopen(onp, STD_RIGHT_DELETE_ACCESS,
	    scred, &fid);
	if (error) {
		SMBVDEBUG("error %d opening %s\n",
		    error, onp->n_rpath);
		goto out;
	}

	/*
	 * The node's remote path changes with the rename, so drop
	 * its cached attributes before, and prune after on success.
	 */
	smbfs_attrcache_remove(onp);
	error = smbfs_smb_rename(odnp, onp, ndnp, nnm, strlen(nnm),
	    fid, scred);
	smbfs_smb_tmpclose(onp, fid);
	if (error == 0) {
		smbfs_attrcache_prune(onp);
	}

out:
	if (nvp) {
		if (nvp_locked)
			vn_vfsunlock(nvp);
		VN_RELE(nvp);
	}

	return (error);
}
/*
 * Create directory "nm" under dvp.  On success a held vnode for the
 * new directory is returned in *vpp.
 */
static int
smbfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp,
	cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
{
	vnode_t *vp;
	struct smbnode *dnp = VTOSMB(dvp);
	struct smbmntinfo *smi = VTOSMI(dvp);
	struct smb_cred scred;
	struct smbfattr fattr;
	const char *name = (const char *) nm;
	int nmlen = strlen(name);
	int error;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);
	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* "." and ".." always exist already. */
	if ((nmlen == 1 && name[0] == '.') ||
	    (nmlen == 2 && name[0] == '.' && name[1] == '.'))
		return (EEXIST);

	/* No subdirectories inside an XATTR directory. */
	if (dvp->v_flag & V_XATTRDIR)
		return (EINVAL);

	/* Serialize changes to this directory. */
	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/* Require write access in the parent. */
	error = smbfs_access(dvp, VWRITE, 0, cr, ct);
	if (error)
		goto out;

	error = smbfs_smb_mkdir(dnp, name, nmlen, &scred);
	if (error)
		goto out;

	/*
	 * Lookup the new directory for its attributes.  NOTE: this
	 * may replace "name" with an allocated copy, freed at "out".
	 */
	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
	if (error)
		goto out;

	/* The parent changed; refresh its attributes on next use. */
	smbfs_attr_touchdir(dnp);

	error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
	if (error)
		goto out;

	*vpp = vp;
	error = 0;

out:
	smb_credrele(&scred);
	smbfs_rw_exit(&dnp->r_rwlock);
	if (name != nm)
		smbfs_name_free(name, nmlen);

	return (error);
}
/*
 * Remove the directory "nm" from dvp.  Refuses the parent itself,
 * the caller's current directory (cdir), any VROOT vnode, and any
 * directory with something mounted on it.
 *
 * Fix from review: the NMODIFIED update below is on the PARENT
 * (dnp->n_flag), so it must be guarded by dnp->r_statelock.  The
 * previous code wrongly took the child's np->r_statelock around it,
 * leaving the parent's n_flag update unprotected against concurrent
 * holders of dnp->r_statelock.
 */
static int
smbfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
	caller_context_t *ct, int flags)
{
	struct smb_cred scred;
	vnode_t *vp = NULL;
	int vp_locked = 0;
	struct smbmntinfo *smi = VTOSMI(dvp);
	struct smbnode *dnp = VTOSMB(dvp);
	struct smbnode *np;
	int error;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);
	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Need write and search access in the parent. */
	error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct);
	if (error)
		return (error);

	/* Serialize changes to this directory. */
	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/* Uncached lookup of the directory to remove. */
	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
	if (error)
		goto out;
	np = VTOSMB(vp);

	/* Disallow rmdir of the parent, the cwd, or a root vnode. */
	if ((vp == dvp) || (vp == cdir) || (vp->v_flag & VROOT)) {
		error = EINVAL;
		goto out;
	}
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}

	/*
	 * Keep the victim vfs-locked so nothing mounts on it while
	 * we work, and refuse it if something already has.
	 */
	if (vn_vfsrlock(vp)) {
		error = EBUSY;
		goto out;
	}
	vp_locked = 1;
	if (vn_mountedvfs(vp) != NULL) {
		error = EBUSY;
		goto out;
	}

	/* Common removal work (delete-on-close, etc.). */
	error = smbfsremove(dvp, vp, &scred, flags);
	if (error)
		goto out;

#ifdef	SMBFS_VNEVENT
	vnevent_rmdir(vp, dvp, nm, ct);
#endif

	/*
	 * Mark the PARENT modified -- under the parent's statelock
	 * (this is the fix; the old code locked np->r_statelock).
	 */
	mutex_enter(&dnp->r_statelock);
	dnp->n_flag |= NMODIFIED;
	mutex_exit(&dnp->r_statelock);
	smbfs_attr_touchdir(dnp);
	smbfs_rmhash(np);

out:
	if (vp) {
		if (vp_locked)
			vn_vfsunlock(vp);
		VN_RELE(vp);
	}
	smb_credrele(&scred);
	smbfs_rw_exit(&dnp->r_rwlock);

	return (error);
}
/*
 * Symbolic links are not supported; always fails with ENOSYS.
 * (See also _PC_SYMLINK_MAX == 0 in smbfs_pathconf.)
 */
/* ARGSUSED */
static int
smbfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
	caller_context_t *ct, int flags)
{
	return (ENOSYS);
}
/*
 * VOP_READDIR: validate zone/mount/access, then serialize through
 * the node's r_lkserlock and let smbfs_readvdir() do the work.
 * The VFS layer already holds r_rwlock as reader (asserted).
 */
static int
smbfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
	caller_context_t *ct, int flags)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;
	smbmntinfo_t *smi;

	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Require read access on the directory. */
	error = smbfs_access(vp, VREAD, 0, cr, ct);
	if (error)
		return (error);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));

	/*
	 * Serialize directory reads: the per-node search context
	 * (np->n_dirseq) is shared, so only one reader at a time.
	 */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);

	error = smbfs_readvdir(vp, uiop, cr, eofp, ct);

	smbfs_rw_exit(&np->r_lkserlock);

	return (error);
}
/*
 * Do the real readdir work: fabricate the "." and ".." entries,
 * then walk the server-side search context (np->n_dirseq), copying
 * dirent64 records into the caller's uio.  Directory "offsets" are
 * simple entry indices; FIRST_DIROFS (2) is the first real entry,
 * and np->n_dirofs tracks how far the open search has advanced.
 * Called with np->r_lkserlock held as writer (asserted).
 */
static int
smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
	caller_context_t *ct)
{
	/* Max entries per wire request; buffer for one max dirent. */
	static const int limit = 1000;
	static const size_t dbufsiz = DIRENT64_RECLEN(SMB_MAXFNAMELEN);
	struct smb_cred scred;
	vnode_t *newvp;
	struct smbnode *np = VTOSMB(vp);
	struct smbfs_fctx *ctx;
	struct dirent64 *dp;
	ssize_t save_resid;
	offset_t save_offset;
	int offset;
	int nmlen, error;
	ushort_t reclen;

	ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);
	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));

	/* Make sure we have a search context (set up at open). */
	if (np->n_dirseq == NULL)
		return (EBADF);

	/* Offsets are entry indices and must fit in 32 bits. */
	if (uio->uio_loffset < 0 || uio->uio_loffset > INT32_MAX ||
	    (uio->uio_loffset + uio->uio_resid) > INT32_MAX)
		return (EINVAL);

	/* Require room for at least one maximal-size dirent. */
	if (uio->uio_resid < dbufsiz)
		return (EINVAL);

	SMBVDEBUG("dirname='%s'\n", np->n_rpath);
	smb_credinit(&scred, cr);
	dp = kmem_alloc(dbufsiz, KM_SLEEP);

	/* Saved so an early failure can restore the uio (see out). */
	save_resid = uio->uio_resid;
	save_offset = uio->uio_loffset;
	offset = uio->uio_offset;
	SMBVDEBUG("in: offset=%d, resid=%d\n",
	    (int)uio->uio_offset, (int)uio->uio_resid);
	error = 0;

	/*
	 * Generate the "." and ".." entries (indices 0 and 1).
	 * The name is built in place: start with ".." and truncate
	 * at [offset + 1], which yields "." for index 0.
	 */
	while (offset < FIRST_DIROFS) {
		reclen = DIRENT64_RECLEN(offset + 1);
		if (uio->uio_resid < reclen)
			goto out;
		bzero(dp, reclen);
		dp->d_reclen = reclen;
		dp->d_name[0] = '.';
		dp->d_name[1] = '.';
		dp->d_name[offset + 1] = '\0';
		/*
		 * Try for the real inode numbers of "." and "..";
		 * fall back to a fabricated value on lookup failure.
		 */
		error = smbfslookup(vp, dp->d_name, &newvp, cr, 1, ct);
		if (error) {
			dp->d_ino = np->n_ino + offset; /* fiction */
		} else {
			dp->d_ino = VTOSMB(newvp)->n_ino;
			VN_RELE(newvp);
		}
		dp->d_off = offset + 1;
		error = uiomove(dp, reclen, UIO_READ, uio);
		if (error)
			goto out;
		/* Keep the uio offset and our index in lock-step. */
		uio->uio_offset = ++offset;
	}

	/*
	 * If the caller seeked backwards past where the search has
	 * advanced, restart with a fresh search context.
	 */
	if (offset < np->n_dirofs) {
		SMBVDEBUG("Reopening search %d:%d\n",
		    offset, np->n_dirofs);
		error = smbfs_smb_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		/* Replace the old search context with the new one. */
		(void) smbfs_smb_findclose(np->n_dirseq, &scred);
		np->n_dirseq = ctx;
		np->n_dirofs = FIRST_DIROFS;
	} else {
		ctx = np->n_dirseq;
	}

	/* Skip entries until the search reaches the wanted index. */
	while (np->n_dirofs < offset) {
		error = smbfs_smb_findnext(ctx, limit, &scred);
		if (error != 0)
			goto out;
		np->n_dirofs++;
	}

	/*
	 * Copy out entries while the buffer can hold a maximal
	 * dirent.  findnext returns ENOENT at end-of-directory,
	 * translated to EOF at "out" below.
	 */
	while (uio->uio_resid >= dbufsiz) {
		error = smbfs_smb_findnext(ctx, limit, &scred);
		if (error != 0)
			goto out;
		np->n_dirofs++;
		nmlen = ctx->f_nmlen;
		/* Clamp over-long names to SMB_MAXFNAMELEN. */
		if (nmlen > SMB_MAXFNAMELEN) {
			nmlen = SMB_MAXFNAMELEN;
			SMBVDEBUG("Truncating name: %s\n", ctx->f_name);
		}
		/* Optionally prime the node cache from the attrs. */
		if (smbfs_fastlookup) {
			if (smbfs_nget(vp, ctx->f_name, nmlen,
			    &ctx->f_attr, &newvp) == 0)
				VN_RELE(newvp);
		}
		reclen = DIRENT64_RECLEN(nmlen);
		bzero(dp, reclen);
		dp->d_reclen = reclen;
		bcopy(ctx->f_name, dp->d_name, nmlen);
		dp->d_name[nmlen] = '\0';
		dp->d_ino = ctx->f_inum;
		dp->d_off = offset + 1;	/* index of the next entry */
		error = uiomove(dp, reclen, UIO_READ, uio);
		if (error)
			goto out;
		/* Keep the uio offset and our index in lock-step. */
		uio->uio_offset = ++offset;
	}

out:
	/* ENOENT from findnext is end-of-directory, not an error. */
	if (error == ENOENT) {
		error = 0;
		if (eofp)
			*eofp = 1;
	}
	/*
	 * If we failed before copying out any "real" entry (index
	 * still at FIRST_DIROFS), restore the uio so the dot entries
	 * are not half-consumed.
	 */
	if (error != 0 && offset == FIRST_DIROFS) {
		uio->uio_loffset = save_offset;
		uio->uio_resid = save_resid;
	}
	SMBVDEBUG("out: offset=%d, resid=%d\n",
	    (int)uio->uio_offset, (int)uio->uio_resid);
	kmem_free(dp, dbufsiz);
	smb_credrele(&scred);
	return (error);
}
/*
 * File IDs (for NFS-style export) are not supported; always ENOSYS.
 */
/* ARGSUSED */
static int
smbfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	return (ENOSYS);
}
/*
 * VOP_RWLOCK: take the node's r_rwlock as reader or writer,
 * uninterruptibly, and report which mode was actually taken.
 */
static int
smbfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
{
	smbnode_t *node = VTOSMB(vp);

	if (write_lock) {
		(void) smbfs_rw_enter_sig(&node->r_rwlock, RW_WRITER, FALSE);
		return (V_WRITELOCK_TRUE);
	}

	(void) smbfs_rw_enter_sig(&node->r_rwlock, RW_READER, FALSE);
	return (V_WRITELOCK_FALSE);
}
/*
 * VOP_RWUNLOCK: release the r_rwlock taken by smbfs_rwlock()
 * (smbfs_rw_exit releases either reader or writer mode).
 */
/* ARGSUSED */
static void
smbfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
{
	smbfs_rw_exit(&VTOSMB(vp)->r_rwlock);
}
/*
 * VOP_SEEK: validate a new file offset.  Directories accept any
 * offset; regular files only reject negative ones.
 */
/* ARGSUSED */
static int
smbfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
{
	smbmntinfo_t *smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);

	if ((smi->smi_flags & SMI_DEAD) ||
	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
		return (EIO);

	if (vp->v_type == VDIR)
		return (0);

	return ((*noffp < 0) ? EINVAL : 0);
}
#ifdef _KERNEL

#ifdef DEBUG
/* Count of page_lookup races lost in smbfs_getapage() (debug only). */
static int smbfs_lostpage = 0;
#endif
/*
 * VOP_GETPAGE: fault pages into the page cache for vp.  Validates
 * the caches, then uses pvn_getpages() with smbfs_getapage().
 * SMBFS_EOF from the pager means the file shrank underneath us:
 * purge caches and retry.  ESTALE marks the node stale and drops
 * all cached pages and attributes.
 */
static int
smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
	smbnode_t *np;
	smbmntinfo_t *smi;
	int error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (protp != NULL)
		*protp = PROT_ALL;

	/* Make sure cached pages/attributes are still valid. */
	error = smbfs_validate_caches(vp, cr);
	if (error)
		return (error);

retry:
	mutex_enter(&np->r_statelock);

	/* When creating pages, wait out any gets in progress. */
	if (rw == S_CREATE) {
		while (np->r_gcount > 0)
			cv_wait(&np->r_cv, &np->r_statelock);
	}

	/*
	 * Fault on a page wholly past EOF, except via segkmap
	 * (the segmap-based I/O path is allowed past EOF).
	 */
	if (off + len > np->r_size + PAGEOFFSET && seg != segkmap) {
		mutex_exit(&np->r_statelock);
		return (EFAULT);
	}
	mutex_exit(&np->r_statelock);
	error = pvn_getpages(smbfs_getapage, vp, off, len, protp,
	    pl, plsz, seg, addr, rw, cr);

	switch (error) {
	case SMBFS_EOF:
		/* File shrank on the server; toss caches and retry. */
		smbfs_purge_caches(vp, cr);
		goto retry;
	case ESTALE:
		/* Node is gone remotely; mark it stale. */
		mutex_enter(&np->r_statelock);
		np->r_flags |= RSTALE;
		if (!np->r_error)
			np->r_error = (error);
		mutex_exit(&np->r_statelock);
		if (vn_has_cached_data(vp))
			smbfs_invalidate_pages(vp, (u_offset_t)0, cr);
		smbfs_purge_caches(vp, cr);
		break;
	default:
		break;
	}

	return (error);
}
/*
 * Fill in one page (possibly klustered with neighbors) on behalf of
 * smbfs_getpage() / pvn_getpages().  pl == NULL means a read-ahead
 * request; rw == S_CREATE creates a fresh page without any read.
 * On success the pages are returned locked in pl[].
 */
static int
smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr)
{
	smbnode_t *np;
	smbmntinfo_t *smi;
	uint_t bsize;
	struct buf *bp;
	page_t *pp;
	u_offset_t lbn;
	u_offset_t io_off;
	u_offset_t blkoff;
	size_t io_len;
	uint_t blksize;
	int error;
	int readahead_issued = 0;
	page_t *pagefound;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* I/O is done in "blocks" of at least a page. */
	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);

reread:
	bp = NULL;
	pp = NULL;
	pagefound = NULL;

	if (pl != NULL)
		pl[0] = NULL;

	error = 0;
	lbn = off / bsize;
	blkoff = lbn * bsize;

again:
	/*
	 * If the page is already cached, the "pagefound" path below
	 * just locks and returns it.  Otherwise either create it
	 * (S_CREATE) or kluster a read around it.
	 */
	if ((pagefound = page_exists(vp, off)) == NULL) {
		if (pl == NULL) {
			/* Read-ahead request: nothing done here. */
			(void) 0;
		} else if (rw == S_CREATE) {
			/* Caller will fill the page; no read needed. */
			if ((pp = page_create_va(vp, off,
			    PAGESIZE, PG_WAIT, seg, addr)) == NULL)
				cmn_err(CE_PANIC,
				    "smbfs_getapage: page_create");
			io_len = PAGESIZE;
			mutex_enter(&np->r_statelock);
			np->r_nextr = off + PAGESIZE;
			mutex_exit(&np->r_statelock);
		} else {
			/*
			 * Pick a kluster size: trim to EOF when this
			 * block contains it, a single page when the
			 * access pattern looks random, else a block.
			 */
			mutex_enter(&np->r_statelock);
			if (blkoff < np->r_size &&
			    blkoff + bsize >= np->r_size) {
				/* Block straddles EOF. */
				if (np->r_size <= off) {
					/* Requested page is past EOF. */
					blksize = off + PAGESIZE - blkoff;
				} else
					blksize = np->r_size - blkoff;
			} else if ((off == 0) ||
			    (off != np->r_nextr && !readahead_issued)) {
				/* Non-sequential: read one page. */
				blksize = PAGESIZE;
				blkoff = off;
			} else
				blksize = bsize;
			mutex_exit(&np->r_statelock);

			pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
			    &io_len, blkoff, blksize, 0);

			/* Someone raced the page in; look it up again. */
			if (pp == NULL)
				goto again;

			io_len = ptob(btopr(io_len));

			/* Set up a synchronous page-I/O buf. */
			bp = pageio_setup(pp, io_len, vp, B_READ);
			ASSERT(bp != NULL);

			/*
			 * pageio_setup leaves b_addr zero; bp_mapin
			 * below assigns the kernel virtual address.
			 */
			ASSERT(bp->b_un.b_addr == 0);

			bp->b_edev = 0;
			bp->b_dev = 0;
			bp->b_lblkno = lbtodb(io_off);
			bp->b_file = vp;
			bp->b_offset = (offset_t)off;
			bp_mapin(bp);

			/*
			 * Reads entirely past EOF via segkmap are
			 * just zero-filled; otherwise do real I/O.
			 */
			mutex_enter(&np->r_statelock);
			if (io_off >= np->r_size && seg == segkmap) {
				mutex_exit(&np->r_statelock);
				bzero(bp->b_un.b_addr, io_len);
			} else {
				mutex_exit(&np->r_statelock);
				error = smbfs_bio(bp, 0, cr);
			}

			bp_mapout(bp);
			pageio_done(bp);

			/* EOF is OK through segkmap, a fault otherwise. */
			if (error == SMBFS_EOF) {
				if (seg == segkmap)
					error = 0;
				else
					error = EFAULT;
			}

			/* Track where sequential reads have reached. */
			if (!readahead_issued && !error) {
				mutex_enter(&np->r_statelock);
				np->r_nextr = io_off + io_len;
				mutex_exit(&np->r_statelock);
			}
		}
	}

	if (pl == NULL)
		return (error);

	if (error) {
		if (pp != NULL)
			pvn_read_done(pp, B_ERROR);
		return (error);
	}

	if (pagefound) {
		se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);

		/*
		 * The page was cached; lock it.  If the lookup fails
		 * (page disappeared meanwhile), start all over.
		 */
		if ((pp = page_lookup(vp, off, se)) == NULL) {
#ifdef DEBUG
			smbfs_lostpage++;
#endif
			goto reread;
		}
		pl[0] = pp;
		pl[1] = NULL;
		return (0);
	}

	/* Fill in the page list for the kluster we read/created. */
	if (pp != NULL)
		pvn_plist_init(pp, pl, plsz, off, io_len, rw);

	return (error);
}
#endif	/* _KERNEL */
/*
 * VOP_PUTPAGE: flush dirty pages in [off, off+len).  len == 0 means
 * the whole file.  When out of space or unmounting, pages are
 * invalidated (B_INVAL | B_FORCE) rather than merely written.
 */
static int
smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
	caller_context_t *ct)
{
#ifdef	_KERNEL
	smbnode_t *np;
	smbmntinfo_t *smi;
	page_t *pp;
	u_offset_t eoff;
	u_offset_t io_off;
	size_t io_len;
	int error;
	int rdirty;
	int err;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/* Nothing cached means nothing to flush. */
	if (!vn_has_cached_data(vp))
		return (0);

	/*
	 * If out of space or unmounting, destroy the pages instead
	 * of keeping them dirty forever.
	 */
	if ((np->r_flags & ROUTOFSPACE) ||
	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
		flags = (flags & ~B_FREE) | B_INVAL | B_FORCE;

	if (len == 0) {
		/*
		 * Whole-file flush.  Clear RDIRTY up front so pages
		 * dirtied during the flush re-set it; restore it if
		 * the flush fails without destroying the pages.
		 */
		if (off == (u_offset_t)0 &&
		    (np->r_flags & RDIRTY)) {
			mutex_enter(&np->r_statelock);
			rdirty = (np->r_flags & RDIRTY);
			np->r_flags &= ~RDIRTY;
			mutex_exit(&np->r_statelock);
		} else
			rdirty = 0;

		error = pvn_vplist_dirty(vp, off, smbfs_putapage,
		    flags, cr);

		if (error && rdirty &&
		    (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) {
			mutex_enter(&np->r_statelock);
			np->r_flags |= RDIRTY;
			mutex_exit(&np->r_statelock);
		}
	} else {
		/*
		 * Ranged flush, kluster by kluster; smbfs_putapage
		 * sets io_len to what was actually written.
		 */
		error = 0;
		io_len = 1;	/* overwritten on each iteration */
		eoff = off + len;

		for (io_off = off; io_off < eoff; io_off += io_len) {
			/* Stop at end of file. */
			mutex_enter(&np->r_statelock);
			if (io_off >= np->r_size) {
				mutex_exit(&np->r_statelock);
				break;
			}
			mutex_exit(&np->r_statelock);
			/*
			 * Blocking page lookup for invalidation or
			 * synchronous writes; non-blocking for async.
			 */
			if ((flags & B_INVAL) || !(flags & B_ASYNC)) {
				pp = page_lookup(vp, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL || !pvn_getdirty(pp, flags))
				io_len = PAGESIZE;
			else {
				err = smbfs_putapage(vp, pp, &io_off,
				    &io_len, flags, cr);
				/* Remember the first error only. */
				if (!error)
					error = err;
			}
		}
	}

	return (error);
#else	/* _KERNEL */
	return (ENOSYS);
#endif	/* _KERNEL */
}
#ifdef _KERNEL

/*
 * Write out one (klustered) dirty page for smbfs_putpage() and
 * pvn_vplist_dirty().  The range actually written is returned via
 * *offp / *lenp.
 */
static int
smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
	int flags, cred_t *cr)
{
	smbnode_t *np;
	u_offset_t io_off;
	u_offset_t lbn_off;
	u_offset_t lbn;
	size_t io_len;
	uint_t bsize;
	int error;

	np = VTOSMB(vp);

	ASSERT(!vn_is_readonly(vp));

	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
	lbn = pp->p_offset / bsize;
	lbn_off = lbn * bsize;

	/* Gather neighboring dirty pages into one write. */
	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
	    roundup(bsize, PAGESIZE), flags);

	ASSERT((pp->p_offset / bsize) >= lbn);

	/* Trim the kluster so it stays within the containing block. */
	if (io_off + io_len > lbn_off + bsize) {
		ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
		io_len = lbn_off + bsize - io_off;
	}

	/*
	 * If this region of the file is being modified right now
	 * (RMODINPROGRESS with an overlapping r_modaddr window),
	 * don't write it out: re-dirty the pages, release them, and
	 * report success -- they will be flushed again later.
	 */
	if (np->r_flags & RMODINPROGRESS) {
		mutex_enter(&np->r_statelock);
		if ((np->r_flags & RMODINPROGRESS) &&
		    np->r_modaddr + MAXBSIZE > io_off &&
		    np->r_modaddr < io_off + io_len) {
			page_t *plist;

			plist = pp;
			while (plist != NULL) {
				pp = plist;
				page_sub(&plist, pp);
				hat_setmod(pp);	/* keep it dirty */
				page_io_unlock(pp);
				page_unlock(pp);
			}
			np->r_flags |= RDIRTY;
			mutex_exit(&np->r_statelock);
			if (offp)
				*offp = io_off;
			if (lenp)
				*lenp = io_len;
			return (0);
		}
		mutex_exit(&np->r_statelock);
	}

	/* Do the actual write. */
	flags |= B_WRITE;
	error = smbfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);

	if ((error == ENOSPC || error == EDQUOT || error == EFBIG ||
	    error == EACCES) &&
	    (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
		/*
		 * Persistent write failure: remember the condition
		 * (ROUTOFSPACE), and for synchronous callers retry
		 * the flush with B_INVAL|B_FORCE so the pages are
		 * destroyed rather than kept dirty forever.
		 */
		if (!(np->r_flags & ROUTOFSPACE)) {
			mutex_enter(&np->r_statelock);
			np->r_flags |= ROUTOFSPACE;
			mutex_exit(&np->r_statelock);
		}
		flags |= B_ERROR;
		pvn_write_done(pp, flags);
		if (!(flags & B_ASYNC)) {
			error = smbfs_putpage(vp, io_off, io_len,
			    B_INVAL | B_FORCE, cr, NULL);
		}
	} else {
		if (error)
			flags |= B_ERROR;
		else if (np->r_flags & ROUTOFSPACE) {
			/* The write worked; space is back. */
			mutex_enter(&np->r_statelock);
			np->r_flags &= ~ROUTOFSPACE;
			mutex_exit(&np->r_statelock);
		}
		pvn_write_done(pp, flags);
	}

	if (offp)
		*offp = io_off;
	if (lenp)
		*lenp = io_len;

	return (error);
}
#endif	/* _KERNEL */
/*
 * Destroy all of vp's cached pages from "off" onward (B_INVAL |
 * B_TRUNC -- nothing is written back).  The RTRUNCATE flag plus
 * r_cv serialize concurrent truncations of the same node.
 */
void
smbfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr)
{
	smbnode_t *np;

	np = VTOSMB(vp);

	/* Wait for any truncate already in progress, then claim it. */
	mutex_enter(&np->r_statelock);
	while (np->r_flags & RTRUNCATE)
		cv_wait(&np->r_cv, &np->r_statelock);
	np->r_flags |= RTRUNCATE;

	/*
	 * Invalidating the whole file also clears the dirty flag
	 * and (unless the node is stale) any recorded async error.
	 */
	if (off == (u_offset_t)0) {
		np->r_flags &= ~RDIRTY;
		if (!(np->r_flags & RSTALE))
			np->r_error = 0;
	}
	mutex_exit(&np->r_statelock);

#ifdef	_KERNEL
	(void) pvn_vplist_dirty(vp, off, smbfs_putapage,
	    B_INVAL | B_TRUNC, cr);
#endif	/* _KERNEL */

	/* Done; wake any waiters. */
	mutex_enter(&np->r_statelock);
	np->r_flags &= ~RTRUNCATE;
	cv_broadcast(&np->r_cv);
	mutex_exit(&np->r_statelock);
}
#ifdef _KERNEL

/*
 * VOP_MAP: create a segvn mapping of the file.  Requires an open
 * file handle (n_fid) for subsequent paging I/O.  The r_inmap count
 * is raised for the duration so other code can see that a mapping
 * is being set up without us holding node locks across as_map().
 */
static int
smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	segvn_crargs_t vn_a;
	struct vattr va;
	smbnode_t *np;
	smbmntinfo_t *smi;
	int error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Paging I/O needs an open file handle. */
	if (np->n_fid == NULL)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/* Reject negative or overflowing ranges. */
	if (off < 0 || off + (ssize_t)len < 0)
		return (ENXIO);

	if (vp->v_type != VREG)
		return (ENODEV);

	/*
	 * Get attributes now (revalidating), needed for the
	 * mandatory-locking check (MANDLOCK on va_mode) below.
	 */
	va.va_mask = AT_ALL;
	if ((error = smbfsgetattr(vp, &va, cr)) != 0)
		return (error);

	/*
	 * Raise r_inmap under r_rwlock, then trade that lock for
	 * r_lkserlock for the rest of the setup.
	 */
	if (smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);
	atomic_inc_uint(&np->r_inmap);
	smbfs_rw_exit(&np->r_rwlock);

	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp))) {
		atomic_dec_uint(&np->r_inmap);
		return (EINTR);
	}

	/* No mappings when caching is disabled on this vnode. */
	if (vp->v_flag & VNOCACHE) {
		error = EAGAIN;
		goto done;
	}

	/*
	 * Refuse to map while mandatory locks are (or may become)
	 * active on the file.
	 */
	if ((flk_has_remote_locks(vp) || smbfs_lm_has_sleep(vp)) &&
	    MANDLOCK(vp, va.va_mode)) {
		error = EAGAIN;
		goto done;
	}

	as_rangelock(as);
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto done;
	}

	/* Build the segvn creation arguments. */
	vn_a.vp = vp;
	vn_a.offset = off;
	vn_a.type = (flags & MAP_TYPE);
	vn_a.prot = (uchar_t)prot;
	vn_a.maxprot = (uchar_t)maxprot;
	vn_a.flags = (flags & ~MAP_TYPE);
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);

done:
	smbfs_rw_exit(&np->r_lkserlock);
	atomic_dec_uint(&np->r_inmap);
	return (error);
}
/*
 * VOP_ADDMAP: account for pages being mapped.  On the very first
 * mapping of the node, also take an extra FID reference (under
 * r_lkserlock) so the open handle stays available for paging I/O;
 * the matching release happens in smbfs_delmap_async().
 */
/* ARGSUSED */
static int
smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	smbnode_t *node = VTOSMB(vp);
	boolean_t first_map;

	mutex_enter(&node->r_statelock);
	first_map = (node->r_mapcnt == 0);
	node->r_mapcnt += btopr(len);
	mutex_exit(&node->r_statelock);

	if (first_map) {
		(void) smbfs_rw_enter_sig(&node->r_lkserlock, RW_WRITER, 0);
		node->n_fidrefs++;
		smbfs_rw_exit(&node->r_lkserlock);
	}

	return (0);
}
/*
 * Context handed from smbfs_delmap() to smbfs_delmap_async(), which
 * runs on the mount's taskq.  The vnode and cred are held by the
 * dispatcher and released by the async routine.
 */
typedef struct smbfs_delmap_args {
	taskq_ent_t		dm_tqent;	/* preallocated taskq entry */
	cred_t			*dm_cr;		/* held credential */
	vnode_t			*dm_vp;		/* held vnode */
	offset_t		dm_off;		/* unmapped range: offset */
	caddr_t			dm_addr;	/* user addr (not read async) */
	size_t			dm_len;		/* unmapped range: length */
	uint_t			dm_prot;	/* prot (not read async) */
	uint_t			dm_maxprot;	/* checked for PROT_WRITE */
	uint_t			dm_flags;	/* MAP_SHARED etc. */
	boolean_t		dm_rele_fid;	/* last unmap: drop FID ref */
} smbfs_delmap_args_t;
/*
 * VOP_DELMAP: account for pages being unmapped.  The work that can
 * block (flushing pages, dropping the FID reference) is pushed to
 * the mount's taskq via smbfs_delmap_async().  The preallocated
 * dm_tqent makes taskq_dispatch_ent() non-failing.
 */
/* ARGSUSED */
static int
smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uint_t prot, uint_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	smbnode_t *np = VTOSMB(vp);
	smbmntinfo_t *smi = VTOSMI(vp);
	smbfs_delmap_args_t *dmapp;

	dmapp = kmem_zalloc(sizeof (*dmapp), KM_SLEEP);

	/* Held for the async task; released there. */
	crhold(cr);
	VN_HOLD(vp);

	dmapp->dm_vp = vp;
	dmapp->dm_cr = cr;
	dmapp->dm_off = off;
	dmapp->dm_addr = addr;
	dmapp->dm_len = len;
	dmapp->dm_prot = prot;
	dmapp->dm_maxprot = maxprot;
	dmapp->dm_flags = flags;
	dmapp->dm_rele_fid = B_FALSE;

	/* Drop the mapped-page count; last unmap releases the FID. */
	mutex_enter(&np->r_statelock);
	np->r_mapcnt -= btopr(len);
	ASSERT(np->r_mapcnt >= 0);
	if (np->r_mapcnt == 0)
		dmapp->dm_rele_fid = B_TRUE;
	mutex_exit(&np->r_statelock);

	taskq_dispatch_ent(smi->smi_taskq, smbfs_delmap_async, dmapp, 0,
	    &dmapp->dm_tqent);

	return (0);
}
/*
 * Taskq callback doing the blocking part of delmap: flush modified
 * pages from writable MAP_SHARED mappings, invalidate pages when
 * directio is in effect, and on the last unmap drop the FID
 * reference taken in smbfs_addmap().  Releases the vnode and cred
 * held by smbfs_delmap().
 */
static void
smbfs_delmap_async(void *varg)
{
	smbfs_delmap_args_t *dmapp = varg;
	cred_t *cr;
	vnode_t *vp;
	smbnode_t *np;
	smbmntinfo_t *smi;

	cr = dmapp->dm_cr;
	vp = dmapp->dm_vp;
	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Push modified pages from writable, shared mappings. */
	if (vn_has_cached_data(vp) && !vn_is_readonly(vp) &&
	    dmapp->dm_flags == MAP_SHARED &&
	    (dmapp->dm_maxprot & PROT_WRITE) != 0) {
		mutex_enter(&np->r_statelock);
		np->r_flags |= RDIRTY;
		mutex_exit(&np->r_statelock);

		/* Best-effort flush of the unmapped range. */
		(void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len, 0,
		    dmapp->dm_cr, NULL);
	}

	/* With directio, don't keep the pages cached either. */
	if ((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO))
		(void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len,
		    B_INVAL, dmapp->dm_cr, NULL);

	if (dmapp->dm_rele_fid) {
		struct smb_cred scred;

		/* Undo the n_fidrefs taken by the first addmap. */
		(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
		smb_credinit(&scred, dmapp->dm_cr);

		smbfs_rele_fid(np, &scred);

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);
	}

	/* Release the holds taken by smbfs_delmap(). */
	VN_RELE(vp);
	crfree(cr);
	kmem_free(dmapp, sizeof (*dmapp));
}
#endif	/* _KERNEL */
/*
 * VOP_FRLOCK: advisory record locking.  Only supported when the
 * mount has SMI_LLOCK (local locking) set, in which case the
 * generic fs_frlock() does the work.
 */
static int
smbfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
	offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
	caller_context_t *ct)
{
	if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
		return (EIO);

	if ((VTOSMI(vp)->smi_flags & SMI_LLOCK) == 0)
		return (ENOSYS);

	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
}
/*
 * VOP_SPACE: free storage.  Only F_FREESP with l_len == 0 (i.e. an
 * ftruncate-style "free from l_start to EOF") is supported, and it
 * is implemented as a size-setting setattr.
 */
static int
smbfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
	offset_t offset, cred_t *cr, caller_context_t *ct)
{
	smbmntinfo_t *smi = VTOSMI(vp);
	struct vattr va;
	int error;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if ((smi->smi_flags & SMI_DEAD) ||
	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
		return (EIO);

	ASSERT(vp->v_type == VREG);

	if (cmd != F_FREESP)
		return (EINVAL);

	/* Normalize l_start relative to l_whence. */
	error = convoff(vp, bfp, 0, offset);
	if (error != 0)
		return (error);

	ASSERT(bfp->l_start >= 0);

	/* Only "free to end of file" is supported. */
	if (bfp->l_len != 0)
		return (EINVAL);

	/* Nothing to do if the size already matches. */
	va.va_mask = AT_SIZE;
	error = smbfsgetattr(vp, &va, cr);
	if (error != 0 || va.va_size == bfp->l_start)
		return (error);

	va.va_mask = AT_SIZE;
	va.va_size = bfp->l_start;
	return (smbfssetattr(vp, &va, 0, cr));
}
/*
 * No underlying "real" vnode behind an smbfs vnode; always ENOSYS.
 */
/* ARGSUSED */
static int
smbfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
{
	return (ENOSYS);
}
/*
 * VOP_PATHCONF: report file system limits and capabilities.
 * Anything not handled here falls through to generic fs_pathconf().
 */
static int
smbfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
	caller_context_t *ct)
{
	vfs_t *vfs;
	smbmntinfo_t *smi;
	struct smb_share *ssp;

	vfs = vp->v_vfsp;
	smi = VFTOSMI(vfs);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	switch (cmd) {
	case _PC_FILESIZEBITS:
		/* 64-bit sizes need the server's large-files cap. */
		ssp = smi->smi_share;
		if (SSTOVC(ssp)->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES)
			*valp = 64;
		else
			*valp = 32;
		break;

	case _PC_LINK_MAX:
		/* No hard links (see smbfs_link). */
		*valp = 1;
		break;

	case _PC_ACL_ENABLED:
		/* ACLs use the ACE (NT-style) format. */
		*valp = _ACL_ACE_ENABLED;
		break;

	case _PC_SYMLINK_MAX:
		/* No symlinks (see smbfs_symlink). */
		*valp = 0;
		break;

	case _PC_XATTR_EXISTS:
		if (vfs->vfs_flag & VFS_XATTR) {
			*valp = smbfs_xa_exists(vp, cr);
			break;
		}
		return (EINVAL);

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		*valp = 1;
		break;

	case _PC_TIMESTAMP_RESOLUTION:
		/* 100 ns -- SMB/FILETIME timestamp granularity. */
		*valp = 100L;
		break;

	default:
		return (fs_pathconf(vp, cmd, valp, cr, ct));
	}
	return (0);
}
/*
 * VOP_GETSECATTR: fetch the ACL.  Only ACE-style requests are
 * meaningful here; when the mount doesn't do ACLs (no SMI_ACL), or
 * fetching fails with ENOSYS, fall back to fabricating an ACL from
 * the mode bits via fs_fab_acl().
 */
static int
smbfs_getsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr,
	caller_context_t *ct)
{
	vfs_t *vfsp = vp->v_vfsp;
	smbmntinfo_t *smi = VFTOSMI(vfsp);
	uint_t mask;
	int error;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if ((smi->smi_flags & SMI_DEAD) || (vfsp->vfs_flag & VFS_UNMOUNTED))
		return (EIO);

	/* Our ACLs are ACE-style only. */
	mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT |
	    VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
	if (mask == 0)
		return (ENOSYS);

	error = (smi->smi_flags & SMI_ACL) ?
	    smbfs_acl_getvsa(vp, vsa, flag, cr) : ENOSYS;

	/* Fall back to a fabricated ACL from the mode bits. */
	if (error == ENOSYS)
		error = fs_fab_acl(vp, vsa, flag, cr, ct);

	return (error);
}
/*
 * VOP_SETSECATTR: store an ACL.  Requires an ACE-style request, a
 * writable mount, owner (or privileged) credentials, and a mount
 * with ACL support (SMI_ACL); otherwise the appropriate error.
 */
static int
smbfs_setsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr,
	caller_context_t *ct)
{
	vfs_t *vfsp = vp->v_vfsp;
	smbmntinfo_t *smi = VFTOSMI(vfsp);
	uint_t mask;
	int error;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if ((smi->smi_flags & SMI_DEAD) || (vfsp->vfs_flag & VFS_UNMOUNTED))
		return (EIO);

	/* Our ACLs are ACE-style only. */
	mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT);
	if (mask == 0)
		return (ENOSYS);

	if (vfsp->vfs_flag & VFS_RDONLY)
		return (EROFS);

	/* Allow only the effective owner (or privileged callers). */
	error = secpolicy_vnode_setdac(cr, smi->smi_uid);
	if (error != 0)
		return (error);

	if ((smi->smi_flags & SMI_ACL) == 0)
		return (ENOSYS);

	return (smbfs_acl_setvsa(vp, vsa, flag, cr));
}
/*
 * VOP_SHRLOCK: share reservations.  As with smbfs_frlock, only
 * supported on mounts with SMI_LLOCK (local locking); the generic
 * fs_shrlock() does the work.
 */
static int
smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
	caller_context_t *ct)
{
	if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
		return (EIO);

	if ((VTOSMI(vp)->smi_flags & SMI_LLOCK) == 0)
		return (ENOSYS);

	return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
}
/*
 * Vnode operations table for smbfs.  Paging/mmap entries exist only
 * in kernel builds; VNEVENT support is conditional on SMBFS_VNEVENT.
 */
const fs_operation_def_t smbfs_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = smbfs_open },
	VOPNAME_CLOSE,		{ .vop_close = smbfs_close },
	VOPNAME_READ,		{ .vop_read = smbfs_read },
	VOPNAME_WRITE,		{ .vop_write = smbfs_write },
	VOPNAME_IOCTL,		{ .vop_ioctl = smbfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = smbfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = smbfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = smbfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = smbfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = smbfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = smbfs_remove },
	VOPNAME_LINK,		{ .vop_link = smbfs_link },
	VOPNAME_RENAME,		{ .vop_rename = smbfs_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = smbfs_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = smbfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = smbfs_readdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = smbfs_symlink },
	VOPNAME_READLINK,	{ .vop_readlink = smbfs_readlink },
	VOPNAME_FSYNC,		{ .vop_fsync = smbfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = smbfs_inactive },
	VOPNAME_FID,		{ .vop_fid = smbfs_fid },
	VOPNAME_RWLOCK,		{ .vop_rwlock = smbfs_rwlock },
	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = smbfs_rwunlock },
	VOPNAME_SEEK,		{ .vop_seek = smbfs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = smbfs_frlock },
	VOPNAME_SPACE,		{ .vop_space = smbfs_space },
	VOPNAME_REALVP,		{ .vop_realvp = smbfs_realvp },
#ifdef	_KERNEL
	VOPNAME_GETPAGE,	{ .vop_getpage = smbfs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = smbfs_putpage },
	VOPNAME_MAP,		{ .vop_map = smbfs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = smbfs_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = smbfs_delmap },
#endif	/* _KERNEL */
	VOPNAME_PATHCONF,	{ .vop_pathconf = smbfs_pathconf },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = smbfs_setsecattr },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = smbfs_getsecattr },
	VOPNAME_SHRLOCK,	{ .vop_shrlock = smbfs_shrlock },
#ifdef	SMBFS_VNEVENT
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
#endif
	{ NULL, NULL }
};