#include <sys/param.h>
#include <sys/isa_defs.h>
#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/sysmacros.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/debug.h>
#include <sys/rctl.h>
#include <sys/nbmlock.h>
#include <sys/limits.h>
/*
 * Threshold (in bytes) below which read paths use cache-warm copyout
 * (UIO_COPY_CACHED); larger transfers fall back to UIO_COPY_DEFAULT —
 * presumably to avoid polluting the CPU cache on bulk reads.  Tunable.
 */
#define COPYOUT_MAX_CACHE (1<<17)
size_t copyout_max_cached = COPYOUT_MAX_CACHE;
/*
 * read(2) -- read up to `count' bytes from the current offset of open
 * file `fdes' into user buffer `cbuf'.  Returns the number of bytes
 * transferred, or -1 with errno set (via set_errno()).
 */
ssize_t
read(int fdes, void *cbuf, size_t count)
{
struct uio auio;
struct iovec aiov;
file_t *fp;
register vnode_t *vp;
struct cpu *cp;
int fflag, ioflag, rwflag;
ssize_t cnt, bcount;
int error = 0;
u_offset_t fileoff;
int in_crit = 0;	/* set once inside the nbmand critical region */
/* Counts that do not fit in ssize_t are invalid. */
if ((cnt = (ssize_t)count) < 0)
return (set_errno(EINVAL));
/* Hold the file so it cannot be closed out from under us. */
if ((fp = getf(fdes)) == NULL)
return (set_errno(EBADF));
if (((fflag = fp->f_flag) & FREAD) == 0) {
error = EBADF;
goto out;
}
vp = fp->f_vnode;
/* Zero-length reads of regular files succeed trivially. */
if (vp->v_type == VREG && cnt == 0) {
goto out;
}
rwflag = 0;	/* 0 == reader for VOP_RWLOCK/VOP_RWUNLOCK */
aiov.iov_base = cbuf;
aiov.iov_len = cnt;
/*
 * If the vnode may carry non-blocking mandatory locks, enter the
 * critical region and fail with EACCES on a conflicting lock.
 */
if (nbl_need_check(vp)) {
int svmand;
nbl_start_crit(vp, RW_READER);
in_crit = 1;
error = nbl_svmand(vp, fp->f_cred, &svmand);
if (error != 0)
goto out;
if (nbl_conflict(vp, NBL_READ, fp->f_offset, cnt, svmand,
NULL)) {
error = EACCES;
goto out;
}
}
(void) VOP_RWLOCK(vp, rwflag, NULL);
fileoff = (u_offset_t)fp->f_offset;
/*
 * Offset at or beyond the maximum representable offset for this open
 * (OFFSET_MAX(fp) depends on large-file support of the open): return
 * EOF (0) when at/after the file's size, EOVERFLOW otherwise.
 */
if (fileoff >= OFFSET_MAX(fp) && (vp->v_type == VREG)) {
struct vattr va;
va.va_mask = AT_SIZE;
if ((error = VOP_GETATTR(vp, &va, 0, fp->f_cred, NULL))) {
VOP_RWUNLOCK(vp, rwflag, NULL);
goto out;
}
if (fileoff >= va.va_size) {
cnt = 0;
VOP_RWUNLOCK(vp, rwflag, NULL);
goto out;
} else {
error = EOVERFLOW;
VOP_RWUNLOCK(vp, rwflag, NULL);
goto out;
}
}
/* Trim the request so it does not run past OFFSET_MAX. */
if ((vp->v_type == VREG) &&
(fileoff + cnt > OFFSET_MAX(fp))) {
cnt = (ssize_t)(OFFSET_MAX(fp) - fileoff);
}
auio.uio_loffset = fileoff;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_resid = bcount = cnt;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_llimit = MAXOFFSET_T;
auio.uio_fmode = fflag;
/* Cache-warm copyout only for transfers up to copyout_max_cached. */
if (bcount <= copyout_max_cached)
auio.uio_extflg = UIO_COPY_CACHED;
else
auio.uio_extflg = UIO_COPY_DEFAULT;
ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
/* If read sync is not asked for (FRSYNC), filter the sync flags. */
if ((ioflag & FRSYNC) == 0)
ioflag &= ~(FSYNC|FDSYNC);
error = VOP_READ(vp, &auio, ioflag, fp->f_cred, NULL);
cnt -= auio.uio_resid;	/* bytes actually transferred */
/* Per-CPU read statistics and per-LWP I/O accounting. */
CPU_STATS_ENTER_K();
cp = CPU;
CPU_STATS_ADDQ(cp, sys, sysread, 1);
CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)cnt);
CPU_STATS_EXIT_K();
ttolwp(curthread)->lwp_ru.ioch += (ulong_t)cnt;
/* FIFOs have no seek offset; record the transfer count instead. */
if (vp->v_type == VFIFO)
fp->f_offset = cnt;
else if (((fp->f_flag & FAPPEND) == 0) ||
(vp->v_type != VREG) || (bcount != 0))
fp->f_offset = auio.uio_loffset;
VOP_RWUNLOCK(vp, rwflag, NULL);
/* A signal-interrupted read that moved data still succeeds. */
if (error == EINTR && cnt != 0)
error = 0;
out:
if (in_crit)
nbl_end_crit(vp);
releasef(fdes);
if (error)
return (set_errno(error));
return (cnt);
}
/*
 * write(2) -- write up to `count' bytes from user buffer `cbuf' at the
 * current offset of open file `fdes'.  Returns the number of bytes
 * transferred, or -1 with errno set (via set_errno()).
 */
ssize_t
write(int fdes, void *cbuf, size_t count)
{
struct uio auio;
struct iovec aiov;
file_t *fp;
register vnode_t *vp;
struct cpu *cp;
int fflag, ioflag, rwflag;
ssize_t cnt, bcount;
int error = 0;
u_offset_t fileoff;
int in_crit = 0;	/* set once inside the nbmand critical region */
/* Counts that do not fit in ssize_t are invalid. */
if ((cnt = (ssize_t)count) < 0)
return (set_errno(EINVAL));
if ((fp = getf(fdes)) == NULL)
return (set_errno(EBADF));
if (((fflag = fp->f_flag) & FWRITE) == 0) {
error = EBADF;
goto out;
}
vp = fp->f_vnode;
/* Zero-length writes to regular files succeed trivially. */
if (vp->v_type == VREG && cnt == 0) {
goto out;
}
rwflag = 1;	/* 1 == writer for VOP_RWLOCK/VOP_RWUNLOCK */
aiov.iov_base = cbuf;
aiov.iov_len = cnt;
/* Check non-blocking mandatory locks; EACCES on conflict. */
if (nbl_need_check(vp)) {
int svmand;
nbl_start_crit(vp, RW_READER);
in_crit = 1;
error = nbl_svmand(vp, fp->f_cred, &svmand);
if (error != 0)
goto out;
if (nbl_conflict(vp, NBL_WRITE, fp->f_offset, cnt, svmand,
NULL)) {
error = EACCES;
goto out;
}
}
(void) VOP_RWLOCK(vp, rwflag, NULL);
fileoff = fp->f_offset;
if (vp->v_type == VREG) {
/*
 * Writing at/above the RLIMIT_FSIZE resource control: fire the
 * rctl action (may post SIGXFSZ) and fail with EFBIG.
 */
if (fileoff >= curproc->p_fsz_ctl) {
VOP_RWUNLOCK(vp, rwflag, NULL);
mutex_enter(&curproc->p_lock);
(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
curproc->p_rctls, curproc, RCA_UNSAFE_SIGINFO);
mutex_exit(&curproc->p_lock);
error = EFBIG;
goto out;
}
/* Offset beyond the maximum for this open form: EFBIG. */
if (fileoff >= OFFSET_MAX(fp)) {
VOP_RWUNLOCK(vp, rwflag, NULL);
error = EFBIG;
goto out;
}
/* Trim the request so it does not run past OFFSET_MAX. */
if (fileoff + cnt > OFFSET_MAX(fp))
cnt = (ssize_t)(OFFSET_MAX(fp) - fileoff);
}
auio.uio_loffset = fileoff;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_resid = bcount = cnt;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_llimit = curproc->p_fsz_ctl;
auio.uio_fmode = fflag;
auio.uio_extflg = UIO_COPY_DEFAULT;
/* FAPPEND is honored for write(2), unlike the positional writes. */
ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
error = VOP_WRITE(vp, &auio, ioflag, fp->f_cred, NULL);
cnt -= auio.uio_resid;	/* bytes actually transferred */
/* Per-CPU write statistics and per-LWP I/O accounting. */
CPU_STATS_ENTER_K();
cp = CPU;
CPU_STATS_ADDQ(cp, sys, syswrite, 1);
CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)cnt);
CPU_STATS_EXIT_K();
ttolwp(curthread)->lwp_ru.ioch += (ulong_t)cnt;
/* FIFOs have no seek offset; record the transfer count instead. */
if (vp->v_type == VFIFO)
fp->f_offset = cnt;
else if (((fp->f_flag & FAPPEND) == 0) ||
(vp->v_type != VREG) || (bcount != 0))
fp->f_offset = auio.uio_loffset;
VOP_RWUNLOCK(vp, rwflag, NULL);
/* A signal-interrupted write that moved data still succeeds. */
if (error == EINTR && cnt != 0)
error = 0;
out:
if (in_crit)
nbl_end_crit(vp);
releasef(fdes);
if (error)
return (set_errno(error));
return (cnt);
}
/*
 * pread(2) -- read up to `count' bytes from the explicit file offset
 * `offset' into `cbuf', without changing the file's seek offset.
 * Returns the byte count transferred, or -1 with errno set.
 */
ssize_t
pread(int fdes, void *cbuf, size_t count, off_t offset)
{
struct uio auio;
struct iovec aiov;
file_t *fp;
register vnode_t *vp;
struct cpu *cp;
int fflag, ioflag, rwflag;
ssize_t bcount;
int error = 0;
u_offset_t fileoff = (u_offset_t)(ulong_t)offset;
#ifdef _SYSCALL32_IMPL
/* Maximum offset depends on the caller's data model. */
u_offset_t maxoff = get_udatamodel() == DATAMODEL_ILP32 ?
MAXOFF32_T : MAXOFFSET_T;
#else
const u_offset_t maxoff = MAXOFF32_T;
#endif
int in_crit = 0;	/* set once inside the nbmand critical region */
if ((bcount = (ssize_t)count) < 0)
return (set_errno(EINVAL));
if ((fp = getf(fdes)) == NULL)
return (set_errno(EBADF));
if (((fflag = fp->f_flag) & (FREAD)) == 0) {
error = EBADF;
goto out;
}
rwflag = 0;	/* reader lock */
vp = fp->f_vnode;
if (vp->v_type == VREG) {
if (bcount == 0)
goto out;
/* Offsets not representable in the caller's off_t: EINVAL. */
if (fileoff > maxoff) {
error = EINVAL;
goto out;
}
/* Trim the request at the data-model maximum offset. */
if (fileoff + bcount > maxoff)
bcount = (ssize_t)((offset_t)maxoff - fileoff);
} else if (vp->v_type == VFIFO) {
/* Positional I/O on a pipe/FIFO is meaningless. */
error = ESPIPE;
goto out;
}
/* Check non-blocking mandatory locks; EACCES on conflict. */
if (nbl_need_check(vp)) {
int svmand;
nbl_start_crit(vp, RW_READER);
in_crit = 1;
error = nbl_svmand(vp, fp->f_cred, &svmand);
if (error != 0)
goto out;
if (nbl_conflict(vp, NBL_READ, fileoff, bcount, svmand,
NULL)) {
error = EACCES;
goto out;
}
}
aiov.iov_base = cbuf;
aiov.iov_len = bcount;
(void) VOP_RWLOCK(vp, rwflag, NULL);
/*
 * Reading exactly at maxoff: EOF (0) when at/after the file size,
 * EOVERFLOW otherwise.
 */
if (vp->v_type == VREG && fileoff == (u_offset_t)maxoff) {
struct vattr va;
va.va_mask = AT_SIZE;
if ((error = VOP_GETATTR(vp, &va, 0, fp->f_cred, NULL))) {
VOP_RWUNLOCK(vp, rwflag, NULL);
goto out;
}
VOP_RWUNLOCK(vp, rwflag, NULL);
if (fileoff >= va.va_size) {
bcount = 0;
goto out;
}
error = EOVERFLOW;
goto out;
}
auio.uio_loffset = fileoff;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_resid = bcount;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_llimit = MAXOFFSET_T;
auio.uio_fmode = fflag;
auio.uio_extflg = UIO_COPY_CACHED;
ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
/* If read sync is not asked for, filter the sync flags. */
if ((ioflag & FRSYNC) == 0)
ioflag &= ~(FSYNC|FDSYNC);
error = VOP_READ(vp, &auio, ioflag, fp->f_cred, NULL);
bcount -= auio.uio_resid;	/* bytes actually transferred */
CPU_STATS_ENTER_K();
cp = CPU;
CPU_STATS_ADDQ(cp, sys, sysread, 1);
CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)bcount);
CPU_STATS_EXIT_K();
ttolwp(curthread)->lwp_ru.ioch += (ulong_t)bcount;
/* NOTE: fp->f_offset is deliberately not updated for pread. */
VOP_RWUNLOCK(vp, rwflag, NULL);
if (error == EINTR && bcount != 0)
error = 0;
out:
if (in_crit)
nbl_end_crit(vp);
releasef(fdes);
if (error)
return (set_errno(error));
return (bcount);
}
/*
 * pwrite(2) -- write up to `count' bytes from `cbuf' at the explicit
 * file offset `offset', without changing the file's seek offset.
 * Returns the byte count transferred, or -1 with errno set.
 */
ssize_t
pwrite(int fdes, void *cbuf, size_t count, off_t offset)
{
struct uio auio;
struct iovec aiov;
file_t *fp;
register vnode_t *vp;
struct cpu *cp;
int fflag, ioflag, rwflag;
ssize_t bcount;
int error = 0;
u_offset_t fileoff = (u_offset_t)(ulong_t)offset;
#ifdef _SYSCALL32_IMPL
/* Maximum offset depends on the caller's data model. */
u_offset_t maxoff = get_udatamodel() == DATAMODEL_ILP32 ?
MAXOFF32_T : MAXOFFSET_T;
#else
const u_offset_t maxoff = MAXOFF32_T;
#endif
int in_crit = 0;	/* set once inside the nbmand critical region */
if ((bcount = (ssize_t)count) < 0)
return (set_errno(EINVAL));
if ((fp = getf(fdes)) == NULL)
return (set_errno(EBADF));
if (((fflag = fp->f_flag) & (FWRITE)) == 0) {
error = EBADF;
goto out;
}
rwflag = 1;	/* writer lock */
vp = fp->f_vnode;
if (vp->v_type == VREG) {
if (bcount == 0)
goto out;
/* Offsets not representable in the caller's off_t: EINVAL. */
if (fileoff > maxoff) {
error = EINVAL;
goto out;
}
/*
 * Writing at/above RLIMIT_FSIZE: fire the rctl action (may
 * post SIGXFSZ) and fail with EFBIG.
 */
if (fileoff >= curproc->p_fsz_ctl) {
mutex_enter(&curproc->p_lock);
(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
curproc->p_rctls, curproc, RCA_UNSAFE_SIGINFO);
mutex_exit(&curproc->p_lock);
error = EFBIG;
goto out;
}
/* Nothing can be written exactly at maxoff. */
if (fileoff == maxoff) {
error = EFBIG;
goto out;
}
/* Trim the request at the data-model maximum offset. */
if (fileoff + count > maxoff)
bcount = (ssize_t)((u_offset_t)maxoff - fileoff);
} else if (vp->v_type == VFIFO) {
/* Positional I/O on a pipe/FIFO is meaningless. */
error = ESPIPE;
goto out;
}
/* Check non-blocking mandatory locks; EACCES on conflict. */
if (nbl_need_check(vp)) {
int svmand;
nbl_start_crit(vp, RW_READER);
in_crit = 1;
error = nbl_svmand(vp, fp->f_cred, &svmand);
if (error != 0)
goto out;
if (nbl_conflict(vp, NBL_WRITE, fileoff, bcount, svmand,
NULL)) {
error = EACCES;
goto out;
}
}
aiov.iov_base = cbuf;
aiov.iov_len = bcount;
(void) VOP_RWLOCK(vp, rwflag, NULL);
auio.uio_loffset = fileoff;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_resid = bcount;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_llimit = curproc->p_fsz_ctl;
auio.uio_fmode = fflag;
auio.uio_extflg = UIO_COPY_CACHED;
/*
 * FAPPEND is deliberately omitted: per POSIX, pwrite() writes at the
 * given position regardless of O_APPEND.
 */
ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);
error = VOP_WRITE(vp, &auio, ioflag, fp->f_cred, NULL);
bcount -= auio.uio_resid;	/* bytes actually transferred */
CPU_STATS_ENTER_K();
cp = CPU;
CPU_STATS_ADDQ(cp, sys, syswrite, 1);
CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)bcount);
CPU_STATS_EXIT_K();
ttolwp(curthread)->lwp_ru.ioch += (ulong_t)bcount;
/* NOTE: fp->f_offset is deliberately not updated for pwrite. */
VOP_RWUNLOCK(vp, rwflag, NULL);
if (error == EINTR && bcount != 0)
error = 0;
out:
if (in_crit)
nbl_end_crit(vp);
releasef(fdes);
if (error)
return (set_errno(error));
return (bcount);
}
/*
 * readv(2) -- scatter read into `iovcnt' user buffers described by
 * `iovp', starting at the file's current offset.  Returns the total
 * byte count transferred, or -1 with errno set.
 */
ssize_t
readv(int fdes, struct iovec *iovp, int iovcnt)
{
struct uio auio;
/* Small vectors live on the stack; larger ones are kmem-allocated. */
struct iovec buf[IOV_MAX_STACK], *aiov = buf;
int aiovlen = 0;	/* nonzero iff aiov was kmem_alloc'ed */
file_t *fp;
register vnode_t *vp;
struct cpu *cp;
int fflag, ioflag, rwflag;
ssize_t count, bcount;
int error = 0;
int i;
u_offset_t fileoff;
int in_crit = 0;	/* set once inside the nbmand critical region */
if (iovcnt <= 0 || iovcnt > IOV_MAX)
return (set_errno(EINVAL));
if (iovcnt > IOV_MAX_STACK) {
aiovlen = iovcnt * sizeof (iovec_t);
aiov = kmem_alloc(aiovlen, KM_SLEEP);
}
#ifdef _SYSCALL32_IMPL
/*
 * 32-bit caller: copy in ILP32 iovecs and widen them into the native
 * aiov array, rejecting negative or overflowing total lengths.
 */
if (get_udatamodel() == DATAMODEL_ILP32) {
struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;
int aiov32len;
ssize32_t count32;
aiov32len = iovcnt * sizeof (iovec32_t);
if (aiovlen != 0)
aiov32 = kmem_alloc(aiov32len, KM_SLEEP);
if (copyin(iovp, aiov32, aiov32len)) {
if (aiovlen != 0) {
kmem_free(aiov32, aiov32len);
kmem_free(aiov, aiovlen);
}
return (set_errno(EFAULT));
}
count32 = 0;
for (i = 0; i < iovcnt; i++) {
ssize32_t iovlen32 = aiov32[i].iov_len;
count32 += iovlen32;
if (iovlen32 < 0 || count32 < 0) {
if (aiovlen != 0) {
kmem_free(aiov32, aiov32len);
kmem_free(aiov, aiovlen);
}
return (set_errno(EINVAL));
}
aiov[i].iov_len = iovlen32;
aiov[i].iov_base =
(caddr_t)(uintptr_t)aiov32[i].iov_base;
}
if (aiovlen != 0)
kmem_free(aiov32, aiov32len);
} else
#endif
if (copyin(iovp, aiov, iovcnt * sizeof (iovec_t))) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EFAULT));
}
/* Total the request, rejecting negative lengths and ssize_t overflow. */
count = 0;
for (i = 0; i < iovcnt; i++) {
ssize_t iovlen = aiov[i].iov_len;
count += iovlen;
if (iovlen < 0 || count < 0) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EINVAL));
}
}
if ((fp = getf(fdes)) == NULL) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EBADF));
}
if (((fflag = fp->f_flag) & FREAD) == 0) {
error = EBADF;
goto out;
}
vp = fp->f_vnode;
if (vp->v_type == VREG && count == 0) {
goto out;
}
rwflag = 0;	/* reader lock */
/* Check non-blocking mandatory locks; EACCES on conflict. */
if (nbl_need_check(vp)) {
int svmand;
nbl_start_crit(vp, RW_READER);
in_crit = 1;
error = nbl_svmand(vp, fp->f_cred, &svmand);
if (error != 0)
goto out;
if (nbl_conflict(vp, NBL_READ, fp->f_offset, count, svmand,
NULL)) {
error = EACCES;
goto out;
}
}
(void) VOP_RWLOCK(vp, rwflag, NULL);
fileoff = fp->f_offset;
/* At/past OFFSET_MAX: EOF (0) at/after file size, else EOVERFLOW. */
if ((vp->v_type == VREG) && (fileoff >= OFFSET_MAX(fp))) {
struct vattr va;
va.va_mask = AT_SIZE;
if ((error = VOP_GETATTR(vp, &va, 0, fp->f_cred, NULL))) {
VOP_RWUNLOCK(vp, rwflag, NULL);
goto out;
}
if (fileoff >= va.va_size) {
VOP_RWUNLOCK(vp, rwflag, NULL);
count = 0;
goto out;
} else {
VOP_RWUNLOCK(vp, rwflag, NULL);
error = EOVERFLOW;
goto out;
}
}
/* Trim the request so it does not run past OFFSET_MAX. */
if ((vp->v_type == VREG) && (fileoff + count > OFFSET_MAX(fp))) {
count = (ssize_t)(OFFSET_MAX(fp) - fileoff);
}
auio.uio_loffset = fileoff;
auio.uio_iov = aiov;
auio.uio_iovcnt = iovcnt;
auio.uio_resid = bcount = count;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_llimit = MAXOFFSET_T;
auio.uio_fmode = fflag;
/* Cache-warm copyout only for transfers up to copyout_max_cached. */
if (bcount <= copyout_max_cached)
auio.uio_extflg = UIO_COPY_CACHED;
else
auio.uio_extflg = UIO_COPY_DEFAULT;
ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
/* If read sync is not asked for, filter the sync flags. */
if ((ioflag & FRSYNC) == 0)
ioflag &= ~(FSYNC|FDSYNC);
error = VOP_READ(vp, &auio, ioflag, fp->f_cred, NULL);
count -= auio.uio_resid;	/* bytes actually transferred */
CPU_STATS_ENTER_K();
cp = CPU;
CPU_STATS_ADDQ(cp, sys, sysread, 1);
CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)count);
CPU_STATS_EXIT_K();
ttolwp(curthread)->lwp_ru.ioch += (ulong_t)count;
/* FIFOs have no seek offset; record the transfer count instead. */
if (vp->v_type == VFIFO)
fp->f_offset = count;
else if (((fp->f_flag & FAPPEND) == 0) ||
(vp->v_type != VREG) || (bcount != 0))
fp->f_offset = auio.uio_loffset;
VOP_RWUNLOCK(vp, rwflag, NULL);
if (error == EINTR && count != 0)
error = 0;
out:
if (in_crit)
nbl_end_crit(vp);
releasef(fdes);
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
if (error)
return (set_errno(error));
return (count);
}
/*
 * writev(2) -- gather write from `iovcnt' user buffers described by
 * `iovp', starting at the file's current offset.  Returns the total
 * byte count transferred, or -1 with errno set.
 */
ssize_t
writev(int fdes, struct iovec *iovp, int iovcnt)
{
struct uio auio;
/* Small vectors live on the stack; larger ones are kmem-allocated. */
struct iovec buf[IOV_MAX_STACK], *aiov = buf;
int aiovlen = 0;	/* nonzero iff aiov was kmem_alloc'ed */
file_t *fp;
register vnode_t *vp;
struct cpu *cp;
int fflag, ioflag, rwflag;
ssize_t count, bcount;
int error = 0;
int i;
u_offset_t fileoff;
int in_crit = 0;	/* set once inside the nbmand critical region */
if (iovcnt <= 0 || iovcnt > IOV_MAX)
return (set_errno(EINVAL));
if (iovcnt > IOV_MAX_STACK) {
aiovlen = iovcnt * sizeof (iovec_t);
aiov = kmem_alloc(aiovlen, KM_SLEEP);
}
#ifdef _SYSCALL32_IMPL
/* 32-bit caller: copy in ILP32 iovecs and widen into native aiov. */
if (get_udatamodel() == DATAMODEL_ILP32) {
struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;
int aiov32len;
ssize32_t count32;
aiov32len = iovcnt * sizeof (iovec32_t);
if (aiovlen != 0)
aiov32 = kmem_alloc(aiov32len, KM_SLEEP);
if (copyin(iovp, aiov32, aiov32len)) {
if (aiovlen != 0) {
kmem_free(aiov32, aiov32len);
kmem_free(aiov, aiovlen);
}
return (set_errno(EFAULT));
}
count32 = 0;
for (i = 0; i < iovcnt; i++) {
ssize32_t iovlen = aiov32[i].iov_len;
count32 += iovlen;
if (iovlen < 0 || count32 < 0) {
if (aiovlen != 0) {
kmem_free(aiov32, aiov32len);
kmem_free(aiov, aiovlen);
}
return (set_errno(EINVAL));
}
aiov[i].iov_len = iovlen;
aiov[i].iov_base =
(caddr_t)(uintptr_t)aiov32[i].iov_base;
}
if (aiovlen != 0)
kmem_free(aiov32, aiov32len);
} else
#endif
if (copyin(iovp, aiov, iovcnt * sizeof (iovec_t))) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EFAULT));
}
/* Total the request, rejecting negative lengths and ssize_t overflow. */
count = 0;
for (i = 0; i < iovcnt; i++) {
ssize_t iovlen = aiov[i].iov_len;
count += iovlen;
if (iovlen < 0 || count < 0) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EINVAL));
}
}
if ((fp = getf(fdes)) == NULL) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EBADF));
}
if (((fflag = fp->f_flag) & FWRITE) == 0) {
error = EBADF;
goto out;
}
vp = fp->f_vnode;
if (vp->v_type == VREG && count == 0) {
goto out;
}
rwflag = 1;	/* writer lock */
/* Check non-blocking mandatory locks; EACCES on conflict. */
if (nbl_need_check(vp)) {
int svmand;
nbl_start_crit(vp, RW_READER);
in_crit = 1;
error = nbl_svmand(vp, fp->f_cred, &svmand);
if (error != 0)
goto out;
if (nbl_conflict(vp, NBL_WRITE, fp->f_offset, count, svmand,
NULL)) {
error = EACCES;
goto out;
}
}
(void) VOP_RWLOCK(vp, rwflag, NULL);
fileoff = fp->f_offset;
if (vp->v_type == VREG) {
/*
 * Writing at/above RLIMIT_FSIZE: fire the rctl action (may
 * post SIGXFSZ) and fail with EFBIG.
 */
if (fileoff >= curproc->p_fsz_ctl) {
VOP_RWUNLOCK(vp, rwflag, NULL);
mutex_enter(&curproc->p_lock);
(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
curproc->p_rctls, curproc, RCA_UNSAFE_SIGINFO);
mutex_exit(&curproc->p_lock);
error = EFBIG;
goto out;
}
if (fileoff >= OFFSET_MAX(fp)) {
VOP_RWUNLOCK(vp, rwflag, NULL);
error = EFBIG;
goto out;
}
/* Trim the request so it does not run past OFFSET_MAX. */
if (fileoff + count > OFFSET_MAX(fp))
count = (ssize_t)(OFFSET_MAX(fp) - fileoff);
}
auio.uio_loffset = fileoff;
auio.uio_iov = aiov;
auio.uio_iovcnt = iovcnt;
auio.uio_resid = bcount = count;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_llimit = curproc->p_fsz_ctl;
auio.uio_fmode = fflag;
auio.uio_extflg = UIO_COPY_DEFAULT;
/* FAPPEND is honored for writev(2), unlike positional writes. */
ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
error = VOP_WRITE(vp, &auio, ioflag, fp->f_cred, NULL);
count -= auio.uio_resid;	/* bytes actually transferred */
CPU_STATS_ENTER_K();
cp = CPU;
CPU_STATS_ADDQ(cp, sys, syswrite, 1);
CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)count);
CPU_STATS_EXIT_K();
ttolwp(curthread)->lwp_ru.ioch += (ulong_t)count;
/* FIFOs have no seek offset; record the transfer count instead. */
if (vp->v_type == VFIFO)
fp->f_offset = count;
else if (((fp->f_flag & FAPPEND) == 0) ||
(vp->v_type != VREG) || (bcount != 0))
fp->f_offset = auio.uio_loffset;
VOP_RWUNLOCK(vp, rwflag, NULL);
if (error == EINTR && count != 0)
error = 0;
out:
if (in_crit)
nbl_end_crit(vp);
releasef(fdes);
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
if (error)
return (set_errno(error));
return (count);
}
/*
 * preadv(2) -- scatter read into `iovcnt' user buffers at an explicit
 * 64-bit file offset (assembled from `offset'/`extended_offset' on
 * 32-bit kernels/callers), without changing the file's seek offset.
 * Returns the total byte count transferred, or -1 with errno set.
 *
 * Two fixes versus the prior revision, making this path consistent
 * with read()/readv()/pread():
 *  - the mandatory-lock conflict check uses NBL_READ (NBL_WRITE would
 *    wrongly deny reads that conflict only with write locks);
 *  - FSYNC/FDSYNC are filtered out of ioflag unless FRSYNC was
 *    requested, as every other read path does.
 */
ssize_t
preadv(int fdes, struct iovec *iovp, int iovcnt, off_t offset,
	off_t extended_offset)
{
	struct uio auio;
	/* Small vectors on the stack; larger ones kmem-allocated. */
	struct iovec buf[IOV_MAX_STACK], *aiov = buf;
	int aiovlen = 0;	/* nonzero iff aiov was kmem_alloc'ed */
	file_t *fp;
	register vnode_t *vp;
	struct cpu *cp;
	int fflag, ioflag, rwflag;
	ssize_t count, bcount;
	int error = 0;
	int i;
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
	/* Rebuild the 64-bit offset from its two 32-bit halves. */
	u_offset_t fileoff = ((u_offset_t)extended_offset << 32) |
	    (u_offset_t)offset;
#else
	u_offset_t fileoff = (u_offset_t)(ulong_t)offset;
#endif
	int in_crit = 0;	/* set once inside the nbmand critical region */

	if (iovcnt <= 0 || iovcnt > IOV_MAX)
		return (set_errno(EINVAL));

	if (iovcnt > IOV_MAX_STACK) {
		aiovlen = iovcnt * sizeof (iovec_t);
		aiov = kmem_alloc(aiovlen, KM_SLEEP);
	}
#ifdef _SYSCALL32_IMPL
	/* 32-bit caller: copy in ILP32 iovecs and widen into native aiov. */
	if (get_udatamodel() == DATAMODEL_ILP32) {
		struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;
		int aiov32len;
		ssize32_t count32;

		aiov32len = iovcnt * sizeof (iovec32_t);
		if (aiovlen != 0)
			aiov32 = kmem_alloc(aiov32len, KM_SLEEP);

		if (copyin(iovp, aiov32, aiov32len)) {
			if (aiovlen != 0) {
				kmem_free(aiov32, aiov32len);
				kmem_free(aiov, aiovlen);
			}
			return (set_errno(EFAULT));
		}

		count32 = 0;
		for (i = 0; i < iovcnt; i++) {
			ssize32_t iovlen32 = aiov32[i].iov_len;
			count32 += iovlen32;
			if (iovlen32 < 0 || count32 < 0) {
				if (aiovlen != 0) {
					kmem_free(aiov32, aiov32len);
					kmem_free(aiov, aiovlen);
				}
				return (set_errno(EINVAL));
			}
			aiov[i].iov_len = iovlen32;
			aiov[i].iov_base =
			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
		}
		if (aiovlen != 0)
			kmem_free(aiov32, aiov32len);
	} else
#endif
	if (copyin(iovp, aiov, iovcnt * sizeof (iovec_t))) {
		if (aiovlen != 0)
			kmem_free(aiov, aiovlen);
		return (set_errno(EFAULT));
	}

	/* Total the request; reject negative lengths and overflow. */
	count = 0;
	for (i = 0; i < iovcnt; i++) {
		ssize_t iovlen = aiov[i].iov_len;
		count += iovlen;
		if (iovlen < 0 || count < 0) {
			if (aiovlen != 0)
				kmem_free(aiov, aiovlen);
			return (set_errno(EINVAL));
		}
	}

	if ((bcount = count) < 0) {
		if (aiovlen != 0)
			kmem_free(aiov, aiovlen);
		return (set_errno(EINVAL));
	}
	if ((fp = getf(fdes)) == NULL) {
		if (aiovlen != 0)
			kmem_free(aiov, aiovlen);
		return (set_errno(EBADF));
	}
	if (((fflag = fp->f_flag) & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	vp = fp->f_vnode;
	rwflag = 0;	/* reader lock */
	if (vp->v_type == VREG) {

		if (bcount == 0)
			goto out;

		/*
		 * Reading at/past OFFSET_MAX for this open: EOF (count 0)
		 * when at/after the file's size, EOVERFLOW otherwise.
		 */
		if (fileoff >= OFFSET_MAX(fp)) {
			struct vattr va;
			va.va_mask = AT_SIZE;
			error = VOP_GETATTR(vp, &va, 0, fp->f_cred, NULL);
			if (error == 0)  {
				if (fileoff >= va.va_size) {
					count = 0;
				} else {
					error = EOVERFLOW;
				}
			}
			goto out;
		}

		ASSERT(bcount == count);

		/* Trim the request so it does not run past OFFSET_MAX. */
		if ((fileoff + count) > OFFSET_MAX(fp))
			count = (ssize_t)(OFFSET_MAX(fp) - fileoff);
	} else if (vp->v_type == VFIFO) {
		/* Positional I/O on a pipe/FIFO is meaningless. */
		error = ESPIPE;
		goto out;
	}

	/*
	 * Check non-blocking mandatory locks; this is a read, so test
	 * for NBL_READ conflicts.  EACCES on conflict.
	 */
	if (nbl_need_check(vp)) {
		int svmand;

		nbl_start_crit(vp, RW_READER);
		in_crit = 1;
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (error != 0)
			goto out;
		if (nbl_conflict(vp, NBL_READ, fileoff, count, svmand, NULL)) {
			error = EACCES;
			goto out;
		}
	}

	(void) VOP_RWLOCK(vp, rwflag, NULL);

	auio.uio_loffset = fileoff;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_resid = bcount = count;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = MAXOFFSET_T;
	auio.uio_fmode = fflag;
	/* Cache-warm copyout only up to copyout_max_cached bytes. */
	if (bcount <= copyout_max_cached)
		auio.uio_extflg = UIO_COPY_CACHED;
	else
		auio.uio_extflg = UIO_COPY_DEFAULT;

	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
	/* If read sync is not asked for, filter the sync flags. */
	if ((ioflag & FRSYNC) == 0)
		ioflag &= ~(FSYNC|FDSYNC);
	error = VOP_READ(vp, &auio, ioflag, fp->f_cred, NULL);
	count -= auio.uio_resid;	/* bytes actually transferred */
	CPU_STATS_ENTER_K();
	cp = CPU;
	CPU_STATS_ADDQ(cp, sys, sysread, 1);
	CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)count);
	CPU_STATS_EXIT_K();
	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)count;

	/* NOTE: fp->f_offset is deliberately not updated for preadv. */
	VOP_RWUNLOCK(vp, rwflag, NULL);

	if (error == EINTR && count != 0)
		error = 0;
out:
	if (in_crit)
		nbl_end_crit(vp);
	releasef(fdes);
	if (aiovlen != 0)
		kmem_free(aiov, aiovlen);
	if (error)
		return (set_errno(error));
	return (count);
}
/*
 * pwritev(2) -- gather write from `iovcnt' user buffers at an explicit
 * 64-bit file offset (assembled from `offset'/`extended_offset'),
 * without changing the file's seek offset.  Returns the total byte
 * count transferred, or -1 with errno set.
 */
ssize_t
pwritev(int fdes, struct iovec *iovp, int iovcnt, off_t offset,
off_t extended_offset)
{
struct uio auio;
/* Small vectors live on the stack; larger ones are kmem-allocated. */
struct iovec buf[IOV_MAX_STACK], *aiov = buf;
int aiovlen = 0;	/* nonzero iff aiov was kmem_alloc'ed */
file_t *fp;
register vnode_t *vp;
struct cpu *cp;
int fflag, ioflag, rwflag;
ssize_t count, bcount;
int error = 0;
int i;
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
/* Rebuild the 64-bit offset from its two 32-bit halves. */
u_offset_t fileoff = ((u_offset_t)extended_offset << 32) |
(u_offset_t)offset;
#else
u_offset_t fileoff = (u_offset_t)(ulong_t)offset;
#endif
int in_crit = 0;	/* set once inside the nbmand critical region */
if (iovcnt <= 0 || iovcnt > IOV_MAX)
return (set_errno(EINVAL));
if (iovcnt > IOV_MAX_STACK) {
aiovlen = iovcnt * sizeof (iovec_t);
aiov = kmem_alloc(aiovlen, KM_SLEEP);
}
#ifdef _SYSCALL32_IMPL
/* 32-bit caller: copy in ILP32 iovecs and widen into native aiov. */
if (get_udatamodel() == DATAMODEL_ILP32) {
struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;
int aiov32len;
ssize32_t count32;
aiov32len = iovcnt * sizeof (iovec32_t);
if (aiovlen != 0)
aiov32 = kmem_alloc(aiov32len, KM_SLEEP);
if (copyin(iovp, aiov32, aiov32len)) {
if (aiovlen != 0) {
kmem_free(aiov32, aiov32len);
kmem_free(aiov, aiovlen);
}
return (set_errno(EFAULT));
}
count32 = 0;
for (i = 0; i < iovcnt; i++) {
ssize32_t iovlen32 = aiov32[i].iov_len;
count32 += iovlen32;
if (iovlen32 < 0 || count32 < 0) {
if (aiovlen != 0) {
kmem_free(aiov32, aiov32len);
kmem_free(aiov, aiovlen);
}
return (set_errno(EINVAL));
}
aiov[i].iov_len = iovlen32;
aiov[i].iov_base =
(caddr_t)(uintptr_t)aiov32[i].iov_base;
}
if (aiovlen != 0)
kmem_free(aiov32, aiov32len);
} else
#endif
if (copyin(iovp, aiov, iovcnt * sizeof (iovec_t))) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EFAULT));
}
/* Total the request, rejecting negative lengths and ssize_t overflow. */
count = 0;
for (i = 0; i < iovcnt; i++) {
ssize_t iovlen = aiov[i].iov_len;
count += iovlen;
if (iovlen < 0 || count < 0) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EINVAL));
}
}
if ((bcount = count) < 0) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EINVAL));
}
if ((fp = getf(fdes)) == NULL) {
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
return (set_errno(EBADF));
}
if (((fflag = fp->f_flag) & FWRITE) == 0) {
error = EBADF;
goto out;
}
vp = fp->f_vnode;
rwflag = 1;	/* writer lock */
if (vp->v_type == VREG) {
if (bcount == 0)
goto out;
/* Nothing can be written at/past OFFSET_MAX for this open. */
if (fileoff >= OFFSET_MAX(fp)) {
error = EFBIG;
goto out;
}
/*
 * Writing at/above RLIMIT_FSIZE: fire the rctl action (may
 * post SIGXFSZ) and fail with EFBIG.
 */
if (fileoff >= curproc->p_fsz_ctl) {
mutex_enter(&curproc->p_lock);
(void) rctl_action(
rctlproc_legacy[RLIMIT_FSIZE],
curproc->p_rctls, curproc,
RCA_UNSAFE_SIGINFO);
mutex_exit(&curproc->p_lock);
error = EFBIG;
goto out;
}
ASSERT(bcount == count);
/* Trim the request so it does not run past OFFSET_MAX. */
if ((fileoff + count) > OFFSET_MAX(fp))
count = (ssize_t)(OFFSET_MAX(fp) - fileoff);
} else if (vp->v_type == VFIFO) {
/* Positional I/O on a pipe/FIFO is meaningless. */
error = ESPIPE;
goto out;
}
/* Check non-blocking mandatory locks; EACCES on conflict. */
if (nbl_need_check(vp)) {
int svmand;
nbl_start_crit(vp, RW_READER);
in_crit = 1;
error = nbl_svmand(vp, fp->f_cred, &svmand);
if (error != 0)
goto out;
if (nbl_conflict(vp, NBL_WRITE, fileoff, count, svmand, NULL)) {
error = EACCES;
goto out;
}
}
(void) VOP_RWLOCK(vp, rwflag, NULL);
auio.uio_loffset = fileoff;
auio.uio_iov = aiov;
auio.uio_iovcnt = iovcnt;
auio.uio_resid = bcount = count;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_llimit = curproc->p_fsz_ctl;
auio.uio_fmode = fflag;
auio.uio_extflg = UIO_COPY_CACHED;
/*
 * FAPPEND is deliberately omitted: positional writes ignore O_APPEND
 * per POSIX.
 */
ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);
error = VOP_WRITE(vp, &auio, ioflag, fp->f_cred, NULL);
count -= auio.uio_resid;	/* bytes actually transferred */
CPU_STATS_ENTER_K();
cp = CPU;
CPU_STATS_ADDQ(cp, sys, syswrite, 1);
CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)count);
CPU_STATS_EXIT_K();
ttolwp(curthread)->lwp_ru.ioch += (ulong_t)count;
/* NOTE: fp->f_offset is deliberately not updated for pwritev. */
VOP_RWUNLOCK(vp, rwflag, NULL);
if (error == EINTR && count != 0)
error = 0;
out:
if (in_crit)
nbl_end_crit(vp);
releasef(fdes);
if (aiovlen != 0)
kmem_free(aiov, aiovlen);
if (error)
return (set_errno(error));
return (count);
}
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
/*
 * pread64(2) -- large-file positional read for 32-bit callers.  The
 * 64-bit file offset arrives as two 32-bit halves whose order depends
 * on endianness.  Returns the byte count transferred, or -1 with
 * errno set.
 */
ssize32_t
pread64(int fdes, void *cbuf, size32_t count, uint32_t offset_1,
uint32_t offset_2)
{
struct uio auio;
struct iovec aiov;
file_t *fp;
register vnode_t *vp;
struct cpu *cp;
int fflag, ioflag, rwflag;
ssize_t bcount;
int error = 0;
u_offset_t fileoff;
int in_crit = 0;	/* set once inside the nbmand critical region */
/* Reassemble the 64-bit offset from its two 32-bit halves. */
#if defined(_LITTLE_ENDIAN)
fileoff = ((u_offset_t)offset_2 << 32) | (u_offset_t)offset_1;
#else
fileoff = ((u_offset_t)offset_1 << 32) | (u_offset_t)offset_2;
#endif
/* A 32-bit caller cannot request more than INT32_MAX bytes. */
if ((bcount = (ssize_t)count) < 0 || bcount > INT32_MAX)
return (set_errno(EINVAL));
if ((fp = getf(fdes)) == NULL)
return (set_errno(EBADF));
if (((fflag = fp->f_flag) & (FREAD)) == 0) {
error = EBADF;
goto out;
}
rwflag = 0;	/* reader lock */
vp = fp->f_vnode;
if (vp->v_type == VREG) {
if (bcount == 0)
goto out;
/*
 * Offsets beyond the absolute maximum are invalid.
 * (fileoff is unsigned, so this catches values that would be
 * negative as an offset_t.)
 */
if (fileoff > MAXOFFSET_T) {
error = EINVAL;
goto out;
}
/* Trim the request at the absolute maximum offset. */
if (fileoff + bcount > MAXOFFSET_T)
bcount = (ssize_t)(MAXOFFSET_T - fileoff);
} else if (vp->v_type == VFIFO) {
/* Positional I/O on a pipe/FIFO is meaningless. */
error = ESPIPE;
goto out;
}
/* Check non-blocking mandatory locks; EACCES on conflict. */
if (nbl_need_check(vp)) {
int svmand;
nbl_start_crit(vp, RW_READER);
in_crit = 1;
error = nbl_svmand(vp, fp->f_cred, &svmand);
if (error != 0)
goto out;
if (nbl_conflict(vp, NBL_READ, fileoff, bcount, svmand,
NULL)) {
error = EACCES;
goto out;
}
}
aiov.iov_base = cbuf;
aiov.iov_len = bcount;
(void) VOP_RWLOCK(vp, rwflag, NULL);
auio.uio_loffset = fileoff;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_resid = bcount;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_llimit = MAXOFFSET_T;
auio.uio_fmode = fflag;
auio.uio_extflg = UIO_COPY_CACHED;
ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
/* If read sync is not asked for, filter the sync flags. */
if ((ioflag & FRSYNC) == 0)
ioflag &= ~(FSYNC|FDSYNC);
error = VOP_READ(vp, &auio, ioflag, fp->f_cred, NULL);
bcount -= auio.uio_resid;	/* bytes actually transferred */
CPU_STATS_ENTER_K();
cp = CPU;
CPU_STATS_ADDQ(cp, sys, sysread, 1);
CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)bcount);
CPU_STATS_EXIT_K();
ttolwp(curthread)->lwp_ru.ioch += (ulong_t)bcount;
/* NOTE: fp->f_offset is deliberately not updated for pread64. */
VOP_RWUNLOCK(vp, rwflag, NULL);
if (error == EINTR && bcount != 0)
error = 0;
out:
if (in_crit)
nbl_end_crit(vp);
releasef(fdes);
if (error)
return (set_errno(error));
return (bcount);
}
/*
 * pwrite64(2) -- large-file positional write for 32-bit callers.  The
 * 64-bit file offset arrives as two 32-bit halves whose order depends
 * on endianness.  Returns the byte count transferred, or -1 with
 * errno set.
 *
 * Fix versus the prior revision: the RLIMIT_FSIZE rctl action is fired
 * with RCA_UNSAFE_SIGINFO, as write()/writev()/pwrite()/pwritev() do,
 * so the limit violation is signalled (SIGXFSZ) consistently across
 * all write paths instead of the silent RCA_SAFE variant.
 */
ssize32_t
pwrite64(int fdes, void *cbuf, size32_t count, uint32_t offset_1,
	uint32_t offset_2)
{
	struct uio auio;
	struct iovec aiov;
	file_t *fp;
	register vnode_t *vp;
	struct cpu *cp;
	int fflag, ioflag, rwflag;
	ssize_t bcount;
	int error = 0;
	u_offset_t fileoff;
	int in_crit = 0;	/* set once inside the nbmand critical region */

	/* Reassemble the 64-bit offset from its two 32-bit halves. */
#if defined(_LITTLE_ENDIAN)
	fileoff = ((u_offset_t)offset_2 << 32) | (u_offset_t)offset_1;
#else
	fileoff = ((u_offset_t)offset_1 << 32) | (u_offset_t)offset_2;
#endif

	/* A 32-bit caller cannot request more than INT32_MAX bytes. */
	if ((bcount = (ssize_t)count) < 0 || bcount > INT32_MAX)
		return (set_errno(EINVAL));
	if ((fp = getf(fdes)) == NULL)
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & (FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	rwflag = 1;	/* writer lock */
	vp = fp->f_vnode;

	if (vp->v_type == VREG) {

		if (bcount == 0)
			goto out;

		/* Offsets beyond the absolute maximum are invalid. */
		if (fileoff > MAXOFFSET_T) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Writing at/above RLIMIT_FSIZE: fire the rctl action
		 * (posts SIGXFSZ per the resource-control policy) and
		 * fail with EFBIG.
		 */
		if (fileoff >= curproc->p_fsz_ctl) {
			mutex_enter(&curproc->p_lock);
			(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
			    curproc->p_rctls, curproc, RCA_UNSAFE_SIGINFO);
			mutex_exit(&curproc->p_lock);
			error = EFBIG;
			goto out;
		}
		/* Nothing can be written exactly at the maximum offset. */
		if (fileoff == MAXOFFSET_T) {
			error = EFBIG;
			goto out;
		}
		/* Trim the request at the absolute maximum offset. */
		if (fileoff + bcount > MAXOFFSET_T)
			bcount = (ssize_t)((u_offset_t)MAXOFFSET_T - fileoff);
	} else if (vp->v_type == VFIFO) {
		/* Positional I/O on a pipe/FIFO is meaningless. */
		error = ESPIPE;
		goto out;
	}

	/* Check non-blocking mandatory locks; EACCES on conflict. */
	if (nbl_need_check(vp)) {
		int svmand;

		nbl_start_crit(vp, RW_READER);
		in_crit = 1;
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (error != 0)
			goto out;
		if (nbl_conflict(vp, NBL_WRITE, fileoff, bcount, svmand,
		    NULL)) {
			error = EACCES;
			goto out;
		}
	}

	aiov.iov_base = cbuf;
	aiov.iov_len = bcount;
	(void) VOP_RWLOCK(vp, rwflag, NULL);
	auio.uio_loffset = fileoff;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = bcount;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = curproc->p_fsz_ctl;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_CACHED;

	/*
	 * FAPPEND is deliberately omitted: positional writes ignore
	 * O_APPEND per POSIX.
	 */
	ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);

	error = VOP_WRITE(vp, &auio, ioflag, fp->f_cred, NULL);
	bcount -= auio.uio_resid;	/* bytes actually transferred */
	CPU_STATS_ENTER_K();
	cp = CPU;
	CPU_STATS_ADDQ(cp, sys, syswrite, 1);
	CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)bcount);
	CPU_STATS_EXIT_K();
	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)bcount;

	/* NOTE: fp->f_offset is deliberately not updated for pwrite64. */
	VOP_RWUNLOCK(vp, rwflag, NULL);

	if (error == EINTR && bcount != 0)
		error = 0;
out:
	if (in_crit)
		nbl_end_crit(vp);
	releasef(fdes);
	if (error)
		return (set_errno(error));
	return (bcount);
}
#endif
#ifdef _SYSCALL32_IMPL
/*
 * 32-bit entry point for read(2): widen the buffer address and byte
 * count to native width and hand off to the native implementation.
 */
ssize_t
read32(int32_t fdes, caddr32_t cbuf, size32_t count)
{
	return (read(fdes, (void *)(uintptr_t)cbuf, (ssize32_t)count));
}
/*
 * 32-bit entry point for write(2): widen the buffer address and byte
 * count to native width and hand off to the native implementation.
 */
ssize_t
write32(int32_t fdes, caddr32_t cbuf, size32_t count)
{
	return (write(fdes, (void *)(uintptr_t)cbuf, (ssize32_t)count));
}
/*
 * 32-bit entry point for pread(2): widen the buffer address, byte
 * count and (zero-extended) offset, then call the native pread().
 */
ssize_t
pread32(int32_t fdes, caddr32_t cbuf, size32_t count, off32_t offset)
{
	return (pread(fdes, (void *)(uintptr_t)cbuf, (ssize32_t)count,
	    (off_t)(uint32_t)offset));
}
/*
 * 32-bit entry point for pwrite(2): widen the buffer address, byte
 * count and (zero-extended) offset, then call the native pwrite().
 */
ssize_t
pwrite32(int32_t fdes, caddr32_t cbuf, size32_t count, off32_t offset)
{
	return (pwrite(fdes, (void *)(uintptr_t)cbuf, (ssize32_t)count,
	    (off_t)(uint32_t)offset));
}
/*
 * 32-bit entry point for readv(2): widen the iovec array pointer and
 * delegate; readv() itself handles the ILP32 iovec conversion.
 */
ssize_t
readv32(int32_t fdes, caddr32_t iovp, int32_t iovcnt)
{
	return (readv(fdes, (void *)(uintptr_t)iovp, iovcnt));
}
/*
 * 32-bit entry point for writev(2): widen the iovec array pointer and
 * delegate; writev() itself handles the ILP32 iovec conversion.
 */
ssize_t
writev32(int32_t fdes, caddr32_t iovp, int32_t iovcnt)
{
	return (writev(fdes, (void *)(uintptr_t)iovp, iovcnt));
}
#endif