#include "lint.h"
#include "thr_uberdata.h"
#include "libc.h"
#include "asyncio.h"
#include <atomic.h>
#include <sys/param.h>
#include <sys/file.h>
#include <sys/port.h>
static int _aio_hash_insert(aio_result_t *, aio_req_t *);
static aio_req_t *_aio_req_get(aio_worker_t *);
static void _aio_req_add(aio_req_t *, aio_worker_t **, int);
static void _aio_req_del(aio_worker_t *, aio_req_t *, int);
static void _aio_work_done(aio_worker_t *);
static void _aio_enq_doneq(aio_req_t *);
extern void _aio_lio_free(aio_lio_t *);
extern int __fcntl(int, int, ...);
extern int _port_dispatch(int, int, int, int, uintptr_t, void *);
static int _aio_fsync_del(aio_worker_t *, aio_req_t *);
static void _aiodone(aio_req_t *, ssize_t, int);
static void _aio_cancel_work(aio_worker_t *, int, int *, int *);
static void _aio_finish_request(aio_worker_t *, ssize_t, int);
/* Global state for the user-level asynchronous I/O framework. */

int _kaio_ok = 0;		/* 0 = untried, 1 = kernel aio usable, -1 = unusable */
pthread_key_t _aio_key;		/* TSD key holding each worker's aio_worker_t */
uint32_t *_kaio_supported = NULL;	/* per-fd bitmap of kernel-aio capability */

/* Circular list of read/write worker threads and its round-robin cursor. */
aio_worker_t *__workers_rw;
aio_worker_t *__nextworker_rw;	/* next rw worker to receive work */
int __rw_workerscnt;		/* number of rw workers */

/* Circular list of notification worker threads and its cursor. */
aio_worker_t *__workers_no;
aio_worker_t *__nextworker_no;	/* next notification worker */
int __no_workerscnt;		/* number of notification workers */

/* Singly-linked list of completed old-style requests (tail = oldest). */
aio_req_t *_aio_done_tail;
aio_req_t *_aio_done_head;

/* Serializes __uaio_init() and _kaio_init() against each other. */
mutex_t __aio_initlock = DEFAULTMUTEX;
cond_t __aio_initcv = DEFAULTCV;
int __aio_initbusy = 0;		/* nonzero while an initializer is running */

/* Protects the counters and done lists below. */
mutex_t __aio_mutex = DEFAULTMUTEX;
cond_t _aio_iowait_cv = DEFAULTCV;

pid_t __pid = (pid_t)-1;	/* our pid, target of completion signals */
int _sigio_enabled = 0;		/* send SIGIO on completion when set */
aio_hash_t *_aio_hash;		/* aio_result_t pointer -> request hash table */
aio_req_t *_aio_doneq;		/* circular done queue (SIGEV_NONE requests) */

int _aio_donecnt = 0;		/* # of requests on the done list */
int _aio_waitncnt = 0;		/* NOTE(review): presumably aio_waitn bookkeeping — not used in this chunk */
int _aio_doneq_cnt = 0;		/* # of requests on _aio_doneq */
int _aio_outstand_cnt = 0;	/* # of outstanding user-level requests */
int _kaio_outstand_cnt = 0;	/* # of outstanding kernel-aio requests */
int _aio_req_done_cnt = 0;	/* # of done requests still on worker queues */
int _aio_kernel_suspend = 0;	/* >0 while callers are suspended in the kernel */
int _aio_suscv_cnt = 0;
int _max_workers = 256;		/* upper bound on rw worker threads */
int _min_workers = 4;		/* rw workers created at initialization */
int _minworkload = 2;		/* per-worker load that triggers a new worker */
int _aio_worker_cnt = 0;	/* total workers created */
int __uaio_ok = 0;		/* nonzero once user-level aio is initialized */
sigset_t _worker_set;		/* signal mask for worker threads */
int _aiowait_flag = 0;		/* nonzero while a thread is inside aiowait() */
int _aio_flags = 0;		/* AIO_* flag bits (e.g. AIO_WAIT_INPROGRESS) */
aio_worker_t *_kaiowp = NULL;	/* worker bound to the kernel cleanup thread */
int hz;				/* clock ticks per second, for _aio_delay() */
/*
 * Lazily allocate the zero-filled per-fd bitmap that records whether
 * kernel aio is usable on a given file descriptor.  Returns 0 on
 * success (including when the map already exists), -1 if mmap() fails.
 */
static int
_kaio_supported_init(void)
{
	void *map;

	if (_kaio_supported != NULL)	/* already allocated */
		return (0);

	map = mmap(NULL, MAX_KAIO_FDARRAY_SIZE * sizeof (uint32_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
	if (map == MAP_FAILED)
		return (-1);

	_kaio_supported = map;
	return (0);
}
/*
 * Initialize the user-level aio framework: the kernel-aio capability
 * bitmap, the request hash table, the worker signal mask, and the
 * initial pools of notification and read/write worker threads.
 * Serialized against _kaio_init() by __aio_initlock/__aio_initbusy.
 * Returns 0 on success, -1 on failure.
 */
int
__uaio_init(void)
{
	int ret = -1;
	int i;
	int cancel_state;

	lmutex_lock(&__aio_initlock);
	/* Disallow cancellation while blocked on the init condvar. */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	while (__aio_initbusy)
		(void) cond_wait(&__aio_initcv, &__aio_initlock);
	(void) pthread_setcancelstate(cancel_state, NULL);
	if (__uaio_ok) {	/* somebody else beat us to it */
		lmutex_unlock(&__aio_initlock);
		return (0);
	}
	/* Claim the initializer role and drop the lock while we work. */
	__aio_initbusy = 1;
	lmutex_unlock(&__aio_initlock);

	hz = (int)sysconf(_SC_CLK_TCK);
	__pid = getpid();
	setup_cancelsig(SIGAIOCANCEL);

	if (_kaio_supported_init() != 0)
		goto out;

	/* Allocate and initialize the resultp -> request hash table. */
	if (_aio_hash == NULL) {
		_aio_hash = (aio_hash_t *)mmap(NULL,
		    HASHSZ * sizeof (aio_hash_t), PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
		if ((void *)_aio_hash == MAP_FAILED) {
			_aio_hash = NULL;
			goto out;
		}
		for (i = 0; i < HASHSZ; i++)
			(void) mutex_init(&_aio_hash[i].hash_lock,
			    USYNC_THREAD, NULL);
	}

	/* Workers run with all signals blocked except SIGAIOCANCEL. */
	(void) sigfillset(&_worker_set);
	(void) sigdelset(&_worker_set, SIGAIOCANCEL);

	/* One notification worker is required before any rw workers. */
	if (__no_workerscnt == 0 &&
	    (_aio_create_worker(NULL, AIONOTIFY) != 0)) {
		errno = EAGAIN;
		goto out;
	}

	/* Create the minimum pool of read/write workers. */
	for (i = 0; i < _min_workers; i++)
		(void) _aio_create_worker(NULL, AIOREAD);
	if (__rw_workerscnt == 0) {	/* none succeeded */
		errno = EAGAIN;
		goto out;
	}

	ret = 0;
out:
	/* Publish the result and wake any waiters blocked in init. */
	lmutex_lock(&__aio_initlock);
	if (ret == 0)
		__uaio_ok = 1;
	__aio_initbusy = 0;
	(void) cond_broadcast(&__aio_initcv);
	lmutex_unlock(&__aio_initlock);
	return (ret);
}
/*
 * Hook invoked when a file descriptor is closed: cancel any pending
 * user-level aio on the descriptor and clear its kernel-aio bit.
 */
void
_aio_close(int fd)
{
	if (fd >= 0) {
		if (__uaio_ok)
			(void) aiocancel_all(fd);
		if (_kaio_supported != NULL)
			CLEAR_KAIO_SUPPORTED(fd);
	}
}
/*
 * Body of the daemon thread created by _kaio_init().  It binds its
 * worker structure into thread-specific data and then parks inside
 * the kernel via _kaio(AIOSTART).
 */
void *
_kaio_cleanup_thread(void *arg)
{
	int rc;

	rc = pthread_setspecific(_aio_key, arg);
	if (rc != 0)
		aio_panic("_kaio_cleanup_thread, pthread_setspecific()");
	(void) _kaio(AIOSTART);
	return (arg);
}
/*
 * Probe for and initialize kernel asynchronous I/O.  On success,
 * _kaio_ok is set to 1 and a daemon cleanup thread is parked in the
 * kernel; on any failure _kaio_ok is set to -1 so callers fall back
 * to the user-level implementation.  Serialized against __uaio_init()
 * by __aio_initlock/__aio_initbusy.
 */
void
_kaio_init()
{
	int error;
	sigset_t oset;
	int cancel_state;

	lmutex_lock(&__aio_initlock);
	/* Disallow cancellation while blocked on the init condvar. */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	while (__aio_initbusy)
		(void) cond_wait(&__aio_initcv, &__aio_initlock);
	(void) pthread_setcancelstate(cancel_state, NULL);
	if (_kaio_ok) {		/* already probed (success or failure) */
		lmutex_unlock(&__aio_initlock);
		return;
	}
	__aio_initbusy = 1;
	lmutex_unlock(&__aio_initlock);

	if (_kaio_supported_init() != 0)
		error = ENOMEM;
	else if ((_kaiowp = _aio_worker_alloc()) == NULL)
		error = ENOMEM;
	else if ((error = (int)_kaio(AIOINIT)) == 0) {
		/* Create the cleanup thread with all signals masked. */
		(void) pthread_sigmask(SIG_SETMASK, &maskset, &oset);
		error = thr_create(NULL, AIOSTKSIZE, _kaio_cleanup_thread,
		    _kaiowp, THR_DAEMON, &_kaiowp->work_tid);
		(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);
	}
	if (error && _kaiowp != NULL) {
		_aio_worker_free(_kaiowp);
		_kaiowp = NULL;
	}

	/* Publish the probe result and wake waiters. */
	lmutex_lock(&__aio_initlock);
	if (error)
		_kaio_ok = -1;
	else
		_kaio_ok = 1;
	__aio_initbusy = 0;
	(void) cond_broadcast(&__aio_initcv);
	lmutex_unlock(&__aio_initlock);
}
/*
 * Old-style asynchronous read: a thin wrapper that delegates all of
 * the work to the common _aiorw() path in AIOREAD mode.
 */
int
aioread(int fd, caddr_t buf, int bufsz, off_t offset, int whence,
    aio_result_t *resultp)
{
	int rv;

	rv = _aiorw(fd, buf, bufsz, offset, whence, resultp, AIOREAD);
	return (rv);
}
/*
 * Old-style asynchronous write: a thin wrapper that delegates all of
 * the work to the common _aiorw() path in AIOWRITE mode.
 */
int
aiowrite(int fd, caddr_t buf, int bufsz, off_t offset, int whence,
    aio_result_t *resultp)
{
	int rv;

	rv = _aiorw(fd, buf, bufsz, offset, whence, resultp, AIOWRITE);
	return (rv);
}
#if !defined(_LP64)
/*
 * Large-file (64-bit offset) variants for 32-bit processes; thin
 * wrappers around the common _aiorw() path.
 */
int
aioread64(int fd, caddr_t buf, int bufsz, off64_t offset, int whence,
    aio_result_t *resultp)
{
	int rv;

	rv = _aiorw(fd, buf, bufsz, offset, whence, resultp, AIOAREAD64);
	return (rv);
}

int
aiowrite64(int fd, caddr_t buf, int bufsz, off64_t offset, int whence,
    aio_result_t *resultp)
{
	int rv;

	rv = _aiorw(fd, buf, bufsz, offset, whence, resultp, AIOAWRITE64);
	return (rv);
}
#endif	/* !defined(_LP64) */
/*
 * Common implementation for aioread()/aiowrite() and the large-file
 * variants.  Resolves 'whence' into an absolute file offset, attempts
 * kernel aio first when the descriptor supports it, and otherwise
 * queues the request to a user-level read/write worker.
 * Returns 0 on success, -1 with errno set on failure.
 */
int
_aiorw(int fd, caddr_t buf, int bufsz, offset_t offset, int whence,
    aio_result_t *resultp, int mode)
{
	aio_req_t *reqp;
	aio_args_t *ap;
	offset_t loffset;	/* resolved absolute offset */
	struct stat64 stat64;
	int error = 0;
	int kerr;
	int umode;		/* mode with the 64-bit bias removed */

	/* Convert (offset, whence) into an absolute offset. */
	switch (whence) {
	case SEEK_SET:
		loffset = offset;
		break;
	case SEEK_CUR:
		if ((loffset = llseek(fd, 0, SEEK_CUR)) == -1)
			error = -1;
		else
			loffset += offset;
		break;
	case SEEK_END:
		if (fstat64(fd, &stat64) == -1)
			error = -1;
		else
			loffset = offset + stat64.st_size;
		break;
	default:
		errno = EINVAL;
		error = -1;
	}

	if (error)
		return (error);

	/* Probe for kernel aio on first use. */
	if (!_kaio_ok)
		_kaio_init();

	/*
	 * The kernel command codes don't distinguish the largefile
	 * variants; map them down to the base AIOREAD/AIOWRITE codes.
	 */
	if (mode == AIOAREAD64 || mode == AIOAWRITE64)
		umode = mode - AIOAREAD64;
	else
		umode = mode;

	/* Try kernel aio first if it is available for this fd. */
	if (_kaio_ok > 0 && KAIO_SUPPORTED(fd)) {
		resultp->aio_errno = 0;
		sig_mutex_lock(&__aio_mutex);
		_kaio_outstand_cnt++;
		sig_mutex_unlock(&__aio_mutex);
		/*
		 * AIO_INPROGRESS in aio_return means the caller is reusing
		 * a still-pending resultp; pass AIO_POLL_BIT in that case.
		 */
		kerr = (int)_kaio(((resultp->aio_return == AIO_INPROGRESS) ?
		    (umode | AIO_POLL_BIT) : umode),
		    fd, buf, bufsz, loffset, resultp);
		if (kerr == 0) {
			return (0);
		}
		/* Kernel refused: undo the outstanding count. */
		sig_mutex_lock(&__aio_mutex);
		_kaio_outstand_cnt--;
		sig_mutex_unlock(&__aio_mutex);
		if (errno != ENOTSUP && errno != EBADFD)
			return (-1);
		/* EBADFD: never try kernel aio on this fd again. */
		if (errno == EBADFD)
			SET_KAIO_NOT_SUPPORTED(fd);
	}

	/* Fall back to the user-level implementation. */
	if (!__uaio_ok && __uaio_init() == -1)
		return (-1);

	if ((reqp = _aio_req_alloc()) == NULL) {
		errno = EAGAIN;
		return (-1);
	}

	/*
	 * _aio_do_request() checks reqp->req_op to differentiate
	 * between 32 and 64 bit access.
	 */
	reqp->req_op = mode;
	reqp->req_resultp = resultp;
	ap = &reqp->req_args;
	ap->fd = fd;
	ap->buf = buf;
	ap->bufsz = bufsz;
	ap->offset = loffset;

	/* A duplicate resultp is the caller's error: reject it. */
	if (_aio_hash_insert(resultp, reqp) != 0) {
		_aio_req_free(reqp);
		errno = EINVAL;
		return (-1);
	}
	/* Hand the request to a read/write worker. */
	_aio_req_add(reqp, &__nextworker_rw, umode);
	return (0);
}
/*
 * Cancel a single outstanding old-style aio request identified by its
 * result pointer.  Returns 0 if the request was canceled; otherwise
 * returns -1 with errno set to EINVAL (no such request outstanding)
 * or EACCES (request exists but could not be canceled).
 */
int
aiocancel(aio_result_t *resultp)
{
	aio_req_t *reqp;
	aio_worker_t *aiowp;
	int ret;
	int done = 0;
	int canceled = 0;

	if (!__uaio_ok) {	/* user-level aio never initialized */
		errno = EINVAL;
		return (-1);
	}

	sig_mutex_lock(&__aio_mutex);
	reqp = _aio_hash_find(resultp);
	if (reqp == NULL) {
		/*
		 * Distinguish "nothing outstanding" (EINVAL) from
		 * "outstanding but already past cancelation" (EACCES).
		 */
		if (_aio_outstand_cnt == _aio_req_done_cnt)
			errno = EINVAL;
		else
			errno = EACCES;
		ret = -1;
	} else {
		aiowp = reqp->req_worker;
		sig_mutex_lock(&aiowp->work_qlock1);
		(void) _aio_cancel_req(aiowp, reqp, &canceled, &done);
		sig_mutex_unlock(&aiowp->work_qlock1);

		if (canceled) {
			ret = 0;
		} else {
			if (_aio_outstand_cnt == 0 ||
			    _aio_outstand_cnt == _aio_req_done_cnt)
				errno = EINVAL;
			else
				errno = EACCES;
			ret = -1;
		}
	}
	sig_mutex_unlock(&__aio_mutex);
	return (ret);
}
/*
 * Cancellation cleanup handler for aiowait(): undo the increment of
 * _aiowait_flag performed before blocking in the kernel.
 */
static void
_aiowait_cleanup(void *arg __unused)
{
	sig_mutex_lock(&__aio_mutex);
	--_aiowait_flag;
	sig_mutex_unlock(&__aio_mutex);
}
/*
 * Wait for the completion of an old-style asynchronous I/O request,
 * checking both the user-level done list and the kernel (via
 * _kaio(AIOWAIT)).  'uwait' selects the mode: NULL blocks forever, a
 * zero timeval polls once, and a positive timeval bounds the wait.
 * Returns the completed request's result pointer, NULL if none
 * completed (poll/timeout), or (aio_result_t *)-1 with errno set.
 */
aio_result_t *
aiowait(struct timeval *uwait)
{
	aio_result_t *uresultp;	/* result from the user-level done list */
	aio_result_t *kresultp;	/* result from the kernel */
	aio_result_t *resultp;
	int dontblock;
	int timedwait = 0;
	int kaio_errno = 0;
	struct timeval twait;	/* copy of uwait, counted down on retry */
	struct timeval *wait = NULL;
	hrtime_t hrtend;	/* absolute deadline for a timed wait */
	hrtime_t hres;

	if (uwait) {
		/*
		 * Check for a valid specified wait time.
		 * If it is invalid, fail the call right away.
		 */
		if (uwait->tv_sec < 0 || uwait->tv_usec < 0 ||
		    uwait->tv_usec >= MICROSEC) {
			errno = EINVAL;
			return ((aio_result_t *)-1);
		}

		if (uwait->tv_sec > 0 || uwait->tv_usec > 0) {
			/* Timed wait: compute the absolute deadline. */
			hrtend = gethrtime() +
			    (hrtime_t)uwait->tv_sec * NANOSEC +
			    (hrtime_t)uwait->tv_usec * (NANOSEC / MICROSEC);
			twait = *uwait;
			wait = &twait;
			timedwait++;
		} else {
			/* Polling mode: check kernel then user, no blocking. */
			sig_mutex_lock(&__aio_mutex);
			if (_kaio_outstand_cnt == 0) {
				kresultp = (aio_result_t *)-1;
			} else {
				/* (struct timeval *)-1 means "poll". */
				kresultp = (aio_result_t *)_kaio(AIOWAIT,
				    (struct timeval *)-1, 1);
				if (kresultp != (aio_result_t *)-1 &&
				    kresultp != NULL &&
				    kresultp != (aio_result_t *)1) {
					_kaio_outstand_cnt--;
					sig_mutex_unlock(&__aio_mutex);
					return (kresultp);
				}
			}
			uresultp = _aio_req_done();
			sig_mutex_unlock(&__aio_mutex);
			if (uresultp != NULL &&
			    uresultp != (aio_result_t *)-1) {
				return (uresultp);
			}
			/* -1 from both sides means nothing outstanding. */
			if (uresultp == (aio_result_t *)-1 &&
			    kresultp == (aio_result_t *)-1) {
				errno = EINVAL;
				return ((aio_result_t *)-1);
			} else {
				return (NULL);
			}
		}
	}

	for (;;) {
		sig_mutex_lock(&__aio_mutex);
		uresultp = _aio_req_done();
		if (uresultp != NULL && uresultp != (aio_result_t *)-1) {
			sig_mutex_unlock(&__aio_mutex);
			resultp = uresultp;
			break;
		}
		/*
		 * Mark a thread as waiting so that workers know to do a
		 * _kaio(AIONOTIFY) to wake us out of the kernel.
		 */
		_aiowait_flag++;
		/*
		 * -1 from _aio_req_done() means no user-level requests
		 * are outstanding; then we must not block in the kernel
		 * unless the kernel has outstanding requests.
		 */
		dontblock = (uresultp == (aio_result_t *)-1);
		if (dontblock && _kaio_outstand_cnt == 0) {
			kresultp = (aio_result_t *)-1;
			kaio_errno = EINVAL;
		} else {
			sig_mutex_unlock(&__aio_mutex);
			/* Cleanup handler undoes _aiowait_flag on cancel. */
			pthread_cleanup_push(_aiowait_cleanup, NULL);
			_cancel_prologue();
			kresultp = (aio_result_t *)_kaio(AIOWAIT,
			    wait, dontblock);
			_cancel_epilogue();
			pthread_cleanup_pop(0);
			sig_mutex_lock(&__aio_mutex);
			kaio_errno = errno;
		}
		_aiowait_flag--;
		sig_mutex_unlock(&__aio_mutex);

		if (kresultp == (aio_result_t *)1) {
			/* aiowait() awakened by an aionotify() */
			continue;
		} else if (kresultp != NULL &&
		    kresultp != (aio_result_t *)-1) {
			/* A kernel request completed. */
			resultp = kresultp;
			sig_mutex_lock(&__aio_mutex);
			_kaio_outstand_cnt--;
			sig_mutex_unlock(&__aio_mutex);
			break;
		} else if (kresultp == (aio_result_t *)-1 &&
		    kaio_errno == EINVAL &&
		    uresultp == (aio_result_t *)-1) {
			/* Nothing outstanding anywhere. */
			errno = kaio_errno;
			resultp = (aio_result_t *)-1;
			break;
		} else if (kresultp == (aio_result_t *)-1 &&
		    kaio_errno == EINTR) {
			errno = kaio_errno;
			resultp = (aio_result_t *)-1;
			break;
		} else if (timedwait) {
			/* Recompute the remaining time; NULL on expiry. */
			hres = hrtend - gethrtime();
			if (hres <= 0) {
				/* time is up; return */
				resultp = NULL;
				break;
			} else {
				/* Round up to the next microsecond. */
				hres += (NANOSEC / MICROSEC) - 1;
				wait->tv_sec = hres / NANOSEC;
				wait->tv_usec =
				    (hres % NANOSEC) / (NANOSEC / MICROSEC);
			}
		} else {
			ASSERT(kresultp == NULL && uresultp == NULL);
			resultp = NULL;
			continue;
		}
	}
	return (resultp);
}
/*
 * Compute the time remaining between now and the absolute deadline
 * 'end', storing the difference in 'wait'.  Returns 0 when time
 * remains, -1 when the deadline has already passed (in which case
 * *wait may be partially written but is not meaningful).
 */
int
_aio_get_timedelta(timespec_t *end, timespec_t *wait)
{
	struct timeval tv;
	timespec_t now;

	(void) gettimeofday(&tv, NULL);
	now.tv_sec = tv.tv_sec;
	now.tv_nsec = tv.tv_usec * 1000;

	if (end->tv_sec < now.tv_sec)
		return (-1);		/* deadline already passed */

	wait->tv_sec = end->tv_sec - now.tv_sec;
	if (end->tv_nsec >= now.tv_nsec) {
		wait->tv_nsec = end->tv_nsec - now.tv_nsec;
		if (wait->tv_sec == 0 && wait->tv_nsec == 0)
			return (-1);	/* deadline is exactly now */
		return (0);
	}
	if (end->tv_sec > now.tv_sec) {
		/* Borrow one second for the nanosecond subtraction. */
		wait->tv_sec -= 1;
		wait->tv_nsec = NANOSEC - (now.tv_nsec - end->tv_nsec);
		return (0);
	}
	return (-1);			/* same second, nanoseconds passed */
}
/*
 * Cancel all outstanding old-style aio requests on file descriptor
 * 'fd', or on all descriptors when fd < 0.  Completed-but-unclaimed
 * requests for the fd are also discarded from the done list.  Returns
 * AIO_CANCELED, AIO_ALLDONE, AIO_NOTCANCELED, or the kernel's answer
 * when nothing was found at user level.
 */
int
aiocancel_all(int fd)
{
	aio_req_t *reqp;
	aio_req_t **reqpp, *last;
	aio_worker_t *first;
	aio_worker_t *next;
	int canceled = 0;
	int done = 0;
	int cancelall = 0;

	sig_mutex_lock(&__aio_mutex);

	if (_aio_outstand_cnt == 0) {
		sig_mutex_unlock(&__aio_mutex);
		return (AIO_ALLDONE);
	}

	/* Cancel requests queued on every read/write worker. */
	first = __nextworker_rw;
	next = first;
	do {
		_aio_cancel_work(next, fd, &canceled, &done);
	} while ((next = next->work_forw) != first);

	/*
	 * Cancel requests on the done queue: walk from the tail,
	 * unlinking any entry that matches (or all, when fd < 0).
	 */
	if (fd < 0)
		cancelall = 1;
	reqpp = &_aio_done_tail;
	last = _aio_done_tail;
	while ((reqp = *reqpp) != NULL) {
		if (cancelall || reqp->req_args.fd == fd) {
			*reqpp = reqp->req_next;
			/* Keep 'last' pointing at the preceding survivor. */
			if (last == reqp) {
				last = reqp->req_next;
			}
			if (_aio_done_head == reqp) {
				/* this should be the last req in list */
				_aio_done_head = last;
			}
			_aio_donecnt--;
			_aio_set_result(reqp, -1, ECANCELED);
			(void) _aio_hash_del(reqp->req_resultp);
			_aio_req_free(reqp);
		} else {
			reqpp = &reqp->req_next;
			last = reqp;
		}
	}

	if (cancelall) {
		ASSERT(_aio_donecnt == 0);
		_aio_done_head = NULL;
	}
	sig_mutex_unlock(&__aio_mutex);

	if (canceled && done == 0)
		return (AIO_CANCELED);
	else if (done && canceled == 0)
		return (AIO_ALLDONE);
	else if ((canceled + done == 0) && KAIO_SUPPORTED(fd))
		/* Nothing at user level: let the kernel try. */
		return ((int)_kaio(AIOCANCEL, fd, NULL));
	return (AIO_NOTCANCELED);
}
/*
 * Cancel requests matching 'fd' (all requests when fd < 0) on one
 * worker's queue, including the request currently being executed.
 * 'canceled' and 'done' are incremented accordingly.  Because
 * _aio_cancel_req() can drop the queue lock (returning nonzero), the
 * queue walk restarts from the tail whenever that happens.
 */
static void
_aio_cancel_work(aio_worker_t *aiowp, int fd, int *canceled, int *done)
{
	aio_req_t *reqp;

	sig_mutex_lock(&aiowp->work_qlock1);
	/*
	 * Cancel queued requests first.
	 */
	reqp = aiowp->work_tail1;
	while (reqp != NULL) {
		if (fd < 0 || reqp->req_args.fd == fd) {
			if (_aio_cancel_req(aiowp, reqp, canceled, done)) {
				/* Lock was dropped: restart the scan. */
				reqp = aiowp->work_tail1;
				continue;
			}
		}
		reqp = reqp->req_next;
	}
	/*
	 * Since the queued requests have been canceled, there can
	 * only be one inprogress request that should be canceled.
	 */
	if ((reqp = aiowp->work_req) != NULL &&
	    (fd < 0 || reqp->req_args.fd == fd))
		(void) _aio_cancel_req(aiowp, reqp, canceled, done);
	sig_mutex_unlock(&aiowp->work_qlock1);
}
/*
 * Cancel a single request, which may be queued, in progress, or
 * already done.  Called with both __aio_mutex and the worker's
 * work_qlock1 held.  Returns nonzero if both locks were dropped and
 * reacquired (POSIX completion path), which obliges the caller to
 * restart any queue traversal; returns 0 otherwise.
 */
int
_aio_cancel_req(aio_worker_t *aiowp, aio_req_t *reqp, int *canceled, int *done)
{
	int ostate = reqp->req_state;

	ASSERT(MUTEX_HELD(&__aio_mutex));
	ASSERT(MUTEX_HELD(&aiowp->work_qlock1));
	if (ostate == AIO_REQ_CANCELED)
		return (0);
	/*
	 * A done-but-unretired old-style request sitting at the
	 * worker's work_prev1 slot can still be marked canceled.
	 */
	if (ostate == AIO_REQ_DONE && !POSIX_AIO(reqp) &&
	    aiowp->work_prev1 == reqp) {
		ASSERT(aiowp->work_done1 != 0);
		_aio_set_result(reqp, -1, ECANCELED);
		(void) _aio_hash_del(reqp->req_resultp);
		reqp->req_state = AIO_REQ_CANCELED;
		(*canceled)++;
		return (0);
	}
	if (ostate == AIO_REQ_DONE || ostate == AIO_REQ_DONEQ) {
		(*done)++;
		return (0);
	}
	/*
	 * A queued (not yet running) aio_fsync request is "canceled"
	 * by flagging its list head; the fsync worker checks lio_canned.
	 */
	if (reqp->req_op == AIOFSYNC && reqp != aiowp->work_req) {
		ASSERT(POSIX_AIO(reqp));
		/* Cancel the queued aio_fsync() request. */
		if (!reqp->req_head->lio_canned) {
			reqp->req_head->lio_canned = 1;
			_aio_outstand_cnt--;
			(*canceled)++;
		}
		return (0);
	}
	reqp->req_state = AIO_REQ_CANCELED;
	_aio_req_del(aiowp, reqp, ostate);
	(void) _aio_hash_del(reqp->req_resultp);
	(*canceled)++;
	if (reqp == aiowp->work_req) {
		ASSERT(ostate == AIO_REQ_INPROGRESS);
		/*
		 * Set the result values now, before _aiodone() is called.
		 * We do this because the application can expect aio_return
		 * and aio_errno to be set to -1 and ECANCELED, respectively,
		 * immediately after a successful cancellation of a running
		 * request.
		 */
		_aio_set_result(reqp, -1, ECANCELED);
		/* Abort the in-progress I/O with SIGAIOCANCEL. */
		(void) thr_kill(aiowp->work_tid, SIGAIOCANCEL);
		return (0);
	}
	if (!POSIX_AIO(reqp)) {
		_aio_outstand_cnt--;
		_aio_set_result(reqp, -1, ECANCELED);
		_aio_req_free(reqp);
		return (0);
	}
	/*
	 * POSIX request: _aiodone() must run without either lock held,
	 * so drop both, complete, and reacquire in lock order.
	 */
	sig_mutex_unlock(&aiowp->work_qlock1);
	sig_mutex_unlock(&__aio_mutex);
	_aiodone(reqp, -1, ECANCELED);
	sig_mutex_lock(&__aio_mutex);
	sig_mutex_lock(&aiowp->work_qlock1);
	return (1);
}
/*
 * Create a new worker thread of the kind selected by 'mode' (a
 * read/write worker or a notification worker) and link it into the
 * appropriate circular worker list.  If 'reqp' is non-NULL it becomes
 * the first entry on the new worker's queue.  The thread is created
 * suspended (with all signals masked) and released only after the
 * worker is fully linked in.  Returns 0 on success, -1 on failure.
 */
int
_aio_create_worker(aio_req_t *reqp, int mode)
{
	aio_worker_t *aiowp, **workers, **nextworker;
	int *aio_workerscnt;
	void *(*func)(void *);
	sigset_t oset;
	int error;

	/*
	 * Put the new worker thread in the right queue.
	 */
	switch (mode) {
	case AIOREAD:
	case AIOWRITE:
	case AIOAREAD:
	case AIOAWRITE:
#if !defined(_LP64)
	case AIOAREAD64:
	case AIOAWRITE64:
#endif
		workers = &__workers_rw;
		nextworker = &__nextworker_rw;
		aio_workerscnt = &__rw_workerscnt;
		func = _aio_do_request;
		break;
	case AIONOTIFY:
		workers = &__workers_no;
		nextworker = &__nextworker_no;
		func = _aio_do_notify;
		aio_workerscnt = &__no_workerscnt;
		break;
	default:
		aio_panic("_aio_create_worker: invalid mode");
		break;
	}

	if ((aiowp = _aio_worker_alloc()) == NULL)
		return (-1);

	/* Pre-queue the initial request before the thread starts. */
	if (reqp) {
		reqp->req_state = AIO_REQ_QUEUED;
		reqp->req_worker = aiowp;
		aiowp->work_head1 = reqp;
		aiowp->work_tail1 = reqp;
		aiowp->work_next1 = reqp;
		aiowp->work_count1 = 1;
		aiowp->work_minload1 = 1;
	}

	/* Create the thread with all signals masked, suspended. */
	(void) pthread_sigmask(SIG_SETMASK, &maskset, &oset);
	error = thr_create(NULL, AIOSTKSIZE, func, aiowp,
	    THR_DAEMON | THR_SUSPENDED, &aiowp->work_tid);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);
	if (error) {
		/* Undo the pre-queue and free the worker. */
		if (reqp) {
			reqp->req_state = 0;
			reqp->req_worker = NULL;
		}
		_aio_worker_free(aiowp);
		return (-1);
	}

	/* Link the worker into its circular list. */
	lmutex_lock(&__aio_mutex);
	(*aio_workerscnt)++;
	if (*workers == NULL) {
		aiowp->work_forw = aiowp;
		aiowp->work_backw = aiowp;
		*nextworker = aiowp;
		*workers = aiowp;
	} else {
		aiowp->work_backw = (*workers)->work_backw;
		aiowp->work_forw = (*workers);
		(*workers)->work_backw->work_forw = aiowp;
		(*workers)->work_backw = aiowp;
	}
	_aio_worker_cnt++;
	lmutex_unlock(&__aio_mutex);

	(void) thr_continue(aiowp->work_tid);

	return (0);
}
/*
 * Main loop of a read/write worker thread.  Repeatedly pulls requests
 * off the worker's queue, performs the I/O with the appropriate
 * system call, and reports completion via _aio_finish_request().
 * SIGAIOCANCEL interrupts an in-progress request by longjmp'ing back
 * to the sigsetjmp() below; I/O is only cancelable while sigon() is
 * in effect around the blocking call.
 */
void *
_aio_do_request(void *arglist)
{
	aio_worker_t *aiowp = (aio_worker_t *)arglist;
	ulwp_t *self = curthread;
	struct aio_args *arg;
	aio_req_t *reqp;		/* current AIO request */
	ssize_t retval;
	int append;
	int error;

	if (pthread_setspecific(_aio_key, aiowp) != 0)
		aio_panic("_aio_do_request, pthread_setspecific()");
	(void) pthread_sigmask(SIG_SETMASK, &_worker_set, NULL);
	ASSERT(aiowp->work_req == NULL);

	/*
	 * We resume here when an operation is cancelled.
	 * On first entry, aiowp->work_req == NULL, so all
	 * we do is block SIGAIOCANCEL.
	 */
	(void) sigsetjmp(aiowp->work_jmp_buf, 0);
	ASSERT(self->ul_sigdefer == 0);

	sigoff(self);	/* block SIGAIOCANCEL */
	if (aiowp->work_req != NULL)
		_aio_finish_request(aiowp, -1, ECANCELED);

	for (;;) {
		/*
		 * Put completed requests on aio_done_list.  This has
		 * to be done as part of the main loop to ensure that
		 * we don't artificially starve any aiowait'ers.
		 */
		if (aiowp->work_done1)
			_aio_work_done(aiowp);
top:
		/* consume any deferred SIGAIOCANCEL signal here */
		sigon(self);
		sigoff(self);

		while ((reqp = _aio_req_get(aiowp)) == NULL) {
			if (_aio_idle(aiowp) != 0)
				goto top;
		}
		arg = &reqp->req_args;
		ASSERT(reqp->req_state == AIO_REQ_INPROGRESS ||
		    reqp->req_state == AIO_REQ_CANCELED);
		error = 0;

		switch (reqp->req_op) {
		case AIOREAD:
		case AIOAREAD:
			sigon(self);	/* cancelable while blocked in I/O */
			retval = pread(arg->fd, arg->buf,
			    arg->bufsz, arg->offset);
			if (retval == -1) {
				if (errno == ESPIPE) {
					/* unseekable fd: plain read() */
					retval = read(arg->fd,
					    arg->buf, arg->bufsz);
					if (retval == -1)
						error = errno;
				} else {
					error = errno;
				}
			}
			sigoff(self);	/* block SIGAIOCANCEL */
			break;
		case AIOWRITE:
		case AIOAWRITE:
			/* Honor O_APPEND: append via write(), else pwrite(). */
			append = (__fcntl(arg->fd, F_GETFL) & O_APPEND);
			sigon(self);	/* cancelable while blocked in I/O */
			retval = append?
			    write(arg->fd, arg->buf, arg->bufsz) :
			    pwrite(arg->fd, arg->buf, arg->bufsz,
			    arg->offset);
			if (retval == -1) {
				if (errno == ESPIPE) {
					/* unseekable fd: plain write() */
					retval = write(arg->fd,
					    arg->buf, arg->bufsz);
					if (retval == -1)
						error = errno;
				} else {
					error = errno;
				}
			}
			sigoff(self);	/* block SIGAIOCANCEL */
			break;
#if !defined(_LP64)
		case AIOAREAD64:
			sigon(self);	/* cancelable while blocked in I/O */
			retval = pread64(arg->fd, arg->buf,
			    arg->bufsz, arg->offset);
			if (retval == -1) {
				if (errno == ESPIPE) {
					retval = read(arg->fd,
					    arg->buf, arg->bufsz);
					if (retval == -1)
						error = errno;
				} else {
					error = errno;
				}
			}
			sigoff(self);	/* block SIGAIOCANCEL */
			break;
		case AIOAWRITE64:
			/* Honor O_APPEND, as in the 32-bit case above. */
			append = (__fcntl(arg->fd, F_GETFL) & O_APPEND);
			sigon(self);	/* cancelable while blocked in I/O */
			retval = append?
			    write(arg->fd, arg->buf, arg->bufsz) :
			    pwrite64(arg->fd, arg->buf, arg->bufsz,
			    arg->offset);
			if (retval == -1) {
				if (errno == ESPIPE) {
					retval = write(arg->fd,
					    arg->buf, arg->bufsz);
					if (retval == -1)
						error = errno;
				} else {
					error = errno;
				}
			}
			sigoff(self);	/* block SIGAIOCANCEL */
			break;
#endif	/* !defined(_LP64) */
		case AIOFSYNC:
			/* Last member of the lio group does the sync. */
			if (_aio_fsync_del(aiowp, reqp))
				goto top;
			ASSERT(reqp->req_head == NULL);
			/*
			 * All the requests queued before this fsync
			 * have been completed; do the fsync unless the
			 * whole group was canceled.  arg->offset carries
			 * the O_SYNC/O_DSYNC selector.
			 */
			if (reqp->req_state == AIO_REQ_CANCELED) {
				;	/* Do nothing */
			} else if (arg->offset == O_SYNC) {
				if ((retval = __fdsync(arg->fd, FDSYNC_FILE)) ==
				    -1) {
					error = errno;
				}
			} else {
				if ((retval = __fdsync(arg->fd, FDSYNC_DATA)) ==
				    -1) {
					error = errno;
				}
			}
			/* The fsync request is hashed only at completion. */
			if (_aio_hash_insert(reqp->req_resultp, reqp) != 0)
				aio_panic("_aio_do_request(): AIOFSYNC: "
				    "request already in hash table");
			break;
		default:
			aio_panic("_aio_do_request, bad op");
		}

		_aio_finish_request(aiowp, retval, error);
	}
	/* NOTREACHED */
	return (NULL);
}
/*
 * Complete the request pointed at by aiowp->work_req.  A request whose
 * state was changed to AIO_REQ_CANCELED while in flight is forced to
 * (-1, ECANCELED) regardless of the actual I/O outcome.  Old-style
 * requests are marked done and left on the worker queue (retired
 * later by _aio_work_done()); POSIX requests are handed to _aiodone().
 * Called with SIGAIOCANCEL blocked and no locks held.
 */
static void
_aio_finish_request(aio_worker_t *aiowp, ssize_t retval, int error)
{
	aio_req_t *reqp;

	sig_mutex_lock(&aiowp->work_qlock1);
	if ((reqp = aiowp->work_req) == NULL)
		sig_mutex_unlock(&aiowp->work_qlock1);
	else {
		aiowp->work_req = NULL;
		if (reqp->req_state == AIO_REQ_CANCELED) {
			retval = -1;
			error = ECANCELED;
		}
		if (!POSIX_AIO(reqp)) {
			int notify;

			if (reqp->req_state == AIO_REQ_INPROGRESS) {
				reqp->req_state = AIO_REQ_DONE;
				_aio_set_result(reqp, retval, error);
			}
			sig_mutex_unlock(&aiowp->work_qlock1);
			sig_mutex_lock(&__aio_mutex);
			/*
			 * If it was canceled, this request will not be
			 * added to done list. Just free it.
			 */
			if (error == ECANCELED) {
				_aio_outstand_cnt--;
				_aio_req_free(reqp);
			} else {
				_aio_req_done_cnt++;
			}
			/*
			 * Wake up the main thread if it is waiting for
			 * all outstanding requests to complete.
			 */
			notify = 0;
			if (_aio_outstand_cnt == 0 && _aiowait_flag) {
				notify = 1;
			}
			sig_mutex_unlock(&__aio_mutex);
			if (notify) {
				/* Kick the waiter out of _kaio(AIOWAIT). */
				(void) _kaio(AIONOTIFY);
			}
		} else {
			if (reqp->req_state == AIO_REQ_INPROGRESS)
				reqp->req_state = AIO_REQ_DONE;
			sig_mutex_unlock(&aiowp->work_qlock1);
			_aiodone(reqp, retval, error);
		}
	}
}
/*
 * Set the user's control block's aio_state to USERAIO_DONE.  On
 * 32-bit, req_largefile selects between the aiocb64_t and aiocb_t
 * layouts of the control block.
 */
void
_aio_req_mark_done(aio_req_t *reqp)
{
#if !defined(_LP64)
	if (reqp->req_largefile)
		((aiocb64_t *)reqp->req_aiocbp)->aio_state = USERAIO_DONE;
	else
#endif
		((aiocb_t *)reqp->req_aiocbp)->aio_state = USERAIO_DONE;
}
static void
_aio_delay(int ticks)
{
(void) usleep(ticks * (MICROSEC / hz));
}
/*
 * Deliver the completion notification(s) described by 'npp': a queued
 * signal or a port event for the individual request, then the same for
 * the containing lio list, if any.  In each pair a signal (np_signo
 * nonzero) takes precedence over a port (np_port >= 0).
 */
static void
send_notification(notif_param_t *npp)
{
	extern int __sigqueue(pid_t pid, int signo,
	    void *value, int si_code, int block);

	/* Per-request notification. */
	if (npp->np_signo != 0) {
		(void) __sigqueue(__pid, npp->np_signo, npp->np_user,
		    SI_ASYNCIO, 1);
	} else if (npp->np_port >= 0) {
		(void) _port_dispatch(npp->np_port, 0, PORT_SOURCE_AIO,
		    npp->np_event, npp->np_object, npp->np_user);
	}

	/* List-completion (lio) notification. */
	if (npp->np_lio_signo != 0) {
		(void) __sigqueue(__pid, npp->np_lio_signo, npp->np_lio_user,
		    SI_ASYNCIO, 1);
	} else if (npp->np_lio_port >= 0) {
		(void) _port_dispatch(npp->np_lio_port, 0, PORT_SOURCE_AIO,
		    npp->np_lio_event, npp->np_lio_object, npp->np_lio_user);
	}
}
/*
 * Main loop of a notification worker thread: pull AIONOTIFY requests
 * off the queue, deliver their notifications, and free them.
 */
void *
_aio_do_notify(void *arg)
{
	aio_worker_t *aiowp = (aio_worker_t *)arg;
	aio_req_t *reqp;

	if (pthread_setspecific(_aio_key, aiowp) != 0)
		aio_panic("_aio_do_notify, pthread_setspecific()");

	/*
	 * Notifications are never cancelled.
	 * All signals are blocked in this thread except SIGWAITING,
	 * used to interrupt a pending port_get().
	 */
	for (;;) {
		while ((reqp = _aio_req_get(aiowp)) == NULL) {
			if (_aio_idle(aiowp) != 0)
				aio_panic("_aio_do_notify: _aio_idle() failed");
		}
		send_notification(&reqp->req_notify);
		_aio_req_free(reqp);
	}

	/* NOTREACHED */
	return (NULL);
}
/*
 * Complete a POSIX aio request: publish its result, capture the
 * notification parameters, update the lio list head (waking or
 * notifying list waiters when this is the last member), and either
 * queue the request for notification delivery, enqueue it on the
 * SIGEV_NONE done queue, or free it.  Called with no locks held.
 */
static void
_aiodone(aio_req_t *reqp, ssize_t retval, int error)
{
	aio_result_t *resultp = reqp->req_resultp;
	int notify = 0;
	aio_lio_t *head;
	int sigev_none;
	int sigev_signal;
	int sigev_thread;
	int sigev_port;
	notif_param_t np;

	ASSERT(POSIX_AIO(reqp));

	sigev_none = 0;
	sigev_signal = 0;
	sigev_thread = 0;
	sigev_port = 0;
	np.np_signo = 0;
	np.np_port = -1;
	np.np_lio_signo = 0;
	np.np_lio_port = -1;

	switch (reqp->req_sigevent.sigev_notify) {
	case SIGEV_NONE:
		sigev_none = 1;
		break;
	case SIGEV_SIGNAL:
		sigev_signal = 1;
		break;
	case SIGEV_THREAD:
		sigev_thread = 1;
		break;
	case SIGEV_PORT:
		sigev_port = 1;
		break;
	default:
		aio_panic("_aiodone: improper sigev_notify");
		break;
	}

	/*
	 * Capture the notification parameters under __aio_mutex, while
	 * the request fields are still valid.
	 */
	sig_mutex_lock(&__aio_mutex);

	if (sigev_signal) {
		if ((np.np_signo = reqp->req_sigevent.sigev_signo) != 0)
			notify = 1;
		np.np_user = reqp->req_sigevent.sigev_value.sival_ptr;
	} else if (sigev_thread | sigev_port) {
		/* For thread/port, sigev_signo carries the port fd. */
		if ((np.np_port = reqp->req_sigevent.sigev_signo) >= 0)
			notify = 1;
		np.np_event = reqp->req_op;
		if (np.np_event == AIOFSYNC && reqp->req_largefile)
			np.np_event = AIOFSYNC64;
		np.np_object = (uintptr_t)reqp->req_aiocbp;
		np.np_user = reqp->req_sigevent.sigev_value.sival_ptr;
	}

	/* Don't overwrite a result already set by cancellation. */
	if (resultp->aio_errno == EINPROGRESS)
		_aio_set_result(reqp, retval, error);

	_aio_outstand_cnt--;

	head = reqp->req_head;
	reqp->req_head = NULL;

	if (sigev_none) {
		/* SIGEV_NONE: park on the done queue for aio_return(). */
		_aio_enq_doneq(reqp);
		reqp = NULL;
	} else {
		(void) _aio_hash_del(resultp);
		_aio_req_mark_done(reqp);
	}

	_aio_waitn_wakeup();

	/*
	 * __aio_waitn() sets AIO_WAIT_INPROGRESS and
	 * __aio_suspend() increments "_aio_kernel_suspend"
	 * when they are waiting in the kernel for completed I/Os.
	 *
	 * _kaio(AIONOTIFY) awakes the corresponding function
	 * in the kernel; then the corresponding __aio_waitn() or
	 * __aio_suspend() function could reap the recently
	 * completed I/Os (_aiodone()).
	 */
	if ((_aio_flags & AIO_WAIT_INPROGRESS) || _aio_kernel_suspend > 0)
		(void) _kaio(AIONOTIFY);

	sig_mutex_unlock(&__aio_mutex);

	if (head != NULL) {
		/*
		 * If all the lio requests have completed,
		 * prepare to notify the waiting thread.
		 */
		sig_mutex_lock(&head->lio_mutex);
		ASSERT(head->lio_refcnt == head->lio_nent);
		if (head->lio_refcnt == 1) {
			int waiting = 0;

			if (head->lio_mode == LIO_WAIT) {
				/* A lio_listio(LIO_WAIT) caller is blocked. */
				if ((waiting = head->lio_waiting) != 0)
					(void) cond_signal(&head->lio_cond_cv);
			} else if (head->lio_port < 0) {
				/* list-completion by signal */
				if ((np.np_lio_signo = head->lio_signo) != 0)
					notify = 1;
				np.np_lio_user = head->lio_sigval.sival_ptr;
			} else {	/* lio_port >= 0 */
				/* list-completion by port event */
				notify = 1;
				np.np_lio_port = head->lio_port;
				np.np_lio_event = head->lio_event;
				np.np_lio_object =
				    (uintptr_t)head->lio_sigevent;
				np.np_lio_user = head->lio_sigval.sival_ptr;
			}
			head->lio_nent = head->lio_refcnt = 0;
			sig_mutex_unlock(&head->lio_mutex);
			/* A waiter frees the head itself. */
			if (waiting == 0)
				_aio_lio_free(head);
		} else {
			head->lio_nent--;
			head->lio_refcnt--;
			sig_mutex_unlock(&head->lio_mutex);
		}
	}

	/*
	 * The request is completed; deliver any notifications.  A
	 * surviving request is recycled as an AIONOTIFY request so a
	 * notification worker does the delivery asynchronously.
	 */
	if (notify) {
		if (reqp != NULL) {
			reqp->req_notify = np;
			reqp->req_op = AIONOTIFY;
			_aio_req_add(reqp, &__workers_no, AIONOTIFY);
			reqp = NULL;
		} else {
			send_notification(&np);
		}
	}

	if (reqp != NULL)
		_aio_req_free(reqp);
}
/*
 * Gate an aio_fsync() request: the fsync must run only after every
 * other member of its lio group has completed.  If members remain
 * (lio_refcnt > 1) the request is dropped and 1 is returned, telling
 * the worker to skip it.  Otherwise this request is the last member:
 * it is detached from the head, marked canceled if the group was
 * canceled, and 0 is returned so the fsync proceeds — except for
 * LIO_DESTROY, where the request is freed and 1 is returned.
 */
static int
_aio_fsync_del(aio_worker_t *aiowp, aio_req_t *reqp)
{
	aio_lio_t *head = reqp->req_head;
	int rval = 0;

	ASSERT(reqp == aiowp->work_req);
	sig_mutex_lock(&aiowp->work_qlock1);
	sig_mutex_lock(&head->lio_mutex);
	if (head->lio_refcnt > 1) {
		/* Other members still pending: this fsync must wait. */
		head->lio_refcnt--;
		head->lio_nent--;
		aiowp->work_req = NULL;
		sig_mutex_unlock(&head->lio_mutex);
		sig_mutex_unlock(&aiowp->work_qlock1);
		sig_mutex_lock(&__aio_mutex);
		_aio_outstand_cnt--;
		_aio_waitn_wakeup();
		sig_mutex_unlock(&__aio_mutex);
		_aio_req_free(reqp);
		return (1);
	}
	ASSERT(head->lio_nent == 1 && head->lio_refcnt == 1);
	reqp->req_head = NULL;
	if (head->lio_canned)
		reqp->req_state = AIO_REQ_CANCELED;
	if (head->lio_mode == LIO_DESTROY) {
		aiowp->work_req = NULL;
		rval = 1;
	}
	sig_mutex_unlock(&head->lio_mutex);
	sig_mutex_unlock(&aiowp->work_qlock1);
	head->lio_refcnt--;
	head->lio_nent--;
	_aio_lio_free(head);
	if (rval != 0)
		_aio_req_free(reqp);
	return (rval);
}
/*
 * Put a worker to sleep on its idle condvar when its queue is empty.
 * Returns 0 after a normal wakeup, or the sig_cond_wait() error (e.g.
 * EINTR), in which case the idle flag is cleared and the caller
 * re-checks for work.
 */
int
_aio_idle(aio_worker_t *aiowp)
{
	int error = 0;

	sig_mutex_lock(&aiowp->work_qlock1);
	if (aiowp->work_count1 == 0) {
		ASSERT(aiowp->work_minload1 == 0);
		aiowp->work_idleflg = 1;
		/*
		 * A cancellation handler is not needed here.
		 * aio worker threads are never cancelled via pthread_cancel().
		 */
		error = sig_cond_wait(&aiowp->work_idle_cv,
		    &aiowp->work_qlock1);
		/*
		 * The idle flag is normally cleared before worker is awakened
		 * by aio_req_add().  On error (EINTR), we clear it ourself.
		 */
		if (error)
			aiowp->work_idleflg = 0;
	}
	sig_mutex_unlock(&aiowp->work_qlock1);
	return (error);
}
/*
 * Retire the completed old-style request sitting at the worker's
 * work_prev1 slot: unlink it from the worker queue and either free it
 * (if canceled) or append it to the global done list, then wake any
 * aiowait()er or deliver SIGIO.  Called with no locks held.
 */
static void
_aio_work_done(aio_worker_t *aiowp)
{
	aio_req_t *reqp;

	sig_mutex_lock(&__aio_mutex);
	sig_mutex_lock(&aiowp->work_qlock1);
	reqp = aiowp->work_prev1;
	reqp->req_next = NULL;
	aiowp->work_done1 = 0;
	aiowp->work_tail1 = aiowp->work_next1;
	if (aiowp->work_tail1 == NULL)
		aiowp->work_head1 = NULL;
	aiowp->work_prev1 = NULL;
	_aio_outstand_cnt--;
	_aio_req_done_cnt--;
	if (reqp->req_state == AIO_REQ_CANCELED) {
		/*
		 * Request got cancelled after it was marked done. This can
		 * happen because _aio_finish_request() marks it AIO_REQ_DONE
		 * and drops all locks. Don't add the request to the done
		 * queue and just discard it.
		 */
		sig_mutex_unlock(&aiowp->work_qlock1);
		_aio_req_free(reqp);
		if (_aio_outstand_cnt == 0 && _aiowait_flag) {
			sig_mutex_unlock(&__aio_mutex);
			(void) _kaio(AIONOTIFY);
		} else {
			sig_mutex_unlock(&__aio_mutex);
		}
		return;
	}
	sig_mutex_unlock(&aiowp->work_qlock1);
	_aio_donecnt++;
	ASSERT(_aio_donecnt > 0 &&
	    _aio_outstand_cnt >= 0 &&
	    _aio_req_done_cnt >= 0);
	ASSERT(reqp != NULL);

	/* Append to the done list (head = newest, tail = oldest). */
	if (_aio_done_tail == NULL) {
		_aio_done_head = _aio_done_tail = reqp;
	} else {
		_aio_done_head->req_next = reqp;
		_aio_done_head = reqp;
	}

	if (_aiowait_flag) {
		/* Wake the thread blocked in _kaio(AIOWAIT). */
		sig_mutex_unlock(&__aio_mutex);
		(void) _kaio(AIONOTIFY);
	} else {
		sig_mutex_unlock(&__aio_mutex);
		if (_sigio_enabled)
			(void) kill(__pid, SIGIO);
	}
}
/*
 * Pop the oldest completed request off the done list.  Returns its
 * result pointer, (aio_result_t *)-1 when nothing is on the list and
 * nothing is outstanding, or NULL when requests are still in flight.
 * Called with __aio_mutex held.
 */
aio_result_t *
_aio_req_done(void)
{
	aio_req_t *reqp;
	aio_result_t *resultp;

	ASSERT(MUTEX_HELD(&__aio_mutex));

	if ((reqp = _aio_done_tail) != NULL) {
		/* Unlink from the tail; clear head if now empty. */
		if ((_aio_done_tail = reqp->req_next) == NULL)
			_aio_done_head = NULL;
		ASSERT(_aio_donecnt > 0);
		_aio_donecnt--;
		(void) _aio_hash_del(reqp->req_resultp);
		resultp = reqp->req_resultp;
		ASSERT(reqp->req_state == AIO_REQ_DONE);
		_aio_req_free(reqp);
		return (resultp);
	}
	/* is queue empty? */
	if (reqp == NULL && _aio_outstand_cnt == 0) {
		return ((aio_result_t *)-1);
	}
	return (NULL);
}
/*
 * Publish the final return value and errno for a request into its
 * aio_result_t.  The two stores are separated by a producer barrier
 * and their order differs between POSIX and old-style requests, so a
 * concurrent reader polling one field sees the other already written.
 */
void
_aio_set_result(aio_req_t *reqp, ssize_t retval, int error)
{
	aio_result_t *rp = reqp->req_resultp;

	if (!POSIX_AIO(reqp)) {
		/* old-style: errno first, then return value */
		rp->aio_errno = error;
		membar_producer();
		rp->aio_return = retval;
	} else {
		/* POSIX: return value first, then errno */
		rp->aio_return = retval;
		membar_producer();
		rp->aio_errno = error;
	}
}
/*
 * Queue a request onto a worker, balancing load across the rw worker
 * ring: prefer an idle worker, then any worker whose lock can be
 * grabbed without blocking, creating a new worker when all are busy
 * or loaded past _minworkload (up to _max_workers).  AIOFSYNC and
 * AIONOTIFY requests go to the designated worker with no balancing.
 * Signals are deferred (sigoff) for the duration.
 */
void
_aio_req_add(aio_req_t *reqp, aio_worker_t **nextworker, int mode)
{
	ulwp_t *self = curthread;
	aio_worker_t *aiowp;
	aio_worker_t *first;
	int load_bal_flg = 1;	/* advance the round-robin cursor at the end */
	int found;

	ASSERT(reqp->req_state != AIO_REQ_DONEQ);
	reqp->req_next = NULL;
	/*
	 * Try to acquire the next worker's work queue.  If it is locked,
	 * then search the list of workers until a queue is found unlocked,
	 * or until the list is completely traversed at which point another
	 * worker will be created.
	 */
	sigoff(self);		/* defer SIGIO */
	sig_mutex_lock(&__aio_mutex);
	first = aiowp = *nextworker;
	if (mode != AIONOTIFY)
		_aio_outstand_cnt++;
	sig_mutex_unlock(&__aio_mutex);

	switch (mode) {
	case AIOREAD:
	case AIOWRITE:
	case AIOAREAD:
	case AIOAWRITE:
#if !defined(_LP64)
	case AIOAREAD64:
	case AIOAWRITE64:
#endif
		/* try to find an idle worker */
		found = 0;
		do {
			if (sig_mutex_trylock(&aiowp->work_qlock1) == 0) {
				if (aiowp->work_idleflg) {
					found = 1;
					break;
				}
				sig_mutex_unlock(&aiowp->work_qlock1);
			}
		} while ((aiowp = aiowp->work_forw) != first);

		if (found) {
			aiowp->work_minload1++;
			break;
		}

		/* try to acquire some worker's queue lock */
		do {
			if (sig_mutex_trylock(&aiowp->work_qlock1) == 0) {
				found = 1;
				break;
			}
		} while ((aiowp = aiowp->work_forw) != first);

		/*
		 * Create more workers when the workers appear overloaded.
		 * Either all the workers are busy draining their queues
		 * or no worker's queue lock could be acquired.
		 */
		if (!found) {
			if (_aio_worker_cnt < _max_workers) {
				if (_aio_create_worker(reqp, mode))
					aio_panic("_aio_req_add: add worker");
				sigon(self);	/* reenable signals */
				return;		/* request was queued by the new worker */
			}

			/*
			 * No worker available and we have created
			 * _max_workers, keep retrying.
			 */
			while (sig_mutex_trylock(&aiowp->work_qlock1) != 0) {
				/*
				 * Don't check the idle flag here.
				 * We will get the lock as soon as the
				 * worker puts down the request.
				 */
				_aio_delay(1);
				aiowp = aiowp->work_forw;
			}
		}
		ASSERT(MUTEX_HELD(&aiowp->work_qlock1));
		/*
		 * Create a new worker even if we have _max_workers
		 * Dont add the request to this worker if it is loaded.
		 */
		if (_aio_worker_cnt < _max_workers &&
		    aiowp->work_minload1 >= _minworkload) {
			sig_mutex_unlock(&aiowp->work_qlock1);
			sig_mutex_lock(&__aio_mutex);
			*nextworker = aiowp->work_forw;
			sig_mutex_unlock(&__aio_mutex);
			if (_aio_create_worker(reqp, mode))
				aio_panic("aio_req_add: add worker");
			sigon(self);	/* reenable signals */
			return;		/* request was queued by the new worker */
		}
		aiowp->work_minload1++;
		break;
	case AIOFSYNC:
	case AIONOTIFY:
		load_bal_flg = 0;
		sig_mutex_lock(&aiowp->work_qlock1);
		break;
	default:
		aio_panic("_aio_req_add: invalid mode");
		break;
	}

	/*
	 * Put the request onto the worker's queue
	 * (head = newest, tail = oldest, next = next to run).
	 */
	if (aiowp->work_tail1 == NULL) {
		ASSERT(aiowp->work_count1 == 0);
		aiowp->work_tail1 = reqp;
		aiowp->work_next1 = reqp;
	} else {
		aiowp->work_head1->req_next = reqp;
		if (aiowp->work_next1 == NULL)
			aiowp->work_next1 = reqp;
	}
	reqp->req_state = AIO_REQ_QUEUED;
	reqp->req_worker = aiowp;
	aiowp->work_head1 = reqp;

	/*
	 * Awaken the worker if it is not currently active.
	 */
	if (aiowp->work_count1++ == 0 && aiowp->work_idleflg) {
		aiowp->work_idleflg = 0;
		(void) cond_signal(&aiowp->work_idle_cv);
	}
	sig_mutex_unlock(&aiowp->work_qlock1);

	if (load_bal_flg) {
		sig_mutex_lock(&__aio_mutex);
		*nextworker = aiowp->work_forw;
		sig_mutex_unlock(&__aio_mutex);
	}
	sigon(self);	/* reenable signals */
}
/*
 * Take the next queued request off a worker's queue and mark it
 * AIO_REQ_INPROGRESS, storing it in aiowp->work_req.  A POSIX request
 * is unlinked immediately; an old-style request is left linked with
 * work_prev1 pointing at it and work_done1 incremented, to be retired
 * later by _aio_work_done().  Returns NULL when the queue is empty.
 */
aio_req_t *
_aio_req_get(aio_worker_t *aiowp)
{
	aio_req_t *reqp;

	sig_mutex_lock(&aiowp->work_qlock1);
	if ((reqp = aiowp->work_next1) != NULL) {
		/*
		 * Remove a POSIX request from the queue; the
		 * request queue is a singly linked list with
		 * a previous pointer.  The request is removed
		 * by updating the previous pointer.
		 *
		 * Non-posix requests are left on the queue
		 * to eventually be placed on the done queue.
		 */
		if (POSIX_AIO(reqp)) {
			if (aiowp->work_prev1 == NULL) {
				aiowp->work_tail1 = reqp->req_next;
				if (aiowp->work_tail1 == NULL)
					aiowp->work_head1 = NULL;
			} else {
				aiowp->work_prev1->req_next = reqp->req_next;
				if (aiowp->work_head1 == reqp)
					aiowp->work_head1 = reqp->req_next;
			}
		} else {
			aiowp->work_prev1 = reqp;
			ASSERT(aiowp->work_done1 >= 0);
			aiowp->work_done1++;
		}
		ASSERT(reqp != reqp->req_next);
		aiowp->work_next1 = reqp->req_next;
		ASSERT(aiowp->work_count1 >= 1);
		aiowp->work_count1--;
		switch (reqp->req_op) {
		case AIOREAD:
		case AIOWRITE:
		case AIOAREAD:
		case AIOAWRITE:
#if !defined(_LP64)
		case AIOAREAD64:
		case AIOAWRITE64:
#endif
			/* rw requests contribute to the load metric */
			ASSERT(aiowp->work_minload1 > 0);
			aiowp->work_minload1--;
			break;
		}
		reqp->req_state = AIO_REQ_INPROGRESS;
	}
	aiowp->work_req = reqp;
	ASSERT(reqp != NULL || aiowp->work_count1 == 0);
	sig_mutex_unlock(&aiowp->work_qlock1);
	return (reqp);
}
/*
 * Unlink a request from a worker's queue, repairing all of the
 * worker's queue pointers (tail/head/next/prev) and, for a queued
 * request, the load counters.  For POSIX requests only a QUEUED
 * request needs removal (in-progress POSIX requests were already
 * unlinked by _aio_req_get()).  Called with work_qlock1 held.
 */
static void
_aio_req_del(aio_worker_t *aiowp, aio_req_t *reqp, int ostate)
{
	aio_req_t **last;	/* link to the current node */
	aio_req_t *lastrp;	/* node preceding the current one */
	aio_req_t *next;

	ASSERT(aiowp != NULL);
	ASSERT(MUTEX_HELD(&aiowp->work_qlock1));
	if (POSIX_AIO(reqp)) {
		if (ostate != AIO_REQ_QUEUED)
			return;
	}
	last = &aiowp->work_tail1;
	lastrp = aiowp->work_tail1;
	ASSERT(ostate == AIO_REQ_QUEUED || ostate == AIO_REQ_INPROGRESS);
	while ((next = *last) != NULL) {
		if (next == reqp) {
			*last = next->req_next;
			if (aiowp->work_next1 == next)
				aiowp->work_next1 = next->req_next;

			/*
			 * if this is the first request on the queue, move
			 * the lastrp pointer past it.
			 */
			if (lastrp == next)
				lastrp = next->req_next;

			/*
			 * if this request is pointed by work_head1, then
			 * make work_head1 point to the last request that is
			 * present on the queue.
			 */
			if (aiowp->work_head1 == next)
				aiowp->work_head1 = lastrp;

			/*
			 * work_prev1 is used only in non posix case and it
			 * points to the current AIO_REQ_INPROGRESS request.
			 * If work_prev1 points to this request which is being
			 * deleted, make work_prev1 NULL and reduce work_done1
			 * by 1.
			 */
			if (aiowp->work_prev1 == next) {
				ASSERT(ostate == AIO_REQ_INPROGRESS &&
				    !POSIX_AIO(reqp) && aiowp->work_done1 > 0);
				aiowp->work_prev1 = NULL;
				aiowp->work_done1--;
			}

			if (ostate == AIO_REQ_QUEUED) {
				ASSERT(aiowp->work_count1 >= 1);
				aiowp->work_count1--;
				ASSERT(aiowp->work_minload1 >= 1);
				aiowp->work_minload1--;
			}
			return;
		}
		last = &next->req_next;
		lastrp = next;
	}
	/* NOTREACHED */
}
/*
 * Append a completed SIGEV_NONE request to the circular, doubly
 * linked done queue (_aio_doneq points at the oldest entry) and mark
 * it AIO_REQ_DONEQ.
 */
static void
_aio_enq_doneq(aio_req_t *reqp)
{
	aio_req_t *qhead = _aio_doneq;

	if (qhead == NULL) {
		/* First entry: the request links to itself. */
		reqp->req_next = reqp->req_prev = reqp;
		_aio_doneq = reqp;
	} else {
		/* Insert just before the head, i.e. at the tail. */
		aio_req_t *qtail = qhead->req_prev;

		reqp->req_next = qhead;
		reqp->req_prev = qtail;
		qtail->req_next = reqp;
		qhead->req_prev = reqp;
	}
	reqp->req_state = AIO_REQ_DONEQ;
	_aio_doneq_cnt++;
}
/*
 * Remove a request from the circular done queue.  With a non-NULL
 * argument, remove that specific request (it must be in state
 * AIO_REQ_DONEQ, else NULL is returned); with a NULL argument, remove
 * and return the head of the queue, or NULL if the queue is empty.
 * A removed request is relinked to itself and marked AIO_REQ_DONE.
 */
aio_req_t *
_aio_req_remove(aio_req_t *reqp)
{
	if (reqp && reqp->req_state != AIO_REQ_DONEQ)
		return (NULL);

	if (reqp) {
		/* request in done queue */
		if (_aio_doneq == reqp)
			_aio_doneq = reqp->req_next;
		if (_aio_doneq == reqp) {
			/* only one request on queue */
			_aio_doneq = NULL;
		} else {
			aio_req_t *tmp = reqp->req_next;
			reqp->req_prev->req_next = tmp;
			tmp->req_prev = reqp->req_prev;
		}
	} else if ((reqp = _aio_doneq) != NULL) {
		if (reqp == reqp->req_next) {
			/* only one request on queue */
			_aio_doneq = NULL;
		} else {
			reqp->req_prev->req_next = _aio_doneq = reqp->req_next;
			_aio_doneq->req_prev = reqp->req_prev;
		}
	}
	if (reqp) {
		_aio_doneq_cnt--;
		reqp->req_next = reqp->req_prev = reqp;
		reqp->req_state = AIO_REQ_DONE;
	}
	return (reqp);
}
/*
 * Insert a request into the hash bucket selected by its result
 * pointer.  Returns 0 on success, or -1 if a request with the same
 * resultp is already present (the duplicate is not inserted).
 */
static int
_aio_hash_insert(aio_result_t *resultp, aio_req_t *reqp)
{
	aio_hash_t *bucket = _aio_hash + AIOHASH(resultp);
	aio_req_t **linkp;
	aio_req_t *walk;

	lmutex_lock(&bucket->hash_lock);
	for (linkp = &bucket->hash_ptr; (walk = *linkp) != NULL;
	    linkp = &walk->req_link) {
		if (walk->req_resultp == resultp) {
			/* duplicate resultp: refuse the insert */
			lmutex_unlock(&bucket->hash_lock);
			return (-1);
		}
	}
	ASSERT(reqp->req_link == NULL);
	*linkp = reqp;		/* append at the end of the chain */
	lmutex_unlock(&bucket->hash_lock);
	return (0);
}
/*
 * Remove and return the request whose result pointer is resultp from
 * the hash table.  Returns NULL if the hash table does not exist or
 * no matching request is found.  The removed request's req_link is
 * cleared before it is returned.
 */
aio_req_t *
_aio_hash_del(aio_result_t *resultp)
{
	aio_req_t *found = NULL;

	if (_aio_hash != NULL) {
		aio_hash_t *bucket = _aio_hash + AIOHASH(resultp);
		aio_req_t **linkp = &bucket->hash_ptr;

		lmutex_lock(&bucket->hash_lock);
		while ((found = *linkp) != NULL) {
			if (found->req_resultp == resultp) {
				/* unlink from the chain and detach */
				*linkp = found->req_link;
				found->req_link = NULL;
				break;
			}
			linkp = &found->req_link;
		}
		lmutex_unlock(&bucket->hash_lock);
	}
	return (found);
}
/*
 * Look up the request whose result pointer is resultp in the hash
 * table without removing it.  Returns NULL if the hash table does not
 * exist or no matching request is found.
 */
aio_req_t *
_aio_hash_find(aio_result_t *resultp)
{
	aio_req_t *match = NULL;

	if (_aio_hash != NULL) {
		aio_hash_t *bucket = _aio_hash + AIOHASH(resultp);
		aio_req_t *walk;

		lmutex_lock(&bucket->hash_lock);
		for (walk = bucket->hash_ptr; walk != NULL;
		    walk = walk->req_link) {
			if (walk->req_resultp == resultp) {
				match = walk;
				break;
			}
		}
		lmutex_unlock(&bucket->hash_lock);
	}
	return (match);
}
/*
 * Common dispatch path for POSIX asynchronous read/write requests.
 *
 * First tries kernel aio (when flg has AIO_KAIO set and the fd is
 * believed to support it); on ENOTSUP/EBADFD it falls back to the
 * user-level implementation: allocate an aio_req_t, copy the caller's
 * notification choice and I/O parameters into it, and hand it to a
 * worker thread via _aio_req_add().
 *
 * Returns 0 on successful submission, -1 with errno set on failure.
 */
int
_aio_rw(aiocb_t *aiocbp, aio_lio_t *lio_head, aio_worker_t **nextworker,
    int mode, int flg)
{
	aio_req_t *reqp;
	aio_args_t *ap;
	int kerr;

	if (aiocbp == NULL) {
		errno = EINVAL;
		return (-1);
	}

	/* bring up kernel aio support on first use */
	if (!_kaio_ok)
		_kaio_init();

	aiocbp->aio_state = NOCHECK;

	/*
	 * Kernel aio fast path: only when the caller allows it and the
	 * file descriptor has not been flagged as unsupported.
	 */
	if ((flg & AIO_KAIO) != 0 &&
	    _kaio_ok > 0 && KAIO_SUPPORTED(aiocbp->aio_fildes)) {
		aiocbp->aio_resultp.aio_errno = EINPROGRESS;
		aiocbp->aio_state = CHECK;
		kerr = (int)_kaio(mode, aiocbp);
		if (kerr == 0)
			return (0);
		if (errno != ENOTSUP && errno != EBADFD) {
			/* hard failure: report through the aiocb */
			aiocbp->aio_resultp.aio_errno = errno;
			aiocbp->aio_resultp.aio_return = -1;
			aiocbp->aio_state = NOCHECK;
			return (-1);
		}
		if (errno == EBADFD)
			SET_KAIO_NOT_SUPPORTED(aiocbp->aio_fildes);
		/* fall through to the user-level path */
	}

	/* user-level (library worker thread) path */
	aiocbp->aio_resultp.aio_errno = EINPROGRESS;
	aiocbp->aio_state = USERAIO;
	if (!__uaio_ok && __uaio_init() == -1)
		return (-1);
	if ((reqp = _aio_req_alloc()) == NULL) {
		errno = EAGAIN;
		return (-1);
	}

	reqp->req_head = lio_head;
	reqp->req_type = AIO_POSIX_REQ;
	reqp->req_op = mode;
	reqp->req_largefile = 0;

	/* copy the caller's completion-notification choice */
	switch (aiocbp->aio_sigevent.sigev_notify) {
	case SIGEV_NONE:
		reqp->req_sigevent.sigev_notify = SIGEV_NONE;
		break;
	case SIGEV_SIGNAL:
		reqp->req_sigevent.sigev_notify = SIGEV_SIGNAL;
		reqp->req_sigevent.sigev_signo =
		    aiocbp->aio_sigevent.sigev_signo;
		reqp->req_sigevent.sigev_value.sival_ptr =
		    aiocbp->aio_sigevent.sigev_value.sival_ptr;
		break;
	case SIGEV_PORT: {
		/* sival_ptr carries a port_notify_t for event ports */
		port_notify_t *pn =
		    aiocbp->aio_sigevent.sigev_value.sival_ptr;

		reqp->req_sigevent.sigev_notify = SIGEV_PORT;
		reqp->req_sigevent.sigev_signo = pn->portnfy_port;
		reqp->req_sigevent.sigev_value.sival_ptr = pn->portnfy_user;
		break;
	}
	case SIGEV_THREAD:
		reqp->req_sigevent.sigev_notify = SIGEV_THREAD;
		reqp->req_sigevent.sigev_signo =
		    aiocbp->aio_sigevent.sigev_signo;
		reqp->req_sigevent.sigev_value.sival_ptr =
		    aiocbp->aio_sigevent.sigev_value.sival_ptr;
		break;
	default:
		/* unrecognized notify type: nothing to copy (as before) */
		break;
	}

	reqp->req_resultp = &aiocbp->aio_resultp;
	reqp->req_aiocbp = aiocbp;
	ap = &reqp->req_args;
	ap->fd = aiocbp->aio_fildes;
	ap->buf = (caddr_t)aiocbp->aio_buf;
	ap->bufsz = aiocbp->aio_nbytes;
	ap->offset = aiocbp->aio_offset;

	if ((flg & AIO_NO_DUPS) &&
	    _aio_hash_insert(&aiocbp->aio_resultp, reqp) != 0) {
		aio_panic("_aio_rw(): request already in hash table");
	}
	_aio_req_add(reqp, nextworker, mode);
	return (0);
}
#if !defined(_LP64)
/*
 * Large-file (64-bit offset) counterpart of _aio_rw(), compiled only
 * for 32-bit processes.  Identical logic except that it takes an
 * aiocb64_t and tags the request with req_largefile = 1.
 *
 * Returns 0 on successful submission, -1 with errno set on failure.
 */
int
_aio_rw64(aiocb64_t *aiocbp, aio_lio_t *lio_head, aio_worker_t **nextworker,
    int mode, int flg)
{
	aio_req_t *reqp;
	aio_args_t *ap;
	int kerr;

	if (aiocbp == NULL) {
		errno = EINVAL;
		return (-1);
	}

	/* bring up kernel aio support on first use */
	if (!_kaio_ok)
		_kaio_init();

	aiocbp->aio_state = NOCHECK;

	/*
	 * Kernel aio fast path: only when the caller allows it and the
	 * file descriptor has not been flagged as unsupported.
	 */
	if ((flg & AIO_KAIO) != 0 &&
	    _kaio_ok > 0 && KAIO_SUPPORTED(aiocbp->aio_fildes)) {
		aiocbp->aio_resultp.aio_errno = EINPROGRESS;
		aiocbp->aio_state = CHECK;
		kerr = (int)_kaio(mode, aiocbp);
		if (kerr == 0)
			return (0);
		if (errno != ENOTSUP && errno != EBADFD) {
			/* hard failure: report through the aiocb */
			aiocbp->aio_resultp.aio_errno = errno;
			aiocbp->aio_resultp.aio_return = -1;
			aiocbp->aio_state = NOCHECK;
			return (-1);
		}
		if (errno == EBADFD)
			SET_KAIO_NOT_SUPPORTED(aiocbp->aio_fildes);
		/* fall through to the user-level path */
	}

	/* user-level (library worker thread) path */
	aiocbp->aio_resultp.aio_errno = EINPROGRESS;
	aiocbp->aio_state = USERAIO;
	if (!__uaio_ok && __uaio_init() == -1)
		return (-1);
	if ((reqp = _aio_req_alloc()) == NULL) {
		errno = EAGAIN;
		return (-1);
	}

	reqp->req_head = lio_head;
	reqp->req_type = AIO_POSIX_REQ;
	reqp->req_op = mode;
	reqp->req_largefile = 1;	/* 64-bit offset request */

	/* copy the caller's completion-notification choice */
	switch (aiocbp->aio_sigevent.sigev_notify) {
	case SIGEV_NONE:
		reqp->req_sigevent.sigev_notify = SIGEV_NONE;
		break;
	case SIGEV_SIGNAL:
		reqp->req_sigevent.sigev_notify = SIGEV_SIGNAL;
		reqp->req_sigevent.sigev_signo =
		    aiocbp->aio_sigevent.sigev_signo;
		reqp->req_sigevent.sigev_value.sival_ptr =
		    aiocbp->aio_sigevent.sigev_value.sival_ptr;
		break;
	case SIGEV_PORT: {
		/* sival_ptr carries a port_notify_t for event ports */
		port_notify_t *pn =
		    aiocbp->aio_sigevent.sigev_value.sival_ptr;

		reqp->req_sigevent.sigev_notify = SIGEV_PORT;
		reqp->req_sigevent.sigev_signo = pn->portnfy_port;
		reqp->req_sigevent.sigev_value.sival_ptr = pn->portnfy_user;
		break;
	}
	case SIGEV_THREAD:
		reqp->req_sigevent.sigev_notify = SIGEV_THREAD;
		reqp->req_sigevent.sigev_signo =
		    aiocbp->aio_sigevent.sigev_signo;
		reqp->req_sigevent.sigev_value.sival_ptr =
		    aiocbp->aio_sigevent.sigev_value.sival_ptr;
		break;
	default:
		/* unrecognized notify type: nothing to copy (as before) */
		break;
	}

	reqp->req_resultp = &aiocbp->aio_resultp;
	reqp->req_aiocbp = aiocbp;
	ap = &reqp->req_args;
	ap->fd = aiocbp->aio_fildes;
	ap->buf = (caddr_t)aiocbp->aio_buf;
	ap->bufsz = aiocbp->aio_nbytes;
	ap->offset = aiocbp->aio_offset;

	if ((flg & AIO_NO_DUPS) &&
	    _aio_hash_insert(&aiocbp->aio_resultp, reqp) != 0) {
		aio_panic("_aio_rw64(): request already in hash table");
	}
	_aio_req_add(reqp, nextworker, mode);
	return (0);
}
#endif