/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#include "lint.h"
#include "thr_uberdata.h"
#include "asyncio.h"
#include <atomic.h>
static void _aio_work_done(aio_worker_t *);
static void _aio_enq_doneq(aio_req_t *);
extern void _aio_lio_free(aio_lio_t *);
extern int __fdsync(int, int);
extern int __fcntl(int, int, ...);
extern int _port_dispatch(int, int, int, int, uintptr_t, void *);
static void _aio_cancel_work(aio_worker_t *, int, int *, int *);
/*
* switch for kernel async I/O
*/
/*
* Key for thread-specific data
*/
/*
* Array for determining whether or not a file supports kaio.
* Initialized in _kaio_init().
*/
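/*
 * Illustrative sketch (not the actual asyncio.h interfaces): one way to
 * keep a per-file-descriptor "supports kaio" bit in an array of 32-bit
 * words.  The names below are hypothetical; the real array and macros
 * are defined in asyncio.h.
 */
#define	EXAMPLE_KAIO_WORD(fd)	((fd) / 32)
#define	EXAMPLE_KAIO_BIT(fd)	(1u << ((fd) % 32))
#define	EXAMPLE_KAIO_TEST(map, fd)	\
	(((map)[EXAMPLE_KAIO_WORD(fd)] & EXAMPLE_KAIO_BIT(fd)) != 0)
#define	EXAMPLE_KAIO_SET(map, fd)	\
	((map)[EXAMPLE_KAIO_WORD(fd)] |= EXAMPLE_KAIO_BIT(fd))
#define	EXAMPLE_KAIO_CLEAR(map, fd)	\
	((map)[EXAMPLE_KAIO_WORD(fd)] &= ~EXAMPLE_KAIO_BIT(fd))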
/*
 * workers for read/write requests
 * (__aio_mutex lock protects circular linked list of workers)
 */
/*
* worker for notification requests.
*/
int __aio_initbusy = 0;
int _aio_donecnt = 0;
int _aio_doneq_cnt = 0;
static int
_kaio_supported_init(void)
{
	void *ptr;

	if (_kaio_supported != NULL)	/* already initialized */
		return (0);

	ptr = mmap(NULL, MAX_KAIO_FDARRAY_SIZE * sizeof (uint32_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
	if (ptr == MAP_FAILED)
		return (-1);
	_kaio_supported = ptr;
return (0);
}
/*
 * The aio subsystem is initialized the first time an AIO request is made.
 * Constants such as the maximum number of workers the subsystem can
 * create and the minimum number of workers permitted before imposing
 * restrictions are initialized here, and some workers are created.
 */
int
__uaio_init(void)
{
	int i;
	int ret = -1;
	int cancel_state;

	while (__aio_initbusy)
		(void) cond_wait(&__aio_initcv, &__aio_initlock);
if (__uaio_ok) { /* already initialized */
return (0);
}
__aio_initbusy = 1;
if (_kaio_supported_init() != 0)
goto out;
/*
* Allocate and initialize the hash table.
* Do this only once, even if __uaio_init() is called twice.
*/
	if (_aio_hash == NULL) {
		_aio_hash = (aio_hash_t *)mmap(NULL,
		    HASHSZ * sizeof (aio_hash_t), PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
		if ((void *)_aio_hash == MAP_FAILED) {
			_aio_hash = NULL;
			goto out;
		}
		for (i = 0; i < HASHSZ; i++)
			(void) mutex_init(&_aio_hash[i].hash_lock,
			    USYNC_THREAD, NULL);
	}
/*
* Initialize worker's signal mask to only catch SIGAIOCANCEL.
*/
	(void) sigfillset(&_worker_set);
	(void) sigdelset(&_worker_set, SIGAIOCANCEL);
/*
* Create one worker to send asynchronous notifications.
* Do this only once, even if __uaio_init() is called twice.
*/
	if (__no_workerscnt == 0 &&
	    (_aio_create_worker(NULL, AIONOTIFY) != 0)) {
		errno = EAGAIN;
		goto out;
	}
/*
 * And later check whether at least one worker is created;
* lwp_create() calls could fail because of segkp exhaustion.
*/
	for (i = 0; i < _min_workers; i++)
		(void) _aio_create_worker(NULL, AIOREAD);
	if (__rw_workerscnt == 0) {
		errno = EAGAIN;
		goto out;
	}
ret = 0;
out:
if (ret == 0)
__uaio_ok = 1;
__aio_initbusy = 0;
(void) cond_broadcast(&__aio_initcv);
return (ret);
}
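/*
 * Minimal sketch of the initialization handshake used above: callers
 * serialize on a "busy" flag and a condition variable so that only one
 * thread performs the one-time setup.  The lock, flag and setup hook
 * below are hypothetical stand-ins for __aio_initbusy, __aio_initcv and
 * their companion lock.
 */
static mutex_t example_init_lock = DEFAULTMUTEX;
static cond_t example_init_cv = DEFAULTCV;
static int example_init_busy = 0;
static int example_init_done = 0;

static int
example_init_once(int (*setup)(void))
{
	int ret = 0;

	lmutex_lock(&example_init_lock);
	while (example_init_busy)
		(void) cond_wait(&example_init_cv, &example_init_lock);
	if (!example_init_done) {
		example_init_busy = 1;
		lmutex_unlock(&example_init_lock);
		ret = setup();		/* runs without the lock held */
		lmutex_lock(&example_init_lock);
		if (ret == 0)
			example_init_done = 1;
		example_init_busy = 0;
		(void) cond_broadcast(&example_init_cv);
	}
	lmutex_unlock(&example_init_lock);
	return (ret);
}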
/*
* Called from close() before actually performing the real _close().
*/
void
_aio_close(int fd)
{
if (fd < 0) /* avoid cancelling everything */
return;
/*
* Cancel all outstanding aio requests for this file descriptor.
*/
if (__uaio_ok)
(void) aiocancel_all(fd);
/*
* If we have allocated the bit array, clear the bit for this file.
* The next open may re-use this file descriptor and the new file
* may have different kaio() behaviour.
*/
	if (_kaio_supported != NULL)
		CLEAR_KAIO_SUPPORTED(fd);
}
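/*
 * Illustrative caller side of the contract described above: a close()
 * implementation is expected to run the _aio_close() hook before the
 * descriptor is actually released, so no worker can act on a recycled
 * fd.  example_close() and __real_close() are hypothetical names.
 */
static int
example_close(int fd)
{
	extern int __real_close(int);	/* hypothetical raw close syscall */

	_aio_close(fd);			/* cancel user-level aio first */
	return (__real_close(fd));	/* then really close the fd */
}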
/*
* special kaio cleanup thread sits in a loop in the
* kernel waiting for pending kaio requests to complete.
*/
void *
_kaio_cleanup_thread(void *arg)
{
	if (pthread_setspecific(_aio_key, arg) != 0)
		aio_panic("_kaio_cleanup_thread, pthread_setspecific()");
	/* park in the kernel until all pending kaio requests complete */
	(void) _kaio(AIOSTART);
return (arg);
}
/*
* initialize kaio.
*/
void
_kaio_init(void)
{
int error;
int cancel_state;
	while (__aio_initbusy)
		(void) cond_wait(&__aio_initcv, &__aio_initlock);
if (_kaio_ok) { /* already initialized */
return;
}
__aio_initbusy = 1;
	if (_kaio_supported_init() != 0)
		error = ENOMEM;
	else
		error = (int)_kaio(AIOINIT);
if (error)
_kaio_ok = -1;
else
_kaio_ok = 1;
__aio_initbusy = 0;
(void) cond_broadcast(&__aio_initcv);
}
int
aioread(int fd, caddr_t buf, int bufsz, off_t offset, int whence,
	aio_result_t *resultp)
{
	return (_aiorw(fd, buf, bufsz, offset, whence, resultp, AIOREAD));
}

int
aiowrite(int fd, caddr_t buf, int bufsz, off_t offset, int whence,
	aio_result_t *resultp)
{
	return (_aiorw(fd, buf, bufsz, offset, whence, resultp, AIOWRITE));
}

#if !defined(_LP64)
int
aioread64(int fd, caddr_t buf, int bufsz, off64_t offset, int whence,
	aio_result_t *resultp)
{
	return (_aiorw(fd, buf, bufsz, offset, whence, resultp, AIOAREAD64));
}

int
aiowrite64(int fd, caddr_t buf, int bufsz, off64_t offset, int whence,
	aio_result_t *resultp)
{
	return (_aiorw(fd, buf, bufsz, offset, whence, resultp, AIOAWRITE64));
}
#endif	/* !defined(_LP64) */
int
_aiorw(int fd, caddr_t buf, int bufsz, offset_t offset, int whence,
	aio_result_t *resultp, int mode)
{
int error = 0;
int kerr;
int umode;
switch (whence) {
case SEEK_SET:
break;
case SEEK_CUR:
error = -1;
else
break;
case SEEK_END:
error = -1;
else
break;
default:
error = -1;
}
if (error)
return (error);
/* initialize kaio */
if (!_kaio_ok)
_kaio_init();
/*
* _aio_do_request() needs the original request code (mode) to be able
 * to choose the appropriate 32/64 bit function. All other functions
* only require the difference between READ and WRITE (umode).
*/
else
/*
* Try kernel aio first.
*/
if (kerr == 0) {
return (0);
}
return (-1);
}
return (-1);
return (-1);
}
/*
* _aio_do_request() checks reqp->req_op to differentiate
* between 32 and 64 bit access.
*/
return (-1);
}
/*
* _aio_req_add() only needs the difference between READ and
* WRITE to choose the right worker queue.
*/
return (0);
}
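/*
 * Sketch of the mode/umode distinction described in the comment above:
 * the worker needs the exact request code to pick the 32-bit or 64-bit
 * I/O routine, while queue selection only cares about read vs. write.
 * This helper is illustrative and not part of the library.
 */
static int
example_umode(int mode)
{
#if !defined(_LP64)
	if (mode == AIOAREAD64)
		return (AIOREAD);
	if (mode == AIOAWRITE64)
		return (AIOWRITE);
#endif
	if (mode == AIOAREAD)
		return (AIOREAD);
	if (mode == AIOAWRITE)
		return (AIOWRITE);
	return (mode);		/* already AIOREAD or AIOWRITE */
}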
int
aiocancel(aio_result_t *resultp)
{
int ret;
int done = 0;
int canceled = 0;
if (!__uaio_ok) {
return (-1);
}
if (_aio_outstand_cnt == _aio_req_done_cnt)
else
ret = -1;
} else {
if (canceled) {
ret = 0;
} else {
if (_aio_outstand_cnt == 0 ||
else
ret = -1;
}
}
return (ret);
}
/* ARGSUSED */
static void
_aiowait_cleanup(void *arg)
{
	sig_mutex_lock(&__aio_mutex);
	_aiowait_flag--;
	sig_mutex_unlock(&__aio_mutex);
}
/*
* This must be asynch safe and cancel safe
*/
aio_result_t *
aiowait(struct timeval *uwait)
{
	aio_result_t *uresultp;
	aio_result_t *kresultp;
	aio_result_t *resultp;
	int dontblock;
	int timedwait = 0;
	int kaio_errno = 0;
	hrtime_t hres;
if (uwait) {
/*
* Check for a valid specified wait time.
* If it is invalid, fail the call right away.
*/
return ((aio_result_t *)-1);
}
timedwait++;
} else {
/* polling */
if (_kaio_outstand_cnt == 0) {
} else {
return (kresultp);
}
}
uresultp = _aio_req_done();
return (uresultp);
}
return ((aio_result_t *)-1);
} else {
return (NULL);
}
}
}
for (;;) {
uresultp = _aio_req_done();
break;
}
if (dontblock && _kaio_outstand_cnt == 0) {
kaio_errno = EINVAL;
} else {
kaio_errno = errno;
}
/* aiowait() awakened by an aionotify() */
continue;
break;
kaio_errno == EINVAL &&
errno = kaio_errno;
break;
kaio_errno == EINTR) {
errno = kaio_errno;
break;
} else if (timedwait) {
if (hres <= 0) {
/* time is up; return */
break;
} else {
/*
* Some time left. Round up the remaining time
* in nanoseconds to microsec. Retry the call.
*/
}
} else {
continue;
}
}
return (resultp);
}
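/*
 * Usage sketch for the interfaces above (aiowrite(3AIO) and
 * aiowait(3AIO)).  Error handling is abbreviated and the timeout value
 * is arbitrary; this is an example, not library code.
 */
static int
example_write_and_wait(int fd, char *buf, int len, off_t off)
{
	aio_result_t result;
	aio_result_t *donep;
	struct timeval timeout;

	result.aio_return = AIO_INPROGRESS;
	if (aiowrite(fd, buf, len, off, SEEK_SET, &result) == -1)
		return (-1);

	timeout.tv_sec = 5;
	timeout.tv_usec = 0;
	donep = aiowait(&timeout);
	if (donep == NULL || donep == (aio_result_t *)-1)
		return (-1);		/* timed out or error */

	/* donep points at the caller's aio_result_t for the finished I/O */
	return ((donep->aio_return == -1)? -1 : 0);
}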
/*
* _aio_get_timedelta calculates the remaining time and stores the result
* into timespec_t *wait.
*/
int
_aio_get_timedelta(timespec_t *end, timespec_t *wait)
{
int ret = 0;
} else {
} else {
}
}
} else {
ret = -1;
}
return (ret);
}
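/*
 * Sketch of the computation described above: given an absolute end time,
 * derive the time still remaining and normalize the nanosecond field.
 * This illustrates the idea; it is not the library's implementation.
 */
static int
example_time_remaining(const timespec_t *end, timespec_t *wait)
{
	timespec_t now;

	(void) clock_gettime(CLOCK_REALTIME, &now);
	if (end->tv_sec < now.tv_sec ||
	    (end->tv_sec == now.tv_sec && end->tv_nsec <= now.tv_nsec))
		return (-1);			/* already expired */
	wait->tv_sec = end->tv_sec - now.tv_sec;
	wait->tv_nsec = end->tv_nsec - now.tv_nsec;
	if (wait->tv_nsec < 0) {
		wait->tv_sec--;
		wait->tv_nsec += NANOSEC;	/* borrow one second */
	}
	return (0);
}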
/*
 * If closing by file descriptor: we will simply cancel all the outstanding
 * aio's and return.  Those aio's in question will have either noticed
 * the cancellation notice before, during, or after initiating I/O.
 */
int
aiocancel_all(int fd)
{
	aio_req_t *reqp;
	aio_req_t **reqpp;
int canceled = 0;
int done = 0;
int cancelall = 0;
if (_aio_outstand_cnt == 0) {
return (AIO_ALLDONE);
}
do {
/*
* finally, check if there are requests on the done queue that
* should be canceled.
*/
if (fd < 0)
cancelall = 1;
reqpp = &_aio_done_tail;
}
if (_aio_done_head == reqp) {
/* this should be the last req in list */
}
_aio_donecnt--;
} else {
}
}
if (cancelall) {
ASSERT(_aio_donecnt == 0);
}
return (AIO_CANCELED);
return (AIO_ALLDONE);
return (AIO_NOTCANCELED);
}
/*
* Cancel requests from a given work queue. If the file descriptor
* parameter, fd, is non-negative, then only cancel those requests
* in this queue that are to this file descriptor. If the fd
* parameter is -1, then cancel all requests.
*/
static void
_aio_cancel_work(aio_worker_t *aiowp, int fd, int *canceled, int *done)
{
/*
* cancel queued requests first.
*/
/*
* Callers locks were dropped.
* reqp is invalid; start traversing
* the list from the beginning again.
*/
continue;
}
}
}
/*
* Since the queued requests have been canceled, there can
* only be one inprogress request that should be canceled.
*/
}
/*
 * Cancel a request.  Return 1 if the caller's locks were temporarily
* dropped, otherwise return 0.
*/
int
_aio_cancel_req(aio_worker_t *aiowp, aio_req_t *reqp, int *canceled, int *done)
{
	int ostate = reqp->req_state;
if (ostate == AIO_REQ_CANCELED)
return (0);
/*
* If not on the done queue yet, just mark it CANCELED,
* _aio_work_done() will do the necessary clean up.
* This is required to ensure that aiocancel_all() cancels
* all the outstanding requests, including this one which
* is not yet on done queue but has been marked done.
*/
(*canceled)++;
return (0);
}
(*done)++;
return (0);
}
/* Cancel the queued aio_fsync() request */
(*canceled)++;
}
return (0);
}
(*canceled)++;
/*
* Set the result values now, before _aiodone() is called.
* We do this because the application can expect aio_return
* and aio_errno to be set to -1 and ECANCELED, respectively,
* immediately after a successful return from aiocancel()
* or aio_cancel().
*/
return (0);
}
return (0);
}
return (1);
}
int
_aio_create_worker(aio_req_t *reqp, int mode)
{
	aio_worker_t *aiowp, **workers, **nextworker;
	int *aio_workerscnt;
	void *(*func)(void *);
	int error;
/*
* Put the new worker thread in the right queue.
*/
switch (mode) {
case AIOREAD:
case AIOWRITE:
case AIOAREAD:
case AIOAWRITE:
#if !defined(_LP64)
case AIOAREAD64:
case AIOAWRITE64:
#endif
workers = &__workers_rw;
break;
case AIONOTIFY:
workers = &__workers_no;
break;
default:
aio_panic("_aio_create_worker: invalid mode");
break;
}
return (-1);
if (reqp) {
}
if (error) {
if (reqp) {
}
return (-1);
}
(*aio_workerscnt)++;
*nextworker = aiowp;
} else {
}
return (0);
}
/*
* This is the worker's main routine.
* The task of this function is to execute all queued requests;
* once the last pending request is executed this function will block
* in _aio_idle(). A new incoming request must wakeup this thread to
* restart the work.
 * Every worker has its own work queue. The queue lock is required
 * to synchronize the addition of new requests for this worker or the
 * cancellation of queued requests.
* Cancellation scenarios:
* The cancellation of a request is being done asynchronously using
* _aio_cancel_req() from another thread context.
 * A queued request can be cancelled in different ways:
* a) request is queued but not "in progress" or "done" (AIO_REQ_QUEUED):
* - lock the queue -> remove the request -> unlock the queue
* b) request is in progress (AIO_REQ_INPROGRESS) :
 * - this function first allows the cancellation of the running
* request with the flag "work_cancel_flg=1"
* see _aio_req_get() -> _aio_cancel_on()
* During this phase, it is allowed to interrupt the worker
* thread running the request (this thread) using the SIGAIOCANCEL
* signal.
* Once this thread returns from the kernel (because the request
* is just done), then it must disable a possible cancellation
* and proceed to finish the request. To disable the cancellation
* this thread must use _aio_cancel_off() to set "work_cancel_flg=0".
* c) request is already done (AIO_REQ_DONE || AIO_REQ_DONEQ):
* same procedure as in a)
*
* To b)
 * This thread uses sigsetjmp() to define the position in the code where
 * it wishes to continue working in the case that a SIGAIOCANCEL signal
 * is detected.
* Normally this thread should get the cancellation signal during the
* kernel phase (reading or writing). In that case the signal handler
* aiosigcancelhndlr() is activated using the worker thread context,
* which again will use the siglongjmp() function to break the standard
* code flow and jump to the "sigsetjmp" position, provided that
* "work_cancel_flg" is set to "1".
* Because the "work_cancel_flg" is only manipulated by this worker
* thread and it can only run on one CPU at a given time, it is not
* necessary to protect that flag with the queue lock.
* Returning from the kernel (read or write system call) we must
* first disable the use of the SIGAIOCANCEL signal and accordingly
* the use of the siglongjmp() function to prevent a possible deadlock:
 * - It can happen that this worker thread returns from the kernel and
* blocks in "work_qlock1",
* - then a second thread cancels the apparently "in progress" request
* and sends the SIGAIOCANCEL signal to the worker thread,
 * - the worker thread gets assigned the "work_qlock1" and will return
* from the kernel,
* - the kernel detects the pending signal and activates the signal
* handler instead,
* - if the "work_cancel_flg" is still set then the signal handler
* should use siglongjmp() to cancel the "in progress" request and
* it would try to acquire the same work_qlock1 in _aio_req_get()
* for a second time => deadlock.
* To avoid that situation we disable the cancellation of the request
* in progress BEFORE we try to acquire the work_qlock1.
* In that case the signal handler will not call siglongjmp() and the
* worker thread will continue running the standard code flow.
 * Then this thread must check the AIO_REQ_CANCELED flag to emulate
 * the siglongjmp() that would otherwise have been required, freeing
 * work_qlock1 and avoiding a deadlock.
*/
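/*
 * Minimal sketch of the cancellation window described above: a
 * sigsetjmp()/siglongjmp() pair around the blocking I/O, with a flag
 * that tells the SIGAIOCANCEL handler whether the long jump is allowed.
 * All names here are hypothetical; the real logic is spread across
 * _aio_do_request(), _aio_req_get() and the signal handler.
 */
static sigjmp_buf example_jmpbuf;
static volatile int example_cancel_window;	/* 1 = siglongjmp allowed */

/* ARGSUSED */
static void
example_cancel_handler(int sig)
{
	if (example_cancel_window)
		siglongjmp(example_jmpbuf, 1);
	/* otherwise do nothing; the mainline will notice the cancel flag */
}

static ssize_t
example_cancellable_pread(int fd, void *buf, size_t len, off_t off)
{
	ssize_t rv;

	if (sigsetjmp(example_jmpbuf, 1) != 0) {
		/* we arrive here via the handler's siglongjmp() */
		example_cancel_window = 0;
		errno = ECANCELED;
		return (-1);
	}
	example_cancel_window = 1;	/* allow the jump while blocked */
	rv = pread(fd, buf, len, off);
	example_cancel_window = 0;	/* forbid it before taking locks */
	return (rv);
}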
void *
_aio_do_request(void *arglist)
{
	aio_worker_t *aiowp = (aio_worker_t *)arglist;
	aio_req_t *reqp;		/* current AIO request */
	ssize_t retval;
	int append;
	int error;
aio_panic("_aio_do_request, pthread_setspecific()");
/*
* We resume here when an operation is cancelled.
* On first entry, aiowp->work_req == NULL, so all
* we do is block SIGAIOCANCEL.
*/
for (;;) {
/*
* Put completed requests on aio_done_list. This has
* to be done as part of the main loop to ensure that
* we don't artificially starve any aiowait'ers.
*/
if (aiowp->work_done1)
top:
/* consume any deferred SIGAIOCANCEL signal here */
goto top;
}
		error = 0;
		switch (reqp->req_op) {
		case AIOREAD:
case AIOAREAD:
if (retval == -1) {
if (retval == -1)
} else {
}
}
break;
case AIOWRITE:
case AIOAWRITE:
/*
* The SUSv3 POSIX spec for aio_write() states:
* If O_APPEND is set for the file descriptor,
* write operations append to the file in the
* same order as the calls were made.
* but, somewhat inconsistently, it requires pwrite()
* to ignore the O_APPEND setting. So we have to use
* fcntl() to get the open modes and call write() for
* the O_APPEND case.
*/
if (retval == -1) {
if (retval == -1)
} else {
}
}
break;
#if !defined(_LP64)
case AIOAREAD64:
if (retval == -1) {
if (retval == -1)
} else {
}
}
break;
case AIOAWRITE64:
/*
* The SUSv3 POSIX spec for aio_write() states:
* If O_APPEND is set for the file descriptor,
* write operations append to the file in the
* same order as the calls were made.
* but, somewhat inconsistently, it requires pwrite()
* to ignore the O_APPEND setting. So we have to use
* fcntl() to get the open modes and call write() for
* the O_APPEND case.
*/
if (retval == -1) {
if (retval == -1)
} else {
}
}
break;
#endif /* !defined(_LP64) */
case AIOFSYNC:
goto top;
/*
* All writes for this fsync request are now
* acknowledged. Now make these writes visible
* and put the final request into the hash table.
*/
/* EMPTY */;
} else {
}
aio_panic("_aio_do_request(): AIOFSYNC: "
"request already in hash table");
break;
default:
aio_panic("_aio_do_request, bad op");
}
}
/* NOTREACHED */
return (NULL);
}
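/*
 * Sketch of the O_APPEND handling described in the AIOWRITE cases above:
 * pwrite() ignores O_APPEND, so when the descriptor was opened with
 * O_APPEND the request must fall back to write() to preserve append
 * ordering.  This helper is illustrative only.
 */
static ssize_t
example_aio_write(int fd, void *buf, size_t len, off_t off)
{
	int oflags = __fcntl(fd, F_GETFL);

	if (oflags == -1)
		return (-1);
	if (oflags & O_APPEND)
		return (write(fd, buf, len));	/* kernel appends atomically */
	return (pwrite(fd, buf, len, off));	/* honor the request offset */
}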
/*
* Perform the tail processing for _aio_do_request().
* The in-progress request may or may not have been cancelled.
*/
static void
_aio_finish_request(aio_worker_t *aiowp, ssize_t retval, int error)
{
else {
retval = -1;
}
int notify;
}
/*
* If it was canceled, this request will not be
* added to done list. Just free it.
*/
} else {
}
/*
* Notify any thread that may have blocked
* because it saw an outstanding request.
*/
notify = 0;
if (_aio_outstand_cnt == 0 && _aiowait_flag) {
notify = 1;
}
if (notify) {
}
} else {
}
}
}
void
_aio_req_mark_done(aio_req_t *reqp)
{
#if !defined(_LP64)
if (reqp->req_largefile)
else
#endif
}
/*
* Sleep for 'ticks' clock ticks to give somebody else a chance to run,
* hopefully to consume one of our queued signals.
*/
static void
_aio_delay(int ticks)
{
}
/*
* Actually send the notifications.
* We could block indefinitely here if the application
* is not listening for the signal or port notifications.
*/
static void
send_notification(notif_param_t *npp)
{
SI_ASYNCIO, NULL);
if (npp->np_lio_signo)
SI_ASYNCIO, NULL);
else if (npp->np_lio_port >= 0)
}
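/*
 * Sketch of the two delivery mechanisms referred to above: a queued
 * signal for SIGEV_SIGNAL and an event-port wakeup for SIGEV_PORT.
 * The structure and field names are hypothetical and the sketch assumes
 * <signal.h> and <port.h>; the library itself goes through private
 * interfaces such as _port_dispatch().
 */
typedef struct {
	int	en_signo;	/* > 0: deliver this signal */
	int	en_port;	/* >= 0: post to this event port */
	void	*en_user;	/* user cookie for either mechanism */
} example_notify_t;

static void
example_send_notification(const example_notify_t *np)
{
	union sigval sv;

	if (np->en_signo > 0) {
		sv.sival_ptr = np->en_user;
		(void) sigqueue(getpid(), np->en_signo, sv);
	} else if (np->en_port >= 0) {
		(void) port_send(np->en_port, 0, np->en_user);
	}
}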
/*
* Asynchronous notification worker.
*/
void *
_aio_do_notify(void *arg)
{
	/*
	 * This isn't really necessary.  All signals are blocked.
	 */
	if (pthread_setspecific(_aio_key, arg) != 0)
		aio_panic("_aio_do_notify, pthread_setspecific()");
/*
* Notifications are never cancelled.
* All signals remain blocked, forever.
*/
for (;;) {
aio_panic("_aio_do_notify: _aio_idle() failed");
}
}
/* NOTREACHED */
return (NULL);
}
/*
* Do the completion semantics for a request that was either canceled
* by _aio_cancel_req() or was completed by _aio_do_request().
*/
static void
_aiodone(aio_req_t *reqp, ssize_t retval, int error)
{
	aio_result_t *resultp = reqp->req_resultp;
	notif_param_t np;
int notify = 0;
int sigev_none;
int sigev_signal;
int sigev_thread;
int sigev_port;
/*
* We call _aiodone() only for Posix I/O.
*/
sigev_none = 0;
sigev_signal = 0;
sigev_thread = 0;
sigev_port = 0;
	np.np_lio_signo = 0;
	switch (reqp->req_sigevent.sigev_notify) {
	case SIGEV_NONE:
sigev_none = 1;
break;
case SIGEV_SIGNAL:
sigev_signal = 1;
break;
case SIGEV_THREAD:
sigev_thread = 1;
break;
case SIGEV_PORT:
sigev_port = 1;
break;
default:
aio_panic("_aiodone: improper sigev_notify");
break;
}
/*
* Figure out the notification parameters while holding __aio_mutex.
* Actually perform the notifications after dropping __aio_mutex.
* This allows us to sleep for a long time (if the notifications
* incur delays) without impeding other async I/O operations.
*/
if (sigev_signal) {
notify = 1;
} else if (sigev_thread | sigev_port) {
notify = 1;
}
if (sigev_none) {
} else {
(void) _aio_hash_del(resultp);
}
/*
* __aio_waitn() sets AIO_WAIT_INPROGRESS and
* __aio_suspend() increments "_aio_kernel_suspend"
*
* _kaio(AIONOTIFY) awakes the corresponding function
* in the kernel; then the corresponding __aio_waitn() or
* __aio_suspend() function could reap the recently
*/
/*
* If all the lio requests have completed,
* prepare to notify the waiting thread.
*/
int waiting = 0;
notify = 1;
} else { /* thread or port */
notify = 1;
}
if (waiting == 0)
} else {
head->lio_refcnt--;
}
}
/*
* The request is completed; now perform the notifications.
*/
if (notify) {
/*
* We usually put the request on the notification
* queue because we don't want to block and delay
* other operations behind us in the work queue.
* Also we must never block on a cancel notification
* because we are being called from an application
* thread in this case and that could lead to deadlock
 * if no other thread is receiving notifications.
*/
} else {
/*
* We already put the request on the done queue,
* so we can't queue it to the notification queue.
* Just do the notification directly.
*/
}
}
}
/*
* Delete fsync requests from list head until there is
* only one left. Return 0 when there is only one,
* otherwise return a non-zero value.
*/
static int
{
int rval = 0;
head->lio_refcnt--;
return (1);
}
if (head->lio_canned)
rval = 1;
}
head->lio_refcnt--;
if (rval != 0)
return (rval);
}
/*
* A worker is set idle when its work queue is empty.
* The worker checks again that it has no more work
* and then goes to sleep waiting for more work.
*/
int
_aio_idle(aio_worker_t *aiowp)
{
int error = 0;
if (aiowp->work_count1 == 0) {
/*
* A cancellation handler is not needed here.
* aio worker threads are never cancelled via pthread_cancel().
*/
&aiowp->work_qlock1);
/*
* The idle flag is normally cleared before worker is awakened
 * by _aio_req_add().  On error (EINTR), we clear it ourselves.
*/
if (error)
aiowp->work_idleflg = 0;
}
return (error);
}
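/*
 * Sketch of the idle/wakeup handshake described above: the worker
 * re-checks its queue length under the queue lock, sets an idle flag
 * and waits; whoever queues new work clears the flag and signals.  The
 * variables below are hypothetical stand-ins for the work_qlock1,
 * work_idleflg and queue-count members of aio_worker_t.
 */
static mutex_t example_qlock = DEFAULTMUTEX;
static cond_t example_idle_cv = DEFAULTCV;
static int example_queue_len = 0;
static int example_idle_flg = 0;

static void
example_worker_idle(void)
{
	(void) mutex_lock(&example_qlock);
	if (example_queue_len == 0) {		/* re-check under the lock */
		example_idle_flg = 1;
		while (example_idle_flg)
			(void) cond_wait(&example_idle_cv, &example_qlock);
	}
	(void) mutex_unlock(&example_qlock);
}

static void
example_queue_work(void)
{
	(void) mutex_lock(&example_qlock);
	example_queue_len++;
	if (example_idle_flg) {
		example_idle_flg = 0;		/* waker clears the idle flag */
		(void) cond_signal(&example_idle_cv);
	}
	(void) mutex_unlock(&example_qlock);
}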
/*
* A worker's completed AIO requests are placed onto a global
* done queue. The application is only sent a SIGIO signal if
* the process has a handler enabled and it is not waiting via
* aiowait().
*/
static void
_aio_work_done(aio_worker_t *aiowp)
{
aiowp->work_done1 = 0;
/*
* Request got cancelled after it was marked done. This can
* happen because _aio_finish_request() marks it AIO_REQ_DONE
* and drops all locks. Don't add the request to the done
* queue and just discard it.
*/
if (_aio_outstand_cnt == 0 && _aiowait_flag) {
} else {
}
return;
}
_aio_donecnt++;
ASSERT(_aio_donecnt > 0 &&
_aio_outstand_cnt >= 0 &&
_aio_req_done_cnt >= 0);
if (_aio_done_tail == NULL) {
} else {
}
if (_aiowait_flag) {
} else {
if (_sigio_enabled)
}
}
/*
* The done queue consists of AIO requests that are in either the
* AIO_REQ_DONE or AIO_REQ_CANCELED state. Requests that were cancelled
* are discarded. If the done queue is empty then NULL is returned.
* Otherwise the address of a done aio_result_t is returned.
*/
aio_result_t *
_aio_req_done(void)
{
ASSERT(_aio_donecnt > 0);
_aio_donecnt--;
return (resultp);
}
/* is queue empty? */
return ((aio_result_t *)-1);
}
return (NULL);
}
/*
* Set the return and errno values for the application's use.
*
* For the Posix interfaces, we must set the return value first followed
* by the errno value because the Posix interfaces allow for a change
* in the errno value from EINPROGRESS to something else to signal
* the completion of the asynchronous request.
*
* The opposite is true for the Solaris interfaces. These allow for
* a change in the return value from AIO_INPROGRESS to something else
* to signal the completion of the asynchronous request.
*/
void
_aio_set_result(aio_req_t *reqp, ssize_t retval, int error)
{
} else {
}
}
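/*
 * Sketch of the ordering rule described above, for a hypothetical
 * result structure.  A store barrier between the two assignments keeps
 * a polling thread from seeing the "completion" field change before the
 * value it is supposed to validate has been written.
 */
typedef struct {
	ssize_t	ex_return;	/* AIO_INPROGRESS until complete (Solaris) */
	int	ex_errno;	/* EINPROGRESS until complete (POSIX) */
} example_result_t;

static void
example_set_result_posix(example_result_t *rp, ssize_t retval, int error)
{
	/* POSIX: the errno field signals completion, so store it last */
	rp->ex_return = retval;
	membar_producer();
	rp->ex_errno = error;
}

static void
example_set_result_solaris(example_result_t *rp, ssize_t retval, int error)
{
	/* Solaris: the return field signals completion, so store it last */
	rp->ex_errno = error;
	membar_producer();
	rp->ex_return = retval;
}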
/*
* Add an AIO request onto the next work queue.
* A circular list of workers is used to choose the next worker.
*/
void
_aio_req_add(aio_req_t *reqp, aio_worker_t **nextworker, int mode)
{
	aio_worker_t *aiowp;
	int load_bal_flg = 1;
	int found;
/*
* Try to acquire the next worker's work queue. If it is locked,
* then search the list of workers until a queue is found unlocked,
* or until the list is completely traversed at which point another
* worker will be created.
*/
switch (mode) {
case AIOREAD:
case AIOWRITE:
case AIOAREAD:
case AIOAWRITE:
#if !defined(_LP64)
case AIOAREAD64:
case AIOAWRITE64:
#endif
/* try to find an idle worker */
found = 0;
do {
if (aiowp->work_idleflg) {
found = 1;
break;
}
}
if (found) {
aiowp->work_minload1++;
break;
}
/* try to acquire some worker's queue lock */
do {
found = 1;
break;
}
/*
* Create more workers when the workers appear overloaded.
* Either all the workers are busy draining their queues
* or no worker's queue lock could be acquired.
*/
if (!found) {
if (_aio_worker_cnt < _max_workers) {
aio_panic("_aio_req_add: add worker");
return;
}
/*
* No worker available and we have created
* _max_workers, keep going through the
* list slowly until we get a lock
*/
/*
* give someone else a chance
*/
_aio_delay(1);
}
}
if (_aio_worker_cnt < _max_workers &&
aio_panic("aio_req_add: add worker");
return;
}
aiowp->work_minload1++;
break;
case AIOFSYNC:
case AIONOTIFY:
load_bal_flg = 0;
break;
default:
aio_panic("_aio_req_add: invalid mode");
break;
}
/*
* Put request onto worker's work queue.
*/
} else {
}
/*
* Awaken worker if it is not currently active.
*/
aiowp->work_idleflg = 0;
}
if (load_bal_flg) {
}
}
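/*
 * Sketch of the load-balancing walk described above: starting from a
 * "next worker" hint, try each worker's queue lock once around the
 * circular list; if every queue is busy the caller creates another
 * worker (up to _max_workers) or retries after a short delay.  The
 * structure below is a hypothetical stand-in for aio_worker_t.
 */
typedef struct example_worker {
	struct example_worker	*w_forw;	/* circular list linkage */
	mutex_t			w_lock;
} example_worker_t;

static example_worker_t *
example_pick_worker(example_worker_t *start)
{
	example_worker_t *wp = start;

	do {
		if (mutex_trylock(&wp->w_lock) == 0)
			return (wp);		/* caller holds w_lock */
		wp = wp->w_forw;
	} while (wp != start);
	return (NULL);		/* all queues busy */
}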
/*
* Get an AIO request for a specified worker.
* If the work queue is empty, return NULL.
*/
aio_req_t *
_aio_req_get(aio_worker_t *aiowp)
{
	aio_req_t *reqp;
/*
* Remove a POSIX request from the queue; the
 * request queue is a singly linked list
* with a previous pointer. The request is
* removed by updating the previous pointer.
*
* Non-posix requests are left on the queue
* to eventually be placed on the done queue.
*/
} else {
}
} else {
aiowp->work_done1++;
}
aiowp->work_count1--;
case AIOREAD:
case AIOWRITE:
case AIOAREAD:
case AIOAWRITE:
#if !defined(_LP64)
case AIOAREAD64:
case AIOAWRITE64:
#endif
aiowp->work_minload1--;
break;
}
}
return (reqp);
}
static void
_aio_req_del(aio_worker_t *aiowp, aio_req_t *reqp, int ostate)
{
if (ostate != AIO_REQ_QUEUED)
return;
}
/*
* if this is the first request on the queue, move
* the lastrp pointer forward.
*/
/*
* if this request is pointed by work_head1, then
* make work_head1 point to the last request that is
* present on the queue.
*/
/*
* work_prev1 is used only in non posix case and it
* points to the current AIO_REQ_INPROGRESS request.
* If work_prev1 points to this request which is being
* deleted, make work_prev1 NULL and set work_done1
* to 0.
*
* A worker thread can be processing only one request
* at a time.
*/
aiowp->work_done1--;
}
if (ostate == AIO_REQ_QUEUED) {
aiowp->work_count1--;
aiowp->work_minload1--;
}
return;
}
}
/* NOTREACHED */
}
static void
_aio_enq_doneq(aio_req_t *reqp)
{
if (_aio_doneq == NULL) {
_aio_doneq = reqp;
} else {
}
}
/*
* caller owns the _aio_mutex
*/
aio_req_t *
_aio_req_remove(aio_req_t *reqp)
{
return (NULL);
if (reqp) {
/* request in done queue */
if (_aio_doneq == reqp)
if (_aio_doneq == reqp) {
/* only one request on queue */
_aio_doneq = NULL;
} else {
}
/* only one request on queue */
_aio_doneq = NULL;
} else {
}
}
if (reqp) {
}
return (reqp);
}
/*
* An AIO request is identified by an aio_result_t pointer. The library
* maps this aio_result_t pointer to its internal representation using a
* hash table. This function adds an aio_result_t pointer to the hash table.
*/
static int
_aio_hash_insert(aio_result_t *resultp, aio_req_t *reqp)
{
return (-1);
}
}
return (0);
}
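/*
 * Sketch of the hashing scheme described above: the aio_result_t
 * pointer itself is hashed into one of the buckets and requests are
 * chained per bucket under a per-bucket lock.  The bucket layout and
 * hash expression here are illustrative, not the library's exact ones.
 */
typedef struct example_hash_ent {
	struct example_hash_ent	*he_next;
	void			*he_key;	/* the aio_result_t pointer */
} example_hash_ent_t;

typedef struct {
	mutex_t			hb_lock;
	example_hash_ent_t	*hb_chain;
} example_hash_bucket_t;

#define	EXAMPLE_HASHSZ		256	/* assumed power of two */
#define	EXAMPLE_AIOHASH(p)	\
	((((uintptr_t)(p) >> 4) ^ ((uintptr_t)(p) >> 12)) & \
	(EXAMPLE_HASHSZ - 1))

static int
example_hash_insert(example_hash_bucket_t *table, void *key,
	example_hash_ent_t *ent)
{
	example_hash_bucket_t *bp = &table[EXAMPLE_AIOHASH(key)];
	example_hash_ent_t *hep;

	(void) mutex_lock(&bp->hb_lock);
	for (hep = bp->hb_chain; hep != NULL; hep = hep->he_next) {
		if (hep->he_key == key) {
			(void) mutex_unlock(&bp->hb_lock);
			return (-1);		/* duplicate result pointer */
		}
	}
	ent->he_key = key;
	ent->he_next = bp->hb_chain;
	bp->hb_chain = ent;
	(void) mutex_unlock(&bp->hb_lock);
	return (0);
}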
/*
* Remove an entry from the hash table.
*/
aio_req_t *
_aio_hash_del(aio_result_t *resultp)
{
break;
}
}
}
return (next);
}
/*
* find an entry in the hash table
*/
aio_req_t *
_aio_hash_find(aio_result_t *resultp)
{
break;
}
}
return (next);
}
/*
* AIO interface for POSIX
*/
int
_aio_rw(aiocb_t *aiocbp, aio_lio_t *lio_head, aio_worker_t **nextworker,
	int mode, int flg)
{
int kerr;
return (-1);
}
/* initialize kaio */
if (!_kaio_ok)
_kaio_init();
/*
* If we have been called because a list I/O
 * kaio() failed, we don't want to repeat the
* system call
*/
/*
* Try kernel aio first.
* fall back to the thread implementation.
*/
if (kerr == 0)
return (0);
return (-1);
}
}
}
return (-1);
return (-1);
}
/*
* If an LIO request, add the list head to the aio request
*/
reqp->req_largefile = 0;
/*
* Reuse the sigevent structure to contain the port number
* and the user value. Same for SIGEV_THREAD, below.
*/
/*
* The sigevent structure contains the port number
* and the user value. Same for SIGEV_PORT, above.
*/
}
if ((flg & AIO_NO_DUPS) &&
aio_panic("_aio_rw(): request already in hash table");
return (-1);
}
return (0);
}
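/*
 * Usage sketch for the POSIX interface implemented above
 * (aio_read(3C)/aio_error(3C)/aio_return(3C)), polling for completion.
 * It assumes <aio.h>; error handling and the polling interval are
 * simplified, so this is an example, not library code.
 */
static ssize_t
example_posix_read(int fd, void *buf, size_t len, off_t off)
{
	aiocb_t cb;
	int err;

	(void) memset(&cb, 0, sizeof (cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = len;
	cb.aio_offset = off;
	cb.aio_sigevent.sigev_notify = SIGEV_NONE;

	if (aio_read(&cb) != 0)
		return (-1);
	while ((err = aio_error(&cb)) == EINPROGRESS)
		(void) usleep(1000);		/* crude polling interval */
	if (err != 0) {
		errno = err;
		return (-1);
	}
	return (aio_return(&cb));
}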
#if !defined(_LP64)
/*
* 64-bit AIO interface for POSIX
*/
int
_aio_rw64(aiocb64_t *aiocbp, aio_lio_t *lio_head, aio_worker_t **nextworker,
	int mode, int flg)
{
int kerr;
return (-1);
}
/* initialize kaio */
if (!_kaio_ok)
_kaio_init();
/*
* If we have been called because a list I/O
 * kaio() failed, we don't want to repeat the
* system call
*/
/*
* Try kernel aio first.
* fall back to the thread implementation.
*/
if (kerr == 0)
return (0);
return (-1);
}
}
}
return (-1);
return (-1);
}
/*
* If an LIO request, add the list head to the aio request
*/
}
if ((flg & AIO_NO_DUPS) &&
aio_panic("_aio_rw64(): request already in hash table");
return (-1);
}
return (0);
}
#endif /* !defined(_LP64) */