/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/types.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/uio.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/aio_impl.h>
#include <sys/epm.h>
#include <sys/fs/snode.h>
#include <sys/siginfo.h>
#include <sys/cpuvar.h>
#include <sys/tnf_probe.h>
#include <sys/conf.h>
#include <sys/sdt.h>
int aphysio(int (*)(), int (*)(), dev_t, int, void (*)(), struct aio_req *);
void aio_done(struct buf *);
void aphysio_unlock(aio_req_t *);
void aio_cleanup(int);
void aio_cleanup_exit(void);

/*
 * private functions
 */
static void aio_sigev_send(proc_t *, sigqueue_t *);
static void aio_hash_delete(aio_t *, aio_req_t *);
static void aio_lio_free(aio_t *, aio_lio_t *);
static int aio_cleanup_cleanupq(aio_t *, aio_req_t *, int);
static int aio_cleanup_notifyq(aio_t *, aio_req_t *, int);
static void aio_cleanup_pollq(aio_t *, aio_req_t *, int);
static void aio_cleanup_portq(aio_t *, aio_req_t *, int);
/*
* async version of physio() that doesn't wait synchronously
* for the driver's strategy routine to complete.
*/
int
aphysio(
	int (*strategy)(struct buf *),
	int (*cancel)(struct buf *),
	dev_t dev,
	int rw,
	void (*mincnt)(struct buf *),
	struct aio_req *aio)
{
	struct uio *uio = aio->aio_uio;
	aio_req_t *reqp = (aio_req_t *)aio->aio_private;
	struct buf *bp = &reqp->aio_req_buf;
	struct iovec *iov;
	struct as *as;
	char *a;
	int error;
	size_t c;
	struct page **pplist;
	struct dev_ops *ops = devopsp[getmajor(dev)];

if (uio->uio_loffset < 0)
return (EINVAL);
#ifdef _ILP32
/*
* For 32-bit kernels, check against SPEC_MAXOFFSET_T which represents
* the maximum size that can be supported by the IO subsystem.
* XXX this code assumes a D_64BIT driver.
*/
	if (uio->uio_loffset > SPEC_MAXOFFSET_T)
		return (EINVAL);
#endif /* _ILP32 */
	if (rw == B_READ) {
		CPU_STATS_ADD_K(sys, phread, 1);
	} else {
		CPU_STATS_ADD_K(sys, phwrite, 1);
	}

	iov = uio->uio_iov;
	sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
	sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);

	bp->b_error = 0;
	bp->b_flags = B_BUSY | B_PHYS | B_ASYNC | rw;
	bp->b_edev = dev;
	bp->b_dev = cmpdev(dev);
	bp->b_lblkno = btodt(uio->uio_loffset);
	bp->b_offset = uio->uio_loffset;
	(void) ops->devo_getinfo(NULL, DDI_INFO_DEVT2DEVINFO,
	    (void *)bp->b_edev, (void **)&bp->b_dip);
	/*
	 * Clustering: Clustering can set the b_iodone, b_forw and
	 * b_proc fields to cluster-specific values.
	 */
	if (bp->b_iodone == NULL) {
		bp->b_iodone = (int (*)()) aio_done;
		/* b_forw points at an aio_req_t structure */
		bp->b_forw = (struct buf *)reqp;
		bp->b_proc = curproc;
	}
	a = bp->b_un.b_addr = iov->iov_base;
	c = bp->b_bcount = iov->iov_len;

	(*mincnt)(bp);
	if (bp->b_bcount != iov->iov_len)
		return (ENOTSUP);

	as = bp->b_proc->p_as;

	error = as_pagelock(as, &pplist, a,
	    c, rw == B_READ ? S_WRITE : S_READ);
	if (error != 0) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
		bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
		return (error);
	}
	reqp->aio_req_flags |= AIO_PAGELOCKDONE;
	bp->b_shadow = pplist;
	if (pplist != NULL) {
		bp->b_flags |= B_SHADOW;
	}
"aphysio: cancellation not supported, use anocancel");
}
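
/*
 * Illustrative sketch (not part of the original file): how a character
 * driver's aread(9E)/awrite(9E) entry points typically hand a request to
 * aphysio() above.  The xx_* names are hypothetical; anocancel() and
 * minphys() are the conventional cancel and mincnt arguments.  Guarded so
 * it is never compiled into a real build.
 */
#ifdef AIO_APHYSIO_EXAMPLE
static int
xx_aread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	/* queue the I/O and return; completion arrives later via biodone() */
	return (aphysio(xx_strategy, anocancel, dev, B_READ, minphys, aio));
}

static int
xx_awrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (aphysio(xx_strategy, anocancel, dev, B_WRITE, minphys, aio));
}
#endif	/* AIO_APHYSIO_EXAMPLE */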
/*ARGSUSED*/
int
anocancel(struct buf *bp)
{
return (ENXIO);
}
/*
* Called from biodone().
* Notify process that a pending AIO has finished.
*/
/*
* Clustering: This function is made non-static as it is used
* by clustering s/w as contract private interface.
*/
void
aio_done(struct buf *bp)
{
	proc_t *p;
	struct as *as;
	aio_req_t *reqp;
	aio_lio_t *head = NULL;
	aio_t *aiop;
	sigqueue_t *sigev = NULL;
	sigqueue_t *lio_sigev = NULL;
	port_kevent_t *pkevp = NULL;
	port_kevent_t *lio_pkevp = NULL;
	int fd;
	int cleanupqflag;
	int pollqflag;
	int portevpend;
	void (*func)();
	int use_port = 0;
	int reqp_flags = 0;
	int send_signal = 0;
	p = bp->b_proc;
	as = p->p_as;
	reqp = (aio_req_t *)bp->b_forw;
	fd = reqp->aio_req_fd;

	/*
	 * mapout earlier so that more kmem is available when aio is
	 * heavily used. bug #1262082
	 */
	if (bp->b_flags & B_REMAPPED)
		bp_mapout(bp);

	/* decrement fd's ref count by one, now that aio request is done. */
	areleasef(fd, P_FINFO(p));

	aiop = p->p_aio;
	ASSERT(aiop != NULL);

	mutex_enter(&aiop->aio_portq_mutex);
	mutex_enter(&aiop->aio_mutex);
	ASSERT(aiop->aio_pending > 0);
	ASSERT(reqp->aio_req_flags & AIO_PENDING);
	aiop->aio_pending--;
	reqp->aio_req_flags &= ~AIO_PENDING;
	reqp_flags = reqp->aio_req_flags;
	if ((pkevp = reqp->aio_req_portkev) != NULL) {
		/* Event port notification is desired for this transaction */
		if (reqp->aio_req_flags & AIO_CLOSE_PORT) {
			/*
			 * The port is being closed and it is waiting for
			 * pending asynchronous I/O transactions to complete.
			 */
			portevpend = --aiop->aio_portpendcnt;
			aio_deq(&aiop->aio_portpending, reqp);
			aio_enq(&aiop->aio_portq, reqp, 0);
			mutex_exit(&aiop->aio_mutex);
			mutex_exit(&aiop->aio_portq_mutex);
			port_send_event(pkevp);
			if (portevpend == 0)
				cv_broadcast(&aiop->aio_portcv);
			return;
		}
		if (aiop->aio_flags & AIO_CLEANUP) {
			/*
			 * aio_cleanup_thread() is waiting for completion of
			 * transactions.
			 */
			mutex_enter(&as->a_contents);
			aio_deq(&aiop->aio_portpending, reqp);
			aio_enq(&aiop->aio_portcleanupq, reqp, 0);
			cv_signal(&aiop->aio_cleanupcv);
			mutex_exit(&as->a_contents);
			mutex_exit(&aiop->aio_mutex);
			mutex_exit(&aiop->aio_portq_mutex);
			return;
		}

		aio_deq(&aiop->aio_portpending, reqp);
		aio_enq(&aiop->aio_portq, reqp, 0);

		use_port = 1;
	} else {
		/*
		 * when the AIO_CLEANUP flag is enabled for this
		 * process, or when the AIO_POLL bit is set for
		 * this request, special handling is required.
		 * otherwise the request is put onto the doneq.
		 */
		cleanupqflag = (aiop->aio_flags & AIO_CLEANUP);
		pollqflag = (reqp->aio_req_flags & AIO_POLL);
		if (cleanupqflag | pollqflag) {

			if (cleanupqflag)
				mutex_enter(&as->a_contents);
			/*
			 * requests with their AIO_POLL bit set are put
			 * on the pollq, requests with sigevent structures
			 * or with listio heads are put on the notifyq, and
			 * the remaining requests don't require any special
			 * cleanup handling, so they're put onto the default
			 * cleanupq.
			 */
			if (pollqflag)
				aio_enq(&aiop->aio_pollq, reqp, AIO_POLLQ);
			else if (reqp->aio_req_sigqp || reqp->aio_req_lio)
				aio_enq(&aiop->aio_notifyq, reqp, AIO_NOTIFYQ);
			else
				aio_enq(&aiop->aio_cleanupq, reqp,
				    AIO_CLEANUPQ);

			if (cleanupqflag) {
				cv_signal(&aiop->aio_cleanupcv);
				mutex_exit(&as->a_contents);
				mutex_exit(&aiop->aio_mutex);
				mutex_exit(&aiop->aio_portq_mutex);
			} else {
				ASSERT(pollqflag);
				/* block aio_cleanup_exit until we're done */
				aiop->aio_flags |= AIO_DONE_ACTIVE;
				mutex_exit(&aiop->aio_mutex);
				mutex_exit(&aiop->aio_portq_mutex);
				/*
				 * let the cleanup processing happen from an
				 * AST by setting an AST on all threads in
				 * this process
				 */
				mutex_enter(&p->p_lock);
				set_proc_ast(p);
				mutex_exit(&p->p_lock);

				mutex_enter(&aiop->aio_mutex);
				/* wakeup anybody waiting in aiowait() */
				cv_broadcast(&aiop->aio_waitcv);

				/* wakeup aio_cleanup_exit if needed */
				if (aiop->aio_flags & AIO_CLEANUP)
					cv_signal(&aiop->aio_cleanupcv);
				aiop->aio_flags &= ~AIO_DONE_ACTIVE;
				mutex_exit(&aiop->aio_mutex);
			}
			return;
		}
		/*
		 * save req's sigevent pointer, and check its
		 * value after releasing aio_mutex lock.
		 */
		sigev = reqp->aio_req_sigqp;
		reqp->aio_req_sigqp = NULL;

		/* put request on done queue. */
		aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
	} /* portkevent */
	/*
	 * when list IO notification is enabled, a notification or
	 * signal is sent only when all entries in the list are done.
	 */
	if ((head = reqp->aio_req_lio) != NULL) {
		ASSERT(head->lio_refcnt > 0);
		if (--head->lio_refcnt == 0) {
			/*
			 * save lio's sigevent pointer, and check
			 * its value after releasing aio_mutex lock.
			 */
			lio_sigev = head->lio_sigqp;
			head->lio_sigqp = NULL;
			cv_signal(&head->lio_notify);
			if (head->lio_port >= 0 &&
			    (lio_pkevp = head->lio_portkev) != NULL)
				head->lio_port = -1;
		}
	}
	/*
	 * if AIO_WAITN is set, send the wakeup only when the
	 * required number of I/Os has finished or when all
	 * I/Os are done.
	 */
	if (aiop->aio_flags & AIO_WAITN) {
		if (aiop->aio_waitncnt > 0)
			aiop->aio_waitncnt--;
		if (aiop->aio_pending == 0 ||
		    aiop->aio_waitncnt == 0)
			cv_broadcast(&aiop->aio_waitcv);
	} else {
		cv_broadcast(&aiop->aio_waitcv);
	}
	/*
	 * No need to set this flag for pollq, portq, lio requests.
	 * If this is an old Solaris aio request, and the process has
	 * a SIGIO signal handler enabled, then send a SIGIO signal.
	 */
	if (!sigev && !use_port && head == NULL &&
	    (reqp->aio_req_flags & AIO_SOLARIS) &&
	    (func = PTOU(p)->u_signal[SIGIO - 1]) != SIG_DFL &&
	    (func != SIG_IGN)) {
		send_signal = 1;
		reqp->aio_req_flags |= AIO_SIGNALLED;
	}
	/*
	 * Could the cleanup thread be waiting for AIO with locked
	 * resources to finish?
	 * Ideally in that case the cleanup thread should block on
	 * cleanupcv, but there is a window where it could miss a new
	 * aio request that sneaked in.
	 */
	mutex_exit(&aiop->aio_mutex);
	if ((aiop->aio_flags & AIO_CLEANUP) && (reqp_flags & AIO_PAGELOCKDONE))
		cv_broadcast(&aiop->aio_cleanupcv);

	mutex_exit(&aiop->aio_portq_mutex);

	if (sigev)
		aio_sigev_send(p, sigev);
	else if (send_signal)
		psignal(p, SIGIO);

	if (pkevp)
		port_send_event(pkevp);
	if (lio_sigev)
		aio_sigev_send(p, lio_sigev);
	if (lio_pkevp)
		port_send_event(lio_pkevp);
}
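
/*
 * Illustrative sketch (not part of the original file): the lock-then-notify
 * discipline aio_done() uses above.  Notification state is captured while
 * holding aio_mutex, the lock is dropped, and only then is the notification
 * delivered, so delivery code cannot deadlock against the lock.  A minimal
 * standalone userland mirror of the same pattern, assuming pthreads; the
 * waiter_t type and names are hypothetical.  Never compiled into the kernel.
 */
#ifdef AIO_NOTIFY_EXAMPLE
#include <pthread.h>
#include <stddef.h>

typedef struct waiter {
	pthread_mutex_t	lock;
	void		(*notify)(void *);	/* consumed exactly once */
	void		*arg;
} waiter_t;

static void
waiter_complete(waiter_t *w)
{
	void (*notify)(void *);
	void *arg;

	pthread_mutex_lock(&w->lock);
	notify = w->notify;		/* capture under the lock ... */
	arg = w->arg;
	w->notify = NULL;
	pthread_mutex_unlock(&w->lock);

	if (notify != NULL)		/* ... deliver after dropping it */
		notify(arg);
}
#endif	/* AIO_NOTIFY_EXAMPLE */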
/*
 * send a queued signal to the specified process when
 * the event signal is non-NULL.
 */
static void
aio_sigev_send(proc_t *p, sigqueue_t *sigev)
{
	ASSERT(sigev != NULL);

	mutex_enter(&p->p_lock);
	sigaddqa(p, NULL, sigev);
	mutex_exit(&p->p_lock);
}
/*
* special case handling for zero length requests. the aio request
* short circuits the normal completion path since all that's required
* to complete this request is to copyout a zero to the aio request's
* return value.
*/
void
aio_zerolen(aio_req_t *reqp)
{
	struct buf *bp = &reqp->aio_req_buf;

	reqp->aio_req_flags |= AIO_ZEROLEN;

	bp->b_forw = (struct buf *)reqp;
	bp->b_proc = curproc;

	bp->b_resid = 0;
	bp->b_flags = 0;

	aio_done(bp);
}
/*
* unlock pages previously locked by as_pagelock
*/
void
aphysio_unlock(aio_req_t *reqp)
{
	struct buf *bp;
	struct iovec *iov;
	int flags;

	if (reqp->aio_req_flags & AIO_PHYSIODONE)
		return;

	reqp->aio_req_flags |= AIO_PHYSIODONE;

	if (reqp->aio_req_flags & AIO_ZEROLEN)
		return;

	bp = &reqp->aio_req_buf;
	iov = reqp->aio_req_uio.uio_iov;
	flags = (((bp->b_flags & B_READ) == B_READ) ? S_WRITE : S_READ);
	if (reqp->aio_req_flags & AIO_PAGELOCKDONE) {
		as_pageunlock(bp->b_proc->p_as,
		    bp->b_flags & B_SHADOW ? bp->b_shadow : NULL,
		    iov->iov_base, iov->iov_len, flags);
		reqp->aio_req_flags &= ~AIO_PAGELOCKDONE;
	}
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_SHADOW);
	bp->b_flags |= B_DONE;
}
/*
 * deletes a request's id from the hash table of outstanding io.
 */
static void
aio_hash_delete(aio_t *aiop, aio_req_t *reqp)
{
	long index;
	aio_result_t *resultp = reqp->aio_req_resultp;
	aio_req_t *current;
	aio_req_t **nextp;

	index = AIO_HASH(resultp);
	nextp = (aiop->aio_hash + index);
	while ((current = *nextp) != NULL) {
		if (current->aio_req_resultp == resultp) {
			*nextp = current->aio_hash_next;
			return;
		}
		nextp = &current->aio_hash_next;
	}
}
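
/*
 * Illustrative sketch (not part of the original file): the pointer-to-pointer
 * chain walk used by aio_hash_delete() above, reduced to a self-contained
 * userland form.  Walking with the address of the previous "next" pointer
 * avoids a special case for unlinking the first element of a bucket.  The
 * node_t type and names are hypothetical; guarded out of real builds.
 */
#ifdef AIO_HASH_EXAMPLE
typedef struct node {
	int		key;
	struct node	*next;
} node_t;

static void
chain_delete(node_t **bucket, int key)
{
	node_t **nextp = bucket;
	node_t *cur;

	while ((cur = *nextp) != NULL) {
		if (cur->key == key) {
			*nextp = cur->next;	/* unlink, head or interior */
			return;
		}
		nextp = &cur->next;
	}
}
#endif	/* AIO_HASH_EXAMPLE */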
/*
* Put a list head struct onto its free list.
*/
static void
aio_lio_free(aio_t *aiop, aio_lio_t *head)
{
	ASSERT(MUTEX_HELD(&aiop->aio_mutex));

	if (head->lio_sigqp != NULL)
		kmem_free(head->lio_sigqp, sizeof (sigqueue_t));
	head->lio_next = aiop->aio_lio_free;
	aiop->aio_lio_free = head;
}
/*
* Put a reqp onto the freelist.
*/
void
aio_req_free(aio_t *aiop, aio_req_t *reqp)
{
	ASSERT(MUTEX_HELD(&aiop->aio_mutex));

	if (reqp->aio_req_portkev) {
		port_free_event(reqp->aio_req_portkev);
		reqp->aio_req_portkev = NULL;
	}

	if (reqp->aio_req_lio) {
		aio_lio_t *head = reqp->aio_req_lio;

		ASSERT(head->lio_nent > 0);
		if (--head->lio_nent == 0)
			aio_lio_free(aiop, head);
	}

	if (reqp->aio_req_sigqp != NULL) {
		kmem_free(reqp->aio_req_sigqp, sizeof (sigqueue_t));
		reqp->aio_req_sigqp = NULL;
	}

	reqp->aio_req_next = aiop->aio_free;
	reqp->aio_req_prev = NULL;
	aiop->aio_free = reqp;
	aiop->aio_outstanding--;
	if (aiop->aio_outstanding == 0)
		cv_broadcast(&aiop->aio_waitcv);
	aio_hash_delete(aiop, reqp);
}
/*
* Put a reqp onto the freelist.
*/
void
aio_req_free_port(aio_t *aiop, aio_req_t *reqp)
{
	ASSERT(MUTEX_HELD(&aiop->aio_mutex));

	reqp->aio_req_next = aiop->aio_free;
	reqp->aio_req_prev = NULL;
	aiop->aio_free = reqp;
	aiop->aio_outstanding--;
	aio_hash_delete(aiop, reqp);
}
/*
* Verify the integrity of a queue.
*/
#if defined(DEBUG)
static void
aio_verify_queue(aio_req_t *head,
    aio_req_t *entry_present, aio_req_t *entry_missing)
{
	aio_req_t *reqp;
	int found = 0;
	int present = 0;

	if (head != NULL) {
		reqp = head;
		do {
			if (entry_present == reqp)
				found++;
			if (entry_missing == reqp)
				present++;
		} while ((reqp = reqp->aio_req_next) != head);
	}

	ASSERT(entry_present == NULL || found == 1);
	ASSERT(entry_missing == NULL || present == 0);
}
#else
#define aio_verify_queue(x, y, z)
#endif
/*
* Put a request onto the tail of a queue.
*/
void
aio_enq(aio_req_t **qhead, aio_req_t *reqp, int qflg_new)
{
	aio_req_t *head;
	aio_req_t *prev;

	aio_verify_queue(*qhead, NULL, reqp);

	if ((head = *qhead) == NULL) {
		reqp->aio_req_next = reqp;
		reqp->aio_req_prev = reqp;
		*qhead = reqp;
	} else {
		reqp->aio_req_next = head;
		reqp->aio_req_prev = prev = head->aio_req_prev;
		prev->aio_req_next = reqp;
		head->aio_req_prev = reqp;
	}
	reqp->aio_req_flags |= qflg_new;
}
/*
* Remove a request from its queue.
*/
void
aio_deq(aio_req_t **qhead, aio_req_t *reqp)
{
	aio_verify_queue(*qhead, reqp, NULL);

	if (reqp->aio_req_next == reqp) {
		*qhead = NULL;
	} else {
		reqp->aio_req_prev->aio_req_next = reqp->aio_req_next;
		reqp->aio_req_next->aio_req_prev = reqp->aio_req_prev;
		if (*qhead == reqp)
			*qhead = reqp->aio_req_next;
	}
	reqp->aio_req_next = NULL;
	reqp->aio_req_prev = NULL;
}
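
/*
 * Illustrative sketch (not part of the original file): the circular
 * doubly-linked queue discipline of aio_enq()/aio_deq() above in a
 * standalone form.  An empty queue is a NULL head; a single element points
 * at itself.  The qnode_t type and names are hypothetical; guarded out of
 * real builds.
 */
#ifdef AIO_QUEUE_EXAMPLE
typedef struct qnode {
	struct qnode *next;
	struct qnode *prev;
} qnode_t;

static void
q_enq(qnode_t **qhead, qnode_t *np)
{
	qnode_t *head = *qhead;

	if (head == NULL) {
		np->next = np->prev = np;	/* first element */
		*qhead = np;
	} else {
		np->next = head;		/* append at the tail */
		np->prev = head->prev;
		head->prev->next = np;
		head->prev = np;
	}
}

static void
q_deq(qnode_t **qhead, qnode_t *np)
{
	if (np->next == np) {
		*qhead = NULL;			/* removing the last element */
	} else {
		np->prev->next = np->next;
		np->next->prev = np->prev;
		if (*qhead == np)
			*qhead = np->next;
	}
	np->next = np->prev = NULL;
}
#endif	/* AIO_QUEUE_EXAMPLE */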
/*
* concatenate a specified queue with the cleanupq. the specified
* queue is put onto the tail of the cleanupq. all elements on the
* specified queue should have their aio_req_flags field cleared.
*/
/*ARGSUSED*/
void
aio_cleanupq_concat(aio_t *aiop, aio_req_t *q2, int qflg)
{
	aio_req_t *cleanupqhead, *q2tail;
	aio_req_t *reqp = q2;

	do {
		ASSERT(reqp->aio_req_flags & qflg);
		reqp->aio_req_flags &= ~qflg;
		reqp->aio_req_flags |= AIO_CLEANUPQ;
	} while ((reqp = reqp->aio_req_next) != q2);

	cleanupqhead = aiop->aio_cleanupq;
	if (cleanupqhead == NULL)
		aiop->aio_cleanupq = q2;
	else {
		aio_req_t *cleanupqtail = cleanupqhead->aio_req_prev;

		q2tail = q2->aio_req_prev;
		cleanupqtail->aio_req_next = q2;
		q2->aio_req_prev = cleanupqtail;
		q2tail->aio_req_next = cleanupqhead;
		cleanupqhead->aio_req_prev = q2tail;
	}
}
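
/*
 * Illustrative sketch (not part of the original file): splicing one circular
 * doubly-linked list onto the tail of another, as aio_cleanupq_concat()
 * does above.  Four pointer updates perform the whole splice.  The cnode_t
 * type and names are hypothetical; guarded out of real builds.
 */
#ifdef AIO_SPLICE_EXAMPLE
typedef struct cnode {
	struct cnode *next;
	struct cnode *prev;
} cnode_t;

/* append circular list q2 to the tail of circular list *q1head */
static void
q_concat(cnode_t **q1head, cnode_t *q2)
{
	cnode_t *q1 = *q1head;
	cnode_t *q1tail, *q2tail;

	if (q1 == NULL) {
		*q1head = q2;
		return;
	}
	q1tail = q1->prev;
	q2tail = q2->prev;
	q1tail->next = q2;	/* old tail of q1 now precedes q2's head */
	q2->prev = q1tail;
	q2tail->next = q1;	/* q2's tail closes the circle back to q1 */
	q1->prev = q2tail;
}
#endif	/* AIO_SPLICE_EXAMPLE */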
/*
* cleanup aio requests that are on the per-process poll queue.
*/
void
aio_cleanup(int flag)
{
	aio_t *aiop = curproc->p_aio;
	aio_req_t *pollqhead, *cleanupqhead, *notifyqhead;
	aio_req_t *cleanupport;
	aio_req_t *portq = NULL;
	void (*func)();
	int signalled = 0;
	int qflag = 0;
	int exitflg;

	ASSERT(aiop != NULL);

	if (flag == AIO_CLEANUP_EXIT)
		exitflg = AIO_CLEANUP_EXIT;
	else
		exitflg = 0;

	/*
	 * We need to get the aio_cleanupq_mutex because we are calling
	 * aio_cleanup_cleanupq()
	 */
	mutex_enter(&aiop->aio_cleanupq_mutex);
	/*
	 * take all the requests off the cleanupq, the notifyq,
	 * and the pollq.
	 */
	mutex_enter(&aiop->aio_mutex);
	if ((cleanupqhead = aiop->aio_cleanupq) != NULL) {
		aiop->aio_cleanupq = NULL;
		qflag++;
	}
	if ((notifyqhead = aiop->aio_notifyq) != NULL) {
		aiop->aio_notifyq = NULL;
		qflag++;
	}
	if ((pollqhead = aiop->aio_pollq) != NULL) {
		aiop->aio_pollq = NULL;
		qflag++;
	}
	if (flag) {
		if ((portq = aiop->aio_portq) != NULL)
			qflag++;

		if ((cleanupport = aiop->aio_portcleanupq) != NULL) {
			aiop->aio_portcleanupq = NULL;
			qflag++;
		}
	}
	mutex_exit(&aiop->aio_mutex);
/*
* return immediately if cleanupq, pollq, and
* notifyq are all empty. someone else must have
* emptied them.
*/
	if (!qflag) {
		mutex_exit(&aiop->aio_cleanupq_mutex);
		return;
	}

	/*
	 * do cleanup for the various queues.
	 */
	if (cleanupqhead)
		signalled = aio_cleanup_cleanupq(aiop, cleanupqhead, exitflg);
	mutex_exit(&aiop->aio_cleanupq_mutex);
	if (notifyqhead)
		signalled = aio_cleanup_notifyq(aiop, notifyqhead, exitflg);
	if (pollqhead)
		aio_cleanup_pollq(aiop, pollqhead, exitflg);
	if (flag && (cleanupport || portq))
		aio_cleanup_portq(aiop, cleanupport, exitflg);

	if (exitflg)
		return;
/*
* If we have an active aio_cleanup_thread it's possible for
* this routine to push something on to the done queue after
* an aiowait/aiosuspend thread has already decided to block.
* This being the case, we need a cv_broadcast here to wake
* these threads up. It is simpler and cleaner to do this
* broadcast here than in the individual cleanup routines.
*/
	mutex_enter(&aiop->aio_mutex);
	/*
	 * If there has never been an old solaris aio request
	 * issued by this process, then do not send a SIGIO signal.
	 */
	if (!(aiop->aio_flags & AIO_SOLARIS_REQ))
		signalled = 1;
	cv_broadcast(&aiop->aio_waitcv);
	mutex_exit(&aiop->aio_mutex);

	/*
	 * Only if the process wasn't already signalled,
	 * determine if a SIGIO signal should be delivered.
	 */
	if (!signalled &&
	    (func = PTOU(curproc)->u_signal[SIGIO - 1]) != SIG_DFL &&
	    func != SIG_IGN)
		psignal(curproc, SIGIO);
}
/*
* Do cleanup for every element of the port cleanup queue.
*/
static void
aio_cleanup_portq(aio_t *aiop, aio_req_t *cleanupq, int exitflag)
{
	aio_req_t *reqp;
	aio_req_t *next;
	aio_req_t *headp;
	aio_lio_t *liop;

	/* first check the portq */
	if (exitflag || ((aiop->aio_flags & AIO_CLEANUP_PORT) == 0)) {
		mutex_enter(&aiop->aio_mutex);
		if (aiop->aio_flags & AIO_CLEANUP)
			aiop->aio_flags |= AIO_CLEANUP_PORT;
		mutex_exit(&aiop->aio_mutex);

		/*
		 * It is not allowed to hold locks during aphysio_unlock().
		 * The aio_done() interrupt function will try to acquire
		 * aio_mutex and aio_portq_mutex. Therefore we disconnect
		 * the portq list from the aiop for the duration of the
		 * aphysio_unlock() loop below.
		 */
		mutex_enter(&aiop->aio_portq_mutex);
		headp = aiop->aio_portq;
		aiop->aio_portq = NULL;
		mutex_exit(&aiop->aio_portq_mutex);
		if ((reqp = headp) != NULL) {
			do {
				next = reqp->aio_req_next;
				aphysio_unlock(reqp);
				if (exitflag) {
					mutex_enter(&aiop->aio_mutex);
					aio_req_free(aiop, reqp);
					mutex_exit(&aiop->aio_mutex);
				}
			} while ((reqp = next) != headp);
		}

		if (headp != NULL && exitflag == 0) {
			/* move unlocked requests back to the port queue */
			aio_req_t *newq;

			mutex_enter(&aiop->aio_portq_mutex);
			if ((newq = aiop->aio_portq) != NULL) {
				aio_req_t *headprev = headp->aio_req_prev;
				aio_req_t *newqprev = newq->aio_req_prev;

				headp->aio_req_prev = newqprev;
				newq->aio_req_prev = headprev;
				headprev->aio_req_next = newq;
				newqprev->aio_req_next = headp;
			}
			aiop->aio_portq = headp;
			cv_broadcast(&aiop->aio_portcv);
			mutex_exit(&aiop->aio_portq_mutex);
		}
	}

	/* now check the port cleanup queue */
	if ((reqp = cleanupq) == NULL)
		return;
	do {
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		if (exitflag) {
			mutex_enter(&aiop->aio_mutex);
			aio_req_free(aiop, reqp);
			mutex_exit(&aiop->aio_mutex);
		} else {
			mutex_enter(&aiop->aio_portq_mutex);
			aio_enq(&aiop->aio_portq, reqp, 0);
			mutex_exit(&aiop->aio_portq_mutex);
			port_send_event(reqp->aio_req_portkev);
			if ((liop = reqp->aio_req_lio) != NULL) {
				int send_event = 0;

				mutex_enter(&aiop->aio_mutex);
				ASSERT(liop->lio_refcnt > 0);
				if (--liop->lio_refcnt == 0) {
					if (liop->lio_port >= 0 &&
					    liop->lio_portkev) {
						liop->lio_port = -1;
						send_event = 1;
					}
				}
				mutex_exit(&aiop->aio_mutex);
				if (send_event)
					port_send_event(liop->lio_portkev);
			}
		}
	} while ((reqp = next) != cleanupq);
}
/*
* Do cleanup for every element of the cleanupq.
*/
static int
aio_cleanup_cleanupq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
	aio_req_t *reqp, *next;
	int signalled = 0;

	ASSERT(MUTEX_HELD(&aiop->aio_cleanupq_mutex));

	/*
	 * Since aio_req_done() or aio_req_find() use the HASH list to find
	 * the required requests, they could potentially take away elements
	 * if they are already done (AIO_DONEQ is set).
	 * The aio_cleanupq_mutex protects the queue for the duration of the
	 * loop from aio_req_done() and aio_req_find().
	 */
	if ((reqp = qhead) == NULL)
		return (0);
	do {
		ASSERT(reqp->aio_req_flags & AIO_CLEANUPQ);
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		mutex_enter(&aiop->aio_mutex);
		if (exitflg)
			aio_req_free(aiop, reqp);
		else
			aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
		if (!exitflg) {
			if (reqp->aio_req_flags & AIO_SIGNALLED)
				signalled++;
			else
				reqp->aio_req_flags |= AIO_SIGNALLED;
		}
		mutex_exit(&aiop->aio_mutex);
	} while ((reqp = next) != qhead);
	return (signalled);
}
/*
* do cleanup for every element of the notify queue.
*/
static int
aio_cleanup_notifyq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
	aio_req_t *reqp, *next;
	aio_lio_t *liohead;
	sigqueue_t *sigev, *lio_sigev;
	int signalled = 0;

	if ((reqp = qhead) == NULL)
		return (0);
	do {
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		if (exitflg) {
			mutex_enter(&aiop->aio_mutex);
			aio_req_free(aiop, reqp);
			mutex_exit(&aiop->aio_mutex);
		} else {
			lio_sigev = NULL;
			mutex_enter(&aiop->aio_mutex);
			aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
			sigev = reqp->aio_req_sigqp;
			reqp->aio_req_sigqp = NULL;
			if ((liohead = reqp->aio_req_lio) != NULL) {
				ASSERT(liohead->lio_refcnt > 0);
				if (--liohead->lio_refcnt == 0) {
					cv_signal(&liohead->lio_notify);
					lio_sigev = liohead->lio_sigqp;
					liohead->lio_sigqp = NULL;
				}
			}
			mutex_exit(&aiop->aio_mutex);
			if (sigev) {
				signalled++;
				aio_sigev_send(reqp->aio_req_buf.b_proc,
				    sigev);
			}
			if (lio_sigev) {
				signalled++;
				aio_sigev_send(reqp->aio_req_buf.b_proc,
				    lio_sigev);
			}
		}
	} while ((reqp = next) != qhead);
	return (signalled);
}
/*
* Do cleanup for every element of the poll queue.
*/
static void
aio_cleanup_pollq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
	aio_req_t *reqp, *next;

	/*
	 * As no other threads should be accessing the queue at this point,
	 * it isn't necessary to hold aio_mutex while we traverse its elements.
	 */
	if ((reqp = qhead) == NULL)
		return;
	do {
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		if (exitflg) {
			mutex_enter(&aiop->aio_mutex);
			aio_req_free(aiop, reqp);
			mutex_exit(&aiop->aio_mutex);
		} else {
			aio_copyout_result(reqp);
			mutex_enter(&aiop->aio_mutex);
			aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
			mutex_exit(&aiop->aio_mutex);
		}
	} while ((reqp = next) != qhead);
}
/*
* called by exit(). waits for all outstanding kaio to finish
* before the kaio resources are freed.
*/
void
aio_cleanup_exit(void)
{
	proc_t *p = curproc;
	aio_t *aiop = p->p_aio;
	aio_req_t *reqp, *next, *headp;
	aio_lio_t *nxtlio, *liop;

	/*
	 * wait for all outstanding kaio to complete. process
	 * is now single-threaded; no other kaio requests can
	 * happen once aio_pending is zero.
	 */
	mutex_enter(&aiop->aio_mutex);
	aiop->aio_flags |= AIO_CLEANUP;
	while ((aiop->aio_pending != 0) || (aiop->aio_flags & AIO_DONE_ACTIVE))
		cv_wait(&aiop->aio_cleanupcv, &aiop->aio_mutex);
	mutex_exit(&aiop->aio_mutex);

	/* cleanup the cleanup-thread queues. */
	aio_cleanup(AIO_CLEANUP_EXIT);

	/*
	 * Although this process is now single-threaded, we
	 * still need to protect ourselves against a race with
	 * aio_cleanup_dr_delete_memory().
	 */
	mutex_enter(&p->p_lock);

	/*
	 * free up the done queue's resources.
	 */
	if ((headp = aiop->aio_doneq) != NULL) {
		aiop->aio_doneq = NULL;
		reqp = headp;
		do {
			next = reqp->aio_req_next;
			aphysio_unlock(reqp);
			kmem_free(reqp, sizeof (struct aio_req_t));
		} while ((reqp = next) != headp);
	}

	/*
	 * release aio request freelist.
	 */
	for (reqp = aiop->aio_free; reqp != NULL; reqp = next) {
		next = reqp->aio_req_next;
		kmem_free(reqp, sizeof (struct aio_req_t));
	}

	/*
	 * release io list head freelist.
	 */
	for (liop = aiop->aio_lio_free; liop != NULL; liop = nxtlio) {
		nxtlio = liop->lio_next;
		kmem_free(liop, sizeof (aio_lio_t));
	}

	if (aiop->aio_iocb)
		kmem_free(aiop->aio_iocb, aiop->aio_iocbsz);

	mutex_destroy(&aiop->aio_mutex);
	mutex_destroy(&aiop->aio_portq_mutex);
	mutex_destroy(&aiop->aio_cleanupq_mutex);
	p->p_aio = NULL;
	mutex_exit(&p->p_lock);
	kmem_free(aiop, sizeof (struct aio));
}
/*
* copy out aio request's result to a user-level result_t buffer.
*/
void
aio_copyout_result(aio_req_t *reqp)
{
	struct buf *bp;
	struct iovec *iov;
	void *resultp;
	int error;
	size_t retval;

	if (reqp->aio_req_flags & AIO_COPYOUTDONE)
		return;

	reqp->aio_req_flags |= AIO_COPYOUTDONE;

	iov = reqp->aio_req_uio.uio_iov;
	bp = &reqp->aio_req_buf;
	/* "resultp" points to user-level result_t buffer */
	resultp = (void *)reqp->aio_req_resultp;
	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
		retval = (size_t)-1;
	} else {
		error = 0;
		retval = iov->iov_len - bp->b_resid;
	}
#ifdef	_SYSCALL32_IMPL
	if (get_udatamodel() == DATAMODEL_NATIVE) {
		(void) sulword(&((aio_result_t *)resultp)->aio_return, retval);
		(void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
	} else {
		(void) suword32(&((aio_result32_t *)resultp)->aio_return,
		    (int)retval);
		(void) suword32(&((aio_result32_t *)resultp)->aio_errno,
		    error);
	}
#else
	(void) suword32(&((aio_result_t *)resultp)->aio_return, retval);
	(void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
#endif
}
void
aio_copyout_result_port(struct iovec *iov, struct buf *bp, void *resultp)
{
	int errno;
	size_t retval;

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			errno = bp->b_error;
		else
			errno = EIO;
		retval = (size_t)-1;
	} else {
		errno = 0;
		retval = iov->iov_len - bp->b_resid;
	}
#ifdef	_SYSCALL32_IMPL
	if (get_udatamodel() == DATAMODEL_NATIVE) {
		(void) sulword(&((aio_result_t *)resultp)->aio_return, retval);
		(void) suword32(&((aio_result_t *)resultp)->aio_errno, errno);
	} else {
		(void) suword32(&((aio_result32_t *)resultp)->aio_return,
		    (int)retval);
		(void) suword32(&((aio_result32_t *)resultp)->aio_errno,
		    errno);
	}
#else
	(void) suword32(&((aio_result_t *)resultp)->aio_return, retval);
	(void) suword32(&((aio_result_t *)resultp)->aio_errno, errno);
#endif
}
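
/*
 * Illustrative sketch (not part of the original file): the dual data-model
 * store pattern shared by the two copyout routines above.  A 64-bit kernel
 * built with _SYSCALL32_IMPL must check the caller's data model and store
 * into either the native aio_result_t or the 32-bit aio_result32_t layout;
 * suword32()/sulword() fail softly instead of faulting on a bad user
 * address.  The store_result() name is hypothetical; guarded out of real
 * builds.
 */
#ifdef AIO_COPYOUT_EXAMPLE
static void
store_result(void *resultp, size_t retval, int error)
{
#ifdef	_SYSCALL32_IMPL
	if (get_udatamodel() == DATAMODEL_NATIVE) {
		/* native (64-bit) caller: long-sized return slot */
		(void) sulword(&((aio_result_t *)resultp)->aio_return, retval);
		(void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
	} else {
		/* 32-bit caller on a 64-bit kernel: ILP32 layout */
		(void) suword32(&((aio_result32_t *)resultp)->aio_return,
		    (int)retval);
		(void) suword32(&((aio_result32_t *)resultp)->aio_errno,
		    error);
	}
#else
	(void) suword32(&((aio_result_t *)resultp)->aio_return, retval);
	(void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
#endif
}
#endif	/* AIO_COPYOUT_EXAMPLE */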
/*
* This function is used to remove a request from the done queue.
*/
void
aio_req_remove_portq(aio_t *aiop, aio_req_t *reqp)
{
	ASSERT(MUTEX_HELD(&aiop->aio_portq_mutex));
	while (aiop->aio_portq == NULL) {
		/*
		 * aio_portq is set to NULL when aio_cleanup_portq()
		 * is working with the event queue.
		 * The aio_cleanup_thread() uses aio_cleanup_portq()
		 * to unlock all AIO buffers with completed transactions.
		 * Wait here until aio_cleanup_portq() restores the
		 * list of completed transactions in aio_portq.
		 */
		cv_wait(&aiop->aio_portcv, &aiop->aio_portq_mutex);
	}
	aio_deq(&aiop->aio_portq, reqp);
}
/* ARGSUSED */
void
aio_close_port(void *arg, int port, pid_t pid, int lastclose)
{
	aio_t *aiop;
	aio_req_t *reqp;
	aio_req_t *next;
	aio_req_t *headp;
	int counter;

	if (arg == NULL)
		aiop = curproc->p_aio;
	else
		aiop = (aio_t *)arg;
	/*
	 * The PORT_SOURCE_AIO source is always associated with every newly
	 * created port by default.
	 * If no asynchronous I/O transactions were associated with the port
	 * then the aiop pointer will still be set to NULL.
	 */
	if (aiop == NULL)
		return;
	/*
	 * Within a process event ports can be used to collect events other
	 * than PORT_SOURCE_AIO events. At the same time the process can submit
	 * asynchronous I/O transactions that are not associated with the
	 * current port.
	 * The current process oriented model of AIO uses a single queue for
	 * pending events. On close the pending queue (queue of asynchronous
	 * I/O transactions using event port notification) must be scanned
	 * to detect and handle pending I/Os using the current port.
	 */
	mutex_enter(&aiop->aio_portq_mutex);
	mutex_enter(&aiop->aio_mutex);
	counter = 0;
	if ((headp = aiop->aio_portpending) != NULL) {
		reqp = headp;
		do {
			if (reqp->aio_req_portkev &&
			    reqp->aio_req_port == port) {
				reqp->aio_req_flags |= AIO_CLOSE_PORT;
				counter++;
			}
		} while ((reqp = reqp->aio_req_next) != headp);
	}
	if (counter == 0) {
		/* no AIOs pending */
		mutex_exit(&aiop->aio_mutex);
		mutex_exit(&aiop->aio_portq_mutex);
		return;
	}
	aiop->aio_portpendcnt += counter;
	mutex_exit(&aiop->aio_mutex);
	while (aiop->aio_portpendcnt)
		cv_wait(&aiop->aio_portcv, &aiop->aio_portq_mutex);

	/*
	 * all pending AIOs are completed.
	 * check port doneq
	 */
	headp = NULL;
	if ((reqp = aiop->aio_portq) != NULL) {
		do {
			next = reqp->aio_req_next;
			if (reqp->aio_req_port == port) {
				/* dequeue request and discard event */
				aio_req_remove_portq(aiop, reqp);
				port_free_event(reqp->aio_req_portkev);
				/* put request in temporary queue */
				reqp->aio_req_next = headp;
				headp = reqp;
			}
		} while ((reqp = next) != aiop->aio_portq);
	}
	mutex_exit(&aiop->aio_portq_mutex);

	/* headp points to the list of requests to be discarded */
	for (reqp = headp; reqp != NULL; reqp = next) {
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		mutex_enter(&aiop->aio_mutex);
		aio_req_free_port(aiop, reqp);
		mutex_exit(&aiop->aio_mutex);
	}

	if (aiop->aio_flags & AIO_CLEANUP)
		cv_broadcast(&aiop->aio_cleanupcv);
}
/*
* aio_cleanup_dr_delete_memory is used by dr's delete_memory_thread
 * to kick start the aio_cleanup_thread for the given process to do the
* necessary cleanup.
* This is needed so that delete_memory_thread can obtain writer locks
* on pages that need to be relocated during a dr memory delete operation,
* otherwise a deadly embrace may occur.
*/
int
aio_cleanup_dr_delete_memory(proc_t *procp)
{
	struct aio *aiop = procp->p_aio;
	struct as *as = procp->p_as;
	int ret = 0;

	ASSERT(MUTEX_HELD(&procp->p_lock));

	mutex_enter(&as->a_contents);

	if (aiop != NULL) {
		aiop->aio_rqclnup = 1;
		cv_broadcast(&as->a_cv);
		ret = 1;
	}
	mutex_exit(&as->a_contents);
	return (ret);
}