/* devpoll.c revision 7c478bd95313f5f23a4c958a745db2134aa03244 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/poll_impl.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#define RESERVED 1
/* local data struct */
int devpoll_init; /* is /dev/poll initialized already */
/* device local functions */
/*
 * NOTE(review): the line below is a dangling fragment of a function
 * prototype (its opening lines are missing from this extraction).
 */
int *rvalp);
static dev_info_t *dpdevi;
/*
 * Character/block device entry points. NOTE(review): the initializer
 * header (presumably "static struct cb_ops dp_cb_ops = {") and its
 * leading members are missing from this extraction -- confirm against
 * the original file.
 */
dpopen, /* open */
dpclose, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
dpwrite, /* write */
dpioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
dppoll, /* poll */
nodev, /* prop_op */
(struct streamtab *)0, /* streamtab */
};
/*
 * Driver operations. NOTE(review): the initializer header (presumably
 * "static struct dev_ops dp_ops = {") is missing from this extraction.
 */
DEVO_REV, /* devo_rev */
0, /* refcnt */
dpinfo, /* info */
nulldev, /* identify */
nulldev, /* probe */
dpattach, /* attach */
dpdetach, /* detach */
nodev, /* reset */
&dp_cb_ops, /* driver operations */
nulldev /* power */
};
/*
 * Module linkage. NOTE(review): the "static struct modldrv modldrv = {"
 * header appears to be missing from this extraction.
 */
&mod_driverops, /* type of module - a driver */
"Dev Poll driver %I%",
&dp_ops,
};
static struct modlinkage modlinkage = {
(void *)&modldrv,
};
/*
* Locking Design
*
* structure is per lwp. An implicit assumption is made there that some
* portion of pollcache will never be touched by other lwps. E.g., in
* poll(2) design, no lwp will ever need to grow bitmap of other lwp.
* locking.
*
* minor number) has its own lock. Since read (dpioctl) is a much more
* frequent operation than write, we want to allow multiple reads on same
* priority to write operation. Theoretically writes can starve reads as
* well. But in practical sense this is not important because (1) writes
* happens less often than reads, and (2) write operation defines the
* content of poll fd a cache set. If writes happens so often that they
* can starve reads, that means the cached set is very unstable. It may
* not make sense to read an unstable cache set anyway. Therefore, the
* writers starving readers case is not handled in this design.
*/
/*
 * Module load entry point.
 * NOTE(review): this body is incomplete in this extraction -- the
 * mod_install() call and the if/else branches that set/clear
 * devpoll_init appear to have been dropped. Confirm against the
 * original file before relying on the control flow shown here.
 */
int
_init()
{
int error; /* presumably holds the mod_install() result -- TODO confirm */
devpoll_init = 1;
devpoll_init = 0; /* NOTE(review): enclosing failure branch missing here */
}
return (error);
}
/*
 * Module unload entry point.
 * NOTE(review): body incomplete in this extraction -- the mod_remove()
 * call and the error-check branch around the first return are missing.
 */
int
_fini()
{
int error; /* presumably the mod_remove() result -- TODO confirm */
return (error);
}
return (0);
}
/*
 * NOTE(review): the signature and body lines of this function are
 * missing from this extraction. By position (after _init/_fini) this
 * is presumably _info(struct modinfo *), returning
 * mod_info(&modlinkage, modinfop) -- confirm against the original.
 */
int
{
}
/*ARGSUSED*/
/*
 * Device attach entry point (dpattach, per the dev_ops table above).
 * NOTE(review): the signature line and the left-hand side of the
 * DDI_FAILURE comparison (presumably a ddi_create_minor_node() call)
 * are missing from this extraction.
 */
static int
{
== DDI_FAILURE) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * Device detach entry point (dpdetach, per the dev_ops table above).
 * Only DDI_DETACH is supported; any other command fails.
 * NOTE(review): the signature line and the minor-node removal that
 * presumably precedes the success return are missing from this
 * extraction.
 */
static int
{
if (cmd != DDI_DETACH)
return (DDI_FAILURE);
return (DDI_SUCCESS);
}
/* ARGSUSED */
/*
 * getinfo(9E) entry point (dpinfo, per the dev_ops table above).
 * Maps a dev_t to its dev_info / instance. NOTE(review): the signature
 * line and the *result assignment in the DEVT2DEVINFO case are missing
 * from this extraction.
 */
static int
{
int error;
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
error = DDI_SUCCESS;
break;
case DDI_INFO_DEVT2INSTANCE:
/* single-instance driver: instance is always 0 */
*result = (void *)0;
error = DDI_SUCCESS;
break;
default:
error = DDI_FAILURE;
}
return (error);
}
/*
* dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major
* where it was stopped last time, instead of always starting from 0,
* (2) since user may not have cleaned up the cached fds when they are
* closed, some polldats in cache may refer to closed or reused fds. We
* need to check for those cases.
*
* NOTE: Upon closing an fd, automatic poll cache cleanup is done for
* stale entries!
*/
/*
 * Scan the cached poll fd set (see the block comment above: similar to
 * pcache_poll() in poll.c, but resumes from where the last scan
 * stopped, and must tolerate closed/reused fds left in the cache).
 * NOTE(review): this body is heavily incomplete in this extraction --
 * the signature line, most loop headers, the fd-lookup calls, and the
 * VOP_POLL invocation itself are missing. The surviving lines are kept
 * byte-identical below; do not treat the visible control flow as
 * complete.
 */
static int
{
short revent;
int error = 0;
/*
 * No Need to search because no poll fd
 * has been cached.
 */
return (error);
}
if (start == 0) {
/*
 * started from the very beginning, no need to wrap around.
 */
} else {
}
fdcnt = 0;
revent = 0;
/*
 * Examine the bit map in a circular fashion
 * to avoid starvation. Always resume from
 * last stop. Scan till end of the map. Then
 * wrap around.
 */
if (fd >= 0) {
if (no_wrap) {
} else {
start = 0;
}
} else {
}
/*
 * The fd is POLLREMOVed. This fd is
 * logically no longer cached. So move
 * on to the next one.
 */
continue;
}
/*
 * The fd has been closed, but user has not
 * done a POLLREMOVE on this fd yet. Instead
 * of cleaning it here implicitly, we return
 * POLLNVAL. This is consistent with poll(2)
 * polling a closed fd. Hope this will remind
 * user to do a POLLREMOVE.
 */
fdcnt++;
continue;
}
/*
 * user is polling on a cached fd which was
 * closed and then reused. Unfortunately
 * there is no good way to inform user.
 * If the file struct is also reused, we
 * may not be able to detect the fd reuse
 * at all. As long as this does not
 * we will play along. Man page states if
 * user does not clean up closed fds, polling
 * results will be indeterministic.
 *
 * XXX - perhaps log the detection of fd
 * reuse?
 */
}
/*
 * XXX - pollrelock() logic needs to know which
 * which pollcache lock to grab. It'd be a
 * cleaner solution if we could pass pcp as
 * an arguement in VOP_POLL interface instead
 * of implicitly passing it using thread_t
 * struct. On the other hand, changing VOP_POLL
 * poll routine to change. May want to revisit
 * the tradeoff later.
 */
if (error != 0) {
break;
}
/*
 * layered devices (e.g. console driver)
 * may change the vnode and thus the pollhead
 * pointer out from underneath us.
 */
/*
 * The bit should still be set.
 */
goto retry;
}
if (revent != 0) {
fdcnt++;
/*
 * We clear a bit or cache a poll fd if
 * the driver returns a poll head ptr,
 * which is expected in the case of 0
 * revents. Some buggy driver may return
 * NULL php pointer with 0 revents. In
 * this case, we just treat the driver as
 * "noncachable" and not clearing the bit
 * in bitmap.
 */
}
}
}
} else {
/*
 * No bit set in the range. Check for wrap around.
 */
if (!no_wrap) {
start = 0;
} else {
}
}
}
if (!done) {
}
return (error);
}
/*ARGSUSED*/
/*
 * open(9E) entry point (dpopen, per the cb_ops table above). Finds a
 * free minor slot in the devpoll table, growing the table by
 * DEVPOLLSIZE when full, and allocates a pollcache skeleton for the
 * new handle. NOTE(review): the signature line, the minor-slot search
 * loop header, and the table-copy/allocation statements around the
 * newtbl grow path are missing from this extraction.
 */
static int
{
break;
}
}
dp_entry_t **newtbl;
/*
 * Used up every entry in the existing devpoll table.
 * Grow the table by DEVPOLLSIZE.
 */
return (ENXIO);
}
dptblsize += DEVPOLLSIZE;
}
devpolltbl = newtbl;
}
/*
 * allocate a pollcache skeleton here. Delay allocating bitmap
 * structures until dpwrite() time, since we don't know the
 * optimal size yet.
 */
pcp = pcache_alloc();
return (0);
}
/*
 * NOTE(review): the first line(s) of this comment are missing; it
 * presumably described writing a pollfd array to cache a new fd ...
 * or change poll events for a watched fd.
 */
/*ARGSUSED*/
/*
 * write(9E) entry point (dpwrite, per the cb_ops table above). Copies
 * in a pollfd array and adds/updates/removes each fd in the cached
 * set, taking exclusive (writer) access against concurrent dpioctl
 * readers via dpe_writerwait/dpe_refcnt. NOTE(review): this body is
 * heavily incomplete in this extraction -- the signature, the uiomove
 * copy-in, the per-pollfd loop header, the fd validity checks, and the
 * VOP_POLL call are missing. Surviving lines kept byte-identical.
 */
static int
{
int error;
int fd;
return (EACCES);
}
}
/*
 * Copy in the pollfd array. Walk through the array and add
 * each polled fd to the cached set.
 */
/*
 * not supposed to function as a seekable device. To prevent offset
 * from growing and eventually exceed the maximum, reset the offset
 * here for every call.
 */
uiop->uio_loffset = 0;
!= 0) {
return (error);
}
/*
 * We are about to enter the core portion of dpwrite(). Make sure this
 * write has exclusive access in this portion of the code, i.e., no
 * other writers in this code and no other readers in dpioctl.
 */
dpep->dpe_writerwait++;
while (dpep->dpe_refcnt != 0) {
dpep->dpe_writerwait--;
}
}
dpep->dpe_writerwait--;
dpep->dpe_refcnt++;
}
continue;
pdp = pcache_alloc_fd(0);
}
}
}
/*
 * The fd is not valid. Since we can't pass
 * this error back in the write() call, set
 * the bit in bitmap to force DP_POLL ioctl
 * to examine it.
 */
continue;
}
/*
 * Don't do VOP_POLL for an already cached fd with
 * same poll events.
 */
/*
 * the events are already cached
 */
continue;
}
/*
 * do VOP_POLL and cache this poll fd.
 */
/*
 * XXX - pollrelock() logic needs to know which
 * which pollcache lock to grab. It'd be a
 * cleaner solution if we could pass pcp as
 * an arguement in VOP_POLL interface instead
 * of implicitly passing it using thread_t
 * struct. On the other hand, changing VOP_POLL
 * poll routine to change. May want to revisit
 * the tradeoff later.
 */
/*
 * We always set the bit when this fd is cached.
 * So we don't have to worry about missing a
 * pollwakeup between VOP_POLL and pollhead_insert.
 * This forces the first DP_POLL to poll this fd.
 * Real performance gain comes from subsequent
 * DP_POLL.
 */
if (error != 0) {
break;
}
} else {
pdp);
}
}
}
} else {
continue;
}
}
}
}
dpep->dpe_refcnt--;
return (error);
}
/*ARGSUSED*/
/*
 * ioctl(9E) entry point (dpioctl, per the cb_ops table above).
 * Handles DP_POLL (poll the cached fd set, with timeout) and
 * DP_ISPOLLED (query whether a single fd is cached). Readers take
 * shared access via dpe_refcnt and yield to pending writers
 * (dpe_writerwait). NOTE(review): this body is heavily incomplete in
 * this extraction -- the signature, copyin/copyout calls, the
 * dp_pcache_poll invocation, and the cv_wait/timeout call that
 * produces rval are missing. Surviving lines kept byte-identical.
 */
static int
{
int timecheck = 0;
int error = 0;
/* do this now, before we sleep on DP_WRITER_PRESENT below */
gethrestime(&now);
}
return (EACCES);
(dpep->dpe_writerwait != 0)) {
return (EINTR);
}
}
dpep->dpe_refcnt++;
switch (cmd) {
case DP_POLL:
{
int fdcnt = 0;
int time_out;
int rval;
if (error) {
return (EFAULT);
}
if (time_out > 0) {
/*
 * Determine the future time of the requested timeout.
 */
}
/*
 * We are just using DP_POLL to sleep, so
 * we don't need any of the devpoll apparatus.
 * Do not check for signals if we have a zero timeout.
 */
if (time_out == 0)
return (0);
continue;
}
/*
 * XXX It'd be nice not to have to alloc each time.
 * But it requires another per thread structure hook.
 * Do it later if there is data suggest that.
 */
}
/*
 * The maximum size should be no large than
 * current maximum open file count.
 */
mutex_enter(&p->p_lock);
mutex_exit(&p->p_lock);
return (EINVAL);
}
mutex_exit(&p->p_lock);
ps->ps_dpbufsize);
}
for (;;) {
break;
/*
 * A pollwake has happened since we polled cache.
 */
continue;
/*
 * Sleep until we are notified, signalled, or timed out.
 * Do not check for signals if we have a zero timeout.
 */
if (time_out == 0) /* immediate timeout */
break;
/*
 * If we were awakened by a signal or timeout
 * then break the loop, else poll again.
 */
if (rval <= 0) {
if (rval == 0) /* signal */
break;
}
}
return (EFAULT);
}
}
break;
}
case DP_ISPOLLED:
{
if (error) {
return (EFAULT);
}
/*
 * No Need to search because no poll fd
 * has been cached.
 */
return (0);
}
break;
}
return (EFAULT);
}
*rvalp = 1;
}
break;
}
default:
return (EINVAL);
}
return (error);
}
/*ARGSUSED*/
/*
 * chpoll(9E) entry point (dppoll, per the cb_ops table above).
 * NOTE(review): the signature line and the body of the comment below
 * (and presumably the revents/phpp assignments) are missing from this
 * extraction; only the success return survives.
 */
static int
{
/*
 */
return (0);
}
/*
 * devpoll close should do enough clean up before the pollcache is deleted,
 * i.e., it should ensure no one still references the pollcache later.
 * There is no "permission" check in here. Any process having the last
 * reference of this /dev/poll fd can close it.
 * (NOTE(review): the final line of this comment was missing from this
 * extraction; the wording above is a guess -- confirm against original.)
 */
/*ARGSUSED*/
/*
 * close(9E) entry point (dpclose, per the cb_ops table above). Walks
 * the pollcache hash table to tear down cached entries, then waits out
 * any in-flight pollwakeup() before freeing. NOTE(review): the
 * signature line, the inner per-bucket loop, and the actual
 * free/cv_wait statements are missing from this extraction.
 */
static int
{
int i;
/*
 * At this point, no other lwp can access this pollcache via the
 * up without the pc_lock.
 */
for (i = 0; i < pcp->pc_hashsize; i++) {
}
}
}
/*
 * pollwakeup() may still interact with this pollcache. Wait until
 * it is done.
 */
return (0);
}