/* fio.c -- revision b0f673c4626e4cb1db7785287eaeed2731dfefe8 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, Joyent Inc. All rights reserved.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/priocntl.h>
#include <sys/port_impl.h>
#ifdef DEBUG
#else /* DEBUG */
#define MAXFD(x)
#define COUNT(x)
#endif /* DEBUG */
static void port_close_fd(portfd_t *);
/*
* File descriptor allocation.
*
* fd_find(fip, minfd) finds the first available descriptor >= minfd.
* The most common case is open(2), in which minfd = 0, but we must also
* support fcntl(fd, F_DUPFD, minfd).
*
* The algorithm is as follows: we keep all file descriptors in an infix
* binary tree in which each node records the number of descriptors
* allocated in its right subtree, including itself. Starting at minfd,
* we ascend the tree until we find a non-fully allocated right subtree.
* We then descend that subtree in a binary search for the smallest fd.
* Finally, we ascend the tree again to increment the allocation count
* of every subtree containing the newly-allocated fd. Freeing an fd
* requires only the last step: we ascend the tree to decrement allocation
* counts. Each of these three steps (ascent to find non-full subtree,
* descent to find lowest fd, ascent to update allocation counts) is
* O(log n), thus the algorithm as a whole is O(log n).
*
 * We don't implement the tree with the customary left/right/parent
 * pointers, but instead take advantage of the glorious mathematics of
* full infix binary trees. For reference, here's an illustration of the
* logical structure of such a tree, rooted at 4 (binary 100), covering
* the range 1-7 (binary 001-111). Our canonical trees do not include
* fd 0; we'll deal with that later.
*
 *               100
 *              /   \
 *             /     \
 *           010     110
 *          /  \     /  \
 *        001  011 101  111
*
* We make the following observations, all of which are easily proven by
* induction on the depth of the tree:
*
* (T1) The least-significant bit (LSB) of any node is equal to its level
* in the tree. In our example, nodes 001, 011, 101 and 111 are at
* level 0; nodes 010 and 110 are at level 1; and node 100 is at level 2.
*
* (T2) The child size (CSIZE) of node N -- that is, the total number of
* right-branch descendants in a child of node N, including itself -- is
* given by clearing all but the least significant bit of N. This
* follows immediately from (T1). Applying this rule to our example, we
* see that CSIZE(100) = 100, CSIZE(x10) = 10, and CSIZE(xx1) = 1.
*
* (T3) The nearest left ancestor (LPARENT) of node N -- that is, the nearest
* ancestor containing node N in its right child -- is given by clearing
* the LSB of N. For example, LPARENT(111) = 110 and LPARENT(110) = 100.
* Clearing the LSB of nodes 001, 010 or 100 yields zero, reflecting
* the fact that these are leftmost nodes. Note that this algorithm
* automatically skips generations as necessary. For example, the parent
* of node 101 is 110, which is a *right* ancestor (not what we want);
* but its grandparent is 100, which is a left ancestor. Clearing the LSB
* of 101 gets us to 100 directly, skipping right past the uninteresting
* generation (110).
*
* Note that since LPARENT clears the LSB, whereas CSIZE clears all *but*
* the LSB, we can express LPARENT() nicely in terms of CSIZE():
*
* LPARENT(N) = N - CSIZE(N)
*
* (T4) The nearest right ancestor (RPARENT) of node N is given by:
*
* RPARENT(N) = N + CSIZE(N)
*
* (T5) For every interior node, the children differ from their parent by
* CSIZE(parent) / 2. In our example, CSIZE(100) / 2 = 2 = 10 binary,
* and indeed, the children of 100 are 100 +/- 10 = 010 and 110.
*
* Next, we'll need a few two's-complement math tricks. Suppose a number,
* N, has the following form:
*
* N = xxxx10...0
*
* That is, the binary representation of N consists of some string of bits,
* then a 1, then all zeroes. This amounts to nothing more than saying that
* N has a least-significant bit, which is true for any N != 0. If we look
* at N and N - 1 together, we see that we can combine them in useful ways:
*
* N = xxxx10...0
* N - 1 = xxxx01...1
* ------------------------
* N & (N - 1) = xxxx000000
* N | (N - 1) = xxxx111111
* N ^ (N - 1) = 111111
*
* In particular, this suggests several easy ways to clear all but the LSB,
* which by (T2) is exactly what we need to determine CSIZE(N) = 10...0.
* We'll opt for this formulation:
*
* (C1) CSIZE(N) = (N - 1) ^ (N | (N - 1))
*
* Similarly, we have an easy way to determine LPARENT(N), which requires
* that we clear the LSB of N:
*
* (L1) LPARENT(N) = N & (N - 1)
*
* We note in the above relations that (N | (N - 1)) - N = CSIZE(N) - 1.
* When combined with (T4), this yields an easy way to compute RPARENT(N):
*
* (R1) RPARENT(N) = (N | (N - 1)) + 1
*
* Finally, to accommodate fd 0 we must adjust all of our results by +/-1 to
* move the fd range from [1, 2^n) to [0, 2^n - 1). This is straightforward,
* so there's no need to belabor the algebra; the revised relations become:
*
* (C1a) CSIZE(N) = N ^ (N | (N + 1))
*
* (L1a) LPARENT(N) = (N & (N + 1)) - 1
*
* (R1a) RPARENT(N) = N | (N + 1)
*
* This completes the mathematical framework. We now have all the tools
* we need to implement fd_find() and fd_reserve().
*
* fd_find(fip, minfd) finds the smallest available file descriptor >= minfd.
* It does not actually allocate the descriptor; that's done by fd_reserve().
* fd_find() proceeds in two steps:
*
* (1) Find the leftmost subtree that contains a descriptor >= minfd.
* We start at the right subtree rooted at minfd. If this subtree is
* not full -- if fip->fi_list[minfd].uf_alloc != CSIZE(minfd) -- then
* step 1 is done. Otherwise, we know that all fds in this subtree
* are taken, so we ascend to RPARENT(minfd) using (R1a). We repeat
* this process until we either find a candidate subtree or exceed
* fip->fi_nfiles. We use (C1a) to compute CSIZE().
*
* (2) Find the smallest fd in the subtree discovered by step 1.
* Starting at the root of this subtree, we descend to find the
* smallest available fd. Since the left children have the smaller
* fds, we will descend rightward only when the left child is full.
*
* We begin by comparing the number of allocated fds in the root
* to the number of allocated fds in its right child; if they differ
* by exactly CSIZE(child), we know the left subtree is full, so we
* descend right; that is, the right child becomes the search root.
* Otherwise we leave the root alone and start following the right
* child's left children. As fortune would have it, this is very
* simple computationally: by (T5), the right child of fd is just
* fd + size, where size = CSIZE(fd) / 2. Applying (T5) again,
* we find that the right child's left child is fd + size - (size / 2) =
* fd + (size / 2); *its* left child is fd + (size / 2) - (size / 4) =
* fd + (size / 4), and so on. In general, fd's right child's
* leftmost nth descendant is fd + (size >> n). Thus, to follow
* the right child's left descendants, we just halve the size in
* each iteration of the search.
*
* When we descend leftward, we must keep track of the number of fds
* that were allocated in all the right subtrees we rejected, so we
* know how many of the root fd's allocations are in the remaining
* (as yet unexplored) leftmost part of its right subtree. When we
* encounter a fully-allocated left child -- that is, when we find
* that fip->fi_list[fd].uf_alloc == ralloc + size -- we descend right
* (as described earlier), resetting ralloc to zero.
*
* fd_reserve(fip, fd, incr) either allocates or frees fd, depending
* on whether incr is 1 or -1. Starting at fd, fd_reserve() ascends
* the leftmost ancestors (see (T3)) and updates the allocation counts.
* At each step we use (L1a) to compute LPARENT(), the next left ancestor.
*
* flist_minsize() finds the minimal tree that still covers all
* used fds; as long as the allocation count of a root node is zero, we
* don't need that node or its right subtree.
*
* flist_nalloc() counts the number of allocated fds in the tree, by starting
* at the top of the tree and summing the right-subtree allocation counts as
* it descends leftwards.
*
* Note: we assume that flist_grow() will keep fip->fi_nfiles of the form
* 2^n - 1. This ensures that the fd trees are always full, which saves
* quite a bit of boundary checking.
*/
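/*
 * Illustrative sketch (not part of the original source): the relations
 * (C1a), (L1a) and (R1a) above translate directly into bit operations on
 * an fd.  The macro names below are hypothetical and exist only to make
 * the arithmetic in fd_find() and fd_reserve() easier to follow; e.g. for
 * fd 5, FD_CSIZE(5) = 5 ^ 7 = 2, FD_LPARENT(5) = 3 and FD_RPARENT(5) = 7.
 */
#define	FD_CSIZE(fd)	((fd) ^ ((fd) | ((fd) + 1)))	/* (C1a) */
#define	FD_LPARENT(fd)	(((fd) & ((fd) + 1)) - 1)	/* (L1a) */
#define	FD_RPARENT(fd)	((fd) | ((fd) + 1))		/* (R1a) */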
static int
fd_find(uf_info_t *fip, int minfd)
{
	int size, ralloc, fd;

	for (fd = minfd; (uint_t)fd < fip->fi_nfiles; fd = fd | (fd + 1)) {
		size = fd ^ (fd | (fd + 1));	/* CSIZE(fd), by (C1a) */
		if (fip->fi_list[fd].uf_alloc == size)
			continue;	/* subtree full; ascend to RPARENT (R1a) */
		for (ralloc = 0, size >>= 1; size != 0; size >>= 1) {
			ralloc += fip->fi_list[fd + size].uf_alloc;
			if (fip->fi_list[fd].uf_alloc == ralloc + size) {
				fd += size;	/* left subtree full; go right */
				ralloc = 0;
			}
		}
		return (fd);
	}
	return (-1);
}
static void
fd_reserve(uf_info_t *fip, int fd, int incr)
{
	int pfd;

	for (pfd = fd; pfd >= 0; pfd = (pfd & (pfd + 1)) - 1)
		fip->fi_list[pfd].uf_alloc += incr;	/* ascend via LPARENT (L1a) */
}
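/*
 * Illustrative sketch (not part of the original source): how fd_find() and
 * fd_reserve() combine when allocating a descriptor, as described in the
 * block comment above.  The function name is hypothetical, and the fi_lock
 * and retry-on-grow handling are omitted; see ufalloc_file() below.
 */
static int
example_alloc_fd(uf_info_t *fip, int minfd)
{
	int fd;

	if ((fd = fd_find(fip, minfd)) >= 0)
		fd_reserve(fip, fd, 1);		/* mark fd as allocated */
	return (fd);			/* -1 means no fd >= minfd is free */
}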
static int
flist_minsize(uf_info_t *fip)
{
	int fd;

	/*
	 * We'd like to ASSERT(MUTEX_HELD(&fip->fi_lock)), but we're called
	 * by flist_fork(), which relies on other mechanisms for mutual
	 * exclusion.
	 */
	for (fd = fip->fi_nfiles; fd != 0; fd >>= 1)
		if (fip->fi_list[fd >> 1].uf_alloc != 0)
			break;

	return (fd);
}
static int
flist_nalloc(uf_info_t *fip)
{
	int fd;
	int nalloc = 0;

	for (fd = fip->fi_nfiles; fd != 0; fd >>= 1)
		nalloc += fip->fi_list[fd >> 1].uf_alloc;

	return (nalloc);
}
/*
* Increase size of the fi_list array to accommodate at least maxfd.
* We keep the size of the form 2^n - 1 for benefit of fd_find().
*/
static void
flist_grow(int maxfd)
{
continue;
return;
}
/*
* fi_list and fi_nfiles cannot change while any uf_lock is held,
* so we must grab all the old locks *and* the new locks up to oldcnt.
* (Locks beyond the end of oldcnt aren't visible until we store
* the new fi_nfiles, which is the last thing we do before dropping
* all the locks, so there's no need to acquire these locks).
* Holding the new locks is necessary because when fi_list changes
* to point to the new list, fi_nfiles won't have been stored yet.
* If we *didn't* hold the new locks, someone doing a UF_ENTER()
* could see the new fi_list, grab the new uf_lock, and then see
* fi_nfiles change while the lock is held -- in violation of
* UF_ENTER() semantics.
*/
}
/*
* As soon as we store the new flist, future locking operations
* will use it. Therefore, we must ensure that all the state
* we've just established reaches global visibility before the
* new flist does.
*/
/*
* Routines like getf() make an optimistic check on the validity
* of the supplied file descriptor: if it's less than the current
* value of fi_nfiles -- examined without any locks -- then it's
* safe to attempt a UF_ENTER() on that fd (which is a valid
* assumption because fi_nfiles only increases). Therefore, it
* is critical that the new value of fi_nfiles not reach global
* visibility until after the new fi_list: if it happened the
* other way around, getf() could see the new fi_nfiles and attempt
* a UF_ENTER() on the old fi_list, which would write beyond its
* end if the fd exceeded the old fi_nfiles.
*/
/*
* The new state is consistent now, so we can drop all the locks.
*/
/*
* If any threads are blocked on the old cvs, wake them.
* This will force them to wake up, discover that fi_list
* has changed, and go back to sleep on the new cvs.
*/
}
/*
* Retire the old flist. We can't actually kmem_free() it now
* because someone may still have a pointer to it. Instead,
* we link it onto a list of retired flists. The new flist
* is at least double the size of the previous flist, so the
* total size of all retired flists will be less than the size
* of the current one (to prove, consider the sum of a geometric
* series in powers of 2). exit() frees the retired flists.
*/
}
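/*
 * Illustrative sketch (not the original flist_grow() body): the store
 * ordering that the comments above require when publishing a grown list.
 * The function and parameter names (newlist, newcnt) are hypothetical;
 * the real code does this with all the relevant uf_locks held.
 */
static void
example_flist_publish(uf_info_t *fip, uf_entry_t *newlist, int newcnt)
{
	membar_producer();	/* new entries visible before the new fi_list */
	fip->fi_list = newlist;
	membar_producer();	/* new fi_list visible before the new fi_nfiles */
	fip->fi_nfiles = newcnt;
}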
/*
* Utility functions for keeping track of the active file descriptors.
*/
void
clear_stale_fd() /* called from post_syscall() */
{
int i;
/* uninitialized is ok here, a_nfd is then zero */
/* assert that this should not be necessary */
}
}
void
{
int i;
/* free the buffer if it was kmem_alloc()ed */
}
/* (re)initialize the structure */
}
static void
set_active_fd(int fd)
{
int i;
int *old_fd;
int old_nfd;
int *new_fd;
int new_nfd;
}
/* insert fd into vacant slot, if any */
return;
}
}
/*
* Reallocate the a_fd[] array to add one more slot.
*/
for (i = 0; i < old_nfd; i++)
}
}
void
{
int i;
break;
}
}
}
/*
* Does this thread have this fd active?
*/
static int
{
int i;
/* uninitialized is ok here, a_nfd is then zero */
return (1);
}
}
return (0);
}
/*
* Convert a user supplied file descriptor into a pointer to a file
* structure. Only task is to check range of the descriptor (soft
* resource limit was enforced at open time and shouldn't be checked
* here).
*/
file_t *
getf(int fd)
{
	uf_info_t *fip = P_FINFO(curproc);
	uf_entry_t *ufp;
	file_t *fp;

	if ((uint_t)fd >= fip->fi_nfiles)
		return (NULL);
	/*
	 * Reserve a slot in the active fd array now so we can call
	 * set_active_fd(fd) for real below, while still inside UF_ENTER().
	 */
	set_active_fd(-1);
	UF_ENTER(ufp, fip, fd);
	if ((fp = ufp->uf_file) == NULL) {
		UF_EXIT(ufp);
		return (NULL);
	}
	ufp->uf_refcnt++;
	set_active_fd(fd);	/* record the fd as active for this lwp */
	UF_EXIT(ufp);
	return (fp);
}
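/*
 * Illustrative sketch (not part of the original source) of the usual
 * getf()/releasef() protocol followed by getf() consumers.  The function
 * name and the VOP_FSYNC() call are hypothetical, for exposition only.
 */
static int
example_fsync_fd(int fd)
{
	file_t *fp;
	int error;

	if ((fp = getf(fd)) == NULL)
		return (EBADF);		/* fd is out of range or not open */
	error = VOP_FSYNC(fp->f_vnode, FSYNC, fp->f_cred, NULL);
	releasef(fd);			/* drop the reference taken by getf() */
	return (error);
}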
/*
* Close whatever file currently occupies the file descriptor slot
* and install the new file, usually NULL, in the file descriptor slot.
* The close must complete before we release the file descriptor slot.
* If newfp != NULL we only return an error if we can't allocate the
* slot so the caller knows that it needs to free the filep;
* in the other cases we return the error number from closef().
*/
int
{
int error;
return (EBADF);
flist_grow(fd);
}
/*
* If ufp is reserved but has no file pointer, it's in the
* transition between ufalloc() and setf(). We must wait
* for this transition to complete before assigning the
* new non-NULL file pointer.
*/
return (EBADF);
}
}
return (0);
}
} else {
return (EBADF);
}
}
/*
* If the file descriptor reference count is non-zero, then
* some other lwp in the process is performing system call
* activity on the file. To avoid blocking here for a long
* time (the other lwp might be in a long term sleep in its
* system call), we scan all other lwps in the process to
* find the ones with this fd as one of their active fds,
* set their a_stale flag, and set them running if they
* are in an interruptible sleep so they will emerge from
* their system calls immediately. post_syscall() will
* test the a_stale flag and set errno to EBADF.
*/
kthread_t *t;
/*
* We call sprlock_proc(p) to ensure that the thread
* list will not change while we are scanning it.
* To do this, we must drop ufp->uf_lock and then
* reacquire it (so we are not holding both p->p_lock
* and ufp->uf_lock at the same time). ufp->uf_lock
* must be held for is_active_fd() to be correct
* (set_active_fd() is called while holding ufp->uf_lock).
*
* This is a convoluted dance, but it is better than
* the old brute-force method of stopping every thread
* in the process by calling holdlwps(SHOLDFORK1).
*/
mutex_enter(&p->p_lock);
sprlock_proc(p);
mutex_exit(&p->p_lock);
t != curthread;
t = t->t_forw) {
if (is_active_fd(t, fd)) {
thread_lock(t);
t->t_post_sys = 1;
if (ISWAKEABLE(t))
setrun_locked(t);
thread_unlock(t);
}
}
}
mutex_enter(&p->p_lock);
sprunlock(p);
}
/*
* Wait for other lwps to stop using this file descriptor.
*/
/*
* cv_wait_stop() drops ufp->uf_lock, so the file list
* can change. Drop the lock on our (possibly) stale
* ufp and let UF_ENTER() find and lock the current ufp.
*/
}
#ifdef DEBUG
/*
* catch a watchfd on device's pollhead list but not on fpollinfo list
*/
#endif /* DEBUG */
/*
* We may need to cleanup some cached poll states in t_pollstate
* before the fd can be reused. It is important that we don't
* access a stale thread structure. We will do the cleanup in two
* phases to avoid deadlock and holding uf_lock for too long.
* In phase 1, hold the uf_lock and call pollblockexit() to set
* state in t_pollstate struct so that a thread does not exit on
* us. In phase 2, we drop the uf_lock and call pollcacheclean().
*/
if (pfd)
/*
* Keep the file descriptor entry reserved across the closef().
*/
/* Only return closef() error when closing is all we do */
}
/*
* Decrement uf_refcnt; wakeup anyone waiting to close the file.
*/
void
{
}
/*
* Identical to releasef() but can be called from another process.
*/
void
{
}
/*
* Duplicate all file descriptors across a fork.
*/
void
{
/*
* We don't need to hold fi_lock because all other lwp's in the
* parent have been held.
*/
/*
* Grab locks to appease ASSERTs in fd_reserve
*/
}
}
}
}
/*
* Close all open file descriptors for the current process.
* This is only called from exit(), which is single-threaded,
* so we don't need any locking.
*/
void
{
int fd;
/* remove event port association */
}
}
}
}
}
/*
* Internal form of close. Decrement reference count on file
* structure. Decrement reference count on the vnode following
* removal of the referencing file structure.
*/
int
{
int error;
int count;
int flag;
/*
* audit close of file (may be exit)
*/
if (AU_AUDITING())
if (count > 1) {
return (error);
}
/*
* If DTrace has getf() subroutines active, it will set dtrace_closef
* to point to code that implements a barrier with respect to probe
* context. This must be called before the file_t is freed (and the
* vnode that it refers to is released) -- but it must be after the
* file_t has been removed from the uf_entry_t. That is, there must
* be no way for a racing getf() in probe context to yield the fp that
* we're operating upon.
*/
if (dtrace_closef != NULL)
(*dtrace_closef)();
/*
* deallocate resources to audit_data
*/
if (audit_active)
return (error);
}
/*
* This is a combination of ufalloc() and setf().
*/
int
{
int filelimit;
int nfiles;
int fd;
/*
 * The assertion establishes that the assignment to filelimit below,
 * after the cast to int, is correct.
*/
for (;;) {
continue;
}
break;
mutex_enter(&p->p_lock);
mutex_exit(&p->p_lock);
return (-1);
}
/* fd_find() returned -1 */
}
return (fd);
}
/*
* Allocate a user file descriptor greater than or equal to "start".
*/
int
ufalloc(int start)
{
	return (ufalloc_file(start, NULL));
}
/*
* Check that a future allocation of count fds on proc p has a good
* chance of succeeding. If not, do rctl processing as if we'd failed
* the allocation.
*
* Our caller must guarantee that p cannot disappear underneath us.
*/
int
{
int filelimit;
int current;
if (count == 0)
return (1);
/*
* If count is a positive integer, the worst that can happen is
* an overflow to a negative value, which is caught by the >= 0 check.
*/
return (1);
mutex_enter(&p->p_lock);
mutex_exit(&p->p_lock);
return (0);
}
/*
* Allocate a user file descriptor and a file structure.
* Initialize the descriptor to point at the file structure.
* If fdp is NULL, the user file descriptor will not be allocated.
*/
int
{
int fd;
if (fdp) {
return (EMFILE);
}
/*
* Note: falloc returns the fp locked
*/
fp->f_audit_data = 0;
/*
* allocate resources to audit_data
*/
if (audit_active)
if (fdp)
return (0);
}
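/*
 * Illustrative sketch (not part of the original source) of the protocol the
 * comments above describe: falloc() reserves the fd and returns the file_t
 * locked; the caller finishes initialization, drops f_tlock, and publishes
 * the file with setf().  The function name and the vp/flag parameters are
 * hypothetical placeholders.
 */
static int
example_install_vnode(vnode_t *vp, int flag, int *fdp)
{
	file_t *fp;
	int fd;
	int error;

	if ((error = falloc(vp, flag, &fp, &fd)) != 0)
		return (error);		/* no fd or file_t was reserved */
	mutex_exit(&fp->f_tlock);	/* falloc() returned fp locked */
	setf(fd, fp);			/* fill in the slot falloc() reserved */
	*fdp = fd;
	return (0);
}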
/*ARGSUSED*/
static int
{
return (0);
}
/*ARGSUSED*/
static void
{
}
void
finit()
{
}
void
{
/*
* deallocate resources to audit_data
*/
if (audit_active)
} else
}
/*
* Given a file descriptor, set the user's
* file pointer to the given parameter.
*/
void
{
if (AU_AUDITING())
} else {
}
}
/*
* Given a file descriptor, return the file table flags, plus,
* if this is a socket in asynchronous mode, the FASYNC flag.
* getf() may or may not have been called before calling f_getfl().
*/
int
{
int error;
else {
else {
/*
* BSD fcntl() FASYNC compatibility.
*/
error = 0;
}
}
return (error);
}
/*
* Given a file descriptor, return the user's file flags.
* Force the FD_CLOEXEC flag for writable self-open /proc files.
* getf() may or may not have been called before calling f_getfd_error().
*/
int
{
int flag;
int error;
else {
else {
flag |= FD_CLOEXEC;
error = 0;
}
}
return (error);
}
/*
* getf() must have been called before calling f_getfd().
*/
char
{
int flag = 0;
return ((char)flag);
}
/*
* Given a file descriptor and file flags, set the user's file flags.
* At present, the only valid flag is FD_CLOEXEC.
* getf() may or may not have been called before calling f_setfd_error().
*/
int
{
int error;
else {
else {
error = 0;
}
}
return (error);
}
void
{
}
#define BADFD_MIN 3
#define BADFD_MAX 255
/*
* Attempt to allocate a file descriptor which is bad and which
* is "poison" to the application. It cannot be closed (except
* on exec), allocated for a different use, etc.
*/
int
{
int fdr;
int badfd;
#ifdef _LP64
/* No restrictions on 64 bit _file */
if (get_udatamodel() != DATAMODEL_ILP32)
return (EINVAL);
#endif
return (EINVAL);
return (EINVAL);
if (badfd != -1)
return (EAGAIN);
return (EMFILE);
}
if (fdr < 0)
return (EMFILE);
/* Lost race */
return (EAGAIN);
}
return (0);
}
/*
* Allocate a file descriptor and assign it to the vnode "*vpp",
* performing the usual open protocol upon it and returning the
* file descriptor allocated. It is the responsibility of the
* caller to dispose of "*vpp" if any error occurs.
*/
int
{
int error;
int fd;
return (error);
return (error);
}
/*
* Fill in the slot falloc reserved.
*/
return (0);
}
/*
* When a process forks it must increment the f_count of all file pointers
* since there is a new process pointing at them. fcnt_add(fip, 1) does this.
* Since we are called when there is only 1 active lwp we don't need to
* hold fi_lock or any uf_lock. If the fork fails, fork_fail() calls
* fcnt_add(fip, -1) to restore the counts.
*/
void
{
int i;
}
}
}
/*
* This is called from exec to close all fd's that have the FD_CLOEXEC flag
* set and also to close all self-open for write /proc file descriptors.
*/
void
{
int fd;
/*
* We may need to cleanup some cached poll states
* in t_pollstate before the fd can be reused. It
* is important that we don't access a stale thread
* structure. We will do the cleanup in two
* phases to avoid deadlock and holding uf_lock for
* too long. In phase 1, hold the uf_lock and call
* pollblockexit() to set state in t_pollstate struct
* so that a thread does not exit on us. In phase 2,
* we drop the uf_lock and call pollcacheclean().
*/
if (pfd)
}
}
/* Reset bad fd */
}
/*
* Utility function called by most of the *at() system call interfaces.
*
* Generate a starting vnode pointer for an (fd, path) pair where 'fd'
* is an open file descriptor for a directory to be used as the starting
* point for the lookup of the relative pathname 'path' (or, if path is
* NULL, generate a vnode pointer for the direct target of the operation).
*
* If we successfully return a non-NULL startvp, it has been the target
* of VN_HOLD() and the caller must call VN_RELE() on it.
*/
int
{
char startchar;
return (EFAULT);
/*
* Start from the current working directory.
*/
} else {
startchar = '\0';
return (EFAULT);
if (startchar == '/') {
/*
* 'path' is an absolute pathname.
*/
} else {
/*
* 'path' is a relative pathname or we will
* be applying the operation to 'fd' itself.
*/
return (EBADF);
}
}
return (0);
}
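/*
 * Illustrative sketch (not part of the original source) of how an *at()
 * system call typically consumes the startvp produced above.  The function
 * name and the use of lookupnameat() here are illustrative only.
 */
static int
example_lookup_at(int fd, char *path, vnode_t **vpp)
{
	vnode_t *startvp;
	int error;

	if ((error = fgetstartvp(fd, path, &startvp)) != 0)
		return (error);
	error = lookupnameat(path, UIO_USERSPACE, FOLLOW, NULLVPP, vpp, startvp);
	if (startvp != NULL)
		VN_RELE(startvp);	/* fgetstartvp() returned it held */
	return (error);
}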
/*
* Called from fchownat() and fchmodat() to set ownership and mode.
* The contents of *vap must be set before calling here.
*/
int
{
int error;
/*
* Since we are never called to set the size of a file, we don't
* need to check for non-blocking locks (via nbl_need_check(vp)).
*/
return (error);
/*
*/
(flags == AT_SYMLINK_NOFOLLOW) ?
return (error);
}
} else {
}
if (vn_is_readonly(vp)) {
} else {
}
return (error);
}
/*
* Return true if the given vnode is referenced by any
* entry in the current process's file descriptor table.
*/
int
{
int fd;
return (1);
}
}
return (0);
}
/*
* Return zero if at least one file currently open (by curproc) shouldn't be
* allowed to change zones.
*/
int
files_can_change_zones(void)
{
int fd;
return (0);
}
}
return (1);
}
#ifdef DEBUG
/*
* The following functions are only used in ASSERT()s elsewhere.
* They do not modify the state of the system.
*/
/*
* Return true (1) if the current thread is in the fpollinfo
* list for this file descriptor, else false (0).
*/
static int
{
return (1);
return (0);
}
/*
* Sanity check to make sure that after lwp_exit(),
* curthread does not appear on any fd's fpollinfo list.
*/
void
checkfpollinfo(void)
{
int fd;
}
}
/*
* Return true (1) if the current thread is in the fpollinfo
* list for this file descriptor, else false (0).
* This is the same as curthread_in_plist(),
* but is called w/o holding uf_lock.
*/
int
infpollinfo(int fd)
{
int rc;
return (rc);
}
#endif /* DEBUG */
/*
* Add the curthread to fpollinfo list, meaning this fd is currently in the
* thread's poll cache. Each lwp polling this file descriptor should call
* this routine once.
*/
void
addfpollinfo(int fd)
{
/*
* Assert we are not already on the list, that is, that
* this lwp did not call addfpollinfo twice for the same fd.
*/
/*
*/
}
/*
* Delete curthread from fpollinfo list if it is there.
*/
void
delfpollinfo(int fd)
{
break;
}
}
/*
* Assert that we are not still on the list, that is, that
* this lwp did not call addfpollinfo twice for the same fd.
*/
}
/*
* fd is associated with a port. pfd is a pointer to the fd entry in the
* cache of the port.
*/
void
{
/*
*/
/* first entry */
} else {
}
}
void
{
/*
*/
/* remove first entry */
} else {
}
}
static void
{
/*
* At this point, no other thread should access
* the portfd_t list for this fd. The uf_file, uf_portfd
* pointers in the uf_entry_t struct for this fd would
* be set to NULL.
*/
}
}