/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*
* Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
* All rights reserved.
*/
#include <sys/sysmacros.h>
#include <nfs/nfs_clnt.h>
cred_t *);
static int nfs_remove_locking_id(vnode_t *, int, char *, char *, int *);
struct mi_globals {
};
/* Debugging flag for PC file shares. */
extern int share_debug;
/*
* Attributes caching:
*
* Attributes are cached in the rnode in struct vattr form.
* There is a time associated with the cached attributes (r_attrtime)
* which tells whether the attributes are valid. The time is initialized
* to the difference between current time and the modify time of the vnode
* when new attributes are cached. This allows the attributes for
* files that have changed recently to be timed out sooner than for files
* that have not changed for a long time. There are minimum and maximum
* timeout values that can be set per mount point.
*/
int
{
return (EINTR);
}
}
}
return (0);
}
/*
* Validate caches by checking cached attributes. If the cached
* attributes have timed out, then get new attributes from the server.
* As a side effect, this will do cache invalidation if the attributes
* have changed.
*
* If the attributes have not timed out and if there is a cache
* invalidation being done by some other thread, then wait until that
* thread has completed the cache invalidation.
*/
int
{
int error;
if (ATTRCACHE_VALID(vp)) {
if (error)
return (error);
return (0);
}
}
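/*
* Editor's illustrative sketch (not part of the original source): the
* control flow described in the comment above, over a simplified
* rnode-like structure.  Here attrtime stands in for r_attrtime, the
* serial_busy flag for the r_serial-protected invalidation, and the
* function pointers for the over-the-wire GETATTR path and the wait.
*/
struct attrcache_sketch {
	long long attrtime;	/* cached attributes valid until this time */
	int serial_busy;	/* nonzero while another thread purges caches */
};

static int
validate_caches_sketch(struct attrcache_sketch *ac, long long now,
    int (*fetch_attrs_otw)(void), void (*wait_for_purge)(void))
{
	if (now < ac->attrtime) {
		/*
		 * Attributes have not timed out.  If another thread is
		 * busy invalidating caches, wait for it to finish and
		 * then simply use the cached attributes.
		 */
		if (ac->serial_busy)
			wait_for_purge();
		return (0);
	}
	/*
	 * Attributes timed out: fetch fresh ones from the server.  As a
	 * side effect this purges the data caches if the new attributes
	 * show that the file has changed.
	 */
	return (fetch_attrs_otw());
}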
/*
* Validate caches by checking cached attributes. If the cached
* attributes have timed out, then get new attributes from the server.
* As a side effect, this will do cache invalidation if the attributes
* have changed.
*
* If the attributes have not timed out and if there is a cache
* invalidation being done by some other thread, then wait until that
* thread has completed the cache invalidation.
*/
int
{
int error;
if (ATTRCACHE_VALID(vp)) {
if (error)
return (error);
return (0);
}
}
/*
* Purge all of the various NFS `data' caches.
*/
void
{
char *contents;
int size;
int error;
/*
* Purge the DNLC for any entries which refer to this file.
* Avoid recursive entry into dnlc_purge_vp() in case of a directory.
*/
/*
* Set the RINDNLCPURGE flag to prevent recursive entry
* into dnlc_purge_vp()
*/
}
/*
* Clear any readdir state bits and purge the readlink response cache.
*/
}
/*
* Flush the page cache.
*/
if (vn_has_cached_data(vp)) {
}
}
/*
* Flush the readdir response cache.
*/
if (HAVE_RDDIR_CACHE(rp))
}
/*
* Purge the readdir cache of all entries
*/
void
{
top:
}
}
/*
* Do a cache check based on the post-operation attributes.
* Then make them the new cached attributes. If no attributes
* were returned, then mark the attributes as timed out.
*/
void
{
if (!poap->attributes) {
return;
}
}
/*
* Same as above, but using a vattr
*/
void
{
if (!poap->attributes) {
return;
}
}
/*
* Do a cache check based on the weak cache consistency attributes.
* These consist of a small set of pre-operation attributes and the
* full set of post-operation attributes.
*
* If we are given the pre-operation attributes, then use them to
* check the validity of the various caches. Then, if we got the
* post-operation attributes, make them the new cached attributes.
* If we didn't get the post-operation attributes, then mark the
* attribute cache as timed out so that the next reference will
* cause a GETATTR to the server to refresh with the current
* attributes.
*
* Otherwise, if we didn't get the pre-operation attributes, but
* we did get the post-operation attributes, then use these
* attributes to check the validity of the various caches. This
* will probably cause a flush of the caches because if the
* operation succeeded, the attributes of the object were changed
* in some way from the old post-operation attributes. This
* should be okay because it is the safe thing to do. After
* checking the data caches, then we make these the new cached
* attributes.
*
* Otherwise, we didn't get either the pre- or post-operation
* attributes. Simply mark the attribute cache as timed out so
* the next reference will cause a GETATTR to the server to
* refresh with the current attributes.
*
* If an error occurred trying to convert the over the wire
* attributes to a vattr, then simply mark the attribute cache as
* timed out.
*/
void
{
return;
}
} else
} else {
}
}
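/*
* Editor's illustrative sketch (not part of the original source): the
* weak-cache-consistency decision tree described in the comment above,
* over a simplified attribute pair.  check_caches(), cache_attrs() and
* mark_timed_out() are hypothetical stand-ins for the cache check, the
* attribute caching routine and PURGE_ATTRCACHE() respectively.
*/
struct wcc_sketch {
	int have_pre;	/* pre-operation attributes returned? */
	int have_post;	/* post-operation attributes returned? */
};

static void
wcc_cache_check_sketch(const struct wcc_sketch *w,
    void (*check_caches)(void), void (*cache_attrs)(void),
    void (*mark_timed_out)(void))
{
	if (w->have_pre) {
		/* Pre-op attrs: validate the caches against them ... */
		check_caches();
		/* ... then cache the post-op attrs if they were returned. */
		if (w->have_post)
			cache_attrs();
		else
			mark_timed_out();
	} else if (w->have_post) {
		/*
		 * Only post-op attrs: use them for the check (this will
		 * usually flush the caches, which is the safe choice),
		 * then make them the new cached attributes.
		 */
		check_caches();
		cache_attrs();
	} else {
		/* Neither set was returned: force a GETATTR on next use. */
		mark_timed_out();
	}
}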
/*
* Set attributes cache for given vnode using nfsattr.
*
* This routine does not do cache validation with the attributes.
*
* If an error occurred trying to convert the over the wire
* attributes to a vattr, then simply mark the attribute cache as
* timed out.
*/
void
{
} else {
}
}
/*
* Set attributes cache for given vnode using fattr3.
*
* This routine does not do cache validation with the attributes.
*
* If an error occurred trying to convert the over the wire
* attributes to a vattr, then simply mark the attribute cache as
* timed out.
*/
void
{
} else {
}
}
/*
* Do a cache check based on attributes returned over the wire. The
* new attributes are cached.
*
* If an error occurred trying to convert the over the wire attributes
* to a vattr, then just return that error.
*
* As a side effect, the vattr argument is filled in with the converted
* attributes.
*/
int
{
int error;
if (error)
return (error);
return (0);
}
/*
* Do a cache check based on attributes returned over the wire. The
* new attributes are cached.
*
* If an error occurred trying to convert the over the wire attributes
* to a vattr, then just return that error.
*
* As a side effect, the vattr argument is filled in with the converted
* attributes.
*/
int
{
int error;
if (error)
return (error);
return (0);
}
/*
* Use the passed in virtual attributes to check to see whether the
* data and metadata caches are valid, cache the new attributes, and
* then do the cache invalidation if required.
*
* The cache validation and caching of the new attributes is done
* atomically via the use of the mutex, r_statelock. If required,
* the cache invalidation is done atomically w.r.t. the cache
* validation and caching of the attributes via the pseudo lock,
* r_serial.
*
* This routine is used to do cache validation and attributes caching
* for operations with a single set of post operation attributes.
*/
void
{
int mtime_changed = 0;
int ctime_changed = 0;
int was_serial;
was_serial = 0;
lwp->lwp_nostop++;
lwp->lwp_nostop--;
return;
}
}
lwp->lwp_nostop--;
} else
was_serial = 1;
return;
}
/*
* After writing data to a file on the remote server, the write
* thread always sets RWRITEATTR to indicate that the file on the
* remote server was modified with a WRITE operation and that the
* attribute cache has been marked as timed out. If RWRITEATTR
* is set, then do not check for mtime and ctime changes.
*/
mtime_changed = 1;
ctime_changed = 1;
} else {
}
/*
* If we have updated the file size in nfs_attrcache_va, then as soon
* as we drop r_statelock we will be in the process of purging all of
* our caches and updating them. It is possible for another thread to
* pick up this new file size and read in zeroed data. Stall other
* threads until the cache purge is complete.
*/
/*
* If RWRITEATTR was set and we have updated the file
* size, the server's returned file size is not necessarily
* the result of this client's WRITE. We need to purge
* all caches.
*/
if (writeattr_set)
mtime_changed = 1;
}
}
if (!mtime_changed && !ctime_changed) {
return;
}
if (mtime_changed)
}
if (ctime_changed) {
(void) nfs_access_purge_rp(rp);
}
}
if (!was_serial) {
}
}
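/*
* Editor's illustrative sketch (not part of the original source): what
* the mtime/ctime comparison above ultimately drives.  The helper names
* are hypothetical stand-ins; in the real code an mtime change leads to
* a purge of the data/DNLC/readdir caches, and a ctime change leads to
* a purge of the access and ACL caches.
*/
static void
cache_purge_sketch(int mtime_changed, int ctime_changed,
    void (*purge_data_caches)(void), void (*purge_access_cache)(void))
{
	if (!mtime_changed && !ctime_changed)
		return;			/* nothing to invalidate */

	if (mtime_changed)
		purge_data_caches();	/* file contents may be stale */

	if (ctime_changed)
		purge_access_cache();	/* permissions/ACLs may be stale */
}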
/*
* Use the passed in "before" virtual attributes to check to see
* whether the data and metadata caches are valid, cache the "after"
* new attributes, and then do the cache invalidation if required.
*
* The cache validation and caching of the new attributes is done
* atomically via the use of the mutex, r_statelock. If required,
* the cache invalidation is done atomically w.r.t. the cache
* validation and caching of the attributes via the pseudo lock,
* r_serial.
*
* This routine is used to do cache validation and attributes caching
* for operations with both pre operation attributes and post operation
* attributes.
*/
static void
{
int mtime_changed = 0;
int ctime_changed = 0;
int was_serial;
was_serial = 0;
lwp->lwp_nostop++;
lwp->lwp_nostop--;
return;
}
}
lwp->lwp_nostop--;
} else
was_serial = 1;
return;
}
/*
* After writing data to a file on the remote server, the write
* thread always sets RWRITEATTR to indicate that the file on the
* remote server was modified with a WRITE operation and that the
* attribute cache has been marked as timed out. If RWRITEATTR
* is set, then do not check for mtime and ctime changes.
*/
mtime_changed = 1;
ctime_changed = 1;
} else {
}
/*
* If we have updated the file size in nfs_attrcache_va, then as soon
* as we drop r_statelock we will be in the process of purging all of
* our caches and updating them. It is possible for another thread to
* pick up this new file size and read in zeroed data. Stall other
* threads until the cache purge is complete.
*/
/*
* If RWRITEATTR was set and we have updated the file
* size, the server's returned file size is not necessarily
* the result of this client's WRITE. We need to purge
* all caches.
*/
if (writeattr_set)
mtime_changed = 1;
}
}
if (!mtime_changed && !ctime_changed) {
return;
}
if (mtime_changed)
}
if (ctime_changed) {
(void) nfs_access_purge_rp(rp);
}
}
if (!was_serial) {
}
}
/*
* Set attributes cache for given vnode using virtual attributes.
*
* Set the timeout value on the attribute cache and fill it
* with the passed in attributes.
*
* The caller must be holding r_statelock.
*/
void
{
/*
* Delta is the number of nanoseconds that we will
* cache the attributes of the file. It is based on
* the number of nanoseconds since the last time that
* we detected a change. The assumption is that files
* that changed recently are likely to change again.
* There is a minimum and a maximum (which can differ for
* regular files and for directories), and these are enforced.
*
* Using the time since last change was detected
* eliminates direct comparison or calculation
* using mixed client and server times. NFS does
* not make any assumptions regarding the client
* and server clocks being synchronized.
*/
delta = 0;
else {
} else {
}
}
/*
* Update the size of the file if there is no cached data or if
* the cached data is clean and there is no data being written
* out.
*/
(!vn_has_cached_data(vp) ||
}
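/*
* Editor's illustrative sketch (not part of the original source): the
* timeout computation described above, reduced to plain arithmetic.
* acmin/acmax are stand-ins for the per-mount minimum and maximum
* attribute cache timeouts; "now" and "mtime" are hrtime-style
* nanosecond values, with mtime being the time a change was last
* detected.
*/
static long long
attr_timeout_sketch(long long now, long long mtime,
    long long acmin, long long acmax)
{
	long long delta;

	/*
	 * Cache lifetime is the time since the last detected change,
	 * clamped to the per-mount [acmin, acmax] range.  A file that
	 * changed recently gets a short timeout; one that has been
	 * stable for a long time gets the maximum.
	 */
	delta = now - mtime;
	if (delta < acmin)
		delta = acmin;
	else if (delta > acmax)
		delta = acmax;

	return (delta);
}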
/*
* Fill in attribute from the cache.
* If valid, then return 0 to indicate that no error occurred,
* otherwise return 1 to indicate that an error occurred.
*/
static int
{
if (ATTRCACHE_VALID(vp)) {
/*
* Cached attributes are valid
*/
/*
* Set the caller's va_mask to the set of attributes
* that were requested ANDed with the attributes that
* are available. If attributes were requested that
* are not available, those bits must be turned off
* in the callers va_mask.
*/
return (0);
}
return (1);
}
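/*
* Editor's illustrative sketch (not part of the original source): the
* mask handling described above.  The caller's requested mask is ANDed
* with the mask of attributes actually held in the cache, so bits for
* unavailable attributes are turned off; the 0/1 return convention of
* the routine above is preserved.
*/
static int
getattr_cache_sketch(int cache_valid, unsigned int *va_maskp,
    unsigned int cached_mask)
{
	if (cache_valid) {
		*va_maskp &= cached_mask;	/* drop unavailable bits */
		return (0);			/* cached attributes used */
	}
	return (1);				/* cache not usable */
}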
/*
* Get attributes over-the-wire and update attributes cache
* if no error occurred in the over-the-wire operation.
* Return 0 if successful, otherwise error.
*/
int
{
int error;
int douprintf;
hrtime_t t;
return (error);
}
douprintf = 1;
t = gethrtime();
if (!error) {
if (!error)
else {
}
}
return (error);
}
/*
* Return either cached or remote attributes. If remote attributes are
* fetched, use them to check and invalidate caches, then cache the new
* attributes.
*/
int
{
int error;
/*
* If we've got cached attributes, we're done, otherwise go
* to the server to get attributes, which will update the cache
* in the process.
*/
if (error)
/* Return the client's view of file size */
return (error);
}
/*
* Get attributes over-the-wire and update attributes cache
* if no error occurred in the over-the-wire operation.
* Return 0 if successful, otherwise error.
*/
int
{
int error;
int douprintf;
hrtime_t t;
douprintf = 1;
t = gethrtime();
if (error)
return (error);
if (error) {
return (error);
}
/*
* Catch status codes that indicate fattr3 to vattr translation failure
*/
return (0);
}
/*
* Return either cached or remote attributes. If remote attributes are
* fetched, use them to check and invalidate caches, then cache the new
* attributes.
*/
int
{
int error;
/*
* If we've got cached attributes, we're done, otherwise go
* to the server to get attributes, which will update the cache
* in the process.
*/
if (error)
/* Return the client's view of file size */
return (error);
}
};
/*
* Convert NFS Version 2 over the network attributes to the local
* virtual attributes. The mapping between the UID_NOBODY/GID_NOBODY
* network representation and the local representation is done here.
* Returns 0 for success, error if failed due to overflow.
*/
int
{
/* overflow in time attributes? */
#ifndef _LP64
if (!NFS2_FATTR_TIME_OK(na))
return (EOVERFLOW);
#endif
else
/*
* The NFS protocol defines times as unsigned, so don't sign-extend them
* unless the sysadmin has set nfs_allow_preepoch_time.
*/
/*
* Shannon's law - uncompress the received dev_t
* if the top half of it is zero, indicating a response
* from an `older style' OS. Except for when it is a
* `new style' OS sending the maj device of zero,
* in which case the algorithm still works because the
* fact that it is a new style server
* is hidden by the minor device not being greater
* than 255 (a requirement in this case).
*/
else
case NFBLK:
break;
case NFCHR:
break;
case NFSOC:
default:
break;
}
/*
* This bit of ugliness is a hack to preserve the
* over-the-wire protocols for named-pipe vnodes.
* It remaps the special over-the-wire type to the
* VFIFO type. (see note in nfs.h)
*/
}
return (0);
}
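/*
* Editor's illustrative sketch (not part of the original source): the
* "Shannon's law" dev_t expansion described in the comment above.  An
* old-style server packs the device as 8 bits of major and 8 bits of
* minor; a new-style server sends an already-expanded value.  The
* 16/16 split used here for the expanded form is an assumption for
* illustration only, not the real expldev()/makedevice() encoding.
*/
static unsigned int
expand_rdev_sketch(unsigned int otw_rdev)
{
	unsigned int maj, min;

	if ((otw_rdev & 0xffff0000u) == 0) {
		/* Old-style server (or new-style with major 0, minor <= 255). */
		maj = (otw_rdev >> 8) & 0xff;
		min = otw_rdev & 0xff;
	} else {
		/* Already expanded by a new-style server. */
		maj = (otw_rdev >> 16) & 0xffff;
		min = otw_rdev & 0xffff;
	}
	return ((maj << 16) | min);	/* hypothetical local encoding */
}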
/*
* Convert NFS Version 3 over the network attributes to the local
* virtual attributes. The mapping between the UID_NOBODY/GID_NOBODY
* network representation and the local representation is done here.
*/
};
int
{
#ifndef _LP64
/* overflow in time attributes? */
if (!NFS3_FATTR_TIME_OK(na))
return (EOVERFLOW);
#endif
/* file too big */
return (EFBIG);
else
/*
* The NFS protocol defines times as unsigned, so don't sign-extend them
* unless the sysadmin has set nfs_allow_preepoch_time.
*/
case NF3BLK:
vap->va_nblocks = 0;
break;
case NF3CHR:
vap->va_nblocks = 0;
break;
case NF3REG:
case NF3DIR:
case NF3LNK:
break;
case NF3SOCK:
case NF3FIFO:
default:
vap->va_nblocks = 0;
break;
}
return (0);
}
/*
* Asynchronous I/O parameters. nfs_async_threads is the high-water mark
* for the demand-based allocation of async threads per-mount. The
* nfs_async_timeout is the amount of time a thread will live after it
* becomes idle, unless new I/O requests are received before the thread
* dies. See nfs_async_putpage and nfs_async_start.
*/
static void nfs_async_start(struct vfs *);
static void nfs_async_pgops_start(struct vfs *);
static void nfs_async_common_start(struct vfs *, int);
static void
{
}
}
/*
* Cross-zone thread creation and NFS access are disallowed, yet fsflush() and
* pageout(), running in the global zone, have legitimate reasons to do
* VOP_PUTPAGE(B_ASYNC) on other zones' NFS mounts. We avoid the problem by
* use of a per-mount "asynchronous requests manager thread" which is
* signaled by the various asynchronous work routines when there is
* asynchronous work to be done. It is responsible for creating new
* worker threads if necessary, and notifying existing worker threads
* that there is work to be done.
*
* In other words, it will "take the specifications from the customers and
* give them to the engineers."
*
* Worker threads die off of their own accord if they are no longer
* needed.
*
* This thread is killed when the zone is going away or the filesystem
* is being unmounted.
*/
void
{
"nfs_async_manager");
/*
* We want to stash the max number of threads that this mount was
* allowed so we can use it later when the variable is set to zero as
*
* We want to be able to create at least one thread to handle
* asynchronous inactive calls.
*/
/*
* We don't want to wait for mi_max_threads to go to zero, since that
* happens as part of a failed unmount, but this thread should only
*
* Once MI_ASYNC_MGR_STOP is set, no more async operations will be
* attempted: the various _async_*() functions know to do things
* inline if mi_max_threads == 0. Henceforth we just drain out the
* outstanding requests.
*
* Note that we still create zthreads even if we notice the zone is
* shutting down (MI_ASYNC_MGR_STOP is set); this may cause the zone
* shutdown sequence to take slightly longer in some cases, but
* doesn't violate the protocol, as all threads will exit as soon as
* they're done processing the remaining requests.
*/
for (;;) {
while (mi->mi_async_req_count > 0) {
/*
* Paranoia: If the mount started out having
* (mi->mi_max_threads == 0), and the value was
* later changed (via a debugger or somesuch),
* we could be confused since we will think we
* can't create any threads, and the calling
* code (which looks at the current value of
* mi->mi_max_threads, now non-zero) thinks we
* can.
*
* So, because we're paranoid, we create threads
* up to the maximum of the original and the
* current value. This means that future
* (debugger-induced) lowerings of
* mi->mi_max_threads are ignored for our
* purposes, but who told them they could change
* random values on a live kernel anyhow?
*/
vfsp, 0, minclsyspri);
(void) zthread_create(NULL, 0,
}
mi->mi_async_req_count--;
}
break;
}
}
/*
* Let everyone know we're done.
*/
/*
* There is no explicit call to mutex_exit(&mi->mi_async_lock)
* since CALLB_CPR_EXIT is actually responsible for releasing
* 'mi_async_lock'.
*/
zthread_exit();
}
/*
* Signal (and wait for) the async manager thread to clean up and go away.
*/
void
{
}
int
{
/*
* If addr falls in a different segment, don't bother doing readahead.
*/
return (-1);
/*
* If we can't allocate a request structure, punt on the readahead.
*/
return (-1);
/*
* If a lock operation is pending, don't initiate any new
* readaheads. Otherwise, bump r_count to indicate the new
* asynchronous I/O.
*/
return (-1);
}
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, don't bother with readahead.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return (0);
return (-1);
}
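/*
* Editor's illustrative sketch (not part of the original source): the
* enqueue pattern shared by the readahead/putpage/pageio/readdir/commit
* paths in this file.  A request is linked on the tail of the per-type
* queue, the request counter is bumped and the manager thread is
* signalled; the field and helper names here are simplified stand-ins
* for the real mi_async_reqs[] bookkeeping.
*/
struct async_req_sketch {
	struct async_req_sketch *next;
};

struct async_queue_sketch {
	struct async_req_sketch *head;
	struct async_req_sketch *tail;
	int req_count;
};

static void
async_enqueue_sketch(struct async_queue_sketch *q,
    struct async_req_sketch *req, void (*signal_manager)(void))
{
	req->next = NULL;
	if (q->tail == NULL) {
		q->head = q->tail = req;	/* queue was empty */
	} else {
		q->tail->next = req;		/* append at the tail */
		q->tail = req;
	}
	q->req_count++;
	signal_manager();	/* wake the async manager thread */
}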
int
{
/*
* If we can't allocate a request structure, do the putpage
* operation synchronously in this thread's context.
*/
goto noasync;
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, then make a synchronous request.
* This check is done a second time in case async io was disabled
* while this thread was blocked waiting for memory pressure to
* reduce or for the queue to drain.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return (0);
}
/*
* we refuse to do a sync write, because this may hang
* pageout (and the machine). In this case, we just
* re-mark the page as dirty and punt on the page.
*
* Make sure B_FORCE isn't set. We can re-mark the
* pages as dirty and unlock the pages in one swoop by
* passing in B_ERROR to pvn_write_done(). However,
* we should make sure B_FORCE isn't set - we don't
* want the page tossed before it gets written out.
*/
return (0);
}
/*
* So this was a cross-zone sync putpage. We pass in B_ERROR
* to pvn_write_done() to re-mark the pages as dirty and unlock
* them.
*
* We don't want to clear B_FORCE here as the caller presumably
* knows what they're doing if they set it.
*/
return (EPERM);
}
}
int
{
/*
* If we can't allocate a request structure, do the pageio
* request synchronously in this thread's context.
*/
goto noasync;
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, then make a synchronous request.
* This check is done a second time in case async io was disabled
* while this thread was blocked waiting for memory pressure to
* reduce or for the queue to drain.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return (0);
}
/*
* If we can't do it ASYNC, for reads we do nothing (but cleanup
* the page list), for writes we do it synchronously, except for
* proc_pageout/proc_fsflush as described below.
*/
return (0);
}
/*
* we refuse to do a sync write, because this may hang
* pageout (and the machine). In this case, we just
* re-mark the page as dirty and punt on the page.
*
* Make sure B_FORCE isn't set. We can re-mark the
* pages as dirty and unlock the pages in one swoop by
* passing in B_ERROR to pvn_write_done(). However,
* we should make sure B_FORCE isn't set - we don't
* want the page tossed before it gets written out.
*/
return (0);
}
/*
* So this was a cross-zone sync pageio. We pass in B_ERROR
* to pvn_write_done() to re-mark the pages as dirty and unlock
* them.
*
* We don't want to clear B_FORCE here as the caller presumably
* knows what they're doing if they set it.
*/
return (EPERM);
}
}
void
{
/*
* If we can't allocate a request structure, do the readdir
* operation synchronously in this thread's context.
*/
goto noasync;
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, then make a synchronous request.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return;
}
/*
* Check the flag to see if RDDIRWAIT is set. If RDDIRWAIT
* is set, wakeup the thread sleeping in cv_wait_sig().
* The woken up thread will reset the flag to RDDIR and will
* continue with the readdir operation.
*/
}
}
void
cred_t *))
{
/*
* If we can't allocate a request structure, do the commit
* operation synchronously in this thread's context.
*/
goto noasync;
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, then make a synchronous request.
* This check is done a second time in case async io was disabled
* while this thread was blocked waiting for memory pressure to
* reduce or for the queue to drain.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return;
}
}
return;
}
}
void
{
#ifdef DEBUG
#endif
/*
* Note that we don't check mi->mi_max_threads here, since we
* *need* to get rid of this vnode regardless of whether someone
* set nfs3_max_threads/nfs_max_threads to zero in /etc/system.
*
* The manager thread knows about this and is willing to create
* at least one thread to accommodate us.
*/
/*
* We can't do an over-the-wire call since we're in the wrong
* zone, so we need to clean up state as best we can and then
* throw away the vnode.
*/
char *unlname;
} else {
}
/*
* No need to explicitly throw away any cached pages. The
* eventual rinactive() will attempt a synchronous
* VOP_PUTPAGE() which will immediately fail since the request
* is coming from the wrong zone, and then will proceed to call
* nfs_invalidate_pages() which will clean things up for us.
*/
return;
}
} else {
}
/*
* Don't increment r_count, since we're trying to get rid of the vnode.
*/
mi->mi_async_req_count++;
}
static void
{
}
static void
{
}
/*
* The async queues for each mounted file system are arranged as a
* set of queues, one for each async i/o type. Requests are taken
* from the queues in a round-robin fashion. A number of consecutive
* requests are taken from each queue before moving on to the next
* queue. This functionality may allow the NFS Version 2 server to do
* write clustering, even if the client is mixing writes and reads
* because it will take multiple write requests from the queue
* before processing any of the other async i/o types.
*
* XXX The nfs_async_common_start thread is unsafe in the light of the present
* model defined by cpr to suspend the system. Specifically over the
* wire calls are cpr-unsafe. The thread should be reevaluated in
* case of future updates to the cpr model.
*/
static void
{
int i;
int async_types;
if (async_queue == NFS_ASYNC_QUEUE) {
} else {
}
/*
* Dynamic initialization of nfs_async_timeout to allow nfs to be
* built in an implementation-independent manner.
*/
if (nfs_async_timeout == -1)
for (;;) {
/*
* Find the next queue containing an entry. We start
* at the current queue pointer and then round robin
* through all of them until we either find a non-empty
* queue or have looked through all of them.
*/
for (i = 0; i < async_types; i++) {
break;
&mi->mi_async_reqs[0];
}
}
/*
* If we didn't find an entry, then block until woken up
* again and then look through the queues again.
*/
/*
* Exiting is considered to be safe for CPR as well
*/
/*
* Wakeup thread waiting to unmount the file
* system only if all async threads are inactive.
*
* If we've timed-out and there's nothing to do,
* then get rid of this thread.
*/
zthread_exit();
/* NOTREACHED */
}
continue;
}
time_left = 1;
/*
* Remove the request from the async queue and then
* update the current async request queue pointer. If
* the current queue is empty or we have removed enough
* consecutive entries from it, then reset the counter
* for this queue and then move the current pointer to
* the next queue.
*/
&mi->mi_async_reqs[0];
}
}
}
/*
* Obtain arguments from the async request structure.
*/
}
/*
* Now, release the vnode and free the credentials
* structure.
*/
/*
* Reacquire the mutex because it will be needed above.
*/
}
}
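/*
* Editor's illustrative sketch (not part of the original source): the
* round-robin scan described in the comment before this routine.  It
* reuses the async_req_sketch type from the earlier sketch; the
* per-type queue array and max_consecutive are simplified stand-ins
* for mi_async_reqs[] and the "consecutive requests per queue" count.
*/
#define	SKETCH_NTYPES	5

struct rr_sketch {
	struct async_req_sketch *queue[SKETCH_NTYPES];	/* per-type heads */
	int cur;		/* queue the next request is taken from */
	int taken;		/* consecutive requests taken from cur */
	int max_consecutive;	/* switch queues after this many */
};

static struct async_req_sketch *
rr_dequeue_sketch(struct rr_sketch *r)
{
	struct async_req_sketch *req;
	int i;

	/* Find the next non-empty queue, starting at the current one. */
	for (i = 0; i < SKETCH_NTYPES; i++) {
		if (r->queue[r->cur] != NULL)
			break;
		r->cur = (r->cur + 1) % SKETCH_NTYPES;
		r->taken = 0;
	}
	if (r->queue[r->cur] == NULL)
		return (NULL);		/* nothing queued anywhere */

	req = r->queue[r->cur];
	r->queue[r->cur] = req->next;

	/*
	 * After enough consecutive requests from one queue (or once it
	 * drains), move on so no i/o type can starve the others.
	 */
	if (++r->taken >= r->max_consecutive || r->queue[r->cur] == NULL) {
		r->cur = (r->cur + 1) % SKETCH_NTYPES;
		r->taken = 0;
	}
	return (req);
}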
void
{
/*
* Wait for all outstanding async operations to complete and for the
* worker threads to exit.
*/
mi->mi_max_threads = 0;
}
/*
* nfs_async_stop_sig:
* Wait for all outstanding putpage operations to complete. If a signal
* is delivered, we will abort and return non-zero. If we can put all the
* pages we will return 0. This routine is called from nfs_unmount and
* nfs3_unmount to make these operations interruptible.
*/
int
{
int rval;
/*
* Wait for all outstanding async operations to complete and for the
* worker threads to exit.
*/
mi->mi_max_threads = 0;
/*
* Tell all the worker threads to exit.
*/
break;
}
if (rval)
return (rval);
}
int
{
int pagecreate;
int n;
int saved_n;
int error;
int sm_error;
if (!vpm_enable) {
}
/*
* Move bytes in at most PAGESIZE chunks. We must avoid
* spanning pages in uiomove() because page faults may cause
* the cache to be invalidated out from under us. The r_size is not
* updated until after the uiomove. If we push the last page of a
* file before r_size is correct, we will lose the data written past
* the current (and invalid) r_size.
*/
do {
pagecreate = 0;
/*
* n is the number of bytes required to satisfy the request
* or the number of bytes to fill out the page.
*/
/*
* Check to see if we can skip reading in the page
* and just allocate the memory. We can do this
* if we are going to rewrite the entire mapping
* or if we are going to write to or beyond the current
* end of file from the beginning of the mapping.
*
* The read of r_size is now protected by r_statelock.
*/
/*
* When pgcreated is nonzero the caller has already done
* a segmap_getmapflt with forcefault 0 and S_WRITE. With
* segkpm this means we already have at least one page
* created and mapped at base.
*/
pagecreate = pgcreated ||
((offset & PAGEOFFSET) == 0 &&
if (!vpm_enable && pagecreate) {
/*
* The last argument tells segmap_pagecreate() to
* always lock the page, as opposed to sometimes
* returning with the page locked. This way we avoid a
* fault on the ensuing uiomove(), but also
* more importantly (to fix bug 1094402) we can
* call segmap_fault() to unlock the page in all
* cases. An alternative would be to modify
* segmap_pagecreate() to tell us when it is
* locking a page, but that's a fairly major
* interface change.
*/
if (pgcreated == 0)
(uint_t)n, 1);
saved_base = base;
saved_n = n;
}
/*
* The number of bytes of data in the last page cannot be
* accurately determined while the page is being uiomove'd
* to and the size of the file is being updated.
* Thus, inform threads which need to know accurately
* how much data is in the last page of the file. They
* will not do the i/o immediately, but will arrange for
* the i/o to happen later when this modify operation
* will have finished.
*/
if (vpm_enable) {
/*
* Copy data. If new pages are created, part of
* the page that is not written will be initialized
* with zeros.
*/
} else {
}
/*
* r_size is the maximum number of
* bytes known to be in the file.
* Make sure it is at least as high as the
* first unwritten byte pointed to by uio_loffset.
*/
/* n = # of bytes written */
if (!vpm_enable) {
base += n;
}
tcount -= n;
/*
* If we created pages w/o initializing them completely,
* we need to zero the part that wasn't set up.
* This happens in most EOF write cases and if
* we had some sort of error during the uiomove.
*/
if (!vpm_enable && pagecreate) {
if (pgcreated) {
/*
* Caller is responsible for this page,
* it was not created in this loop.
*/
pgcreated = 0;
} else {
/*
* For bug 1094402: segmap_pagecreate locks
* page. Unlock it. This also unlocks the
* pages allocated by page_create_va() in
* segmap_pagecreate().
*/
if (error == 0)
}
}
return (error);
}
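/*
* Editor's illustrative sketch (not part of the original source): how
* the loop above carves a write into chunks that never span a page
* boundary.  PAGESIZE_SKETCH and copy_chunk() are stand-ins for the
* real PAGESIZE constant and the uiomove() call.
*/
#define	PAGESIZE_SKETCH	4096

static void
write_in_page_chunks_sketch(long long offset, long long tcount,
    void (*copy_chunk)(long long off, long long len))
{
	long long n;

	while (tcount > 0) {
		/*
		 * n is the number of bytes needed to satisfy the
		 * request, or the number of bytes left in this page,
		 * whichever is smaller.
		 */
		n = PAGESIZE_SKETCH - (offset & (PAGESIZE_SKETCH - 1));
		if (n > tcount)
			n = tcount;

		copy_chunk(offset, n);	/* stands in for uiomove() */
		offset += n;
		tcount -= n;
	}
}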
int
{
int error;
int rdirty;
int err;
if (!vn_has_cached_data(vp))
return (0);
/*
* If ROUTOFSPACE is set, then all writes turn into B_INVAL
* writes. B_FORCE is set to force the VM system to actually
* invalidate the pages, even if the i/o failed. The pages
* need to get invalidated because they can't be written out
* because there isn't any space left on either the server's
* file system or in the user's disk quota. The B_FREE bit
* is cleared to avoid confusion as to whether this is a
* request to place the page on the freelist or to destroy
* it.
*/
if (len == 0) {
/*
* If doing a full file synchronous operation, then clear
* the RDIRTY bit. If a page gets dirtied while the flush
* is happening, then RDIRTY will get set again. The
* RDIRTY bit must get cleared before the flush so that
* we don't lose this information.
*
* If there are no full file async write operations
* pending and RDIRTY bit is set, clear it.
*/
if (off == (u_offset_t)0 &&
}
} else
rdirty = 0;
/*
* Search the entire vp list for pages >= off, and flush
* the dirty pages.
*/
/*
* If an error occurred and the file was marked as dirty
* before and we aren't forcibly invalidating pages, then
* reset the RDIRTY flag.
*/
}
} else {
/*
* Do a range from [off...off + len) looking for pages
* to deal with.
*/
error = 0;
#ifdef lint
io_len = 0;
#endif
/*
* If we are not invalidating, synchronously
* freeing or writing pages use the routine
* page_lookup_nowait() to prevent reclaiming
* them from the free list.
*/
} else {
}
else {
if (!error)
/*
* "io_off" and "io_len" are returned as
* the range of pages we actually wrote.
* This allows us to skip ahead more quickly
* since several pages may've been dealt
* with by this iteration of the loop.
*/
}
}
}
return (error);
}
void
{
if (off == (u_offset_t)0) {
}
}
static int nfs_write_error_to_cons_only = 0;
/*
* Print a file handle
*/
void
{
int *ip;
char *buf;
char *cp;
/*
* 13 == "(file handle:"
* maximum of NFS_FHANDLE / sizeof (*ip) elements in fh_buf times
* 1 == ' '
* 8 == maximum strlen of "%x"
* 3 == ")\n\0"
*/
return;
while (*cp != '\0')
cp++;
ip++) {
while (*cp != '\0')
cp++;
}
}
/*
* Notify the system administrator that an NFS write error has
* occurred.
*/
void
{
/*
* In case of forced unmount or zone shutdown, do not print any
* messages since it can flood the console with error messages.
*/
return;
/*
* No use in flooding the console with ENOSPC
* messages from the same file system.
*/
now = ddi_get_lbolt();
#ifdef DEBUG
#else
#endif
MSG("^File: userid=%d, groupid=%d\n"),
MSG("^User: userid=%d, groupid=%d\n"),
}
}
#ifdef DEBUG
MSG("^nfs_bio: cred is%s kcred\n"),
}
#endif
}
}
/* ARGSUSED */
static void *
{
return (mig);
}
/*
* Callback routine to tell all NFS mounts in the zone to stop creating new
* threads. Existing threads should exit.
*/
/* ARGSUSED */
static void
{
/*
* If we've done the shutdown work for this FS, skip.
* Once we go off the end of the list, we're done.
*/
continue;
/*
* We will do work, so not done. Get a hold on the FS.
*/
/*
* purge the DNLC for this filesystem
*/
/*
* Tell existing async worker threads to exit.
*/
mi->mi_max_threads = 0;
/*
* Set MI_ASYNC_MGR_STOP so the async manager thread starts
* getting ready to exit when it's done with its current work.
* Also set MI_DEAD to note we've acted on this FS.
*/
/*
* Wake up the async manager thread.
*/
/*
* Drop lock and release FS, which may change list, then repeat.
* We're done when every mi has been done or the list is empty.
*/
goto again;
}
}
static void
{
}
/* ARGSUSED */
static void
{
/* Still waiting for VFS_FREEVFS() */
return;
}
}
/*
* Add an NFS mount to the per-zone list of NFS mounts.
*/
void
{
}
/*
* Remove an NFS mount from the per-zone list of NFS mounts.
*/
static void
{
/*
* We can be called asynchronously by VFS_FREEVFS() after the zone
* mi globals.
*/
return;
}
}
/*
* NFS Client initialization routine. This routine should only be called
* once. It performs the following tasks:
* - Initialize all global locks
* - Call sub-initialization routines (localize access to variables)
*/
int
nfs_clntinit(void)
{
#ifdef DEBUG
#endif
int error;
#ifdef DEBUG
#endif
error = nfs_subrinit();
if (error)
return (error);
error = nfs_vfsinit();
if (error) {
/*
* Cleanup nfs_subrinit() work
*/
nfs_subrfini();
return (error);
}
#ifdef DEBUG
nfs_clntup = B_TRUE;
#endif
return (0);
}
/*
* This routine is only called if the NFS Client has been initialized but
* the module failed to be installed. This routine will cleanup the previously
* allocated/initialized work.
*/
void
nfs_clntfini(void)
{
(void) zone_key_delete(mi_list_key);
nfs_subrfini();
nfs_vfsfini();
}
/*
* nfs_lockrelease:
*
* Release any locks on the given vnode that are held by the current
* process.
*/
void
{
char *buf;
int remote_lock_possible;
int ret;
/*
* Generate an explicit unlock operation for the entire file. As a
* partial optimization, only generate the unlock if there is a
* lock registered for the file. We could check whether this
* particular process has any locks on the file, but that would
* require the local locking code to provide yet another query
* routine. Note that no explicit synchronization is needed here.
* At worst, flk_has_remote_locks() will return a false positive,
* in which case the unlock call wastes time but doesn't harm
* correctness.
*
* In addition, an unlock request is generated if the process
* is listed as possibly having a lock on the file because the
* server and client lock managers may have gotten out of sync.
* N.B. It is important to make sure nfs_remove_locking_id() is
* called here even if flk_has_remote_locks(vp) reports true.
* If it is not called and there is an entry on the process id
* list, that entry will never get removed.
*/
NULL);
if (ret != 0) {
/*
* If VOP_FRLOCK fails, make sure we unregister
* local locks before we continue.
*/
#ifdef DEBUG
"NFS lock release error on vp %p: %m.\n",
#endif
}
/*
* The call to VOP_FRLOCK may put the pid back on the
* list. We need to remove it.
*/
}
/*
* As long as the vp has a share matching our pid,
* pluck it off and unshare it. There are circumstances in
* which the call to nfs_remove_locking_id() may put the
* owner back on the list, in which case we simply do a
* redundant and harmless unshare.
*/
#ifdef DEBUG
if (ret != 0) {
"NFS share release error on vp %p: %m.\n",
}
#endif
}
}
/*
* nfs_lockcompletion:
*
* If the vnode has a lock that makes it unsafe to cache the file, mark it
* as non-cachable (set the VNOCACHE bit).
*/
void
{
#ifdef DEBUG
#endif
if (!lm_safemap(vp)) {
} else {
}
}
/*
* The cached attributes of the file are stale after acquiring
* the lock on the file. They were updated when the file was
* opened, but not updated when the lock was acquired. Therefore the
* cached attributes are invalidated after the lock is obtained.
*/
}
/*
* The lock manager holds state making it possible for the client
* and server to be out of sync. For example, if the response from
* the server granting a lock request is lost, the server will think
* the lock is granted and the client will think the lock is lost.
* The client can tell when it is not positive if it is in sync with
* the server.
*
* To deal with this, a list of processes for which the client is
* not sure if the server holds a lock is attached to the rnode.
* When such a process closes the rnode, an unlock request is sent
* to the server to unlock the entire file.
*
* The list is kept as a singly linked, NULL-terminated list.
* Because it is only added to under extreme error conditions, the
* list shouldn't get very big. DEBUG kernels print a message if
* the list gets bigger than nfs_lmpl_high_water. This is arbitrarily
* chosen to be 8, but can be tuned at runtime.
*/
#ifdef DEBUG
/* int nfs_lmpl_high_water = 8; */
int nfs_cnt_add_locking_id = 0;
int nfs_len_add_locking_id = 0;
#endif /* DEBUG */
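/*
* Editor's illustrative sketch (not part of the original source): the
* singly linked, NULL-terminated pid list described in the comment
* above, with the "add only if not already present" behaviour of the
* add routine that follows.  The node layout is a simplified stand-in
* for the real lmpl_t.
*/
struct lmpl_sketch {
	long pid;
	struct lmpl_sketch *next;
};

static int
add_locking_id_sketch(struct lmpl_sketch **headp, struct lmpl_sketch *newp,
    long pid)
{
	struct lmpl_sketch **lpp;

	/* Walk to the end, bailing out if the pid is already listed. */
	for (lpp = headp; *lpp != NULL; lpp = &(*lpp)->next) {
		if ((*lpp)->pid == pid)
			return (0);	/* already recorded; caller frees newp */
	}
	newp->pid = pid;
	newp->next = NULL;
	*lpp = newp;			/* append at the tail */
	return (1);
}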
/*
* Record that the nfs lock manager server may be holding a lock on
* a vnode for a process.
*
* Because the nfs lock manager server holds state, it is possible
* for the server to get out of sync with the client. This routine is called
* from the client when it is no longer sure if the server is in sync
* with the client. nfs_lockrelease() will then notice this and send
* an unlock request when the file is closed.
*/
void
{
#ifdef DEBUG
#endif /* DEBUG */
#ifdef DEBUG
#endif /* DEBUG */
/*
* allocate new lmpl_t now so we don't sleep
* later after grabbing mutexes
*/
#ifdef DEBUG
} else {
}
#endif
/*
* Add this id to the list for this rnode only if the
* rnode is active and the id is not already there.
*/
break;
}
#ifdef DEBUG
++list_len;
#endif /* DEBUG */
}
#ifdef DEBUG
if (list_len > nfs_len_add_locking_id) {
}
if (list_len > nfs_lmpl_high_water) {
}
#endif /* DEBUG */
}
#ifdef DEBUG
if (share_debug) {
int nitems = 0;
int npids = 0;
int nowners = 0;
/*
* Count the number of things left on r_lmpl after the remove.
*/
nitems++;
npids++;
nowners++;
} else {
"unrecognized lmpl_type %d",
}
}
"OWNs = %d items left on r_lmpl\n",
}
#endif
}
/*
* Remove an id from the lock manager id list.
*
* If the id is not in the list return 0. If it was found and
* removed, return 1.
*/
static int
{
int rv = 0;
/*
* Search through the list and remove the entry for this id
* if it is there. The special case id == NULL allows removal
* of the first share on the r_lmpl list belonging to the
* current process (if any), without regard to further details
* of its identity.
*/
}
rv = 1;
break;
}
}
#ifdef DEBUG
if (share_debug) {
int nitems = 0;
int npids = 0;
int nowners = 0;
/*
* Count the number of things left on r_lmpl after the remove.
*/
nitems++;
npids++;
nowners++;
} else {
"nrli: unrecognized lmpl_type %d",
}
}
"nrli(%s): %d PIDs + %d OWNs = %d items left on r_lmpl\n",
nitems);
}
#endif
return (rv);
}
void
{
/*
* Remove the node from the global list before we start tearing it down.
*/
if (mi->mi_klmconfig) {
}
}
static int
{
int i;
/* this is a read-only kstat. Bail out on a write */
if (rw == KSTAT_WRITE)
return (EACCES);
/*
* We don't want to wait here as kstat_chain_lock could be held by
* dounmount(). dounmount() takes vfs_reflock before the chain lock
* and thus could lead to a deadlock.
*/
for (i = 0; i < NFS_CALLTYPES + 1; i++) {
}
return (0);
}
void
{
/*
* Create the version specific kstats.
*
* PSARC 2001/697 Contract Private Interface
* All nfs kstats are under SunMC contract
* Please refer to the PSARC listed above and contact
* SunMC before making any changes!
*
* Changes must be reviewed by Solaris File Sharing
* Changes must be communicated to contract-2001-697@sun.com
*
*/
if (mi->mi_io_kstats) {
}
}
}
{
delmap_call->error = 0;
return (delmap_call);
}
void
{
}
/*
* Searches for the current delmap caller (based on curthread) in the list of
* callers. If it is found, we remove it and free the delmap caller.
* Returns:
* 0 if the caller wasn't found
* 1 if the caller was found, removed and freed. *errp is set to what
* the result of the delmap was.
*/
int
{
/*
* If the list doesn't exist yet, we create it and return
* that the caller wasn't found. No list = no callers.
*/
/* The list does not exist */
return (0);
} else {
/* The list exists so search it */
delmap_call != NULL;
/* current caller is in the list */
return (1);
}
}
}
return (0);
}
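/*
* Editor's illustrative sketch (not part of the original source): the
* search-and-remove behaviour described in the comment above, over a
* simplified list.  The "caller" field stands in for the curthread
* comparison and errp receives the stored delmap result, as in the
* real routine.
*/
struct delmapcall_sketch {
	void *caller;			/* thread that issued the delmap */
	int error;			/* result of the delmap */
	struct delmapcall_sketch *next;
};

static int
find_and_delete_delmapcall_sketch(struct delmapcall_sketch **headp,
    void *curcaller, int *errp)
{
	struct delmapcall_sketch **lpp, *dc;

	for (lpp = headp; (dc = *lpp) != NULL; lpp = &dc->next) {
		if (dc->caller == curcaller) {
			*errp = dc->error;	/* report the saved result */
			*lpp = dc->next;	/* unlink; caller would free dc */
			return (1);		/* found, removed and freed */
		}
	}
	return (0);				/* caller was not in the list */
}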