/* nfs4_client.c, revision b9238976491622ad75a67ab0c12edf99e36212b9 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
* All Rights Reserved
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <nfs/nfs_clnt.h>
#include <nfs/nfs4_clnt.h>
/*
* Arguments to page-flush thread.
*/
typedef struct {
	vnode_t *vp;		/* file whose pages are to be flushed */
	cred_t *cr;		/* credentials to use for the flush */
} pgflush_t;
#ifdef DEBUG
int nfs4_sharedfh_debug;
int nfs4_fname_debug;
/* temporary: panic if v_type is inconsistent with r_attr va_type */
int nfs4_vtype_debug;
#endif
static time_t nfs4_client_resumed = 0;
static callb_id_t cid = 0;
static int nfs4renew(nfs4_server_t *);
static void nfs4_pgflush_thread(pgflush_t *);
static boolean_t nfs4_client_cpr_callb(void *, int);
struct mi4_globals {
	kmutex_t	mig_lock;	/* lock protecting mig_list */
	list_t		mig_list;	/* list of NFS v4 mounts in zone */
	boolean_t	mig_destructor_called;
};
static zone_key_t mi4_list_key;
/*
* Attributes caching:
*
* Attributes are cached in the rnode in struct vattr form.
* There is a time associated with the cached attributes (r_time_attr_inval)
* which tells whether the attributes are valid. The time is initialized
* to the difference between current time and the modify time of the vnode
* when new attributes are cached. This allows the attributes for
* files that have changed recently to be timed out sooner than for files
* that have not changed for a long time. There are minimum and maximum
* timeout values that can be set per mount point.
*/
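/*
 * Illustrative sketch (not part of the original interfaces) of the
 * timeout policy described above: the lifetime of the cached attributes
 * is proportional to the time since the file last changed, clamped to
 * per-mount minimum and maximum values. The helper name and parameters
 * are hypothetical; the real computation lives in nfs4_attrcache_va().
 */
static hrtime_t
attr_timeout_sketch(hrtime_t now, hrtime_t last_change,
    hrtime_t acmin, hrtime_t acmax)
{
	hrtime_t delta = now - last_change;	/* time since last change */

	if (delta < acmin)
		delta = acmin;		/* don't time out too quickly */
	else if (delta > acmax)
		delta = acmax;		/* don't cache for too long */
	return (now + delta);		/* analogue of r_time_attr_inval */
}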
/*
* If a cache purge is in progress, wait for it to finish.
*
* The current thread must not be in the middle of an
* nfs4_start_op/nfs4_end_op region. Otherwise, there could be a deadlock
* between this thread, a recovery thread, and the page flush thread.
*/
int
nfs4_waitfor_purge_complete(vnode_t *vp)
{
	rnode4_t *rp = VTOR4(vp);

	mutex_enter(&rp->r_statelock);
	while ((rp->r_serial != NULL && rp->r_serial != curthread) ||
	    ((rp->r_flags & R4PGFLUSH) && rp->r_pgflush != curthread)) {
		if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
			mutex_exit(&rp->r_statelock);
			return (EINTR);
		}
	}
	mutex_exit(&rp->r_statelock);
	return (0);
}
/*
* Validate caches by checking cached attributes. If they have timed out,
* then get new attributes from the server. As a side effect, cache
* invalidation is done if the attributes have changed.
*
* If the attributes have not timed out and if there is a cache
* invalidation being done by some other thread, then wait until that
* thread has completed the cache invalidation.
*/
int
nfs4_validate_caches(vnode_t *vp, cred_t *cr)
{
	int error;
	nfs4_ga_res_t gar;

	if (ATTRCACHE4_VALID(vp)) {
		error = nfs4_waitfor_purge_complete(vp);
		if (error)
			return (error);
		return (0);
	}

	return (nfs4_getattr_otw(vp, &gar, cr, 0));
}
/*
* Fill in attribute from the cache.
* If valid, then return 0 to indicate that no error occurred,
* otherwise return 1 to indicate that an error occurred.
*/
static int
nfs4_getattr_cache(vnode_t *vp, struct vattr *vap)
{
	rnode4_t *rp = VTOR4(vp);

	mutex_enter(&rp->r_statelock);
	if (ATTRCACHE4_VALID(vp)) {
		/*
		 * Cached attributes are valid
		 */
		*vap = rp->r_attr;
		mutex_exit(&rp->r_statelock);
		return (0);
	}
	mutex_exit(&rp->r_statelock);
	return (1);
}
/*
* If returned error is ESTALE flush all caches. The nfs4_purge_caches()
* call is synchronous because all the pages were invalidated by the
* nfs4_invalidate_pages() call.
*/
void
nfs4_purge_stale_fh(int errno, vnode_t *vp, cred_t *cr)
{
/* Ensure that the ..._end_op() call has been done */
return;
if (nfs4_has_pages(vp))
}
/*
* Purge all of the various NFS `data' caches. If "asyncpg" is TRUE, the
* page purge is done asynchronously.
*/
void
nfs4_purge_caches(vnode_t *vp, int purge_dnlc, cred_t *cr, int asyncpg)
{
char *contents;
int size;
int pgflush; /* are we the page flush thread? */
/*
* Purge the DNLC for any entries which refer to this file.
*/
/*
* Clear any readdir state bits and purge the readlink response cache.
*/
/*
* Purge pathconf cache too.
*/
}
/*
* Flush the page cache. If the current thread is the page flush
* thread, don't initiate a new page flush. There's no need for
* it, and doing it correctly is hard.
*/
if (!asyncpg) {
(void) nfs4_waitfor_purge_complete(vp);
} else {
/*
* We don't hold r_statelock while creating the
* thread, in case the call blocks. So we use a
* flag to indicate that a page flush thread is
* active.
*/
} else {
KM_SLEEP);
(void) zthread_create(NULL, 0,
nfs4_pgflush_thread, args, 0,
}
}
}
/*
* Flush the readdir response cache.
*/
}
/*
* Invalidate all pages for the given file, after writing back the dirty
* ones.
*/
static void
{
int error;
}
}
/*
* Page flush thread.
*/
static void
nfs4_pgflush_thread(pgflush_t *args)
{
/* remember which thread we are, so we don't deadlock ourselves */
zthread_exit();
}
/*
* Purge the readdir cache of all entries which are not currently
* being filled.
*/
void
nfs4_purge_rddir_cache(vnode_t *vp)
{
}
/*
* Set attributes cache for given vnode using virtual attributes. There is
* no cache validation, but if the attributes are deemed to be stale, they
* are ignored. This corresponds to nfs3_attrcache().
*
* Set the timeout value on the attribute cache and fill it
* with the passed in attributes.
*/
void
nfs4_attrcache_noinval(vnode_t *vp, nfs4_ga_res_t *garp, hrtime_t t)
{
if (rp->r_time_attr_saved <= t)
}
/*
* Use the passed in virtual attributes to check to see whether the
* data and metadata caches are valid, cache the new attributes, and
* then do the cache invalidation if required.
*
* The cache validation and caching of the new attributes is done
* atomically via the use of the mutex, r_statelock. If required,
* the cache invalidation is done atomically w.r.t. the cache
* validation and caching of the attributes via the pseudo lock,
* r_serial.
*
* This routine is used to do cache validation and attributes caching
* for operations with a single set of post operation attributes.
*/
void
nfs4_attr_cache(vnode_t *vp, nfs4_ga_res_t *garp, hrtime_t t,
    cred_t *cr, int async, change_info4 *cinfo)
{
int mtime_changed;
int ctime_changed;
/* Is curthread the recovery thread? */
/*
* If we're the recovery thread, then purge current attrs
* and bail out to avoid potential deadlock between another
* thread caching attrs (r_serial thread), recov thread,
* and an async writer thread.
*/
if (recov) {
return;
}
lwp->lwp_nostop++;
lwp->lwp_nostop--;
return;
}
}
lwp->lwp_nostop--;
}
/*
* If there is a page flush thread, the current thread needs to
* bail out, to prevent a possible deadlock between the current
* recovery thread, and the page flush thread. Expire the
* attribute cache, so that any attributes the current thread was
* going to set are not lost.
*/
return;
}
if (rp->r_time_attr_saved > t) {
/*
* Attributes have been cached since these attributes were
* probably made. If there is an inconsistency in what is
* cached, mark them invalid. If not, don't act on them.
*/
return;
}
set_time_cache_inval = 0;
if (cinfo) {
/*
* Only directory modifying callers pass non-NULL cinfo.
*/
/*
* If the cache timeout either doesn't exist or hasn't expired,
* and dir didn't change on server before dirmod op
* and dir didn't change after dirmod op but before getattr
* then there's a chance that the client's cached data for
* this object is current (not stale). No immediate cache
* flush is required.
*
*/
(garp->n4g_change_valid &&
/*
* cannot be blindly trusted. For this case, we tell
* nfs4_attrcache_va to cache the attrs but also
* establish an absolute maximum cache timeout. When
* the timeout is reached, caches will be flushed.
*/
set_time_cache_inval = 1;
mtime_changed = 0;
ctime_changed = 0;
} else {
/*
* We're not sure exactly what changed, but we know
* what to do: flush all caches for the dir and remove
* the attr timeout.
*
* a) timeout expired. flush all caches.
* b) r_change != cinfo.before. flush all caches.
* c) r_change == cinfo.before, but cinfo.after !=
* post-op getattr(change). flush all caches.
* d) post-op getattr(change) not provided by server.
* flush all caches.
*/
mtime_changed = 1;
ctime_changed = 1;
rp->r_time_cache_inval = 0;
}
} else {
mtime_changed = 1;
else
mtime_changed = 0;
ctime_changed = 1;
else
ctime_changed = 0;
} else {
mtime_changed = 0;
ctime_changed = 0;
}
}
if (!mtime_changed && !ctime_changed) {
return;
}
/*
* If we're the recov thread, then force async nfs4_purge_caches
* to avoid potential deadlock.
*/
if (mtime_changed)
if (ctime_changed) {
(void) nfs4_access_purge_rp(rp);
}
}
if (!was_serial) {
}
}
/*
* Set attributes cache for given vnode using virtual attributes.
*
* Set the timeout value on the attribute cache and fill it
* with the passed in attributes.
*
* The caller must be holding r_statelock.
*/
static void
nfs4_attrcache_va(vnode_t *vp, nfs4_ga_res_t *garp, int set_cache_timeout)
{
mntinfo4_t *mi;
/* Switch to master before checking v_flag */
/*
* Only establish a new cache timeout (if requested). Never
* extend a timeout. Never clear a timeout. Clearing a timeout
* is done by nfs4_update_dircaches (ancestor in our call chain)
*/
/*
* Delta is the number of nanoseconds that we will
* cache the attributes of the file. It is based on
* the number of nanoseconds since the last time that
* we detected a change. The assumption is that files
* that changed recently are likely to change again.
* There are enforced minimum and maximum values for
* regular files and for directories, though.
*
* Using the time since last change was detected
* eliminates direct comparison or calculation
* using mixed client and server times. NFS does
* not make any assumptions regarding the client
* and server clocks being synchronized.
*/
}
delta = 0;
else {
} else {
}
}
if (garp->n4g_change_valid)
/*
* The attributes that were returned may be valid and can
* be used, but they may not be allowed to be cached.
* Reset the timers to cause immediate invalidation and
* clear r_change so no VERIFY operations will succeed
*/
}
/*
* If mounted_on_fileid returned AND the object is a stub,
* then set object's va_nodeid to the mounted over fid
* returned by server.
*
* just set it to 0 for now. Eventually it would be
* better to set it to a hashed version of FH. This
* would probably be good enough to provide a unique
*
* We don't need to carry mounted_on_fileid in the
* rnode as long as the client never requests fileid
* without also requesting mounted_on_fileid. For
* now, it stays.
*/
if (garp->n4g_mon_fid_valid) {
}
/*
* Check to see if there are valid pathconf bits to
* cache in the rnode.
*/
if (garp->n4g_ext_res) {
} else {
}
}
}
/*
* Update the size of the file if there is no cached data or if
* the cached data is clean and there is no data being written
* out.
*/
(!vn_has_cached_data(vp) ||
}
}
/*
* Get attributes over-the-wire and update attributes cache
* if no error occurred in the over-the-wire operation.
* Return 0 if successful, otherwise error.
*/
int
nfs4_getattr_otw(vnode_t *vp, nfs4_ga_res_t *garp, cred_t *cr, int get_acl)
{
hrtime_t t;
recov_state.rs_flags = 0;
/* Save the original mount point security flavor */
&recov_state, NULL))) {
return (e.error);
}
t = gethrtime();
&recov_state, 1);
goto recov_retry;
}
}
if (!e.error) {
} else {
}
}
/*
* If getattr is done on a node that is a stub for a crossed
* mount point, keep the original secinfo flavor for
* the current file system, not the crossed one.
*/
return (e.error);
}
/*
* Generate a compound to get attributes over-the-wire.
*/
void
nfs4_getattr_otw_norecovery(vnode_t *vp, nfs4_ga_res_t *garp,
    nfs4_error_t *ep, cred_t *cr, int get_acl)
{
int doqueue;
/* putfh */
/* getattr */
/*
* Unlike nfs version 2 and 3, where getattr returns all the
* attributes, nfs version 4 returns only the ones explicitly
* asked for. This creates problems, as some system functions
* (e.g. cache check) require certain attributes and if the
* returned set omits them, those functions misbehave. It can also
* affect system utilities (e.g. "ls") that rely on the information
* to be there. This can lead to anything from system crashes to
* corrupted information processed by user apps.
* So to ensure that all bases are covered, request at least
* the AT_ALL attribute mask.
*/
if (get_acl)
doqueue = 1;
return;
return;
}
}
/*
* Return either cached or remote attributes. If we get remote
* attributes, use them to check and invalidate caches, then cache the
* new attributes.
*/
int
nfs4getattr(vnode_t *vp, vattr_t *vap, cred_t *cr)
{
int error;
/*
* If we've got cached attributes, we're done, otherwise go
* to the server to get attributes, which will update the cache
* in the process. Either way, use the cached attributes for
* the caller's vattr_t.
*
* Note that we ignore the gar set by the OTW call: the attr caching
* code may make adjustments when storing to the rnode, and we want
* to see those changes here.
*/
error = 0;
if (!ATTRCACHE4_VALID(vp)) {
}
if (!error)
/* Return the client's view of file size */
return (error);
}
int
nfs4_attr_otw(vnode_t *vp, nfs4_tag_type_t tag_type,
    nfs4_ga_res_t *garp, bitmap4 reqbitmap, cred_t *cr)
{
int doqueue;
recov_state.rs_flags = 0;
if (e.error)
return (e.error);
/* putfh */
/* getattr */
doqueue = 1;
return (e.error);
}
if (needrecov) {
"nfs4_attr_otw: initiating recovery\n"));
if (!e.error) {
}
goto recov_retry;
return (e.error);
}
} else {
garp, sizeof (nfs4_ga_res_t));
if (garp->n4g_ext_res &&
}
return (e.error);
}
/*
* Asynchronous I/O parameters. nfs_async_threads is the high-water mark
* for the demand-based allocation of async threads per-mount. The
* nfs_async_timeout is the amount of time a thread will live after it
* becomes idle, unless new I/O requests are received before the thread
* dies. See nfs4_async_putpage and nfs4_async_start.
*/
static void nfs4_async_start(struct vfs *);
static void
free_async_args4(struct nfs4_async_reqs *args)
{
}
}
/*
* Cross-zone thread creation and NFS access is disallowed, yet fsflush() and
* pageout(), running in the global zone, have legitimate reasons to do
* VOP_PUTPAGE(B_ASYNC) on other zones' NFS mounts. We avoid the problem by
* use of a per-mount "asynchronous requests manager thread" which is
* signaled by the various asynchronous work routines when there is
* asynchronous work to be done. It is responsible for creating new
* worker threads if necessary, and notifying existing worker threads
* that there is work to be done.
*
* In other words, it will "take the specifications from the customers and
* give them to the engineers."
*
* Worker threads die off of their own accord if they are no longer
* needed.
*
* This thread is killed when the zone is going away or the filesystem
* is being unmounted.
*/
void
nfs4_async_manager(vfs_t *vfsp)
{
mntinfo4_t *mi;
"nfs4_async_manager");
/*
* We want to stash the max number of threads that this mount was
* allowed so we can use it later when the variable is set to zero as
* part of the zone/mount going away.
*
* We want to be able to create at least one thread to handle
* asynchronous inactive calls.
*/
/*
* We don't want to wait for mi_max_threads to go to zero, since that
* happens as part of a failed unmount, but this thread should only
* exit when the mount is really going away.
*
* Once MI4_ASYNC_MGR_STOP is set, no more async operations will be
* attempted: the various _async_*() functions know to do things
* inline if mi_max_threads == 0. Henceforth we just drain out the
* outstanding requests.
*
* Note that we still create zthreads even if we notice the zone is
* shutting down (MI4_ASYNC_MGR_STOP is set); this may cause the zone
* shutdown sequence to take slightly longer in some cases, but
* doesn't violate the protocol, as all threads will exit as soon as
* they're done processing the remaining requests.
*/
mi->mi_async_req_count > 0) {
while (mi->mi_async_req_count > 0) {
/*
* Paranoia: If the mount started out having
* (mi->mi_max_threads == 0), and the value was
* later changed (via a debugger or somesuch),
* we could be confused since we will think we
* can't create any threads, and the calling
* code (which looks at the current value of
* mi->mi_max_threads, now non-zero) thinks we
* can.
*
* So, because we're paranoid, we create threads
* up to the maximum of the original and the
* current value. This means that future
* (debugger-induced) alterations of
* mi->mi_max_threads are ignored for our
* purposes, but who told them they could change
* random values on a live kernel anyhow?
*/
if (mi->mi_threads <
mi->mi_threads++;
vfsp, 0, minclsyspri);
}
mi->mi_async_req_count--;
}
}
/*
* Let everyone know we're done.
*/
/*
* Wake up the inactive thread.
*/
/*
* Wake up anyone sitting in nfs4_async_manager_stop()
*/
/*
* There is no explicit call to mutex_exit(&mi->mi_async_lock)
* since CALLB_CPR_EXIT is actually responsible for releasing
* 'mi_async_lock'.
*/
zthread_exit();
}
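/*
 * Sketch of the "paranoid" worker-creation rule used by the manager loop
 * above: create threads up to the maximum of the mount's original limit
 * and its current limit, so a debugger-induced change to mi_max_threads
 * cannot strand queued requests. max_threads_orig is a hypothetical
 * stand-in for wherever the manager stashed the original value.
 */
static void
maybe_create_worker_sketch(mntinfo4_t *mi, vfs_t *vfsp,
    uint_t max_threads_orig)
{
	ASSERT(MUTEX_HELD(&mi->mi_async_lock));
	if (mi->mi_threads < MAX(max_threads_orig, mi->mi_max_threads)) {
		mi->mi_threads++;	/* account for the new worker */
		(void) zthread_create(NULL, 0, nfs4_async_start,
		    vfsp, 0, minclsyspri);
	}
}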
/*
* Signal (and wait for) the async manager thread to clean up and go away.
*/
void
nfs4_async_manager_stop(vfs_t *vfsp)
{
/*
* Wait for the async manager thread to die.
*/
}
int
nfs4_async_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr,
    struct seg *seg, cred_t *cr, void (*readahead)(vnode_t *,
    u_offset_t, caddr_t, struct seg *, cred_t *))
{
mntinfo4_t *mi;
struct nfs4_async_reqs *args;
/*
* If addr falls in a different segment, don't bother doing readahead.
*/
return (-1);
/*
* If we can't allocate a request structure, punt on the readahead.
*/
return (-1);
/*
* If a lock operation is pending, don't initiate any new
* readaheads. Otherwise, bump r_count to indicate the new
* asynchronous I/O.
*/
return (-1);
}
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, don't bother readahead.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return (0);
return (-1);
}
/*
* The async queues for each mounted file system are arranged as a
* set of queues, one for each async i/o type. Requests are taken
* from the queues in a round-robin fashion. A number of consecutive
* requests are taken from each queue before moving on to the next
* queue. This functionality may allow the NFS Version 2 server to do
* write clustering, even if the client is mixing writes and reads
* because it will take multiple write requests from the queue
* before processing any of the other async i/o types.
*
* XXX The nfs4_async_start thread is unsafe in the light of the present
* model defined by cpr to suspend the system. Specifically over the
* wire calls are cpr-unsafe. The thread should be reevaluated in
* case of future updates to the cpr model.
*/
static void
nfs4_async_start(struct vfs *vfsp)
{
struct nfs4_async_reqs *args;
int i;
extern int nfs_async_timeout;
/*
* Dynamic initialization of nfs_async_timeout to allow nfs to be
* built in an implementation independent manner.
*/
if (nfs_async_timeout == -1)
for (;;) {
/*
* Find the next queue containing an entry. We start
* at the current queue pointer and then round robin
* through all of them until we either find a non-empty
* queue or have looked through all of them.
*/
for (i = 0; i < NFS4_ASYNC_TYPES; i++) {
break;
mi->mi_async_curr++;
if (mi->mi_async_curr ==
}
/*
* If we didn't find an entry, then block until woken up
* again and then look through the queues again.
*/
/*
* Exiting is considered to be safe for CPR as well
*/
/*
* Wakeup thread waiting to unmount the file
* system only if all async threads are inactive.
*
* If we've timed-out and there's nothing to do,
* then get rid of this thread.
*/
if (--mi->mi_threads == 0)
zthread_exit();
/* NOTREACHED */
}
continue;
} else {
time_left = 1;
}
/*
* Remove the request from the async queue and then
* update the current async request queue pointer. If
* the current queue is empty or we have removed enough
* consecutive entries from it, then reset the counter
* for this queue and then move the current pointer to
* the next queue.
*/
mi->mi_async_curr++;
if (mi->mi_async_curr ==
}
}
/*
* Obtain arguments from the async request structure.
*/
}
/*
* Now, release the vnode and free the credentials
* structure.
*/
/*
* Reacquire the mutex because it will be needed above.
*/
}
}
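/*
 * Sketch of the round-robin queue scan described before
 * nfs4_async_start(): starting at the current queue pointer, advance
 * (with wraparound) until a non-empty queue is found or all queues have
 * been examined. Field names follow mntinfo4_t; the helper itself is
 * illustrative only, assuming the caller holds mi_async_lock.
 */
static struct nfs4_async_reqs *
next_async_req_sketch(mntinfo4_t *mi)
{
	int i;

	ASSERT(MUTEX_HELD(&mi->mi_async_lock));
	for (i = 0; i < NFS4_ASYNC_TYPES; i++) {
		if (*mi->mi_async_curr != NULL)
			return (*mi->mi_async_curr);	/* non-empty queue */
		mi->mi_async_curr++;
		if (mi->mi_async_curr ==
		    &mi->mi_async_reqs[NFS4_ASYNC_TYPES])
			mi->mi_async_curr = &mi->mi_async_reqs[0]; /* wrap */
	}
	return (NULL);	/* all queues empty; the caller should block */
}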
/*
* nfs4_inactive_thread - look for vnodes that need over-the-wire calls as
* part of VOP_INACTIVE.
*/
void
nfs4_inactive_thread(mntinfo4_t *mi)
{
struct nfs4_async_reqs *args;
"nfs4_inactive_thread");
for (;;) {
/*
* We don't want to exit until the async manager is done
* with its work; hence the check for mi_manager_thread
* being NULL.
*
* The async manager thread will cv_broadcast() on
* mi_inact_req_cv when it's done, at which point we'll
* wake up and exit.
*/
goto die;
} else {
}
}
die:
/*
* There is no explicit call to mutex_exit(&mi->mi_async_lock) since
* CALLB_CPR_EXIT is actually responsible for releasing 'mi_async_lock'.
*/
"nfs4_inactive_thread exiting for vfs %p\n", (void *)vfsp));
zthread_exit();
/* NOTREACHED */
}
/*
* nfs_async_stop:
* Wait for all outstanding putpage operations and the inactive thread to
* complete; nfs4_async_stop_sig() without interruptibility.
*/
void
nfs4_async_stop(vfs_t *vfsp)
{
/*
* Wait for all outstanding async operations to complete and for
* worker threads to exit.
*/
mi->mi_max_threads = 0;
while (mi->mi_threads != 0)
/*
* Wait for the inactive thread to finish doing what it's doing. It
* won't exit until the last reference to the vfs_t goes away.
*/
}
}
}
/*
* nfs_async_stop_sig:
* Wait for all outstanding putpage operations and the inactive thread to
* complete. If a signal is delivered we will abort and return non-zero;
* otherwise return 0. Since this routine is called from nfs4_unmount, we
* need to make it interruptible.
*/
int
nfs4_async_stop_sig(vfs_t *vfsp)
{
/*
* Wait for all outstanding putpage operations to complete and for
* worker threads to exit.
*/
mi->mi_max_threads = 0;
while (mi->mi_threads != 0) {
goto interrupted;
}
}
/*
* Wait for the inactive thread to finish doing what it's doing. It
* won't exit until the last reference to the vfs_t goes away.
*/
&mi->mi_async_lock)) {
goto interrupted;
}
}
}
if (intr)
return (intr);
}
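/*
 * Sketch of the interruptible drain that nfs4_async_stop_sig() performs:
 * disable new workers, wake the existing ones, and wait for them to
 * exit, aborting with non-zero if a signal arrives. Simplified; the
 * real routine also waits out the inactive thread.
 */
static int
async_drain_sig_sketch(mntinfo4_t *mi)
{
	int intr = 0;

	mutex_enter(&mi->mi_async_lock);
	mi->mi_max_threads = 0;			/* no new worker threads */
	cv_broadcast(&mi->mi_async_work_cv);	/* wake idle workers */
	while (mi->mi_threads != 0) {
		/* cv_wait_sig() returns 0 when a signal is posted */
		if (!cv_wait_sig(&mi->mi_async_cv, &mi->mi_async_lock)) {
			intr = 1;
			break;
		}
	}
	mutex_exit(&mi->mi_async_lock);
	return (intr);
}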
int
nfs4_async_putapage(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
    int flags, cred_t *cr, int (*putapage)(vnode_t *, page_t *,
    u_offset_t *, size_t *, int, cred_t *))
{
mntinfo4_t *mi;
struct nfs4_async_reqs *args;
/*
* If we can't allocate a request structure, do the putpage
* operation synchronously in this thread's context.
*/
goto noasync;
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, then make a synchronous request.
* This check is done a second time in case async io was disabled
* while this thread was blocked waiting for memory pressure to
* reduce or for the queue to drain.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return (0);
/*
* If we get here in the context of pageout/fsflush,
* or we have run out of memory or we're attempting to
* unmount, we refuse to do a sync write, because this may
* hang pageout/fsflush and the machine. In this case,
* we just re-mark the page as dirty and punt on the page.
*
* Make sure B_FORCE isn't set. We can re-mark the
* pages as dirty and unlock the pages in one swoop by
* passing in B_ERROR to pvn_write_done(). However,
* we should make sure B_FORCE isn't set - we don't
* want the page tossed before it gets written out.
*/
return (0);
}
/*
* We'll get here only if (nfs_zone() != mi->mi_zone)
* which means that this was a cross-zone sync putpage.
*
* We pass in B_ERROR to pvn_write_done() to re-mark the pages
* as dirty and unlock them.
*
* We don't want to clear B_FORCE here as the caller presumably
* knows what they're doing if they set it.
*/
return (EPERM);
}
int
nfs4_async_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
    int flags, cred_t *cr, int (*pageio)(vnode_t *, page_t *, u_offset_t,
    size_t, int, cred_t *))
{
mntinfo4_t *mi;
struct nfs4_async_reqs *args;
/*
* If we can't allocate a request structure, do the pageio
* request synchronously in this thread's context.
*/
goto noasync;
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, then make a synchronous request.
* This check is done a second time in case async io was disabled
* while this thread was blocked waiting for memory pressure to
* reduce or for the queue to drain.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return (0);
/*
* If we can't do it ASYNC, for reads we do nothing (but cleanup
* the page list), for writes we do it synchronously, except for
* proc_pageout/proc_fsflush as described below.
*/
return (0);
}
/*
* If we get here in the context of pageout/fsflush,
* we refuse to do a sync write, because this may hang
* pageout/fsflush and the machine. In this case, we
* re-mark the page as dirty and punt on the page.
*
* Make sure B_FORCE isn't set. We can re-mark the
* pages as dirty and unlock the pages in one swoop by
* passing in B_ERROR to pvn_write_done(). However,
* we should make sure B_FORCE isn't set - we don't
* want the page tossed before it gets written out.
*/
return (0);
}
/*
* So this was a cross-zone sync pageio. We pass in B_ERROR
* to pvn_write_done() to re-mark the pages as dirty and unlock
* them.
*
* We don't want to clear B_FORCE here as the caller presumably
* knows what they're doing if they set it.
*/
return (EPERM);
}
}
void
nfs4_async_readdir(vnode_t *vp, rddir4_cache *rdc, cred_t *cr,
    int (*readdir)(vnode_t *, rddir4_cache *, cred_t *))
{
mntinfo4_t *mi;
struct nfs4_async_reqs *args;
/*
* If we can't allocate a request structure, skip the readdir.
*/
goto noasync;
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, then skip this request
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return;
/*
* Indicate that no one is trying to fill this entry and
* it still needs to be filled.
*/
}
void
nfs4_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
    cred_t *cr, void (*commit)(vnode_t *, page_t *, offset3, count3,
    cred_t *))
{
mntinfo4_t *mi;
struct nfs4_async_reqs *args;
/*
* If we can't allocate a request structure, do the commit
* operation synchronously in this thread's context.
*/
goto noasync;
#ifdef DEBUG
#endif
/*
* If asyncio has been disabled, then make a synchronous request.
* This check is done a second time in case async io was disabled
* while this thread was blocked waiting for memory pressure to
* reduce or for the queue to drain.
*/
if (mi->mi_max_threads == 0) {
goto noasync;
}
/*
* Link request structure into the async list and
* wakeup async thread to do the i/o.
*/
} else {
}
if (mi->mi_io_kstats) {
}
mi->mi_async_req_count++;
return;
}
return;
}
}
/*
* nfs4_async_inactive - hand off a VOP_INACTIVE call to a thread. The
* reference to the vnode is handed over to the thread; the caller should
* no longer refer to the vnode.
*
* Unlike most of the async routines, this handoff is needed for
* correctness reasons, not just performance. So doing operations in the
* context of the current thread is not an option.
*/
void
nfs4_async_inactive(vnode_t *vp, cred_t *cr)
{
mntinfo4_t *mi;
struct nfs4_async_reqs *args;
#ifdef DEBUG
#endif
/*
* Note that we don't check mi->mi_max_threads here, since we
* *need* to get rid of this vnode regardless of whether someone
* set nfs4_max_threads to zero in /etc/system.
*
* The manager thread knows about this and is willing to create
* at least one thread to accommodate us.
*/
char *unlname;
/*
* We just need to free up the memory associated with the
* vnode, which can be safely done from within the current
* context.
*/
}
/*
* No need to explicitly throw away any cached pages. The
* eventual r4inactive() will attempt a synchronous
* VOP_PUTPAGE() which will immediately fail since the request
* is coming from the wrong zone, and then will proceed to call
* nfs4_invalidate_pages() which will clean things up for us.
*
* Throw away the delegation here so rp4_addfree()'s attempt to
* return any existing delegations becomes a no-op.
*/
FALSE);
}
}
return;
}
/*
* We want to talk to the inactive thread.
*/
}
/*
* Enqueue the vnode and wake up either the special thread (empty
* list) or an async thread.
*/
} else {
}
if (signal_inactive_thread) {
} else {
mi->mi_async_req_count++;
}
}
int
writerp4(vnode_t *vp, caddr_t base, int tcount, struct uio *uio, int pgcreated)
{
int pagecreate;
int n;
int saved_n;
int error;
int sm_error;
if (!vpm_enable) {
}
/*
* Move bytes in at most PAGESIZE chunks. We must avoid
* spanning pages in uiomove() because page faults may cause
* the cache to be invalidated out from under us. The r_size is not
* updated until after the uiomove. If we push the last page of a
* file before r_size is correct, we will lose the data written past
* the current (and invalid) r_size.
*/
do {
pagecreate = 0;
/*
* n is the number of bytes required to satisfy the request
* or the number of bytes to fill out the page.
*/
/*
* Check to see if we can skip reading in the page
* and just allocate the memory. We can do this
* if we are going to rewrite the entire mapping
* or if we are going to write to or beyond the current
* end of file from the beginning of the mapping.
*
* The read of r_size is now protected by r_statelock.
*/
/*
* When pgcreated is nonzero the caller has already done
* a segmap_getmapflt with forcefault 0 and S_WRITE. With
* segkpm this means we already have at least one page
* created and mapped at base.
*/
pagecreate = pgcreated ||
((offset & PAGEOFFSET) == 0 &&
if (!vpm_enable && pagecreate) {
/*
* The last argument tells segmap_pagecreate() to
* always lock the page, as opposed to sometimes
* returning with the page locked. This way we avoid a
* fault on the ensuing uiomove(), but also
* more importantly (to fix bug 1094402) we can
* call segmap_fault() to unlock the page in all
* cases. An alternative would be to modify
* segmap_pagecreate() to tell us when it is
* locking a page, but that's a fairly major
* interface change.
*/
if (pgcreated == 0)
(uint_t)n, 1);
saved_base = base;
saved_n = n;
}
/*
* The number of bytes of data in the last page cannot
* be accurately determined while the page is being
* uiomove'd to and the size of the file is being updated.
* Thus, inform threads which need to know accurately
* how much data is in the last page of the file. They
* will not do the i/o immediately, but will arrange for
* the i/o to happen later when this modify operation
* will have finished.
*/
if (vpm_enable) {
/*
* Copy data. If new pages are created, part of
* the page that is not written will be initialized
* with zeros.
*/
} else {
}
/*
* r_size is the maximum number of
* bytes known to be in the file.
* Make sure it is at least as high as the
* first unwritten byte pointed to by uio_loffset.
*/
/* n = # of bytes written */
if (!vpm_enable) {
base += n;
}
tcount -= n;
/*
* If we created pages w/o initializing them completely,
* we need to zero the part that wasn't set up.
* This happens in most EOF write cases and if
* we had some sort of error during the uiomove.
*/
if (!vpm_enable && pagecreate) {
if (pgcreated) {
/*
* Caller is responsible for this page,
* it was not created in this loop.
*/
pgcreated = 0;
} else {
/*
* For bug 1094402: segmap_pagecreate locks
* page. Unlock it. This also unlocks the
* pages allocated by page_create_va() in
* segmap_pagecreate().
*/
if (error == 0)
}
}
return (error);
}
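/*
 * Sketch of the chunking calculation used by the loop above: each pass
 * copies at most up to the next page boundary, so a single uiomove()
 * never spans pages. Variable naming mirrors the loop; the helper is
 * illustrative only.
 */
static int
chunk_bytes_sketch(u_offset_t offset, int tcount)
{
	/* bytes left in the current page, capped by the request size */
	return ((int)MIN((uint_t)(PAGESIZE - (offset & PAGEOFFSET)),
	    (uint_t)tcount));
}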
int
nfs4_putpages(vnode_t *vp, u_offset_t off, size_t len, int flags, cred_t *cr)
{
int error;
int rdirty;
int err;
if (!nfs4_has_pages(vp))
return (0);
/*
* If R4OUTOFSPACE is set, then all writes turn into B_INVAL
* writes. B_FORCE is set to force the VM system to actually
* invalidate the pages, even if the i/o failed. The pages
* need to get invalidated because they can't be written out
* because there isn't any space left on either the server's
* file system or in the user's disk quota. The B_FREE bit
* is cleared to avoid confusion as to whether this is a
* request to place the page on the freelist or to destroy
* it.
*/
if (len == 0) {
/*
* If doing a full file synchronous operation, then clear
* the R4DIRTY bit. If a page gets dirtied while the flush
* is happening, then R4DIRTY will get set again. The
* R4DIRTY bit must get cleared before the flush so that
* we don't lose this information.
*
* If there are no full file async write operations
* pending and the R4DIRTY bit is set, clear it.
*/
if (off == (u_offset_t)0 &&
}
} else
rdirty = 0;
/*
* Search the entire vp list for pages >= off, and flush
* the dirty pages.
*/
/*
* If an error occurred and the file was marked as dirty
* before and we aren't forcibly invalidating pages, then
* reset the R4DIRTY flag.
*/
}
} else {
/*
* Do a range from [off...off + len) looking for pages
* to deal with.
*/
error = 0;
io_len = 0;
/*
* If we are not invalidating, synchronously
* freeing or writing pages use the routine
* page_lookup_nowait() to prevent reclaiming
* them from the free list.
*/
} else {
}
else {
if (!error)
/*
* "io_off" and "io_len" are returned as
* the range of pages we actually wrote.
* This allows us to skip ahead more quickly
* since several pages may've been dealt
* with by this iteration of the loop.
*/
}
}
}
return (error);
}
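/*
 * Sketch of the flag adjustment described at the top of nfs4_putpages():
 * when R4OUTOFSPACE is set, force invalidation (B_INVAL | B_FORCE) and
 * clear B_FREE so the request is unambiguous. Illustrative only.
 */
static int
putpage_flags_sketch(rnode4_t *rp, int flags)
{
	if (rp->r_flags & R4OUTOFSPACE) {
		flags |= B_INVAL | B_FORCE;	/* really invalidate */
		flags &= ~B_FREE;		/* not a freelist request */
	}
	return (flags);
}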
void
{
if (off == (u_offset_t)0) {
}
}
static int
nfs4_mnt_kstat_update(kstat_t *ksp, int rw)
{
mntinfo4_t *mi;
struct mntinfo_kstat *mik;
/* this is a read-only kstat. Bail out on a write */
if (rw == KSTAT_WRITE)
return (EACCES);
/*
* We don't want to wait here as kstat_chain_lock could be held by
* dounmount(). dounmount() takes vfs_reflock before the chain lock
* and thus could lead to a deadlock.
*/
/*
* The sv_secdata holds the flavor the client specifies.
* If the client uses default and a security negotiation
* occurs, sv_currsec will point to the current flavor
* selected from the server flavor list.
* sv_currsec is NULL if no security negotiation takes place.
*/
return (0);
}
void
nfs4_mnt_kstat_init(struct vfs *vfsp)
{
/*
* PSARC 2001/697 Contract Private Interface
* All nfs kstats are under SunMC contract
* Please refer to the PSARC listed above and contact
* SunMC before making any changes!
*
* Changes must be reviewed by Solaris File Sharing
* Changes must be communicated to contract-2001-697@sun.com
*
*/
if (mi->mi_io_kstats) {
}
}
}
void
nfs4_write_error(vnode_t *vp, int error, cred_t *cr)
{
mntinfo4_t *mi;
/*
* In case of forced unmount, do not print any messages
* since it can flood the console with error messages.
*/
return;
/*
* If the mount point is dead, not recoverable, do not
* print error messages that can flood the console.
*/
return;
/*
* No use in flooding the console with ENOSPC
* messages from the same file system.
*/
#ifdef DEBUG
#else
#endif
"^File: userid=%d, groupid=%d\n",
"^User: userid=%d, groupid=%d\n",
}
}
#ifdef DEBUG
"nfs_bio: cred is%s kcred\n",
}
#endif
}
}
/*
* Return non-zero if the given file can be safely memory mapped. Locks
* are safe if whole-file (length and offset are both zero).
*/
static int
nfs4_safemap(const vnode_t *vp)
{
int safe = 1;
"vp = %p", (void *)vp));
/*
* Review all the locks for the vnode, both ones that have been
* acquired and ones that are pending. We assume that
* flk_active_locks_for_vp() has merged any locks that can be
* merged (so that if a process has the entire file locked, it is
* represented as a single lock).
*
* Note that we can't bail out of the loop if we find a non-safe
* lock, because we have to free all the elements in the llp list.
* We might be able to speed up this code slightly by not looking
* at each lock's l_start and l_len fields once we've found a
* non-safe lock.
*/
while (llp) {
safe = 0;
"nfs4_safemap: unsafe active lock (%" PRId64
}
}
return (safe);
}
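/*
 * Sketch of the "whole-file" test implied by the comment before
 * nfs4_safemap(): a lock is safe for mmap only if it covers the entire
 * file, i.e. both offset and length are zero. lp would be the ll_flock
 * member of a list element returned by flk_active_locks_for_vp();
 * treat the parameter type as an assumption.
 */
static int
lock_is_whole_file_sketch(const struct flock64 *lp)
{
	return (lp->l_start == 0 && lp->l_len == 0);
}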
/*
* Return whether there is a lost LOCK or LOCKU queued up for the given
* file that would make an mmap request unsafe. cf. nfs4_safemap().
*/
int
nfs4_map_lost_lock_conflict(vnode_t *vp)
{
continue;
continue; /* different file */
break;
}
}
return (conflict);
}
/*
* nfs_lockcompletion:
*
* If the vnode has a lock that makes it unsafe to cache the file, mark it
* as non-cacheable (set the VNOCACHE bit).
*/
void
nfs4_lockcompletion(vnode_t *vp, int cmd)
{
if (!nfs4_safemap(vp)) {
} else {
}
}
/*
* The cached attributes of the file are stale after acquiring
* the lock on the file. They were updated when the file was
* opened, but not updated when the lock was acquired. Therefore the
* cached attributes are invalidated after the lock is obtained.
*/
}
/* ARGSUSED */
static void *
nfs4_mi_init(zoneid_t zoneid)
{
struct mi4_globals *mig;
return (mig);
}
/*
* Callback routine to tell all NFSv4 mounts in the zone to start tearing down
* state and killing off threads.
*/
/* ARGSUSED */
static void
nfs4_mi_shutdown(zoneid_t zoneid, void *data)
{
mntinfo4_t *mi;
"nfs4_mi_shutdown zone %d\n", zoneid));
for (;;) {
break;
}
/*
* purge the DNLC for this filesystem
*/
/*
* Tell existing async worker threads to exit.
*/
mi->mi_max_threads = 0;
/*
* Set the appropriate flags, signal and wait for both the
* async manager and the inactive thread to exit when they're
* done with their current work.
*/
if (mi->mi_manager_thread) {
}
if (mi->mi_inactive_thread) {
/*
* Wait for the inactive thread to exit.
*/
}
}
/*
* Wait for the recovery thread to complete, that is, it will
* signal when it is done using the "mi" structure and about
* to exit
*/
while (mi->mi_in_recovery > 0)
/*
* We're done when every mi has been done or the list is empty.
* This one is done, remove it from the list.
*/
/*
* Release hold on vfs and mi done to prevent race with zone
* shutdown. This releases the hold in nfs4_mi_zonelist_add.
*/
}
/*
* Tell each renew thread in the zone to exit
*/
/*
* We add another hold onto the nfs4_server_t
* because this will make sure that the nfs4_server_t
* stays around until nfs4_callback_fini_zone destroys
* the zone. This way, the renew thread can
* unconditionally release its holds on the
* nfs4_server_t.
*/
}
}
}
static void
nfs4_mi_free_globals(struct mi4_globals *mig)
{
}
/* ARGSUSED */
static void
nfs4_mi_destroy(zoneid_t zoneid, void *data)
{
"nfs4_mi_destroy zone %d\n", zoneid));
/* Still waiting for VFS_FREEVFS() */
return;
}
}
/*
* Add an NFS mount to the per-zone list of NFS mounts.
*/
void
nfs4_mi_zonelist_add(mntinfo4_t *mi)
{
struct mi4_globals *mig;
/*
* hold added to eliminate race with zone shutdown -this will be
* released in mi_shutdown
*/
}
/*
* Remove an NFS mount from the per-zone list of NFS mounts.
*/
int
nfs4_mi_zonelist_remove(mntinfo4_t *mi)
{
struct mi4_globals *mig;
int ret = 0;
/* if this mi is marked dead, then the zone already released it */
/* release the holds put on in zonelist_add(). */
ret = 1;
}
/*
* We can be called asynchronously by VFS_FREEVFS() after the zone
* shutdown/destroy callbacks have executed; if so, clean up the zone's
* mi globals.
*/
return (ret);
}
return (ret);
}
void
nfs_free_mi4(mntinfo4_t *mi)
{
int i;
if (mi->mi_io_kstats) {
}
if (mi->mi_ro_kstats) {
}
if (mi->mi_recov_ksp) {
}
}
/*
* Destroy the oo hash lists and mutexes for the cred hash table.
*/
for (i = 0; i < NFS4_NUM_OO_BUCKETS; i++) {
/* Destroy any remaining open owners on the list */
}
}
/*
* Empty and destroy the freed open owner list.
*/
}
}
void
mi_hold(mntinfo4_t *mi)
{
}
void
mi_rele(mntinfo4_t *mi)
{
}
}
void
nfs4_clnt_init(void)
{
(void) nfs4_rnode_init();
(void) nfs4_shadow_init();
(void) nfs4_acache_init();
(void) nfs4_subr_init();
#ifdef DEBUG
#endif
/*
* Add a CPR callback so that we can update client
* lease after a suspend and resume.
*/
/*
* Initialise the reference count of the notsupp xattr cache vnode to 1
* so that it never goes away (VOP_INACTIVE isn't called on it).
*/
}
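/*
 * Sketch of the CPR callback registration mentioned above, assuming the
 * conventional callb_add() usage; the exact callback class and name
 * string are assumptions, not taken from the original.
 */
static void
register_cpr_callb_sketch(void)
{
	/* cid is checked by nfs4_clnt_fini() before callb_delete() */
	cid = callb_add(nfs4_client_cpr_callb, (void *)NULL,
	    CB_CL_CPR_RPC, "nfs4");
}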
void
nfs4_clnt_fini(void)
{
(void) zone_key_delete(mi4_list_key);
(void) nfs4_rnode_fini();
(void) nfs4_shadow_fini();
(void) nfs4_acache_fini();
(void) nfs4_subr_fini();
#ifdef DEBUG
#endif
if (cid)
(void) callb_delete(cid);
}
/*ARGSUSED*/
static boolean_t
nfs4_client_cpr_callb(void *arg, int code)
{
/*
* We get called for Suspend and Resume events.
* For the suspend case we simply don't care!
*/
if (code == CB_CODE_CPR_CHKPT) {
return (B_TRUE);
}
/*
* When we get to here we are in the process of
* resuming the system from a previous suspend.
*/
return (B_TRUE);
}
void
nfs4_renew_lease_thread(nfs4_server_t *sp)
{
int error = 0;
clock_t tick_delay = 0;
"nfs4_renew_lease_thread: acting on sp 0x%p", (void*)sp));
/* sp->s_lease_time is set via a GETATTR */
for (;;) {
if (!sp->state_ref_count ||
"nfs4_renew_lease_thread: no renew : thread "
"wait %ld secs", kip_secs));
"nfs4_renew_lease_thread: no renew : "
"state_ref_count %d, lease_valid %d",
"nfs4_renew_lease_thread: no renew: "
"time left %ld", time_left));
goto die;
continue;
}
"nfs4_renew_lease_thread: tmp_time %ld, "
"sp->last_renewal_time %ld", tmp_time,
sp->last_renewal_time));
"nfs4_renew_lease_thread: valid lease: sleep for %ld "
"secs", kip_secs));
tick_delay + lbolt);
"nfs4_renew_lease_thread: valid lease: time left %ld :"
"sp last_renewal_time %ld, nfs4_client_resumed %ld, "
"tmp_last_renewal_time %ld", time_left,
goto die;
(nfs4_client_resumed != 0 &&
/*
* Issue RENEW op since we haven't renewed the lease
* since we slept.
*/
/*
* Need to re-acquire sp's lock, nfs4renew()
* relinquishes it.
*/
/*
* See if someone changed s_thread_exit while we gave
* up s_lock.
*/
goto die;
if (!error) {
/*
* check to see if we implicitly renewed while
* we waited for a reply for our RENEW call.
*/
if (tmp_last_renewal_time ==
sp->last_renewal_time) {
/* no implicit renew came */
} else {
(CE_NOTE, "renew_thread: did "
"implicit renewal before reply "
"from server for RENEW"));
}
} else {
/* figure out error */
"renew_thread: nfs4renew returned error"
" %d", error));
}
}
}
die:
"nfs4_renew_lease_thread: thread exiting"));
while (sp->s_otw_call_count != 0) {
"nfs4_renew_lease_thread: waiting for outstanding "
"otw calls to finish for sp 0x%p, current "
"s_otw_call_count %d", (void *)sp,
sp->s_otw_call_count));
}
done:
"nfs4_renew_lease_thread: renew thread exit officially"));
zthread_exit();
/* NOT REACHED */
}
/*
* Send out a RENEW op to the server.
* Assumes sp is locked down.
*/
static int
nfs4renew(nfs4_server_t *sp)
{
int doqueue = 1;
int rpc_error;
mntinfo4_t *mi;
recov_state.rs_flags = 0;
if (e.error) {
return (e.error);
}
/* Check to see if we're dealing with a marked-dead sp */
return (0);
}
/* Make sure mi hasn't changed on us */
/* Must drop sp's lock to avoid a recursive mutex enter */
goto recov_retry;
}
/* used to figure out RTT for sp */
(void*)sp));
mntinfo4_t *, mi);
mntinfo4_t *, mi);
/*
* If the server returns CB_PATH_DOWN, it has renewed
* the lease and informed us that the callback path is
* down. Since the lease is renewed, just return 0 and
* let the renew thread proceed as normal.
*/
return (0);
}
return (e.error);
}
if (needrecov) {
"nfs4renew: initiating recovery\n"));
if (!e.error)
(void) xdr_free(xdr_COMPOUND4res_clnt,
goto recov_retry;
}
/* fall through for res.status case */
}
/*EMPTY*/
/*
* XXX need to try every mntinfo4 in sp->mntinfo4_list
* to renew the lease on that server
*/
}
}
if (!rpc_error)
return (e.error);
}
void
nfs4_inc_state_ref_count(mntinfo4_t *mi)
{
/* this locks down sp if it is found */
}
}
/*
* Bump the number of OPEN files (ie: those with state) so we know if this
* nfs4_server has any state to maintain a lease for or not.
*
* Also, marks the nfs4_server's lease valid if it hasn't been done so already.
*/
void
nfs4_inc_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi)
{
sp->state_ref_count++;
"nfs4_inc_state_ref_count: state_ref_count now %d",
sp->state_ref_count));
/*
* If we just took the state_ref_count from 0 to 1, then start the time
* on lease renewal.
*/
/* update the number of open files for mi */
mi->mi_open_files++;
}
void
nfs4_dec_state_ref_count(mntinfo4_t *mi)
{
/* this locks down sp if it is found */
}
}
/*
* Decrement the number of OPEN files (ie: those with state) so we know if
* this nfs4_server has any state to maintain a lease for or not.
*/
void
nfs4_dec_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi)
{
sp->state_ref_count--;
"nfs4_dec_state_ref_count: state ref count now %d",
sp->state_ref_count));
mi->mi_open_files--;
"nfs4_dec_state_ref_count: mi open files %d, v4 flags 0x%x",
/* We don't have to hold the mi_lock to test mi_flags */
if (mi->mi_open_files == 0 &&
"nfs4_dec_state_ref_count: remove mntinfo4 %p since "
"we have closed the last open file", (void*)mi));
}
}
bool_t
inlease(nfs4_server_t *sp)
{
else
return (result);
}
/*
* Return non-zero if the given nfs4_server_t is going through recovery.
*/
int
nfs4_server_in_recovery(nfs4_server_t *sp)
{
}
/*
* Compare two shared filehandle objects. Returns -1, 0, or +1, if the
* first is less than, equal to, or greater than the second.
*/
int
sfh4cmp(const void *p1, const void *p2)
{
}
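/*
 * Sketch of one plausible comparator for the table above: order
 * filehandles by length, then bytewise, clamping the result to
 * -1/0/+1 as the AVL package expects. The ordering criterion here is
 * an assumption for illustration, not necessarily what the original
 * comparator uses.
 */
static int
sfh_cmp_sketch(const nfs_fh4 *a, const nfs_fh4 *b)
{
	int res;

	if (a->nfs_fh4_len != b->nfs_fh4_len)
		return (a->nfs_fh4_len < b->nfs_fh4_len ? -1 : 1);
	res = memcmp(a->nfs_fh4_val, b->nfs_fh4_val, a->nfs_fh4_len);
	return (res < 0 ? -1 : (res > 0 ? 1 : 0));
}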
/*
* Create a table for shared filehandle objects.
*/
void
sfh4_createtab(avl_tree_t *tab)
{
}
/*
* Return a shared filehandle object for the given filehandle. The caller
* is responsible for eventually calling sfh4_rele().
*/
static nfs4_sharedfh_t *
sfh4_put(const nfs_fh4 *fh, mntinfo4_t *mi, nfs4_sharedfh_t *key)
{
if (!key) {
}
/*
* We allocate the largest possible filehandle size because it's
* not that big, and it saves us from possibly having to resize the
* buffer later.
*/
(void *)nsfh));
sfh->sfh_refcnt++;
/* free our speculative allocs */
return (sfh);
}
return (nsfh);
}
/*
* Return a shared filehandle object for the given filehandle. The caller
* is responsible for eventually calling sfh4_rele().
*/
nfs4_sharedfh_t *
sfh4_get(const nfs_fh4 *fh, mntinfo4_t *mi)
{
#ifdef DEBUG
if (nfs4_sharedfh_debug) {
}
#endif
/*
* If there's already an object for the given filehandle, bump the
* reference count and return it. Otherwise, create a new object
* and add it to the AVL tree.
*/
sfh->sfh_refcnt++;
"sfh4_get: found existing %p, new refcnt=%d",
return (sfh);
}
}
/*
* Get a reference to the given shared filehandle object.
*/
void
sfh4_hold(nfs4_sharedfh_t *sfh)
{
sfh->sfh_refcnt++;
(CE_NOTE, "sfh4_hold %p, new refcnt=%d",
}
/*
* Release a reference to the given shared filehandle object and null out
* the given pointer.
*/
void
sfh4_rele(nfs4_sharedfh_t **sfhpp)
{
mntinfo4_t *mi;
sfh->sfh_refcnt--;
"sfh4_rele %p, new refcnt=%d",
goto finish;
}
/*
* Possibly the last reference, so get the lock for the table in
* case it's time to remove the object from the table.
*/
sfh->sfh_refcnt--;
if (sfh->sfh_refcnt > 0) {
"sfh4_rele %p, new refcnt=%d",
goto finish;
}
"sfh4_rele %p, last ref", (void *)sfh));
}
}
/*
* Update the filehandle for the given shared filehandle object.
*/
int nfs4_warn_dupfh = 0; /* if set, always warn about dup fhs below */
void
sfh4_update(nfs4_sharedfh_t *sfh, const nfs_fh4 *newfh)
{
#ifdef DEBUG
#endif
/*
* The basic plan is to remove the shared filehandle object from
* the table, update it to have the new filehandle, then reinsert
* it.
*/
}
/*
* XXX If there is already a shared filehandle object with the new
* filehandle, we're in trouble, because the rnode code assumes
* that there is only one shared filehandle object for a given
* filehandle. So issue a warning (for read-write mounts only)
* and don't try to re-insert the given object into the table.
* Hopefully the given object will quickly go away and everyone
* will use the new object.
*/
"duplicate filehandle detected");
}
} else {
}
}
/*
* Copy out the current filehandle for the given shared filehandle object.
*/
void
sfh4_copyval(const nfs4_sharedfh_t *sfh, nfs4_fhandle_t *fhp)
{
}
/*
* Print out the filehandle for the given shared filehandle object.
*/
void
sfh4_printfhandle(const nfs4_sharedfh_t *sfh)
{
}
/*
* Compare 2 fnames. Returns -1 if the first is "less" than the second, 0
* if they're the same, +1 if the first is "greater" than the second. The
* caller (or whoever's calling the AVL package) is responsible for
* handling locking issues.
*/
static int
fncmp(const void *p1, const void *p2)
{
int res;
/*
* The AVL package wants +/-1, not arbitrary positive or negative
* integers.
*/
if (res > 0)
res = 1;
else if (res < 0)
res = -1;
return (res);
}
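/*
 * Usage sketch: a comparator with the -1/0/+1 contract above is exactly
 * what avl_create() expects. The AVL-link field name (fn_tree) is a
 * hypothetical stand-in for the real fname tree linkage.
 */
static void
fname_tree_init_sketch(avl_tree_t *tree)
{
	avl_create(tree, fncmp, sizeof (nfs4_fname_t),
	    offsetof(nfs4_fname_t, fn_tree));
}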
/*
* Get or create an fname with the given name, as a child of the given
* fname. The caller is responsible for eventually releasing the reference
* (fn_rele()). parent may be NULL.
*/
nfs4_fname_t *
fn_get(nfs4_fname_t *parent, char *name)
{
/*
* If there's already an fname registered with the given name, bump
* its reference count and return it. Otherwise, create a new one
* and add it to the parent's AVL tree.
*/
return (fnp);
}
}
"fn_get %p:%s, a new nfs4_fname_t!",
}
return (fnp);
}
void
fn_hold(nfs4_fname_t *fnp)
{
"fn_hold %p:%s, new refcnt=%d",
}
/*
* Decrement the reference count of the given fname, and destroy it if its
* reference count goes to zero. Nulls out the given pointer.
*/
void
fn_rele(nfs4_fname_t **fnpp)
{
if (newref > 0) {
"fn_rele %p:%s, new refcnt=%d",
return;
}
"fn_rele %p:%s, last reference, deleting...",
}
/*
* Recursively fn_rele the parent.
* Use goto instead of a recursive call to avoid stack overflow.
*/
goto recur;
}
}
/*
* Returns the single component name of the given fname, in a MAXNAMELEN
* string buffer, which the caller is responsible for freeing. Note that
* the name may become invalid as a result of fn_move().
*/
char *
fn_name(nfs4_fname_t *fnp)
{
char *name;
return (name);
}
/*
* fn_path_realloc
*
* This function, used only by fn_path, constructs
* a new string which looks like "prepend" + "/" + "current",
* by allocating a new string and freeing the old one.
*/
static void
{
char *news;
/*
* Prime the pump, allocate just the
* space for prepend and return that.
*/
} else {
/*
* Allocate the space for a new string
* +1 +1 is for the "/" and the NULL
* byte at the end of it all.
*/
}
}
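/*
 * Minimal sketch of the string construction described above: allocate
 * strlen(prepend) + 1 (for "/") + strlen(current) + 1 (for the NUL)
 * bytes, build "prepend/current", and free the old buffer. The helper
 * name and the way the old length is passed are assumptions.
 */
static char *
path_prepend_sketch(char *current, size_t curlen, const char *prepend)
{
	size_t len = strlen(prepend) + 1 + strlen(current) + 1;
	char *news = kmem_alloc(len, KM_SLEEP);

	(void) snprintf(news, len, "%s/%s", prepend, current);
	kmem_free(current, curlen);	/* release the old string */
	return (news);
}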
/*
* Returns the path name (starting from the fs root) for the given fname.
* The caller is responsible for freeing. Note that the path may be or
* become invalid as a result of fn_move().
*/
char *
fn_path(nfs4_fname_t *fnp)
{
char *path;
return (NULL);
/* walk up the tree constructing the pathname. */
do {
/*
* Add fn_name in front of the current path
*/
return (path);
}
/*
* Return a reference to the parent of the given fname, which the caller is
* responsible for eventually releasing.
*/
nfs4_fname_t *
fn_parent(nfs4_fname_t *fnp)
{
return (parent);
}
/*
* Update fnp so that its parent is newparent and its name is newname.
*/
void
fn_move(nfs4_fname_t *fnp, nfs4_fname_t *newparent, char *newname)
{
/*
* This assert exists to catch the client trying to rename
* a dir to be a child of itself. This happened at a recent
* bakeoff against a 3rd party (broken) server which allowed
* the rename to succeed. If it trips it means that:
* a) the code in nfs4rename that detects this case is broken
* b) the server is broken (since it allowed the bogus rename)
*
* For non-DEBUG kernels, prepare for a recursive mutex_enter
* panic below from: mutex_enter(&newparent->fn_lock);
*/
/*
* Remove fnp from its current parent, change its name, then add it
* to newparent.
*/
}
/*
* This could be due to a file that was unlinked while
* open, or perhaps the rnode is in the free list. Remove
* it from newparent and let it go away on its own. The
* contorted code is to deal with lock order issues and
* race conditions.
*/
}
goto again;
}
}
#ifdef DEBUG
/*
* Return non-zero if the type information makes sense for the given vnode.
* Otherwise panic.
*/
int
nfs4_consistent_type(vnode_t *vp)
{
}
return (1);
}
#endif /* DEBUG */