/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
#include <sys/pathname.h>
#include <sys/sysmacros.h>
#include <sys/bootconf.h>
#include <rpc/rpcsec_gss.h>
#include <nfs/nfs_clnt.h>
#include <nfs/nfs4_clnt.h>
#ifdef DEBUG
/*
* These are "special" state IDs and file handles that
* match any delegation state ID or file handle. This
* is for testing purposes only.
*/
int nfs4_callback_debug;
int nfs4_recall_debug;
int nfs4_drat_debug;
#endif
/*
* NFS4_MAPSIZE is the number of bytes we are willing to consume
* for the block allocation map when the server grants an NFS_LIMIT_BLOCK
* style delegation.
*/
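/*
 * A plausible definition is sketched here; the value is illustrative
 * only and may differ from the original source.
 */
#define	NFS4_MAPSIZE	(8 * 1024)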
struct nfs4_dnode {
};
{ "delegations", KSTAT_DATA_UINT64 },
{ "cb_getattr", KSTAT_DATA_UINT64 },
{ "cb_recall", KSTAT_DATA_UINT64 },
{ "cb_null", KSTAT_DATA_UINT64 },
{ "cb_dispatch", KSTAT_DATA_UINT64 },
{ "delegaccept_r", KSTAT_DATA_UINT64 },
{ "delegaccept_rw", KSTAT_DATA_UINT64 },
{ "delegreturn", KSTAT_DATA_UINT64 },
{ "callbacks", KSTAT_DATA_UINT64 },
{ "claim_cur", KSTAT_DATA_UINT64 },
{ "claim_cur_ok", KSTAT_DATA_UINT64 },
{ "recall_trunc", KSTAT_DATA_UINT64 },
{ "recall_failed", KSTAT_DATA_UINT64 },
{ "return_limit_write", KSTAT_DATA_UINT64 },
{ "return_limit_addmap", KSTAT_DATA_UINT64 },
{ "deleg_recover", KSTAT_DATA_UINT64 },
{ "cb_illegal", KSTAT_DATA_UINT64 }
};
struct nfs4_cb_port {
};
static int cb_getattr_bytes;
struct cb_recall_pass {
};
static void nfs4delegreturn_thread(struct cb_recall_pass *);
int);
static void nfs4_dlistclean_impl(struct nfs4_callback_globals *, int);
static int nfs4delegreturn_impl(rnode4_t *, int,
struct nfs4_callback_globals *);
struct nfs4_callback_globals *);
static void
{
long mapcnt;
#ifdef DEBUG
/*
* error injection hook: set cb_getattr_fail global to
* the NFS4 protocol error to be returned
*/
if (cb4_getattr_fail != NFS4_OK) {
return;
}
#endif
CB_WARN("cb_getattr: cannot find server\n");
return;
}
/*
* In cb_compound, callback_ident was validated against rq_prog,
* but we couldn't verify that it was set to the value we provided
* at setclientid time (because we didn't have server struct yet).
* Now we have the server struct, but don't have callback_ident
* handy. So, validate server struct program number against req
* RPC's prog number. At this point, we know the RPC prog num
* is valid (else we wouldn't be here); however, we don't know
* that it was the prog number we supplied to this server at
* setclientid time. If the prog numbers aren't equivalent, then
* log the problem and fail the request, because either the server
* and client disagree about which callback program to use or the
* request is not from the server we registered with.
*/
#ifdef DEBUG
"cb_getattr: wrong server program number srv=%d req=%d\n",
#else
"cb_getattr: wrong server program number\n");
#endif
return;
}
/*
* Search the delegation list for a matching file handle;
* mutex on sp prevents the list from changing.
*/
break;
}
#ifdef DEBUG
break;
}
#endif
}
/*
* VN_HOLD the vnode before releasing s_lock to guarantee
* we have a valid vnode reference.
*/
}
CB_WARN("cb_getattr: bad fhandle\n");
return;
}
/*
* Figure out which attributes the server wants. We only
* offer FATTR4_CHANGE & FATTR4_SIZE; ignore the rest.
*/
/*
* Don't actually need to create XDR to encode these
* simple data structures.
* xdrmem_create(&xdr, fdata, cb_getattr_bytes, XDR_ENCODE);
*/
/* attrlist4_len starts at 0 and increases as attrs are processed */
fap->attrlist4_len = 0;
/* don't supply attrs if request was zero */
if (args->attr_request != 0) {
/*
* If the file is mmapped, then increment the change
* attribute and return it. This will guarantee that
* the server will perceive that the file has changed
* if there is any chance that the client application
* has changed it. Otherwise, just return the change
* attribute as it has been updated by nfs4write_deleg.
*/
/*
* If object mapped, then always return new change.
* Otherwise, return change if object has dirty
* pages. If object doesn't have any dirty pages,
* then all changes have been pushed to server, so
* reset change to grant change.
*/
if (mapcnt)
rp->r_deleg_change++;
/*
* Use inline XDR code directly; we know that we are
* going to a memory buffer and it has enough
* space, so it cannot fail.
*/
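/*
 * A minimal sketch of that inline encoding, assuming fdata points
 * into the reply buffer (names follow the xdrmem_create() comment
 * earlier; the exact statements in the original may differ):
 */
IXDR_PUT_U_HYPER(fdata, rp->r_deleg_change);
fap->attrlist4_len += 2 * BYTES_PER_XDR_UNIT;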
}
/*
* Use an atomic add of 0 to fetch a consistent view
* of r_size; this avoids having to take rw_lock
* which could cause a deadlock.
*/
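/*
 * Sketch of that fetch, assuming r_size is a 64-bit field and
 * file_size is a local (hypothetical) variable:
 */
file_size = atomic_add_64_nv((uint64_t *)&rp->r_size, 0);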
/*
* Use inline XDR code directly; we know that we are
* going to a memory buffer and it has enough
* space, so it cannot fail.
*/
}
}
}
static void
{
}
static void
{
#ifdef DEBUG
/*
* error injection hook: set cb_recall_fail global to
* the NFS4 protocol error to be returned
*/
if (cb4_recall_fail != NFS4_OK) {
return;
}
#endif
CB_WARN("cb_recall: cannot find server\n");
return;
}
/*
* Search the delegation list for a matching file handle
* AND stateid; mutex on sp prevents the list from changing.
*/
/* check both state id and file handle! */
sizeof (stateid4)) == 0)) {
break;
} else {
#ifdef DEBUG
CB_WARN("cb_recall: stateid OK, bad fh");
#endif
}
}
#ifdef DEBUG
sizeof (stateid4)) == 0) {
break;
}
#endif
}
/*
* VN_HOLD the vnode before releasing s_lock to guarantee
* we have a valid vnode reference. The async thread will
* release the hold when it's done.
*/
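/*
 * Sketch of the hold-then-unlock ordering described above (vp is a
 * hypothetical local; RTOV4() maps an rnode4 to its vnode):
 */
vp = RTOV4(rp);
VN_HOLD(vp);
mutex_exit(&sp->s_lock);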
}
CB_WARN("cb_recall: bad stateid\n");
return;
}
/* Fire up a thread to do the delegreturn */
}
/* ARGSUSED */
static void
{
/* nothing to do here, cb_recall doesn't kmem_alloc */
}
/*
* This function handles the CB_NULL proc call from an NFSv4 Server.
*
* We take note that the server has sent a CB_NULL for later processing
* in the recovery logic. It is noted so we may pause slightly after the
* setclientid and before reopening files. The pause is to allow the
* NFSv4 Server time to receive the CB_NULL reply and adjust any of
* its internal structures such that it has the opportunity to grant
* delegations to reopened files.
*
*/
/* ARGSUSED */
static void
struct nfs4_callback_globals *ncg)
{
}
}
/*
* cb_illegal args: void
* res : status (NFS4ERR_OP_CB_ILLEGAL)
*/
/* ARGSUSED */
static void
{
}
static void
struct nfs4_callback_globals *ncg)
{
uint_t i;
/*
* Form a reply tag by copying over the request tag.
*/
KM_SLEEP);
/*
* XXX for now, minorversion should be zero
*/
return;
}
#ifdef DEBUG
/*
* Verify callback_ident. It doesn't really matter if it's wrong
* because we don't really use callback_ident -- we use prog number
* of the RPC request instead. In this case, just print a DEBUG
* message and continue processing the request.
*/
"cb_compound: cb_client using wrong "
"callback_ident(%d), should be %d",
#endif
KM_SLEEP);
switch (op) {
case OP_CB_GETATTR:
break;
case OP_CB_RECALL:
break;
case OP_CB_ILLEGAL:
/* fall through */
default:
/*
* Handle OP_CB_ILLEGAL and any undefined opcode.
* Currently, the XDR code will return BADXDR
* if cb op doesn't decode to legal value, so
* it really only handles OP_CB_ILLEGAL.
*/
op = OP_CB_ILLEGAL;
}
/*
* If not at last op, and if we are to stop, then
* compact the results array.
*/
}
}
}
static void
{
}
switch (op) {
case OP_CB_GETATTR:
break;
case OP_CB_RECALL:
break;
default:
break;
}
}
}
}
static void
{
struct nfs4_callback_globals *);
void (*freeproc)(CB_COMPOUND4res *);
case CB_NULL:
break;
case CB_COMPOUND:
proc = cb_compound;
break;
default:
CB_WARN("cb_dispatch: no proc\n");
return;
}
CB_WARN("cb_dispatch: cannot getargs\n");
return;
}
CB_WARN("cb_dispatch: bad sendreply\n");
}
if (freeproc)
CB_WARN("cb_dispatch: bad freeargs\n");
}
}
static rpcprog_t
{
int i, j;
j = ncg->nfs4_program_hint;
for (i = 0; i < nfs4_num_prognums; i++, j++) {
if (j >= nfs4_num_prognums)
j = 0;
return (j+NFS4_CALLBACK);
}
}
return (0);
}
void
{
int i;
return;
if (i < ncg->nfs4_program_hint)
ncg->nfs4_program_hint = i;
}
/*
* nfs4_setport - This function saves a netid and universal address for
* the callback program. These values will be used during setclientid.
*/
static void
struct nfs4_callback_globals *ncg)
{
struct nfs4_cb_port *p;
break;
}
}
else {
p = kmem_alloc(sizeof (*p), KM_SLEEP);
}
}
/*
* nfs4_cb_args - This function is used to construct the callback
* portion of the arguments needed for setclientid.
*/
void
{
struct nfs4_cb_port *p;
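/*
 * Hedged sketch of the fill-in this routine performs, following the
 * cb_client4 XDR definition (the local names p and pgm, and the
 * nfs4_cb_port fields netid/uaddr, are assumptions here):
 *
 *	args->callback.cb_program = pgm;
 *	args->callback.cb_location.r_netid = p->netid;
 *	args->callback.cb_location.r_addr = p->uaddr;
 *	args->callback_ident = pgm;
 */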
/*
* This server structure may already have a program number
* assigned to it. This happens when the client has to
* re-issue SETCLIENTID. Just re-use the information.
*/
break;
}
}
(CE_WARN, "nfs4_cb_args: could not find netid for %s/%s\n",
args->callback_ident = 0;
return;
}
CB_WARN("nfs4_cb_args: out of program numbers\n");
args->callback_ident = 0;
return;
}
}
static int
{
int error;
return (EBADF);
return (EBADF);
}
/*
* I can't convince myself that we need locking here. The
* rnode cannot disappear and the value returned is instantly
* stale anyway, so why bother?
*/
return (error);
}
/*
* NFS4 client system call. This service does the
* necessary initialization for the callback program.
* This is fashioned after the server side interaction
* between nfsd and the kernel. On the client, the
* mount command forks and the child process does the
* necessary interaction with the kernel.
*
* uap->fd is the fd of an open transport provider
*/
int
{
int error;
int readsize;
int cmd;
#ifdef lint
#endif
if (secpolicy_nfs(CRED()) != 0)
return (EPERM);
return (EBADF);
/*
* Set read buffer size to rsize
* and add room for RPC headers.
*/
if (readsize < RPC_MAXDATASIZE)
	readsize = RPC_MAXDATASIZE;
KNC_STRSIZE, &len);
if (error) {
return (error);
}
if (cmd & NFS4_KRPC_START) {
if (error) {
return (error);
}
}
else
if (error) {
return (error);
}
if (error) {
return (error);
}
if (error) {
return (error);
}
if (cmd & NFS4_SETPORT)
if (cmd & NFS4_KRPC_START) {
if (error) {
CB_WARN1("nfs4_svc: svc_tli_kcreate failed %d\n",
error);
}
}
return (error);
}
struct nfs4_callback_globals *
{
}
static void *
{
sizeof (struct nfs4_server *), KM_SLEEP);
/* initialize the dlist */
/* initialize cb_port list */
/* get our own copy of the kstats */
sizeof (nfs4_callback_stats_tmpl));
/* register "nfs:0:nfs4_callback_stats" for this zone */
if ((nfs4_callback_kstat =
}
return (ncg);
}
static void
{
int i, num_removed;
/*
* It's OK here to just run through the registered "programs", as
* servers without programs won't have any delegations to handle.
*/
for (i = 0; i < nfs4_num_prognums; i++) {
continue;
num_removed = 0;
/*
* We need to take matters into our own hands,
* as nfs4delegreturn_cleanup_impl() won't
* remove this from the list.
*/
num_removed++;
continue;
}
/*
* The following will remove the node from the list.
*/
}
/* each removed list node reles a reference */
while (num_removed-- > 0)
/* remove our reference for nfs4_server_vlock */
}
}
/* ARGSUSED */
static void
{
/*
* Clean pending delegation return list.
*/
/*
* Discard all delegations.
*/
}
static void
{
struct nfs4_cb_port *p;
int i;
/*
* Discard all delegations that may have crept in since we did the
* _shutdown.
*/
/*
* We're completely done with this zone and all associated
* nfs4_server_t's. Any remaining nfs4_server_ts should only have one
* more reference outstanding -- the reference we didn't release in
* nfs4_renew_lease_thread().
*
* Here we need to run through the global nfs4_server_lst as we need to
* deal with nfs4_server_ts without programs, as they also have threads
* created for them, and so have outstanding references that we need to
* release.
*/
while (sp != &nfs4_server_lst) {
}
}
}
#ifdef DEBUG
for (i = 0; i < nfs4_num_prognums; i++) {
}
#endif
sizeof (struct nfs4_server *));
kmem_free(p, sizeof (*p));
}
}
void
nfs4_callback_init(void)
{
int i;
/* initialize the callback table */
sizeof (SVC_CALLOUT), KM_SLEEP);
for (i = 0; i < nfs4_num_prognums; i++) {
}
/*
* Compute max bytes required for dynamically allocated parts
* of cb_getattr reply. Only size and change are supported now.
* If CB_GETATTR is changed to reply with additional attrs,
* additional sizes must be added below.
*
* fattr4_change + fattr4_size == uint64_t + uint64_t
*/
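/*
 * One way to express that computation (a sketch; the original
 * expression may differ): two 64-bit attributes, each of which
 * occupies two XDR units.
 */
cb_getattr_bytes = 2 * (2 * BYTES_PER_XDR_UNIT);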
}
void
nfs4_callback_fini(void)
{
}
/*
* NB: This function can be called from the *wrong* zone (ie, the zone that
* 'rp' belongs to and the caller's zone may not be the same). This can happen
* if the zone is going away and we get called from nfs4_async_inactive(). In
* this case the globals will be NULL and we won't update the counters, which
* doesn't matter as the zone is going away anyhow.
*/
static void
struct nfs4_callback_globals *ncg)
{
/*
* Caller must be holding mi_recovlock in read mode
* to call here. This is provided by start_op.
* Delegation management requires grabbing s_lock
* first and then r_statev4_lock.
*/
return;
} else {
}
if (need_rele)
return;
}
/*
* Free the cred originally held when
* the delegation was granted. Caller must
* hold this cred if it wants to use it after
* this call.
*/
/*
* Remove the rnode from the server's list and
* update the ref counts.
*/
/* removed list node removes a reference */
if (need_rele)
}
void
{
} else {
/*
* Request coming from the wrong zone.
*/
}
}
static void
{
lost_rqstp->lr_op = 0;
return;
}
"nfs4close_save_lost_rqst: error %d", error));
/*
* The vp is held and rele'd via the recovery code.
* See nfs4_save_lost_rqst.
*/
}
static void
{
hrtime_t t;
int numops;
t = gethrtime();
return;
}
}
int
struct nfs4_callback_globals *ncg)
{
while (!done) {
&recov_state, &recovonly);
if (e.error) {
if (flags & NFS4_DR_FORCE) {
RW_READER, 0);
}
break;
}
/*
* Check to see if the delegation has already been
* returned by the recovery thread. The state of
* the delegation cannot change at this point due
* to start_fop and the r_deleg_recall_lock.
*/
e.error = 0;
break;
}
if (recovonly) {
/*
* Delegation will be returned via the
* recovery framework. Build a lost request
* structure, start recovery and get out.
*/
nfs4_error_init(&e, EINTR);
break;
}
/*
* Ignore some errors on delegreturn; no point in marking
* the file dead on a state destroying operation.
*/
e.stat == NFS4ERR_BADHANDLE ||
e.stat == NFS4ERR_STALE))
else
if (needrecov) {
} else {
}
}
return (e.error);
}
/*
* nfs4_resend_delegreturn - used to drive the delegreturn
* operation via the recovery thread.
*/
void
{
/* If the file failed recovery, just quit. */
}
/*
* If recovery is now needed, then return the error
* and status and let the recovery thread handle it,
* including re-driving another delegreturn. Otherwise,
* just give up and clean up the delegation.
*/
return;
}
/*
* nfs4delegreturn - general function to return a delegation.
*
* NFS4_DR_FORCE - return the delegation even if start_op fails
* NFS4_DR_PUSH - push modified data back to the server via VOP_PUTPAGE
* NFS4_DR_DISCARD - discard the delegation w/o delegreturn
* NFS4_DR_DID_OP - calling function already did nfs4_start_op
* NFS4_DR_RECALL - delegreturned initiated via CB_RECALL
* NFS4_DR_REOPEN - do file reopens, if applicable
*/
static int
{
int error = 0;
/*
* If NFS4_DR_DISCARD is set by itself, take a short-cut and
* discard without doing an otw DELEGRETURN. This may only be used
* by the recovery thread because it bypasses the synchronization
* with r_deleg_recall_lock and mi->mi_recovlock.
*/
if (flags == NFS4_DR_DISCARD) {
return (0);
}
if (flags & NFS4_DR_DID_OP) {
/*
* Caller had already done start_op, which means the
* r_deleg_recall_lock is already held in READ mode
* so we cannot take it in write mode. Return the
* delegation asynchronously.
*
* Remove the NFS4_DR_DID_OP flag so we don't
* get stuck looping through here.
*/
return (0);
}
/*
* Verify we still have a delegation and crhold the credential.
*/
goto out;
}
/*
* Push the modified data back to the server synchronously
* before doing DELEGRETURN.
*/
if (flags & NFS4_DR_PUSH)
/*
* Take r_deleg_recall_lock in WRITE mode, this will prevent
* nfs4_is_otw_open_necessary from trying to use the delegation
* while the DELEGRETURN is in progress.
*/
rw_entered = TRUE;
goto out;
if (flags & NFS4_DR_REOPEN) {
/*
* If R4RECOVERRP is already set, then skip re-opening
* the delegation open streams and go straight to doing
* delegreturn. (XXX if the file has failed recovery, then the
* delegreturn attempt is likely to be futile.)
*/
if (do_reopen) {
if (error != 0) {
== 0)
goto out;
} else if (needrecov) {
if ((flags & NFS4_DR_FORCE) == 0)
goto out;
}
}
}
if (flags & NFS4_DR_DISCARD) {
/*
* deleg_return_pending is cleared inside of delegation_accept
* when a delegation is accepted. If this flag has been
* cleared, then a new delegation has overwritten the one we
* were about to throw away.
*/
if (!rp->r_deleg_return_pending) {
goto out;
}
} else {
}
out:
if (cr)
if (rw_entered)
return (error);
}
int
{
}
void
{
/*
* Fire up a thread to do the actual delegreturn
* Caller must guarantee that the rnode doesn't
* vanish (by calling VN_HOLD).
*/
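/*
 * Hedged sketch of the dispatch described above; the cb_recall_pass
 * field names (rp, flags, truncate) and the parameters flags/trunc
 * are assumptions here:
 */
pp = kmem_alloc(sizeof (struct cb_recall_pass), KM_SLEEP);
pp->rp = rp;
pp->flags = flags;
pp->truncate = trunc;
(void) zthread_create(NULL, 0, nfs4delegreturn_thread, pp, 0,
    minclsyspri);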
}
static void
{
break;
}
}
/*
* It's possible that the nfs4_server which was using this
* program number has vanished since this thread is async.
* If so, just return. Your work here is finished, my friend.
*/
if (!found)
goto out;
ncg);
/* retake the s_lock for next trip through the loop */
}
out:
(CE_NOTE, "delereturn_all_thread: complete\n"));
zthread_exit();
}
void
{
/* Check to see if the delegation list is empty */
return;
}
/*
* Grab the program number; the async thread will use this
* to find the nfs4_server.
*/
}
/*
* Discard any delegations
*
* Iterate over the servers s_deleg_list and
* for matching mount-point rnodes discard
* the delegation.
*/
void
{
/*
* Skip if this rnode is not on the
* same mount-point
*/
continue;
}
#ifdef DEBUG
if (nfs4_client_recov_debug) {
"nfs4_deleg_discard: matched rnode %p "
"-- discarding delegation\n", (void *)rp);
}
#endif
/*
* Free the cred originally held when the delegation
* was granted. Also need to decrement the refcnt
* on this server for each delegation we discard
*/
if (rp->r_deleg_cred)
}
}
/*
* Reopen any open streams that were covered by the given file's
* delegation.
* Returns zero or an errno value. If there was no error, *recovp
* indicates whether recovery was initiated.
*/
static int
int flags)
{
int claimnull;
recov_state.rs_flags = 0;
return (e.error);
}
/*
* if we mean to discard the delegation, it must be BAD, so don't
* use it when doing the reopen or it will fail too.
*/
/*
* Loop through the open streams for this rnode to find
* all of the ones created using the delegation state ID.
* Each of these needs to be re-opened.
*/
if (claimnull) {
} else {
FALSE);
}
goto retry;
}
/*
* if error is EINTR, ETIMEDOUT, or NFS4_FRC_UNMT_ERR, then
* recovery has already been started inside of nfs4_reopen.
*/
break;
}
/*
* Recovery is not possible, but don't give up yet;
* we'd still like to do delegreturn after
* reopening as many streams as possible.
* Continue processing the open streams.
*/
} else if (needrecov) {
/*
* Start recovery and bail out. The recovery
* thread will take it from here.
*/
break;
}
}
return (e.error);
}
/*
* get_next_deleg_stream - returns the next open stream which
* represents a delegation for this rnode. In order to assure
* forward progress, the caller must guarantee that each open
* stream returned is changed so that a future call won't return
* it again.
*
* There are several ways for the open stream to change. If the open
* stream is !os_delegation, then we aren't interested in it. Also, if
* either os_failed_reopen or !os_valid, then don't return the osp.
*
* If claimnull is false (doing reopen CLAIM_DELEGATE_CUR) then return
* the osp if it is an os_delegation open stream. Also, if the rnode still
* has r_deleg_return_pending, then return the os_delegation osp. Lastly,
* if the rnode's r_deleg_stateid is different from the osp's open_stateid,
* then return the osp.
*
* We have already taken the 'r_deleg_recall_lock' as WRITER, which
* prevents new OPENs from going OTW (as start_fop takes this
* lock in READ mode); thus, no new open streams can be created
* (which inherently means no new delegation open streams are
* being created).
*/
static nfs4_open_stream_t *
{
/*
* Search through the list of open streams looking for
* one that was created while holding the delegation.
*/
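/*
 * Sketch of the selection test described in the block comment above
 * the function (field names such as open_stateid are assumed; the
 * original expression may differ):
 *
 *	if (osp->os_delegation && osp->os_valid &&
 *	    !osp->os_failed_reopen &&
 *	    (!claimnull || rp->r_deleg_return_pending ||
 *	    bcmp(&osp->open_stateid, &rp->r_deleg_stateid,
 *	    sizeof (stateid4)) != 0))
 *		return the osp
 */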
continue;
}
osp->os_ref_count++;
return (osp);
}
}
return (NULL);
}
static void
{
"nfsv4delegRtn");
goto out;
}
/*
* Take the read-write lock in read mode to prevent other
* threads from modifying the data during the recall. This
* doesn't affect mmappers.
*/
/* Proceed with delegreturn */
goto out;
}
/*
* If the file is being truncated at the server, then throw
* away all of the pages, it doesn't matter what flavor of
* delegation we have.
*/
} else if (dtype == OPEN_DELEGATE_WRITE) {
if (rdirty) {
if (error)
CB_WARN1("nfs4delegreturn_thread:"
" VOP_PUTPAGE: %d\n", error);
}
/* turn off NFS4_DR_PUSH because we just did that above. */
flags &= ~NFS4_DR_PUSH;
}
/* If a failed recovery is indicated, discard the pages */
if (rip) {
if (error)
CB_WARN1("nfs4delegreturn_thread: VOP_PUTPAGE: %d\n",
error);
}
/*
* Pass the flags to nfs4delegreturn_impl, but be sure not to pass
* NFS4_DR_DID_OP, which just calls nfs4delegreturn_async again.
*/
flags &= ~NFS4_DR_DID_OP;
out:
zthread_exit();
}
/*
* This function assumes that the caller is
* either doing recovery (therefore cannot call nfs4_start_op) or has
* already called nfs4_start_op().
*/
void
{
int dr_flags = 0;
long mapcnt;
/*
* Accept a delegation granted to the client via an OPEN.
* Set the delegation fields in the rnode and insert the
* rnode onto the list anchored in the nfs4_server_t. The
* proper locking order requires the nfs4_server_t first,
* even though it may not be needed in all cases.
*
* NB: find_nfs4_server returns with s_lock held.
*/
return;
/* grab the statelock too, for examining r_mapcnt */
if (odt == OPEN_DELEGATE_READ) {
if (claim == CLAIM_PREVIOUS)
} else if (odt == OPEN_DELEGATE_WRITE) {
if (claim == CLAIM_PREVIOUS)
valid_garp = FALSE;
rp->r_deleg_change = 0;
rp->r_deleg_change_grant = 0;
} else {
}
/*
* Update the delegation change attribute if
* there are mappers for the file is dirty. This
* might be the case during recovery after server
* reboot.
*/
rp->r_deleg_change++;
"nfs4_delegation_accept: r_deleg_change: 0x%x\n",
"nfs4_delegation_accept: r_delg_change_grant: 0x%x\n",
} else if (already) {
/*
* No delegation granted. If the rnode currently has
* one, then consider it tainted and return it.
*/
}
if (delegation_granted) {
/* Add the rnode to the list. */
if (!already) {
/* added list node gets a reference */
}
}
/*
* We've now safely accepted the delegation, if any. Drop the
* locks and figure out what post-processing is needed. We'd
* like to retain r_statev4_lock, but nfs4_server_rele takes
* s_lock which would be a lock ordering violation.
*/
/*
* Check to see if we are in recovery. Remember that
* this function is protected by start_op, so a recovery
* cannot begin until we are out of here.
*/
if (recov) {
/*
* We cannot call delegreturn from inside
* of recovery or VOP_PUTPAGE will hang
* due to nfs4_start_fop call in
* nfs4write. Use dlistadd to add the
* rnode to the list of rnodes needing
* cleaning. We do not need to do reopen
* here because recov_openfiles will do it.
* In the non-recall case, just discard the
* delegation as it is no longer valid.
*/
if (recall)
else
dr_flags = 0;
} else {
/*
* Push the modified data back to the server,
* reopen any delegation open streams, and return
* the delegation. Drop the statev4_lock first!
*/
}
}
if (dr_flags)
}
/*
* nfs4delegabandon - Abandon the delegation on an rnode4. This code
* is called when the client receives EXPIRED, BAD_STATEID, OLD_STATEID
* or BADSEQID and the recovery code is unable to recover. Push any
* dirty data back to the server and return the delegation (if any).
*/
void
{
if (dt == OPEN_DELEGATE_NONE)
return;
/*
* Recovery on the file has failed and we want to return
* the delegation. We don't want to reopen files and
* nfs4delegreturn_thread() figures out what to do about
* the data. The only thing to do is attempt to return
* the delegation.
*/
/*
* Fire up a thread to do the delegreturn; this is
* necessary because we could be inside a GETPAGE or
* PUTPAGE and we cannot do another one.
*/
}
static int
int flg)
{
int error = 0;
#ifdef lint
#endif
/*
* Take r_deleg_recall_lock in read mode to synchronize
* with delegreturn.
*/
if (error == 0)
}
return (error);
}
void
{
(CE_NOTE, "nfs4_end_op_recall: 0x%p, 0x%p\n",
}
int
{
int error;
(CE_NOTE, "wait_for_recall: 0x%p, 0x%p\n",
return (error);
!= 0) {
}
return (error);
}
return (0);
}
/*
* nfs4_dlistadd - Add this rnode to a list of rnodes to be
* DELEGRETURN'd at the end of recovery.
*/
static void
{
/*
* Mark the delegation as having a return pending.
* This will prevent the use of the delegation stateID
* by read, write, setattr and open.
*/
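/*
 * Sketch of that marking (any locking around the assignment is
 * elided here):
 */
rp->r_deleg_return_pending = TRUE;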
#ifdef DEBUG
ncg->nfs4_dlistadd_c++;
#endif
}
/*
* nfs4_dlistclean_impl - Do DELEGRETURN for each rnode on the list
* of files awaiting cleaning. If the override_flags are non-zero
* then use them rather than the flags that were set when the rnode
* was added to the dlist.
*/
static void
{
int flags;
#ifdef DEBUG
ncg->nfs4_dlistclean_c++;
#endif
}
}
void
nfs4_dlistclean(void)
{
nfs4_dlistclean_impl(ncg, 0);
}