/*
* Copyright (c) 2008 Isilon Inc http://www.isilon.com/
* Authors: Doug Rabson <dfr@rabson.org>
* Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
*/
/*
* Most of the interesting code is here.
*
* Source code derived from FreeBSD nlm_prot_impl.c
*/
#include <rpc/pmap_prot.h>
#include <rpc/pmap_clnt.h>
#include <rpc/rpcb_prot.h>
#include <rpcsvc/nlm_prot.h>
#include <rpcsvc/sm_inter.h>
#include <rpcsvc/nsm_addr.h>
#include <nfs/nfs_clnt.h>
#include "nlm_impl.h"
struct nlm_knc {
	struct knetconfig	n_knc;
	const char		*n_netid;
};
/*
 * Number of attempts NLM makes to obtain the RPC binding
 * of the local statd.
 */

/*
 * Timeout (in seconds) NLM waits before making another
 * attempt to obtain the RPC binding of the local statd.
 */
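/*
 * Hypothetical example values for the two settings described
 * above (illustrative only, not the original definitions):
 *
 *	#define	NLM_NSM_RPCBIND_RETRIES	10
 *	#define	NLM_NSM_RPCBIND_TIMEOUT	5
 */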
/*
* Total number of sysids in NLM sysid bitmap
*/
/*
 * Number of ulong_t words in the bitmap that is used
* for allocation of sysid numbers.
*/
/*
* Given an integer x, the macro returns
 * -1 if x is negative,
 *  0 if x is zero,
 *  1 if x is positive.
*/
#define SIGN(x) (((x) > 0) - ((x) < 0))
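/*
 * Example (illustrative): the comparison routines below return
 * -1/0/1, so a raw memcmp() result can be normalized like this:
 *
 *	int res = memcmp(&a1, &a2, sizeof (a1));
 *	return (SIGN(res));
 */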
/*
* Zero timeout for asynchronous NLM RPC operations
*/
/*
 * List of all per-zone nlm_globals instances,
 * linked together.
 */
/*
* NLM kmem caches
*/
/*
 * A bitmap for allocation of new sysids.
 * A sysid is a unique number between LM_SYSID
 * and LM_SYSID_MAX. A sysid represents a unique
 * remote host that holds file locks on the local host.
 */
/*
* RPC service registration for all transports
*/
static SVC_CALLOUT nlm_svcs[] = {
	{ NLM_PROG, 4, 4, nlm_prog_4 },	/* NLM4_VERS */
	{ NLM_PROG, 1, 3, nlm_prog_3 }	/* NLM_VERS - NLM_VERSX */
};

static SVC_CALLOUT_TABLE nlm_sct = {
	2, FALSE, nlm_svcs
};
/*
 * Static table of all netid/knetconfig pairs the network
 * lock manager can work with. The nlm_netconfigs table
 * is used when we need to get a valid knetconfig by
 * netid and vice versa.
 *
 * Knetconfigs are activated either by a call from the
 * user-space lockd daemon (server side) or by taking a
 * knetconfig from the NFS mount info (client side).
 */
static struct nlm_knc nlm_netconfigs[] = {
	/* UDP */
	{
		{ NC_TPI_CLTS, NC_INET, NC_UDP, NODEV },
		"udp",
	},

	/* TCP */
	{
		{ NC_TPI_COTS_ORD, NC_INET, NC_TCP, NODEV },
		"tcp",
	},

	/* UDP over IPv6 */
	{
		{ NC_TPI_CLTS, NC_INET6, NC_UDP, NODEV },
		"udp6",
	},

	/* TCP over IPv6 */
	{
		{ NC_TPI_COTS_ORD, NC_INET6, NC_TCP, NODEV },
		"tcp6",
	},

	/* ticlts (loopback over UDP) */
	{
		{ NC_TPI_CLTS, NC_LOOPBACK, NC_NOPROTO, NODEV },
		"ticlts",
	},

	/* ticotsord (loopback over TCP) */
	{
		{ NC_TPI_COTS_ORD, NC_LOOPBACK, NC_NOPROTO, NODEV },
		"ticotsord",
	},
};

#define	NLM_KNCS	(sizeof (nlm_netconfigs) / sizeof (nlm_netconfigs[0]))
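/*
 * Example (illustrative): resolve a netid to a knetconfig and back,
 * assuming the lookup helpers defined near the end of this file:
 *
 *	struct knetconfig knc;
 *	if (nlm_knc_from_netid("ticotsord", &knc) == 0)
 *		netid = nlm_knc_to_netid(&knc);
 */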
/*
 * NLM misc. functions
*/
static void nlm_kmem_reclaim(void *);
static void nlm_pool_shutdown(void);
static void nlm_suspend_zone(struct nlm_globals *);
static void nlm_resume_zone(struct nlm_globals *);
/*
* NLM thread functions
*/
static void nlm_gc(struct nlm_globals *);
static void nlm_reclaimer(struct nlm_host *);
/*
* NLM NSM functions
*/
static int nlm_init_local_knc(struct knetconfig *);
static int nlm_nsm_init_local(struct nlm_nsm *);
static void nlm_nsm_fini(struct nlm_nsm *);
/*
* NLM host functions
*/
static int nlm_host_ctor(void *, void *, int);
static void nlm_host_dtor(void *, void *);
static void nlm_host_destroy(struct nlm_host *);
static struct nlm_host *nlm_host_create(char *, const char *,
struct knetconfig *, struct netbuf *);
static struct nlm_host *nlm_host_find_locked(struct nlm_globals *,
    const char *, struct netbuf *, avl_index_t *);
static void nlm_host_gc_vholds(struct nlm_host *);
/*
* NLM vhold functions
*/
static int nlm_vhold_ctor(void *, void *, int);
static void nlm_vhold_dtor(void *, void *);
static void nlm_vhold_destroy(struct nlm_host *,
struct nlm_vhold *);
static void nlm_vhold_clean(struct nlm_vhold *, int);
/*
 * NLM share reservation functions
 */
static void nlm_shres_destroy_item(struct nlm_shres *);
/*
* NLM initialization functions.
*/
void
nlm_init(void)
{
nlm_rpc_init();
/* initialize sysids bitmap */
nlm_sysid_nidx = 1;
	/*
	 * Reserve sysid #0, because it's associated
	 * with local locks only. Don't allow it to be
	 * allocated for remote locks.
	 */
BT_SET(nlm_sysid_bmap, 0);
}
void
{
}
void
{
}
/* ARGSUSED */
static void
nlm_kmem_reclaim(void *cdrarg)
{
	struct nlm_globals *g;

	rw_enter(&lm_lck, RW_READER);
	TAILQ_FOREACH(g, &nlm_zones_list, nlm_link)
		cv_broadcast(&g->nlm_gc_sched_cv);
	rw_exit(&lm_lck);
}
/*
 * NLM garbage collector thread (GC).
 *
 * NLM GC periodically checks whether there are any host objects
 * that can be cleaned up. It also releases stale vnodes that
 * live on the server side (under protection of vhold objects).
 *
 * NLM host objects are cleaned up from the GC thread because
 * the operations that help us determine whether a given host has
 * any locks can be quite expensive, and it's not good to call
 * them every time the very last reference to a host is dropped.
 * Thus we use a "lazy" approach to host cleanup.
 *
 * The work of the GC is to release stale vnodes on the server side
 * and destroy hosts that have had no locks and no activity for
 * some time (i.e. idle hosts).
 */
static void
nlm_gc(struct nlm_globals *g)
{
	struct nlm_host *hostp;
	clock_t now, idle_period;

	idle_period = SEC_TO_TICK(g->cn_idle_tmo);
mutex_enter(&g->lock);
for (;;) {
/*
* GC thread can be explicitly scheduled from
* memory reclamation function.
*/
		(void) cv_timedwait(&g->nlm_gc_sched_cv, &g->lock,
		    ddi_get_lbolt() + idle_period);
/*
* NLM is shutting down, time to die.
*/
if (g->run_status == NLM_ST_STOPPING)
break;
now = ddi_get_lbolt();
/*
* Find all obviously unused vholds and destroy them.
*/
			/*
			 * If these conditions are met, the vhold is
			 * obviously unused and we will destroy it. If
			 * either of the vnode's lock or share lists is
			 * non-NULL, the vhold might still be unused by
			 * the host, but it is expensive to check that.
			 * We defer such a check until the host is idle.
			 * The expensive check is done below without
			 * the global lock held.
			 */
}
}
}
/*
* Handle all hosts that are unused at the moment
		 * until we meet one whose idle timeout is in the future.
*/
break;
/*
* Drop global lock while doing expensive work
* on this host. We'll re-check any conditions
* that might change after retaking the global
* lock.
*/
mutex_exit(&g->lock);
		/*
		 * nlm_globals lock was dropped earlier because
		 * garbage collecting of vholds and checking whether
		 * the host has any locks/shares are expensive
		 * operations.
		 */
mutex_enter(&g->lock);
/*
* While we were doing expensive operations
* outside of nlm_globals critical section,
* somebody could take the host and remove it
		 * from the idle list. Whether it's been
* reinserted or not, our information about
* the host is outdated, and we should take no
* further action.
*/
continue;
		/*
		 * If the host has locks, we have to renew the
		 * host's timeout and put it at the end of the
		 * LRU list.
		 */
if (has_locks) {
continue;
}
/*
* We're here if all the following conditions hold:
		 * 1) Host doesn't have any locks or share reservations
		 * 2) Host is unused
		 * 3) Host wasn't touched by anyone for at least
		 *    g->cn_idle_tmo seconds.
*
* So, now we can destroy it.
*/
nlm_host_unregister(g, hostp);
mutex_exit(&g->lock);
nlm_host_unmonitor(g, hostp);
mutex_enter(&g->lock);
if (g->run_status == NLM_ST_STOPPING)
break;
}
}
/* Let others know that GC has died */
g->nlm_gc_thread = NULL;
mutex_exit(&g->lock);
cv_broadcast(&g->nlm_gc_finish_cv);
zthread_exit();
}
/*
 * NLM reclamation thread. Reclaims client side locks
 * on the given server represented by hostp.
 */
static void
nlm_reclaimer(struct nlm_host *hostp)
{
struct nlm_globals *g;
nlm_reclaim_client(g, hostp);
/*
* Host was explicitly referenced before
* nlm_reclaim() was called, release it
* here.
*/
nlm_host_release(g, hostp);
zthread_exit();
}
/*
 * Copy a struct netobj. (see xdr.h)
 */
void
nlm_copy_netobj(struct netobj *dst, struct netobj *src)
{
	dst->n_len = src->n_len;
	dst->n_bytes = kmem_alloc(src->n_len, KM_SLEEP);
	bcopy(src->n_bytes, dst->n_bytes, src->n_len);
}
/*
 * An NLM-specific replacement for clnt_call().
 * nlm_clnt_call() is used by all RPC functions generated
 * from the nlm_prot.x specification. The function is aware
 * of some pitfalls of NLM RPC procedures and has logic
 * that handles them properly.
 */
enum clnt_stat
nlm_clnt_call(CLIENT *clnt, rpcproc_t procnum, xdrproc_t xdr_args,
    caddr_t argsp, xdrproc_t xdr_result, caddr_t resultp,
    struct timeval wait)
{
	k_sigset_t oldmask;
	enum clnt_stat stat;
	bool_t sig_blocked = FALSE;
/*
* If NLM RPC procnum is one of the NLM _RES procedures
* that are used to reply to asynchronous NLM RPC
* (MSG calls), explicitly set RPC timeout to zero.
	 * The peer doesn't send a reply to a _RES procedure,
	 * so we don't need to wait for anything.
*
* NOTE: we ignore NLM4_*_RES procnums because they are
* equal to NLM_*_RES numbers.
*/
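	/*
	 * A minimal sketch of that check (illustrative; "wait" and the
	 * zero-timeout value are hypothetical names, procedure numbers
	 * come from rpcsvc/nlm_prot.h):
	 *
	 *	switch (procnum) {
	 *	case NLM_TEST_RES:
	 *	case NLM_LOCK_RES:
	 *	case NLM_CANCEL_RES:
	 *	case NLM_UNLOCK_RES:
	 *	case NLM_GRANTED_RES:
	 *		wait = nlm_rpctv_zero;
	 *		break;
	 *	default:
	 *		break;
	 *	}
	 */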
	/*
	 * We need to block signals for NLM_CANCEL RPCs
	 * in order to prevent interruption of network RPC
	 * calls.
	 */
if (procnum == NLM_CANCEL) {
sig_blocked = TRUE;
}
	/*
	 * Restore the signal mask if signals were blocked.
	 */
if (sig_blocked)
return (stat);
}
/*
 * Suspend NLM client/server in the given zone.
 *
 * During suspend operation we mark those hosts
 * that have any locks with the NLM_NH_SUSPEND flag,
 * so that they can be checked later, when the resume
 * operation occurs.
 */
static void
nlm_suspend_zone(struct nlm_globals *g)
{
/*
* Note that while we're doing suspend, GC thread is active
* and it can destroy some hosts while we're walking through
	 * the hosts tree. To prevent that, and to make the suspend
	 * logic a bit simpler, we put all hosts on a local "all_hosts"
	 * list and increment the reference counter of each host.
	 * This guarantees that no hosts will be released while
	 * we're doing suspend.
* NOTE: reference of each host must be dropped during
* resume operation.
*/
mutex_enter(&g->lock);
/*
* If host is idle, remove it from idle list and
* clear idle flag. That is done to prevent GC
* from touching this host.
*/
}
}
/*
* Now we can walk through all hosts on the system
	 * with the zone globals lock released. The fact that
	 * we have taken a reference to each host guarantees
* that no hosts can be destroyed during that process.
*/
mutex_exit(&g->lock);
if (nlm_host_has_locks(hostp))
}
}
/*
* Resume NLM hosts for the given zone.
*
* nlm_resume_zone() is called after hosts were suspended
 * (see nlm_suspend_zone) and its main purpose is to check
* whether remote locks owned by hosts are still in consistent
* state. If they aren't, resume function tries to reclaim
* locks (for client side hosts) and clean locks (for
* server side hosts).
*/
static void
nlm_resume_zone(struct nlm_globals *g)
{
	struct nlm_host *hostp;
mutex_enter(&g->lock);
/*
* In nlm_suspend_zone() the reference counter of each
* host was incremented, so we can safely iterate through
* all hosts without worrying that any host we touch will
* be removed at the moment.
*/
int error;
mutex_exit(&g->lock);
/*
* Suspend operation marked that the host doesn't
* have any locks. Skip it.
*/
goto cycle_end;
if (error != 0) {
NLM_ERR("Resume: Failed to contact to NSM of host %s "
goto cycle_end;
}
if (stat != RPC_SUCCESS) {
NLM_ERR("Resume: Failed to call SM_STAT operation for "
nlm_nsm_fini(&nsm);
goto cycle_end;
}
/*
* Current SM state of the host isn't equal
		 * to the one the host had when it was suspended.
* Probably it was rebooted. Try to reclaim
* locks if the host has any on its client side.
* Also try to clean up its server side locks
* (if the host has any).
*/
}
nlm_nsm_fini(&nsm);
if (resume_failed) {
/*
* Resume failed for the given host.
* Just clean up all resources it owns.
*/
}
nlm_host_release(g, hostp);
mutex_enter(&g->lock);
}
mutex_exit(&g->lock);
}
/*
* NLM functions responsible for operations on NSM handle.
*/
/*
* Initialize knetconfig that is used for communication
* with local statd via loopback interface.
*/
static int
nlm_init_local_knc(struct knetconfig *knc)
{
int error;
if (error != 0)
return (error);
return (0);
}
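/*
 * The body above presumably resolves the loopback device node and
 * fills in the knetconfig; a sketch of the idea (not the original
 * code):
 *
 *	error = lookupname("/dev/ticotsord", UIO_SYSSPACE,
 *	    FOLLOW, NULLVPP, &vp);
 *	if (error == 0) {
 *		knc->knc_semantics = NC_TPI_COTS_ORD;
 *		knc->knc_protofmly = NC_LOOPBACK;
 *		knc->knc_proto = NC_NOPROTO;
 *		knc->knc_rdev = vp->v_rdev;
 *		VN_RELE(vp);
 *	}
 */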
/*
* Initialize NSM handle that will be used to talk
* to local statd via loopback interface.
*/
static int
nlm_nsm_init_local(struct nlm_nsm *nsm)
{
int error;
if (error != 0)
return (error);
}
/*
* Initialize NSM handle used for talking to statd
*/
static int
nlm_nsm_init(struct nlm_nsm *nsm, struct knetconfig *knc, struct netbuf *nb)
{
	enum clnt_stat stat;
	int error, retries;
	/*
	 * Try several times to get the port of the statd service.
	 * If rpcbind_getaddr returns RPC_PROGNOTREGISTERED, retry
	 * the attempt, but wait NLM_NSM_RPCBIND_TIMEOUT seconds
	 * before each new try.
	 */
if (stat != RPC_SUCCESS) {
if (stat == RPC_PROGNOTREGISTERED) {
continue;
}
}
break;
}
	if (stat != RPC_SUCCESS) {
		DTRACE_PROBE2(rpcbind__error, enum clnt_stat, stat,
		    int, retries);
		error = ENOENT;
		goto error;
	}
/*
* Create an RPC handle that'll be used for communication with local
* statd using the status monitor protocol.
*/
if (error != 0)
goto error;
/*
* Create an RPC handle that'll be used for communication with the
* local statd using the address registration protocol.
*/
if (error != 0)
goto error;
	return (0);

error:
	return (error);
}
static void
nlm_nsm_fini(struct nlm_nsm *nsm)
{
}
static enum clnt_stat
{
return (stat);
}
static enum clnt_stat
{
if (stat == RPC_SUCCESS)
return (stat);
}
static enum clnt_stat
{
return (stat);
}
static enum clnt_stat
{
return (stat);
}
static enum clnt_stat
{
return (stat);
}
/*
* Get NLM vhold object corresponding to vnode "vp".
* If no such object was found, create a new one.
*
 * The purpose of this function is to associate a vhold
 * object with the given vnode, so that:
 * 1) The vnode is held (VN_HOLD) while the vhold object is alive.
 * 2) The host keeps track of all vnodes it has touched by lock
 *    or share operations. These vnodes are accessible
 *    via the collection of vhold objects.
*/
struct nlm_vhold *
nlm_vhold_get(struct nlm_host *hostp, vnode_t *vp)
{
	struct nlm_vhold *nvp;
goto out;
	/* nlm_vhold wasn't found, so create a new one */
/*
* Check if another thread has already
* created the same nlm_vhold.
*/
}
out:
return (nvp);
}
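/*
 * Typical usage pattern (illustrative only): a lock or share
 * operation takes a vhold around its work on the vnode:
 *
 *	nvp = nlm_vhold_get(hostp, vp);
 *	... do lock/share work on vp ...
 *	nlm_vhold_release(hostp, nvp);
 */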
/*
* Drop a reference to vhold object nvp.
*/
void
nlm_vhold_release(struct nlm_host *hostp, struct nlm_vhold *nvp)
{
	if (nvp == NULL)
		return;
	/*
	 * If these conditions are met, the vhold is obviously unused and we
	 * will destroy it. If either of the vnode's lock or share lists is
	 * non-NULL, the vhold might still be unused by the host, but it is
	 * expensive to check that. We defer such a check until the host is
	 * idle. The expensive check is done in the NLM garbage collector.
	 */
}
}
/*
* Clean all locks and share reservations on the
* given vhold object that were acquired by the
* given sysid
*/
static void
nlm_vhold_clean(struct nlm_vhold *nvp, int sysid)
{
}
static void
nlm_vhold_destroy(struct nlm_host *hostp, struct nlm_vhold *nvp)
{
	VERIFY(mod_hash_remove(hostp->nh_vholds_by_vp,
	    (mod_hash_key_t)nvp->nv_vp,
	    (mod_hash_val_t)&nvp) == 0);
}
/*
 * Return TRUE if the given vhold is busy.
 * A vhold object is considered to be "busy" when
 * any of the following conditions hold:
 * 1) Someone holds a reference to it at the moment;
 * 2) Its vnode has any locks;
 * 3) Its vnode has any share reservations.
 */
static bool_t
nlm_vhold_busy(struct nlm_host *hostp, struct nlm_vhold *nvp)
{
	vnode_t *vp;
	int sysid;
return (TRUE);
return (TRUE);
return (FALSE);
}
/* ARGSUSED */
static int
nlm_vhold_ctor(void *datap, void *cdrarg, int kmflags)
{
return (0);
}
/* ARGSUSED */
static void
nlm_vhold_dtor(void *datap, void *cdrarg)
{
}
struct nlm_vhold *
nlm_vhold_find_locked(struct nlm_host *hostp, const vnode_t *vp)
{
	struct nlm_vhold *nvp = NULL;

	(void) mod_hash_find(hostp->nh_vholds_by_vp,
	    (mod_hash_key_t)vp,
	    (mod_hash_val_t)&nvp);
return (nvp);
}
/*
* NLM host functions
*/
static void
{
}
/* ARGSUSED */
static int
nlm_host_ctor(void *datap, void *cdrarg, int kmflags)
{
return (0);
}
/* ARGSUSED */
static void
nlm_host_dtor(void *datap, void *cdrarg)
{
}
static void
nlm_host_unregister(struct nlm_globals *g, struct nlm_host *hostp)
{
	VERIFY(mod_hash_remove(g->nlm_hosts_hash,
	    (mod_hash_key_t)(uintptr_t)hostp->nh_sysid,
	    (mod_hash_val_t)&hostp) == 0);
}
/*
* Free resources used by a host. This is called after the reference
* count has reached zero so it doesn't need to worry about locks.
*/
static void
nlm_host_destroy(struct nlm_host *hostp)
{
}
/*
* Cleanup SERVER-side state after a client restarts,
* or becomes unresponsive, or whatever.
*
* We unlock any active locks owned by the host.
* When rpc.lockd is shutting down,
* this function is called with newstate set to zero
* which allows us to cancel any pending async locks
* and clear the locking state.
*
* When "state" is 0, we don't update host's state,
* but cleanup all remote locks on the host.
 * It's useful to call this function for resource
 * cleanup.
*/
void
nlm_host_notify_server(struct nlm_host *hostp, int state)
{
if (state != 0)
/* cleanup sleeping requests at first */
/*
* Instead of freeing cancelled sleeping request
* here, we add it to the linked list created
* on the stack in order to do all frees outside
* the critical section.
*/
}
}
}
}
/*
* Cleanup CLIENT-side state after a server restarts,
* or becomes unresponsive, or whatever.
*
* This is called by the local NFS statd when we receive a
* host state change notification. (also nlm_svc_stopping)
*
* Deal with a server restart. If we are stopping the
* NLM service, we'll have newstate == 0, and will just
* cancel all our client-side lock requests. Otherwise,
* start the "recovery" process to reclaim any locks
* we hold on this server.
*/
void
nlm_host_notify_client(struct nlm_host *hostp, int state)
{
/*
* Either host's state is up to date or
* host is already in recovery.
*/
return;
}
/*
* Host will be released by the recovery thread,
* thus we need to increment refcount.
*/
	(void) zthread_create(NULL, 0, nlm_reclaimer,
	    hostp, 0, minclsyspri);
}
/*
 * This function is called when the NLM client detects that the
 * server has entered a grace period and the client needs
 * to wait until the reclamation process (if any) does
 * its job.
 */
int
nlm_host_wait_grace(struct nlm_host *hostp)
{
struct nlm_globals *g;
int error = 0;
do {
		int rc;

		rc = cv_timedwait_sig(&hostp->nh_recl_cv, &g->lock,
		    ddi_get_lbolt() + SEC_TO_TICK(g->retrans_tmo));
		if (rc == 0) {
			error = EINTR;
			break;
		}
return (error);
}
/*
 * Create a new NLM host.
 *
 * NOTE: The in-kernel RPC (kRPC) subsystem uses TLI/XTI,
 * which needs both a knetconfig and an address when creating
 * endpoints. Thus the host object stores both the knetconfig
 * and the netid.
 */
static struct nlm_host *
nlm_host_create(char *name, const char *netid,
    struct knetconfig *knc, struct netbuf *naddr)
{
	struct nlm_host *host;
return (host);
}
/*
* Cancel all client side sleeping locks owned by given host.
*/
void
nlm_host_cancel_slocks(struct nlm_globals *g, struct nlm_host *hostp)
{
mutex_enter(&g->lock);
}
}
mutex_exit(&g->lock);
}
/*
 * Garbage collect stale vhold objects.
 *
 * In other words, check whether the vnodes that are
 * held by vhold objects still have any locks
 * or shares, or are still in use. If they don't,
 * just destroy them.
 */
static void
nlm_host_gc_vholds(struct nlm_host *hostp)
{
	struct nlm_vhold *nvp;
continue;
}
}
}
/*
* Check whether the given host has any
* server side locks or share reservations.
*/
static bool_t
nlm_host_has_srv_locks(struct nlm_host *hostp)
{
	/*
	 * It's cheap and simple: if the server has
	 * any locks or shares, there must be a vhold
	 * object storing the affected vnode.
	 *
	 * NOTE: We don't need to check sleeping
	 * locks on the server side, because if
	 * a server side sleeping lock is alive,
	 * there must be a vhold object corresponding
	 * to the target vnode.
	 */
return (TRUE);
return (FALSE);
}
/*
* Check whether the given host has any client side
* locks or share reservations.
*/
static bool_t
nlm_host_has_cli_locks(struct nlm_host *hostp)
{
	/*
	 * XXX: It's not the way I'd like to do the check,
	 * because flk_sysid_has_locks() can be very
	 * expensive by design. Unfortunately it iterates
	 * through all locks on the system, no matter
	 * whether they were made on a remote system via NLM
	 * or on the local system via reclock. To understand
	 * the problem, consider that there are dozens of
	 * thousands of locks that are made on some ZFS dataset.
	 * And there's another dataset shared by NFS where the
	 * NLM client had locks some time ago, but doesn't have
	 * them now. In this case flk_sysid_has_locks() will
	 * iterate through dozens of thousands of locks until
	 * it returns FALSE.
	 * Oh, I hope that in the shiny future somebody will
	 * make the local lock manager more friendly to remote
	 * locks, so that flk_sysid_has_locks() won't be so
	 * expensive.
	 */
return (TRUE);
/*
	 * Check whether the host has any share reservations
* registered on the client side.
*/
return (TRUE);
return (FALSE);
}
/*
* Determine whether the given host owns any
* locks or share reservations.
*/
static bool_t
nlm_host_has_locks(struct nlm_host *hostp)
{
if (nlm_host_has_srv_locks(hostp))
return (TRUE);
return (nlm_host_has_cli_locks(hostp));
}
/*
 * This function compares only the addresses of two netbufs
 * that belong to the NC_TCP[6] or NC_UDP[6] protocol families.
 * The port part of the netbuf is ignored.
*
* Return values:
* -1: nb1's address is "smaller" than nb2's
* 0: addresses are equal
* 1: nb1's address is "greater" than nb2's
*/
static int
nlm_netbuf_addrs_cmp(struct netbuf *nb1, struct netbuf *nb2)
{
	union nlm_addr {
		struct sockaddr sa;
		struct sockaddr_in sin;
		struct sockaddr_in6 sin6;
	} *na1, *na2;
	int res;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	na1 = (union nlm_addr *)nb1->buf;
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	na2 = (union nlm_addr *)nb2->buf;

	if (na1->sa.sa_family < na2->sa.sa_family)
		return (-1);
	if (na1->sa.sa_family > na2->sa.sa_family)
		return (1);

	switch (na1->sa.sa_family) {
	case AF_INET:
		res = memcmp(&na1->sin.sin_addr, &na2->sin.sin_addr,
		    sizeof (na1->sin.sin_addr));
		break;
	case AF_INET6:
		res = memcmp(&na1->sin6.sin6_addr, &na2->sin6.sin6_addr,
		    sizeof (na1->sin6.sin6_addr));
		break;
	default:
		VERIFY(0);
		return (0);
	}

	return (SIGN(res));
}
/*
* Compare two nlm hosts.
* Return values:
* -1: host1 is "smaller" than host2
* 0: host1 is equal to host2
* 1: host1 is "greater" than host2
*/
int
nlm_host_cmp(const void *p1, const void *p2)
{
	struct nlm_host *h1 = (struct nlm_host *)p1;
	struct nlm_host *h2 = (struct nlm_host *)p2;
	int res;

	res = strcmp(h1->nh_netid, h2->nh_netid);
	if (res != 0)
		return (SIGN(res));

	res = nlm_netbuf_addrs_cmp(&h1->nh_addr, &h2->nh_addr);
	return (res);
}
/*
* Find the host specified by... (see below)
* If found, increment the ref count.
*/
static struct nlm_host *
nlm_host_find_locked(struct nlm_globals *g, const char *netid,
    struct netbuf *naddr, avl_index_t *wherep)
{
	struct nlm_host *hostp;
/*
		 * Host is in use now. Remove it from the idle
		 * hosts list if needed.
*/
}
}
return (hostp);
}
/*
* Find NLM host for the given name and address.
*/
struct nlm_host *
nlm_host_find(struct nlm_globals *g, const char *netid,
    struct netbuf *addr)
{
	struct nlm_host *hostp = NULL;
mutex_enter(&g->lock);
if (g->run_status != NLM_ST_UP)
goto out;
out:
mutex_exit(&g->lock);
return (hostp);
}
/*
* Find or create an NLM host for the given name and address.
*
* The remote host is determined by all of: name, netid, address.
* Note that the netid is whatever nlm_svc_add_ep() gave to
* svc_tli_kcreate() for the service binding. If any of these
* are different, allocate a new host (new sysid).
*/
struct nlm_host *
nlm_host_findcreate(struct nlm_globals *g, char *name,
    const char *netid, struct netbuf *addr)
{
	struct nlm_host *host = NULL;
int err;
mutex_enter(&g->lock);
if (g->run_status != NLM_ST_UP) {
mutex_exit(&g->lock);
return (NULL);
}
mutex_exit(&g->lock);
return (host);
if (err != 0)
return (NULL);
/*
* Do allocations (etc.) outside of mutex,
* and then check again before inserting.
*/
goto out;
mutex_enter(&g->lock);
/*
	 * Insert the host into the hosts AVL tree that is
	 * used to look it up by <netid, address> pair.
*/
/*
	 * Insert the host into the hosts hash table that is
	 * used to look up hosts by sysid.
*/
	VERIFY(mod_hash_insert(g->nlm_hosts_hash,
	    (mod_hash_key_t)(uintptr_t)host->nh_sysid,
	    (mod_hash_val_t)host) == 0);
}
mutex_exit(&g->lock);
out:
/*
* We do not need the preallocated nlm_host
* so decrement the reference counter
* and destroy it.
*/
}
return (host);
}
/*
* Find the NLM host that matches the value of 'sysid'.
* If found, return it with a new ref,
* else return NULL.
*/
struct nlm_host *
nlm_host_find_by_sysid(struct nlm_globals *g, sysid_t sysid)
{
	struct nlm_host *hostp = NULL;
mutex_enter(&g->lock);
if (g->run_status != NLM_ST_UP)
goto out;
	(void) mod_hash_find(g->nlm_hosts_hash,
	    (mod_hash_key_t)(uintptr_t)sysid,
	    (mod_hash_val_t)&hostp);
goto out;
/*
	 * Host is in use now. Remove it
	 * from the idle hosts list if needed.
*/
}
out:
mutex_exit(&g->lock);
return (hostp);
}
/*
* Release the given host.
* I.e. drop a reference that was taken earlier by one of
* the following functions: nlm_host_findcreate(), nlm_host_find(),
* nlm_host_find_by_sysid().
*
* When the very last reference is dropped, host is moved to
* so-called "idle state". All hosts that are in idle state
 * have an idle timeout. If the timeout expires, the GC thread
 * checks whether the host has any locks and, if it has none,
 * removes it.
* NOTE: only unused hosts can be in idle state.
*/
static void
nlm_host_release_locked(struct nlm_globals *g, struct nlm_host *hostp)
{
	if (hostp == NULL)
		return;

	ASSERT(MUTEX_HELD(&g->lock));
	if (--hostp->nh_refs > 0)
		return;
/*
* The very last reference to the host was dropped,
* thus host is unused now. Set its idle timeout
* and move it to the idle hosts LRU list.
*/
	hostp->nh_idle_timeout = ddi_get_lbolt() +
	    SEC_TO_TICK(g->cn_idle_tmo);
}
void
nlm_host_release(struct nlm_globals *g, struct nlm_host *hostp)
{
	if (hostp == NULL)
		return;

	mutex_enter(&g->lock);
	nlm_host_release_locked(g, hostp);
	mutex_exit(&g->lock);
}
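/*
 * Typical lookup/release pairing (illustrative only):
 *
 *	hostp = nlm_host_find_by_sysid(g, sysid);
 *	if (hostp != NULL) {
 *		... use hostp ...
 *		nlm_host_release(g, hostp);
 *	}
 */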
/*
* Unregister this NLM host (NFS client) with the local statd
* due to idleness (no locks held for a while).
*/
void
nlm_host_unmonitor(struct nlm_globals *g, struct nlm_host *hostp)
{
return;
if (stat != RPC_SUCCESS) {
return;
}
}
/*
* Ask the local NFS statd to begin monitoring this host.
* It will call us back when that host restarts, using the
* prog,vers,proc specified below, i.e. NLM_SM_NOTIFY1,
* which is handled in nlm_do_notify1().
*/
void
nlm_host_monitor(struct nlm_globals *g, struct nlm_host *hostp, int state)
{
int family;
/*
* This is the first time we have seen an NSM state
		 * value for this host. We record it here to help
* detect host reboots.
*/
}
return;
}
/*
 * Before we begin monitoring the host, register the network address
 * associated with this hostname.
*/
if (stat != RPC_SUCCESS) {
mutex_enter(&g->lock);
mutex_exit(&g->lock);
return;
}
/*
* Tell statd how to call us with status updates for
* this host. Updates arrive via nlm_do_notify1().
*
* We put our assigned system ID value in the priv field to
* make it simpler to find the host if we are notified of a
* host restart.
*/
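	/*
	 * A sketch of the idea (hypothetical variable names; struct mon
	 * comes from rpcsvc/sm_inter.h):
	 *
	 *	args.mon_id.my_id.my_prog = NLM_PROG;
	 *	args.mon_id.my_id.my_proc = NLM_SM_NOTIFY1;
	 *	bcopy(&sysid, args.priv, sizeof (sysid));
	 */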
if (stat != RPC_SUCCESS) {
mutex_enter(&g->lock);
mutex_exit(&g->lock);
return;
}
}
int
nlm_host_get_state(struct nlm_host *hostp)
{
	return (hostp->nh_state);
}
/*
 * NLM sleeping locks functions
 */
/*
 * Register client side sleeping lock.
 *
 * Our client code calls this to keep information
 * about a sleeping lock somewhere. When it receives
 * a grant callback from the server, or when it just
 * needs to remove all sleeping locks from a vnode,
 * it uses this information to find and remove the
 * lock properly.
 */
struct nlm_slock *
nlm_slock_register(
	struct nlm_globals *g,
	struct nlm_host *host,
	struct nlm4_lock *lock,
	struct vnode *vp)
{
	struct nlm_slock *nslp;
mutex_enter(&g->lock);
mutex_exit(&g->lock);
return (nslp);
}
/*
* Remove this lock from the wait list and destroy it.
*/
void
nlm_slock_unregister(struct nlm_globals *g, struct nlm_slock *nslp)
{
mutex_enter(&g->lock);
mutex_exit(&g->lock);
}
/*
* Wait for a granted callback or cancellation event
* for a sleeping lock.
*
* If a signal interrupted the wait or if the lock
* was cancelled, return EINTR - the caller must arrange to send
* a cancellation to the server.
*
* If timeout occurred, return ETIMEDOUT - the caller must
* resend the lock request to the server.
*
* On success return 0.
*/
int
nlm_slock_wait(struct nlm_globals *g,
    struct nlm_slock *nslp, uint_t timeo_secs)
{
	clock_t timeo_ticks;
	int cv_res, error;

	timeo_ticks = ddi_get_lbolt() + SEC_TO_TICK(timeo_secs);
/*
* If the granted message arrived before we got here,
* nslp->nsl_state will be NLM_SL_GRANTED - in that case don't sleep.
*/
cv_res = 1;
	mutex_enter(&g->lock);
	while (nslp->nsl_state == NLM_SL_BLOCKED && cv_res > 0) {
		cv_res = cv_timedwait_sig(&nslp->nsl_cond,
		    &g->lock, timeo_ticks);
	}
	/*
	 * No matter why we wake up, if the lock was
	 * cancelled, let the caller know about it by
	 * returning EINTR.
	 */
	if (nslp->nsl_state == NLM_SL_CANCELLED) {
		error = EINTR;
		goto out;
	}
	if (cv_res <= 0) {
		/* We were woken up either by timeout or by interrupt */
		error = (cv_res < 0) ? ETIMEDOUT : EINTR;

		/*
		 * The granted message may arrive after the
		 * interrupt/timeout but before we manage to
		 * lock the mutex. Detect this by examining
		 * nslp.
		 */
		if (nslp->nsl_state == NLM_SL_GRANTED)
			error = 0;
	} else { /* Awaken via cv_signal()/cv_broadcast() or didn't block */
		error = 0;
	}
out:
mutex_exit(&g->lock);
return (error);
}
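/*
 * Typical caller pattern for nlm_slock_wait() (illustrative only):
 *
 *	error = nlm_slock_wait(g, nslp, timo_secs);
 *	switch (error) {
 *	case 0:
 *		... the lock was granted ...
 *	case EINTR:
 *		... send NLM_CANCEL to the server ...
 *	case ETIMEDOUT:
 *		... resend the lock request ...
 *	}
 */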
/*
* Mark client side sleeping lock as granted
* and wake up a process blocked on the lock.
* Called from server side NLM_GRANT handler.
*
* If sleeping lock is found return 0, otherwise
* return ENOENT.
*/
int
nlm_slock_grant(struct nlm_globals *g,
    struct nlm_host *hostp, struct nlm4_lock *alock)
{
	struct nlm_slock *nslp;
	int error = ENOENT;
mutex_enter(&g->lock);
continue;
error = 0;
break;
}
}
mutex_exit(&g->lock);
return (error);
}
/*
* Register sleeping lock request corresponding to
* flp on the given vhold object.
 * On success the function returns 0; otherwise (if
 * a lock request with the same flp is already
 * registered) it returns EEXIST.
*/
int
nlm_slreq_register(struct nlm_host *hostp, struct nlm_vhold *nvp,
    struct flock64 *flp)
{
	int ret = EEXIST;
goto out;
ret = 0;
}
out:
return (ret);
}
/*
* Unregister sleeping lock request corresponding
* to flp from the given vhold object.
 * On success the function returns 0; otherwise (if
 * a lock request corresponding to flp isn't found
 * on the given vhold) it returns ENOENT.
*/
int
nlm_slreq_unregister(struct nlm_host *hostp, struct nlm_vhold *nvp,
    struct flock64 *flp)
{
return (ENOENT);
}
return (0);
}
/*
* Find sleeping lock request on the given vhold object by flp.
*/
struct nlm_slreq *
nlm_slreq_find_locked(struct nlm_host *hostp, struct nlm_vhold *nvp,
    struct flock64 *flp)
{
	struct nlm_slreq *slr = NULL;
break;
}
return (slr);
}
/*
 * NLM tracks active share reservations made on the client side.
 * It needs to have a track of share reservations for two purposes
 * 1) to determine if nlm_host is busy (if it has active locks and/or
 *    share reservations, it is)
 * 2) to recover active share reservations when NLM server reports
 *    that it has rebooted.
 *
 * Unfortunately, the local share reservation manager
 * doesn't have an ability to lookup all reservations on the system
 * by sysid (like the local lock manager) or to get all reservations
 * by sysid. It can only check whether there are any reservations
 * on a particular vnode. That's not what NLM needs. Thus it has this
 * share reservations tracking scheme.
 */
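/*
 * The tracking scheme itself is simple (a sketch; the actual field
 * names are assumptions): each host keeps a linked list of nlm_shres
 * items, each pairing a share reservation with its vnode:
 *
 *	struct nlm_shres {
 *		struct nlm_shres *ns_next;
 *		struct vnode *ns_vp;
 *		struct shrlock *ns_shr;
 *	};
 */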
void
nlm_shres_track(struct nlm_host *hostp, vnode_t *vp, struct shrlock *shrp)
{
/*
* NFS code must fill the s_owner, so that
* s_own_len is never 0.
*/
break;
/*
* Found a duplicate. Do nothing.
*/
goto out;
}
out:
}
void
nlm_shres_untrack(struct nlm_host *hostp, vnode_t *vp, struct shrlock *shrp)
{
else
continue;
}
}
}
/*
* Get a _copy_ of the list of all active share reservations
* made by the given host.
 * NOTE: the list this function returns _must_ be released
 * using nlm_free_shrlist().
*/
struct nlm_shres *
nlm_get_active_shres(struct nlm_host *hostp)
{
	struct nlm_shres *nslist = NULL;
}
return (nslist);
}
/*
* Free memory allocated for the active share reservations
* list created by nlm_get_active_shres() function.
*/
void
nlm_free_shrlist(struct nlm_shres *nslist)
{
}
}
static bool_t
{
return (TRUE);
return (FALSE);
}
static struct nlm_shres *
{
return (nsp);
}
static void
nlm_shres_destroy_item(struct nlm_shres *nsp)
{
}
/*
* Called by klmmod.c when lockd adds a network endpoint
* on which we should begin RPC services.
*/
int
nlm_svc_add_ep(struct file *fp, const char *netid, struct knetconfig *knc)
{
int error;
if (error != 0)
return (error);
(void) nlm_knc_to_netid(knc);
return (0);
}
/*
* Start NLM service.
*/
int
nlm_svc_starting(struct nlm_globals *g, struct file *fp,
    const char *netid, struct knetconfig *knc)
{
int error;
if (error != 0) {
NLM_ERR("Failed to initialize NSM handler "
"(error=%d)\n", error);
g->run_status = NLM_ST_DOWN;
return (error);
}
/*
* Create an NLM garbage collector thread that will
* clean up stale vholds and hosts objects.
*/
	g->nlm_gc_thread = zthread_create(NULL, 0, nlm_gc,
	    g, 0, minclsyspri);
	/*
	 * Send SIMU_CRASH to the local statd to report that
	 * NLM has started, so that statd can notify other hosts
	 * about the NLM state change.
	 */
if (stat != RPC_SUCCESS) {
NLM_ERR("Failed to connect to local statd "
"(rpcerr=%d)\n", stat);
goto shutdown_lm;
}
if (stat != RPC_SUCCESS) {
NLM_ERR("Failed to get the status of local statd "
"(rpcerr=%d)\n", stat);
goto shutdown_lm;
}
g->grace_threshold = ddi_get_lbolt() +
SEC_TO_TICK(g->grace_period);
/* Register endpoint used for communications with local NLM */
if (error != 0)
goto shutdown_lm;
(void) svc_pool_control(NLM_SVCPOOL_ID,
SVCPSET_SHUTDOWN_PROC, (void *)nlm_pool_shutdown);
g->run_status = NLM_ST_UP;
	return (0);

shutdown_lm:
	mutex_enter(&g->lock);
g->run_status = NLM_ST_STOPPING;
mutex_exit(&g->lock);
nlm_svc_stopping(g);
return (error);
}
/*
 * Called when the server pool is destroyed, so that
 * all transports are closed and no server threads
 * exist.
*
* Just call lm_shutdown() to shut NLM down properly.
*/
static void
nlm_pool_shutdown(void)
{
(void) lm_shutdown();
}
/*
* Stop NLM service, cleanup all resources
* NLM owns at the moment.
*
* NOTE: NFS code can call NLM while it's
 * stopping or even if it's shut down. Any attempt
 * to lock a file either on the client or on the server
 * will fail if NLM isn't in the NLM_ST_UP state.
*/
void
nlm_svc_stopping(struct nlm_globals *g)
{
	struct nlm_host *hostp;
mutex_enter(&g->lock);
/*
* Ask NLM GC thread to exit and wait until it dies.
*/
cv_signal(&g->nlm_gc_sched_cv);
	while (g->nlm_gc_thread != NULL)
		cv_wait(&g->nlm_gc_finish_cv, &g->lock);
mutex_exit(&g->lock);
/*
* Cleanup locks owned by NLM hosts.
* NOTE: New hosts won't be created while
* NLM is stopping.
*/
while (!avl_is_empty(&g->nlm_hosts_tree)) {
int busy_hosts = 0;
/*
* Iterate through all NLM hosts in the system
* and drop the locks they own by force.
*/
/* Cleanup all client and server side locks */
			/*
			 * Oh, it seems the host is still busy; give
			 * it some time to be released and go on to
			 * the next one.
			 */
busy_hosts++;
continue;
}
}
		/*
		 * All hosts go to the nlm_idle_hosts list after
		 * all locks they own are cleaned up and the last
		 * references were dropped. Just destroy all hosts
		 * in the nlm_idle_hosts list; they cannot be removed
		 * from there while we're in the stopping state.
		 */
nlm_host_unregister(g, hostp);
}
if (busy_hosts > 0) {
			/*
			 * There are some hosts that weren't cleaned
			 * up. Probably they're in the resource cleanup
			 * process. Give them some time to drop their
			 * references.
			 */
}
}
nlm_nsm_fini(&g->nlm_nsm);
g->lockd_pid = 0;
g->run_status = NLM_ST_DOWN;
}
/*
* Returns TRUE if the given vnode has
* any active or sleeping locks.
*/
int
nlm_vp_active(vnode_t *vp)
{
struct nlm_globals *g;
int active = 0;
	/*
	 * Server side NLM has locks on the given vnode
	 * if there exists a vhold object that holds
	 * the given vnode "vp" in one of the NLM hosts.
	 */
mutex_enter(&g->lock);
active = 1;
break;
}
}
mutex_exit(&g->lock);
return (active);
}
/*
 * Called right before an NFS export is going to
 * disappear. The function finds all vnodes
 * belonging to the given export and cleans
 * all remote locks and share reservations
 * on them.
 */
void
nlm_unexport(struct exportinfo *exi)
{
struct nlm_globals *g;
mutex_enter(&g->lock);
}
mutex_exit(&g->lock);
goto next_iter;
		/*
		 * OK, we found that the vnode vp is under
		 * control of the exportinfo exi. Now we need
		 * to drop all locks from this vnode, so let's
		 * do it.
		 */
}
mutex_enter(&g->lock);
}
mutex_exit(&g->lock);
}
/*
* Allocate new unique sysid.
* In case of failure (no available sysids)
* return LM_NOSYSID.
*/
sysid_t
nlm_sysid_alloc(void)
{
	sysid_t ret_sysid = LM_NOSYSID;

	rw_enter(&lm_lck, RW_WRITER);
	if (nlm_sysid_nidx > LM_SYSID_MAX)
		nlm_sysid_nidx = LM_SYSID + 1;

	if (!BT_TEST(nlm_sysid_bmap, nlm_sysid_nidx)) {
		BT_SET(nlm_sysid_bmap, nlm_sysid_nidx);
		ret_sysid = nlm_sysid_nidx++;
	} else {
		index_t id = bt_availbit(nlm_sysid_bmap, NLM_BMAP_NITEMS);

		if (id > 0) {
			nlm_sysid_nidx = (sysid_t)(id + 1);
			ret_sysid = (sysid_t)id;
			BT_SET(nlm_sysid_bmap, id);
		}
	}
	rw_exit(&lm_lck);
	return (ret_sysid);
}
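/*
 * Example (illustrative): a sysid is taken when a new host object
 * is created and given back when the host is destroyed:
 *
 *	sysid_t sysid = nlm_sysid_alloc();
 *	if (sysid == LM_NOSYSID)
 *		... fail: no free sysids ...
 */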
void
nlm_sysid_free(sysid_t sysid)
{
	rw_enter(&lm_lck, RW_WRITER);
	BT_CLEAR(nlm_sysid_bmap, sysid);
	rw_exit(&lm_lck);
}
/*
* Return true if the request came from a local caller.
* By necessity, this "knows" the netid names invented
* in lm_svc() and nlm_netid_from_knetconfig().
*/
 */
bool_t
nlm_caller_is_local(SVCXPRT *transp)
{
char *netid;
return (FALSE);
return (TRUE);
return (TRUE);
}
return (TRUE);
}
return (FALSE); /* unknown transport */
}
/*
 * Get the netid string corresponding to the given knetconfig.
* If not done already, save knc->knc_rdev in our table.
*/
const char *
nlm_knc_to_netid(struct knetconfig *knc)
{
	const char *netid = NULL;
	struct nlm_knc *nc;
	int i;
for (i = 0; i < NLM_KNCS; i++) {
nc = &nlm_netconfigs[i];
		if (nc->n_knc.knc_semantics == knc->knc_semantics &&
		    strcmp(nc->n_knc.knc_protofmly,
		    knc->knc_protofmly) == 0) {
			netid = nc->n_netid;
			if (nc->n_knc.knc_rdev == NODEV)
				nc->n_knc.knc_rdev = knc->knc_rdev;
			break;
		}
	}
return (netid);
}
/*
* Get a knetconfig corresponding to the given netid.
* If there's no knetconfig for this netid, ENOENT
* is returned.
*/
int
nlm_knc_from_netid(const char *netid, struct knetconfig *knc)
{
	int i, ret;
	struct nlm_knc *nknc;

	ret = ENOENT;
for (i = 0; i < NLM_KNCS; i++) {
		nknc = &nlm_netconfigs[i];
		if (strcmp(netid, nknc->n_netid) == 0 &&
		    nknc->n_knc.knc_rdev != NODEV) {
			*knc = nknc->n_knc;
			ret = 0;
			break;
}
}
return (ret);
}
void
nlm_cprsuspend(void)
{
	struct nlm_globals *g;

	rw_enter(&lm_lck, RW_READER);
	TAILQ_FOREACH(g, &nlm_zones_list, nlm_link)
		nlm_suspend_zone(g);
	rw_exit(&lm_lck);
}
void
nlm_cprresume(void)
{
	struct nlm_globals *g;

	rw_enter(&lm_lck, RW_READER);
	TAILQ_FOREACH(g, &nlm_zones_list, nlm_link)
		nlm_resume_zone(g);
	rw_exit(&lm_lck);
}
static void
{
}
static void
nlm_netbuf_to_netobj(struct netbuf *addr, int *family, netobj *obj)
{
	/* LINTED pointer alignment */
	struct sockaddr *sa = (struct sockaddr *)addr->buf;

	*family = sa->sa_family;

	switch (sa->sa_family) {
	case AF_INET: {
		/* LINTED pointer alignment */
		struct sockaddr_in *sin = (struct sockaddr_in *)sa;

		obj->n_len = sizeof (sin->sin_addr);
		obj->n_bytes = (char *)&sin->sin_addr;
		break;
	}

	case AF_INET6: {
		/* LINTED pointer alignment */
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;

		obj->n_len = sizeof (sin6->sin6_addr);
		obj->n_bytes = (char *)&sin6->sin6_addr;
		break;
	}

	default:
		VERIFY(0);
		break;
	}
}