/* flock.c revision 90221f9148b67fdc90178b67f9600b7bd4e3bc7c */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright 2015 Joyent, Inc.
*/
#include <sys/flock_impl.h>
/*
 * The following variables are for statistics purposes and they are
* not protected by locks. They may not be accurate but will at least be
* close to the actual value.
*/
int flk_lock_allocs;
int flk_lock_frees;
int edge_allocs;
int edge_frees;
int flk_proc_edge_allocs;
int flk_proc_edge_frees;
static kmutex_t flock_lock;
#ifdef DEBUG
int check_debug = 0;
#define	CHECK_ACTIVE_LOCKS(gp)	if (check_debug) \
	{ \
		check_active_locks(gp); \
	}
#define	CHECK_SLEEPING_LOCKS(gp)	if (check_debug) \
	{ \
		check_sleeping_locks(gp); \
	}
#else
#define CHECK_ACTIVE_LOCKS(gp)
#define CHECK_SLEEPING_LOCKS(gp)
#endif /* DEBUG */
struct kmem_cache *flk_edge_cache;
/*
* Clustering.
*
* NLM REGISTRY TYPE IMPLEMENTATION
*
* Assumptions:
* 1. Nodes in a cluster are numbered starting at 1; always non-negative
* integers; maximum node id is returned by clconf_maximum_nodeid().
* 2. We use this node id to identify the node an NLM server runs on.
*/
/*
* NLM registry object keeps track of NLM servers via their
* nlmids (which are the node ids of the node in the cluster they run on)
* that have requested locks at this LLM with which this registry is
* associated.
*
* Representation of abstraction:
* rep = record[ states: array[nlm_state],
* lock: mutex]
*
* Representation invariants:
 * 1. index i of rep.states is between 0 and n - 1 where n is the number
 *    of elements in the array, which happens to be the maximum number
* of nodes in the cluster configuration + 1.
* 2. map nlmid to index i of rep.states
* 0 -> 0
* 1 -> 1
* 2 -> 2
* n-1 -> clconf_maximum_nodeid()+1
* 3. This 1-1 mapping is quite convenient and it avoids errors resulting
* from forgetting to subtract 1 from the index.
* 4. The reason we keep the 0th index is the following. A legitimate
* cluster configuration includes making a UFS file system NFS
* exportable. The code is structured so that if you're in a cluster
* you do one thing; otherwise, you do something else. The problem
 * is what to do if you think you're in a cluster with PXFS loaded,
 * but you're using UFS, not PXFS? The upper two bytes of the sysid
* encode the node id of the node where NLM server runs; these bytes
* are zero for UFS. Since the nodeid is used to index into the
* registry, we can record the NLM server state information at index
* 0 using the same mechanism used for PXFS file locks!
*/
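/*
 * Illustrative sketch (not from the original source): given the encoding
 * described above, a cluster-wide sysid splits into the NLM server's node
 * id (upper two bytes) and the local sysid (lower two bytes) roughly as:
 *
 *	uint_t nlmid  = ((uint_t)sysid >> 16) & 0xffff;	<- zero for UFS
 *	uint_t lsysid = (uint_t)sysid & 0xffff;
 *
 * The actual extraction is done by macros in the flock headers (e.g. a
 * GETNLMID()-style macro); the shift/mask here only shows the layout.
 */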
/*
* Although we need a global lock dependency graph (and associated data
* structures), we also need a per-zone notion of whether the lock manager is
* running, and so whether to allow lock manager requests or not.
*
* Thus, on a per-zone basis we maintain a ``global'' variable
* (flk_lockmgr_status), protected by flock_lock, and set when the lock
* manager is determined to be changing state (starting or stopping).
 *
 * Each graph/zone pair also has a copy of this variable, which is protected
 * by the graph's mutex.
 *
* The per-graph copies are used to synchronize lock requests with shutdown
* requests. The global copy is used to initialize the per-graph field when a
* new graph is created.
*/
struct flock_globals {
	flk_lockmgr_status_t flk_lockmgr_status;
	flk_lockmgr_status_t lockmgr_status[HASH_SIZE];
};
static lock_descriptor_t *flk_get_lock(void);
static int flk_process_request(lock_descriptor_t *);
static edge_t *flk_get_edge(void);
static int flk_wait_execute_request(lock_descriptor_t *);
static void flk_insert_active_lock(lock_descriptor_t *);
static void flk_delete_active_lock(lock_descriptor_t *, int);
static void flk_insert_sleeping_lock(lock_descriptor_t *);
static void flk_graph_uncolor(graph_t *);
static void flk_wakeup(lock_descriptor_t *, int);
static void flk_free_edge(edge_t *);
static void flk_recompute_dependencies(lock_descriptor_t *,
lock_descriptor_t **, int, int);
static int flk_find_barriers(lock_descriptor_t *);
static void flk_update_barriers(lock_descriptor_t *);
static int flk_color_reachables(lock_descriptor_t *);
static int flk_canceled(lock_descriptor_t *);
static void flk_delete_locks_by_sysid(lock_descriptor_t *);
static void wait_for_lock(lock_descriptor_t *);
static void unlock_lockmgr_granted(struct flock_globals *);
static void wakeup_sleeping_lockmgr_locks(struct flock_globals *);
/* Clustering hooks */
static void cl_flk_change_nlm_state_all_locks(int, flk_nlm_status_t);
static void cl_flk_wakeup_sleeping_nlm_locks(int);
static void cl_flk_unlock_nlm_granted(int);
#ifdef DEBUG
static int check_lock_transition(int, int);
static void check_sleeping_locks(graph_t *);
static void check_active_locks(graph_t *);
#endif
/* proc_graph function definitions */
static int flk_check_deadlock(lock_descriptor_t *);
static void flk_proc_graph_uncolor(void);
static proc_edge_t *flk_get_proc_edge(void);
static void flk_proc_release(proc_vertex_t *);
static void flk_free_proc_edge(proc_edge_t *);
static void flk_update_proc_graph(edge_t *, int);
/* Non-blocking mandatory locking */
static int lock_blocks_io(nbl_op_t, lock_descriptor_t *);
static struct flock_globals *
flk_get_globals(void)
{
	/*
	 * The KLM module had better be loaded if we're attempting to handle
	 * lockmgr requests.
	 */
	ASSERT(flock_zone_key != ZONE_KEY_UNINITIALIZED);
	return (zone_getspecific(flock_zone_key, curproc->p_zone));
}
static flk_lockmgr_status_t
flk_get_lockmgr_status(void)
{
struct flock_globals *fg;
if (flock_zone_key == ZONE_KEY_UNINITIALIZED) {
/*
* KLM module not loaded; lock manager definitely not running.
*/
return (FLK_LOCKMGR_DOWN);
}
fg = flk_get_globals();
return (fg->flk_lockmgr_status);
}
/*
* This implements Open File Description (not descriptor) style record locking.
 * These locks can also be thought of as pid-less since they are not tied to a
 * specific process; thus they're preserved across fork.
*
* Called directly from fcntl.
*
* See reclock() for the implementation of the traditional POSIX style record
* locking scheme (pid-ful). This function is derived from reclock() but
* simplified and modified to work for OFD style locking.
*
* The two primary advantages of OFD style of locking are:
* 1) It is per-file description, so closing a file descriptor that refers to a
* different file description for the same file will not drop the lock (i.e.
 * two opens of the same file get different descriptions but a dup or fork
* will refer to the same description).
* 2) Locks are preserved across fork(2).
*
* Because these locks are per-description a lock ptr lives at the f_filocks
* member of the file_t and the lock_descriptor includes a file_t pointer
* to enable unique lock identification and management.
*
* Since these locks are pid-less we cannot do deadlock detection with the
* current process-oriented implementation. This is consistent with OFD locking
* behavior on other operating systems such as Linux. Since we don't do
* deadlock detection we never interact with the process graph that is
* maintained for deadlock detection on the traditional POSIX-style locks.
*
* Future Work:
*
* The current implementation does not support record locks. That is,
* currently the single lock must cover the entire file. This is validated in
* fcntl. To support record locks the f_filock pointer in the file_t needs to
* be changed to a list of pointers to the locks. That list needs to be
* managed independently of the lock list on the vnode itself and it needs to
* be maintained as record locks are created, split, coalesced and deleted.
*
* The current implementation does not support remote file systems (e.g.
* NFS or CIFS). This is handled in fs_frlock(). The design of how OFD locks
* interact with the NLM is not clear since the NLM protocol/implementation
* appears to be oriented around locks associated with a process. A further
* problem is that a design is needed for what nlm_send_siglost() should do and
* where it will send SIGLOST. More recent versions of Linux apparently try to
* emulate OFD locks on NFS by converting them to traditional POSIX style locks
* that work with the NLM. It is not clear that this provides the correct
* semantics in all cases.
*/
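/*
 * Illustrative userland sketch (not from the original source): an
 * application takes an OFD-style lock via fcntl(2) with the F_OFD_*
 * commands, e.g.:
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;		<- whole file; record locks not supported
 *	fl.l_pid = 0;
 *	if (fcntl(fd, F_OFD_SETLKW, &fl) == -1)
 *		err(1, "F_OFD_SETLKW");
 *
 * A dup(2)'d or fork(2)-inherited descriptor shares the file description
 * and therefore the lock; a second open(2) of the same file does not.
 */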
int
ofdlock(file_t *fp, int fcmd, flock64_t *lckdat, int flag, u_offset_t offset)
{
int cmd = 0;
int error = 0;
int serialize = 0;
if (fcmd != F_OFD_GETLK)
/* see block comment */
/*
* For reclock fs_frlock() would normally have set these in a few
* places but for us it's cleaner to centralize it here. Note that
* IGN_PID is -1. We use 0 for our pid-less locks.
*/
/*
* Check access permissions
*/
return (EBADF);
/*
* for query and unlock we use the stack_lock_request
*/
sizeof (lock_descriptor_t));
/*
* following is added to make the assertions in
* flk_execute_request() pass
*/
} else {
lock_request = flk_get_lock();
}
lock_request->l_state = 0;
/*
* Convert the request range into the canonical start and end
* values then check the validity of the lock range.
*/
if (error)
goto done;
MAXEND);
if (error)
goto done;
}
/*
* We are ready for processing the request
*/
nbl_need_check(vp)) {
serialize = 1;
}
/* Get the lock graph for a particular vnode */
switch (lock_request->l_type) {
case F_RDLCK:
case F_WRLCK:
if (IS_QUERY_LOCK(lock_request)) {
} else {
/* process the request now */
}
break;
case F_UNLCK:
/* unlock request will not block so execute it immediately */
break;
default:
break;
}
if (lock_request == &stack_lock_request) {
} else {
}
}
if (serialize)
return (error);
done:
if (lock_request != &stack_lock_request)
return (error);
}
/*
* Remove any lock on the vnode belonging to the given file_t.
* Called from closef on last close, file_t is locked.
*
* This is modeled on the cleanlocks() function but only removes the single
* lock associated with fp.
*/
void
{
return;
return;
if (lock) {
do {
break;
}
}
if (lock) {
do {
break;
}
}
}
/*
*
* This implements traditional POSIX style record locking. The two primary
* drawbacks to this style of locking are:
 * 1) It is per-process, so any close of a file descriptor that refers to the
 *    file will drop the lock (and the application loses its lock and does
 *    not know).
* 2) Locks are not preserved across fork(2).
*
* Because these locks are only associated with a PID, they are per-process.
* This is why any close will drop the lock and is also why, once the process
* forks, the lock is no longer related to the new process. These locks can
* be considered as PID-ful.
*
* See ofdlock() for the implementation of a similar but improved locking
* scheme.
*/
int
reclock(vnode_t *vp, flock64_t *lckdat, int cmd, int flag, u_offset_t offset,
    flk_callback_t *flk_cbp)
{
int error = 0;
int nlmid;
/*
* Check access permissions
*/
return (EBADF);
/*
* for query and unlock we use the stack_lock_request
*/
sizeof (lock_descriptor_t));
/*
 * following is added to make the assertions in
 * flk_execute_request() pass
*/
} else {
lock_request = flk_get_lock();
}
lock_request->l_state = 0;
/*
* Convert the request range into the canonical start and end
* values. The NLM protocol supports locking over the entire
* 32-bit range, so there's no range checking for remote requests,
* but we still need to verify that local requests obey the rules.
*/
/* Clustering */
} else {
/* check the validity of the lock range */
offset);
if (error) {
goto done;
}
if (error) {
goto done;
}
}
/*
* Clustering: set flag for PXFS locks
* We do not _only_ check for the PCMDLCK flag because PXFS locks could
* also be of type 'RCMDLCK'.
* We do not _only_ check the GETPXFSID() macro because local PXFS
* clients use a pxfsid of zero to permit deadlock detection in the LLM.
*/
}
}
/*
* We are ready for processing the request
*/
if (IS_LOCKMGR(lock_request)) {
/*
* If the lock request is an NLM server request ....
*/
if (nlm_status_size == 0) { /* not booted as cluster */
/*
* Bail out if this is a lock manager request and the
* lock manager is not supposed to be running.
*/
if (flk_get_lockmgr_status() != FLK_LOCKMGR_UP) {
goto done;
}
} else { /* booted as a cluster */
/*
* If the NLM registry does not know about this
* NLM server making the request, add its nlmid
* to the registry.
*/
nlmid)) {
} else if (!FLK_REGISTRY_IS_NLM_UP(nlm_reg_status,
nlmid)) {
/*
* If the NLM server is already known (has made
* previous lock requests) and its state is
* not NLM_UP (means that NLM server is
* shutting down), then bail out with an
* error to deny the lock request.
*/
goto done;
}
}
}
/* Now get the lock graph for a particular vnode */
/*
* We drop rwlock here otherwise this might end up causing a
* deadlock if this IOLOCK sleeps. (bugid # 1183392).
*/
if (IS_IO_LOCK(lock_request)) {
}
switch (lock_request->l_type) {
case F_RDLCK:
case F_WRLCK:
if (IS_QUERY_LOCK(lock_request)) {
break;
}
/* process the request now */
break;
case F_UNLCK:
/* unlock request will not block so execute it immediately */
if (IS_LOCKMGR(lock_request) &&
error = 0;
} else {
}
break;
case F_UNLKSYS:
/*
 * Recovery mechanism to release lock manager locks when
 * NFS client crashes and restarts. NFS server will clear
* old locks and grant new locks.
*/
return (EINVAL);
}
if (secpolicy_nfs(CRED()) != 0) {
return (EPERM);
}
return (0);
default:
break;
}
/* Clustering: For blocked PXFS locks, return */
if (error == PXFS_LOCK_BLOCKED) {
return (error);
}
/*
* Now that we have seen the status of locks in the system for
* this vnode we acquire the rwlock if it is an IO_LOCK.
*/
if (IS_IO_LOCK(lock_request)) {
(void) VOP_RWLOCK(vp,
if (!error) {
/*
 * This wakeup is needed; otherwise, if IO_LOCK
 * has slept, the dependents on this lock will
 * not be woken up at all. (bugid # 1185482).
*/
}
/*
 * else if an error occurred, either flk_process_request()
 * returned EDEADLK, in which case there will be no
 * dependents for this lock, or flk_wait_execute_request()
 * returned EINTR, in which case flk_cancel_sleeping_lock()
 * would have been done. The same is true with EBADF.
*/
}
if (lock_request == &stack_lock_request) {
} else {
}
}
return (error);
done:
if (lock_request != &stack_lock_request)
return (error);
}
/*
* Invoke the callbacks in the given list. If before sleeping, invoke in
* list order. If after sleeping, invoke in reverse order.
*
 *
 * CPR (suspend/resume) support: if one of the callbacks returns a
 * callb_cpr_t, return it. This will be used to make the thread CPR-safe
* while it is sleeping. There should be at most one callb_cpr_t for the
* thread.
* XXX This is unnecessarily complicated. The CPR information should just
* get passed in directly through VOP_FRLOCK and reclock, rather than
* sneaking it in via a callback.
*/
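/*
 * Illustrative sketch (not from the original source): for a callback list
 * cb1 -> cb2 -> cb3, flk_invoke_callbacks() invokes
 *
 *	cb1, cb2, cb3	when == FLK_BEFORE_SLEEP
 *	cb3, cb2, cb1	when == FLK_AFTER_SLEEP
 *
 * and returns the callb_cpr_t supplied by whichever callback returns one
 * (at most one per thread is expected).
 */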
static callb_cpr_t *
flk_invoke_callbacks(flk_callback_t *cblist, flk_cb_when_t when)
{
	callb_cpr_t *cpr_callbackp = NULL;
	callb_cpr_t *one_result;
	flk_callback_t *cb;

	if (cblist == NULL)
		return (NULL);
if (when == FLK_BEFORE_SLEEP) {
do {
if (one_result != NULL) {
}
} else {
do {
if (one_result != NULL) {
}
}
return (cpr_callbackp);
}
/*
* Initialize a flk_callback_t to hold the given callback.
*/
void
flk_init_callback(flk_callback_t *flk_cb,
    callb_cpr_t *(*cb_fcn)(flk_cb_when_t, void *), void *cbdata)
{
	flk_cb->cb_next = flk_cb;
	flk_cb->cb_prev = flk_cb;
	flk_cb->cb_callback = cb_fcn;
	flk_cb->cb_data = cbdata;
}
/*
* Initialize an flk_callback_t and then link it into the head of an
* existing list (which may be NULL).
*/
void
flk_add_callback(flk_callback_t *newcb,
    callb_cpr_t *(*cb_fcn)(flk_cb_when_t, void *),
    void *cbdata, flk_callback_t *cblist)
{
	flk_init_callback(newcb, cb_fcn, cbdata);

	if (cblist == NULL)
		return;

	newcb->cb_prev = cblist->cb_prev;
	newcb->cb_next = cblist;
	cblist->cb_prev->cb_next = newcb;
	cblist->cb_prev = newcb;
}
/*
* Initialize the flk_edge_cache data structure and create the
* nlm_reg_status array.
*/
void
flk_init(void)
{
uint_t i;
if (flk_edge_cache == NULL) {
}
/*
* Create the NLM registry object.
*/
if (cluster_bootflags & CLUSTER_BOOTED) {
/*
* This routine tells you the maximum node id that will be used
* in the cluster. This number will be the size of the nlm
* registry status array. We add 1 because we will be using
* all entries indexed from 0 to maxnodeid; e.g., from 0
* to 64, for a total of 65 entries.
*/
} else {
nlm_status_size = 0;
}
if (nlm_status_size != 0) { /* booted as a cluster */
KM_SLEEP);
/* initialize all NLM states in array to NLM_UNKNOWN */
for (i = 0; i < nlm_status_size; i++) {
}
}
}
/*
 * Zone constructor/destructor callbacks to be executed when a zone is
 * created/destroyed.
*/
/* ARGSUSED */
void *
flk_zone_init(zoneid_t zoneid)
{
	struct flock_globals *fg;
	uint_t i;

	fg = kmem_alloc(sizeof (*fg), KM_SLEEP);
	fg->flk_lockmgr_status = FLK_LOCKMGR_UP;
	for (i = 0; i < HASH_SIZE; i++)
		fg->lockmgr_status[i] = FLK_LOCKMGR_UP;
	return (fg);
}
/* ARGSUSED */
void
flk_zone_fini(zoneid_t zoneid, void *data)
{
	struct flock_globals *fg = data;

	kmem_free(fg, sizeof (*fg));
}
/*
* Get a lock_descriptor structure with initialization of edge lists.
*/
static lock_descriptor_t *
flk_get_lock(void)
{
	lock_descriptor_t *l;

	l = kmem_zalloc(sizeof (lock_descriptor_t), KM_SLEEP);
	cv_init(&l->l_cv, NULL, CV_DRIVER, NULL);
	l->l_edge.edge_in_next = l->l_edge.edge_in_prev = &l->l_edge;
	l->l_edge.edge_adj_next = l->l_edge.edge_adj_prev = &l->l_edge;
	flk_lock_allocs++;
	l->pvertex = -1;
	l->l_status = FLK_INITIAL_STATE;
	return (l);
}
/*
* Free a lock_descriptor structure. Just sets the DELETED_LOCK flag
* when some thread has a reference to it as in reclock().
*/
void
flk_free_lock(lock_descriptor_t *lock)
{
	ASSERT(IS_DEAD(lock));
	if (IS_REFERENCED(lock)) {
		lock->l_state |= DELETED_LOCK;
		return;
	}
	flk_lock_frees++;
	kmem_free((void *)lock, sizeof (lock_descriptor_t));
}
void
flk_set_state(lock_descriptor_t *lock, int new_state)
{
/*
* Locks in the sleeping list may be woken up in a number of ways,
* and more than once. If a sleeping lock is signaled awake more
* than once, then it may or may not change state depending on its
* current state.
* Also note that NLM locks that are sleeping could be moved to an
* interrupted state more than once if the unlock request is
* retransmitted by the NLM client - the second time around, this is
* just a nop.
* The ordering of being signaled awake is:
* INTERRUPTED_STATE > CANCELLED_STATE > GRANTED_STATE.
* The checks below implement this ordering.
*/
if (IS_INTERRUPTED(lock)) {
if ((new_state == FLK_CANCELLED_STATE) ||
(new_state == FLK_GRANTED_STATE) ||
(new_state == FLK_INTERRUPTED_STATE)) {
return;
}
}
if (IS_CANCELLED(lock)) {
if ((new_state == FLK_GRANTED_STATE) ||
(new_state == FLK_CANCELLED_STATE)) {
return;
}
}
	if (IS_PXFS(lock)) {
		cl_flk_state_transition_notify(lock, lock->l_status, new_state);
	}
	lock->l_status = new_state;
}
/*
* Routine that checks whether there are any blocking locks in the system.
*
* The policy followed is if a write lock is sleeping we don't allow read
* locks before this write lock even though there may not be any active
* locks corresponding to the read locks' region.
*
 * The flk_add_edge() function adds an edge between l1 and l2 iff there
 * is no path between l1 and l2. This is done to have a "minimum
 * storage representation" of the dependency graph.
 *
 * Another property of the graph is that, since only the new request
 * throws edges to the existing locks in the graph, the graph is always
 * topologically ordered.
*/
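/*
 * Illustrative example (not from the original source): if a write lock W
 * is sleeping on range [100, 200] and a new read request R arrives for
 * [150, 160], R is blocked behind W even when no active lock overlaps
 * [150, 160]; otherwise a steady stream of readers could starve W.
 */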
static int
flk_process_request(lock_descriptor_t *request)
{
int request_blocked_by_active = 0;
int request_blocked_by_granted = 0;
int request_blocked_by_sleeping = 0;
int error = 0;
int request_will_wait = 0;
int found_covering_lock = 0;
/*
* check active locks
*/
if (lock) {
do {
if (!request_will_wait)
return (EAGAIN);
break;
}
/*
* Grant lock if it is for the same owner holding active
* lock that covers the request.
*/
return (flk_execute_request(request));
}
if (!request_blocked_by_active) {
/*
* Shall we grant this?! NO!!
* What about those locks that were just granted and still
* in sleep queue. Those threads are woken up and so locks
* are almost active.
*/
if (lock) {
do {
if (IS_GRANTED(lock)) {
} else {
}
}
}
goto block;
if (!request_blocked_by_sleeping) {
/*
* If the request isn't going to be blocked by a
* sleeping request, we know that it isn't going to
* be blocked; we can just execute the request --
* without performing costly deadlock detection.
*/
return (flk_execute_request(request));
/*
* If we have a sleeping writer in the requested
* lock's range, block.
*/
goto block;
}
if (lock) {
do {
}
lock = first_glock;
if (lock) {
do {
if (IS_GRANTED(lock)) {
}
}
return (EDEADLK);
return (flk_execute_request(request));
}
if (request_will_wait)
/* check sleeping locks */
/*
* If we find a sleeping write lock that is a superset of the
* region wanted by request we can be assured that by adding an
* edge to this write lock we have paths to all locks in the
* graph that blocks the request except in one case and that is why
* another check for SAME_OWNER in the loop below. The exception
* case is when this process that owns the sleeping write lock 'l1'
* has other locks l2, l3, l4 that are in the system and arrived
* before l1. l1 does not have path to these locks as they are from
 * same process. We break when we find a second covering sleeping
 * lock l5 owned by a process different from that owning l1, because
 * there cannot be any of l2, l3, l4, etc. that arrived before l5, and
 * if there were, l1 would have produced a deadlock already.
*/
if (lock) {
do {
if (!request_will_wait)
return (EAGAIN);
if (found_covering_lock &&
break;
}
found_covering_lock = 1;
covered_by = lock;
}
if (found_covering_lock &&
continue;
}
!found_covering_lock, 0)))
return (error);
}
}
/*
 * found_covering_lock == 2 iff at this point 'request' has paths
 * to all locks that block 'request'. found_covering_lock == 1 iff at this
 * point 'request' has paths to all locks that block 'request' whose owners
 * are not the same as the one that covers 'request' (covered_by above) and
 * we can have locks whose owner is the same as covered_by in the active list.
*/
do {
if (found_covering_lock &&
continue;
}
CHECK_CYCLE, 0)))
return (error);
}
}
if (NOT_BLOCKED(request)) {
/*
* request not dependent on any other locks
* so execute this request
*/
return (flk_execute_request(request));
} else {
/*
* check for deadlock
*/
if (flk_check_deadlock(request))
return (EDEADLK);
/*
* this thread has to sleep
*/
return (flk_wait_execute_request(request));
}
}
/*
* The actual execution of the request in the simple case is only to
* insert the 'request' in the list of active locks if it is not an
* UNLOCK.
 * We have to consider the existing active locks' relation to
 * this 'request' if they are owned by the same process. flk_relation() does
 * this job and sees to it that the dependency graph information is
 * maintained properly.
*/
int
flk_execute_request(lock_descriptor_t *request)
{
int done_searching = 0;
/* IO_LOCK requests are only to check status */
if (IS_IO_LOCK(request))
return (0);
return (0);
return (0);
}
do {
}
/*
* insert in active queue
*/
return (0);
}
/*
 * 'request' is blocked by someone; therefore we put it into the sleep queue.
 */
static int
flk_wait_execute_request(lock_descriptor_t *request)
{
struct flock_globals *fg;
int index;
if (IS_LOCKMGR(request)) {
fg = flk_get_globals();
if (nlm_status_size == 0) { /* not booted as a cluster */
return (ENOLCK);
}
} else { /* booted as a cluster */
/*
* If the request is an NLM server lock request,
* and the NLM state of the lock request is not
* NLM_UP (because the NLM server is shutting
* down), then cancel the sleeping lock and
* return error ENOLCK that will encourage the
* client to retransmit.
*/
return (ENOLCK);
}
}
}
/* Clustering: For blocking PXFS locks, return */
/*
* PXFS locks sleep on the client side.
* The callback argument is used to wake up the sleeper
* when the lock is granted.
* We return -1 (rather than an errno value) to indicate
* the client side should sleep
*/
return (PXFS_LOCK_BLOCKED);
}
/*
* To make sure the shutdown code works correctly, either
* the callback must happen after putting the lock on the
* sleep list, or we must check the shutdown status after
* returning from the callback (and before sleeping). At
* least for now, we'll use the first option. If a
* shutdown or signal or whatever happened while the graph
* mutex was dropped, that will be detected by
* wait_for_lock().
*/
} else {
}
} else {
}
if (IS_LOCKMGR(request)) {
/*
* If the lock manager is shutting down, return an
* error that will encourage the client to retransmit.
*/
!IS_GRANTED(request)) {
return (ENOLCK);
}
}
if (IS_INTERRUPTED(request)) {
/* we got a signal, or act like we did */
return (EINTR);
}
/* Cancelled if some other thread has closed the file */
if (IS_CANCELLED(request)) {
return (EBADF);
}
return (flk_execute_request(request));
}
/*
 * This routine adds an edge between from and to because from depends
 * on to. If asked to check for deadlock it checks whether there are any
 * reachable locks from "from_lock" that are owned by the same process
 * as "from_lock".
* NOTE: It is the caller's responsibility to make sure that the color
* of the graph is consistent between the calls to flk_add_edge as done
* in flk_process_request. This routine does not color and check for
* deadlock explicitly.
*/
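/*
 * Illustrative example (not from the original source): if edges l1 -> l2
 * and l2 -> l3 already exist, a path from l1 to l3 exists, so
 * flk_add_edge() will not add a direct l1 -> l3 edge; this is what keeps
 * the graph in its "minimum storage representation."
 */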
static int
flk_add_edge(lock_descriptor_t *from_lock, lock_descriptor_t *to_lock,
    int check_cycle, int update_graph)
{
/*
* if to vertex already has mark_color just return
* don't add an edge as it is reachable from from vertex
* before itself.
*/
return (0);
edge = flk_get_edge();
/*
* set the from and to vertex
*/
/*
* put in adjacency list of from vertex
*/
/*
* put in list of to vertex
*/
if (update_graph) {
return (0);
}
if (!check_cycle) {
return (0);
}
continue;
goto dead_lock;
}
}
	return (0);

dead_lock:
	/*
	 * remove all edges
	 */
}
return (EDEADLK);
}
/*
* Get an edge structure for representing the dependency between two locks.
*/
static edge_t *
flk_get_edge(void)
{
	edge_t *ep;

	ASSERT(flk_edge_cache != NULL);

	ep = kmem_cache_alloc(flk_edge_cache, KM_SLEEP);
	edge_allocs++;
return (ep);
}
/*
* Free the edge structure.
*/
static void
flk_free_edge(edge_t *ep)
{
	edge_frees++;
	kmem_cache_free(flk_edge_cache, (void *)ep);
}
/*
* Check the relationship of request with lock and perform the
* recomputation of dependencies, break lock if required, and return
* 1 if request cannot have any more relationship with the next
* active locks.
* The 'lock' and 'request' are compared and in case of overlap we
* delete the 'lock' and form new locks to represent the non-overlapped
 * portion of the original 'lock'. This function has side effects such as:
 * 'lock' may be freed and new locks may be added to the active list.
*/
static int
flk_relation(lock_descriptor_t *lock, lock_descriptor_t *request)
{
int lock_effect;
int nvertex = 0;
int i;
else
lock_effect == FLK_STAY_SAME) {
nvertex = 1;
goto recompute;
} else {
return (0);
}
}
lock_effect == FLK_STAY_SAME) {
nvertex = 1;
goto recompute;
} else {
return (1);
}
}
if (lock_effect == FLK_STAY_SAME) {
nvertex = 1;
} else {
lock1 = flk_get_lock();
lock2 = flk_get_lock();
nvertex = 3;
}
if (lock_effect == FLK_STAY_SAME) {
nvertex = 1;
} else {
lock1 = flk_get_lock();
nvertex = 2;
}
} else {
if (lock_effect == FLK_STAY_SAME) {
nvertex = 1;
} else {
lock1 = flk_get_lock();
nvertex = 2;
}
}
if (lock_effect == FLK_STAY_SAME) {
nvertex = 1;
} else {
lock1 = flk_get_lock();
nvertex = 2;
}
nvertex = 1;
} else {
nvertex = 1;
}
} else {
if (lock_effect == FLK_STAY_SAME) {
nvertex = 1;
} else {
lock1 = flk_get_lock();
nvertex = 2;
}
nvertex = 1;
} else {
if (lock_effect != FLK_UNLOCK) {
nvertex = 1;
} else {
return (1);
}
}
}
/*
 * For unlock we don't send the 'request' for recomputing
 * dependencies because no lock will add an edge to it.
*/
if (lock_effect == FLK_UNLOCK) {
nvertex--;
}
for (i = 0; i < nvertex; i++) {
}
/*
* we remove the adjacent edges for all vertices' to this vertex
* 'lock'.
*/
}
/* We are ready for recomputing the dependencies now */
for (i = 0; i < nvertex; i++) {
}
if (lock_effect == FLK_UNLOCK) {
nvertex++;
}
for (i = 0; i < nvertex - 1; i++) {
}
flk_wakeup(lock, 0);
} else {
}
}
return (0);
}
/*
* Insert a lock into the active queue.
*/
static void
flk_insert_active_lock(lock_descriptor_t *new_lock)
{
first_lock = lock;
if (first_lock != NULL) {
;
} else {
}
}
}
/*
 * Delete the active lock: performs two functions depending on the
 * value of the second parameter. One is to remove from the active lists
 * only; the other is to both remove and free the lock.
*/
static void
flk_delete_active_lock(lock_descriptor_t *lock, int free_lock)
{
if (free_lock)
NULL);
}
if (free_lock)
}
/*
* Insert into the sleep queue.
*/
static void
flk_insert_sleeping_lock(lock_descriptor_t *request)
{
;
}
/*
* Cancelling a sleeping lock implies removing a vertex from the
* dependency graph and therefore we should recompute the dependencies
* of all vertices that have a path to this vertex, w.r.t. all
* vertices reachable from this vertex.
*/
void
flk_cancel_sleeping_lock(lock_descriptor_t *request, int remove_from_queue)
{
int nvertex = 0;
int i;
/*
 * count the number of vertex pointers that have to be allocated
 * (all vertices that are reachable from request).
*/
continue;
nvertex++;
}
}
/*
* allocate memory for holding the vertex pointers
*/
if (nvertex) {
KM_SLEEP);
}
/*
* one more pass to actually store the vertices in the
* allocated array.
* We first check sleeping locks and then active locks
* so that topology array will be in a topological
* order.
*/
nvertex = 0;
if (lock) {
do {
if (IS_RECOMPUTE(lock)) {
}
}
if (lock) {
do {
if (IS_RECOMPUTE(lock)) {
}
}
/*
* remove in and out edges of request
* They are freed after updating proc_graph below.
*/
}
if (remove_from_queue)
/* we are ready to recompute */
}
/*
* unset the RECOMPUTE flag in those vertices
*/
for (i = 0; i < nvertex; i++) {
}
/*
* free the topology
*/
if (nvertex)
(nvertex * sizeof (lock_descriptor_t *)));
/*
* Possibility of some locks unblocked now
*/
flk_wakeup(request, 0);
/*
* we expect to have a correctly recomputed graph now.
*/
}
/*
 * Uncoloring the graph simply increments the mark value of the graph,
 * and only when wrap-around takes place do we color all vertices in
 * the graph explicitly.
*/
static void
flk_graph_uncolor(graph_t *gp)
{
} else {
}
}
/*
* Wake up locks that are blocked on the given lock.
*/
static void
flk_wakeup(lock_descriptor_t *lock, int adj_list_remove)
{
if (NO_DEPENDENTS(lock))
return;
do {
/*
* delete the edge from the adjacency list
* of from vertex. if no more adjacent edges
* for this vertex wake this process.
*/
if (adj_list_remove)
if (NOT_BLOCKED(lck)) {
}
}
/*
 * The dependents of request are checked for dependency against the
 * locks in topology (called topology because the array is and should be in
 * topological order for this algorithm; if not in topological order the
 * inner loop below might add more edges than necessary. Topological ordering
 * of vertices satisfies the property that all edges will be from left to
 * right, i.e., topology[i] can have an edge to topology[j], iff i<j).
* If lock l1 in the dependent set of request is dependent (blocked by)
* on lock l2 in topology but does not have a path to it, we add an edge
* in the inner loop below.
*
* We don't want to add an edge between l1 and l2 if there exists
* already a path from l1 to l2, so care has to be taken for those vertices
* that have two paths to 'request'. These vertices are referred to here
* as barrier locks.
*
 * The barriers have to be found (those vertices that originally had two
 * paths to request) because otherwise we may end up adding edges
 * unnecessarily to vertices in topology, and thus barrier vertices can
 * have an edge to a vertex in topology as well as a path to it.
*/
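/*
 * Illustrative example (not from the original source): if vertex l1
 * reaches 'request' both directly (l1 -> request) and through l2
 * (l1 -> l2 -> request), then l1 is a barrier vertex with barrier count
 * 1 (two paths minus one), and its dependencies must not be recomputed
 * until both paths have been accounted for.
 */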
static void
flk_recompute_dependencies(lock_descriptor_t *request,
    lock_descriptor_t **topology, int nvertex, int update_graph)
{
int i, count;
int barrier_found = 0;
if (nvertex == 0)
return;
count = 0;
goto next_in_edge;
}
if (IS_BARRIER(vertex)) {
/* decrement the barrier count */
/* this guy will be pushed again anyway ? */
/*
* barrier is over we can recompute
* dependencies for this lock in the
* next stack pop
*/
}
continue;
}
}
for (i = 0; i < nvertex; i++) {
continue;
count++;
}
}
/* prune the tree below this */
/* update the barrier locks below this! */
}
continue;
}
}
}
/*
* Color all reachable vertices from vertex that belongs to topology (here
* those that have RECOMPUTE_LOCK set in their state) and yet uncolored.
*
* Note: we need to use a different stack_link l_stack1 because this is
* called from flk_recompute_dependencies() that already uses a stack with
* l_stack as stack_link.
*/
static int
flk_color_reachables(lock_descriptor_t *vertex)
{
int count;
count = 0;
continue;
if (IS_RECOMPUTE(lock))
count++;
}
}
return (count);
}
/*
* Called from flk_recompute_dependencies() this routine decrements
* the barrier count of barrier vertices that are reachable from lock.
*/
static void
flk_update_barriers(lock_descriptor_t *lock)
{
if (IS_BARRIER(lck)) {
}
continue;
}
if (IS_BARRIER(lck)) {
}
}
}
}
/*
* Finds all vertices that are reachable from 'lock' more than once and
* mark them as barrier vertices and increment their barrier count.
* The barrier count is one minus the total number of paths from lock
* to that vertex.
*/
static int
flk_find_barriers(lock_descriptor_t *lock)
{
int found = 0;
/* this is a barrier */
/* index will have barrier count */
if (!found)
found = 1;
continue;
}
}
}
return (found);
}
/*
* Finds the first lock that is mainly responsible for blocking this
* request. If there is no such lock, request->l_flock.l_type is set to
* F_UNLCK. Otherwise, request->l_flock is filled in with the particulars
* of the blocking lock.
*
* Note: It is possible a request is blocked by a sleeping lock because
* of the fairness policy used in flk_process_request() to construct the
* dependencies. (see comments before flk_process_request()).
*/
static void
flk_get_first_blocking_lock(lock_descriptor_t *request)
{
if (lock) {
do {
break;
}
}
/*
* No active lock is blocking this request, but if a read
* lock is requested, it may also get blocked by a waiting
* writer. So search all sleeping locks and see if there is
* a writer waiting.
*/
if (lock) {
do {
break;
}
}
}
if (blocker) {
} else
}
/*
* Get the graph_t structure associated with a vnode.
* If 'initialize' is non-zero, and the graph_t structure for this vnode has
* not yet been initialized, then a new element is allocated and returned.
*/
graph_t *
flk_get_lock_graph(vnode_t *vp, int initialize)
{
if (initialize == FLK_USE_GRAPH) {
return (gp);
}
/* Initialize the graph */
}
/* Recheck the value within flock_lock */
struct flock_globals *fg;
/* We must have previously allocated the graph_t structure */
/*
* The lockmgr status is only needed if KLM is loaded.
*/
if (flock_zone_key != ZONE_KEY_UNINITIALIZED) {
fg = flk_get_globals();
}
}
/* There was a race to allocate the graph_t and we lost */
}
return (gp);
}
/*
* PSARC case 1997/292
*/
int
cl_flk_has_remote_locks_for_nlmid(vnode_t *vp, int nlmid)
{
int result = 0;
int lock_nlmid;
/*
* Check to see if node is booted as a cluster. If not, return.
*/
if ((cluster_bootflags & CLUSTER_BOOTED) == 0) {
return (0);
}
return (0);
}
if (lock) {
/* get NLM id from sysid */
/*
* If NLM server request _and_ nlmid of lock matches
* nlmid of argument, then we've found a remote lock.
*/
result = 1;
goto done;
}
}
}
if (lock) {
/* get NLM id from sysid */
/*
* If NLM server request _and_ nlmid of lock matches
* nlmid of argument, then we've found a remote lock.
*/
result = 1;
goto done;
}
}
}
done:
return (result);
}
/*
* Determine whether there are any locks for the given vnode with a remote
* sysid. Returns zero if not, non-zero if there are.
*
* Note that the return value from this function is potentially invalid
* once it has been returned. The caller is responsible for providing its
* own synchronization mechanism to ensure that the return value is useful
* (e.g., see nfs_lockcompletion()).
*/
int
flk_has_remote_locks(vnode_t *vp)
{
int result = 0;
return (0);
}
if (lock) {
result = 1;
goto done;
}
}
}
if (lock) {
result = 1;
goto done;
}
}
}
done:
return (result);
}
/*
* Determine whether there are any locks for the given vnode with a remote
* sysid matching given sysid.
* Used by the new (open source) NFS Lock Manager (NLM)
*/
int
flk_has_remote_locks_for_sysid(vnode_t *vp, int sysid)
{
int result = 0;
if (sysid == 0)
return (0);
return (0);
}
if (lock) {
result = 1;
goto done;
}
}
}
if (lock) {
result = 1;
goto done;
}
}
}
done:
return (result);
}
/*
* Determine if there are any locks owned by the given sysid.
* Returns zero if not, non-zero if there are. Note that this return code
* could be derived from flk_get_{sleeping,active}_locks, but this routine
* avoids all the memory allocations of those routines.
*
* This routine has the same synchronization issues as
* flk_has_remote_locks.
*/
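/*
 * Illustrative sketch (not from the original source): a caller that only
 * needs a yes/no answer can use
 *
 *	if (flk_sysid_has_locks(sysid,
 *	    FLK_QUERY_ACTIVE | FLK_QUERY_SLEEPING))
 *		...
 *
 * instead of building and freeing the full lists returned by
 * flk_get_active_locks()/flk_get_sleeping_locks().
 */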
int
flk_sysid_has_locks(int sysid, int lck_type)
{
int has_locks = 0;
int i;
gp = lock_graph[i];
continue;
}
if (lck_type & FLK_QUERY_ACTIVE) {
has_locks = 1;
}
}
if (lck_type & FLK_QUERY_SLEEPING) {
has_locks = 1;
}
}
}
return (has_locks);
}
/*
* PSARC case 1997/292
*
 * Requires: "sysid" is a pair [nlmid, sysid]. The lower half is a 16-bit
* quantity, the real sysid generated by the NLM server; the upper half
* identifies the node of the cluster where the NLM server ran.
* This routine is only called by an NLM server running in a cluster.
* Effects: Remove all locks held on behalf of the client identified
* by "sysid."
*/
void
cl_flk_remove_locks_by_sysid(int sysid)
{
int i;
/*
* Check to see if node is booted as a cluster. If not, return.
*/
if ((cluster_bootflags & CLUSTER_BOOTED) == 0) {
return;
}
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
/* signal sleeping requests so that they bail out */
}
}
/* delete active locks */
}
}
}
}
/*
 * Delete all locks in the system that belong to the sysid of the request.
*/
static void
flk_delete_locks_by_sysid(lock_descriptor_t *request)
{
int i;
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
/* signal sleeping requests so that they bail out */
}
}
/* delete active locks */
}
}
}
}
/*
* Clustering: Deletes PXFS locks
* Effects: Delete all locks on files in the given file system and with the
* given PXFS id.
*/
void
cl_flk_delete_pxfs_locks(struct vfs *vfsp, int pxfsid)
{
int i;
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
/* signal sleeping requests so that they bail out */
pxfsid) {
}
}
}
/* delete active locks */
pxfsid) {
}
}
}
}
}
/*
* Search for a sleeping lock manager lock which matches exactly this lock
* request; if one is found, fake a signal to cancel it.
*
* Return 1 if a matching lock was found, 0 otherwise.
*/
static int
flk_canceled(lock_descriptor_t *request)
{
if (lock) {
return (1);
}
}
}
return (0);
}
/*
* Remove all non-OFD locks for the vnode belonging to the given pid and sysid.
* That is, since OFD locks are pid-less we'll never match on the incoming
* pid. OFD locks are removed earlier in the close() path via closef() and
* ofdcleanlock().
*/
void
cleanlocks(vnode_t *vp, pid_t pid, int sysid)
{
return;
if (lock) {
do {
}
}
if (lock) {
do {
}
}
}
}
/*
* Called from 'fs' read and write routines for files that have mandatory
* locking enabled.
*/
int
chklock(struct vnode *vp, int iomode, u_offset_t offset, ssize_t len,
    int fmode, caller_context_t *ct)
{
register int i;
int error = 0;
} else {
}
return (error);
}
/*
* convoff - converts the given data (start, whence) to the
* given whence.
*/
int
convoff(struct vnode *vp, struct flock64 *lckdat, int whence, offset_t offset)
{
int error;
return (error);
}
case 1:
break;
case 2:
/* FALLTHRU */
case 0:
break;
default:
return (EINVAL);
}
return (EINVAL);
switch (whence) {
case 1:
break;
case 2:
/* FALLTHRU */
case 0:
break;
default:
return (EINVAL);
}
return (0);
}
/* proc_graph function definitions */
/*
 * This function checks for deadlock due to the new 'lock'. If a deadlock is
 * found, the edges of this lock are freed and 1 is returned.
*/
static int
flk_check_deadlock(lock_descriptor_t *lock)
{
/*
* OFD style locks are not associated with any process so there is
* no proc graph for these. Thus we cannot, and do not, do deadlock
* detection.
*/
return (0);
/* construct the edges from this process to other processes */
break;
}
}
pep = flk_get_proc_edge();
}
}
break;
}
}
pep = flk_get_proc_edge();
start_vertex->incount++;
}
}
if (start_vertex->incount == 0) {
return (0);
}
if (!PROC_ARRIVED(dvertex)) {
break;
}
if (!PROC_DEPARTED(dvertex))
goto deadlock;
}
}
}
return (0);
/* we remove all lock edges and proc edges */
} else {
}
}
break;
}
}
}
} else {
}
start_vertex->incount--;
}
break;
}
}
}
return (1);
}
/*
 * Get a proc vertex. If the lock's pvertex value indexes a correct proc
 * vertex in the list we return that; otherwise we allocate one. If
 * necessary, we grow the list of vertices also.
*/
static proc_vertex_t *
flk_get_proc_vertex(lock_descriptor_t *lock)
{
int i;
return (pv);
}
}
return (pv);
}
}
return (pv);
}
}
}
sizeof (proc_vertex_t *), KM_SLEEP);
}
return (pv);
}
/*
* Allocate a proc edge.
*/
static proc_edge_t *
flk_get_proc_edge(void)
{
	proc_edge_t *pep;

	pep = kmem_zalloc(sizeof (proc_edge_t), KM_SLEEP);
	flk_proc_edge_allocs++;
	return (pep);
}
/*
* Free the proc edge. Called whenever its reference count goes to zero.
*/
static void
flk_free_proc_edge(proc_edge_t *pep)
{
	ASSERT(pep->refcount == 0);
	kmem_free((void *)pep, sizeof (proc_edge_t));
	flk_proc_edge_frees++;
}
/*
 * Color the graph explicitly. Done only when the mark value hits max value.
*/
static void
flk_proc_graph_uncolor(void)
{
int i;
}
} else {
}
}
/*
 * Release the proc vertex iff there are no in edges and no out edges.
*/
static void
flk_proc_release(proc_vertex_t *proc)
{
}
}
/*
* Updates process graph to reflect change in a lock_graph.
* Note: We should call this function only after we have a correctly
* recomputed lock graph. Otherwise we might miss a deadlock detection.
 * e.g.: in function flk_relation() we call this function after
 * flk_recompute_dependencies(); otherwise if a process tries to lock a
 * vnode hashed into another graph it might sleep forever.
*/
static void
flk_update_proc_graph(edge_t *ep, int delete)
{
/*
* OFD style locks are not associated with any process so there is
* no proc graph for these.
*/
return;
}
if (!delete)
goto add;
} else {
}
}
break;
}
}
return;
add:
break;
}
}
pep = flk_get_proc_edge();
}
}
/*
* Set the control status for lock manager requests.
*
*/
/*
* PSARC case 1997/292
*
* Requires: "nlmid" must be >= 1 and <= clconf_maximum_nodeid().
* Effects: Set the state of the NLM server identified by "nlmid"
* in the NLM registry to state "nlm_state."
* Raises exception no_such_nlm if "nlmid" doesn't identify a known
* NLM server to this LLM.
* Note that when this routine is called with NLM_SHUTTING_DOWN there
 * may be lock requests that have gotten started but not finished. In
* particular, there may be blocking requests that are in the callback code
* before sleeping (so they're not holding the lock for the graph). If
* such a thread reacquires the graph's lock (to go to sleep) after
* NLM state in the NLM registry is set to a non-up value,
* it will notice the status and bail out. If the request gets
* granted before the thread can check the NLM registry, let it
* continue normally. It will get flushed when we are called with NLM_DOWN.
*
* Modifies: nlm_reg_obj (global)
* Arguments:
* nlmid (IN): id uniquely identifying an NLM server
* nlm_state (IN): NLM server state to change "nlmid" to
*/
void
cl_flk_set_nlm_status(int nlmid, flk_nlm_status_t nlm_state)
{
/*
* Check to see if node is booted as a cluster. If not, return.
*/
if ((cluster_bootflags & CLUSTER_BOOTED) == 0) {
return;
}
/*
* Check for development/debugging. It is possible to boot a node
* in non-cluster mode, and then run a special script, currently
* available only to developers, to bring up the node as part of a
* cluster. The problem is that running such a script does not
* result in the routine flk_init() being called and hence global array
* nlm_reg_status is NULL. The NLM thinks it's in cluster mode,
* but the LLM needs to do an additional check to see if the global
* array has been created or not. If nlm_reg_status is NULL, then
* return, else continue.
*/
if (nlm_reg_status == NULL) {
return;
}
/*
* If the NLM server "nlmid" is unknown in the NLM registry,
* add it to the registry in the nlm shutting down state.
*/
} else {
/*
* Change the state of the NLM server identified by "nlmid"
* in the NLM registry to the argument "nlm_state."
*/
}
/*
* The reason we must register the NLM server that is shutting down
* with an LLM that doesn't already know about it (never sent a lock
* request) is to handle correctly a race between shutdown and a new
* lock request. Suppose that a shutdown request from the NLM server
* invokes this routine at the LLM, and a thread is spawned to
* service the request. Now suppose a new lock request is in
* progress and has already passed the first line of defense in
* reclock(), which denies new locks requests from NLM servers
* that are not in the NLM_UP state. After the current routine
* is invoked for both phases of shutdown, the routine will return,
* having done nothing, and the lock request will proceed and
* probably be granted. The problem is that the shutdown was ignored
* by the lock request because there was no record of that NLM server
* shutting down. We will be in the peculiar position of thinking
* that we've shutdown the NLM server and all locks at all LLMs have
* been discarded, but in fact there's still one lock held.
* The solution is to record the existence of NLM server and change
* its state immediately to NLM_SHUTTING_DOWN. The lock request in
* progress may proceed because the next phase NLM_DOWN will catch
* this lock and discard it.
*/
switch (nlm_state) {
case FLK_NLM_UP:
/*
* Change the NLM state of all locks still held on behalf of
* the NLM server identified by "nlmid" to NLM_UP.
*/
break;
case FLK_NLM_SHUTTING_DOWN:
/*
* Wake up all sleeping locks for the NLM server identified
* by "nlmid." Note that eventually all woken threads will
* have their lock requests cancelled and descriptors
* removed from the sleeping lock list. Note that the NLM
* server state associated with each lock descriptor is
* changed to FLK_NLM_SHUTTING_DOWN.
*/
break;
case FLK_NLM_DOWN:
/*
* Discard all active, granted locks for this NLM server
* identified by "nlmid."
*/
break;
default:
}
}
/*
* Set the control status for lock manager requests.
*
* Note that when this routine is called with FLK_WAKEUP_SLEEPERS, there
 * may be lock requests that have gotten started but not finished. In
* particular, there may be blocking requests that are in the callback code
* before sleeping (so they're not holding the lock for the graph). If
* such a thread reacquires the graph's lock (to go to sleep) after
* flk_lockmgr_status is set to a non-up value, it will notice the status
* and bail out. If the request gets granted before the thread can check
* flk_lockmgr_status, let it continue normally. It will get flushed when
* we are called with FLK_LOCKMGR_DOWN.
*/
void
flk_set_lockmgr_status(flk_lockmgr_status_t status)
{
int i;
struct flock_globals *fg;
fg = flk_get_globals();
/*
* If the lock manager is coming back up, all that's needed is to
* propagate this information to the graphs. If the lock manager
* is going down, additional action is required, and each graph's
* copy of the state is updated atomically with this other action.
*/
switch (status) {
case FLK_LOCKMGR_UP:
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
}
break;
case FLK_WAKEUP_SLEEPERS:
break;
case FLK_LOCKMGR_DOWN:
break;
default:
break;
}
}
/*
* This routine returns all the locks that are active or sleeping and are
* associated with a particular set of identifiers. If lock_state != 0, then
* only locks that match the lock_state are returned. If lock_state == 0, then
* all locks are returned. If pid == NOPID, the pid is ignored. If
* use_sysid is FALSE, then the sysid is ignored. If vp is NULL, then the
* vnode pointer is ignored.
*
* A list containing the vnode pointer and an flock structure
* describing the lock is returned. Each element in the list is
* dynamically allocated and must be freed by the caller. The
* last item in the list is denoted by a NULL value in the ll_next
* field.
*
* The vnode pointers returned are held. The caller is responsible
* for releasing these. Note that the returned list is only a snapshot of
* the current lock information, and that it is a snapshot of a moving
* target (only one graph is locked at a time).
*/
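/*
 * Illustrative sketch (not from the original source): a typical consumer
 * walks the returned snapshot and then releases it:
 *
 *	locklist_t *llp, *head;
 *
 *	head = flk_active_locks_for_vp(vp);
 *	for (llp = head; llp != NULL; llp = llp->ll_next) {
 *		... examine llp->ll_vp and the flock data ...
 *	}
 *	flk_free_locklist(head);	<- also releases the vnode holds
 */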
static locklist_t *
get_lock_list(int list_type, int lock_state, int sysid, boolean_t use_sysid,
    pid_t pid, const vnode_t *vp, zoneid_t zoneid)
{
int i;
int first_index; /* graph index */
int num_indexes; /* graph index */
(list_type == FLK_SLEEPING_STATE));
/*
* Get a pointer to something to use as a list head while building
* the rest of the list.
*/
/* Figure out which graphs we want to look at. */
first_index = 0;
} else {
num_indexes = 1;
}
gp = lock_graph[i];
continue;
}
lock != graph_head;
continue;
continue;
continue;
continue;
continue;
/*
* A matching lock was found. Allocate
* space for a new locklist entry and fill
* it in.
*/
}
}
return (llp);
}
/*
* These two functions are simply interfaces to get_lock_list. They return
* a list of sleeping or active locks for the given sysid and pid. See
* get_lock_list for details.
*
* In either case we don't particularly care to specify the zone of interest;
* the sysid-space is global across zones, so the sysid will map to exactly one
* zone, and we'll return information for that zone.
*/
locklist_t *
flk_get_sleeping_locks(int sysid, pid_t pid)
{
	return (get_lock_list(FLK_SLEEPING_STATE, 0, sysid, B_TRUE, pid, NULL,
	    ALL_ZONES));
}

locklist_t *
flk_get_active_locks(int sysid, pid_t pid)
{
	return (get_lock_list(FLK_ACTIVE_STATE, 0, sysid, B_TRUE, pid, NULL,
	    ALL_ZONES));
}
/*
* Another interface to get_lock_list. This one returns all the active
* locks for a given vnode. Again, see get_lock_list for details.
*
* We don't need to specify which zone's locks we're interested in. The matter
* would only be interesting if the vnode belonged to NFS, and NFS vnodes can't
* be used by multiple zones, so the list of locks will all be from the right
* zone.
*/
locklist_t *
flk_active_locks_for_vp(const vnode_t *vp)
{
	return (get_lock_list(FLK_ACTIVE_STATE, 0, 0, B_FALSE, NOPID, vp,
	    ALL_ZONES));
}
/*
* Another interface to get_lock_list. This one returns all the active
* nbmand locks for a given vnode. Again, see get_lock_list for details.
*
* See the comment for flk_active_locks_for_vp() for why we don't care to
* specify the particular zone of interest.
*/
locklist_t *
flk_active_nbmand_locks_for_vp(const vnode_t *vp)
{
	return (get_lock_list(FLK_ACTIVE_STATE, NBMAND_LOCK, 0, B_FALSE,
	    NOPID, vp, ALL_ZONES));
}
/*
* Another interface to get_lock_list. This one returns all the active
* nbmand locks for a given pid. Again, see get_lock_list for details.
*
* The zone doesn't need to be specified here; the locks held by a
* particular process will either be local (ie, non-NFS) or from the zone
* the process is executing in. This is because other parts of the system
* ensure that an NFS vnode can't be used in a zone other than that in
* which it was opened.
*/
locklist_t *
flk_active_nbmand_locks(pid_t pid)
{
	return (get_lock_list(FLK_ACTIVE_STATE, NBMAND_LOCK, 0, B_FALSE,
	    pid, NULL, ALL_ZONES));
}
/*
* Free up all entries in the locklist.
*/
void
flk_free_locklist(locklist_t *llp)
{
	locklist_t *next_llp;

	while (llp) {
		next_llp = llp->ll_next;
		VN_RELE(llp->ll_vp);
		kmem_free(llp, sizeof (*llp));
		llp = next_llp;
	}
}
static void
cl_flk_change_nlm_state_all_locks(int nlmid, flk_nlm_status_t nlm_state)
{
/*
* For each graph "lg" in the hash table lock_graph do
* a. Get the list of sleeping locks
* b. For each lock descriptor in the list do
* i. If the requested lock is an NLM server request AND
* the nlmid is the same as the routine argument then
* change the lock descriptor's state field to
* "nlm_state."
* c. Get the list of active locks
* d. For each lock descriptor in the list do
* i. If the requested lock is an NLM server request AND
* the nlmid is the same as the routine argument then
* change the lock descriptor's state field to
* "nlm_state."
*/
int i;
int lock_nlmid;
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
}
/* Get list of sleeping locks in current lock graph. */
/* get NLM id */
/*
* If NLM server request AND nlmid of lock matches
* nlmid of argument, then set the NLM state of the
* lock to "nlm_state."
*/
}
}
/* Get list of active locks in current lock graph. */
/* get NLM id */
/*
* If NLM server request AND nlmid of lock matches
* nlmid of argument, then set the NLM state of the
* lock to "nlm_state."
*/
}
}
}
}
/*
* Requires: "nlmid" >= 1 and <= clconf_maximum_nodeid().
* Effects: Find all sleeping lock manager requests _only_ for the NLM server
* identified by "nlmid." Poke those lock requests.
*/
static void
cl_flk_wakeup_sleeping_nlm_locks(int nlmid)
{
int i;
int lock_nlmid;
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
}
/*
* If NLM server request _and_ nlmid of lock matches
* nlmid of argument, then set the NLM state of the
* lock to NLM_SHUTTING_DOWN, and wake up sleeping
* request.
*/
if (IS_LOCKMGR(lock)) {
/* get NLM id */
if (nlmid == lock_nlmid) {
}
}
}
}
}
/*
* Requires: "nlmid" >= 1 and <= clconf_maximum_nodeid()
* Effects: Find all active (granted) lock manager locks _only_ for the
* NLM server identified by "nlmid" and release them.
*/
static void
cl_flk_unlock_nlm_granted(int nlmid)
{
int i;
int lock_nlmid;
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
}
/*
* If it's an NLM server request _and_ nlmid of
* the lock matches nlmid of argument, then
 * remove the active lock from the list, wake up blocked
* threads, and free the storage for the lock.
* Note that there's no need to mark the NLM state
* of this lock to NLM_DOWN because the lock will
* be deleted anyway and its storage freed.
*/
if (IS_LOCKMGR(lock)) {
/* get NLM id */
if (nlmid == lock_nlmid) {
}
}
}
}
}
/*
* Find all sleeping lock manager requests and poke them.
*/
static void
wakeup_sleeping_lockmgr_locks(struct flock_globals *fg)
{
int i;
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
}
}
}
}
}
/*
* Find all active (granted) lock manager locks and release them.
*/
static void
unlock_lockmgr_granted(struct flock_globals *fg)
{
int i;
for (i = 0; i < HASH_SIZE; i++) {
gp = lock_graph[i];
continue;
}
}
}
}
}
/*
* Wait until a lock is granted, cancelled, or interrupted.
*/
static void
wait_for_lock(lock_descriptor_t *request)
{
!(IS_INTERRUPTED(request))) {
}
}
}
/*
* Create an flock structure from the existing lock information
*
* This routine is used to create flock structures for the lock manager
* to use in a reclaim request. Since the lock was originated on this
* host, it must be conforming to UNIX semantics, so no checking is
* done to make sure it falls within the lower half of the 32-bit range.
*/
static void
create_flock(lock_descriptor_t *lp, flock64_t *flp)
{
}
/*
* Convert flock_t data describing a lock range into unsigned long starting
* and ending points, which are put into lock_request. Returns 0 or an
* errno value.
* Large Files: max is passed by the caller and we return EOVERFLOW
* as defined by LFS API.
*/
int
flk_convert_lock_data(vnode_t *vp, flock64_t *flp,
    u_offset_t *start, u_offset_t *end, offset_t offset)
{
int error;
/*
* Determine the starting point of the request
*/
case 0: /* SEEK_SET */
break;
case 1: /* SEEK_CUR */
break;
case 2: /* SEEK_END */
return (error);
break;
default:
return (EINVAL);
}
/*
* Determine the range covered by the request.
*/
*end = MAX_U_OFFSET_T;
} else {
/*
* Negative length; why do we even allow this ?
* Because this allows easy specification of
* the last n bytes of the file.
*/
(*start)++;
}
return (0);
}
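/*
 * Worked example (not from the original source): with l_whence = 0
 * (SEEK_SET), l_start = 100 and l_len = -10, the negative length names
 * the ten bytes immediately before offset 100, so the canonical range
 * becomes *start = 90, *end = 99. With l_len = 0 the lock covers
 * *start through MAX_U_OFFSET_T, i.e. to end of file.
 */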
/*
 * Check the validity of lock data. This can be used by the NFS
* frlock routines to check data before contacting the server. The
* server must support semantics that aren't as restrictive as
* the UNIX API, so the NFS client is required to check.
* The maximum is now passed in by the caller.
*/
int
flk_check_lock_data(u_offset_t start, u_offset_t end, offset_t max)
{
/*
* The end (length) for local locking should never be greater
* than MAXEND. However, the representation for
* the entire file is MAX_U_OFFSET_T.
*/
return (EINVAL);
}
return (EINVAL);
}
return (0);
}
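/*
 * Illustrative example (not from the original source): with max = MAXEND,
 * a local request of start = 0, end = MAX_U_OFFSET_T (the whole-file
 * representation) is accepted, while a start beyond max, or an end beyond
 * max that is not MAX_U_OFFSET_T, fails with EINVAL.
 */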
/*
* Fill in request->l_flock with information about the lock blocking the
* request. The complexity here is that lock manager requests are allowed
* to see into the upper part of the 32-bit address range, whereas local
* requests are only allowed to see signed values.
*
* What should be done when "blocker" is a lock manager lock that uses the
* upper portion of the 32-bit range, but "request" is local? Since the
* request has already been determined to have been blocked by the blocker,
* at least some portion of "blocker" must be in the range of the request,
* or the request extends to the end of file. For the first case, the
* portion in the lower range is returned with the indication that it goes
* "to EOF." For the second case, the last byte of the lower range is
* returned with the indication that it goes "to EOF."
*/
static void
report_blocker(lock_descriptor_t *blocker, lock_descriptor_t *request)
{
if (IS_LOCKMGR(request)) {
else
} else {
} else {
else
}
}
}
/*
* PSARC case 1997/292
*/
/*
* This is the public routine exported by flock.h.
*/
void
cl_flk_change_nlm_state_to_unknown(int nlmid)
{
/*
* Check to see if node is booted as a cluster. If not, return.
*/
if ((cluster_bootflags & CLUSTER_BOOTED) == 0) {
return;
}
/*
* See comment in cl_flk_set_nlm_status().
*/
if (nlm_reg_status == NULL) {
return;
}
/*
* protect NLM registry state with a mutex.
*/
}
/*
* Return non-zero if the given I/O request conflicts with an active NBMAND
* lock.
* If svmand is non-zero, it means look at all active locks, not just NBMAND
* locks.
*/
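/*
 * Illustrative sketch (not from the original source): a file system read
 * path might combine this with nbl_need_check(), roughly:
 *
 *	if (nbl_need_check(vp)) {
 *		nbl_start_crit(vp, RW_READER);
 *		if (nbl_lock_conflict(vp, NBL_READ, uiop->uio_loffset,
 *		    uiop->uio_resid, svmand, ct))
 *			error = EACCES;
 *		nbl_end_crit(vp);
 *	}
 *
 * nbl_start_crit()/nbl_end_crit() and NBL_READ are assumed from the
 * non-blocking mandatory locking interfaces; exact usage varies by
 * file system.
 */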
int
nbl_lock_conflict(vnode_t *vp, nbl_op_t op, u_offset_t offset,
    ssize_t length, int svmand, caller_context_t *ct)
{
int conflict = 0;
int sysid;
sysid = 0;
} else {
}
return (0);
conflict = 1;
break;
}
}
return (conflict);
}
/*
* Return non-zero if the given I/O request conflicts with the given lock.
*/
static int
lock_blocks_io(nbl_op_t op, lock_descriptor_t *lock)
{
return (0);
return (1);
return (1);
return (0);
}
#ifdef DEBUG
static void
check_active_locks(graph_t *gp)
{
}
"active lock %p blocks %p",
"active lock %p blocks %p",
}
}
}
}
}
/*
 * Effect: This function checks to see if the transition from 'old_state' to
* 'new_state' is a valid one. It returns 0 if the transition is valid
* and 1 if it is not.
* For a map of valid transitions, see sys/flock_impl.h
*/
static int
{
switch (old_state) {
case FLK_INITIAL_STATE:
if ((new_state == FLK_START_STATE) ||
(new_state == FLK_SLEEPING_STATE) ||
(new_state == FLK_ACTIVE_STATE) ||
(new_state == FLK_DEAD_STATE)) {
return (0);
} else {
return (1);
}
case FLK_START_STATE:
if ((new_state == FLK_ACTIVE_STATE) ||
(new_state == FLK_DEAD_STATE)) {
return (0);
} else {
return (1);
}
case FLK_ACTIVE_STATE:
if (new_state == FLK_DEAD_STATE) {
return (0);
} else {
return (1);
}
case FLK_SLEEPING_STATE:
if ((new_state == FLK_GRANTED_STATE) ||
(new_state == FLK_INTERRUPTED_STATE) ||
(new_state == FLK_CANCELLED_STATE)) {
return (0);
} else {
return (1);
}
case FLK_GRANTED_STATE:
if ((new_state == FLK_START_STATE) ||
(new_state == FLK_INTERRUPTED_STATE) ||
(new_state == FLK_CANCELLED_STATE)) {
return (0);
} else {
return (1);
}
case FLK_CANCELLED_STATE:
if ((new_state == FLK_INTERRUPTED_STATE) ||
(new_state == FLK_DEAD_STATE)) {
return (0);
} else {
return (1);
}
case FLK_INTERRUPTED_STATE:
if (new_state == FLK_DEAD_STATE) {
return (0);
} else {
return (1);
}
case FLK_DEAD_STATE:
/* May be set more than once */
if (new_state == FLK_DEAD_STATE) {
return (0);
} else {
return (1);
}
default:
return (1);
}
}
static void
check_sleeping_locks(graph_t *gp)
{
}
}
}
}
}
}
}
}
}
static int
level_two_path(lock_descriptor_t *lock1, lock_descriptor_t *lock2,
    int no_path)
{
if (no_path)
}
continue;
return (1);
}
}
return (0);
}
static void
check_owner_locks(graph_t *gp, pid_t pid, int sysid, vnode_t *vp)
{
/* Ignore OFD style locks since they're not process-wide. */
if (pid == 0)
return;
if (lock) {
"owner pid %d's lock %p in active queue",
}
}
if (lock) {
"owner pid %d's lock %p in sleep queue",
}
}
}
static int
level_one_path(lock_descriptor_t *lock1, lock_descriptor_t *lock2)
{
return (1);
else
}
return (0);
}
static int
no_path(lock_descriptor_t *lock1, lock_descriptor_t *lock2)
{
	return (!level_two_path(lock1, lock2, 1));
}
static void
path(lock_descriptor_t *lock1, lock_descriptor_t *lock2)
{
"one edge one path from lock1 %p lock2 %p",
}
"No path from lock1 %p to lock2 %p",
}
}
#endif /* DEBUG */