Lines Matching refs:lock
74 cmn_err(CE_PANIC, "Illegal lock transition \
111 * lock: mutex]
136 static kmutex_t nlm_reg_lock; /* lock to protect array */
140 * Although we need a global lock dependency graph (and associated data
141 * structures), we also need a per-zone notion of whether the lock manager is
142 * running, and so whether to allow lock manager requests or not.
145 * (flk_lockmgr_status), protected by flock_lock, and set when the lock
151 * The per-graph copies are used to synchronize lock requests with shutdown
164 static void flk_free_lock(lock_descriptor_t *lock);
238 * KLM module not loaded; lock manager definitely not running.
259 * different file description for the same file will not drop the lock (i.e.
264 * Because these locks are per-description, a lock ptr lives at the f_filocks
266 * to enable unique lock identification and management.
277 * currently the single lock must cover the entire file. This is validated in
280 * managed independently of the lock list on the vnode itself and it needs to
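
The excerpts from lines 259-280 describe OFD-style (per-file-description) locks. As a point of reference, here is a minimal userland sketch of that ownership model; it assumes the platform exposes the Linux-style F_OFD_SETLK command (with its l_pid == 0 requirement) and, per line 277 above, locks the whole file:

	/*
	 * Illustrative userland sketch (not part of flock.c): an OFD lock is
	 * owned by the open file description, so closing a *different*
	 * description of the same file does not drop it.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fd1 = open("/tmp/ofd-demo", O_RDWR | O_CREAT, 0644);
		int fd2 = open("/tmp/ofd-demo", O_RDWR);	/* second description */
		struct flock fl = { 0 };

		fl.l_type = F_WRLCK;
		fl.l_whence = SEEK_SET;
		fl.l_start = 0;
		fl.l_len = 0;		/* whole file (cf. line 277 above) */
		fl.l_pid = 0;		/* OFD commands require l_pid == 0 */

		if (fcntl(fd1, F_OFD_SETLK, &fl) == -1)
			perror("F_OFD_SETLK");

		(void) close(fd2);	/* does not drop fd1's lock */
		(void) close(fd1);	/* releases the description and its lock */
		return (0);
	}
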
361 * values then check the validity of the lock range.
396 /* Get the lock graph for a particular vnode */
452 * Remove any lock on the vnode belonging to the given file_t.
456 * lock associated with fp.
461 lock_descriptor_t *fplock, *lock, *nlock;
482 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
484 if (lock) {
486 nlock = lock->l_next;
487 if (fplock == lock) {
488 CANCEL_WAKEUP(lock);
491 lock = nlock;
492 } while (lock->l_vnode == vp);
495 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
497 if (lock) {
499 nlock = lock->l_next;
500 if (fplock == lock) {
501 flk_delete_active_lock(lock, 0);
502 flk_wakeup(lock, 1);
503 flk_free_lock(lock);
506 lock = nlock;
507 } while (lock->l_vnode == vp);
521 * file will drop the lock (e.g. lock /etc/passwd, call a library function
523 * file descriptor the application loses its lock and does not know).
527 * This is why any close will drop the lock and is also why, once the process
528 * forks, the lock is no longer related to the new process. These locks can
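
The comments at lines 521-528 describe the contrasting process-owned POSIX semantics: any close of the file drops the lock, and a fork does not carry it to the child. A short userland sketch of that pitfall (the path and helper function are illustrative only):

	/*
	 * Illustrative userland sketch (not part of flock.c): a process-owned
	 * POSIX record lock is dropped when *any* descriptor for the file is
	 * closed, even one the locking code never touched.
	 */
	#include <fcntl.h>
	#include <unistd.h>

	static void
	library_peek(const char *path)
	{
		int fd = open(path, O_RDONLY);
		/* ... read something ... */
		(void) close(fd);	/* silently drops the caller's lock */
	}

	int
	main(void)
	{
		int fd = open("/etc/passwd", O_RDONLY);
		struct flock fl = { 0 };

		fl.l_type = F_RDLCK;
		fl.l_whence = SEEK_SET;	/* l_start/l_len 0: whole file */

		(void) fcntl(fd, F_SETLK, &fl);
		library_peek("/etc/passwd");	/* lock is now gone; caller can't tell */
		(void) close(fd);
		return (0);
	}
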
596 /* check the validity of the lock range */
645 * If the lock request is an NLM server request ....
650 * Bail out if this is a lock manager request and the
651 * lock manager is not supposed to be running.
676 * previous lock requests) and its state is
679 * error to deny the lock request.
689 /* Now get the lock graph for a particular vnode */
736 * Recovery mechanism to release lock manager locks when
793 * dependents for this lock or EINTR from flk_wait_execute_
1000 flk_free_lock(lock_descriptor_t *lock)
1004 ASSERT(IS_DEAD(lock));
1006 if ((fp = lock->l_ofd) != NULL && fp->f_filock == (struct filock *)lock)
1009 if (IS_REFERENCED(lock)) {
1010 lock->l_state |= DELETED_LOCK;
1014 kmem_free((void *)lock, sizeof (lock_descriptor_t));
1018 flk_set_state(lock_descriptor_t *lock, int new_state)
1022 * and more than once. If a sleeping lock is signaled awake more
1033 if (IS_INTERRUPTED(lock)) {
1040 if (IS_CANCELLED(lock)) {
1046 CHECK_LOCK_TRANSITION(lock->l_status, new_state);
1047 if (IS_PXFS(lock)) {
1048 cl_flk_state_transition_notify(lock, lock->l_status, new_state);
1050 lock->l_status = new_state;
1056 * The policy followed is: if a write lock is sleeping, we don't allow read
1057 * locks before this write lock, even though there may not be any active
1073 lock_descriptor_t *lock;
1090 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1093 if (lock) {
1095 if (BLOCKS(lock, request)) {
1102 * Grant lock if it is for the same owner holding active
1103 * lock that covers the request.
1106 if (SAME_OWNER(lock, request) &&
1107 COVERS(lock, request) &&
1110 lock = lock->l_next;
1111 } while (lock->l_vnode == vp);
1123 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
1124 if (lock) {
1126 if (BLOCKS(lock, request)) {
1127 if (IS_GRANTED(lock)) {
1134 lock = lock->l_next;
1135 } while ((lock->l_vnode == vp));
1136 first_glock = lock->l_prev;
1155 * lock's range, block.
1162 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1163 if (lock) {
1165 flk_recompute_dependencies(lock, lk, 1, 0);
1166 lock = lock->l_next;
1167 } while (lock->l_vnode == vp);
1169 lock = first_glock;
1170 if (lock) {
1172 if (IS_GRANTED(lock)) {
1173 flk_recompute_dependencies(lock, lk, 1, 0);
1175 lock = lock->l_prev;
1176 } while ((lock->l_vnode == vp));
1190 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
1193 * If we find a sleeping write lock that is a superset of the
1195 * edge to this write lock we have paths to all locks in the
1198 * case is when this process that owns the sleeping write lock 'l1'
1202 * lock l5 owned by a process different from that owning l1, because
1207 if (lock) {
1209 if (BLOCKS(lock, request)) {
1212 if (COVERS(lock, request) &&
1213 lock->l_type == F_WRLCK) {
1215 !SAME_OWNER(lock, covered_by)) {
1220 covered_by = lock;
1223 !SAME_OWNER(lock, covered_by)) {
1224 lock = lock->l_next;
1227 if ((error = flk_add_edge(request, lock,
1231 lock = lock->l_next;
1232 } while (lock->l_vnode == vp);
1244 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1245 ASSERT(lock != NULL);
1247 if (BLOCKS(lock, request)) {
1249 !SAME_OWNER(lock, covered_by)) {
1250 lock = lock->l_next;
1253 if ((error = flk_add_edge(request, lock,
1257 lock = lock->l_next;
1258 } while (lock->l_vnode == vp);
1295 lock_descriptor_t *lock, *lock1;
1312 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1314 if (lock == NULL && request->l_type == F_UNLCK)
1316 if (lock == NULL) {
1322 lock1 = lock->l_next;
1323 if (SAME_OWNER(request, lock)) {
1324 done_searching = flk_relation(lock, request);
1326 lock = lock1;
1327 } while (lock->l_vnode == vp && !done_searching);
1366 * If the request is an NLM server lock request,
1367 * and the NLM state of the lock request is not
1369 * down), then cancel the sleeping lock and
1385 * when the lock is granted.
1395 * the callback must happen after putting the lock on the
1432 * If the lock manager is shutting down, return an
1592 * Check the relationship of request with lock and perform the
1593 * recomputation of dependencies, break lock if required, and return
1596 * The 'lock' and 'request' are compared and in case of overlap we
1597 * delete the 'lock' and form new locks to represent the non-overlapped
1598 * portion of the original 'lock'. This function has side effects: the
1599 * existing 'lock' will be freed and new locks will be added to the active list.
1603 flk_relation(lock_descriptor_t *lock, lock_descriptor_t *request)
1611 graph_t *gp = (lock->l_graph);
1624 lock->l_type == F_WRLCK)
1627 lock->l_type == F_RDLCK)
1632 if (lock->l_end < request->l_start) {
1633 if (lock->l_end == request->l_start - 1 &&
1636 request->l_start = lock->l_start;
1644 if (lock->l_start > request->l_end) {
1645 if (request->l_end == lock->l_start - 1 &&
1648 request->l_end = lock->l_end;
1656 if (request->l_end < lock->l_end) {
1657 if (request->l_start > lock->l_start) {
1659 request->l_start = lock->l_start;
1660 request->l_end = lock->l_end;
1666 COPY(lock1, lock);
1667 COPY(lock2, lock);
1668 lock1->l_start = lock->l_start;
1671 lock2->l_end = lock->l_end;
1677 } else if (request->l_start < lock->l_start) {
1679 request->l_end = lock->l_end;
1684 COPY(lock1, lock);
1692 request->l_start = lock->l_start;
1693 request->l_end = lock->l_end;
1698 COPY(lock1, lock);
1705 } else if (request->l_end > lock->l_end) {
1706 if (request->l_start > lock->l_start) {
1708 request->l_start = lock->l_start;
1713 COPY(lock1, lock);
1719 } else if (request->l_start < lock->l_start) {
1727 if (request->l_start > lock->l_start) {
1729 request->l_start = lock->l_start;
1734 COPY(lock1, lock);
1740 } else if (request->l_start < lock->l_start) {
1748 flk_delete_active_lock(lock, 0);
1749 flk_wakeup(lock, 1);
1750 flk_free_lock(lock);
1762 * dependencies because no lock will add an edge to this.
1774 ASSERT(FIRST_ADJ(lock) == HEAD(lock));
1778 * 'lock'.
1781 ep = FIRST_IN(lock);
1782 while (ep != HEAD(lock)) {
1787 flk_delete_active_lock(lock, 0);
1791 flk_recompute_dependencies(lock, topology, nvertex, 1);
1808 flk_wakeup(lock, 0);
1810 ep = FIRST_IN(lock);
1811 while (ep != HEAD(lock)) {
1812 lock->l_sedge = NEXT_IN(ep);
1816 ep = lock->l_sedge;
1819 flk_free_lock(lock);
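
The flk_relation() excerpts above (lines 1592-1819) compare an existing active lock with a request and, on overlap, replace the lock with new descriptors covering only the non-overlapped portions. The interval arithmetic on its own, as a hypothetical standalone helper (the kernel routine additionally rebuilds graph edges, wakes waiters, and frees the original descriptor):

	/*
	 * Hypothetical helper, not the kernel routine: given an existing lock
	 * covering [ls, le] and an overlapping request covering [rs, re],
	 * report the pieces of the existing lock that survive outside the
	 * request's range.  Assumes the two ranges do overlap.
	 */
	static int
	surviving_pieces(unsigned long long ls, unsigned long long le,
	    unsigned long long rs, unsigned long long re,
	    unsigned long long out[2][2])
	{
		int n = 0;

		if (rs > ls) {			/* left remainder: [ls, rs - 1] */
			out[n][0] = ls;
			out[n][1] = rs - 1;
			n++;
		}
		if (re < le) {			/* right remainder: [re + 1, le] */
			out[n][0] = re + 1;
			out[n][1] = le;
			n++;
		}
		return (n);			/* 0, 1, or 2 surviving pieces */
	}

Two surviving pieces correspond to the case in the excerpts where both lock1 and lock2 are COPY()'d from the original lock (lines 1666-1671).
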
1827 * Insert a lock into the active queue.
1835 lock_descriptor_t *first_lock, *lock;
1839 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1840 first_lock = lock;
1843 for (; (lock->l_vnode == vp &&
1844 lock->l_start < new_lock->l_start); lock = lock->l_next)
1847 lock = ACTIVE_HEAD(gp);
1850 lock->l_prev->l_next = new_lock;
1851 new_lock->l_next = lock;
1852 new_lock->l_prev = lock->l_prev;
1853 lock->l_prev = new_lock;
1866 * Delete the active lock: performs one of two functions depending on the
1868 * only, and the other is to both remove and free the lock.
1872 flk_delete_active_lock(lock_descriptor_t *lock, int free_lock)
1874 vnode_t *vp = lock->l_vnode;
1875 graph_t *gp = lock->l_graph;
1879 ASSERT(NO_DEPENDENTS(lock));
1880 ASSERT(NOT_BLOCKED(lock));
1881 ASSERT(IS_ACTIVE(lock));
1885 if (vp->v_filocks == (struct filock *)lock) {
1887 ((lock->l_next->l_vnode == vp) ? lock->l_next :
1890 lock->l_next->l_prev = lock->l_prev;
1891 lock->l_prev->l_next = lock->l_next;
1892 lock->l_next = lock->l_prev = NULL;
1893 flk_set_state(lock, FLK_DEAD_STATE);
1894 lock->l_state &= ~ACTIVE_LOCK;
1897 flk_free_lock(lock);
1911 lock_descriptor_t *lock;
1916 for (lock = gp->sleeping_locks.l_next; (lock != &gp->sleeping_locks &&
1917 lock->l_vnode < vp); lock = lock->l_next)
1920 lock->l_prev->l_next = request;
1921 request->l_prev = lock->l_prev;
1922 lock->l_prev = request;
1923 request->l_next = lock;
1929 * Cancelling a sleeping lock implies removing a vertex from the
1942 lock_descriptor_t *vertex, *lock;
1987 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
1989 if (lock) {
1991 if (IS_RECOMPUTE(lock)) {
1992 lock->l_index = nvertex;
1993 topology[nvertex++] = lock;
1995 lock->l_color = NO_COLOR;
1996 lock = lock->l_next;
1997 } while (lock->l_vnode == vp);
2000 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2002 if (lock) {
2004 if (IS_RECOMPUTE(lock)) {
2005 lock->l_index = nvertex;
2006 topology[nvertex++] = lock;
2008 lock->l_color = NO_COLOR;
2009 lock = lock->l_next;
2010 } while (lock->l_vnode == vp);
2080 lock_descriptor_t *lock;
2084 for (lock = ACTIVE_HEAD(gp)->l_next; lock != ACTIVE_HEAD(gp);
2085 lock = lock->l_next)
2086 lock->l_color = 0;
2088 for (lock = SLEEPING_HEAD(gp)->l_next; lock != SLEEPING_HEAD(gp);
2089 lock = lock->l_next)
2090 lock->l_color = 0;
2097 * Wake up locks that are blocked on the given lock.
2101 flk_wakeup(lock_descriptor_t *lock, int adj_list_remove)
2104 graph_t *gp = lock->l_graph;
2108 if (NO_DEPENDENTS(lock))
2110 ep = FIRST_IN(lock);
2124 lock->l_sedge = NEXT_IN(ep);
2127 ep = lock->l_sedge;
2128 } while (ep != HEAD(lock));
2129 ASSERT(NO_DEPENDENTS(lock));
2139 * If lock l1 in the dependent set of request is dependent (blocked by)
2140 * on lock l2 in topology but does not have a path to it, we add an edge
2159 lock_descriptor_t *vertex, *lock;
2193 * dependencies for this lock in the
2205 lock = topology[i];
2206 if (COLORED(lock))
2208 if (BLOCKS(lock, vertex)) {
2209 (void) flk_add_edge(vertex, lock,
2211 COLOR(lock);
2213 count += flk_color_reachables(lock);
2233 lock = ep->from_vertex;
2234 STACK_PUSH(vertex_stack, lock, l_stack);
2235 lock->l_sedge = FIRST_IN(lock);
2253 lock_descriptor_t *ver, *lock;
2267 lock = ep->to_vertex;
2268 if (COLORED(lock))
2270 COLOR(lock);
2271 if (IS_RECOMPUTE(lock))
2273 STACK_PUSH(vertex_stack, lock, l_stack1);
2282 * the barrier count of barrier vertices that are reachable from lock.
2286 flk_update_barriers(lock_descriptor_t *lock)
2294 STACK_PUSH(vertex_stack, lock, l_stack1);
2323 * Finds all vertices that are reachable from 'lock' more than once and
2325 * The barrier count is one minus the total number of paths from lock
2330 flk_find_barriers(lock_descriptor_t *lock)
2339 STACK_PUSH(vertex_stack, lock, l_stack1);
2364 * Finds the first lock that is mainly responsible for blocking this
2365 * request. If there is no such lock, request->l_flock.l_type is set to
2367 * of the blocking lock.
2369 * Note: It is possible a request is blocked by a sleeping lock because
2379 lock_descriptor_t *lock, *blocker;
2383 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2385 if (lock) {
2387 if (BLOCKS(lock, request)) {
2388 blocker = lock;
2391 lock = lock->l_next;
2392 } while (lock->l_vnode == vp);
2397 * No active lock is blocking this request, but if a read
2398 * lock is requested, it may also get blocked by a waiting
2402 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2403 if (lock) {
2405 if (BLOCKS(lock, request)) {
2406 blocker = lock;
2409 lock = lock->l_next;
2410 } while (lock->l_vnode == vp);
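
The comment at lines 2364-2369 describes finding the first lock mainly responsible for blocking a request and filling request->l_flock with its particulars, or setting l_type to F_UNLCK when nothing blocks. From userland this is what fcntl(F_GETLK) reports; a minimal sketch (any open descriptor for the file of interest will do):

	/*
	 * Illustrative userland view (not from flock.c): F_GETLK reports the
	 * first blocking lock found by the search sketched above.  If nothing
	 * blocks, the kernel sets l_type to F_UNLCK; otherwise the struct is
	 * overwritten with the blocker's particulars.
	 */
	#include <fcntl.h>
	#include <stdio.h>

	static void
	who_blocks(int fd)
	{
		struct flock fl = { 0 };

		fl.l_type = F_WRLCK;		/* "could I write-lock ... */
		fl.l_whence = SEEK_SET;
		fl.l_start = 0;
		fl.l_len = 0;			/* ... the whole file?" */

		if (fcntl(fd, F_GETLK, &fl) == -1) {
			perror("F_GETLK");
			return;
		}
		if (fl.l_type == F_UNLCK)
			printf("no conflicting lock\n");
		else
			printf("blocked by pid %ld, range [%lld, len %lld)\n",
			    (long)fl.l_pid, (long long)fl.l_start,
			    (long long)fl.l_len);
	}
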
2494 lock_descriptor_t *lock;
2513 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2515 if (lock) {
2516 while (lock->l_vnode == vp) {
2518 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
2521 * If NLM server request _and_ nlmid of lock matches
2522 * nlmid of argument, then we've found a remote lock.
2524 if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
2528 lock = lock->l_next;
2532 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2534 if (lock) {
2535 while (lock->l_vnode == vp) {
2537 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
2540 * If NLM server request _and_ nlmid of lock matches
2541 * nlmid of argument, then we've found a remote lock.
2543 if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
2547 lock = lock->l_next;
2568 lock_descriptor_t *lock;
2579 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2581 if (lock) {
2582 while (lock->l_vnode == vp) {
2583 if (IS_REMOTE(lock)) {
2587 lock = lock->l_next;
2591 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2593 if (lock) {
2594 while (lock->l_vnode == vp) {
2595 if (IS_REMOTE(lock)) {
2599 lock = lock->l_next;
2616 lock_descriptor_t *lock;
2630 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2632 if (lock) {
2633 while (lock->l_vnode == vp) {
2634 if (lock->l_flock.l_sysid == sysid) {
2638 lock = lock->l_next;
2642 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2644 if (lock) {
2645 while (lock->l_vnode == vp) {
2646 if (lock->l_flock.l_sysid == sysid) {
2650 lock = lock->l_next;
2673 lock_descriptor_t *lock;
2688 for (lock = ACTIVE_HEAD(gp)->l_next;
2689 lock != ACTIVE_HEAD(gp) && !has_locks;
2690 lock = lock->l_next) {
2691 if (lock->l_flock.l_sysid == sysid)
2697 for (lock = SLEEPING_HEAD(gp)->l_next;
2698 lock != SLEEPING_HEAD(gp) && !has_locks;
2699 lock = lock->l_next) {
2700 if (lock->l_flock.l_sysid == sysid)
2726 lock_descriptor_t *lock, *nlock;
2744 mutex_enter(&gp->gp_mutex); /* get mutex on lock graph */
2747 lock = SLEEPING_HEAD(gp)->l_next;
2748 while (lock != SLEEPING_HEAD(gp)) {
2749 nlock = lock->l_next;
2750 if (lock->l_flock.l_sysid == sysid) {
2751 INTERRUPT_WAKEUP(lock);
2753 lock = nlock;
2757 lock = ACTIVE_HEAD(gp)->l_next;
2758 while (lock != ACTIVE_HEAD(gp)) {
2759 nlock = lock->l_next;
2760 if (lock->l_flock.l_sysid == sysid) {
2761 flk_delete_active_lock(lock, 0);
2762 flk_wakeup(lock, 1);
2763 flk_free_lock(lock);
2765 lock = nlock;
2767 mutex_exit(&gp->gp_mutex); /* release mutex on lock graph */
2779 lock_descriptor_t *lock, *nlock;
2799 lock = SLEEPING_HEAD(gp)->l_next;
2800 while (lock != SLEEPING_HEAD(gp)) {
2801 nlock = lock->l_next;
2802 if (lock->l_flock.l_sysid == sysid) {
2803 INTERRUPT_WAKEUP(lock);
2805 lock = nlock;
2809 lock = ACTIVE_HEAD(gp)->l_next;
2810 while (lock != ACTIVE_HEAD(gp)) {
2811 nlock = lock->l_next;
2812 if (lock->l_flock.l_sysid == sysid) {
2813 flk_delete_active_lock(lock, 0);
2814 flk_wakeup(lock, 1);
2815 flk_free_lock(lock);
2817 lock = nlock;
2833 lock_descriptor_t *lock, *nlock;
2848 lock = SLEEPING_HEAD(gp)->l_next;
2849 while (lock != SLEEPING_HEAD(gp)) {
2850 nlock = lock->l_next;
2851 if (lock->l_vnode->v_vfsp == vfsp) {
2852 ASSERT(IS_PXFS(lock));
2853 if (GETPXFSID(lock->l_flock.l_sysid) ==
2855 flk_set_state(lock,
2857 flk_cancel_sleeping_lock(lock, 1);
2860 lock = nlock;
2864 lock = ACTIVE_HEAD(gp)->l_next;
2865 while (lock != ACTIVE_HEAD(gp)) {
2866 nlock = lock->l_next;
2867 if (lock->l_vnode->v_vfsp == vfsp) {
2868 ASSERT(IS_PXFS(lock));
2869 if (GETPXFSID(lock->l_flock.l_sysid) ==
2871 flk_delete_active_lock(lock, 0);
2872 flk_wakeup(lock, 1);
2873 flk_free_lock(lock);
2876 lock = nlock;
2883 * Search for a sleeping lock manager lock which matches exactly this lock
2886 * Return 1 if a matching lock was found, 0 otherwise.
2892 lock_descriptor_t *lock, *nlock;
2898 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2900 if (lock) {
2901 while (lock->l_vnode == vp) {
2902 nlock = lock->l_next;
2903 if (SAME_OWNER(lock, request) &&
2904 lock->l_start == request->l_start &&
2905 lock->l_end == request->l_end) {
2906 INTERRUPT_WAKEUP(lock);
2909 lock = nlock;
2925 lock_descriptor_t *lock, *nlock;
2939 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2941 if (lock) {
2943 nlock = lock->l_next;
2944 if ((lock->l_flock.l_pid == pid ||
2946 lock->l_flock.l_sysid == sysid) {
2947 CANCEL_WAKEUP(lock);
2949 lock = nlock;
2950 } while (lock->l_vnode == vp);
2953 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2955 if (lock) {
2957 nlock = lock->l_next;
2958 if ((lock->l_flock.l_pid == pid ||
2960 lock->l_flock.l_sysid == sysid) {
2961 flk_delete_active_lock(lock, 0);
2962 STACK_PUSH(link_stack, lock, l_stack);
2964 lock = nlock;
2965 } while (lock->l_vnode == vp);
2968 while ((lock = STACK_TOP(link_stack)) != NULL) {
2970 flk_wakeup(lock, 1);
2971 flk_free_lock(lock);
3074 * Checks for deadlock due to the new 'lock'. If a deadlock is found, the
3075 * edges of this lock are freed and the deadlock is reported.
3079 flk_check_deadlock(lock_descriptor_t *lock)
3092 if (lock->l_ofd != NULL)
3098 start_vertex = flk_get_proc_vertex(lock);
3103 ep = FIRST_ADJ(lock);
3104 while (ep != HEAD(lock)) {
3126 ep = FIRST_IN(lock);
3128 while (ep != HEAD(lock)) {
3185 /* we remove all lock edges and proc edges */
3187 ep = FIRST_ADJ(lock);
3188 while (ep != HEAD(lock)) {
3215 ep = FIRST_IN(lock);
3216 while (ep != HEAD(lock)) {
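
The flk_check_deadlock() excerpts above (lines 3074-3216) walk the lock's in- and out-edges, translate them into edges between owner (process) vertices, and report a deadlock if that creates a cycle; per line 3092, the check is skipped for OFD locks (l_ofd != NULL). A schematic of the cycle test only, with hypothetical types (the kernel version works on its own proc_vertex_t/proc_edge_t structures and uses an explicit stack rather than recursion):

	/*
	 * Schematic only, with hypothetical types: a deadlock exists when,
	 * after the new wait-for edges are added, the blocked owner's vertex
	 * can reach itself along at least one edge.  Callers must clear the
	 * visited marks between queries.
	 */
	struct pvtx {
		struct pvtx	**adj;		/* owners this owner waits on */
		int		nadj;
		int		visited;
	};

	static int
	reaches(struct pvtx *from, struct pvtx *target)
	{
		if (from->visited)
			return (0);
		from->visited = 1;
		for (int i = 0; i < from->nadj; i++) {
			if (from->adj[i] == target ||
			    reaches(from->adj[i], target))
				return (1);
		}
		return (0);
	}

	static int
	would_deadlock(struct pvtx *blocked_owner)
	{
		return (reaches(blocked_owner, blocked_owner));
	}
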
3249 * Get a proc vertex. If lock's pvertex value gets a correct proc vertex
3255 flk_get_proc_vertex(lock_descriptor_t *lock)
3262 if (lock->pvertex != -1) {
3263 ASSERT(lock->pvertex >= 0);
3264 pv = pgraph.proc[lock->pvertex];
3265 if (pv != NULL && PROC_SAME_OWNER(lock, pv)) {
3271 if (pv != NULL && PROC_SAME_OWNER(lock, pv)) {
3272 lock->pvertex = pv->index = i;
3277 pv->pid = lock->l_flock.l_pid;
3278 pv->sysid = lock->l_flock.l_sysid;
3284 lock->pvertex = pv->index = i;
3302 pv->index = lock->pvertex = pgraph.gcount;
3374 * recomputed lock graph. Otherwise we might miss a deadlock detection.
3376 * dependencies() otherwise if a process tries to lock a vnode hashed
3451 * Set the control status for lock manager requests.
3466 * before sleeping (so they're not holding the lock for the graph). If
3467 * such a thread reacquires the graph's lock (to go to sleep) after
3524 * with an LLM that doesn't already know about it (never sent a lock
3526 * lock request. Suppose that a shutdown request from the NLM server
3528 * service the request. Now suppose a new lock request is in
3533 * having done nothing, and the lock request will proceed and
3535 * by the lock request because there was no record of that NLM server
3538 * been discarded, but in fact there's still one lock held.
3540 * its state immediately to NLM_SHUTTING_DOWN. The lock request in
3542 * this lock and discard it.
3559 * have their lock requests cancelled and descriptors
3560 * removed from the sleeping lock list. Note that the NLM
3561 * server state associated with each lock descriptor is
3581 * Set the control status for lock manager requests.
3586 * before sleeping (so they're not holding the lock for the graph). If
3587 * such a thread reacquires the graph's lock (to go to sleep) after
3609 * If the lock manager is coming back up, all that's needed is to
3610 * propagate this information to the graphs. If the lock manager
3648 * describing the lock is returned. Each element in the list is
3655 * the current lock information, and that it is a snapshot of a moving
3663 lock_descriptor_t *lock;
3705 for (lock = graph_head->l_next;
3706 lock != graph_head;
3707 lock = lock->l_next) {
3708 if (use_sysid && lock->l_flock.l_sysid != sysid)
3710 if (pid != NOPID && lock->l_flock.l_pid != pid)
3712 if (vp != NULL && lock->l_vnode != vp)
3714 if (lock_state && !(lock_state & lock->l_state))
3716 if (zoneid != lock->l_zoneid && zoneid != ALL_ZONES)
3719 * A matching lock was found. Allocate
3725 VN_HOLD(lock->l_vnode);
3726 llp->ll_vp = lock->l_vnode;
3727 create_flock(lock, &(llp->ll_flock));
3832 * b. For each lock descriptor in the list do
3833 * i. If the requested lock is an NLM server request AND
3835 * change the lock descriptor's state field to
3838 * d. For each lock descriptor in the list do
3839 * i. If the requested lock is an NLM server request AND
3841 * change the lock descriptor's state field to
3846 graph_t *gp; /* lock graph */
3847 lock_descriptor_t *lock; /* lock */
3848 lock_descriptor_t *nlock = NULL; /* next lock */
3859 /* Get list of sleeping locks in current lock graph. */
3861 for (lock = SLEEPING_HEAD(gp)->l_next;
3862 lock != SLEEPING_HEAD(gp);
3863 lock = nlock) {
3864 nlock = lock->l_next;
3866 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
3869 * If NLM server request AND nlmid of lock matches
3871 * lock to "nlm_state."
3873 if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
3874 SET_NLM_STATE(lock, nlm_state);
3878 /* Get list of active locks in current lock graph. */
3879 for (lock = ACTIVE_HEAD(gp)->l_next;
3880 lock != ACTIVE_HEAD(gp);
3881 lock = nlock) {
3882 nlock = lock->l_next;
3884 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
3887 * If NLM server request AND nlmid of lock matches
3889 * lock to "nlm_state."
3891 if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
3892 ASSERT(IS_ACTIVE(lock));
3893 SET_NLM_STATE(lock, nlm_state);
3902 * Effects: Find all sleeping lock manager requests _only_ for the NLM server
3903 * identified by "nlmid." Poke those lock requests.
3908 lock_descriptor_t *lock;
3909 lock_descriptor_t *nlock = NULL; /* next lock */
3923 for (lock = SLEEPING_HEAD(gp)->l_next;
3924 lock != SLEEPING_HEAD(gp);
3925 lock = nlock) {
3926 nlock = lock->l_next;
3928 * If NLM server request _and_ nlmid of lock matches
3930 * lock to NLM_SHUTTING_DOWN, and wake up sleeping
3933 if (IS_LOCKMGR(lock)) {
3936 GETNLMID(lock->l_flock.l_sysid);
3938 SET_NLM_STATE(lock,
3940 INTERRUPT_WAKEUP(lock);
3950 * Effects: Find all active (granted) lock manager locks _only_ for the
3956 lock_descriptor_t *lock;
3957 lock_descriptor_t *nlock = NULL; /* next lock */
3971 for (lock = ACTIVE_HEAD(gp)->l_next;
3972 lock != ACTIVE_HEAD(gp);
3973 lock = nlock) {
3974 nlock = lock->l_next;
3975 ASSERT(IS_ACTIVE(lock));
3979 * the lock matches nlmid of argument, then
3980 * remove the active lock from the list, wake up blocked
3981 * threads, and free the storage for the lock.
3983 * of this lock to NLM_DOWN because the lock will
3986 if (IS_LOCKMGR(lock)) {
3988 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
3990 flk_delete_active_lock(lock, 0);
3991 flk_wakeup(lock, 1);
3992 flk_free_lock(lock);
4001 * Find all sleeping lock manager requests and poke them.
4006 lock_descriptor_t *lock;
4007 lock_descriptor_t *nlock = NULL; /* next lock */
4022 for (lock = SLEEPING_HEAD(gp)->l_next;
4023 lock != SLEEPING_HEAD(gp);
4024 lock = nlock) {
4025 nlock = lock->l_next;
4026 if (IS_LOCKMGR(lock) && lock->l_zoneid == zoneid) {
4027 INTERRUPT_WAKEUP(lock);
4036 * Find all active (granted) lock manager locks and release them.
4041 lock_descriptor_t *lock;
4042 lock_descriptor_t *nlock = NULL; /* next lock */
4057 for (lock = ACTIVE_HEAD(gp)->l_next;
4058 lock != ACTIVE_HEAD(gp);
4059 lock = nlock) {
4060 nlock = lock->l_next;
4061 if (IS_LOCKMGR(lock) && lock->l_zoneid == zoneid) {
4062 ASSERT(IS_ACTIVE(lock));
4063 flk_delete_active_lock(lock, 0);
4064 flk_wakeup(lock, 1);
4065 flk_free_lock(lock);
4074 * Wait until a lock is granted, cancelled, or interrupted.
4094 * Create an flock structure from the existing lock information
4096 * This routine is used to create flock structures for the lock manager
4097 * to use in a reclaim request. Since the lock was originated on this
4118 * Convert flock_t data describing a lock range into unsigned long starting
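
The comment at line 4118 refers to the standard flock_t interpretation: l_len == 0 extends the lock to end of file, and a negative l_len places the range before the base offset. A hedged sketch of that conversion, assuming l_whence has already been resolved to an absolute offset 'base' (the names and the MAXEND stand-in are illustrative, not the kernel's):

	#include <limits.h>

	#define	LOCK_MAX_END	ULLONG_MAX	/* stand-in for "to end of file" */

	static int
	lock_range(unsigned long long base, long long l_len,
	    unsigned long long *startp, unsigned long long *endp)
	{
		if (l_len > 0) {
			*startp = base;
			*endp = base + (unsigned long long)l_len - 1;
		} else if (l_len == 0) {
			*startp = base;
			*endp = LOCK_MAX_END;	/* lock extends to end of file */
		} else {
			/* magnitude of the negative length, without overflow */
			unsigned long long abslen = 0ULL - (unsigned long long)l_len;

			if (abslen > base)
				return (-1);	/* range starts before offset 0 */
			*startp = base - abslen;
			*endp = base - 1;
		}
		return (0);
	}
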
4173 * Check the validity of lock data. This can be used by the NFS
4199 * Fill in request->l_flock with information about the lock blocking the
4200 * request. The complexity here is that lock manager requests are allowed
4204 * What should be done when "blocker" is a lock manager lock that uses the
4283 * lock.
4294 lock_descriptor_t *lock;
4313 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
4315 for (; lock && lock->l_vnode == vp; lock = lock->l_next) {
4316 if ((svmand || (lock->l_state & NBMAND_LOCK)) &&
4317 (lock->l_flock.l_sysid != sysid ||
4318 lock->l_flock.l_pid != pid) &&
4320 lock->l_type, lock->l_start, lock->l_end)) {
4331 * Return non-zero if the given I/O request conflicts with the given lock.
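
The comment at line 4331 introduces the conflict test used by the nbmand check in the preceding excerpt (lines 4313-4320). Its essence, as a hypothetical standalone predicate (the kernel path also relies on the caller having excluded same-owner locks, as lines 4317-4318 show):

	/*
	 * Hypothetical predicate, not the kernel routine: an I/O request
	 * conflicts with a record lock when the byte ranges overlap and at
	 * least one side is a write; a read request is compatible with a
	 * read lock.
	 */
	#include <fcntl.h>

	typedef enum { IO_READ, IO_WRITE } io_op_t;

	static int
	io_conflicts_with_lock(io_op_t op,
	    unsigned long long io_start, unsigned long long io_end,
	    int lock_type,			/* F_RDLCK or F_WRLCK */
	    unsigned long long l_start, unsigned long long l_end)
	{
		if (io_end < l_start || io_start > l_end)
			return (0);		/* ranges do not overlap */
		if (op == IO_READ && lock_type == F_RDLCK)
			return (0);		/* read I/O vs. read lock: no conflict */
		return (1);
	}
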
4356 lock_descriptor_t *lock, *lock1;
4359 for (lock = ACTIVE_HEAD(gp)->l_next; lock != ACTIVE_HEAD(gp);
4360 lock = lock->l_next) {
4361 ASSERT(IS_ACTIVE(lock));
4362 ASSERT(NOT_BLOCKED(lock));
4363 ASSERT(!IS_BARRIER(lock));
4365 ep = FIRST_IN(lock);
4367 while (ep != HEAD(lock)) {
4373 for (lock1 = lock->l_next; lock1 != ACTIVE_HEAD(gp);
4375 if (lock1->l_vnode == lock->l_vnode) {
4376 if (BLOCKS(lock1, lock)) {
4378 "active lock %p blocks %p",
4379 (void *)lock1, (void *)lock);
4380 } else if (BLOCKS(lock, lock1)) {
4382 "active lock %p blocks %p",
4383 (void *)lock, (void *)lock1);
4540 lock_descriptor_t *lock;
4546 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
4548 if (lock) {
4549 while (lock != ACTIVE_HEAD(gp) && (lock->l_vnode == vp)) {
4550 if (lock->l_flock.l_pid == pid &&
4551 lock->l_flock.l_sysid == sysid)
4553 "owner pid %d's lock %p in active queue",
4554 pid, (void *)lock);
4555 lock = lock->l_next;
4558 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
4560 if (lock) {
4561 while (lock != SLEEPING_HEAD(gp) && (lock->l_vnode == vp)) {
4562 if (lock->l_flock.l_pid == pid &&
4563 lock->l_flock.l_sysid == sysid)
4565 "owner pid %d's lock %p in sleep queue",
4566 pid, (void *)lock);
4567 lock = lock->l_next;