104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * CDDL HEADER START
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * The contents of this file are subject to the terms of the
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Common Development and Distribution License (the "License").
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * You may not use this file except in compliance with the License.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * See the License for the specific language governing permissions
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * and limitations under the License.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * When distributing Covered Code, include this CDDL HEADER in each
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * If applicable, add the following below this CDDL HEADER, with the
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * fields enclosed by brackets "[]" replaced with your own identifying
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * information: Portions Copyright [yyyy] [name of copyright owner]
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * CDDL HEADER END
0a586cea3ceec7e5e50e7e54c745082a7a333ac2Mark Shellenbaum * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Use is subject to license terms.
fb09f5aad449c97fe309678f3f604982b563a96fMadhav Suresh * Copyright (c) 2012 by Delphix. All rights reserved.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * This file contains the code to implement file range locking in
f7170741490edba9d1d9c697c177c887172bc741Will Andrews * ZFS, although there isn't much specific to ZFS (all that comes to mind is
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * support for growing the blocksize).
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Interface
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * ---------
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Defined in zfs_rlock.h but essentially:
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * rl = zfs_range_lock(zp, off, len, lock_type);
c5c6ffa0498b9c8555798756141b4a3061a138c1maybee * zfs_range_unlock(rl);
c5c6ffa0498b9c8555798756141b4a3061a138c1maybee * zfs_range_reduce(rl, off, len);
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * An AVL tree is used to maintain the state of the existing ranges
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * that are locked for exclusive (writer) or shared (reader) use.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * The starting range offset is used for searching and sorting the tree.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Common case
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * -----------
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * The (hopefully) usual case is of no overlaps or contention for
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * locks. On entry to zfs_lock_range() a rl_t is allocated; the tree is
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * searched and, finding no overlap, *this* rl_t is placed in the tree.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * ---------------------------------------
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * The avl code only allows one node at a particular offset. Also it's very
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * inefficient to search through all previous entries looking for overlaps
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * (because the very 1st in the ordered list might be at offset 0 but
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * cover the whole file).
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * So this implementation uses reference counts and proxy range locks.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Firstly, only reader locks use reference counts and proxy locks,
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * because writer locks are exclusive.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * When a reader lock overlaps with another then a proxy lock is created
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * for that range and replaces the original lock. If the overlap
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * is exact then the reference count of the proxy is simply incremented.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Otherwise, the proxy lock is split into smaller lock ranges and
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * new proxy locks created for non overlapping ranges.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * The reference counts are adjusted accordingly.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Meanwhile, the original lock is kept around (this is the caller's handle)
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * and its offset and length are used when releasing the lock.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Thread coordination
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * -------------------
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * In order to make wakeups efficient and to ensure multiple continuous
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * readers on a range don't starve a writer for the same range lock,
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * two condition variables are allocated in each rl_t.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * If a writer (or reader) can't get a range it initialises the writer
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * and waits on that cv. When a thread unlocks that range it wakes up all
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * writers then all readers before destroying the lock.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Append mode writes
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * ------------------
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Append mode writes need to lock a range at the end of a file.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * The offset of the end of the file is determined under the
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * range locking mutex, and the lock type converted from RL_APPEND to
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * RL_WRITER and the range locked.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Grow block handling
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * -------------------
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * ZFS supports multiple block sizes, currently up to 128K. The smallest
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * block size is used for the file which is grown as needed. During this
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * growth all other writers and readers must be excluded.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * So if the block size needs to be grown then the whole file is
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * exclusively locked, then later the caller will reduce the lock
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * range to just the range to be written using zfs_reduce_range.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Check if a write lock can be grabbed, or wait and recheck until available.
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * Range locking is also used by zvol and uses a
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * dummied up znode. However, for zvol, we don't need to
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * append or grow blocksize, and besides we don't have
0a586cea3ceec7e5e50e7e54c745082a7a333ac2Mark Shellenbaum * a "sa" data or z_zfsvfs - so skip that processing.
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * Yes, this is ugly, and would be solved by not handling
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * grow or append in range lock code. If that was done then
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * we could make the range locking code generically available
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * to other non-zfs consumers.
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * If in append mode pick up the current end of file.
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * This is done under z_range_lock to avoid races.
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * If we need to grow the block size then grab the whole
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * file range. This is also done under z_range_lock to
c2e6a7d6abc139a8d59fca4857d6276f3b70ddf9perrin * avoid races.
0a586cea3ceec7e5e50e7e54c745082a7a333ac2Mark Shellenbaum end_size = MAX(zp->z_size, new->r_off + len);
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * First check for the usual case of no locks
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Look for any locks in the range.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin new->r_type = RL_WRITER; /* convert possible RL_APPEND */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* reset to original */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * If this is an original (non-proxy) lock then replace it by
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * a proxy and return the proxy.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* create a proxy range lock */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Split the range lock at the supplied offset
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * returning the *front* proxy.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrinzfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* create the rear proxy range lock */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Create and add a new proxy range lock for the supplied range.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrinzfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrinzfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * prev arrives either:
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * - pointing to an entry at the same offset
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * - pointing to the entry with the closest previous offset whose
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * range may overlap with the new range
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * - null, if there were no ranges starting before the new one
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * convert to proxy if needed then
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * split this entry and bump ref count
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin prev = AVL_NEXT(tree, prev); /* move to rear range */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* no overlaps, use the original new rl_t in the tree */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* Add a proxy for initial range before the overlap */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * We now search forward through the ranges, until we go past the end
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * of the new range. For each entry we make it a proxy if it
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * isn't already, then bump its reference count. If there's any
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * gaps between the ranges then we create a new proxy range.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin if (prev && prev->r_off + prev->r_len < next->r_off) {
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* there's a gap */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* exact overlap with end */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* new range ends in the middle of this block */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* Add the remaining end range. */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Check if a reader lock can be grabbed, or wait and recheck until available.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Look for any writer locks in the range.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Check the previous range for a writer lock overlap.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Search through the following ranges to see if there's
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * any write lock overlap.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Add the read lock, which may involve splitting existing
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * locks and bumping ref counts (r_cnt).
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Lock a range (offset, length) as either shared (RL_READER)
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * or exclusive (RL_WRITER). Returns the range lock structure
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * for later unlocking or reduce range (if entire file
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * previously locked as RL_WRITER).
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrinzfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin new->r_cnt = 1; /* assume it's going to be in the tree */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * First check for the usual case of no locks
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Unlock a reader lock
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * The common case is when the remove entry is in the tree
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * (cnt == 1) meaning there's been no other reader locks overlapping
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * with this one. Otherwise the remove entry will have been
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * removed from the tree and replaced by proxies (one or
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * more ranges mapping to the entire range).
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Find start proxy representing this reader lock,
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * then decrement ref count on all proxies
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * that make up this range, freeing them as needed.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Unlock range and destroy range lock structure.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* writer locks can't be shared or split */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * lock may be shared, let zfs_range_unlock_reader()
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * release the lock and free the rl_t
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Reduce range locked as RL_WRITER from whole file to specified range.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Asserts the whole file is exclusively locked and so there's only one
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * entry in the tree.
c5c6ffa0498b9c8555798756141b4a3061a138c1maybeezfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin /* Ensure there are no other locks */
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * AVL comparison function used to order range locks
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin * Locks are ordered on the start offset of the range.
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin return (1);
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin return (-1);
104e2ed78d9ef0a0f89f320108b8ca29ca3850d5perrin return (0);