/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>
/*
* SPA locking
*
* There are four basic locks for managing spa_t structures:
*
* spa_namespace_lock (global mutex)
*
* This lock must be acquired to do any of the following:
*
* - Lookup a spa_t by name
* - Add or remove a spa_t from the namespace
* - Increase spa_refcount from non-zero
* - Check if spa_refcount is zero
* - Rename a spa_t
*
* It does not need to handle recursion. A create or destroy may
* reference objects (files or zvols) in other pools, but by
* definition they must have an existing reference, and will never need
* to lookup a spa_t by name.
*
* spa_refcount (per-spa refcount_t protected by mutex)
*
 * This reference count keeps track of any active users of the spa_t. The
 * spa_t cannot be destroyed or freed while this is non-zero. Internally,
 * the refcount is never really 'zero' - opening a pool implicitly keeps
 * some references in the DMU. Internally we check against spa_minref, but
 * present the image of a zero/non-zero value to consumers.
*
* spa_config_lock[] (per-spa array of rwlocks)
*
* This protects the spa_t from config changes, and must be held in
* the following circumstances:
*
* - RW_READER to perform I/O to the spa
* - RW_WRITER to change the vdev config
*
* The locking order is fairly straightforward:
*
* spa_namespace_lock -> spa_refcount
*
* The namespace lock must be acquired to increase the refcount from 0
* or to check if it is zero.
*
* spa_refcount -> spa_config_lock[]
*
* There must be at least one valid reference on the spa_t to acquire
* the config lock.
*
* spa_namespace_lock -> spa_config_lock[]
*
* The namespace lock must always be taken before the config lock.
*
*
* The spa_namespace_lock can be acquired directly and is globally visible.
*
* The namespace is manipulated using the following functions, all of which
* require the spa_namespace_lock to be held.
*
* spa_lookup() Lookup a spa_t by name.
*
* spa_add() Create a new spa_t in the namespace.
*
* spa_remove() Remove a spa_t from the namespace. This also
* frees up any memory associated with the spa_t.
*
* spa_next() Returns the next spa_t in the system, or the
* first if NULL is passed.
*
* spa_evict_all() Shutdown and remove all spa_t structures in
* the system.
*
*
* The spa_refcount is manipulated using the following functions:
*
* spa_open_ref() Adds a reference to the given spa_t. Must be
* called with spa_namespace_lock held if the
* refcount is currently zero.
*
* spa_close() Remove a reference from the spa_t. This will
* not free the spa_t or remove it from the
* namespace. No locking is required.
*
* spa_refcount_zero() Returns true if the refcount is currently
* zero. Must be called with spa_namespace_lock
* held.
*
* The spa_config_lock[] is an array of rwlocks, ordered as follows:
* SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
* spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
*
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer. To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
*
* We use these distinct config locks to avoid recursive lock entry.
* For example, spa_sync() (which holds SCL_CONFIG as reader) induces
* block allocations (SCL_ALLOC), which may require reading space maps
* from disk (dmu_read() -> zio_read() -> SCL_ZIO).
*
* The spa config locks cannot be normal rwlocks because we need the
* ability to hand off ownership. For example, SCL_ZIO is acquired
* by the issuing thread and later released by an interrupt thread.
* They do, however, obey the usual write-wanted semantics to prevent
* writer (i.e. system administrator) starvation.
*
* The lock acquisition rules are as follows:
*
 *	SCL_CONFIG
 *		Protects changes to the vdev tree topology, such as vdev
 *		additions and removals. Protects the dirty config list
 *		(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 *	SCL_STATE
 *		Protects changes to pool state and vdev state, such as vdev
 *		online/offline or fault. Protects the dirty state list
 *		(spa_state_dirty_list) and global pool state (spa_state).
*
* SCL_ALLOC
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_alloc() and metaslab_claim().
*
* SCL_ZIO
* Held by bp-level zios (those which have no io_vd upon entry)
* to prevent changes to the vdev tree. The bp-level zio implicitly
* protects all of its vdev child zios, which do not hold SCL_ZIO.
*
* SCL_FREE
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_free(). SCL_FREE is distinct from
* SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
* blocks in zio_done() while another i/o that holds either
* SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
*
* SCL_VDEV
* Held as reader to prevent changes to the vdev tree during trivial
* inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
* other locks, and lower than all of them, to ensure that it's safe
* to acquire regardless of caller context.
*
* In addition, the following rules apply:
*
* (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
* The lock ordering is SCL_CONFIG > spa_props_lock.
*
* (b) I/O operations on leaf vdevs. For any zio operation that takes
* an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 * or zio_write_phys() -- the caller must ensure that the config cannot
 * change in the interim, and that the vdev cannot be reopened.
* SCL_STATE as reader suffices for both.
*
* The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
*
* spa_vdev_enter() Acquire the namespace lock and the config lock
* for writing.
*
* spa_vdev_exit() Release the config lock, wait for all I/O
* to complete, sync the updated configs to the
* cache, and release the namespace lock.
*
* vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*
* spa_rename() is also implemented within this file since it requires
* manipulation of the namespace.
*/
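/*
 * Illustrative usage sketch (editor's addition, not from the original file):
 * the canonical pattern for readers of the vdev tree, as used throughout
 * the SPA. FTAG is the usual "current function" tag.
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	(inspect the vdev tree, e.g. vdev_lookup_top(spa, vdev))
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * Writers take all locks: spa_config_enter(spa, SCL_ALL, spa, RW_WRITER).
 */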
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
/*
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
* This should only be used as a last resort, as it typically results
* in leaked space, or worse.
 */
int zfs_recover = 0;
/*
* If destroy encounters an EIO while reading metadata (e.g. indirect
* blocks), space referenced by the missing metadata can not be freed.
* Normally this causes the background destroy to become "stalled", as
* it is unable to make forward progress. While in this stalled state,
* all remaining space to free from the error-encountering filesystem is
* "temporarily leaked". Set this flag to cause it to ignore the EIO,
* permanently leak the space from indirect blocks that can not be read,
* and continue to free everything else that it can.
*
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers. In
 * this case, we will be able to continue pool operations while it is
* partially failed, and when it recovers, we can continue to free the
* space, with no leaks. However, note that this case is actually
* fairly rare.
*
* Typically pools either (a) fail completely (but perhaps temporarily,
* e.g. a top-level vdev going offline), or (b) have localized,
* permanent errors (e.g. disk returns the wrong data due to bit flip or
* firmware bug). In case (a), this setting does not matter because the
* pool will be suspended and the sync thread will not be able to make
* forward progress regardless. In case (b), because the error is
* permanent, the best we can do is leak the minimum amount of space,
* which is what setting this flag will do. Therefore, it is reasonable
* for this flag to normally be set, but we chose the more conservative
* approach of not setting it, so that there is no possibility of
* leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;
/*
* Expiration time in milliseconds. This value has two meanings. First it is
* used to determine when the spa_deadman() logic should fire. By default the
* spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
* Secondly, the value determines if an I/O is considered "hung". Any I/O that
* has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
* in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
/*
* Check time in milliseconds. This defines the frequency at which we check
* for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;
/*
* The worst case is single-sector max-parity RAID-Z blocks, in which
* case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
* times the size; so just assume that. Add to this the fact that
* we can have up to 3 DVAs per bp, and one more factor of 2 because
* the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
* the worst case is:
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
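/*
 * Worked example (editor's illustration, not from the original file): with
 * VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3, a worst-case 4K
 * logical write may be charged (3 + 1) * 3 * 2 * 4K == 96K of not-yet-synced
 * space, which is why spa_get_asize() below multiplies lsize by
 * spa_asize_inflation.
 */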
/*
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
* the pool to be consumed. This ensures that we don't run the pool
* completely out of space, due to unaccounted changes (e.g. to the MOS).
* It also limits the worst-case time to allocate space. If we have
* less than this amount of free space, most ZPL operations (e.g. write,
* create) will return ENOSPC.
*
* Certain operations (e.g. file removal, most administrative actions) can
* use half the slop space. They will only return ENOSPC if less than half
* the slop space is free. Typically, once the pool has less than the slop
* space free, the user will use these operations to free up space in the pool.
* These are the operations that call dsl_pool_adjustedsize() with the netfree
* argument set to TRUE.
*
* A very restricted set of operations are always permitted, regardless of
* the amount of free space. These are the operations that call
* dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy". If these
* operations result in a net increase in the amount of space used,
* it is possible to run the pool completely out of space, causing it to
* be permanently read-only.
*
* Note that on very small pools, the slop space will be larger than
* 3.2%, in an effort to have it be at least spa_min_slop (128MB),
* but we never allow it to be more than half the pool size.
*
* See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;
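/*
 * Editor's sketch (assumption, matching spa_get_slop_space() below): the
 * reserved slop is effectively
 *
 *	slop = MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop))
 *
 * e.g. a 100G pool reserves 100G/32 == 3.125G, while a 1G pool reserves
 * MIN(512M, 128M) == 128M rather than a mere 32M.
 */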
/*
* ==========================================================================
* SPA config locking
* ==========================================================================
*/
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}
static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
	}
}
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}
void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}
void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			cv_broadcast(&scl->scl_cv);
			scl->scl_writer = NULL;
		}
		mutex_exit(&scl->scl_lock);
	}
}
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
/*
* ==========================================================================
* SPA namespace functions
* ==========================================================================
*/
/*
* Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
* Returns NULL if no matching spa_t is found.
*/
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);
	return (spa);
}
/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);

	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
* exist by calling spa_lookup() first.
*/
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);
	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
/*
* Set the alternate root, if there is one.
*/
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}
/*
* Every pool starts with the default cachefile
*/
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	spa_config_dirent_t *dp =
	    kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}
	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_VIRTUAL);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}
	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
/*
* As a pool is being created, treat all features as disabled by
* setting SPA_FEATURE_DISABLED for all entries in the feature
* refcount cache.
*/
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEAT_DISABLED;
	}
return (spa);
}
/*
* Removes a spa_t from the namespace, freeing up any memory used. Requires
* spa_namespace_lock. This is called only after the spa_t has been closed and
* deactivated.
*/
void
spa_remove(spa_t *spa)
{
}
}
	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);
}
/*
* Given a pool, return the next pool in the namespace, or NULL if there is
* none. If 'prev' is NULL, return the first pool.
*/
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
/*
* ==========================================================================
* SPA refcount functions
* ==========================================================================
*/
/*
* Add a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t held by a dsl dir that is
* being asynchronously released. Async releases occur from a taskq
* performing eviction of dsl datasets and dirs. The namespace lock
* isn't held and the hold by the object being evicted may contribute to
* spa_minref (e.g. dataset or directory released during pool export),
* so the asserts in spa_close() do not apply.
*/
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}
/*
* Check to see if the spa refcount is zero. Must be called with
* spa_namespace_lock held. We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
/*
* ==========================================================================
* SPA spare and l2cache tracking
* ==========================================================================
*/
/*
* Hot spares and cache devices are tracked using the same code below,
* for 'auxiliary' devices.
*/
typedef struct spa_aux {
	avl_node_t	aux_avl;
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	int		aux_count;
} spa_aux_t;
static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}
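/*
 * Editor's note (illustration, not from the original file): this comparator
 * is handed to the generic AVL code when the trees are built in spa_init():
 *
 *	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
 *	    offsetof(spa_aux_t, aux_avl));
 */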
void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}
void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);
	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}
boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}
void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
* Spares are tracked globally due to the following constraints:
*
* - A spare may be part of multiple pools.
* - A spare may be added to a pool even if it's actively in use within
* another pool.
* - A spare in use in any pool can only be the source of a replacement if
* the target is a spare in the same pool.
*
* We keep track of all spares on the system through the use of a reference
* counted AVL tree. When a vdev is added as a spare, or used as a replacement
* spare, then we bump the reference count in the AVL tree. In addition, we set
* the 'vdev_isspare' member to indicate that the device is a spare (active or
* inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
*
* The 'spa_spare_lock' protects the AVL tree. These functions are normally
* called under the spa_namespace lock as part of vdev reconfiguration. The
* separate spare lock exists for the status query path, which does not need to
* be completely consistent with respect to other vdev configuration changes.
*/
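/*
 * Editor's sketch (assumption, not from the original file): the lifecycle
 * as driven by vdev reconfiguration looks roughly like
 *
 *	spa_spare_add(vd);	sets vd->vdev_isspare and bumps the guid's
 *				count in spa_spare_avl
 *	spa_spare_activate(vd);	records which pool the spare now serves
 *	spa_spare_exists(guid, &pool, &refcnt);	status query path
 *	spa_spare_remove(vd);	clears vdev_isspare and drops the count
 */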
static int
spa_spare_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}
void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}
/*
* Level 2 ARC devices are tracked globally for the same reasons as spares.
* Cache devices currently only support one pool per cache device, and so
* for these devices the aux reference count is currently unused beyond 1.
*/
static int
spa_l2cache_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}
boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}
void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}
/*
* ==========================================================================
* SPA vdev locking
* ==========================================================================
*/
/*
* Lock the given spa_t for the purpose of adding or removing a vdev.
* Grabs the global spa_namespace_lock plus the spa config lock for writing.
* It returns the next transaction group for the spa_t.
*/
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
return (spa_vdev_config_enter(spa));
}
/*
* Internal implementation for spa_vdev_enter(). Used when a vdev
* operation requires multiple syncs (i.e. removing a device) while
* keeping the spa_namespace_lock held.
*/
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}
/*
* Used in combination with spa_vdev_config_enter() to allow the syncing
* of multiple transactions without releasing the spa_namespace_lock.
*/
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	boolean_t config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it. This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}
/*
* Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
* synced to disk, and then update the global configuration cache with the new
* information.
*/
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
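/*
 * Usage sketch (editor's illustration, not from the original file): vdev
 * add/remove paths bracket their work like
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	(modify the vdev tree)
 *	return (spa_vdev_exit(spa, newvd, txg, error));
 */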
/*
* Lock the given spa_t for the purpose of changing vdev state.
*/
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev. Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem. Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync. This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous. This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
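/*
 * Usage sketch (editor's illustration, not from the original file): state
 * changes such as online/offline follow the pattern
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	(change vdev state, e.g. mark a device offline)
 *	return (spa_vdev_state_exit(spa, vd, error));
 */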
/*
* ==========================================================================
* Miscellaneous functions
* ==========================================================================
*/
void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because lock SCL_CONFIG is not
		 * held. Thankfully, in this case we don't need to dirty the
		 * config because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}
void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}
/*
* Rename a spa_t.
*/
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing. We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync. It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
* Return the spa_t associated with given pool_guid, if it exists. If
* device_guid is non-zero, determine whether the pool exists *and* contains
* a device with the specified device_guid.
*/
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}
/*
* Determine whether a pool with the given pool_guid exists.
*/
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
spa_strdup(const char *s)
{
	size_t len = strlen(s);
	char *new = kmem_alloc(len + 1, KM_SLEEP);

	bcopy(s, new, len);
	new[len] = '\0';
	return (new);
}
void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}
uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);
	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
	return (r % range);
}
uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid;

	if (spa != NULL) {
		do {
			guid = spa_get_random(-1ULL);
		} while (!spa_guid_exists(spa_guid(spa), guid));
	} else {
		do {
			guid = spa_get_random(-1ULL);
		} while (spa_guid_exists(guid, 0));
	}
	return (guid);
}
void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}
void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}
/*
* This is a stripped-down version of strtoull, suitable only for converting
* lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
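/*
 * Worked example (editor's illustration, not from the original file):
 * zfs_strtonum("1a2b/foo", &end) returns 0x1a2b and leaves end pointing
 * at "/foo"; parsing stops at the first non-[0-9a-f] character.
 */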
/*
* ==========================================================================
* Accessor functions
* ==========================================================================
*/
boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}
dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}
boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}
void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}
void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}
int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}
char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}
uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}
uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc. It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}
uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}
uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}
uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}
pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}
spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}
uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}
/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}
/*
* Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%),
* or at least 128MB, unless that would cause it to be more than half the
* pool size.
*
* See the comment above spa_slop_shift for details.
*/
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}
uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}
void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}
/*
 * Return the failure mode that has been set to this pool. The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint64_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}
boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}
uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}
boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}
metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}
metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}
void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}
int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated. Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}
uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}
uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}
uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
/*
* ==========================================================================
* Initialization and Termination
* ==========================================================================
*/
static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}
int
spa_busy(void)
{
return (spa_active_count);
}
void
spa_boot_init()
{
	spa_config_load();
}
void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	refcount_init();
	unique_init();
	range_tree_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}
void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
/*
* Return whether this pool has slogs. No locking needed.
* It's not a problem if the wrong answer is returned as it's only for
* performance and not correctness
*/
{
}
spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}
void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}
boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}
boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}
/*
* Returns true if there is a pending sync task in any of the current
* syncing txg, the current quiescing txg, or the current open txg.
*/
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}
int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}
uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}
uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}
objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}
enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}
/*
* Reset pool scan stat per scan pass (or reboot).
*/
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}
/*
* Get scan stats for zpool status reports
*/
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}
boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}
int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}
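/*
 * Usage sketch (editor's illustration, not from the original file): callers
 * validating a requested block size would do something like
 *
 *	if (size > spa_maxblocksize(spa))
 *		return (SET_ERROR(EDOM));
 *
 * i.e. pools without the large_blocks feature stay capped at the old
 * 128K limit (SPA_OLD_MAXBLOCKSIZE).
 */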