/* vdev.c revision c39a2aae1e2c439d156021edfc20910dad7f9891 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
/*
* Virtual device management.
*/
static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	NULL
};
int zfs_scrub_limit = 10;
/*
* When a vdev is added, it will be divided into approximately (but no
* more than) this number of metaslabs.
*/
int metaslabs_per_vdev = 200;
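/*
 * Illustrative sketch (not in the original source): the metaslab shift
 * that vdev_metaslab_set_size() below derives from this tunable.  For a
 * 3 TiB top-level vdev, asize / 200 is about 16 GiB, so highbit64()
 * yields a shift of 34 and the vdev ends up with 3 TiB / 16 GiB = 192
 * metaslabs.  'example_metaslab_shift' is a hypothetical name.
 */
static inline uint64_t
example_metaslab_shift(uint64_t asize)
{
	uint64_t shift = highbit64(asize / metaslabs_per_vdev);

	/* never smaller than one maximum-sized block */
	return (MAX(shift, SPA_MAXBLOCKSHIFT));
}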
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);

	for (int c = 0; c < vd->vdev_children; c++) {
		uint64_t csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded to the nearest metaslab. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
		    pvd->vdev_children);

	return (pvd->vdev_min_asize);
}
void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	if (vdev < rvd->vdev_children)
		return (rvd->vdev_child[vdev]);

	return (NULL);
}
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}
static int
vdev_count_leaves_impl(vdev_t *vd)
{
	int n = 0;

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (int c = 0; c < vd->vdev_children; c++)
		n += vdev_count_leaves_impl(vd->vdev_child[c]);

	return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
	return (vdev_count_leaves_impl(spa->spa_root_vdev));
}
void
{
return;
}
/*
* Walk up all ancestors to update guid sum.
*/
}
void
{
int c;
return;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
pvd->vdev_children = 0;
}
/*
* Walk up all ancestors to update guid sum.
*/
}
/*
* Remove any holes in the child array.
*/
void
{
int newc;
if (pvd->vdev_child[c])
newc++;
}
}
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
{
}
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
}
}
for (int t = 0; t < DTL_TYPES; t++) {
&vd->vdev_dtl_lock);
}
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
int alloctype)
{
char *type;
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
} else if (alloctype == VDEV_ALLOC_SPARE) {
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
}
/*
* The first allocated vdev must be of type 'root'.
*/
/*
* Determine whether we're a log vdev.
*/
islog = 0;
/*
* Set the nparity property for RAID-Z vdevs.
*/
nparity = -1ULL;
if (ops == &vdev_raidz_ops) {
&nparity) == 0) {
/*
* Previous versions could only support 1 or 2 parity
* device.
*/
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (SET_ERROR(ENOTSUP));
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (SET_ERROR(ENOTSUP));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
nparity = 1;
}
} else {
nparity = 0;
}
&vd->vdev_physpath) == 0)
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
&vd->vdev_wholedisk) != 0)
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
&vd->vdev_not_present);
/*
* Get the alignment requirement.
*/
/*
* Retrieve the vdev creation time.
*/
&vd->vdev_crtxg);
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
&vd->vdev_ms_array);
&vd->vdev_ms_shift);
&vd->vdev_asize);
&vd->vdev_removing);
&vd->vdev_top_zap);
} else {
}
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
}
(void) nvlist_lookup_uint64(nv,
} else {
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
&vd->vdev_dtl_object);
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
}
&vd->vdev_offline);
&vd->vdev_resilver_txg);
/*
* When importing a pool, we want to ignore the persistent fault
* state, as the diagnosis made on another system may not be
* valid in the current context. Local vdevs will
* remain in the faulted state.
*/
&vd->vdev_faulted);
&vd->vdev_degraded);
&vd->vdev_removed);
char *aux;
			vd->vdev_label_aux = VDEV_AUX_ERR_EXCEEDED;
			if (nvlist_lookup_string(nv,
			    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
			    strcmp(aux, "external") == 0)
				vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
		}
}
}
/*
* Add ourselves to the parent's list of children.
*/
return (0);
}
void
{
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
/*
* Discard allocation state.
*/
}
/*
* Remove this vdev from its parent's child list.
*/
/*
* Clean up vdev structure.
*/
if (vd->vdev_devid)
if (vd->vdev_physpath)
if (vd->vdev_isspare)
if (vd->vdev_isl2cache)
for (int t = 0; t < DTL_TYPES; t++) {
}
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
{
int t;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
for (t = 0; t < TXG_SIZE; t++) {
}
}
}
svd->vdev_deflate_ratio = 0;
svd->vdev_islog = 0;
}
static void
{
return;
for (int c = 0; c < vd->vdev_children; c++)
}
/*
 * Materialize a new vdev between the original vdev and its parent.
 */
vdev_t *
{
return (mvd);
}
/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
{
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
}
}
int
{
uint64_t m;
metaslab_t **mspp;
int error;
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
/*
* Compute the raidz-deflation ratio. Note, we hard-code
* in 128k (1 << 17) because it is the "typical" blocksize.
* Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
* otherwise it would inconsistently account for existing bp's.
*/
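	/*
	 * A sketch consistent with the comment above, using
	 * vdev_psize_to_asize() as the per-vdev psize-to-asize conversion:
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);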
if (oldc != 0) {
}
if (txg == 0) {
if (error)
return (error);
}
if (error)
return (error);
}
if (txg == 0)
/*
* If the vdev is being removed we don't activate
* the metaslabs since we want to ensure that no new
* allocations are performed on this device.
*/
if (txg == 0)
return (0);
}
void
{
uint64_t m;
for (m = 0; m < count; m++) {
}
}
}
typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;
static void
{
} else {
}
if (vdev_readable(vd) &&
} else {
}
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
{
/*
* Don't probe the probe.
*/
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
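			/* A sketch of the reset the comment describes: */
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;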
}
/*
* We can't change the vdev state in this context, so we
* kick off an async task to do it on our behalf.
*/
}
}
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
}
return (pio);
return (NULL);
}
static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}
boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}
void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * in order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock
	 */
	if (vdev_uses_zvols(vd)) {
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
		return;
	}

	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != NULL);

	taskq_destroy(tq);
}
/*
* Prepare a virtual device for access.
*/
int
{
int error;
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
vd->vdev_label_aux);
} else if (vd->vdev_offline) {
}
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
if (zio_injection_enabled && error == 0)
if (error) {
if (vd->vdev_removed &&
return (error);
}
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
vd->vdev_label_aux);
}
if (vd->vdev_degraded) {
} else {
}
/*
* For hole or missing vdevs we just return success.
*/
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
break;
}
}
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
}
} else {
}
psize = 0;
}
/*
* Make sure the allocatable size hasn't shrunk.
*/
}
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For testing purposes, a higher ashift can be requested.
*/
} else {
/*
* Detect if the alignment requirement has increased.
* We don't want to make the pool unavailable, just
* issue a warning instead.
		 */
		cmn_err(CE_WARN,
		    "Disk, '%s', has a block alignment that is "
		    "larger than the pool's alignment\n",
		    vd->vdev_path);
	}
}
/*
* If all children are healthy and the asize has increased,
* then we've experienced dynamic LUN growth. If automatic
* expansion is enabled then use the additional space.
*/
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
return (error);
}
/*
* Track the min and max ashift values for normal data devices.
*/
}
/*
* If a leaf vdev has a DTL, and seems healthy, then kick off a
* resilver. But don't do this if we are doing a reopen for a scrub,
* since this would just restart the scrub we are already doing.
*/
return (0);
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* If 'strict' is false ignore the spa guid check. This is necessary because
* if the machine crashed during a re-guid the new guid might have been written
* to all of the vdev labels, but not the cached config. The strict check
* will be performed when the pool is opened again using the mos config.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
{
for (int c = 0; c < vd->vdev_children; c++)
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
return (0);
}
ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
return (0);
}
&aux_guid) != 0)
aux_guid = 0;
/*
* If this vdev just became a top-level vdev because its
* sibling was detached, it will have adopted the parent's
* vdev guid -- but the label may or may not be on disk yet.
* Fortunately, either version of the label will have the
* same top guid, so if we're a top-level vdev, we can
* safely compare to that instead.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
&guid) != 0 ||
&top_guid) != 0 ||
return (0);
}
&state) != 0) {
return (0);
}
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
}
return (0);
}
/*
* Close a virtual device.
*/
void
{
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
if (vd->vdev_offline)
else
}
void
{
return;
for (int c = 0; c < vd->vdev_children; c++)
}
void
{
for (int c = 0; c < vd->vdev_children; c++)
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
{
/* set the reopening flag unless we're taking the vdev offline */
vdev_close(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
(void) vdev_validate_aux(vd);
} else {
}
/*
* Reassess parent vdev's health.
*/
}
int
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
vdev_close(vd);
}
/*
* Recursively load DTLs and initialize all labels.
*/
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
	/*
	 * Aim for roughly metaslabs_per_vdev (default 200) metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit64(vd->vdev_asize / metaslabs_per_vdev);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}
void
{
if (flags & VDD_METASLAB)
}
void
{
for (int c = 0; c < vd->vdev_children; c++)
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in 'maxfaults' or more children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
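/*
 * Illustrative sketch (not in the original source): the parent
 * DTL_MISSING rule described above, over plain 64-txg bitmaps rather
 * than range trees.  A txg is missing from the parent when at least
 * 'minref' children are missing it (nparity + 1 for RAID-Z, all
 * children for a mirror).  All names here are hypothetical.
 */
static uint64_t
example_parent_missing(const uint64_t *child_missing, int children,
    int minref)
{
	uint64_t parent = 0;

	for (int txg = 0; txg < 64; txg++) {
		int refs = 0;
		for (int c = 0; c < children; c++)
			if (child_missing[c] & (1ULL << txg))
				refs++;
		if (refs >= minref)
			parent |= (1ULL << txg);
	}

	return (parent);
}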
void
{
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	range_tree_t *rt = vd->vdev_dtl[t];
	boolean_t dirty = B_FALSE;

	mutex_enter(rt->rt_lock);
	if (range_tree_space(rt) != 0)
		dirty = range_tree_contains(rt, txg, size);
	mutex_exit(rt->rt_lock);

	return (dirty);
}

boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
	range_tree_t *rt = vd->vdev_dtl[t];
	boolean_t empty;

	mutex_enter(rt->rt_lock);
	empty = (range_tree_space(rt) == 0);
	mutex_exit(rt->rt_lock);

	return (empty);
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
	range_seg_t *rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);

	return (rs->rs_start - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
	range_seg_t *rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);

	return (rs->rs_end);
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;

	if (vd->vdev_resilver_txg == 0 ||
	    range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0)
		return (B_TRUE);
/*
* When a resilver is initiated the scan will assign the scn_max_txg
* value to the highest txg value that exists in all DTLs. If this
* device's max DTL is not part of this scan (i.e. it is not in
* the range (scn_min_txg, scn_max_txg] then it is not eligible
* for excision.
*/
	if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg)
		return (B_TRUE);
return (B_FALSE);
}
/*
* Reassess DTLs after a config change or scrub completion.
*/
void
{
int minref;
for (int c = 0; c < vd->vdev_children; c++)
return;
/*
* If we've completed a scan cleanly then determine
* if this vdev should remove any DTLs. We only want to
* excise regions on vdevs that were available during
* the entire duration of this scan.
*/
if (scrub_txg != 0 &&
(spa->spa_scrub_started ||
/*
* We completed a scrub up to scrub_txg. If we
* did it without rebooting, then the scrub dtl
* will be valid, so excise the old region and
* fold in the scrub dtl. Otherwise, leave the
* dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
*/
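			/*
			 * A sketch of that excision, assuming the
			 * space_reftree interfaces from space_reftree.c and
			 * an avl_tree_t 'reftree' declared at function scope:
			 */
			space_reftree_create(&reftree);
			space_reftree_add_map(&reftree,
			    vd->vdev_dtl[DTL_MISSING], 1);
			space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
			space_reftree_add_map(&reftree,
			    vd->vdev_dtl[DTL_SCRUB], 2);
			space_reftree_generate_map(&reftree,
			    vd->vdev_dtl[DTL_MISSING], 1);
			space_reftree_destroy(&reftree);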
}
if (scrub_done)
if (!vdev_readable(vd))
else
/*
* If the vdev was resilvering and no longer has any
* DTLs then reset its resilvering flag.
*/
if (vd->vdev_resilver_txg != 0 &&
vd->vdev_resilver_txg = 0;
if (txg != 0)
return;
}
	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		/* account for child's outage in parent's missing map */
		int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
		if (t == DTL_SCRUB)
			continue;			/* leaf vdevs only */
		if (t == DTL_PARTIAL)
			minref = 1;			/* i.e. non-zero */
		else if (vd->vdev_nparity != 0)
			minref = vd->vdev_nparity + 1;	/* RAID-Z */
		else
			minref = vd->vdev_children;	/* any kind of mirror */
		space_reftree_create(&reftree);
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];
			mutex_enter(&cvd->vdev_dtl_lock);
			space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
			mutex_exit(&cvd->vdev_dtl_lock);
		}
		space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
		space_reftree_destroy(&reftree);
	}
	mutex_exit(&vd->vdev_dtl_lock);
}
int
{
int error = 0;
if (error)
return (error);
/*
* Now that we've opened the space_map we need to update
* the in-core DTL.
*/
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
if (error != 0)
break;
}
return (error);
}
void
{
}
{
DMU_OT_NONE, 0, tx);
return (zap);
}
void
{
}
}
}
}
}
void
{
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
vd->vdev_leaf_zap = 0;
}
return;
}
VERIFY3U(new_object, !=, 0);
}
mutex_exit(&rtlock);
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
		zfs_dbgmsg("txg %llu, spa %s, DTL old object %llu, "
		    "new object %llu", txg, spa_name(spa), object,
		    space_map_object(vd->vdev_dtl_sm));
}
}
/*
* Determine whether the specified vdev can be offlined/detached/removed
* without losing data.
*/
{
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
*/
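	/*
	 * A sketch of that probe, assuming 'tvd' is the top-level vdev,
	 * 'cant_read' saved the original flag, and 'required' holds the
	 * boolean_t result (declarations elided above):
	 */
	vd->vdev_cant_read = B_TRUE;
	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
	required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
	vd->vdev_cant_read = cant_read;
	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);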
if (!required && zio_injection_enabled)
return (required);
}
/*
* Determine if resilver is needed, and if so the txg range.
*/
{
if (vd->vdev_children == 0) {
vdev_writeable(vd)) {
}
} else {
for (int c = 0; c < vd->vdev_children; c++) {
}
}
}
}
return (needed);
}
void
{
/*
* Recursively load all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
vdev_metaslab_init(vd, 0) != 0))
/*
* If this is a leaf vdev, load its DTL.
*/
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
{
if (!vdev_readable(vd))
return (0);
return (-1);
}
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
return (0);
}
void
{
for (int m = 0; m < vd->vdev_ms_count; m++) {
continue;
/*
* If the metaslab was not loaded when the vdev
* was removed then the histogram accounting may
* not be accurate. Update the histogram information
* here so that we ensure that the metaslab group
* and metaslab class are up-to-date.
*/
}
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
}
if (vd->vdev_ms_array) {
vd->vdev_ms_array = 0;
}
vd->vdev_top_zap = 0;
}
}
void
{
if (reassess)
}
void
{
}
/*
* Remove the metadata associated with this vdev once it's empty.
*/
}
}
{
}
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
{
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
/*
* Faulted state takes precedence over degraded.
*/
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
if (vdev_readable(vd))
}
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
{
/*
* If the vdev is already faulted, then don't do anything.
*/
if (!vdev_is_dead(vd))
aux);
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
{
/* XXX - L2ARC 1.0 does not support expansion */
}
}
	if (newstate)
		*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
/* XXX - L2ARC 1.0 does not support expansion */
}
if (postevent)
}
static int
{
int error = 0;
top:
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since it's possible that we may have just
* added this vdev but not yet initialized its metaslabs.
*/
/*
* Prevent any future allocations.
*/
/*
* Check to see if the config has changed.
*/
if (error)
return (spa_vdev_state_exit(spa,
goto top;
}
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vdev_is_dead(tvd)) {
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
}
}
int
{
int error;
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
{
for (int c = 0; c < vd->vdev_children; c++)
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
/*
* When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we still want to post the ereport again.
*/
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original sparing was
* done in response to the FMA fault.
*/
}
boolean_t
vdev_is_dead(vdev_t *vd)
{
	/*
	 * Holes and missing devices are always considered "dead".
	 * This simplifies the code since we don't have to check for
	 * these types of devices in the various code paths.
	 * Instead we rely on the fact that we skip over dead devices
	 * before issuing I/O to them.
	 */
	return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
	    vd->vdev_ops == &vdev_missing_ops);
}

boolean_t
vdev_readable(vdev_t *vd)
{
	return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}

boolean_t
vdev_writeable(vdev_t *vd)
{
	return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
	uint64_t state = vd->vdev_state;

	/*
	 * We currently allow allocations from vdevs which may be in the
	 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
	 * fails to reopen then we'll catch it later when we're holding
	 * the proper locks. Note that we have to get the vdev state
	 * in a local variable because although it changes atomically,
	 * we're asking two separate questions about it.
	 */
	return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
	    !vd->vdev_cant_write && !vd->vdev_ishole);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
	if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
		return (B_FALSE);
	if (zio->io_type == ZIO_TYPE_READ)
		return (!vd->vdev_cant_read);
	if (zio->io_type == ZIO_TYPE_WRITE)
		return (!vd->vdev_cant_write);
	return (B_TRUE);
}
/*
* Get statistics for the given vdev.
*/
void
{
/*
* Report expandable space on top-level, non-auxiliary devices only.
* The expandable space is reported in terms of metaslab sized units
* since that determines how much space the pool can expand.
*/
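	/*
	 * A sketch of the rounding described above, assuming 'tvd' is the
	 * top-level vdev being reported on and 'vs' the stat structure:
	 */
	vs->vs_esize = P2ALIGN(vd->vdev_max_asize - vd->vdev_asize,
	    1ULL << tvd->vdev_ms_shift);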
}
}
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
for (int c = 0; c < rvd->vdev_children; c++) {
for (int t = 0; t < ZIO_TYPES; t++) {
}
}
}
}
void
{
}
void
{
for (int c = 0; c < vd->vdev_children; c++)
vs->vs_scan_processed = 0;
}
void
{
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
return;
if (flags & ZIO_FLAG_IO_BYPASS)
return;
if (flags & ZIO_FLAG_IO_REPAIR) {
if (flags & ZIO_FLAG_SCAN_THREAD) {
/* XXX cleanup? */
}
if (flags & ZIO_FLAG_SELF_HEAL)
}
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
return;
/*
* Intent log writes won't propagate their error to the root
* I/O so don't mark these types of failures as pool-level
* errors.
*/
return;
	mutex_enter(&vd->vdev_stat_lock);
	if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
		if (zio->io_error == ECKSUM)
			vs->vs_checksum_errors++;
		else
			vs->vs_read_errors++;
	}
	if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
		vs->vs_write_errors++;
	mutex_exit(&vd->vdev_stat_lock);

	if (type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
*/
		uint64_t commit_txg = txg;
		if (flags & ZIO_FLAG_SCAN_THREAD) {
			/* scrubs run in first-pass syncing context */
			vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
			commit_txg = spa_syncing_txg(spa);
		} else if (spa->spa_claiming) {
			commit_txg = spa_first_txg(spa);
		}
		/* the elided code commits the DTL change in commit_txg */
return;
}
}
}
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
{
/*
* Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
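	/*
	 * A sketch of that deflation, assuming 'dspace_delta' is the raw
	 * space delta being applied to this vdev:
	 */
	dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
	    vd->vdev_deflate_ratio;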
}
}
}
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
{
int c;
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
break;
}
/*
* We're being removed. There's nothing more to do.
*/
return;
}
}
/*
* Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
for (c = 0; c < rvd->vdev_children; c++)
} else {
!vd->vdev_ishole)
}
}
void
{
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
{
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
}
void
{
}
/*
* Propagate vdev state up from children to parent.
*/
void
{
int corrupted = 0;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
/*
* Don't factor holes into the decision.
*/
if (child->vdev_ishole)
continue;
if (!vdev_readable(child) ||
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
degraded++;
else
faulted++;
degraded++;
}
corrupted++;
}
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
}
if (vd->vdev_parent)
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
{
return;
}
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
/*
* If we have brought this vdev back into service, we need
* to notify fmd so that it can gracefully repair any outstanding
* cases due to a missing device. We do this in all cases, even those
* that probably don't correlate to a repaired fault. This is sure to
* catch all cases, and we let the zfs-retire agent sort it out. If
* this is a transient state it's OK, as the retire agent will
* double-check the state of the vdev before repairing it.
*/
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. We also let this be if
* it is one of our special test online cases, which is only
* attempting to online the device and shouldn't generate an FMA
* fault.
*/
} else if (state == VDEV_STATE_REMOVED) {
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
break;
case VDEV_AUX_CORRUPT_DATA:
break;
case VDEV_AUX_NO_REPLICAS:
break;
case VDEV_AUX_BAD_GUID_SUM:
break;
case VDEV_AUX_TOO_SMALL:
break;
case VDEV_AUX_BAD_LABEL:
break;
default:
}
}
/* Erase any notion of persistent removed state */
} else {
}
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. Currently, we do not support RAID-Z or partial configuration.
* In addition, only a single top-level vdev is allowed and none of the leaves
* can be wholedisks.
*/
{
return (B_FALSE);
return (B_FALSE);
}
}
for (int c = 0; c < vd->vdev_children; c++) {
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Load the state from the original vdev tree (ovd) which
* we've retrieved from the MOS config object. If the original
* vdev was offline or faulted then we transfer that state to the
* device in the current vdev tree (nvd).
*/
void
{
for (int c = 0; c < nvd->vdev_children; c++)
/*
* Restore the persistent vdev state
*/
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
{
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
{
}
}
/*
* Split a vdev.
*/
void
{
}
}
void
{
for (int c = 0; c < vd->vdev_children; c++) {
}
/*
* Look at the head of all the pending queues;
* if any I/O has been outstanding for longer than
* the spa_deadman_synctime, we panic the system.
*/
			fio = avl_first(&vq->vq_active_tree);
			delta = gethrtime() - fio->io_timestamp;
			if (delta > spa_deadman_synctime(spa)) {
				zfs_dbgmsg("SLOW IO: zio timestamp %lluns, "
				    "delta %lluns, last io %lluns",
				    fio->io_timestamp, delta,
				    vq->vq_io_complete_ts);
				fm_panic("I/O to pool '%s' appears to be "
				    "hung.", spa_name(spa));
			}
}
}
}