libzfs_status.c revision c25309d42d46e04be84e0dbadaf3c9ab3369ad05
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool, as well as the status of
 * exported pools.  Returns one of the ZPOOL_STATUS_* defines describing the
 * status of the pool.  This status is independent (to a certain degree) from
 * the state of the pool.  A pool's state describes only whether or not it is
 * capable of providing the necessary fault tolerance for data.  The status
 * describes the overall status of devices.  A pool that is online can still
 * have a device that is experiencing errors.
*
* Only a subset of the possible faults can be detected using 'zpool status',
* and not all possible errors correspond to a FMA message ID. The explanation
* is left up to the caller, depending on whether it is a live pool or an
* import.
*/
#include <libzfs.h>
#include <string.h>
#include <unistd.h>
#include "libzfs_impl.h"
/*
* Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
* in libzfs.h. Note that there are some status results which go past the end
* of this table, and hence have no associated message ID.
*/
static char *zfs_msgid_table[] = {
"ZFS-8000-14",
"ZFS-8000-2Q",
"ZFS-8000-3C",
"ZFS-8000-4J",
"ZFS-8000-5E",
"ZFS-8000-6X",
"ZFS-8000-72",
"ZFS-8000-8A",
"ZFS-8000-9P",
"ZFS-8000-A5",
"ZFS-8000-EY",
"ZFS-8000-HC",
"ZFS-8000-JQ",
"ZFS-8000-K4",
};

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
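/*
 * Illustrative sketch (hypothetical variable names): because the table above
 * parallels the ZPOOL_STATUS_* enum, a status value maps to its knowledge
 * article with a simple bounds-checked lookup:
 *
 *	zpool_status_t status = ...;
 *	char *msgid = (status < NMSGID) ? zfs_msgid_table[status] : NULL;
 *
 * This is the same lookup performed by zpool_get_status() and
 * zpool_import_status() at the bottom of this file.
 */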
/* ARGSUSED */
static int
vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN &&
	    aux == VDEV_AUX_OPEN_FAILED);
}

/* ARGSUSED */
static int
vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_FAULTED);
}

/* ARGSUSED */
static int
vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_DEGRADED || errs != 0);
}

/* ARGSUSED */
static int
vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN);
}

/* ARGSUSED */
static int
vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_OFFLINE);
}

/* ARGSUSED */
static int
vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_REMOVED);
}

/*
 * Detect if any leaf devices have seen errors or could not be opened.
*/
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children;
	char *type;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call
	 * them out again.  We'll pick up the fact that a resilver is happening
	 * later.
	 */
	verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
		return (B_FALSE);

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &c) == 0);

		if (func(vs->vs_state, vs->vs_aux,
		    vs->vs_read_errors +
		    vs->vs_write_errors +
		    vs->vs_checksum_errors))
			return (B_TRUE);
	}

	return (B_FALSE);
}

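/*
 * Usage sketch (hypothetical caller): each vdev_*() predicate above is paired
 * with find_vdev_problem() to scan the entire vdev tree, for example:
 *
 *	if (find_vdev_problem(nvroot, vdev_missing))
 *		... at least one leaf vdev could not be opened ...
 *
 * check_status() below applies these pairings in priority order.
 */
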
/*
* Active pool health status.
*
* To determine the status for a pool, we make several passes over the config,
* picking the most egregious error we find. In order of importance, we do the
* following:
*
* - Check for a complete and valid configuration
* - Look for any faulted or missing devices in a non-replicated config
* - Check for any data errors
* - Check for any faulted or missing devices in a replicated config
* - Look for any devices showing errors
* - Check for any resilvering devices
*
* There can obviously be multiple errors within a single pool, so this routine
* only picks the most damaging of all the current errors to report.
*/
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);

	/*
	 * Pool last accessed by another system.
	 */
	if (hostid != 0 && (unsigned long)hostid != gethostid() &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has suspended due to failed I/O.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Currently resilvering
	 */
	if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Outdated, but usable, version
	 */
	if (version < SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid)
{
	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid)
{
	zpool_status_t ret = check_status(config, B_TRUE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}
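
/*
 * Illustrative sketch (compiled out, hypothetical): one way a caller might
 * consume zpool_get_status().  The pool name "tank", the output format, and
 * the <stdio.h> dependency are assumptions for the example only.
 */
#if 0
#include <stdio.h>

static void
example_print_status(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	zpool_status_t status;
	char *msgid;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;

	status = zpool_get_status(zhp, &msgid);
	if (status == ZPOOL_STATUS_OK) {
		(void) printf("pool 'tank' is healthy\n");
	} else if (msgid != NULL) {
		/* statuses with a message ID map to an FMA knowledge article */
		(void) printf("status %d, see article %s\n", (int)status,
		    msgid);
	} else {
		/* statuses past the end of zfs_msgid_table have no article */
		(void) printf("status %d (no message ID)\n", (int)status);
	}

	zpool_close(zhp);
}
#endif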