/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* The ZFS retire agent is responsible for managing hot spares across all pools.
* When we see a device fault or a device removal, we try to open the associated
* pool and look for any hot spares. We iterate over any available hot spares
* and attempt a 'zpool replace' for each one.
*
* For vdevs diagnosed as faulty, the agent is also responsible for proactively
* marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
*/
#include <fm/fmd_api.h>
#include <fm/libtopo.h>
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <libzfs.h>
#include <stdlib.h>
#include <string.h>
typedef struct zfs_retire_repaired {
	struct zfs_retire_repaired	*zrr_next;
	uint64_t			zrr_pool;
	uint64_t			zrr_vdev;
} zfs_retire_repaired_t;

typedef struct zfs_retire_data {
	libzfs_handle_t			*zrd_hdl;
	zfs_retire_repaired_t		*zrd_repaired;
} zfs_retire_data_t;

/*
 * Free the list of repair attempts recorded against this module.
 */
static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
	zfs_retire_repaired_t *zrp;

	while ((zrp = zdp->zrd_repaired) != NULL) {
		zdp->zrd_repaired = zrp->zrr_next;
		fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
	}
}
/*
* Find a pool with a matching GUID.
*/
typedef struct find_cbdata {
	uint64_t	cb_guid;
	const char	*cb_fru;
	zpool_handle_t	*cb_zhp;
	nvlist_t	*cb_vdev;
} find_cbdata_t;

static int
find_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;

	if (cbp->cb_guid ==
	    zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}
/*
* Find a vdev within a tree with a matching GUID.
*/
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, const char *search_fru,
    uint64_t search_guid)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *ret;
	char *fru;

	if (search_fru != NULL) {
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &fru) == 0 &&
		    libzfs_fru_compare(zhdl, fru, search_fru))
			return (nv);
	} else {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
		    guid == search_guid)
			return (nv);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_fru,
		    search_guid)) != NULL)
			return (ret);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_fru,
		    search_guid)) != NULL)
			return (ret);
	}

	return (NULL);
}
/*
* Given a (pool, vdev) GUID pair, find the matching pool and vdev.
*/
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
    nvlist_t **vdevp)
{
	find_cbdata_t cb;
	zpool_handle_t *zhp;
	nvlist_t *config, *nvroot;

	/*
	 * Find the corresponding pool and make sure the vdev still exists.
	 */
	cb.cb_guid = pool_guid;
	if (zpool_iter(zhdl, find_pool, &cb) != 1)
		return (NULL);

	zhp = cb.cb_zhp;
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (vdev_guid != 0) {
		if ((*vdevp = find_vdev(zhdl, nvroot, NULL,
		    vdev_guid)) == NULL) {
			zpool_close(zhp);
			return (NULL);
		}
	}

	return (zhp);
}
static int
search_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;
	nvlist_t *config;
	nvlist_t *nvroot;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (0);
	}

	if ((cbp->cb_vdev = find_vdev(zpool_get_handle(zhp), nvroot,
	    cbp->cb_fru, 0)) != NULL) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}
/*
* Given a FRU FMRI, find the matching pool and vdev.
*/
static zpool_handle_t *
find_by_fru(libzfs_handle_t *zhdl, const char *fru, nvlist_t **vdevp)
{
	find_cbdata_t cb;

	cb.cb_fru = fru;
	cb.cb_zhp = NULL;
	if (zpool_iter(zhdl, search_pool, &cb) != 1)
		return (NULL);

	*vdevp = cb.cb_vdev;
	return (cb.cb_zhp);
}
/*
* Given a vdev, attempt to replace it with every known spare until one
* succeeds.
*/
static void
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
	nvlist_t *config, *nvroot, *replacement;
	nvlist_t **spares;
	uint_t s, nspares;
	char *dev_name;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0)
		return;
/*
* Find out if there are any hot spares available in the pool.
*/
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return;
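	/*
	 * Build the root of the replacement config handed to
	 * zpool_vdev_attach() in the loop below, and resolve the name of the
	 * device being replaced.
	 */
	replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT);

	dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);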
/*
* Try to replace each spare, ending when we successfully
* replace it.
*/
	for (s = 0; s < nspares; s++) {
		char *spare_name;

		if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
		    &spare_name) != 0)
			continue;

		(void) nvlist_add_nvlist_array(replacement,
		    ZPOOL_CONFIG_CHILDREN, &spares[s], 1);

		if (zpool_vdev_attach(zhp, dev_name, spare_name,
		    replacement, B_TRUE) == 0)
			break;
	}

	free(dev_name);
	nvlist_free(replacement);
}
/*
* Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
* ASRU is now usable. ZFS has found the device to be present and
* functioning.
*/
/*ARGSUSED*/
void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	zfs_retire_repaired_t *zrp;
	uint64_t pool_guid, vdev_guid;
	nvlist_t *asru;

	if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
	    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
		return;
/*
* Before checking the state of the ASRU, go through and see if we've
* already made an attempt to repair this ASRU. This list is cleared
* whenever we receive any kind of list event, and is designed to
* prevent us from generating a feedback loop when we attempt repairs
* against a faulted pool. The problem is that checking the unusable
* state of the ASRU can involve opening the pool, which can post
* statechange events but otherwise leave the pool in the faulted
* state. This list allows us to detect when a statechange event is
* due to our own request.
*/
	for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
		if (zrp->zrr_pool == pool_guid &&
		    zrp->zrr_vdev == vdev_guid)
			return;
	}
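	/*
	 * Record this repair attempt so that any statechange events it
	 * provokes are recognized as our own, then construct a zfs-scheme
	 * FMRI for the vdev's ASRU.
	 */
	zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
	zrp->zrr_next = zdp->zrd_repaired;
	zrp->zrr_pool = pool_guid;
	zrp->zrr_vdev = vdev_guid;
	zdp->zrd_repaired = zrp;

	asru = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_uint8(asru, FM_VERSION, FM_ZFS_SCHEME_VERSION);
	(void) nvlist_add_string(asru, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS);
	(void) nvlist_add_uint64(asru, FM_FMRI_ZFS_POOL, pool_guid);
	(void) nvlist_add_uint64(asru, FM_FMRI_ZFS_VDEV, vdev_guid);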
/*
* We explicitly check for the unusable state here to make sure we
* aren't responding to a transient state change. As part of opening a
* vdev, it's possible to see the 'statechange' event, only to be
* followed by a vdev failure later. If we don't check the current
* state of the vdev (or pool) before marking it repaired, then we risk
* generating spurious repair events followed immediately by the same
* diagnosis.
*
* This assumes that the ZFS scheme code associated unusable (i.e.
* isolated) with its own definition of faulty state. In the case of a
* DEGRADED leaf vdev (due to checksum errors), this is not the case.
* This works, however, because the transient state change is not
* posted in this case. This could be made more explicit by not
* relying on the scheme's unusable callback and instead directly
* checking the vdev state, where we could correctly account for
* DEGRADED state.
*/
	if (fmd_nvl_fmri_has_fault(hdl, asru, FMD_HAS_FAULT_ASRU, NULL) &&
	    !fmd_nvl_fmri_unusable(hdl, asru)) {
		topo_hdl_t *thp;
		char *fmri = NULL;
		int err;

		thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
		if (topo_fmri_nvl2str(thp, asru, &fmri, &err) == 0) {
			(void) fmd_repair_asru(hdl, fmri);
			topo_hdl_strfree(thp, fmri);
		}
		fmd_hdl_topo_rele(hdl, thp);
	}

	nvlist_free(asru);
}
/*ARGSUSED*/
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	libzfs_handle_t *zhdl = zdp->zrd_hdl;
	uint64_t pool_guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *resource, *fault, *fru, *vdev;
	nvlist_t **faults;
	uint_t f, nfaults;
	boolean_t fault_device, degrade_device;
	boolean_t is_repair, is_disk, retire;
	char *scheme, *fmri;
	vdev_aux_t aux;
	topo_hdl_t *thp;
	char *uuid;
	int repair_done = 0;
	int err;
/*
* If this is a resource notifying us of device removal, then simply
* check for an available spare and continue.
*/
	if (strcmp(class, "resource.fs.zfs.removed") == 0) {
		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
		    &pool_guid) != 0 ||
		    nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    &vdev_guid) != 0)
			return;

		if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
		    &vdev)) == NULL)
			return;

		replace_with_spare(hdl, zhp, vdev);
		zpool_close(zhp);
		return;
	}

	if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
		return;

	if (strcmp(class, "resource.fs.zfs.statechange") == 0 ||
	    strcmp(class,
	    "resource.sysevent.EC_zfs.ESC_ZFS_vdev_remove") == 0) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}

	zfs_retire_clear_data(hdl, zdp);

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
		is_repair = B_TRUE;
	else
		is_repair = B_FALSE;
/*
* We subscribe to zfs faults as well as all repair events.
*/
	if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
	    &faults, &nfaults) != 0)
		return;
for (f = 0; f < nfaults; f++) {
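		/*
		 * Reset per-suspect state before classifying this fault.
		 */
		fault = faults[f];

		fault_device = B_FALSE;
		degrade_device = B_FALSE;
		is_disk = B_FALSE;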
		if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
		    &retire) == 0 && retire == 0)
			continue;
/*
* While we subscribe to fault.fs.zfs.*, we only take action
* for faults targeting a specific vdev (open failure or SERD
* failure). We also subscribe to fault.io.* events, so that
* faulty disks will be faulted in the ZFS configuration.
*/
"fault.fs.zfs.vdev.checksum")) {
"fault.fs.zfs.device")) {
} else {
continue;
}
if (is_disk) {
/*
* This is a disk fault. Lookup the FRU, convert it to
* an FMRI string, and attempt to find a matching vdev.
*/
			if (nvlist_lookup_nvlist(fault, FM_FAULT_FRU,
			    &fru) != 0 ||
			    nvlist_lookup_string(fru, FM_FMRI_SCHEME,
			    &scheme) != 0)
				continue;

			if (strcmp(scheme, FM_FMRI_SCHEME_HC) != 0)
				continue;

			thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
			if (topo_fmri_nvl2str(thp, fru, &fmri, &err) != 0) {
				fmd_hdl_topo_rele(hdl, thp);
				continue;
			}

			zhp = find_by_fru(zhdl, fmri, &vdev);
			topo_hdl_strfree(thp, fmri);
			fmd_hdl_topo_rele(hdl, thp);

			if (zhp == NULL)
				continue;

			(void) nvlist_lookup_uint64(vdev,
			    ZPOOL_CONFIG_GUID, &vdev_guid);
			aux = VDEV_AUX_EXTERNAL;
} else {
/*
* This is a ZFS fault. Lookup the resource, and
* attempt to find the matching vdev.
*/
			if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
			    &resource) != 0 ||
			    nvlist_lookup_string(resource, FM_FMRI_SCHEME,
			    &scheme) != 0)
				continue;

			if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
			    &pool_guid) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
			    &vdev_guid) != 0) {
				if (is_repair)
					vdev_guid = 0;
				else
					continue;
			}

			if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
			    &vdev)) == NULL)
				continue;

			aux = VDEV_AUX_ERR_EXCEEDED;
		}
if (vdev_guid == 0) {
/*
* For pool-level repair events, clear the entire pool.
*/
			(void) zpool_clear(zhp, NULL, NULL);
			zpool_close(zhp);
			continue;
}
/*
* If this is a repair event, then mark the vdev as repaired and
* continue.
*/
if (is_repair) {
			repair_done = 1;
			(void) zpool_vdev_clear(zhp, vdev_guid);
			zpool_close(zhp);
			continue;
}
/*
* Actively fault the device if needed.
*/
		if (fault_device)
			(void) zpool_vdev_fault(zhp, vdev_guid, aux);
		if (degrade_device)
			(void) zpool_vdev_degrade(zhp, vdev_guid, aux);
/*
* Attempt to substitute a hot spare.
*/
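		replace_with_spare(hdl, zhp, vdev);
		zpool_close(zhp);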
	}

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
	    nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
		fmd_case_uuresolved(hdl, uuid);
}
static const fmd_hdl_ops_t fmd_ops = {
	zfs_retire_recv,	/* fmdo_recv */
	NULL,			/* fmdo_timeout */
	NULL,			/* fmdo_close */
	NULL,			/* fmdo_stats */
	NULL,			/* fmdo_gc */
};

static const fmd_prop_t fmd_props[] = {
	{ NULL, 0, NULL }
};

static const fmd_hdl_info_t fmd_info = {
	"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};
void
_fmd_init(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp;
	libzfs_handle_t *zhdl;

	if ((zhdl = libzfs_init()) == NULL)
		return;

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		libzfs_fini(zhdl);
		return;
	}

	zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
	zdp->zrd_hdl = zhdl;

	fmd_hdl_setspecific(hdl, zdp);
}
void
_fmd_fini(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);

	if (zdp != NULL) {
		zfs_retire_clear_data(hdl, zdp);
		libzfs_fini(zdp->zrd_hdl);
		fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
	}
}