/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * The ZFS retire agent is responsible for managing hot spares across all
 * pools.  When we see a device fault or a device removal, we try to open the
 * associated pool and look for any hot spares.  We iterate over any available
 * hot spares and attempt a 'zpool replace' for each one.
 *
 * For vdevs diagnosed as faulty, the agent is also responsible for proactively
 * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
 */
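
/*
 * The code below references the module's private state, which did not
 * survive in this excerpt.  What follows is a minimal reconstruction of
 * the headers and data structures it relies on: zfs_retire_data_t holds
 * the libzfs handle plus a list of (pool, vdev) GUID pairs for which we
 * have already attempted a repair, and zfs_retire_repaired_t is one node
 * of that list.  Field names follow the zrd_/zrr_ usage visible in the
 * surviving code; treat the exact layout as an assumption.
 */
#include <fm/fmd_api.h>
#include <fm/libtopo.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fs/zfs.h>
#include <libzfs.h>
#include <stdlib.h>
#include <string.h>

typedef struct zfs_retire_repaired {
	struct zfs_retire_repaired	*zrr_next;	/* next list node */
	uint64_t			zrr_pool;	/* pool GUID */
	uint64_t			zrr_vdev;	/* vdev GUID */
} zfs_retire_repaired_t;

typedef struct zfs_retire_data {
	libzfs_handle_t			*zrd_hdl;	/* libzfs handle */
	zfs_retire_repaired_t		*zrd_repaired;	/* repair attempts */
} zfs_retire_data_t;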

/*
 * Clear out the list of prior repair attempts, freeing each node.
 */
static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
	zfs_retire_repaired_t *zrp;

	while ((zrp = zdp->zrd_repaired) != NULL) {
		zdp->zrd_repaired = zrp->zrr_next;
		fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
	}
}

/*
 * Find a pool with a matching GUID.
 */
typedef struct find_cbdata {
	uint64_t	cb_guid;
	const char	*cb_fru;
	zpool_handle_t	*cb_zhp;
	nvlist_t	*cb_vdev;
} find_cbdata_t;

static int
find_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;

	if (cbp->cb_guid ==
	    zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Find a vdev within a tree with a matching GUID or FRU.
 */
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, const char *search_fru,
    uint64_t search_guid)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *ret;
	char *fru;

	if (search_fru != NULL) {
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &fru) == 0 &&
		    libzfs_fru_compare(zhdl, fru, search_fru))
			return (nv);
	} else {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
		    guid == search_guid)
			return (nv);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = find_vdev(zhdl, child[c], search_fru,
			    search_guid)) != NULL)
				return (ret);
		}
	}

	/*
	 * Check L2ARC cache devices as well.
	 */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = find_vdev(zhdl, child[c], search_fru,
			    search_guid)) != NULL)
				return (ret);
		}
	}

	return (NULL);
}

/*
 * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
 */
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
    nvlist_t **vdevp)
{
	find_cbdata_t cb;
	zpool_handle_t *zhp;
	nvlist_t *config, *nvroot;

	/*
	 * Find the corresponding pool and make sure the vdev still exists.
	 */
	cb.cb_guid = pool_guid;
	if (zpool_iter(zhdl, find_pool, &cb) != 1)
		return (NULL);

	zhp = cb.cb_zhp;
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (vdev_guid != 0 && (*vdevp = find_vdev(zhdl, nvroot, NULL,
	    vdev_guid)) == NULL) {
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * zpool_iter() callback for find_by_fru(): search one pool's vdev tree
 * for a FRU match.
 */
static int
search_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (0);
	}

	if ((cbp->cb_vdev = find_vdev(zpool_get_handle(zhp), nvroot,
	    cbp->cb_fru, 0)) != NULL) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Given a FRU FMRI, find the matching pool and vdev.
 */
static zpool_handle_t *
find_by_fru(libzfs_handle_t *zhdl, const char *fru, nvlist_t **vdevp)
{
	find_cbdata_t cb;

	cb.cb_fru = fru;
	cb.cb_zhp = NULL;
	if (zpool_iter(zhdl, search_pool, &cb) != 1)
		return (NULL);

	*vdevp = cb.cb_vdev;
	return (cb.cb_zhp);
}

/*
 * Given a vdev, attempt to replace it with every known spare until one
 * succeeds.
 */
static void
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
	nvlist_t *config, *nvroot, *replacement;
	nvlist_t **spares;
	uint_t s, nspares;
	char *dev_name;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0)
		return;

	/*
	 * Find out if there are any hot spares available in the pool.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return;

	replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT);

	dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

	/*
	 * Try to replace each spare, ending when we successfully
	 * replace it.  Attaching with the 'replacing' flag set is the
	 * programmatic equivalent of 'zpool replace'.
	 */
	for (s = 0; s < nspares; s++) {
		char *spare_name;

		if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
		    &spare_name) != 0)
			continue;

		(void) nvlist_add_nvlist_array(replacement,
		    ZPOOL_CONFIG_CHILDREN, &spares[s], 1);

		if (zpool_vdev_attach(zhp, dev_name, spare_name,
		    replacement, B_TRUE) == 0)
			break;
	}

	free(dev_name);
	nvlist_free(replacement);
}

/*
 * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and the
 * ASRU is now usable.  ZFS has found the device to be present and
 * functioning.
 */
static void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	zfs_retire_repaired_t *zrp;
	uint64_t pool_guid, vdev_guid;
	nvlist_t *asru;

	if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
	    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
		return;

	/*
	 * Before checking the state of the ASRU, go through and see if we've
	 * already made an attempt to repair this ASRU.  This list is cleared
	 * whenever we receive any kind of list event, and is designed to
	 * prevent us from generating a feedback loop when we attempt repairs
	 * against a faulted pool.  The problem is that checking the unusable
	 * state of the ASRU can involve opening the pool, which can post
	 * statechange events but otherwise leave the pool in the faulted
	 * state.  This list allows us to detect when a statechange event is
	 * due to our own request.
	 */
	for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
		if (zrp->zrr_pool == pool_guid && zrp->zrr_vdev == vdev_guid)
			return;
	}

	asru = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_uint8(asru, FM_VERSION, ZFS_SCHEME_VERSION0);
	(void) nvlist_add_string(asru, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS);
	(void) nvlist_add_uint64(asru, FM_FMRI_ZFS_POOL, pool_guid);
	(void) nvlist_add_uint64(asru, FM_FMRI_ZFS_VDEV, vdev_guid);

	/*
	 * We explicitly check for the unusable state here to make sure we
	 * aren't responding to a transient state change.  As part of opening a
	 * vdev, it's possible to see the 'statechange' event, only to be
	 * followed by a vdev failure later.  If we don't check the current
	 * state of the vdev (or pool) before marking it repaired, then we risk
	 * generating spurious repair events followed immediately by the same
	 * diagnosis.
	 *
	 * This assumes that the ZFS scheme code associates unusable (i.e.
	 * isolated) with its own definition of faulty state.  In the case of a
	 * DEGRADED leaf vdev (due to checksum errors), this is not the case.
	 * This works, however, because the transient state change is not
	 * posted in this case.  This could be made more explicit by not
	 * relying on the scheme's unusable callback and instead directly
	 * checking the vdev state, where we could correctly account for
	 * DEGRADED state.
	 */
	if (!fmd_nvl_fmri_unusable(hdl, asru) && fmd_nvl_fmri_has_fault(hdl,
	    asru, FMD_HAS_FAULT_ASRU, NULL)) {
		topo_hdl_t *thp;
		char *fmri = NULL;
		int err;

		thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
		if (topo_fmri_nvl2str(thp, asru, &fmri, &err) == 0) {
			(void) fmd_repair_asru(hdl, fmri);
			topo_hdl_strfree(thp, fmri);
		}
		fmd_hdl_topo_rele(hdl, thp);
	}

	nvlist_free(asru);

	/*
	 * Record this (pool, vdev) pair so we don't repeat the repair.
	 */
	zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
	zrp->zrr_next = zdp->zrd_repaired;
	zrp->zrr_pool = pool_guid;
	zrp->zrr_vdev = vdev_guid;
	zdp->zrd_repaired = zrp;
}

/*ARGSUSED*/
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class)
{
	uint64_t pool_guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *resource, *fault, *fru;
	nvlist_t **faults;
	uint_t f, nfaults;
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	libzfs_handle_t *zhdl = zdp->zrd_hdl;
	boolean_t fault_device, degrade_device;
	boolean_t is_repair;
	char *scheme, *fmri;
	nvlist_t *vdev;
	char *uuid;
	int repair_done = 0;
	boolean_t retire;
	boolean_t is_disk;
	vdev_aux_t aux;
	topo_hdl_t *thp;
	int err;

	/*
	 * If this is a resource notifying us of device removal, then simply
	 * check for an available spare and continue.
	 */
	if (strcmp(class, "resource.fs.zfs.removed") == 0) {
		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
		    &pool_guid) != 0 ||
		    nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    &vdev_guid) != 0)
			return;

		if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
		    &vdev)) == NULL)
			return;

		if (fmd_prop_get_int32(hdl, "spare_on_remove"))
			replace_with_spare(hdl, zhp, vdev);
		zpool_close(zhp);
		return;
	}

	if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
		return;

	if (strcmp(class, "resource.fs.zfs.statechange") == 0 ||
	    strcmp(class,
	    "resource.sysevent.EC_zfs.ESC_ZFS_vdev_remove") == 0) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}

	zfs_retire_clear_data(hdl, zdp);

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
		is_repair = B_TRUE;
	else
		is_repair = B_FALSE;

	/*
	 * We subscribe to zfs faults as well as all repair events.
	 */
	if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
	    &faults, &nfaults) != 0)
		return;

	for (f = 0; f < nfaults; f++) {
		fault = faults[f];

		fault_device = B_FALSE;
		degrade_device = B_FALSE;
		is_disk = B_FALSE;

		/*
		 * Skip suspects that the diagnosis engine marked as not
		 * requiring retirement.
		 */
		if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
		    &retire) == 0 && retire == 0)
			continue;

		/*
		 * While we subscribe to fault.fs.zfs.*, we only take action
		 * for faults targeting a specific vdev (open failure or SERD
		 * failure).  We also subscribe to fault.io.* events, so that
		 * faulty disks will be faulted in the ZFS configuration.
		 */
		if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
			fault_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.vdev.checksum")) {
			degrade_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.device")) {
			fault_device = B_FALSE;
		} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
			is_disk = B_TRUE;
			fault_device = B_TRUE;
		} else {
			continue;
		}

		if (is_disk) {
			/*
			 * This is a disk fault.  Lookup the FRU, convert it to
			 * an FMRI string, and attempt to find a matching vdev.
			 */
			if (nvlist_lookup_nvlist(fault, FM_FAULT_FRU,
			    &fru) != 0 ||
			    nvlist_lookup_string(fru, FM_FMRI_SCHEME,
			    &scheme) != 0)
				continue;

			if (strcmp(scheme, FM_FMRI_SCHEME_HC) != 0)
				continue;

			thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
			if (topo_fmri_nvl2str(thp, fru, &fmri, &err) != 0) {
				fmd_hdl_topo_rele(hdl, thp);
				continue;
			}

			zhp = find_by_fru(zhdl, fmri, &vdev);
			topo_hdl_strfree(thp, fmri);
			fmd_hdl_topo_rele(hdl, thp);

			if (zhp == NULL)
				continue;

			(void) nvlist_lookup_uint64(vdev,
			    ZPOOL_CONFIG_GUID, &vdev_guid);
			aux = VDEV_AUX_EXTERNAL;
		} else {
			/*
			 * This is a ZFS fault.  Lookup the resource, and
			 * attempt to find the matching vdev.
			 */
			if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
			    &resource) != 0 ||
			    nvlist_lookup_string(resource, FM_FMRI_SCHEME,
			    &scheme) != 0)
				continue;

			if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
			    &pool_guid) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
			    &vdev_guid) != 0) {
				if (is_repair)
					vdev_guid = 0;
				else
					continue;
			}

			if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
			    &vdev)) == NULL)
				continue;

			aux = VDEV_AUX_ERR_EXCEEDED;
		}

		if (vdev_guid == 0) {
			/*
			 * For pool-level repair events, clear the entire pool.
			 */
			(void) zpool_clear(zhp, NULL, NULL);
			zpool_close(zhp);
			continue;
		}

		/*
		 * If this is a repair event, then mark the vdev as repaired
		 * and continue.
		 */
		if (is_repair) {
			repair_done = 1;
			(void) zpool_vdev_clear(zhp, vdev_guid);
			zpool_close(zhp);
			continue;
		}

		/*
		 * Actively fault or degrade the device if needed.
		 */
		if (fault_device)
			(void) zpool_vdev_fault(zhp, vdev_guid, aux);
		if (degrade_device)
			(void) zpool_vdev_degrade(zhp, vdev_guid, aux);

		/*
		 * Attempt to substitute a hot spare.
		 */
		replace_with_spare(hdl, zhp, vdev);
		zpool_close(zhp);
	}

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
	    nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
		fmd_case_uuresolved(hdl, uuid);
}
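
/*
 * Module registration tables.  A minimal sketch: the surviving _fmd_init()
 * fragment registers 'fmd_info' below, so we assume a single event entry
 * point (zfs_retire_recv) and a 'spare_on_remove' boolean property, which
 * the removal path above reads via fmd_prop_get_int32().  Treat the exact
 * version string and property default as assumptions.
 */
static const fmd_hdl_ops_t fmd_ops = {
	zfs_retire_recv,	/* fmdo_recv */
	NULL,			/* fmdo_timeout */
	NULL,			/* fmdo_close */
	NULL,			/* fmdo_stats */
	NULL,			/* fmdo_gc */
};

static const fmd_prop_t fmd_props[] = {
	{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
	{ NULL, 0, NULL }
};

static const fmd_hdl_info_t fmd_info = {
	"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};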

void
_fmd_init(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp;
	libzfs_handle_t *zhdl;

	if ((zhdl = libzfs_init()) == NULL)
		return;

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		libzfs_fini(zhdl);
		return;
	}

	zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
	zdp->zrd_hdl = zhdl;
	fmd_hdl_setspecific(hdl, zdp);
}

void
_fmd_fini(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);

	if (zdp != NULL) {
		zfs_retire_clear_data(hdl, zdp);
		libzfs_fini(zdp->zrd_hdl);
		fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
	}
}