/* zfs_mod.c -- revision 3c112a2b34403220c06c3e2fcac403358cfba168 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * ZFS syseventd module.
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 *	1. Search for any vdevs whose devid matches that of the newly added
 *	   device.
 *
 *	2. If no vdevs are found, then search for any vdevs whose devfs path
 *	   matches that of the new device.
 *
 *	3. If no vdevs match by either method, then ignore the event.
 *
 *	4. Attempt to online the device with a flag to indicate that it should
 *	   be unspared when resilvering completes.  If this succeeds, then the
 *	   same device was inserted and we should continue normally.
 *
 *	5. If the pool does not have the 'autoreplace' property set, attempt to
 *	   online the device again without the unspare flag, which will
 *	   generate a FMA fault.
 *
 *	6. If the pool has the 'autoreplace' property set, and the matching vdev
 *	   is a whole disk, then label the new disk and attempt a 'zpool
 *	   replace'.
 *
 * The module responds to EC_DEV_ADD events for both disks and lofi devices,
 * with the latter used for testing.  The special ESC_ZFS_VDEV_CHECK event
 * indicates that a device failed to open during pool load, but the autoreplace
 * property was set.  In this case, we deferred the associated FMA fault until
 * our module had a chance to process the autoreplace logic.  If the device
 * could not be replaced, then the second online attempt will trigger the FMA
 * fault that we skipped earlier.
 */
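/*
 * The include section and module-level state do not appear in this listing.
 * What follows is a minimal sketch reconstructed from the identifiers used
 * below (g_zfshdl, g_pool_list, g_tpool); the exact include list and the
 * g_enumeration_done/g_zfs_tid pair are assumptions based on the usual
 * illumos syseventd module layout.  PHYS_PATH, used by devid_iter() below,
 * is a minor-node suffix macro whose definition is likewise not shown here.
 */
#include <alloca.h>
#include <devid.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libsysevent.h>
#include <libzfs.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/list.h>
#include <sys/sysevent/dev.h>
#include <thread.h>
#include <thread_pool.h>
#include <unistd.h>

libzfs_handle_t *g_zfshdl;	/* library handle, opened in slm_init() */
list_t g_pool_list;		/* pools that were unavailable at startup */
tpool_t *g_tpool;		/* thread pool for re-enabling datasets */
boolean_t g_enumeration_done;	/* set once pool enumeration completes */
thread_t g_zfs_tid;		/* pool-enumeration thread; see slm_init() */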
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;	/* handle for the unavailable pool */
	list_node_t	uap_node;	/* linkage for g_pool_list */
} unavailpool_t;

/*
 * Return the state of the top-level vdev for the given pool.
 */
int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}

static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		/* Pool can't open; remember it so we can retry later. */
		unavailpool_t *uap = malloc(sizeof (unavailpool_t));
		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}
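/*
 * A minimal sketch (an assumption based on the standard libzfs iteration
 * pattern) of how the unavailable-pool list is built and later drained:
 * zpool_iter() walks every imported pool and zfs_unavail_pool() queues the
 * ones whose top-level vdev cannot open; zfs_enable_ds() is the thread-pool
 * job, dispatched from zfs_iter_pool() below, that remounts the datasets of
 * a pool that has come back.  slm_init() runs the enumeration from a
 * separate thread, since it can take a while.
 */
static void *
zfs_enum_pools(void *arg)
{
	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	g_enumeration_done = B_TRUE;
	return (NULL);
}

static void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
	free(pool);
}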
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label
 * the disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path, *physpath = NULL;
	uint64_t wholedisk = 0ULL;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	/*
	 * We should have a way to online a device by guid.  With the current
	 * interface, we are forced to chop off the 's0' for whole disks.
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * Attempt to online the device.  It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY || newstate == VDEV_STATE_DEGRADED))
		return;
	/*
	 * If the pool doesn't have the autoreplace property set, then attempt
	 * a true online (without the unspare flag), which will trigger a FMA
	 * fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}
	if (wholedisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * access to a raw node.  Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 *
		 * If any part of this process fails, then do a force online to
		 * trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		/* Skip the leading "/dev/dsk/" (9 characters). */
		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));

		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}
	}
	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);
}
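/*
 * Note that zfs_process_add() is never called directly by the event
 * handlers; it is handed to the iterators below as the zfs_process_func_t
 * callback.  A hypothetical invocation, mirroring zfs_deliver_add()
 * further down:
 *
 *	(void) devid_iter(devpath, zfs_process_add, B_TRUE);
 */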
/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;	/* string to match against */
	const char		*dd_prop;	/* vdev nvlist property to read */
	zfs_process_func_t	dd_func;	/* callback for each match */
	boolean_t		dd_found;	/* set when a match is found */
	boolean_t		dd_isdisk;	/* whole-disk replacement? */
	uint64_t		dd_pool_guid;	/* restrict search to this pool */
	uint64_t		dd_vdev_guid;	/* or match this vdev directly */
} dev_data_t;
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	char *path;
	uint_t c, children;
	nvlist_t **child;
	uint64_t guid;
	size_t len;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
		return;
	}

	if (dp->dd_vdev_guid != 0) {
		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid)
			return;
	} else {
		len = strlen(dp->dd_compare);

		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strncmp(dp->dd_compare, path, len) != 0)
			return;

		/*
		 * Normally, we want to have an exact match for the comparison
		 * string.  However, we allow substring matches in the
		 * following cases:
		 *
		 *	<path>:		This is a devpath, and the target is
		 *			one of its children.
		 *
		 *	<path/>		This is a devid for a whole disk, and
		 *			the target is one of its children.
		 */
		if (path[len] != '\0' && path[len] != ':' &&
		    path[len - 1] != '/')
			return;
	}

	dp->dd_found = B_TRUE;
	(dp->dd_func)(zhp, nvl, dp->dd_isdisk);
}
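/*
 * For example (hypothetical values): with dd_prop set to
 * ZPOOL_CONFIG_PHYS_PATH, a dd_compare of "/pci@0,0/ide@1f,1/cmdk@0,0"
 * matches a vdev whose physical path is "/pci@0,0/ide@1f,1/cmdk@0,0:a"
 * via the ':' rule; with dd_prop set to ZPOOL_CONFIG_DEVID, a dd_compare
 * of "id1,cmdk@AST38001=6JV5/" matches a stored devid of
 * "id1,cmdk@AST38001=6JV5/a" via the trailing '/' rule.
 */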
/*
 * For each pool, check whether the search criteria match and descend into
 * the vdev tree if they do.  Also retry any pools that were unavailable at
 * startup and have since become healthy enough to use.
 */
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}

	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {
			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)) != 0)
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
/*
 * Given a physical device path, iterate over all (pool, vdev) pairs which
 * correspond to the given path.
 */
static boolean_t
devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	dev_data_t data = { 0 };

	data.dd_compare = devpath;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
/*
 * Given a /devices path, lookup the corresponding devid for each minor node,
 * and find any vdevs with matching devids.  Doing this straight up would be
 * rather inefficient, O(minor nodes * vdevs in system), so we take advantage
 * of the fact that each devid ends with "/<minornode>".  Once we find any
 * valid minor node, we chop off the portion after the last slash, and then
 * search for matching vdevs, which is O(vdevs in system).
 */
static boolean_t
devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	size_t len;
	char *fullpath, *devidstr, *fulldevid;
	ddi_devid_t devid;
	dev_data_t data = { 0 };
	int fd;

	/*
	 * Try to open a known minor node.
	 */
	len = strlen(devpath) + sizeof ("/devices") + sizeof (PHYS_PATH);
	fullpath = alloca(len);
	(void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
	if ((fd = open(fullpath, O_RDONLY)) < 0)
		return (B_FALSE);

	/*
	 * Determine the devid as a string, with no trailing slash for the
	 * minor node.
	 */
	if (devid_get(fd, &devid) != 0) {
		(void) close(fd);
		return (B_FALSE);
	}
	(void) close(fd);

	if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
		devid_free(devid);
		return (B_FALSE);
	}

	/*
	 * Search on "<devid>/" instead of the full devid; any devid sharing
	 * this prefix belongs to the same physical device.
	 */
	len = strlen(devidstr) + 2;
	fulldevid = alloca(len);
	(void) snprintf(fulldevid, len, "%s/", devidstr);

	data.dd_compare = fulldevid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	devid_str_free(devidstr);
	devid_free(devid);

	return (data.dd_found);
}
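/*
 * To illustrate the prefix trick with a hypothetical devid: if the minor
 * node opened above yields "id1,cmdk@AST38001=6JV5/c", the search string
 * becomes "id1,cmdk@AST38001=6JV5/", which also matches a vdev whose
 * stored devid is "id1,cmdk@AST38001=6JV5/a", that is, a different minor
 * node on the same physical disk.
 */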
/*
 * This function is called when we receive a devfs add event.  This can be
 * either a disk event or a lofi event, and the behavior is slightly different
 * depending on which it is.
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
	char *devpath, *devname;
	char path[PATH_MAX], realpath[PATH_MAX];
	int ret;

	/*
	 * The main unit of operation is the physical device path.  For disks,
	 * this is the device node, as all minor nodes are affected equally.
	 */
	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath) != 0)
		return (-1);
	/*
	 * If this is a lofi device, then we need to get the minor device
	 * node, as this is the path used in the vdev label.  The exact
	 * formatting of the minor-node path is not shown here; "%s" below
	 * is a placeholder.
	 */
	if (is_lofi) {
		if (nvlist_lookup_string(nvl, DEV_NAME, &devname) == 0 &&
		    (ret = snprintf(path, sizeof (path), "%s",
		    devname)) > 0 && ret < (int)sizeof (path)) {
			/* ... resolve the minor node into 'realpath' ... */
		}
	}

	/*
	 * Iterate over all vdevs with a matching devid, then all vdevs with
	 * a matching physical path.  Only plain disks (not lofi devices) are
	 * candidates for whole-disk replacement.
	 */
	if (!devid_iter(devpath, zfs_process_add, !is_lofi))
		(void) devpath_iter(devpath, zfs_process_add, !is_lofi);

	return (0);
}

/*
 * Online a vdev (found by physical path) whose disk link has reappeared.
 * Called for each pool when an ESC_DEV_DLE event is received.
 */
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	vdev_state_t newstate;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[PATH_MAX];
		uint64_t wholedisk = 0ULL;

		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &path) == 0);
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) == 0);

		/* As in zfs_process_add(), chop the slice off whole disks. */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk)
			fullpath[strlen(fullpath) - 2] = '\0';

		(void) zpool_vdev_online(zhp, fullpath, 0, &newstate);

		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}
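/*
 * A minimal sketch of the ESC_ZFS_VDEV_CHECK handler dispatched from
 * zfs_deliver_event() below, assuming the event carries the pool and vdev
 * GUIDs under ZFS_EV_POOL_GUID and ZFS_EV_VDEV_GUID: treat the check as if
 * it were an add event, restricted to the affected pool and vdev.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 || data.dd_pool_guid == 0)
		return (0);

	(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid);

	data.dd_func = zfs_process_add;
	data.dd_isdisk = B_TRUE;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}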
/*
 * Handle an ESC_DEV_DLE (device link) event: try to online the matching
 * vdev in each pool by physical path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname;

	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0)
		return (-1);

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1)
		return (-1);
	return (0);
}

/*
 * syseventd entry point: decode the event class and subclass, extract the
 * attribute list, and dispatch to the appropriate handler.
 */
static int
zfs_deliver_event(sysevent_t *ev, int unused)
{
	const char *class = sysevent_get_class_name(ev);
	const char *subclass = sysevent_get_subclass_name(ev);
	nvlist_t *nvl;
	int is_lofi = 0, is_check = 0, is_dle = 0;
	int ret;

	/* ... classify 'class'/'subclass' into is_lofi/is_check/is_dle ... */

	if (sysevent_get_attr_list(ev, &nvl) != 0)
		return (-1);

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);

	nvlist_free(nvl);
	return (ret);
}
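/*
 * The module ops table returned to syseventd from slm_init() below.  A
 * sketch following the standard slm module convention: major/minor
 * interface version, a retry count (the value 10 here is an assumption),
 * and the event delivery entry point.
 */
static struct slm_mod_ops zfs_mod_ops = {
	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
};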
struct slm_mod_ops *
slm_init()
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (NULL);

	/*
	 * Collect the list of unavailable pools asynchronously, since
	 * enumeration can take a while.
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (thr_create(NULL, 0, zfs_enum_pools, NULL, 0, &g_zfs_tid) != 0)
		return (NULL);

	/* Worker counts are an assumption; linger 0, default attributes. */
	g_tpool = tpool_create(1, 4, 0, NULL);

	return (&zfs_mod_ops);
}
void
slm_fini()
{
	unavailpool_t *pool;

	(void) thr_join(g_zfs_tid, NULL, NULL);

	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}

	while ((pool = list_head(&g_pool_list)) != NULL) {
		list_remove(&g_pool_list, pool);
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	list_destroy(&g_pool_list);

	libzfs_fini(g_zfshdl);
}