/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright 2016 Nexenta Systems, Inc.
 */
/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
/*
 * Intermediate structures used to gather configuration information.
 */
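/*
 * A sketch of those intermediate structures, reconstructed from how they are
 * used below (pe_vdevs, ve_configs, ce_txg, ne_name, and so on).  Field names
 * and exact layout are assumptions for illustration, not verbatim source.
 * Labels are bucketed per pool guid, then per toplevel vdev guid, then per
 * label txg -- exactly the hierarchy described above.
 */
typedef struct config_entry {
	uint64_t		ce_txg;		/* label txg */
	nvlist_t		*ce_config;	/* config read from the label */
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;	/* toplevel vdev guid */
	config_entry_t		*ve_configs;	/* one entry per label txg */
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;	/* pool guid */
	vdev_entry_t		*pe_vdevs;	/* toplevel vdevs seen so far */
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;	/* path this guid was seen at */
	uint64_t		ne_guid;	/* leaf vdev guid */
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;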
static char *

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		/* Not a leaf: recurse into each child vdev. */
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}
	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;
		/*
		 * At this point, 'count' is the number of characters
		 * matched from the end.
		 */
		if (best == NULL || count > matched) {
			best = ne;
			matched = count;
		}
	}

	if (best == NULL)
		return (0);
	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
		devid_str_free(devid);
		return (-1);
	}

	return (0);
}
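/*
 * Illustrative helper (not part of the original file): one way to compute
 * the "most similar" score described above -- the number of matching
 * characters counted from the end of each path.  For example,
 * "/dev/dsk/c1t0d0s0" vs "/dev/dsk/c2t0d0s0" scores 6 ("t0d0s0").
 */
static int
suffix_match_len(const char *a, const char *b)
{
	const char *pa = a + strlen(a);
	const char *pb = b + strlen(b);
	int count = 0;

	while (pa > a && pb > b && *(pa - 1) == *(pb - 1)) {
		count++;
		pa--;
		pb--;
	}
	return (count);
}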
/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}
	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state, which
	 * should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0)
		return (0);
	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
		nvlist_free(config);
		return (-1);
	}
	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * not there.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
		nvlist_free(config);
		return (-1);
	}
	/*
	 * Third, see if we have a config with a matching transaction group.
	 * If so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
		nvlist_free(config);
		return (-1);
	}
	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}
/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);
	*isactive = (theguid == guid);
	return (0);
}
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	/*
	 * Hand the config to the kernel via ZFS_IOC_POOL_TRYIMPORT; grow
	 * the destination nvlist buffer and retry while the ioctl fails
	 * with ENOMEM.
	 */
	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}
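	/*
	 * Sketch of the tail of this function (assumed shape): on ioctl
	 * failure free the command nvlists and return NULL; on success
	 * unpack the refreshed config the kernel wrote back.
	 */
	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}
	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0)
		nvl = NULL;
	zcmd_free_nvlists(&zc);
	return (nvl);
}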
/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	for (int c = 0; c < holes; c++) {
		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		/*
		 * Iterate over all toplevel vdevs.  Grab the pool
		 * configuration from the first one we find, and then go
		 * through the rest and add them as necessary to the 'vdevs'
		 * member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.  We rely on the fact that the max txg for
			 * the pool will contain the most up-to-date
			 * information about the valid top-levels in the vdev
			 * namespace.
			 */
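			/*
			 * Sketch of that selection (assumed variable name
			 * 'best_txg'; 'ce' walks the per-vdev config list
			 * built by add_config()): keep the config with the
			 * highest label txg.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {
				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}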
			/*
			 * Copy the relevant pieces of data to the pool
			 * configuration:
			 *
			 *	comment (if available)
			 *	hostid (if available)
			 *	hostname (if available)
			 */
			/*
			 * Add this top-level vdev to the child array.
			 */
			newchild = zfs_alloc(hdl, (id + 1) *
			    sizeof (nvlist_t *));
			for (c = 0; c < children; c++)
				newchild[c] = child[c];
		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed.  This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (max_id < children) {
			/* Trim extraneous devices beyond the valid range. */
			for (c = max_id; c < children; c++)
				nvlist_free(child[c]);
			children = max_id;
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal.  We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		for (c = 0; c < children; c++) {
			/*
			 * Holes in the namespace are treated as
			 * "hole" top-level vdevs and have a
			 * special flag set on them.
			 */
		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We
		 * cannot simply compress the child array, because the kernel
		 * performs certain checks to make sure the vdev IDs match
		 * their location in the configuration.
		 */
		for (c = 0; c < children; c++) {
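			/*
			 * Sketch of the placeholder described above (assumed
			 * shape): a "missing" vdev is an nvlist carrying only
			 * a type, the child's ID, and a zero guid.  Holes are
			 * built the same way with VDEV_TYPE_HOLE.
			 */
			if (child[c] == NULL) {
				nvlist_t *missing;

				if (nvlist_alloc(&missing,
				    NV_UNIQUE_NAME, 0) != 0 ||
				    nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = missing;
			}
		}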
		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0)
			goto nomem;

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0)
			goto nomem;

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0)
			goto nomem;
		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */

		/*
		 * Determine if this pool is currently active, in which case
		 * we can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}
		/*
		 * Go through and update the paths for spares, now that we
		 * have them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++)
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++)
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
		}
		/*
		 * Restore the original information read from the actual
		 * label.
		 */
		if (hostid != 0) {
			verify(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_HOSTID, hostid) == 0);
			verify(nvlist_add_string(config,
			    ZPOOL_CONFIG_HOSTNAME, hostname) == 0);
		}
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;
	}

	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
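/*
 * Illustrative layout: VDEV_LABELS is 4 and a vdev_label_t is 256K, so for
 * a device whose label-aligned size is S the labels live at offsets 0 and
 * 256K (front) and S - 512K and S - 256K (back).  Keeping redundant copies
 * at both ends lets a label survive damage to either end of the device.
 */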
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t) ||
		    nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		/* Skip half-initialized labels (txg == 0, see above). */
		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 ||
		    (state != POOL_STATE_SPARE &&
		    state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0))) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}
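/*
 * Usage sketch (hypothetical caller, not part of the original file): read
 * the label off an open device and print the embedded pool name, if any.
 */
static void
print_pool_name(int fd)
{
	nvlist_t *config;
	char *name;

	if (zpool_read_label(fd, &config) == 0 && config != NULL) {
		if (nvlist_lookup_string(config,
		    ZPOOL_CONFIG_POOL_NAME, &name) == 0)
			(void) printf("pool: %s\n", name);
		nvlist_free(config);
	}
}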
static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	boolean_t fav1, fav2;

	/*
	 * Slices zero and two are the most likely to provide results,
	 * so put those first.  (Simplified completion: any s0/s2 name
	 * sorts ahead of the rest, then lexical order.)
	 */
	fav1 = (strstr(nm1, "s0") != NULL || strstr(nm1, "s2") != NULL);
	fav2 = (strstr(nm2, "s0") != NULL || strstr(nm2, "s2") != NULL);
	if (fav1 && !fav2)
		return (-1);
	if (!fav1 && fav2)
		return (1);
	return (strcmp(nm1, nm2));
}
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);

	/*
	 * Protect against division by zero for disk labels that
	 * contain a bogus sector size.
	 */
	if (blksz == 0)
		blksz = DEV_BSIZE;

	/* too small to contain a zpool? */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)) != NULL)
		node->rn_nozpool = B_TRUE;
}
static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
}
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * On x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead.
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		efi_free(gpt);
	}
}
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}

	/*
	 * Ignore failed stats.  We only want regular
	 * files, character devs and block devs.
	 */

	/* this file is too small to hold a zpool */

	/*
	 * Try to read the disk label first so we don't have to
	 * open a bunch of minor nodes that can't have a zpool.
	 */
	check_slices(rn->rn_avl, fd, rn->rn_name);
/*
 * Given a file descriptor, clear (zero) the label information.
 */
int
zpool_clear_label(int fd)
{
	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs.
		 */

		/*
		 * Create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice != NULL;
		    slice = avl_walk(&slice_cache, slice, AVL_AFTER))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);
		if (slice->rn_config != NULL && !config_failed) {
			/*
			 * use the non-raw path for the config
			 */
		}

	ret = get_configs(hdl, &pools, iarg->can_be_active);

	for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
		cenext = ce->ce_next;
		nvlist_free(ce->ce_config);
		free(ce);
	}
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    const char *poolname, uint64_t guid)
{
	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) zfs_error(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		(void) zfs_error(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}
	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			return (NULL);
		}
	}
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	if (import->poolname != NULL) {
		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}
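/*
 * Usage sketch (hypothetical caller, not part of the original file): search
 * the default /dev/dsk directory for a pool named "tank", first checking
 * whether an active pool already claims that name.  Field names follow the
 * importargs_t usage above.
 */
static nvlist_t *
find_tank(libzfs_handle_t *hdl)
{
	importargs_t args = { 0 };

	args.poolname = "tank";	/* name and guid are mutually exclusive */
	args.unique = B_TRUE;	/* sets args.exists via zpool_iter() */
	return (zpool_search_import(hdl, &args));
}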
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		/* Recurse over the vdev tree looking for a matching guid. */
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid)
				return (1);
		}
	}

	return (0);
}
/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
			zhp = NULL;
		}
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will
		 * exist and the guid will be the same), or whether it's part
		 * of an active pool that was disconnected without being
		 * explicitly exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}
		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev
			 * is still present in the config.  Otherwise, pretend
			 * like it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			}
			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated,
		 * and we end up having to deal with this case anyway.
		 */
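		/*
		 * Sketch of that check (assumed callback-data field names,
		 * mirroring the find_aux fragment earlier): ask every
		 * imported pool whether its spare list contains this guid.
		 */
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1)
			ret = B_TRUE;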
		/*
		 * Check if any pool is currently using this l2cache device.
		 */

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
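/*
 * Usage sketch (hypothetical caller, not part of the original file): check
 * whether the device open on 'fd' belongs to some pool before reusing it.
 */
static boolean_t
device_is_busy(libzfs_handle_t *hdl, int fd)
{
	pool_state_t state;
	char *name = NULL;
	boolean_t inuse = B_FALSE;

	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
		(void) printf("in use by pool '%s' (state %d)\n",
		    name, (int)state);
		free(name);
		return (B_TRUE);
	}
	return (B_FALSE);
}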