/* libzfs_import.c revision 4c58d71403cebfaa40a572ff12b17668ebd56987 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Pool import support functions.
*
* To import a pool, we rely on reading the configuration information from the
* ZFS label of each device. If we successfully read the label, then we
* organize the configuration information in the following hierarchy:
*
* pool guid -> toplevel vdev guid -> label txg
*
* Duplicate entries matching this same tuple will be discarded. Once we have
* examined every device, we pick the best label txg config for each toplevel
* vdev. We then arrange these toplevel vdevs into a complete pool config, and
* update any paths that have changed. Finally, we attempt to import the pool
* using our derived config, and record the results.
*/
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vdev_impl.h>
#include "libzfs.h"
#include "libzfs_impl.h"
/*
* Intermediate structures used to gather configuration information.
*/
/*
 * NOTE(review): this excerpt appears to have lines elided.  The
 * config_entry struct below is missing its data members and closing
 * brace, and the remaining structs are reduced to little more than
 * their linked-list linkage.  Confirm the full member layouts against
 * the complete source before relying on them.
 */
typedef struct config_entry {
struct config_entry *ce_next;
typedef struct vdev_entry {
struct vdev_entry *ve_next;
} vdev_entry_t;
typedef struct pool_entry {
struct pool_entry *pe_next;
} pool_entry_t;
/* Maps a vdev GUID to a device name/path (see the path fix-up code below). */
typedef struct name_entry {
const char *ne_name;
struct name_entry *ne_next;
} name_entry_t;
/* Head of the pool -> vdev -> config hierarchy described at the top of file. */
typedef struct pool_list {
} pool_list_t;
/*
 * NOTE(review): the function name, parameter list, and most of the body
 * are elided from this excerpt.  The visible fragments (an 'int fd'
 * local, an early NULL return, and a 'ret' string return) are
 * consistent with the get_devid() helper that derives a devid string
 * for a device path -- confirm against the full source.
 */
static char *
{
int fd;
/* Early failure path -- presumably the device could not be opened. */
return (NULL);
}
return (ret);
}
/*
 * Fix up path (and devid) information for a vdev configuration using the
 * known list of vdev GUID -> name mappings.
 * NOTE(review): the original summary comment is truncated in this excerpt
 * ("... configuration.") and the function name/parameters are elided --
 * confirm wording and signature against the full source.
 */
static void
{
int matched;
/* Interior vdev: recurse over the children and stop. */
for (c = 0; c < children; c++)
return;
}
/*
 * This is a leaf (file or disk) vdev. In either case, go through
 * the name list and see if we find a matching guid. If so, replace
 * the path and see if we can calculate a new devid.
 *
 * There may be multiple names associated with a particular guid, in
 * which case we have overlapping slices or multiple paths to the same
 * disk. If this is the case, then we want to pick the path that is
 * the most similar to the original, where "most similar" is the number
 * of matching characters starting from the end of the path. This will
 * preserve slice numbers even if the disks have been reorganized, and
 * will also catch preferred disk names if multiple paths exist.
 */
matched = 0;
int count;
break;
}
break;
/*
 * At this point, 'count' is the number of characters
 * matched from the end.
 */
}
}
}
/* NOTE(review): the path/devid replacement logic is elided here. */
return;
} else {
}
}
/*
 * Add the given configuration to the list of known devices.
 * NOTE(review): most of this function's body (nvlist lookups, list
 * walks, and allocations) is elided from this excerpt; only control-flow
 * fragments remain.  Confirm against the full source.
 */
static void
{
/*
 * If we have a valid config but cannot read any of these fields, then
 * it means we have a half-initialized label. In vdev_label_init()
 * we write a label with txg == 0 so that we can identify the device
 * in case the user refers to the same disk later on. If we fail to
 * create the pool, we'll be left with a label in this state
 * which should not be considered part of a valid pool.
 */
&pool_guid) != 0 ||
&vdev_guid) != 0 ||
&top_guid) != 0 ||
return;
}
/*
 * First, see if we know about this pool. If not, then add it to the
 * list of known pools.
 */
break;
}
}
/*
 * Second, see if we know about this toplevel vdev. Add it if its
 * missing.
 */
break;
}
}
/*
 * Third, see if we have a config with a matching transaction group. If
 * so, then we do nothing. Otherwise, add it to the list of known
 * configs.
 */
break;
}
} else {
}
/*
 * At this point we've successfully added our config to the list of
 * known configs. The last thing to do is add the vdev guid -> path
 * mappings so that we can fix up the configuration as necessary before
 * doing the import.
 */
}
/*
 * Convert our list of pools into the definitive set of configurations. We
 * start by picking the best config for each toplevel vdev. Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool. We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 * NOTE(review): large portions of this function (nvlist verify/lookup
 * calls, loop headers, and allocations) are elided from this excerpt;
 * only fragments remain.  Confirm against the full source.
 */
static nvlist_t *
{
int config_seen;
char *name;
char *packed;
int err;
uint_t c;
config_seen = FALSE;
/*
 * Iterate over all toplevel vdevs. Grab the pool configuration
 * from the first one we find, and then go through the rest and
 * add them as necessary to the 'vdevs' member of the config.
 */
/*
 * Determine the best configuration for this vdev by
 * selecting the config with the latest transaction
 * group.
 */
best_txg = 0;
}
if (!config_seen) {
/*
 * Copy the relevant pieces of data to the pool
 * configuration:
 *
 * pool guid
 * name
 * pool state
 */
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
ZPOOL_CONFIG_POOL_GUID, guid) == 0);
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
ZPOOL_CONFIG_POOL_NAME, name) == 0);
ZPOOL_CONFIG_POOL_STATE, &state) == 0);
ZPOOL_CONFIG_POOL_STATE, state) == 0);
config_seen = TRUE;
}
/*
 * Add this top-level vdev to the child array.
 */
ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
&id) == 0);
sizeof (nvlist_t *));
for (c = 0; c < children; c++)
}
/*
 * Go through and free all config information.
 */
}
/*
 * Free this vdev entry, since it has now been merged
 * into the main config.
 */
}
&guid) == 0);
/*
 * Look for any missing top-level vdevs. If this is the case,
 * create a faked up 'missing' vdev as a placeholder. We cannot
 * simply compress the child array, because the kernel performs
 * certain checks to make sure the vdev IDs match their location
 * in the configuration.
 */
for (c = 0; c < children; c++)
0) == 0);
ZPOOL_CONFIG_TYPE, VDEV_TYPE_MISSING) == 0);
ZPOOL_CONFIG_ID, c) == 0);
ZPOOL_CONFIG_GUID, 0ULL) == 0);
}
/*
 * Put all of this pool's top-level vdevs into a root vdev.
 */
VDEV_TYPE_ROOT) == 0);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
/*
 * Fix up any paths/devids in the assembled tree using our
 * known list of vdev GUID -> path mappings.
 * NOTE(review): this comment was truncated in the excerpt;
 * confirm wording against the full source.
 */
/*
 * Add the root vdev to this pool's configuration.
 */
nvroot) == 0);
/*
 * Free this pool entry.
 */
/*
 * Determine if this pool is currently active, in which case we
 * can't actually import it.
 */
&name) == 0);
&guid) == 0);
continue;
}
/*
 * Try to do the import in order to get vdev state.
 */
NV_ENCODE_NATIVE, 0)) != 0)
}
if (err)
/*
 * Add this pool to the list of configs.
 */
}
return (ret);
}
/*
 * Return the offset of the given label.
 * NOTE(review): the function name, parameters, and the entire body are
 * elided from this excerpt (presumably the standard vdev label offset
 * computation over VDEV_LABELS) -- confirm against the full source.
 */
static uint64_t
{
}
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 *
 * Iterates over all VDEV_LABELS copies on the device and returns the
 * first successfully unpacked config; returns NULL when no usable label
 * is found.  NOTE(review): the read/unpack/validation statements inside
 * the loop are elided from this excerpt (only the 'continue' skips
 * remain) -- confirm details against the full source.
 */
nvlist_t *
zpool_read_label(int fd)
{
int l;
return (NULL);
for (l = 0; l < VDEV_LABELS; l++) {
continue;
continue;
continue;
}
continue;
}
/* A label unpacked cleanly -- hand its config back to the caller. */
return (config);
}
return (NULL);
}
/*
 * Given a list of directories to search, find all pools stored on disk. This
 * includes partial pools which are not available to import. If no args are
 * given, the default directory (/dev/dsk) is searched.
 * NOTE(review): the original comment was cut mid-sentence in this
 * excerpt; the /dev/dsk default is grounded by the 'default_dir'
 * fallback below.  The function name/parameters and most of the
 * directory-walk body are elided -- confirm against the full source.
 */
nvlist_t *
{
int i;
char path[MAXPATHLEN];
static char *default_dir = "/dev/dsk";
int fd;
pool_list_t pools = { 0 };
/* No directories supplied: fall back to the single default. */
if (argc == 0) {
argc = 1;
argv = &default_dir;
}
/*
 * Go through and read the label configuration information from every
 * possible device, organizing the information according to pool GUID
 * and toplevel GUID.
 */
for (i = 0; i < argc; i++) {
if (argv[i][0] != '/') {
"cannot open '%s': must be an absolute path"),
argv[i]);
return (NULL);
}
"cannot open '%s': %s"), argv[i],
return (NULL);
}
/*
 * This is not MT-safe, but we have no MT consumers of libzfs
 */
continue;
/*
 * Ignore directories (which includes "." and "..").
 */
continue;
continue;
}
}
return (ret);
}
/*
 * NOTE(review): name and parameters are elided from this excerpt.  The
 * visible shape (TRUE on a match, recursion over children, FALSE
 * otherwise) is consistent with a helper that searches a vdev tree for
 * a given GUID -- confirm against the full source.
 */
int
{
return (TRUE);
for (c = 0; c < children; c++)
return (TRUE);
}
return (FALSE);
}
/*
 * Determines if the pool is in use. If so, it returns TRUE and the state of
 * the pool as well as the name of the pool. Both strings are allocated and
 * must be freed by the caller.
 * NOTE(review): the function name/parameters, the label read, and the
 * per-state decision logic are largely elided from this excerpt --
 * confirm against the full source.
 */
int
{
char *name;
int ret;
/* No readable label on the device: not in use. */
return (FALSE);
&name) == 0);
&stateval) == 0);
&guid) == 0);
&vdev_guid) == 0);
switch (stateval) {
case POOL_STATE_EXPORTED:
break;
case POOL_STATE_ACTIVE:
/*
 * For an active pool, we have to determine if it's really part
 * of an active pool (in which case the pool will exist and the
 * guid will be the same), or whether it's part of an active
 * pool that was disconnected without being explicitly exported.
 *
 * We use the direct ioctl() first to avoid triggering an error
 * message if the pool cannot be opened.
 */
/*
 * Because the device may have been removed while
 * offlined, we only report it as active if the vdev is
 * still present in the config. Otherwise, pretend like
 * it's not in use.
 */
!= NULL) {
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
} else {
}
} else {
}
break;
default:
}
/* Only hand back name/state strings when the pool is in use. */
if (ret) {
}
return (ret);
}