zpool_vdev.c revision 705040ed336e23b47ac6a3421d1f23ab5e86871b
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Functions to convert between a list of vdevs and an nvlist representing the
* configuration. Each entry in the list can be one of:
*
* Device vdevs
* disk=(path=..., devid=...)
* file=(path=...)
*
* Group vdevs
* raidz[1|2]=(...)
* mirror=(...)
*
* Hot spares
*
* While the underlying implementation supports it, group vdevs cannot contain
* other group vdevs. All userland verification of devices is contained within
* this file. If successful, the nvlist returned can be passed directly to the
* kernel; we've done as much verification as possible in userland.
*
* Hot spares are a special case, and passed down as an array of disk vdevs, at
* the same level as the root of the vdev tree.
*
* The only function exported by this file is 'make_root_vdev'. The
* function performs several passes:
*
* 1. Construct the vdev specification. Performs syntax validation and
* makes sure each device is valid.
* 2. Check for devices in use. Using libdiskmgt, makes sure that no
* devices are also in use. Some can be overridden using the 'force'
* flag, others cannot.
* 3. Check for replication errors if the 'force' flag is not specified.
* Validates that the replication level is consistent across the
* entire pool.
* 4. Call libzfs to label any whole disks with an EFI label.
*/
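/*
 * For example, a hypothetical request such as
 *
 *	zpool create tank mirror c1t0d0 c1t1d0
 *
 * (device names are illustrative) corresponds to an nvlist of the form:
 *
 *	root=(children=(mirror=(children=(
 *	    disk=(path=/dev/dsk/c1t0d0s0, devid=...),
 *	    disk=(path=/dev/dsk/c1t1d0s0, devid=...)))))
 */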
#include <assert.h>
#include <devid.h>
#include <errno.h>
#include <fcntl.h>
#include <libdiskmgt.h>
#include <libintl.h>
#include <libnvpair.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/efi_partition.h>
#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/mntent.h>

#include "zpool_util.h"

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"
/*
 * For any given vdev specification, we can have multiple errors.  The
 * vdev_error() function keeps track of whether we have seen an error yet, and
 * prints out a header if it's the first error we've seen.
 */
boolean_t error_seen;
boolean_t is_force;

/*PRINTFLIKE1*/
static void
vdev_error(const char *fmt, ...)
{
	va_list ap;

	if (!error_seen) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification\n"));
		if (!is_force)
			(void) fprintf(stderr, gettext("use '-f' to override "
			    "the following errors:\n"));
		else
			(void) fprintf(stderr, gettext("the following errors "
			    "must be manually repaired:\n"));
		error_seen = B_TRUE;
	}

	va_start(ap, fmt);
	(void) vfprintf(stderr, fmt, ap);
	va_end(ap);
}
static void
libdiskmgt_error(int error)
{
	/*
	 * ENXIO/ENODEV is a valid error message if the device doesn't live in
	 * /dev/dsk.  Don't bother printing an error message in this case.
	 */
	if (error == ENXIO || error == ENODEV)
		return;

	(void) fprintf(stderr, gettext("warning: device in use checking "
	    "failed: %s\n"), strerror(error));
}
/*
 * Validate a device, passing the bulk of the work off to libdiskmgt.
 */
static int
check_slice(const char *path, int force, boolean_t wholedisk,
    boolean_t isspare)
{
	char *msg;
	int error = 0;
	dm_who_type_t who;

	if (force)
		who = DM_WHO_ZPOOL_FORCE;
	else if (isspare)
		who = DM_WHO_ZPOOL_SPARE;
	else
		who = DM_WHO_ZPOOL;

	if (dm_inuse((char *)path, &msg, who, &error) || error) {
		if (error != 0) {
			libdiskmgt_error(error);
			return (0);
		} else {
			vdev_error("%s", msg);
			free(msg);
			return (-1);
		}
	}

	/*
	 * If we're given a whole disk, ignore overlapping slices since we're
	 * about to label it anyway.
	 */
	error = 0;
	if (!wholedisk && !force &&
	    (dm_isoverlapping((char *)path, &msg, &error) || error)) {
		if (error == 0) {
			/* dm_isoverlapping returned -1 */
			vdev_error(gettext("%s overlaps with %s\n"), path, msg);
			free(msg);
			return (-1);
		} else if (error != ENODEV) {
			/* libdiskmgt's devcache only handles physical drives */
			libdiskmgt_error(error);
			return (0);
		}
	}

	return (0);
}
/*
 * Validate a whole disk.  Iterate over all slices on the disk and make sure
 * that none is in use by calling check_slice().
 */
static int
check_disk(const char *name, dm_descriptor_t disk, int force, int isspare)
{
	dm_descriptor_t *drive, *media, *slice;
	int err = 0;
	int i;
	int ret;

	/*
	 * Get the drive associated with this disk.  This should never fail,
	 * because we already have an alias handle open for the device.
	 */
	if ((drive = dm_get_associated_descriptors(disk, DM_DRIVE,
	    &err)) == NULL || *drive == NULL) {
		if (err)
			libdiskmgt_error(err);
		return (0);
	}

	if ((media = dm_get_associated_descriptors(*drive, DM_MEDIA,
	    &err)) == NULL) {
		dm_free_descriptors(drive);
		if (err)
			libdiskmgt_error(err);
		return (0);
	}

	dm_free_descriptors(drive);

	/*
	 * It is possible that the user has specified a removable media drive,
	 * and the media is not present.
	 */
	if (*media == NULL) {
		dm_free_descriptors(media);
		vdev_error(gettext("'%s' has no media in drive\n"), name);
		return (-1);
	}

	if ((slice = dm_get_associated_descriptors(*media, DM_SLICE,
	    &err)) == NULL) {
		dm_free_descriptors(media);
		if (err)
			libdiskmgt_error(err);
		return (0);
	}

	dm_free_descriptors(media);

	ret = 0;

	/*
	 * Iterate over all slices and report any errors.  We don't care about
	 * overlapping slices because we are using the whole disk.
	 */
	for (i = 0; slice[i] != NULL; i++) {
		char *name = dm_get_name(slice[i], &err);

		if (check_slice(name, force, B_TRUE, isspare) != 0)
			ret = -1;

		dm_free_name(name);
	}

	dm_free_descriptors(slice);
	return (ret);
}
/*
 * Validate a device.
 */
static int
check_device(const char *path, boolean_t force, boolean_t isspare)
{
	dm_descriptor_t desc;
	int err;
	char *dev;

	/*
	 * For whole disks, libdiskmgt does not include the leading dev path.
	 */
	dev = strrchr(path, '/');
	assert(dev != NULL);
	dev++;
	if ((desc = dm_get_descriptor_by_name(DM_ALIAS, dev, &err)) != NULL) {
		err = check_disk(path, desc, force, isspare);
		dm_free_descriptor(desc);
		return (err);
	}

	return (check_slice(path, force, B_FALSE, isspare));
}
/*
 * Check that a file is valid.  All we can do in this case is check that it's
 * not in use by another pool, and not in use by swap.
 */
static int
check_file(const char *file, boolean_t force, boolean_t isspare)
{
	char *name;
	int fd;
	int ret = 0;
	int err;
	pool_state_t state;
	boolean_t inuse;

	if (dm_inuse_swap(file, &err)) {
		if (err)
			libdiskmgt_error(err);
		else
			vdev_error(gettext("%s is currently used by swap. "
			    "Please see swap(1M).\n"), file);
		return (-1);
	}

	if ((fd = open(file, O_RDONLY)) < 0)
		return (0);

	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
		const char *desc;

		switch (state) {
		case POOL_STATE_ACTIVE:
			desc = gettext("active");
			break;

		case POOL_STATE_EXPORTED:
			desc = gettext("exported");
			break;

		case POOL_STATE_POTENTIALLY_ACTIVE:
			desc = gettext("potentially active");
			break;

		default:
			desc = gettext("unknown");
			break;
		}

		/*
		 * Allow hot spares to be shared between pools.
		 */
		if (state == POOL_STATE_SPARE && isspare)
			return (0);

		if (state == POOL_STATE_ACTIVE ||
		    state == POOL_STATE_SPARE || !force) {
			switch (state) {
			case POOL_STATE_SPARE:
				vdev_error(gettext("%s is reserved as a hot "
				    "spare for pool %s\n"), file, name);
				break;
			default:
				vdev_error(gettext("%s is part of %s pool "
				    "'%s'\n"), file, desc, name);
				break;
			}
			ret = -1;
		}

		free(name);
	}

	(void) close(fd);
	return (ret);
}
/*
 * By "whole disk" we mean an entire physical disk (something we can
 * label, toggle the write cache on, etc.) as opposed to the full
 * capacity of a pseudo-device such as lofi or did.  We act as if we
 * are labeling the disk, which should be a pretty good test of whether
 * it's a viable device or not.  Returns B_TRUE if it is and B_FALSE if
 * it isn't.
 */
static boolean_t
is_whole_disk(const char *arg)
{
	struct dk_gpt *label;
	int fd;
	char path[MAXPATHLEN];

	(void) snprintf(path, sizeof (path), "%s%s%s",
	    RDISK_ROOT, strrchr(arg, '/'), BACKUP_SLICE);
	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0)
		return (B_FALSE);
	if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
		(void) close(fd);
		return (B_FALSE);
	}
	efi_free(label);
	(void) close(fd);
	return (B_TRUE);
}
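/*
 * For a hypothetical argument of /dev/dsk/c1t0d0, for instance, the check
 * above opens /dev/rdsk/c1t0d0s2 (the backup slice) and asks libefi to
 * initialize an in-memory label for the device; nothing is ever written.
 * A slice name such as c1t0d0s0 fails the open and is treated as a plain
 * device.
 */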
/*
 * Create a leaf vdev.  Determine if this is a file or a device.  If it's a
 * device, fill in the device id to make a complete nvlist.  Valid forms for a
 * leaf vdev are:
 *
 *	/dev/dsk/xxx	Complete disk path
 *	/xxx		Full path to file
 *	xxx		Shorthand for /dev/dsk/xxx
 */
static nvlist_t *
make_leaf_vdev(const char *arg, uint64_t is_log)
{
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *vdev = NULL;
	char *type = NULL;
	boolean_t wholedisk = B_FALSE;

	/*
	 * Determine what type of vdev this is, and put the full path into
	 * 'path'.  We detect whether this is a device or file afterwards by
	 * checking the st_mode of the file.
	 */
	if (arg[0] == '/') {
		/*
		 * Complete device or file path.  Exact type is determined by
		 * examining the file descriptor afterwards.
		 */
		wholedisk = is_whole_disk(arg);
		if (!wholedisk && (stat64(arg, &statbuf) != 0)) {
			(void) fprintf(stderr,
			    gettext("cannot open '%s': %s\n"),
			    arg, strerror(errno));
			return (NULL);
		}

		(void) strlcpy(path, arg, sizeof (path));
	} else {
		/*
		 * This may be a short path for a device, or it could be total
		 * gibberish.  Check to see if it's a known device in
		 * /dev/dsk/.  As part of this check, see if we've been given
		 * an entire disk (minus the slice number).
		 */
		(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT,
		    arg);
		wholedisk = is_whole_disk(path);
		if (!wholedisk && (stat64(path, &statbuf) != 0)) {
			/*
			 * If we got ENOENT, then the user gave us
			 * gibberish, so try to direct them with a
			 * reasonable error message.  Otherwise,
			 * regurgitate strerror() since it's the best we
			 * can do.
			 */
			if (errno == ENOENT) {
				(void) fprintf(stderr,
				    gettext("cannot open '%s': no such "
				    "device in %s\n"), arg, DISK_ROOT);
				(void) fprintf(stderr,
				    gettext("must be a full path or "
				    "shorthand device name\n"));
				return (NULL);
			} else {
				(void) fprintf(stderr,
				    gettext("cannot open '%s': %s\n"),
				    path, strerror(errno));
				return (NULL);
			}
		}
	}

	/*
	 * Determine whether this is a device or a file.
	 */
	if (wholedisk || S_ISBLK(statbuf.st_mode)) {
		type = VDEV_TYPE_DISK;
	} else if (S_ISREG(statbuf.st_mode)) {
		type = VDEV_TYPE_FILE;
	} else {
		(void) fprintf(stderr, gettext("cannot use '%s': must be a "
		    "block device or regular file\n"), path);
		return (NULL);
	}

	/*
	 * Finally, we have the complete device or file, and we know that it is
	 * acceptable to use.  Construct the nvlist to describe this vdev.  All
	 * vdevs have a 'path' element, and devices also have a 'devid'
	 * element.
	 */
	verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
	verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_LOG, is_log) == 0);
	if (strcmp(type, VDEV_TYPE_DISK) == 0)
		verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
		    (uint64_t)wholedisk) == 0);

	/*
	 * For a whole disk, defer getting its devid until after labeling it.
	 */
	if (S_ISBLK(statbuf.st_mode) && !wholedisk) {
		/*
		 * Get the devid for the device.
		 */
		int fd;
		ddi_devid_t devid;
		char *minor = NULL, *devid_str = NULL;

		if ((fd = open(path, O_RDONLY)) < 0) {
			(void) fprintf(stderr,
			    gettext("cannot open '%s': %s\n"),
			    path, strerror(errno));
			nvlist_free(vdev);
			return (NULL);
		}

		if (devid_get(fd, &devid) == 0) {
			if (devid_get_minor_name(fd, &minor) == 0 &&
			    (devid_str = devid_str_encode(devid, minor)) !=
			    NULL) {
				verify(nvlist_add_string(vdev,
				    ZPOOL_CONFIG_DEVID, devid_str) == 0);
			}
			if (devid_str != NULL)
				devid_str_free(devid_str);
			if (minor != NULL)
				devid_str_free(minor);
			devid_free(devid);
		}

		(void) close(fd);
	}

	return (vdev);
}
/*
* Go through and verify the replication level of the pool is consistent.
* Performs the following checks:
*
* For the new spec, verifies that devices in mirrors and raidz are the
* same size.
*
* If the current configuration already has inconsistent replication
* levels, ignore any other potential problems in the new spec.
*
* Otherwise, make sure that the current spec (if there is one) and the new
* spec have consistent replication levels.
*/
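/*
 * For example (device names illustrative), 'mirror c0d0 c1d0 c2d0' followed
 * by a bare 'c3d0' mixes a three-way mirror with a plain disk at the top
 * level and is reported as mismatched, while 'mirror c0d0 c1d0 mirror c2d0
 * c3d0' is self-consistent.
 */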
typedef struct replication_level {
	char *zprl_type;
	uint64_t zprl_children;
	uint64_t zprl_parity;
} replication_level_t;

#define	ZPOOL_FUZZ	(16 * 1024 * 1024)

/*
 * Given a list of toplevel vdevs, return the current replication level.  If
 * the config is inconsistent, then NULL is returned.  If 'fatal' is set, then
 * an error message will be displayed for each self-inconsistent vdev.
 */
static replication_level_t *
get_replication(nvlist_t *nvroot, boolean_t fatal)
{
	nvlist_t **top;
	uint_t t, toplevels;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *nv;
	char *type;
	replication_level_t lastrep, rep, *ret;
	boolean_t dontreport;

	ret = safe_malloc(sizeof (replication_level_t));

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);

	lastrep.zprl_type = NULL;
	for (t = 0; t < toplevels; t++) {
		uint64_t is_log = B_FALSE;

		nv = top[t];

		/*
		 * For separate logs we ignore the top level vdev replication
		 * constraints.
		 */
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
		if (is_log)
			continue;

		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE,
		    &type) == 0);
		if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
		    &child, &children) != 0) {
			/*
			 * This is a 'file' or 'disk' vdev.
			 */
			rep.zprl_type = type;
			rep.zprl_children = 1;
			rep.zprl_parity = 0;
		} else {
			uint64_t vdev_size;

			/*
			 * This is a mirror or RAID-Z vdev.  Go through and make
			 * sure the contents are all the same (files vs. disks),
			 * keeping track of the number of elements in the
			 * process.
			 *
			 * We also check that the size of each vdev (if it can
			 * be determined) is the same.
			 */
			rep.zprl_type = type;
			rep.zprl_children = 0;

			if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
				verify(nvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_NPARITY,
				    &rep.zprl_parity) == 0);
				assert(rep.zprl_parity != 0);
			} else {
				rep.zprl_parity = 0;
			}

			/*
			 * The 'dontreport' variable indicates that we've
			 * already reported an error for this spec, so don't
			 * bother doing it again.
			 */
			type = NULL;
			dontreport = 0;
			vdev_size = -1ULL;
			for (c = 0; c < children; c++) {
				nvlist_t *cnv = child[c];
				char *path;
				struct stat64 statbuf;
				uint64_t size = -1ULL;
				char *childtype;
				int fd, err;

				rep.zprl_children++;

				verify(nvlist_lookup_string(cnv,
				    ZPOOL_CONFIG_TYPE, &childtype) == 0);

				/*
				 * If this is a replacing or spare vdev, then
				 * get the real first child of the vdev.
				 */
				if (strcmp(childtype,
				    VDEV_TYPE_REPLACING) == 0 ||
				    strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
					nvlist_t **rchild;
					uint_t rchildren;

					verify(nvlist_lookup_nvlist_array(cnv,
					    ZPOOL_CONFIG_CHILDREN, &rchild,
					    &rchildren) == 0);
					assert(rchildren == 2);
					cnv = rchild[0];

					verify(nvlist_lookup_string(cnv,
					    ZPOOL_CONFIG_TYPE,
					    &childtype) == 0);
				}

				verify(nvlist_lookup_string(cnv,
				    ZPOOL_CONFIG_PATH, &path) == 0);

				/*
				 * If we have a raidz/mirror that combines disks
				 * with files, report it as an error.
				 */
				if (!dontreport && type != NULL &&
				    strcmp(type, childtype) != 0) {
					if (ret != NULL)
						free(ret);
					ret = NULL;
					if (fatal)
						vdev_error(gettext(
						    "mismatched replication "
						    "level: %s contains both "
						    "files and devices\n"),
						    rep.zprl_type);
					else
						return (NULL);
					dontreport = B_TRUE;
				}

				/*
				 * According to stat(2), the value of 'st_size'
				 * is undefined for block devices and character
				 * devices.  But there is no effective way to
				 * determine the real size in userland.
				 *
				 * Instead, we'll take advantage of an
				 * implementation detail of spec_size().  If the
				 * device is currently open, then we (should)
				 * return a valid size.
				 *
				 * If we still don't get a valid size (indicated
				 * by a size of 0 or MAXOFFSET_T), then ignore
				 * this device altogether.
				 */
				if ((fd = open(path, O_RDONLY)) >= 0) {
					err = fstat64(fd, &statbuf);
					(void) close(fd);
				} else {
					err = stat64(path, &statbuf);
				}

				if (err != 0 ||
				    statbuf.st_size == 0 ||
				    statbuf.st_size == MAXOFFSET_T)
					continue;

				size = statbuf.st_size;

				/*
				 * Also make sure that devices and
				 * slices have a consistent size.  If
				 * they differ by a significant amount
				 * (~16MB) then report an error.
				 */
				if (!dontreport &&
				    (vdev_size != -1ULL &&
				    (labs(size - vdev_size) >
				    ZPOOL_FUZZ))) {
					if (ret != NULL)
						free(ret);
					ret = NULL;
					if (fatal)
						vdev_error(gettext(
						    "%s contains devices of "
						    "different sizes\n"),
						    rep.zprl_type);
					else
						return (NULL);
					dontreport = B_TRUE;
				}

				type = childtype;
				vdev_size = size;
			}
		}

		/*
		 * At this point, we have the replication of the last toplevel
		 * vdev in 'rep'.  Compare it to 'lastrep' to see if it's
		 * different.
		 */
		if (lastrep.zprl_type != NULL) {
			if (strcmp(lastrep.zprl_type, rep.zprl_type) != 0) {
				if (ret != NULL)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %s and %s vdevs are "
					    "present\n"),
					    lastrep.zprl_type, rep.zprl_type);
				else
					return (NULL);
			} else if (lastrep.zprl_parity != rep.zprl_parity) {
				if (ret)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %llu and %llu device parity "
					    "%s vdevs are present\n"),
					    lastrep.zprl_parity,
					    rep.zprl_parity,
					    rep.zprl_type);
				else
					return (NULL);
			} else if (lastrep.zprl_children != rep.zprl_children) {
				if (ret)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %llu-way and %llu-way %s "
					    "vdevs are present\n"),
					    lastrep.zprl_children,
					    rep.zprl_children,
					    rep.zprl_type);
				else
					return (NULL);
			}
		}
		lastrep = rep;
	}

	if (ret != NULL)
		*ret = rep;

	return (ret);
}
/*
 * Check the replication level of the vdev spec against the current pool.  Calls
 * get_replication() to make sure the new spec is self-consistent.  If the pool
 * has a consistent replication level, then we ignore any errors.  Otherwise,
 * report any difference between the two.
 */
static int
check_replication(nvlist_t *config, nvlist_t *newroot)
{
	nvlist_t **child;
	uint_t children;
	replication_level_t *current = NULL, *new;
	int ret;

	/*
	 * If we have a current pool configuration, check to see if it's
	 * self-consistent.  If not, simply return success.
	 */
	if (config != NULL) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if ((current = get_replication(nvroot, B_FALSE)) == NULL)
			return (0);
	}

	/*
	 * for spares there may be no children, and therefore no
	 * replication level to check
	 */
	if (nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		free(current);
		return (0);
	}

	/*
	 * If all we have is logs then there's no replication level to check.
	 */
	if (num_logs(newroot) == children) {
		free(current);
		return (0);
	}

	/*
	 * Get the replication level of the new vdev spec, reporting any
	 * inconsistencies found.
	 */
	if ((new = get_replication(newroot, B_TRUE)) == NULL) {
		free(current);
		return (-1);
	}

	/*
	 * Check to see if the new vdev spec matches the replication level of
	 * the current pool.
	 */
	ret = 0;
	if (current != NULL) {
		if (strcmp(current->zprl_type, new->zprl_type) != 0) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %s "
			    "and new vdev is %s\n"),
			    current->zprl_type, new->zprl_type);
			ret = -1;
		} else if (current->zprl_parity != new->zprl_parity) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %llu "
			    "device parity and new vdev uses %llu\n"),
			    current->zprl_parity, new->zprl_parity);
			ret = -1;
		} else if (current->zprl_children != new->zprl_children) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %llu-way "
			    "%s and new vdev uses %llu-way %s\n"),
			    current->zprl_children, current->zprl_type,
			    new->zprl_children, new->zprl_type);
			ret = -1;
		}
	}

	free(new);
	if (current != NULL)
		free(current);

	return (ret);
}
/*
 * Go through and find any whole disks in the vdev specification, labelling them
 * as appropriate.  When constructing the vdev spec, we were unable to open this
 * device in order to provide a devid.  Now that we have labelled the disk and
 * know that slice 0 is valid, we can construct the devid now.
 *
 * If the disk was already labeled with an EFI label, we will have gotten the
 * devid already (because we were able to open the whole disk).  Otherwise, we
 * need to get the devid after we label the disk.
 */
static int
make_disks(zpool_handle_t *zhp, nvlist_t *nv)
{
	nvlist_t **child;
	uint_t c, children;
	char *type, *path, *diskname;
	char buf[MAXPATHLEN];
	uint64_t wholedisk;
	int fd;
	int ret;
	ddi_devid_t devid;
	char *minor = NULL, *devid_str = NULL;

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {

		if (strcmp(type, VDEV_TYPE_DISK) != 0)
			return (0);

		/*
		 * We have a disk device.  Get the path to the device
		 * and see if it's a whole disk by appending the backup
		 * slice and stat()ing the device.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH,
		    &path) == 0);
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk)
			return (0);

		diskname = strrchr(path, '/');
		assert(diskname != NULL);
		diskname++;
		if (zpool_label_disk(g_zfs, zhp, diskname) == -1)
			return (-1);

		/*
		 * Fill in the devid, now that we've labeled the disk.
		 */
		(void) snprintf(buf, sizeof (buf), "%ss0", path);
		if ((fd = open(buf, O_RDONLY)) < 0) {
			(void) fprintf(stderr,
			    gettext("cannot open '%s': %s\n"),
			    buf, strerror(errno));
			return (-1);
		}

		if (devid_get(fd, &devid) == 0) {
			if (devid_get_minor_name(fd, &minor) == 0 &&
			    (devid_str = devid_str_encode(devid, minor)) !=
			    NULL) {
				verify(nvlist_add_string(nv,
				    ZPOOL_CONFIG_DEVID, devid_str) == 0);
			}
			if (devid_str != NULL)
				devid_str_free(devid_str);
			if (minor != NULL)
				devid_str_free(minor);
			devid_free(devid);
		}

		/*
		 * Update the path to refer to the 's0' slice.  The presence of
		 * the 'whole_disk' field indicates to the CLI that we should
		 * chop off the slice number when displaying the device in
		 * future output.
		 */
		verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, buf) == 0);

		(void) close(fd);

		return (0);
	}

	for (c = 0; c < children; c++)
		if ((ret = make_disks(zhp, child[c])) != 0)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = make_disks(zhp, child[c])) != 0)
				return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = make_disks(zhp, child[c])) != 0)
				return (ret);

	return (0);
}
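/*
 * As an illustrative example, a whole-disk entry created for /dev/dsk/c1t0d0
 * is labeled here and its 'path' is rewritten to /dev/dsk/c1t0d0s0; the
 * 'whole_disk' field tells the CLI to strip the slice number again when
 * printing the name.
 */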
/*
 * Determine if the given path is a hot spare within the given configuration.
 */
static boolean_t
is_spare(nvlist_t *config, const char *path)
{
	int fd;
	pool_state_t state;
	char *name = NULL;
	nvlist_t *label, *nvroot, **spares;
	uint64_t guid, spareguid;
	uint_t i, nspares;
	boolean_t inuse;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (B_FALSE);
	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 || !inuse ||
	    state != POOL_STATE_SPARE ||
	    zpool_read_label(fd, &label) != 0) {
		free(name);
		(void) close(fd);
		return (B_FALSE);
	}
	free(name);
	(void) close(fd);

	verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
	nvlist_free(label);

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			verify(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &spareguid) == 0);
			if (spareguid == guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}
/*
 * Go through and find any devices that are in use.  We rely on libdiskmgt for
 * the majority of this task.
 */
static int
check_in_use(nvlist_t *config, nvlist_t *nv, int force, int isreplacing,
    int isspare)
{
	nvlist_t **child;
	uint_t c, children;
	char *type, *path;
	int ret = 0;
	char buf[MAXPATHLEN];
	uint64_t wholedisk;

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {

		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH,
		    &path) == 0);

		/*
		 * As a generic check, we look to see if this is a replace of a
		 * hot spare within the same pool.  If so, we allow it
		 * regardless of what libdiskmgt or zpool_in_use() says.
		 */
		if (isreplacing) {
			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk) == 0 && wholedisk)
				(void) snprintf(buf, sizeof (buf), "%ss0",
				    path);
			else
				(void) strlcpy(buf, path, sizeof (buf));
			if (is_spare(config, buf))
				return (0);
		}

		if (strcmp(type, VDEV_TYPE_DISK) == 0)
			ret = check_device(path, force, isspare);

		if (strcmp(type, VDEV_TYPE_FILE) == 0)
			ret = check_file(path, force, isspare);

		return (ret);
	}

	for (c = 0; c < children; c++)
		if ((ret = check_in_use(config, child[c], force,
		    isreplacing, B_FALSE)) != 0)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = check_in_use(config, child[c], force,
			    isreplacing, B_TRUE)) != 0)
				return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = check_in_use(config, child[c], force,
			    isreplacing, B_FALSE)) != 0)
				return (ret);

	return (0);
}
static const char *
is_grouping(const char *type, int *mindev)
{
	if (strcmp(type, "raidz") == 0 || strcmp(type, "raidz1") == 0) {
		if (mindev != NULL)
			*mindev = 2;
		return (VDEV_TYPE_RAIDZ);
	}

	if (strcmp(type, "raidz2") == 0) {
		if (mindev != NULL)
			*mindev = 3;
		return (VDEV_TYPE_RAIDZ);
	}

	if (strcmp(type, "mirror") == 0) {
		if (mindev != NULL)
			*mindev = 2;
		return (VDEV_TYPE_MIRROR);
	}

	if (strcmp(type, "spare") == 0) {
		if (mindev != NULL)
			*mindev = 1;
		return (VDEV_TYPE_SPARE);
	}

	if (strcmp(type, "log") == 0) {
		if (mindev != NULL)
			*mindev = 1;
		return (VDEV_TYPE_LOG);
	}

	if (strcmp(type, "cache") == 0) {
		if (mindev != NULL)
			*mindev = 1;
		return (VDEV_TYPE_L2CACHE);
	}

	return (NULL);
}
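/*
 * For example, an argument list of 'raidz2 c0d0 c1d0 c2d0' (names
 * illustrative) maps to a single raidz toplevel with nparity 2 and three
 * children, since 'raidz2' reports VDEV_TYPE_RAIDZ with a minimum of three
 * devices.
 */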
/*
 * Construct a syntactically valid vdev specification,
 * and ensure that all devices and files exist and can be opened.
 * Note: we don't bother freeing anything in the error paths
 * because the program is just going to exit anyway.
 */
nvlist_t *
construct_spec(int argc, char **argv)
{
	nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
	int t, toplevels, mindev, nspares, nlogs, nl2cache;
	const char *type;
	uint64_t is_log;
	boolean_t seen_logs;

	top = NULL;
	toplevels = 0;
	spares = NULL;
	l2cache = NULL;
	nspares = 0;
	nlogs = 0;
	nl2cache = 0;
	is_log = B_FALSE;
	seen_logs = B_FALSE;

	while (argc > 0) {
		nv = NULL;

		/*
		 * If it's a mirror or raidz, the subsequent arguments are
		 * its leaves -- until we encounter the next mirror or raidz.
		 */
		if ((type = is_grouping(argv[0], &mindev)) != NULL) {
			nvlist_t **child = NULL;
			int c, children = 0;

			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
				if (spares != NULL) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'spare' can be "
					    "specified only once\n"));
					return (NULL);
				}
				is_log = B_FALSE;
			}

			if (strcmp(type, VDEV_TYPE_LOG) == 0) {
				if (seen_logs) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'log' can be "
					    "specified only once\n"));
					return (NULL);
				}
				seen_logs = B_TRUE;
				is_log = B_TRUE;
				argc--;
				argv++;
				/*
				 * A log is not a real grouping device.
				 * We just set is_log and continue.
				 */
				continue;
			}

			if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
				if (l2cache != NULL) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'cache' can be "
					    "specified only once\n"));
					return (NULL);
				}
				is_log = B_FALSE;
			}

			if (is_log) {
				if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: unsupported 'log' "
					    "device: %s\n"), type);
					return (NULL);
				}
				nlogs++;
			}

			for (c = 1; c < argc; c++) {
				if (is_grouping(argv[c], NULL) != NULL)
					break;
				children++;
				child = realloc(child,
				    children * sizeof (nvlist_t *));
				if (child == NULL)
					zpool_no_memory();
				if ((nv = make_leaf_vdev(argv[c], B_FALSE))
				    == NULL)
					return (NULL);
				child[children - 1] = nv;
			}

			if (children < mindev) {
				(void) fprintf(stderr, gettext("invalid vdev "
				    "specification: %s requires at least %d "
				    "devices\n"), argv[0], mindev);
				return (NULL);
			}

			argc -= c;
			argv += c;

			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
				spares = child;
				nspares = children;
				continue;
			} else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
				l2cache = child;
				nl2cache = children;
				continue;
			} else {
				verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
				    0) == 0);
				verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
				    type) == 0);
				verify(nvlist_add_uint64(nv,
				    ZPOOL_CONFIG_IS_LOG, is_log) == 0);
				if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
					verify(nvlist_add_uint64(nv,
					    ZPOOL_CONFIG_NPARITY,
					    mindev - 1) == 0);
				}
				verify(nvlist_add_nvlist_array(nv,
				    ZPOOL_CONFIG_CHILDREN, child,
				    children) == 0);

				for (c = 0; c < children; c++)
					nvlist_free(child[c]);
				free(child);
			}
		} else {
			/*
			 * We have a device.  Pass off to make_leaf_vdev() to
			 * construct the appropriate nvlist describing the
			 * vdev.
			 */
			if ((nv = make_leaf_vdev(argv[0], is_log)) == NULL)
				return (NULL);
			if (is_log)
				nlogs++;
			argc--;
			argv++;
		}

		toplevels++;
		top = realloc(top, toplevels * sizeof (nvlist_t *));
		if (top == NULL)
			zpool_no_memory();
		top[toplevels - 1] = nv;
	}

	if (toplevels == 0 && nspares == 0 && nl2cache == 0) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
		return (NULL);
	}

	if (seen_logs && nlogs == 0) {
		(void) fprintf(stderr, gettext("invalid vdev specification: "
		    "log requires at least 1 device\n"));
		return (NULL);
	}

	/*
	 * Finally, create nvroot and add all top-level vdevs to it.
	 */
	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    top, toplevels) == 0);
	if (nspares != 0)
		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    spares, nspares) == 0);
	if (nl2cache != 0)
		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    l2cache, nl2cache) == 0);

	for (t = 0; t < toplevels; t++)
		nvlist_free(top[t]);
	for (t = 0; t < nspares; t++)
		nvlist_free(spares[t]);
	for (t = 0; t < nl2cache; t++)
		nvlist_free(l2cache[t]);
	if (spares)
		free(spares);
	if (l2cache)
		free(l2cache);
	free(top);

	return (nvroot);
}
/*
 * Get and validate the contents of the given vdev specification.  This ensures
 * that the nvlist returned is well-formed, that all the devices exist, and that
 * they are not currently in use by any other known consumer.  The 'poolconfig'
 * parameter is the current configuration of the pool when adding devices to an
 * existing pool, and is used to perform additional checks, such as changing the
 * replication level of the pool.  It can be 'NULL' to indicate that this is a
 * new pool.  The 'force' flag controls whether devices should be forcefully
 * added, even if they appear in use.
 */
nvlist_t *
make_root_vdev(zpool_handle_t *zhp, int force, int check_rep,
    boolean_t isreplacing, boolean_t dryrun, int argc, char **argv)
{
	nvlist_t *newroot;
	nvlist_t *poolconfig = NULL;

	is_force = force;

	/*
	 * Construct the vdev specification.  If this is successful, we know
	 * that we have a valid specification, and that all devices can be
	 * opened.
	 */
	if ((newroot = construct_spec(argc, argv)) == NULL)
		return (NULL);

	if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL))
		return (NULL);

	/*
	 * Validate each device to make sure that it's not shared with another
	 * subsystem.  We do this even if 'force' is set, because there are some
	 * uses (such as a dedicated dump device) that even '-f' cannot
	 * override.
	 */
	if (check_in_use(poolconfig, newroot, force, isreplacing,
	    B_FALSE) != 0) {
		nvlist_free(newroot);
		return (NULL);
	}

	/*
	 * Check the replication level of the given vdevs and report any errors
	 * found.  We include the existing pool spec, if any, as we need to
	 * catch changes against the existing replication level.
	 */
	if (check_rep && check_replication(poolconfig, newroot) != 0) {
		nvlist_free(newroot);
		return (NULL);
	}

	/*
	 * Run through the vdev specification and label any whole disks found.
	 */
	if (!dryrun && make_disks(zhp, newroot) != 0) {
		nvlist_free(newroot);
		return (NULL);
	}

	return (newroot);
}
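/*
 * A typical caller (sketch; see zpool_main.c for the real invocations) builds
 * the pool nvlist roughly as follows for 'zpool create -f':
 *
 *	nvlist_t *nvroot = make_root_vdev(NULL, B_TRUE, B_FALSE,
 *	    B_FALSE, B_FALSE, argc, argv);
 *	if (nvroot == NULL)
 *		return (1);
 *
 * with a NULL pool handle because the pool does not exist yet.
 */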