/* libzfs_pool.c revision b81d61a68b235e0529ebadc18e14d9d1dd52a258 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/zfs_ioctl.h>
#include "zfs_namecheck.h"
#include "libzfs_impl.h"
/*
* Validate the given pool name, optionally putting an extended error message in
* 'buf'.
*/
static int
{
char what;
switch (why) {
case NAME_ERR_TOOLONG:
break;
case NAME_ERR_INVALCHAR:
"'%c' in pool name"), what);
break;
case NAME_ERR_NOLETTER:
"name must begin with a letter"), buflen);
break;
case NAME_ERR_RESERVED:
"name is reserved\n"
"pool name may have been omitted"), buflen);
break;
case NAME_ERR_DISKLIKE:
"pool name is reserved\n"
"pool name may have been omitted"), buflen);
break;
}
}
return (FALSE);
}
return (TRUE);
}
/*
* Set the pool-wide health based on the vdev state of the root vdev.
*/
void
{
char *health;
&nvroot) == 0);
case VDEV_STATE_CLOSED:
case VDEV_STATE_CANT_OPEN:
case VDEV_STATE_OFFLINE:
break;
case VDEV_STATE_DEGRADED:
break;
case VDEV_STATE_HEALTHY:
break;
default:
}
health) == 0);
}
/*
* Open a handle to the given pool, even if the pool is currently in the FAULTED
* state.
*/
zpool_open_canfail(const char *pool)
{
int error;
/*
* Make sure the pool name is valid.
*/
"pool name"), pool);
return (NULL);
}
"such pool"), pool);
return (NULL);
} else {
}
} else {
}
return (zhp);
}
/*
* Like the above, but silent on error. Used when iterating over pools (because
* the configuration cache may be out of date).
*/
zpool_open_silent(const char *pool)
{
int error;
return (NULL);
} else {
}
} else {
}
return (zhp);
}
/*
* Similar to zpool_open_canfail(), but refuses to open pools in the faulted
* state.
*/
zpool_open(const char *pool)
{
return (NULL);
return (NULL);
}
return (zhp);
}
/*
* Close the handle. Simply frees the memory associated with the handle.
*/
void
{
if (zhp->zpool_config)
if (zhp->zpool_old_config)
}
/*
* Return the name of the pool.
*/
const char *
{
return (zhp->zpool_name);
}
/*
* Return the GUID of the pool.
*/
{
&guid) == 0);
return (guid);
}
/*
* Return the amount of space currently consumed by the pool.
*/
{
&nvroot) == 0);
}
/*
* Return the total space in the pool.
*/
{
&nvroot) == 0);
}
/*
* Return the alternate root for this pool, if any.
*/
int
{
return (-1);
return (0);
}
/*
* Return the state of the pool (ACTIVE or UNAVAILABLE)
*/
int
{
return (zhp->zpool_state);
}
/*
* Create the named pool, using the provided vdev list. It is assumed
* that the consumer has already validated the contents of the nvlist, so we
* don't have to worry about error semantics.
*/
int
{
char *packed;
int err;
char reason[64];
return (-1);
}
return (-1);
}
NV_ENCODE_NATIVE, 0)) != 0)
switch (errno) {
case EEXIST:
"pool exists"), pool);
break;
case EPERM:
"permission denied"), pool);
break;
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label.
*/
"one or more vdevs refer to the same device"),
pool);
break;
case EOVERFLOW:
/*
* This occurrs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
"create '%s': one or more devices is less "
"than the minimum size (%s)"), pool,
buf);
}
break;
case ENAMETOOLONG:
/*
* One of the vdevs has exceeded VDEV_SPEC_MAX length in
* its plaintext representation.
*/
"too many devices in a single vdev"), pool);
break;
case EIO:
"I/O error on one or more devices"), pool);
break;
case ENXIO:
/*
* This is unlikely to happen since we've verified that
* all the devices can be opened from userland, but it's
* still possible in some circumstances.
*/
"one or more devices is unavailable"), pool);
break;
case ENOSPC:
/*
* This can occur if we were incapable of writing to a
* file vdev because the underlying filesystem is out of
* space. This is very similar to EOVERFLOW, but we'll
* produce a slightly different message.
*/
"one or more devices is out of space"), pool);
break;
default:
}
return (-1);
}
/*
* If this is an alternate root pool, then we automatically set the
* moutnpoint of the root dataset to be '/'.
*/
}
return (0);
}
/*
* Destroy the given pool. It is up to the caller to ensure that there are no
* datasets left in the pool.
*/
int
{
return (-1);
return (-1);
switch (errno) {
case EPERM:
"cannot destroy '%s': permission denied"),
zhp->zpool_name);
break;
case EBUSY:
"cannot destroy '%s': pool busy"),
zhp->zpool_name);
break;
case ENOENT:
"cannot destroy '%s': no such pool"),
zhp->zpool_name);
break;
case EROFS:
"cannot destroy '%s': one or more devices is "
"read only, or '/' is mounted read only"),
zhp->zpool_name);
break;
default:
}
if (zfp)
return (-1);
}
if (zfp) {
}
return (0);
}
/*
* Add the given vdevs to the pool. The caller must have already performed the
* necessary verification to ensure that the vdev specification is well-formed.
*/
int
{
char *packed;
switch (errno) {
case EPERM:
break;
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label.
*/
"one or more vdevs refer to the same device"),
zhp->zpool_name);
break;
case ENAMETOOLONG:
/*
* One of the vdevs has exceeded VDEV_SPEC_MAX length in
* its plaintext representation.
*/
"too many devices in a single vdev"),
zhp->zpool_name);
break;
case ENXIO:
/*
* This is unlikely to happen since we've verified that
* all the devices can be opened from userland, but it's
* still possible in some circumstances.
*/
"one or more devices is unavailable"),
zhp->zpool_name);
break;
case EOVERFLOW:
/*
* This occurrs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
"add to '%s': one or more devices is less "
"than the minimum size (%s)"),
}
break;
default:
}
return (-1);
}
return (0);
}
/*
* Exports the pool from the system. The caller must ensure that there are no
* mounted datasets in the pool.
*/
int
{
if (zpool_remove_zvol_links(zhp) != 0)
return (-1);
switch (errno) {
case EPERM:
"cannot export '%s': permission denied"),
zhp->zpool_name);
break;
case EBUSY:
"cannot export '%s': pool is in use"),
zhp->zpool_name);
break;
case ENOENT:
"cannot export '%s': no such pool"),
zhp->zpool_name);
break;
default:
}
return (-1);
}
return (0);
}
/*
* Import the given pool using the known configuration. The configuration
* should have come from zpool_find_import(). The 'newname' and 'altroot'
* parameters control whether the pool is imported with a different name or with
* an alternate root, respectively.
*/
int
{
char *packed;
char *thename;
char *origname;
int ret;
&origname) == 0);
"invalid pool name"), newname);
return (-1);
}
} else {
}
"root '%s' must be a complete path"), thename,
altroot);
return (-1);
}
else
&zc.zc_pool_guid) == 0);
ret = 0;
char desc[1024];
thename);
else
switch (errno) {
case EEXIST:
/*
* A pool with that name already exists.
*/
desc);
break;
case EPERM:
/*
* The user doesn't have permission to create pools.
*/
"denied"), desc);
break;
case ENXIO:
case EDOM:
/*
* Device is unavailable, or vdev sum didn't match.
*/
"devices is unavailable"),
desc);
break;
default:
}
ret = -1;
} else {
/*
* This should never fail, but play it safe anyway.
*/
}
}
return (ret);
}
/*
* Scrub the pool.
*/
int
{
char msg[1024];
return (0);
switch (errno) {
case EPERM:
/*
* No permission to scrub this pool.
*/
break;
case EBUSY:
/*
* Resilver in progress.
*/
msg);
break;
default:
}
return (-1);
}
/*
* Bring the specified vdev online
*/
int
{
char msg[1024];
return (0);
switch (errno) {
case ENODEV:
/*
* Device doesn't exist
*/
break;
case EPERM:
/*
* No permission to bring this vdev online.
*/
break;
default:
}
return (-1);
}
/*
* Take the specified vdev offline
*/
int
{
char msg[1024];
return (0);
switch (errno) {
case ENODEV:
/*
* Device doesn't exist
*/
break;
case EPERM:
/*
* No permission to take this vdev offline.
*/
break;
case EBUSY:
/*
* There are no other replicas of this device.
*/
break;
default:
}
return (-1);
}
/*
* Attach new_disk (fully described by nvroot) to old_disk.
* If 'replacing' is specified, tne new disk will replace the old one.
*/
int
{
char msg[1024];
char *packed;
int ret;
if (ret == 0)
return (0);
if (replacing)
else
switch (errno) {
case EPERM:
/*
* No permission to mess with the config.
*/
break;
case ENODEV:
/*
* Device doesn't exist.
*/
break;
case ENOTSUP:
/*
* Can't attach to or replace this type of vdev.
*/
if (replacing)
"%s: cannot replace a replacing device"), msg);
else
"%s: attach is only applicable to mirrors"), msg);
break;
case EINVAL:
/*
* The new device must be a single disk.
*/
"%s: <new_device> must be a single disk"), msg);
break;
case ENXIO:
/*
* This is unlikely to happen since we've verified that
* all the devices can be opened from userland, but it's
* still possible in some circumstances.
*/
break;
case EBUSY:
/*
* The new device is is use.
*/
break;
case EOVERFLOW:
/*
* The new device is too small.
*/
break;
case EDOM:
/*
* The new device has a different alignment requirement.
*/
"%s: devices have different sector alignment"), msg);
break;
case ENAMETOOLONG:
/*
* The resulting top-level vdev spec won't fit in the label.
*/
"%s: too many devices in a single vdev"), msg);
break;
default:
}
return (1);
}
/*
* Detach the specified device.
*/
int
{
char msg[1024];
return (0);
switch (errno) {
case EPERM:
/*
* No permission to mess with the config.
*/
break;
case ENODEV:
/*
* Device doesn't exist.
*/
break;
case ENOTSUP:
/*
* Can't detach from this type of vdev.
*/
"%s: only applicable to mirror and replacing vdevs"), msg);
break;
case EBUSY:
/*
* There are no other replicas of this device.
*/
break;
default:
}
return (1);
}
static int
{
int ret;
/*
* We check for volblocksize intead of ZFS_TYPE_VOLUME so that we
* correctly handle snapshots of volumes.
*/
if (zhp->zfs_volblocksize != 0) {
if (linktype)
else
}
return (ret);
}
/*
* Iterate over all zvols in the pool and make any necessary minor nodes.
*/
int
{
int ret;
/*
* If the pool is unavailable, just return success.
*/
return (0);
return (ret);
}
/*
* Iterate over all zvols in the poool and remove any minor nodes.
*/
int
{
int ret;
/*
* If the pool is unavailable, just return success.
*/
return (0);
return (ret);
}