/* dsl_destroy.c, revision c166b69d29138aed7a415fe7cef698e54c6ae945 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2013 by Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_pool.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}
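
/*
 * Example (illustrative sketch; the dataset name is hypothetical): the
 * checks above are why "zfs destroy -d" (deferred destroy) can succeed on
 * a snapshot that still has user holds or clones, while an immediate
 * destroy of the same snapshot fails.
 */
#if 0
static int
example_destroy_snapshot(void)
{
	int err;

	/* Immediate destroy: fails while user holds or clones exist. */
	err = dsl_destroy_snapshot("pool/fs@snap", B_FALSE);
	if (err != 0) {
		/*
		 * Deferred destroy: mark the snapshot and destroy it
		 * once the last hold and clone go away.
		 */
		err = dsl_destroy_snapshot("pool/fs@snap", B_TRUE);
	}
	return (err);
}
#endif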
static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds);
		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));
	return (0);
}
struct process_old_arg {
};
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
}
} else {
}
return (0);
}
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
struct process_old_arg poa = { 0 };
/* change snapused */
/* swap next's deadlist to our deadlist */
}
static void
dsl_dir_remove_clones_key(dsl_dir_t *dd, uint64_t mintxg, dmu_tx_t *tx)
{
/*
* If it is the old version, dd_clones doesn't exist so we can't
* find the clones, but dsl_deadlist_remove_key() is a no-op so it
* doesn't matter.
*/
return;
zap_cursor_advance(&zc)) {
}
}
}
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
int err;
int after_branch_point = FALSE;
if (defer &&
(ds->ds_userrefs > 0 ||
return;
}
/* We need to log before removing it from the namespace. */
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_inuse[f]) {
}
}
if (after_branch_point &&
tx));
}
}
if (!after_branch_point) {
}
}
} else {
/* Adjust prev's unique space. */
if (ds_prev && !after_branch_point) {
}
/* Adjust snapused. */
/* Move blocks to be freed to pool's free list. */
tx);
/* Merge our deadlist into next's and free it. */
}
/* Collapse range in clone heads */
if (ds_next->ds_is_snapshot) {
/*
* Update next's unique to include blocks which
* were previously shared by only this snapshot
* and it. Those blocks will be born after the
* prev snap and before this snap, and will have
* died after the next snap and before the one
* after that (ie. be on the snap after next's
* deadlist).
*/
FTAG, &ds_nextnext));
/* Collapse range in this head. */
} else {
if (ds_prev) {
}
/*
* Reduce the amount of our unconsumed refreservation
* being charged to our parent by the amount of
* new unique data we have gained.
*/
}
}
/*
* This must be done after the dsl_traverse(), because it will
* re-open the objset.
*/
}
/* remove from snapshot namespace */
#ifdef ZFS_DEBUG
{
}
#endif
count == 0);
}
tx));
tx));
}
static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
}
}
/*
* The semantics of this function are described in the comment above
* lzc_destroy_snaps(). To summarize:
*
* The snapshots must all be in the same pool.
*
* Snapshots that don't exist will be silently ignored (considered to be
* "already deleted").
*
* On success, all snaps will be destroyed and this will return 0.
* On failure, no snaps will be destroyed, the errlist will be filled in,
* and this will return an errno.
*/
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
int error;
return (0);
&dsda, 0, ZFS_SPACE_CHECK_NONE);
return (error);
}
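
/*
 * Example (illustrative sketch; snapshot names are hypothetical): a caller
 * passes an nvlist keyed by full snapshot names, all in the same pool, and
 * an errlist that is filled with name -> errno pairs on failure.
 */
#if 0
static int
example_destroy_snapshots(void)
{
	nvlist_t *snaps = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();
	int err;

	fnvlist_add_boolean(snaps, "pool/fs@a");
	fnvlist_add_boolean(snaps, "pool/fs@b");
	err = dsl_destroy_snapshots_nvl(snaps, B_FALSE, errlist);
	/* On failure, no snapshots were destroyed and errlist is non-empty. */
	fnvlist_free(errlist);
	fnvlist_free(snaps);
	return (err);
}
#endif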
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};
/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
return (0);
/*
* It's a block in the intent log. It has no
* accounting, so just free it.
*/
} else {
}
return (0);
}
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
int error;
if (ds->ds_is_snapshot)
/*
* Can't delete a head dataset if there are snapshots of it.
* (Except if the only snapshots are from the branch we cloned
* from.)
*/
/*
* Can't delete if there are children of this fs.
*/
if (error != 0)
return (error);
if (count != 0)
/* We need to remove the origin snapshot as well. */
}
return (0);
}
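
/*
 * Example (illustrative sketch; the dataset name is hypothetical):
 * destroying a head dataset only succeeds once the checks above pass,
 * i.e. after its snapshots and child datasets are gone.
 */
#if 0
static int
example_destroy_filesystem(void)
{
	/* Fails with a nonzero error while snapshots or children remain. */
	return (dsl_destroy_head("pool/fs"));
}
#endif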
static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);
	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
dd_used_t t;
/*
* Decrement the filesystem count for all parent filesystems.
*
* When we receive an incremental stream into a filesystem that already
* exists, a temporary clone is created. We never count this temporary
* clone, whose name begins with a '%'.
*/
/*
* Remove our reservation. The impl() routine avoids setting the
* actual property, which would require the (already destroyed) ds.
*/
for (t = 0; t < DD_USED_NUM; t++)
}
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
/* We need to log before removing it from the namespace. */
/* Remove our reservation. */
if (ds->ds_reserved != 0) {
0, tx);
}
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_inuse[f]) {
}
}
/* This is a clone */
obj);
}
}
/*
* Destroy the deadlist. Unless it's a clone, the
* deadlist should be empty. (If it's a clone, it's
* safe to ignore the deadlist contents.)
*/
} else {
/*
* Move the bptree into the pool's list of trees to
* clean up and update space accounting information.
*/
tx);
}
}
}
}
/*
* This must be done after the dsl_traverse(), because it will
* re-open the objset.
*/
}
/* Erase the link in the dir */
if (ds->ds_bookmarks != 0) {
}
if (rmorigin) {
}
}
static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
}
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
/* Mark it as inconsistent on-disk, in case we crash */
}
int
dsl_destroy_head(const char *name)
{
int error;
#ifdef _KERNEL
#endif
if (error != 0)
return (error);
if (!isenabled) {
0, ZFS_SPACE_CHECK_NONE);
if (error != 0)
return (error);
/*
* Head deletion is processed in one txg on old pools;
* remove the objects from open context so that the txg sync
* is not too long.
*/
if (error == 0) {
/* sync out all frees */
}
}
}
/*
* Note, this function is used as the callback for dmu_objset_find(). We
* always return 0 so that we will continue to find and process
* inconsistent datasets, even if we encounter an error trying to
* process one of them.
*/
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}
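
/*
 * Example (illustrative sketch): a pool-open or recovery path can sweep
 * away datasets left inconsistent by an interrupted receive or destroy by
 * using dsl_destroy_inconsistent() as the dmu_objset_find() callback.
 */
#if 0
static void
example_cleanup_inconsistent(spa_t *spa)
{
	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
	    NULL, DS_FIND_CHILDREN);
}
#endif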