dsl_dataset.c revision feaa74e41c407fe56e66a47e097c2842d4f65b9f
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
static char *dsl_reaper = "the grim reaper";
#define SWITCH64(x, y) \
{ \
uint64_t __tmp = (x); \
(x) = (y); \
(y) = __tmp; \
}
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
* layer. If there's a refreservation, that space has already been
* partially accounted for in our ancestors.
*/
static int64_t
{
if (ds->ds_reserved == 0)
return (delta);
}
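
/*
 * A minimal sketch of the computation described above (a
 * reconstruction, not verbatim from this revision; the helper name is
 * hypothetical): the delta charged to the dsl_dir layer is the change
 * in MAX(unique, refreservation), since space below the
 * refreservation has already been charged to our ancestors.
 */
static int64_t
example_parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta,
	    ds->ds_reserved);
	return ((int64_t)(new_bytes - old_bytes));
}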
void
{
/* It could have been compressed away to nothing */
if (BP_IS_HOLE(bp))
return;
/*
* Account for the meta-objset space in its placeholder
* dsl_dir.
*/
return;
}
}
int
{
if (BP_IS_HOLE(bp))
return (0);
/*
* Account for the meta-objset space in its placeholder
* dataset.
*/
return (used);
}
} else {
if (async) {
/*
* We are here as part of zio's write done callback,
* which means we're a zio interrupt thread. We can't
* call dsl_deadlist_insert() now because it may block
* waiting for I/O. Instead, put bp on the deferred
* queue and let dsl_pool_sync() finish the job.
*/
bplist_append(&ds->ds_pending_deadlist, bp);
} else {
dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
}
/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
}
}
}
return (used);
}
{
return (0);
/*
* The snapshot creation could fail, but that would cause an
* incorrect FALSE return, which would only result in an
* overestimation of the amount of space that an operation would
* consume, which is OK.
*
* There's also a small window where we could miss a pending
* snapshot, because we could set the sync task in the quiescing
* phase. So this should only be used as a guess.
*/
if (ds->ds_trysnap_txg >
}
{
return (B_FALSE);
return (B_TRUE);
}
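
/*
 * Sketch of how the two fragments above presumably fit together (an
 * assumption based on the surrounding comment; the helper name is
 * hypothetical): the effective previous-snapshot txg must
 * conservatively include a snapshot that may be pending in the
 * current sync, and a block is freeable only if it was born after
 * that txg.
 */
static uint64_t
example_prev_snap_txg(dsl_dataset_t *ds)
{
	uint64_t trysnap = 0;

	if (ds->ds_trysnap_txg >
	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
		trysnap = ds->ds_trysnap_txg;
	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
}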
/* ARGSUSED */
static void
{
}
} else {
}
}
static int
{
int err;
if (ds->ds_snapname[0])
return (0);
return (0);
if (err)
return (err);
return (err);
}
static int
{
int err;
else
return (err);
}
static int
{
int err;
else
return (err);
}
static int
dsl_dataset_t **dsp)
{
int err;
if (err)
return (err);
/* Make sure dsobj has the correct object type. */
return (EINVAL);
if (err == 0) {
}
if (err) {
return (err);
}
if (!dsl_dataset_is_snapshot(ds)) {
}
} else {
if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
&ds->ds_userrefs);
}
}
/*
* In sync context, we're called with either no lock
* or with the write lock. If we're not syncing,
* we're always called with the read lock held.
*/
if (need_lock)
if (err == 0) {
}
if (need_lock)
} else {
}
if (err == 0) {
}
if (err) {
return (err);
}
} else {
ds->ds_fsid_guid =
}
}
return (ENOENT);
}
return (0);
}
static int
{
/*
 * In syncing context we don't want to hold the rwlock: there
* may be an existing writer waiting for sync phase to
* finish. We don't need to worry about such writers, since
* sync phase is single-threaded, so the writer can't be
* doing anything while we are active.
*/
if (dsl_pool_sync_context(dp)) {
return (0);
}
/*
* Normal users will hold the ds_rwlock as a READER until they
* are finished (i.e., call dsl_dataset_rele()). "Owners" will
* drop their READER lock after they set the ds_owner field.
*
* If the dataset is being destroyed, the destroy thread will
* obtain a WRITER lock for exclusive access after it's done its
* open-context work and then change the ds_owner to
* dsl_reaper once destruction is assured. So threads
* may block here temporarily, until the "destructability" of
* the dataset is determined.
*/
if (DSL_DATASET_IS_DESTROYED(ds)) {
return (ENOENT);
}
/*
* The dp_config_rwlock lives above the ds_lock. And
* we need to check DSL_DATASET_IS_DESTROYED() while
* holding the ds_lock, so we have to drop and reacquire
* the ds_lock here.
*/
}
return (0);
}
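
/*
 * Usage sketch for the reader-hold protocol described above (the
 * helper is hypothetical; dsl_dataset_hold(), dsl_dataset_rele() and
 * FTAG are the real interfaces being exercised):
 */
static int
example_read_used_bytes(const char *name, uint64_t *usedp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(name, FTAG, &ds);
	if (err)
		return (err);
	*usedp = ds->ds_phys->ds_used_bytes;	/* read under the hold */
	dsl_dataset_rele(ds, FTAG);
	return (0);
}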
int
dsl_dataset_t **dsp)
{
if (err)
return (err);
}
int
{
if (err)
return (err);
return (EBUSY);
}
return (0);
}
int
{
dsl_pool_t *dp;
const char *snapname;
int err = 0;
if (err)
return (err);
if (obj)
else
if (err)
goto out;
/* we may be looking for a snapshot */
if (*snapname++ != '@') {
goto out;
}
if (err == 0)
if (ds) {
if (ds->ds_snapname[0] == 0)
sizeof (ds->ds_snapname));
}
}
out:
return (err);
}
int
{
if (err)
return (err);
return (EBUSY);
}
return (0);
}
void
{
} else {
if (ds->ds_snapname[0]) {
/*
* We use a "recursive" mutex so that we
* can call dprintf_ds() with ds_lock held.
*/
} else {
}
}
}
}
static int
{
int result;
} else {
if (ds->ds_snapname[0]) {
++result; /* adding one for the @-sign */
} else {
}
}
}
return (result);
}
void
{
}
void
{
}
}
void
{
}
else
}
{
}
return (gotit);
}
void
{
}
{
DMU_OT_NONE, 0, tx);
} else {
}
}
}
}
}
return (dsobj);
}
{
/*
* If we are creating a clone, make sure we zero out any stale
 * data from the origin snapshot's zil header.
*/
}
return (dsobj);
}
struct destroyarg {
char *snapname;
char *failed;
};
static int
{
int err;
char *dsname;
if (err == 0) {
struct dsl_ds_destroyarg *dsda;
err = 0;
} else {
}
return (err);
}
/*
* Destroy 'snapname' in all descendants of 'fsname'.
*/
int
{
int err;
struct destroyarg da;
if (err)
return (err);
if (err == 0)
/*
* Return the file system name that triggered the error
*/
}
}
return (err);
}
static boolean_t
{
return (might_destroy);
}
/*
* If we're removing a clone, and these three conditions are true:
* 1) the clone's origin has no other children
* 2) the clone's origin has no user references
* 3) the clone's origin has been marked for deferred destruction
* Then, prepare to remove the origin as part of this sync task group.
*/
static int
{
char *name;
int namelen;
int error;
#ifdef _KERNEL
if (error) {
return (error);
}
#endif
if (error)
return (error);
}
return (0);
}
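
/*
 * Hedged sketch of the combined test implied by the three conditions
 * above (the helper is hypothetical; it assumes ds_num_children
 * counts the origin's next snapshot plus its clones, so a value of 2
 * means exactly one clone remains):
 */
static boolean_t
example_origin_removable(dsl_dataset_t *origin)
{
	return (origin->ds_phys->ds_num_children == 2 &&
	    origin->ds_userrefs == 0 &&
	    DS_IS_DEFER_DESTROY(origin));
}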
/*
* ds must be opened as OWNER. On return (whether successful or not),
* ds will be closed and caller can no longer dereference it.
*/
int
{
int err;
struct dsl_ds_destroyarg dsda = { 0 };
dsl_dataset_t dummy_ds = { 0 };
if (dsl_dataset_is_snapshot(ds)) {
/* Destroying a snapshot is simpler */
goto out;
} else if (defer) {
goto out;
}
/*
* Check for errors and mark this ds as inconsistent, in
* case we crash while freeing the objects.
*/
if (err)
goto out;
if (err)
goto out;
/*
* remove the objects in open context, so that we won't
* have too much to do in syncing context.
*/
/*
* Ignore errors, if there is not enough disk space
* we will deal with it in dsl_dataset_destroy_sync().
*/
}
goto out;
/*
* Only the ZIL knows how to free log blocks.
*/
/*
* Sync out all in-flight IO.
*/
/*
* If we managed to free all the objects in open
* context, the user space accounting should be zero.
*/
count == 0);
count == 0);
}
if (err)
goto out;
/*
* Blow away the dsl_dir + head dataset.
*/
/*
* If we're removing a clone, we might also need to remove its
* origin.
*/
do {
if (dsl_dir_is_clone(dd)) {
if (err) {
goto out;
}
}
/*
* We could be racing against 'zfs release' or 'zfs destroy -d'
* on the origin snap, in which case we can get EBUSY if we
* needed to destroy the origin snap but were not ready to
* do so.
*/
}
/* if it is successful, dsl_dir_destroy_sync will close the dd */
if (err)
out:
return (err);
}
blkptr_t *
{
}
void
{
/* If it's the meta-objset, set dp_meta_rootbp */
} else {
}
}
spa_t *
{
}
void
{
dsl_pool_t *dp;
return;
panic("dirtying snapshot!");
/* up the hold count until we can be written out */
}
}
/*
* The unique space in the head dataset can be calculated by subtracting
* the space used in the most recent snapshot, that is still being used
* in this file system, from the space currently in use. To figure out
* the space in the most recent snapshot still in use, we need to take
* the total space used in the snapshot and subtract out the space that
* has been freed up since the snapshot was taken.
*/
static void
{
else
mrs_used = 0;
}
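
/*
 * Sketch of the subtraction described above (a reconstruction under
 * the assumption that dsl_deadlist_space() reports the space freed
 * since the most recent snapshot):
 *
 *	unique = used - (mrs_used - deadlist_used)
 */
static void
example_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used, dlused, dlcomp, dluncomp;

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;
	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
}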
struct killarg {
};
/* ARGSUSED */
static int
{
return (0);
/*
* It's a block in the intent log. It has no
* accounting, so just free it.
 */
dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
} else {
(void) dsl_dataset_block_kill(ka->ds, bp, ka->tx, B_FALSE);
}
return (0);
}
/* ARGSUSED */
static int
{
int err;
/*
* Can't delete a head dataset if there are snapshots of it.
* (Except if the only snapshots are from the branch we cloned
* from.)
*/
return (EBUSY);
/*
* This is really a dsl_dir thing, but check it here so that
* we'll be less likely to leave this dataset inconsistent &
* nearly destroyed.
*/
if (err)
return (err);
if (count != 0)
return (EEXIST);
return (0);
}
/* ARGSUSED */
static void
{
/* Mark it as inconsistent on-disk, in case we crash */
}
static int
{
struct dsl_ds_destroyarg ndsda = {0};
/*
* If we're not prepared to remove the origin, don't remove
* the clone either.
*/
return (EBUSY);
}
}
/*
* If we're not going to remove the origin after all,
* undo the open context setup.
*/
}
return (0);
}
/* ARGSUSED */
int
{
/* we have an owner hold, so no one else can destroy us */
/*
* Only allow deferred destroy on pools that support it.
* NOTE: deferred destroy is only supported on snapshots.
*/
return (ENOTSUP);
return (0);
}
/*
* Can't delete a head dataset if there are snapshots of it.
* (Except if the only snapshots are from the branch we cloned
* from.)
*/
return (EBUSY);
/*
* If we made changes this txg, traverse_dsl_dataset won't find
* them. Try again.
*/
return (EAGAIN);
if (dsl_dataset_is_snapshot(ds)) {
/*
* If this snapshot has an elevated user reference count,
* we can't destroy it yet.
*/
return (EBUSY);
/*
* Can't delete a branch point. However, if we're destroying
* a clone and removing its origin due to it having a user
* hold count of 0 and having been marked for deferred destroy,
* it's OK for the origin to have a single clone.
*/
return (EEXIST);
}
}
/* XXX we should do some i/o error checking... */
return (0);
}
struct refsarg {
};
/* ARGSUSED */
static void
{
}
static void
{
}
static void
{
int err;
/*
* The err should not be ENOENT, but a bug in a previous version
* of the code could cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a missing entry.
* If we knew that the pool was created after
* SPA_VERSION_NEXT_CLONES, we could assert that it isn't
* ENOENT. However, at least we can check that we don't have
* too many entries in the next_clones_obj even after failing to
* remove this one.
*/
}
&count));
}
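
/*
 * Sketch of the tolerant-removal pattern described above (a
 * reconstruction; zap_remove_int() and zap_count() are real MOS
 * interfaces): ENOENT is tolerated, any other failure is fatal, and
 * the remaining entry count is sanity-checked afterwards.
 */
static void
example_remove_next_clone(objset_t *mos, dsl_dataset_t *ds, uint64_t obj,
    dmu_tx_t *tx)
{
	uint64_t count;
	int err;

	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	if (err != ENOENT)
		VERIFY3U(err, ==, 0);
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}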
static void
{
/*
* If it is the old version, dd_clones doesn't exist so we can't
* find the clones, but deadlist_remove_key() is a no-op so it
* doesn't matter.
*/
return;
zap_cursor_advance(&zc)) {
}
}
}
struct process_old_arg {
};
static int
{
}
} else {
}
return (0);
}
static void
{
struct process_old_arg poa = { 0 };
/* change snapused */
/* swap next's deadlist to our deadlist */
}
void
{
int err;
int after_branch_point = FALSE;
return;
}
}
/* signal any waiters that this dataset is going away */
/* Remove our reservation */
if (ds->ds_reserved != 0) {
&value);
}
} else {
}
if (after_branch_point &&
}
}
if (after_branch_point &&
/* This clone is toast. */
/*
* If the clone's origin has no other clones, no
* user holds, and has been marked for deferred
* deletion, then we should have done the necessary
* destroy setup for it.
*/
ds_prev->ds_userrefs == 0 &&
} else {
}
} else if (!after_branch_point) {
}
}
if (dsl_dataset_is_snapshot(ds)) {
} else {
/* Adjust prev's unique space. */
if (ds_prev && !after_branch_point) {
}
/* Adjust snapused. */
/* Move blocks to be freed to pool's free list. */
tx);
/* Merge our deadlist into next's and free it. */
}
/* Collapse range in clone heads */
if (dsl_dataset_is_snapshot(ds_next)) {
/*
* Update next's unique to include blocks which
* were previously shared by only this snapshot
* and it. Those blocks will be born after the
* prev snap and before this snap, and will have
* died after the next snap and before the one
 * after that (i.e., be on the snap after next's
* deadlist).
*/
FTAG, &ds_nextnext));
/* Collapse range in this head. */
} else {
if (ds_prev) {
}
/*
 * Reduce the amount of our unconsumed refreservation
* being charged to our parent by the amount of
* new unique data we have gained.
*/
}
}
} else {
/*
* There's no next snapshot, so this is a head dataset.
* Destroy the deadlist. Unless it's a clone, the
* deadlist should be empty. (If it's a clone, it's
* safe to ignore the deadlist contents.)
*/
/*
* Free everything that we point to (that's born after
* the previous snapshot, if we are a clone)
*
* NB: this should be very quick, because we already
* freed all the objects in open context.
*/
}
}
}
/*
* This must be done after the dsl_traverse(), because it will
* re-open the objset.
*/
}
/* Erase the link in the dir */
} else {
/* remove from snapshot namespace */
#ifdef ZFS_DEBUG
{
}
#endif
}
}
/*
* Remove the origin of the clone we just destroyed.
*/
struct dsl_ds_destroyarg ndsda = {0};
}
}
static int
{
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* If there's an fs-only reservation, any blocks that might become
* owned by the snapshot dataset must be accommodated by space
* outside of the reservation.
*/
return (ENOSPC);
/*
 * Propagate any reserved space for this snapshot to other
* snapshot checks in this sync group.
*/
if (asize > 0)
return (0);
}
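
/*
 * Sketch of the reservation check described above (a reconstruction;
 * dsl_dir_space_available() and dsl_dir_willuse_space() are real
 * interfaces): the space the snapshot would take ownership of is
 * bounded by MIN(unique, refreservation), and that much must be
 * available outside the reservation.
 */
static int
example_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);
	return (0);
}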
int
{
int err;
/*
* We don't allow multiple snapshots of the same txg. If there
* is already one, try again.
*/
return (EAGAIN);
/*
 * Check for a conflicting snapshot name.
*/
if (err == 0)
return (EEXIST);
return (err);
/*
 * Check that the dataset's name is not too long.  The full name
 * consists of the dataset name's length + 1 for the @-sign + the
 * snapshot name's length.
*/
return (ENAMETOOLONG);
if (err)
return (err);
return (0);
}
void
{
int err;
/*
* The origin's ds_creation_txg has to be < TXG_INITIAL
*/
crtxg = 1;
else
} else if (next_clones_obj != 0) {
}
}
/*
* If we have a reference-reservation on this dataset, we will
* need to increase the amount of refreservation being charged
* since our unique space is going to zero.
*/
if (ds->ds_reserved) {
}
zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
"dataset = %llu", dsobj);
}
void
{
/*
* in case we had to change ds_fsid_guid when we opened it,
* sync it out now.
*/
}
void
{
ds->ds_reserved);
ds->ds_userrefs);
/*
* This is a snapshot; override the dd's space used with
* our unique space and compression ratio.
*/
}
}
void
{
} else {
stat->dds_num_clones = 0;
}
/* clone origin is really a dsl_dir thing... */
} else {
}
}
{
return (ds->ds_fsid_guid);
}
void
{
/*
* Adjust available bytes according to refquota
*/
else
*availbytesp = 0;
}
}
{
return (B_FALSE);
/*
* It may be that only the ZIL differs, because it was
* reset in the head. Don't count that as being
* modified.
*/
return (B_TRUE);
return (B_TRUE);
}
return (B_FALSE);
}
/* ARGSUSED */
static int
{
char *newsnapname = arg2;
int err;
if (err)
return (err);
/* new name better not be in use */
if (err == 0)
err = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
err = ENAMETOOLONG;
return (err);
}
static void
{
const char *newsnapname = arg2;
int err;
}
struct renamesnaparg {
char failed[MAXPATHLEN];
char *oldsnap;
char *newsnap;
};
static int
{
char *snapname;
int err;
/*
* For recursive snapshot renames the parent won't be changing
*/
if (err != 0) {
}
#ifdef _KERNEL
/*
 * For each filesystem undergoing rename, we'll need to unmount it.
*/
#endif
if (err != 0)
return (0);
}
static int
{
int err;
struct renamesnaparg *ra;
/* truncate the snapshot name to get the fsname */
*cp = '\0';
if (err) {
return (err);
}
if (err == 0) {
}
}
}
if (err)
return (err);
}
static int
{
return (ENAMETOOLONG);
return (0);
}
int
{
const char *tail;
int err;
if (err)
return (err);
/* if we're growing, validate child name lengths */
if (delta > 0)
if (err == 0)
return (err);
}
if (tail[0] != '@') {
/* the name ended in a nonexistent component */
return (ENOENT);
}
/* new name must be snapshot in same filesystem */
return (EINVAL);
tail++;
return (EXDEV);
if (recursive) {
} else {
if (err)
return (err);
}
return (err);
}
struct promotenode {
};
struct promotearg {
char *err_ds;
};
static int
{
int err;
/* Check that it is a real clone */
return (EINVAL);
/* Since this is so expensive, don't do the preliminary check */
if (!dmu_tx_is_syncing(tx))
return (0);
return (EXDEV);
/* compute origin's new unique space */
/*
* Walk the snapshots that we are moving
*
* Compute space to transfer. Consider the incremental changes
* to used for each snapshot:
* (my used) = (prev's used) + (blocks born) - (blocks killed)
* So each snapshot gave birth to:
* (blocks born) = (my used) - (prev's used) + (blocks killed)
* So a sequence would look like:
* (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
* Which simplifies to:
 * uN + kN + k(N-1) + ... + k1 + k0
 * Note however, if we stop before we reach the ORIGIN we get:
 * uN + kN + k(N-1) + ... + kM - u(M-1)
*/
/* Check that the snapshot name does not conflict */
if (err == 0) {
goto out;
}
goto out;
/* The very first snapshot does not have a deadlist */
continue;
}
/*
* If we are a clone of a clone then we never reached ORIGIN,
* so we need to subtract out the clone origin's used space.
*/
if (pa->origin_origin) {
}
/* Check that there is enough space here */
if (err)
return (err);
/*
* Compute the amounts of space that will be used by snapshots
* after the promotion (for both origin and clone). For each,
* it is the amount of space that will be on all of their
* deadlists (that was not born before their new origin).
*/
/*
* Note, typically this will not be a clone of a clone,
* so dd_origin_txg will be < TXG_INITIAL, so
* these snaplist_space() -> dsl_deadlist_space_range()
* calls will be fast because they do not have to
* iterate over all bps.
*/
if (err)
return (err);
if (err)
return (err);
}
if (err)
return (err);
}
return (0);
out:
return (err);
}
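
/*
 * Worked illustration of the telescoping sum in the comment above
 * (hypothetical helper; u[] and k[] stand for the per-snapshot "used"
 * and "killed" byte counts in the comment's notation):
 *
 *	(uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
 *	    = uN + kN + k(N-1) + ... + k1 + k0
 */
static uint64_t
example_blocks_born(const uint64_t *u, const uint64_t *k, int n)
{
	uint64_t sum = u[n];
	int i;

	for (i = 0; i <= n; i++)
		sum += k[i];	/* each snapshot contributes its kills */
	return (sum);
}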
static void
{
/*
* We need to explicitly open odd, since origin_ds's dd will be
* changing.
*/
/* change origin's next snap */
/* change the origin's next clone */
oldnext_obj, tx));
}
/* change origin */
/* change dd_clone entries */
}
}
/* move snapshots to this dir */
/* unregister props as dsl_dir is changing */
}
/* move snap name entry */
/* change containing dsl_dir */
/* move any clone references */
zap_cursor_advance(&zc)) {
uint64_t o;
/*
* We've already moved the
* origin's reference.
*/
continue;
}
}
}
}
/*
* Change space accounting.
* Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
* both be valid, or both be 0 (resulting in delta == 0). This
* is true for each of {clone,origin} independently.
*/
/* log history record */
}
static char *snaplist_tag = "snaplist";
/*
* Make a list of dsl_dataset_t's for the snapshots between first_obj
* (exclusive) and last_obj (inclusive). The list will be in reverse
* order (last_obj will be the list_head()). If first_obj == 0, do all
* snapshots back to this dataset's origin.
*/
static int
{
list_create(l, sizeof (struct promotenode),
struct promotenode *snap;
int err;
if (own) {
0, snaplist_tag, &ds);
if (err == 0)
} else {
}
/* lost race with snapshot destroy */
continue;
} else if (err) {
return (err);
}
if (first_obj == 0)
list_insert_tail(l, snap);
}
return (0);
}
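
/*
 * Usage sketch for the list built above (hypothetical caller; the
 * list_head()/list_next() interfaces are standard): because the list
 * is in reverse order, last_obj is visited first.
 */
static void
example_walk_snaplist(list_t *l)
{
	struct promotenode *snap;

	for (snap = list_head(l); snap != NULL; snap = list_next(l, snap)) {
		/* visit snap here, newest snapshot first */
	}
}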
static int
{
struct promotenode *snap;
*spacep = 0;
}
return (0);
}
static void
{
struct promotenode *snap;
if (!l || !list_link_active(&l->list_head))
return;
list_remove(l, snap);
if (own)
else
}
list_destroy(l);
}
/*
* Promote a clone. Nomenclature note:
* "clone" or "cds": the original clone which is being promoted
* "origin" or "ods": the snapshot which is originally clone's origin
* "origin head" or "ohds": the dataset which is the head
* (filesystem/volume) for the origin
* "origin origin": the origin of the origin's filesystem (typically
* NULL, indicating that the clone is not a clone of a clone).
*/
int
{
dsl_pool_t *dp;
struct promotearg pa = { 0 };
struct promotenode *snap;
int err;
if (err)
return (err);
if (err) {
return (err);
}
return (EINVAL);
}
/*
* We are going to inherit all the snapshots taken before our
* origin (i.e., our new origin will be our parent's origin).
* Take ownership of them so that we can rename them into our
* namespace.
*/
&pa.shared_snaps);
if (err != 0)
goto out;
if (err != 0)
goto out;
if (err != 0)
goto out;
if (err != 0)
goto out;
}
out:
/*
* Add in 128x the snapnames zapobj size, since we will be moving
* a bunch of snapnames to the promoted ds, and dirtying their
* bonus buffers.
*/
if (err == 0) {
}
if (pa.origin_origin)
return (err);
}
struct cloneswaparg {
};
/* ARGSUSED */
static int
{
/* they should both be heads */
return (EINVAL);
/* the branch point should be just before them */
return (EINVAL);
/* cds should be the clone (unless they are unrelated) */
return (EINVAL);
/* the clone should be a child of the origin */
return (EINVAL);
/* ohds shouldn't be modified unless 'force' */
return (ETXTBSY);
/* adjust amount of any unconsumed refreservation */
if (csa->unused_refres_delta > 0 &&
return (ENOSPC);
return (EDQUOT);
return (0);
}
/* ARGSUSED */
static void
{
}
}
/*
* Reset origin's unique bytes, if it exists.
*/
}
/* swap blkptrs */
{
}
/* set dd_*_bytes */
{
dd_used_breakdown[DD_USED_SNAP], ==, 0);
/*
* The difference in the space used by snapshots is the
* difference in snapshot space due to the head's
* deadlist (since that's the only thing that's
* changing that affects the snapused).
*/
}
/* swap ds_*_bytes */
/* apply any parent delta for change in unconsumed refreservation */
/*
* Swap deadlists.
*/
}
/*
* Swap 'clone' with its origin head datasets. Used at the end of "zfs
* recv" into an existing fs to swizzle the file system to the new
* version, and by "zfs rollback". Can also be used to swap two
* independent head datasets if neither has any snapshots.
*/
int
{
struct cloneswaparg csa;
int error;
/*
* Need exclusive access for the swap. If we're swapping these
* datasets back after an error, we already hold the locks.
*/
goto retry;
}
}
return (error);
}
/*
* Given a pool name and a dataset object number in that pool,
* return the name of that dataset.
*/
int
{
dsl_pool_t *dp;
int error;
return (error);
}
return (error);
}
int
{
int error = 0;
/*
* *ref_rsrv is the portion of asize that will come from any
* unconsumed refreservation space.
*/
*ref_rsrv = 0;
/*
* Make a space adjustment for reserved bytes.
*/
*ref_rsrv =
}
return (0);
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
* on-disk is over quota and there are no pending changes (which
* may free up space for us).
*/
else
}
return (error);
}
/* ARGSUSED */
static int
{
int err;
return (ENOTSUP);
return (err);
if (psa->psa_effective_value == 0)
return (0);
return (ENOSPC);
return (0);
}
extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
void
{
}
}
int
{
int err;
if (err)
return (err);
/*
* If someone removes a file, then tries to set the quota, we
* want to make sure the file freeing takes effect.
*/
return (err);
}
static int
{
int err;
return (ENOTSUP);
if (dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (err);
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx))
return (0);
if (!DS_UNIQUE_IS_ACCURATE(ds))
return (ENOSPC);
return (ENOSPC);
}
return (0);
}
static void
{
}
int
{
int err;
&reservation);
if (err)
return (err);
return (err);
}
struct dsl_ds_holdarg {
char *htag;
char *snapname;
char failed[MAXPATHLEN];
};
typedef struct zfs_hold_cleanup_arg {
dsl_pool_t *dp;
char htag[MAXNAMELEN];
} zfs_hold_cleanup_arg_t;
static void
{
B_TRUE);
}
void
{
}
/*
* The max length of a temporary tag prefix is the number of hex digits
* required to express UINT64_MAX plus one for the hyphen.
*/
#define MAX_TAG_PREFIX_LEN 17
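
/*
 * Illustration of the bound (a hypothetical snippet, not from this
 * revision): UINT64_MAX needs 16 hex digits, plus 1 for the hyphen,
 * giving 17.
 */
static void
example_tag_prefix(char *buf, size_t buflen, uint64_t id)
{
	/* writes at most MAX_TAG_PREFIX_LEN characters plus the NUL */
	(void) snprintf(buf, buflen, "%llx-", (u_longlong_t)id);
}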
static int
{
int error = 0;
return (ENOTSUP);
if (!dsl_dataset_is_snapshot(ds))
return (EINVAL);
/* tags must be unique */
if (error == 0)
error = 0;
}
return (error);
}
static void
{
/*
* This is the first user hold for this dataset. Create
* the userrefs zap object.
*/
} else {
}
ds->ds_userrefs++;
}
}
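
/*
 * Sketch of the first-hold path described above (a reconstruction;
 * zap_create() and zap_add() are real MOS interfaces, and
 * DMU_OT_USERREFS is the object type used for hold tags):
 */
static void
example_add_userref(objset_t *mos, dsl_dataset_t *ds, const char *htag,
    uint64_t now, dmu_tx_t *tx)
{
	uint64_t zapobj;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	if (ds->ds_phys->ds_userrefs_obj == 0) {
		/* first user hold: create the userrefs zap object */
		zapobj = ds->ds_phys->ds_userrefs_obj =
		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
	} else {
		zapobj = ds->ds_phys->ds_userrefs_obj;
	}
	ds->ds_userrefs++;
	(void) zap_add(mos, zapobj, htag, 8, 1, &now, tx);
}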
static int
{
int error;
char *name;
/* alloc a buffer to hold dsname@snapname plus terminating NULL */
if (error == 0) {
error = 0;
} else {
}
return (error);
}
int
{
struct dsl_ds_holdarg *ha;
int error;
return (error);
}
int
{
struct dsl_ds_holdarg *ha;
int error;
if (cleanup_fd != -1) {
/* Currently we only support cleanup-on-exit of tempholds. */
if (!temphold)
return (EINVAL);
if (error)
return (error);
}
if (error) {
if (cleanup_fd != -1)
return (error);
}
if (recursive) {
} else {
}
if (error == 0)
/*
* If this hold is to be released upon process exit,
* register that action now.
*/
}
}
if (error)
if (cleanup_fd != -1)
return (error);
}
struct dsl_ds_releasearg {
const char *htag;
};
static int
{
int error;
*might_destroy = B_FALSE;
if (zapobj == 0) {
/* The tag can't possibly exist */
return (ESRCH);
}
/* Make sure the tag exists */
if (error) {
return (error);
}
*might_destroy = B_TRUE;
return (0);
}
static int
{
int error;
return (ENOTSUP);
if (error)
return (error);
if (might_destroy) {
struct dsl_ds_destroyarg dsda = {0};
if (dmu_tx_is_syncing(tx)) {
/*
* If we're not prepared to remove the snapshot,
* we can't allow the release to happen right now.
*/
return (EBUSY);
}
}
return (0);
}
static void
{
int error;
ds->ds_userrefs--;
struct dsl_ds_destroyarg dsda = {0};
/* We already did the destroy_check */
}
}
static int
{
struct dsl_ds_releasearg *ra;
int error;
char *name;
/* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
return (0);
if (error)
return (error);
if (error) {
return (error);
}
if (might_destroy) {
#ifdef _KERNEL
if (error) {
return (error);
}
#endif
return (EBUSY);
} else {
}
}
return (0);
}
int
{
struct dsl_ds_holdarg *ha;
int error;
top:
if (error) {
return (error);
}
if (recursive) {
} else {
}
if (error == 0)
else
}
/*
* We can get EBUSY if we were racing with deferred destroy and
* dsl_dataset_user_release_check() hadn't done the necessary
* open context setup. We can also get EBUSY if we're racing
* with destroy and that thread is the ds_owner. Either way
* the busy condition should be transient, and we should retry
* the release operation.
*/
goto top;
return (error);
}
/*
* Called at spa_load time (with retry == B_FALSE) to release a stale
* temporary user hold. Also called by the onexit code (with retry == B_TRUE).
*/
int
{
char *snap;
char *name;
int namelen;
int error;
do {
if (error)
return (error);
*snap = '\0';
++snap;
/*
* The object can't have been destroyed because we have a hold,
* but it might have been renamed, resulting in ENOENT. Retry
* if we've been requested to do so.
*
* It would be nice if we could use the dsobj all the way
* through and avoid ENOENT entirely. But we might need to
* unmount the snapshot, and there's currently no way to lookup
* a vfsp using a ZFS object id.
*/
return (error);
}
int
{
int err;
if (err)
return (err);
zap_cursor_advance(&zc)) {
za->za_first_integer));
}
}
return (0);
}
/*
 * Note, this function is used as the callback for dmu_objset_find(). We
* always return 0 so that we will continue to find and process
* inconsistent datasets, even if we encounter an error trying to
* process one of them.
*/
/* ARGSUSED */
int
{
if (DS_IS_INCONSISTENT(ds))
else
}
return (0);
}
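
/*
 * Usage sketch (an assumption about the call site; the callback's
 * name is elided above, so dsl_destroy_inconsistent() is inferred
 * from convention): returning 0 above is what lets the traversal
 * visit every dataset even after a failure.
 */
static void
example_destroy_all_inconsistent(spa_t *spa)
{
	(void) dmu_objset_find(spa_name(spa),
	    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
}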