dmu_objset.c revision d5285cae913f4e01ffa0e6693a6d8ef1fbea30ba
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_onexit.h>
/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
/*
 * Subsystem initialization hook, paired with dmu_objset_fini().
 * NOTE(review): the body is empty in this excerpt; upstream revisions
 * initialize the global os_lock rwlock here -- confirm against the
 * complete source before relying on this being a no-op.
 */
void
dmu_objset_init(void)
{
}
/*
 * Subsystem teardown hook, paired with dmu_objset_init().
 * NOTE(review): body is empty in this excerpt; upstream revisions destroy
 * the global os_lock rwlock here -- confirm against the complete source.
 */
void
dmu_objset_fini(void)
{
}
spa_t *
{
}
zilog_t *
{
}
{
else
}
{
return (os->os_dsl_dataset);
}
{
}
void
{
}
{
}
{
}
{
return (os->os_logbias);
}
static void
{
/*
* Inheritance should have been done by now.
*/
}
static void
{
/*
* Inheritance and range checking should have been done by now.
*/
}
static void
{
/*
* Inheritance and range checking should have been done by now.
*/
}
static void
{
enum zio_checksum checksum;
/*
* Inheritance should have been done by now.
*/
}
static void
{
/*
* Inheritance and range checking should have been done by now.
*/
newval == ZFS_CACHE_METADATA);
}
static void
{
/*
* Inheritance and range checking should have been done by now.
*/
newval == ZFS_CACHE_METADATA);
}
static void
{
/*
* Inheritance and range checking should have been done by now.
*/
newval == ZFS_SYNC_DISABLED);
}
static void
{
}
void
{
if (size == sizeof (objset_phys_t)) {
}
}
int
{
int i, err;
if (DMU_OS_IS_L2CACHEABLE(os))
aflags |= ARC_L2CACHE;
if (err) {
/* convert checksum errors into IO errors */
return (err);
}
/* Increase the blocksize if we are permitted. */
&os->os_phys_buf);
}
} else {
sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
}
/*
* Note: the changed_cb will be called once before the register
* func returns, thus changing the checksum/compression from the
*/
if (ds) {
if (err == 0)
if (!dsl_dataset_is_snapshot(ds)) {
if (err == 0)
if (err == 0)
if (err == 0)
if (err == 0)
if (err == 0)
if (err == 0)
}
if (err) {
return (err);
}
/* It's the meta-objset. */
os->os_dedup_verify = 0;
os->os_logbias = 0;
}
for (i = 0; i < TXG_SIZE; i++) {
}
&os->os_meta_dnode);
&os->os_userused_dnode);
&os->os_groupused_dnode);
}
/*
* We should be the only thread trying to do this because we
* have ds_opening_lock
*/
if (ds) {
}
return (0);
}
int
{
int err = 0;
}
return (err);
}
/* called from zpl */
int
{
int err;
if (err)
return (err);
if (err)
return (err);
}
/* called from zpl */
int
{
int err;
if (err)
return (err);
if (err) {
return (EINVAL);
return (EROFS);
}
return (err);
}
void
{
}
void
{
}
int
{
/* process the mdn last, since the other dnodes have holds on it */
/*
* Find the first dnode with holds. We have to do this dance
* because dnode_add_ref() only works if you already have a
* hold. If there are no holds then it has no dbufs so OK to
* skip.
*/
continue;
while (dn) {
do {
}
}
void
{
for (int t = 0; t < TXG_SIZE; t++)
if (ds) {
if (!dsl_dataset_is_snapshot(ds)) {
copies_changed_cb, os));
dedup_changed_cb, os));
logbias_changed_cb, os));
sync_changed_cb, os));
}
}
/*
* We should need only a single pass over the dnode list, since
* nothing can be added to the list at this point.
*/
(void) dmu_objset_evict_dbufs(os);
if (DMU_USERUSED_DNODE(os)) {
}
/*
* This is a barrier to prevent the objset from going away in
* dnode_move() until we can safely ensure that the objset is still in
* use. We consider the objset valid before the barrier and invalid
* after the barrier.
*/
}
{
}
/* called from dsl for meta-objset */
objset_t *
{
else
/*
* We don't want to have to increase the meta-dnode's nlevels
* later, because then we could do it in quiescing context while
* we are also accessing it in open context.
*
* This precaution is not necessary for the MOS (ds == NULL),
* because the MOS is only updated in syncing context.
* This is most fortunate: the MOS is the only objset that
* needs to be synced multiple times as spa_sync() iterates
* to convergence, so minimizing its dn_nlevels matters.
*/
int levels = 1;
/*
* Determine the number of levels necessary for the meta-dnode
* to contain DN_MAX_OBJECT dnodes.
*/
DN_MAX_OBJECT * sizeof (dnode_phys_t))
levels++;
}
if (dmu_objset_userused_enabled(os)) {
}
return (os);
}
struct oscarg {
void *userarg;
const char *lastname;
};
/*ARGSUSED*/
static int
{
int err;
/* You can't clone across pools. */
return (EXDEV);
/* You can only clone snapshots, not the head datasets. */
return (EINVAL);
}
return (0);
}
static void
{
if (BP_IS_HOLE(bp)) {
}
} else {
char namebuf[MAXNAMELEN];
}
}
int
{
const char *tail;
int err = 0;
if (err)
return (err);
return (EEXIST);
}
return (err);
}
int
{
const char *tail;
int err = 0;
if (err)
return (err);
return (EEXIST);
}
return (err);
}
int
{
int error;
if (error == 0) {
/* dsl_dataset_destroy() closes the ds. */
}
return (error);
}
typedef struct snapallarg {
/* the following are used only if 'temporary' is set: */
const char *saa_htag;
struct dsl_ds_holdarg *saa_ha;
} snapallarg_t;
typedef struct snaponearg {
const char *soa_longname; /* long snap name */
const char *soa_snapname; /* short snap name */
} snaponearg_t;
static int
{
int error;
/* The props have already been checked by zfs_check_userprops(). */
if (error)
return (error);
if (saa->saa_temporary) {
/*
* Ideally we would just call
* dsl_dataset_user_hold_check() and
* dsl_dataset_destroy_check() here. However the
* dataset we want to hold and destroy is the snapshot
* that we just confirmed we can create, but it won't
* exist until after these checks are run. Do any
* checks we can here and if more checks are added to
* those routines in the future, similar checks may be
* necessary here.
*/
return (ENOTSUP);
/*
* Not checking number of tags because the tag will be
* unique, as it will be the only tag.
*/
return (E2BIG);
KM_SLEEP);
}
return (error);
}
static void
{
}
if (saa->saa_temporary) {
struct dsl_ds_destroyarg da;
}
}
static int
{
char fsname[MAXPATHLEN];
int err;
if (err != 0)
return (err);
/*
* If the objset is in an inconsistent state (eg, in the process
* of being destroyed), don't snapshot it.
*/
return (EBUSY);
}
if (saa->saa_needsuspend) {
if (err) {
return (err);
}
}
return (0);
}
/*
* The snapshots must all be in the same pool.
*/
int
{
snapallarg_t saa = { 0 };
int rv = 0;
int err;
return (0);
if (err)
return (err);
if (err != 0) {
}
}
}
/*
* If any call to snapshot_one_impl() failed, don't execute the
* sync task. The error handling code below will clean up the
* snaponearg_t from any successful calls to
* snapshot_one_impl().
*/
if (rv == 0)
if (err != 0)
}
}
if (saa.saa_needsuspend)
}
return (rv);
}
int
{
int err;
return (err);
}
int
{
snapallarg_t saa = { 0 };
int err;
if (err)
return (err);
if (cleanup_fd < 0) {
return (EINVAL);
}
return (err);
}
if (err == 0)
if (saa.saa_needsuspend)
}
return (err);
}
static void
{
/*
* Initialize dn_zio outside dnode_sync() because the
* meta-dnode needs to set it outside dnode_sync().
*/
if (newlist) {
}
}
}
/* ARGSUSED */
static void
{
/*
* Update rootbp fill count: it should be the number of objects
* allocated in the object set (not counting the "special"
* objects that are stored in the objset_phys_t -- the meta
*/
for (int i = 0; i < dnp->dn_nblkptr; i++)
}
/* ARGSUSED */
static void
{
} else {
}
}
/* called from dsl */
void
{
int txgoff;
/* XXX the write_done callback should really give us the tx... */
/*
* This is the MOS. If we have upgraded,
* spa_max_replication() could change, so reset
* os_copies here.
*/
}
/*
* Create the root block IO
*/
/*
* Sync special dnodes - the parent IO for the sync is the root block
*/
if (DMU_USERUSED_DNODE(os) &&
}
if (dmu_objset_userused_enabled(os)) {
/*
* We must create the list here because it uses the
* dn_dirty_link[] of this txg.
*/
}
}
/*
* Free intent log blocks up to this tx.
*/
}
{
}
void
{
}
{
}
static void
{
if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
if (subtract)
}
}
void
{
int flags;
}
/*
* We intentionally modify the zap object even if the
* net delta is zero. Otherwise
* the block of the zap obj could be shared between
* datasets but need to be different between them after
* a bprewrite.
*/
if (flags & DN_ID_OLD_EXIST) {
}
if (flags & DN_ID_NEW_EXIST) {
}
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
if (dn->dn_bonuslen == 0)
else
}
}
}
/*
*
* If a dirty record for transaction group that is syncing can't
* be found then NULL is returned. In the NULL case it is assumed
*/
static void *
{
void *data;
if (db->db_dirtycnt == 0)
break;
} else {
if (dn->dn_bonuslen == 0 &&
else
}
return (data);
}
void
{
int error;
return;
return;
} else {
}
int rf = 0;
rf |= DB_RF_HAVESTRUCT;
have_spill = B_TRUE;
} else {
return;
}
if (before) {
} else if (data) {
}
/*
* Must always call the callback in case the object
* type has changed and that type isn't an object type to track
*/
/*
* If we don't know what the old values are then just assign
* them to 0, since that is a new file being created.
*/
if (flags & DN_ID_OLD_EXIST) {
} else {
}
error = 0;
}
if (db)
if (have_spill) {
} else {
}
if (have_spill)
}
{
}
int
{
int err = 0;
return (0);
if (!dmu_objset_userused_enabled(os))
return (ENOTSUP);
if (dmu_objset_is_snapshot(os))
return (EINVAL);
/*
* We simply need to mark every object dirty, so that it will be
* synced out and now accounted. If this is called
* concurrently, or if we already did some work before crashing,
* that's fine, since we track each object's accounted state
* independently.
*/
int objerr;
return (EINTR);
if (objerr)
continue;
if (objerr) {
continue;
}
}
return (0);
}
void
{
}
{
}
void
{
if (os->os_dsl_dataset)
}
void
{
}
int
{
else
return (B_FALSE);
}
int
{
return (ENOENT);
}
int
{
return (ENOENT);
return (ENOENT);
}
return (ENAMETOOLONG);
}
if (idp)
if (case_conflict)
return (0);
}
int
{
/* there is no next dir on a snapshot! */
return (ENOENT);
return (ENOENT);
}
return (ENAMETOOLONG);
}
if (idp)
return (0);
}
struct findarg {
int (*func)(const char *, void *);
void *arg;
};
/* ARGSUSED */
static int
{
}
/*
* Find all objsets under name, and for each, call 'func(child_name, arg)'.
* Perhaps change all callers to use dmu_objset_find_spa()?
*/
int
int flags)
{
}
/*
* Find all objsets under name, call func on each
*/
int
{
dsl_pool_t *dp;
char *child;
int err;
if (err)
return (err);
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
return (0);
}
/*
* Iterate over all children.
*/
if (flags & DS_FIND_CHILDREN) {
(void) zap_cursor_advance(&zc)) {
if (err)
break;
}
if (err) {
return (err);
}
}
/*
* Iterate over all snapshots.
*/
if (flags & DS_FIND_SNAPSHOTS) {
if (!dsl_pool_sync_context(dp))
if (!dsl_pool_sync_context(dp))
if (err == 0) {
(void) zap_cursor_advance(&zc)) {
sizeof (uint64_t));
if (err)
break;
}
}
}
if (err)
return (err);
/*
* Apply to self if appropriate.
*/
return (err);
}
/* ARGSUSED */
int
{
return (0);
}
}
return (0);
}
void
{
}
void *
{
return (os->os_user_ptr);
}