dsl_scrub.c revision 9fb35deba8248caea6ddae3a5c4c2952283f8603
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
static scrub_cb_t dsl_pool_scrub_clean_cb;
extern int zfs_txg_timeout;
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
        NULL,
        dsl_pool_scrub_clean_cb
};
/* ARGSUSED */
static void
{
dmu_object_type_t ot = 0;
dp->dp_scrub_min_txg = 0;
if (*funcp == SCRUB_FUNC_CLEAN) {
/* rewrite all disk labels */
if (vdev_resilver_needed(rvd,
} else {
}
/* zero out the scrub stats in all vdev_stat_t's */
/*
* If this is an incremental scrub, limit the DDT scrub phase
* to just the auto-ditto class (for correctness); the rest
* of the scrub should go faster using top-down pruning.
*/
}
/* back to the generic stuff */
dp->dp_blkstats =
}
DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t),
DMU_POOL_SCRUB_DDT_BOOKMARK, sizeof (uint64_t),
"func=%u mintxg=%llu maxtxg=%llu",
}
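/*
 * Illustrative sketch (not the kernel code): the comment in the setup code
 * above says an incremental scrub limits the DDT phase to just the
 * auto-ditto class.  The enum and cutoff below are assumed stand-ins for
 * the real ddt_class definitions, shown only to make that decision concrete.
 */
enum example_ddt_class {
        EXAMPLE_DDT_CLASS_DITTO = 0,    /* auto-ditto, most replicated */
        EXAMPLE_DDT_CLASS_DUPLICATE,    /* refcnt > 1 */
        EXAMPLE_DDT_CLASS_UNIQUE        /* refcnt == 1 */
};
/*
 * Full scrub (min_txg == 0): walk every class holding refcnt > 1 blocks.
 * Incremental scrub: only the auto-ditto class needs the DDT pass; the
 * rest is reached faster by top-down pruning.
 */
static enum example_ddt_class
example_scrub_ddt_class_max(unsigned long long scrub_min_txg)
{
        return (scrub_min_txg == 0 ?
            EXAMPLE_DDT_CLASS_DUPLICATE : EXAMPLE_DDT_CLASS_DITTO);
}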
int
{
}
/* ARGSUSED */
static void
{
return;
if (dp->dp_scrub_restart) {
}
/* XXX this is scrub-clean specific */
}
dp->dp_scrub_queue_obj = 0;
"complete=%u", *completep);
/* below is scrub-clean specific */
*completep);
/*
* Whether it succeeded or not, vacate all temporary scrub DTLs.
*/
if (*completep)
/*
* We may have finished replacing a device.
* Let the async thread assess this and handle the detach.
*/
}
int
{
}
void
{
/*
* This function will be used by bp-rewrite wad to intercept frees.
*/
}
static boolean_t
{
}
/* dnp is the dnode for zb1->zb_object */
static boolean_t
const zbookmark_t *zb2)
{
/*
* A bookmark in the deadlist is considered to be after
* everything else.
*/
return (B_TRUE);
/* The objset_phys_t isn't before anything. */
return (B_FALSE);
return (nextobj <= zb2thisobj);
}
return (B_TRUE);
return (B_FALSE);
return (B_FALSE);
}
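/*
 * Minimal sketch of the bookmark ordering rules stated in the comments
 * above: a deadlist bookmark sorts after everything, and the objset_phys_t
 * sorts before nothing.  The struct, sentinel, and lexicographic compare
 * are illustrative assumptions; the real function also accounts for
 * indirect-block fan-out using the dnode's block sizes.
 */
struct example_bookmark {
        unsigned long long eb_objset;
        unsigned long long eb_object;
        unsigned long long eb_blkid;
};
#define EXAMPLE_DEADLIST_OBJECT ((unsigned long long)-1) /* assumed sentinel */
/* Returns nonzero if zb1 sorts entirely before zb2. */
static int
example_bookmark_is_before(int zb1_is_objset_phys,
    const struct example_bookmark *zb1, const struct example_bookmark *zb2)
{
        /* A bookmark in the deadlist is considered to be after everything. */
        if (zb2->eb_object == EXAMPLE_DEADLIST_OBJECT)
                return (1);
        /* The objset_phys_t isn't before anything. */
        if (zb1_is_objset_phys)
                return (0);
        if (zb1->eb_object != zb2->eb_object)
                return (zb1->eb_object < zb2->eb_object);
        return (zb1->eb_blkid < zb2->eb_blkid);
}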
static boolean_t
{
int mintime;
if (dp->dp_scrub_pausing)
return (B_TRUE); /* we're already pausing */
return (B_FALSE); /* we're resuming */
/* We only know how to resume from level-0 blocks. */
return (B_FALSE);
if (zb) {
dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
}
if (ddb) {
dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
}
return (B_TRUE);
}
return (B_FALSE);
}
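/*
 * A hedged sketch of the time-based pause decision made above: pause once
 * the scrub has used up its minimum slice and the txg sync is waiting, or
 * once it runs up against the txg timeout.  Names and millisecond units
 * are illustrative; the kernel works in clock ticks against
 * zfs_txg_timeout and the scrub/resilver minimum time.
 */
static int
example_scrub_should_pause(unsigned long long elapsed_ms,
    unsigned long long min_ms, unsigned long long txg_timeout_ms,
    int txg_sync_waiting)
{
        if (elapsed_ms > txg_timeout_ms)
                return (1);     /* we've overstayed the whole txg */
        if (elapsed_ms > min_ms && txg_sync_waiting)
                return (1);     /* used our slice and sync is waiting */
        return (0);
}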
typedef struct zil_traverse_arg {
/* ARGSUSED */
static int
{
return (0);
/*
* One block (the "stubby") may have been allocated a long time ago;
* we want to visit it because it has been allocated (on-disk) even
* if it hasn't been claimed (even though for a plain scrub there's
* nothing to do to it).
*/
return (0);
return (0);
}
/* ARGSUSED */
static int
{
return (0);
/*
* The birth txg can be < claim_txg if this record's txg has
* already been synced out (but this log block contains other
* records that have not been synced).
*/
return (0);
}
return (0);
}
static void
{
/*
* We only want to visit blocks that have been claimed but not yet
* replayed (or, in read-only mode, blocks that *would* be claimed).
*/
return;
}
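/*
 * The ZIL callbacks above filter blocks by birth txg relative to the claim
 * txg.  The sketch below restates that filter for a log record with
 * illustrative parameter names (not the real lr_write_t plumbing): skip
 * blocks covered by an earlier scrub pass, logs that were never claimed,
 * and records whose txg was already synced out even though the surrounding
 * log block still holds unsynced records.
 */
static int
example_zil_record_needs_visit(unsigned long long blk_birth,
    unsigned long long claim_txg, unsigned long long scrub_min_txg)
{
        if (blk_birth <= scrub_min_txg)
                return (0);     /* covered by an earlier (incremental) pass */
        if (claim_txg == 0 || blk_birth < claim_txg)
                return (0);     /* unclaimed log, or record already synced */
        return (1);
}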
static void
{
return;
return;
}
static void
{
int err;
return;
return;
/*
* If we already visited this bp & everything below (in
* a prior txg), don't bother doing it again.
*/
return;
/*
* If we found the block we're trying to resume from, or
* we went past it to a different object, zero it out to
* indicate that it's OK to start checking for pausing
* again.
*/
dprintf("resuming at %llx/%llx/%llx/%llx\n",
}
}
/*
* If dsl_pool_scrub_ddt() has already scrubbed this block,
* don't scrub it again.
*/
if (BP_GET_LEVEL(bp) > 0) {
int i;
if (err) {
return;
}
}
}
int i, j;
if (err) {
return;
}
for (j = 0; j < cdnp->dn_nblkptr; j++) {
}
}
}
if (err) {
return;
}
}
}
if (buf)
}
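/*
 * Simplified sketch of the resume handling described in the comments in
 * the function above: once the saved bookmark (or a later object in the
 * same objset) is reached, clear it so pause checks may resume.  The
 * struct and names are illustrative, not the real zbookmark_t handling.
 */
struct example_resume_bookmark {
        unsigned long long rb_objset;
        unsigned long long rb_object;
        unsigned long long rb_blkid;
};
/* Returns nonzero once it is OK to start honoring pause requests again. */
static int
example_resume_point_reached(struct example_resume_bookmark *saved,
    unsigned long long objset, unsigned long long object,
    unsigned long long blkid)
{
        if ((saved->rb_objset == objset && saved->rb_object == object &&
            saved->rb_blkid == blkid) ||
            (saved->rb_objset == objset && saved->rb_object != object)) {
                saved->rb_objset = 0;   /* clear the saved bookmark */
                saved->rb_object = 0;
                saved->rb_blkid = 0;
                return (1);
        }
        return (0);
}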
static void
{
int j;
for (j = 0; j < dnp->dn_nblkptr; j++) {
}
}
static void
{
}
void
{
return;
ZB_DESTROYED_OBJSET, 0, 0, 0);
return;
}
}
}
void
{
return;
}
}
void
{
return;
}
/* Both were there to begin with */
}
}
}
struct enqueue_clones_arg {
};
/* ARGSUSED */
static int
{
int err;
dsl_pool_t *dp;
if (err)
return (err);
if (err)
return (err);
}
}
return (0);
}
static void
{
/*
* Iterate over the bps in this ds.
*/
if (dp->dp_scrub_pausing)
goto out;
/*
* Add descendant datasets to the work queue.
*/
}
/*
* A bug in a previous version of the code could
* cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a
* missing entry. Therefore we can only use the
* next_clones_obj when its count is correct.
*/
if (err == 0 &&
}
if (usenext) {
} else {
struct enqueue_clones_arg eca;
}
}
out:
}
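/*
 * Illustrative restatement of the fast-path/slow-path choice discussed in
 * the comment above: trust the precomputed clone list only when its
 * recorded entry count matches the number of clones we expect; otherwise
 * fall back to scanning every dataset and matching on its origin.  The
 * parameter names are assumptions, not the real ZAP/dsl_dataset fields.
 */
static int
example_use_next_clones(int count_err, unsigned long long recorded_count,
    unsigned long long expected_clones)
{
        return (count_err == 0 && recorded_count == expected_clones);
}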
/* ARGSUSED */
static int
{
int err;
dsl_pool_t *dp;
if (err)
return (err);
if (err) {
return (err);
}
/*
* If this is a clone, we don't need to worry about it for now.
*/
return (0);
}
}
return (0);
}
/*
* Scrub/dedup interaction.
*
* If there are N references to a deduped block, we don't want to scrub it
* N times -- ideally, we should scrub it exactly once.
*
* We leverage the fact that the dde's replication class (enum ddt_class)
* is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
* (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
*
* To prevent excess scrubbing, the scrub begins by walking the DDT
* to find all blocks with refcnt > 1, and scrubs each of these once.
* Since there are two replication classes which contain blocks with
* refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
* Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
*
* There would be nothing more to say if a block's refcnt couldn't change
* during a scrub, but of course it can, so we must account for changes
* in a block's replication class.
*
* Here's an example of what can occur:
*
* If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
* when visited during the top-down scrub phase, it will be scrubbed twice.
* This negates our scrub optimization, but is otherwise harmless.
*
* If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
* on each visit during the top-down scrub phase, it will never be scrubbed.
* To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
* reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
* DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
* while a scrub is in progress, it scrubs the block right then.
*/
static void
{
int error;
return;
return;
}
}
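/*
 * A compact illustration of the ordering argument in the scrub/dedup
 * comment above.  The enum below is an assumed stand-in for the real
 * ddt_class ordering, shown only to make the phase split concrete: the
 * DDT phase takes every class up to and including DUPLICATE (all blocks
 * with refcnt > 1), and the top-down phase only issues I/O for blocks it
 * finds with refcnt == 1, so each block is scrubbed once as long as its
 * class doesn't change mid-scrub.
 */
enum example_ddt_class_order {
        EXAMPLE_CLASS_DITTO = 0,        /* most replicated (auto-ditto) */
        EXAMPLE_CLASS_DUPLICATE,        /* refcnt > 1 */
        EXAMPLE_CLASS_UNIQUE            /* refcnt == 1 */
};
static int
example_ddt_phase_scrubs(enum example_ddt_class_order c)
{
        return (c <= EXAMPLE_CLASS_DUPLICATE);
}
static int
example_topdown_phase_scrubs(unsigned long long refcnt)
{
        return (refcnt == 1);
}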
void
const ddt_entry_t *dde)
{
zbookmark_t zb = { 0 };
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
}
}
void
{
return;
/*
* If the pool is not loaded, or is trying to unload, leave it alone.
*/
return;
if (dp->dp_scrub_restart) {
}
/*
* We must have resumed after rebooting; reset the vdev
* stats so they reflect that we're doing a scrub (although
* they will think we're just starting now).
*/
}
if (dp->dp_scrub_pausing)
goto out;
}
/* First do the MOS & ORIGIN */
if (dp->dp_scrub_pausing)
goto out;
} else {
}
/*
* If we were paused, continue from here. Note that if the ds
* we were paused on was destroyed, the zb_objset will be
* ZB_DESTROYED_OBJSET, so we will skip this and find a new
* objset below.
*/
if (dp->dp_scrub_pausing)
goto out;
}
/*
* In case we were paused right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
/* keep pulling things out of the zap-object-as-queue */
if (dp->dp_scrub_pausing)
break;
}
if (dp->dp_scrub_pausing)
goto out;
/* done. */
return;
out:
DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t),
DMU_POOL_SCRUB_DDT_BOOKMARK, sizeof (uint64_t),
}
void
{
}
/*
* scrub consumers
*/
static void
{
int i;
/*
* If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case.
*/
return;
for (i = 0; i < 4; i++) {
int equal;
switch (BP_GET_NDVAS(bp)) {
case 2:
break;
case 3:
if (equal == 1)
else if (equal == 3)
break;
}
}
}
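/*
 * The switch above classifies multi-copy blocks by how many of their DVAs
 * landed on the same top-level vdev.  A standalone version of that pair
 * count follows; the array of vdev ids is an illustrative stand-in for
 * DVA_GET_VDEV() on the block pointer's DVAs.  With three copies the
 * answer is 0, 1, or 3 -- exactly 2 is impossible, since two matching
 * pairs force the third pair to match as well.
 */
static int
example_same_vdev_pairs(const unsigned long long *vdevs, int ndvas)
{
        int equal = 0;

        if (ndvas >= 2)
                equal += (vdevs[0] == vdevs[1]);
        if (ndvas >= 3) {
                equal += (vdevs[0] == vdevs[2]);
                equal += (vdevs[1] == vdevs[2]);
        }
        return (equal);
}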
static void
{
spa->spa_scrub_errors++;
}
static int
{
int zio_priority;
return (0);
if (dp->dp_scrub_isresilver == 0) {
/* It's a scrub */
} else {
/* It's a resilver */
}
/* If it's an intent log block, failure is expected. */
for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
/*
* Keep track of how much data we've examined so that
* zpool(1M) status can make useful progress reports.
*/
/* if it's a resilver, this may not be in the target range */
if (!needs_io) {
/*
* Gang members may be spread across multiple
* vdevs, so the best estimate we have is the
* scrub range, which has already been checked.
* XXX -- it would be better to change our
* allocation policy to ensure that all
* gang members reside on the same vdev.
*/
} else {
phys_birth, 1);
}
}
}
if (needs_io && !zfs_no_scrub_io) {
}
/* do not relocate this block */
return (0);
}
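/*
 * Hedged sketch of the needs_io decision described in the comments above:
 * a scrub reads everything, while a resilver only needs I/O if some copy's
 * vdev is missing the block's birth txg, with gang blocks treated
 * conservatively because their members may span vdevs.  The dtl_contains
 * callback stands in for the real per-vdev DTL query, and the per-copy
 * gang flag is collapsed into one argument; these are assumptions.
 */
typedef int (*example_dtl_contains_t)(unsigned long long vdev,
    unsigned long long txg, void *arg);
static int
example_block_needs_io(int is_resilver, int is_gang, int ndvas,
    const unsigned long long *dva_vdevs, unsigned long long phys_birth,
    example_dtl_contains_t dtl_contains, void *arg)
{
        int d;

        if (!is_resilver)
                return (1);     /* scrub: always read the block */
        if (is_gang)
                return (1);     /* members may be spread across vdevs */
        for (d = 0; d < ndvas; d++) {
                if (dtl_contains(dva_vdevs[d], phys_birth, arg))
                        return (1);
        }
        return (0);
}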
int
{
/*
* Purge all vdev caches and probe all devices. We do this here
* rather than in sync context because this requires a writer lock
* on the spa_config lock, which we can't do from sync context. The
* spa_scrub_reopen flag indicates that vdev_open() should not
* attempt to start another scrub.
*/
}
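/*
 * The comment above describes reopening all vdevs under a writer hold on
 * the config lock, with a flag telling the reopen path not to start
 * another scrub.  The sketch below shows that flag-guarded reopen pattern
 * in the abstract; the callbacks and fields are illustrative stand-ins,
 * not the spa_config_enter()/spa_config_exit() API.
 */
struct example_reopen_ctx {
        void (*rc_config_enter_writer)(void *);
        void (*rc_config_exit)(void *);
        void (*rc_reopen_all_vdevs)(void *);
        int *rc_scrub_reopen_flag;      /* analogous to spa_scrub_reopen */
        void *rc_arg;
};
static void
example_purge_and_reopen(struct example_reopen_ctx *rc)
{
        /*
         * The writer hold is why this can't run in sync context; the flag
         * tells the reopen path not to start another scrub as a side
         * effect of reopening the vdevs.
         */
        rc->rc_config_enter_writer(rc->rc_arg);
        *rc->rc_scrub_reopen_flag = 1;
        rc->rc_reopen_all_vdevs(rc->rc_arg);
        *rc->rc_scrub_reopen_flag = 0;
        rc->rc_config_exit(rc->rc_arg);
}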