/* dsl_scrub.c revision c33e334fd3eb2b3d91c4b9667d7a465b6924e8d3 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;
static void scrub_visitdnode(dsl_pool_t *dp, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t objset, uint64_t object);

int zfs_scrub_min_time = 1;	/* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3;	/* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE;	/* set to disable scrub i/o */

extern int zfs_txg_timeout;

/* one callback per scrub func; SCRUB_FUNC_NONE has none */
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
	NULL,
	dsl_pool_scrub_clean_cb
};

#define	SET_BOOKMARK(zb, objset, object, level, blkid)	\
{							\
	(zb)->zb_objset = objset;			\
	(zb)->zb_object = object;			\
	(zb)->zb_level = level;				\
	(zb)->zb_blkid = blkid;				\
}
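/*
 * Overview: dsl_pool_scrub_setup() kicks off a scrub by creating the
 * scrub queue object and recording the scrub state in the MOS; from
 * then on dsl_pool_scrub_sync() runs a slice of the traversal in each
 * txg sync, recording its position in dp_scrub_bookmark whenever it
 * has to pause, until the traversal completes and the state is torn
 * down by dsl_pool_scrub_cancel_sync().
 */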
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	enum scrub_func *funcp = arg2;
	dmu_object_type_t ot = 0;

	ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);

	dp->dp_scrub_min_txg = 0;
	dp->dp_scrub_max_txg = tx->tx_txg;

	if (*funcp == SCRUB_FUNC_CLEAN) {
		vdev_t *rvd = dp->dp_spa->spa_root_vdev;

		/* rewrite all disk labels */
		vdev_config_dirty(rvd);

		if (vdev_resilver_needed(rvd,
		    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_RESILVER_START);
			dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
			    tx->tx_txg);
		} else {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_SCRUB_START);
		}

		/* zero out the scrub stats in all vdev_stat_t's */
		vdev_scrub_stat_update(rvd,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);

		dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
	}

	/* back to the generic stuff */
	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	dp->dp_scrub_func = *funcp;
	dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
	dp->dp_scrub_restart = B_FALSE;
	dp->dp_spa->spa_scrub_errors = 0;

	/*
	 * The scrub state (func, queue object, min/max txg, bookmark,
	 * errors) is also persisted into the MOS directory object here
	 * so that an in-progress scrub survives export and reboot.
	 */

	spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
	    "func=%u mintxg=%llu maxtxg=%llu", *funcp,
	    dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}
int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
	return (dsl_sync_task_do(dp, NULL,
	    dsl_pool_scrub_setup_sync, dp, &func, 0));
}
/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	boolean_t *completep = arg2;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	mutex_enter(&dp->dp_scrub_cancel_lock);

	if (dp->dp_scrub_restart) {
		dp->dp_scrub_restart = B_FALSE;
		*completep = B_FALSE;
	}

	/* XXX this is scrub-clean specific */
	mutex_enter(&dp->dp_spa->spa_scrub_lock);
	while (dp->dp_spa->spa_scrub_inflight > 0) {
		cv_wait(&dp->dp_spa->spa_scrub_io_cv,
		    &dp->dp_spa->spa_scrub_lock);
	}
	mutex_exit(&dp->dp_spa->spa_scrub_lock);

	dp->dp_scrub_func = SCRUB_FUNC_NONE;
	VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
	    dp->dp_scrub_queue_obj, tx));
	dp->dp_scrub_queue_obj = 0;
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

	spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
	    "complete=%u", *completep);

	/* below is scrub-clean specific */
	vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
	    *completep);
	/*
	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
	 */
	vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
	    *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
	if (*completep)
		spa_event_notify(dp->dp_spa, NULL, dp->dp_scrub_min_txg ?
		    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
	spa_errlog_rotate(dp->dp_spa);

	/*
	 * We may have finished replacing a device.
	 * Let the async thread assess this and handle the detach.
	 */
	spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

	mutex_exit(&dp->dp_scrub_cancel_lock);
}
int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
	boolean_t complete = B_FALSE;

	return (dsl_sync_task_do(dp, NULL,
	    dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}
int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	/*
	 * This function will be used by bp-rewrite wad to intercept frees.
	 */
	return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
	    done, private, arc_flags));
}
static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
	return (zb->zb_objset == 0 && zb->zb_object == 0 &&
	    zb->zb_level == 0 && zb->zb_blkid == 0);
}
/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
	uint64_t zb1nextL0, zb2thisobj;

	ASSERT(zb1->zb_objset == zb2->zb_objset);
	ASSERT(zb2->zb_level == 0);

	/*
	 * A bookmark in the deadlist is considered to be after
	 * everything else.
	 */
	if (zb2->zb_object == -1ULL)
		return (B_TRUE);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	zb1nextL0 = (zb1->zb_blkid + 1) <<
	    ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

	zb2thisobj = zb2->zb_object ? zb2->zb_object :
	    zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

	if (zb1->zb_object == 0) {
		uint64_t nextobj = zb1nextL0 *
		    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
		return (nextobj <= zb2thisobj);
	}

	if (zb1->zb_object < zb2thisobj)
		return (B_TRUE);
	if (zb1->zb_object > zb2thisobj)
		return (B_FALSE);
	if (zb2->zb_object == 0)
		return (B_FALSE);
	return (zb1nextL0 <= zb2->zb_blkid);
}
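/*
 * Example: with dn_indblkshift == 14 and SPA_BLKPTRSHIFT == 7, each
 * indirect block holds 1 << (14 - 7) == 128 block pointers, so a
 * level-1 bookmark at zb_blkid == 5 covers level-0 blkids 640..767
 * and zb1nextL0 == (5 + 1) << (1 * 7) == 768.
 */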
static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
	int elapsed_ticks;
	int mintime;

	if (dp->dp_scrub_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb->zb_level != 0)
		return (B_FALSE);

	mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
	    zfs_scrub_min_time;
	elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
	if (elapsed_ticks > hz * zfs_txg_timeout ||
	    (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
		dprintf("pausing at %llx/%llx/%llx/%llx\n",
		    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
		    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
		dp->dp_scrub_pausing = B_TRUE;
		dp->dp_scrub_bookmark = *zb;
		return (B_TRUE);
	}
	return (B_FALSE);
}
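/*
 * With the defaults above, a plain scrub thus gets at least
 * zfs_scrub_min_time (1) second of each txg sync and a resilver gets
 * zfs_resilver_min_time (3); either way the traversal yields once it
 * has held the sync for zfs_txg_timeout seconds.
 */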
typedef struct zil_traverse_arg {
	dsl_pool_t	*zta_dp;
	zil_header_t	*zta_zh;
} zil_traverse_arg_t;
/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_traverse_arg_t *zta = arg;
	dsl_pool_t *dp = zta->zta_dp;
	zil_header_t *zh = zta->zta_zh;
	zbookmark_t zb;

	if (bp->blk_birth <= dp->dp_scrub_min_txg)
		return;

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * plain scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return;

	zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
	VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}
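/*
 * That is, when the log has not been claimed (claim_txg == 0), only
 * blocks born before this pool was opened (the "stubby") make it past
 * the check above; anything newer would be claimed or freed by
 * zil_claim().
 */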
/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_traverse_arg_t *zta = arg;
		dsl_pool_t *dp = zta->zta_dp;
		zil_header_t *zh = zta->zta_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (bp->blk_birth <= dp->dp_scrub_min_txg)
			return;

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return;

		zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
		zb.zb_object = lr->lr_foid;
		zb.zb_level = BP_GET_LEVEL(bp);
		zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
		VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
	}
}
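/*
 * Only TX_WRITE records can reference an already-allocated block
 * (lr_blkptr); other record types carry their payload inside the log
 * record itself, so there is nothing separate for the scrub to visit.
 */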
static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_traverse_arg_t zta = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && (spa_mode & FWRITE))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);
	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
	    claim_txg);
	zil_free(zilog);
}
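/*
 * traverse_zil() is invoked from the DMU_OT_OBJSET case of
 * scrub_visitbp() below, so each dataset's intent log is scanned along
 * with the objset that owns it.
 */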
static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
	int err;
	arc_buf_t *buf = NULL;

	if (bp->blk_birth <= dp->dp_scrub_min_txg)
		return;

	if (scrub_pause(dp, zb))
		return;

	if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg), don't bother doing it again.
		 */
		if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
			return;

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
		}
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		int i;

		err = arc_read(NULL, dp->dp_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1, zb->zb_blkid * epb + i);
			scrub_visitbp(dp, dnp, buf, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		dnode_phys_t *cdnp;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		int i;

		err = arc_read(NULL, dp->dp_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			scrub_visitdnode(dp, cdnp, buf, zb->zb_objset,
			    zb->zb_blkid * epb + i);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;

		err = arc_read_nolock(NULL, dp->dp_spa, bp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		osp = buf->b_data;
		traverse_zil(dp, &osp->os_zil_header);
		scrub_visitdnode(dp, &osp->os_meta_dnode, buf,
		    zb->zb_objset, 0);
	}

	(void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);
}
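/*
 * The traversal above is depth-first: indirect blocks recurse into
 * their children, DMU_OT_DNODE blocks recurse into each dnode's block
 * tree, and a DMU_OT_OBJSET block visits its ZIL header and meta-dnode.
 * Bookmarks therefore advance monotonically, which is what makes the
 * pause/resume comparison in bookmark_is_before() work.
 */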
static void
scrub_visitdnode(dsl_pool_t *dp, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t objset, uint64_t object)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_t czb;

		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		scrub_visitbp(dp, dnp, buf, &dnp->dn_blkptr[j], &czb);
	}
}
static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
	scrub_visitbp(dp, NULL, NULL, bp, &zb);
}
void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
		/* we were paused on this ds; see dsl_pool_scrub_sync() */
		SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) != 0) {
		/* this ds was never queued; nothing to hand off */
		return;
	}

	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_next_snap_obj, tx) == 0);
	}
}
void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

	if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
		dp->dp_scrub_bookmark.zb_objset =
		    ds->ds_phys->ds_prev_snap_obj;
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) == 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
	}
}
void
dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object)
		dp->dp_scrub_bookmark.zb_objset = ds2->ds_object;
	else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object)
		dp->dp_scrub_bookmark.zb_objset = ds1->ds_object;

	if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds1->ds_object, tx) == 0) {
		int err = zap_add_int(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, ds2->ds_object, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int(dp->dp_meta_objset,
			    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
		}
	}
}
struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp;

	err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
	if (err)
		return (err);
	dp = ds->ds_dir->dd_pool;

	if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
		while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
			dsl_dataset_t *prev;

			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
			dsl_dataset_rele(ds, FTAG);
			if (err)
				return (err);
			ds = prev;
		}
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_object, eca->tx) == 0);
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_dataset_t *ds;
	uint64_t min_txg_save;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	/*
	 * Iterate over the bps in this ds.
	 */
	min_txg_save = dp->dp_scrub_min_txg;
	dp->dp_scrub_min_txg =
	    MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
	scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
	dp->dp_scrub_min_txg = min_txg_save;

	if (dp->dp_scrub_pausing)
		goto out;

	/*
	 * Add descendent datasets to work queue.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_next_snap_obj, tx) == 0);
	}
	if (ds->ds_phys->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;

		if (ds->ds_phys->ds_next_clones_obj != 0) {
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry. Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			uint64_t count;
			int err = zap_count(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == ds->ds_phys->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY(zap_join(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj,
			    dp->dp_scrub_queue_obj, tx) == 0);
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			(void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
			    NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}
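/*
 * Note that each dataset visit is bounded below by ds_prev_snap_txg,
 * so a snapshot's blocks are scanned once, with the dataset that first
 * owned them, rather than once per clone or descendent snapshot.
 */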
/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp;

	err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
	if (err)
		return (err);
	dp = ds->ds_dir->dd_pool;

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;

		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
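/*
 * enqueue_cb() is only used on pools that predate
 * SPA_VERSION_DSL_SCRUB (see dsl_pool_scrub_sync() below): without
 * next_clones bookkeeping we walk every dataset and enqueue the oldest
 * snapshot of each non-clone lineage directly.
 */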
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_t *spa = dp->dp_spa;
	zap_cursor_t zc;
	zap_attribute_t za;
	boolean_t complete = B_TRUE;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	/*
	 * If the pool is not loaded, or is trying to unload, leave it alone.
	 */
	if (spa->spa_load_state != SPA_LOAD_NONE || spa_shutting_down(spa))
		return;

	if (dp->dp_scrub_restart) {
		enum scrub_func func = dp->dp_scrub_func;
		dp->dp_scrub_restart = B_FALSE;
		dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
	}

	if (spa->spa_scrub_active == 0) {
		/*
		 * We must have resumed after rebooting; reset the vdev
		 * stats to know that we're doing a scrub (although it
		 * will think we're just starting now).
		 */
		vdev_scrub_stat_update(spa->spa_root_vdev,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);
		spa->spa_scrub_active = 1;
	}

	dp->dp_scrub_pausing = B_FALSE;
	dp->dp_scrub_start_time = lbolt64;
	dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);

	if (dp->dp_scrub_bookmark.zb_objset == 0) {
		/* First do the MOS & ORIGIN */
		scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
		if (dp->dp_scrub_pausing)
			goto out;

		if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY(0 == dmu_objset_find_spa(spa,
			    NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!dp->dp_scrub_pausing);
	} else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
		/*
		 * If we were paused, continue from here. Note if the
		 * ds we were paused on was deleted, the zb_objset will
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
		if (dp->dp_scrub_pausing)
			goto out;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    dp->dp_scrub_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		VERIFY(0 == zap_remove(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, za.za_name, tx));
		scrub_visitds(dp, za.za_first_integer, tx);
		if (dp->dp_scrub_pausing)
			break;
		zap_cursor_fini(&zc);
	}
	zap_cursor_fini(&zc);
	if (dp->dp_scrub_pausing)
		goto out;

	/* done. */
	dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
	return;
out:
	VERIFY(0 == zap_update(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCRUB_BOOKMARK,
	    sizeof (uint64_t), 4, &dp->dp_scrub_bookmark, tx));

	/* XXX this is scrub-clean specific */
	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_scrub_inflight > 0)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	mutex_exit(&spa->spa_scrub_lock);
}
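/*
 * dsl_pool_scrub_sync() is driven from the pool's sync path once per
 * txg while a scrub is active; when a full pass finally completes
 * without pausing, dsl_pool_scrub_cancel_sync() above tears the scrub
 * state back down and reports completion.
 */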
void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
	mutex_enter(&dp->dp_scrub_cancel_lock);
	dp->dp_scrub_restart = B_TRUE;
	mutex_exit(&dp->dp_scrub_cancel_lock);
}
/*
* scrub consumers
*/
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}
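/*
 * Example: a 3-DVA block with copies on vdevs {1, 1, 2} yields
 * equal == 1 (only dva[0] and dva[1] match) and counts as 2-of-3 on
 * the same vdev; copies on {1, 1, 1} yield equal == 3 and count as
 * 3-of-3.
 */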
static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
		spa->spa_scrub_errors++;
	mutex_exit(&spa->spa_scrub_lock);
}
static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	size_t size = BP_GET_LSIZE(bp);
	spa_t *spa = dp->dp_spa;
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
	int zio_priority;
	int d;

	if (bp->blk_birth <= dp->dp_scrub_min_txg ||
	    bp->blk_birth > dp->dp_scrub_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (dp->dp_scrub_isresilver == 0) {
		/* It's a scrub */
		zio_flags |= ZIO_FLAG_SCRUB;
		zio_priority = ZIO_PRIORITY_SCRUB;
		needs_io = B_TRUE;
	} else {
		/* It's a resilver */
		zio_flags |= ZIO_FLAG_RESILVER;
		zio_priority = ZIO_PRIORITY_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_scrub_examined +=
		    DVA_GET_ASIZE(&bp->blk_dva[d]);
		mutex_exit(&vd->vdev_stat_lock);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    bp->blk_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_pool_scrub_clean_done, NULL, zio_priority,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}
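/*
 * The reads issued above are asynchronous; dsl_pool_scrub_clean_done()
 * decrements spa_scrub_inflight, so the cv_wait() loop above bounds
 * the number of scrub i/os in flight at spa_scrub_maxinflight.
 */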
int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches. We do this here rather than in sync
	 * context because this requires a writer lock on the spa_config
	 * lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}
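/*
 * This is the entry point used for both "zpool scrub" and resilvers:
 * dsl_pool_scrub_setup_sync() checks vdev_resilver_needed() to decide
 * which of the two it is actually performing.
 */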