/* ztest.c revision 09c9d376e8ccb8fbba74f33cc268964464092b62 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011 by Delphix. All rights reserved.
*/
/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
*
* The overall design of the ztest program is as follows:
*
* (1) For each major functional area (e.g. adding vdevs to a pool,
* creating and destroying datasets, reading and writing objects, etc)
* we have a simple routine to test that functionality. These
* individual routines do not have to do anything "stressful".
*
* (2) We turn these simple functionality tests into a stress test by
* running them all in parallel, with as many threads as desired,
* and spread across as many datasets, objects, and vdevs as desired.
*
* (3) While all this is happening, we inject faults into the pool to
* verify that self-healing data really works.
*
* (4) Every time we open a dataset, we change its checksum and compression
* functions. Thus even individual objects vary from block to block
* in which checksum they use and whether they're compressed.
*
* (5) To verify that we never lose on-disk consistency after a crash,
* we run the entire test in a child of the main process.
* At random times, the child self-immolates with a SIGKILL.
* This is the software equivalent of pulling the power cord.
* The parent then runs the test again, using the existing
* storage pool, as many times as desired.
*
* (6) To verify that we don't have future leaks or temporal incursions,
* many of the functional tests record the transaction group number
* as part of their data. When reading old data, they verify that
* the transaction group number is less than the current, open txg.
* If you add a new test, please do this if applicable.
*
* When run with no arguments, ztest runs for about five minutes and
* produces no output if successful. To get a little bit of information,
* specify -V. To get more information, specify -VV, and so on.
*
* To turn this into an overnight stress test, use -T to specify run time.
*
* You can ask for more vdevs [-v], datasets [-d], or threads [-t]
* to increase the pool capacity, fanout, and overall stress level.
*
* The -N(okill) option will suppress kills, so each child runs to completion.
* This can be useful when you're trying to distinguish temporal incursions
* from plain old race conditions.
*/
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/resource.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <libnvpair.h>
static char cmdname[] = "ztest";
static uint64_t zopt_vdevtime;
static int zopt_ashift = SPA_MINBLOCKSHIFT;
static int zopt_mirrors = 2;
static int zopt_raidz = 4;
static int zopt_raidz_parity = 1;
static int zopt_datasets = 7;
static int zopt_threads = 23;
static int zopt_verbose = 0;
static int zopt_init = 1;
static char *zopt_dir = "/tmp";
#define BT_MAGIC 0x123456789abcdefULL
enum ztest_io_type {
};
typedef struct ztest_block_tag {
typedef struct bufwad {
} bufwad_t;
/*
* XXX -- fix zfs range locks to be generic so we can use them here.
*/
typedef enum {
} rl_type_t;
typedef struct rll {
void *rll_writer;
int rll_readers;
} rll_t;
typedef struct rl {
} rl_t;
#define ZTEST_RANGE_LOCKS 64
#define ZTEST_OBJECT_LOCKS 64
/*
*/
typedef struct ztest_od {
char od_name[MAXNAMELEN];
} ztest_od_t;
/*
* Per-dataset state.
*/
typedef struct ztest_ds {
char zd_name[MAXNAMELEN];
} ztest_ds_t;
/*
* Per-iteration state.
*/
typedef struct ztest_info {
} ztest_info_t;
/*
* Note: these aren't static because we want dladdr() to work.
*/
ztest_info_t ztest_info[] = {
#if 0
#endif
};
/*
* The following struct is used to hold a list of uncalled commit callbacks.
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
/*
* Stuff we need to share writably between parent and child.
*/
typedef struct ztest_shared {
char *zs_pool;
ztest_ds_t zs_zd[];
#define ID_PARALLEL -1ULL
static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
static int ztest_random_fd;
static int ztest_dump_core = 1;
static boolean_t ztest_exiting;
/* Global commit callback list */
static ztest_cb_list_t zcl;
extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
static uint64_t metaslab_sz;
enum ztest_object {
ZTEST_META_DNODE = 0,
};
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
/*
 * libumem hook: returns the default $UMEM_DEBUG setting so the
 * allocator enables its standard debugging facilities.
 * NOTE(review): the declarator line was missing from this copy; the
 * name `_umem_debug_init` is the conventional libumem hook paired with
 * `_umem_logging_init` below -- confirm against the complete source.
 */
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}
/*
 * libumem hook: supplies the default $UMEM_LOGGING setting, enabling
 * transaction-failure and buffer-contents logging in the allocator.
 */
const char *
_umem_logging_init(void)
{
	/* Value handed to libumem in lieu of the environment variable. */
	static const char umem_logging[] = "fail,contents";

	return (umem_logging);
}
#define FATAL_MSG_SZ 1024
char *fatal_msg;
static void
{
int save_errno = errno;
char buf[FATAL_MSG_SZ];
/* LINTED */
if (do_perror) {
}
if (ztest_dump_core)
abort();
exit(3);
}
static int
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
break;
}
buf);
}
return (10*i);
}
/* NOTREACHED */
}
/*
 * Convert a human-readable size string (e.g. "128k", "2.5g") into a
 * uint64_t byte count.
 * NOTE(review): this fragment is missing several lines (the numeric
 * conversion call that sets 'end'/'val', the suffix handling, and the
 * error-reporting statements that take 'buf') -- confirm against the
 * complete ztest.c before relying on it; as shown it does not compile.
 */
static uint64_t
nicenumtoull(const char *buf)
{
char *end;
} else if (end[0] == '.') {
/* Fractional value: reject anything that overflows a uint64_t. */
if (fval > UINT64_MAX) {
buf);
}
} else {
buf);
}
}
return (val);
}
static void
{
char nice_vdev_size[10];
char nice_gang_bang[10];
"\t[-v vdevs (default: %llu)]\n"
"\t[-s size_of_each_vdev (default: %s)]\n"
"\t[-a alignment_shift (default: %d)] use 0 for random\n"
"\t[-m mirror_copies (default: %d)]\n"
"\t[-r raidz_disks (default: %d)]\n"
"\t[-R raidz_parity (default: %d)]\n"
"\t[-d datasets (default: %d)]\n"
"\t[-t threads (default: %d)]\n"
"\t[-g gang_block_threshold (default: %s)]\n"
"\t[-i init_count (default: %d)] initialize pool i times\n"
"\t[-k kill_percentage (default: %llu%%)]\n"
"\t[-p pool_name (default: %s)]\n"
"\t[-f dir (default: %s)] file directory for vdev files\n"
"\t[-V] verbose (use multiple times for ever more blather)\n"
"\t[-E] use existing pool instead of creating new one\n"
"\t[-T time (default: %llu sec)] total run time\n"
"\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
"\t[-P passtime (default: %llu sec)] time per pass\n"
"\t[-h] (print help)\n"
"",
nice_vdev_size, /* -s */
zopt_ashift, /* -a */
zopt_mirrors, /* -m */
zopt_raidz, /* -r */
zopt_raidz_parity, /* -R */
zopt_datasets, /* -d */
zopt_threads, /* -t */
nice_gang_bang, /* -g */
zopt_init, /* -i */
zopt_pool, /* -p */
zopt_dir, /* -f */
}
static void
{
int opt;
/* By default, test gang blocks for blocks 32K and greater */
"v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:")) != EOF) {
value = 0;
switch (opt) {
case 'v':
case 's':
case 'a':
case 'm':
case 'r':
case 'R':
case 'd':
case 't':
case 'g':
case 'i':
case 'k':
case 'T':
case 'P':
case 'F':
}
switch (opt) {
case 'v':
zopt_vdevs = value;
break;
case 's':
break;
case 'a':
zopt_ashift = value;
break;
case 'm':
break;
case 'r':
break;
case 'R':
break;
case 'd':
break;
case 't':
break;
case 'g':
break;
case 'i':
break;
case 'k':
break;
case 'p':
break;
case 'f':
break;
case 'V':
zopt_verbose++;
break;
case 'E':
zopt_init = 0;
break;
case 'T':
break;
case 'P':
break;
case 'F':
break;
case 'h':
break;
case '?':
default:
break;
}
}
UINT64_MAX >> 2);
}
static void
{
}
static uint64_t
{
uint64_t r;
if (range == 0)
return (0);
if (read(ztest_random_fd, &r, sizeof (r)) != sizeof (r))
return (r % range);
}
/* ARGSUSED */
/*
 * Record that the test named by 's' hit ENOSPC.
 * NOTE(review): the body is empty in this copy; upstream ztest records
 * the out-of-space event in shared state here -- looks truncated,
 * confirm against the full source.
 */
static void
ztest_record_enospc(const char *s)
{
}
/*
 * Return the vdev ashift to use for this run.
 * NOTE(review): when zopt_ashift is 0 a random ashift is supposed to be
 * chosen, but the random-selection line is missing from this fragment;
 * as written the zero case returns 0 and the nonzero case falls off the
 * end of the function (UB) -- confirm against the complete source.
 */
static uint64_t
ztest_get_ashift(void)
{
if (zopt_ashift == 0)
return (zopt_ashift);
}
static nvlist_t *
{
char pathbuf[MAXPATHLEN];
if (ashift == 0)
ashift = ztest_get_ashift();
} else {
}
}
if (size != 0) {
if (fd == -1)
}
return (file);
}
static nvlist_t *
{
int c;
if (r < 2)
for (c = 0; c < r; c++)
VDEV_TYPE_RAIDZ) == 0);
zopt_raidz_parity) == 0);
child, r) == 0);
for (c = 0; c < r; c++)
nvlist_free(child[c]);
return (raidz);
}
static nvlist_t *
int r, int m)
{
int c;
if (m < 1)
for (c = 0; c < m; c++)
VDEV_TYPE_MIRROR) == 0);
child, m) == 0);
for (c = 0; c < m; c++)
nvlist_free(child[c]);
return (mirror);
}
static nvlist_t *
int log, int r, int m, int t)
{
int c;
ASSERT(t > 0);
for (c = 0; c < t; c++) {
log) == 0);
}
child, t) == 0);
for (c = 0; c < t; c++)
nvlist_free(child[c]);
return (root);
}
/*
 * Pick a random power-of-two block size, at least SPA_MINBLOCKSHIFT.
 * NOTE(review): the shift-amount expression is truncated in this copy
 * and does not compile -- restore from the complete ztest.c.
 */
static int
ztest_random_blocksize(void)
{
return (1 << (SPA_MINBLOCKSHIFT +
}
/*
 * Pick a random indirect-block shift, at least DN_MIN_INDBLKSHIFT.
 * NOTE(review): the expression is truncated in this copy and does not
 * compile -- restore from the complete ztest.c.
 */
static int
ztest_random_ibshift(void)
{
return (DN_MIN_INDBLKSHIFT +
}
static uint64_t
{
do {
return (top);
}
static uint64_t
{
do {
return (value);
}
static int
{
const char *valname;
char setpoint[MAXPATHLEN];
int error;
return (error);
}
if (zopt_verbose >= 6) {
(void) printf("%s %s = %s at '%s'\n",
}
return (error);
}
static int
{
int error;
return (error);
}
return (error);
}
static void
{
rll->rll_readers = 0;
}
static void
{
}
static void
{
rll->rll_readers++;
} else {
}
}
static void
{
if (rll->rll_writer) {
} else {
rll->rll_readers--;
}
}
static void
{
}
static void
{
}
static rl_t *
{
return (rl);
}
static void
{
}
static void
{
for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
}
static void
{
for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
}
static uint64_t
{
int error;
/*
* Attempt to assign tx to some transaction group.
*/
if (error) {
} else {
}
return (0);
}
return (txg);
}
static void
{
}
static boolean_t
{
return (diff == 0);
}
static void
{
}
static void
{
}
static ztest_block_tag_t *
{
return (bt);
}
/*
* ZIL logging ops
*/
#define lrz_blocksize lr_uid
#define lrz_ibshift lr_gid
#define lrz_bonustype lr_rdev
static void
{
return;
}
static void
{
return;
}
static void
{
return;
if (write_state == WR_COPIED &&
}
}
static void
{
return;
}
static void
{
return;
}
/*
* ZIL replay ops
*/
static int
{
int error = 0;
if (byteswap)
} else {
}
if (txg == 0)
return (ENOSPC);
} else {
}
} else {
} else {
}
}
if (error) {
return (error);
}
return (0);
}
static int
{
if (byteswap)
VERIFY3U(0, ==,
if (txg == 0) {
return (ENOSPC);
}
} else {
}
return (0);
}
static int
{
if (byteswap)
/* If it's a dmu_sync() block, write the whole block */
}
}
if (txg == 0) {
return (ENOSPC);
}
/*
* Usually, verify the old data before writing new data --
* but not always, because we also want to verify correct
* behavior when the data was not recently read into cache.
*/
if (ztest_random(4) != 0) {
}
}
/*
* Writes can appear to be newer than the bonus buffer because
* the ztest_get_data() callback does a dmu_read() of the
* open-context data, which may be different than the data
* as it was when the write was generated.
*/
}
/*
* so that all of the usual ASSERTs will work.
*/
}
} else {
}
return (0);
}
static int
{
if (byteswap)
if (txg == 0) {
return (ENOSPC);
}
return (0);
}
static int
{
if (byteswap)
if (txg == 0) {
return (ENOSPC);
}
} else {
/*
* Randomly change the size and increment the generation.
*/
sizeof (*bbt);
}
/*
* Verify that the current bonus buffer is not newer than our txg.
*/
return (0);
}
NULL, /* 0 no such transaction type */
ztest_replay_create, /* TX_CREATE */
NULL, /* TX_MKDIR */
NULL, /* TX_MKXATTR */
NULL, /* TX_SYMLINK */
ztest_replay_remove, /* TX_REMOVE */
NULL, /* TX_RMDIR */
NULL, /* TX_LINK */
NULL, /* TX_RENAME */
ztest_replay_write, /* TX_WRITE */
ztest_replay_truncate, /* TX_TRUNCATE */
ztest_replay_setattr, /* TX_SETATTR */
NULL, /* TX_ACL */
NULL, /* TX_CREATE_ACL */
NULL, /* TX_CREATE_ATTR */
NULL, /* TX_CREATE_ACL_ATTR */
NULL, /* TX_MKDIR_ACL */
NULL, /* TX_MKDIR_ATTR */
NULL, /* TX_MKDIR_ACL_ATTR */
NULL, /* TX_WRITE2 */
};
/*
* ZIL get_data callbacks
*/
static void
{
}
static int
{
int error;
if (error) {
return (error);
}
return (ENOENT);
}
} else {
} else {
offset = 0;
}
if (error == 0) {
if (error == 0)
return (0);
}
}
return (error);
}
static void *
{
char *lr;
if (name)
return (lr);
}
void
{
}
/*
* Lookup a bunch of objects. Returns the number of objects not found.
*/
static int
{
int missing = 0;
int error;
if (error) {
missing++;
} else {
}
}
return (missing);
}
static int
{
int missing = 0;
if (missing) {
missing++;
continue;
}
missing++;
} else {
}
}
return (missing);
}
static int
{
int missing = 0;
int error;
if (missing) {
missing++;
continue;
}
continue;
missing++;
} else {
}
}
return (missing);
}
static int
void *data)
{
lr_write_t *lr;
int error;
return (error);
}
static int
{
int error;
return (error);
}
static int
{
int error;
return (error);
}
static void
{
if (txg != 0) {
} else {
}
}
static void
{
enum ztest_io_type io_type;
void *data;
/*
* Pick an i/o type at random, biased toward writing block tags.
*/
if (ztest_random(2) == 0)
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
break;
case ZTEST_IO_WRITE_PATTERN:
if (ztest_random(2) == 0) {
/*
* Induce fletcher2 collisions to ensure that
* zio_ddt_collision() detects and resolves them
* when using fletcher2-verify for deduplication.
*/
}
break;
case ZTEST_IO_WRITE_ZEROES:
break;
case ZTEST_IO_TRUNCATE:
break;
case ZTEST_IO_SETATTR:
break;
}
}
/*
* Initialize an object description template.
*/
static void
{
od->od_blocksize = 0;
}
/*
* Lookup or create the objects for a test using the od template.
* If the objects do not all exist, or if 'remove' is specified,
* remove any existing objects and create new ones. Otherwise,
* use the existing objects.
*/
static int
{
int rv = 0;
rv = -1;
return (rv);
}
/* ARGSUSED */
void
{
/*
* shared memory. If we die, the next iteration of ztest_run()
* will verify that the log really does contain this record.
*/
}
/*
* Verify that we can't destroy an active pool, create an existing pool,
* or create a pool with a bad vdev spec.
*/
/* ARGSUSED */
void
{
/*
* Attempt to create using a bad file.
*/
/*
* Attempt to create using a bad mirror.
*/
/*
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
}
static vdev_t *
{
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
NULL)
return (mvd);
return (NULL);
}
/*
* Find the first available hole which can be used as a top-level.
*/
int
{
int c;
for (c = 0; c < rvd->vdev_children; c++) {
if (cvd->vdev_ishole)
break;
}
return (c);
}
/*
* Verify that vdev_add() works as expected.
*/
/* ARGSUSED */
void
{
int error;
/*
* If we have slogs then remove them 1/4 of the time.
*/
/*
* Grab the guid from the head of the log class rotor.
*/
/*
* We have to grab the zs_name_lock as writer to
* prevent a race between removing a slog (dmu_objset_find)
* and destroying a dataset. Removing the slog will
* grab a reference on the dataset which may cause
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
} else {
/*
* Make 1/4 of the devices be log devices.
*/
ztest_record_enospc("spa_vdev_add");
else if (error != 0)
}
}
/*
*/
/* ARGSUSED */
void
{
char *aux;
int error;
if (ztest_random(2) == 0) {
} else {
}
/*
* Pick a random device to remove.
*/
} else {
/*
* Find an unused device we can add.
*/
zs->zs_vdev_aux = 0;
for (;;) {
char path[MAXPATHLEN];
int c;
path) == 0)
break;
break;
zs->zs_vdev_aux++;
}
}
if (guid == 0) {
/*
* Add a new device.
*/
if (error != 0)
} else {
/*
* Remove an existing device. Sometimes, dirty its
* vdev state first to make sure we handle removal
* of devices that have pending state changes.
*/
if (ztest_random(2) == 0)
}
}
/*
* split a pool if it has mirror tlvdevs
*/
/* ARGSUSED */
void
{
int error = 0;
/* ensure we have a useable config; mirrors of raidz aren't supported */
return;
}
/* clean up the old pool, if any */
(void) spa_destroy("splitp");
/* generate a config from the existing config */
&tree) == 0);
&children) == 0);
for (c = 0; c < children; c++) {
0) == 0);
ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
ZPOOL_CONFIG_IS_HOLE, 1) == 0);
if (lastlogid == 0)
++schildren;
continue;
}
lastlogid = 0;
}
/* OK, create a config that can be used to split */
VDEV_TYPE_ROOT) == 0);
for (c = 0; c < schildren; c++)
nvlist_free(schild[c]);
if (error == 0) {
(void) printf("successful split - results:\n");
--zs->zs_mirrors;
}
}
/*
* Verify that we can attach and detach devices.
*/
/* ARGSUSED */
void
{
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
/*
* Decide whether to do an attach or a replace.
*/
/*
* Pick a random top-level vdev.
*/
/*
* Pick a random leaf within it.
*/
/*
* Locate this vdev.
*/
}
if (zopt_raidz > 1) {
}
/*
* If we're already doing an attach or replace, oldvd may be a
* mirror vdev -- in which case, pick a random child.
*/
while (oldvd->vdev_children != 0) {
}
/*
* If oldvd has siblings, then half of the time, detach it.
*/
return;
}
/*
* For the new vdev, choose with equal probability between the two
* standard paths (ending in either 'a' or 'b') or a random hot spare.
*/
} else {
if (ztest_random(2) == 0)
}
if (newvd) {
} else {
/*
* Make newsize a little bigger or smaller than oldsize.
* If it's smaller, the attach should fail.
* If it's larger, and we're doing a replace,
* we should get dynamic LUN growth when we're done.
*/
}
/*
* If pvd is not a mirror or root, the attach should fail with ENOTSUP,
* unless it's a replace; in that case any non-replacing parent is OK.
*
* If newvd is already part of the pool, it should fail with EBUSY.
*
* If newvd is too small, it should fail with EOVERFLOW.
*/
else
expected_error = 0;
/*
* Build the nvlist describing newpath.
*/
ashift, 0, 0, 0, 1);
/*
* If our parent was the replacing vdev, but the replace completed,
* then instead of failing with ENOTSUP we may either succeed,
* fail with ENODEV, or fail with EOVERFLOW.
*/
if (expected_error == ENOTSUP &&
/*
* If someone grew the LUN, the replacement may be too small.
*/
/* XXX workaround 6690467 */
fatal(0, "attach (%s %llu, %s %llu, %d) "
"returned %d, expected %d",
}
}
/*
* Callback function which expands the physical size of the vdev.
*/
vdev_t *
{
int fd;
return (vd);
if (zopt_verbose >= 6) {
(void) printf("%s grew from %lu to %lu bytes\n",
}
return (NULL);
}
/*
* Callback function which expands a given vdev by calling vdev_online().
*/
/* ARGSUSED */
vdev_t *
{
int error;
/* Calling vdev_online will initialize the new metaslabs */
/*
* If vdev_online returned an error or the underlying vdev_open
* failed then we abort the expand. The only way to know that
* vdev_open fails is by checking the returned newstate.
*/
if (zopt_verbose >= 5) {
(void) printf("Unable to expand vdev, state %llu, "
}
return (vd);
}
/*
* Since we dropped the lock we need to ensure that we're
* still talking to the original vdev. It's possible this
* trying to online it.
*/
if (zopt_verbose >= 5) {
(void) printf("vdev configuration has changed, "
"guid %llu, state %llu, expected gen %llu, "
"got gen %llu\n",
}
return (vd);
}
return (NULL);
}
/*
* Traverse the vdev tree calling the supplied function.
* We continue to walk the tree until we either have walked all
* children or we receive a non-NULL return from the callback.
* If a NULL callback is passed, then we just return back the first
* leaf vdev we encounter.
*/
vdev_t *
{
return (vd);
else
}
return (cvd);
}
return (NULL);
}
/*
* Verify that dynamic LUN growth works as expected.
*/
/* ARGSUSED */
void
{
/*
* Determine the size of the first leaf vdev associated with
* our top-level device.
*/
/*
* We only try to expand the vdev if it's healthy, less than 4x its
* original size, and it has a valid psize.
*/
return;
}
if (zopt_verbose >= 6) {
(void) printf("Expanding LUN %s from %lu to %lu\n",
}
/*
* Growing the vdev is a two step process:
* 1). expand the physical size (i.e. relabel)
* 2). online the vdev to create the new metaslabs
*/
if (zopt_verbose >= 5) {
(void) printf("Could not expand LUN because "
"the vdev configuration changed.\n");
}
return;
}
/*
* Expanding the LUN will update the config asynchronously,
* thus we must wait for the async thread to complete any
* pending tasks before proceeding.
*/
for (;;) {
if (done)
break;
}
if (zopt_verbose >= 5) {
(void) printf("Could not verify LUN expansion due to "
"intervening vdev offline or remove.\n");
}
return;
}
/*
* Make sure we were able to grow the vdev.
*/
if (new_ms_count <= old_ms_count)
fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
/*
* Make sure we were able to grow the pool.
*/
if (new_class_space <= old_class_space)
fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
if (zopt_verbose >= 5) {
(void) printf("%s grew from %s to %s\n",
}
}
/*
* Verify that dmu_objset_{create,destroy,open,close} work as expected.
*/
/* ARGSUSED */
static void
{
/*
* Create the objects common to all ztest datasets.
*/
}
/*
 * Create the named ztest dataset and return the resulting error code.
 * NOTE(review): the dataset-creation call and the declaration of 'err'
 * are missing from this fragment -- confirm against the complete
 * source; as written it does not compile.
 */
static int
ztest_dataset_create(char *dsname)
{
return (err);
}
/* ARGSUSED */
static int
{
int error;
/*
* Verify that the dataset contains a directory object.
*/
/* We could have crashed in the middle of destroying it */
}
/*
* Destroy the dataset.
*/
return (0);
}
static boolean_t
{
char snapname[MAXNAMELEN];
int error;
(u_longlong_t)id);
return (B_FALSE);
}
return (B_TRUE);
}
static boolean_t
{
char snapname[MAXNAMELEN];
int error;
(u_longlong_t)id);
return (B_TRUE);
}
/* ARGSUSED */
void
{
int iters;
int error;
char name[MAXNAMELEN];
/*
* If this dataset exists from a previous run, process its replay log
* half of the time. If we don't replay it, then dmu_objset_destroy()
* (invoked from ztest_objset_destroy_cb()) should just throw it away.
*/
if (ztest_random(2) == 0 &&
}
/*
* There may be an old instance of the dataset we're about to
* create lying around from a previous run. If so, destroy it
* and all of its snapshots.
*/
/*
* Verify that the destroyed dataset is no longer in the namespace.
*/
/*
* Verify that we can create a new dataset.
*/
if (error) {
return;
}
}
VERIFY3U(0, ==,
/*
* Open the intent log for it.
*/
/*
* Put some objects in there, do a little I/O to them,
* and randomly take a couple of snapshots along the way.
*/
for (int i = 0; i < iters; i++) {
if (ztest_random(iters) == 0)
(void) ztest_snapshot_create(name, i);
}
/*
* Verify that we cannot create an existing dataset.
*/
/*
* Verify that we can hold an objset that is also owned.
*/
/*
* Verify that we cannot own an objset that is already owned.
*/
}
/*
* Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
*/
void
{
}
/*
* Cleanup non-standard snapshots and clones.
*/
void
{
char snap1name[MAXNAMELEN];
char clone1name[MAXNAMELEN];
char snap2name[MAXNAMELEN];
char clone2name[MAXNAMELEN];
char snap3name[MAXNAMELEN];
int error;
}
/*
* Verify dsl_dataset_promote handles EBUSY
*/
void
{
char snap1name[MAXNAMELEN];
char clone1name[MAXNAMELEN];
char snap2name[MAXNAMELEN];
char clone2name[MAXNAMELEN];
char snap3name[MAXNAMELEN];
int error;
goto out;
}
}
if (error)
if (error) {
goto out;
}
}
goto out;
}
}
goto out;
}
}
if (error)
if (error) {
goto out;
}
}
if (error)
error);
out:
}
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
void
{
for (int b = 0; b < batchsize; b++)
/*
* Destroy the previous batch of objects, create a new batch,
* and do some I/O on the new objects.
*/
return;
}
/*
* Verify that dmu_{read,write} work as expected.
*/
void
{
int free_percent = 5;
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is arbitrary. It doesn't have to be a power of two,
* and it doesn't have any relation to the object blocksize.
* The only requirement is that it can hold at least two bufwads.
*
* Normally, we write the bufwad to each of these locations.
* However, free_percent of the time we instead write zeroes to
* packobj and perform a dmu_free_range() on bigobj. By comparing
* bigobj to packobj, we can verify that the DMU is correctly
* tracking which parts of an object are allocated and free,
* and that the contents of the allocated blocks are correct.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
return;
/*
* Prefetch a random chunk of the big object.
* Our aim here is to get some async reads in flight
* for blocks that we may free below; the DMU should
* handle this race correctly.
*/
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
/*
* free_percent of the time, free a range of bigobj rather than
* overwriting it.
*/
/*
* Read the current contents of our objects.
*/
/*
* Get a tx for the mods to both packobj and bigobj.
*/
if (freeit)
else
if (txg == 0) {
return;
}
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
/* LINTED */
/* LINTED */
fatal(0, "future leak: got %llx, open txg is %llx",
fatal(0, "wrong index: got %llx, wanted %llx+%llx",
if (freeit) {
} else {
}
}
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
if (freeit) {
if (zopt_verbose >= 7) {
(void) printf("freeing offset %llx size %llx"
" txg %llx\n",
(u_longlong_t)txg);
}
} else {
if (zopt_verbose >= 7) {
(void) printf("writing offset %llx size %llx"
" txg %llx\n",
(u_longlong_t)txg);
}
}
/*
* Sanity check the stuff we just wrote.
*/
{
}
}
void
{
uint64_t i;
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
/* LINTED */
/* LINTED */
fatal(0, "future leak: got %llx, open txg is %llx",
fatal(0, "wrong index: got %llx, wanted %llx+%llx",
}
}
void
{
uint64_t i;
int error;
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is set equal to bigobj block size so that
* dmu_assign_arcbuf() can be tested for object updates.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
return;
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
/*
* Iteration 0 test zcopy for DB_UNCACHED dbufs.
* Iteration 1 test zcopy to already referenced dbufs.
* Iteration 2 test zcopy to dirty dbuf in the same txg.
* Iteration 3 test zcopy to dbuf dirty in previous txg.
* Iteration 4 test zcopy when dbuf is no longer dirty.
* Iteration 5 test zcopy when it can't be done.
* Iteration 6 one more zcopy write.
*/
for (i = 0; i < 7; i++) {
uint64_t j;
/*
* In iteration 5 (i == 5) use arcbufs
* that don't match bigobj blksz to test
* dmu_assign_arcbuf() when it can't directly
* assign an arcbuf to a dbuf.
*/
for (j = 0; j < s; j++) {
if (i != 5) {
bigbuf_arcbufs[j] =
} else {
bigbuf_arcbufs[2 * j] =
}
}
/*
* Get a tx for the mods to both packobj and bigobj.
*/
if (txg == 0) {
for (j = 0; j < s; j++) {
if (i != 5) {
} else {
bigbuf_arcbufs[2 * j]);
}
}
return;
}
/*
* 50% of the time don't read objects in the 1st iteration to
* test dmu_assign_arcbuf() for the case when there're no
* existing dbufs for the specified offsets.
*/
if (i != 0 || ztest_random(2) != 0) {
}
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
if (zopt_verbose >= 7) {
(void) printf("writing offset %llx size %llx"
" txg %llx\n",
(u_longlong_t)txg);
}
if (i != 5) {
} else {
chunksize / 2);
chunksize / 2,
chunksize / 2);
}
if (i == 1) {
}
if (i != 5) {
bigbuf_arcbufs[j], tx);
} else {
}
if (i == 1) {
}
}
/*
* Sanity check the stuff we just wrote.
*/
{
}
if (i == 2) {
} else if (i == 3) {
}
}
}
/* ARGSUSED */
void
{
/*
* Have multiple threads write to large offsets in an object
* to verify that parallel writes to an object -- even to the
* same blocks within the object -- doesn't cause any trouble.
*/
return;
while (ztest_random(10) != 0)
}
void
{
void *data;
return;
return;
while (ztest_random(count) != 0) {
data) != 0)
break;
while (ztest_random(4) != 0)
}
}
/*
* Verify that zap_{create,destroy,add,remove,update} work as expected.
*/
#define ZTEST_ZAP_MIN_INTS 1
#define ZTEST_ZAP_MAX_INTS 4
#define ZTEST_ZAP_MAX_PROPS 1000
void
{
int i, ints;
int error;
return;
/*
* Generate a known hash collision, and verify that
* we can lookup and remove both entries.
*/
if (txg == 0)
return;
for (i = 0; i < 2; i++) {
value[i] = i;
}
for (i = 0; i < 2; i++) {
VERIFY3U(0, ==,
}
for (i = 0; i < 2; i++) {
}
/*
* Generate a buch of random entries.
*/
last_txg = 0;
/*
* If these zap entries already exist, validate their contents.
*/
if (error == 0) {
&zl_ints) == 0);
for (i = 0; i < ints; i++) {
}
} else {
}
/*
* Atomically update two entries in our zap object.
* The first is named txg_%llu, and contains the txg
* in which the property was last updated. The second
* is named prop_%llu, and the nth element of its value
* should be txg + object + n.
*/
if (txg == 0)
return;
for (i = 0; i < ints; i++)
/*
* Remove a random pair of entries.
*/
return;
if (txg == 0)
return;
}
/*
* Testcase to test the upgrading of a microzap to fatzap.
*/
void
{
return;
/*
* Add entries to this ZAP and make sure it spills over
* and gets upgraded to a fatzap. Also, since we are adding
* 2050 entries we should see ptrtbl growth and leaf-block split.
*/
for (int i = 0; i < 2050; i++) {
char name[MAXNAMELEN];
int error;
if (txg == 0)
return;
}
}
/* ARGSUSED */
void
{
void *data;
return;
/*
* Generate a random name of the form 'xxx.....' where each
* x is a random printable character and the dots are dots.
* There are 94 such characters, and the name length goes from
* 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
*/
for (i = 0; i < 3; i++)
for (; i < namelen - 1; i++)
name[i] = '.';
name[i] = '\0';
wc = 1;
} else {
wsize = 1;
data = string_value;
}
count = -1ULL;
/*
* Select an operation: length, lookup, add, update, remove.
*/
i = ztest_random(5);
if (i >= 2) {
if (txg == 0)
return;
} else {
txg = 0;
}
switch (i) {
case 0:
if (error == 0) {
} else {
}
break;
case 1:
if (error == 0) {
if (data == string_value &&
fatal(0, "name '%s' != val '%s' len %d",
} else {
}
break;
case 2:
break;
case 3:
break;
case 4:
break;
}
}
/*
* Commit callback data.
*/
typedef struct ztest_cb_data {
int zcd_expected_err;
/* This is the actual commit callback function */
static void
{
/*
* The private callback data should be destroyed here, but
* since we are going to check the zcd_called field after
* dmu_tx_abort(), we will destroy it there.
*/
return;
}
/* Was this callback added to the global callback list? */
goto out;
/* Remove our callback from the list */
out:
}
/* Allocate and initialize callback data structure */
static ztest_cb_data_t *
{
return (cb_data);
}
/*
* If a number of txgs equal to this threshold have been created after a commit
* callback has been registered but not called, then we assume there is an
* implementation bug.
*/
/*
* Commit callback test.
*/
void
{
int i, error;
return;
/* Every once in a while, abort the transaction on purpose */
if (ztest_random(100) == 0)
error = -1;
if (!error)
if (error) {
/*
* It's not a strict requirement to call the registered
* callbacks from inside dmu_tx_abort(), but that's what
* is supposed to happen in the current implementation
* so we will check for that.
*/
for (i = 0; i < 2; i++) {
}
for (i = 0; i < 2; i++) {
}
return;
}
/*
* Read existing data to make sure there isn't a future leak.
*/
&old_txg, DMU_READ_PREFETCH));
/*
* Since commit callbacks don't have any ordering requirement and since
* it is theoretically possible for a commit callback to be called
* after an arbitrary amount of time has elapsed since its txg has been
* synced, it is difficult to reliably determine whether a commit
* callback hasn't been called due to high load or due to a flawed
* implementation.
*
* In practice, we will assume that if after a certain number of txgs a
* commit callback hasn't been called, then most likely there's an
* implementation bug.
*/
fatal(0, "Commit callback threshold exceeded, oldest txg: %"
}
/*
* Let's find the place to insert our callbacks.
*
* Even though the list is ordered by txg, it is possible for the
* insertion point to not be the end because our txg may already be
* quiescing at this point and other callbacks in the open txg
* (from other objsets) may have sneaked in.
*/
/* Add the 3 callbacks to the list */
for (i = 0; i < 3; i++) {
else
cb_data[i]);
}
}
/* ARGSUSED */
void
{
zfs_prop_t proplist[] = {
};
}
/* ARGSUSED */
void
{
if (zopt_verbose >= 6)
}
/*
*/
void
{
int error;
char snapname[100];
char fullname[100];
char clonename[100];
char tag[100];
char osname[MAXNAMELEN];
/*
* Clean up from any previous run.
*/
/*
* Create snapshot, clone it, mark snap for deferred destroy,
* destroy clone, verify snap was also destroyed.
*/
FALSE, -1);
if (error) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
}
if (error)
if (error) {
ztest_record_enospc("dmu_objset_clone");
goto out;
}
}
if (error) {
fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
}
if (error)
/*
* Create snapshot, add temporary hold, verify that we can't
* destroy a held snapshot, mark for deferred destroy,
* release hold, verify snapshot was destroyed.
*/
FALSE, -1);
if (error) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
}
B_TRUE, -1);
if (error)
fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
}
if (error) {
fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
}
if (error)
out:
}
/*
* Inject random faults into the on-disk data.
*/
/* ARGSUSED */
void
{
int fd;
char path0[MAXPATHLEN];
char pathrand[MAXPATHLEN];
int iters = 1000;
int maxfaults;
int mirror_save;
/*
* We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
*/
if (ztest_random(2) == 0) {
/*
* Inject errors on a normal data device or slog device.
*/
/*
* Generate paths to the first leaf in this top-level vdev,
* and to the random leaf we selected. We'll induce transient
* and we'll write random garbage to the randomly chosen leaf.
*/
/*
* Make vd0 explicitly claim to be unreadable,
* or unwriteable, or reach behind its back
* and close the underlying fd. We can do this if
* maxfaults == 0 because we'll fail and reexecute,
* and we can do it if maxfaults >= 2 because we'll
* have enough redundancy. If maxfaults == 1, the
* combination of this with injection of random data
* corruption below exceeds the pool's fault tolerance.
*/
} else if (ztest_random(2) == 0) {
} else {
}
}
} else {
/*
* Inject errors on an l2cache device.
*/
return;
}
leaf = 0;
leaves = 1;
}
/*
* If we can tolerate two or more faults, or we're dealing
*/
ZFS_OFFLINE_TEMPORARY : 0);
/*
* We have to grab the zs_name_lock as writer to
* prevent a race between offlining a slog and
* destroying a dataset. Offlining the slog will
* grab a reference on the dataset which may cause
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
if (islog)
if (islog)
} else {
}
}
if (maxfaults == 0)
return;
/*
* We have at least single-fault tolerance, so inject data corruption.
*/
return;
while (--iters != 0) {
continue;
return;
}
if (zopt_verbose >= 7)
(void) printf("injected bad word into %s,"
}
}
/*
* Verify that DDT repair works as expected.
*/
void
{
void *buf;
return;
/*
* Take the name lock as writer to prevent anyone else from changing
* the pool and dataset properties we need to maintain during this test.
*/
B_FALSE) != 0 ||
B_FALSE) != 0) {
return;
}
if (txg == 0) {
return;
}
/*
* Write all the copies of our block.
*/
for (int i = 0; i < copies; i++) {
DMU_READ_NO_PREFETCH) == 0);
}
/*
* Find out what block we got.
*/
DMU_READ_NO_PREFETCH) == 0);
/*
* Damage the block. Dedup-ditto will save us when we read it later.
*/
}
/*
* Scrub the pool.
*/
/* ARGSUSED */
void
{
}
/*
* Rename the pool to a different name and then rename it back.
*/
/* ARGSUSED */
void
{
/*
* Do the rename
*/
/*
* Try to open it under the old name, which shouldn't exist
*/
/*
* Open it under the new name and make sure it's still the same spa_t.
*/
/*
* Rename it back to the original
*/
/*
* Make sure it can still be opened
*/
}
/*
* Verify pool integrity by running zdb.
*/
/*
 * Verify pool integrity by shelling out to zdb against the given pool.
 *
 * NOTE(review): this extraction is missing source lines from this
 * function (the command-string construction and the popen/pclose loop
 * are not visible).  The comments below describe only what the visible
 * fragment establishes; confirm details against the full file.
 */
static void
ztest_run_zdb(char *pool)
{
int status;
char zbuf[1024];	/* line buffer for zdb output (read loop not visible) */
char *bin;
char *ztest;
char *isa;		/* ISA suffix used to locate the matching zdb binary */
int isalen;
/* LINTED */
isa,
pool);
if (zopt_verbose >= 5)
if (zopt_verbose >= 3)
/* A zero exit status from zdb means the pool verified clean. */
if (status == 0)
return;
/*
 * zdb failed: suppress our own core dump before reporting the
 * failure (presumably via fatal(); call site not visible here).
 */
ztest_dump_core = 0;
else
}
/*
 * Print the set of pools currently known to the SPA namespace,
 * preceded by the caller-supplied header -- output only at
 * verbosity >= 6.
 *
 * NOTE(review): the spa iteration lines are missing from this
 * extraction; only the verbosity guards are visible.
 */
static void
ztest_walk_pool_directory(char *header)
{
if (zopt_verbose >= 6)
if (zopt_verbose >= 6)
}
static void
{
if (zopt_verbose >= 4) {
}
/*
* Clean up from previous runs.
*/
(void) spa_destroy(newname);
/*
* Get the pool's configuration and guid.
*/
/*
*/
if (ztest_random(2) == 0)
ztest_walk_pool_directory("pools before export");
/*
* Export it.
*/
ztest_walk_pool_directory("pools after export");
/*
* Try to import it.
*/
/*
* Import it under the new name.
*/
ztest_walk_pool_directory("pools after import");
/*
* Try to import it again -- should fail with EEXIST.
*/
/*
* Try to import it under a different name -- should fail with EEXIST.
*/
/*
* Verify that the pool is no longer visible under the old name.
*/
/*
* Verify that we can open and close the pool using the new name.
*/
}
static void
{
(void) printf("resuming from suspended state\n");
(void) zio_resume(spa);
}
/*
 * Background thread: loop until ztest_exiting is set, resuming the
 * pool whenever it is found suspended (ztest_fault_inject() can
 * suspend it when maxfaults == 0 -- see the comment near MAXFAULTS()
 * in ztest_run).
 *
 * NOTE(review): the if-body and any polling delay between checks are
 * missing from this extraction.
 */
static void *
ztest_resume_thread(void *arg)
{
while (!ztest_exiting) {
if (spa_suspended(spa))
}
return (NULL);
}
/*
 * Deadman watchdog thread: intended to abort() the process if ztest
 * hangs (see the "Create a deadman thread" comment at its spawn site).
 *
 * NOTE(review): only the grace-period constant (300 -- presumably
 * seconds; confirm) and the return are visible in this extraction;
 * the wait/abort logic is missing.
 */
static void *
ztest_deadman_thread(void *arg)
{
int grace = 300;
return (NULL);
}
static void
{
if (zopt_verbose >= 4) {
(void) printf("%6.2f sec in %s\n",
}
}
/*
 * Main worker-thread body: repeatedly pick and run a random test
 * function, occasionally forcing a crash, and bail out if ENOSPC
 * errors accumulate.
 *
 * NOTE(review): the surrounding loop, the conditions guarding
 * ztest_kill() and the break, and the function-dispatch code are all
 * missing from this extraction -- only the comments and a few
 * statements survive.
 */
static void *
ztest_thread(void *arg)
{
/*
 * See if it's time to force a crash.
 */
ztest_kill(zs);
/*
 * If we're getting ENOSPC with some regularity, stop.
 */
break;
/*
 * Pick a random function to execute.
 */
}
return (NULL);
}
static void
{
}
static void
{
char name[MAXNAMELEN];
if (zopt_verbose >= 3)
/*
* Cleanup any non-standard clones and snapshots. In general,
* ztest thread t operates on dataset (t % zopt_datasets),
* so there may be more than one thing to clean up.
*/
for (int t = d; t < zopt_threads; t += zopt_datasets)
}
static void
{
/*
* ZTEST_DIROBJ is the object directory for the entire dataset.
* Therefore, the number of objects in use should equal the
* number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
* If not, we have an object leak.
*
* Note that we can only check this in ztest_dataset_open(),
* when the open-context and syncing-context values agree.
* That's because zap_count() returns the open-context value,
* while dmu_objset_space() returns the rootbp fill count.
*/
}
static int
{
char name[MAXNAMELEN];
int error;
return (error);
}
fatal(0, "missing log records: claimed %llu < committed %llu",
if (zopt_verbose >= 6)
(void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
if (zilog->zl_replaying_seq != 0 &&
fatal(0, "missing log records: replayed %llu < committed %llu",
return (0);
}
static void
{
}
/*
* Kick off threads to run tests on all datasets in parallel.
*/
static void
{
int error;
/*
*/
/*
* Open our pool.
*/
/*
* We don't expect the pool to suspend unless maxfaults == 0,
* in which case ztest_fault_inject() temporarily takes away
* the only valid replica.
*/
if (MAXFAULTS() == 0)
else
/*
* Create a thread to periodically resume suspended I/O.
*/
&resume_tid) == 0);
/*
* Create a deadman thread to abort() if we hang.
*/
NULL) == 0);
/*
* Verify that we can safely inquire about any object,
* whether it's allocated or not. To make it interesting,
* we probe a 5-wide window around each power of two.
* This hits all edge cases, including zero and the max.
*/
for (int t = 0; t < 64; t++) {
for (int d = -5; d <= 5; d++) {
(1ULL << t) + d, NULL);
}
}
/*
* If we got any ENOSPC errors on the previous run, destroy something.
*/
if (zs->zs_enospc_count != 0) {
int d = ztest_random(zopt_datasets);
ztest_dataset_destroy(zs, d);
}
zs->zs_enospc_count = 0;
if (zopt_verbose >= 4)
(void) printf("starting main threads...\n");
/*
* Kick off all the tests that run in parallel.
*/
for (int t = 0; t < zopt_threads; t++) {
return;
}
/*
* Wait for all of the tests to complete. We go in reverse order
* so we don't close datasets while threads are still using them.
*/
for (int t = zopt_threads - 1; t >= 0; t--) {
if (t < zopt_datasets)
ztest_dataset_close(zs, t);
}
/* Kill the resume thread */
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
*/
/*
* Verify that we can loop over all pools.
*/
if (zopt_verbose > 3)
/*
* Verify that we can export the pool and reimport it under a
* different name.
*/
if (ztest_random(2) == 0) {
char name[MAXNAMELEN];
}
kernel_fini();
}
static void
{
int numloops = 0;
if (zopt_verbose >= 3)
(void) printf("testing spa_freeze()...\n");
/*
* Force the first log block to be transactionally allocated.
* We have to do this before we freeze the pool -- otherwise
* the log chain won't be anchored.
*/
}
/*
* Freeze the pool. This stops spa_sync() from doing anything,
* so that the only way to record changes from now on is the ZIL.
*/
/*
* Run tests that generate log records but don't alter the pool config
* We do a txg_wait_synced() after each iteration to force the txg
* to increase well beyond the last synced value in the uberblock.
* The ZIL should be OK with that.
*/
}
/*
* Commit all of the changes we just generated.
*/
/*
* Close our dataset and close the pool.
*/
ztest_dataset_close(zs, 0);
kernel_fini();
/*
* Open and close the pool and dataset to induce log replay.
*/
ztest_dataset_close(zs, 0);
kernel_fini();
}
void
{
hrtime_t m = s / 60;
hrtime_t h = m / 60;
hrtime_t d = h / 24;
s -= m * 60;
m -= h * 60;
h -= d * 24;
timebuf[0] = '\0';
if (d)
"%llud%02lluh%02llum%02llus", d, h, m, s);
else if (h)
else if (m)
else
}
static nvlist_t *
{
if (ztest_random(2) == 0)
return (NULL);
(void) printf("props:\n");
return (props);
}
/*
* Create a storage pool with the given name and initial vdev size.
* Then test spa_freeze() functionality.
*/
static void
{
/*
* Create the storage pool.
*/
props = make_random_props();
kernel_fini();
}
int
{
int kills = 0;
int iters = 0;
char timebuf[100];
char numbuf[6];
/* Override location of zpool.cache */
/*
* Blow away any existing copy of zpool.cache
*/
if (zopt_init != 0)
(void) remove(spa_config_path);
if (zopt_verbose >= 1) {
(void) printf("%llu vdevs, %d datasets, %d threads,"
" %llu seconds...\n",
}
/*
* Create and initialize our storage pool.
*/
for (int i = 1; i <= zopt_init; i++) {
(void) printf("ztest_init(), pass %d\n", i);
ztest_init(zs);
}
for (int f = 0; f < ZTEST_FUNCS; f++) {
*zi = ztest_info[f];
else
}
/*
* Run the tests in a loop. These tests include fault injection
* to verify that self-healing data works, and forced crashes
* to verify that we never lose on-disk consistency.
*/
int status;
/*
* Initialize the workload counters for each function.
*/
for (int f = 0; f < ZTEST_FUNCS; f++) {
zi->zi_call_count = 0;
zi->zi_call_time = 0;
}
/* Set the allocation switch size */
if (pid == -1)
if (pid == 0) { /* child */
exit(0);
}
continue;
if (WEXITSTATUS(status) != 0) {
"child exited with code %d\n",
exit(2);
}
} else if (WIFSIGNALED(status)) {
"child died with signal %d\n",
exit(3);
}
kills++;
} else {
"to child\n");
exit(4);
}
iters++;
if (zopt_verbose >= 1) {
(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
"%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
}
if (zopt_verbose >= 2) {
(void) printf("\nWorkload summary:\n\n");
(void) printf("%7s %9s %s\n",
"Calls", "Time", "Function");
(void) printf("%7s %9s %s\n",
"-----", "----", "--------");
for (int f = 0; f < ZTEST_FUNCS; f++) {
(void) printf("%7llu %9s %s\n",
}
(void) printf("\n");
}
/*
* It's possible that we killed a child during a rename test,
* in which case we'll have a 'ztest_tmp' pool lying around
* instead of 'ztest'. Do a blind rename in case this happened.
*/
} else {
char tmpname[MAXNAMELEN];
kernel_fini();
}
kernel_fini();
}
if (zopt_verbose >= 1) {
(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
}
return (0);
}