metaslab.c revision 0125049cd6136d1d2ca9e982382a915b6d7916ce
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)

metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */

	return (1);
	return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	return (-1);
	return (1);

	return (0);
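/*
 * Editor's sketch (not part of metaslab.c): the fragments above belong to a
 * comparator that orders metaslabs by descending weight and breaks ties with
 * the starting offset so no two AVL entries ever compare equal.  The struct
 * and names below are hypothetical stand-ins, assuming the usual fixed-width
 * integer types; they only illustrate the shape of the comparison.
 */
struct example_ms {
	uint64_t weight;	/* heavier metaslabs sort first */
	uint64_t start;		/* tie-breaker: space map start offset */
};

static int
example_ms_compare(const void *x1, const void *x2)
{
	const struct example_ms *m1 = x1;
	const struct example_ms *m2 = x2;

	if (m1->weight < m2->weight)
		return (1);
	if (m1->weight > m2->weight)
		return (-1);

	/* Identical weights: use the offset to force uniqueness. */
	if (m1->start < m2->start)
		return (-1);
	if (m1->start > m2->start)
		return (1);
	return (0);
}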
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children);

metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)

metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)

metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)

	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */

	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	return (-1ULL);
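/*
 * Editor's sketch (not part of metaslab.c): the cursor-based first-fit
 * policy above, reduced to a flat array of free segments sorted by start
 * offset.  The real allocator keeps one cursor per power-of-two alignment
 * class (indexed by highbit(align)) and walks an AVL tree of space map
 * segments; here a single caller-supplied cursor stands in for that, and
 * align is assumed to be a nonzero power of two.  Names are hypothetical.
 */
struct example_seg {
	uint64_t start;
	uint64_t size;
};

static uint64_t
example_ff_alloc(struct example_seg *segs, int nsegs, uint64_t *cursor,
    uint64_t size, uint64_t align)
{
	uint64_t search_start = *cursor;
	uint64_t off;
	int i;

again:
	for (i = 0; i < nsegs; i++) {
		/* Round the candidate offset up to the requested alignment. */
		off = (segs[i].start + align - 1) & ~(align - 1);
		if (off < search_start)
			off = (search_start + align - 1) & ~(align - 1);

		if (off >= segs[i].start + segs[i].size)
			continue;
		if (segs[i].start + segs[i].size - off >= size) {
			*cursor = off + size;	/* resume here next time */
			return (off);
		}
	}

	/*
	 * If we know we've searched the whole array (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (search_start == 0)
		return (-1ULL);
	search_start = *cursor = 0;
	goto again;
}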
/* ARGSUSED */
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)

	/* No need to update cursor */

/* ARGSUSED */
metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)

	/* No need to update cursor */
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */

	if (txg != 0) {
		/*
		 * The vdev is dirty, but the metaslab isn't -- it just needs
		 * to have metaslab_sync_done() invoked from vdev_sync_done().
		 * [We could just dirty the metaslab, but that would cause us
		 * to allocate a space map object for it, which is wasteful
		 * and would mess up the locality logic in metaslab_weight().]
		 */

	ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));

	for (t = 0; t < TXG_SIZE; t++) {
	/*
	 * The baseline weight is the metaslab's free space.
	 */

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1.  We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;

	/*
	 * For locality, assign higher weight to metaslabs we've used before.
	 */

	/*
	 * If this metaslab is one we're actively using, adjust its weight to
	 * make it preferable to any inactive metaslab so we'll polish it off.
	 */
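/*
 * Editor's sketch (not part of metaslab.c): the shape of the weighting
 * described above.  The baseline is free space, a positional multiplier
 * scales it from 2x (outermost metaslab, index 0) down toward 1x
 * (innermost), and a locality bonus nudges metaslabs we've used before.
 * The bonus amount and parameter names here are assumptions, not the
 * shipped values; ms_index corresponds to (sm_start >> ms_shift) and
 * ms_count must be nonzero.
 */
static uint64_t
example_metaslab_weight(uint64_t free_space, uint64_t ms_index,
    uint64_t ms_count, int used_before)
{
	uint64_t weight = free_space;

	/* 2x at index 0, tapering linearly toward 1x at the last metaslab. */
	weight = 2 * weight - (ms_index * weight) / ms_count;

	/* Hypothetical locality bonus for metaslabs we've already touched. */
	if (used_before)
		weight += weight / 8;

	return (weight);
}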
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)

	return (0);

	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);

	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
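/*
 * Editor's sketch (not part of metaslab.c): passivation above demotes a
 * metaslab by re-sorting it at MIN(ms_weight, size), where size is a
 * caller-supplied bound; when that bound drops below SPA_MINBLOCKSIZE the
 * metaslab is being retired and, per the ASSERT, must already be empty.
 * A minimal, hypothetical illustration of the weight adjustment:
 */
static uint64_t
example_passivate_weight(uint64_t weight, uint64_t size)
{
	/* The weight only ever shrinks here; it never grows on passivation. */
	return (size < weight ? size : weight);
}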
/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */

	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */
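/*
 * Editor's sketch (not part of metaslab.c): the locking pattern described
 * above.  The metaslab lock is released around any call that may re-enter
 * the allocator (e.g. the DMU calling back via zio_free()), then retaken
 * before the space maps are touched again.  It assumes the kernel mutex
 * primitives already used by this file; the function and callback names
 * are hypothetical.
 */
static void
example_sync_step(kmutex_t *ms_lock, void (*dmu_call)(void *), void *arg)
{
	/* Caller holds ms_lock; drop it across the re-entrant call. */
	mutex_exit(ms_lock);
	dmu_call(arg);		/* may call back into the allocator */
	mutex_enter(ms_lock);
}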
	dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);
		space_map_walk(freed_map, space_map_remove, allocmap);
		space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
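/*
 * Editor's sketch (not part of metaslab.c): the condensing arithmetic
 * described above, shrunk to a 64-bit bitmap in which each bit stands for
 * one allocatable chunk of the metaslab.  Start from "everything
 * allocated", then clear whatever the in-core map says is free, whatever
 * was freed this txg, and whatever future (uncommitted) txgs have
 * allocated.  The parameter names are hypothetical.
 */
static uint64_t
example_condensed_allocmap(uint64_t incore_free, uint64_t freed_this_txg,
    uint64_t future_allocs)
{
	uint64_t allocmap = ~0ULL;		/* 100% allocated */

	allocmap &= ~incore_free;		/* minus the in-core map */
	allocmap &= ~freed_this_txg;		/* minus this txg's frees */
	allocmap &= ~future_allocs;		/* minus uncommitted allocs */

	return (allocmap);
}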
	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */

	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
		for (t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,

	vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add everything we freed in this txg to the map.
	 */
	space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (0);

metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,

	for (i = 0; i < d; i++)

	for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			return (-1ULL);
		for (i = 0; i < d; i++)
		if (i == d)

	return (-1ULL);

	if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
		space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
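/*
 * Editor's sketch (not part of metaslab.c): the selection loop above, over
 * a plain array sorted by descending weight.  A metaslab is rejected if it
 * is too light for the request or too close to a DVA already placed for
 * this block; because of the sort order, the first survivor is the best
 * candidate.  The array, the distance callback, and min_distance are
 * hypothetical stand-ins for the AVL walk and metaslab_distance().
 */
static int
example_pick_metaslab(const uint64_t *weights, int nms, uint64_t size,
    uint64_t (*distance)(int ms, int dva), int ndvas, uint64_t min_distance)
{
	int m, i;

	for (m = 0; m < nms; m++) {
		if (weights[m] < size)
			return (-1);	/* everything after is lighter */

		for (i = 0; i < ndvas; i++)
			if (distance(m, i) < min_distance)
				break;	/* too close to an existing copy */

		if (i == ndvas)
			return (m);	/* far enough from all prior DVAs */
	}
	return (-1);
}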
/*
 * Allocate a block for the specified i/o.
 */
metaslab_alloc_dva(spa_t *spa, uint64_t psize, dva_t *dva, int d,
    dva_t *hintdva, uint64_t txg, boolean_t hintdva_avoid)

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_allocated because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 */
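/*
 * Editor's sketch (not part of metaslab.c): the rotor policy described
 * above, reduced to an index over an array of metaslab groups.  Roughly
 * one "aliquot" worth of data goes to a group before the rotor advances,
 * and the counters are intentionally racy -- a missed update only skews
 * the distribution slightly.  Names and types are hypothetical.
 */
struct example_rotor {
	int		r_current;	/* index of the group in use */
	uint64_t	r_allocated;	/* bytes charged to that group */
	uint64_t	r_aliquot;	/* bytes per group per pass */
	int		r_ngroups;
};

static int
example_rotor_pick(struct example_rotor *r, uint64_t size)
{
	int g = r->r_current;

	r->r_allocated += size;
	if (r->r_allocated >= r->r_aliquot) {
		/* Move on to the next group and start a fresh aliquot. */
		r->r_current = (g + 1) % r->r_ngroups;
		r->r_allocated = 0;
	}
	return (g);
}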
	/*
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try to spread them out on
	 * that vdev as much as possible.  If it turns out not to be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	} else if (d != 0) {

	ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
	offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);

		/*
		 * If we've just selected this metaslab group,
		 * figure out whether the corresponding vdev is
		 * over- or under-used relative to the pool,
		 * and set an allocation bias to even it out.
		 */

		/*
		 * Determine percent used in units of 0..1024.
		 * (This is just to avoid floating point.)
		 */

		/*
		 * Bias by at most +/- 25% of the aliquot.
		 */
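/*
 * Editor's sketch (not part of metaslab.c): the bias arithmetic described
 * above, using fixed-point "percent used" in units of 0..1024 to avoid
 * floating point.  An under-used vdev gets a larger share of the next
 * aliquot and an over-used one a smaller share, clamped to +/- 25%.  The
 * exact scaling here is an assumption, not the shipped formula.
 */
static int64_t
example_alloc_bias(uint64_t vdev_alloc, uint64_t vdev_space,
    uint64_t pool_alloc, uint64_t pool_space, uint64_t aliquot)
{
	/* Usage in units of 0..1024; the +1 avoids dividing by zero. */
	uint64_t vu = (vdev_alloc << 10) / (vdev_space + 1);
	uint64_t pu = (pool_alloc << 10) / (pool_space + 1);

	/* Positive bias if the vdev is emptier than the pool overall. */
	int64_t bias = ((int64_t)pu - (int64_t)vu) * (int64_t)aliquot / 1024;

	/* Bias by at most +/- 25% of the aliquot. */
	if (bias > (int64_t)aliquot / 4)
		bias = (int64_t)aliquot / 4;
	if (bias < -((int64_t)aliquot / 4))
		bias = -((int64_t)aliquot / 4);

	return (bias);
}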
	return (0);

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)

	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",

	space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);

	/*
	 * verify that this region is actually allocated in
	 * either a ms_allocmap or the ms_map
	 */
	(void) space_map_load(&msp->ms_map, &metaslab_ff_ops,
	if (!space_map_contains(&msp->ms_map, offset, size)) {
		for (i = 0; i < TXG_CONCURRENT_STATES; i++) {
		    "(vdev=%llu offset=%llx size=%llx)",
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)

	error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	return (0);

metaslab_alloc(spa_t *spa, uint64_t psize, blkptr_t *bp, int ndvas,
    uint64_t txg, blkptr_t *hintbp, boolean_t hintbp_avoid)

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, psize, dva, d, hintdva,
			for (d--; d >= 0; d--) {

	return (0);
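/*
 * Editor's sketch (not part of metaslab.c): the all-or-nothing pattern in
 * metaslab_alloc() above.  Each DVA is allocated in turn; on the first
 * failure, every DVA already placed for this block pointer is undone so
 * the caller never sees a partially allocated BP.  The callbacks are
 * hypothetical stand-ins for metaslab_alloc_dva()/metaslab_free_dva().
 */
static int
example_alloc_ndvas(int ndvas, int (*alloc_dva)(int d, void *arg),
    void (*free_dva)(int d, void *arg), void *arg)
{
	int d, error;

	for (d = 0; d < ndvas; d++) {
		if ((error = alloc_dva(d, arg)) != 0) {
			/* Roll back everything allocated so far. */
			for (d--; d >= 0; d--)
				free_dva(d, arg);
			return (error);
		}
	}
	return (0);
}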
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)

	for (d = 0; d < ndvas; d++)

metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)