Searched defs:txg (Results 1 - 25 of 36) sorted by relevance


/illumos-gate/usr/src/uts/common/fs/zfs/
uberblock.c:47 uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg) argument
49 ASSERT(ub->ub_txg < txg);
56 ub->ub_txg = txg;
61 return (ub->ub_rootbp.blk_birth == txg);
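The three hits above (lines 47-61) show the whole contract of uberblock_update(): txgs must advance monotonically, the uberblock is stamped with the new txg, and the return value reports whether the root block pointer was born in that txg. A minimal sketch of that invariant, using stand-in fields rather than the real illumos definitions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t ub_txg;        /* txg in which this uberblock was written */
        uint64_t rootbp_birth;  /* birth txg of the root block pointer */
    } uberblock_sketch_t;

    /* Returns 1 when the root block pointer was rewritten in this txg,
     * i.e. the pool's on-disk tree actually changed. */
    static int
    uberblock_update_sketch(uberblock_sketch_t *ub, uint64_t txg)
    {
        assert(ub->ub_txg < txg);   /* txgs only move forward */
        ub->ub_txg = txg;
        return (ub->rootbp_birth == txg);
    }

    int
    main(void)
    {
        uberblock_sketch_t ub = { .ub_txg = 41, .rootbp_birth = 42 };
        printf("changed in txg 42: %d\n", uberblock_update_sketch(&ub, 42));
        return (0);
    }
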
dmu_object.c:50 * once per txg, but after that keep looking from here.
51 * os_scan_dnodes is set during txg sync if enough objects
169 * after the specified txg.
172 dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg) argument
178 (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);
spa_errlog.c:343 spa_errlog_sync(spa_t *spa, uint64_t txg) argument
368 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
zio_checksum.c:200 * Set the external verifier for a gang block based on <vdev, offset, txg>,
207 uint64_t txg = BP_PHYSICAL_BIRTH(bp); local
211 ZIO_SET_CHECKSUM(zcp, DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), txg, 0);
216 * The vdev is implicit, and the txg is unknowable at pool open time --
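The comment at line 200 explains that a gang block's embedded checksum words act as an external verifier derived from the tuple <vdev, offset, txg>. A hedged sketch of that packing, mirroring the ZIO_SET_CHECKSUM() call above with a stand-in for the real zio_cksum_t:

    #include <stdint.h>

    typedef struct {
        uint64_t zc_word[4];
    } zio_cksum_sketch_t;

    static void
    gang_verifier_sketch(zio_cksum_sketch_t *zcp,
        uint64_t vdev, uint64_t offset, uint64_t txg)
    {
        /* Only a block that claims to live at this <vdev, offset> and was
         * born in this txg will reproduce exactly this verifier. */
        zcp->zc_word[0] = vdev;
        zcp->zc_word[1] = offset;
        zcp->zc_word[2] = txg;
        zcp->zc_word[3] = 0;
    }
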
txg.c:41 * these transaction groups. Each successive transaction group (txg) is
44 * there may be an active txg associated with each state; each active txg may
46 * be up to three active txgs, and there is always a txg in the open state
49 * accepted into the txg in the open state, and are completed while the txg is
55 * When a new txg becomes active, it first enters the open state. New
57 * currently open txg. There is always a txg in the open state so that ZFS can
58 accept new changes (though the txg may refuse new changes if it has been suspended).
117 txg_init(dsl_pool_t *dp, uint64_t txg) argument
296 uint64_t txg; local
353 txg_quiesce(dsl_pool_t *dp, uint64_t txg) argument
408 txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg) argument
460 uint64_t txg; local
535 uint64_t txg; local
577 txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution) argument
603 txg_wait_synced(dsl_pool_t *dp, uint64_t txg) argument
628 txg_wait_open(dsl_pool_t *dp, uint64_t txg) argument
715 txg_list_empty(txg_list_t *tl, uint64_t txg) argument
743 txg_list_add(txg_list_t *tl, void *p, uint64_t txg) argument
767 txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg) argument
794 txg_list_remove(txg_list_t *tl, uint64_t txg) argument
816 txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg) argument
839 txg_list_member(txg_list_t *tl, void *p, uint64_t txg) argument
851 txg_list_head(txg_list_t *tl, uint64_t txg) argument
860 txg_list_next(txg_list_t *tl, void *p, uint64_t txg) argument
[all...]
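The txg.c block comment (lines 41-58) describes a pipeline of up to three simultaneously active txgs, one each in the open, quiescing, and syncing states, and the txg_list_* definitions above are how per-txg work is tracked. Since only TXG_SIZE (4) txgs can own in-memory state at once, per-txg state throughout ZFS lives in small arrays indexed by (txg & TXG_MASK). A simplified sketch of that idiom, with stand-in names rather than the real txg_list_t:

    #include <stdint.h>

    #define TXG_SIZE 4                 /* matches the illumos constant */
    #define TXG_MASK (TXG_SIZE - 1)

    typedef struct txg_node_sketch {
        struct txg_node_sketch *tn_next[TXG_SIZE]; /* one link per slot */
        int tn_member[TXG_SIZE];                   /* on that slot's list? */
    } txg_node_sketch_t;

    typedef struct {
        txg_node_sketch_t *tl_head[TXG_SIZE];
    } txg_list_sketch_t;

    /* Add a node to the list for the given txg exactly once; because the
     * same object can be dirty in several txgs, it carries one link and
     * one membership flag per slot. */
    static void
    txg_list_add_sketch(txg_list_sketch_t *tl, txg_node_sketch_t *tn,
        uint64_t txg)
    {
        int t = txg & TXG_MASK;

        if (!tn->tn_member[t]) {
            tn->tn_member[t] = 1;
            tn->tn_next[t] = tl->tl_head[t];
            tl->tl_head[t] = tn;
        }
    }
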
zfeature.c:486 * OUT txg argument.
488 * Returns B_TRUE if the feature is enabled, in which case txg will be filled
493 spa_feature_enabled_txg(spa_t *spa, spa_feature_t fid, uint64_t *txg) argument
501 err = feature_get_enabled_txg(spa, &spa_feature_table[fid], txg);
spa_config.c:353 spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats) argument
371 * If txg is -1, report the current value of spa->spa_config_txg.
373 if (txg == -1ULL)
374 txg = spa->spa_config_txg;
381 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
499 uint64_t txg; local
505 txg = spa_last_synced_txg(spa) + 1;
520 vdev_expand(tvd, txg);
528 txg_wait_synced(spa->spa_dsl_pool, txg);
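Lines 505-528 show a recurring ZFS idiom: compute the txg after the last synced one, dirty the change into that txg, then block in txg_wait_synced() until it reaches disk. A toy standalone sketch of just that control flow (the sync thread is faked here; real callers use the spa/dsl_pool APIs shown above):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t last_synced_txg;   /* stand-in for spa_last_synced_txg() */
    } spa_sketch_t;

    /* Stub: in real ZFS this blocks until the given txg has synced. */
    static void
    txg_wait_synced_sketch(spa_sketch_t *spa, uint64_t txg)
    {
        while (spa->last_synced_txg < txg)
            spa->last_synced_txg++;   /* pretend the sync thread ran */
    }

    int
    main(void)
    {
        spa_sketch_t spa = { .last_synced_txg = 100 };

        /* Changes made now land in the txg after the last synced one. */
        uint64_t txg = spa.last_synced_txg + 1;
        printf("dirtying vdev in txg %llu\n", (unsigned long long)txg);
        txg_wait_synced_sketch(&spa, txg);
        printf("txg %llu is on disk\n", (unsigned long long)txg);
        return (0);
    }
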
vdev_mirror.c:221 uint64_t txg = zio->io_txg; local
224 ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);
243 if (!vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1))
251 * Every device is either missing or has this txg in its DTL.
dnode_sync.c:159 uint64_t txg = tx->tx_txg; local
192 while (dr && dr->dr_txg > txg)
194 ASSERT(dr == NULL || dr->dr_txg == txg);
210 * future txg.
461 uint64_t txg = dr->dr_txg; local
481 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
vdev_label.c:123 * txg Transaction group in which this label was written
435 * which don't have a txg value stored on their label (i.e. spares/cache)
436 * or have not been completely initialized (txg = 0) just return
439 * 'txg' value.
442 vdev_label_read_config(vdev_t *vd, uint64_t txg) argument
476 * Auxiliary vdevs won't have txg values in their
487 } else if (label_txg <= txg && label_txg > best_txg) {
519 uint64_t state, pool_guid, device_guid, txg, spare_pool; local
549 &txg) != 0)) {
575 txg
762 uint64_t txg = 0ULL; local
1099 vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags) argument
1141 vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags) argument
1194 vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg) argument
[all...]
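The vdev_label_read_config() hits (lines 442-487) encode the label-selection rule: skip auxiliary or uninitialized labels (txg == 0), ignore labels claiming a txg later than the one requested, and keep the newest of the rest. A minimal sketch of that rule over the four labels, with stand-in types:

    #include <stdint.h>

    #define VDEV_LABELS 4

    typedef struct {
        uint64_t label_txg;   /* txg stored in this label, 0 if unset */
        int      valid;       /* label read and checksummed OK? */
    } label_sketch_t;

    /* Returns the index of the best label, or -1 if none qualifies. */
    static int
    best_label_sketch(const label_sketch_t labels[VDEV_LABELS], uint64_t txg)
    {
        uint64_t best_txg = 0;
        int best = -1;

        for (int l = 0; l < VDEV_LABELS; l++) {
            if (!labels[l].valid)
                continue;
            /* Ignore labels from the future (torn or stale writes) and
             * keep the newest of the remainder. */
            if (labels[l].label_txg <= txg &&
                labels[l].label_txg > best_txg) {
                best_txg = labels[l].label_txg;
                best = l;
            }
        }
        return (best);
    }
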
vdev_disk.c:866 uint64_t offset, state, txg = 0; local
888 &txg) != 0 || txg == 0) {
dmu_traverse.c:685 uint64_t txg = txg_start; local
695 if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
696 txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
697 err = traverse_dataset(ds, txg, flags, func, arg);
dsl_pool.c:75 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
78 * relevant, the per-txg value is useful for debugging. The tunable
83 * ensure that there is a txg syncing (see the comment in txg.c for a full
103 * If there is at least this much dirty data, push out a txg.
149 dsl_pool_open_impl(spa_t *spa, uint64_t txg) argument
158 txg_init(dp, txg);
179 dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) argument
182 dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
340 dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg) argument
456 dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) argument
580 dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg) argument
656 dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg) argument
[all...]
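The comment at lines 75-103 describes two levels of dirty-data accounting: a per-txg counter kept mainly for debugging, and a pool-wide total that decides when to push out a txg. A simplified sketch, with stand-in names for the counters and the tunable threshold:

    #include <stdint.h>

    #define TXG_SIZE 4
    #define TXG_MASK (TXG_SIZE - 1)

    typedef struct {
        uint64_t dirty_pertxg[TXG_SIZE]; /* per-txg, useful for debugging */
        uint64_t dirty_total;            /* pool-wide, drives txg pushes */
        uint64_t dirty_sync_thresh;      /* push out a txg past this */
    } pool_dirty_sketch_t;

    /* Account newly dirtied space; returns 1 if a txg sync should be
     * kicked because the pool-wide total crossed the threshold. */
    static int
    pool_dirty_space_sketch(pool_dirty_sketch_t *dp, uint64_t space,
        uint64_t txg)
    {
        dp->dirty_pertxg[txg & TXG_MASK] += space;
        dp->dirty_total += space;
        return (dp->dirty_total >= dp->dirty_sync_thresh);
    }
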
ddt.c:246 ddt_bp_fill(const ddt_phys_t *ddp, blkptr_t *bp, uint64_t txg) argument
248 ASSERT(txg != 0);
252 BP_SET_BIRTH(bp, txg, ddp->ddp_phys_birth);
318 ddt_phys_free(ddt_t *ddt, ddt_key_t *ddk, ddt_phys_t *ddp, uint64_t txg) argument
324 zio_free(ddt->ddt_spa, txg, &blk);
990 ddt_sync_entry(ddt_t *ddt, ddt_entry_t *dde, dmu_tx_t *tx, uint64_t txg) argument
1013 ddt_phys_free(ddt, ddk, ddp, txg);
1017 ddt_phys_free(ddt, ddk, ddp, txg);
1057 ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg) argument
1075 ddt_sync_entry(ddt, dde, tx, txg);
1098 ddt_sync(spa_t *spa, uint64_t txg) argument
[all...]
dmu_tx.c:76 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg) argument
80 ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
82 tx->tx_txg = txg;
346 * before this IO gets assigned into a txg.
1307 * (1) TXG_WAIT. If the current open txg is full, waits until there's
1311 * (2) TXG_NOWAIT. If we can't assign into the current open txg without
1393 * A dnode is assigned to the quiescing txg. Wait for its
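Lines 1307-1311 summarize the two txg-assignment modes. The canonical consumer-side retry loop for TXG_NOWAIT, adapted from the usage outline in dmu_tx.c's own block comment (a kernel-context sketch, not a standalone program; os, object, off, and len are placeholders):

    /* Sketch: perform a write with the don't-block-under-locks mode. */
    static int
    write_with_retry_sketch(objset_t *os, uint64_t object, uint64_t off,
        uint64_t len)
    {
        dmu_tx_t *tx;
        int err;
    top:
        tx = dmu_tx_create(os);
        dmu_tx_hold_write(tx, object, off, len);
        err = dmu_tx_assign(tx, TXG_NOWAIT);  /* never blocks the caller */
        if (err == ERESTART) {
            /* Open txg was full: real callers drop their locks here. */
            dmu_tx_wait(tx);     /* wait for the next open txg */
            dmu_tx_abort(tx);
            goto top;
        }
        if (err != 0) {
            dmu_tx_abort(tx);    /* truly out of space (or over quota) */
            return (err);
        }
        /* ... modify the objects covered by the holds ... */
        dmu_tx_commit(tx);
        return (0);
    }
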
dsl_scan.c:67 int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
68 int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
69 int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
96 dsl_scan_init(dsl_pool_t *dp, uint64_t txg) argument
122 scn->scn_restart_txg = txg;
124 "restarting new-style scrub in txg %llu",
152 scn->scn_restart_txg = txg;
154 "by old software; restarting in txg %llu",
376 dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp) argument
378 zio_free(dp->dp_spa, txg, bp);
382 dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp) argument
1679 dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg) argument
[all...]
dnode.c:550 dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
1264 uint64_t txg = tx->tx_txg; local
1276 ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
1290 if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
1298 ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
1299 ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
1300 ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);
1302 dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
1303 dn->dn_object, txg);
1305 if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
1845 dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset, int lvl, uint64_t blkfill, uint64_t txg) argument
1978 dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset, int minlvl, uint64_t blkfill, uint64_t txg) argument
[all...]
vdev.c:849 vdev_metaslab_init(vdev_t *vd, uint64_t txg) argument
859 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
893 if (txg == 0) {
901 error = metaslab_init(vd->vdev_mg, m, object, txg,
907 if (txg == 0)
918 if (txg == 0)
1388 uint64_t txg = spa_last_synced_txg(spa) != 0 ? local
1391 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
1583 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) argument
1603 (error = vdev_label_init(vd, txg, isreplacing ? VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0)
1623 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) argument
1640 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg) argument
1688 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) argument
1703 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) argument
1805 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) argument
1999 vdev_dtl_sync(vdev_t *vd, uint64_t txg) argument
2227 vdev_remove(vdev_t *vd, uint64_t txg) argument
2286 vdev_sync_done(vdev_t *vd, uint64_t txg) argument
2301 vdev_sync(vdev_t *vd, uint64_t txg) argument
2812 uint64_t txg = zio->io_txg; local
3399 vdev_expand(vdev_t *vd, uint64_t txg) argument
[all...]
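Several hits above (vdev_dtl_dirty, vdev_dtl_contains, and the vdev_mirror.c check at line 243) treat a DTL (dirty time log) as a set of txg ranges during which a vdev missed writes; vdev_dtl_contains() asks whether [txg, txg + size) overlaps that set. A sketch with a flat array standing in for the real range tree:

    #include <stdint.h>

    typedef struct {
        uint64_t start;   /* first missing txg */
        uint64_t end;     /* last missing txg, inclusive */
    } dtl_range_sketch_t;

    /* Does [txg, txg + size) overlap any range of missing txgs?
     * A mirror child for which this returns 1 cannot serve reads of
     * blocks born in that txg window. */
    static int
    dtl_contains_sketch(const dtl_range_sketch_t *dtl, int nranges,
        uint64_t txg, uint64_t size)
    {
        for (int i = 0; i < nranges; i++) {
            if (txg <= dtl[i].end && txg + size - 1 >= dtl[i].start)
                return (1);
        }
        return (0);
    }
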
dbuf.c:1032 * first time in a txg, when we are freeing a range in a dnode that includes
1040 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) argument
1061 ASSERT(dr->dr_txg >= txg - 2);
1218 uint64_t txg = dr->dr_txg; local
1232 zio_free(db->db_objset->os_spa, txg, bp);
1259 uint64_t txg = tx->tx_txg; local
1316 if (dr->dr_txg == txg) {
1333 dbuf_fix_old_data(db, txg);
1619 * transaction group won't leak out when we sync the older txg.
1676 * This buffer is now part of this txg
1787 uint64_t txg = tx->tx_txg; local
1824 dr->dr_accounted, txg); local
3052 uint64_t txg = tx->tx_txg; local
3512 uint64_t txg = tx->tx_txg; local
[all...]
dmu.c:1533 * newly allocated block in this txg.
1589 * EEXIST: this txg has already been synced, so there's nothing to do.
1608 dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd) argument
1621 ASSERT(txg != 0);
1635 if (txg > spa_freeze_txg(os->os_spa))
1640 * and us. If we determine that this txg is not yet syncing,
1646 if (txg <= spa_last_synced_txg(os->os_spa)) {
1648 * This txg has already synced. There's nothing to do.
1654 if (txg <= spa_syncing_txg(os->os_spa)) {
1656 * This txg is currently syncing, so we can't mess with the dirty record anymore; just write a new log block.
[all...]
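The dmu_sync() hits (lines 1640-1656) perform a three-way triage on the block's txg: already synced (nothing to do, EEXIST in the real code), currently syncing (too late to touch the dirty record, write a fresh log block), or still open/quiescing (the dirty record can still be intercepted). A standalone sketch of just that comparison, with made-up return codes standing in for the real error paths:

    #include <stdint.h>

    #define SYNC_ALREADY_SYNCED  1  /* nothing to do */
    #define SYNC_WRITE_LOG_BLOCK 2  /* txg is syncing: new log block */
    #define SYNC_MODIFY_DIRTY    3  /* txg still open: can intervene */

    static int
    dmu_sync_triage_sketch(uint64_t txg, uint64_t last_synced,
        uint64_t syncing)
    {
        if (txg <= last_synced)
            return (SYNC_ALREADY_SYNCED);
        if (txg <= syncing)
            return (SYNC_WRITE_LOG_BLOCK);
        return (SYNC_MODIFY_DIRTY);
    }
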
dmu_objset.c:1117 dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
1169 * dn_dirty_link[] of this txg.
1201 dmu_objset_is_dirty(objset_t *os, uint64_t txg) argument
1203 return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
1204 !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
dsl_dir.c:1014 dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
1116 uint64_t txg = tx->tx_txg; local
1121 int txgidx = txg & TXG_MASK;
1125 ASSERT3U(txg, !=, 0);
1227 * Reserve space in this dsl_dir, to be used in this tx's txg.
/illumos-gate/usr/src/lib/libzfs/common/
libzfs_import.c:36 * pool guid -> toplevel vdev guid -> label txg
39 * examined every device, we pick the best label txg config for each toplevel
218 uint64_t pool_guid, vdev_guid, top_guid, txg, state; local
249 * we write a label with txg == 0 so that we can identify the device
261 &txg) != 0 || txg == 0) {
310 if (ce->ce_txg == txg)
319 ce->ce_txg = txg;
492 * We rely on the fact that the max txg for the
872 uint64_t state, txg, size; local
[all...]
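The comment at lines 36-39 describes the import heuristic: organize labels by pool guid and toplevel vdev guid, then keep the config with the highest label txg for each toplevel vdev, skipping devices whose label txg is 0 (spares, caches, and partially initialized devices, per lines 249-261). A minimal sketch of the selection step, with stand-in types:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct {
        uint64_t txg;         /* label txg; 0 means "no usable config" */
        const void *config;   /* stand-in for the label's nvlist */
    } label_config_sketch_t;

    /* Pick the config written most recently, i.e. with the largest
     * label txg, ignoring unlabeled and auxiliary devices. */
    static const void *
    best_config_sketch(const label_config_sketch_t *labels, int n)
    {
        uint64_t best_txg = 0;
        const void *best = NULL;

        for (int i = 0; i < n; i++) {
            if (labels[i].txg == 0)
                continue;   /* spare, cache, or half-initialized */
            if (labels[i].txg > best_txg) {
                best_txg = labels[i].txg;
                best = labels[i].config;
            }
        }
        return (best);
    }
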
/illumos-gate/usr/src/grub/grub-0.97/stage2/
fsys_zfs.c:1532 uint64_t pool_state, txg = 0; local
1559 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
1564 if (txg == 0)
/illumos-gate/usr/src/boot/sys/cddl/boot/zfs/
zfssubr.c:201 * Set the external verifier for a gang block based on <vdev, offset, txg>,
208 uint64_t txg = BP_PHYSICAL_BIRTH(bp); local
212 ZIO_SET_CHECKSUM(zcp, DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), txg, 0);
217 * The vdev is implicit, and the txg is unknowable at pool open time --

Completed in 139 milliseconds
