Lines Matching defs:txg
41 * these transaction groups. Each successive transaction group (txg) is
44 * there may be an active txg associated with each state; each active txg may
46 * be up to three active txgs, and there is always a txg in the open state
49 * accepted into the txg in the open state, and are completed while the txg is
55 * When a new txg becomes active, it first enters the open state. New
57 * currently open txg. There is always a txg in the open state so that ZFS can
58 * accept new changes (though the txg may refuse new changes if it has hit
59 * some limit). ZFS advances the open txg to the next state for a variety of
65 * After a txg exits the open state, it enters the quiescing state. The
69 * operation without delaying either of the other states. Typically, a txg is
72 * transactions complete, the txg is ready to enter the next state.
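Taken together, these fragments describe a three-stage pipeline (open, quiescing, syncing) over a small ring of per-txg slots. As a standalone illustration, and not ZFS code, the sketch below shows the indexing convention the rest of the fragments rely on: txg identifiers grow without bound while per-txg state lives in a power-of-two array indexed by txg & TXG_MASK. TXG_SIZE and TXG_MASK match the ZFS constants; the slot contents are hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    #define TXG_SIZE 4                 /* power of two, as in ZFS */
    #define TXG_MASK (TXG_SIZE - 1)

    typedef struct txg_slot {
        uint64_t ts_num_changes;       /* hypothetical per-txg state */
    } txg_slot_t;

    int
    main(void)
    {
        txg_slot_t slots[TXG_SIZE] = {{ 0 }};
        uint64_t txg;

        /* txg identifiers grow forever; slot indices wrap mod 4. */
        for (txg = 100; txg < 106; txg++) {
            slots[txg & TXG_MASK].ts_num_changes++;
            printf("txg %llu uses slot %llu\n",
                (unsigned long long)txg,
                (unsigned long long)(txg & TXG_MASK));
        }
        return (0);
    }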
99 * datasets. Note that when a synctask is initiated it enters the open txg,
100 * and ZFS then pushes that txg as quickly as possible to completion of the
104 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
111 int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
114 * Prepare the txg subsystem.
117 txg_init(dsl_pool_t *dp, uint64_t txg)
148 tx->tx_open_txg = txg;
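The txg_init() fragments show only the signature and the assignment of the first open txg. A minimal sketch of what such an initializer has to do, using simplified stand-ins for ZFS's tx_state_t and tx_cpu_t (one "CPU" only, pthread locks instead of kernel mutexes; field names follow the fragments loosely):

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    #define TXG_SIZE 4

    typedef struct tx_cpu {
        pthread_mutex_t tc_open_lock;
        uint64_t tc_count[TXG_SIZE];   /* open holds per txg slot */
    } tx_cpu_t;

    typedef struct tx_state {
        uint64_t tx_open_txg;
        tx_cpu_t tx_cpu[1];            /* one "CPU" for brevity */
    } tx_state_t;

    void
    txg_init_sketch(tx_state_t *tx, uint64_t txg)
    {
        memset(tx, 0, sizeof (*tx));
        pthread_mutex_init(&tx->tx_cpu[0].tc_open_lock, NULL);
        tx->tx_open_txg = txg;         /* mirrors "tx->tx_open_txg = txg;" */
    }

    int
    main(void)
    {
        tx_state_t tx;

        txg_init_sketch(&tx, 4);       /* hypothetical first open txg */
        return (0);
    }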
152 * Close down the txg subsystem.
296 uint64_t txg;
299 txg = tx->tx_open_txg;
302 tc->tc_count[txg & TXG_MASK]++;
306 th->th_txg = txg;
308 return (txg);
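These fragments are the core of taking a hold on the open txg: read tx_open_txg under a per-CPU open lock and bump that slot's hold count, so the quiesce step knows how many holds must drain. A hedged sketch, with the caveat that real ZFS keeps tc_open_lock held until the hold is released toward quiesce, while this version drops it immediately for brevity:

    #include <pthread.h>
    #include <stdint.h>

    #define TXG_SIZE 4
    #define TXG_MASK (TXG_SIZE - 1)

    typedef struct tx_cpu {
        pthread_mutex_t tc_open_lock;
        uint64_t tc_count[TXG_SIZE];
    } tx_cpu_t;

    typedef struct tx_state {
        uint64_t tx_open_txg;
    } tx_state_t;

    typedef struct txg_handle {
        tx_cpu_t *th_cpu;
        uint64_t th_txg;
    } txg_handle_t;

    uint64_t
    txg_hold_open_sketch(tx_state_t *tx, tx_cpu_t *tc, txg_handle_t *th)
    {
        uint64_t txg;

        /* While tc_open_lock is held, the open txg cannot advance,
         * so the count we bump belongs to the right slot. */
        pthread_mutex_lock(&tc->tc_open_lock);
        txg = tx->tx_open_txg;
        tc->tc_count[txg & TXG_MASK]++;    /* mirrors the fragment above */
        th->th_cpu = tc;
        th->th_txg = txg;
        pthread_mutex_unlock(&tc->tc_open_lock);

        return (txg);
    }

    int
    main(void)
    {
        tx_state_t tx = { 100 };
        tx_cpu_t tc = { PTHREAD_MUTEX_INITIALIZER, { 0 } };
        txg_handle_t th;

        return (txg_hold_open_sketch(&tx, &tc, &th) == 100 ? 0 : 1);
    }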
353 txg_quiesce(dsl_pool_t *dp, uint64_t txg)
356 int g = txg & TXG_MASK;
360 * Grab all tc_open_locks so nobody else can get into this txg.
365 ASSERT(txg == tx->tx_open_txg);
369 DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
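txg_quiesce() is the step that retires the open txg. A simplified pthread rendering of the logic around these fragments: take every per-CPU open lock so no new holds can enter, advance tx_open_txg, then wait for the old slot's hold counts to drain to zero. The two-CPU constant and condition-variable details are illustrative.

    #include <pthread.h>
    #include <stdint.h>

    #define TXG_SIZE 4
    #define TXG_MASK (TXG_SIZE - 1)
    #define NCPUS    2    /* hypothetical; ZFS has one tx_cpu_t per CPU */

    typedef struct tx_cpu {
        pthread_mutex_t tc_open_lock;
        pthread_mutex_t tc_lock;
        pthread_cond_t  tc_cv[TXG_SIZE];
        uint64_t        tc_count[TXG_SIZE];
    } tx_cpu_t;

    typedef struct tx_state {
        uint64_t tx_open_txg;
        tx_cpu_t tx_cpu[NCPUS];
    } tx_state_t;

    void
    txg_quiesce_sketch(tx_state_t *tx, uint64_t txg)
    {
        int g = txg & TXG_MASK;
        int c;

        /* Grab all tc_open_locks so nobody else can get into this txg. */
        for (c = 0; c < NCPUS; c++)
            pthread_mutex_lock(&tx->tx_cpu[c].tc_open_lock);

        tx->tx_open_txg++;             /* new transactions land in txg+1 */

        for (c = 0; c < NCPUS; c++)
            pthread_mutex_unlock(&tx->tx_cpu[c].tc_open_lock);

        /* Wait for the old slot's open holds to drain on every CPU. */
        for (c = 0; c < NCPUS; c++) {
            tx_cpu_t *tc = &tx->tx_cpu[c];

            pthread_mutex_lock(&tc->tc_lock);
            while (tc->tc_count[g] != 0)
                pthread_cond_wait(&tc->tc_cv[g], &tc->tc_lock);
            pthread_mutex_unlock(&tc->tc_lock);
        }
    }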
402 * Dispatch the commit callbacks registered on this txg to worker threads.
408 txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
418 * only be called once a txg has been synced.
421 int g = txg & TXG_MASK;
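The dispatch fragments carry the key ordering rule: commit callbacks may only run once their txg has synced, so callers learn their changes are on stable storage. In ZFS the callbacks go to a taskq; the sketch below just detaches the slot's list and invokes each entry inline, which is enough to show the shape. The node type, heap allocation, and zero error code are assumptions.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define TXG_SIZE 4
    #define TXG_MASK (TXG_SIZE - 1)

    /* Hypothetical commit-callback record. */
    typedef struct cb_node {
        struct cb_node *cb_next;
        void (*cb_func)(void *, int);
        void *cb_arg;
    } cb_node_t;

    typedef struct tx_state {
        cb_node_t *tx_callbacks[TXG_SIZE];  /* per-slot pending callbacks */
    } tx_state_t;

    /* Run the callbacks registered on this txg. Must only be called
     * once the txg has been synced (error 0 = changes are stable). */
    void
    txg_dispatch_callbacks_sketch(tx_state_t *tx, uint64_t txg)
    {
        int g = txg & TXG_MASK;
        cb_node_t *cb = tx->tx_callbacks[g];

        tx->tx_callbacks[g] = NULL;         /* detach the slot's list */
        while (cb != NULL) {
            cb_node_t *next = cb->cb_next;

            cb->cb_func(cb->cb_arg, 0);
            free(cb);                       /* assumes heap-allocated nodes */
            cb = next;
        }
    }

    static void
    print_cb(void *arg, int error)
    {
        printf("commit cb: %s, error %d\n", (char *)arg, error);
    }

    int
    main(void)
    {
        tx_state_t tx = {{ NULL }};
        cb_node_t *cb = malloc(sizeof (*cb));

        if (cb == NULL)
            return (1);
        cb->cb_next = NULL;
        cb->cb_func = print_cb;
        cb->cb_arg = "hello";
        tx.tx_callbacks[100 & TXG_MASK] = cb;
        txg_dispatch_callbacks_sketch(&tx, 100);
        return (0);
    }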
460 uint64_t txg;
464 * on us, or the quiesce thread has handed off a txg to
481 * Wait until the quiesce thread hands off a txg to us,
495 * Consume the quiesced txg which has been handed off to
497 * able to quiesce another txg, so we must signal it.
499 txg = tx->tx_quiesced_txg;
501 tx->tx_syncing_txg = txg;
502 DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
505 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
506 txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
510 spa_sync(spa, txg);
514 tx->tx_synced_txg = txg;
516 DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
522 txg_dispatch_callbacks(dp, txg);
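These are the bones of the sync thread: sleep until the quiesce thread hands off a txg, consume it, signal the quiesce thread that the hand-off slot is free, sync outside the lock, then publish tx_synced_txg and wake waiters. Below is a compilable pthread rendering of that loop under stated simplifications: spa_sync() is a stub, and the real thread also wakes on a zfs_txg_timeout-based timer via tx_sync_more_cv, which is omitted here.

    #include <pthread.h>
    #include <stdint.h>

    /* Simplified tx_state_t; field names follow the fragments. */
    typedef struct tx_state {
        pthread_mutex_t tx_sync_lock;
        pthread_cond_t  tx_quiesce_more_cv;  /* wakes the quiesce thread */
        pthread_cond_t  tx_quiesce_done_cv;  /* quiesce thread -> us */
        pthread_cond_t  tx_sync_done_cv;     /* us -> txg_wait_synced() */
        uint64_t tx_quiesced_txg;            /* 0 = nothing handed off */
        uint64_t tx_syncing_txg;
        uint64_t tx_synced_txg;
        int tx_exiting;
    } tx_state_t;

    /* Stand-in for spa_sync(): write out everything dirtied in txg. */
    static void
    spa_sync_stub(uint64_t txg)
    {
        (void) txg;
    }

    void *
    txg_sync_thread_sketch(void *arg)
    {
        tx_state_t *tx = arg;

        pthread_mutex_lock(&tx->tx_sync_lock);
        while (!tx->tx_exiting) {
            uint64_t txg;

            /* Wait until the quiesce thread hands off a txg to us. */
            while (tx->tx_quiesced_txg == 0 && !tx->tx_exiting)
                pthread_cond_wait(&tx->tx_quiesce_done_cv,
                    &tx->tx_sync_lock);
            if (tx->tx_exiting)
                break;

            /*
             * Consume the quiesced txg; the quiesce thread may now
             * be able to quiesce another txg, so we must signal it.
             */
            txg = tx->tx_quiesced_txg;
            tx->tx_quiesced_txg = 0;
            tx->tx_syncing_txg = txg;
            pthread_cond_broadcast(&tx->tx_quiesce_more_cv);

            pthread_mutex_unlock(&tx->tx_sync_lock);
            spa_sync_stub(txg);              /* the actual I/O */
            pthread_mutex_lock(&tx->tx_sync_lock);

            tx->tx_syncing_txg = 0;
            tx->tx_synced_txg = txg;
            pthread_cond_broadcast(&tx->tx_sync_done_cv);
        }
        pthread_mutex_unlock(&tx->tx_sync_lock);
        return (NULL);
    }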
535 uint64_t txg;
539 * However, we can only have one txg in "quiescing" or
541 * the "quiesced, waiting to sync" txg has been consumed
552 txg = tx->tx_open_txg;
553 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
554 txg, tx->tx_quiesce_txg_waiting,
557 txg_quiesce(dp, txg);
561 * Hand this txg off to the sync thread.
563 dprintf("quiesce done, handing off txg %llu\n", txg);
564 tx->tx_quiesced_txg = txg;
565 DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
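The quiesce thread is the producer side of the same hand-off: it waits until somebody wants the open txg pushed along (tx_quiesce_txg_waiting) and until the sync thread has consumed the previous hand-off, since only one txg may sit in the quiesced state at a time. A hedged sketch of that loop; in real ZFS, txg_quiesce() drains open holds while the sync lock is dropped, which the stub glosses over.

    #include <pthread.h>
    #include <stdint.h>

    typedef struct tx_state {
        pthread_mutex_t tx_sync_lock;
        pthread_cond_t  tx_quiesce_more_cv;  /* sync thread/waiters -> us */
        pthread_cond_t  tx_quiesce_done_cv;  /* us -> sync thread */
        uint64_t tx_open_txg;
        uint64_t tx_quiesce_txg_waiting;     /* demand watermark */
        uint64_t tx_quiesced_txg;            /* 0 = hand-off slot free */
        int tx_exiting;
    } tx_state_t;

    /* Stand-in for txg_quiesce(): advance tx_open_txg, drain holds. */
    static void
    txg_quiesce_stub(tx_state_t *tx, uint64_t txg)
    {
        (void) txg;
        tx->tx_open_txg++;
    }

    void *
    txg_quiesce_thread_sketch(void *arg)
    {
        tx_state_t *tx = arg;

        pthread_mutex_lock(&tx->tx_sync_lock);
        for (;;) {
            uint64_t txg;

            /*
             * Only one txg may be "quiescing" or "quiesced, waiting
             * to sync": sleep until the previous hand-off has been
             * consumed and someone wants the open txg pushed along.
             */
            while (!tx->tx_exiting &&
                (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
                tx->tx_quiesced_txg != 0))
                pthread_cond_wait(&tx->tx_quiesce_more_cv,
                    &tx->tx_sync_lock);
            if (tx->tx_exiting)
                break;

            txg = tx->tx_open_txg;
            txg_quiesce_stub(tx, txg);

            /* Hand this txg off to the sync thread. */
            tx->tx_quiesced_txg = txg;
            pthread_cond_broadcast(&tx->tx_quiesce_done_cv);
        }
        pthread_mutex_unlock(&tx->tx_sync_lock);
        return (NULL);
    }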
573 * transaction group and there is already a waiting txg quiescing or quiesced.
574 * Abort the delay if this txg stalls or enters the quiescing state.
577 txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
582 /* don't delay if this txg could transition to quiescing immediately */
583 if (tx->tx_open_txg > txg ||
584 tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
588 if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
594 tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
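txg_delay() belongs to the write throttle: a caller holding txg is put to sleep for a bounded time, unless the pipeline has already moved far enough that this txg is about to quiesce anyway (it is no longer the open txg, or its predecessor is syncing or synced). The sketch below is a heavy simplification of the fragments, with no resolution rounding and no stall check; tx_delay_cv would be broadcast by the quiesce path in a full model.

    #include <pthread.h>
    #include <stdint.h>
    #include <time.h>

    typedef struct tx_state {
        pthread_mutex_t tx_delay_lock;
        pthread_cond_t  tx_delay_cv;   /* broadcast when txgs advance */
        uint64_t tx_open_txg;
        uint64_t tx_syncing_txg;
        uint64_t tx_synced_txg;
    } tx_state_t;

    void
    txg_delay_sketch(tx_state_t *tx, uint64_t txg, uint64_t delay_ns)
    {
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += (time_t)(delay_ns / 1000000000ULL);
        deadline.tv_nsec += (long)(delay_ns % 1000000000ULL);
        if (deadline.tv_nsec >= 1000000000L) {
            deadline.tv_sec++;
            deadline.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&tx->tx_delay_lock);

        /*
         * Don't delay if this txg could transition to quiescing
         * immediately, and abort the delay once it does.
         */
        while (tx->tx_open_txg == txg &&
            tx->tx_syncing_txg < txg - 1 &&
            tx->tx_synced_txg < txg - 1) {
            if (pthread_cond_timedwait(&tx->tx_delay_cv,
                &tx->tx_delay_lock, &deadline) != 0)
                break;                 /* delay expired */
        }

        pthread_mutex_unlock(&tx->tx_delay_lock);
    }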
603 txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
611 if (txg == 0)
612 txg = tx->tx_open_txg + TXG_DEFER_SIZE;
613 if (tx->tx_sync_txg_waiting < txg)
614 tx->tx_sync_txg_waiting = txg;
615 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
616 txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
617 while (tx->tx_synced_txg < txg) {
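txg_wait_synced() shows the demand-watermark pattern used by both wait functions: resolve txg == 0 to a default target, raise tx_sync_txg_waiting so the worker threads know how far to push, then loop nudging the pipeline and sleeping until tx_synced_txg catches up. A pthread sketch that mirrors the fragments directly (TXG_DEFER_SIZE is the ZFS constant, 2):

    #include <pthread.h>
    #include <stdint.h>

    #define TXG_DEFER_SIZE 2           /* the ZFS constant */

    typedef struct tx_state {
        pthread_mutex_t tx_sync_lock;
        pthread_cond_t  tx_sync_more_cv;   /* nudge the sync pipeline */
        pthread_cond_t  tx_sync_done_cv;   /* sync thread -> waiters */
        uint64_t tx_open_txg;
        uint64_t tx_sync_txg_waiting;      /* demand watermark */
        uint64_t tx_synced_txg;
    } tx_state_t;

    /* Block until `txg` is on stable storage; txg == 0 means
     * "everything dirty right now". */
    void
    txg_wait_synced_sketch(tx_state_t *tx, uint64_t txg)
    {
        pthread_mutex_lock(&tx->tx_sync_lock);
        if (txg == 0)
            txg = tx->tx_open_txg + TXG_DEFER_SIZE;
        if (tx->tx_sync_txg_waiting < txg)
            tx->tx_sync_txg_waiting = txg;
        while (tx->tx_synced_txg < txg) {
            pthread_cond_broadcast(&tx->tx_sync_more_cv);
            pthread_cond_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
        }
        pthread_mutex_unlock(&tx->tx_sync_lock);
    }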
628 txg_wait_open(dsl_pool_t *dp, uint64_t txg)
636 if (txg == 0)
637 txg = tx->tx_open_txg + 1;
638 if (tx->tx_quiesce_txg_waiting < txg)
639 tx->tx_quiesce_txg_waiting = txg;
640 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
641 txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
642 while (tx->tx_open_txg < txg) {
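txg_wait_open() is the same pattern with two differences visible in the fragments: the default target when txg == 0 is just the next txg (open + 1, versus open + TXG_DEFER_SIZE above), and the exit test is on tx_open_txg rather than tx_synced_txg. A compressed sketch of just that loop, with the same caveats as before:

    #include <pthread.h>
    #include <stdint.h>

    typedef struct tx_state {
        pthread_mutex_t tx_sync_lock;
        pthread_cond_t  tx_quiesce_more_cv;  /* nudge the quiesce thread */
        pthread_cond_t  tx_quiesce_done_cv;  /* quiesce thread -> waiters */
        uint64_t tx_open_txg;
        uint64_t tx_quiesce_txg_waiting;
    } tx_state_t;

    /* Block until `txg` has been (or is) the open txg; txg == 0
     * means "the next one". */
    void
    txg_wait_open_sketch(tx_state_t *tx, uint64_t txg)
    {
        pthread_mutex_lock(&tx->tx_sync_lock);
        if (txg == 0)
            txg = tx->tx_open_txg + 1;
        if (tx->tx_quiesce_txg_waiting < txg)
            tx->tx_quiesce_txg_waiting = txg;
        while (tx->tx_open_txg < txg) {
            pthread_cond_broadcast(&tx->tx_quiesce_more_cv);
            pthread_cond_wait(&tx->tx_quiesce_done_cv,
                &tx->tx_sync_lock);
        }
        pthread_mutex_unlock(&tx->tx_sync_lock);
    }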
650 * If there isn't a txg syncing or in the pipeline, push another txg through
651 * the pipeline by quiescing the open txg.
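txg_kick() backs that comment: if nothing is currently quiescing, quiesced, or syncing, raise the quiesce watermark past the open txg and wake the quiesce thread. The exact guard conditions vary by ZFS version; the sketch below uses plausible simplified ones, with zero meaning "no txg in that stage".

    #include <pthread.h>
    #include <stdint.h>

    typedef struct tx_state {
        pthread_mutex_t tx_sync_lock;
        pthread_cond_t  tx_quiesce_more_cv;
        uint64_t tx_open_txg;
        uint64_t tx_quiesce_txg_waiting;
        uint64_t tx_quiesced_txg;      /* 0 = none handed off */
        uint64_t tx_syncing_txg;       /* 0 = nothing syncing */
    } tx_state_t;

    void
    txg_kick_sketch(tx_state_t *tx)
    {
        pthread_mutex_lock(&tx->tx_sync_lock);
        if (tx->tx_syncing_txg == 0 &&
            tx->tx_quiesced_txg == 0 &&
            tx->tx_quiesce_txg_waiting <= tx->tx_open_txg) {
            /* Nothing in flight: ask for the open txg to move. */
            tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
            pthread_cond_broadcast(&tx->tx_quiesce_more_cv);
        }
        pthread_mutex_unlock(&tx->tx_sync_lock);
    }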
688 * Per-txg object lists.
715 txg_list_empty(txg_list_t *tl, uint64_t txg)
717 return (tl->tl_head[txg & TXG_MASK] == NULL);
721 * Returns true if all txg lists are empty.
743 txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
745 int t = txg & TXG_MASK;
767 txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
769 int t = txg & TXG_MASK;
794 txg_list_remove(txg_list_t *tl, uint64_t txg)
796 int t = txg & TXG_MASK;
816 txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
818 int t = txg & TXG_MASK;
839 txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
841 int t = txg & TXG_MASK;
848 * Walk a txg list -- only safe if you know it's not changing.
851 txg_list_head(txg_list_t *tl, uint64_t txg)
853 int t = txg & TXG_MASK;
860 txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
862 int t = txg & TXG_MASK;
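All of the txg_list fragments hash the txg down to a slot with txg & TXG_MASK and operate on an intrusive list headed at tl_head[t]. The sketch below shows how that fits together end to end: each object embeds a node with one next pointer and one membership flag per slot, located via the tl_offset trick, so a single object can sit on the lists of several txgs at once. Shapes follow the ZFS types loosely; locking is omitted.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TXG_SIZE 4
    #define TXG_MASK (TXG_SIZE - 1)

    typedef struct txg_node {
        struct txg_node *tn_next[TXG_SIZE];
        uint8_t tn_member[TXG_SIZE];
    } txg_node_t;

    typedef struct txg_list {
        size_t tl_offset;              /* offset of txg_node_t in object */
        txg_node_t *tl_head[TXG_SIZE];
    } txg_list_t;

    #define NODE(tl, p)  ((txg_node_t *)((char *)(p) + (tl)->tl_offset))
    #define OBJ(tl, tn)  ((void *)((char *)(tn) - (tl)->tl_offset))

    void
    txg_list_add_sketch(txg_list_t *tl, void *p, uint64_t txg)
    {
        int t = txg & TXG_MASK;
        txg_node_t *tn = NODE(tl, p);

        if (!tn->tn_member[t]) {       /* idempotent add, as in ZFS */
            tn->tn_member[t] = 1;
            tn->tn_next[t] = tl->tl_head[t];
            tl->tl_head[t] = tn;
        }
    }

    void *
    txg_list_remove_sketch(txg_list_t *tl, uint64_t txg)
    {
        int t = txg & TXG_MASK;
        txg_node_t *tn = tl->tl_head[t];

        if (tn == NULL)
            return (NULL);
        tl->tl_head[t] = tn->tn_next[t];
        tn->tn_member[t] = 0;
        tn->tn_next[t] = NULL;
        return (OBJ(tl, tn));
    }

    /* Example object carrying its own txg_node_t. */
    typedef struct thing {
        int value;
        txg_node_t node;
    } thing_t;

    int
    main(void)
    {
        txg_list_t tl = { offsetof(thing_t, node), { NULL } };
        thing_t a = { .value = 1 }, b = { .value = 2 };
        thing_t *p;

        txg_list_add_sketch(&tl, &a, 100);
        txg_list_add_sketch(&tl, &b, 100);
        txg_list_add_sketch(&tl, &a, 101);  /* same object, another txg */

        while ((p = txg_list_remove_sketch(&tl, 100)) != NULL)
            printf("txg 100: %d\n", p->value);
        return (0);
    }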