Lines matching the cross-reference query defs:zilog — references to the zilog_t intent-log handle throughout the ZFS ZIL code, each prefixed with its source line number.
83 static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
118 zil_bp_tree_init(zilog_t *zilog)
120 avl_create(&zilog->zl_bp_tree, zil_bp_compare,
125 zil_bp_tree_fini(zilog_t *zilog)
127 avl_tree_t *t = &zilog->zl_bp_tree;
138 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
140 avl_tree_t *t = &zilog->zl_bp_tree;
161 zil_header_in_syncing_context(zilog_t *zilog)
163 return ((zil_header_t *)zilog->zl_header);
167 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
173 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
181 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
190 if (zilog->zl_header->zh_claim_txg == 0)
193 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
199 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
257 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
272 if (zilog->zl_header->zh_claim_txg == 0)
275 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
278 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
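The two readers above (zil_read_log_block() at 181, zil_read_log_data() at 257) fetch log blocks and write payloads through arc_read(). A log block also carries, in a trailer inside the block itself, the pointer to the next block and a sequence number, which is how the walk both advances and detects the end of the chain. A minimal sketch of that trailer check; the chain_trailer layout is invented for illustration and stands in for the real zil_chain_t, whose sequence lives in the checksum words:

	#include <stdint.h>
	#include <string.h>

	struct chain_trailer { uint64_t next_blk_id; uint64_t seq; };

	/*
	 * Hand back the next block id, and report end-of-chain when the
	 * embedded sequence is not the one expected: a stale or
	 * never-written block terminates the walk instead of corrupting it.
	 */
	static int
	read_log_block(const uint8_t *blk, size_t blksz, uint64_t expect_seq,
	    uint64_t *next_id)
	{
		struct chain_trailer tr;

		memcpy(&tr, blk + blksz - sizeof (tr), sizeof (tr));
		if (tr.seq != expect_seq)
			return (-1);	/* the real code returns ECKSUM */
		*next_id = tr.next_blk_id;
		return (0);
	}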
294 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
297 const zil_header_t *zh = zilog->zl_header;
325 zil_bp_tree_init(zilog);
334 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
343 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
353 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
361 zilog->zl_parse_error = error;
362 zilog->zl_parse_blk_seq = max_blk_seq;
363 zilog->zl_parse_lr_seq = max_lr_seq;
364 zilog->zl_parse_blk_count = blk_count;
365 zilog->zl_parse_lr_count = lr_count;
370 zil_bp_tree_fini(zilog);
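zil_parse() (294-370) is the generic walker underneath claim, free, and replay: it visits each block in the chain with parse_blk_func, then each record inside that block with parse_lr_func, recording high-water sequence numbers and counts in the zl_parse_* fields on the way out, and using the zl_bp_tree AVL set (118-140) to avoid visiting a block twice. A self-contained sketch of the two-level walk; the types and callbacks here are invented for illustration, not the real ZFS definitions:

	#include <stdint.h>
	#include <stddef.h>

	typedef struct blk { struct blk *next; size_t nrec; uint64_t seq; } blk_t;
	typedef int (*blk_cb_t)(blk_t *, void *);
	typedef int (*rec_cb_t)(blk_t *, size_t, void *);

	static int
	walk_chain(blk_t *head, blk_cb_t on_blk, rec_cb_t on_rec, void *arg,
	    uint64_t *max_blk_seq)
	{
		int error = 0;

		for (blk_t *b = head; b != NULL && error == 0; b = b->next) {
			if ((error = on_blk(b, arg)) != 0)	/* claim/free/replay hook */
				break;
			*max_blk_seq = b->seq;			/* cf. zl_parse_blk_seq */
			for (size_t r = 0; r < b->nrec && error == 0; r++)
				error = on_rec(b, r, arg);	/* per-record hook */
		}
		return (error);
	}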
377 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
384 zil_bp_tree_add(zilog, bp) != 0)
387 return (zio_wait(zio_claim(NULL, zilog->zl_spa,
393 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
410 (error = zil_read_log_data(zilog, lr, NULL)) != 0)
412 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
417 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
419 zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
425 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
434 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
436 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
442 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
447 lwb->lwb_zilog = zilog;
461 mutex_enter(&zilog->zl_lock);
462 list_insert_tail(&zilog->zl_lwb_list, lwb);
463 mutex_exit(&zilog->zl_lock);
473 zilog_dirty(zilog_t *zilog, uint64_t txg)
475 dsl_pool_t *dp = zilog->zl_dmu_pool;
476 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
481 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
483 dmu_buf_add_ref(ds->ds_dbuf, zilog);
495 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
497 dsl_pool_t *dp = zilog->zl_dmu_pool;
499 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
509 zilog_is_dirty(zilog_t *zilog)
511 dsl_pool_t *dp = zilog->zl_dmu_pool;
514 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
524 zil_create(zilog_t *zilog)
526 const zil_header_t *zh = zilog->zl_header;
536 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
549 tx = dmu_tx_create(zilog->zl_os);
551 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
555 zio_free_zil(zilog->zl_spa, txg, &blk);
559 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
560 ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
563 zil_init_log_chain(zilog, &blk);
570 lwb = zil_alloc_lwb(zilog, &blk, txg);
579 txg_wait_synced(zilog->zl_dmu_pool, txg);
597 zil_destroy(zilog_t *zilog, boolean_t keep_first)
599 const zil_header_t *zh = zilog->zl_header;
607 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
609 zilog->zl_old_header = *zh; /* debugging aid */
614 tx = dmu_tx_create(zilog->zl_os);
616 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
619 mutex_enter(&zilog->zl_lock);
621 ASSERT3U(zilog->zl_destroy_txg, <, txg);
622 zilog->zl_destroy_txg = txg;
623 zilog->zl_keep_first = keep_first;
625 if (!list_is_empty(&zilog->zl_lwb_list)) {
628 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
629 list_remove(&zilog->zl_lwb_list, lwb);
632 zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
636 zil_destroy_sync(zilog, tx);
638 mutex_exit(&zilog->zl_lock);
644 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
646 ASSERT(list_is_empty(&zilog->zl_lwb_list));
647 (void) zil_parse(zilog, zil_free_log_block,
648 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
656 zilog_t *zilog;
675 zilog = dmu_objset_zil(os);
676 zh = zil_header_in_syncing_context(zilog);
678 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
680 zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
696 (void) zil_parse(zilog, zil_claim_log_block,
699 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
700 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
701 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
707 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
721 zilog_t *zilog;
735 zilog = dmu_objset_zil(os);
736 bp = (blkptr_t *)&zilog->zl_header->zh_log;
765 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
766 zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
786 zil_add_block(zilog_t *zilog, const blkptr_t *bp)
788 avl_tree_t *t = &zilog->zl_vdev_tree;
797 ASSERT(zilog->zl_writer);
804 mutex_enter(&zilog->zl_vdev_lock);
813 mutex_exit(&zilog->zl_vdev_lock);
817 zil_flush_vdevs(zilog_t *zilog)
819 spa_t *spa = zilog->zl_spa;
820 avl_tree_t *t = &zilog->zl_vdev_tree;
825 ASSERT(zilog->zl_writer);
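zil_add_block() (786) records every vdev a commit wrote to in the zl_vdev_tree AVL set, and zil_flush_vdevs() (817) walks that set and flushes each device's write cache before zil_commit() returns, which is what makes a completed commit durable. A sketch of the same collect-then-flush pattern, using a tiny sorted-array set in place of the AVL tree; flush_cache_stub() stands in for the real cache-flush I/O:

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	#define MAX_VDEVS 16

	struct vdev_set { uint64_t ids[MAX_VDEVS]; size_t n; };

	static void
	vdev_set_add(struct vdev_set *s, uint64_t id)	/* like zil_add_block() */
	{
		size_t i = 0;

		while (i < s->n && s->ids[i] < id)
			i++;
		if (i < s->n && s->ids[i] == id)
			return;			/* already recorded */
		if (s->n == MAX_VDEVS)
			return;			/* sketch only: drop on overflow */
		memmove(&s->ids[i + 1], &s->ids[i], (s->n - i) * sizeof (s->ids[0]));
		s->ids[i] = id;
		s->n++;
	}

	static void
	flush_cache_stub(uint64_t id)	/* DKIOCFLUSHWRITECACHE in real life */
	{
		(void) id;
	}

	static void
	vdev_set_flush(struct vdev_set *s)	/* like zil_flush_vdevs() */
	{
		for (size_t i = 0; i < s->n; i++)
			flush_cache_stub(s->ids[i]);
		s->n = 0;			/* set is emptied after flushing */
	}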
861 zilog_t *zilog = lwb->lwb_zilog;
881 mutex_enter(&zilog->zl_lock);
884 mutex_exit(&zilog->zl_lock);
898 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
906 if (zilog->zl_root_zio == NULL) {
907 zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
911 lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
938 #define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
939 (((zilog)->zl_cur_used < zil_slog_limit) || \
940 ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
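USE_SLOG() gates whether a log write goes to the separate log device: only latency-biased datasets qualify, and only while the log stays small — either current usage is under zil_slog_limit, or the queued itx backlog is under twice that limit. The same predicate restated as a function for readability; the struct and the ZFS_LOGBIAS_LATENCY value are stand-ins, and the default limit here is illustrative:

	#include <stdint.h>
	#include <stdbool.h>

	#define ZFS_LOGBIAS_LATENCY 0	/* stand-in for the real enum value */

	static uint64_t zil_slog_limit = 1024 * 1024;	/* illustrative default */

	struct zl_stub { int zl_logbias; uint64_t zl_cur_used, zl_itx_list_sz; };

	static bool
	use_slog(const struct zl_stub *zl)
	{
		/* Only latency-biased datasets use the separate log device... */
		if (zl->zl_logbias != ZFS_LOGBIAS_LATENCY)
			return (false);
		/*
		 * ...and only while the log is small: current usage under the
		 * limit, or the queued itx backlog under twice the limit.
		 */
		return (zl->zl_cur_used < zil_slog_limit ||
		    zl->zl_itx_list_sz < (zil_slog_limit << 1));
	}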
947 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
951 spa_t *spa = zilog->zl_spa;
977 tx = dmu_tx_create(zilog->zl_os);
979 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1000 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
1006 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
1008 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
1009 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
1014 USE_SLOG(zilog));
1023 nlwb = zil_alloc_lwb(zilog, bp, txg);
1026 zil_add_block(zilog, &lwb->lwb_blk);
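The zl_prev_blks rotor (1006-1009) sizes the next log block as the maximum of the last ZIL_PREV_BLKS block sizes, so one quiet commit does not shrink the block size abruptly and force a too-small block on the next burst. A sketch of the rotor by itself; the real code additionally rounds the result up and clamps it to the maximum ZIL block size, which is omitted here:

	#include <stdint.h>

	#define ZIL_PREV_BLKS 16	/* power of two, as in the real code */

	struct blksz_rotor {
		uint64_t prev[ZIL_PREV_BLKS];
		int rotor;
	};

	static uint64_t
	next_blksz(struct blksz_rotor *r, uint64_t cur_used)
	{
		uint64_t sz = cur_used;		/* what this commit actually needs */

		r->prev[r->rotor] = sz;
		for (int i = 0; i < ZIL_PREV_BLKS; i++)
			if (r->prev[i] > sz)
				sz = r->prev[i];	/* keep the recent maximum */
		r->rotor = (r->rotor + 1) & (ZIL_PREV_BLKS - 1);
		return (sz);
	}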
1058 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1076 zilog->zl_cur_used += (reclen + dlen);
1078 zil_lwb_write_init(zilog, lwb);
1084 lwb = zil_lwb_write_start(zilog, lwb);
1087 zil_lwb_write_init(zilog, lwb);
1090 txg_wait_synced(zilog->zl_dmu_pool, txg);
1104 if (txg > spa_freeze_txg(zilog->zl_spa))
1105 txg_wait_synced(zilog->zl_dmu_pool, txg);
1118 error = zilog->zl_get_data(
1121 txg_wait_synced(zilog->zl_dmu_pool, txg);
1138 lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
1225 zil_remove_async(zilog_t *zilog, uint64_t oid)
1237 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1240 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1243 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1269 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1283 zil_remove_async(zilog, itx->itx_oid);
1289 zil_async_to_sync(zilog, itx->itx_oid);
1291 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1296 itxg = &zilog->zl_itxg[txg & TXG_MASK];
1308 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1324 atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
1344 zilog_dirty(zilog, txg);
1360 zil_clean(zilog_t *zilog, uint64_t synced_txg)
1362 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1372 ASSERT(zilog->zl_clean_taskq != NULL);
1373 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1385 if (taskq_dispatch(zilog->zl_clean_taskq,
1394 zil_get_commit_list(zilog_t *zilog)
1397 list_t *commit_list = &zilog->zl_itx_commit_list;
1400 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1403 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1411 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1427 ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
1428 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
1435 atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
1442 zil_async_to_sync(zilog_t *zilog, uint64_t foid)
1449 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1452 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1459 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
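zil_remove_async(), zil_get_commit_list(), and zil_async_to_sync() all iterate the currently open txgs and index zl_itxg[txg & TXG_MASK]: a fixed ring of per-txg itx buckets that is reused as txgs sync. A sketch of that ring-bucket idiom; the bucket type is a stand-in, and TXG_SIZE is 4 in the real code:

	#include <stdint.h>

	#define TXG_SIZE 4		/* power of two */
	#define TXG_MASK (TXG_SIZE - 1)

	struct itxg_stub { uint64_t itxg_txg; int itxg_count; };

	static struct itxg_stub itxg_ring[TXG_SIZE];

	/* Find the bucket for a txg; a slot is reused once its old txg synced. */
	static struct itxg_stub *
	itxg_for(uint64_t txg)
	{
		struct itxg_stub *itxg = &itxg_ring[txg & TXG_MASK];

		if (itxg->itxg_txg != txg)
			itxg->itxg_count = 0;	/* stale contents from an older txg */
		itxg->itxg_txg = txg;
		return (itxg);
	}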
1495 zil_commit_writer(zilog_t *zilog)
1500 spa_t *spa = zilog->zl_spa;
1503 ASSERT(zilog->zl_root_zio == NULL);
1505 mutex_exit(&zilog->zl_lock);
1507 zil_get_commit_list(zilog);
1513 if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1514 mutex_enter(&zilog->zl_lock);
1518 if (zilog->zl_suspend) {
1521 lwb = list_tail(&zilog->zl_lwb_list);
1523 lwb = zil_create(zilog);
1526 DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1527 while (itx = list_head(&zilog->zl_itx_commit_list)) {
1538 lwb = zil_lwb_commit(zilog, itx, lwb);
1539 list_remove(&zilog->zl_itx_commit_list, itx);
1543 DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1547 lwb = zil_lwb_write_start(zilog, lwb);
1549 zilog->zl_cur_used = 0;
1554 if (zilog->zl_root_zio) {
1555 error = zio_wait(zilog->zl_root_zio);
1556 zilog->zl_root_zio = NULL;
1557 zil_flush_vdevs(zilog);
1561 txg_wait_synced(zilog->zl_dmu_pool, 0);
1563 mutex_enter(&zilog->zl_lock);
1571 zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1598 zil_commit(zilog_t *zilog, uint64_t foid)
1602 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1606 zil_async_to_sync(zilog, foid);
1608 mutex_enter(&zilog->zl_lock);
1609 mybatch = zilog->zl_next_batch;
1610 while (zilog->zl_writer) {
1611 cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1612 if (mybatch <= zilog->zl_com_batch) {
1613 mutex_exit(&zilog->zl_lock);
1618 zilog->zl_next_batch++;
1619 zilog->zl_writer = B_TRUE;
1620 zil_commit_writer(zilog);
1621 zilog->zl_com_batch = mybatch;
1622 zilog->zl_writer = B_FALSE;
1623 mutex_exit(&zilog->zl_lock);
1626 cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1629 cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
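zil_commit() (1598-1629) serializes writers with an alternating two-batch scheme: a caller samples zl_next_batch, sleeps on one of two condition variables keyed by batch parity while another writer is active, and returns early if zl_com_batch has already passed its batch, meaning another thread's write covered it. Two cvs are used so that waking the finished batch's waiters does not also wake the batch still forming. A compact pthreads model of the same protocol; the struct stands in for zilog_t, and the log write is reduced to a stub which, unlike the real zil_commit_writer(), runs under the lock:

	#include <pthread.h>
	#include <stdint.h>
	#include <stdbool.h>

	struct batcher {
		pthread_mutex_t lock;
		pthread_cond_t  cv_batch[2];	/* waiters, by batch parity */
		uint64_t        next_batch;	/* next batch to form; init 1 */
		uint64_t        com_batch;	/* last committed batch; init 0 */
		bool            writer;		/* a thread is writing a batch */
	};

	static void
	write_batch_stub(void)	/* stands in for zil_commit_writer() */
	{
	}

	void
	batch_commit(struct batcher *b)
	{
		pthread_mutex_lock(&b->lock);
		uint64_t mybatch = b->next_batch;
		while (b->writer) {
			/* Sleep on the cv for my batch's parity. */
			pthread_cond_wait(&b->cv_batch[mybatch & 1], &b->lock);
			if (mybatch <= b->com_batch) {
				/* Another thread's write covered my batch. */
				pthread_mutex_unlock(&b->lock);
				return;
			}
		}

		/* Become the writer for this batch. */
		b->next_batch++;
		b->writer = true;
		write_batch_stub();
		b->com_batch = mybatch;
		b->writer = false;
		pthread_mutex_unlock(&b->lock);

		/* Wake the next batch's writer, then everyone waiting on mine. */
		pthread_cond_signal(&b->cv_batch[(mybatch + 1) & 1]);
		pthread_cond_broadcast(&b->cv_batch[mybatch & 1]);
	}

With next_batch initialized to 1 and com_batch to 0, matching zl_next_batch = 1 in zil_alloc() at 1742, at most one thread writes at a time while later arrivals coalesce into the next batch.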
1636 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1638 zil_header_t *zh = zil_header_in_syncing_context(zilog);
1640 spa_t *spa = zilog->zl_spa;
1641 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1651 mutex_enter(&zilog->zl_lock);
1653 ASSERT(zilog->zl_stop_sync == 0);
1661 if (zilog->zl_destroy_txg == txg) {
1664 ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1667 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1669 if (zilog->zl_keep_first) {
1678 zil_init_log_chain(zilog, &blk);
1683 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1687 list_remove(&zilog->zl_lwb_list, lwb);
1697 if (list_head(&zilog->zl_lwb_list) == NULL)
1700 mutex_exit(&zilog->zl_lock);
1717 zil_set_sync(zilog_t *zilog, uint64_t sync)
1719 zilog->zl_sync = sync;
1723 zil_set_logbias(zilog_t *zilog, uint64_t logbias)
1725 zilog->zl_logbias = logbias;
1731 zilog_t *zilog;
1733 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1735 zilog->zl_header = zh_phys;
1736 zilog->zl_os = os;
1737 zilog->zl_spa = dmu_objset_spa(os);
1738 zilog->zl_dmu_pool = dmu_objset_pool(os);
1739 zilog->zl_destroy_txg = TXG_INITIAL - 1;
1740 zilog->zl_logbias = dmu_objset_logbias(os);
1741 zilog->zl_sync = dmu_objset_syncprop(os);
1742 zilog->zl_next_batch = 1;
1744 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1747 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1751 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1754 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1757 mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1759 avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1762 cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1763 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1764 cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1765 cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1767 return (zilog);
1771 zil_free(zilog_t *zilog)
1773 zilog->zl_stop_sync = 1;
1775 ASSERT0(zilog->zl_suspend);
1776 ASSERT0(zilog->zl_suspending);
1778 ASSERT(list_is_empty(&zilog->zl_lwb_list));
1779 list_destroy(&zilog->zl_lwb_list);
1781 avl_destroy(&zilog->zl_vdev_tree);
1782 mutex_destroy(&zilog->zl_vdev_lock);
1784 ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1785 list_destroy(&zilog->zl_itx_commit_list);
1795 if (zilog->zl_itxg[i].itxg_itxs)
1796 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1797 mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1800 mutex_destroy(&zilog->zl_lock);
1802 cv_destroy(&zilog->zl_cv_writer);
1803 cv_destroy(&zilog->zl_cv_suspend);
1804 cv_destroy(&zilog->zl_cv_batch[0]);
1805 cv_destroy(&zilog->zl_cv_batch[1]);
1807 kmem_free(zilog, sizeof (zilog_t));
1816 zilog_t *zilog = dmu_objset_zil(os);
1818 ASSERT(zilog->zl_clean_taskq == NULL);
1819 ASSERT(zilog->zl_get_data == NULL);
1820 ASSERT(list_is_empty(&zilog->zl_lwb_list));
1822 zilog->zl_get_data = get_data;
1823 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1826 return (zilog);
1833 zil_close(zilog_t *zilog)
1838 zil_commit(zilog, 0); /* commit all itx */
1846 mutex_enter(&zilog->zl_lock);
1847 lwb = list_tail(&zilog->zl_lwb_list);
1850 mutex_exit(&zilog->zl_lock);
1852 txg_wait_synced(zilog->zl_dmu_pool, txg);
1854 if (zilog_is_dirty(zilog))
1855 zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
1856 VERIFY(!zilog_is_dirty(zilog));
1858 taskq_destroy(zilog->zl_clean_taskq);
1859 zilog->zl_clean_taskq = NULL;
1860 zilog->zl_get_data = NULL;
1865 mutex_enter(&zilog->zl_lock);
1866 lwb = list_head(&zilog->zl_lwb_list);
1868 ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1869 list_remove(&zilog->zl_lwb_list, lwb);
1873 mutex_exit(&zilog->zl_lock);
1904 zilog_t *zilog;
1911 zilog = dmu_objset_zil(os);
1913 mutex_enter(&zilog->zl_lock);
1914 zh = zilog->zl_header;
1917 mutex_exit(&zilog->zl_lock);
1928 if (cookiep == NULL && !zilog->zl_suspending &&
1929 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
1930 mutex_exit(&zilog->zl_lock);
1938 zilog->zl_suspend++;
1940 if (zilog->zl_suspend > 1) {
1946 while (zilog->zl_suspending)
1947 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1948 mutex_exit(&zilog->zl_lock);
1966 mutex_exit(&zilog->zl_lock);
1970 zilog->zl_suspending = B_TRUE;
1971 mutex_exit(&zilog->zl_lock);
1973 zil_commit(zilog, 0);
1975 zil_destroy(zilog, B_FALSE);
1977 mutex_enter(&zilog->zl_lock);
1978 zilog->zl_suspending = B_FALSE;
1979 cv_broadcast(&zilog->zl_cv_suspend);
1980 mutex_exit(&zilog->zl_lock);
1993 zilog_t *zilog = dmu_objset_zil(os);
1995 mutex_enter(&zilog->zl_lock);
1996 ASSERT(zilog->zl_suspend != 0);
1997 zilog->zl_suspend--;
1998 mutex_exit(&zilog->zl_lock);
2011 zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
2015 zilog->zl_replaying_seq--; /* didn't actually replay this one */
2017 dmu_objset_name(zilog->zl_os, name);
2029 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
2032 const zil_header_t *zh = zilog->zl_header;
2037 zilog->zl_replaying_seq = lr->lrc_seq;
2049 return (zil_replay_error(zilog, lr, EINVAL));
2056 error = dmu_object_info(zilog->zl_os,
2071 error = zil_read_log_data(zilog, (lr_write_t *)lr,
2074 return (zil_replay_error(zilog, lr, error));
2102 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
2105 return (zil_replay_error(zilog, lr, error));
2112 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
2114 zilog->zl_replay_blks++;
2125 zilog_t *zilog = dmu_objset_zil(os);
2126 const zil_header_t *zh = zilog->zl_header;
2130 zil_destroy(zilog, B_TRUE);
2142 txg_wait_synced(zilog->zl_dmu_pool, 0);
2144 zilog->zl_replay = B_TRUE;
2145 zilog->zl_replay_time = ddi_get_lbolt();
2146 ASSERT(zilog->zl_replay_blks == 0);
2147 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2151 zil_destroy(zilog, B_FALSE);
2152 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
2153 zilog->zl_replay = B_FALSE;
2157 zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
2159 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2162 if (zilog->zl_replay) {
2163 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2164 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2165 zilog->zl_replaying_seq;