Lines Matching refs:un (source lines referencing the symbol un)

327 mirror_geterror(mm_unit_t *un, int *smi, int *cip, int clr_error,
339 sm = &un->un_sm[i];
340 smic = &un->un_smic[i];
345 compcnt = (*(smic->sm_get_component_count)) (sm->sm_dev, un);
411 mm_unit_t *un,
435 mnum = MD_SID(un);
436 setno = MD_UN2SET(un);
437 sm = &un->un_sm[smi];
438 smic = &un->un_smic[smi];
446 if (MD_STATUS(un) & MD_UN_RESYNC_ACTIVE)
455 allochspmsg.msg_allochsp_mnum = un->c.un_self_id;
546 if ((new_un != un) || (un->c.un_type != MD_METAMIRROR)) {
608 sm = &un->un_sm[smi];
619 set_sm_comp_state(un, smi, ci, CS_RESYNC, recids,
627 setno, MD_SID(un));
635 un->un_rs_type = MD_RS_NONE;
636 un->un_rs_resync_done = 0;
637 un->un_rs_resync_2_do = 0;
677 check_unit_4_hotspares(mm_unit_t *un, int flags)
685 if (MD_STATUS(un) & MD_UN_RESYNC_ACTIVE)
689 sm = &un->un_sm[i];
690 smic = &un->un_smic[i];
704 if (check_comp_4_hotspares(un, i, ci,
717 mm_unit_t *un;
742 un = (mm_unit_t *)md_unit_readerlock(ui);
749 if (MD_MNSET_SETNO(MD_UN2SET(un)) &&
750 md_set[MD_UN2SET(un)].s_am_i_master == 0) {
754 if (MD_STATUS(un) & MD_UN_RESYNC_ACTIVE) {
760 un = (mm_unit_t *)md_unit_writerlock(ui);
767 if (check_unit_4_hotspares(un, MD_HOTSPARE_LINKHELD) == 0)
823 * PARAMETERS: mm_unit_t un - pointer to mirror unit structure
829 mirror_openfail_console_info(mm_unit_t *un, int smi, int ci)
835 tmpdev = un->un_sm[smi].sm_dev;
840 md_shortname(MD_SID(un)), md_devname(MD_UN2SET(un),
844 md_shortname(MD_SID(un)));
849 mirror_close_all_devs(mm_unit_t *un, int md_cflags)
855 if (!SMS_BY_INDEX_IS(un, i, SMS_INUSE))
857 dev = un->un_sm[i].sm_dev;
951 mm_unit_t *un;
956 un = MD_UNIT(mnum);
966 if (!SMS_BY_INDEX_IS(un, i, SMS_INUSE))
969 sm = &un->un_sm[i];
970 smic = &un->un_smic[i];
999 mp_unit_t *un;
1007 un = (mp_unit_t *)md_unit_readerlock(ui);
1009 dev_mnum = MD_SID(un);
1013 tmpdev = un->un_dev;
1017 un->un_key) == 1) {
1019 tmpdev, un->un_key);
1085 submirror_unavailable(mm_unit_t *un, int smi, int from_probe)
1093 sm = &un->un_sm[smi];
1094 smic = &un->un_smic[smi];
1096 compcnt = (*(smic->sm_get_component_count)) (sm->sm_dev, un);
1119 mm_unit_t *un;
1133 un = MD_UNIT(mnum);
1135 setno = MD_UN2SET(un);
1138 md_dev64_t tmpdev = un->un_sm[i].sm_dev;
1140 if (!SMS_BY_INDEX_IS(un, i, SMS_INUSE))
1144 un->un_sm[i].sm_dev = tmpdev;
1175 if (!SMS_BY_INDEX_IS(un, i, SMS_INUSE))
1178 tmpdev = un->un_sm[i].sm_dev;
1194 tmpdev = un->un_sm[i].sm_dev;
1196 un->un_sm[i].sm_dev = tmpdev;
1202 un = (mm_unit_t *)md_ioctl_writerlock(lockp, ui);
1205 un = (mm_unit_t *)md_unit_writerlock(ui);
1222 if (!SMS_BY_INDEX_IS(un, i, SMS_INUSE))
1226 tmpdev = un->un_sm[i].sm_dev;
1230 if (submirror_unavailable(un, i, 0)) {
1251 while (mirror_geterror(un, &smi, &ci, 1, 0) != 0) {
1252 if (mirror_other_sources(un, smi, ci, 1) == 1) {
1255 (void) mirror_close_all_devs(un, md_oflags);
1257 SVM_TAG_METADEVICE, setno, MD_SID(un));
1258 mirror_openfail_console_info(un, smi, ci);
1292 set_sm_comp_state(un, c->ec_smi, c->ec_ci, CS_ERRED, 0,
1295 set_sm_comp_state(un, c->ec_smi, c->ec_ci, CS_ERRED, 0,
1304 SVM_TAG_METADEVICE, setno, MD_SID(un));
1342 mm_unit_t *un;
1348 un = ps->ps_un;
1350 mutex_enter(&un->un_overlap_tree_mx);
1351 avl_remove(&un->un_overlap_root, ps);
1353 if (un->un_overlap_tree_flag != 0) {
1354 un->un_overlap_tree_flag = 0;
1355 cv_broadcast(&un->un_overlap_tree_cv);
1357 mutex_exit(&un->un_overlap_tree_mx);
1374 mm_unit_t *un;
1381 un = ps->ps_un;
1382 mutex_enter(&un->un_overlap_tree_mx);
1385 mutex_exit(&un->un_overlap_tree_mx);
1392 ps1 = avl_find(&un->un_overlap_root, ps, &where);
1398 avl_insert(&un->un_overlap_root, ps, where);
1408 un->un_overlap_tree_flag = 1;
1409 cv_wait(&un->un_overlap_tree_cv,
1410 &un->un_overlap_tree_mx);
1413 mutex_exit(&un->un_overlap_tree_mx);
1437 mm_unit_t *un,
1451 if (md_get_setstatus(MD_UN2SET(un)) & MD_SET_STALE)
1463 if (un != NULL)
1464 recids[ri++] = un->c.un_record_id;
1468 sm = &un->un_sm[i];
1505 select_write_units(struct mm_unit *un, md_mps_t *ps)
1513 if (SUBMIRROR_IS_WRITEABLE(un, i)) {
1526 select_write_after_read_units(struct mm_unit *un, md_mps_t *ps)
1534 if (SUBMIRROR_IS_WRITEABLE(un, i) &&
1535 un->un_sm[i].sm_flags & MD_SM_RESYNC_TARGET) {
1552 mm_unit_t *un,
1572 if (!SUBMIRROR_IS_READABLE(un, i))
1574 sm = &un->un_sm[i];
1575 smic = &un->un_smic[i];
1600 if (un->un_sm[i].sm_flags & MD_SM_FAILFAST &&
1605 return (un->un_sm[i].sm_dev);
1618 dev = un->un_sm[i].sm_dev;
1650 mm_unit_t *un;
1659 un = ps->ps_un;
1662 if (!SMS_BY_INDEX_IS(un, i, SMS_RUNNING))
1678 sm_index = un->un_dmr_last_read;
1683 switch (un->un_read_option) {
1686 howmany(un->c.un_total_blocks, nunits));
1695 un->un_last_read = (un->un_last_read + 1) % nunits;
1697 un->un_last_read);
1701 bp->b_edev = md_dev64_to_dev(un->un_sm[sm_index].sm_dev);
1704 if (un->un_sm[sm_index].sm_flags & MD_SM_FAILFAST) {
1713 mirror_are_submirrors_available(mm_unit_t *un)
1717 md_dev64_t tmpdev = un->un_sm[i].sm_dev;
1719 if ((!SMS_BY_INDEX_IS(un, i, SMS_INUSE)) ||
1734 build_submirror(mm_unit_t *un, int i, int snarfing)
1741 sm = &un->un_sm[i];
1742 smic = &un->un_smic[i];
1746 setno = MD_UN2SET(un);
1756 un->c.un_flag |= (su->c.un_flag & MD_LABELED);
1771 MD_STATUS(un) |= MD_UN_OFFLINE_SM;
1772 md_set_parent(sm->sm_dev, MD_SID(un));
1776 mirror_cleanup(mm_unit_t *un)
1787 if (MD_MNSET_SETNO(MD_UN2SET(un)) &&
1788 md_set[MD_UN2SET(un)].s_am_i_master == 0) {
1793 if (!SMS_BY_INDEX_IS(un, smi, SMS_INUSE))
1795 sv[nsv].setno = MD_UN2SET(un);
1796 sv[nsv++].key = un->un_sm[smi].sm_key;
1799 recid = un->un_rr_dirty_recid;
1800 mddb_deleterec_wrapper(un->c.un_record_id);
1843 collapse_submirrors(mm_unit_t *un)
1848 int nsmidx = un->un_nsm - 1;
1855 sm = &un->un_sm[smi];
1867 if (!SMS_BY_INDEX_IS(un, smi, SMS_INUSE)) {
1880 smic = &un->un_smic[smi];
1881 sm = &un->un_sm[smi];
1908 old_sm = &un->un_sm[smi];
1909 new_sm = &un->un_sm[smi - 1];
1924 mirror_build_incore(mm_unit_t *un, int snarfing)
1928 if (MD_STATUS(un) & MD_UN_BEING_RESET) {
1929 mddb_setrecprivate(un->c.un_record_id, MD_PRV_PENDCLEAN);
1933 if (mirror_are_submirrors_available(un) == 0)
1936 if (MD_UNIT(MD_SID(un)) != NULL)
1939 MD_STATUS(un) = 0;
1942 MD_CAPAB(un) = MD_CAN_META_CHILD | MD_CAN_PARENT | MD_CAN_SP;
1944 un->un_overlap_tree_flag = 0;
1945 avl_create(&un->un_overlap_root, mirror_overlap_compare,
1957 collapse_submirrors(un);
1960 build_submirror(un, i, snarfing);
1962 if (unit_setup_resync(un, snarfing) != 0) {
1964 mddb_setrecprivate(un->c.un_record_id, MD_PRV_GOTIT);
1977 if (MD_MNSET_SETNO(MD_UN2SET(un)) &&
1978 !(md_get_setstatus(MD_UN2SET(un)) & MD_SET_STALE)) {
1985 mutex_init(&un->un_overlap_tree_mx, NULL, MUTEX_DEFAULT, NULL);
1986 cv_init(&un->un_overlap_tree_cv, NULL, CV_DEFAULT, NULL);
1988 un->un_suspend_wr_flag = 0;
1989 mutex_init(&un->un_suspend_wr_mx, NULL, MUTEX_DEFAULT, NULL);
1990 cv_init(&un->un_suspend_wr_cv, NULL, CV_DEFAULT, NULL);
1997 mutex_init(&un->un_owner_mx, NULL, MUTEX_DEFAULT, NULL);
2003 mutex_init(&un->un_rs_thread_mx, NULL, MUTEX_DEFAULT, NULL);
2004 cv_init(&un->un_rs_thread_cv, NULL, CV_DEFAULT, NULL);
2010 mutex_init(&un->un_rs_progress_mx, NULL, MUTEX_DEFAULT, NULL);
2011 cv_init(&un->un_rs_progress_cv, NULL, CV_DEFAULT, NULL);
2018 mutex_init(&un->un_dmr_mx, NULL, MUTEX_DEFAULT, NULL);
2019 cv_init(&un->un_dmr_cv, NULL, CV_DEFAULT, NULL);
2025 rw_init(&un->un_pernode_dirty_mx[i], NULL, RW_DEFAULT, NULL);
2029 md_nblocks_set(MD_SID(un), un->c.un_total_blocks);
2030 MD_UNIT(MD_SID(un)) = un;
2037 reset_mirror(struct mm_unit *un, minor_t mnum, int removing)
2052 shortcnt = un->un_rrd_num * sizeof (short);
2053 bitcnt = howmany(un->un_rrd_num, NBBY);
2055 if (un->un_outstanding_writes)
2056 kmem_free((caddr_t)un->un_outstanding_writes, shortcnt);
2057 if (un->un_goingclean_bm)
2058 kmem_free((caddr_t)un->un_goingclean_bm, bitcnt);
2059 if (un->un_goingdirty_bm)
2060 kmem_free((caddr_t)un->un_goingdirty_bm, bitcnt);
2061 if (un->un_resync_bm)
2062 kmem_free((caddr_t)un->un_resync_bm, bitcnt);
2063 if (un->un_pernode_dirty_sum)
2064 kmem_free((caddr_t)un->un_pernode_dirty_sum, un->un_rrd_num);
2070 if (un->un_drl_task != NULL)
2071 ddi_taskq_destroy(un->un_drl_task);
2085 if (!SMS_BY_INDEX_IS(un, smi, SMS_INUSE))
2088 su = MD_UNIT(md_getminor(un->un_sm[smi].sm_dev));
2090 md_reset_parent(un->un_sm[smi].sm_dev);
2091 reset_comp_states(&un->un_sm[smi], &un->un_smic[smi]);
2094 sv[nsv++].key = un->un_sm[smi].sm_key;
2098 MD_STATUS(un) |= MD_UN_BEING_RESET;
2099 recid = un->un_rr_dirty_recid;
2100 vtoc_id = un->c.un_vtoc_id;
2101 selfid = MD_SID(un);
2103 mirror_commit(un, bits, 0);
2105 avl_destroy(&un->un_overlap_root);
2108 mutex_destroy(&un->un_suspend_wr_mx);
2109 cv_destroy(&un->un_suspend_wr_cv);
2110 mutex_destroy(&un->un_overlap_tree_mx);
2111 cv_destroy(&un->un_overlap_tree_cv);
2112 mutex_destroy(&un->un_owner_mx);
2113 mutex_destroy(&un->un_rs_thread_mx);
2114 cv_destroy(&un->un_rs_thread_cv);
2115 mutex_destroy(&un->un_rs_progress_mx);
2116 cv_destroy(&un->un_rs_progress_cv);
2117 mutex_destroy(&un->un_dmr_mx);
2118 cv_destroy(&un->un_dmr_cv);
2121 rw_destroy(&un->un_pernode_dirty_mx[i]);
2122 if (un->un_pernode_dirty_bm[i])
2123 kmem_free((caddr_t)un->un_pernode_dirty_bm[i], bitcnt);
2129 if (un->c.un_revision & MD_FN_META_DEV) {
2130 (void) md_rem_selfname(un->c.un_self_id);
2134 mddb_deleterec_wrapper(un->c.un_record_id);
2236 mm_unit_t *un;
2246 un = (mm_unit_t *)md_ioctl_openclose_enter(lockp, ui);
2248 un = (mm_unit_t *)md_unit_openclose_enter(ui);
2267 if (new_resync && !(MD_STATUS(un) & MD_UN_KEEP_DIRTY)) {
2268 if (!MD_MNSET_SETNO(MD_UN2SET(un)) ||
2270 mirror_process_unit_resync(un);
2272 (void) mirror_close_all_devs(un, md_cflags);
2280 if (MD_MNSET_SETNO(MD_UN2SET(un)) &&
2327 reset_lasterred(mm_unit_t *un, int smi, mddb_recid_t *extras, uint_t flags,
2338 sm = &un->un_sm[i];
2339 smic = &un->un_smic[i];
2348 compcnt = (*(smic->sm_get_component_count)) (sm->sm_dev, un);
2356 !mirror_other_sources(un, i, ci, 1)) {
2358 set_sm_comp_state(un, i, ci, CS_ERRED, extras,
2369 setno = MD_UN2SET(un);
2409 mm_unit_t *un,
2426 set_t setno = MD_UN2SET(un);
2428 mdi_unit_t *ui = MDI_UNIT(MD_SID(un));
2437 sm = &un->un_sm[smi];
2438 smic = &un->un_smic[smi];
2539 stchmsg.msg_stch_mnum = un->c.un_self_id;
2650 un->un_changecnt++;
2651 shared->ms_lasterrcnt = un->un_changecnt;
2654 mirror_commit(un, SMI2BIT(smi), extras);
2662 reset_lasterred(un, smi, extras, flags, lockp);
2668 mm_unit_t *un,
2686 dev = select_read_unit(un, blk, mcnt, &cando,
2704 mirror_other_sources(mm_unit_t *un, int smi, int ci, int must_be_open)
2717 sm = &un->un_sm[smi];
2718 smic = &un->un_smic[smi];
2729 not_found = mirror_other_sources(un, smi, ci,
2749 if (block >= un->c.un_total_blocks)
2752 if ((block + size) > un->c.un_total_blocks)
2753 size = un->c.un_total_blocks - block;
2755 not_found = find_another_logical(un, sm, block, size,
2769 mm_unit_t *un;
2774 un = ps->ps_un;
2809 if (ps->ps_changecnt != un->un_changecnt) {
2839 mm_unit_t *un;
2848 un = (mm_unit_t *)md_unit_writerlock(ui);
2849 setno = MD_UN2SET(un);
2862 while (mirror_geterror(un, &smi, &ci, 1, 0) != 0) {
2863 if (mirror_other_sources(un, smi, ci, 0) == 1) {
2866 set_sm_comp_state(un, smi, ci, CS_LAST_ERRED, 0, flags,
2872 if (!MD_MNSET_SETNO(MD_UN2SET(un))) {
2874 SVM_TAG_METADEVICE, setno, MD_SID(un));
2879 set_sm_comp_state(un, smi, ci, CS_ERRED, 0, flags,
2885 if (!MD_MNSET_SETNO(MD_UN2SET(un))) {
2887 SVM_TAG_METADEVICE, setno, MD_SID(un));
3111 submirror_is_lasterred(mm_unit_t *un, int smi)
3119 sm = &un->un_sm[smi];
3120 smic = &un->un_smic[smi];
3122 compcnt = (*(smic->sm_get_component_count)) (sm->sm_dev, un);
3136 if (mirror_other_sources(un, smi, ci, 0) == 1)
3163 mm_unit_t *un = ps->ps_un;
3166 if (!SMS_BY_INDEX_IS(un, i, SMS_INUSE))
3170 md_dev64_to_dev(un->un_sm[i].sm_dev)) {
3176 if (submirror_is_lasterred(un, i)) {
3211 mm_unit_t *un;
3231 un = ps->ps_un;
3241 (un->un_nsm >= 2) &&
3245 BLK_TO_RR(end_rr, ps->ps_lastblk, un);
3246 BLK_TO_RR(start_rr, ps->ps_firstblk, un);
3247 mutex_enter(&un->un_resync_mx);
3249 un->un_outstanding_writes[current_rr]--;
3250 mutex_exit(&un->un_resync_mx);
3303 mm_unit_t *un;
3312 un = cs->cs_ps->ps_un;
3315 if (!SMS_BY_INDEX_IS(un, smi, SMS_INUSE))
3318 if (cb->b_edev == md_dev64_to_dev(un->un_sm[smi].sm_dev))
3325 sm = &un->un_sm[smi];
3326 smic = &un->un_smic[smi];
3343 cnt = (*(smic->sm_get_component_count))(sm->sm_dev, un);
3368 mm_unit_t *un;
3373 un = ps->ps_un;
3380 bp->b_edev = md_dev64_to_dev(select_read_unit(un, blkno,
3418 mm_unit_t *un;
3421 un = ps->ps_un;
3427 (void) mirror_map_write(un, cs, ps, 0);
3435 mirror_map_write(mm_unit_t *un, md_mcs_t *cs, md_mps_t *ps, int war)
3451 dev = md_dev64_to_dev(un->un_sm[i].sm_dev);
3456 if (war && (blkno == 0) && (un->c.un_flag & MD_LABELED)) {
3487 if (un->un_sm[i].sm_flags & MD_SM_FAILFAST) {
3488 if (un->un_sm[i].sm_state & SMS_COMP_ERRED) {
3494 sm = &un->un_sm[i];
3495 smic = &un->un_smic[i];
3498 (sm->sm_dev, un);
3519 if (un->un_write_option == WR_SERIAL) {
3538 mm_unit_t *un;
3541 un = ps->ps_un;
3548 mutex_enter(&un->un_dmr_mx);
3549 cv_signal(&un->un_dmr_cv);
3550 mutex_exit(&un->un_dmr_mx);
3613 mm_unit_t *un = MD_UNIT(ui->ui_link.ln_id);
3617 mutex_enter(&un->un_rrp_inflight_mx);
3619 ps->ps_un = un;
3621 if (mddb_reread_rr(setno, un->un_rr_dirty_recid) == 0) {
3626 mirror_copy_rr(howmany(un->un_rrd_num, NBBY), un->un_resync_bm,
3627 un->un_dirty_bm);
3631 restart_resync = (un->un_rs_thread_flags & MD_RI_BLOCK_OWNER) ? 1 : 0;
3633 mutex_exit(&un->un_rrp_inflight_mx);
3637 mutex_enter(&un->un_rs_thread_mx);
3638 un->un_rs_thread_flags &= ~MD_RI_BLOCK_OWNER;
3639 cv_signal(&un->un_rs_thread_cv);
3640 mutex_exit(&un->un_rs_thread_mx);
3677 mm_unit_t *un = ps->ps_un;
3693 if (MD_MN_MIRROR_OWNER(un)) {
3710 mutex_enter(&un->un_owner_mx);
3711 if ((un->un_owner_state & MM_MN_OWNER_SENT) == 0) {
3721 mutex_exit(&un->un_owner_mx);
3739 un->un_owner_state |= MM_MN_OWNER_SENT;
3740 mutex_exit(&un->un_owner_mx);
3744 msg->mnum = MD_SID(un);
3776 mutex_enter(&un->un_owner_mx);
3777 if (un->un_owner_state & MM_MN_BECOME_OWNER) {
3778 un->un_mirror_owner = md_mn_mynode_id;
3780 if (un->un_rr_dirty_recid)
3782 un->un_rr_dirty_recid,
3784 un->un_owner_state &=
3790 ps1 = un->un_rs_prev_overlap;
3794 mutex_exit(&un->un_owner_mx);
3824 mutex_exit(&un->un_owner_mx);
3842 mutex_enter(&un->un_owner_mx);
3843 un->un_owner_state &=
3845 un->un_mirror_owner = MD_MN_MIRROR_UNOWNED;
3846 mutex_exit(&un->un_owner_mx);
3850 mutex_exit(&un->un_owner_mx);
3867 mm_unit_t *un;
3874 un = (mm_unit_t *)MD_UNIT(getminor(pb->b_edev));
3894 mutex_enter(&un->un_suspend_wr_mx);
3895 while (un->un_suspend_wr_flag) {
3896 cv_wait(&un->un_suspend_wr_cv,
3897 &un->un_suspend_wr_mx);
3899 mutex_exit(&un->un_suspend_wr_mx);
3905 if (md_checkbuf(ui, (md_unit_t *)un, pb)) {
3940 ps->ps_un = un;
3946 ps->ps_changecnt = un->un_changecnt;
3955 mutex_enter(&un->un_suspend_wr_mx);
3956 if (un->un_suspend_wr_flag) {
3958 mutex_exit(&un->un_suspend_wr_mx);
3964 mutex_exit(&un->un_suspend_wr_mx);
3971 mutex_enter(&un->un_owner_mx);
3972 if (MD_MNSET_SETNO(setno) && (!(MD_MN_MIRROR_OWNER(un))) &&
3976 ps1 = un->un_rs_prev_overlap;
3981 mutex_exit(&un->un_owner_mx);
3986 mutex_enter(&un->un_owner_mx);
3992 if (MD_MN_MIRROR_OWNER(un) &&
3999 mutex_exit(&un->un_owner_mx);
4017 if ((MD_MNSET_SETNO(MD_UN2SET(un))) &&
4019 if (!IN_RESYNC_REGION(un, ps))
4022 if ((select_write_after_read_units(un, ps) == 0) ||
4027 MD_SID(un), ps->ps_firstblk);
4038 select_write_units(un, ps);
4043 un = md_unit_readerlock(ui);
4052 mutex_enter(&un->un_owner_mx);
4053 if (((MD_MN_MIRROR_OWNER(un))) && rs_on_overlap) {
4056 mutex_exit(&un->un_owner_mx);
4062 mutex_exit(&un->un_owner_mx);
4079 if (MD_MN_NO_MIRROR_OWNER(un)) {
4098 if (mirror_mark_resync_region(un, ps->ps_firstblk,
4144 more = mirror_map_write(un, cs, ps, (flag & MD_STR_WAR));
4185 mm_unit_t *un;
4197 un = (mm_unit_t *)md_unit_readerlock(ui);
4200 if (md_checkbuf(ui, (md_unit_t *)un, pb)) {
4223 * specified side (in un->un_dmr_last_read) for the source of the data.
4232 ps->ps_un = un;
4238 ps->ps_changecnt = un->un_changecnt;
4271 if (((MD_STATUS(un) & MD_UN_OPT_NOT_DONE) || (flag & MD_STR_WAR)) &&
4289 BLK_TO_RR(end_rr, ps->ps_lastblk, un);
4290 BLK_TO_RR(start_rr, ps->ps_firstblk, un);
4293 if ((region_dirty = IS_KEEPDIRTY(i, un)) != 0)
4298 !(md_get_setstatus(MD_UN2SET(un)) & MD_SET_STALE)) {
4322 setno = MD_UN2SET(un);
4329 un = md_unit_readerlock(ui);
4349 if (MD_MN_NO_MIRROR_OWNER(un)) {
4368 (!(un->c.un_status & MD_UN_WAR) ||
4369 (!IN_RESYNC_REGION(un, ps)))) {
4374 MD_SID(un),
4505 mm_unit_t *un;
4542 un = md_unit_readerlock(ui);
4562 !SUBMIRROR_IS_READABLE(un, next_side))
4573 un->un_dmr_last_read = next_side;
4588 mutex_enter(&un->un_dmr_mx);
4589 cv_wait(&un->un_dmr_cv, &un->un_dmr_mx);
4590 mutex_exit(&un->un_dmr_mx);
4619 * we have just read from (un->un_dmr_last_read)
4621 un = md_unit_readerlock(ui);
4623 vdr->vdr_side = un->un_dmr_last_read;
4624 sm = &un->un_sm[un->un_dmr_last_read];
4635 next_side = un->un_dmr_last_read + 1;
4637 !SUBMIRROR_IS_READABLE(un, next_side))
4671 mm_unit_t *un;
4695 if ((un = mirror_getun(p->mnum, &p->mde, NO_LOCK, NULL)) == NULL)
4703 rs_active = (MD_STATUS(un) & MD_UN_RESYNC_ACTIVE) ? 1 : 0;
4726 if (!rs_active || (p->rs_type == un->un_resync_completed))
4736 if ((p->rs_type == un->un_rs_type) &&
4737 (p->rs_start < un->un_resync_startbl))
4739 ps = un->un_rs_prev_overlap;
4745 ps->ps_un = un;
4752 un->un_rs_prev_overlap = ps;
4762 BLK_TO_RR(rr_end, ps->ps_lastblk, un);
4763 BLK_TO_RR(rr_start, p->rs_start, un);
4765 BLK_TO_RR(rr_start, ps->ps_firstblk, un);
4766 mutex_enter(&un->un_resync_mx);
4772 CLR_KEEPDIRTY(rr, un);
4774 mutex_exit(&un->un_resync_mx);
4788 un->un_rs_resync_done = p->rs_done;
4789 un->un_rs_resync_2_do = p->rs_2_do;
4790 un->un_rs_type = p->rs_type;
4791 un->un_resync_startbl = p->rs_start;
4797 mutex_enter(&un->un_owner_mx);
4798 if (MD_MN_MIRROR_OWNER(un)) {
4814 mutex_exit(&un->un_owner_mx);
4820 mutex_enter(&un->un_owner_mx);
4828 if (MD_MN_MIRROR_OWNER(un) &&
4833 mutex_exit(&un->un_owner_mx);
4842 SVM_TAG_METADEVICE, MD_UN2SET(un),
4843 MD_SID(un));
4847 if (un->un_rs_thread == NULL) {
4872 if ((un->c.un_status & MD_UN_RESYNC_ACTIVE) &&
4874 mutex_enter(&un->un_rs_thread_mx);
4875 un->c.un_status &= ~MD_UN_RESYNC_CANCEL;
4876 un->un_rs_thread_flags |= MD_RI_SHUTDOWN;
4877 un->un_rs_thread_flags &=
4879 cv_signal(&un->un_rs_thread_cv);
4880 mutex_exit(&un->un_rs_thread_mx);
4884 mutex_enter(&un->un_owner_mx);
4885 un->un_mirror_owner = 0;
4886 mutex_exit(&un->un_owner_mx);
4889 ps = un->un_rs_prev_overlap;
4897 un->un_rs_prev_overlap = NULL;
4904 un->un_rs_resync_done = p->rs_done;
4905 un->un_rs_resync_2_do = p->rs_2_do;
4906 un->un_rs_type = p->rs_type;
4907 mutex_enter(&un->un_rs_progress_mx);
4908 cv_signal(&un->un_rs_progress_cv);
4909 mutex_exit(&un->un_rs_progress_mx);
4911 un = md_ioctl_writerlock(lockp, ui);
4912 un->c.un_status &= ~MD_UN_RESYNC_ACTIVE;
4914 if (un->c.un_status & MD_UN_GROW_PENDING) {
4915 if ((mirror_grow_unit(un, &mde) != 0) ||
4917 un->c.un_status &= ~MD_UN_GROW_PENDING;
4955 un = md_ioctl_writerlock(lockp, ui);
4959 SET_RS_TYPE_NONE(un->un_rs_type);
4968 if ((un->c.un_status & MD_UN_OPT_NOT_DONE) &&
4969 (RS_TYPE(un->un_rs_type) == MD_RS_NONE)) {
4971 un->c.un_status &= ~MD_UN_OPT_NOT_DONE;
4972 un->c.un_status &= ~MD_UN_WAR;
4988 un->c.un_status &= ~MD_UN_KEEP_DIRTY;
4990 un->c.un_status &= ~MD_UN_WAR;
4998 (ps = un->un_rs_prev_overlap) != NULL) {
5000 un);
5001 BLK_TO_RR(rr_end, ps->ps_lastblk, un);
5002 mutex_enter(&un->un_resync_mx);
5005 CLR_KEEPDIRTY(rr, un);
5007 mutex_exit(&un->un_resync_mx);
5015 un->un_resync_completed = un->un_rs_type;
5016 SET_RS_TYPE_NONE(un->un_rs_type);
5024 un->un_sm[smi].sm_flags &=
5026 if (SMS_BY_INDEX_IS(un, smi,
5036 &un->un_sm[smi],
5037 &un->un_smic[smi], state,
5039 mirror_commit(un, NO_SUBMIRRORS,
5046 if (SMS_BY_INDEX_IS(un, smi,
5048 un->c.un_status |=
5054 un = md_ioctl_writerlock(lockp, ui);
5056 sm = &un->un_sm[smi];
5057 smic = &un->un_smic[smi];
5059 un->un_sm[smi].sm_flags &= ~MD_SM_RESYNC_TARGET;
5064 un->un_resync_completed = un->un_rs_type;
5065 SET_RS_TYPE_NONE(un->un_rs_type);
5073 un->c.un_status &= ~MD_UN_WAR;
5074 mirror_commit(un, SMI2BIT(smi), 0);
5078 un = md_ioctl_writerlock(lockp, ui);
5081 sm = &un->un_sm[smi];
5082 smic = &un->un_smic[smi];
5086 un->c.un_status &= ~MD_UN_WAR;
5088 un->un_sm[smi].sm_flags &= ~MD_SM_RESYNC_TARGET;
5093 un->un_resync_completed = un->un_rs_type;
5094 SET_RS_TYPE_NONE(un->un_rs_type);
5107 set_sm_comp_state(un, smi, ci, CS_OKAY, 0,
5124 SVM_TAG_METADEVICE, MD_UN2SET(un),
5125 MD_SID(un));
5128 SVM_TAG_METADEVICE, MD_UN2SET(un),
5129 MD_SID(un));
5151 mm_unit_t *un;
5170 un = (mm_unit_t *)mddb_getrecaddr(recid);
5171 mirror_cleanup(un);
5215 * incores are at the end of un
5220 un = big_un;
5227 un = (mm_unit_t *)mddb_getrecaddr_resize(recid,
5228 sizeof (*un), 0);
5230 un->c.un_revision &= ~MD_64BIT_META_DEV;
5235 un = (mm_unit_t *)mddb_getrecaddr_resize(recid,
5236 sizeof (*un), 0);
5237 un->c.un_revision |= MD_64BIT_META_DEV;
5238 un->c.un_flag |= MD_EFILABEL;
5241 MDDB_NOTE_FN(rbp->rb_revision, un->c.un_revision);
5246 (void) md_create_minor_node(setno, MD_SID(un));
5248 if (MD_UNIT(MD_SID(un)) != NULL) {
5253 retval = mirror_build_incore(un, 1);
5256 md_create_unit_incore(MD_SID(un), &mirror_md_ops, 0);
5269 ui = MDI_UNIT(MD_SID(un));
5410 mm_unit_t *un;
5422 un = (mm_unit_t *)MD_UNIT(getminor(dev));
5424 if ((diskaddr_t)blkno >= un->c.un_total_blocks)
5427 if ((diskaddr_t)blkno + nblk > un->c.un_total_blocks)
5431 if (!SUBMIRROR_IS_WRITEABLE(un, smi))
5433 mapdev = md_dev64_to_dev(un->un_sm[smi].sm_dev);
5461 mm_unit_t *un;
5470 un = MD_UNIT(mnum);
5471 setno = MD_UN2SET(un);
5479 if (!SMS_BY_INDEX_IS(un, i, SMS_INUSE)) {
5484 tmpdev = un->un_sm[i].sm_dev;
5487 un->un_sm[i].sm_dev = tmpdev;
5496 if (submirror_unavailable(un, i, 1)) {
5527 while (mirror_geterror(un, &smi, &ci, 1, 1) != 0) {
5529 if (mirror_other_sources(un, smi, ci, 0) == 1) {
5540 set_sm_comp_state(un, smi, ci, CS_LAST_ERRED,
5546 MD_SID(un));
5550 (void) mirror_close_all_devs(un,
5556 MD_SID(un));
5558 mirror_openfail_console_info(un, smi, ci);
5570 set_sm_comp_state(un, smi, ci, CS_ERRED, 0,
5575 MD_SID(un));
5578 mirror_openfail_console_info(un, smi, ci);
5587 (void) mirror_close_all_devs(un, MD_OFLG_PROBEDEV);
5716 mm_unit_t *un;
5719 if ((un = mirror_getun(getminor(dev), &mde, NO_LOCK, NULL)) == NULL)
5722 if (un->c.un_status & MD_UN_OFFLINE_SM)
5738 mm_unit_t *un;
5741 if ((un = mirror_getun(getminor(dev), &mde, NO_LOCK, NULL)) == NULL)
5743 un->un_abr_count++;
5758 mm_unit_t *un;
5761 if ((un = mirror_getun(getminor(dev), &mde, NO_LOCK, NULL)) == NULL)
5763 un->un_abr_count--;