Lines Matching defs:mi (each line below declares or uses the per-filesystem mntinfo4_t pointer mi in the NFSv4 client recovery code; the leading number is the line number in the source file)

197 mntinfo4_t *mi;
205 mi = VFTOMI4(vfsp);
206 mutex_enter(&mi->mi_lock);
207 recov = FAILOVER_MOUNT4(mi) && !(mi->mi_flags & MI4_MOUNTING);
208 mutex_exit(&mi->mi_lock);
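
The fragment at lines 197-208 is the failover-eligibility test: mi_flags may only be read under mi_lock, and recovery by failover is attempted only when the mount has alternate servers (FAILOVER_MOUNT4) and the initial mount has finished (MI4_MOUNTING is clear). A minimal sketch of that test as a helper, assuming the usual nfs4 client definitions from <nfs/nfs4_clnt.h>; the helper name is hypothetical:

#include <nfs/nfs4_clnt.h>    /* mntinfo4_t, VFTOMI4, FAILOVER_MOUNT4, MI4_MOUNTING */

/* Hypothetical helper: may this filesystem attempt server failover right now? */
static bool_t
failover_possible(vfs_t *vfsp)
{
    mntinfo4_t *mi = VFTOMI4(vfsp);
    bool_t recov;

    mutex_enter(&mi->mi_lock);
    recov = FAILOVER_MOUNT4(mi) && !(mi->mi_flags & MI4_MOUNTING);
    mutex_exit(&mi->mi_lock);

    return (recov);
}
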
277 * Transfer the state recovery information in recovp to mi's resend queue,
278 * and mark mi as having a lost state request.
281 nfs4_enqueue_lost_rqst(recov_info_t *recovp, mntinfo4_t *mi)
285 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
286 nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
294 mutex_enter(&mi->mi_lock);
295 mi->mi_recovflags |= MI4R_LOST_STATE;
297 list_insert_head(&mi->mi_lost_state, lrp);
299 list_insert_tail(&mi->mi_lost_state, lrp);
301 mutex_exit(&mi->mi_lock);
303 nfs4_queue_event(RE_LOST_STATE, mi, NULL, lrp->lr_op, lrp->lr_vp,
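
Lines 277-303 describe the hand-off of a lost-state request to the recovery thread: with mi_recovlock held (reader or writer), the request is linked onto mi->mi_lost_state under mi_lock and MI4R_LOST_STATE is raised so the recovery thread knows there is something to resend. A sketch of that enqueue, assuming nfs4_lost_rqst_t is the element type of mi_lost_state, with the head-vs-tail decision reduced to a hypothetical putfirst argument and the RE_LOST_STATE event of line 303 omitted:

#include <nfs/nfs4_clnt.h>

/* Sketch: hand a lost-state request to the recovery thread. */
static void
enqueue_lost_rqst_sketch(mntinfo4_t *mi, nfs4_lost_rqst_t *lrp, bool_t putfirst)
{
    ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
        nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));

    mutex_enter(&mi->mi_lock);
    mi->mi_recovflags |= MI4R_LOST_STATE;    /* recovery thread has work to resend */
    if (putfirst)
        list_insert_head(&mi->mi_lost_state, lrp);
    else
        list_insert_tail(&mi->mi_lost_state, lrp);
    mutex_exit(&mi->mi_lock);
}
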
308 * Transfer the bad seqid recovery information in recovp to mi's
309 * bad seqid queue, and mark mi as having a bad seqid request.
312 enqueue_bseqid_rqst(recov_info_t *recovp, mntinfo4_t *mi)
314 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
315 nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
318 mutex_enter(&mi->mi_lock);
319 mi->mi_recovflags |= MI4R_BAD_SEQID;
320 list_insert_tail(&mi->mi_bseqid_list, recovp->rc_bseqid_rqst);
322 mutex_exit(&mi->mi_lock);
329 * attempt. mi, vp1, and vp2 refer to the filesystem and files that were
334 * start a new thread. The caller should hold mi->mi_recovlock as a reader
340 nfs4_start_recovery(nfs4_error_t *ep, mntinfo4_t *mi, vnode_t *vp1,
349 ASSERT(nfs_zone() == mi->mi_zone);
350 mutex_enter(&mi->mi_lock);
355 gone = FS_OR_ZONE_GONE4(mi->mi_vfsp);
363 !(mi->mi_recovflags & MI4R_LOST_STATE)) {
368 mutex_exit(&mi->mi_lock);
374 mi->mi_in_recovery++;
375 mutex_exit(&mi->mi_lock);
379 sp = find_nfs4_server(mi);
380 errs_to_action(recovp, sp, mi, sid, lost_rqstp, gone, op, bsep);
383 start_recovery(recovp, mi, vp1, vp2, sp, moved_vp, moved_nm);
395 start_recovery_action(nfs4_recov_t what, bool_t reboot, mntinfo4_t *mi,
400 ASSERT(nfs_zone() == mi->mi_zone);
401 mutex_enter(&mi->mi_lock);
402 mi->mi_in_recovery++;
403 mutex_exit(&mi->mi_lock);
409 start_recovery(recovp, mi, vp1, vp2, NULL, NULL, NULL);
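
nfs4_start_recovery (line 374) and start_recovery_action (lines 401-403) both raise mi->mi_in_recovery before dispatching, and the count is dropped again with a broadcast on mi_cv_in_recov once the request is finished (lines 628-632 and 1515-1518); that is what lets an unmount wait for outstanding recovery to drain. A sketch of that bookkeeping as a hypothetical ref/unref pair:

#include <nfs/nfs4_clnt.h>

/* Hypothetical helpers for the mi_in_recovery accounting. */
static void
recov_ref(mntinfo4_t *mi)
{
    mutex_enter(&mi->mi_lock);
    mi->mi_in_recovery++;            /* unmount waits for this to reach zero */
    mutex_exit(&mi->mi_lock);
}

static void
recov_unref(mntinfo4_t *mi)
{
    mutex_enter(&mi->mi_lock);
    mi->mi_in_recovery--;
    if (mi->mi_in_recovery == 0)
        cv_broadcast(&mi->mi_cv_in_recov);    /* wake anyone draining recovery */
    mutex_exit(&mi->mi_lock);
}
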
413 start_recovery(recov_info_t *recovp, mntinfo4_t *mi,
418 "start_recovery: mi %p, what %s", (void*)mi,
425 VFS_HOLD(mi->mi_vfsp);
426 MI4_HOLD(mi);
430 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
431 nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
432 if (mi->mi_servers->sv_next == NULL)
434 mutex_enter(&mi->mi_lock);
435 mi->mi_recovflags |= MI4R_NEED_NEW_SERVER;
436 mutex_exit(&mi->mi_lock);
439 nfs4_enqueue_lost_rqst(recovp, mi);
459 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
460 nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
461 mutex_enter(&mi->mi_lock);
462 mi->mi_recovflags |= MI4R_NEED_CLIENTID;
464 mi->mi_recovflags |= MI4R_SRV_REBOOT;
465 mutex_exit(&mi->mi_lock);
469 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
470 nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
471 mutex_enter(&mi->mi_lock);
472 mi->mi_recovflags |= MI4R_REOPEN_FILES;
474 mi->mi_recovflags |= MI4R_SRV_REBOOT;
475 mutex_exit(&mi->mi_lock);
479 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
480 nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
481 mutex_enter(&mi->mi_lock);
482 mi->mi_recovflags |= MI4R_NEED_SECINFO;
483 mutex_exit(&mi->mi_lock);
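
Lines 459-483 repeat one idiom several times: with mi_recovlock held, the caller records which class of recovery is needed (MI4R_NEED_CLIENTID, MI4R_REOPEN_FILES, MI4R_NEED_SECINFO) under mi_lock, OR-ing in MI4R_SRV_REBOOT when the failure looks like a server reboot. A sketch of that idiom folded into one helper; the uint_t type of the flag argument is an assumption:

#include <nfs/nfs4_clnt.h>

/* Sketch: record which class of recovery the recovery thread must run. */
static void
flag_recovery(mntinfo4_t *mi, uint_t mi4r_flag, bool_t srv_reboot)
{
    ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
        nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));

    mutex_enter(&mi->mi_lock);
    mi->mi_recovflags |= mi4r_flag;        /* e.g. MI4R_NEED_CLIENTID */
    if (srv_reboot)
        mi->mi_recovflags |= MI4R_SRV_REBOOT;
    mutex_exit(&mi->mi_lock);
}
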
514 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
515 nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
517 recov_filehandle(recovp->rc_action, mi, vp1);
519 recov_filehandle(recovp->rc_action, mi, vp2);
528 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
529 nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
532 recov_stale(mi, vp1);
534 recov_stale(mi, vp2);
535 mutex_enter(&mi->mi_lock);
536 if ((mi->mi_recovflags & MI4R_NEED_NEW_SERVER) == 0) {
537 mutex_exit(&mi->mi_lock);
540 mutex_exit(&mi->mi_lock);
546 enqueue_bseqid_rqst(recovp, mi);
564 nfs4_set_grace_wait(mi);
574 nfs4_enqueue_lost_rqst(recovp, mi);
577 nfs4_queue_event(RE_UNEXPECTED_ACTION, mi, NULL,
598 mutex_enter(&mi->mi_lock);
599 if (mi->mi_flags & MI4_RECOV_ACTIV) {
600 mutex_exit(&mi->mi_lock);
603 mi->mi_flags |= MI4_RECOV_ACTIV;
604 mutex_exit(&mi->mi_lock);
606 "start_recovery: starting new thread for mi %p", (void*)mi));
608 recovp->rc_mi = mi;
611 ASSERT(VTOMI4(vp1) == mi);
616 ASSERT(VTOMI4(vp2) == mi);
628 mutex_enter(&mi->mi_lock);
629 mi->mi_in_recovery--;
630 if (mi->mi_in_recovery == 0)
631 cv_broadcast(&mi->mi_cv_in_recov);
632 mutex_exit(&mi->mi_lock);
634 VFS_RELE(mi->mi_vfsp);
635 MI4_RELE(mi);
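
Lines 598-606 show how start_recovery guarantees at most one recovery thread per mntinfo4: MI4_RECOV_ACTIV is tested and set atomically under mi_lock, and if a thread is already active the newly raised MI4R_* flags are simply left for it to pick up. A sketch of that claim as a helper returning whether the caller should spawn the thread:

#include <nfs/nfs4_clnt.h>

/* Sketch: claim the right to start the single recovery thread for mi. */
static bool_t
recov_thread_claim(mntinfo4_t *mi)
{
    bool_t claimed = FALSE;

    mutex_enter(&mi->mi_lock);
    if (!(mi->mi_flags & MI4_RECOV_ACTIV)) {
        mi->mi_flags |= MI4_RECOV_ACTIV;
        claimed = TRUE;            /* caller starts the thread */
    }
    /* else: the active thread will notice the new MI4R_* flags */
    mutex_exit(&mi->mi_lock);

    return (claimed);
}
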
735 nfs4_start_fop(mntinfo4_t *mi, vnode_t *vp1, vnode_t *vp2, nfs4_op_hint_t op,
747 ASSERT(vp1 == NULL || vp1->v_vfsp == mi->mi_vfsp);
748 ASSERT(vp2 == NULL || vp2->v_vfsp == mi->mi_vfsp);
765 error = nfs4_wait_for_grace(mi, rsp);
787 error = wait_for_recovery(mi, op);
801 nfs4_check_remap(mi, vp1, NFS4_REMAP_CKATTRS, &e);
807 nfs4_check_remap(mi, vp2, NFS4_REMAP_CKATTRS, &e);
818 if (nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER,
819 mi->mi_flags & MI4_INT)) {
824 sp = find_nfs4_server(mi);
828 droplock_cnt = mi->mi_srvset_cnt;
830 nfs_rw_exit(&mi->mi_recovlock);
834 mi->mi_flags & MI4_INT)) {
839 if (nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER,
840 mi->mi_flags & MI4_INT)) {
851 if (sp == NULL || droplock_cnt != mi->mi_srvset_cnt) {
852 tsp = find_nfs4_server(mi);
890 if (NFS4_VOLATILE_FH(mi) && op != OH_MOUNT) {
891 if (nfs_rw_enter_sig(&mi->mi_rename_lock,
893 mi->mi_flags & MI4_INT)) {
894 nfs_rw_exit(&mi->mi_recovlock);
917 mutex_enter(&mi->mi_lock);
918 if (FS_OR_ZONE_GONE4(mi->mi_vfsp))
921 (mi->mi_flags & MI4_RECOV_ACTIV))
925 mutex_exit(&mi->mi_lock);
955 nfs4_start_op(mntinfo4_t *mi, vnode_t *vp1, vnode_t *vp2,
960 return (nfs4_start_fop(mi, vp1, vp2, OH_OTHER, rsp, NULL));
972 nfs4_end_fop(mntinfo4_t *mi, vnode_t *vp1, vnode_t *vp2, nfs4_op_hint_t op,
994 nfs_rw_exit(&mi->mi_rename_lock);
1014 nfs_rw_exit(&mi->mi_recovlock);
1022 nfs_rw_exit(&mi->mi_recovlock);
1027 nfs4_end_op(mntinfo4_t *mi, vnode_t *vp1, vnode_t *vp2,
1030 nfs4_end_fop(mi, vp1, vp2, OH_OTHER, rsp, needrecov);
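
Lines 735-1030 show the start/end bracket that every over-the-wire operation takes: nfs4_start_fop waits out any grace period and active recovery, then holds mi_recovlock (and mi_rename_lock for volatile filehandles) as a reader, and nfs4_end_fop drops the locks again; nfs4_start_op/nfs4_end_op are the OH_OTHER convenience wrappers. A sketch of the usual caller-side pattern. do_otw_call() is hypothetical, the rs_flags/rs_num_retry_despite_err and nfs4_error_t initializations follow the usual callers elsewhere in the client rather than anything in this listing, and the needrecov retry is reduced to a comment because nfs4_start_recovery takes more arguments than are visible here:

#include <nfs/nfs4_clnt.h>

static void do_otw_call(vnode_t *, nfs4_error_t *);    /* hypothetical OTW request */

/* Sketch of the caller-side bracket around an over-the-wire operation. */
static int
example_otw(vnode_t *vp)
{
    mntinfo4_t *mi = VTOMI4(vp);
    nfs4_recov_state_t recov_state;
    nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
    bool_t needrecov;
    int error;

    recov_state.rs_flags = 0;
    recov_state.rs_num_retry_despite_err = 0;

    /* waits out grace periods and active recovery, then holds mi_recovlock */
    error = nfs4_start_op(mi, vp, NULL, &recov_state);
    if (error != 0)
        return (error);

    do_otw_call(vp, &e);

    needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
    /*
     * A real caller would invoke nfs4_start_recovery() here when
     * needrecov is set, end the op, and retry the whole bracket.
     */
    nfs4_end_op(mi, vp, NULL, &recov_state, needrecov);

    return (e.error);
}
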
1049 wait_for_recovery(mntinfo4_t *mi, nfs4_op_hint_t op_hint)
1053 mutex_enter(&mi->mi_lock);
1055 while (mi->mi_recovflags != 0) {
1058 if ((mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED) ||
1059 (mi->mi_flags & MI4_RECOV_FAIL))
1068 if (cv_wait_sig(&mi->mi_failover_cv, &mi->mi_lock) == 0) {
1078 if ((mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED) &&
1083 } else if (mi->mi_flags & MI4_RECOV_FAIL) {
1086 error = mi->mi_error;
1089 mutex_exit(&mi->mi_lock);
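
wait_for_recovery (lines 1049-1089) is the other half of that bracket: a request arriving while recovery is active sleeps on mi_failover_cv until mi_recovflags goes to zero, bailing out if the filesystem is unmounted or recovery has failed permanently. A simplified sketch that drops the op_hint special-casing visible in the original:

#include <nfs/nfs4_clnt.h>

/* Simplified sketch: block an OTW request while recovery is in progress. */
static int
wait_for_recovery_sketch(mntinfo4_t *mi)
{
    int error = 0;

    mutex_enter(&mi->mi_lock);

    while (mi->mi_recovflags != 0) {
        /* no point waiting if the fs is gone or recovery gave up */
        if ((mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED) ||
            (mi->mi_flags & MI4_RECOV_FAIL))
            break;

        /* recov_done() broadcasts mi_failover_cv after each pass */
        if (cv_wait_sig(&mi->mi_failover_cv, &mi->mi_lock) == 0) {
            error = EINTR;        /* interrupted by a signal */
            break;
        }
    }

    if (mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED)
        error = EIO;
    else if (mi->mi_flags & MI4_RECOV_FAIL)
        error = mi->mi_error;        /* recovery failed permanently */

    mutex_exit(&mi->mi_lock);

    return (error);
}
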
1104 nfs4_wait_for_grace(mntinfo4_t *mi, nfs4_recov_state_t *rsp)
1110 if (mi->mi_grace_wait != 0) {
1111 mutex_enter(&mi->mi_lock);
1113 if (mi->mi_grace_wait != 0) {
1119 if (curtime < mi->mi_grace_wait) {
1121 time_to_wait = mi->mi_grace_wait - curtime;
1123 mutex_exit(&mi->mi_lock);
1129 mutex_enter(&mi->mi_lock);
1131 if (curtime >= mi->mi_grace_wait)
1132 mi->mi_grace_wait = 0;
1134 mi->mi_grace_wait = 0;
1137 mutex_exit(&mi->mi_lock);
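
nfs4_wait_for_grace (lines 1104-1137) delays a new request until the grace/delay window recorded in mi->mi_grace_wait (set by nfs4_set_grace_wait, lines 3391-3396) should be over; note the unlocked fast-path test before mi_lock is taken. A sketch of the same flow; delay_sig()/SEC_TO_TICK() are my assumption for the interruptible sleep, since the listing does not show which primitive the original uses:

#include <nfs/nfs4_clnt.h>

/* Sketch: stall a request until the recorded grace period should have ended. */
static int
wait_for_grace_sketch(mntinfo4_t *mi)
{
    time_t curtime, time_to_wait;
    int error = 0;

    if (mi->mi_grace_wait == 0)        /* unlocked fast path */
        return (0);

    mutex_enter(&mi->mi_lock);
    if (mi->mi_grace_wait != 0) {
        curtime = gethrestime_sec();
        if (curtime < mi->mi_grace_wait) {
            time_to_wait = mi->mi_grace_wait - curtime;

            mutex_exit(&mi->mi_lock);
            /* interruptible sleep for time_to_wait seconds (assumed primitive) */
            error = delay_sig(SEC_TO_TICK(time_to_wait));
            mutex_enter(&mi->mi_lock);

            if (gethrestime_sec() >= mi->mi_grace_wait)
                mi->mi_grace_wait = 0;
        } else {
            mi->mi_grace_wait = 0;        /* window already passed */
        }
    }
    mutex_exit(&mi->mi_lock);

    return (error);
}
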
1207 mntinfo4_t *mi = recovp->rc_mi;
1214 nfs4_queue_event(RE_START, mi, NULL, mi->mi_recovflags,
1221 mutex_enter(&mi->mi_lock);
1222 mi->mi_recovthread = curthread;
1223 mutex_exit(&mi->mi_lock);
1231 (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0);
1232 sp = find_nfs4_server(mi);
1235 nfs_rw_exit(&mi->mi_recovlock);
1243 mutex_enter(&mi->mi_lock);
1244 if (FS_OR_ZONE_GONE4(mi->mi_vfsp)) {
1248 mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED, (CE_NOTE,
1261 if (mi->mi_recovflags &
1265 mi->mi_flags |= MI4_RECOV_FAIL;
1266 mi->mi_error = recovp->rc_error;
1280 if (!(mi->mi_recovflags & MI4R_LOST_STATE)) {
1282 mutex_exit(&mi->mi_lock);
1285 mutex_exit(&mi->mi_lock);
1297 mutex_enter(&mi->mi_lock);
1298 mi->mi_flags |= MI4_RECOV_FAIL;
1299 mi->mi_error = recovp->rc_error;
1300 mutex_exit(&mi->mi_lock);
1314 mutex_exit(&mi->mi_lock);
1322 mutex_enter(&mi->mi_lock);
1324 (mi->mi_recovflags & MI4R_NEED_NEW_SERVER)) {
1325 mutex_exit(&mi->mi_lock);
1328 mutex_exit(&mi->mi_lock);
1347 mutex_enter(&mi->mi_lock);
1348 mi->mi_recovflags &= ~MI4R_NEED_CLIENTID;
1349 mutex_exit(&mi->mi_lock);
1357 mutex_enter(&mi->mi_lock);
1358 if ((mi->mi_recovflags & MI4R_NEED_SECINFO) &&
1359 !(mi->mi_flags & MI4_RECOV_FAIL)) {
1360 mutex_exit(&mi->mi_lock);
1361 (void) nfs_rw_enter_sig(&mi->mi_recovlock,
1370 mutex_enter(&mi->mi_lock);
1371 mi->mi_flags |= MI4_RECOV_FAIL;
1372 mi->mi_error = recovp->rc_error;
1373 mutex_exit(&mi->mi_lock);
1374 nfs4_queue_event(RE_WRONGSEC, mi, NULL,
1378 nfs_rw_exit(&mi->mi_recovlock);
1380 mutex_exit(&mi->mi_lock);
1385 mutex_enter(&mi->mi_lock);
1386 if ((mi->mi_recovflags & MI4R_BAD_SEQID) &&
1387 !(mi->mi_flags & MI4_RECOV_FAIL)) {
1388 mutex_exit(&mi->mi_lock);
1389 (void) nfs_rw_enter_sig(&mi->mi_recovlock,
1392 nfs_rw_exit(&mi->mi_recovlock);
1394 mutex_exit(&mi->mi_lock);
1401 mutex_enter(&mi->mi_lock);
1402 if ((mi->mi_recovflags & MI4R_REOPEN_FILES) &&
1403 !(mi->mi_flags & MI4_RECOV_FAIL)) {
1404 mutex_exit(&mi->mi_lock);
1407 mutex_exit(&mi->mi_lock);
1413 mutex_enter(&mi->mi_lock);
1415 (mi->mi_recovflags & MI4R_LOST_STATE) &&
1416 !(mi->mi_flags & MI4_RECOV_FAIL)) {
1417 mutex_exit(&mi->mi_lock);
1418 (void) nfs_rw_enter_sig(&mi->mi_recovlock,
1421 if (list_head(&mi->mi_lost_state) == NULL) {
1423 mutex_enter(&mi->mi_lock);
1424 mi->mi_recovflags &= ~MI4R_LOST_STATE;
1425 mutex_exit(&mi->mi_lock);
1427 nfs_rw_exit(&mi->mi_recovlock);
1429 mutex_exit(&mi->mi_lock);
1439 (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0);
1440 mutex_enter(&mi->mi_lock);
1441 if ((mi->mi_recovflags & ~MI4R_SRV_REBOOT) == 0 ||
1442 (mi->mi_flags & MI4_RECOV_FAIL)) {
1448 * unmark the mi as no longer doing recovery to
1450 * requests on the same mi (and the going away
1461 list_move_tail(&local_lost_state, &mi->mi_lost_state);
1464 mutex_exit(&mi->mi_lock);
1475 mutex_exit(&mi->mi_lock);
1476 nfs_rw_exit(&mi->mi_recovlock);
1485 if (!done && FS_OR_ZONE_GONE4(mi->mi_vfsp)) {
1486 mutex_enter(&mi->mi_lock);
1487 cv_broadcast(&mi->mi_failover_cv);
1488 mutex_exit(&mi->mi_lock);
1502 mutex_enter(&mi->mi_lock);
1503 recov_done(mi, recovp);
1504 mutex_exit(&mi->mi_lock);
1514 /* now we are done using the mi struct, signal the waiters */
1515 mutex_enter(&mi->mi_lock);
1516 mi->mi_in_recovery--;
1517 if (mi->mi_in_recovery == 0)
1518 cv_broadcast(&mi->mi_cv_in_recov);
1519 mutex_exit(&mi->mi_lock);
1521 VFS_RELE(mi->mi_vfsp);
1522 MI4_RELE(mi);
1535 recov_done(mntinfo4_t *mi, recov_info_t *recovp)
1538 ASSERT(MUTEX_HELD(&mi->mi_lock));
1540 nfs4_queue_event(RE_END, mi, NULL, 0, recovp->rc_vp1,
1542 mi->mi_recovthread = NULL;
1543 mi->mi_flags &= ~MI4_RECOV_ACTIV;
1544 mi->mi_recovflags &= ~MI4R_SRV_REBOOT;
1545 cv_broadcast(&mi->mi_failover_cv);
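
recov_done (lines 1535-1545) runs with mi_lock held once a recovery pass has finished: it detaches the thread from the mntinfo4, clears MI4_RECOV_ACTIV (so a later failure can start a new thread) and the MI4R_SRV_REBOOT hint, and wakes everything blocked in wait_for_recovery. A sketch, minus the RE_END event queued at line 1540:

#include <nfs/nfs4_clnt.h>

/* Sketch: a recovery pass is done; let blocked requests through again. */
static void
recov_done_sketch(mntinfo4_t *mi)
{
    ASSERT(MUTEX_HELD(&mi->mi_lock));

    mi->mi_recovthread = NULL;
    mi->mi_flags &= ~MI4_RECOV_ACTIV;    /* a later failure may start a new thread */
    mi->mi_recovflags &= ~MI4R_SRV_REBOOT;
    cv_broadcast(&mi->mi_failover_cv);    /* wakes wait_for_recovery() callers */
}
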
1562 mntinfo4_t *mi = recovp->rc_mi;
1576 (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_WRITER, 0);
1599 for (svp = mi->mi_servers; svp; svp = svp->sv_next) {
1601 mutex_enter(&mi->mi_lock);
1602 if (FS_OR_ZONE_GONE4(mi->mi_vfsp)) {
1603 mi->mi_flags |= MI4_RECOV_FAIL;
1604 mutex_exit(&mi->mi_lock);
1605 (void) nfs_rw_exit(&mi->mi_recovlock);
1611 mutex_exit(&mi->mi_lock);
1620 if (!oncethru && svp == mi->mi_curr_serv)
1628 if (!(mi->mi_flags & MI4_INT))
1632 if (!(mi->mi_flags & MI4_INT))
1637 nfs4_queue_event(RE_FAILOVER, mi,
1638 svp == mi->mi_curr_serv ? NULL :
1647 snames = nfs4_getsrvnames(mi, &len);
1648 nfs4_queue_fact(RF_SRVS_NOT_RESPOND, mi,
1657 nfs4_queue_fact(RF_SRVS_OK, mi, 0, 0, 0, FALSE, snames,
1668 mutex_enter(&mi->mi_lock);
1669 mi->mi_recovflags &= ~MI4R_NEED_NEW_SERVER;
1670 if (svp != mi->mi_curr_serv) {
1671 servinfo4_t *osvp = mi->mi_curr_serv;
1673 mutex_exit(&mi->mi_lock);
1678 index = rtable4hash(mi->mi_rootfh);
1681 rp = r4find(&rtable4[index], mi->mi_rootfh, mi->mi_vfsp);
1698 (void) dnlc_purge_vfsp(mi->mi_vfsp, 0);
1700 mutex_enter(&mi->mi_lock);
1701 mi->mi_recovflags |= MI4R_REOPEN_FILES | MI4R_REMAP_FILES;
1703 mi->mi_recovflags |= MI4R_SRV_REBOOT;
1704 mi->mi_curr_serv = svp;
1705 mi->mi_failover++;
1706 mi->mi_flags &= ~MI4_BADOWNER_DEBUG;
1707 mutex_exit(&mi->mi_lock);
1712 sfh4_update(mi->mi_rootfh, &fh);
1715 sfh4_update(mi->mi_srvparentfh, &fh);
1718 *spp = nfs4_move_mi(mi, osvp, svp);
1722 mutex_exit(&mi->mi_lock);
1723 (void) nfs_rw_exit(&mi->mi_recovlock);
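
The failover code at lines 1562-1723 walks mi_servers looking for a replacement: the server that just failed (mi_curr_serv) is skipped on the first pass only, the loop gives up if the filesystem or zone has gone away, and once a responsive server is found the switch is committed under mi_lock together with MI4R_REOPEN_FILES | MI4R_REMAP_FILES. A sketch of just the selection loop; server_responds() is a hypothetical stand-in for the real connection attempt, and the "servers not responding" facts and inter-pass pause are reduced to a comment:

#include <nfs/nfs4_clnt.h>

static bool_t server_responds(servinfo4_t *);    /* hypothetical probe */

/* Sketch: pick the server to fail over to, or NULL if the fs is going away. */
static servinfo4_t *
pick_new_server(mntinfo4_t *mi)
{
    servinfo4_t *svp;
    bool_t oncethru = FALSE;

    for (;;) {
        for (svp = mi->mi_servers; svp != NULL; svp = svp->sv_next) {
            mutex_enter(&mi->mi_lock);
            if (FS_OR_ZONE_GONE4(mi->mi_vfsp)) {
                mutex_exit(&mi->mi_lock);
                return (NULL);        /* caller sets MI4_RECOV_FAIL */
            }
            mutex_exit(&mi->mi_lock);

            /* first pass: skip the server that just failed */
            if (!oncethru && svp == mi->mi_curr_serv)
                continue;

            if (server_responds(svp))
                return (svp);
        }
        /* later passes consider every server, including the old one */
        oncethru = TRUE;
        /* the real code logs that no server responds and pauses here */
    }
}
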
1733 mntinfo4_t *mi = recovp->rc_mi;
1747 (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_WRITER, 0);
1756 nfs4setclientid(mi, kcred, TRUE, &n4e);
1765 mutex_enter(&mi->mi_lock);
1766 need_new_s = mi->mi_recovflags & MI4R_NEED_NEW_SERVER;
1767 mutex_exit(&mi->mi_lock);
1770 nfs_rw_exit(&mi->mi_recovlock);
1775 nfs4_queue_event(RE_CLIENTID, mi, NULL, n4e.error, NULL,
1777 mutex_enter(&mi->mi_lock);
1778 mi->mi_flags |= MI4_RECOV_FAIL;
1779 mi->mi_error = recovp->rc_error;
1780 mutex_exit(&mi->mi_lock);
1786 mutex_enter(&mi->mi_lock);
1787 mi->mi_recovflags &= ~MI4R_NEED_CLIENTID;
1796 mi->mi_recovflags |= MI4R_REOPEN_FILES;
1798 mi->mi_recovflags |= MI4R_SRV_REBOOT;
1800 mutex_exit(&mi->mi_lock);
1803 nfs_rw_exit(&mi->mi_recovlock);
1807 mutex_enter(&mi->mi_lock);
1808 if ((mi->mi_flags & MI4_RECOV_FAIL) == 0)
1810 mutex_exit(&mi->mi_lock);
1825 if (tmi != mi) {
1906 recov_filehandle(nfs4_recov_t action, mntinfo4_t *mi, vnode_t *vp)
1935 nfs4_queue_event(RE_BADHANDLE, mi, NULL, 0,
1939 nfs4_remap_file(mi, vp, 0, &e);
1940 needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
1959 (void) nfs4_start_recovery(&e, mi, vp, NULL,
1963 !NFS4_FRC_UNMT_ERR(e.error, mi->mi_vfsp) &&
1998 recov_stale(mntinfo4_t *mi, vnode_t *vp)
2044 (void) nfs4_start_recovery(&e, mi, vp, NULL,
2097 (void) nfs4_start_recovery(&e, mi, rootvp, NULL,
2129 if (FAILOVER_MOUNT4(mi)) {
2130 mutex_enter(&mi->mi_lock);
2131 mi->mi_recovflags |= MI4R_NEED_NEW_SERVER;
2135 mutex_exit(&mi->mi_lock);
2175 mutex_enter(&mi->mi_lock);
2176 mi->mi_error = ESTALE;
2177 mutex_exit(&mi->mi_lock);
2179 svp = mi->mi_curr_serv;
2206 relock_file(vnode_t *vp, mntinfo4_t *mi, nfs4_error_t *ep,
2271 nfs4_queue_event(RE_FAIL_RELOCK, mi,
2276 nfs4_queue_event(RE_FAIL_RELOCK, mi,
2280 nfs4_send_siglost(llp->ll_flock.l_pid, mi, vp, TRUE,
2469 nfs4_remove_lost_rqsts(mntinfo4_t *mi, nfs4_server_t *sp)
2473 mutex_enter(&mi->mi_lock);
2474 while ((lrp = list_head(&mi->mi_lost_state)) != NULL) {
2475 list_remove(&mi->mi_lost_state, lrp);
2476 mutex_exit(&mi->mi_lock);
2478 mutex_enter(&mi->mi_lock);
2480 mutex_exit(&mi->mi_lock);
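
nfs4_remove_lost_rqsts (lines 2469-2480) throws away queued lost-state requests; it is used on the reopen/remap path (line 2549) when the queued requests are no longer worth resending. Each entry is unlinked from mi_lost_state under mi_lock, and the lock is dropped while the entry itself is torn down. A sketch of that drain, assuming nfs4_lost_rqst_t is the element type; discard_lost_rqst() is a hypothetical stand-in for the per-entry cleanup:

#include <nfs/nfs4_clnt.h>

static void discard_lost_rqst(nfs4_lost_rqst_t *);    /* hypothetical cleanup */

/* Sketch: empty mi_lost_state without resending anything. */
static void
remove_lost_rqsts_sketch(mntinfo4_t *mi)
{
    nfs4_lost_rqst_t *lrp;

    mutex_enter(&mi->mi_lock);
    while ((lrp = list_head(&mi->mi_lost_state)) != NULL) {
        list_remove(&mi->mi_lost_state, lrp);
        mutex_exit(&mi->mi_lock);    /* never hold mi_lock across the cleanup */

        discard_lost_rqst(lrp);

        mutex_enter(&mi->mi_lock);
    }
    mutex_exit(&mi->mi_lock);
}
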
2490 mntinfo4_t *mi = recovp->rc_mi;
2516 (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_WRITER, 0);
2518 if (NFS4_VOLATILE_FH(mi)) {
2519 nfs4_remap_root(mi, &e, 0);
2520 if (nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp)) {
2521 (void) nfs4_start_recovery(&e, mi, NULL,
2526 mutex_enter(&mi->mi_lock);
2527 if (recovp->rc_srv_reboot || (mi->mi_recovflags & MI4R_SRV_REBOOT))
2531 mutex_exit(&mi->mi_lock);
2539 reopenlist = r4mkopenlist(mi);
2541 mutex_enter(&mi->mi_lock);
2542 remap = mi->mi_recovflags & MI4R_REMAP_FILES;
2543 mutex_exit(&mi->mi_lock);
2549 nfs4_remove_lost_rqsts(mi, sp);
2554 nfs4_remap_file(mi, rep->re_vp,
2591 mi->mi_vfsp)) {
2592 (void) nfs4_start_recovery(&e, mi,
2603 relock_file(rep->re_vp, mi, &e, pre_change);
2605 if (nfs4_needs_recovery(&e, TRUE, mi->mi_vfsp))
2606 (void) nfs4_start_recovery(&e, mi,
2622 nfs4_check_remap(mi, recovp->rc_vp1, NFS4_REMAP_CKATTRS,
2624 nfs4_check_remap(mi, recovp->rc_vp2, NFS4_REMAP_CKATTRS,
2630 mutex_enter(&mi->mi_lock);
2631 mi->mi_recovflags &= ~(MI4R_REOPEN_FILES | MI4R_REMAP_FILES);
2632 mutex_exit(&mi->mi_lock);
2635 nfs_rw_exit(&mi->mi_recovlock);
2650 mntinfo4_t *mi = recovp->rc_mi;
2658 ASSERT(mi != NULL);
2659 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
2661 mutex_enter(&mi->mi_lock);
2662 lrp = list_head(&mi->mi_lost_state);
2663 mutex_exit(&mi->mi_lock);
2666 resend_one_op(lrp, &n4e, mi, sp);
2679 nfs4_needs_recovery(&n4e, TRUE, mi->mi_vfsp) &&
2681 NFS4_FRC_UNMT_ERR(n4e.error, mi->mi_vfsp) ||
2694 NFS4_FRC_UNMT_ERR(n4e.error, mi->mi_vfsp)) {
2698 mi, lrp->lr_dvp, lrp->lr_vp, NULL, NULL,
2704 mutex_enter(&mi->mi_lock);
2705 list_remove(&mi->mi_lost_state, lrp);
2707 lrp = list_head(&mi->mi_lost_state);
2708 mutex_exit(&mi->mi_lock);
2720 mntinfo4_t *mi, nfs4_server_t *sp)
2737 ep->error = nfs4_start_open_seqid_sync(lrp->lr_oop, mi);
2772 nfs4_queue_event(RE_LOST_STATE_BAD_OP, mi, NULL,
2978 mntinfo4_t *mi;
2982 mi = VTOMI4(vp);
2990 mutex_enter(&mi->mi_lock);
2991 for (lrp = list_next(&mi->mi_lost_state, lrp); lrp != NULL;
2993 nlrp = list_next(&mi->mi_lost_state, lrp);
3002 list_remove(&mi->mi_lost_state, lrp);
3005 mutex_exit(&mi->mi_lock);
3020 nfs4_recov_t *action, mntinfo4_t *mi)
3070 nfs4_queue_event(RE_LOST_STATE_BAD_OP, mi, NULL,
3115 nfs4_server_t *sp, mntinfo4_t *mi, stateid4 *sidp,
3130 FAILOVER_MOUNT4(mi);
3141 nfs4_save_lost_rqst(lost_rqstp, recovp, &action, mi);
3147 nfs4_queue_event(RE_UNEXPECTED_ERRNO, mi, NULL, error, NULL,
3201 mntinfo4_t *, mi,
3228 nfs4_queue_event(RE_UNEXPECTED_STATUS, mi, NULL, 0,
3240 nfs4_queue_fact(RF_ERR, mi, stat, action, op, reboot, NULL, error,
3277 nfs4_send_siglost(pid_t pid, mntinfo4_t *mi, vnode_t *vp, bool_t dump,
3287 nfs4_queue_event(dump ? RE_SIGLOST : RE_SIGLOST_NO_DUMP, mi,
3391 nfs4_set_grace_wait(mntinfo4_t *mi)
3393 mutex_enter(&mi->mi_lock);
3395 mi->mi_grace_wait = gethrestime_sec() + nfs4err_delay_time;
3396 mutex_exit(&mi->mi_lock);
3427 nfs4_getsrvnames(mntinfo4_t *mi, size_t *len)
3440 for (svp = mi->mi_servers; svp != NULL; svp = svp->sv_next) {
3453 for (svp = mi->mi_servers; svp != NULL; svp = svp->sv_next) {
3518 mntinfo4_t *mi = recovp->rc_mi;
3527 ASSERT(mi != NULL);
3528 ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
3530 mutex_enter(&mi->mi_lock);
3531 bsep = list_head(&mi->mi_bseqid_list);
3532 mutex_exit(&mi->mi_lock);
3535 * Handle all the bad seqid entries on mi's list.
3552 nfs4_queue_event(RE_BAD_SEQID, mi, NULL,
3559 error = nfs4_start_open_seqid_sync(bad_oop, mi);
3577 nfs4_send_siglost(pid, mi, vp, TRUE,
3581 mutex_enter(&mi->mi_lock);
3582 list_remove(&mi->mi_bseqid_list, bsep);
3584 bsep = list_head(&mi->mi_bseqid_list);
3585 mutex_exit(&mi->mi_lock);
3589 mutex_enter(&mi->mi_lock);
3590 mi->mi_recovflags &= ~MI4R_BAD_SEQID;
3591 mutex_exit(&mi->mi_lock);
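
recov_bad_seqid (lines 3518-3591) drains mi_bseqid_list with the same take-head-under-lock, process-unlocked idiom, and once the list is empty it clears MI4R_BAD_SEQID so new operations stop being held off. A sketch of the loop shape, assuming nfs4_bseqid_entry_t is the element type; handle_one_bseqid() is a hypothetical stand-in for the per-entry work (seqid resync and SIGLOST delivery):

#include <nfs/nfs4_clnt.h>

static void handle_one_bseqid(mntinfo4_t *, nfs4_bseqid_entry_t *);    /* hypothetical */

/* Sketch: process and empty mi_bseqid_list, then drop the recovery flag. */
static void
recov_bad_seqid_sketch(mntinfo4_t *mi)
{
    nfs4_bseqid_entry_t *bsep;

    mutex_enter(&mi->mi_lock);
    while ((bsep = list_head(&mi->mi_bseqid_list)) != NULL) {
        mutex_exit(&mi->mi_lock);    /* per-entry work is done unlocked */

        handle_one_bseqid(mi, bsep);

        mutex_enter(&mi->mi_lock);
        list_remove(&mi->mi_bseqid_list, bsep);
        /* the real code frees the entry here */
    }
    mi->mi_recovflags &= ~MI4R_BAD_SEQID;    /* list is empty; stop holding off new ops */
    mutex_exit(&mi->mi_lock);
}
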