Lines Matching refs:mi

451 mntinfo4_t *mi = VTOMI4(vp);
456 ASSERT(mi->mi_vfsp->vfs_dev == garp->n4g_va.va_fsid);
459 mutex_enter(&mi->mi_lock);
461 mutex_exit(&mi->mi_lock);
682 mntinfo4_t *mi;
698 mi = VTOMI4(vp);
706 rp->r_time_cache_inval = now + mi->mi_acdirmax;
729 if ((mi->mi_flags & MI4_NOAC) || (vp->v_flag & VNOCACHE))
734 if (delta < mi->mi_acdirmin)
735 delta = mi->mi_acdirmin;
736 else if (delta > mi->mi_acdirmax)
737 delta = mi->mi_acdirmax;
739 if (delta < mi->mi_acregmin)
740 delta = mi->mi_acregmin;
741 else if (delta > mi->mi_acregmax)
742 delta = mi->mi_acregmax;
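
The matches at 729-742 above are the attribute-cache timeout clamp: line 729 skips caching outright for noac mounts, and the computed lifetime delta is then bounded by the mount's acdirmin/acdirmax pair for directories and acregmin/acregmax for regular files. A minimal userland sketch of that clamp follows; the struct and function names are illustrative stand-ins, not the kernel's.

struct ac_limits {
	long acdirmin, acdirmax;	/* directory attribute-cache bounds */
	long acregmin, acregmax;	/* regular-file attribute-cache bounds */
};

/*
 * Clamp an attribute-cache lifetime the way lines 734-742 do:
 * directories are bounded by acdir{min,max}, everything else by
 * acreg{min,max}.  Illustrative only; the kernel reads mntinfo4_t.
 */
static long
ac_clamp(long delta, int is_dir, const struct ac_limits *lim)
{
	long lo = is_dir ? lim->acdirmin : lim->acregmin;
	long hi = is_dir ? lim->acdirmax : lim->acregmax;

	if (delta < lo)
		delta = lo;
	else if (delta > hi)
		delta = hi;
	return (delta);
}
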
823 mntinfo4_t *mi = VTOMI4(vp);
832 (void) save_mnt_secinfo(mi->mi_curr_serv);
836 if ((e.error = nfs4_start_fop(mi, vp, NULL, OH_GETATTR,
838 (void) check_mnt_secinfo(mi->mi_curr_serv, vp);
872 (void) check_mnt_secinfo(mi->mi_curr_serv, vp);
916 argop[1].nfs_argop4_u.opgetattr.mi = VTOMI4(vp);
988 mntinfo4_t *mi = VTOMI4(vp);
1003 e.error = nfs4_start_fop(mi, vp, NULL, OH_GETATTR, &recov_state, NULL);
1014 argop[1].nfs_argop4_u.opgetattr.mi = mi;
1022 rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
1124 mntinfo4_t *mi;
1127 mi = VFTOMI4(vfsp);
1129 CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
1132 mutex_enter(&mi->mi_async_lock);
1141 max_threads = MAX(mi->mi_max_threads, 1);
1159 while (mi->mi_async_req_count > 0) {
1162 * (mi->mi_max_threads == 0), and the value was
1167 * mi->mi_max_threads, now non-zero) thinks we
1174 * mi->mi_max_threads are ignored for our
1178 if (mi->mi_threads[NFS4_ASYNC_QUEUE] <
1179 MAX(mi->mi_max_threads, max_threads)) {
1180 mi->mi_threads[NFS4_ASYNC_QUEUE]++;
1181 mutex_exit(&mi->mi_async_lock);
1182 MI4_HOLD(mi);
1186 mutex_enter(&mi->mi_async_lock);
1187 } else if (mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] <
1189 mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE]++;
1190 mutex_exit(&mi->mi_async_lock);
1191 MI4_HOLD(mi);
1196 mutex_enter(&mi->mi_async_lock);
1198 NFS4_WAKE_ASYNC_WORKER(mi->mi_async_work_cv);
1199 ASSERT(mi->mi_async_req_count != 0);
1200 mi->mi_async_req_count--;
1203 mutex_enter(&mi->mi_lock);
1204 if (mi->mi_flags & MI4_ASYNC_MGR_STOP) {
1205 mutex_exit(&mi->mi_lock);
1208 mutex_exit(&mi->mi_lock);
1211 cv_wait(&mi->mi_async_reqs_cv, &mi->mi_async_lock);
1212 CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
1216 "nfs4_async_manager exiting for vfs %p\n", (void *)mi->mi_vfsp));
1220 mi->mi_manager_thread = NULL;
1224 cv_broadcast(&mi->mi_inact_req_cv);
1228 cv_broadcast(&mi->mi_async_cv);
1230 * There is no explicit call to mutex_exit(&mi->mi_async_lock)
1236 MI4_RELE(mi);
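
Lines 1124-1236 are the nfs4_async_manager dispatch loop: the manager sleeps on mi_async_reqs_cv, and for each queued request either spawns another worker (while below mi_max_threads) or wakes an idle one, exiting once MI4_ASYNC_MGR_STOP is set. Below is a stripped-down sketch of the same loop that collapses the two queues into one and substitutes pthreads for the kernel mutex/condvar and CPR callbacks; every name here is a stand-in.

#include <pthread.h>

/* Illustrative stand-ins for the mntinfo4_t fields the manager touches. */
struct async_mgr {
	pthread_mutex_t lock;		/* mi_async_lock */
	pthread_cond_t reqs_cv;		/* mi_async_reqs_cv */
	pthread_cond_t work_cv;		/* mi_async_work_cv (one queue only) */
	int req_count;			/* mi_async_req_count */
	int nthreads;			/* mi_threads[NFS4_ASYNC_QUEUE] */
	int max_threads;		/* mi_max_threads */
	int stop;			/* stands in for MI4_ASYNC_MGR_STOP */
};

extern void spawn_worker(struct async_mgr *);	/* hypothetical */

void *
async_manager(void *arg)
{
	struct async_mgr *m = arg;

	pthread_mutex_lock(&m->lock);
	for (;;) {
		while (m->req_count > 0) {
			/* Grow the pool while below the cap (cf. 1178-1186). */
			if (m->nthreads < m->max_threads) {
				m->nthreads++;
				pthread_mutex_unlock(&m->lock);
				spawn_worker(m);
				pthread_mutex_lock(&m->lock);
			}
			/* Hand one request to a worker (cf. 1198-1200). */
			pthread_cond_signal(&m->work_cv);
			m->req_count--;
		}
		if (m->stop)	/* cf. the MI4_ASYNC_MGR_STOP check at 1204 */
			break;
		pthread_cond_wait(&m->reqs_cv, &m->lock);
	}
	pthread_mutex_unlock(&m->lock);
	return (NULL);
}
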
1246 mntinfo4_t *mi = VFTOMI4(vfsp);
1248 mutex_enter(&mi->mi_async_lock);
1249 mutex_enter(&mi->mi_lock);
1250 mi->mi_flags |= MI4_ASYNC_MGR_STOP;
1251 mutex_exit(&mi->mi_lock);
1252 cv_broadcast(&mi->mi_async_reqs_cv);
1256 while (mi->mi_manager_thread != NULL)
1257 cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
1258 mutex_exit(&mi->mi_async_lock);
1267 mntinfo4_t *mi;
1273 mi = VTOMI4(vp);
1316 mutex_enter(&mi->mi_async_lock);
1321 if (mi->mi_max_threads == 0) {
1322 mutex_exit(&mi->mi_async_lock);
1330 if (mi->mi_async_reqs[NFS4_READ_AHEAD] == NULL) {
1331 mi->mi_async_reqs[NFS4_READ_AHEAD] = args;
1332 mi->mi_async_tail[NFS4_READ_AHEAD] = args;
1334 mi->mi_async_tail[NFS4_READ_AHEAD]->a_next = args;
1335 mi->mi_async_tail[NFS4_READ_AHEAD] = args;
1338 if (mi->mi_io_kstats) {
1339 mutex_enter(&mi->mi_lock);
1340 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
1341 mutex_exit(&mi->mi_lock);
1344 mi->mi_async_req_count++;
1345 ASSERT(mi->mi_async_req_count != 0);
1346 cv_signal(&mi->mi_async_reqs_cv);
1347 mutex_exit(&mi->mi_async_lock);
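
Lines 1267-1347 are the producer side of the async machinery, and the identical enqueue shape recurs for PUTAPAGE (1768-1790), PAGEIO (1892-1914), READDIR (2012-2033) and COMMIT (2105-2126): append to a per-type singly linked FIFO tracked by head/tail pointers, bump mi_async_req_count, and signal the manager. A hedged sketch of just that append, with hypothetical userland types:

#include <pthread.h>
#include <stddef.h>

struct async_req {
	struct async_req *a_next;
	/* ... request payload ... */
};

/* Stand-ins for one mi_async_reqs[]/mi_async_tail[] slot. */
struct async_queue {
	pthread_mutex_t lock;		/* mi_async_lock */
	pthread_cond_t reqs_cv;		/* mi_async_reqs_cv */
	struct async_req *head;		/* mi_async_reqs[type] */
	struct async_req *tail;		/* mi_async_tail[type] */
	unsigned req_count;		/* mi_async_req_count */
};

void
async_enqueue(struct async_queue *q, struct async_req *args)
{
	args->a_next = NULL;
	pthread_mutex_lock(&q->lock);
	if (q->head == NULL) {
		q->head = args;		/* empty queue: args is head and tail */
		q->tail = args;
	} else {
		q->tail->a_next = args;	/* append after the current tail */
		q->tail = args;
	}
	q->req_count++;
	pthread_cond_signal(&q->reqs_cv);	/* wake the manager thread */
	pthread_mutex_unlock(&q->lock);
}
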
1392 mntinfo4_t *mi = VFTOMI4(vfsp);
1402 async_work_cv = &mi->mi_async_work_cv[NFS4_ASYNC_QUEUE];
1405 async_work_cv = &mi->mi_async_work_cv[NFS4_ASYNC_PGOPS_QUEUE];
1415 CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr, "nas");
1417 mutex_enter(&mi->mi_async_lock);
1426 args = *mi->mi_async_curr[async_queue];
1429 mi->mi_async_curr[async_queue]++;
1430 if (mi->mi_async_curr[async_queue] ==
1431 &mi->mi_async_reqs[async_types]) {
1432 mi->mi_async_curr[async_queue] =
1433 &mi->mi_async_reqs[0];
1453 if (mi->mi_max_threads == 0 || time_left <= 0) {
1454 --mi->mi_threads[async_queue];
1456 if (mi->mi_threads[NFS4_ASYNC_QUEUE] == 0 &&
1457 mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] == 0)
1458 cv_signal(&mi->mi_async_cv);
1461 MI4_RELE(mi);
1466 &mi->mi_async_lock, nfs_async_timeout,
1469 CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
1484 *mi->mi_async_curr[async_queue] = args->a_next;
1485 if (*mi->mi_async_curr[async_queue] == NULL ||
1486 --mi->mi_async_clusters[args->a_io] == 0) {
1487 mi->mi_async_clusters[args->a_io] =
1488 mi->mi_async_init_clusters;
1489 mi->mi_async_curr[async_queue]++;
1490 if (mi->mi_async_curr[async_queue] ==
1491 &mi->mi_async_reqs[async_types]) {
1492 mi->mi_async_curr[async_queue] =
1493 &mi->mi_async_reqs[0];
1497 if (args->a_io != NFS4_INACTIVE && mi->mi_io_kstats) {
1498 mutex_enter(&mi->mi_lock);
1499 kstat_waitq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
1500 mutex_exit(&mi->mi_lock);
1503 mutex_exit(&mi->mi_async_lock);
1508 if (args->a_io == NFS4_READ_AHEAD && mi->mi_max_threads > 0) {
1541 mutex_enter(&mi->mi_async_lock);
1551 nfs4_inactive_thread(mntinfo4_t *mi)
1555 vfs_t *vfsp = mi->mi_vfsp;
1557 CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
1561 mutex_enter(&mi->mi_async_lock);
1562 args = mi->mi_async_reqs[NFS4_INACTIVE];
1564 mutex_enter(&mi->mi_lock);
1574 if (mi->mi_manager_thread == NULL)
1576 mi->mi_flags |= MI4_INACTIVE_IDLE;
1577 mutex_exit(&mi->mi_lock);
1578 cv_signal(&mi->mi_async_cv);
1580 cv_wait(&mi->mi_inact_req_cv, &mi->mi_async_lock);
1581 CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
1582 mutex_exit(&mi->mi_async_lock);
1584 mutex_enter(&mi->mi_lock);
1585 mi->mi_flags &= ~MI4_INACTIVE_IDLE;
1586 mutex_exit(&mi->mi_lock);
1587 mi->mi_async_reqs[NFS4_INACTIVE] = args->a_next;
1588 mutex_exit(&mi->mi_async_lock);
1595 mutex_exit(&mi->mi_lock);
1596 mi->mi_inactive_thread = NULL;
1597 cv_signal(&mi->mi_async_cv);
1600 * There is no explicit call to mutex_exit(&mi->mi_async_lock) since
1608 MI4_RELE(mi);
1621 mntinfo4_t *mi = VFTOMI4(vfsp);
1627 mutex_enter(&mi->mi_async_lock);
1628 mi->mi_max_threads = 0;
1629 NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
1630 while (mi->mi_threads[NFS4_ASYNC_QUEUE] != 0 ||
1631 mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] != 0)
1632 cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
1638 if (mi->mi_inactive_thread != NULL) {
1639 mutex_enter(&mi->mi_lock);
1640 while (!(mi->mi_flags & MI4_INACTIVE_IDLE) ||
1641 (mi->mi_async_reqs[NFS4_INACTIVE] != NULL)) {
1642 mutex_exit(&mi->mi_lock);
1643 cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
1644 mutex_enter(&mi->mi_lock);
1646 mutex_exit(&mi->mi_lock);
1648 mutex_exit(&mi->mi_async_lock);
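
nfs4_async_stop (1621-1648) quiesces the worker pool: zero mi_max_threads so workers exit as they go idle, broadcast on the work CVs, then wait on mi_async_cv until both thread counts drain; nfs4_async_stop_sig (1661-1702, next) is the same dance with interruptible waits and the old cap restored afterwards. The core of the pattern, sketched with pthreads and stand-in names:

#include <pthread.h>

struct pool {
	pthread_mutex_t lock;		/* mi_async_lock */
	pthread_cond_t done_cv;		/* mi_async_cv */
	pthread_cond_t work_cv;		/* mi_async_work_cv */
	int max_threads;		/* mi_max_threads */
	int nthreads;			/* sum of mi_threads[] */
};

void
pool_stop(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	p->max_threads = 0;	/* workers exit when they see a zero cap */
	pthread_cond_broadcast(&p->work_cv);
	while (p->nthreads != 0)	/* drain (cf. 1630-1632) */
		pthread_cond_wait(&p->done_cv, &p->lock);
	pthread_mutex_unlock(&p->lock);
}
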
1661 mntinfo4_t *mi = VFTOMI4(vfsp);
1669 mutex_enter(&mi->mi_async_lock);
1670 omax = mi->mi_max_threads;
1671 mi->mi_max_threads = 0;
1672 NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
1673 while (mi->mi_threads[NFS4_ASYNC_QUEUE] != 0 ||
1674 mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] != 0) {
1675 if (!cv_wait_sig(&mi->mi_async_cv, &mi->mi_async_lock)) {
1685 if (mi->mi_inactive_thread != NULL) {
1686 mutex_enter(&mi->mi_lock);
1687 while (!(mi->mi_flags & MI4_INACTIVE_IDLE) ||
1688 (mi->mi_async_reqs[NFS4_INACTIVE] != NULL)) {
1689 mutex_exit(&mi->mi_lock);
1690 if (!cv_wait_sig(&mi->mi_async_cv,
1691 &mi->mi_async_lock)) {
1695 mutex_enter(&mi->mi_lock);
1697 mutex_exit(&mi->mi_lock);
1701 mi->mi_max_threads = omax;
1702 mutex_exit(&mi->mi_async_lock);
1713 mntinfo4_t *mi;
1722 mi = VTOMI4(vp);
1747 mutex_enter(&mi->mi_async_lock);
1755 if (mi->mi_max_threads == 0) {
1756 mutex_exit(&mi->mi_async_lock);
1768 if (mi->mi_async_reqs[NFS4_PUTAPAGE] == NULL) {
1769 mi->mi_async_reqs[NFS4_PUTAPAGE] = args;
1770 mi->mi_async_tail[NFS4_PUTAPAGE] = args;
1772 mi->mi_async_tail[NFS4_PUTAPAGE]->a_next = args;
1773 mi->mi_async_tail[NFS4_PUTAPAGE] = args;
1781 if (mi->mi_io_kstats) {
1782 mutex_enter(&mi->mi_lock);
1783 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
1784 mutex_exit(&mi->mi_lock);
1787 mi->mi_async_req_count++;
1788 ASSERT(mi->mi_async_req_count != 0);
1789 cv_signal(&mi->mi_async_reqs_cv);
1790 mutex_exit(&mi->mi_async_lock);
1815 if (nfs_zone() != mi->mi_zone) {
1837 mntinfo4_t *mi;
1846 mi = VTOMI4(vp);
1871 mutex_enter(&mi->mi_async_lock);
1879 if (mi->mi_max_threads == 0) {
1880 mutex_exit(&mi->mi_async_lock);
1892 if (mi->mi_async_reqs[NFS4_PAGEIO] == NULL) {
1893 mi->mi_async_reqs[NFS4_PAGEIO] = args;
1894 mi->mi_async_tail[NFS4_PAGEIO] = args;
1896 mi->mi_async_tail[NFS4_PAGEIO]->a_next = args;
1897 mi->mi_async_tail[NFS4_PAGEIO] = args;
1905 if (mi->mi_io_kstats) {
1906 mutex_enter(&mi->mi_lock);
1907 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
1908 mutex_exit(&mi->mi_lock);
1911 mi->mi_async_req_count++;
1912 ASSERT(mi->mi_async_req_count != 0);
1913 cv_signal(&mi->mi_async_reqs_cv);
1914 mutex_exit(&mi->mi_async_lock);
1947 if (nfs_zone() != mi->mi_zone) {
1967 mntinfo4_t *mi;
1973 mi = VTOMI4(vp);
1994 mutex_enter(&mi->mi_async_lock);
1999 if (mi->mi_max_threads == 0) {
2000 mutex_exit(&mi->mi_async_lock);
2012 if (mi->mi_async_reqs[NFS4_READDIR] == NULL) {
2013 mi->mi_async_reqs[NFS4_READDIR] = args;
2014 mi->mi_async_tail[NFS4_READDIR] = args;
2016 mi->mi_async_tail[NFS4_READDIR]->a_next = args;
2017 mi->mi_async_tail[NFS4_READDIR] = args;
2024 if (mi->mi_io_kstats) {
2025 mutex_enter(&mi->mi_lock);
2026 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
2027 mutex_exit(&mi->mi_lock);
2030 mi->mi_async_req_count++;
2031 ASSERT(mi->mi_async_req_count != 0);
2032 cv_signal(&mi->mi_async_reqs_cv);
2033 mutex_exit(&mi->mi_async_lock);
2055 mntinfo4_t *mi;
2060 mi = VTOMI4(vp);
2084 mutex_enter(&mi->mi_async_lock);
2092 if (mi->mi_max_threads == 0) {
2093 mutex_exit(&mi->mi_async_lock);
2105 if (mi->mi_async_reqs[NFS4_COMMIT] == NULL) {
2106 mi->mi_async_reqs[NFS4_COMMIT] = args;
2107 mi->mi_async_tail[NFS4_COMMIT] = args;
2109 mi->mi_async_tail[NFS4_COMMIT]->a_next = args;
2110 mi->mi_async_tail[NFS4_COMMIT] = args;
2117 if (mi->mi_io_kstats) {
2118 mutex_enter(&mi->mi_lock);
2119 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
2120 mutex_exit(&mi->mi_lock);
2123 mi->mi_async_req_count++;
2124 ASSERT(mi->mi_async_req_count != 0);
2125 cv_signal(&mi->mi_async_reqs_cv);
2126 mutex_exit(&mi->mi_async_lock);
2131 nfs_zone() != mi->mi_zone) {
2155 mntinfo4_t *mi;
2159 mi = VTOMI4(vp);
2173 * Note that we don't check mi->mi_max_threads here, since we
2180 mutex_enter(&mi->mi_async_lock);
2181 if (mi->mi_inactive_thread == NULL) {
2187 mutex_exit(&mi->mi_async_lock);
2217 (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER,
2220 nfs_rw_exit(&mi->mi_recovlock);
2233 if (mi->mi_manager_thread == NULL) {
2244 if (mi->mi_async_reqs[NFS4_INACTIVE] == NULL) {
2245 mi->mi_async_reqs[NFS4_INACTIVE] = args;
2246 mi->mi_async_tail[NFS4_INACTIVE] = args;
2249 mi->mi_async_tail[NFS4_INACTIVE]->a_next = args;
2250 mi->mi_async_tail[NFS4_INACTIVE] = args;
2253 cv_signal(&mi->mi_inact_req_cv);
2255 mi->mi_async_req_count++;
2256 ASSERT(mi->mi_async_req_count != 0);
2257 cv_signal(&mi->mi_async_reqs_cv);
2260 mutex_exit(&mi->mi_async_lock);
2586 mntinfo4_t *mi;
2602 mi = VFTOMI4(vfsp);
2605 (void) strcpy(mik->mik_proto, mi->mi_curr_serv->sv_knconf->knc_proto);
2607 mik->mik_vers = (uint32_t)mi->mi_vers;
2608 mik->mik_flags = mi->mi_flags;
2616 mik->mik_secmod = mi->mi_curr_serv->sv_currsec ?
2617 mi->mi_curr_serv->sv_currsec->secmod :
2618 mi->mi_curr_serv->sv_secdata->secmod;
2619 mik->mik_curread = (uint32_t)mi->mi_curread;
2620 mik->mik_curwrite = (uint32_t)mi->mi_curwrite;
2621 mik->mik_retrans = mi->mi_retrans;
2622 mik->mik_timeo = mi->mi_timeo;
2623 mik->mik_acregmin = HR2SEC(mi->mi_acregmin);
2624 mik->mik_acregmax = HR2SEC(mi->mi_acregmax);
2625 mik->mik_acdirmin = HR2SEC(mi->mi_acdirmin);
2626 mik->mik_acdirmax = HR2SEC(mi->mi_acdirmax);
2627 mik->mik_noresponse = (uint32_t)mi->mi_noresponse;
2628 mik->mik_failover = (uint32_t)mi->mi_failover;
2629 mik->mik_remap = (uint32_t)mi->mi_remap;
2631 (void) strcpy(mik->mik_curserver, mi->mi_curr_serv->sv_hostname);
2639 mntinfo4_t *mi = VFTOMI4(vfsp);
2652 mi->mi_io_kstats = kstat_create_zone("nfs", getminor(vfsp->vfs_dev),
2653 NULL, "nfs", KSTAT_TYPE_IO, 1, 0, mi->mi_zone->zone_id);
2654 if (mi->mi_io_kstats) {
2655 if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
2656 kstat_zone_add(mi->mi_io_kstats, GLOBAL_ZONEID);
2657 mi->mi_io_kstats->ks_lock = &mi->mi_lock;
2658 kstat_install(mi->mi_io_kstats);
2661 if ((mi->mi_ro_kstats = kstat_create_zone("nfs",
2663 sizeof (struct mntinfo_kstat), 0, mi->mi_zone->zone_id)) != NULL) {
2664 if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
2665 kstat_zone_add(mi->mi_ro_kstats, GLOBAL_ZONEID);
2666 mi->mi_ro_kstats->ks_update = nfs4_mnt_kstat_update;
2667 mi->mi_ro_kstats->ks_private = (void *)vfsp;
2668 kstat_install(mi->mi_ro_kstats);
2677 mntinfo4_t *mi;
2680 mi = VTOMI4(vp);
2685 if (mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED)
2692 if (mi->mi_flags & MI4_RECOV_FAIL)
2700 now - mi->mi_printftime > 0) {
2701 zoneid_t zoneid = mi->mi_zone->zone_id;
2705 mi->mi_vers, VTOR4(vp)->r_server->sv_hostname, NULL);
2721 mi->mi_printftime = now +
2801 mntinfo4_t *mi = VTOMI4(vp);
2803 mutex_enter(&mi->mi_lock);
2804 for (lrp = list_head(&mi->mi_lost_state); lrp != NULL;
2805 lrp = list_next(&mi->mi_lost_state, lrp)) {
2817 mutex_exit(&mi->mi_lock);
2880 mntinfo4_t *mi;
2888 mi = list_head(&mig->mig_list);
2889 if (mi == NULL) {
2895 "nfs4_mi_shutdown stopping vfs %p\n", (void *)mi->mi_vfsp));
2899 (void) dnlc_purge_vfsp(mi->mi_vfsp, 0);
2903 mutex_enter(&mi->mi_async_lock);
2904 mi->mi_max_threads = 0;
2905 NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
2911 mutex_enter(&mi->mi_lock);
2912 mi->mi_flags |= (MI4_ASYNC_MGR_STOP|MI4_DEAD);
2913 mutex_exit(&mi->mi_lock);
2914 mutex_exit(&mi->mi_async_lock);
2915 if (mi->mi_manager_thread) {
2916 nfs4_async_manager_stop(mi->mi_vfsp);
2918 if (mi->mi_inactive_thread) {
2919 mutex_enter(&mi->mi_async_lock);
2920 cv_signal(&mi->mi_inact_req_cv);
2924 while (mi->mi_inactive_thread != NULL) {
2925 cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
2927 mutex_exit(&mi->mi_async_lock);
2931 * signal when it is done using the "mi" structure and about
2934 mutex_enter(&mi->mi_lock);
2935 while (mi->mi_in_recovery > 0)
2936 cv_wait(&mi->mi_cv_in_recov, &mi->mi_lock);
2937 mutex_exit(&mi->mi_lock);
2939 * We're done when every mi has been done or the list is empty.
2942 list_remove(&mig->mig_list, mi);
2944 zone_rele_ref(&mi->mi_zone_ref, ZONE_REF_NFSV4);
2947 * Release hold on vfs and mi done to prevent race with zone
2950 VFS_RELE(mi->mi_vfsp);
2951 MI4_RELE(mi);
3007 nfs4_mi_zonelist_add(mntinfo4_t *mi)
3011 mig = zone_getspecific(mi4_list_key, mi->mi_zone);
3013 list_insert_head(&mig->mig_list, mi);
3018 MI4_HOLD(mi);
3019 VFS_HOLD(mi->mi_vfsp);
3027 nfs4_mi_zonelist_remove(mntinfo4_t *mi)
3032 mig = zone_getspecific(mi4_list_key, mi->mi_zone);
3034 mutex_enter(&mi->mi_lock);
3035 /* if this mi is marked dead, then the zone already released it */
3036 if (!(mi->mi_flags & MI4_DEAD)) {
3037 list_remove(&mig->mig_list, mi);
3038 mutex_exit(&mi->mi_lock);
3041 VFS_RELE(mi->mi_vfsp);
3042 MI4_RELE(mi);
3045 mutex_exit(&mi->mi_lock);
3051 * mi globals.
3063 nfs_free_mi4(mntinfo4_t *mi)
3079 mutex_enter(&mi->mi_lock);
3080 ASSERT(mi->mi_recovthread == NULL);
3081 ASSERT(mi->mi_flags & MI4_ASYNC_MGR_STOP);
3082 mutex_exit(&mi->mi_lock);
3083 mutex_enter(&mi->mi_async_lock);
3084 ASSERT(mi->mi_threads[NFS4_ASYNC_QUEUE] == 0 &&
3085 mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] == 0);
3086 ASSERT(mi->mi_manager_thread == NULL);
3087 mutex_exit(&mi->mi_async_lock);
3088 if (mi->mi_io_kstats) {
3089 kstat_delete(mi->mi_io_kstats);
3090 mi->mi_io_kstats = NULL;
3092 if (mi->mi_ro_kstats) {
3093 kstat_delete(mi->mi_ro_kstats);
3094 mi->mi_ro_kstats = NULL;
3096 if (mi->mi_recov_ksp) {
3097 kstat_delete(mi->mi_recov_ksp);
3098 mi->mi_recov_ksp = NULL;
3100 mutex_enter(&mi->mi_msg_list_lock);
3101 while (msgp = list_head(&mi->mi_msg_list)) {
3102 list_remove(&mi->mi_msg_list, msgp);
3105 mutex_exit(&mi->mi_msg_list_lock);
3106 list_destroy(&mi->mi_msg_list);
3107 if (mi->mi_fname != NULL)
3108 fn_rele(&mi->mi_fname);
3109 if (mi->mi_rootfh != NULL)
3110 sfh4_rele(&mi->mi_rootfh);
3111 if (mi->mi_srvparentfh != NULL)
3112 sfh4_rele(&mi->mi_srvparentfh);
3113 svp = mi->mi_servers;
3115 mutex_destroy(&mi->mi_lock);
3116 mutex_destroy(&mi->mi_async_lock);
3117 mutex_destroy(&mi->mi_msg_list_lock);
3118 nfs_rw_destroy(&mi->mi_recovlock);
3119 nfs_rw_destroy(&mi->mi_rename_lock);
3120 nfs_rw_destroy(&mi->mi_fh_lock);
3121 cv_destroy(&mi->mi_failover_cv);
3122 cv_destroy(&mi->mi_async_reqs_cv);
3123 cv_destroy(&mi->mi_async_work_cv[NFS4_ASYNC_QUEUE]);
3124 cv_destroy(&mi->mi_async_work_cv[NFS4_ASYNC_PGOPS_QUEUE]);
3125 cv_destroy(&mi->mi_async_cv);
3126 cv_destroy(&mi->mi_inact_req_cv);
3131 bucketp = &(mi->mi_oo_list[i]);
3145 foop = list_head(&mi->mi_foo_list);
3147 list_remove(&mi->mi_foo_list, foop);
3149 foop = list_head(&mi->mi_foo_list);
3151 list_destroy(&mi->mi_foo_list);
3152 list_destroy(&mi->mi_bseqid_list);
3153 list_destroy(&mi->mi_lost_state);
3154 avl_destroy(&mi->mi_filehandles);
3155 kmem_free(mi, sizeof (*mi));
3158 mi_hold(mntinfo4_t *mi)
3160 atomic_inc_32(&mi->mi_count);
3161 ASSERT(mi->mi_count != 0);
3165 mi_rele(mntinfo4_t *mi)
3167 ASSERT(mi->mi_count != 0);
3168 if (atomic_dec_32_nv(&mi->mi_count) == 0) {
3169 nfs_free_mi4(mi);
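
mi_hold/mi_rele (3158-3169) are a plain atomic reference count: atomic_dec_32_nv returns the new value, so a result of zero means the caller dropped the last hold and must free the mntinfo4. The same idiom in C11 atomics, where fetch_sub returns the old value instead; the names are illustrative:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_uint count;		/* like mi_count */
	/* ... payload ... */
};

static void
obj_hold(struct obj *o)
{
	atomic_fetch_add(&o->count, 1);
}

static void
obj_rele(struct obj *o)
{
	assert(atomic_load(&o->count) != 0);
	/* fetch_sub returns the old value; 1 means this was the last hold. */
	if (atomic_fetch_sub(&o->count, 1) == 1)
		free(o);		/* stands in for nfs_free_mi4() */
}
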
3436 mntinfo4_t *mi;
3448 mi = sp->mntinfo4_list;
3449 VFS_HOLD(mi->mi_vfsp);
3451 ASSERT(mi != NULL);
3453 e.error = nfs4_start_op(mi, NULL, NULL, &recov_state);
3455 VFS_RELE(mi->mi_vfsp);
3463 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3464 VFS_RELE(mi->mi_vfsp);
3468 /* Make sure mi hasn't changed on us */
3469 if (mi != sp->mntinfo4_list) {
3472 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3473 VFS_RELE(mi->mi_vfsp);
3504 mntinfo4_t *, mi);
3506 rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
3510 mntinfo4_t *, mi);
3525 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3526 VFS_RELE(mi->mi_vfsp);
3536 needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
3538 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3539 VFS_RELE(mi->mi_vfsp);
3549 if (nfs4_start_recovery(&e, mi, NULL, NULL, NULL, NULL,
3551 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3552 VFS_RELE(mi->mi_vfsp);
3576 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3578 VFS_RELE(mi->mi_vfsp);
3584 nfs4_inc_state_ref_count(mntinfo4_t *mi)
3589 sp = find_nfs4_server(mi);
3592 nfs4_inc_state_ref_count_nolock(sp, mi);
3605 nfs4_inc_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi)
3625 /* update the number of open files for mi */
3626 mi->mi_open_files++;
3630 nfs4_dec_state_ref_count(mntinfo4_t *mi)
3635 sp = find_nfs4_server_all(mi, 1);
3638 nfs4_dec_state_ref_count_nolock(sp, mi);
3649 nfs4_dec_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi)
3659 mi->mi_open_files--;
3661 "nfs4_dec_state_ref_count: mi open files %d, v4 flags 0x%x",
3662 mi->mi_open_files, mi->mi_flags));
3665 if (mi->mi_open_files == 0 &&
3666 (mi->mi_flags & MI4_REMOVE_ON_LAST_CLOSE)) {
3669 "we have closed the last open file", (void*)mi));
3670 nfs4_remove_mi_from_server(mi, sp);
3732 sfh4_put(const nfs_fh4 *fh, mntinfo4_t *mi, nfs4_sharedfh_t *key)
3755 nsfh->sfh_mi = mi;
3759 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
3760 sfh = avl_find(&mi->mi_filehandles, key, &where);
3765 nfs_rw_exit(&mi->mi_fh_lock);
3772 avl_insert(&mi->mi_filehandles, nsfh, where);
3773 nfs_rw_exit(&mi->mi_fh_lock);
3784 sfh4_get(const nfs_fh4 *fh, mntinfo4_t *mi)
3797 zcmn_err(mi->mi_zone->zone_id, CE_NOTE, "sfh4_get:");
3810 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0);
3811 sfh = avl_find(&mi->mi_filehandles, &key, NULL);
3819 nfs_rw_exit(&mi->mi_fh_lock);
3822 nfs_rw_exit(&mi->mi_fh_lock);
3824 return (sfh4_put(fh, mi, &key));
3852 mntinfo4_t *mi;
3872 mi = sfh->sfh_mi;
3873 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
3881 nfs_rw_exit(&mi->mi_fh_lock);
3888 avl_remove(&mi->mi_filehandles, sfh);
3892 nfs_rw_exit(&mi->mi_fh_lock);
3910 mntinfo4_t *mi = sfh->sfh_mi;
3928 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
3931 avl_remove(&mi->mi_filehandles, sfh);
3949 dupsfh = avl_find(&mi->mi_filehandles, &key, &where);
3951 if (!(mi->mi_vfsp->vfs_flag & VFS_RDONLY) || nfs4_warn_dupfh) {
3952 zcmn_err(mi->mi_zone->zone_id, CE_WARN, "sfh4_update: "
3957 avl_insert(&mi->mi_filehandles, sfh, where);
3962 nfs_rw_exit(&mi->mi_fh_lock);
3972 mntinfo4_t *mi = sfh->sfh_mi;
3976 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0);
3980 nfs_rw_exit(&mi->mi_fh_lock);
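
The sfh4_get/sfh4_put pair (3732-3824 above) implements find-or-insert on the per-mount mi_filehandles AVL tree: sfh4_get tries a lookup with mi_fh_lock held as reader, and only on a miss does sfh4_put retake the lock as writer and re-run the lookup before inserting, since another thread may have won the race between the two lock grabs. A compact sketch of that double-checked pattern with a pthread rwlock; the table callbacks stand in for avl_find/avl_insert, and the refcount handling is simplified (the kernel bumps sfh_refcnt under a per-handle mutex):

#include <pthread.h>
#include <stddef.h>

struct sfh {
	unsigned refcnt;
	/* ... cached file handle ... */
};

/* Hypothetical table mirroring mi_filehandles + mi_fh_lock. */
struct fh_table {
	pthread_rwlock_t lock;					/* mi_fh_lock */
	struct sfh *(*find)(struct fh_table *, const void *);	/* avl_find */
	void (*insert)(struct fh_table *, struct sfh *);	/* avl_insert */
};

extern struct sfh *sfh_alloc(const void *);	/* hypothetical */

struct sfh *
sfh_get(struct fh_table *t, const void *key)
{
	struct sfh *sfh;

	/* Fast path: shared lock, lookup only (cf. sfh4_get, 3810-3822). */
	pthread_rwlock_rdlock(&t->lock);
	sfh = t->find(t, key);
	if (sfh != NULL) {
		sfh->refcnt++;
		pthread_rwlock_unlock(&t->lock);
		return (sfh);
	}
	pthread_rwlock_unlock(&t->lock);

	/*
	 * Slow path: exclusive lock, then look again, because another
	 * thread may have inserted while no lock was held
	 * (cf. sfh4_put, 3759-3773).
	 */
	pthread_rwlock_wrlock(&t->lock);
	sfh = t->find(t, key);
	if (sfh == NULL) {
		sfh = sfh_alloc(key);
		sfh->refcnt = 1;
		t->insert(t, sfh);
	} else {
		sfh->refcnt++;	/* lost the race; reuse the winner's entry */
	}
	pthread_rwlock_unlock(&t->lock);
	return (sfh);
}
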