Lines Matching refs:pool

54  *   Each master transport is registered to exactly one thread pool.
68 * A pool represents a kernel RPC service (NFS, Lock Manager, etc.).
69 * Transports related to the service are registered to the service pool.
70 * Service threads can switch between different transports in the pool.
71 * Thus, each service has its own pool of service threads. The maximum
72 * number of threads in a pool is pool->p_maxthreads. This limit allows
80 * In addition, each pool contains a doubly-linked list of transports,
82 * the pool share some other parameters such as stack size and
89 * svc_do_run(), respectively. Once the pool has been initialized,
97 * When we try to register a new pool and there is an old pool with
98 * the same id in the doubly linked pool list (this happens when we kill
99 * and restart nfsd or lockd), then we unlink the old pool from the list
102 * transports and service threads associated with the pool are gone the
103 * creator thread (see below) will clean up the pool structure and exit.
120 * If there is none we take a hint from the pool's `xprt-ready' queue.
122 * each transport in the pool's transport list. Once we find a
129 * requests on the transports registered with the pool.
130 * All the pool's threads sleep on the same condition variable.
138 * this search more efficient each pool has an `xprt-ready' queue.
146 * less efficient but safe `drain' mode and walk through the pool's
156 * Each pool has a thread creator associated with it. The creator thread
165 * When the pool is in the closing state (i.e. it has already been unregistered
166 * from the pool list) the last thread on the last transport in the pool
168 * clean up the pool structure and exit.
173 * at least pool->p_redline non-detached threads that can process incoming
259 * thread pool is limited to pool->p_maxthreads - svc_redline.
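Taken together, the matches below touch nearly every field of the pool structure. For orientation, here is a condensed sketch of SVCPOOL as implied by this listing. It is not the authoritative definition: types are approximated, the kernel synchronization types (kmutex_t, kcondvar_t, krwlock_t) and SVCMASTERXPRT/__SVCXPRT_QNODE are assumed from the usual RPC/DDI headers, and fields that never appear in these matches are omitted.

    /* Condensed sketch -- NOT the full illumos definition. */
    typedef struct __svcpool {
            int             p_id;                  /* pool id (svc_pool_find) */
            struct __svcpool *p_next;              /* global pool list links */
            struct __svcpool *p_prev;

            /* Transport list -- protected by p_lrwlock */
            krwlock_t       p_lrwlock;
            SVCMASTERXPRT   *p_lhead;              /* circular list head */
            int             p_lcount;              /* registered transports */

            /* Request accounting -- protected by p_req_lock */
            kmutex_t        p_req_lock;
            kcondvar_t      p_req_cv;              /* idle threads sleep here */
            int             p_reqs;                /* pending requests */
            size_t          p_size;                /* queued bytes (flow control) */
            int             p_walkers;             /* threads walking the list */
            int             p_asleep;              /* threads waiting on p_req_cv */
            bool_t          p_drowsy;              /* a wakeup is already in flight */

            /* Thread accounting -- protected by p_thread_lock */
            kmutex_t        p_thread_lock;
            int             p_threads;
            int             p_detached_threads;
            int             p_reserved_threads;
            bool_t          p_closing;             /* unregistered, draining */

            /* `xprt-ready' hint queue (svc_xprt_q*()) */
            __SVCXPRT_QNODE *p_qbody;              /* circular array of hints */
            __SVCXPRT_QNODE *p_qtop;               /* producer end -- p_req_lock */
            __SVCXPRT_QNODE *p_qend;               /* consumer end -- p_qend_lock */
            kmutex_t        p_qend_lock;
            size_t          p_qsize;
            bool_t          p_qoverflow;           /* queue full; use `drain' walk */

            /* Tunables set in svc_pool_init() */
            uint_t          p_maxthreads;
            uint_t          p_redline;
            clock_t         p_timeout;             /* idle timeout, in ticks */
            int             p_stksize;
            uint_t          p_max_same_xprt;

            /* Creator-thread and svc_wait() handshakes */
            kmutex_t        p_creator_lock;
            kcondvar_t      p_creator_cv;
            bool_t          p_creator_signaled;
            bool_t          p_creator_exit;
            kmutex_t        p_user_lock;
            kcondvar_t      p_user_cv;
            bool_t          p_user_exit;
            bool_t          p_user_waiting;
            bool_t          p_signal_create_thread;

            /* User-supplied callbacks (svc_pool_control) */
            void            (*p_shutdown)(void);
            void            (*p_offline)(void);
    } SVCPOOL;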
360 SVCPOOL *pool;
363 while ((pool = svc->svc_pools) != NULL) {
364 svc_pool_unregister(svc, pool);
399 svc_pool_cleanup(SVCPOOL *pool)
401 ASSERT(pool->p_threads + pool->p_detached_threads == 0);
402 ASSERT(pool->p_lcount == 0);
403 ASSERT(pool->p_closing);
407 * here so the user of the pool will be able to clean up
410 if (pool->p_shutdown != NULL)
411 (pool->p_shutdown)();
414 svc_xprt_qdestroy(pool);
417 rw_destroy(&pool->p_lrwlock);
420 mutex_destroy(&pool->p_thread_lock);
421 mutex_destroy(&pool->p_req_lock);
422 cv_destroy(&pool->p_req_cv);
425 mutex_destroy(&pool->p_creator_lock);
426 cv_destroy(&pool->p_creator_cv);
427 mutex_destroy(&pool->p_user_lock);
428 cv_destroy(&pool->p_user_cv);
430 /* Free pool structure */
431 kmem_free(pool, sizeof (SVCPOOL));
439 svc_pool_tryexit(SVCPOOL *pool)
441 ASSERT(MUTEX_HELD(&pool->p_thread_lock));
442 ASSERT(pool->p_closing);
444 if (pool->p_threads + pool->p_detached_threads == 0) {
445 rw_enter(&pool->p_lrwlock, RW_READER);
446 if (pool->p_lcount == 0) {
450 rw_exit(&pool->p_lrwlock);
451 mutex_exit(&pool->p_thread_lock);
456 * NOTICE: No references to the pool beyond this point!
457 * The pool is being destroyed.
459 ASSERT(!MUTEX_HELD(&pool->p_thread_lock));
460 svc_creator_signalexit(pool);
464 rw_exit(&pool->p_lrwlock);
467 ASSERT(MUTEX_HELD(&pool->p_thread_lock));
472 * Find a pool with a given id.
477 SVCPOOL *pool;
482 * Search the list for a pool with a matching id
483 * and return a pointer to it.
485 for (pool = svc->svc_pools; pool; pool = pool->p_next)
486 if (pool->p_id == id)
487 return (pool);
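svc_pool_find() itself is a plain linear scan with no locking of its own; the callers shown later in this listing (e.g. lines 728 and 799) hold the global pool-list lock across the scan and take a pool-level lock before dropping it. A minimal caller sketch of that pattern, with the global lock name svc_plock an assumption:

    SVCPOOL *pool;

    mutex_enter(&svc->svc_plock);          /* assumed global pool-list lock */
    if ((pool = svc_pool_find(svc, id)) == NULL) {
            mutex_exit(&svc->svc_plock);
            return (ENOENT);
    }
    /* Pin the pool before letting go of the list lock (cf. line 804). */
    rw_enter(&pool->p_lrwlock, RW_WRITER);
    mutex_exit(&svc->svc_plock);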
501 SVCPOOL *pool;
508 pool = svc_pool_find(svc, id);
512 if (pool == NULL)
516 * Increment the pool's thread count now
519 mutex_enter(&pool->p_thread_lock);
520 pool->p_threads++;
521 mutex_exit(&pool->p_thread_lock);
524 err = svc_run(pool);
530 * Unregister a pool from the pool list.
535 svc_pool_unregister(struct svc_globals *svc, SVCPOOL *pool)
537 SVCPOOL *next = pool->p_next;
538 SVCPOOL *prev = pool->p_prev;
543 if (pool == svc->svc_pools)
549 pool->p_next = pool->p_prev = NULL;
552 * Offline the pool. Mark the pool as closing.
553 * If there are no transports in this pool notify
556 mutex_enter(&pool->p_thread_lock);
557 if (pool->p_offline != NULL)
558 (pool->p_offline)();
559 pool->p_closing = TRUE;
560 if (svc_pool_tryexit(pool))
562 mutex_exit(&pool->p_thread_lock);
566 * Register a pool with a given id in the global doubly linked pool list.
567 * - if there is a pool with the same id in the list then unregister it
568 * - insert the new pool into the list.
571 svc_pool_register(struct svc_globals *svc, SVCPOOL *pool, int id)
576 * If there is a pool with the same id then remove it from
577 * the list and mark the pool as closing.
585 pool->p_id = id;
586 pool->p_next = svc->svc_pools;
587 pool->p_prev = NULL;
589 svc->svc_pools->p_prev = pool;
590 svc->svc_pools = pool;
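Reading lines 585-590 together, registration is a standard head insert into the doubly linked pool list; the unmatched line 588 is presumably a NULL guard on the old head. A reconstruction (not the verbatim source):

    pool->p_id = id;
    pool->p_next = svc->svc_pools;
    pool->p_prev = NULL;
    if (svc->svc_pools != NULL)            /* reconstructed guard (line 588) */
            svc->svc_pools->p_prev = pool;
    svc->svc_pools = pool;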
596 * Initialize a newly created pool structure
599 svc_pool_init(SVCPOOL *pool, uint_t maxthreads, uint_t redline,
604 ASSERT(pool);
623 svc_xprt_qinit(pool, qsize);
626 rw_init(&pool->p_lrwlock, NULL, RW_DEFAULT, NULL);
639 pool->p_maxthreads = maxthreads;
640 pool->p_redline = redline;
641 pool->p_timeout = timeout * hz;
642 pool->p_stksize = stksize;
643 pool->p_max_same_xprt = max_same_xprt;
644 mutex_init(&pool->p_thread_lock, NULL, MUTEX_DEFAULT, NULL);
645 mutex_init(&pool->p_req_lock, NULL, MUTEX_DEFAULT, NULL);
646 cv_init(&pool->p_req_cv, NULL, CV_DEFAULT, NULL);
649 pool->p_user_exit = FALSE;
650 pool->p_signal_create_thread = FALSE;
651 pool->p_user_waiting = FALSE;
652 mutex_init(&pool->p_user_lock, NULL, MUTEX_DEFAULT, NULL);
653 cv_init(&pool->p_user_cv, NULL, CV_DEFAULT, NULL);
656 pool->p_creator_exit = FALSE;
657 mutex_init(&pool->p_creator_lock, NULL, MUTEX_DEFAULT, NULL);
658 cv_init(&pool->p_creator_cv, NULL, CV_DEFAULT, NULL);
660 (void) zthread_create(NULL, pool->p_stksize, svc_thread_creator,
661 pool, 0, minclsyspri);
672 * Create a kernel RPC server-side thread/transport pool.
674 * This is the public interface for creation of a server RPC thread pool
675 * for a given service provider. Transports registered with the pool's id
676 * will be served by the pool's threads. This function is called from the
682 SVCPOOL *pool;
692 /* Allocate a new pool */
693 pool = kmem_zalloc(sizeof (SVCPOOL), KM_SLEEP);
696 * Initialize the pool structure and create a creator thread.
698 error = svc_pool_init(pool, args->maxthreads, args->redline,
702 kmem_free(pool, sizeof (SVCPOOL));
706 /* Register the pool with the global pool list */
707 svc_pool_register(svc, pool, args->id);
715 SVCPOOL *pool;
723 * Search the list for a pool with a matching id
724 * and register the shutdown callback with that pool.
728 if ((pool = svc_pool_find(svc, id)) == NULL) {
734 * pool list lock
736 rw_enter(&pool->p_lrwlock, RW_WRITER);
739 pool->p_shutdown = *((void (*)())arg);
741 rw_exit(&pool->p_lrwlock);
746 * Search the list for a pool with a matching id
747 * and register the offline (unregister) callback with that pool.
751 if ((pool = svc_pool_find(svc, id)) == NULL) {
757 * pool list lock
759 rw_enter(&pool->p_lrwlock, RW_WRITER);
762 pool->p_offline = *((void (*)())arg);
764 rw_exit(&pool->p_lrwlock);
779 * list of server transport handles (one list per pool).
789 SVCPOOL *pool;
794 * Search the list for a pool with a matching id
795 * and register the transport handle with that pool.
799 if ((pool = svc_pool_find(svc, id)) == NULL) {
804 /* Grab the transport list lock before releasing the pool list lock */
805 rw_enter(&pool->p_lrwlock, RW_WRITER);
808 /* Don't register new transports when the pool is in closing state */
809 if (pool->p_closing) {
810 rw_exit(&pool->p_lrwlock);
815 * Initialize xp_pool to point to the pool.
816 * We don't want to go through the pool list every time.
818 xprt->xp_pool = pool;
824 if (pool->p_lhead == NULL)
825 pool->p_lhead = xprt->xp_prev = xprt->xp_next = xprt;
827 next = pool->p_lhead;
828 prev = pool->p_lhead->xp_prev;
833 pool->p_lhead = prev->xp_next = next->xp_prev = xprt;
837 pool->p_lcount++;
839 rw_exit(&pool->p_lrwlock);
845 * from the pool's list of server transports (when a transport is
851 SVCPOOL *pool = xprt->xp_pool;
859 rw_enter(&pool->p_lrwlock, RW_WRITER);
862 pool->p_lhead = NULL;
870 if (pool->p_lhead == xprt)
871 pool->p_lhead = next;
877 pool->p_lcount--;
879 rw_exit(&pool->p_lrwlock);
883 svc_xprt_qdestroy(SVCPOOL *pool)
885 mutex_destroy(&pool->p_qend_lock);
886 kmem_free(pool->p_qbody, pool->p_qsize * sizeof (__SVCXPRT_QNODE));
890 * Initialize an `xprt-ready' queue for a given pool.
893 svc_xprt_qinit(SVCPOOL *pool, size_t qsize)
897 pool->p_qsize = qsize;
898 pool->p_qbody = kmem_zalloc(pool->p_qsize * sizeof (__SVCXPRT_QNODE),
901 for (i = 0; i < pool->p_qsize - 1; i++)
902 pool->p_qbody[i].q_next = &(pool->p_qbody[i+1]);
904 pool->p_qbody[pool->p_qsize-1].q_next = &(pool->p_qbody[0]);
905 pool->p_qtop = &(pool->p_qbody[0]);
906 pool->p_qend = &(pool->p_qbody[0]);
908 mutex_init(&pool->p_qend_lock, NULL, MUTEX_DEFAULT, NULL);
917 * NOTICE: pool->p_qtop is protected by the pool's request lock
921 svc_xprt_qput(SVCPOOL *pool, SVCMASTERXPRT *xprt)
923 ASSERT(MUTEX_HELD(&pool->p_req_lock));
926 if (pool->p_qoverflow)
930 if (pool->p_qtop->q_next == pool->p_qend) {
931 mutex_enter(&pool->p_qend_lock);
932 if (pool->p_qtop->q_next == pool->p_qend) {
933 pool->p_qoverflow = TRUE;
934 mutex_exit(&pool->p_qend_lock);
937 mutex_exit(&pool->p_qend_lock);
940 /* Insert a hint and move pool->p_qtop */
941 pool->p_qtop->q_xprt = xprt;
942 pool->p_qtop = pool->p_qtop->q_next;
950 * Since we do not acquire the pool's request lock while checking if
953 * count indicates that there are pending requests for this pool.
956 svc_xprt_qget(SVCPOOL *pool)
960 mutex_enter(&pool->p_qend_lock);
964 * Since we do not acquire the pool's request lock which
965 * protects pool->p_qtop, this is not an exact check. However,
969 if (pool->p_qend == pool->p_qtop) {
970 mutex_exit(&pool->p_qend_lock);
974 /* Get a hint and move pool->p_qend */
975 xprt = pool->p_qend->q_xprt;
976 pool->p_qend = pool->p_qend->q_next;
980 mutex_exit(&pool->p_qend_lock);
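The qput/qget pair above implements a fixed-size circular hint buffer with one lock per end: p_qtop moves under the pool's request lock, p_qend under its own p_qend_lock, and a full buffer simply sets p_qoverflow instead of blocking (hints are best-effort; an overflow only forces the slower `drain' walk). A user-space sketch of the same discipline, with pthreads standing in for kernel mutexes and the size illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define QSIZE 256                      /* illustrative, not the default */

    struct qnode { void *q_hint; struct qnode *q_next; };

    static struct qnode qbody[QSIZE];
    static struct qnode *qtop, *qend;      /* producer / consumer cursors */
    static bool qoverflow;
    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;  /* guards qtop */
    static pthread_mutex_t qend_lock = PTHREAD_MUTEX_INITIALIZER; /* guards qend */

    void
    q_init(void)
    {
            for (size_t i = 0; i < QSIZE - 1; i++)
                    qbody[i].q_next = &qbody[i + 1];
            qbody[QSIZE - 1].q_next = &qbody[0];   /* close the ring */
            qtop = qend = &qbody[0];
    }

    /* Producer: caller already holds req_lock, as svc_xprt_qput() does. */
    void
    q_put(void *hint)
    {
            if (qoverflow)
                    return;                /* best effort only */
            if (qtop->q_next == qend) {    /* looks full: confirm under qend_lock */
                    pthread_mutex_lock(&qend_lock);
                    bool full = (qtop->q_next == qend);
                    if (full)
                            qoverflow = true;
                    pthread_mutex_unlock(&qend_lock);
                    if (full)
                            return;
            }
            qtop->q_hint = hint;
            qtop = qtop->q_next;
    }

    /* Consumer: the emptiness check is racy without req_lock; a miss only
     * costs the caller a walk of the transport list, never a lost request. */
    void *
    q_get(void)
    {
            void *hint = NULL;

            pthread_mutex_lock(&qend_lock);
            if (qend != qtop) {
                    hint = qend->q_hint;
                    qend = qend->q_next;
            }
            pthread_mutex_unlock(&qend_lock);
            return hint;
    }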
991 svc_xprt_qdelete(SVCPOOL *pool, SVCMASTERXPRT *xprt)
995 mutex_enter(&pool->p_req_lock);
996 for (q = pool->p_qend; q != pool->p_qtop; q = q->q_next) {
1000 mutex_exit(&pool->p_req_lock);
1011 * b) remove a reference to this transport from the pool's transport list
1037 /* Unregister xprt from the pool's transport list */
1584 svc_thread_exit(SVCPOOL *pool, SVCXPRT *clone_xprt)
1590 mutex_enter(&pool->p_thread_lock);
1591 pool->p_threads--;
1592 if (pool->p_closing && svc_pool_tryexit(pool))
1595 mutex_exit(&pool->p_thread_lock);
1602 * - decrement the `detached thread' count for the pool
1611 svc_thread_exitdetached(SVCPOOL *pool, SVCXPRT *clone_xprt)
1616 ASSERT(!MUTEX_HELD(&pool->p_thread_lock));
1621 mutex_enter(&pool->p_thread_lock);
1623 ASSERT(pool->p_reserved_threads >= 0);
1624 ASSERT(pool->p_detached_threads > 0);
1626 pool->p_detached_threads--;
1627 if (pool->p_closing && svc_pool_tryexit(pool))
1630 mutex_exit(&pool->p_thread_lock);
1644 SVCPOOL *pool;
1650 pool = svc_pool_find(svc, id);
1653 if (pool == NULL)
1656 mutex_enter(&pool->p_user_lock);
1658 /* Check if there's already a user thread waiting on this pool */
1659 if (pool->p_user_waiting) {
1660 mutex_exit(&pool->p_user_lock);
1664 pool->p_user_waiting = TRUE;
1667 while (!pool->p_signal_create_thread && !pool->p_user_exit) {
1668 if (cv_wait_sig(&pool->p_user_cv, &pool->p_user_lock) == 0) {
1670 pool->p_user_waiting = FALSE;
1671 pool->p_signal_create_thread = FALSE;
1672 mutex_exit(&pool->p_user_lock);
1678 * pool at this time.
1681 svc_pool_unregister(svc, pool);
1688 pool->p_signal_create_thread = FALSE;
1689 pool->p_user_waiting = FALSE;
1692 * About to exit the service pool. Set return value
1695 * pool structure.
1697 if (pool->p_user_exit) {
1699 cv_signal(&pool->p_user_cv);
1702 mutex_exit(&pool->p_user_lock);
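The svc_wait() fragments above are the kernel half of a handshake with a userland `creator' thread: the daemon parks one thread per pool in svc_wait(), and whenever the kernel wants another worker (p_signal_create_thread) that thread returns and spawns a new LWP which enters svc_do_run(). A purely schematic daemon-side sketch; svc_wait() and svc_do_run() here stand for the nfssys() entry points named at line 89, and everything else is assumed:

    #include <pthread.h>

    /* Assumed wrappers over the nfssys() entry points. */
    extern int svc_wait(int poolid);
    extern int svc_do_run(int poolid);

    static void *
    service_thread(void *arg)
    {
            /* Loops in the kernel's svc_run() until told to exit. */
            (void) svc_do_run(*(int *)arg);
            return (NULL);
    }

    /* One `creator' thread per pool. */
    static void *
    creator_thread(void *arg)
    {
            int id = *(int *)arg;
            pthread_t t;

            /* svc_wait() blocks in the kernel until the pool wants one
             * more worker, or returns nonzero when the pool is closing
             * (p_user_exit). */
            while (svc_wait(id) == 0) {
                    if (pthread_create(&t, NULL, service_thread, &id) == 0)
                            (void) pthread_detach(t);
            }
            return (NULL);                 /* pool closed */
    }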
1713 svc_thread_creator(SVCPOOL *pool)
1717 CALLB_CPR_INIT(&cpr_info, &pool->p_creator_lock, callb_generic_cpr,
1721 mutex_enter(&pool->p_creator_lock);
1724 if (pool->p_creator_exit)
1728 pool->p_creator_signaled = FALSE;
1731 cv_wait(&pool->p_creator_cv, &pool->p_creator_lock);
1732 CALLB_CPR_SAFE_END(&cpr_info, &pool->p_creator_lock);
1735 if (pool->p_creator_exit)
1738 mutex_exit(&pool->p_creator_lock);
1740 mutex_enter(&pool->p_thread_lock);
1743 * When the pool is in closing state and all the transports
1746 if (pool->p_closing) {
1747 rw_enter(&pool->p_lrwlock, RW_READER);
1748 if (pool->p_lcount == 0) {
1749 rw_exit(&pool->p_lrwlock);
1750 mutex_exit(&pool->p_thread_lock);
1753 rw_exit(&pool->p_lrwlock);
1759 ASSERT(pool->p_reserved_threads >= 0);
1760 ASSERT(pool->p_detached_threads >= 0);
1762 if (pool->p_threads + pool->p_detached_threads <
1763 pool->p_maxthreads) {
1765 * Signal the service pool wait thread
1768 mutex_enter(&pool->p_user_lock);
1769 if (pool->p_signal_create_thread == FALSE) {
1770 pool->p_signal_create_thread = TRUE;
1771 cv_signal(&pool->p_user_cv);
1773 mutex_exit(&pool->p_user_lock);
1777 mutex_exit(&pool->p_thread_lock);
1785 mutex_enter(&pool->p_user_lock);
1786 pool->p_user_exit = TRUE;
1787 cv_broadcast(&pool->p_user_cv);
1788 mutex_exit(&pool->p_user_lock);
1790 /* Wait for svc_wait() to be done with the pool */
1791 mutex_enter(&pool->p_user_lock);
1792 while (pool->p_user_waiting) {
1794 cv_wait(&pool->p_user_cv, &pool->p_user_lock);
1795 CALLB_CPR_SAFE_END(&cpr_info, &pool->p_creator_lock);
1797 mutex_exit(&pool->p_user_lock);
1800 svc_pool_cleanup(pool);
1809 svc_creator_signal(SVCPOOL *pool)
1811 mutex_enter(&pool->p_creator_lock);
1812 if (pool->p_creator_signaled == FALSE) {
1813 pool->p_creator_signaled = TRUE;
1814 cv_signal(&pool->p_creator_cv);
1816 mutex_exit(&pool->p_creator_lock);
1823 svc_creator_signalexit(SVCPOOL *pool)
1825 mutex_enter(&pool->p_creator_lock);
1826 pool->p_creator_exit = TRUE;
1827 cv_signal(&pool->p_creator_cv);
1828 mutex_exit(&pool->p_creator_lock);
1842 svc_poll(SVCPOOL *pool, SVCMASTERXPRT *xprt, SVCXPRT *clone_xprt)
1859 * the pool and return to svc_run().
1867 * pool->p_max_same_xprt requests from the same transport
1871 if (xprt && xprt->xp_req_head && (!pool->p_qoverflow ||
1872 clone_xprt->xp_same_xprt++ < pool->p_max_same_xprt)) {
1885 mutex_enter(&pool->p_req_lock);
1886 pool->p_walkers++;
1887 mutex_exit(&pool->p_req_lock);
1893 rw_enter(&pool->p_lrwlock, RW_READER);
1906 * the pool and `walking-threads' counts, and return
1909 hint = svc_xprt_qget(pool);
1914 rw_exit(&pool->p_lrwlock);
1916 mutex_enter(&pool->p_req_lock);
1917 pool->p_walkers--;
1918 mutex_exit(&pool->p_req_lock);
1934 if (pool->p_reqs < pool->p_walkers) {
1935 mutex_enter(&pool->p_req_lock);
1936 if (pool->p_reqs < pool->p_walkers)
1938 mutex_exit(&pool->p_req_lock);
1940 if (pool->p_qoverflow) {
1949 * pool's transport list and search for a transport with a
1957 if (xprt == NULL && pool->p_lhead == NULL) {
1958 mutex_enter(&pool->p_req_lock);
1963 * `Walk' through the pool's list of master server
1967 next = xprt ? xprt->xp_next : pool->p_lhead;
1984 rw_exit(&pool->p_lrwlock);
1986 mutex_enter(&pool->p_req_lock);
1987 pool->p_walkers--;
1988 mutex_exit(&pool->p_req_lock);
1996 * Continue to `walk' through the pool's
2001 if (pool->p_reqs < pool->p_walkers) {
2003 mutex_enter(&pool->p_req_lock);
2004 if (pool->p_reqs < pool->p_walkers)
2006 mutex_exit(&pool->p_req_lock);
2015 * Decrement the `walking-threads' count for the pool.
2017 pool->p_walkers--;
2018 rw_exit(&pool->p_lrwlock);
2024 pool->p_asleep++;
2025 timeleft = cv_reltimedwait_sig(&pool->p_req_cv,
2026 &pool->p_req_lock, pool->p_timeout, TR_CLOCK_TICK);
2037 if (pool->p_drowsy) {
2038 pool->p_drowsy = FALSE;
2047 pool->p_asleep--;
2049 mutex_exit(&pool->p_req_lock);
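Pieced together from the matches above, svc_poll() consults its sources of work in a fixed order. In outline (a paraphrase of the control flow, not the verbatim source):

    /*
     * svc_poll() search order (paraphrase):
     *
     * 1. Stay on the current transport while it has queued requests, but
     *    at most pool->p_max_same_xprt in a row, and not at all once the
     *    hint queue has overflowed -- this keeps one busy transport from
     *    starving the rest.
     *
     * 2. Become a `walker' (pool->p_walkers++), take p_lrwlock as reader,
     *    and ask the `xprt-ready' queue for a hint via svc_xprt_qget();
     *    a usable hint is returned right away.
     *
     * 3. If the hint queue overflowed (pool->p_qoverflow), fall back to
     *    `drain' mode: walk the circular transport list starting at
     *    xprt->xp_next (or p_lhead) looking for pending requests.
     *
     * 4. Nothing found: drop the walker count, increment p_asleep, and
     *    sleep on p_req_cv via cv_reltimedwait_sig() for up to
     *    pool->p_timeout ticks. A wakeup clears p_drowsy and retries;
     *    a timeout or signal returns so the thread can exit.
     */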
2104 * The max number of threads working on the pool is roughly pool->p_maxthreads.
2105 * Every thread could handle up to pool->p_max_same_xprt requests from one
2107 * If all the threads in the pool are working on one transport, they will handle
2108 * no more than enough_reqs (pool->p_maxthreads * pool->p_max_same_xprt)
2119 * We want to prevent a particular pool from exhausting memory, so once the
2120 * total length of queued requests for the whole pool reaches the high
2125 * should be enough. We should also consider that up to pool->p_maxthreads
2126 * threads for the pool might work on large requests (this is not counted for
2147 * particular pool might grow up to 2 * the high watermark.
2153 * or once the total memory consumption for the whole pool falls below the low
2160 SVCPOOL *pool = xprt->xp_pool;
2162 int enough_reqs = pool->p_maxthreads * pool->p_max_same_xprt;
2179 * If this pool uses over 20% of memory and this transport is
2182 if (pool->p_size >= totalmem / 5 &&
2183 xprt->xp_size >= totalmem / 5 / pool->p_lcount)
2196 * If this pool still uses over 16% of memory and this transport is
2199 if (pool->p_size >= totalmem / 6 &&
2200 xprt->xp_size >= totalmem / 5 / pool->p_lcount / 2)
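Concretely, with illustrative values (not the shipped defaults) p_maxthreads = 16 and p_max_same_xprt = 8, enough_reqs is 128; on a 4 GB machine the pool trips flow control once its queued requests reach 4 GB / 5 (about 819 MB) and this transport holds at least its proportional share, and it releases only below 4 GB / 6 (about 683 MB). The gap between the 20% and 16% thresholds provides hysteresis so the state does not flap. A sketch of that two-threshold check; the parameter names are assumed stand-ins for pool->p_size, xprt->xp_size, and pool->p_lcount, and it mirrors only the matched lines, not the whole function:

    #include <stdbool.h>
    #include <stddef.h>

    bool
    pool_should_throttle(size_t pool_size, size_t xprt_size,
        size_t totalmem, int lcount, bool throttled)
    {
            if (!throttled) {
                    /* Trip at the high watermark: the pool holds >= 20% of
                     * memory and this transport at least its share of it. */
                    return (pool_size >= totalmem / 5 &&
                        xprt_size >= totalmem / 5 / lcount);
            }
            /* Stay throttled until below the low (16%) watermark; the
             * transport threshold is likewise halved on the way down. */
            return (pool_size >= totalmem / 6 &&
                xprt_size >= totalmem / 5 / lcount / 2);
    }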
2218 svc_run(SVCPOOL *pool)
2245 svc_thread_exit(pool, clone_xprt);
2250 next = svc_poll(pool, xprt, clone_xprt);
2273 svc_thread_exit(pool, clone_xprt);
2282 svc_thread_exit(pool, clone_xprt);
2295 mutex_enter(&pool->p_req_lock);
2296 pool->p_reqs--;
2297 if (pool->p_reqs == 0)
2298 pool->p_qoverflow = FALSE;
2299 pool->p_size -= size;
2300 mutex_exit(&pool->p_req_lock);
2309 "rpc_que_req_deq:pool %p mp %p", pool, mp);
2334 if (!(pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
2335 pool->p_asleep == 0)) {
2336 mutex_enter(&pool->p_req_lock);
2338 if (pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
2339 pool->p_asleep == 0)
2340 mutex_exit(&pool->p_req_lock);
2342 pool->p_asleep--;
2343 pool->p_drowsy = TRUE;
2345 cv_signal(&pool->p_req_cv);
2346 mutex_exit(&pool->p_req_lock);
2352 * still below pool->p_maxthreads limit, and no thread is
2360 if (pool->p_asleep == 0 && !pool->p_drowsy &&
2361 pool->p_threads + pool->p_detached_threads <
2362 pool->p_maxthreads)
2363 svc_creator_signal(pool);
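The wake-up test at lines 2334-2346 is a double-checked pattern: the three-way condition is evaluated once without the lock (cheap but racy) and again under p_req_lock before committing, and p_drowsy records that a woken thread has not yet run, so concurrent dequeuers do not all signal at once. A generic user-space rendering, with pthreads standing in for the kernel primitives:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t req_cv = PTHREAD_COND_INITIALIZER;
    static int reqs, walkers, asleep;
    static bool drowsy;                    /* a woken thread hasn't run yet */

    static void
    maybe_wake_one(void)
    {
            /* Unlocked pre-check: skip the lock in the common case where
             * no wakeup is needed (one already in flight, enough walkers,
             * or nobody asleep). */
            if (drowsy || reqs <= walkers || asleep == 0)
                    return;

            pthread_mutex_lock(&req_lock);
            /* Re-check under the lock; the first read was racy. */
            if (drowsy || reqs <= walkers || asleep == 0) {
                    pthread_mutex_unlock(&req_lock);
                    return;
            }
            asleep--;                      /* account for the woken thread */
            drowsy = true;                 /* it clears this when it runs */
            pthread_cond_signal(&req_cv);
            pthread_mutex_unlock(&req_lock);
    }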
2379 svc_thread_exitdetached(pool, clone_xprt);
2406 SVCPOOL *pool;
2412 pool = xprt->xp_pool;
2420 mutex_enter(&pool->p_req_lock);
2421 pool->p_reqs -= xprt->xp_reqs;
2422 pool->p_size -= xprt->xp_size;
2423 mutex_exit(&pool->p_req_lock);
2467 SVCPOOL *pool = xprt->xp_pool;
2475 mutex_enter(&pool->p_thread_lock);
2478 * If the pool is in closing state and this was
2479 * the last transport in the pool then signal the creator
2482 if (pool->p_closing && svc_pool_tryexit(pool)) {
2485 mutex_exit(&pool->p_thread_lock);
2512 * - increment the `pending-requests' count for the pool
2521 SVCPOOL *pool = xprt->xp_pool;
2532 * pool's request lock so that when we put
2545 mutex_enter(&pool->p_req_lock);
2560 svc_xprt_qput(pool, xprt);
2563 pool->p_reqs++;
2568 pool->p_size += size;
2575 "rpc_que_req_enq:pool %p mp %p", pool, mp);
2585 if (pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
2586 pool->p_asleep == 0) {
2587 mutex_exit(&pool->p_req_lock);
2589 pool->p_drowsy = TRUE;
2590 pool->p_asleep--;
2595 cv_signal(&pool->p_req_cv);
2596 mutex_exit(&pool->p_req_lock);
2603 * still below pool->p_maxthreads limit, and no thread is
2611 if (pool->p_asleep == 0 && !pool->p_drowsy &&
2612 pool->p_threads + pool->p_detached_threads < pool->p_maxthreads)
2613 svc_creator_signal(pool);
2625 * pool->p_maxthreads - pool->p_redline (i.e. that there are always
2626 * at least pool->p_redline non-detached threads).
2632 * - if so, then increment the `reserved threads' count for the pool
2640 SVCPOOL *pool = clone_xprt->xp_master->xp_pool;
2646 /* Check the pool counts to see if there is room for a reservation */
2647 mutex_enter(&pool->p_thread_lock);
2648 if (pool->p_reserved_threads + pool->p_detached_threads >=
2649 pool->p_maxthreads - pool->p_redline) {
2650 mutex_exit(&pool->p_thread_lock);
2653 pool->p_reserved_threads++;
2654 mutex_exit(&pool->p_thread_lock);
2664 * - decrement the `reserved threads' count for the pool
2670 SVCPOOL *pool = clone_xprt->xp_master->xp_pool;
2677 mutex_enter(&pool->p_thread_lock);
2678 pool->p_reserved_threads--;
2679 mutex_exit(&pool->p_thread_lock);
2694 * counts and increment the `detached threads' count for the pool
2708 SVCPOOL *pool = xprt->xp_pool;
2721 /* Bookkeeping for the pool */
2722 mutex_enter(&pool->p_thread_lock);
2723 pool->p_threads--;
2724 pool->p_reserved_threads--;
2725 pool->p_detached_threads++;
2726 mutex_exit(&pool->p_thread_lock);
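Putting the reservation and detach fragments together: a reservation succeeds only while p_reserved_threads + p_detached_threads stays below p_maxthreads - p_redline, so at most that many threads can ever be reserved or detached, and at least p_redline attached threads remain to service the pool (line 173). With p_maxthreads = 16 and p_redline = 1, for instance, no more than 15 threads may be reserved or detached at once. A sketch of the bound, using the SVCPOOL sketch near the top of this listing:

    /* Room for one more reservation? (cf. lines 2648-2649) */
    static int
    can_reserve(const SVCPOOL *pool)
    {
            return (pool->p_reserved_threads + pool->p_detached_threads <
                pool->p_maxthreads - pool->p_redline);
    }

svc_detach_thread() then converts the reservation rather than adding to it: p_threads and p_reserved_threads each drop by one while p_detached_threads rises by one (lines 2723-2725), so the bound above continues to hold.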
2747 * active in a given registered or unregistered kRPC thread pool. It shuts
2748 * down all active RDMA transports in that pool. If the thread active on the
2749 * transport happens to be the last thread for that pool, it will signal the
2750 * creator thread to clean up the pool and destroy the xprt in svc_queueclose()
2760 SVCPOOL *pool;
2777 pool = xprt->xp_pool;
2789 mutex_enter(&pool->p_req_lock);
2790 pool->p_reqs -= xprt->xp_reqs;
2791 pool->p_size -= xprt->xp_size;
2792 mutex_exit(&pool->p_req_lock);