Lines Matching refs:cp

1223 kmem_cache_t *cp;
1226 for (cp = list_head(&kmem_caches); cp != NULL;
1227 cp = list_next(&kmem_caches, cp))
1229 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1232 func(cp);
1239 kmem_cache_t *cp;
1242 for (cp = list_head(&kmem_caches); cp != NULL;
1243 cp = list_next(&kmem_caches, cp)) {
1244 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1247 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1250 func(cp);
1259 kmem_findslab(kmem_cache_t *cp, void *buf)
1263 mutex_enter(&cp->cache_lock);
1264 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1265 sp = list_next(&cp->cache_complete_slabs, sp)) {
1267 mutex_exit(&cp->cache_lock);
1271 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1272 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1274 mutex_exit(&cp->cache_lock);
1278 mutex_exit(&cp->cache_lock);
1288 kmem_cache_t *cp = cparg;
1297 sp = kmem_findslab(cp, buf);
1299 for (cp = list_tail(&kmem_caches); cp != NULL;
1300 cp = list_prev(&kmem_caches, cp)) {
1301 if ((sp = kmem_findslab(cp, buf)) != NULL)
1307 cp = NULL;
1310 if (cp != cparg)
1314 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1317 if (cp->cache_flags & KMF_BUFTAG)
1318 btp = KMEM_BUFTAG(cp, buf);
1319 if (cp->cache_flags & KMF_HASH) {
1320 mutex_enter(&cp->cache_lock);
1321 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1324 mutex_exit(&cp->cache_lock);
1327 if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1340 kmem_panic_info.kmp_realcache = cp;
1350 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1384 printf("buffer was allocated from %s,\n", cp->cache_name);
1403 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1413 (void *)sp, cp->cache_name);
1501 #define KMEM_AUDIT(lp, cp, bcp) \
1511 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1519 bca.bc_cache = cp;
1520 KMEM_AUDIT(lp, cp, &bca);
1524 * Create a new slab for cache cp.
1527 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1529 size_t slabsize = cp->cache_slabsize;
1530 size_t chunksize = cp->cache_chunksize;
1531 int cache_flags = cp->cache_flags;
1536 vmem_t *vmp = cp->cache_arena;
1538 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1540 color = cp->cache_color + cp->cache_align;
1541 if (color > cp->cache_maxcolor)
1542 color = cp->cache_mincolor;
1543 cp->cache_color = color;
1558 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1559 if (!(cp->cache_cflags & KMC_NOTOUCH))
1567 sp = KMEM_SLAB(cp, slab);
1571 sp->slab_cache = cp;
1583 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1590 bcap->bc_cache = cp;
1595 bcp = KMEM_BUFCTL(cp, buf);
1598 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1604 cp->cache_verify);
1612 kmem_log_event(kmem_slab_log, cp, sp, slab);
1620 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1630 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1631 atomic_inc_64(&cp->cache_alloc_fail);
1640 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1642 vmem_t *vmp = cp->cache_arena;
1645 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1648 if (cp->cache_flags & KMF_HASH) {
1652 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1656 vmem_free(vmp, slab, cp->cache_slabsize);
1660 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1666 ASSERT(MUTEX_HELD(&cp->cache_lock));
1669 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1673 (sp == avl_first(&cp->cache_partial_slabs))));
1674 ASSERT(sp->slab_cache == cp);
1676 cp->cache_slab_alloc++;
1677 cp->cache_bufslab--;
1683 if (cp->cache_flags & KMF_HASH) {
1688 hash_bucket = KMEM_HASH(cp, buf);
1691 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1692 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1695 buf = KMEM_BUF(cp, bcp);
1706 avl_remove(&cp->cache_partial_slabs, sp);
1711 list_insert_head(&cp->cache_complete_slabs, sp);
1712 cp->cache_complete_slab_count++;
1722 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1723 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
1724 kmem_slab_prefill(cp, sp);
1729 avl_add(&cp->cache_partial_slabs, sp);
1737 ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1742 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1745 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1751 mutex_enter(&cp->cache_lock);
1752 test_destructor = (cp->cache_slab_alloc == 0);
1753 sp = avl_first(&cp->cache_partial_slabs);
1755 ASSERT(cp->cache_bufslab == 0);
1760 mutex_exit(&cp->cache_lock);
1761 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1764 mutex_enter(&cp->cache_lock);
1765 cp->cache_slab_create++;
1766 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1767 cp->cache_bufmax = cp->cache_buftotal;
1768 cp->cache_bufslab += sp->slab_chunks;
1771 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1772 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1773 (cp->cache_complete_slab_count +
1774 avl_numnodes(&cp->cache_partial_slabs) +
1775 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1776 mutex_exit(&cp->cache_lock);
1778 if (test_destructor && cp->cache_destructor != NULL) {
1784 if ((cp->cache_constructor == NULL) ||
1785 cp->cache_constructor(buf, cp->cache_private,
1787 cp->cache_destructor(buf, cp->cache_private);
1790 cp->cache_bufsize);
1791 if (cp->cache_flags & KMF_DEADBEEF) {
1792 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1802 * Free a raw (unconstructed) buffer to cp's slab layer.
1805 kmem_slab_free(kmem_cache_t *cp, void *buf)
1812 mutex_enter(&cp->cache_lock);
1813 cp->cache_slab_free++;
1815 if (cp->cache_flags & KMF_HASH) {
1819 prev_bcpp = KMEM_HASH(cp, buf);
1826 cp->cache_lookup_depth++;
1830 bcp = KMEM_BUFCTL(cp, buf);
1831 sp = KMEM_SLAB(cp, buf);
1834 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1835 mutex_exit(&cp->cache_lock);
1836 kmem_error(KMERR_BADADDR, cp, buf);
1848 kmem_slab_move_yes(cp, sp, buf);
1851 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1852 if (cp->cache_flags & KMF_CONTENTS)
1855 cp->cache_contents);
1856 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1862 cp->cache_bufslab++;
1871 list_remove(&cp->cache_complete_slabs, sp);
1872 cp->cache_complete_slab_count--;
1874 avl_remove(&cp->cache_partial_slabs, sp);
1877 cp->cache_buftotal -= sp->slab_chunks;
1878 cp->cache_bufslab -= sp->slab_chunks;
1890 if (cp->cache_defrag == NULL ||
1891 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1893 cp->cache_slab_destroy++;
1894 mutex_exit(&cp->cache_lock);
1895 kmem_slab_destroy(cp, sp);
1897 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1911 cp->cache_defrag->kmd_deadcount++;
1912 mutex_exit(&cp->cache_lock);
1921 list_remove(&cp->cache_complete_slabs, sp);
1922 cp->cache_complete_slab_count--;
1923 avl_add(&cp->cache_partial_slabs, sp);
1926 if (avl_update_gt(&cp->cache_partial_slabs, sp)) {
1932 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1936 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1937 (cp->cache_complete_slab_count +
1938 avl_numnodes(&cp->cache_partial_slabs) +
1939 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1940 mutex_exit(&cp->cache_lock);
1947 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1950 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1955 kmem_error(KMERR_BADBUFTAG, cp, buf);
1961 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1962 kmem_error(KMERR_BADBUFCTL, cp, buf);
1966 if (cp->cache_flags & KMF_DEADBEEF) {
1967 if (!construct && (cp->cache_flags & KMF_LITE)) {
1969 kmem_error(KMERR_MODIFIED, cp, buf);
1972 if (cp->cache_constructor != NULL)
1980 cp->cache_verify)) {
1981 kmem_error(KMERR_MODIFIED, cp, buf);
1988 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1991 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1992 if (!construct && cp->cache_destructor != NULL)
1993 cp->cache_destructor(buf, cp->cache_private);
1998 if (mtbf || (construct && cp->cache_constructor != NULL &&
1999 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
2000 atomic_inc_64(&cp->cache_alloc_fail);
2002 if (cp->cache_flags & KMF_DEADBEEF)
2003 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2004 kmem_slab_free(cp, buf);
2008 if (cp->cache_flags & KMF_AUDIT) {
2009 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2012 if ((cp->cache_flags & KMF_LITE) &&
2013 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2021 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
2023 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2029 kmem_error(KMERR_DUPFREE, cp, buf);
2032 sp = kmem_findslab(cp, buf);
2033 if (sp == NULL || sp->slab_cache != cp)
2034 kmem_error(KMERR_BADADDR, cp, buf);
2036 kmem_error(KMERR_REDZONE, cp, buf);
2042 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2043 kmem_error(KMERR_BADBUFCTL, cp, buf);
2048 kmem_error(KMERR_REDZONE, cp, buf);
2052 if (cp->cache_flags & KMF_AUDIT) {
2053 if (cp->cache_flags & KMF_CONTENTS)
2055 buf, cp->cache_contents);
2056 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2059 if ((cp->cache_flags & KMF_LITE) &&
2060 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2064 if (cp->cache_flags & KMF_DEADBEEF) {
2065 if (cp->cache_flags & KMF_LITE)
2067 else if (cp->cache_destructor != NULL)
2068 cp->cache_destructor(buf, cp->cache_private);
2070 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2077 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2080 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2084 ASSERT(!list_link_active(&cp->cache_link) ||
2090 if (cp->cache_flags & KMF_DEADBEEF) {
2092 cp->cache_verify) != NULL) {
2093 kmem_error(KMERR_MODIFIED, cp, buf);
2096 if ((cp->cache_flags & KMF_LITE) &&
2097 cp->cache_destructor != NULL) {
2098 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2100 cp->cache_destructor(buf, cp->cache_private);
2103 } else if (cp->cache_destructor != NULL) {
2104 cp->cache_destructor(buf, cp->cache_private);
2107 kmem_slab_free(cp, buf);
2109 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2110 kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2117 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2127 if (!mutex_tryenter(&cp->cache_depot_lock)) {
2128 mutex_enter(&cp->cache_depot_lock);
2129 cp->cache_depot_contention++;
2133 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2140 mutex_exit(&cp->cache_depot_lock);
2149 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2151 mutex_enter(&cp->cache_depot_lock);
2152 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2156 mutex_exit(&cp->cache_depot_lock);
2160 * Update the working set statistics for cp's depot.
2163 kmem_depot_ws_update(kmem_cache_t *cp)
2165 mutex_enter(&cp->cache_depot_lock);
2166 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2167 cp->cache_full.ml_min = cp->cache_full.ml_total;
2168 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2169 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2170 mutex_exit(&cp->cache_depot_lock);
2174 * Set the working set statistics for cp's depot to zero. (Everything is
2178 kmem_depot_ws_zero(kmem_cache_t *cp)
2180 mutex_enter(&cp->cache_depot_lock);
2181 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2182 cp->cache_full.ml_min = cp->cache_full.ml_total;
2183 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2184 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2185 mutex_exit(&cp->cache_depot_lock);
2199 kmem_depot_ws_reap(kmem_cache_t *cp)
2205 ASSERT(!list_link_active(&cp->cache_link) ||
2208 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2210 (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2211 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2212 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2219 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2221 (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2222 kmem_magazine_destroy(cp, mp, 0);
2223 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2262 #define KMEM_DUMPCTL(cp, buf) \
2263 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2281 #define KDI_LOG(cp, stat) { \
2283 if ((kdl = (kmem_dump_log_t *)((cp)->cache_dumplog)) != NULL) { \
2288 kdl->kdl_cache = (cp); \
2289 (cp)->cache_dumplog = kdl; \
2353 kmem_cache_t *cp;
2355 for (cp = list_head(&kmem_caches); cp != NULL;
2356 cp = list_next(&kmem_caches, cp)) {
2357 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2359 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2360 cp->cache_flags |= KMF_DUMPDIVERT;
2366 cp->cache_flags |= KMF_DUMPUNSAFE;
2387 kmem_cache_t *cp;
2408 cp = kdl->kdl_cache;
2409 if (cp == NULL)
2420 cp->cache_name, kdl->kdl_allocs, kdl->kdl_frees,
2435 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2442 if ((buf = cp->cache_dumpfreelist) != NULL) {
2443 cp->cache_dumpfreelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2444 KDI_LOG(cp, kdl_allocs);
2450 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2451 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2454 if (cp->cache_align < PAGESIZE) {
2465 KDI_LOG(cp, kdl_alloc_fails);
2476 if (cp->cache_constructor != NULL &&
2477 cp->cache_constructor(buf, cp->cache_private, kmflag)
2481 cp->cache_name, (void *)cp);
2488 KDI_LOG(cp, kdl_alloc_fails);
2492 KDI_LOG(cp, kdl_allocs);
2500 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2505 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dumpfreelist;
2506 cp->cache_dumpfreelist = buf;
2507 KDI_LOG(cp, kdl_frees);
2512 KDI_LOG(cp, kdl_free_nondump);
2523 * Allocate a constructed object from cache cp.
2526 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2528 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2546 KDI_LOG(cp, kdl_unsafe);
2549 kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2577 KDI_LOG(cp, kdl_unsafe);
2579 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2597 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2600 kmem_depot_free(cp, &cp->cache_empty,
2618 buf = kmem_slab_alloc(cp, kmflag);
2623 if (cp->cache_flags & KMF_BUFTAG) {
2627 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2638 return (kmem_cache_alloc(cp, kmflag));
2643 if (cp->cache_constructor != NULL &&
2644 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2645 atomic_inc_64(&cp->cache_alloc_fail);
2646 kmem_slab_free(cp, buf);
2660 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2662 if (!freed && (cp->cache_flags & KMF_BUFTAG))
2663 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2670 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2671 cp->cache_destructor != NULL) {
2672 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */
2673 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2675 cp->cache_destructor(buf, cp->cache_private);
2678 cp->cache_destructor(buf, cp->cache_private);
2682 kmem_slab_free(cp, buf);
2693 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2704 emp = kmem_depot_alloc(cp, &cp->cache_empty);
2707 kmem_depot_free(cp, &cp->cache_full,
2718 mtp = cp->cache_magtype;
2741 kmem_depot_free(cp, &cp->cache_empty, emp);
2753 * Free a constructed object to cache cp.
2756 kmem_cache_free(kmem_cache_t *cp, void *buf)
2758 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2764 ASSERT(cp->cache_defrag == NULL ||
2765 cp->cache_defrag->kmd_thread != curthread ||
2766 (buf != cp->cache_defrag->kmd_from_buf &&
2767 buf != cp->cache_defrag->kmd_to_buf));
2773 KDI_LOG(cp, kdl_unsafe);
2774 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2778 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2814 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2824 kmem_slab_free_constructed(cp, buf, B_TRUE);
2828 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2830 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2831 int cache_flags = cp->cache_flags;
2841 ASSERT(MUTEX_HELD(&cp->cache_lock));
2843 ASSERT(cp->cache_constructor == NULL);
2844 ASSERT(sp->slab_cache == cp);
2847 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2853 cp->cache_bufslab -= nbufs;
2854 cp->cache_slab_alloc += nbufs;
2855 list_insert_head(&cp->cache_complete_slabs, sp);
2856 cp->cache_complete_slab_count++;
2857 mutex_exit(&cp->cache_lock);
2861 void *buf = KMEM_BUF(cp, head);
2895 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2910 kmem_slab_free(cp, KMEM_BUF(cp, head));
2917 mutex_enter(&cp->cache_lock);
2927 kmem_cache_t *cp = kmem_alloc_table[index];
2928 buf = kmem_cache_alloc(cp, kmflag);
2930 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2931 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2935 if (cp->cache_flags & KMF_LITE) {
2954 kmem_cache_t *cp;
2958 cp = kmem_alloc_table[index];
2963 cp = kmem_big_alloc_table[index];
2984 buf = kmem_cache_alloc(cp, kmflag);
2985 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2986 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2990 if (cp->cache_flags & KMF_LITE) {
3001 kmem_cache_t *cp;
3004 cp = kmem_alloc_table[index];
3009 cp = kmem_big_alloc_table[index];
3020 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
3021 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
3025 kmem_error(KMERR_DUPFREE, cp, buf);
3030 kmem_error(KMERR_BADSIZE, cp, buf);
3032 kmem_error(KMERR_REDZONE, cp, buf);
3037 kmem_error(KMERR_REDZONE, cp, buf);
3041 if (cp->cache_flags & KMF_LITE) {
3046 kmem_cache_free(cp, buf);
3119 kmem_cache_reap(kmem_cache_t *cp)
3122 cp->cache_reap++;
3131 if (cp->cache_reclaim != NULL) {
3138 delta = cp->cache_full.ml_total;
3139 cp->cache_reclaim(cp->cache_private);
3140 delta = cp->cache_full.ml_total - delta;
3142 mutex_enter(&cp->cache_depot_lock);
3143 cp->cache_full.ml_reaplimit += delta;
3144 cp->cache_full.ml_min += delta;
3145 mutex_exit(&cp->cache_depot_lock);
3149 kmem_depot_ws_reap(cp);
3151 if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3152 kmem_cache_defrag(cp);
3249 kmem_cache_magazine_purge(kmem_cache_t *cp)
3255 ASSERT(!list_link_active(&cp->cache_link) ||
3257 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3260 ccp = &cp->cache_cpu[cpu_seqid];
3275 kmem_magazine_destroy(cp, mp, rounds);
3277 kmem_magazine_destroy(cp, pmp, prounds);
3280 kmem_depot_ws_zero(cp);
3281 kmem_depot_ws_reap(cp);
3288 kmem_cache_magazine_enable(kmem_cache_t *cp)
3292 if (cp->cache_flags & KMF_NOMAGAZINE)
3296 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3298 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3308 kmem_cache_reap_now(kmem_cache_t *cp)
3310 ASSERT(list_link_active(&cp->cache_link));
3312 kmem_depot_ws_zero(cp);
3315 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3331 kmem_cache_magazine_resize(kmem_cache_t *cp)
3333 kmem_magtype_t *mtp = cp->cache_magtype;
3337 if (cp->cache_chunksize < mtp->mt_maxbuf) {
3338 kmem_cache_magazine_purge(cp);
3339 mutex_enter(&cp->cache_depot_lock);
3340 cp->cache_magtype = ++mtp;
3341 cp->cache_depot_contention_prev =
3342 cp->cache_depot_contention + INT_MAX;
3343 mutex_exit(&cp->cache_depot_lock);
3344 kmem_cache_magazine_enable(cp);
3353 kmem_hash_rescale(kmem_cache_t *cp)
3361 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3362 old_size = cp->cache_hash_mask + 1;
3373 mutex_enter(&cp->cache_lock);
3375 old_size = cp->cache_hash_mask + 1;
3376 old_table = cp->cache_hash_table;
3378 cp->cache_hash_mask = new_size - 1;
3379 cp->cache_hash_table = new_table;
3380 cp->cache_rescale++;
3387 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3394 mutex_exit(&cp->cache_lock);
3404 kmem_cache_update(kmem_cache_t *cp)
3415 mutex_enter(&cp->cache_lock);
3417 if ((cp->cache_flags & KMF_HASH) &&
3418 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3419 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3420 cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3423 mutex_exit(&cp->cache_lock);
3428 kmem_depot_ws_update(cp);
3434 mutex_enter(&cp->cache_depot_lock);
3436 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3437 (int)(cp->cache_depot_contention -
3438 cp->cache_depot_contention_prev) > kmem_depot_contention)
3441 cp->cache_depot_contention_prev = cp->cache_depot_contention;
3443 mutex_exit(&cp->cache_depot_lock);
3447 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3451 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3453 if (cp->cache_defrag != NULL)
3455 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3484 kmem_cache_t *cp = ksp->ks_private;
3495 mutex_enter(&cp->cache_lock);
3497 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
3498 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
3499 kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
3500 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
3501 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
3504 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3521 mutex_enter(&cp->cache_depot_lock);
3523 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
3524 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
3525 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
3526 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
3527 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3529 (cp->cache_flags & KMF_NOMAGAZINE) ?
3530 0 : cp->cache_magtype->mt_magsize;
3532 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
3533 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
3534 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3536 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3537 reap = MIN(reap, cp->cache_full.ml_total);
3539 mutex_exit(&cp->cache_depot_lock);
3541 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
3542 kmcp->kmc_align.value.ui64 = cp->cache_align;
3543 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
3544 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
3546 buf_avail += cp->cache_bufslab;
3548 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
3549 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
3550 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3551 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
3552 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
3553 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
3554 cp->cache_hash_mask + 1 : 0;
3555 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
3556 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
3557 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
3558 kmcp->kmc_reap.value.ui64 = cp->cache_reap;
3560 if (cp->cache_defrag == NULL) {
3575 kmem_defrag_t *kd = cp->cache_defrag;
3587 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3589 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3593 mutex_exit(&cp->cache_lock);
3603 kmem_cache_stat(kmem_cache_t *cp, char *name)
3606 kstat_t *ksp = cp->cache_kstat;
3693 const kmem_cache_t *cp;
3702 cp = s1->slab_cache;
3703 ASSERT(MUTEX_HELD(&cp->cache_lock));
3704 binshift = cp->cache_partial_binshift;
3709 w0 -= cp->cache_maxchunks;
3715 w1 -= cp->cache_maxchunks;
3751 kmem_cache_t *cp;
3779 * Get a kmem_cache structure. We arrange that cp->cache_cpu[]
3783 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3785 bzero(cp, csize);
3786 list_link_init(&cp->cache_link);
3806 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3814 if (cp->cache_flags & KMF_LITE) {
3818 cp->cache_flags |= KMF_BUFTAG;
3819 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3821 cp->cache_flags &= ~KMF_DEBUG;
3825 if (cp->cache_flags & KMF_DEADBEEF)
3826 cp->cache_flags |= KMF_REDZONE;
3828 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3829 cp->cache_flags |= KMF_NOMAGAZINE;
3832 cp->cache_flags &= ~KMF_DEBUG;
3835 cp->cache_flags &= ~KMF_TOUCH;
3838 cp->cache_flags |= KMF_PREFILL;
3841 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3844 cp->cache_flags |= KMF_NOMAGAZINE;
3846 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3847 cp->cache_flags |= KMF_REDZONE;
3849 if (!(cp->cache_flags & KMF_AUDIT))
3850 cp->cache_flags &= ~KMF_CONTENTS;
3852 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3853 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3854 cp->cache_flags |= KMF_FIREWALL;
3857 cp->cache_flags &= ~KMF_FIREWALL;
3859 if (cp->cache_flags & KMF_FIREWALL) {
3860 cp->cache_flags &= ~KMF_BUFTAG;
3861 cp->cache_flags |= KMF_NOMAGAZINE;
3869 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3870 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3871 cp->cache_bufsize = bufsize;
3872 cp->cache_align = align;
3873 cp->cache_constructor = constructor;
3874 cp->cache_destructor = destructor;
3875 cp->cache_reclaim = reclaim;
3876 cp->cache_private = private;
3877 cp->cache_arena = vmp;
3878 cp->cache_cflags = cflags;
3887 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3890 if (cp->cache_flags & KMF_BUFTAG) {
3891 cp->cache_bufctl = chunksize;
3892 cp->cache_buftag = chunksize;
3893 if (cp->cache_flags & KMF_LITE)
3899 if (cp->cache_flags & KMF_DEADBEEF) {
3900 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3901 if (cp->cache_flags & KMF_LITE)
3902 cp->cache_verify = sizeof (uint64_t);
3905 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3907 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3913 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3914 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3915 cp->cache_maxcolor = cp->cache_mincolor;
3916 cp->cache_flags |= KMF_HASH;
3917 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3919 !(cp->cache_flags & KMF_AUDIT) &&
3921 cp->cache_slabsize = vmp->vm_quantum;
3922 cp->cache_mincolor = 0;
3923 cp->cache_maxcolor =
3924 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3925 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3926 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3943 cp->cache_slabsize = bestfit;
3944 cp->cache_mincolor = 0;
3945 cp->cache_maxcolor = bestfit % chunksize;
3946 cp->cache_flags |= KMF_HASH;
3949 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3950 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3959 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3960 cp->cache_constructor != NULL)
3961 cp->cache_flags &= ~KMF_PREFILL;
3963 if (cp->cache_flags & KMF_HASH) {
3965 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3969 if (cp->cache_maxcolor >= vmp->vm_quantum)
3970 cp->cache_maxcolor = vmp->vm_quantum - 1;
3972 cp->cache_color = cp->cache_mincolor;
3977 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3979 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3984 list_create(&cp->cache_complete_slabs,
3987 if (cp->cache_flags & KMF_HASH) {
3988 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3990 bzero(cp->cache_hash_table,
3992 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3993 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3999 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
4004 cp->cache_magtype = mtp;
4010 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
4012 ccp->cc_flags = cp->cache_flags;
4020 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
4024 cp->cache_kstat->ks_data = &kmem_cache_kstat;
4025 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
4026 cp->cache_kstat->ks_private = cp;
4027 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
4028 kstat_install(cp->cache_kstat);
4036 list_insert_tail(&kmem_caches, cp);
4040 kmem_cache_magazine_enable(cp);
4042 return (cp);
4085 kmem_cache_set_move(kmem_cache_t *cp,
4097 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4098 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4107 mutex_enter(&cp->cache_lock);
4109 if (KMEM_IS_MOVABLE(cp)) {
4110 if (cp->cache_move == NULL) {
4111 ASSERT(cp->cache_slab_alloc == 0);
4113 cp->cache_defrag = defrag;
4115 bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4116 avl_create(&cp->cache_defrag->kmd_moves_pending,
4122 list_create(&cp->cache_defrag->kmd_deadlist,
4125 kmem_reset_reclaim_threshold(cp->cache_defrag);
4127 cp->cache_move = move;
4130 mutex_exit(&cp->cache_lock);
4138 kmem_cache_destroy(kmem_cache_t *cp)
4148 list_remove(&kmem_caches, cp);
4156 kmem_cache_magazine_purge(cp);
4158 mutex_enter(&cp->cache_lock);
4159 if (cp->cache_buftotal != 0)
4161 cp->cache_name, (void *)cp);
4162 if (cp->cache_defrag != NULL) {
4163 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4164 list_destroy(&cp->cache_defrag->kmd_deadlist);
4165 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4166 cp->cache_defrag = NULL;
4174 cp->cache_constructor = (int (*)(void *, void *, int))1;
4175 cp->cache_destructor = (void (*)(void *, void *))2;
4176 cp->cache_reclaim = (void (*)(void *))3;
4177 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4178 mutex_exit(&cp->cache_lock);
4180 kstat_delete(cp->cache_kstat);
4182 if (cp->cache_hash_table != NULL)
4183 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4184 (cp->cache_hash_mask + 1) * sizeof (void *));
4187 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4189 mutex_destroy(&cp->cache_depot_lock);
4190 mutex_destroy(&cp->cache_lock);
4192 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4221 kmem_cache_t *cp;
4240 cp = kmem_cache_create(name, cache_size, align,
4244 alloc_table[(size - 1) >> shift] = cp;
4353 kmem_cache_t *cp;
4422 while ((cp = list_tail(&kmem_caches)) != NULL)
4423 kmem_cache_destroy(cp);
4608 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4612 ASSERT(MUTEX_HELD(&cp->cache_lock));
4615 if (cp->cache_flags & KMF_HASH) {
4616 for (bcp = *KMEM_HASH(cp, buf);
4626 sp = KMEM_SLAB(cp, buf);
4628 bufbcp = KMEM_BUFCTL(cp, buf);
4638 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4642 ASSERT(cp->cache_defrag != NULL);
4677 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4681 kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf,
4688 if (cp->cache_flags & KMF_BUFTAG) {
4689 (void) kmem_cache_free_debug(cp, tbuf,
4706 kmem_hunt_mags(kmem_cache_t *cp, void *buf)
4714 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4720 tbuf = kmem_cache_alloc(cp, KM_NOSLEEP);
4727 if (cp->cache_flags & KMF_BUFTAG) {
4728 (void) kmem_cache_free_debug(cp, buf, caller());
4734 mutex_enter(&cp->cache_depot_lock);
4735 n = cp->cache_magtype->mt_magsize;
4736 for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) {
4737 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4738 mutex_exit(&cp->cache_depot_lock);
4742 mutex_exit(&cp->cache_depot_lock);
4746 ccp = &cp->cache_cpu[cpu_seqid];
4751 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4757 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4764 kmem_cache_free(cp, tbuf);
4773 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4775 ASSERT(MUTEX_HELD(&cp->cache_lock));
4784 avl_remove(&cp->cache_partial_slabs, sp);
4787 avl_add(&cp->cache_partial_slabs, sp);
4796 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4799 ASSERT(MUTEX_HELD(&cp->cache_lock));
4806 avl_remove(&cp->cache_partial_slabs, sp);
4810 avl_add(&cp->cache_partial_slabs, sp);
4851 kmem_cache_t *cp = sp->slab_cache;
4855 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4864 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4868 kmem_slab_free(cp, callback->kmm_to_buf);
4869 kmem_move_end(cp, callback);
4879 mutex_enter(&cp->cache_lock);
4880 free_on_slab = (kmem_slab_allocated(cp, sp,
4882 mutex_exit(&cp->cache_lock);
4886 kmem_slab_free(cp, callback->kmm_to_buf);
4887 kmem_move_end(cp, callback);
4891 if (cp->cache_flags & KMF_BUFTAG) {
4895 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4898 kmem_move_end(cp, callback);
4901 } else if (cp->cache_constructor != NULL &&
4902 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4904 atomic_inc_64(&cp->cache_alloc_fail);
4906 kmem_slab_free(cp, callback->kmm_to_buf);
4907 kmem_move_end(cp, callback);
4914 cp->cache_defrag->kmd_callbacks++;
4915 cp->cache_defrag->kmd_thread = curthread;
4916 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4917 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4918 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4921 response = cp->cache_move(callback->kmm_from_buf,
4922 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4924 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4926 cp->cache_defrag->kmd_thread = NULL;
4927 cp->cache_defrag->kmd_from_buf = NULL;
4928 cp->cache_defrag->kmd_to_buf = NULL;
4932 cp->cache_defrag->kmd_yes++;
4933 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4936 cp->cache_defrag->kmd_slabs_freed++;
4937 mutex_enter(&cp->cache_lock);
4938 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4939 mutex_exit(&cp->cache_lock);
4940 kmem_move_end(cp, callback);
4947 cp->cache_defrag->kmd_no++;
4948 mutex_enter(&cp->cache_lock);
4949 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4950 mutex_exit(&cp->cache_lock);
4954 cp->cache_defrag->kmd_later++;
4955 mutex_enter(&cp->cache_lock);
4957 mutex_exit(&cp->cache_lock);
4963 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4968 mutex_exit(&cp->cache_lock);
4972 cp->cache_defrag->kmd_dont_need++;
4973 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4975 cp->cache_defrag->kmd_slabs_freed++;
4976 mutex_enter(&cp->cache_lock);
4977 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4978 mutex_exit(&cp->cache_lock);
4982 cp->cache_defrag->kmd_dont_know++;
4983 if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) {
4985 cp->cache_defrag->kmd_hunt_found++;
4986 kmem_slab_free_constructed(cp, callback->kmm_from_buf,
4989 cp->cache_defrag->kmd_slabs_freed++;
4990 mutex_enter(&cp->cache_lock);
4991 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4992 mutex_exit(&cp->cache_lock);
4997 cp->cache_name, (void *)cp, response);
5000 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
5001 kmem_move_end(cp, callback);
5006 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
5014 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
5027 mutex_enter(&cp->cache_lock);
5029 n = avl_numnodes(&cp->cache_partial_slabs);
5031 mutex_exit(&cp->cache_lock);
5036 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
5045 mutex_exit(&cp->cache_lock);
5051 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
5054 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
5056 mutex_exit(&cp->cache_lock);
5061 mutex_enter(&cp->cache_lock);
5062 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5063 mutex_exit(&cp->cache_lock);
5064 kmem_slab_free(cp, to_buf);
5073 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
5077 ASSERT(cp->cache_defrag != NULL);
5079 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
5081 mutex_enter(&cp->cache_lock);
5082 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
5084 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5085 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
5086 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5102 cp->cache_defrag->kmd_deadcount--;
5103 cp->cache_slab_destroy++;
5104 mutex_exit(&cp->cache_lock);
5105 kmem_slab_destroy(cp, sp);
5107 mutex_enter(&cp->cache_lock);
5110 mutex_exit(&cp->cache_lock);
5129 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
5142 ASSERT(MUTEX_HELD(&cp->cache_lock));
5144 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
5145 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
5146 avl_numnodes(&cp->cache_partial_slabs) > 1);
5169 sp = avl_last(&cp->cache_partial_slabs);
5172 ((sp != avl_first(&cp->cache_partial_slabs)) ||
5174 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
5176 if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
5184 buf = (((char *)buf) + cp->cache_chunksize), j++) {
5186 if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5210 mutex_exit(&cp->cache_lock);
5212 success = kmem_move_begin(cp, sp, buf, flags);
5226 mutex_enter(&cp->cache_lock);
5232 &cp->cache_defrag->kmd_deadlist;
5236 &cp->cache_defrag->kmd_moves_pending)) {
5263 cp->cache_defrag->kmd_deadcount--;
5264 cp->cache_slab_destroy++;
5265 mutex_exit(&cp->cache_lock);
5266 kmem_slab_destroy(cp, sp);
5271 mutex_enter(&cp->cache_lock);
5327 ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5328 if (sp == avl_first(&cp->cache_partial_slabs)) {
5341 (sp == avl_first(&cp->cache_partial_slabs)),
5356 kmem_cache_t *cp = args->kmna_cache;
5361 ASSERT(list_link_active(&cp->cache_link));
5364 mutex_enter(&cp->cache_lock);
5365 sp = kmem_slab_allocated(cp, NULL, buf);
5369 mutex_exit(&cp->cache_lock);
5374 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5382 mutex_exit(&cp->cache_lock);
5386 kmem_slab_move_yes(cp, sp, buf);
5389 mutex_exit(&cp->cache_lock);
5391 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5392 mutex_enter(&cp->cache_lock);
5396 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5400 &cp->cache_defrag->kmd_moves_pending)) {
5402 mutex_exit(&cp->cache_lock);
5408 cp->cache_defrag->kmd_deadcount--;
5409 cp->cache_slab_destroy++;
5410 mutex_exit(&cp->cache_lock);
5411 kmem_slab_destroy(cp, sp);
5418 kmem_slab_move_yes(cp, sp, buf);
5420 mutex_exit(&cp->cache_lock);
5424 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5431 args->kmna_cache = cp;
5441 kmem_cache_defrag(kmem_cache_t *cp)
5445 ASSERT(cp->cache_defrag != NULL);
5447 mutex_enter(&cp->cache_lock);
5448 n = avl_numnodes(&cp->cache_partial_slabs);
5452 cp->cache_defrag->kmd_defrags++;
5453 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5455 mutex_exit(&cp->cache_lock);
5460 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5465 * cp->cache_buftotal kmem_frag_denom
5468 (cp->cache_buftotal * kmem_frag_numer));
5472 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5477 ASSERT(MUTEX_HELD(&cp->cache_lock));
5481 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5485 if ((cp->cache_complete_slab_count + avl_numnodes(
5486 &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5491 nfree = cp->cache_bufslab;
5492 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5493 kmem_cache_frag_threshold(cp, nfree));
5504 mutex_enter(&cp->cache_depot_lock);
5505 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5506 reap = MIN(reap, cp->cache_full.ml_total);
5507 mutex_exit(&cp->cache_depot_lock);
5509 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5510 if (kmem_cache_frag_threshold(cp, nfree)) {
5520 kmem_cache_scan(kmem_cache_t *cp)
5527 mutex_enter(&cp->cache_lock);
5529 kmd = cp->cache_defrag;
5532 mutex_exit(&cp->cache_lock);
5533 kmem_cache_reap(cp);
5537 if (kmem_cache_is_fragmented(cp, &reap)) {
5552 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5577 kmem_reset_reclaim_threshold(cp->cache_defrag);
5579 if (!avl_is_empty(&cp->cache_partial_slabs)) {
5590 mutex_exit(&cp->cache_lock);
5592 kmem_cache_reap(cp);
5598 (void) kmem_move_buffers(cp,
5605 mutex_exit(&cp->cache_lock);
5609 kmem_depot_ws_reap(cp);
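
The matches above all run through the cache pointer cp inside kmem.c's slab, magazine, and depot layers. For orientation, a minimal sketch of how a kernel client would drive the public entry points that appear in these matches (kmem_cache_create, kmem_cache_alloc, kmem_cache_free, kmem_cache_destroy, per kmem_cache_create(9F)) might look like the following; foo_t, foo_cache, and the callbacks are hypothetical names used only for illustration.

    #include <sys/types.h>
    #include <sys/kmem.h>

    typedef struct foo {
            kmutex_t        foo_lock;
            int             foo_refcnt;
    } foo_t;

    /* This cache pointer is the "cp" that kmem.c's internals operate on. */
    static kmem_cache_t *foo_cache;

    /* Constructor: invoked when the slab layer hands out a raw buffer. */
    static int
    foo_construct(void *buf, void *arg, int kmflags)
    {
            foo_t *fp = buf;

            mutex_init(&fp->foo_lock, NULL, MUTEX_DEFAULT, NULL);
            fp->foo_refcnt = 0;
            return (0);
    }

    /* Destructor: invoked before the buffer returns to cp's slab layer. */
    static void
    foo_destroy(void *buf, void *arg)
    {
            foo_t *fp = buf;

            mutex_destroy(&fp->foo_lock);
    }

    void
    foo_init(void)
    {
            foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
                foo_construct, foo_destroy, NULL, NULL, NULL, 0);
    }

    void
    foo_example(void)
    {
            foo_t *fp = kmem_cache_alloc(foo_cache, KM_SLEEP);

            /* ... use the constructed object ... */

            kmem_cache_free(foo_cache, fp);
    }

    void
    foo_fini(void)
    {
            kmem_cache_destroy(foo_cache);
    }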