Lines matching refs:cp — each entry below is a source line that references the umem_cache_t pointer cp; the number at the start of each entry is that line's position in the original source file.

143  *	is always zero.  umem_cache_alloc uses cp->cache_cpu_mask to
156 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
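
The two comment fragments above describe how a caller's "cpu" id is reduced to an index into the per-CPU cache array by masking it with umem_cpu_mask or cp->cache_cpu_mask. A minimal sketch of that masking idea, assuming a power-of-two CPU count; every type and name below except cache_cpu_mask and cache_cpu is made up for illustration:

typedef struct cpu_cache {
	int	cc_pad;			/* stand-in for real per-CPU state */
} cpu_cache_t;

typedef struct cache {
	unsigned int	cache_cpu_mask;	/* ncpus - 1, ncpus a power of two */
	cpu_cache_t	cache_cpu[4];	/* illustrative: four "CPUs" */
} cache_t;

static cpu_cache_t *
cpu_cache_for(cache_t *cp, unsigned int cpu_id)
{
	/* masking replaces a modulo because ncpus is a power of two */
	return (&cp->cache_cpu[cpu_id & cp->cache_cpu_mask]);
}
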
932 umem_cache_t *cp;
935 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
936 cp = cp->cache_next)
937 func(cp);
942 umem_add_update_unlocked(umem_cache_t *cp, int flags)
951 if (cp->cache_uflags & UMU_ACTIVE) {
952 cp->cache_uflags |= flags;
954 if (cp->cache_unext != NULL) {
955 ASSERT(cp->cache_uflags != 0);
956 cp->cache_uflags |= flags;
958 ASSERT(cp->cache_uflags == 0);
959 cp->cache_uflags = flags;
960 cp->cache_unext = cnext = &umem_null_cache;
961 cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
962 cnext->cache_uprev = cp;
963 cprev->cache_unext = cp;
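
Lines 960-963 above are the standard tail insertion into a circular, sentinel-headed doubly linked list (here, the list of caches with pending updates). The same pattern in isolation, with generic node names rather than umem's cache_unext/cache_uprev fields:

typedef struct node {
	struct node *next;
	struct node *prev;
} node_t;

/* the sentinel points at itself while the list is empty */
static node_t head = { &head, &head };

static void
insert_tail(node_t *np)
{
	node_t *cnext = &head;		/* new node precedes the sentinel ... */
	node_t *cprev = head.prev;	/* ... and follows the old tail */

	np->next = cnext;
	np->prev = cprev;
	cnext->prev = np;
	cprev->next = np;
}
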
969 umem_add_update(umem_cache_t *cp, int flags)
973 umem_add_update_unlocked(cp, flags);
986 umem_remove_updates(umem_cache_t *cp)
993 while (cp->cache_uflags & UMU_ACTIVE) {
996 ASSERT(cp->cache_unext == NULL);
998 cp->cache_uflags |= UMU_NOTIFY;
1015 if (cp->cache_unext != NULL) {
1016 cp->cache_uprev->cache_unext = cp->cache_unext;
1017 cp->cache_unext->cache_uprev = cp->cache_uprev;
1018 cp->cache_uprev = cp->cache_unext = NULL;
1019 cp->cache_uflags = 0;
1024 ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
1031 umem_cache_t *cp;
1041 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
1042 cp = cp->cache_next)
1043 umem_add_update_unlocked(cp, flags);
1056 umem_findslab(umem_cache_t *cp, void *buf)
1060 (void) mutex_lock(&cp->cache_lock);
1061 for (sp = cp->cache_nullslab.slab_next;
1062 sp != &cp->cache_nullslab; sp = sp->slab_next) {
1064 (void) mutex_unlock(&cp->cache_lock);
1068 (void) mutex_unlock(&cp->cache_lock);
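
umem_findslab (lines 1056-1068) walks the cache's sentinel-headed slab list under cache_lock looking for the slab that contains a given buffer. A self-contained sketch of that shape, with simplified types and a plain range check standing in for the real slab-membership test:

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

typedef struct slab {
	struct slab	*slab_next;
	uintptr_t	slab_base;
	size_t		slab_size;
} slab_t;

typedef struct scache {
	pthread_mutex_t	lock;
	slab_t		nullslab;	/* sentinel: slab_next == &nullslab when empty */
} scache_t;

static slab_t *
findslab(scache_t *cp, void *buf)
{
	slab_t *sp;

	(void) pthread_mutex_lock(&cp->lock);
	for (sp = cp->nullslab.slab_next; sp != &cp->nullslab;
	    sp = sp->slab_next) {
		/* unsigned subtraction also rejects buf below slab_base */
		if ((uintptr_t)buf - sp->slab_base < sp->slab_size) {
			(void) pthread_mutex_unlock(&cp->lock);
			return (sp);	/* buf lives in this slab */
		}
	}
	(void) pthread_mutex_unlock(&cp->lock);
	return (NULL);
}
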
1078 umem_cache_t *cp = cparg;
1089 sp = umem_findslab(cp, buf);
1091 for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
1092 cp = cp->cache_prev) {
1093 if ((sp = umem_findslab(cp, buf)) != NULL)
1099 cp = NULL;
1102 if (cp != cparg)
1106 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1109 if (cp->cache_flags & UMF_BUFTAG)
1110 btp = UMEM_BUFTAG(cp, buf);
1111 if (cp->cache_flags & UMF_HASH) {
1112 (void) mutex_lock(&cp->cache_lock);
1113 for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1116 (void) mutex_unlock(&cp->cache_lock);
1119 if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
1132 umem_abort_info.ump_realcache = cp;
1142 off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1176 umem_printf("buffer was allocated from %s,\n", cp->cache_name);
1196 if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) &&
1210 (void *)sp, cp->cache_name);
1229 umem_alloc_retry(umem_cache_t *cp, int umflag)
1231 if (cp == &umem_null_cache) {
1371 #define UMEM_AUDIT(lp, cp, bcp) \
1377 (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL)); \
1383 umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
1392 bcp->bc_cache = cp;
1393 UMEM_AUDIT(lp, cp, bcp);
1397 * Create a new slab for cache cp.
1400 umem_slab_create(umem_cache_t *cp, int umflag)
1402 size_t slabsize = cp->cache_slabsize;
1403 size_t chunksize = cp->cache_chunksize;
1404 int cache_flags = cp->cache_flags;
1409 vmem_t *vmp = cp->cache_arena;
1411 color = cp->cache_color + cp->cache_align;
1412 if (color > cp->cache_maxcolor)
1413 color = cp->cache_mincolor;
1414 cp->cache_color = color;
1423 if (!(cp->cache_cflags & UMC_NOTOUCH) &&
1424 (cp->cache_flags & UMF_DEADBEEF))
1432 sp = UMEM_SLAB(cp, slab);
1436 sp->slab_cache = cp;
1445 bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
1452 bcap->bc_cache = cp;
1457 bcp = UMEM_BUFCTL(cp, buf);
1460 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1466 cp->cache_verify);
1474 umem_log_event(umem_slab_log, cp, sp, slab);
1482 _umem_cache_free(cp->cache_bufctl_cache, bcp);
1492 umem_log_event(umem_failure_log, cp, NULL, NULL);
1493 atomic_add_64(&cp->cache_alloc_fail, 1);
1502 umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
1504 vmem_t *vmp = cp->cache_arena;
1507 if (cp->cache_flags & UMF_HASH) {
1511 _umem_cache_free(cp->cache_bufctl_cache, bcp);
1515 vmem_free(vmp, slab, cp->cache_slabsize);
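
Lines 1411-1414 cycle the cache's slab color so that successive slabs place their first buffer at different offsets, spreading cache-line and bus traffic. A toy model of that cycling with made-up values:

#include <stdio.h>
#include <stddef.h>

struct color_state {
	size_t color, align, mincolor, maxcolor;
};

static size_t
next_color(struct color_state *cs)
{
	size_t color = cs->color + cs->align;

	if (color > cs->maxcolor)
		color = cs->mincolor;	/* wrap back around */
	cs->color = color;
	return (color);		/* offset of the first chunk in the new slab */
}

int
main(void)
{
	struct color_state cs = { .color = 0, .align = 8,
	    .mincolor = 0, .maxcolor = 24 };

	for (int i = 0; i < 6; i++)	/* prints 8 16 24 0 8 16 */
		printf("%zu ", next_color(&cs));
	printf("\n");
	return (0);
}
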
1519 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1522 umem_slab_alloc(umem_cache_t *cp, int umflag)
1528 (void) mutex_lock(&cp->cache_lock);
1529 cp->cache_slab_alloc++;
1530 sp = cp->cache_freelist;
1531 ASSERT(sp->slab_cache == cp);
1536 (void) mutex_unlock(&cp->cache_lock);
1537 if (cp == &umem_null_cache)
1539 if ((sp = umem_slab_create(cp, umflag)) == NULL)
1541 (void) mutex_lock(&cp->cache_lock);
1542 cp->cache_slab_create++;
1543 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1544 cp->cache_bufmax = cp->cache_buftotal;
1545 sp->slab_next = cp->cache_freelist;
1546 sp->slab_prev = cp->cache_freelist->slab_prev;
1549 cp->cache_freelist = sp;
1561 cp->cache_freelist = sp->slab_next;
1565 if (cp->cache_flags & UMF_HASH) {
1570 hash_bucket = UMEM_HASH(cp, buf);
1573 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1574 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1577 buf = UMEM_BUF(cp, bcp);
1582 (void) mutex_unlock(&cp->cache_lock);
1588 * Free a raw (unconstructed) buffer to cp's slab layer.
1591 umem_slab_free(umem_cache_t *cp, void *buf)
1598 (void) mutex_lock(&cp->cache_lock);
1599 cp->cache_slab_free++;
1601 if (cp->cache_flags & UMF_HASH) {
1605 prev_bcpp = UMEM_HASH(cp, buf);
1612 cp->cache_lookup_depth++;
1616 bcp = UMEM_BUFCTL(cp, buf);
1617 sp = UMEM_SLAB(cp, buf);
1620 if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
1621 (void) mutex_unlock(&cp->cache_lock);
1622 umem_error(UMERR_BADADDR, cp, buf);
1626 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1627 if (cp->cache_flags & UMF_CONTENTS)
1630 cp->cache_contents);
1631 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1639 ASSERT(cp->cache_freelist != sp);
1642 sp->slab_next = cp->cache_freelist;
1643 sp->slab_prev = cp->cache_freelist->slab_prev;
1646 cp->cache_freelist = sp;
1660 if (sp == cp->cache_freelist)
1661 cp->cache_freelist = sp->slab_next;
1662 cp->cache_slab_destroy++;
1663 cp->cache_buftotal -= sp->slab_chunks;
1664 (void) mutex_unlock(&cp->cache_lock);
1665 umem_slab_destroy(cp, sp);
1668 (void) mutex_unlock(&cp->cache_lock);
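
For UMF_HASH caches, the slab layer above keeps each buffer's bufctl on a hash chain keyed by buffer address (see the UMEM_HASH uses at lines 1113, 1570 and 1605), so a free must look the bufctl up and unlink it. A simplified sketch of that bookkeeping; the hash function, table size, and names are illustrative only:

#include <stdint.h>
#include <stddef.h>

#define	NBUCKETS	64		/* power of two, purely illustrative */

typedef struct bufctl {
	struct bufctl	*next;
	void		*addr;
} bufctl_t;

static bufctl_t *hash_table[NBUCKETS];

static bufctl_t **
hash_bucket(void *buf)
{
	return (&hash_table[((uintptr_t)buf >> 3) & (NBUCKETS - 1)]);
}

static void
hash_insert(bufctl_t *bcp)
{
	bufctl_t **bucket = hash_bucket(bcp->addr);

	bcp->next = *bucket;
	*bucket = bcp;
}

static bufctl_t *
hash_remove(void *buf)
{
	bufctl_t **prev = hash_bucket(buf), *bcp;

	while ((bcp = *prev) != NULL) {
		if (bcp->addr == buf) {
			*prev = bcp->next;	/* unlink from the chain */
			return (bcp);
		}
		prev = &bcp->next;
	}
	return (NULL);				/* unknown address */
}
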
1672 umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag)
1674 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1680 umem_error(UMERR_BADBUFTAG, cp, buf);
1686 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1687 umem_error(UMERR_BADBUFCTL, cp, buf);
1693 if (cp->cache_flags & UMF_DEADBEEF) {
1695 UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) {
1696 umem_error(UMERR_MODIFIED, cp, buf);
1701 if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 &&
1704 umem_log_event(umem_failure_log, cp, NULL, NULL);
1714 if (mtbf || (cp->cache_constructor != NULL &&
1715 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) {
1716 atomic_add_64(&cp->cache_alloc_fail, 1);
1718 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1719 umem_slab_free(cp, buf);
1723 if (cp->cache_flags & UMF_AUDIT) {
1724 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1731 umem_cache_free_debug(umem_cache_t *cp, void *buf)
1733 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1739 umem_error(UMERR_DUPFREE, cp, buf);
1742 sp = umem_findslab(cp, buf);
1743 if (sp == NULL || sp->slab_cache != cp)
1744 umem_error(UMERR_BADADDR, cp, buf);
1746 umem_error(UMERR_REDZONE, cp, buf);
1752 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1753 umem_error(UMERR_BADBUFCTL, cp, buf);
1758 umem_error(UMERR_REDZONE, cp, buf);
1762 if (cp->cache_flags & UMF_AUDIT) {
1763 if (cp->cache_flags & UMF_CONTENTS)
1765 buf, cp->cache_contents);
1766 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1769 if (cp->cache_destructor != NULL)
1770 cp->cache_destructor(buf, cp->cache_private);
1772 if (cp->cache_flags & UMF_DEADBEEF)
1773 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
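
The debug paths above fill freed buffers with UMEM_FREE_PATTERN and verify the pattern again on the next allocation (lines 1693-1696, 1718, 1772-1773), which catches writes to freed memory. A sketch of what such pattern helpers look like; the pattern value and signatures are assumptions for illustration, not copied from libumem, and the verify size is assumed to be a multiple of 8:

#include <stdint.h>
#include <stddef.h>

#define	FREE_PATTERN	0xdeadbeefdeadbeefULL	/* assumed value */

static void
copy_pattern(uint64_t pat, void *buf, size_t size)
{
	uint64_t *p = buf, *end = p + size / sizeof (uint64_t);

	while (p < end)
		*p++ = pat;
}

static void *
verify_pattern(uint64_t pat, void *buf, size_t size)
{
	uint64_t *p = buf, *end = p + size / sizeof (uint64_t);

	for (; p < end; p++)
		if (*p != pat)
			return (p);	/* first modified word ... */
	return (NULL);			/* ... or NULL if untouched */
}
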
1779 * Free each object in magazine mp to cp's slab layer, and free mp itself.
1782 umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds)
1786 ASSERT(cp->cache_next == NULL || IN_UPDATE());
1791 if ((cp->cache_flags & UMF_DEADBEEF) &&
1793 cp->cache_verify) != NULL) {
1794 umem_error(UMERR_MODIFIED, cp, buf);
1798 if (!(cp->cache_flags & UMF_BUFTAG) &&
1799 cp->cache_destructor != NULL)
1800 cp->cache_destructor(buf, cp->cache_private);
1802 umem_slab_free(cp, buf);
1804 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1805 _umem_cache_free(cp->cache_magtype->mt_cache, mp);
1812 umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp)
1822 if (mutex_trylock(&cp->cache_depot_lock) != 0) {
1823 (void) mutex_lock(&cp->cache_depot_lock);
1824 cp->cache_depot_contention++;
1828 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1835 (void) mutex_unlock(&cp->cache_depot_lock);
1844 umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp)
1846 (void) mutex_lock(&cp->cache_depot_lock);
1847 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1851 (void) mutex_unlock(&cp->cache_depot_lock);
1855 * Update the working set statistics for cp's depot.
1858 umem_depot_ws_update(umem_cache_t *cp)
1860 (void) mutex_lock(&cp->cache_depot_lock);
1861 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
1862 cp->cache_full.ml_min = cp->cache_full.ml_total;
1863 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
1864 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
1865 (void) mutex_unlock(&cp->cache_depot_lock);
1872 umem_depot_ws_reap(umem_cache_t *cp)
1877 ASSERT(cp->cache_next == NULL || IN_REAP());
1879 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
1880 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL)
1881 umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
1883 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
1884 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL)
1885 umem_magazine_destroy(cp, mp, 0);
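
umem_depot_ws_update and umem_depot_ws_reap (lines 1858-1885) implement working-set accounting for the depot: magazines that sat on a list untouched for an entire update interval are treated as excess and may be reaped. A minimal sketch of that bookkeeping, with illustrative field names:

typedef struct maglist {
	long	total;		/* magazines currently on the list */
	long	min;		/* fewest seen since the last ws_update */
	long	reaplimit;	/* how many the reaper may destroy */
} maglist_t;

static void
ws_update(maglist_t *mlp)
{
	mlp->reaplimit = mlp->min;	/* unused for a whole interval */
	mlp->min = mlp->total;		/* restart the running minimum */
}

/* callers drive min down as magazines are taken off the list, e.g.: */
static void
note_alloc(maglist_t *mlp)
{
	if (--mlp->total < mlp->min)
		mlp->min = mlp->total;
}
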
1902 * Allocate a constructed object from cache cp.
1906 _umem_cache_alloc(umem_cache_t *cp, int umflag)
1914 ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
1926 umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1927 if (umem_alloc_retry(cp, umflag)) {
1954 fmp = umem_depot_alloc(cp, &cp->cache_full);
1957 umem_depot_free(cp, &cp->cache_empty,
1975 buf = umem_slab_alloc(cp, umflag);
1978 if (cp == &umem_null_cache)
1980 if (umem_alloc_retry(cp, umflag)) {
1987 if (cp->cache_flags & UMF_BUFTAG) {
1991 if (umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1992 if (umem_alloc_retry(cp, umflag)) {
2005 if (cp->cache_constructor != NULL &&
2006 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) {
2007 atomic_add_64(&cp->cache_alloc_fail, 1);
2008 umem_slab_free(cp, buf);
2010 if (umem_alloc_retry(cp, umflag)) {
2020 * Free a constructed object to cache cp.
2024 _umem_cache_free(umem_cache_t *cp, void *buf)
2026 umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
2031 if (umem_cache_free_debug(cp, buf) == -1)
2065 emp = umem_depot_alloc(cp, &cp->cache_empty);
2068 umem_depot_free(cp, &cp->cache_full,
2080 mtp = cp->cache_magtype;
2103 umem_depot_free(cp, &cp->cache_empty, emp);
2121 if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL)
2122 cp->cache_destructor(buf, cp->cache_private);
2124 umem_slab_free(cp, buf);
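
_umem_cache_alloc and _umem_cache_free (lines 1906-2124) sit on top of a per-CPU magazine layer: the loaded magazine satisfies requests, the previous magazine acts as a one-deep victim cache, and only when both are exhausted does the code fall back to the depot and slab layers. A hedged sketch of the allocation side of that fast path, with locking and the depot exchange omitted and all sizes invented:

#include <stddef.h>

#define	MAGSIZE	16

typedef struct magazine {
	void	*rounds[MAGSIZE];
} magazine_t;

typedef struct cpu_cache {
	magazine_t	*loaded;	/* magazine we allocate from */
	int		nrounds;	/* objects left in loaded */
	magazine_t	*previous;	/* one-deep victim cache */
	int		prounds;
} cpu_cache_t;

static void *
fastpath_alloc(cpu_cache_t *ccp)
{
	if (ccp->nrounds > 0)
		return (ccp->loaded->rounds[--ccp->nrounds]);

	if (ccp->prounds > 0) {
		/* swap loaded and previous, then retry the pop */
		magazine_t *mp = ccp->loaded;
		int n = ccp->nrounds;

		ccp->loaded = ccp->previous;
		ccp->nrounds = ccp->prounds;
		ccp->previous = mp;
		ccp->prounds = n;
		return (ccp->loaded->rounds[--ccp->nrounds]);
	}
	return (NULL);		/* caller would go to the depot or slab layer */
}
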
2136 umem_cache_t *cp = umem_alloc_table[index];
2137 buf = _umem_cache_alloc(cp, umflag);
2139 if (cp->cache_flags & UMF_BUFTAG) {
2140 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2145 } else if (umem_alloc_retry(cp, umflag))
2163 umem_cache_t *cp = umem_alloc_table[index];
2164 buf = _umem_cache_alloc(cp, umflag);
2165 if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) {
2166 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2170 if (buf == NULL && umem_alloc_retry(cp, umflag))
2228 umem_cache_t *cp = umem_alloc_table[index];
2229 if (cp->cache_flags & UMF_BUFTAG) {
2230 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2234 umem_error(UMERR_DUPFREE, cp, buf);
2239 umem_error(UMERR_BADSIZE, cp, buf);
2241 umem_error(UMERR_REDZONE, cp, buf);
2246 umem_error(UMERR_REDZONE, cp, buf);
2251 _umem_cache_free(cp, buf);
2298 umem_cache_reap(umem_cache_t *cp)
2307 if (cp->cache_reclaim != NULL)
2308 cp->cache_reclaim(cp->cache_private);
2310 umem_depot_ws_reap(cp);
2319 umem_cache_magazine_purge(umem_cache_t *cp)
2325 ASSERT(cp->cache_next == NULL || IN_UPDATE());
2328 ccp = &cp->cache_cpu[cpu_seqid];
2343 umem_magazine_destroy(cp, mp, rounds);
2345 umem_magazine_destroy(cp, pmp, prounds);
2353 umem_depot_ws_update(cp);
2354 umem_depot_ws_update(cp);
2356 umem_depot_ws_reap(cp);
2363 umem_cache_magazine_enable(umem_cache_t *cp)
2367 if (cp->cache_flags & UMF_NOMAGAZINE)
2371 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2373 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
2392 umem_cache_magazine_resize(umem_cache_t *cp)
2394 umem_magtype_t *mtp = cp->cache_magtype;
2398 if (cp->cache_chunksize < mtp->mt_maxbuf) {
2399 umem_cache_magazine_purge(cp);
2400 (void) mutex_lock(&cp->cache_depot_lock);
2401 cp->cache_magtype = ++mtp;
2402 cp->cache_depot_contention_prev =
2403 cp->cache_depot_contention + INT_MAX;
2404 (void) mutex_unlock(&cp->cache_depot_lock);
2405 umem_cache_magazine_enable(cp);
2414 umem_hash_rescale(umem_cache_t *cp)
2422 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
2423 old_size = cp->cache_hash_mask + 1;
2434 (void) mutex_lock(&cp->cache_lock);
2436 old_size = cp->cache_hash_mask + 1;
2437 old_table = cp->cache_hash_table;
2439 cp->cache_hash_mask = new_size - 1;
2440 cp->cache_hash_table = new_table;
2441 cp->cache_rescale++;
2448 umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
2455 (void) mutex_unlock(&cp->cache_lock);
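
umem_hash_rescale (lines 2414-2455) resizes the bufctl hash table so it stays proportional to cache_buftotal; line 2422 picks a power of two between roughly 0.75x and 1.5x the buffer count. A sketch of that sizing rule with a local stand-in for highbit():

#include <stdio.h>
#include <stddef.h>

static int
highbit(unsigned long v)	/* position of the highest set bit, 1-based */
{
	int b = 0;

	while (v != 0) {
		b++;
		v >>= 1;
	}
	return (b);
}

static size_t
hash_size_for(size_t buftotal)
{
	return ((size_t)1 << (highbit(3 * buftotal + 4) - 2));
}

int
main(void)
{
	/* prints 64 1024: table sizes for 64 and 1000 buffers */
	printf("%zu %zu\n", hash_size_for(64), hash_size_for(1000));
	return (0);
}
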
2465 umem_cache_update(umem_cache_t *cp)
2475 (void) mutex_lock(&cp->cache_lock);
2477 if ((cp->cache_flags & UMF_HASH) &&
2478 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
2479 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
2480 cp->cache_hash_mask > UMEM_HASH_INITIAL)))
2483 (void) mutex_unlock(&cp->cache_lock);
2488 umem_depot_ws_update(cp);
2494 (void) mutex_lock(&cp->cache_depot_lock);
2496 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
2497 (int)(cp->cache_depot_contention -
2498 cp->cache_depot_contention_prev) > umem_depot_contention)
2501 cp->cache_depot_contention_prev = cp->cache_depot_contention;
2503 (void) mutex_unlock(&cp->cache_depot_lock);
2506 umem_add_update(cp, update_flags);
2521 umem_cache_t *cp = umem_null_cache.cache_unext;
2523 cp->cache_uprev->cache_unext = cp->cache_unext;
2524 cp->cache_unext->cache_uprev = cp->cache_uprev;
2525 cp->cache_uprev = cp->cache_unext = NULL;
2527 ASSERT(!(cp->cache_uflags & UMU_ACTIVE));
2529 while (cp->cache_uflags) {
2530 int uflags = (cp->cache_uflags |= UMU_ACTIVE);
2539 umem_hash_rescale(cp);
2542 umem_cache_magazine_resize(cp);
2545 umem_cache_reap(cp);
2552 if (cp->cache_uflags & UMU_NOTIFY) {
2556 cp->cache_uflags &= ~uflags;
2657 umem_cache_t *cp, *cnext, *cprev;
2711 * Get a umem_cache structure. We arrange that cp->cache_cpu[]
2715 cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase,
2718 if (cp == NULL) {
2723 bzero(cp, csize);
2729 cp->cache_flags = umem_flags | (cflags & UMF_DEBUG);
2735 if (cp->cache_flags & UMF_LITE) {
2739 cp->cache_flags |= UMF_BUFTAG;
2740 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2742 cp->cache_flags &= ~UMF_DEBUG;
2746 if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT))
2747 cp->cache_flags |= UMF_NOMAGAZINE;
2750 cp->cache_flags &= ~UMF_DEBUG;
2753 cp->cache_flags &= ~UMF_TOUCH;
2756 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2759 cp->cache_flags |= UMF_NOMAGAZINE;
2761 if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH))
2762 cp->cache_flags |= UMF_REDZONE;
2764 if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall &&
2765 !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH))
2766 cp->cache_flags |= UMF_FIREWALL;
2769 cp->cache_flags &= ~UMF_FIREWALL;
2771 if (cp->cache_flags & UMF_FIREWALL) {
2772 cp->cache_flags &= ~UMF_BUFTAG;
2773 cp->cache_flags |= UMF_NOMAGAZINE;
2781 (void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
2782 cp->cache_bufsize = bufsize;
2783 cp->cache_align = align;
2784 cp->cache_constructor = constructor;
2785 cp->cache_destructor = destructor;
2786 cp->cache_reclaim = reclaim;
2787 cp->cache_private = private;
2788 cp->cache_arena = vmp;
2789 cp->cache_cflags = cflags;
2790 cp->cache_cpu_mask = umem_cpu_mask;
2799 cp->cache_bufctl = chunksize - UMEM_ALIGN;
2802 if (cp->cache_flags & UMF_BUFTAG) {
2803 cp->cache_bufctl = chunksize;
2804 cp->cache_buftag = chunksize;
2808 if (cp->cache_flags & UMF_DEADBEEF) {
2809 cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
2810 if (cp->cache_flags & UMF_LITE)
2811 cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
2814 cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);
2816 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
2827 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
2828 cp->cache_mincolor = cp->cache_slabsize - chunksize;
2829 cp->cache_maxcolor = cp->cache_mincolor;
2830 cp->cache_flags |= UMF_HASH;
2831 ASSERT(!(cp->cache_flags & UMF_BUFTAG));
2833 !(cp->cache_flags & UMF_AUDIT) &&
2835 cp->cache_slabsize = vmp->vm_quantum;
2836 cp->cache_mincolor = 0;
2837 cp->cache_maxcolor =
2838 (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;
2840 if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
2844 ASSERT(!(cp->cache_flags & UMF_AUDIT));
2868 cp->cache_slabsize = bestfit;
2869 cp->cache_mincolor = 0;
2870 cp->cache_maxcolor = bestfit % chunksize;
2871 cp->cache_flags |= UMF_HASH;
2874 if (cp->cache_flags & UMF_HASH) {
2876 cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
2880 if (cp->cache_maxcolor >= vmp->vm_quantum)
2881 cp->cache_maxcolor = vmp->vm_quantum - 1;
2883 cp->cache_color = cp->cache_mincolor;
2888 (void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);
2890 cp->cache_freelist = &cp->cache_nullslab;
2891 cp->cache_nullslab.slab_cache = cp;
2892 cp->cache_nullslab.slab_refcnt = -1;
2893 cp->cache_nullslab.slab_next = &cp->cache_nullslab;
2894 cp->cache_nullslab.slab_prev = &cp->cache_nullslab;
2896 if (cp->cache_flags & UMF_HASH) {
2897 cp->cache_hash_table = vmem_alloc(umem_hash_arena,
2899 if (cp->cache_hash_table == NULL) {
2903 bzero(cp->cache_hash_table,
2905 cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
2906 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
2912 (void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);
2917 cp->cache_magtype = mtp;
2923 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2925 ccp->cc_flags = cp->cache_flags;
2935 cp->cache_next = cnext = &umem_null_cache;
2936 cp->cache_prev = cprev = umem_null_cache.cache_prev;
2937 cnext->cache_prev = cp;
2938 cprev->cache_next = cp;
2942 umem_cache_magazine_enable(cp);
2944 return (cp);
2947 (void) mutex_destroy(&cp->cache_lock);
2949 vmem_xfree(umem_cache_arena, cp, csize);
2954 umem_cache_destroy(umem_cache_t *cp)
2964 cp->cache_prev->cache_next = cp->cache_next;
2965 cp->cache_next->cache_prev = cp->cache_prev;
2966 cp->cache_prev = cp->cache_next = NULL;
2969 umem_remove_updates(cp);
2971 umem_cache_magazine_purge(cp);
2973 (void) mutex_lock(&cp->cache_lock);
2974 if (cp->cache_buftotal != 0)
2976 cp->cache_name, (void *)cp);
2977 cp->cache_reclaim = NULL;
2983 cp->cache_constructor = (umem_constructor_t *)1;
2984 cp->cache_destructor = (umem_destructor_t *)2;
2985 (void) mutex_unlock(&cp->cache_lock);
2987 if (cp->cache_hash_table != NULL)
2988 vmem_free(umem_hash_arena, cp->cache_hash_table,
2989 (cp->cache_hash_mask + 1) * sizeof (void *));
2992 (void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
2994 (void) mutex_destroy(&cp->cache_depot_lock);
2995 (void) mutex_destroy(&cp->cache_lock);
2997 vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
3093 umem_cache_t *cp;
3096 cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
3097 _umem_cache_free(cp, buf);
3105 umem_cache_t *cp;
3207 cp = umem_cache_create(name, cache_size, align,
3209 if (cp == NULL)
3212 umem_alloc_caches[i] = cp;
3236 cp = umem_alloc_caches[i];
3239 umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
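
The umem_alloc_table references above (e.g. lines 3096 and 3239) map a request size to a fixed-size cache: every multiple of the alignment grain gets a table slot, and (size - 1) >> UMEM_ALIGN_SHIFT selects the smallest cache that fits. A sketch of that index computation, assuming an 8-byte grain; the table contents themselves are not reproduced here:

#include <stdio.h>
#include <stddef.h>

#define	ALIGN_SHIFT	3			/* assumed 8-byte grain */

static size_t
table_index(size_t size)
{
	return ((size - 1) >> ALIGN_SHIFT);	/* sizes 1..8 -> 0, 9..16 -> 1, ... */
}

int
main(void)
{
	/* prints 0 0 1: requests of 1 and 8 bytes share a cache, 9 does not */
	printf("%zu %zu %zu\n",
	    table_index(1), table_index(8), table_index(9));
	return (0);
}
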