Lines Matching refs:buf

576 * object_move(void *buf, void *newbuf, size_t size, void *arg)
578 * object_t *op = buf, *np = newbuf;
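
Lines 576-578 come from the block comment introducing the cache move callback: given the old and new locations, the client relocates its object and reports the outcome. A minimal sketch of such a callback, assuming a hypothetical object_t with a pin count (the KMEM_CBRC_* return codes are the real contract; the object layout and field names are illustrative):

#include <string.h>

/* Return codes a move callback reports back to the allocator. */
typedef enum kmem_cbrc {
        KMEM_CBRC_YES,          /* object moved to newbuf */
        KMEM_CBRC_NO,           /* object is pinned; cannot move now */
        KMEM_CBRC_LATER,        /* busy; worth retrying later */
        KMEM_CBRC_DONT_NEED,    /* object is about to be freed */
        KMEM_CBRC_DONT_KNOW     /* cannot tell whether buf is live */
} kmem_cbrc_t;

/* Hypothetical client object; only the pin count matters here. */
typedef struct object {
        int     o_refcnt;
        char    o_data[64];
} object_t;

static kmem_cbrc_t
object_move(void *buf, void *newbuf, size_t size, void *arg)
{
        object_t *op = buf, *np = newbuf;

        (void) arg;
        if (op->o_refcnt != 0)
                return (KMEM_CBRC_NO);  /* in use; decline, don't block */
        (void) memcpy(np, op, size);    /* relocate the object */
        return (KMEM_CBRC_YES);
}

The multi-valued return is the point of the design: a callback can decline (KMEM_CBRC_NO, KMEM_CBRC_LATER) instead of blocking the defrag machinery on a client lock.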
1159 #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */
1184 uint64_t *buf = buf_arg;
1186 while (buf < bufend)
1187 *buf++ = pattern;
1194 uint64_t *buf;
1196 for (buf = buf_arg; buf < bufend; buf++)
1197 if (*buf != pattern)
1198 return (buf);
1206 uint64_t *buf;
1208 for (buf = buf_arg; buf < bufend; buf++) {
1209 if (*buf != old) {
1211 (char *)buf - (char *)buf_arg);
1212 return (buf);
1214 *buf = new;
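
The three helpers excerpted above (source lines 1184-1214) are kmem's debug-pattern kit: copy_pattern stamps a 64-bit pattern over a buffer, verify_pattern returns the first word that no longer matches, and verify_and_copy_pattern checks one pattern while installing another in a single pass. A user-space sketch, assuming size is a multiple of sizeof (uint64_t), as it is for cache_verify:

#include <stdint.h>
#include <stddef.h>

/* Stamp a 64-bit pattern across [buf_arg, buf_arg + size). */
static void
copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
        uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
        uint64_t *buf = buf_arg;

        while (buf < bufend)
                *buf++ = pattern;
}

/* Return the address of the first non-matching word, or NULL. */
static void *
verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
        uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
        uint64_t *buf;

        for (buf = buf_arg; buf < bufend; buf++)
                if (*buf != pattern)
                        return (buf);
        return (NULL);
}

/* Verify 'old' and install 'new' in one pass. */
static void *
verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
{
        uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
        uint64_t *buf;

        for (buf = buf_arg; buf < bufend; buf++) {
                if (*buf != old)
                        return (buf);   /* corruption: caller reports offset */
                *buf = new;
        }
        return (NULL);
}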
1259 kmem_findslab(kmem_cache_t *cp, void *buf)
1266 if (KMEM_SLAB_MEMBER(sp, buf)) {
1273 if (KMEM_SLAB_MEMBER(sp, buf)) {
1291 void *buf = bufarg;
1297 sp = kmem_findslab(cp, buf);
1301 if ((sp = kmem_findslab(cp, buf)) != NULL)
1313 buf = (char *)bufarg - ((uintptr_t)bufarg -
1315 if (buf != bufarg)
1318 btp = KMEM_BUFTAG(cp, buf);
1321 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1322 if (bcp->bc_addr == buf)
1329 bcp->bc_addr != buf) {
1338 kmem_panic_info.kmp_realbuf = buf;
1350 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1352 off = buf;
1355 (uintptr_t)off - (uintptr_t)buf,
1396 bufarg, buf);
1410 printf("previous transaction on buffer %p:\n", buf);
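
Most of kmem_error (source lines 1291-1410) is diagnosis: find the slab, recover the real buffer address, and decide which invariant broke. The address recovery at line 1313 is plain modulo arithmetic: the caller's pointer, minus its offset within the chunk, gives the chunk start. A sketch with hypothetical names:

#include <stdint.h>
#include <stddef.h>

/*
 * Hypothetical helper mirroring line 1313: round an arbitrary
 * address inside a slab down to the start of its chunk.
 */
static void *
chunk_base(void *slab_base, void *bufarg, size_t chunksize)
{
        uintptr_t off = (uintptr_t)bufarg - (uintptr_t)slab_base;

        return ((char *)bufarg - off % chunksize);
}

If the recovered address differs from bufarg (line 1315), the caller passed a pointer into the middle of a buffer, and the diagnostic reports both addresses (line 1396).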
1533 char *buf, *slab;
1574 sp->slab_base = buf = slab + color;
1592 bcp->bc_addr = buf;
1595 bcp = KMEM_BUFCTL(cp, buf);
1598 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1603 copy_pattern(KMEM_FREE_PATTERN, buf,
1609 buf += chunksize;
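
Source lines 1574-1609 carve a fresh slab: starting at slab + color, the loop strides through in cache_chunksize steps, wires a bufctl to each chunk, and (under KMF_DEADBEEF, line 1603) prefills the free pattern. A simplified sketch, with a stripped-down bufctl_t and a caller-supplied bufctl array standing in for the real bufctl cache:

#include <stddef.h>

typedef struct bufctl {
        struct bufctl   *bc_next;       /* freelist linkage */
        void            *bc_addr;       /* chunk this bufctl describes */
} bufctl_t;

/* Thread every chunk of the slab onto a LIFO freelist; return its head. */
static bufctl_t *
slab_carve(char *slab, size_t slabsize, size_t color, size_t chunksize,
    bufctl_t *ctls)
{
        char *buf = slab + color;
        bufctl_t *head = NULL;
        size_t i = 0;

        while (buf + chunksize <= slab + slabsize) {
                bufctl_t *bcp = &ctls[i++];

                bcp->bc_addr = buf;
                bcp->bc_next = head;
                head = bcp;
                buf += chunksize;
        }
        return (head);
}

The color offset is what staggers buffer start addresses from slab to slab, so buffers from different slabs do not all land on the same cache lines.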
1663 void *buf;
1687 buf = bcp->bc_addr;
1688 hash_bucket = KMEM_HASH(cp, buf);
1695 buf = KMEM_BUF(cp, bcp);
1698 ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1713 return (buf);
1725 return (buf);
1730 return (buf);
1738 return (buf);
1748 void *buf;
1771 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1785 cp->cache_constructor(buf, cp->cache_private,
1787 cp->cache_destructor(buf, cp->cache_private);
1789 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1792 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1796 return (buf);
1805 kmem_slab_free(kmem_cache_t *cp, void *buf)
1810 ASSERT(buf != NULL);
1819 prev_bcpp = KMEM_HASH(cp, buf);
1821 if (bcp->bc_addr == buf) {
1830 bcp = KMEM_BUFCTL(cp, buf);
1831 sp = KMEM_SLAB(cp, buf);
1834 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1836 kmem_error(KMERR_BADADDR, cp, buf);
1840 if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1848 kmem_slab_move_yes(cp, sp, buf);
1854 kmem_log_enter(kmem_content_log, buf,
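
kmem_slab_free (source lines 1805-1854) locates the bufctl one of two ways: hashed caches walk the hash bucket and unlink the entry (lines 1819-1827), while unhashed caches compute it directly from the buffer address via KMEM_BUFCTL (line 1830). The bucket unlink, sketched with the stripped-down bufctl_t from the slab sketch above:

/*
 * Walk the hash bucket for 'buf'; unlink and return its bufctl,
 * or NULL if the address was never allocated from this cache.
 */
static bufctl_t *
hash_remove(bufctl_t **prev_bcpp, void *buf)
{
        bufctl_t *bcp;

        while ((bcp = *prev_bcpp) != NULL) {
                if (bcp->bc_addr == buf) {
                        *prev_bcpp = bcp->bc_next;      /* unlink */
                        return (bcp);
                }
                prev_bcpp = &bcp->bc_next;
        }
        return (NULL);
}

A NULL result feeds the KMERR_BADADDR report at line 1836: the address was never an allocated buffer of this cache.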
1947 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1950 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1955 kmem_error(KMERR_BADBUFTAG, cp, buf);
1961 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1962 kmem_error(KMERR_BADBUFCTL, cp, buf);
1968 if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1969 kmem_error(KMERR_MODIFIED, cp, buf);
1973 *(uint64_t *)buf = btp->bt_redzone;
1975 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1979 KMEM_UNINITIALIZED_PATTERN, buf,
1981 kmem_error(KMERR_MODIFIED, cp, buf);
1993 cp->cache_destructor(buf, cp->cache_private);
1999 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
2003 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2004 kmem_slab_free(cp, buf);
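
The allocation-side verification (source lines 1968-1981) relies on the free-buffer invariant: while a buffer sits free, the whole verified region carries the free pattern. On allocation, kmem checks the pattern and installs the uninitialized pattern in one pass; any stray word means something wrote to the buffer after it was freed. A sketch reusing verify_and_copy_pattern from above (the pattern values are the historical kmem ones, shown for illustration):

#define KMEM_FREE_PATTERN               0xdeadbeefdeadbeefULL
#define KMEM_UNINITIALIZED_PATTERN      0xbaddcafebaddcafeULL

/* Return 0 if clean; -1 maps to kmem_error(KMERR_MODIFIED, ...). */
static int
alloc_debug_verify(void *buf, size_t verify_size)
{
        if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
            KMEM_UNINITIALIZED_PATTERN, buf, verify_size) != NULL)
                return (-1);    /* modified while free: use-after-free */
        return (0);
}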
2021 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
2023 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2029 kmem_error(KMERR_DUPFREE, cp, buf);
2032 sp = kmem_findslab(cp, buf);
2034 kmem_error(KMERR_BADADDR, cp, buf);
2036 kmem_error(KMERR_REDZONE, cp, buf);
2042 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2043 kmem_error(KMERR_BADBUFCTL, cp, buf);
2048 kmem_error(KMERR_REDZONE, cp, buf);
2055 buf, cp->cache_contents);
2066 btp->bt_redzone = *(uint64_t *)buf;
2068 cp->cache_destructor(buf, cp->cache_private);
2070 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
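
The free side (source lines 2066-2070) is the mirror image, with one subtlety: the buffer's first word is saved into the buftag's bt_redzone slot before the free pattern lands, so the lightweight KMF_LITE alloc path can later put that word back (line 1973) instead of reconstructing the object. A sketch of the save/restore pair, reusing copy_pattern and KMEM_FREE_PATTERN from the sketches above:

typedef struct buftag {
        uint64_t        bt_redzone;     /* holds word 0 of the buf while free */
} buftag_t;

/* On free: preserve the first word, then stamp the free pattern. */
static void
free_stamp(buftag_t *btp, void *buf, size_t verify_size)
{
        btp->bt_redzone = *(uint64_t *)buf;
        copy_pattern(KMEM_FREE_PATTERN, buf, verify_size);
}

/* On the next alloc (KMF_LITE-style path): restore the first word. */
static void
alloc_restore(const buftag_t *btp, void *buf)
{
        *(uint64_t *)buf = btp->bt_redzone;
}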
2088 void *buf = mp->mag_round[round];
2091 if (verify_pattern(KMEM_FREE_PATTERN, buf,
2093 kmem_error(KMERR_MODIFIED, cp, buf);
2098 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2099 *(uint64_t *)buf = btp->bt_redzone;
2100 cp->cache_destructor(buf, cp->cache_private);
2101 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2104 cp->cache_destructor(buf, cp->cache_private);
2107 kmem_slab_free(cp, buf);
2257 /* append to each buf created in the pre-reserved heap */
2262 #define KMEM_DUMPCTL(cp, buf) \
2263 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
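
KMEM_DUMPCTL (source lines 2262-2263) locates the per-buffer dump control block: it lives immediately past the buffer, rounded up to pointer alignment. P2ROUNDUP is the standard power-of-two round-up from sys/sysmacros.h. A self-contained sketch:

#include <stdint.h>
#include <stddef.h>

/* Power-of-two round-up, as in sys/sysmacros.h. */
#define P2ROUNDUP(x, align)     (-(-(x) & -(align)))

typedef struct kmem_dumpctl {
        void    *kdc_next;      /* dump-time freelist linkage */
} kmem_dumpctl_t;

/* Stand-in for KMEM_DUMPCTL(cp, buf), taking bufsize directly. */
static kmem_dumpctl_t *
dumpctl(void *buf, size_t bufsize)
{
        return ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)buf + bufsize,
            sizeof (void *)));
}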
2379 kmem_dump_finish(char *buf, size_t size)
2389 char *e = buf + size;
2390 char *p = buf;
2428 return (p - buf);
2437 void *buf;
2442 if ((buf = cp->cache_dumpfreelist) != NULL) {
2443 cp->cache_dumpfreelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2445 return (buf);
2450 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2451 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2455 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2457 bufend += page - (char *)buf;
2458 buf = (void *)page;
2477 cp->cache_constructor(buf, cp->cache_private, kmflag)
2493 return (buf);
2500 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2503 if ((char *)buf >= (char *)kmem_dump_start &&
2504 (char *)buf < (char *)kmem_dump_end) {
2505 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dumpfreelist;
2506 cp->cache_dumpfreelist = buf;
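
kmem_cache_alloc_dump and kmem_cache_free_dump (source lines 2437-2506) implement crash-dump-time allocation without touching the slab layer: frees push the buffer onto an intrusive LIFO threaded through the dumpctl blocks, allocations pop from it, and an empty freelist falls back to bump allocation out of the pre-reserved heap (lines 2450-2458). The freelist pair, sketched with dumpctl() from above:

/* Pop a previously freed dump buffer; NULL means bump-allocate instead. */
static void *
dump_alloc(void **freelist, size_t bufsize)
{
        void *buf = *freelist;

        if (buf != NULL)
                *freelist = dumpctl(buf, bufsize)->kdc_next;
        return (buf);
}

/* Push a dump buffer; the linkage lives inside its dumpctl block. */
static void
dump_free(void **freelist, void *buf, size_t bufsize)
{
        dumpctl(buf, bufsize)->kdc_next = *freelist;
        *freelist = buf;
}

Only addresses inside the dump region are handled this way (lines 2503-2504); anything else falls through to the normal free path (line 2774) and is counted (line 2511).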
2511 /* count all non-dump buf frees */
2530 void *buf;
2539 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2549 kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2557 return (buf);
2579 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2582 return (buf);
2618 buf = kmem_slab_alloc(cp, kmflag);
2620 if (buf == NULL)
2627 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2640 return (buf);
2644 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2646 kmem_slab_free(cp, buf);
2650 return (buf);
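
The hot path of kmem_cache_alloc (source line 2533) is a magazine pop: the per-CPU loaded magazine is just an array of cached buffer pointers plus a round count, so allocation is a decrement and a load. A stripped-down sketch of the pop/push pair (a simplification: in real kmem the count lives in the per-CPU cache as cc_rounds, lines 2533 and 2793, and empty/full magazines are exchanged with the depot):

#include <stddef.h>

#define MAG_SIZE        16      /* rounds per magazine (illustrative) */

typedef struct magazine {
        int     mag_rounds;             /* valid entries in mag_round[] */
        void    *mag_round[MAG_SIZE];   /* cached, constructed buffers */
} magazine_t;

/* Allocation fast path: pop the most recently freed buffer. */
static void *
mag_pop(magazine_t *mp)
{
        if (mp->mag_rounds == 0)
                return (NULL);          /* fall back to depot/slab layer */
        return (mp->mag_round[--mp->mag_rounds]);
}

/* Free fast path: push the buffer if the magazine has room. */
static int
mag_push(magazine_t *mp, void *buf)
{
        if (mp->mag_rounds == MAG_SIZE)
                return (-1);            /* swap with an empty magazine */
        mp->mag_round[mp->mag_rounds++] = buf;
        return (0);
}

LIFO order is deliberate: the buffer most recently freed on this CPU is the one most likely to still be cache-hot.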
2660 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2663 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2673 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2674 *(uint64_t *)buf = btp->bt_redzone;
2675 cp->cache_destructor(buf, cp->cache_private);
2676 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2678 cp->cache_destructor(buf, cp->cache_private);
2682 kmem_slab_free(cp, buf);
2756 kmem_cache_free(kmem_cache_t *cp, void *buf)
2766 (buf != cp->cache_defrag->kmd_from_buf &&
2767 buf != cp->cache_defrag->kmd_to_buf));
2774 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2778 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2793 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2824 kmem_slab_free_constructed(cp, buf, B_TRUE);
2861 void *buf = KMEM_BUF(cp, head);
2869 buf;
2924 void *buf;
2928 buf = kmem_cache_alloc(cp, kmflag);
2929 if (buf != NULL) {
2931 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2932 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2940 bzero(buf, size);
2943 buf = kmem_alloc(size, kmflag);
2944 if (buf != NULL)
2945 bzero(buf, size);
2947 return (buf);
2955 void *buf;
2970 buf = vmem_alloc(kmem_oversize_arena, size,
2972 if (buf == NULL)
2981 return (buf);
2984 buf = kmem_cache_alloc(cp, kmflag);
2985 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2986 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2987 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2994 return (buf);
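
kmem_alloc (source line 2955 onward) and kmem_free pick the backing cache by table lookup: the request size, minus one and shifted, indexes an array of size-class caches, and anything beyond the largest class falls through to the oversize vmem arena (line 2970). A sketch of the lookup (the shift and table size here are illustrative, not the real tunables):

#include <stddef.h>

#define ALIGN_SHIFT     3       /* 8-byte size classes (illustrative) */
#define ALLOC_TABLE_MAX 128     /* classes up to 1 KiB (illustrative) */

static void *alloc_table[ALLOC_TABLE_MAX];      /* cache per size class */

/* Map a request size to its cache, or NULL for the oversize path. */
static void *
size_to_cache(size_t size)
{
        size_t index = (size - 1) >> ALIGN_SHIFT;

        if (size == 0 || index >= ALLOC_TABLE_MAX)
                return (NULL);          /* handled by the oversize arena */
        return (alloc_table[index]);
}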
2998 kmem_free(void *buf, size_t size)
3013 EQUIV(buf == NULL, size == 0);
3014 if (buf == NULL && size == 0)
3016 vmem_free(kmem_oversize_arena, buf, size);
3021 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
3024 if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
3025 kmem_error(KMERR_DUPFREE, cp, buf);
3030 kmem_error(KMERR_BADSIZE, cp, buf);
3032 kmem_error(KMERR_REDZONE, cp, buf);
3036 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
3037 kmem_error(KMERR_REDZONE, cp, buf);
3046 kmem_cache_free(cp, buf);
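
For sized allocations the buftag machinery ends with a one-byte redzone: kmem_zalloc and kmem_alloc stamp the byte just past the requested size (lines 2932, 2987), and kmem_free tests it (line 3036), catching off-by-one overruns that the 64-bit patterns would round away. A sketch (0xbb follows the historical kmem redzone byte; treat the value as illustrative):

#include <stdint.h>
#include <stddef.h>

#define KMEM_REDZONE_BYTE       0xbb

/* Stamp the guard byte just past the caller-visible size. */
static void
redzone_set(void *buf, size_t size)
{
        ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
}

/* Return 0 if intact, -1 on an overrun (kmem raises KMERR_REDZONE). */
static int
redzone_check(const void *buf, size_t size)
{
        return (((const uint8_t *)buf)[size] == KMEM_REDZONE_BYTE ? 0 : -1);
}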
4046 kmem_move_cmp(const void *buf, const void *p)
4049 uintptr_t v1 = (uintptr_t)buf;
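
kmem_move_cmp (source lines 4046-4049) is the comparator for the kmd_moves_pending AVL tree (line 5036), which keeps pending move callbacks ordered by buffer address; AVL comparators must return exactly -1, 0, or 1. Sketched against a minimal move record:

#include <stdint.h>

typedef struct kmem_move {
        void    *kmm_from_buf;  /* buffer being moved */
} kmem_move_t;

static int
kmem_move_cmp(const void *buf, const void *p)
{
        const kmem_move_t *kmm = p;
        uintptr_t v1 = (uintptr_t)buf;
        uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;

        if (v1 < v2)
                return (-1);
        if (v1 > v2)
                return (1);
        return (0);
}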
4608 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4613 ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4616 for (bcp = *KMEM_HASH(cp, buf);
4617 (bcp != NULL) && (bcp->bc_addr != buf);
4626 sp = KMEM_SLAB(cp, buf);
4628 bufbcp = KMEM_BUFCTL(cp, buf);
4681 kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf,
4687 if (buf == m->mag_round[i]) {
4693 return (buf);
4706 kmem_hunt_mags(kmem_cache_t *cp, void *buf)
4725 if (tbuf == buf) {
4728 (void) kmem_cache_free_debug(cp, buf, caller());
4730 return (buf);
4737 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4739 return (buf);
4751 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4753 return (buf);
4757 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4759 return (buf);
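
kmem_hunt_mag (source line 4681) scans a magazine's rounds for one specific buffer; on a hit the round is replaced with a spare buffer (tbuf, allocated beforehand) so the magazine stays full while the hunted buffer drops out of circulation. The inner loop, sketched:

/* Scan n rounds for buf; swap in tbuf when found. */
static void *
hunt_mag(void **rounds, int n, void *buf, void *tbuf)
{
        int i;

        for (i = 0; i < n; i++) {
                if (rounds[i] == buf) {
                        rounds[i] = tbuf;       /* keep the magazine full */
                        return (buf);
                }
        }
        return (NULL);
}

kmem_hunt_mags (line 4706) repeats this over the loaded, previously loaded, and depot magazines, which is why the same call appears three times (lines 4737-4757).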
5006 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
5024 callback->kmm_from_buf = buf;
5036 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
5133 void *buf;
5182 for (j = 0, b = 0, buf = sp->slab_base;
5184 buf = (((char *)buf) + cp->cache_chunksize), j++) {
5186 if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5212 success = kmem_move_begin(cp, sp, buf, flags);
5357 void *buf = args->kmna_buf;
5365 sp = kmem_slab_allocated(cp, NULL, buf);
5386 kmem_slab_move_yes(cp, sp, buf);
5391 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5418 kmem_slab_move_yes(cp, sp, buf);
5424 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5432 args->kmna_buf = buf;