umem.c revision d7dba7e519e96f726807ca55f6a17fef3f90092f
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright 2011 Joyent, Inc. All rights reserved.
*/
#include "umem.h"
#include <sys/vmem_impl_user.h>
#include <umem_impl.h>
#include <alloca.h>
#include <limits.h>
#include <mdb/mdb_whatis.h>
#include "misc.h"
#include "leaky.h"
#include "dist.h"
#include "umem_pagesize.h"
#define UM_ALLOCATED 0x1
#define UM_FREE 0x2
#define UM_BUFCTL 0x4
#define UM_HASH 0x8
int umem_ready;
static int umem_stack_depth_warned;
static uint32_t umem_max_ncpus;
#define UMEM_READVAR(var) \
	(umem_readvar(&(var), #var) == -1 && \
	(mdb_warn("failed to read "#var), 1))
int
umem_update_variables(void)
{
/*
* Figure out which type of umem is being used; if it's not there
* yet, succeed quietly.
*/
if (umem_set_standalone() == -1) {
umem_ready = 0;
return (0); /* umem not there yet */
}
/*
* Solaris 9 used a different name for umem_max_ncpus. It's
* cheap backwards compatibility to check for both names.
*/
mdb_warn("unable to read umem_max_ncpus or max_ncpus");
return (-1);
}
if (UMEM_READVAR(umem_ready))
return (-1);
if (UMEM_READVAR(umem_stack_depth))
return (-1);
if (UMEM_READVAR(pagesize))
return (-1);
if (umem_stack_depth > UMEM_MAX_STACK_DEPTH) {
if (umem_stack_depth_warned == 0) {
mdb_warn("umem_stack_depth corrupted (%d > %d)\n",
}
umem_stack_depth = 0;
}
return (0);
}
/*ARGSUSED*/
static int
{
mdb_walker_t w;
char descr[64];
"walk the %s cache", c->cache_name);
w.walk_name = c->cache_name;
w.walk_descr = descr;
w.walk_init = umem_walk_init;
w.walk_step = umem_walk_step;
w.walk_fini = umem_walk_fini;
w.walk_init_arg = (void *)addr;
if (mdb_add_walker(&w) == -1)
return (WALK_NEXT);
}
/*ARGSUSED*/
static void
umem_statechange_cb(void *arg)
{
static int been_ready = 0;
#ifndef _KMDB
leaky_cleanup(1);	/* umem state changes invalidate leaky state */
#endif
if (umem_update_variables() == -1)
return;
if (been_ready)
return;
if (umem_ready != UMEM_READY)
return;
been_ready = 1;
}
int
umem_abort_messages(void)
{
char *umem_error_buffer;
if (UMEM_READVAR(umem_error_begin))
return (DCMD_ERR);
mdb_warn("unable to look up umem_error_buffer");
return (DCMD_ERR);
}
!= bufsize) {
mdb_warn("unable to read umem_error_buffer");
return (DCMD_ERR);
}
/* put a zero after the end of the buffer to simplify printing */
umem_error_buffer[bufsize] = 0;
if ((umem_error_begin % bufsize) == 0)
mdb_printf("%s\n", umem_error_buffer);
else {
mdb_printf("%s%s\n",
    &umem_error_buffer[umem_error_begin % bufsize],
    umem_error_buffer);
}
return (DCMD_OK);
}
static void
{
return;
mdb_warn("\nunable to read umem_%s_log pointer %p",
return;
}
else if (size % 1024 == 0)
else
}
typedef struct umem_debug_flags {
const char *udf_name;
{ "audit", UMF_AUDIT },
{ "nosignal", UMF_CHECKSIGNAL },
{ "firewall", UMF_FIREWALL },
{ "lite", UMF_LITE },
{ NULL }
};
/*ARGSUSED*/
int
{
int umem_logging;
mdb_printf("Status:\t\t%s\n",
umem_ready == 0 ? "not loaded into address space" :
"unknown (umem_ready invalid)");
if (umem_ready == 0)
return (DCMD_OK);
if (UMEM_READVAR(umem_logging))
goto err;
goto err;
if (UMEM_READVAR(umem_content_log))
goto err;
if (UMEM_READVAR(umem_failure_log))
goto err;
if (UMEM_READVAR(umem_slab_log))
goto err;
mdb_printf("Logs:\t\t");
if (!umem_logging)
mdb_printf("(inactive)");
mdb_printf("\n");
mdb_printf("Message buffer:\n");
return (umem_abort_messages());
err:
mdb_printf("Message buffer:\n");
(void) umem_abort_messages();
return (DCMD_ERR);
}
typedef struct {
int
{
umem_cache_t c;
mdb_warn("couldn't find umem_null_cache");
return (WALK_ERR);
}
return (WALK_ERR);
}
return (WALK_NEXT);
}
int
{
umem_cache_t c;
int status;
return (WALK_DONE);
}
return (WALK_DONE);
return (status);
}
void
{
}
typedef struct {
int
{
mdb_warn("failed to read 'umem_cpus'");
return (WALK_ERR);
}
ucw->ucw_current = 0;
return (WALK_NEXT);
}
int
{
return (WALK_DONE);
return (WALK_ERR);
}
ucw->ucw_current++;
}
void
{
}
int
{
mdb_warn("umem_cpu_cache doesn't support global walks");
return (WALK_ERR);
}
mdb_warn("couldn't walk 'umem_cpu'");
return (WALK_ERR);
}
return (WALK_NEXT);
}
int
{
return (WALK_ERR);
}
}
int
{
umem_cache_t c;
mdb_warn("umem_slab doesn't support global walks\n");
return (WALK_ERR);
}
return (WALK_ERR);
}
return (WALK_NEXT);
}
int
{
umem_cache_t c;
mdb_warn("umem_slab_partial doesn't support global walks\n");
return (WALK_ERR);
}
return (WALK_ERR);
}
/*
* Some consumers (umem_walk_step(), in particular) require at
* least one callback if there are any buffers in the cache. So
* if there are *no* partial slabs, report the last full slab, if
* any.
*
* Yes, this is ugly, but it's cleaner than the other possibilities.
*/
return (WALK_NEXT);
}
int
{
umem_slab_t s;
return (WALK_DONE);
return (WALK_ERR);
}
mdb_warn("slab %p isn't in cache %p (in cache %p)\n",
return (WALK_ERR);
}
}
int
{
umem_cache_t c;
if (!(flags & DCMD_ADDRSPEC)) {
mdb_warn("can't walk umem_cache");
return (DCMD_ERR);
}
return (DCMD_OK);
}
if (DCMD_HDRSPEC(flags))
"FLAG", "CFLAG", "BUFSIZE", "BUFTOTL");
return (DCMD_ERR);
}
return (DCMD_OK);
}
static int
{
return (-1);
return (1);
return (0);
}
static int
{
return (-1);
return (1);
return (0);
}
typedef struct umem_hash_walk {
int
{
umem_cache_t c;
mdb_warn("umem_hash doesn't support global walks\n");
return (WALK_ERR);
}
return (WALK_ERR);
}
if (!(c.cache_flags & UMF_HASH)) {
return (WALK_DONE); /* nothing to do */
}
return (WALK_ERR);
}
return (WALK_NEXT);
}
int
{
break;
}
}
return (WALK_DONE);
return (WALK_ERR);
}
}
void
{
return;
}
/*
* Find the address of the bufctl structure for the address 'buf' in cache
* 'cp', which is at address caddr, and place it in *out.
*/
static int
{
mdb_warn("unable to read hash bucket for %p in cache %p",
return (-1);
}
return (-1);
}
return (0);
}
}
return (-1);
}
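/*
 * Editorial sketch (not part of the original module): the lookup above
 * boils down to hashing 'buf' into cp->cache_hash_table and chasing
 * bc_next links, reading each umem_bufctl_t out of the target until one
 * whose bc_addr matches. UMEM_HASH and the bufctl fields follow
 * umem_impl.h; treat this as an illustration, not the module's code.
 */
static int
umem_hash_lookup_sketch(umem_cache_t *cp, void *buf, uintptr_t *out)
{
	uintptr_t bcp;
	umem_bufctl_t bc;

	/* UMEM_HASH() yields the target address of the hash bucket */
	if (mdb_vread(&bcp, sizeof (bcp),
	    (uintptr_t)UMEM_HASH(cp, buf)) == -1)
		return (-1);		/* bucket unreadable */

	while (bcp != 0) {
		if (mdb_vread(&bc, sizeof (bc), bcp) == -1)
			return (-1);	/* chain unreadable */
		if (bc.bc_addr == buf) {
			*out = bcp;	/* found buf's bufctl */
			return (0);
		}
		bcp = (uintptr_t)bc.bc_next;
	}
	return (-1);			/* buf is not in this cache */
}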
int
{
int res;
/*
* if cpu 0 has a non-zero magsize, it must be correct. caches
* with UMF_NOMAGAZINE have disabled their magazine layers, so
* it is okay to return 0 for them.
*/
return (res);
mdb_warn("unable to read 'umem_magtype'");
mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
return (0);
}
return (0);
}
return (mt.mt_magsize);
}
/*ARGSUSED*/
static int
{
return (WALK_NEXT);
}
/*
* Returns an upper bound on the number of allocated buffers in a given
* cache.
*/
{
int magsize;
(void) mdb_pwalk("umem_slab_partial",
} else {
mdb_warn("cache %p's magazine layer holds more buffers "
"than the slab layer.\n", addr);
}
}
return (cache_est);
}
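/*
 * Editorial sketch of the estimate above: the upper bound starts from
 * everything the slab layer has handed out (cache_buftotal) and deducts
 * whatever the magazine walk proves is sitting free in magazines. The
 * 'magbuffers' parameter stands in for that count; illustrative only.
 */
static long
umem_est_sketch(const umem_cache_t *cp, long magbuffers)
{
	long est = (long)cp->cache_buftotal - magbuffers;

	/* a negative estimate means the magazine count is stale */
	return (est >= 0 ? est : (long)cp->cache_buftotal);
}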
#define READMAG_ROUNDS(rounds) { \
goto fail; \
} \
for (i = 0; i < rounds; i++) { \
mdb_warn("%d magazines exceeds fudge factor\n", \
magcnt); \
goto fail; \
} \
} \
}
int
{
int i, cpu;
/*
* Read the magtype out of the cache, after verifying the pointer's
* correctness.
*/
if (magsize == 0) {
*magcntp = 0;
*magmaxp = 0;
return (WALK_NEXT);
}
/*
* There are several places where we need to go buffer hunting:
* the per-CPU loaded magazine, the per-CPU spare full magazine,
* and the full magazine list in the depot.
*
* For an upper bound on the number of buffers in the magazine
* layer, we have the number of magazines on the cache_full
* list plus at most two magazines per CPU (the loaded and the
* spare). Toss in 100 magazines as a fudge factor in case this
* is live (the number "100" comes from the same fudge factor in
* crash(1M)).
*/
mdb_warn("magazine size for cache %p unreasonable (%x)\n",
return (WALK_ERR);
}
goto fail;
/*
* First up: the magazines in the depot (i.e. on the cache_full list).
*/
break; /* cache_full list loop detected */
}
dprintf(("cache_full list done\n"));
/*
* Now whip through the CPUs, snagging the loaded magazines
* and full spares.
*/
dprintf(("reading cpu cache %p\n",
}
if (ccp->cc_prounds > 0 &&
dprintf(("reading %d previously loaded rounds\n",
ccp->cc_prounds));
}
}
if (!(alloc_flags & UM_GC))
return (WALK_NEXT);
fail:
if (!(alloc_flags & UM_GC)) {
if (mp)
if (maglist)
}
return (WALK_ERR);
}
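/*
 * Editorial sketch of the bound computed above: the walk can never see
 * more magazines than the depot's full list plus two per CPU (the
 * loaded and the spare), padded by the 100-magazine fudge factor for
 * live targets; each magazine holds at most 'magsize' rounds.
 */
static size_t
umem_magazine_bound_sketch(size_t depot_full, size_t ncpus, size_t magsize)
{
	return ((depot_full + 2 * ncpus + 100) * magsize);
}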
static int
{
}
static int
{
/*
* if UMF_AUDIT is not set, we know that we're looking at a
* umem_bufctl_t.
*/
(void) memset(b, 0, UMEM_BUFCTL_AUDIT_SIZE);
return (WALK_ERR);
}
}
}
typedef struct umem_walk {
int umw_type;
/*
* magazine layer
*/
void **umw_maglist;
/*
* slab layer
*/
char *umw_valid; /* to keep track of freed buffers */
char *umw_ubase; /* buffer for slab data */
} umem_walk_t;
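/*
 * Editorial sketch of how umw_valid is used by umem_walk_step() below:
 * every chunk starts out marked allocated, the slab freelist "punches
 * out" entries, and whatever stays marked is reported as allocated.
 * The index array here is hypothetical; the real walk derives chunk
 * indices from bufctl addresses.
 */
static void
umem_valid_map_sketch(char *valid, int chunks, const int *free_ndx, int nfree)
{
	int i;

	(void) memset(valid, 1, chunks);	/* assume all allocated */
	for (i = 0; i < nfree; i++)
		if (free_ndx[i] >= 0 && free_ndx[i] < chunks)
			valid[free_ndx[i]] = 0;	/* on the freelist */
}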
static int
{
int csize;
const char *layered;
mdb_warn("umem walk doesn't support global walks\n");
return (WALK_ERR);
}
/*
* The number of "cpus" determines how large the cache is.
*/
goto out2;
}
/*
* It's easy for someone to hand us an invalid cache address.
* Unfortunately, it is hard for this walker to survive an
* invalid cache cleanly. So we make sure that:
*
* 1. the vmem arena for the cache is readable,
* 2. the vmem arena's quantum is a power of 2,
* 3. our slabsize is a multiple of the quantum, and
* 4. our chunksize is >0 and less than our slabsize.
*/
vm_quantum == 0 ||
cp->cache_chunksize == 0 ||
goto out2;
}
if (cp->cache_buftotal == 0) {
return (WALK_DONE);
}
/*
* If they ask for bufctls, but it's a small-slab cache,
* there is nothing to report.
*/
dprintf(("bufctl requested, not UMF_HASH (flags: %p)\n",
cp->cache_flags));
return (WALK_DONE);
}
/*
* Read in the contents of the magazine layer
*/
goto out2;
/*
* We have all of the buffers from the magazines; if we are walking
* allocated buffers, sort them so we can bsearch them later.
*/
if (type & UM_ALLOCATED)
/*
* When walking allocated buffers in a UMF_HASH cache, we walk the
* hash table instead of the slab layer.
*/
layered = "umem_hash";
} else {
/*
* If we are walking freed buffers, we only need the
* magazine layer plus the partially allocated slabs.
* To walk allocated buffers, we need all of the slabs.
*/
if (type & UM_ALLOCATED)
layered = "umem_slab";
else
layered = "umem_slab_partial";
/*
* for small-slab caches, we read in the entire slab. For
* freed buffers, we can just walk the freelist. For
* allocated buffers, we use a 'valid' array to track
* the freed buffers.
*/
sizeof (umem_bufctl_t), UM_SLEEP);
if (type & UM_ALLOCATED)
}
}
}
out1:
sizeof (umem_bufctl_t));
if (umw->umw_maglist)
sizeof (uintptr_t));
}
out2:
return (status);
}
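/*
 * Editorial sketch of the magazine-membership test set up above: the
 * maglist is sorted once (qsort with the module's pointer comparator,
 * assumed here to be the truncated 'addrcmp' defined earlier), after
 * which each candidate buffer costs a single bsearch() before being
 * reported as allocated.
 */
static int
umem_in_magazines_sketch(void **maglist, size_t magcnt, void *buf)
{
	/*
	 * assumes the caller already did, once:
	 *	qsort(maglist, magcnt, sizeof (void *), addrcmp);
	 */
	return (bsearch(&buf, maglist, magcnt, sizeof (void *),
	    addrcmp) != NULL);
}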
int
{
const umem_slab_t *sp;
const umem_bufctl_t *bcp;
int chunks;
char *kbase;
void *buf;
int i, ret;
/*
* first, handle the 'umem_hash' layered walk case
*/
/*
* We have a buffer which has been allocated out of the
* global layer. We need to make sure that it's not
* actually sitting in a magazine before we report it as
* an allocated buffer.
*/
if (magcnt > 0 &&
return (WALK_NEXT);
}
/*
* If we're walking freed buffers, report everything in the
* magazine layer before processing the first slab.
*/
for (i = 0; i < magcnt; i++) {
/* LINTED - alignment */
mdb_warn("reading buftag for "
continue;
}
} else {
&out) == -1)
continue;
}
} else {
}
return (ret);
}
}
/*
* Handle the buffers in the current slab
*/
return (WALK_ERR);
}
/*
* Set up the valid map as fully allocated -- we'll punch
* out the freelist.
*/
if (type & UM_ALLOCATED)
} else {
}
/*
* walk the slab's freelist
*/
/*
* since we could be in the middle of allocating a buffer,
* our refcnt could be one higher than it ought to be. So we
* check one further on the freelist than the count allows.
*/
if (i == chunks)
break;
"slab %p in cache %p freelist too short by %d\n",
break;
}
mdb_warn("failed to read bufctl ptr at %p",
bcp);
break;
}
} else {
/*
* Otherwise the buffer is (or should be) in the slab
* that we've read in; determine its offset in the
* slab, validate that it's not corrupt, and add to
* our base address to find the umem_bufctl_t. (Note
* that we don't need to add the size of the bufctl
* to our offset calculation because of the slop that's
* allocated for the buffer at ubase.)
*/
mdb_warn("found corrupt bufctl ptr %p"
" in slab %p in cache %p\n", bcp,
break;
}
}
/*
* This is very wrong; we have managed to find
* a buffer in the slab which shouldn't
* actually be here. Emit a warning, and
* try to continue.
*/
mdb_warn("buf %p is out of range for "
} else if (type & UM_ALLOCATED) {
/*
* we have found a buffer on the slab's freelist;
* clear its entry
*/
} else {
/*
* Report this freed buffer
*/
} else {
}
return (ret);
}
}
dprintf(("slab %p in cache %p freelist too long (%p)\n",
}
/*
* If we are walking freed buffers, the loop above handled reporting
* them.
*/
return (WALK_NEXT);
mdb_warn("impossible situation: small-slab UM_BUFCTL walk for "
"cache %p\n", addr);
return (WALK_ERR);
}
/*
* Report allocated buffers, skipping buffers in the magazine layer.
* We only get this far for small-slab caches.
*/
if (!valid[i])
continue; /* on slab freelist */
if (magcnt > 0 &&
continue; /* in magazine layer */
}
return (ret);
}
void
{
return;
}
/*ARGSUSED*/
static int
{
/*
* Buffers allocated from NOTOUCH caches can also show up as freed
* memory in other caches. This can be a little confusing, so we
* don't walk NOTOUCH caches when walking all caches (thereby assuring
* that "::walk umem" and "::walk freemem" yield disjoint output).
*/
if (c->cache_cflags & UMC_NOTOUCH)
return (WALK_NEXT);
return (WALK_DONE);
return (WALK_NEXT);
}
return (WALK_ERR); \
return (WALK_DONE); \
}
int
{
}
int
{
}
int
{
}
int
{
}
typedef struct bufctl_history_walk {
void *bhw_next;
int
{
mdb_warn("bufctl_history walk doesn't support global walks\n");
return (WALK_ERR);
}
return (WALK_ERR);
}
bhw->bhw_timestamp = 0;
/*
* sometimes the first log entry matches the base bufctl; in that
* case, skip the base bufctl.
*/
else
return (WALK_NEXT);
}
int
{
return (WALK_DONE);
return (WALK_ERR);
}
/*
* The bufctl is only valid if the address, cache, and slab are
* correct. We also check that the timestamp is decreasing, to
* prevent infinite loops.
*/
return (WALK_DONE);
}
void
{
}
typedef struct umem_log_walk {
int
{
int maxndx, i, j, k;
/*
* By default (global walk), walk the umem_transaction_log. Otherwise
* read the log whose umem_log_header_t is stored at walk_addr.
*/
mdb_warn("failed to read 'umem_transaction_log'");
return (WALK_ERR);
}
mdb_warn("log is disabled\n");
return (WALK_ERR);
}
return (WALK_ERR);
}
return (WALK_ERR);
}
sizeof (umem_bufctl_audit_t *), UM_SLEEP);
for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
for (j = 0; j < maxndx; j++) {
/* LINTED align */
}
}
(int(*)(const void *, const void *))bufctlcmp);
ulw->ulw_maxndx = k;
return (WALK_NEXT);
}
int
{
return (WALK_DONE);
}
void
{
sizeof (umem_bufctl_audit_t *));
}
typedef struct allocdby_bufctl {
typedef struct allocdby_walk {
const char *abw_walk;
int
{
return (WALK_NEXT);
}
return (WALK_NEXT);
}
/*ARGSUSED*/
int
{
return (WALK_DONE);
}
return (WALK_NEXT);
}
static int
{
return (1);
return (-1);
return (0);
}
static int
{
mdb_warn("allocdby walk doesn't support global walks\n");
return (WALK_ERR);
}
if (mdb_walk("umem_cache",
mdb_warn("couldn't walk umem_cache");
return (WALK_ERR);
}
(int(*)(const void *, const void *))allocdby_cmp);
return (WALK_NEXT);
}
int
{
}
int
{
}
int
{
return (WALK_DONE);
return (WALK_DONE);
}
}
void
{
}
/*ARGSUSED*/
int
{
char c[MDB_SYM_NAMLEN];
int i;
continue;
if (is_umem_sym(c, "umem_"))
continue;
mdb_printf("%s+0x%lx",
break;
}
mdb_printf("\n");
return (WALK_NEXT);
}
static int
{
if (!(flags & DCMD_ADDRSPEC))
return (DCMD_USAGE);
return (DCMD_ERR);
}
return (DCMD_OK);
}
/*ARGSUSED*/
int
{
}
/*ARGSUSED*/
int
{
}
typedef struct whatis_info {
const umem_cache_t *wi_cache;
int wi_slab_found;
/* call one of our dcmd functions with "-v" and the provided address */
static void
{
mdb_arg_t a;
a.a_type = MDB_TYPE_STRING;
mdb_printf(":\n");
}
static void
{
if (baddr != 0 && !call_printer)
mdb_printf("%s from %s",
if (call_printer && baddr != 0) {
return;
}
mdb_printf("\n");
}
/*ARGSUSED*/
static int
{
return (WHATIS_WALKRET(w));
}
/*ARGSUSED*/
static int
{
return (WHATIS_WALKRET(w));
}
static int
{
/* We're not interested in anything but alloc and free segments */
return (WALK_NEXT);
/*
* If we're not printing it separately, provide the vmem_seg
* pointer if it has a stack trace.
*/
if ((mdb_whatis_flags(w) & WHATIS_QUIET) &&
((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0 ||
}
mdb_printf("%s from %s vmem arena",
if (!(mdb_whatis_flags(w) & WHATIS_QUIET))
else
mdb_printf("\n");
}
return (WHATIS_WALKRET(w));
}
static int
{
if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
if (mdb_pwalk("vmem_seg",
return (WALK_NEXT);
}
return (WHATIS_WALKRET(w));
}
/*ARGSUSED*/
static int
{
/* It must overlap with the slab data, or it's not interesting */
if (mdb_whatis_overlaps(w,
wi->wi_slab_found++;
return (WALK_DONE);
}
return (WALK_NEXT);
}
static int
{
int do_bufctl;
/* Override the '-b' flag as necessary */
if (!(c->cache_flags & UMF_HASH))
else if (c->cache_flags & UMF_AUDIT)
else
if (do_bufctl) {
walk = "bufctl";
freewalk = "freectl";
} else {
walk = "umem";
freewalk = "freemem";
}
if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
/*
* If more than two buffers live on each slab, figure out if we're
* interested in anything in any slab before doing the more expensive
* slab and bufctl walks.
*/
if (!(c->cache_flags & UMF_HASH))
wi->wi_slab_found = 0;
addr) == -1) {
mdb_warn("can't find umem_slab walker");
return (WALK_DONE);
}
if (wi->wi_slab_found == 0)
return (WALK_NEXT);
}
return (WALK_DONE);
}
if (mdb_whatis_done(w))
return (WALK_DONE);
/*
* We have searched for allocated memory; now search for freed memory.
*/
if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
return (WALK_DONE);
}
return (WHATIS_WALKRET(w));
}
static int
{
(c->cache_cflags & UMC_NOTOUCH))
return (WALK_NEXT);
}
static int
{
return (WALK_NEXT);
}
static int
{
!(c->cache_cflags & UMC_NOTOUCH))
return (WALK_NEXT);
}
/*ARGSUSED*/
static int
{
/* umem's metadata is allocated from the umem_internal_arena */
mdb_warn("unable to readvar \"umem_internal_arena\"");
/*
* We process umem caches in the following order:
*
* non-UMC_NOTOUCH, non-metadata (typically the most interesting)
* metadata (can be huge with UMF_AUDIT)
* UMC_NOTOUCH, non-metadata (see umem_walk_all())
*/
&wi) == -1 ||
&wi) == -1 ||
&wi) == -1) {
mdb_warn("couldn't find umem_cache walker");
return (1);
}
return (0);
}
/*ARGSUSED*/
static int
{
if (mdb_walk("vmem_postfix",
mdb_warn("couldn't find vmem_postfix walker");
return (1);
}
return (0);
}
int
umem_init(void)
{
mdb_walker_t w = {
};
if (mdb_add_walker(&w) == -1) {
mdb_warn("failed to add umem_cache walker");
return (-1);
}
if (umem_update_variables() == -1)
return (-1);
/* install a callback so that our variables are always up-to-date */
/*
* Register our ::whatis callbacks.
*/
return (0);
}
typedef struct umem_log_cpu {
int
{
int i;
for (i = 0; i < umem_max_ncpus; i++) {
break;
}
if (i == umem_max_ncpus)
mdb_printf(" ");
else
mdb_printf("%3d", i);
b->bc_timestamp, b->bc_thread);
return (WALK_NEXT);
}
/*ARGSUSED*/
int
{
int i;
mdb_warn("failed to read 'umem_transaction_log'");
return (DCMD_ERR);
}
mdb_warn("no umem transaction log\n");
return (DCMD_ERR);
}
return (DCMD_ERR);
}
for (i = 0; i < umem_max_ncpus; i++) {
mdb_warn("cannot read cpu %d's log header at %p",
i, clhp);
return (DCMD_ERR);
}
clhp += sizeof (umem_cpu_log_header_t);
}
if (DCMD_HDRSPEC(flags)) {
"BUFADDR", "TIMESTAMP", "THREAD");
}
/*
* If we have been passed an address, we'll just print out that
* log entry.
*/
if (flags & DCMD_ADDRSPEC) {
return (DCMD_ERR);
}
return (DCMD_OK);
}
mdb_warn("can't find umem log walker");
return (DCMD_ERR);
}
return (DCMD_OK);
}
typedef struct bufctl_history_cb {
int bhc_flags;
int bhc_argc;
int bhc_ret;
/*ARGSUSED*/
static int
{
}
void
bufctl_help(void)
{
mdb_printf("%s\n",
"Display the contents of umem_bufctl_audit_ts, with optional filtering.\n");
mdb_dec_indent(2);
mdb_printf("%<b>OPTIONS%</b>\n");
mdb_inc_indent(2);
mdb_printf("%s",
" -v Display the full content of the bufctl, including its stack trace\n"
" -h retrieve the bufctl's transaction history, if available\n"
" -a addr\n"
" filter out bufctls not involving the buffer at addr\n"
" -c caller\n"
" -e earliest\n"
" filter out bufctls timestamped before earliest\n"
" -l latest\n"
" filter out bufctls timestamped after latest\n"
" -t thread\n"
" filter out bufctls not involving thread\n");
}
int
{
int i, depth;
char c[MDB_SYM_NAMLEN];
return (DCMD_USAGE);
if (!(flags & DCMD_ADDRSPEC))
return (DCMD_USAGE);
if (in_history && !history)
return (DCMD_USAGE);
if (history && !in_history) {
for (i = 0; i < argc; i++)
/*
* When in history mode, we treat each element as if it
* were in a separate loop, so that the headers group
* bufctls with similar histories.
*/
addr) == -1) {
mdb_warn("unable to walk bufctl_history");
return (DCMD_ERR);
}
mdb_printf("\n");
}
if (verbose) {
mdb_printf("%16s %16s %16s %16s\n"
"%<u>%16s %16s %16s %16s%</u>\n",
"ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
"", "CACHE", "LASTLOG", "CONTENTS");
} else {
mdb_printf("%<u>%-?s %-?s %-12s %5s %s%</u>\n",
"ADDR", "BUFADDR", "TIMESTAMP", "THRD", "CALLER");
}
}
return (DCMD_ERR);
}
/*
* Guard against bogus bc_depth in case the bufctl is corrupt or
* the address does not really refer to a bufctl.
*/
/*
* We were provided an exact symbol value; any
* address in the function is valid.
*/
}
for (i = 0; i < depth; i++)
break;
if (i == depth)
return (DCMD_OK);
}
return (DCMD_OK);
return (DCMD_OK);
return (DCMD_OK);
return (DCMD_OK);
if (flags & DCMD_PIPE_OUT) {
return (DCMD_OK);
}
if (verbose) {
"%<b>%16p%</b> %16p %16llx %16d\n"
"%16s %16p %16p %16p\n",
mdb_inc_indent(17);
for (i = 0; i < depth; i++)
mdb_dec_indent(17);
mdb_printf("\n");
} else {
for (i = 0; i < depth; i++) {
continue;
if (is_umem_sym(c, "umem_"))
continue;
break;
}
if (i >= depth)
mdb_printf("\n");
}
return (DCMD_OK);
}
/*ARGSUSED*/
int
{
mdb_arg_t a;
if (!(flags & DCMD_ADDRSPEC))
return (DCMD_USAGE);
if (argc != 0)
return (DCMD_USAGE);
a.a_type = MDB_TYPE_STRING;
}
typedef struct umem_verify {
int umv_corruption; /* > 0 if corruption found. */
int umv_besilent; /* if set, suppress per-buffer corruption reports */
/*
* verify_pattern()
* verify that buf is filled with the pattern pat.
*/
static int64_t
verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
{
/*LINTED*/
uint64_t *buf, *bufend = (uint64_t *)((char *)buf_arg + size);
for (buf = buf_arg; buf < bufend; buf++)
if (*buf != pat)
return ((uintptr_t)buf - (uintptr_t)buf_arg);
return (-1);
}
/*
* verify_buftag()
* verify that btp->bt_bxstat == (bcp ^ pat)
*/
static int
{
}
/*
* verify_free()
* verify the integrity of a free block of memory by checking
* that it is filled with 0xdeadbeef and that its buftag is sane.
*/
/*ARGSUSED1*/
static int
{
/*LINTED*/
/*
* Read the buffer to check.
*/
if (!besilent)
return (WALK_NEXT);
}
UMEM_FREE_PATTERN)) >= 0) {
if (!besilent)
mdb_printf("buffer %p (free) seems corrupted, at %p\n",
goto corrupt;
}
if (!besilent)
mdb_printf("buffer %p (free) seems to "
"have a corrupt redzone pattern\n", addr);
goto corrupt;
}
/*
* confirm bufctl pointer integrity.
*/
if (!besilent)
mdb_printf("buffer %p (free) has a corrupt "
"buftag\n", addr);
goto corrupt;
}
return (WALK_NEXT);
umv->umv_corruption++;
return (WALK_NEXT);
}
/*
* verify_alloc()
* Verify that the buftag of an allocated buffer makes sense with respect
* to the buffer.
*/
/*ARGSUSED1*/
static int
{
/*LINTED*/
/*
* Read the buffer to check.
*/
if (!besilent)
return (WALK_NEXT);
}
/*
* There are two cases to handle:
* 1. If the buf was alloc'd using umem_cache_alloc, it will have
* 0xfeedfacefeedface at the end of it
* 2. If the buf was alloc'd using umem_alloc, it will have
* 0xbb just past the end of the region in use. At the buftag,
* it will have 0xfeedface (or, if the whole buffer is in use,
* 0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on
* endianness), followed by 32 bits containing the offset of the
* 0xbb byte in the buffer.
*
* Finally, the two 32-bit words that comprise the second half of the
* buftag should xor to UMEM_BUFTAG_ALLOC
*/
looks_ok = 1;
size_ok = 0;
looks_ok = 1;
else
size_ok = 0;
if (!size_ok) {
if (!besilent)
mdb_printf("buffer %p (allocated) has a corrupt "
"redzone size encoding\n", addr);
goto corrupt;
}
if (!looks_ok) {
if (!besilent)
mdb_printf("buffer %p (allocated) has a corrupt "
"redzone signature\n", addr);
goto corrupt;
}
if (!besilent)
mdb_printf("buffer %p (allocated) has a "
"corrupt buftag\n", addr);
goto corrupt;
}
return (WALK_NEXT);
umv->umv_corruption++;
return (WALK_NEXT);
}
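/*
 * Editorial sketch of the redzone size check described above: per
 * umem_impl.h, the 32-bit word following the 0xfeedface pattern is
 * assumed to encode the 0xbb byte's offset as 251 * offset + 1, so
 * validity and the offset fall out of simple modular arithmetic.
 * Illustrative only.
 */
static int
redzone_size_sketch(uint32_t encoded, uint32_t *offp)
{
	if (encoded % 251 != 1 || encoded / 251 == 0)
		return (0);		/* not a valid encoding */
	*offp = encoded / 251;		/* offset of the 0xbb byte */
	return (1);
}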
/*ARGSUSED2*/
int
{
if (flags & DCMD_ADDRSPEC) {
int check_alloc = 0, check_free = 0;
addr) == -1) {
return (DCMD_ERR);
}
sizeof (umem_buftag_t);
umv.umv_corruption = 0;
check_alloc = 1;
check_free = 1;
} else {
mdb_warn("cache %p (%s) does not have "
"redzone checking enabled\n", addr,
}
return (DCMD_ERR);
}
/*
* table mode, don't print out every corrupt buffer
*/
} else {
mdb_printf("Summary for cache '%s'\n",
mdb_inc_indent(2);
umv.umv_besilent = 0;
}
if (check_alloc)
if (check_free)
if (umv.umv_corruption == 0) {
mdb_printf("%-*s %?p clean\n",
} else {
char *s = ""; /* optional s in "buffer[s]" */
s = "s";
mdb_printf("%-*s %?p %d corrupt buffer%s\n",
umv.umv_corruption, s);
}
} else {
/*
* This is the more verbose mode, when the user has
* typed addr::umem_verify. If the cache was clean,
* nothing will have yet been printed. So say something.
*/
if (umv.umv_corruption == 0)
mdb_printf("clean\n");
mdb_dec_indent(2);
}
} else {
/*
* If the user didn't specify a cache to verify, we'll walk all
* umem_cache's, specifying ourself as a callback for each...
* this is the equivalent of '::walk umem_cache .::umem_verify'
*/
"Cache Name", "Addr", "Cache Integrity");
}
return (DCMD_OK);
}
typedef struct vmem_node {
struct vmem_node *vn_sibling;
struct vmem_node *vn_children;
int vn_marked;
} vmem_node_t;
typedef struct vmem_walk {
} vmem_walk_t;
int
{
mdb_warn("couldn't read 'vmem_list'");
return (WALK_ERR);
}
goto err;
}
}
continue;
}
continue;
break;
}
mdb_warn("couldn't find %p's parent (%p)\n",
goto err;
}
}
else
return (WALK_NEXT);
err:
}
return (WALK_ERR);
}
int
{
int rval;
return (WALK_DONE);
return (rval);
}
do {
return (rval);
}
/*
* The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
* children are visited before their parent. We perform the postfix walk
* iteratively (rather than recursively) to allow mdb to regain control
* after each callback.
*/
int
{
int rval;
/*
* If this node is marked, then we know that we have already visited
* all of its children. If the node has any siblings, they need to
* be visited next; otherwise, we need to visit the parent. Note
* that vp->vn_marked will only be zero on the first invocation of
* the step function.
*/
else {
/*
* We have neither a parent, nor a sibling, and we
* have already been visited; we're done.
*/
return (WALK_DONE);
}
}
/*
* Before we visit this node, visit its children.
*/
return (rval);
}
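/*
 * Editorial sketch of the postfix successor logic above, assuming the
 * vmem_node_t carries a vn_parent back-link (elided from the struct as
 * shown): after a node is reported, its next sibling's subtree is
 * descended into; with no sibling left, the parent itself comes next.
 * vn_marked records that a node's children have already been visited.
 */
static vmem_node_t *
vmem_postfix_next_sketch(vmem_node_t *vp)
{
	if (vp->vn_marked) {
		if (vp->vn_sibling == NULL)
			return (vp->vn_parent);	/* children, siblings done */
		vp = vp->vn_sibling;
	}
	while (vp->vn_children != NULL) {	/* dive to the deepest child */
		vp->vn_marked = 1;
		vp = vp->vn_children;
	}
	vp->vn_marked = 1;
	return (vp);				/* next node to visit */
}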
void
{
int done;
return;
if (done) {
} else {
}
}
typedef struct vmem_seg_walk {
/*ARGSUSED*/
int
{
return (WALK_ERR);
}
return (WALK_NEXT);
}
/*
* vmem segments can't have type 0 (this should be added to vmem_impl.h).
*/
#define VMEM_NONE 0
int
{
}
int
{
}
int
{
}
int
{
}
int
{
int rval;
if (!seg_size) {
mdb_warn("failed to read 'vmem_seg_size'");
seg_size = sizeof (vmem_seg_t);
}
}
return (WALK_ERR);
}
} else {
}
return (WALK_DONE);
return (rval);
}
void
{
}
#define VMEM_NAMEWIDTH 22
int
{
int ident = 0;
char c[VMEM_NAMEWIDTH];
if (!(flags & DCMD_ADDRSPEC)) {
mdb_warn("can't walk vmem");
return (DCMD_ERR);
}
return (DCMD_OK);
}
if (DCMD_HDRSPEC(flags))
mdb_printf("%-?s %-*s %10s %12s %9s %5s\n",
"TOTAL", "SUCCEED", "FAIL");
return (DCMD_ERR);
}
ident = 0;
break;
}
}
mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n",
addr, VMEM_NAMEWIDTH, c,
return (DCMD_OK);
}
void
vmem_seg_help(void)
{
mdb_printf("%s\n",
"Display the contents of vmem_seg_ts, with optional filtering.\n"
"\n"
"A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n"
"representing a single chunk of data. Only ALLOC segments have debugging\n"
"information.\n");
mdb_dec_indent(2);
mdb_printf("%<b>OPTIONS%</b>\n");
mdb_inc_indent(2);
mdb_printf("%s",
" -v Display the full content of the vmem_seg, including its stack trace\n"
" -s report the size of the segment, instead of the end address\n"
" -c caller\n"
" -e earliest\n"
" filter out segments timestamped before earliest\n"
" -l latest\n"
" filter out segments timestamped after latest\n"
" -m minsize\n"
" filer out segments smaller than minsize\n"
" -M maxsize\n"
" filer out segments larger than maxsize\n"
" -t thread\n"
" filter out segments not involving thread\n"
" -T type\n"
" filter out segments not of type 'type'\n"
}
/*ARGSUSED*/
int
{
uint8_t t;
char c[MDB_SYM_NAMLEN];
int no_debug;
int i;
int depth;
if (!(flags & DCMD_ADDRSPEC))
return (DCMD_USAGE);
return (DCMD_USAGE);
if (verbose) {
mdb_printf("%16s %4s %16s %16s %16s\n"
"%<u>%16s %4s %16s %16s %16s%</u>\n",
"ADDR", "TYPE", "START", "END", "SIZE",
"", "", "THREAD", "TIMESTAMP", "");
} else {
}
}
return (DCMD_ERR);
}
t = VMEM_ALLOC;
t = VMEM_FREE;
t = VMEM_SPAN;
t = VMEM_ROTOR;
t = VMEM_WALKER;
else {
mdb_warn("\"%s\" is not a recognized vmem_seg type\n",
type);
return (DCMD_ERR);
}
return (DCMD_OK);
}
return (DCMD_OK);
return (DCMD_OK);
/*
* debug info, when present, is only accurate for VMEM_ALLOC segments
*/
no_debug = (t != VMEM_ALLOC) ||
if (no_debug) {
latest != 0)
return (DCMD_OK); /* not enough info */
} else {
sizeof (c), &sym) != -1 &&
/*
* We were provided an exact symbol value; any
* address in the function is valid.
*/
}
for (i = 0; i < depth; i++)
break;
if (i == depth)
return (DCMD_OK);
}
return (DCMD_OK);
return (DCMD_OK);
return (DCMD_OK);
}
t == VMEM_FREE ? "FREE" :
t == VMEM_SPAN ? "SPAN" :
t == VMEM_ROTOR ? "ROTR" :
t == VMEM_WALKER ? "WLKR" :
"????");
if (flags & DCMD_PIPE_OUT) {
return (DCMD_OK);
}
if (verbose) {
mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n",
if (no_debug)
return (DCMD_OK);
mdb_printf("%16s %4s %16d %16llx\n",
mdb_inc_indent(17);
for (i = 0; i < depth; i++) {
}
mdb_dec_indent(17);
mdb_printf("\n");
} else {
if (no_debug) {
mdb_printf("\n");
return (DCMD_OK);
}
for (i = 0; i < depth; i++) {
c, sizeof (c), &sym) == -1)
continue;
if (is_umem_sym(c, "vmem_"))
continue;
break;
}
}
return (DCMD_OK);
}
/*ARGSUSED*/
static int
{
int i, depth;
if (bcp->bc_timestamp == 0)
return (WALK_DONE);
if (*newest == 0)
mdb_printf("\nT-%lld.%09lld addr=%p %s\n",
for (i = 0; i < depth; i++)
return (WALK_NEXT);
}
int
{
const char *logname = "umem_transaction_log";
return (DCMD_USAGE);
if (argc > 0) {
return (DCMD_USAGE);
logname = "umem_failure_log";
logname = "umem_slab_log";
else
return (DCMD_USAGE);
}
mdb_warn("failed to read %s log header pointer");
return (DCMD_ERR);
}
mdb_warn("failed to walk umem log");
return (DCMD_ERR);
}
return (DCMD_OK);
}
/*
* As the final lure for die-hard crash(1M) users, we provide ::umausers here.
* The first piece is a structure which we use to accumulate umem_cache_t
* addresses of interest. The umc_add is used as a callback for the umem_cache
* walker; we either add all caches, or ones named explicitly as arguments.
*/
typedef struct umclist {
const char *umc_name; /* Name to match (or NULL) */
int umc_nelems; /* Num entries in umc_caches */
int umc_size; /* Size of umc_caches array */
} umclist_t;
static int
{
void *p;
int s;
/*
* If we have a match, grow our array (if necessary), and then
* add the virtual address of the matching cache to our list.
*/
umc->umc_caches = p;
}
}
return (WALK_NEXT);
}
/*
* The second piece of ::umausers is a hash table of allocations. Each
* allocation owner is identified by its stack trace and data_size. We then
* track the total bytes of all such allocations, and the number of allocations
* to report at the end. Once we have a list of caches, we walk through the
* allocated bufctls of each, and update our hash table accordingly.
*/
typedef struct umowner {
int umo_depth; /* Depth of stack trace */
} umowner_t;
typedef struct umusers {
int umu_nelems; /* Number of entries in use */
int umu_size; /* Total number of entries */
} umusers_t;
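/*
 * Editorial sketch of the owner signature described above: an
 * allocation's identity is its data size folded together with every PC
 * in its stack trace; reducing the sum modulo the table size picks the
 * hash bucket. Names and parameters here are stand-ins, not the
 * module's actual fields.
 */
static size_t
umo_bucket_sketch(size_t data_size, const uintptr_t *stack, int depth,
    size_t table_size)
{
	size_t sig = data_size;
	int i;

	for (i = 0; i < depth; i++)
		sig += stack[i];	/* fold in each return address */
	return (sig % table_size);
}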
static void
{
/*
* If the hash table is full, double its size and rehash everything.
*/
}
}
}
/*
* Finish computing the hash signature from the stack trace, and then
* see if the owner is in the hash table. If so, update our stats.
*/
for (i = 0; i < depth; i++)
size_t difference = 0;
for (i = 0; i < depth; i++) {
}
if (difference == 0) {
return;
}
}
}
/*
* If the owner is not yet hashed, grab the next element and fill it
* in based on the allocation information.
*/
for (i = 0; i < depth; i++)
}
/*
* When ::umausers is invoked without the -f flag, we simply update our hash
* table with the information from each allocated bufctl.
*/
/*ARGSUSED*/
static int
{
return (WALK_NEXT);
}
/*
* When ::umausers is invoked with the -f flag, we print out the information
* for each bufctl as well as updating the hash table.
*/
static int
{
mdb_printf("size %d, addr %p, thread %p, cache %s\n",
for (i = 0; i < depth; i++)
return (WALK_NEXT);
}
/*
* We sort our results by allocation size before printing them.
*/
static int
{
}
/*
* The main engine of ::umausers is relatively straightforward: First we
* accumulate our list of umem_cache_t addresses into the umclist_t. Next we
* iterate over the allocated bufctls of each cache in the list. Finally,
* we sort and print our results.
*/
/*ARGSUSED*/
int
{
int audited_caches = 0; /* Number of UMF_AUDIT caches found */
int i, oelems;
if (flags & DCMD_ADDRSPEC)
return (DCMD_USAGE);
argv += i; /* skip past options we just processed */
argc -= i; /* adjust argc */
return (DCMD_USAGE);
return (DCMD_ERR);
}
do_all_caches = 0;
argv++;
argc--;
}
if (opt_e)
mem_threshold = cnt_threshold = 0;
if (opt_f)
if (do_all_caches) {
}
for (i = 0; i < umc.umc_nelems; i++) {
umem_cache_t c;
continue;
}
if (!(c.cache_flags & UMF_AUDIT)) {
if (!do_all_caches) {
mdb_warn("UMF_AUDIT is not enabled for %s\n",
c.cache_name);
}
continue;
}
}
if (audited_caches == 0 && do_all_caches) {
mdb_warn("UMF_AUDIT is not enabled for any caches\n");
return (DCMD_ERR);
}
continue;
mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
}
return (DCMD_OK);
}
struct malloc_data {
};
#ifdef _LP64
#else
#endif
typedef struct umem_malloc_info {
static void
{
int minb = -1;
int maxb = -1;
int buckets;
int nbucks;
int i;
int b;
const int *distarray;
um_malloc = 0;
if (maxbuckets != 0)
if (minbucketsize > 1) {
if (buckets == 0) {
buckets = 1;
}
}
if (geometric)
else
for (i = 0; i < buckets; i++) {
}
mdb_printf("\n");
}
/*
* A malloc()ed buffer looks like:
*
* <----------- mi.malloc_size --->
* <----------- cp.cache_bufsize ------------------>
* <----------- cp.cache_chunksize -------------------------------->
* +-------+-----------------------+---------------+---------------+
* |/tag///| mallocsz |/round-off/////|/debug info////|
* +-------+-----------------------+---------------+---------------+
* <-- usable space ------>
*
* mallocsz is the argument to malloc(3C).
* mi.malloc_size is the actual size passed to umem_alloc(), which
* is rounded up to the smallest available cache size, which is
* cache_bufsize. If there is debugging or alignment overhead in
* the cache, that is reflected in a larger cache_chunksize.
*
* The tag at the beginning of the buffer is either 8-bytes or 16-bytes,
* depending upon the ISA's alignment requirements. For 32-bit allocations,
* it is always an 8-byte tag. For 64-bit allocations larger than 8 bytes,
* the tag has 8 bytes of padding before it.
*
* 32-bit, and 64-bit buffers <= 8 bytes:
* +-------+-------+--------- ...
* |/size//|/stat//| mallocsz ...
* +-------+-------+--------- ...
* ^
* pointer returned from malloc(3C)
*
* 64-bit buffers > 8 bytes:
* +---------------+-------+-------+--------- ...
* |/padding///////|/size//|/stat//| mallocsz ...
* +---------------+-------+-------+--------- ...
* ^
* pointer returned from malloc(3C)
*
* The "size" field is "malloc_size", which is mallocsz + the padding.
* The "stat" field is derived from malloc_size, and functions as a
* validation that this buffer is actually from malloc(3C).
*/
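/*
 * Editorial sketch of decoding the tag just described: given a pointer
 * returned by malloc(3C), the size/stat words sit immediately before
 * it, and the caller's mallocsz is recovered by subtracting the tag's
 * own size. The MALLOC_MAGIC validation of the "stat" word is elided,
 * and struct malloc_data's declaration is truncated above, so treat
 * the accounting here as illustrative only.
 */
static ssize_t
malloc_size_sketch(uintptr_t buf_addr)
{
	struct malloc_data md;

	if (mdb_vread(&md, sizeof (md), buf_addr - sizeof (md)) == -1)
		return (-1);		/* tag unreadable */
	/* malloc_size is assumed to account for the tag itself */
	return ((ssize_t)md.malloc_size - sizeof (struct malloc_data));
}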
/*ARGSUSED*/
static int
{
struct malloc_data md;
#ifdef _LP64
}
#endif
return (WALK_NEXT);
}
case MALLOC_MAGIC:
#ifdef _LP64
case MALLOC_SECOND_MAGIC:
#endif
/* include round-off and debug overhead */
break;
default:
break;
}
return (WALK_NEXT);
}
int
{
mdb_warn("unable to look up umem_alloc_sizes");
return (-1);
}
return (-1);
}
return (0);
}
static int
{
return (WALK_NEXT);
-1) {
return (WALK_ERR);
}
return (WALK_NEXT);
}
void
umem_malloc_dist_help(void)
{
mdb_printf("%s\n",
"report distribution of outstanding malloc()s");
mdb_dec_indent(2);
mdb_printf("%<b>OPTIONS%</b>\n");
mdb_inc_indent(2);
mdb_printf("%s",
" -b maxbins\n"
" Use at most maxbins bins for the data\n"
" -B minbinsize\n"
" Make the bins at least minbinsize bytes apart\n"
" -d dump the raw data out, without binning\n"
" -g use geometric binning instead of linear binning\n");
}
/*ARGSUSED*/
int
{
size_t maxbuckets = 0;
size_t minbucketsize = 0;
if (flags & DCMD_ADDRSPEC)
return (DCMD_USAGE);
0) != argc)
return (DCMD_USAGE);
&mi) == -1) {
mdb_warn("unable to walk 'umem_cache'");
return (DCMD_ERR);
}
if (dump) {
int i;
return (DCMD_OK);
}
return (DCMD_OK);
}
void
umem_malloc_info_help(void)
{
mdb_printf("%s\n",
"report information about malloc()s by cache. ");
mdb_dec_indent(2);
mdb_printf("%<b>OPTIONS%</b>\n");
mdb_inc_indent(2);
mdb_printf("%s",
" -b maxbins\n"
" Use at most maxbins bins for the data\n"
" -B minbinsize\n"
" Make the bins at least minbinsize bytes apart\n"
" -d dump the raw distribution data without binning\n"
#ifndef _KMDB
" -g use geometric binning instead of linear binning\n"
#endif
"");
}
int
{
umem_cache_t c;
int skip = 0;
size_t maxbuckets = 0;
size_t minbucketsize = 0;
int *alloc_sizes;
int idx;
0) != argc)
return (DCMD_USAGE);
verbose = 1;
if (!(flags & DCMD_ADDRSPEC)) {
mdb_warn("can't walk umem_cache");
return (DCMD_ERR);
}
return (DCMD_OK);
}
return (DCMD_ERR);
}
mdb_warn("umem_malloc_info: cache \"%s\" is not used "
"by malloc()\n", c.cache_name);
skip = 1;
}
/*
* normally, print the header only the first time. In verbose mode,
* print the header on every non-skipped buffer
*/
mdb_printf("%<ul>%-?s %6s %6s %8s %8s %10s %10s %6s%</ul>\n",
"CACHE", "BUFSZ", "MAXMAL",
"BUFMALLC", "AVG_MAL", "MALLOCED", "OVERHEAD", "%OVER");
if (skip)
return (DCMD_OK);
#ifdef _LP64
if (c.cache_bufsize > UMEM_SECOND_ALIGN)
maxmalloc -= sizeof (struct malloc_data);
#endif
if (verbose)
-1) {
mdb_warn("can't walk 'umem'");
return (DCMD_ERR);
}
/* do integer round off for the average */
else
avg_malloc = 0;
/*
* include per-slab overhead
*
* Each slab in a given cache is the same size, and has the same
* number of chunks in it; we read in the first slab on the
* slab list to get the number of chunks for all slabs. To
* compute the per-slab overhead, we just subtract the chunk usage
* from the slabsize:
*
* +------------+-------+-------+ ... --+-------+-------+-------+
* |////////////| | | ... | |///////|///////|
* |////color///| chunk | chunk | ... | chunk |/color/|/slab//|
* |////////////| | | ... | |///////|///////|
* +------------+-------+-------+ ... --+-------+-------+-------+
* | \_______chunksize * chunks_____/ |
* \__________________________slabsize__________________________/
*
* For UMF_HASH caches, there is an additional source of overhead;
* the external umem_slab_t and per-chunk bufctl structures. We
* include those in our per-slab overhead.
*
* Once we have a number for the per-slab overhead, we estimate
* the actual overhead by treating the malloc()ed buffers as if
* they were densely packed:
*
* additional overhead = (# mallocs) * (per-slab) / (chunks);
*
* carefully ordering the multiply before the divide, to avoid
* round-off error.
*/
} else {
if (chunks != 0 && c.cache_chunksize != 0 &&
c.cache_slabsize -
(c.cache_chunksize * chunks);
if (c.cache_flags & UMF_HASH) {
perslab += sizeof (umem_slab_t) +
chunks *
((c.cache_flags & UMF_AUDIT) ?
sizeof (umem_bufctl_audit_t) :
sizeof (umem_bufctl_t));
}
overhead +=
} else {
mdb_warn("invalid #chunks (%d) in slab %p\n",
}
}
}
if (allocated != 0)
else
overhead_pct = 0;
mdb_printf("%0?p %6ld %6ld %8ld %8ld %10ld %10ld %3ld.%01ld%%\n",
if (!verbose)
return (DCMD_OK);
if (!dump)
mdb_printf("\n");
return (DCMD_ERR);
break;
if (alloc_sizes[idx] == 0) {
break;
}
}
"cache %p's size (%d) not in umem_alloc_sizes\n",
addr, c.cache_bufsize);
return (DCMD_ERR);
}
if (minmalloc > 0) {
#ifdef _LP64
if (minmalloc > UMEM_SECOND_ALIGN)
minmalloc -= sizeof (struct malloc_data);
#endif
minmalloc -= sizeof (struct malloc_data);
minmalloc += 1;
}
if (dump) {
mdb_printf("\n");
} else {
}
return (DCMD_OK);
}
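/*
 * Editorial sketch of the per-slab overhead estimate explained above
 * umem_malloc_info, with the multiply ordered before the divide as the
 * comment prescribes. Worked example: an 8192-byte slab holding 13
 * chunks of 624 bytes wastes 8192 - 13 * 624 = 80 bytes per slab, so
 * 1000 outstanding mallocs account for about 1000 * 80 / 13 = 6153
 * bytes of additional overhead.
 */
static size_t
perslab_overhead_sketch(size_t slabsize, size_t chunksize, size_t chunks,
    size_t mallocs)
{
	size_t perslab = slabsize - chunksize * chunks;

	return (mallocs * perslab / chunks);	/* multiply first */
}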