/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>
#include <mdb/mdb_ctf.h>

#include <sys/sysmacros.h>
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/modctl.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/thread.h>

#include "kmem.h"
#include "leaky_impl.h"
/*
* This file defines the genunix target for leaky.c. There are three types
* of buffers in the kernel's heap: TYPE_VMEM, for kmem_oversize allocations,
 * TYPE_KMEM, for kmem_cache_alloc() allocations with bufctl_audit_ts, and
 * TYPE_CACHE, for kmem_cache_alloc() allocations without bufctl_audit_ts.
*
* See "leaky_impl.h" for the target interface definition.
*/
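/*
 * A rough sketch of how leaky.c drives this target (leaky_impl.h is the
 * authoritative reference): ::findleaks calls leaky_subr_estimate() to size
 * the leak_mtab array, leaky_subr_fill() to record one entry per allocated
 * buffer, and leaky_subr_run() to scan the conservative root set with
 * leaky_grep().  Buffers that no scanned pointer refers to are then reported
 * through leaky_subr_add_leak() and the leaky_subr_dump_*() routines.
 */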
/*
 * Each leak_mtab entry's lkm_bufctl field encodes both a pointer (to a
 * bufctl, a vmem_seg, or a cache) and one of the type tags below in its low
 * two bits; kmem buffers are at least 8-byte aligned, so those bits are
 * otherwise unused.
 */
#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_CACHE	2	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) & (LKM_CTL_MASK))

static int kmem_lite_count = 0;	/* cached copy of kmem_lite_count variable */

/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}
static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);

	return (WALK_NEXT);
}
static int
leaky_vmem_interested(const vmem_t *vmem)
{
	if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "static_alloc") != 0)
		return (0);
	return (1);
}
static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for kmem_oversize (%p)", addr);

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	*est += (size_t)(vmem->vm_kstat.vk_alloc.value.ui64 -
	    vmem->vm_kstat.vk_free.value.ui64);

	return (WALK_NEXT);
}
static int
leaky_interested(const kmem_cache_t *c)
{
	vmem_t vmem;

	/*
	 * ignore HAT-related caches that happen to derive from kmem_default
	 */
	if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
	    strcmp(c->cache_name, "sf_hment_cache") == 0 ||
	    strcmp(c->cache_name, "pa_hment_cache") == 0)
		return (0);

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from the kmem_default,
	 * kmem_firewall, or static vmem arenas, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
	    strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
	    strcmp(vmem.vm_name, "static") != 0)
		return (0);

	return (1);
}
static int
leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += kmem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & KMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "kmem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}

	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk kmem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

	if (!audit) {
		for (; lm < *lmp; lm++) {
			lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
		}
	}

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
{
	leaky_grep(addr, c->cache_bufsize);

	/*
	 * free, constructed KMF_LITE buffers keep their first uint64_t in
	 * their buftag's redzone.
	 */
	if (c->cache_flags & KMF_LITE) {
		/* LINTED alignment */
		kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);

		leaky_grep((uintptr_t)&btp->bt_redzone,
		    sizeof (btp->bt_redzone));
	}

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	/*
	 * Scan all of the free, constructed buffers, since they may have
	 * pointers to allocated objects.
	 */
	if (mdb_pwalk("freemem_constructed",
	    (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
		mdb_warn("can't walk freemem_constructed for cache %p (%s)",
		    addr, c->cache_name);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
{
	struct module mod;

	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	/* scan the module structure itself, plus its data and bss */
	leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
	leaky_grep((uintptr_t)mod.data, mod.data_size);
	leaky_grep((uintptr_t)mod.bss, mod.bss_size);

	return (WALK_NEXT);
}
static int
leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
{
	uintptr_t size, base = (uintptr_t)t->t_stkbase;
	uintptr_t stk = (uintptr_t)t->t_stk;

	/*
	 * If this thread isn't in memory, we can't look at its stack.  This
	 * may result in false positives, so we print a warning.
	 */
	if (!(t->t_schedflag & TS_LOAD)) {
		mdb_printf("findleaks: thread %p's stack swapped out; "
		    "false positives possible\n", addr);
		return (WALK_NEXT);
	}

	if (t->t_state != TS_FREE)
		leaky_grep(base, stk - base);

	/*
	 * There is always gunk hanging out between t_stk and the page
	 * boundary.  If this thread structure wasn't kmem allocated,
	 * this will include the thread structure itself.  If the thread
	 * _is_ kmem allocated, we'll be able to get to it via allthreads.
	 */
	size = *pagesize - (stk & (*pagesize - 1));

	leaky_grep(stk, size);

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
{
	leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);

	return (WALK_NEXT);
}
static void
leaky_kludge(void)
{
	GElf_Sym sym;
	mdb_ctf_id_t id, rid;

	int max_mem_nodes;
	uintptr_t *counters;
	size_t ncounters;
	ssize_t hwpm_size;
	int idx;

	/*
	 * Because of DR, the page counters (which live in the kmem64 segment)
	 * can point into kmem_alloc()ed memory.  The "page_counters" array
	 * is multi-dimensional, and each entry points to an array of
	 * "hw_page_map_t"s which is "max_mem_nodes" in length.
	 *
	 * To keep this from having too much grotty knowledge of internals,
	 * we use CTF data to get the size of the structure.  For simplicity,
	 * we treat the page_counters array as a flat array of pointers, and
	 * use its size to determine how much to scan.  Unused entries will
	 * be NULL.
	 */
	if (mdb_lookup_by_name("page_counters", &sym) == -1) {
		mdb_warn("unable to lookup page_counters");
		return;
	}

	if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
		mdb_warn("unable to read max_mem_nodes");
		return;
	}

	if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
	    mdb_ctf_type_resolve(id, &rid) == -1 ||
	    (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
		mdb_warn("unable to lookup unix`hw_page_map_t");
		return;
	}

	counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);

	if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
		mdb_warn("unable to read page_counters");
		return;
	}

	ncounters = sym.st_size / sizeof (counters[0]);

	for (idx = 0; idx < ncounters; idx++) {
		uintptr_t addr = counters[idx];

		if (addr != 0)
			leaky_grep(addr, hwpm_size * max_mem_nodes);
	}
}
int
leaky_subr_estimate(size_t *estp)
{
	uintptr_t panicstr;
	int state;

	if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
		mdb_warn("findleaks: can only be run on a system "
		    "dump or under kmdb; see dumpadm(1M)\n");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&panicstr, "panicstr") == -1) {
		mdb_warn("can't read variable 'panicstr'");
		return (DCMD_ERR);
	}

	if (state != MDB_STATE_STOPPED && panicstr == 0) {
		mdb_warn("findleaks: cannot be run on a live dump.\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: no buffers found\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}
int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
		mdb_warn("couldn't read 'kmem_lite_count'");
		kmem_lite_count = 0;
	} else if (kmem_lite_count > 16) {
		mdb_warn("kmem_lite_count nonsensical, ignored\n");
		kmem_lite_count = 0;
	}

	return (DCMD_OK);
}
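/*
 * Note that the sizing contract between the two phases above is implicit:
 * leaky_subr_estimate() must produce an upper bound on the number of entries
 * leaky_subr_fill() will write, since leaky.c presumably sizes the leak_mtab
 * array from the estimate before filling it.
 */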
int
leaky_subr_run(void)
{
	unsigned long ps = PAGESIZE;
	uintptr_t kstat_arena;
	uintptr_t dmods;

	leaky_kludge();

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
	    NULL) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
		mdb_warn("couldn't walk 'modctl'");
		return (DCMD_ERR);
	}

	/*
	 * If kmdb is loaded, we need to walk its module list, since kmdb
	 * modctl structures can reference kmem allocations.
	 */
	if ((mdb_readvar(&dmods, "kdi_dmods") != -1) && (dmods != 0))
		(void) mdb_pwalk("modctl", (mdb_walk_cb_t)leaky_modctl,
		    NULL, dmods);

	if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'thread'");
		return (DCMD_ERR);
	}

	if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'deathrow'");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
		mdb_warn("couldn't read 'kstat_arena'");
		return (DCMD_ERR);
	}

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
	    NULL, kstat_arena) == -1) {
		mdb_warn("couldn't walk kstat vmem arena");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}
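/*
 * To recap, the conservative root set scanned above via leaky_grep()
 * consists of: the page_counters kludge, free constructed buffers (which may
 * still hold pointers to allocated objects), module structures and their
 * data/bss segments, kmdb's modctl list (if loaded), thread stacks, and the
 * kstat arena.  A buffer referenced from none of these roots, nor from any
 * other buffer, becomes a leak candidate.
 */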
void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	size_t depth;

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_VMSEG: {
		vmem_seg_t vs;

		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}

		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	}
	case LKM_CTL_BUFCTL: {
		kmem_bufctl_audit_t bc;

		if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

		/*
		 * The top of the stack will be kmem_cache_alloc+offset.
		 * Since the offset in kmem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * We also use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;

		leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
		    bc.bc_timestamp, bc.bc_stack + 1, depth,
		    (uintptr_t)bc.bc_cache, (uintptr_t)bc.bc_cache);
		break;
	}
	case LKM_CTL_CACHE: {
		kmem_cache_t cache;
		kmem_buftag_lite_t bt;
		pc_t caller = 0;
		int depth = 0;

		/*
		 * For KMF_LITE caches, we can get the allocation PC
		 * out of the buftag structure.
		 */
		if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
		    (cache.cache_flags & KMF_LITE) &&
		    kmem_lite_count > 0 &&
		    mdb_vread(&bt, sizeof (bt),
		    /* LINTED alignment */
		    (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
			caller = bt.bt_history[0];
			depth = 1;
		}

		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    &caller, depth, addr, addr);
		break;
	}
	default:
		mdb_warn("internal error: invalid leak_bufctl_t\n");
		break;
	}
}
static void
leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "kmem_", 5) == 0)
			continue;
		if (strncmp(buf, "vmem_", 5) == 0)
			continue;
		*pcp = pc;

		return;
	}

	/*
	 * We're only here if the entire call chain is in the allocator
	 * itself; this shouldn't happen, but we'll just use the last caller.
	 */
	*pcp = pc;
}
int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if ((rval = strcmp(lbuf, rbuf)) != 0)
		return (rval);

	if (lcaller < rcaller)
		return (-1);
	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);
	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}
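/*
 * leaky.c presumably uses this comparator to sort leak_bufctl_ts so that
 * duplicates of the same leak (same caller symbol, caller PC, and lkb_data)
 * sort adjacently and can be folded into a single report; the lkb_dups
 * counts consumed by the dump routines below reflect that coalescing.
 */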
/*
* Global state variables used by the leaky_subr_dump_* routines. Note that
* they are carefully cleared before use.
*/
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_kmem_seen;
static size_t lk_ttl;
static size_t lk_bytes;
void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_VMEM:
		lk_vmem_seen = 0;
		break;
	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;
	case TYPE_KMEM:
		lk_kmem_seen = 0;
		break;
	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}
void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	kmem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	}

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (!verbose && !lk_vmem_seen) {
			lk_vmem_seen = 1;
			mdb_printf("%-16s %7s %?s %s\n",
			    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
		}

		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("kmem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("kmem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!verbose && !lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c),
			    "%s", (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_data,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			mdb_printf("    sample addr %p%s%s\n",
			    lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
		}
		return;

	case TYPE_KMEM:
		if (!verbose && !lk_kmem_seen) {
			lk_kmem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}
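/*
 * For reference, the non-verbose TYPE_KMEM output above is a table along
 * these lines (the addresses and symbol shown are illustrative only):
 *
 *	CACHE             LEAKED           BUFCTL CALLER
 *	0000030000b6e8a8       1 000000300040cd0c foo_attach+0x9c
 */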
void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leaks;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leaks = "kmem_oversize leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	case TYPE_KMEM:
		if (!lk_kmem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}
int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	kmem_bufctl_audit_t bc;
	vmem_seg_t vs;

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_CACHE:
		return (cb(lkb->lkb_addr, NULL, cbdata));

	case TYPE_KMEM:
		if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &bc, cbdata));

	default:
		return (WALK_NEXT);
	}
}