/*
* Copyright (C) 2004-2006 Internet Systems Consortium, Inc. ("ISC")
* Copyright (C) 1997-2003 Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/* $Id: mem.c,v 1.129 2007/02/13 02:49:08 marka Exp $ */
/*! \file */
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <limits.h>
#include <isc/ondestroy.h>
#ifndef ISC_MEM_DEBUGGING
#define ISC_MEM_DEBUGGING 0
#endif
/*
* Constants.
*/
#define DEF_MAX_SIZE 1100
#define DEF_MEM_TARGET 4096
#define TABLE_INCREMENT 1024
#define DEBUGLIST_COUNT 1024
/*
* Types.
*/
typedef struct debuglink debuglink_t;
struct debuglink {
const void *ptr[DEBUGLIST_COUNT];
unsigned int size[DEBUGLIST_COUNT];
const char *file[DEBUGLIST_COUNT];
unsigned int line[DEBUGLIST_COUNT];
unsigned int count;
};
#else
#define FLARG_PASS
#define FLARG
#endif
typedef struct element element;
struct element {
        element *next;
};
typedef struct {
/*!
* This structure must be ALIGNMENT_SIZE bytes.
*/
union {
char bytes[ALIGNMENT_SIZE];
} u;
} size_info;
struct stats {
unsigned long gets;
unsigned long totalgets;
unsigned long blocks;
unsigned long freefrags;
};
#endif
/* List of all active memory contexts. */
static isc_mutex_t lock;
struct isc_mem {
unsigned int magic;
unsigned int flags;
void * arg;
unsigned int references;
void * water_arg;
/* ISC_MEMFLAG_INTERNAL */
unsigned char ** basic_table;
unsigned int basic_table_count;
unsigned int basic_table_size;
unsigned char * lowest;
unsigned char * highest;
#endif
unsigned int memalloc_failures;
};
struct isc_mempool {
/* always unlocked */
unsigned int magic; /*%< magic number */
/*%< locked via the memory context's lock */
/*%< optionally locked from here down */
unsigned int maxalloc; /*%< max number of items allowed */
unsigned int allocated; /*%< # of items currently given out */
unsigned int freecount; /*%< # of items on reserved list */
unsigned int freemax; /*%< # of items allowed on free list */
unsigned int fillcount; /*%< # of items to fetch on each fill */
/*%< Stats only. */
unsigned int gets; /*%< # of requests to this pool */
/*%< Debugging only. */
#endif
};
/*
* Private Inline-able.
*/
#if ! ISC_MEM_TRACKLINES
#define ADD_TRACE(a, b, c, d, e)
#define DELETE_TRACE(a, b, c, d, e)
#else
#define ADD_TRACE(a, b, c, d, e) \
do { \
if ((isc_mem_debugging & (ISC_MEM_DEBUGTRACE | \
ISC_MEM_DEBUGRECORD)) != 0 && \
b != NULL) \
add_trace_entry(a, b, c, d, e); \
} while (0)
#define DELETE_TRACE(a, b, c, d, e) delete_trace_entry(a, b, c, d, e)
static void
/*!
* mctx must be locked.
*/
static inline void
{
unsigned int i;
if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
"add %p size %u "
"file %s line %u mctx %p\n"),
return;
goto next;
for (i = 0; i < DEBUGLIST_COUNT; i++) {
return;
}
}
next:
}
for (i = 1; i < DEBUGLIST_COUNT; i++) {
}
}
static inline void
{
unsigned int i;
if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
"del %p size %u "
"file %s line %u mctx %p\n"),
return;
for (i = 0; i < DEBUGLIST_COUNT; i++) {
}
return;
}
}
}
/*
* If we get here, we didn't find the item on the list. We're
* screwed.
*/
}
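/*
 * Illustrative sketch of the per-allocation bookkeeping behind
 * ADD_TRACE/DELETE_TRACE above: each debuglink slot records the
 * pointer, size, file and line of one outstanding allocation.  The
 * helper below is an example, not the original add_trace_entry().
 */
static isc_boolean_t
example_record_trace(debuglink_t *dl, const void *ptr, unsigned int size,
                     const char *file, unsigned int line)
{
        if (dl->count == DEBUGLIST_COUNT)
                return (ISC_FALSE);     /* this debuglink is full */
        dl->ptr[dl->count] = ptr;
        dl->size[dl->count] = size;
        dl->file[dl->count] = file;
        dl->line[dl->count] = line;
        dl->count++;
        return (ISC_TRUE);
}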
#endif /* ISC_MEM_TRACKLINES */
static inline size_t
/*
* round down to ALIGNMENT_SIZE
*/
}
static inline size_t
/*!
* Round up the result in order to get a size big
* enough to satisfy the request and be aligned on ALIGNMENT_SIZE
* byte boundaries.
*/
if (size == 0U)
return (ALIGNMENT_SIZE);
}
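/*
 * Illustrative sketch of the rounding described above, assuming
 * ALIGNMENT_SIZE is a power of two; the helper names are examples,
 * not the original functions.
 */
static inline size_t
example_round_down(size_t size) {
        /* Clearing the low-order bits rounds down to a boundary. */
        return (size & ~((size_t)ALIGNMENT_SIZE - 1));
}

static inline size_t
example_round_up(size_t size) {
        /* A zero-byte request still consumes one aligned unit. */
        if (size == 0U)
                return (ALIGNMENT_SIZE);
        return ((size + ALIGNMENT_SIZE - 1) & ~((size_t)ALIGNMENT_SIZE - 1));
}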
static inline isc_boolean_t
void *new;
unsigned char **table;
unsigned int table_size;
int i;
/* Require: we hold the context lock. */
/*
* Did we hit the quota for this context?
*/
return (ISC_FALSE);
table_size * sizeof(unsigned char *));
ctx->memalloc_failures++;
return (ISC_FALSE);
}
if (ctx->basic_table_size != 0) {
sizeof(unsigned char *));
}
}
ctx->memalloc_failures++;
return (ISC_FALSE);
}
ctx->basic_table_count++;
for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
}
/*
* curr is now pointing at the last block in the
* array.
*/
return (ISC_TRUE);
}
static inline isc_boolean_t
int i, frags;
void *new;
/*!
* Try to get more fragments by chopping up a basic block.
*/
if (!more_basic_blocks(ctx)) {
/*
* We can't get more memory from the OS, or we've
* hit the quota for this context.
*/
/*
* XXXRTH "At quota" notification here.
*/
return (ISC_FALSE);
}
}
/*
* Set up a linked-list of blocks of size
* "new_size".
*/
total_size -= new_size;
for (i = 0; i < (frags - 1); i++) {
total_size -= new_size;
}
/*
* Add the remaining fragment of the basic block to a free list.
*/
if (total_size > 0U) {
}
/*
* curr is now pointing at the last block in the
* array.
*/
return (ISC_TRUE);
}
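/*
 * Illustrative sketch of the "chop a basic block into fragments"
 * step described above: a raw block is carved into new_size-sized
 * pieces that are threaded onto a singly linked free list.  The
 * helper and parameter names are examples, not the original code.
 */
static void
example_chop_block(void *block, size_t total_size, size_t new_size,
                   element **freelist)
{
        unsigned char *curr = block;

        /* The first bytes of each fragment are reused as the link. */
        while (total_size >= new_size) {
                element *frag = (element *)curr;

                frag->next = *freelist;
                *freelist = frag;
                curr += new_size;
                total_size -= new_size;
        }
        /* Any remainder smaller than new_size is left to the caller. */
}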
static inline void *
void *ret;
/*
* memget() was called on something beyond our upper limit.
*/
goto done;
}
ctx->memalloc_failures++;
goto done;
}
/*
* If we don't set new_size to size, then the
* ISC_MEM_FILL code might write over bytes we
* don't own.
*/
goto done;
}
/*
* If there are no blocks in the free list for this size, get a chunk
* of memory and then break it up into "new_size"-sized blocks, adding
* them to the free list.
*/
return (NULL);
/*
* The free list uses the "rounded-up" size "new_size".
*/
/*
* The stats[] entry is indexed by the _actual_ "size" requested by
* the caller, with the caveat (handled in the code above) that any
* "size" >= max_size is recorded as a call to max_size.
*/
done:
#if ISC_MEM_FILL
#endif
return (ret);
}
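/*
 * Illustrative sketch of the "get" flow described above: requests of
 * max_size or more bypass the free lists and go straight to the raw
 * allocator, while smaller requests are served from the free list for
 * their rounded-up size and the caller refills that list on demand.
 * The helper name is an example; malloc() stands in for the context's
 * real allocator.
 */
static void *
example_get(element **freelist, size_t size, size_t max_size) {
        element *frag;

        if (size >= max_size)
                return (malloc(size));  /* too big for the free lists */

        frag = *freelist;
        if (frag == NULL)
                return (NULL);  /* caller refills (cf. more_frags()) and retries */
        *freelist = frag->next;
        return (frag);
}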
#if ISC_MEM_FILL && ISC_MEM_CHECKOVERRUN
static inline void
unsigned char *cp;
cp++;
size++;
}
}
#endif
static inline void
/*
* memput() called on something beyond our upper limit.
*/
#if ISC_MEM_FILL
#endif
return;
}
#if ISC_MEM_FILL
#endif
#endif
/*
* The free list uses the "rounded-up" size "new_size".
*/
/*
* The stats[] entry is indexed by the _actual_ "size" requested by
* the caller, with the caveat (handled in the code above) that any
* "size" >= max_size is recorded as a call to max_size.
*/
}
/*!
* Perform a malloc, doing memory filling and overrun detection as necessary.
*/
static inline void *
char *ret;
size += 1;
#endif
ctx->memalloc_failures++;
#if ISC_MEM_FILL
#else
# if ISC_MEM_CHECKOVERRUN
# endif
#endif
return (ret);
}
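/*
 * Illustrative sketch of the memory filling mentioned above: the
 * returned region, including any padding beyond the caller's request,
 * is stamped with a recognizable pattern so that reads of
 * uninitialized data stand out and the padding can serve as guard
 * bytes.  The 0xbe value and the helper name are examples, not
 * necessarily the original constants.
 */
static void
example_fill_on_get(unsigned char *p, size_t padded_size) {
        size_t i;

        for (i = 0; i < padded_size; i++)
                p[i] = 0xbe;
}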
/*!
* Perform a free, doing memory filling and overrun detection as necessary.
*/
static inline void
#endif
#if ISC_MEM_FILL
#else
#endif
}
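/*
 * Illustrative counterpart for the free path: the guard bytes written
 * at allocation time are verified, then the freed region is stamped
 * with a second pattern so that use-after-free stands out.  The values
 * and the helper name are examples only.
 */
static void
example_scrub_on_put(unsigned char *p, size_t size, size_t padded_size) {
        size_t i;

        /*
         * Padding past the request must still hold the allocation-time
         * pattern; anything else indicates a buffer overrun.
         */
        for (i = size; i < padded_size; i++)
                INSIST(p[i] == 0xbe);
        for (i = 0; i < size; i++)
                p[i] = 0xde;
}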
/*!
* Update internal counters after a memory get.
*/
static inline void
} else {
}
}
/*!
* Update internal counters after a memory put.
*/
static inline void
} else {
}
}
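/*
 * Illustrative sketch of the per-size accounting described above,
 * including the caveat that requests of max_size or larger are all
 * recorded in the max_size bucket.  The indexing scheme is an example,
 * not the original layout.
 */
static void
example_stats_get(struct stats *stats, size_t size, size_t max_size) {
        struct stats *s = &stats[size >= max_size ? max_size : size];

        s->gets++;
        s->totalgets++;
}

static void
example_stats_put(struct stats *stats, size_t size, size_t max_size) {
        struct stats *s = &stats[size >= max_size ? max_size : size];

        INSIST(s->gets != 0U);
        s->gets--;
}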
/*
* Private.
*/
static void *
if (size == 0U)
size = 1;
}
static void
}
static void
initialize_action(void) {
}
/*
* Public.
*/
{
}
{
return (ISC_R_NOMEMORY);
if ((flags & ISC_MEMFLAG_NOLOCK) == 0) {
if (result != ISC_R_SUCCESS) {
return (result);
}
}
if (init_max_size == 0U)
else
#endif
ctx->basic_table_count = 0;
ctx->basic_table_size = 0;
goto error;
}
if ((flags & ISC_MEMFLAG_INTERNAL) != 0) {
if (target_size == 0U)
else
sizeof(element *));
goto error;
}
}
if ((isc_mem_debugging & ISC_MEM_DEBUGRECORD) != 0) {
unsigned int i;
goto error;
}
}
#endif
ctx->memalloc_failures = 0;
return (ISC_R_SUCCESS);
#endif /* ISC_MEM_TRACKLINES */
}
return (result);
}
{
}
{
}
static void
unsigned int i;
}
} else {
}
}
}
#endif
#endif
}
}
for (i = 0; i < ctx->basic_table_count; i++)
}
}
void
source->references++;
}
void
ctx->references--;
if (ctx->references == 0)
if (want_destroy)
}
/*
* isc_mem_putanddetach() is the equivalent of:
*
* mctx = NULL;
* isc_mem_attach(ptr->mctx, &mctx);
* isc_mem_detach(&ptr->mctx);
* isc_mem_put(mctx, ptr, sizeof(*ptr));
* isc_mem_detach(&mctx);
*/
void
/*
* Must be before mem_putunlocked() as ctxp is usually within
* [ptr..ptr+size).
*/
if ((isc_mem_debugging & ISC_MEM_DEBUGSIZE) != 0) {
if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0)
}
ctx->references--;
if (ctx->references == 0)
if (want_destroy)
return;
}
} else {
}
ctx->references--;
if (ctx->references == 0)
if (want_destroy)
}
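/*
 * Usage sketch for the equivalence documented above: an object that
 * carries a reference to its own memory context can free itself and
 * drop that reference in one call.  The example_obj type is an
 * example, not part of this file.
 */
struct example_obj {
        isc_mem_t *mctx;
        int payload;
};

static void
example_obj_destroy(struct example_obj *obj) {
        /* Frees the object and detaches obj->mctx in a single step. */
        isc_mem_putanddetach(&obj->mctx, obj, sizeof(*obj));
}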
void
/*
* This routine provides legacy support for callers who use mctxs
* without attaching/detaching.
*/
#endif
ctx->references--;
}
return (res);
}
void *
void *ptr;
} else {
}
}
(isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0)
}
if (call_water)
return (ptr);
}
void
{
if ((isc_mem_debugging & ISC_MEM_DEBUGSIZE) != 0) {
if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0)
}
return;
}
} else {
}
/*
* The check against ctx->lo_water == 0 is for the condition
* when the context was pushed over hi_water but then had
* isc_mem_setwater() called with 0 for hi_water and lo_water.
*/
}
if (call_water)
}
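/*
 * Illustrative sketch of the hi/lo water-mark behaviour discussed
 * above: the water callback fires once when usage rises past hi_water
 * and again when it falls below lo_water, or when the marks were reset
 * to zero while the high mark had already been reached.  The structure,
 * callback type and mark values are examples, not the original layout.
 */
typedef void (*example_water_fn)(void *arg, int mark);

#define EXAMPLE_HIWATER 1
#define EXAMPLE_LOWATER 0

struct example_waterstate {
        size_t inuse;                   /* bytes currently in use */
        size_t hi_water;                /* 0 means disabled */
        size_t lo_water;                /* 0 means disabled */
        isc_boolean_t hi_called;        /* high mark already reported */
        example_water_fn water;         /* user callback */
        void *water_arg;
};

static void
example_check_water(struct example_waterstate *ws) {
        if (ws->water == NULL)
                return;
        if (!ws->hi_called && ws->hi_water != 0U && ws->inuse > ws->hi_water) {
                ws->hi_called = ISC_TRUE;
                (ws->water)(ws->water_arg, EXAMPLE_HIWATER);
        } else if (ws->hi_called &&
                   (ws->lo_water == 0U || ws->inuse < ws->lo_water)) {
                ws->hi_called = ISC_FALSE;
                (ws->water)(ws->water_arg, EXAMPLE_LOWATER);
        }
}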
static void
unsigned int i, j;
const char *format;
"Dump of all outstanding "
"memory allocations:\n"));
"\tptr %p size %u file %s line %u\n");
for (j = 0; j < DEBUGLIST_COUNT; j++)
}
}
if (!found)
ISC_MSG_NONE, "\tNone.\n"));
}
}
#endif
/*
* Print the stats[] on the stream "out" with suitable formatting.
*/
void
size_t i;
const struct stats *s;
const isc_mempool_t *pool;
continue;
}
/*
* Note that since a pool can be locked now, these stats might be
* somewhat off if the pool is in active use at the time the stats
* are dumped. The link fields are protected by the isc_mem_t's
* lock, however, so walking this list and extracting integers from
* stats fields is always safe.
*/
"[Pool statistics]\n"));
ISC_MSG_POOLNAME, "name"),
ISC_MSG_POOLSIZE, "size"),
ISC_MSG_POOLMAXALLOC, "maxalloc"),
ISC_MSG_POOLALLOCATED, "allocated"),
ISC_MSG_POOLFREECOUNT, "freecount"),
ISC_MSG_POOLFREEMAX, "freemax"),
ISC_MSG_POOLFILLCOUNT, "fillcount"),
ISC_MSG_POOLGETS, "gets"),
"L");
}
}
#endif
}
/*
* Replacements for malloc() and free() -- they implicitly remember the
* size of the object allocated (with some additional overhead).
*/
static void *
size += ALIGNMENT_SIZE;
if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0)
size += ALIGNMENT_SIZE;
else
return (NULL);
if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0) {
si++;
}
return (&si[1]);
}
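/*
 * Illustrative sketch of the size-prefix trick used by the
 * allocate/free replacements above: the requested size is stored in
 * an aligned header placed just before the pointer handed back to the
 * caller, so the matching free can recover it.  The helper names are
 * examples; malloc()/free() stand in for the context's real allocator.
 */
union example_size_header {
        size_t size;                    /* the size recorded for free */
        char pad[ALIGNMENT_SIZE];       /* keeps the header aligned */
};

static void *
example_sized_allocate(size_t size) {
        union example_size_header *hdr;

        hdr = malloc(sizeof(*hdr) + size);
        if (hdr == NULL)
                return (NULL);
        hdr->size = size;               /* remember the caller's request */
        return (hdr + 1);               /* caller sees memory after the header */
}

static size_t
example_sized_free(void *ptr) {
        union example_size_header *hdr = (union example_size_header *)ptr - 1;
        size_t size = hdr->size;

        free(hdr);              /* the header address is the real allocation */
        return (size);          /* e.g. for updating usage statistics */
}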
void *
} else {
}
#endif
}
(isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0)
}
if (call_water)
return (si);
}
void
if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0) {
} else {
}
} else {
}
/*
* The check against ctx->lo_water == 0 is for the condition
* when the context was pushed over hi_water but then had
* isc_mem_setwater() called with 0 for hi_water and lo_water.
*/
}
if (call_water)
}
/*
* Other useful things.
*/
char *
char *ns;
return (ns);
}
void
}
/*
* Quotas
*/
void
}
return (quota);
}
return (inuse);
}
void
{
void *oldwater_arg;
} else {
}
}
/*
* Memory pool stuff
*/
/*
* Allocate space for this pool, initialize values, and if all works
* well, attach to the memory context.
*/
return (ISC_R_NOMEMORY);
#endif
return (ISC_R_SUCCESS);
}
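/*
 * Usage sketch for pool creation as described above, assuming the
 * public prototypes declared in <isc/mem.h>; the tuning values and
 * the example_item type are examples only.
 */
struct example_item {
        int value;                      /* placeholder payload */
};

static isc_result_t
example_make_pool(isc_mem_t *mctx, isc_mempool_t **poolp) {
        isc_result_t result;

        result = isc_mempool_create(mctx, sizeof(struct example_item), poolp);
        if (result != ISC_R_SUCCESS)
                return (result);
        isc_mempool_setfreemax(*poolp, 64);     /* cap on the free list */
        isc_mempool_setfillcount(*poolp, 16);   /* items fetched per refill */
        return (ISC_R_SUCCESS);
}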
void
#else
#endif
}
void
"isc_mempool_destroy(): mempool %s "
"leaked memory",
#endif
/*
* Return any items on the free list
*/
} else {
}
}
/*
* Remove our linked list entry from the memory context.
*/
}
void
}
void *
unsigned int i;
/*
* Don't let the caller go over quota
*/
goto out;
}
/*
* if we have a free list item, return the first here
*/
goto out;
}
/*
* We need to dip into the well. Lock the memory context here and
* fill up our free list.
*/
} else {
}
break;
}
/*
* If we didn't get any items, return NULL.
*/
goto out;
out:
}
#endif /* ISC_MEM_TRACKLINES */
return (item);
}
void
#endif /* ISC_MEM_TRACKLINES */
/*
* If our free list is full, return this to the mctx directly.
*/
} else {
}
return;
}
/*
* Otherwise, attach it to our free list and bump the counter.
*/
}
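/*
 * Illustrative sketch of the pool get/put paths described above: a get
 * is served from the pool's free list when possible, and a put returns
 * the item to that list unless it already holds freemax items.  The
 * structure and helper names are examples, not the original pool
 * layout; malloc()/free() stand in for the memory context.
 */
struct example_pool {
        element *freelist;              /* items kept for reuse */
        unsigned int freecount;         /* current length of freelist */
        unsigned int freemax;           /* cap on the free list */
        size_t itemsize;                /* assumed >= sizeof(element) */
};

static void *
example_pool_get(struct example_pool *pool) {
        element *item = pool->freelist;

        if (item != NULL) {
                pool->freelist = item->next;
                pool->freecount--;
                return (item);
        }
        /* Free list empty: fall back to the underlying allocator. */
        return (malloc(pool->itemsize));
}

static void
example_pool_put(struct example_pool *pool, void *mem) {
        element *item = mem;

        if (pool->freecount >= pool->freemax) {
                /* Free list already full: hand the memory straight back. */
                free(mem);
                return;
        }
        item->next = pool->freelist;
        pool->freelist = item;
        pool->freecount++;
}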
/*
* Quotas
*/
void
}
unsigned int
unsigned int freemax;
return (freemax);
}
unsigned int
unsigned int freecount;
return (freecount);
}
void
}
unsigned int
unsigned int maxalloc;
return (maxalloc);
}
unsigned int
unsigned int allocated;
return (allocated);
}
void
}
unsigned int
unsigned int fillcount;
return (fillcount);
}
void
#if !ISC_MEM_TRACKLINES
#else
#endif
}
void
#if !ISC_MEM_TRACKLINES
#else
}
#endif
}
void
if (!ISC_LIST_EMPTY(contexts)) {
}
#endif
INSIST(0);
}
}
#ifdef HAVE_LIBXML2
void
{
size_t i;
const struct stats *s;
const isc_mempool_t *pool;
continue;
ISC_XMLCHAR "blocks");
s->blocks);
ISC_XMLCHAR "freefrags");
s->freefrags);
}
}
/*
* Note that since a pool can be locked now, these stats might be
* somewhat off if the pool is in active use at the time the stats
* are dumped. The link fields are protected by the isc_mem_t's
* lock, however, so walking this list and extracting integers from
* stats fields is always safe.
*/
}
}
#endif /* HAVE_LIBXML2 */