umem.c revision 38849194df07385a46363bb46861688fde59a98a
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Copyright 2012 Joyent, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
*
* The slab allocator, as described in the following two papers:
*
* Jeff Bonwick,
* The Slab Allocator: An Object-Caching Kernel Memory Allocator.
* Proceedings of the Summer 1994 Usenix Conference.
*
* Jeff Bonwick and Jonathan Adams,
* Magazines and vmem: Extending the Slab Allocator to Many CPUs and
* Arbitrary Resources.
* Proceedings of the 2001 Usenix Conference.
*
* 1. Overview
* -----------
* umem is very close to kmem in implementation. There are five major
* areas of divergence:
*
* * Initialization
*
* * CPU handling
*
* * umem_update()
*
* KM_SLEEP vs. UMEM_NOFAIL
*
* * lock ordering
*
* 2. Initialization
* -----------------
* kmem is initialized early on in boot, and knows that no one will call
* into it before it is ready. umem does not have these luxuries. Instead,
* initialization is divided into two phases:
*
* * library initialization, and
*
* * first use
*
* umem's full initialization happens at the time of the first allocation
* request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
* or the first call to umem_cache_create().
*
* umem_free() and umem_cache_alloc() do not require special handling,
* since the only way to get valid arguments for them is to successfully
* call a function from the first group.
*
* 2.1. Library Initialization: umem_startup()
* -------------------------------------------
* umem_startup() is libumem.so's .init section. It calls pthread_atfork()
* to install the handlers necessary for umem's Fork1-Safety. Because of
* race condition issues, all other pre-umem_init() initialization is done
* statically (i.e. by the dynamic linker).
*
* For standalone use, umem_startup() returns everything to its initial
* state.
*
* 2.2. First use: umem_init()
* ------------------------------
* The first time any memory allocation function is used, we have to
* create the backing caches and vmem arenas which are needed for it.
* umem_init() is the central point for that task. When it completes,
* umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
* to initialize, probably due to lack of memory).
*
* There are four different paths from which umem_init() is called:
*
* from umem_alloc() or umem_zalloc(), with 0 < size <= UMEM_MAXBUF,
*
* * from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
*
* * from umem_cache_create(), and
*
* * from memalign(), with align > UMEM_ALIGN.
*
* The last three just check if umem is initialized, and call umem_init()
* if it is not. For performance reasons, the first case is more complicated.
*
* 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size <= UMEM_MAXBUF
* -----------------------------------------------------------------
* In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
* There is special case code which causes any allocation on
* &umem_null_cache to fail by returning (NULL), regardless of the
* flags argument.
*
* So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
* umem_alloc_retry(). umem_alloc_retry() sees that the allocation
* was against &umem_null_cache, and calls umem_init().
*
* If initialization is successful, umem_alloc_retry() returns 1, which
* causes umem_alloc()/umem_zalloc() to start over, which causes it to load
* the (now valid) cache pointer from umem_alloc_table.
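*
* A simplified sketch of this path (debugging support elided; the index
* computation and helper names follow the description above, so treat the
* details as illustrative rather than definitive):
*
*     void *
*     umem_alloc(size_t size, int umflag)
*     {
*         umem_cache_t *cp;
*         void *buf;
*     retry:
*         cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
*         buf = umem_cache_alloc(cp, umflag);
*         if (buf == NULL && umem_alloc_retry(cp, umflag))
*             goto retry;        /* umem_init() succeeded; reload cp */
*         return (buf);
*     }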
*
* 2.2.2. Dealing with race conditions
* -----------------------------------
* There are a couple race conditions resulting from the initialization
* code that we have to guard against:
*
* * In umem_cache_create(), there is a special UMC_INTERNAL cflag
* that is passed for caches created during initialization. It
* is illegal for a user to try to create a UMC_INTERNAL cache.
* This allows initialization to proceed, but any other
* umem_cache_create()s will block by calling umem_init().
*
* Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
* is always zero. umem_cache_alloc uses cp->cache_cpu_mask to
* mask the cpu number. This prevents a race between grabbing a
* cache pointer out of umem_alloc_table and growing the cpu array.
*
*
* 3. CPU handling
* ---------------
* kmem uses the CPU's sequence number to determine which "cpu cache" to
* use for an allocation. Currently, there is no way to get the sequence
* number in userspace.
*
* umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
* umem_cpu_t structures. CURCPU() is a "hint" function, which we then mask
* with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
* The mechanics of this are all in the CPU(mask) macro.
*
* Currently, umem uses _lwp_self() as its hint.
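*
* A minimal sketch of the hint/mask mechanics described above (the real
* definitions live in this file and umem_impl.h; this is illustrative):
*
*     #define CPUHINT()    (_lwp_self())
*     #define CPU(mask)    (&umem_cpus[CPUHINT() & (mask)])
*
* Because umem_cpu_mask and cp->cache_cpu_mask are always one less than a
* power of two, the mask turns an arbitrary hint into a valid index into
* umem_cpus (or into the cache's cache_cpu[] array).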
*
*
* 4. The update thread
* --------------------
* kmem uses a task queue, kmem_taskq, to do periodic maintenance on
* every kmem cache. vmem has a periodic timeout for hash table resizing.
* The kmem_taskq also provides a separate context for kmem_cache_reap()
* calls to run in, avoiding issues with the context of kmem_reap() callers.
*
* Instead, umem has the concept of "updates", which are asynchronous requests
* for work attached to single caches. All caches with pending work are
* on a doubly linked list rooted at the umem_null_cache. All update state
* is protected by the umem_update_lock mutex, and the umem_update_cv is used
* for notification between threads.
*
* 4.1. Cache states with regards to updates
* -----------------------------------------
* A given cache is in one of three states:
*
* Inactive          cache_uflags is zero, cache_u{next,prev} are NULL
*
* Work Requested    cache_uflags is non-zero (but UMU_ACTIVE is not set),
*                   cache_u{next,prev} link the cache onto the global
*                   update list
*
* Active            cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
*                   are NULL, and either umem_update_thr or
*                   umem_st_update_thr are actively doing work on the
*                   cache.
*
* An update can be added to any cache in any state -- if the cache is
* Inactive, it transitions to being Work Requested. If the cache is
* Active, the worker will notice the new update and act on it before
* transitioning the cache to the Inactive state.
*
* If a cache is in the Active state, UMU_NOTIFY can be set, which asks
* the worker to broadcast the umem_update_cv when it has finished.
*
* 4.2. Update interface
* ---------------------
* umem_add_update() adds an update to a particular cache.
* umem_updateall() adds an update to all caches.
* umem_remove_updates() returns a cache to the Inactive state.
*
* umem_process_updates() processes all caches in the Work Requested state.
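*
* A sketch of the scheduling rule umem_add_update() implements (field names
* follow the state table above; locking and assertions are simplified):
*
*     static void
*     umem_add_update(umem_cache_t *cp, int flags)
*     {
*         (void) mutex_lock(&umem_update_lock);
*         cp->cache_uflags |= flags;
*         if (!(cp->cache_uflags & UMU_ACTIVE) && cp->cache_unext == NULL) {
*             /* Inactive -> Work Requested: link onto the global list */
*             cp->cache_unext = &umem_null_cache;
*             cp->cache_uprev = umem_null_cache.cache_uprev;
*             umem_null_cache.cache_uprev->cache_unext = cp;
*             umem_null_cache.cache_uprev = cp;
*         }
*         if (!IN_UPDATE())
*             (void) cond_broadcast(&umem_update_cv);
*         (void) mutex_unlock(&umem_update_lock);
*     }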
*
* 4.3. Reaping
* ------------
* When umem_reap() is called (at the time of heap growth), it schedules
* UMU_REAP updates on every cache. It then checks to see if the update
* thread exists (umem_update_thr != 0). If it does, it broadcasts
* the umem_update_cv to wake the update thread up, and returns.
*
* If the update thread does not exist (umem_update_thr == 0), and the
* program currently has multiple threads, umem_reap() attempts to create
* a new update thread.
*
* If the process is not multithreaded, or the creation fails, umem_reap()
* calls umem_st_update() to do an inline update.
*
* 4.4. The update thread
* ----------------------
* The update thread spends most of its time in cond_timedwait() on the
* umem_update_cv. It wakes up under two conditions:
*
* * The timedwait times out, in which case it needs to run a global
* update, or
*
* * someone cond_broadcast(3THR)s the umem_update_cv, in which case
* it needs to check if there are any caches in the Work Requested
* state.
*
* When it is time for another global update, umem calls umem_cache_update()
* on every cache, then calls vmem_update(), which tunes the vmem structures.
* umem_cache_update() can request further work using umem_add_update().
*
* After any work from the global update completes, the update timer is
* reset to umem_reap_interval seconds in the future. This makes the
* updates self-throttling.
*
* Reaps are similarly self-throttling. After a UMU_REAP update has
* been scheduled on all caches, umem_reap() sets a flag and wakes up the
* update thread. The update thread notices the flag, and resets the
* reap state.
*
* 4.5. Inline updates
* -------------------
* If the update thread is not running, umem_st_update() is used instead. It
* immediately does a global update (as above), then calls
* umem_process_updates() to process both the reaps that umem_reap() added and
* any work generated by the global update. Afterwards, it resets the reap
* state.
*
* While umem_st_update() is running, umem_st_update_thr holds the thread
* id of the thread performing the update.
*
* 4.6. Updates and fork1()
* ------------------------
* umem has fork1() pre- and post-handlers which lock up (and release) every
* mutex in every cache. They also lock up the umem_update_lock. Since
* fork1() only copies over a single lwp, other threads (including the update
* thread) could have been actively using a cache in the parent. This
* can lead to inconsistencies in the child process.
*
* Because we locked all of the mutexes, the only possible inconsistencies are:
*
* * a umem_cache_alloc() could leak its buffer.
*
* * a caller of umem_depot_alloc() could leak a magazine, and all the
* buffers contained in it.
*
* * a cache could be in the Active update state. In the child, there
* would be no thread actually working on it.
*
* * a umem_hash_rescale() could leak the new hash table.
*
* * a umem_magazine_resize() could be in progress.
*
* * a umem_reap() could be in progress.
*
* The memory leaks we can't do anything about. umem_release_child() resets
* the update state and moves any caches in the Active state to the Work
* Requested state. This might cause some updates to be re-run, but UMU_REAP and
* UMU_HASH_RESCALE are effectively idempotent, and the worst that can
* happen from umem_magazine_resize() is resizing the magazine twice in close
* succession.
*
* Much of the cleanup in umem_release_child() is skipped if
* umem_st_update_thr == thr_self(). This is so that applications which call
* fork1() from a cache callback do not break. Needless to say, any such
* application is tremendously broken.
*
*
* 5. KM_SLEEP vs. UMEM_NOFAIL
* ----------------------------
* Allocations against kmem and vmem have two basic modes: SLEEP and
* NOSLEEP. A sleeping allocation will go to sleep (waiting for
* more memory) instead of failing (returning NULL).
*
* SLEEP allocations presume an extremely multithreaded model, with
* a lot of allocation and deallocation activity. umem cannot presume
* that its clients have any particular type of behavior. Instead,
* it provides two types of allocations:
*
* * UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
* failure)
*
* * UMEM_NOFAIL, which, on failure, calls an optional callback
* (registered with umem_nofail_callback()).
*
* The callback is invoked with no locks held, and can do an arbitrary
* amount of work. It then has a choice between:
*
* * Returning UMEM_CALLBACK_RETRY, which will cause the allocation
* to be restarted.
*
* * Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
* to be invoked with status. If multiple threads attempt to do
* this simultaneously, only one will call exit(2).
*
* * Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
* etc.)
*
* The default callback returns UMEM_CALLBACK_EXIT(255).
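*
* For example, an application could register a callback that tries to
* release application-level caches before retrying, and gives up after a
* few attempts (a sketch; release_app_caches() and the retry limit are the
* application's own, not part of libumem):
*
*     #include <umem.h>
*
*     static int
*     my_nofail_cb(void)
*     {
*         static int tries;
*
*         if (tries++ < 3) {
*             release_app_caches();    /* application-specific */
*             return (UMEM_CALLBACK_RETRY);
*         }
*         return (UMEM_CALLBACK_EXIT(255));
*     }
*
*     ...
*     umem_nofail_callback(my_nofail_cb);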
*
* To have these callbacks without risk of state corruption (in the case of
* a non-local exit), we have to ensure that the callbacks get invoked
* close to the original allocation, with no inconsistent state or held
* locks. The following steps are taken:
*
* * All invocations of vmem are VM_NOSLEEP.
*
* All constructor callbacks (which can themselves do allocations)
* are passed UMEM_DEFAULT as their required allocation argument. This
* way, the constructor will fail, allowing the highest-level allocation
* to invoke the nofail callback (see the constructor sketch below).
*
* If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
* the nofail callback does a non-local exit, we will leak the
* partially-constructed buffer.
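*
* For example, a well-behaved constructor passes the umflag argument it is
* given through to any allocations it performs, so that it fails cleanly
* when invoked with UMEM_DEFAULT (a sketch; my_obj_t is hypothetical):
*
*     typedef struct my_obj {
*         void *mo_data;
*     } my_obj_t;
*
*     static int
*     my_obj_construct(void *buf, void *cbdata, int umflag)
*     {
*         my_obj_t *mo = buf;
*
*         if ((mo->mo_data = umem_alloc(64, umflag)) == NULL)
*             return (-1);    /* propagate the failure upward */
*         return (0);
*     }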
*
*
* 6. Lock Ordering
* ----------------
* umem has a few more locks than kmem does, mostly in the update path. The
* overall lock ordering (earlier locks must be acquired first) is:
*
* umem_init_lock
*
* vmem_list_lock
* vmem_nosleep_lock.vmpl_mutex
* vmem_t's:
*     vm_lock
* sbrk_lock
*
* umem_cache_lock
* umem_update_lock
* umem_flags_lock
* umem_cache_t's:
*     cache_cpu[*].cc_lock
*     cache_depot_lock
*     cache_lock
* umem_log_header_t's:
*     lh_cpu[*].clh_lock
*     lh_lock
*
* 7. Changing UMEM_MAXBUF
* -----------------------
*
* When changing UMEM_MAXBUF, extra care has to be taken. It is not sufficient
* to simply increase this number. First, one must update the umem_alloc_table
* to have the appropriate number of entries based upon the new size. If this
* is not done, libumem will blow an assertion.
*
* The second place to update, which is not required, is the umem_alloc_sizes
* array. It determines the default cache sizes that we're going to support.
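*
* Schematically, the requirement on umem_alloc_table is that it cover every
* UMEM_ALIGN-sized bucket up to the new maximum (a sketch of the intended
* relationship, not the literal initializer used below):
*
*     static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT];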
*/
#include <umem_impl.h>
#include <sys/vmem_impl_user.h>
#include "umem_base.h"
#include "vmem_base.h"
#include <sys/processor.h>
#include <sys/sysmacros.h>
#include <alloca.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <signal.h>
#include <unistd.h>
#include <atomic.h>
#include "misc.h"
/*
* The default set of caches to back umem_alloc().
* These sizes should be reevaluated periodically.
*
* We want allocations that are multiples of the coherency granularity
* (64 bytes) to be satisfied from a cache which is a multiple of 64
* bytes, so that it will be 64-byte aligned. For all multiples of 64,
* the next cache size greater than or equal to it must be a
* multiple of 64.
*
* This table must be in sorted order, from smallest to largest. The
* highest slot must be UMEM_MAXBUF, and every slot afterwards must be
* zero.
*/
static int umem_alloc_sizes[] = {
#ifdef _LP64
1 * 8,
1 * 16,
2 * 16,
3 * 16,
#else
1 * 8,
2 * 8,
3 * 8,
4 * 8, 5 * 8, 6 * 8, 7 * 8,
#endif
4 * 16, 5 * 16, 6 * 16, 7 * 16,
4 * 32, 5 * 32, 6 * 32, 7 * 32,
4 * 64, 5 * 64, 6 * 64, 7 * 64,
4 * 128, 5 * 128, 6 * 128, 7 * 128,
4096 * 3,
8192 * 2, /* = 8192 * 2 */
24576, 32768, 40960, 49152, 57344, 65536, 73728, 81920,
/* 24 slots for user expansion */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
static umem_magtype_t umem_magtype[] = {
{ 1, 8, 3200, 65536 },
{ 3, 16, 256, 32768 },
{ 7, 32, 64, 16384 },
{ 15, 64, 0, 8192 },
{ 31, 64, 0, 4096 },
{ 47, 64, 0, 2048 },
{ 63, 64, 0, 1024 },
{ 95, 64, 0, 512 },
{ 143, 64, 0, 0 },
};
/*
* umem tunables
*/
uint_t umem_flags = 0;
int umem_init_env_ready; /* environ pre-initted */
int umem_ready = UMEM_READY_STARTUP;
static umem_nofail_callback_t *nofail_callback;
static mutex_t umem_nofail_exit_lock;
static thread_t umem_nofail_exit_thr;
static umem_cache_t *umem_slab_cache;
static umem_cache_t *umem_bufctl_cache;
static umem_cache_t *umem_bufctl_audit_cache;
static vmem_t *heap_arena;
static vmem_alloc_t *heap_alloc;
static vmem_free_t *heap_free;
static vmem_t *umem_internal_arena;
static vmem_t *umem_cache_arena;
static vmem_t *umem_hash_arena;
static vmem_t *umem_log_arena;
static vmem_t *umem_oversize_arena;
static vmem_t *umem_va_arena;
static vmem_t *umem_default_arena;
static vmem_t *umem_firewall_va_arena;
static vmem_t *umem_firewall_arena;
#define CPUHINT_MAX() INT_MAX
UMEM_CACHE_SIZE(0),
0
};
volatile uint32_t umem_reaping;
thr_self() == umem_st_update_thr)
#ifdef UMEM_STANDALONE
static const umem_cache_t umem_null_cache_template = {
#else
#endif
0, 0, 0, 0, 0,
0, 0,
0, 0,
0, 0,
"invalid_cache",
0, 0,
NULL,
0, 0, 0, 0,
0,
DEFAULTMUTEX, /* start of slab layer */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
{
NULL,
NULL,
-1,
0
},
NULL,
NULL,
DEFAULTMUTEX, /* start of depot layer */
NULL, {
NULL, 0, 0, 0, 0
}, {
NULL, 0, 0, 0, 0
}, {
{
DEFAULTMUTEX, /* start of CPU cache */
}
}
};
#define ALLOC_TABLE_4 \
#define ALLOC_TABLE_64 \
#define ALLOC_TABLE_1024 \
};
/* Used to constrain audit-log stack traces */
#define UMERR_MODIFIED 0 /* buffer modified while on freelist */
struct {
int ump_error; /* type of umem error (UMERR_*) */
void *ump_buffer; /* buffer that induced abort */
void *ump_realbuf; /* real start address for buffer */
static void
{
}
static void *
{
return (buf);
return (NULL);
}
static void *
{
return (buf);
}
}
return (NULL);
}
void
{
(void) mutex_lock(&umem_cache_lock);
(void) mutex_unlock(&umem_cache_lock);
}
static void
{
flags &= ~UMU_ACTIVE;
if (!flags)
return;
} else {
} else {
}
}
}
static void
{
(void) mutex_lock(&umem_update_lock);
if (!IN_UPDATE())
(void) cond_broadcast(&umem_update_cv);
(void) mutex_unlock(&umem_update_lock);
}
/*
* Remove a cache from the update list, waiting for any in-progress work to
* complete first.
*/
static void
{
(void) mutex_lock(&umem_update_lock);
/*
* Get it out of the active state
*/
int cancel_state;
/*
* Make sure the update state is sane, before we wait
*/
umem_st_update_thr != thr_self());
&cancel_state);
}
/*
* Get it out of the Work Requested state
*/
cp->cache_uflags = 0;
}
/*
* Make sure it is in the Inactive state
*/
(void) mutex_unlock(&umem_update_lock);
}
static void
umem_updateall(int flags)
{
/*
* NOTE: To prevent deadlock, umem_cache_lock is always acquired first.
*
* (umem_add_update is called from things run via umem_cache_applyall)
*/
(void) mutex_lock(&umem_cache_lock);
(void) mutex_lock(&umem_update_lock);
if (!IN_UPDATE())
(void) cond_broadcast(&umem_update_cv);
(void) mutex_unlock(&umem_update_lock);
(void) mutex_unlock(&umem_cache_lock);
}
/*
* Debugging support. Given a buffer address, find its slab.
*/
static umem_slab_t *
{
return (sp);
}
}
return (NULL);
}
static void
{
int old_logging = umem_logging;
umem_logging = 0; /* stop logging when a bad thing happens */
break;
}
}
} else {
else
break;
}
}
}
umem_printf("umem allocator: ");
switch (error) {
case UMERR_MODIFIED:
umem_printf("buffer modified after being freed\n");
umem_printf("modification occurred at offset 0x%lx "
"(0x%llx replaced by 0x%llx)\n",
break;
case UMERR_REDZONE:
umem_printf("redzone violation: write past end of buffer\n");
break;
case UMERR_BADADDR:
umem_printf("invalid free: buffer not in cache\n");
break;
case UMERR_DUPFREE:
umem_printf("duplicate free: buffer freed twice\n");
break;
case UMERR_BADBUFTAG:
umem_printf("boundary tag corrupted\n");
umem_printf("bcp ^ bxstat = %lx, should be %lx\n",
break;
case UMERR_BADBUFCTL:
umem_printf("bufctl corrupted\n");
break;
case UMERR_BADCACHE:
umem_printf("buffer freed to wrong cache\n");
umem_printf("caller attempting free to %s.\n",
cparg->cache_name);
break;
case UMERR_BADSIZE:
umem_printf("bad free: free size (%u) != alloc size (%u)\n",
break;
case UMERR_BADBASE:
umem_printf("bad free: free address (%p) != alloc address "
break;
}
umem_printf("buffer=%p bufctl=%p cache: %s\n",
error != UMERR_BADBUFCTL) {
int d;
umem_printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
umem_printf("\n");
}
}
umem_err_recoverable("umem: heap corruption detected");
}
void
{
}
static int
{
if (cp == &umem_null_cache) {
if (umem_init())
return (1); /* retry */
/*
* Initialization failed. Do normal failure processing.
*/
}
if (umflag & UMEM_NOFAIL) {
int result = def_result;
if (result == UMEM_CALLBACK_RETRY)
return (1);
result = def_result;
}
/*
* only one thread will call exit
*/
if (umem_nofail_exit_thr == thr_self())
umem_panic("recursive UMEM_CALLBACK_EXIT()\n");
(void) mutex_lock(&umem_nofail_exit_lock);
/*NOTREACHED*/
}
return (0);
}
static umem_log_header_t *
{
int i;
if (logsize == 0)
return (NULL);
/*
* Make sure that lhp->lh_cpu[] is nicely aligned
* to prevent false sharing of cache lines.
*/
goto fail;
if (lhp->lh_chunksize == 0)
goto fail;
nchunks * sizeof (int), VM_NOSLEEP);
goto fail;
for (i = 0; i < umem_max_ncpus; i++) {
}
for (i = umem_max_ncpus; i < nchunks; i++)
return (lhp);
fail:
}
return (NULL);
}
static void *
{
void *logspace;
return (NULL);
}
return (logspace);
}
{ \
}
static void
{
}
/*
* Create a new slab for cache cp.
*/
static umem_slab_t *
{
goto vmem_alloc_failure;
if (cache_flags & UMF_HASH) {
goto slab_alloc_failure;
} else {
}
sp->slab_refcnt = 0;
while (chunks-- != 0) {
if (cache_flags & UMF_HASH) {
goto bufctl_alloc_failure;
if (cache_flags & UMF_AUDIT) {
}
} else {
}
if (cache_flags & UMF_BUFTAG) {
if (cache_flags & UMF_DEADBEEF) {
cp->cache_verify);
}
}
}
return (sp);
}
return (NULL);
}
/*
* Destroy a slab.
*/
static void
{
}
}
}
/*
* Allocate a raw (unconstructed) buffer from cp's slab layer.
*/
static void *
{
void *buf;
cp->cache_slab_alloc++;
/*
* The freelist is empty. Create a new slab.
*/
if (cp == &umem_null_cache)
return (NULL);
return (NULL);
cp->cache_slab_create++;
}
sp->slab_refcnt++;
/*
* If we're taking the last buffer in the slab,
* remove the slab from the cache's freelist.
*/
}
/*
* Add buffer to allocated-address hash table.
*/
*hash_bucket = bcp;
}
} else {
}
return (buf);
}
/*
* Free a raw (unconstructed) buffer to cp's slab layer.
*/
static void
{
cp->cache_slab_free++;
/*
* Look up buffer in allocated-address hash table.
*/
break;
}
cp->cache_lookup_depth++;
}
} else {
}
return;
}
cp->cache_contents);
}
/*
* If this slab isn't currently on the freelist, put it there.
*/
}
if (--sp->slab_refcnt == 0) {
/*
* There are no outstanding allocations from this slab,
* so we can reclaim the memory.
*/
cp->cache_slab_destroy++;
return;
}
}
static int
{
int flags_nfatal;
return (-1);
}
return (-1);
}
return (-1);
}
}
(umflag & (UMEM_FATAL_FLAGS)) == 0) {
} else {
mtbf = 0;
}
/*
* We do not pass fatal flags on to the constructor. This prevents
* leaking buffers in the event of a subordinate constructor failing.
*/
return (-1);
}
}
return (0);
}
static int
{
return (-1);
}
else
return (-1);
}
return (-1);
}
return (-1);
}
}
return (0);
}
/*
* Free each object in magazine mp to cp's slab layer, and free mp itself.
*/
static void
{
int round;
continue;
}
}
}
/*
* Allocate a magazine from the depot.
*/
static umem_magazine_t *
{
/*
* If we can't get the depot lock without contention,
* update our contention count. We use the depot
* contention rate to determine whether we need to
* increase the magazine size for better scalability.
*/
}
}
return (mp);
}
/*
* Free a magazine to the depot.
*/
static void
{
}
/*
* Update the working set statistics for cp's depot.
*/
static void
{
}
/*
* Reap all magazines that have fallen out of the depot's working set.
*/
static void
{
long reap;
}
static void
{
}
/*
* Allocate a constructed object from cache cp.
*/
void *
{
void *buf;
int flags_nfatal;
for (;;) {
/*
* If there's an object available in the current CPU's
* loaded magazine, just take it and return.
*/
goto retry;
}
return (NULL);
}
return (buf);
}
/*
* The loaded magazine is empty. If the previously loaded
* magazine was full, exchange them and try again.
*/
if (ccp->cc_prounds > 0) {
continue;
}
/*
* If the magazine layer is disabled, break out now.
*/
if (ccp->cc_magsize == 0)
break;
/*
* Try to get a full magazine from the depot.
*/
ccp->cc_ploaded);
continue;
}
/*
* There are no full magazines in the depot,
* so fall through to the slab layer.
*/
break;
}
/*
* We couldn't allocate a constructed object from the magazine layer,
* so get a raw buffer from the slab layer and apply its constructor.
*/
if (cp == &umem_null_cache)
return (NULL);
goto retry;
}
return (NULL);
}
/*
* Let umem_cache_alloc_debug() apply the constructor for us.
*/
goto retry;
}
return (NULL);
}
return (buf);
}
/*
* We do not pass fatal flags on to the constructor. This prevents
* leaking buffers in the event of a subordinate constructor failing.
*/
goto retry;
}
return (NULL);
}
return (buf);
}
/*
* Free a constructed object to cache cp.
*/
void
{
return;
for (;;) {
/*
* If there's a slot available in the current CPU's
* loaded magazine, just put the object there and return.
*/
return;
}
/*
* The loaded magazine is full. If the previously loaded
* magazine was empty, exchange them and try again.
*/
if (ccp->cc_prounds == 0) {
continue;
}
/*
* If the magazine layer is disabled, break out now.
*/
if (ccp->cc_magsize == 0)
break;
/*
* Try to get an empty magazine from the depot.
*/
ccp->cc_ploaded);
continue;
}
/*
* There are no empty magazines in the depot,
* so try to allocate a new one. We must drop all locks
* across umem_cache_alloc() because lower layers may
* attempt to allocate from this cache.
*/
/*
* We successfully allocated an empty magazine.
* However, we had to drop ccp->cc_lock to do it,
* so the cache's magazine size may have changed.
* If so, free the magazine and try again.
*/
continue;
}
/*
* We got a magazine of the right size. Add it to
* the depot and try the whole dance again.
*/
continue;
}
/*
* We couldn't allocate an empty magazine,
* so fall through to the slab layer.
*/
break;
}
/*
* We couldn't free our constructed object to the magazine layer,
* so apply its destructor and free it to the slab layer.
* Note that if UMF_BUFTAG is in effect, umem_cache_free_debug()
* will have already applied the destructor.
*/
}
void *
{
void *buf;
}
goto retry;
} else {
}
return (buf);
}
void *
{
void *buf;
}
goto umem_alloc_retry;
return (buf);
}
if (size == 0)
return (NULL);
if (umem_oversize_arena == NULL) {
if (umem_init())
else
return (NULL);
}
goto umem_alloc_retry;
}
return (buf);
}
void *
{
void *buf;
if (size == 0)
return (NULL);
return (NULL);
if (align < UMEM_ALIGN)
align = UMEM_ALIGN;
if (umem_memalign_arena == NULL) {
if (umem_init())
else
return (NULL);
}
goto umem_alloc_align_retry;
}
return (buf);
}
void
{
return;
}
} else {
}
return;
}
return;
}
}
} else {
return;
}
}
void
{
return;
}
static void *
{
/*
* Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
* vm_quantum will cause integer wraparound. Check for this, and
* blow off the firewall page in this case. Note that such a
* giant allocation (the entire address space) can never be
* satisfied, so it will either fail immediately (VM_NOSLEEP)
* or sleep forever (VM_SLEEP). Thus, there is no need for a
* corresponding check in umem_firewall_va_free().
*/
}
static void
{
}
/*
* Reclaim all unused memory from a cache.
*/
static void
{
/*
* Ask the cache's owner to free some memory if possible.
* The idea is to handle things like the inode cache, which
* typically sits on a bunch of memory that it doesn't truly
* *need*. Reclaim policy is entirely up to the owner; this
* callback is just an advisory plea for help.
*/
}
/*
* Purge all magazines from a cache and set its magazine limit to zero.
* All calls are serialized by being done by the update thread, except for
* the final call from umem_cache_destroy().
*/
static void
{
ccp->cc_magsize = 0;
if (mp)
if (pmp)
}
/*
* Updating the working set statistics twice in a row has the
* effect of setting the working set size to zero, so everything
* is eligible for reaping.
*/
}
/*
* Enable per-cpu magazines on a cache.
*/
static void
{
int cpu_seqid;
return;
}
}
/*
* Recompute a cache's magazine size. The trade-off is that larger magazines
* provide a higher transfer rate with the depot, while smaller magazines
* reduce memory consumption. Magazine resizing is an expensive operation;
* it should not be done frequently.
*
* Changes to the magazine size are serialized by only having one thread
* doing updates (the update thread).
*
* Note: at present this only grows the magazine size. It might be useful
* to allow shrinkage too.
*/
static void
{
}
}
/*
* Rescale a cache's hash table, so that the table size is roughly the
* cache size. We want the average lookup time to be extremely small.
*/
static void
{
return;
return;
cp->cache_rescale++;
for (h = 0; h < old_size; h++) {
*hash_bucket = bcp;
}
}
}
/*
* Perform periodic maintenance on a cache: hash rescaling,
* depot working-set update, and magazine resizing.
*/
void
{
int update_flags = 0;
/*
* If the cache has become much larger or smaller than its hash table,
* fire off a request to rescale the hash table.
*/
/*
* Update the depot working set statistics.
*/
/*
* If there's a lot of contention in the depot,
* increase the magazine size.
*/
(int)(cp->cache_depot_contention -
if (update_flags)
}
/*
* Runs all pending updates.
*
* The update lock must be held on entrance, and will be held on exit.
*/
void
umem_process_updates(void)
{
int notify = 0;
while (cp->cache_uflags) {
(void) mutex_unlock(&umem_update_lock);
/*
* The order here is important. Each step can speed up
* later steps.
*/
if (uflags & UMU_HASH_RESCALE)
if (uflags & UMU_MAGAZINE_RESIZE)
(void) mutex_lock(&umem_update_lock);
/*
* check if anyone has requested notification
*/
uflags |= UMU_NOTIFY;
notify = 1;
}
}
if (notify)
(void) cond_broadcast(&umem_update_cv);
}
}
#ifndef UMEM_STANDALONE
static void
umem_st_update(void)
{
(void) mutex_unlock(&umem_update_lock);
(void) mutex_lock(&umem_update_lock);
umem_process_updates(); /* does all of the requested work */
umem_reap_next = gethrtime() +
umem_st_update_thr = 0;
}
#endif
/*
* Reclaim all unused memory from all caches. Called from vmem when memory
* gets tight. Must be called with no locks held.
*
* This just requests a reap on all caches, and notifies the update thread.
*/
void
umem_reap(void)
{
#ifndef UMEM_STANDALONE
extern int __nthreads(void);
#endif
gethrtime() < umem_reap_next)
return;
(void) mutex_lock(&umem_update_lock);
(void) mutex_unlock(&umem_update_lock);
return;
}
(void) mutex_unlock(&umem_update_lock);
(void) mutex_lock(&umem_update_lock);
/* Standalone is single-threaded */
#ifndef UMEM_STANDALONE
if (umem_update_thr == 0) {
/*
* The update thread does not exist. If the process is
* multi-threaded, create it. If not, or the creation fails,
* do the update processing inline.
*/
ASSERT(umem_st_update_thr == 0);
}
#endif
(void) mutex_unlock(&umem_update_lock);
}
char *name, /* descriptive name for this cache */
int cflags) /* cache creation flags */
{
int cpu_seqid;
/*
* The init thread is allowed to create internal and quantum caches.
*
* Other threads must wait until initialization is complete.
*/
if (umem_init_thr == thr_self())
else {
return (NULL);
}
}
/*
* Check that the arguments are reasonable
*/
return (NULL);
}
/*
* If align == 0, we set it to the minimum required alignment.
*
* If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless
* UMC_NOTOUCH was passed.
*/
if (align == 0) {
else
align = UMEM_ALIGN;
align = UMEM_ALIGN;
/*
* Get a umem_cache structure. We arrange that cp->cache_cpu[]
* is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent
* false sharing of per-CPU data.
*/
return (NULL);
}
(void) mutex_lock(&umem_flags_lock);
if (umem_flags & UMF_RANDOMIZE)
(void) mutex_unlock(&umem_flags_lock);
/*
* Make sure all the various flags are reasonable.
*/
if (bufsize >= umem_lite_minsize &&
align <= umem_lite_maxalign &&
} else {
}
}
if (cflags & UMC_NODEBUG)
if (cflags & UMC_NOTOUCH)
if (cflags & UMC_NOHASH)
if (cflags & UMC_NOMAGAZINE)
}
/*
* Set cache properties.
*/
/*
* Determine the chunk size.
*/
if (align >= UMEM_ALIGN) {
}
chunksize += sizeof (umem_buftag_t);
}
}
goto fail;
}
/*
* Now that we know the chunk size, determine the optimal slab size.
*/
if (vmp == umem_firewall_arena) {
cp->cache_mincolor = 0;
cp->cache_maxcolor =
goto fail;
}
} else {
vmp->vm_quantum);
/*
* check for overflow
*/
goto fail;
}
}
}
if (cflags & UMC_QCACHE)
cp->cache_mincolor = 0;
}
}
/*
* Initialize the rest of the slab layer.
*/
UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP);
goto fail_lock;
}
UMEM_HASH_INITIAL * sizeof (void *));
}
/*
* Initialize the depot.
*/
continue;
/*
* Initialize the CPU layer.
*/
}
/*
* Add the cache to the global list. This makes it visible
* to umem_update(), so the cache must be ready for business.
*/
(void) mutex_lock(&umem_cache_lock);
(void) mutex_unlock(&umem_cache_lock);
if (umem_ready == UMEM_READY)
return (cp);
fail:
return (NULL);
}
void
{
int cpu_seqid;
/*
* Remove the cache from the global cache list so that no new updates
* will be scheduled on its behalf, wait for any pending tasks to
* complete, purge the cache, and then destroy it.
*/
(void) mutex_lock(&umem_cache_lock);
(void) mutex_unlock(&umem_cache_lock);
if (cp->cache_buftotal != 0)
log_message("umem_cache_destroy: '%s' (%p) not empty\n",
/*
* The cache is now dead. There should be no further activity.
* We enforce this by setting land mines in the constructor and
* destructor routines that induce a segmentation fault if invoked.
*/
}
void
umem_alloc_sizes_clear(void)
{
int i;
umem_alloc_sizes[0] = UMEM_MAXBUF;
for (i = 1; i < NUM_ALLOC_SIZES; i++)
umem_alloc_sizes[i] = 0;
}
void
{
int i, j;
if (size == 0) {
log_message("size_add: cannot add zero-sized cache\n",
size, UMEM_MAXBUF);
return;
}
if (size > UMEM_MAXBUF) {
return;
}
log_message("size_add: no space in alloc_table for %d\n",
size);
return;
}
size);
}
for (i = 0; i < NUM_ALLOC_SIZES; i++) {
int cur = umem_alloc_sizes[i];
log_message("size_add: %ld already in table\n",
size);
return;
}
break;
}
for (j = NUM_ALLOC_SIZES - 1; j > i; j--)
umem_alloc_sizes[i] = size;
}
void
{
int i;
if (size == UMEM_MAXBUF) {
return;
}
for (i = 0; i < NUM_ALLOC_SIZES; i++) {
int cur = umem_alloc_sizes[i];
break;
log_message("size_remove: %ld not found in table\n",
size);
return;
}
}
for (; i + 1 < NUM_ALLOC_SIZES; i++)
umem_alloc_sizes[i] = 0;
}
static int
umem_cache_init(void)
{
int i;
for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) {
mtp = &umem_magtype[i];
mtp->mt_magsize);
return (0);
}
if (umem_slab_cache == NULL)
return (0);
if (umem_bufctl_cache == NULL)
return (0);
/*
* The size of the umem_bufctl_audit structure depends upon
* umem_stack_depth. See umem_impl.h for details on the size
* restrictions.
*/
int max_frames = UMEM_MAX_STACK_DEPTH;
}
if (umem_bufctl_audit_cache == NULL)
return (0);
if (vmem_backend & VMEM_BACKEND_MMAP)
else
if (umem_va_arena == NULL)
return (0);
0, VM_NOSLEEP);
if (umem_default_arena == NULL)
return (0);
/*
* make sure the umem_alloc table initializer is correct
*/
i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table);
/*
* Create the default caches to back umem_alloc()
*/
for (i = 0; i < NUM_ALLOC_SIZES; i++) {
if (cache_size == 0)
break; /* 0 terminates the list */
/*
* If they allocate a multiple of the coherency granularity,
* they get a coherency-granularity-aligned address.
*/
align = 64;
(long)cache_size);
return (0);
umem_alloc_caches[i] = cp;
}
/*
* Initialization cannot fail at this point. Make the caches
* visible to umem_alloc() and friends.
*/
size = UMEM_ALIGN;
for (i = 0; i < NUM_ALLOC_SIZES; i++) {
if (cache_size == 0)
break; /* 0 terminates the list */
cp = umem_alloc_caches[i];
while (size <= cache_size) {
size += UMEM_ALIGN;
}
}
return (1);
}
/*
* umem_startup() is called early on, and must be called explicitly if we're
* the standalone version.
*/
#ifdef UMEM_STANDALONE
void
#else
#pragma init(umem_startup)
static void
#endif
{
#ifdef UMEM_STANDALONE
int idx;
/* Standalone doesn't fork */
#else
umem_forkhandler_init(); /* register the fork handler */
#endif
#ifdef __lint
/* make lint happy */
#endif
#ifdef UMEM_STANDALONE
umem_init_env_ready = 0;
heap_arena = NULL;
heap_alloc = NULL;
umem_cpu_mask = 0;
sizeof (umem_cache_t));
#endif
/*
* Perform initialization specific to the way we've been compiled
* (library or standalone)
*/
vmem_startup();
}
int
umem_init(void)
{
int idx;
if (thr_self() != umem_init_thr) {
/*
* The usual case -- non-recursive invocation of umem_init().
*/
(void) mutex_lock(&umem_init_lock);
if (umem_ready != UMEM_READY_STARTUP) {
/*
* someone else beat us to initializing umem. Wait
* for them to complete, then return.
*/
while (umem_ready == UMEM_READY_INITING) {
int cancel_state;
(void) pthread_setcancelstate(
(void) cond_wait(&umem_init_cv,
(void) pthread_setcancelstate(
cancel_state, NULL);
}
(void) mutex_unlock(&umem_init_lock);
return (umem_ready == UMEM_READY);
}
ASSERT(umem_init_env_ready == 0);
umem_init_thr = thr_self();
(void) mutex_unlock(&umem_init_lock);
umem_setup_envvars(0); /* can recurse -- see below */
if (umem_init_env_ready) {
/*
* initialization was completed already
*/
ASSERT(umem_init_thr == 0);
return (umem_ready == UMEM_READY);
}
} else if (!umem_init_env_ready) {
/*
* The umem_setup_envvars() call (above) makes calls into
* the dynamic linker and directly into user-supplied code.
* Since we cannot know what that code will do, we could be
* recursively invoked (by, say, a malloc() call in the code
* itself, or in a (C++) _init section it causes to be fired).
*
* This code is where we end up if such recursion occurs. We
* first clean up any partial results in the envvar code, then
* proceed to finish initialization processing in the recursive
* call. The original call will notice this, and return
* immediately.
*/
} else {
"recursive allocation while initializing umem\n");
}
umem_init_env_ready = 1;
/*
* From this point until we finish, recursion into umem_init() will
* cause a umem_panic().
*/
/* LINTED constant condition */
if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) {
umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n",
sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE);
}
/*
* load tunables from environment
*/
if (issetugid())
umem_mtbf = 0;
/*
* set up vmem
*/
if (!(umem_flags & UMF_AUDIT))
if (umem_internal_arena == NULL)
goto fail;
0, VM_NOSLEEP);
goto fail;
if (umem_firewall_arena == NULL)
goto fail;
goto fail;
if (umem_max_ncpus > CPUHINT_MAX())
if (umem_max_ncpus == 0)
umem_max_ncpus = 1;
goto fail;
}
if (umem_maxverify == 0)
if (umem_minfirewall == 0)
/*
* Set up updating and reaping
*/
#ifndef UMEM_STANDALONE
#endif
/*
* Set up logging -- failure here is okay, since it will just disable
* the logs
*/
if (umem_logging) {
}
/*
* Set up caches -- if successful, initialization cannot fail, since
* allocations from other threads can now succeed.
*/
if (umem_cache_init() == 0) {
log_message("unable to create initial caches\n");
goto fail;
}
/*
* initialization done, ready to go
*/
(void) mutex_lock(&umem_init_lock);
umem_init_thr = 0;
(void) cond_broadcast(&umem_init_cv);
(void) mutex_unlock(&umem_init_lock);
return (1);
fail:
log_message("umem initialization failed\n");
(void) mutex_lock(&umem_init_lock);
umem_init_thr = 0;
(void) cond_broadcast(&umem_init_cv);
(void) mutex_unlock(&umem_init_lock);
return (0);
}