kmem.c revision 7d692464eb868dd074286c876190c055095ba89d
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Kernel memory allocator, as described in the following two papers:
*
* Jeff Bonwick,
* The Slab Allocator: An Object-Caching Kernel Memory Allocator.
* Proceedings of the Summer 1994 Usenix Conference.
*
* Jeff Bonwick and Jonathan Adams,
* Magazines and vmem: Extending the Slab Allocator to Many CPUs and
* Arbitrary Resources.
* Proceedings of the 2001 Usenix Conference.
*/
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <vm/seg_kmem.h>
extern void streams_msg_init(void);
extern int segkp_fromheap;
extern void segkp_cache_free(void);
static struct kmem_cache_kstat {
	/* one kstat_named_t member per statistic below; member names elided */
} kmem_cache_kstat = {
{ "buf_size", KSTAT_DATA_UINT64 },
{ "align", KSTAT_DATA_UINT64 },
{ "chunk_size", KSTAT_DATA_UINT64 },
{ "slab_size", KSTAT_DATA_UINT64 },
{ "alloc", KSTAT_DATA_UINT64 },
{ "alloc_fail", KSTAT_DATA_UINT64 },
{ "free", KSTAT_DATA_UINT64 },
{ "depot_alloc", KSTAT_DATA_UINT64 },
{ "depot_free", KSTAT_DATA_UINT64 },
{ "depot_contention", KSTAT_DATA_UINT64 },
{ "slab_alloc", KSTAT_DATA_UINT64 },
{ "slab_free", KSTAT_DATA_UINT64 },
{ "buf_constructed", KSTAT_DATA_UINT64 },
{ "buf_avail", KSTAT_DATA_UINT64 },
{ "buf_inuse", KSTAT_DATA_UINT64 },
{ "buf_total", KSTAT_DATA_UINT64 },
{ "buf_max", KSTAT_DATA_UINT64 },
{ "slab_create", KSTAT_DATA_UINT64 },
{ "slab_destroy", KSTAT_DATA_UINT64 },
{ "vmem_source", KSTAT_DATA_UINT64 },
{ "hash_size", KSTAT_DATA_UINT64 },
{ "hash_lookup_depth", KSTAT_DATA_UINT64 },
{ "hash_rescale", KSTAT_DATA_UINT64 },
{ "full_magazines", KSTAT_DATA_UINT64 },
{ "empty_magazines", KSTAT_DATA_UINT64 },
{ "magazine_size", KSTAT_DATA_UINT64 },
};
static kmutex_t kmem_cache_kstat_lock;
/*
* The default set of caches to back kmem_alloc().
* These sizes should be reevaluated periodically.
*
* We want allocations that are multiples of the coherency granularity
* (64 bytes) to be satisfied from a cache which is a multiple of 64
* bytes, so that it will be 64-byte aligned. For all multiples of 64,
* the next kmem_cache_size greater than or equal to it must be a
* multiple of 64.
*/
static const int kmem_alloc_sizes[] = {
1 * 8,
2 * 8,
3 * 8,
4 * 8, 5 * 8, 6 * 8, 7 * 8,
4 * 16, 5 * 16, 6 * 16, 7 * 16,
4 * 32, 5 * 32, 6 * 32, 7 * 32,
4 * 64, 5 * 64, 6 * 64, 7 * 64,
4 * 128, 5 * 128, 6 * 128, 7 * 128,
4096 * 3,
8192 * 2,
};
#define KMEM_MAXBUF 16384
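/*
* A minimal sketch (not part of the original file; kmem_alloc_table[] and
* KMEM_ALIGN_SHIFT are assumptions taken from kmem_impl.h) of how
* kmem_alloc() maps a request size to one of the caches above. A lookup
* table with one entry per KMEM_ALIGN-sized step makes the translation O(1):
*
*	size_t index = (size - 1) >> KMEM_ALIGN_SHIFT;
*
*	if (index < KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
*		cp = kmem_alloc_table[index];	(smallest cache >= size)
*	else
*		(fall through to the oversize arena)
*/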
static kmem_magtype_t kmem_magtype[] = {
{ 1, 8, 3200, 65536 },
{ 3, 16, 256, 32768 },
{ 7, 32, 64, 16384 },
{ 15, 64, 0, 8192 },
{ 31, 64, 0, 4096 },
{ 47, 64, 0, 2048 },
{ 63, 64, 0, 1024 },
{ 95, 64, 0, 512 },
{ 143, 64, 0, 0 },
};
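/*
* Each row above initializes one kmem_magtype_t. Reading the columns left
* to right (field names assumed from kmem_impl.h): mt_magsize is the number
* of rounds (objects) per magazine, mt_align is the magazine alignment, and
* mt_minbuf/mt_maxbuf bound the object sizes a magazine type serves, so
* small objects get large magazines and large objects get small ones.
*/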
static uint32_t kmem_reaping;
static uint32_t kmem_reaping_idspace;
/*
* kmem tunables
*/
#ifdef DEBUG
int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
#else
int kmem_flags = 0;
#endif
int kmem_ready;
static kmem_cache_t *kmem_slab_cache;
static kmem_cache_t *kmem_bufctl_cache;
static kmem_cache_t *kmem_bufctl_audit_cache;
static taskq_t *kmem_taskq;
static kmutex_t kmem_flags_lock;
static vmem_t *kmem_metadata_arena;
static vmem_t *kmem_cache_arena;
static vmem_t *kmem_hash_arena;
static vmem_t *kmem_log_arena;
static vmem_t *kmem_oversize_arena;
static vmem_t *kmem_va_arena;
static vmem_t *kmem_default_arena;
static vmem_t *kmem_firewall_va_arena;
static vmem_t *kmem_firewall_arena;
static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
if ((count) > 0) { \
/* memmove() the old entries down one notch */ \
}
#define KMERR_MODIFIED 0 /* buffer modified while on freelist */
/*
* The remaining KMERR_* codes used by kmem_error() below (REDZONE, BADADDR,
* DUPFREE, BADBUFTAG, BADBUFCTL, BADCACHE, BADSIZE, BADBASE) are elided here.
*/
struct {
int kmp_error; /* type of kmem error */
void *kmp_buffer; /* buffer that induced panic */
void *kmp_realbuf; /* real start address for buffer */
/* additional diagnostic members elided */
} kmem_panic_info;
static void
copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
}
static void *
verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
return (buf);
return (NULL);
}
static void *
verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
{
return (buf);
}
}
return (NULL);
}
static void
kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
{
tqflag);
else
}
static void
kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
{
continue;
tqflag);
else
}
}
/*
* Debugging support. Given a buffer address, find its slab.
*/
static kmem_slab_t *
kmem_findslab(kmem_cache_t *cp, void *buf)
{
return (sp);
}
}
return (NULL);
}
static void
kmem_error(uint32_t error, kmem_cache_t *cparg, void *bufarg)
{
kmem_logging = 0; /* stop logging when a bad thing happens */
break;
}
}
} else {
else
break;
}
}
}
printf("kernel memory allocator: ");
switch (error) {
case KMERR_MODIFIED:
printf("buffer modified after being freed\n");
printf("modification occurred at offset 0x%lx "
"(0x%llx replaced by 0x%llx)\n",
break;
case KMERR_REDZONE:
printf("redzone violation: write past end of buffer\n");
break;
case KMERR_BADADDR:
printf("invalid free: buffer not in cache\n");
break;
case KMERR_DUPFREE:
printf("duplicate free: buffer freed twice\n");
break;
case KMERR_BADBUFTAG:
printf("boundary tag corrupted\n");
printf("bcp ^ bxstat = %lx, should be %lx\n",
break;
case KMERR_BADBUFCTL:
printf("bufctl corrupted\n");
break;
case KMERR_BADCACHE:
printf("buffer freed to wrong cache\n");
break;
case KMERR_BADSIZE:
printf("bad free: free size (%u) != alloc size (%u)\n",
break;
case KMERR_BADBASE:
printf("bad free: free address (%p) != alloc address (%p)\n",
break;
}
printf("buffer=%p bufctl=%p cache: %s\n",
error != KMERR_BADBUFCTL) {
int d;
printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
}
}
if (kmem_panic > 0)
panic("kernel heap corruption detected");
if (kmem_panic == 0)
debug_enter(NULL);
}
static kmem_log_header_t *
kmem_log_init(size_t logsize)
{
int i;
/*
* Make sure that lhp->lh_cpu[] is nicely aligned
* to prevent false sharing of cache lines.
*/
for (i = 0; i < max_ncpus; i++) {
}
return (lhp);
}
static void *
kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
{
void *logspace;
return (NULL);
}
return (logspace);
}
{ \
}
static void
{
}
/*
* Create a new slab for cache cp.
*/
static kmem_slab_t *
kmem_slab_create(kmem_cache_t *cp, int kmflag)
{
goto vmem_alloc_failure;
if (cache_flags & KMF_HASH) {
goto slab_alloc_failure;
} else {
}
sp->slab_refcnt = 0;
while (chunks-- != 0) {
if (cache_flags & KMF_HASH) {
goto bufctl_alloc_failure;
if (cache_flags & KMF_AUDIT) {
}
} else {
}
if (cache_flags & KMF_BUFTAG) {
if (cache_flags & KMF_DEADBEEF) {
cp->cache_verify);
}
}
}
return (sp);
}
return (NULL);
}
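/*
* A minimal sketch of the slab coloring performed above (assuming the
* cache_color/cache_mincolor/cache_maxcolor fields of kmem_cache_t): each
* new slab starts its first buffer at a slightly different offset, so that
* buffers from different slabs do not all contend for the same cache lines:
*
*	color = cp->cache_color + cp->cache_align;
*	if (color > cp->cache_maxcolor)
*		color = cp->cache_mincolor;
*	cp->cache_color = color;
*/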
/*
* Destroy a slab.
*/
static void
kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
{
}
}
}
/*
* Allocate a raw (unconstructed) buffer from cp's slab layer.
*/
static void *
kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
{
void *buf;
cp->cache_slab_alloc++;
/*
* The freelist is empty. Create a new slab.
*/
return (NULL);
cp->cache_slab_create++;
}
sp->slab_refcnt++;
/*
* If we're taking the last buffer in the slab,
* remove the slab from the cache's freelist.
*/
}
/*
* Add buffer to allocated-address hash table.
*/
*hash_bucket = bcp;
}
} else {
}
return (buf);
}
/*
* Free a raw (unconstructed) buffer to cp's slab layer.
*/
static void
kmem_slab_free(kmem_cache_t *cp, void *buf)
{
cp->cache_slab_free++;
/*
* Look up buffer in allocated-address hash table.
*/
break;
}
cp->cache_lookup_depth++;
}
} else {
}
return;
}
cp->cache_contents);
}
/*
* If this slab isn't currently on the freelist, put it there.
*/
}
if (--sp->slab_refcnt == 0) {
/*
* There are no outstanding allocations from this slab,
* so we can reclaim the memory.
*/
cp->cache_slab_destroy++;
return;
}
}
static int
kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
    caddr_t caller)
{
return (-1);
}
return (-1);
}
return (-1);
}
else
} else {
construct = 1;
cp->cache_verify)) {
return (-1);
}
}
}
} else {
mtbf = 0;
}
return (-1);
}
}
}
return (0);
}
static int
kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
{
return (-1);
}
else
return (-1);
}
return (-1);
}
return (-1);
}
}
}
}
return (0);
}
/*
* Free each object in magazine mp to cp's slab layer, and free mp itself.
*/
static void
kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
{
int round;
continue;
}
}
}
}
}
/*
* Allocate a magazine from the depot.
*/
static kmem_magazine_t *
kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
{
/*
* If we can't get the depot lock without contention,
* update our contention count. We use the depot
* contention rate to determine whether we need to
* increase the magazine size for better scalability.
*/
}
}
return (mp);
}
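/*
* A minimal sketch of the contention-counting pattern described above
* (assuming the cache_depot_lock and cache_depot_contention fields of
* kmem_cache_t):
*
*	if (!mutex_tryenter(&cp->cache_depot_lock)) {
*		mutex_enter(&cp->cache_depot_lock);
*		cp->cache_depot_contention++;
*	}
*/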
/*
* Free a magazine to the depot.
*/
static void
kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
{
}
/*
* Update the working set statistics for cp's depot.
*/
static void
kmem_depot_ws_update(kmem_cache_t *cp)
{
}
/*
* Reap all magazines that have fallen out of the depot's working set.
*/
static void
kmem_depot_ws_reap(kmem_cache_t *cp)
{
long reap;
}
static void
kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
{
}
/*
* Allocate a constructed object from cache cp.
*/
void *
kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
{
void *buf;
for (;;) {
/*
* If there's an object available in the current CPU's
* loaded magazine, just take it and return.
*/
caller()) == -1) {
if (kmflag & KM_NOSLEEP)
return (NULL);
continue;
}
return (buf);
}
/*
* The loaded magazine is empty. If the previously loaded
* magazine was full, exchange them and try again.
*/
if (ccp->cc_prounds > 0) {
continue;
}
/*
* If the magazine layer is disabled, break out now.
*/
if (ccp->cc_magsize == 0)
break;
/*
* Try to get a full magazine from the depot.
*/
ccp->cc_ploaded);
continue;
}
/*
* There are no full magazines in the depot,
* so fall through to the slab layer.
*/
break;
}
/*
* We couldn't allocate a constructed object from the magazine layer,
* so get a raw buffer from the slab layer and apply its constructor.
*/
return (NULL);
/*
* Make kmem_cache_alloc_debug() apply the constructor for us.
*/
caller()) == -1) {
if (kmflag & KM_NOSLEEP)
return (NULL);
/*
* kmem_cache_alloc_debug() detected corruption
* but didn't panic (kmem_panic <= 0). Try again.
*/
}
return (buf);
}
return (NULL);
}
return (buf);
}
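/*
* A condensed sketch of the magazine-layer fast path in the loop above
* (per-CPU field names assumed from kmem_impl.h): with the per-CPU lock
* held, allocation is just popping a round off the loaded magazine:
*
*	if (ccp->cc_rounds > 0) {
*		buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
*		ccp->cc_alloc++;
*		mutex_exit(&ccp->cc_lock);
*		return (buf);
*	}
*/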
/*
* Free a constructed object to cache cp.
*/
void
kmem_cache_free(kmem_cache_t *cp, void *buf)
{
return;
for (;;) {
/*
* If there's a slot available in the current CPU's
* loaded magazine, just put the object there and return.
*/
return;
}
/*
* The loaded magazine is full. If the previously loaded
* magazine was empty, exchange them and try again.
*/
if (ccp->cc_prounds == 0) {
continue;
}
/*
* If the magazine layer is disabled, break out now.
*/
if (ccp->cc_magsize == 0)
break;
/*
* Try to get an empty magazine from the depot.
*/
ccp->cc_ploaded);
continue;
}
/*
* There are no empty magazines in the depot,
* so try to allocate a new one. We must drop all locks
* across kmem_cache_alloc() because lower layers may
* attempt to allocate from this cache.
*/
/*
* We successfully allocated an empty magazine.
* However, we had to drop ccp->cc_lock to do it,
* so the cache's magazine size may have changed.
* If so, free the magazine and try again.
*/
continue;
}
/*
* We got a magazine of the right size. Add it to
* the depot and try the whole dance again.
*/
continue;
}
/*
* We couldn't allocate an empty magazine,
* so fall through to the slab layer.
*/
break;
}
/*
* We couldn't free our constructed object to the magazine layer,
* so apply its destructor and free it to the slab layer.
* Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
* kmem_cache_free_debug() will have already applied the destructor.
*/
} else {
}
}
}
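/*
* The corresponding free fast path (same assumptions as the allocation
* sketch above): with the per-CPU lock held, freeing is just pushing the
* object into the next free round of the loaded magazine:
*
*	if (ccp->cc_rounds < ccp->cc_magsize) {
*		ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
*		ccp->cc_free++;
*		mutex_exit(&ccp->cc_lock);
*		return;
*	}
*/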
void *
kmem_zalloc(size_t size, int kmflag)
{
void *buf;
kmem_lite_count, caller());
}
}
}
} else {
}
return (buf);
}
void *
kmem_alloc(size_t size, int kmflag)
{
void *buf;
caller());
}
}
return (buf);
}
if (size == 0)
return (NULL);
return (buf);
}
void
kmem_free(void *buf, size_t size)
{
return;
}
} else {
}
return;
}
return;
}
caller());
}
}
} else {
return;
}
}
void *
kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
{
void *addr;
/*
* Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
* vm_quantum will cause integer wraparound. Check for this, and
* blow off the firewall page in this case. Note that such a
* giant allocation (the entire kernel address space) can never
* be satisfied, so it will either fail immediately (VM_NOSLEEP)
* or sleep forever (VM_SLEEP). Thus, there is no need for a
* corresponding check in kmem_firewall_va_free().
*/
/*
* While boot still owns resource management, make sure that this
* redzone virtual address allocation is properly accounted for in
* OBP's "virtual-memory" "available" lists, because we're
* effectively claiming them for a red zone. If we don't do this,
* the available lists become too fragmented and too large for the
* current boot/kernel memory list interface.
*/
return (addr);
}
void
kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
{
}
/*
* Try to allocate at least `size' bytes of memory without sleeping or
* panicking. Return actual allocated size in `asize'. If allocation failed,
* try final allocation with sleep or panic allowed.
*/
void *
kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
{
void *p;
do {
if (p != NULL)
return (p);
*asize += KMEM_ALIGN;
}
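/*
* Illustrative usage (not from the original file): a caller that can use a
* slightly larger buffer than requested must remember the actual size and
* free exactly that much:
*
*	size_t asize;
*	char *buf = kmem_alloc_tryhard(len, &asize, KM_SLEEP);
*	(use up to asize bytes)
*	kmem_free(buf, asize);
*/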
/*
* Reclaim all unused memory from a cache.
*/
static void
kmem_cache_reap(kmem_cache_t *cp)
{
/*
* Ask the cache's owner to free some memory if possible.
* The idea is to handle things like the inode cache, which
* typically sits on a bunch of memory that it doesn't truly
* *need*. Reclaim policy is entirely up to the owner; this
* callback is just an advisory plea for help.
*/
}
static void
kmem_reap_timeout(void *flag_arg)
{
*flag = 0;
}
static void
kmem_reap_done(void *flag)
{
}
static void
kmem_reap_start(void *flag)
{
if (flag == &kmem_reaping) {
/*
* if we have segkp under heap, reap segkp cache.
*/
if (segkp_fromheap)
}
else
/*
* We use taskq_dispatch() to schedule a timeout to clear
* the flag so that kmem_reap() becomes self-throttling:
* we won't reap again until the current reap completes *and*
* at least kmem_reap_interval ticks have elapsed.
*/
}
static void
kmem_reap_common(void *flag_arg)
{
return;
/*
* It may not be kosher to do memory allocation when a reap is called
* (for example, if vmem_populate() is in the call chain).
* So we start the reap going with a TQ_NOALLOC dispatch. If the
* dispatch fails, we reset the flag, and the next reap will try again.
*/
*flag = 0;
}
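/*
* A minimal sketch of that dispatch (flag is the kmem_reaping or
* kmem_reaping_idspace word defined near the top of this file):
*
*	if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
*		*flag = 0;	(dispatch failed; the next reap retries)
*/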
/*
* Reclaim all unused memory from all caches. Called from the VM system
* when memory gets tight.
*/
void
kmem_reap(void)
{
}
/*
* Reclaim all unused memory from identifier arenas, called when a vmem
* arena not backed by memory is exhausted. Since reaping memory-backed caches
* cannot help with identifier exhaustion, we avoid both a large amount of
* work and unwanted side-effects from reclaim callbacks.
*/
void
kmem_reap_idspace(void)
{
}
/*
* Purge all magazines from a cache and set its magazine limit to zero.
* All calls are serialized by the kmem_taskq lock, except for the final
* call from kmem_cache_destroy().
*/
static void
kmem_cache_magazine_purge(kmem_cache_t *cp)
{
ccp->cc_magsize = 0;
if (mp)
if (pmp)
}
/*
* Updating the working set statistics twice in a row has the
* effect of setting the working set size to zero, so everything
* is eligible for reaping.
*/
}
/*
* Enable per-cpu magazines on a cache.
*/
static void
kmem_cache_magazine_enable(kmem_cache_t *cp)
{
int cpu_seqid;
return;
}
}
/*
* Reap (almost) everything right now. See kmem_cache_magazine_purge()
* for explanation of the back-to-back kmem_depot_ws_update() calls.
*/
void
kmem_cache_reap_now(kmem_cache_t *cp)
{
(void) taskq_dispatch(kmem_taskq,
}
/*
* Recompute a cache's magazine size. The trade-off is that larger magazines
* provide a higher transfer rate with the depot, while smaller magazines
* reduce memory consumption. Magazine resizing is an expensive operation;
* it should not be done frequently.
*
* Changes to the magazine size are serialized by the kmem_taskq lock.
*
* Note: at present this only grows the magazine size. It might be useful
* to allow shrinkage too.
*/
static void
kmem_cache_magazine_resize(kmem_cache_t *cp)
{
}
}
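/*
* A minimal sketch of the growth step (assuming cache_magtype points into
* the kmem_magtype[] table near the top of this file): stepping to the next
* table entry yields a larger magazine, after a purge so that no magazines
* of the old size linger:
*
*	if (cp->cache_chunksize < mtp->mt_maxbuf) {
*		kmem_cache_magazine_purge(cp);
*		mutex_enter(&cp->cache_depot_lock);
*		cp->cache_magtype = ++mtp;
*		mutex_exit(&cp->cache_depot_lock);
*		kmem_cache_magazine_enable(cp);
*	}
*/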
/*
* Rescale a cache's hash table, so that the table size is roughly the
* cache size. We want the average lookup time to be extremely small.
*/
static void
kmem_hash_rescale(kmem_cache_t *cp)
{
return;
return;
cp->cache_rescale++;
for (h = 0; h < old_size; h++) {
*hash_bucket = bcp;
}
}
}
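/*
* The rehash above follows the usual chained-hash pattern (a sketch; bufctl
* field names and the KMEM_HASH() macro are assumed from kmem_impl.h): walk
* every chain of the old table and prepend each bufctl to its bucket in the
* new table:
*
*	for (h = 0; h < old_size; h++) {
*		for (bcp = old_table[h]; bcp != NULL; bcp = next) {
*			next = bcp->bc_next;
*			hash_bucket = KMEM_HASH(cp, bcp->bc_addr);
*			bcp->bc_next = *hash_bucket;
*			*hash_bucket = bcp;
*		}
*	}
*/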
/*
* Perform periodic maintenance on a cache: hash rescaling,
* depot working-set update, and magazine resizing.
*/
static void
kmem_cache_update(kmem_cache_t *cp)
{
int need_hash_rescale = 0;
int need_magazine_resize = 0;
/*
* If the cache has become much larger or smaller than its hash table,
* fire off a request to rescale the hash table.
*/
need_hash_rescale = 1;
/*
* Update the depot working set statistics.
*/
/*
* If there's a lot of contention in the depot,
* increase the magazine size.
*/
(int)(cp->cache_depot_contention -
need_magazine_resize = 1;
if (need_hash_rescale)
(void) taskq_dispatch(kmem_taskq,
if (need_magazine_resize)
(void) taskq_dispatch(kmem_taskq,
}
static void
kmem_update_timeout(void *dummy)
{
static void kmem_update(void *);
}
static void
kmem_update(void *dummy)
{
/*
* We use taskq_dispatch() to reschedule the timeout so that
* kmem_update() becomes self-throttling: it won't schedule
* new tasks until all previous tasks have completed.
*/
}
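/*
* A minimal sketch of the self-throttling reschedule (assuming
* kmem_update_timeout() simply arms a timeout(9F) that re-invokes
* kmem_update() after kmem_reap_interval ticks):
*
*	if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy,
*	    TQ_NOSLEEP))
*		kmem_update_timeout(NULL);
*/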
static int
kmem_cache_kstat_update(kstat_t *ksp, int rw)
{
int cpu_seqid;
if (rw == KSTAT_WRITE)
return (EACCES);
cpu_buf_avail = 0;
if (ccp->cc_prounds > 0)
}
return (0);
}
/*
* Return a named statistic about a particular cache.
* This shouldn't be called very often, so it's currently designed for
* simplicity (leverages existing kstat support) rather than efficiency.
*/
uint64_t
kmem_cache_stat(kmem_cache_t *cp, char *name)
{
int i;
break;
}
}
}
return (value);
}
/*
* Return an estimate of currently available kernel heap memory.
* On 32-bit systems, physical memory may exceed virtual memory,
* so we just truncate the result at 1GB.
*/
size_t
kmem_avail(void)
{
}
/*
* Return the maximum amount of memory that is (in theory) allocatable
* from the heap. This may be used as an estimate only, since there
* is no guarantee this space will still be available when an allocation
* request is made, nor that the space can be allocated in one big request,
* due to kernel heap fragmentation.
*/
size_t
kmem_maxavail(void)
{
}
/*
* Indicate whether memory-intensive kmem debugging is enabled.
*/
int
kmem_debugging(void)
{
}
kmem_cache_t *
kmem_cache_create(
char *name, /* descriptive name for this cache */
size_t bufsize, /* size of the objects it manages */
size_t align, /* required object alignment */
int (*constructor)(void *, void *, int), /* object constructor */
void (*destructor)(void *, void *), /* object destructor */
void (*reclaim)(void *), /* memory reclaim callback */
void *private, /* pass-thru arg for constr/destr/reclaim */
vmem_t *vmp, /* vmem source for slab allocation */
int cflags) /* cache creation flags */
{
int cpu_seqid;
#ifdef DEBUG
/*
* Cache names should conform to the rules for valid C identifiers
*/
if (!strident_valid(name)) {
cmn_err(CE_CONT,
"kmem_cache_create: '%s' is an invalid cache name\n"
"cache names must conform to the rules for "
"C identifiers\n", name);
}
#endif /* DEBUG */
/*
* If this kmem cache has an identifier vmem arena as its source, mark
* it such to allow kmem_reap_idspace().
*/
cflags |= KMC_IDENTIFIER;
/*
* Get a kmem_cache structure. We arrange that cp->cache_cpu[]
* is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
* false sharing of per-CPU data.
*/
if (align == 0)
align = KMEM_ALIGN;
/*
* If we're not at least KMEM_ALIGN aligned, we can't use free
* memory to hold bufctl information (because we can't safely
* perform word loads and stores on it).
*/
if (align < KMEM_ALIGN)
cflags |= KMC_NOTOUCH;
if (kmem_flags & KMF_RANDOMIZE)
/*
* Make sure all the various flags are reasonable.
*/
if (bufsize >= kmem_lite_minsize &&
align <= kmem_lite_maxalign &&
} else {
}
}
if (cflags & KMC_NODEBUG)
if (cflags & KMC_NOTOUCH)
if (cflags & KMC_NOHASH)
if (cflags & KMC_NOMAGAZINE)
}
/*
* Set cache properties.
*/
/*
* Determine the chunk size.
*/
if (align >= KMEM_ALIGN) {
}
else
chunksize += sizeof (kmem_buftag_t);
}
}
/*
* Now that we know the chunk size, determine the optimal slab size.
*/
if (vmp == kmem_firewall_arena) {
cp->cache_mincolor = 0;
cp->cache_maxcolor =
} else {
vmp->vm_quantum);
}
}
if (cflags & KMC_QCACHE)
cp->cache_mincolor = 0;
}
}
/*
* Initialize the rest of the slab layer.
*/
KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
KMEM_HASH_INITIAL * sizeof (void *));
}
/*
* Initialize the depot.
*/
continue;
/*
* Initialize the CPU layer.
*/
}
/*
* Create the cache's kstats.
*/
"kmem_cache", KSTAT_TYPE_NAMED,
sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL)) != NULL) {
}
/*
* Add the cache to the global list. This makes it visible
* to kmem_update(), so the cache must be ready for business.
*/
if (kmem_ready)
return (cp);
}
void
kmem_cache_destroy(kmem_cache_t *cp)
{
int cpu_seqid;
/*
* Remove the cache from the global cache list so that no one else
* can schedule tasks on its behalf, wait for any pending tasks to
* complete, purge the cache, and then destroy it.
*/
if (kmem_taskq != NULL)
if (cp->cache_buftotal != 0)
/*
* The cache is now dead. There should be no further activity.
* We enforce this by setting land mines in the constructor and
* destructor routines that induce a kernel text fault if invoked.
*/
}
/*ARGSUSED*/
static int
kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
{
if (what == CPU_UNCONFIG) {
}
return (0);
}
static void
kmem_cache_init(int pass, int use_large_pages)
{
int i;
for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
mtp = &kmem_magtype[i];
}
if (pass == 2) {
if (use_large_pages) {
0, VM_SLEEP);
} else {
0, VM_SLEEP);
}
} else {
/*
* During the first pass, the kmem_alloc_* caches
* are treated as metadata.
*/
}
/*
* Set up the default caches to back kmem_alloc()
*/
size = KMEM_ALIGN;
for (i = 0; i < sizeof (kmem_alloc_sizes) / sizeof (int); i++) {
/*
* If they allocate a multiple of the coherency granularity,
* they get a coherency-granularity-aligned address.
*/
align = 64;
while (size <= cache_size) {
size += KMEM_ALIGN;
}
}
}
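/*
* A minimal sketch of how the loop above populates the size-to-cache
* lookup table (kmem_alloc_table[] assumed, as in the sketch following
* kmem_alloc_sizes[]): every KMEM_ALIGN-sized step up to each cache's
* buffer size is pointed at that cache:
*
*	while (size <= cache_size) {
*		kmem_alloc_table[(size - 1) >> KMEM_ALIGN_SHIFT] = cp;
*		size += KMEM_ALIGN;
*	}
*/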
void
kmem_init(void)
{
int old_kmem_flags = kmem_flags;
int use_large_pages = 0;
kstat_init();
/*
* Small-memory systems (< 24 MB) can't handle kmem_flags overhead.
*/
kmem_flags = 0;
/*
* Don't do firewalled allocations if the heap is less than 1TB
* (i.e. on a 32-bit kernel); the resulting VM_NEXTFIT allocations
* would create too much fragmentation in a small heap.
*/
#if defined(_LP64)
#else
#endif
/* LINTED */
VM_SLEEP);
0, VM_SLEEP);
/* temporary oversize arena for mod_read_system_file */
/*
* Reading /etc/system is a chicken-and-egg business: the system file may
* specify kmem tunables, but parsing the system file itself
* needs to use the allocator. The simplest solution is to create
* the standard caches first, read /etc/system, destroy the
* caches we just created, and then create them all again in light
* of the (possibly) new kmem_flags and other kmem tunables.
*/
*/
kmem_cache_init(1, 0);
if (old_kmem_flags & KMF_STICKY)
if (!(kmem_flags & KMF_AUDIT))
if (kmem_maxverify == 0)
if (kmem_minfirewall == 0)
/*
* give segkmem a chance to figure out if we are using large pages
* for the kernel heap
*/
/*
* To protect against corruption, we keep the actual number of callers
* KMF_LITE records separate from the tunable. We arbitrarily clamp
* this to 16, since the overhead for small buffers quickly gets out
* of hand.
*
* The real limit would depend on the needs of the largest KMC_NOHASH
* cache.
*/
/*
* Normally, we firewall oversized allocations when possible, but
* if we are using large pages for kernel memory, and we don't have
* any non-LITE debugging flags set, we want to allocate oversized
* buffers from large pages, and so skip the firewalling.
*/
if (use_large_pages &&
0, VM_SLEEP);
} else {
}
if (kmem_transaction_log_size == 0)
}
if (kmem_content_log_size == 0)
}
/*
* Initialize STREAMS message caches so allocb() is available.
* This allows us to initialize the logging framework (cmn_err(9F),
* strlog(9F), etc) so we can start recording messages.
*/
/*
* Initialize the ZSD framework in Zones so modules loaded henceforth
* can register their callbacks.
*/
log_init();
taskq_init();
/*
* Warn about invalid or dangerous values of kmem_flags.
* Always warn about unsupported values.
*/
KMF_CONTENTS | KMF_LITE)) != 0) ||
"See the Solaris Tunable Parameters Reference Manual.",
#ifdef DEBUG
if ((kmem_flags & KMF_DEBUG) == 0)
#else
/*
* For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
* KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
* if KMF_AUDIT is set). We should warn the user about the performance
* penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
* isn't set (since that disables AUDIT).
*/
if (!(kmem_flags & KMF_LITE) &&
"enabled (kmem_flags = 0x%x). Performance degradation "
"and large memory overhead possible. See the Solaris "
"Tunable Parameters Reference Manual.", kmem_flags);
#endif /* not DEBUG */
kmem_ready = 1;
/*
* Initialize the platform-specific aligned/DMA memory allocator.
*/
ka_init();
/*
* Initialize 32-bit ID cache.
*/
id32_init();
}
void
kmem_thread_init(void)
{
}
void
kmem_mp_init(void)
{
}