/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*/
/*
* Big Theory Statement for the virtual memory allocator.
*
* For a more complete description of the main ideas, see:
*
* Jeff Bonwick and Jonathan Adams,
*
* Magazines and vmem: Extending the Slab Allocator to Many CPUs and
* Arbitrary Resources.
*
* Proceedings of the 2001 Usenix Conference.
* Available as http://www.usenix.org/event/usenix01/bonwick.html
*
*
* 1. General Concepts
* -------------------
*
* 1.1 Overview
* ------------
* We divide the kernel address space into a number of logically distinct
* pieces, or *arenas*: text, data, heap, stack, and so on. Within these
* arenas we often subdivide further; for example, we use heap addresses
* not only for the kernel heap (kmem_alloc() space), but also for DVMA,
* bp_mapin(), /dev/kmem, and even some device mappings.
* The kernel address space, therefore, is most accurately described as
* a tree of arenas in which each node of the tree *imports* some subset
* of its parent. The virtual memory allocator manages these arenas and
* supports their natural hierarchical structure.
*
* 1.2 Arenas
* ----------
* An arena is nothing more than a set of integers. These integers most
* commonly represent virtual addresses, but in fact they can represent
* anything at all. For example, we could use an arena containing the
* integers minpid through maxpid to allocate process IDs. vmem_create()
* and vmem_destroy() create and destroy vmem arenas. In order to
* differentiate between arenas used for addresses and arenas used for
* identifiers, the VMC_IDENTIFIER flag is passed to vmem_create(). This
* prevents identifier exhaustion from being diagnosed as general memory
* failure.
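*
* For example, an identifier arena for small integer IDs could be created
* and used roughly as follows (an illustrative sketch; "foo_id_arena" and
* the [1, 101) range are hypothetical):
*
*	foo_id_arena = vmem_create("foo_id", (void *)1, 100, 1,
*	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
*	id = (uint_t)(uintptr_t)vmem_alloc(foo_id_arena, 1, VM_SLEEP);
*	vmem_free(foo_id_arena, (void *)(uintptr_t)id, 1);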
*
* 1.3 Spans
* ---------
* We represent the integers in an arena as a collection of *spans*, or
* contiguous ranges of integers. For example, the kernel heap consists
* of just one span: [kernelheap, ekernelheap). Spans can be added to an
* arena in two ways: explicitly, by vmem_add(), or implicitly, by
* importing, as described in Section 1.5 below.
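*
* An explicit span addition looks roughly like this (a sketch; "foo_arena",
* base, and len are assumed):
*
*	(void) vmem_add(foo_arena, (void *)base, len, VM_SLEEP);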
*
* 1.4 Segments
* ------------
* Spans are subdivided into *segments*, each of which is either allocated
* or free. A segment, like a span, is a contiguous range of integers.
* Each allocated segment [addr, addr + size) represents exactly one
* vmem_alloc(size) that returned addr. Free segments represent the space
* between allocated segments. If two free segments are adjacent, we
* coalesce them into one larger segment; that is, if segments [a, b) and
* [b, c) are both free, we merge them into a single segment [a, c).
* The segments within a span are linked together in increasing-address order
* so we can easily determine whether coalescing is possible.
*
* Segments never cross span boundaries. When all segments within
* an imported span become free, we return the span to its source.
*
* 1.5 Imported Memory
* -------------------
* As mentioned in the overview, some arenas are logical subsets of
* other arenas. For example, kmem_va_arena (a virtual address cache
* that satisfies most kmem_slab_create() requests) is just a subset
* of heap_arena (the kernel heap) that provides caching for the most
* common slab sizes. When kmem_va_arena runs out of virtual memory,
* it *imports* more from the heap; we say that heap_arena is the
* *vmem source* for kmem_va_arena. vmem_create() allows you to
* specify any existing vmem arena as the source for your new arena.
* Topologically, since every arena is a child of at most one source,
* the set of all arenas forms a collection of trees.
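*
* A child arena that imports page-sized spans from heap_arena might be
* created along these lines (a sketch; the arena name and qcache_max
* value are illustrative only):
*
*	foo_va_arena = vmem_create("foo_va", NULL, 0, PAGESIZE,
*	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, VM_SLEEP);
*
* Here vmem_alloc() and vmem_free() serve as the import and release
* functions for spans obtained from heap_arena.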
*
* 1.6 Constrained Allocations
* ---------------------------
* Some vmem clients are quite picky about the kind of address they want.
* For example, the DVMA code may need an address that is at a particular
* phase with respect to some alignment (to get good cache coloring), or
* that lies within certain limits (the addressable range of a device),
* or that doesn't cross some boundary (a DMA counter restriction) --
* or all of the above. vmem_xalloc() allows the client to specify any
* or all of these constraints.
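*
* A constrained allocation might look roughly like this (a sketch;
* foo_arena, low, high, and size are assumed, with size no larger than
* the 64K nocross boundary):
*
*	addr = vmem_xalloc(foo_arena, size, PAGESIZE, 0, 64 * 1024,
*	    (void *)low, (void *)high, VM_BESTFIT | VM_SLEEP);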
*
* 1.7 The Vmem Quantum
* --------------------
* Every arena has a notion of 'quantum', specified at vmem_create() time,
* that defines the arena's minimum unit of currency. Most commonly the
* quantum is either 1 or PAGESIZE, but any power of 2 is legal.
* All vmem allocations are guaranteed to be quantum-aligned.
*
* 1.8 Quantum Caching
* -------------------
* A vmem arena may be so hot (frequently used) that the scalability of vmem
* allocation is a significant concern. We address this by allowing the most
* common allocation sizes to be serviced by the kernel memory allocator,
* which provides low-latency per-cpu caching. The qcache_max argument to
* vmem_create() specifies the largest allocation size to cache.
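*
* For example, with quantum PAGESIZE and qcache_max of 8 * PAGESIZE,
* requests of one through eight pages are served from per-CPU kmem
* caches; only larger requests reach the arena's own freelists.
* A sketch (the arena name is illustrative):
*
*	data_va = vmem_alloc(foo_va_arena, 3 * PAGESIZE, VM_SLEEP);
*
* would be satisfied by the three-page quantum cache.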
*
* 1.9 Relationship to Kernel Memory Allocator
* -------------------------------------------
* Every kmem cache has a vmem arena as its slab supplier. The kernel memory
* allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
*
*
* 2. Implementation
* -----------------
*
* 2.1 Segment lists and markers
* -----------------------------
* The segment structure (vmem_seg_t) contains two doubly-linked lists.
*
* The arena list (vs_anext/vs_aprev) links all segments in the arena.
* In addition to the allocated and free segments, the arena contains
* special marker segments at span boundaries. Span markers simplify
* coalescing and importing logic by making it easy to tell both when
* we're at a span boundary (so we don't coalesce across it), and when
* a span is completely free (its neighbors will both be span markers).
*
* Imported spans will have vs_import set.
*
* The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
* (1) for allocated segments, vs_knext is the hash chain linkage;
* (2) for free segments, vs_knext is the freelist linkage;
* (3) for span marker segments, vs_knext is the next span marker.
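*
* Abridged, the fields involved look like this (a sketch; see
* <sys/vmem_impl.h> for the authoritative definition):
*
*	typedef struct vmem_seg {
*		uintptr_t	vs_start;	/* start of segment */
*		uintptr_t	vs_end;		/* end (exclusive) */
*		vmem_seg_t	*vs_knext;	/* next of kin */
*		vmem_seg_t	*vs_kprev;	/* prev of kin */
*		vmem_seg_t	*vs_anext;	/* next in arena */
*		vmem_seg_t	*vs_aprev;	/* prev in arena */
*		uint8_t		vs_type;	/* alloc, free, or span */
*		uint8_t		vs_import;	/* non-zero if imported */
*	} vmem_seg_t;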
*
* 2.2 Allocation hashing
* ----------------------
* We maintain a hash table of all allocated segments, hashed by address.
* This allows vmem_free() to discover the target segment in constant time.
* vmem_update() periodically resizes hash tables to keep hash chains short.
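*
* Conceptually, the hash bucket for an address is computed along these
* lines (a simplification; the real VMEM_HASH macro mixes in more of the
* address bits):
*
*	bucket = &vmp->vm_hash_table[((uintptr_t)addr >> vmp->vm_qshift) &
*	    vmp->vm_hash_mask];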
*
* 2.3 Freelist management
* -----------------------
* We maintain power-of-2 freelists for free segments, i.e. free segments
* of size >= 2^n reside in vmp->vm_freelist[n]. To ensure constant-time
* allocation, vmem_xalloc() looks not in the first freelist that *might*
* satisfy the allocation, but in the first freelist that *definitely*
* satisfies the allocation (unless VM_BESTFIT is specified, or all larger
* freelists are empty). For example, a 1000-byte allocation will be
* satisfied not from the 512..1023-byte freelist, whose members *might*
* contain a 1000-byte segment, but from a 1024-byte or larger freelist,
* the first member of which will *definitely* satisfy the allocation.
* This ensures that vmem_xalloc() works in constant time.
*
* We maintain a bit map to determine quickly which freelists are non-empty.
* vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
*
* The different freelists are linked together into one large freelist,
* with the freelist heads serving as markers. Freelist markers simplify
* the maintenance of vm_freemap by making it easy to tell when we're taking
* the last member of a freelist (both of its neighbors will be markers).
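*
* As a sketch, instant-fit freelist selection reduces to picking the
* lowest set bit of vm_freemap at or above the "definitely large enough"
* index (simplified from the allocation path; best-fit and empty-freemap
* handling are omitted):
*
*	if (ISP2(size))
*		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
*	else
*		flist = lowbit(P2ALIGN(vmp->vm_freemap,
*		    (size_t)1 << highbit(size)));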
*
* 2.4 Vmem Locking
* ----------------
* For simplicity, all arena state is protected by a per-arena lock.
* For very hot arenas, use quantum caching for scalability.
*
* 2.5 Vmem Population
* -------------------
* Any internal vmem routine that might need to allocate new segment
* structures must prepare in advance by calling vmem_populate(), which
* will preallocate enough vmem_seg_t's to get it through the entire
* operation without dropping the arena lock.
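*
* In other words, internal callers follow a pattern roughly like this
* (a simplified sketch):
*
*	if (vmp->vm_nsegfree < VMEM_MINFREE &&
*	    !vmem_populate(vmp, vmflag))
*		return (NULL);	/* can't guarantee enough segments */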
*
* 2.6 Auditing
* ------------
* If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
* Since virtual addresses cannot be scribbled on, there is no equivalent
* in vmem to redzone checking, deadbeef, or other kmem debugging features.
* Moreover, we do not audit frees because segment coalescing destroys the
* association between an address and its segment structure. Auditing is
* thus intended primarily to keep track of who's consuming the arena.
* Debugging support could certainly be extended in the future if it proves
* necessary, but we do so much live checking via the allocation hash table
* that even non-DEBUG systems get quite a bit of sanity checking already.
*/
#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/panic.h>
/*
* Adding a new span to an arena requires two segment structures: one to
* represent the span, and one to represent the free segment it contains.
*/
/*
* Allocating a piece of an existing segment requires 0-2 segment structures
* depending on how much of the segment we're allocating.
*
* To allocate the entire segment, no new segment structures are needed; we
* simply move the existing segment structure from the freelist to the
* allocation hash table.
*
* To allocate a piece from the left or right end of the segment, we must
* split the segment into two pieces (allocated part and remainder), so we
* need one new segment structure to represent the remainder.
*
* To allocate from the middle of a segment, we need two new segment structures
* to represent the remainders on either side of the allocated part.
*/
#define	VMEM_SEGS_PER_SPAN_CREATE	2
#define	VMEM_SEGS_PER_EXACT_ALLOC	0
#define	VMEM_SEGS_PER_LEFT_ALLOC	1
#define	VMEM_SEGS_PER_MIDDLE_ALLOC	2
/*
* vmem_populate() preallocates segment structures for vmem to do its work.
* It must preallocate enough for the worst case, which is when we must import
* a new span and then allocate from the middle of it.
*/
#define	VMEM_SEGS_PER_ALLOC_MAX		\
	(VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
/*
* The segment structures themselves are allocated from vmem_seg_arena, so
* we have a recursion problem when vmem_seg_arena needs to populate itself.
* We address this by working out the maximum number of segment structures
* this act will require, and multiplying by the maximum number of threads
* that we'll allow to do it simultaneously.
*
* The worst-case segment consumption to populate vmem_seg_arena is as
* follows (depicted as a stack trace to indicate why events are occurring):
*
* (In order to lower the fragmentation in the heap_arena, we specify a
* minimum import size for the vmem_metadata_arena which is the same size
* as the kmem_va quantum cache allocations. This causes the worst-case
* allocation from the vmem_metadata_arena to be 3 segments.)
*
* vmem_alloc(vmem_seg_arena)		-> 2 segs (span create + exact alloc)
*  segkmem_alloc(vmem_metadata_arena)
*   vmem_alloc(vmem_metadata_arena)	-> 3 segs (span create + left alloc)
*    vmem_alloc(heap_arena)		-> 1 seg (left alloc)
*    page_create()
*    hat_memload()
*     kmem_cache_alloc()
*      kmem_slab_create()
*       vmem_alloc(hat_memload_arena)	-> 2 segs (span create + exact alloc)
*        segkmem_alloc(heap_arena)
*         vmem_alloc(heap_arena)	-> 1 seg (left alloc)
*         page_create()
*         hat_memload()		-> (hat layer won't recurse further)
*
* The worst-case consumption for each arena is 3 segment structures.
* Of course, a 3-seg reserve could easily be blown by multiple threads.
* Therefore, we serialize all allocations from vmem_seg_arena (which is OK
* because they're rare). We cannot allow a non-blocking allocation to get
* tied up behind a blocking allocation, however, so we use separate locks
* for VM_SLEEP and VM_NOSLEEP allocations. Similarly, VM_PUSHPAGE allocations
* must not block behind ordinary VM_SLEEPs. In addition, if the system is
* panicking then we must keep enough resources for panic_thread to do its
* work. Thus we have at most four threads trying to allocate from
* vmem_seg_arena, and each thread consumes at most three segment structures,
* so we must maintain a 12-seg reserve.
*/
#define	VMEM_POPULATE_RESERVE	12

/*
* vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
* so that it can satisfy the worst-case allocation *and* participate in
* worst-case allocation from vmem_seg_arena.
*/
#define	VMEM_MINFREE	(VMEM_SEGS_PER_ALLOC_MAX + VMEM_POPULATE_RESERVE)

static vmem_kstat_t vmem_kstat_template = {
	{ "mem_inuse",		KSTAT_DATA_UINT64 },
{ "mem_import", KSTAT_DATA_UINT64 },
{ "mem_total", KSTAT_DATA_UINT64 },
{ "vmem_source", KSTAT_DATA_UINT32 },
{ "alloc", KSTAT_DATA_UINT64 },
{ "free", KSTAT_DATA_UINT64 },
{ "wait", KSTAT_DATA_UINT64 },
{ "fail", KSTAT_DATA_UINT64 },
{ "lookup", KSTAT_DATA_UINT64 },
{ "search", KSTAT_DATA_UINT64 },
{ "populate_wait", KSTAT_DATA_UINT64 },
{ "populate_fail", KSTAT_DATA_UINT64 },
{ "contains", KSTAT_DATA_UINT64 },
{ "contains_search", KSTAT_DATA_UINT64 },
};
/*
* Insert/delete a vmem_seg_t from a doubly-linked list: type 'a' for the
* arena list (vs_anext/vs_aprev), type 'k' for the next-of-kin list
* (vs_knext/vs_kprev).
*/
#define	VMEM_INSERT(vprev, vsp, type)					\
{									\
	(vsp)->vs_##type##next = (vprev)->vs_##type##next;		\
	(vsp)->vs_##type##prev = (vprev);				\
	(vprev)->vs_##type##next->vs_##type##prev = (vsp);		\
	(vprev)->vs_##type##next = (vsp);				\
}

#define	VMEM_DELETE(vsp, type)						\
{									\
	(vsp)->vs_##type##prev->vs_##type##next = (vsp)->vs_##type##next; \
	(vsp)->vs_##type##next->vs_##type##prev = (vsp)->vs_##type##prev; \
}
/*
* Get a vmem_seg_t from the global segfree list.
*/
static vmem_seg_t *
vmem_getseg_global(void)
{
	vmem_seg_t *vsp;

	mutex_enter(&vmem_segfree_lock);
	if ((vsp = vmem_segfree) != NULL)
		vmem_segfree = vsp->vs_knext;
	mutex_exit(&vmem_segfree_lock);

	return (vsp);
}
/*
* Put a vmem_seg_t on the global segfree list.
*/
static void
vmem_putseg_global(vmem_seg_t *vsp)
{
	mutex_enter(&vmem_segfree_lock);
	vsp->vs_knext = vmem_segfree;
	vmem_segfree = vsp;
	mutex_exit(&vmem_segfree_lock);
}
/*
* Get a vmem_seg_t from vmp's segfree list.
*/
static vmem_seg_t *
vmem_getseg(vmem_t *vmp)
{
	vmem_seg_t *vsp;

	ASSERT(vmp->vm_nsegfree > 0);

	vsp = vmp->vm_segfree;
	vmp->vm_segfree = vsp->vs_knext;
	vmp->vm_nsegfree--;
return (vsp);
}
/*
* Put a vmem_seg_t on vmp's segfree list.
*/
static void
vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
{
	vsp->vs_knext = vmp->vm_segfree;
	vmp->vm_segfree = vsp;
	vmp->vm_nsegfree++;
}
/*
* Add vsp to the appropriate freelist.
*/
static void
{
}
/*
* Take vsp from the freelist.
*/
static void
{
/*
* The segments on both sides of 'vsp' are freelist heads,
* so taking vsp leaves the freelist at vsp->vs_kprev empty.
*/
}
VMEM_DELETE(vsp, k);
}
/*
* Add vsp to the allocated-segment hash table and update kstats.
*/
static void
{
if (vmem_seg_size == sizeof (vmem_seg_t)) {
} else {
}
}
/*
* Remove vsp from the allocated-segment hash table and update kstats.
*/
static vmem_seg_t *
{
break;
}
}
panic("vmem_hash_delete(%p, %lx, %lu): bad free",
panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
return (vsp);
}
/*
* Create a segment spanning the range [start, end) and add it to the arena.
*/
static vmem_seg_t *
{
return (newseg);
}
/*
* Remove segment vsp from the arena.
*/
static void
{
VMEM_DELETE(vsp, a);
}
/*
* Add the span [vaddr, vaddr + size) to vmp and update kstats.
*/
static vmem_seg_t *
{
panic("vmem_span_create(%p, %p, %lu): misaligned",
if (import)
return (newseg);
}
/*
* Remove span vsp from vmp and update kstats.
*/
static void
{
VMEM_DELETE(span, k);
}
/*
* Allocate the subrange [addr, addr + size) from segment vsp.
* If there are leftovers on either side, place them on the freelist.
* Returns a pointer to the segment representing [addr, addr + size).
*/
static vmem_seg_t *
{
/*
* If we're allocating from the start of the segment, and the
* remainder will be on the same freelist, we can save quite
* a bit of work.
*/
return (vsp);
}
return (vsp);
}
/*
* Return 1 if the current thread is populating an arena, 0 otherwise.
* Used to prevent recursion from the HAT layer.
*/
int
{
}
/*
* Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
*/
static int
{
char *p;
int i;
return (1);
/*
* If we're already populating, tap the reserve.
*/
if (vmem_is_populator()) {
return (1);
}
if (panic_thread == curthread)
lp = &vmem_panic_lock;
else if (vmflag & VM_NOSLEEP)
lp = &vmem_nosleep_lock;
else if (vmflag & VM_PUSHPAGE)
lp = &vmem_pushpage_lock;
else
lp = &vmem_sleep_lock;
/*
* The following vmem_alloc() may need to populate vmem_seg_arena
* and all the things it imports from. When doing so, it will tap
* each arena's reserve to prevent recursion (see the block comment
* above the definition of VMEM_POPULATE_RESERVE).
*/
if (p == NULL) {
mutex_exit(lp);
return (0);
}
/*
* Restock the arenas that may have been depleted during population.
*/
for (i = 0; i < vmem_populators; i++) {
}
mutex_exit(lp);
/*
* Now take our own segments.
*/
/*
* Give the remainder to charity.
*/
while (nseg > 0)
return (1);
}
/*
* Advance a walker from its previous position to 'afterme'.
* Note: may drop and reacquire vmp->vm_lock.
*/
static void
{
VMEM_DELETE(walker, a);
/*
* The walker segment's presence may have prevented its neighbors
* from coalescing. If so, coalesce them now.
*/
}
}
/*
* vsp could represent a complete imported span,
* in which case we must return it to the source.
*/
}
}
/*
* VM_NEXTFIT allocations deliberately cycle through all virtual addresses
* in an arena, so that we avoid reusing addresses for as long as possible.
* This helps to catch used-after-freed bugs. It's also the perfect policy
* for allocating things like process IDs, where we want to cycle through
* all values in order.
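*
* A caller simply passes VM_NEXTFIT in vmflag, e.g. (a sketch; "pid_arena"
* is illustrative):
*
*	id = (pid_t)(uintptr_t)vmem_alloc(pid_arena, 1,
*	    VM_NEXTFIT | VM_SLEEP);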
*/
static void *
{
return (NULL);
}
/*
* The common case is that the segment right after the rotor is free,
* and large enough that extracting 'size' bytes won't change which
* freelist it's on. In this case we can avoid a *lot* of work.
* Instead of the normal vmem_seg_alloc(), we just advance the start
* address of the victim segment. Instead of moving the rotor, we
* create the new segment structure *behind the rotor*, which has
* the same effect. And finally, we know we don't have to coalesce
* the rotor's neighbors because the new segment lies between them.
*/
return ((void *)addr);
}
/*
* Starting at the rotor, look for a segment large enough to
* satisfy the allocation.
*/
for (;;) {
break;
/*
* We've come full circle. One possibility is that
* there's actually enough space, but the rotor itself
* is preventing the allocation from succeeding because
* it's sitting between two free segments. Therefore,
* we advance the rotor and see if that liberates a
* suitable segment.
*/
break;
/*
* If there's a lower arena we can import from, or it's
* a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
* Otherwise, wait until another thread frees something.
*/
(vmflag & VM_NOSLEEP)) {
}
}
}
/*
* We found a segment. Extract enough space to satisfy the allocation.
*/
/*
* Advance the rotor to right after the newly-allocated segment.
* That's where the next VM_NEXTFIT allocation will begin searching.
*/
return ((void *)addr);
}
/*
* Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
* freelist. If size is not a power-of-2, it can return a false-negative.
*
* Used to decide if a newly imported span is superfluous after re-acquiring
* the arena lock.
*/
static int
{
int hb;
int flist = 0;
return (flist);
}
/*
* Allocate size bytes at offset phase from an align boundary such that the
* resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
* that does not straddle a nocross-aligned boundary.
*/
void *
{
panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
"parameters not vm_quantum aligned",
if (nocross != 0 &&
panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
"overconstrained allocation",
panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
"parameters inconsistent or invalid",
return (NULL);
for (;;) {
break;
/*
* highbit() returns the highest bit + 1, which is exactly
* what we want: we want to search the first freelist whose
* members are *definitely* large enough to satisfy our
* allocation. However, there are certain cases in which we
* want to look at the next-smallest freelist (which *might*
* be able to satisfy the allocation):
*
* (1) The size is exactly a power of 2, in which case
* the smaller freelist is always big enough;
*
* (2) All other freelists are empty;
*
* (3) We're in the highest possible freelist, which is
* always empty (e.g. the 4GB freelist on 32-bit systems);
*
* (4) We're doing a best-fit or first-fit allocation.
*/
} else {
hb == VMEM_FREELISTS ||
hb--;
}
/*
* We're moving up to a larger freelist,
* so if we've already found a candidate,
* the fit can't possibly get any better.
*/
break;
/*
* Find the next non-empty freelist.
*/
if (flist-- == 0)
break;
continue;
}
continue;
continue;
taddr +=
continue;
break;
}
break;
if (size == 0)
panic("vmem_xalloc(): size == 0");
}
/*
* The rounding induced overflow; return NULL
* if we are permitted to fail the allocation
* (and explicitly panic if we aren't).
*/
if ((vmflag & VM_NOSLEEP) &&
return (NULL);
}
panic("vmem_xalloc(): size overflow");
}
/*
* Determine how many segment structures we'll consume.
* The calculation must be precise because if we're
* here on behalf of vmem_populate(), we are taking
* segments from a very limited reserve.
*/
else if (phase == 0 &&
else
vaddr = ((vmem_ximport_t *)
} else {
}
/*
* Since we dropped the vmem lock while
* calling the import function, other
* threads could have imported space
* and made our import unnecessary. In
* order to save space, we return
* excess imports immediately.
*/
goto do_alloc;
}
break;
/*
* Our import failed, but another thread
* added sufficient free memory to the arena
* to satisfy our request. Go back and
* grab it.
*/
goto do_alloc;
}
}
/*
* If the requestor chooses to fail the allocation attempt
* rather than reap, wait, and retry, get out of the loop.
*/
break;
else
kmem_reap();
if (vmflag & VM_NOSLEEP)
break;
}
/* re-position to end of buffer */
if (vmflag & VM_ENDALLOC) {
}
if (xvaddr)
return ((void *)addr);
}
panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
"cannot satisfy mandatory allocation",
return (NULL);
}
/*
* Free the segment [vaddr, vaddr + size), where vaddr was a constrained
* allocation. vmem_xalloc() and vmem_xfree() must always be paired because
* both routines bypass the quantum caches.
*/
void
{
/*
* Attempt to coalesce with the next segment.
*/
}
/*
* Attempt to coalesce with the previous segment.
*/
}
/*
* If the entire span is free, return it to the source.
*/
} else {
}
}
/*
* Allocate size bytes from arena vmp. Returns the allocated address
* on success, NULL on failure. vmflag specifies VM_SLEEP or VM_NOSLEEP,
* and may also specify best-fit, first-fit, or next-fit allocation policy
* instead of the default instant-fit policy. VM_SLEEP allocations are
* guaranteed to succeed.
*/
void *
{
int hb;
int flist = 0;
return (NULL);
if (vmflag & VM_NEXTFIT)
/*
* Unconstrained instant-fit allocation from the segment list.
*/
}
if (flist-- == 0) {
}
if (vmflag & VM_ENDALLOC) {
}
return ((void *)addr);
}
/*
* Free the segment [vaddr, vaddr + size).
*/
void
{
vaddr);
else
}
/*
* Determine whether arena vmp contains the segment [vaddr, vaddr + size).
*/
int
{
break;
}
}
/*
* Add the span [vaddr, vaddr + size) to arena vmp.
*/
void *
{
panic("vmem_add(%p, %p, %lu): bad arguments",
else
return (vaddr);
}
/*
* Walk the vmp arena, applying func to each segment matching typemask.
* If VMEM_REENTRANT is specified, the arena lock is dropped across each
* call to func(); otherwise, it is held for the duration of vmem_walk()
* to ensure a consistent snapshot. Note that VMEM_REENTRANT callbacks
* are *not* necessarily consistent, so they may only be used when a hint
* is adequate.
*/
void
{
if (typemask & VMEM_WALKER)
return;
if (typemask & VMEM_REENTRANT) {
} else {
}
}
}
}
/*
* Return the total amount of memory whose type matches typemask. Thus:
*
* typemask VMEM_ALLOC yields total memory allocated (in use).
* typemask VMEM_FREE yields total memory free (available).
* typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
*/
{
if (typemask & VMEM_ALLOC)
}
/*
* Create an arena called name whose initial span is [base, base + size).
* The arena's natural unit of currency is quantum, so vmem_alloc()
* guarantees quantum-aligned results. The arena may import new spans
* by invoking afunc() on source, and may return those spans by invoking
* ffunc() on source. To make small allocations fast and scalable,
* the arena offers high-performance caching for each integer multiple
* of quantum up to qcache_max.
*/
static vmem_t *
{
int i;
if (vmem_vmem_arena != NULL) {
vmflag & VM_KMFLAGS);
} else {
}
/* An identifier arena must inherit from another identifier arena */
(vmflag & VMC_IDENTIFIER)));
return (NULL);
vmflag &= VM_KMFLAGS;
for (i = 0; i <= VMEM_FREELISTS; i++) {
}
/*
* Some arenas (like vmem_metadata and kmem_metadata) cannot
* use quantum caching to lower fragmentation. Instead, we
* increase their imports, giving a similar effect.
*/
vmp->vm_min_import =
nqcache = 0;
}
if (nqcache != 0) {
for (i = 0; i < nqcache; i++) {
(i + 1) * quantum);
}
}
}
}
return (NULL);
}
return (vmp);
}
vmem_t *
{
vmflag | VMC_XALLOC));
}
vmem_t *
{
}
/*
* Destroy arena vmp.
*/
void
{
int i;
for (i = 0; i < VMEM_NQCACHE_MAX; i++)
if (leaked != 0)
"identifiers" : "bytes");
/*
* Give back the segment structures for anything that's left in the
* arena, e.g. the primary spans and their free segments.
*/
}
while (vmp->vm_nsegfree > 0)
}
/*
* Only shrink vmem hashtable if it is 1<<vmem_rescale_minshift times (8x)
* larger than necessary.
*/
/*
* Resize vmp's hash table to keep the average lookup depth near 1.0.
*/
static void
{
return;
return;
for (h = 0; h < old_size; h++) {
*hash_bucket = vsp;
}
}
old_size * sizeof (void *));
}
/*
* Perform periodic maintenance on all vmem arenas.
*/
void
{
/*
* If threads are waiting for resources, wake them up
* periodically so they can issue another kmem_reap()
* to reclaim resources cached by the slab allocator.
*/
/*
* Rescale the hash table to keep the hash chains short.
*/
}
}
void
{
int i;
/*
* Reap any quantum caches that may be part of this vmem.
*/
for (i = 0; i < VMEM_NQCACHE_MAX; i++)
}
/*
* Prepare vmem for use.
*/
vmem_t *
{
while (--nseg >= 0)
NULL, 0, heap_quantum,
NULL, 0, heap_quantum,
NULL, 0, 8,
VM_SLEEP);
VM_SLEEP);
return (heap);
}