/* kobj_alloc.c revision 75521904d7c3dbe11337904d9bead2518c94cc50 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Special-purpose krtld allocators
*
* krtld can draw upon two sources of transient memory, both of which are
* provided by boot, and both of which are unmapped early on in kernel
* startup. The first is temporary memory (kobj_tmp_*), which is available
* only during the execution of kobj_init. The only way to reclaim temporary
* memory is via calls to kobj_tmp_free(), which will free *all* temporary
* memory currently allocated.
*
* The second type of transient memory is scratch memory (kobj_bs_*). Scratch
* memory differs from transient memory in that it survives until the
* conclusion of kobj_sync(). There is no way to reclaim scratch memory prior
* to that point.
*/
#include <sys/kobj_impl.h>
#include <sys/bootconf.h>
#include <sys/sysmacros.h>
/*
* Temporary memory
*
* Allocations over (pagesize - 16) bytes are satisfied by page allocations.
* That is, the allocation size is rounded up to the next page, with full pages
* used to satisfy the request. Smaller allocations share pages. When the
* first small allocation is requested, an entire page is requested from the
* booter, and the appropriate portion of that page is returned to the caller.
* Subsequent small allocations are filled, when possible, from the same page.
* If the requested allocation is larger than the available space on the current
* page, a new page is requested for this and subsequent small allocations.
*
* The small allocations page is laid out as follows:
*
* ------------------------------------------------------------------
* | prev | size | buffer | 0xfeed | size | ... | 0xfeed | next |
* | page | 0xbeef | | face | 0xbeef | | face | page |
* ------------------------------------------------------------------
* 0 4 8 n n+4 n+8 pgsz-8 pgsz-4 pgsz
*/
/*
 * Guard words used to detect buffer overruns in small temporary
 * allocations (see the page-layout diagram above): a 16-bit size guard
 * stored next to each buffer's size, a pad pattern for alignment filler,
 * and a rear guard placed after each buffer.
 */
#define KOBJ_TMP_SIZE_GUARD 0xbeef
#define KOBJ_TMP_ALIGN_PAD 0xdeadbeef
#define KOBJ_TMP_REAR_GUARD 0xfeedface
/*
 * NOTE(review): the member lists of both structs below are missing from
 * this copy of the file -- "kobj_big_map" has no body or closing brace,
 * and "kobj_tmp" has no members. Recover the full struct definitions
 * from the original revision; as written this will not compile.
 */
typedef struct kobj_big_map {
typedef struct kobj_tmp {
} kobj_tmp_t;
/*
 * Beyond a certain point (when the size of the buffer plus the minimal overhead
 * used to track that buffer) gets larger than a page, we switch to page-based
 * allocations.
 */
/*
 * Used to track large allocations. Must be less than the large allocation
 * threshold, and greater than the number of large allocations.
 */
#define KOBJ_TMP_LARGE_MAP_MAXENT 256
/* Global state for the temporary-memory allocator (single-threaded use). */
static kobj_tmp_t kobj_tmp;
/*
 * Walk the outstanding temporary allocations and check their guard
 * words, warning about any detected overwrite. NOTE(review): most of
 * this function's body is missing from this extraction -- only a
 * fragment of the guard-check and its warning message survives below;
 * the dangling braces and orphaned "int i;" are truncation damage, not
 * intentional code.
 */
static void
kobj_tmp_verify(void)
{
}
int i;
}
}
/* Fragment of a per-byte guard comparison against an expected pattern. */
if ((*cur & 0xff) !=
"corrupt: ");
"overwrite (is %02x, ",
(*cur & 0xff));
}
}
}
}
}
}
/*
 * Satisfy a large (page-based) temporary allocation and record it in the
 * big-allocation map so kobj_tmp_free() can release it later.
 * NOTE(review): the function-name line and several statements are missing
 * from this extraction (this is presumably kobj_tmp_bigalloc, which
 * kobj_tmp_alloc calls below). The surviving fragments show: a debug
 * fall-back message, a page-aligned kobj_segbrk attempt, map-entry
 * bookkeeping, and an overflow warning when more than the allowed number
 * of large allocations is requested.
 */
static void *
{
void *buf;
#ifdef KOBJ_DEBUG
if (kobj_debug & D_DEBUG) {
"%u bytes -- falling back\n", act);
}
#endif
/*
* kobj_alloc doesn't guarantee page alignment, so we do the
* kobj_segbrk ourselves.
*/
/* Fragment: failure branch of the (missing) kobj_segbrk call. */
NULL) {
"bigalloc\n");
}
return (buf);
}
/* Fragment: big-map entry initialization (leading code missing). */
sizeof (kobj_big_map_t));
sizeof (kobj_big_map_t));
kobj_tmp.tmp_bigmapidx = 0;
} else {
"of large allocations (%u allowed)\n",
}
}
return (buf);
}
/*
 * Allocate temporary memory. Requests above the large-allocation
 * threshold are handed to kobj_tmp_bigalloc(); smaller requests share
 * pages per the layout diagram at the top of the file.
 * NOTE(review): the function-name line (presumably kobj_tmp_alloc) and
 * many body statements are missing from this extraction -- "act", "sz",
 * "align", and "buf" are referenced but their declarations/assignments
 * did not survive. Do not attempt to compile as-is.
 */
void *
{
int i;
/*
 * Large requests are satisfied by returning an integral number of
 * pages sufficient to cover the allocation.
 */
if (act > KOBJ_TMP_LARGE_ALLOC_THRESH)
return (kobj_tmp_bigalloc(sz));
/*
 * If we don't have enough space in the current page (or if there isn't
 * one), allocate a new page. Attach the current and new pages, and
 * adjust the various pointers and residual counter.
 */
/* Fragment: failure branch of the (missing) boot-allocator call. */
BO_NO_ALIGN)) == NULL) {
#ifdef KOBJ_DEBUG
if (kobj_debug & D_DEBUG) {
"alloc of %u bytes -- falling back\n", act);
}
#endif
}
/* Fragment: store the next-page link in the old page's last word. */
KOBJ_TMP_SMALL_PAGE_SIZE - 4) =
}
}
/*
 * and the post-buffer guard. We also fill the alignment space with
 * KOBJ_TMP_ALIGN_PAD to aid in overrun detection.
 */
for (i = 0; i < align; i++) {
}
return (buf);
}
/*
 * Free *all* outstanding temporary memory (per the contract described in
 * the file header): iterate over the recorded large allocations, then
 * reset the big-map index. NOTE(review): the loop body and the code
 * that releases the small-allocation pages are missing from this
 * extraction -- the stray "break;" and extra closing brace are
 * truncation damage.
 */
void
kobj_tmp_free(void)
{
int i;
for (i = 0; i < kobj_tmp.tmp_bigmapidx; i++) {
break;
}
}
/* All large-allocation map entries are now considered released. */
kobj_tmp.tmp_bigmapidx = 0;
}
/*
* Scratch memory
*/
/*
 * Allocate scratch memory (survives until kobj_sync(); never reclaimed
 * before then -- see the file header). NOTE(review): the function-name
 * line (presumably kobj_bs_alloc) and the first half of the boot-
 * allocator call are missing from this extraction; only the trailing
 * BO_NO_ALIGN argument of the return expression survives.
 */
extern void *
{
BO_NO_ALIGN));
}