/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/* Copyright (c) 1988 AT&T */
/* All Rights Reserved */
/*
* Memory management: malloc(), realloc(), free().
*
* The following #-parameters may be redefined:
* SEGMENTED: if defined, memory requests are assumed to be
* non-contiguous across calls of GETCORE's.
* GETCORE: a function to get more core memory. If not SEGMENTED,
* GETCORE(0) is assumed to return the next available
* address. Default is 'sbrk'.
* ERRCORE: the error code as returned by GETCORE.
* Default is (char *)(-1).
* CORESIZE: a desired unit (measured in bytes) to be used
* with GETCORE. Default is (1024*ALIGN).
*
* This algorithm is based on a best fit strategy with lists of
* free elts maintained in a self-adjusting binary tree. Each list
* contains all elts of the same size. The tree is ordered by size.
* For results on self-adjusting trees, see the paper:
* Self-Adjusting Binary Trees,
* DD Sleator & RE Tarjan, JACM 1985.
*
* The header of a block contains the size of the data part in bytes.
* Since the size of a block is 0%4, the low two bits of the header
* are free and used as follows:
*
* BIT0: 1 for busy (block is in use), 0 for free.
* BIT1: if the block is busy, this bit is 1 if the
* preceding block in contiguous memory is free.
* Otherwise, it is always 0.
*/
#include "lint.h"
#include "mallint.h"
#include "mtlib.h"
/*
* Some abusers of the system (notably java1.2) acquire __malloc_lock
* in order to prevent threads from holding it while they are being
* suspended via thr_suspend() or thr_suspend_allmutators().
* This never worked when alternate malloc() libraries were used
* because they don't use __malloc_lock for their locking strategy.
* We leave __malloc_lock as an external variable to satisfy these
* old programs, but we define a new lock, private to libc, to do the
* real locking: libc_malloc_lock. This puts libc's malloc() package
* on the same footing as all other malloc packages.
*/
static void realfree(void *);
static void cleanfree(void *);
static void *_malloc_unlocked(size_t);
/*
* Interfaces used only by atfork_init() functions.
*/
/*
 * malloc_locks(): atfork_init() interface only (see comment above).
 * Acquires libc's private malloc lock so malloc state is held
 * consistently across fork().
 */
void
malloc_locks(void)
{
(void) mutex_lock(&libc_malloc_lock);
}
/*
 * malloc_unlocks(): atfork_init() interface only; releases the lock
 * taken by malloc_locks().
 */
void
malloc_unlocks(void)
{
(void) mutex_unlock(&libc_malloc_lock);
}
/*
* Allocation of small blocks
*/
/*
 * NOTE(review): this definition is fragmentary -- the function name,
 * parameter list, and most of the body have been lost from this copy
 * (there is no identifier between "static void *" and "{").  From the
 * "Allocation of small blocks" banner above and the NPS reference
 * below, this is presumably _smalloc(size_t size): the small-block
 * allocator that batches NPS blocks of a given size onto a per-size
 * linked list and hands out the head.  TODO: restore the missing
 * lines from a pristine copy of this file before building.
 */
static void *
{
size_t i;
/* want to return a unique pointer on malloc(0) */
if (size == 0)
/* list to use */
int n;
/* number of blocks to get at one time */
/* get NPS of these block types */
return (0);
/* make them into a linked list */
}
}
/* allocate from the head of the queue */
}
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "malloc(size_t size)") and the locked allocation call (presumably
 * "ret = _malloc_unlocked(size);", matching the forward declaration
 * above) are missing; as written, ret is returned uninitialized.
 * What remains shows the intended shape: fail with NULL when not on
 * the primary link map, otherwise serialize the real work under
 * libc_malloc_lock.  TODO: restore from a pristine copy.
 */
void *
{
void *ret;
if (!primary_link_map) {
return (NULL);
}
(void) mutex_lock(&libc_malloc_lock);
(void) mutex_unlock(&libc_malloc_lock);
return (ret);
}
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "_malloc_unlocked(size_t size)") and large parts of the body are
 * missing: the ALIGN rounding, the Lfree bookkeeping, the tree-search
 * comparisons inside the for(;;) loop (both if-arms are empty, leaving
 * dangling "else" tokens), the "leftover" label targeted by the goto,
 * and the final allocation/return code.  What remains shows the
 * intended algorithm: reject sizes above MAX_MALLOC, try the
 * last-freed block (Lfree) for an exact or big-enough fit, otherwise
 * best-fit search the size-ordered free tree rooted at Root, splay
 * the found element to the root, carve off any leftover as a new free
 * piece, and mark the forward neighbor busy.  TODO: restore from a
 * pristine copy before building -- this does not compile as-is.
 */
static void *
{
size_t n;
/* check for size that could overflow calculations */
if (size > MAX_MALLOC) {
return (NULL);
}
/* make sure that size is 0 mod ALIGN */
/* see if the last free block can be used */
if (Lfree) {
CLRBITS01(n);
if (n == size) {
/*
* exact match, use it as is
*/
FREEMASK; /* 1 back */
/*
* got a big enough piece
*/
FREEMASK; /* 1 back */
goto leftover;
}
}
o_bit1 = 0;
/* perform free's of space since last malloc */
/* small blocks */
/* search for an elt of the right size */
n = 0;
if (Root) {
for (;;) {
/* branch left */
}
else
break;
} else { /* branch right */
else
break;
}
}
if (sp) {
/* make the searched-to element the root */
}
}
/* if found none fitted in the tree */
if (!sp) {
return (NULL);
}
/* tell the forward neighbor that we're busy */
/* if the leftover is enough for a new free piece */
n -= WORDSIZE;
/* return the allocated space */
}
/*
* realloc().
*
* If the block size is increasing, we try forward merging first.
* This is not best-fit but it avoids some data recopying.
*/
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "realloc(void *old, size_t size)") and much of the body are gone:
 * the SIZE(tp)/block-header manipulation, the forward-merge logic,
 * the core-extension path, the "call_malloc" and "chop_big" labels
 * that the remaining gotos target, and the memmove/by-hand copy.
 * What remains shows the contract: size==0 returns the unique
 * malloc(0) sentinel (void *)0x1; the 0x1/NULL sentinels are passed
 * straight to malloc; a freed block yields NULL; all paths drop
 * libc_malloc_lock before returning.  TODO: restore from a pristine
 * copy before building -- this does not compile as-is.
 */
void *
{
char *new;
if (size == 0) {
return ((void *)0x00000001);
}
if (!primary_link_map) {
return (NULL);
}
/* check for size that could overflow calculations */
if (size > MAX_MALLOC) {
return (NULL);
}
/* pointer to the block */
(void) mutex_lock(&libc_malloc_lock);
if (old <= (void *)0x00000001) {
(void) mutex_unlock(&libc_malloc_lock);
return (new);
}
/* perform free's of space since last malloc */
/* make sure that size is 0 mod ALIGN */
/* if the block was freed, data has been destroyed. */
(void) mutex_unlock(&libc_malloc_lock);
return (NULL);
}
/* nothing to do */
(void) mutex_unlock(&libc_malloc_lock);
return (old);
}
/* special cases involving small blocks */
/* free if size is zero */
if (size == 0) {
(void) mutex_unlock(&libc_malloc_lock);
return (NULL);
} else {
goto call_malloc;
}
}
/* block is increasing in size, try merging the next block */
else
}
#ifndef SEGMENTED
/* not enough & at TRUE end of memory, try extending core */
}
}
#endif
}
/* got enough space to use */
size_t n;
n -= WORDSIZE;
/* the previous block may be free */
(void) mutex_unlock(&libc_malloc_lock);
return (old);
}
/* call malloc to get a new block */
(void) mutex_unlock(&libc_malloc_lock);
return (new);
}
/*
* Attempt special case recovery allocations since malloc() failed:
*
* 1. size <= SIZE(tp) < MINSIZE
* Simply return the existing block
* 2. SIZE(tp) < size < MINSIZE
* malloc() may have failed to allocate the chunk of
* small blocks. Try asking for MINSIZE bytes.
* 3. size < MINSIZE <= SIZE(tp)
* malloc() may have failed as with 2. Change to
* MINSIZE allocation which is taken from the beginning
* of the current block.
* 4. MINSIZE <= SIZE(tp) < size
* If the previous block is free and the combination of
* these two blocks has at least size bytes, then merge
* the two blocks copying the existing contents backwards.
*/
(void) mutex_unlock(&libc_malloc_lock);
return (old);
goto call_malloc;
}
goto chop_big;
/*
* Since the copy may overlap, use memmove() if available.
* Otherwise, copy by hand.
*/
goto chop_big;
}
(void) mutex_unlock(&libc_malloc_lock);
return (NULL);
}
/*
* realfree().
*
* Coalescing of adjacent free blocks is done first.
* Then, the new free block is leaf-inserted into the free tree
* without splaying. This strategy does not guarantee the amortized
* time bound on the tree operations. In practice, however, free is
* much more infrequent than malloc, and the tree searches performed
* by the latter adequately keep the tree in balance.
*/
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "realfree(void *old)", matching the forward declaration above) and
 * most statements are missing: the coalescing with the next and
 * preceding blocks, the tree-insertion comparisons (the for(;;) loop
 * retains only empty else-arms and break statements), and the
 * doubly-linked-list splicing.  What remains matches the header
 * comment: coalesce adjacent free blocks, then leaf-insert the
 * result into the free tree rooted at Root without splaying, with
 * small blocks instead going onto their per-size linked list.
 * TODO: restore from a pristine copy -- this does not compile as-is.
 */
static void
{
/* pointer to the block */
return;
/* small block, put it in the right linked list */
return;
}
/* see if coalescing with next block is warranted */
}
/* the same with the preceding block */
}
/* initialize tree info */
/* the last word of the block contains self's address */
/* set bottom block, or insert in the free tree */
else {
/* search for the place to insert */
if (Root) {
for (;;) {
else {
break;
}
else {
break;
}
} else {
else
} else
/* insert to head of list */
/* doubly link list */
break;
}
}
} else
}
/* tell next block that this one is free */
}
/*
* Get more core. Gaps in memory are noted as busy blocks.
*/
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "_morecore(size_t size)") and most of the body are missing: the
 * GETCORE() calls that actually obtain memory, the alignment-offset
 * computation, the CORESIZE rounding, and the bottom-block setup.
 * What remains shows the plan: pad the request so the returned
 * address is aligned, round up to a multiple of CORESIZE, guard
 * against nsize overflowing GETCORE's argument (splitting into at
 * most two GETCORE(MAX_GETCORE) calls, backing out the first with
 * GETCORE(-MAX_GETCORE) if the second fails), then splice the new
 * core in either contiguously or as a new bottom block with gaps
 * recorded as busy blocks.  TODO: restore from a pristine copy --
 * this does not compile as-is.
 */
static TREE *
{
char *addr;
/* compute new amount of memory to get */
return (NULL);
/* need to pad size out so that addr is aligned */
else
offset = 0;
#ifndef SEGMENTED
/* if not segmented memory, what we need may be smaller */
n -= WORDSIZE;
}
#endif
/* get a multiple of CORESIZE */
/* check if nsize request could overflow in GETCORE */
return (NULL);
}
return (NULL);
} else {
/*
* the value required is too big for GETCORE() to deal with
* in one go, so use GETCORE() at most 2 times instead.
* Argument to GETCORE() must be multiple of ALIGN.
* If not, GETCORE(-MAX_GETCORE) will not return brk point
* to previous value, but will be ALIGN more.
* This would leave a small hole.
*/
delta = MAX_GETCORE;
while (delta > 0) {
(void) GETCORE(-MAX_GETCORE);
return (NULL);
}
nsize -= MAX_GETCORE;
}
}
/* contiguous memory */
if (tp) {
} else {
n += WORDSIZE;
}
} else
/* new bottom address */
/* new bottom block */
/* reserved the last word to head any noncontiguous memory */
/* non-contiguous memory, free old bottom block */
}
return (tp);
}
/*
* Tree rotation functions (BU: bottom-up, TD: top-down)
*/
/*
 * NOTE(review): the replacement lists of all six rotation macros are
 * missing -- each #define ends in a bare line-continuation backslash
 * with no body, so after line splicing each macro would swallow the
 * following #define line.  These must be restored from a pristine
 * copy before this file can be preprocessed correctly.
 */
#define LEFT1(x, y) \
#define RIGHT1(x, y) \
#define BULEFT2(x, y, z) \
#define BURIGHT2(x, y, z) \
#define TDLEFT2(x, y, z) \
#define TDRIGHT2(x, y, z) \
/*
* Delete a tree element
*/
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "t_delete(TREE *op)") and nearly all statements are missing; only
 * the control-flow skeleton and comments survive.  The visible shape
 * matches the header comment: unlink a non-tree (list) node directly,
 * otherwise splay op to the root and replace it with the right-end of
 * its left subtree (or its right subtree when the left is empty).
 * TODO: restore from a pristine copy -- this does not compile as-is.
 */
static void
{
/* if this is a non-tree node */
return;
}
/* make op the root of the tree */
/* if this is the start of a list */
return;
}
/* if op has a non-null left subtree */
/* make the right-end of the left subtree its root */
} else {
}
}
/* hook the right subtree of op to the above elt */
}
}
/*
* Bottom up splaying (simple version).
* The basic idea is to roughly cut in half the
* path from Root to tp and make tp the new root.
*/
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "t_splay(TREE *tp)") and all rotation calls are missing; only the
 * nested if/else skeleton remains.  Per the header comment, this is
 * the simple bottom-up splay: repeatedly rotate tp past its parent
 * and grandparent until tp becomes the root, roughly halving the
 * root-to-tp path.  TODO: restore from a pristine copy -- this does
 * not compile as-is.
 */
static void
{
/* iterate until tp is the root */
/* grandparent of tp */
/* x is a left child */
} else {
}
} else {
} else {
}
}
}
}
/*
* free().
* Performs a delayed free of the block pointed to
* by old. The pointer to old is saved on a list, flist,
* until the next malloc or realloc. At that time, all the
* blocks pointed to in flist are actually freed via
* realfree(). This allows the contents of free blocks to
* remain undisturbed until the next malloc or realloc.
*/
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "free(void *old)") and the statement that records old on the
 * deferred-free list (flist, per the header comment) are missing.
 * What remains shows the guards: no-op when not on the primary link
 * map, and no-op for NULL or the (void *)0x1 malloc(0) sentinel; the
 * real work happens under libc_malloc_lock.  TODO: restore from a
 * pristine copy.
 */
void
{
if (!primary_link_map) {
return;
}
if (old <= (void *)0x00000001)
return;
(void) mutex_lock(&libc_malloc_lock);
(void) mutex_unlock(&libc_malloc_lock);
}
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "free_unlocked(void *old)") and the conditions guarding each
 * early return are missing, as is the statement that appends old to
 * the free list indexed by freeidx.  The surviving comment documents
 * the intent: reject double frees by checking (1) the last-freed
 * block, (2) the block's busy bit, and (3) membership in the pending
 * free list (the surviving freeidx loop).  TODO: restore from a
 * pristine copy -- this does not compile as-is.
 */
void
{
int i;
return;
/*
* Make sure the same data block is not freed twice.
* 3 cases are checked. It returns immediately if either
* one of the conditions is true.
* 1. Last freed.
* 2. Not in use or freed already.
* 3. In the free list.
*/
return;
return;
for (i = 0; i < freeidx; i++)
return;
}
/*
* cleanfree() frees all the blocks pointed to be flist.
*
* realloc() should work if it is called with a pointer
* to a block that was freed since the last call to malloc() or
* realloc(). If cleanfree() is called from realloc(), ptr
* is set to the old block and that block should not be
* freed since it is actually being reallocated.
*/
/*
 * NOTE(review): fragmentary -- the signature line (presumably
 * "cleanfree(void *ptr)", matching the forward declaration above) and
 * the loop body that walks flist calling realfree() on each pending
 * block (skipping ptr, per the header comment) are missing; only the
 * loop skeleton and the final freeidx reset survive.  TODO: restore
 * from a pristine copy -- this does not compile as-is.
 */
static void
{
char **flp;
for (;;) {
break;
}
freeidx = 0;
}