/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/* Copyright (c) 1988 AT&T */
/* All Rights Reserved */
/*
* Memory management: malloc(), realloc(), free(), memalign().
*
* The following #-parameters may be redefined:
* GETCORE: a function to get more core memory.
* GETCORE(0) is assumed to return the next available
* address. Default is 'sbrk'.
* ERRCORE: the error code as returned by GETCORE.
* Default is ((char *)(-1)).
* CORESIZE: a desired unit (measured in bytes) to be used
* with GETCORE. Default is (1024*ALIGN).
*
* This algorithm is based on a best fit strategy with lists of
* free elts maintained in a self-adjusting binary tree. Each list
* contains all elts of the same size. The tree is ordered by size.
* For results on self-adjusting trees, see the paper:
 * Self-Adjusting Binary Search Trees,
* DD Sleator & RE Tarjan, JACM 1985.
*
* The header of a block contains the size of the data part in bytes.
 * Since the size of a block is always 0 mod 4, the low two bits of the
 * header are free and used as follows:
*
* BIT0: 1 for busy (block is in use), 0 for free.
* BIT1: if the block is busy, this bit is 1 if the
* preceding block in contiguous memory is free.
* Otherwise, it is always 0.
*/
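/*
 * Illustrative sketch, not part of the allocator: one way the two low
 * header bits described above can be packed into the size word, given
 * that every block size is a multiple of 4.  The EX_* names are
 * hypothetical; the real macros live in mallint.h.
 */
#if 0
#include <stddef.h>

#define	EX_BUSY		0x1	/* BIT0: block is in use */
#define	EX_PREVFREE	0x2	/* BIT1: preceding contiguous block is free */

#define	EX_SIZE(h)		((h) & ~(size_t)0x3)	/* size without flags */
#define	EX_ISBUSY(h)		(((h) & EX_BUSY) != 0)
#define	EX_PREV_IS_FREE(h)	(((h) & EX_PREVFREE) != 0)
#define	EX_SETSIZE(h, sz)	((h) = ((sz) & ~(size_t)0x3) | ((h) & 0x3))
#endif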
#include "mallint.h"
static void realfree(void *);
static void *malloc_unlocked(size_t);
static void free_unlocked(void *);
#define FREEPAT 0
/*
 * Patterns to be copied into freed blocks and allocated blocks.
 * 0xdeadbeefdeadbeef and 0xbaddcafebaddcafe are invalid pointer
 * values in all programs.
 */
static uint64_t patterns[2] = {	/* FREEPAT (0) selects the freed-block pattern */
	0xdeadbeefdeadbeefULL,	/* pattern in a freed block */
	0xbaddcafebaddcafeULL	/* pattern in an allocated block */
};
static void
{
/* LINTED improper alignment */
while (sz--)
}
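/*
 * Illustrative sketch of what the pattern-fill routine above does:
 * stamp one of the 64-bit patterns over a block whose size is a
 * multiple of the word size.  The real routine works on a TREE node
 * from mallint.h; the names here are hypothetical.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
ex_fill_pattern(void *data, size_t nbytes, uint64_t pattern)
{
	uint64_t *p = data;
	size_t sz = nbytes / sizeof (uint64_t);

	while (sz--)
		*p++ = pattern;
}
#endif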
/*
* Keep lists of small blocks, LIFO order.
*/
/* number of blocks to get at one time */
static void *
{
size_t i;
/* want to return a unique pointer on malloc(0) */
if (size == 0)
/* list to use */
int n;
/* get NPS of these block types */
return (NULL);
/* make them into a link list */
if (n == NPS - 1) {
} else {
/* LINTED improper alignment */
}
}
}
/* allocate from the head of the queue */
}
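/*
 * Illustrative sketch, not the allocator's code: the small-block
 * strategy described above keeps one LIFO free list per size class and
 * refills an empty list a batch at a time (NPS blocks in the real
 * code).  All names and the class count here are hypothetical.
 */
#if 0
#include <stdlib.h>

#define	EX_NPS		4	/* blocks fetched per refill */
#define	EX_NCLASSES	8	/* number of small size classes */

typedef struct ex_small {
	struct ex_small *next;
} ex_small_t;

static ex_small_t *ex_list[EX_NCLASSES];	/* one LIFO list per class */

static void *
ex_small_alloc(int cls, size_t blksz)
{
	ex_small_t *b;

	if (ex_list[cls] == NULL) {
		/* refill: carve EX_NPS blocks and push them on the list */
		char *chunk = malloc(blksz * EX_NPS);
		int i;

		if (chunk == NULL)
			return (NULL);
		for (i = 0; i < EX_NPS; i++) {
			b = (ex_small_t *)(chunk + i * blksz);
			b->next = ex_list[cls];
			ex_list[cls] = b;
		}
	}
	/* allocate from the head of the list */
	b = ex_list[cls];
	ex_list[cls] = b->next;
	return (b);
}
#endif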
void *
{
void *ret;
(void) mutex_lock(&__watch_malloc_lock);
(void) mutex_unlock(&__watch_malloc_lock);
return (ret);
}
static void *
{
size_t n;
/* check for size that could overflow calculations */
if (size > MAX_MALLOC) {
return (NULL);
}
/* make sure that size is 0 mod ALIGN */
/* small blocks */
/* search for an elt of the right size */
n = 0;
if (Root) {
for (;;) {
}
} else {
break;
}
} else { /* branch right */
} else {
break;
}
}
}
if (sp) {
/* make the searched-to element the root */
}
}
/* if found none fitted in the tree */
if (Bottom) {
} else {
return (NULL);
}
} else {
return (NULL);
}
}
/* tell the forward neighbor that we're busy */
/* LINTED improper alignment */
/* if the leftover is enough for a new free piece */
n -= WORDSIZE;
/* LINTED improper alignment */
/* return the allocated space */
}
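/*
 * Illustrative sketch, not the allocator's code: the best-fit search
 * that malloc_unlocked() performs on the size-ordered free tree, i.e.
 * find the smallest free element at least as large as the request.
 * The node layout is this sketch's own (the real tree also keeps a
 * doubly linked list of same-size elements and parent pointers).
 */
#if 0
#include <stddef.h>

typedef struct ex_node {
	size_t		size;
	struct ex_node	*left, *right;
	struct ex_node	*link;	/* list of same-size free elements */
} ex_node_t;

static ex_node_t *
ex_best_fit(ex_node_t *root, size_t n)
{
	ex_node_t *best = NULL;

	while (root != NULL) {
		if (root->size >= n) {
			best = root;		/* fits; try for a tighter fit */
			root = root->left;
		} else {
			root = root->right;	/* too small; look at larger sizes */
		}
	}
	return (best);
}
#endif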
/*
* realloc().
* If the block size is increasing, we try forward merging first.
* This is not best-fit but it avoids some data recopying.
*/
void *
{
char *new;
if (size == 0) {
return ((void *)0x00000001);
}
/* check for size that could overflow calculations */
if (size > MAX_MALLOC) {
return (NULL);
}
/* pointer to the block */
(void) mutex_lock(&__watch_malloc_lock);
if (old <= (void *)0x00000001) {
(void) mutex_unlock(&__watch_malloc_lock);
return (new);
}
/* make sure that size is 0 mod ALIGN */
/* LINTED improper alignment */
/* if the block was freed, data has been destroyed. */
/* XXX; complain here! */
(void) mutex_unlock(&__watch_malloc_lock);
return (NULL);
}
(void) mutex_unlock(&__watch_malloc_lock);
return (old);
}
/* special cases involving small blocks */
if (size == 0) {
(void) mutex_unlock(&__watch_malloc_lock);
return (NULL);
}
goto call_malloc;
}
/* block is increasing in size, try merging the next block */
/* LINTED improper alignment */
else {
else
/* LINTED improper alignment */
}
/* not enough & at TRUE end of memory, try extending core */
}
}
}
/* got enough space to use */
size_t n;
n -= WORDSIZE;
/* LINTED improper alignment */
/* the previous block may be free */
(void) mutex_unlock(&__watch_malloc_lock);
return (old);
}
call_malloc: /* call malloc to get a new block */
(void) mutex_unlock(&__watch_malloc_lock);
return (new);
}
/*
* Attempt special case recovery allocations since malloc() failed:
*
* 1. size <= SIZE(tp) < MINSIZE
* Simply return the existing block
* 2. SIZE(tp) < size < MINSIZE
* malloc() may have failed to allocate the chunk of
* small blocks. Try asking for MINSIZE bytes.
* 3. size < MINSIZE <= SIZE(tp)
* malloc() may have failed as with 2. Change to
* MINSIZE allocation which is taken from the beginning
* of the current block.
* 4. MINSIZE <= SIZE(tp) < size
* If the previous block is free and the combination of
* these two blocks has at least size bytes, then merge
* the two blocks copying the existing contents backwards.
*/
(void) mutex_unlock(&__watch_malloc_lock);
return (old);
goto call_malloc;
}
goto chop_big;
/*
* Since the copy may overlap, use memmove().
*/
goto chop_big;
}
}
(void) mutex_unlock(&__watch_malloc_lock);
/* malloc() sets errno */
return (NULL);
}
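/*
 * Illustrative sketch of the growth strategy realloc() uses above: if
 * the next contiguous block is free and the combined space is enough,
 * absorb it in place (no data copying); otherwise allocate elsewhere
 * and copy.  The block model and names are this sketch's own, not the
 * allocator's TREE layout.
 */
#if 0
#include <stdlib.h>
#include <string.h>

typedef struct ex_blk {
	size_t	size;	/* usable bytes following this header */
	int	busy;	/* nonzero while allocated */
} ex_blk_t;

/* the next contiguous block starts right after this block's data */
#define	EX_NEXT(b)	((ex_blk_t *)((char *)((b) + 1) + (b)->size))

static void *
ex_grow(ex_blk_t *b, size_t newsize)
{
	ex_blk_t *nb = EX_NEXT(b);
	void *new;

	if (!nb->busy && b->size + sizeof (*nb) + nb->size >= newsize) {
		/* forward merge: absorb the free neighbor in place */
		b->size += sizeof (*nb) + nb->size;
		return (b + 1);
	}
	/* no room to grow in place: allocate elsewhere and copy */
	if ((new = malloc(newsize)) != NULL) {
		(void) memcpy(new, b + 1, b->size);
		/* the old block would be freed here */
	}
	return (new);
}
#endif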
/*
 * realfree().
 * Coalescing of adjacent free blocks is done first.
 * Then, the new free block is leaf-inserted into the free tree
 * without splaying. This strategy does not guarantee the amortized
 * bound on the tree operations. In practice, however, free is much
 * more infrequent than the allocation functions, whose tree searches
 * adequately keep the tree in balance.
 */
static void
{
/* pointer to the block */
/* LINTED improper alignment */
return;
}
/* small block, return it to the tail of its queue */
} else {
}
return;
}
/* see if coalescing with next block is warranted */
/* LINTED improper alignment */
else {
}
/* the same with the preceding block */
}
/* initialize tree info */
/* set bottom block, or insert in the free tree */
else {
/* search for the place to insert */
if (Root) {
for (;;) {
} else {
break;
}
} else {
break;
}
} else {
else
} else
/* insert to head of list */
}
}
/* doubly link list */
break;
}
}
} else {
}
}
/*
* Tell next block that this one is free.
* The first WORD of the next block contains self's address.
*/
/* LINTED improper alignment */
/* LINTED improper alignment */
}
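/*
 * Illustrative sketch of the leaf insertion realfree() performs: walk
 * the size-ordered tree and hang the new free element off a leaf
 * without splaying; equal-sized elements are chained off the node that
 * is already in the tree.  Uses the ex_node_t layout from the best-fit
 * sketch above; the real tree links same-size elements doubly.
 */
#if 0
static void
ex_leaf_insert(ex_node_t **root, ex_node_t *np)
{
	ex_node_t *tp = *root;

	np->left = np->right = np->link = NULL;
	if (tp == NULL) {
		*root = np;
		return;
	}
	for (;;) {
		if (np->size < tp->size) {
			if (tp->left == NULL) {
				tp->left = np;
				return;
			}
			tp = tp->left;
		} else if (np->size > tp->size) {
			if (tp->right == NULL) {
				tp->right = np;
				return;
			}
			tp = tp->right;
		} else {
			/* same size: insert at the head of the node's list */
			np->link = tp->link;
			tp->link = np;
			return;
		}
	}
}
#endif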
/*
* Get more core. Gaps in memory are noted as busy blocks.
*/
static TREE *
{
char *addr;
/* compute new amount of memory to get */
/* errno set by GETCORE sbrk */
return (NULL);
/* need to pad size out so that addr is aligned */
else
offset = 0;
if (tp)
/* if not segmented memory, what we need may be smaller */
n -= WORDSIZE;
}
/* get a multiple of CORESIZE */
requestsize = n + offset;
/* check if nsize request could overflow in GETCORE */
if (tp)
return (NULL);
}
if (requestsize > MAX_GETCORE) {
/*
 * The value required is too big for GETCORE() to deal with
 * in one go, so use GETCORE() at most 2 times instead.
 * The argument to GETCORE() must be a multiple of ALIGN;
 * if it is not, GETCORE(-MAX_GETCORE) will not return the brk
 * point to its previous value, but will leave it ALIGN bytes
 * higher.  This would leave a small hole.
 */
delta = MAX_GETCORE;
while (delta > 0) {
if (tp)
(void) GETCORE(-MAX_GETCORE);
return (NULL);
}
delta = requestsize;
}
if (tp)
return (NULL);
}
/* contiguous memory */
if (tp) {
} else {
n += WORDSIZE;
}
} else {
}
/* new bottom address */
/* new bottom block */
/* LINTED improper alignment */
/* reserve the last word to head any noncontiguous memory */
/* LINTED improper alignment */
/* non-contiguous memory, free old bottom block */
}
return (tp);
}
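/*
 * Illustrative sketch of the core-extension arithmetic above: round the
 * request up to a multiple of a chunk size and pad so that the returned
 * address is aligned, using sbrk() (the default GETCORE named in the
 * header comment).  The constants and names are hypothetical.
 */
#if 0
#include <unistd.h>
#include <stdint.h>
#include <stddef.h>

#define	EX_ALIGN	16
#define	EX_CORESIZE	(1024 * EX_ALIGN)

static void *
ex_morecore(size_t n)
{
	uintptr_t brkval = (uintptr_t)sbrk(0);	/* next available address */
	size_t offset = 0;
	char *addr;

	/* pad so that the new region starts on an EX_ALIGN boundary */
	if (brkval % EX_ALIGN != 0)
		offset = EX_ALIGN - (brkval % EX_ALIGN);

	/* round the request up to a multiple of EX_CORESIZE */
	n = ((n + offset + EX_CORESIZE - 1) / EX_CORESIZE) * EX_CORESIZE;

	addr = sbrk((intptr_t)n);
	if (addr == (char *)-1)		/* errno is set by sbrk() */
		return (NULL);
	return (addr + offset);
}
#endif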
/*
* Utility function to avoid protecting a tree node twice.
* Return true if tp is in the NULL-terminated array of tree nodes.
*/
static int
{
return (1);
return (0);
}
/*
* Tree rotation functions (BU: bottom-up, TD: top-down).
* All functions are entered with the arguments unprotected.
* They must return in the same condition, with all other elements
* that have been unprotected during the operation re-protected.
*/
static void
{
}
else
}
LEFT(y) = x;
PARENT(x) = y;
}
static void
{
}
else
}
RIGHT(y) = x;
PARENT(x) = y;
}
static void
{
}
}
else
}
LEFT(z) = y;
PARENT(y) = z;
LEFT(y) = x;
PARENT(x) = y;
}
static void
{
}
}
else
}
RIGHT(z) = y;
PARENT(y) = z;
RIGHT(y) = x;
PARENT(x) = y;
}
static void
{
}
else
}
PARENT(x) = z;
LEFT(z) = x;
}
#if 0 /* Not used, for now */
static void
{
}
else
}
PARENT(x) = z;
RIGHT(z) = x;
}
#endif
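/*
 * Illustrative sketch of the rotation primitive the BU/TD helpers above
 * are built from, on a tree with parent pointers, minus the
 * protect()/unprotect() bookkeeping.  The ex_tree_t type and function
 * names are this sketch's own.
 */
#if 0
#include <stddef.h>

typedef struct ex_tree {
	struct ex_tree *left, *right, *parent;
} ex_tree_t;

static void
ex_rotate_left(ex_tree_t **root, ex_tree_t *x)
{
	ex_tree_t *y = x->right;	/* y moves up, x becomes its left child */

	x->right = y->left;
	if (y->left != NULL)
		y->left->parent = x;
	y->parent = x->parent;
	if (x->parent == NULL)
		*root = y;
	else if (x == x->parent->left)
		x->parent->left = y;
	else
		x->parent->right = y;
	y->left = x;
	x->parent = y;
}

static void
ex_rotate_right(ex_tree_t **root, ex_tree_t *x)
{
	ex_tree_t *y = x->left;		/* mirror image of ex_rotate_left() */

	x->left = y->right;
	if (y->right != NULL)
		y->right->parent = x;
	y->parent = x->parent;
	if (x->parent == NULL)
		*root = y;
	else if (x == x->parent->left)
		x->parent->left = y;
	else
		x->parent->right = y;
	y->right = x;
	x->parent = y;
}
#endif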
/*
* Delete a tree element
*/
static void
{
/* if this is a non-tree node */
}
return;
}
/* make op the root of the tree */
/* if this is the start of a list */
}
}
return;
}
/* if op has a non-null left subtree */
/* make the right-end of the left subtree its root */
} else {
}
}
/* hook the right subtree of op to the above elt */
}
}
}
/*
* Bottom up splaying (simple version).
* The basic idea is to roughly cut in half the
* path from Root to tp and make tp the new root.
*/
static void
{
/* iterate until tp is the root */
/* grandparent of tp */
if (gp)
/* x is a left child */
} else {
if (gp)
}
} else {
} else {
if (gp)
}
}
}
}
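/*
 * Illustrative sketch of bottom-up splaying: zig-zig and zig-zag double
 * rotations move a node to the root while roughly halving the depth of
 * the nodes on its access path.  Assumes the ex_tree_t type and the
 * rotation helpers from the sketch following the rotation functions
 * above; the real t_splay() also handles the watchpoint bookkeeping.
 */
#if 0
static void
ex_splay(ex_tree_t **root, ex_tree_t *x)
{
	/* iterate until x is the root */
	while (x->parent != NULL) {
		ex_tree_t *p = x->parent;
		ex_tree_t *gp = p->parent;	/* grandparent of x */

		if (gp == NULL) {
			/* zig: a single rotation at the parent */
			if (x == p->left)
				ex_rotate_right(root, p);
			else
				ex_rotate_left(root, p);
		} else if ((x == p->left) == (p == gp->left)) {
			/* zig-zig: rotate at the grandparent, then the parent */
			if (x == p->left) {
				ex_rotate_right(root, gp);
				ex_rotate_right(root, p);
			} else {
				ex_rotate_left(root, gp);
				ex_rotate_left(root, p);
			}
		} else {
			/* zig-zag: two rotations in opposite directions */
			if (x == p->left) {
				ex_rotate_right(root, p);
				ex_rotate_left(root, gp);
			} else {
				ex_rotate_left(root, p);
				ex_rotate_right(root, gp);
			}
		}
	}
}
#endif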
void
{
(void) mutex_lock(&__watch_malloc_lock);
(void) mutex_unlock(&__watch_malloc_lock);
}
static void
{
if (old > (void *)0x00000001)
}
/*
* memalign(align,nbytes)
*
* Description:
* Returns a block of specified size on a specified alignment boundary.
*
* Algorithm:
* Malloc enough to ensure that a block can be aligned correctly.
* Find the alignment point and return the fragments
* before and after the block.
*
* Errors:
* Returns NULL and sets errno as follows:
* [EINVAL]
 * if nbytes is 0,
* or if alignment is misaligned,
* or if the heap has been detectably corrupted.
* [ENOMEM]
* if the requested memory could not be allocated.
*/
/* 4-byte "word" alignment is considered ok in LP64 */
void *
{
TREE *p; /* Ptr returned from malloc() */
size_t x;
/*
* check for valid size and alignment parameters
* MAX_ALIGN check prevents overflow in later calculation.
*/
return (NULL);
}
/*
* Malloc enough memory to guarantee that the result can be
* aligned correctly. The worst case is when malloc returns
* a block so close to the next alignment boundary that a
* fragment of minimum size cannot be created. In order to
* make sure we can handle this, we need to force the
* alignment to be at least as large as the minimum frag size
* (MINSIZE + WORDSIZE).
*/
/* check for size that could overflow ROUND calculation */
if (nbytes > MAX_MALLOC) {
return (NULL);
}
align <<= 1;
/* check for overflow */
return (NULL);
}
/* malloc sets errno */
return (NULL);
}
(void) mutex_lock(&__watch_malloc_lock);
/*
* get size of the entire block (overhead and all)
*/
/* LINTED improper alignment */
/*
* locate the proper alignment boundary within the block.
*/
x = (size_t)p;
if (x % align != 0)
/* LINTED improper alignment */
/*
* Check out the space to the left of the alignment
* boundary, and split off a fragment if necessary.
*/
if (frag_size != 0) {
/*
* Create a fragment to the left of the aligned block.
*/
/*
* Not enough space. So make the split
* at the other end of the alignment unit.
* We know this yields enough space, because
* we forced align >= MINSIZE + WORDSIZE above.
*/
/* LINTED improper alignment */
}
/*
* free_unlocked(DATA(blk)) has the side-effect of calling
* protect() on the block following blk, that is, aligned_blk.
* We recover from this by unprotect()ing it here.
*/
}
/*
* Is there a (sufficiently large) fragment to the
* right of the aligned block?
*/
/*
* split and free a fragment on the right
*/
/* LINTED improper alignment */
}
(void) mutex_unlock(&__watch_malloc_lock);
return (DATA(aligned_blk));
}
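/*
 * Illustrative sketch of the alignment arithmetic memalign() uses
 * above: over-allocate, then advance the pointer to the next multiple
 * of align.  The real routine also splits the left and right fragments
 * back onto the free list; the names here are hypothetical and align
 * is assumed to be nonzero.
 */
#if 0
#include <stdlib.h>
#include <stdint.h>

static void *
ex_align_within(size_t align, size_t nbytes)
{
	char *raw = malloc(nbytes + align);	/* room to slide forward */
	uintptr_t x;

	if (raw == NULL)
		return (NULL);
	x = (uintptr_t)raw;
	if (x % align != 0)
		x += align - (x % align);	/* next alignment boundary */
	/* the original raw pointer must be remembered to free() it later */
	return ((void *)x);
}
#endif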
void *
{
static unsigned pagesize;
if (!pagesize)
}
void *
{
void *mp;
/* check for overflow */
return (NULL);
}
return (mp);
}
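/*
 * Illustrative sketch of the overflow check noted above, as a generic
 * calloc-style wrapper: refuse the request when the element count times
 * the element size would wrap around, then zero the allocation.  This
 * is not the allocator's own code.
 */
#if 0
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static void *
ex_calloc(size_t nelem, size_t elsize)
{
	void *mp;

	/* check for overflow: nelem * elsize must fit in a size_t */
	if (nelem != 0 && elsize > (size_t)-1 / nelem) {
		errno = ENOMEM;
		return (NULL);
	}
	if ((mp = malloc(nelem * elsize)) != NULL)
		(void) memset(mp, 0, nelem * elsize);
	return (mp);
}
#endif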
/* ARGSUSED1 */
void
{
free(p);
}
typedef struct {
long cmd;
} ctl_t;
static int dont_watch = 0;
static int do_stop = 0;
static void
{
char *s;
dont_watch = 1;
return;
while (s != NULL) {
char *e = strchr(s, ',');
if (e)
*e++ = '\0';
if (strcmp(s, "STOP") == 0)
do_stop = 1;
else if (strcmp(s, "WATCH") == 0)
dont_watch = 0;
else if (strcmp(s, "RW") == 0) {
dont_watch = 0;
}
s = e;
}
if (dont_watch)
return;
if (ctlfd >= 0)
ctlfd = -1;
dont_watch = 1;
return;
}
/* close-on-exec */
if (do_stop) {
int pfd;
struct {
long cmd;
} ctl;
/*
* Play together with some /proc controller
* that has set other stop-on-fault flags.
*/
== sizeof (pstatus))
}
}
}
static int
nowatch()
{
if (dont_watch)
return (1);
if (ctlfd < 0) /* first time */
init_watch();
/*
* Someone closed our file descriptor.
* Just open another one.
*/
if (ctlfd >= 0)
ctlfd = -1;
dont_watch = 1;
return (1);
}
/* close-on-exec */
}
/*
* We fork()d since the last call to the allocator.
* watchpoints are not inherited across fork().
* XXX: how to recover from this ???
*/
dont_watch = 1;
ctlfd = -1;
}
return (dont_watch);
}
static void
{
if (nowatch())
return;
return;
if (size == 0)
return;
size = 0;
}
static void
{
if (nowatch())
return;
return;
}
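/*
 * Illustrative sketch of how a watchpoint request can be delivered to
 * the /proc ctl file descriptor opened by init_watch(): a control code
 * followed by its argument, as in the ctl_t shape above.  This assumes
 * the Solaris <procfs.h> PCWATCH interface (prwatch_t, WA_WRITE);
 * sending pr_wflags of 0 clears a watched area.
 */
#if 0
#include <procfs.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>

static void
ex_set_watch(int fd, uintptr_t vaddr, size_t size, int wflags)
{
	struct {
		long		cmd;
		prwatch_t	prwatch;
	} ctl;

	(void) memset(&ctl, 0, sizeof (ctl));
	ctl.cmd = PCWATCH;
	ctl.prwatch.pr_vaddr = vaddr;
	ctl.prwatch.pr_size = size;
	ctl.prwatch.pr_wflags = wflags;	/* e.g. WA_WRITE, or 0 to clear */
	(void) write(fd, &ctl, sizeof (ctl));
}
#endif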
static void
{
(void) mutex_lock(&__watch_malloc_lock);
}
static void
{
(void) mutex_unlock(&__watch_malloc_lock);
}
#pragma init(malloc_init)
static void
malloc_init(void)
{
}