/***********************************************************************
* *
* This software is part of the ast package *
* Copyright (c) 1985-2010 AT&T Intellectual Property *
* and is licensed under the *
* Common Public License, Version 1.0 *
* by AT&T Intellectual Property *
* *
* A copy of the License is available at *
* (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9) *
* *
* Information and Software Systems Research *
* AT&T Research *
* Florham Park NJ *
* *
* Glenn Fowler <gsf@research.att.com> *
* David Korn <dgk@research.att.com> *
* Phong Vo <kpv@research.att.com> *
* *
***********************************************************************/
/* No-op stub compiled in place of the best-fit allocator when the real
** implementation is excluded from the build (see the #else branch below).
** Declared with (void): an empty parameter list () means "unspecified
** parameters" in pre-C23 C, which defeats argument type-checking.
*/
void _STUB_vmbest(void){}
#else
#include "vmhdr.h"
/* Best-fit allocation method. This is based on a best-fit strategy
** using a splay tree for storage of lists of free blocks of the same
** size. Recent free blocks may be cached for fast reuse.
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
#ifdef DEBUG
#define VM_TRUST 0
#endif /*DEBUG*/
#if _BLD_posix
extern int logsrc(int, const char*, int, const char*, ...);
#endif /*_BLD_posix*/
/* Check to see if a block is in the free tree */
#if __STD_C
#else
Block_t* b;
#endif
{ Block_t* t;
if(t == b)
return 1;
return 1;
return 1;
return 0;
}
#if __STD_C
#else
Block_t* b;
#endif
{
if(list == b)
return 1;
return 0;
}
/* Check to see if a block is known to be free */
#if __STD_C
#else
Block_t* b;
#endif
{
return 0;
return 1;
return 0;
}
/* Check to see if a block is known to be junked */
#if __STD_C
#else
Block_t* b;
#endif
{
Block_t* t;
return 0;
return 1;
/* check the list that b is supposed to be in */
if(t == b)
return 1;
/* on occasions, b may be put onto the catch-all list */
if(t == b)
return 1;
return 0;
}
/* check to see if the free tree is in good shape */
#if __STD_C
#else
#endif
{ Block_t* t;
else return vmchktree(t);
}
else return vmchktree(t);
}
return 0;
}
#if __STD_C
#else
#endif
{
int rv = 0;
if(!CHECK())
return 0;
/* make sure the free tree is still in shape */
{ /* there should be no marked bits of any type */
/* next block must be busy and marked PFREE */
/* must have a self-reference pointer */
if(*SELF(b) != b)
/* segment pointer should be well-defined */
/* must be on a free list */
}
else
{ /* segment pointer should be well-defined */
/* next block should not be marked PFREE */
/* if PFREE, last block should be free */
/* if free but unreclaimed, should be junk */
}
}
}
return rv;
}
/* Tree rotation functions */
/* Find and delete a suitable element in the free tree. */
#if __STD_C
#else
#endif
{
/* extracting a tiniest block from its list */
TLEFT(r) = l;
if(l)
LINK(l) = r;
break;
}
}
return root;
}
/* find the right one to delete */
l = r = &link;
break;
if(size < s)
if(size == s)
break;
}
else
{ LLINK(l,t);
t = RIGHT(t);
}
}
}
else
if(size == s)
break;
}
else
{ RLINK(r,t);
t = LEFT(t);
}
}
}
} while((root = t) );
if(root) /* found it, now isolate it */
}
else /* nothing exactly fit */
/* grab the least one from the right tree */
}
}
{ /* head of a link list, use next one for root */
}
else /* graft left tree to right tree */
{ while((t = LEFT(r)) )
RROTATE(r,t);
}
return root;
}
/* Reclaim all delayed free blocks into the free tree */
#if __STD_C
#else
int c;
#endif
{
}
for(n = S_CACHE; n >= c; --n)
{ /* Note that below here we allow ISJUNK blocks to be
** forward-merged even though they are not removed from
** the list immediately. In this way, the list is
** scanned only once. It works because the LINK and SIZE
** fields are not destroyed during the merging. This can
** be seen by observing that a tiniest block has a 2-word
** header and a 2-word body. Merging a tiniest block
** (1seg) and the next block (2seg) looks like this:
** 1seg size link left 2seg size link left ....
** 1seg size link left rite xxxx xxxx .... self
** After the merge, the 2seg word is replaced by the RIGHT
** pointer of the new block and somewhere beyond the
** two xxxx fields, the SELF pointer will replace some
** other word. The important part is that the two xxxx
** fields are kept intact.
*/
continue;
{ /* see if this address is from region */
break;
if(!seg) /* must be a bug in application code! */
continue;
}
}
#if _BLD_posix
{
}
#endif
}
for(;;) /* forward merge */
#if _BLD_posix
{
}
#endif
if(!ISBUSY(s))
}
else if(ISJUNK(s))
{ /* reclaim any touched junk list */
if((int)C_INDEX(s) < c)
c = C_INDEX(s);
CLRBITS(s);
}
else break;
}
/* tell next block that this one is free */
saw_wanted = 1;
continue;
}
/* wilderness preservation */
continue;
}
/* tiny block goes to tiny list */
if(s == 0) /* TINIEST block */
{ if(np)
}
else
{ if(np)
}
continue;
}
continue;
}
while(1) /* leaf insertion */
np = t;
}
else
break;
}
}
else if(s < size)
np = t;
}
else
break;
}
}
else /* s == size */
}
break;
}
}
}
}
return saw_wanted;
}
#if __STD_C
#else
static int bestcompact(vm)
#endif
{
return -1;
}
}
continue;
{ /* During large block allocations, _Vmextend might
** have been enlarged the rounding factor. Reducing
** it a bit help avoiding getting large raw memory.
*/
round = _Vmpagesize;
/* for the bottom segment, we don't necessarily want
** to return raw memory too early. vd->pool has an
** approximation of the average size of recently freed
** blocks. If this is large, the application is managing
** large blocks so we throttle back memory chopping
** to avoid thrashing the underlying memory system.
*/
continue;
}
continue;
}
if(bp)
}
}
return 0;
}
#if __STD_C
#else
#endif
{
reg int n;
VMOPTIONS();
}
}
/* for ANSI requirement that malloc(0) returns non-NULL pointer */
}
goto done;
}
}
for(;;)
{ for(n = S_CACHE; n >= 0; --n) /* best-fit except for coalescing */
goto got_block;
}
goto got_block;
}
goto got_block;
else
}
}
/* tell next block that we are no longer a free block */
}
}
done:
}
#if __STD_C
#else
#endif
{
return -1L;
}
}
break;
}
offset = 0;
}
else if(seg)
{ while(b < endb)
offset = -1L;
goto done;
}
}
}
done:
return offset;
}
#if __STD_C
#else
#endif
{
#ifdef DEBUG
return 0;
}
#endif
if(!data) /* ANSI-ism */
return 0;
return -1;
}
}
/* Keep an approximate average free block size.
** This is used in bestcompact() to decide when to release
** raw memory back to the underlying memory system.
*/
if(s < MAXCACHE)
}
else
}
/* coalesce on freeing large blocks to avoid fragmentation */
}
}
return 0;
}
#if __STD_C
#else
int type; /* !=0 to move, <0 for not copy */
#endif
{
if(!data)
{ oldsize = 0;
}
goto done;
}
if(size == 0)
}
}
}
do /* forward merge as much as possible */
CLRBITS(s);
}
else if(ISJUNK(s) )
}
else if(!ISBUSY(s) )
}
else break;
}
}
}
goto do_free;
}
else
do_free: /* reclaim these right away */
}
}
}
return data;
}
#if __STD_C
#else
#endif
{
return -1L;
}
}
size = -1L;
continue;
while(b < endb)
size = -1L;
goto done;
}
break;
}
}
done:
return size;
}
#if __STD_C
#else
#endif
{
}
}
/* hack so that dbalign() can store header data */
extra = 0;
else
align *= 2;
}
/* reclaim all free blocks now to avoid fragmentation */
goto done;
/* get an aligned address that we can live with */
}
/* free left-over if too big */
}
done:
}
#if _mem_win32
#if _PACKAGE_ast
#include <ast_windows.h>
#else
#include <windows.h>
#endif
#endif /* _lib_win32 */
#if _mem_mmap_anon
#ifndef MAP_ANON
#endif
#endif /* _mem_mmap_anon */
#if _mem_mmap_zero
typedef struct _mmapdisc_s
int fd;
} Mmapdisc_t;
#ifndef OPEN_MAX
#endif
#endif /* _mem_mmap_zero */
/* failure mode of mmap, sbrk and brk */
#ifndef MAP_FAILED
#endif
/* make sure that allocated memory are addressable */
#if _PACKAGE_ast
#include <sig.h>
#else
#include <signal.h>
typedef void (*Sig_handler_t)(int);
#endif
static int Gotsegv = 0;
#if __STD_C
#else
int sig;
#endif
{
Gotsegv = 1;
}
#if __STD_C
#else
#endif
{
int rv;
Gotsegv = 0; /* catch segment fault */
if(Gotsegv == 0)
if(Gotsegv == 0)
if(Gotsegv == 0)
else rv = -1;
Gotsegv = 0;
return rv;
}
/* A discipline to get raw memory using sbrk/VirtualAlloc/mmap */
#if __STD_C
#else
#endif
{
#if !defined(_done_sbrkmem) && defined(_mem_win32)
if(csize == 0)
else if(nsize == 0)
#endif /* MUST_WIN32 */
#if _mem_mmap_zero
#else
#endif
if(csize == 0) /* allocating new memory */
{
#if _mem_sbrk /* try using sbrk() and brk() */
{
{
return addr;
}
}
}
#endif /* _mem_sbrk */
#if _mem_mmap_anon /* anonymous mmap */
return addr;
}
#endif /* _mem_mmap_anon */
#if _mem_mmap_zero /* mmap from /dev/zero */
{ int fd;
}
#ifdef FD_CLOEXEC
#endif
}
return addr;
}
}
#endif /* _mem_mmap_zero */
}
else
{
#if _mem_sbrk
}
#else
#endif /* _mem_sbrk */
#if _mem_mmap_zero || _mem_mmap_anon
return caddr;
#endif /* _mem_mmap_zero || _mem_mmap_anon */
}
#endif /*_done_sbrkmem*/
#if !_done_sbrkmem /* use native malloc/free as a last resort */
if(csize == 0)
else if(nsize == 0)
return caddr;
}
#endif /* _done_sbrkmem */
}
#if _mem_mmap_zero
#else
#endif
{
};
/* The heap region */
{
0, /* incr */
0, /* pool */
};
{
{ bestalloc,
},
NIL(char*), /* file */
0, /* line */
0, /* func */
&_Vmdata, /* data */
};
#ifdef NoF
#endif
#endif