/***********************************************************************
* *
* This software is part of the ast package *
* Copyright (c) 1985-2012 AT&T Intellectual Property *
* and is licensed under the *
* Eclipse Public License, Version 1.0 *
* by AT&T Intellectual Property *
* *
* A copy of the License is available at *
* (with md5 checksum b35adb5213ca9657e911e9befb180842) *
* *
* Information and Software Systems Research *
* AT&T Research *
* Florham Park NJ *
* *
* Glenn Fowler <gsf@research.att.com> *
* David Korn <dgk@research.att.com> *
* Phong Vo <kpv@research.att.com> *
* *
***********************************************************************/
/* Empty stub emitted when the real best-fit implementation is compiled out;
** it exists only so the object file exports at least one symbol.
** Use (void): an empty () parameter list declares no prototype in C.
*/
void _STUB_vmbest(void)
{
}
#else
#include "vmhdr.h"
/* Best-fit allocation method. This is based on a best-fit strategy
** using a splay tree for storage of lists of free blocks of the same
** size. Recent free blocks may be cached for fast reuse.
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
#ifdef DEBUG
#endif /*DEBUG*/
/* Check to see if a block is in the free tree */
#if __STD_C
#else
Block_t* b;
#endif
{ Block_t* t;
if(t == b)
return 1;
return 1;
return 1;
return 0;
}
#if __STD_C
#else
Block_t* b;
#endif
{
if(list == b)
return 1;
return 0;
}
/* Check to see if a block is known to be free */
#if __STD_C
#else
Block_t* b;
#endif
{
return 0;
return 1;
return 0;
}
/* Check to see if a block is known to be junked */
#if __STD_C
#else
Block_t* b;
#endif
{
Block_t* t;
return 0;
return 1;
/* check the list that b is supposed to be in */
if(t == b)
return 1;
/* on occasions, b may be put onto the catch-all list */
if(t == b)
return 1;
return 0;
}
/* check to see if the free tree is in good shape */
#if __STD_C
#else
#endif
{ Block_t* t;
else return vmchktree(t);
}
else return vmchktree(t);
}
return 0;
}
#if __STD_C
#else
#endif
{
int rv = 0;
if(!CHECK())
return 0;
/* make sure the free tree is still in shape */
{ /* there should be no marked bits of any type */
/* next block must be busy and marked PFREE */
/* must have a self-reference pointer */
if(SELF(b) != b)
/* segment pointer should be well-defined */
/* must be on a free list */
}
else
{ /* segment pointer should be well-defined */
/* next block should not be marked PFREE */
/* if PFREE, last block should be free */
/* if free but unreclaimed, should be junk */
}
}
}
return rv;
}
/* Tree rotation functions */
/* Find and delete a suitable element in the free tree. */
#if __STD_C
#else
#endif
{
/* extracting a tiniest block from its list */
TLEFT(r) = l;
if(l)
LINK(l) = r;
break;
}
}
return root;
}
/* find the right one to delete */
l = r = &link;
break;
if(size < s)
if(size == s)
break;
}
else
{ LLINK(l,t);
t = RIGHT(t);
}
}
}
else
if(size == s)
break;
}
else
{ RLINK(r,t);
t = LEFT(t);
}
}
}
} while((root = t) );
if(root) /* found it, now isolate it */
}
else /* nothing exactly fit */
/* grab the least one from the right tree */
}
}
{ /* head of a link list, use next one for root */
}
else /* graft left tree to right tree */
{ while((t = LEFT(r)) )
RROTATE(r,t);
}
return root;
}
/* Reclaim all delayed free blocks into the free tree */
#if __STD_C
#else
int c;
#endif
{
}
for(n = S_CACHE; n >= c; --n)
{ /* Note that below here we allow ISJUNK blocks to be
** forward-merged even though they are not removed from
** the list immediately. In this way, the list is
** scanned only once. It works because the LINK and SIZE
** fields are not destroyed during the merging. This can
** be seen by observing that a tiniest block has a 2-word
** header and a 2-word body. Merging a tiniest block
** (1seg) and the next block (2seg) looks like this:
** 1seg size link left 2seg size link left ....
** 1seg size link left rite xxxx xxxx .... self
** After the merge, the 2seg word is replaced by the RIGHT
** pointer of the new block and somewhere beyond the
** two xxxx fields, the SELF pointer will replace some
** other word. The important part is that the two xxxx
** fields are kept intact.
*/
continue;
}
for(;;) /* forward merge */
if(!ISBUSY(s))
}
else if(ISJUNK(s))
{ /* reclaim any touched junk list */
if((int)C_INDEX(s) < c)
c = (int)C_INDEX(s);
CLRBITS(s);
}
else break;
}
/* tell next block that this one is free */
saw_wanted = 1;
continue;
}
/* wilderness preservation */
continue;
}
/* tiny block goes to tiny list */
if(s == 0) /* TINIEST block */
{ if(np)
}
else
{ if(np)
}
continue;
}
continue;
}
while(1) /* leaf insertion */
np = t;
}
else
break;
}
}
else if(s < size)
np = t;
}
else
break;
}
}
else /* s == size */
}
break;
}
}
}
}
return saw_wanted;
}
#if __STD_C
#else
int local;
#endif
{
continue;
{ /* During large block allocations, _Vmextend may have
** enlarged the rounding factor. Reducing it a bit
** helps avoid acquiring overly large raw memory.
*/
round = _Vmpagesize;
/* for the bottom segment, we don't necessarily want
** to return raw memory too early. vd->pool has an
** approximation of the average size of recently freed
** blocks. If this is large, the application is managing
** large blocks so we throttle back memory chopping
** to avoid thrashing the underlying memory system.
*/
continue;
}
continue;
}
if(bp)
}
}
return 0;
}
#if __STD_C
#else
int local; /* internal call */
#endif
{
reg int n;
/* for ANSI requirement that malloc(0) returns non-NULL pointer */
}
goto done;
}
}
for(n = S_CACHE; n >= 0; --n) /* best-fit except for coalescing */
goto got_block;
}
goto got_block;
}
/* need to extend the arena */
{ got_block:
/* tell next block that we are no longer a free block */
}
}
}
done:
}
#if __STD_C
#else
int local;
#endif
{
break;
}
if(local ) /* from bestfree or bestresize */
offset = 0;
}
else if(seg)
{ while(b < endb)
offset = -1L;
goto done;
}
}
}
done:
return offset;
}
#if __STD_C
#else
int local;
#endif
{
#ifdef DEBUG
if(((char*)data - (char*)0) <= 1)
if (!data)
return 0;
}
#else
if(!data) /* ANSI-ism */
return 0;
#endif
/* Keep an approximate average free block size.
** This is used in bestcompact() to decide when to release
** raw memory back to the underlying memory system.
*/
if(s < MAXCACHE)
}
else
}
/* coalesce on freeing large blocks to avoid fragmentation */
}
}
return 0;
}
#if __STD_C
#else
int type; /* !=0 to move, <0 for not copy */
int local;
#endif
{
if(!data) /* resizing a NULL block is the same as allocating */
return data;
}
if(size == 0) /* resizing to zero size is the same as freeing */
}
do /* forward merge as much as possible */
CLRBITS(s);
}
else if(ISJUNK(s) )
}
else if(!ISBUSY(s) )
}
else break;
}
}
}
goto do_free;
}
else
do_free: /* reclaim these right away */
}
}
}
return data;
}
#if __STD_C
#else
int local;
#endif
{
long size;
size = -1L;
continue;
while(b < endb)
size = -1L;
goto done;
}
break;
}
}
done:
return size;
}
#if __STD_C
#else
int local;
#endif
{
/* hack so that dbalign() can store header data */
extra = 0;
else
align *= 2;
}
/* reclaim all free blocks now to avoid fragmentation */
goto done;
/* get an aligned address that we can live with */
}
/* free left-over if too big */
}
done:
}
/* The below implements the discipline Vmdcsbrk and the heap region Vmheap.
** There are 5 alternative ways to get raw memory:
** win32, sbrk, mmap_anon, mmap_zero and reusing the native malloc
** The selection of method done here is to let our malloc implementation
** work safely with concurrent threads: the sbrk() interface is
** not atomic. Thus, we prefer mmap_anon or mmap_zero if they are available.
*/
#if _mem_win32
#endif
#if _mem_mmap_anon
#if !_PACKAGE_ast
#endif
#endif
#if _mem_mmap_zero
#if !_PACKAGE_ast
#endif
#endif
#if __linux__
/* make sure that allocated memory is addressable */
#include <signal.h>
typedef void (*Sig_f)(int);
static int Gotsegv = 0;
{
Gotsegv = 1;
}
{
int rv;
Gotsegv = 0; /* catch segment fault */
Gotsegv = 0;
return rv;
}
#else
/* non-Linux systems are assumed to guarantee that brk addresses are valid */
#define chkaddr(a,n) (0)
#endif /*__linux__*/
#if _mem_win32 /* getting memory on a window system */
#if _PACKAGE_ast
#include <ast_windows.h>
#else
#include <windows.h>
#endif
if(csize == 0)
return caddr;
}
else if(nsize == 0)
return caddr;
}
}
#endif /* _mem_win32 */
{
else if(csize == 0)
}
else return caddr;
}
#endif /* _mem_sbrk */
#include <fcntl.h>
#ifndef MAP_ANON
#ifdef MAP_ANONYMOUS
#else
#define MAP_ANON 0
#endif
#endif /*MAP_ANON*/
#ifndef OPEN_MAX
#endif
typedef struct _mmdisc_s
int fd;
} Mmdisc_t;
{
#if _mem_mmap_zero
{ int fd;
}
}
}
#endif /* _mem_mmap_zero */
if(csize == 0)
#if _mem_mmap_zero
#endif
#if _mem_mmap_anon
if(!mmdc )
#endif
}
else
{ if(mmdc)
return caddr;
}
}
else if(nsize == 0)
else
return caddr;
}
}
}
#endif /* _mem_map_anon || _mem_mmap_zero */
#if _std_malloc /* using native malloc as a last resource */
{
if(csize == 0)
else if(nsize == 0)
return caddr;
}
}
#endif
/* A discipline to get raw memory using VirtualAlloc/mmap/sbrk */
{
#if _mem_win32
#endif
#if _mem_sbrk
#if 1 /* no sbrk() unless explicit VM_break */
#else /* asoinit(0,0,0)==0 => the application did not request a specific aso method => sbrk() ok */
if(((_Vmassert & VM_break) || !(_Vmassert & VM_mmap) && !asoinit(0, 0, 0)) && (addr = sbrkmem(caddr, csize, nsize)) )
#endif
#endif
#if _mem_mmap_anon
#endif
#if _mem_mmap_zero
#endif
#if _mem_sbrk
#endif
#if _std_malloc
#endif
}
#if _mem_mmap_zero || _mem_mmap_anon
/* Discipline instance for obtaining raw memory via mmap: memory function is
** getmemory, no exception handler, 64K rounding; FD_INIT seeds the fd member
** (see Mmdisc_t above). NOTE(review): trailing 0 presumably an offset — confirm
** against the full Mmdisc_t declaration.
*/
static Mmdisc_t _Vmdcsystem = { { getmemory, NIL(Vmexcept_f), 64*1024, sizeof(Mmdisc_t) }, FD_INIT, 0 };
#else
#endif
{
};
/* The heap region */
{
0, /* lock */
0, /* incr */
0, /* pool */
/* tiny[] */
/* cache[] */
};
{
{ bestalloc,
},
NIL(char*), /* file */
0, /* line */
0, /* func */
&_Vmdata, /* data */
};
#ifdef NoF
#endif
#endif