/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2011 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#ifndef _VMHDR_H
#define _VMHDR_H 1
#ifndef _BLD_vmalloc
#define _BLD_vmalloc 1
#endif

/* Common types, and macros for vmalloc functions.
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/

#ifndef __STD_C /* this is normally in vmalloc.h but it's included late here */
#ifdef __STDC__
#define __STD_C 1
#else
#if __cplusplus || c_plusplus
#define __STD_C 1
#else
#define __STD_C 0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#if _PACKAGE_ast

#if !_UWIN
#define getpagesize ______getpagesize
#define _npt_getpagesize 1
#define brk ______brk
#define sbrk ______sbrk
#define _npt_sbrk 1
#endif

#include <ast.h>

#if _npt_getpagesize
#undef getpagesize
#endif
#if _npt_sbrk
#undef brk
#undef sbrk
#endif

#else

#include <ast_common.h>

#if !_UWIN
#define _npt_getpagesize 1
#define _npt_sbrk 1
#endif

#ifndef integralof
#define integralof(x) (((char*)(x))-((char*)0))
#endif

#endif /*_PACKAGE_ast*/

#include "FEATURE/vmalloc"

#include <setjmp.h>

/* the macros below decide which combination of sbrk() and mmap() to use */
#if defined(_WIN32)
#define _mem_win32 1
#undef _mem_sbrk
#undef _mem_mmap_anon
#undef _mem_mmap_zero
#endif

#if _mem_mmap_anon
#undef _mem_mmap_zero
#endif

#if !_mem_win32 && !_mem_sbrk && !_mem_mmap_anon && !_mem_mmap_zero
#undef _std_malloc
#define _std_malloc 1 /* do not define malloc/free/realloc */
#endif

typedef unsigned char Vmuchar_t;
typedef unsigned long Vmulong_t;

typedef union _head_u Head_t;
typedef union _body_u Body_t;
typedef struct _block_s Block_t;
typedef struct _seg_s Seg_t;
typedef struct _pfobj_s Pfobj_t;

#if !_typ_ssize_t
typedef int ssize_t;
#endif

#define NIL(t) ((t)0)
#define reg register
#if __STD_C
#define NOTUSED(x) (void)(x)
#else
#define NOTUSED(x) (&x,1)
#endif

/* convert an address to an integral value */
#define VLONG(addr) ((Vmulong_t)((char*)(addr) - (char*)0) )

/* Round x up to a multiple of y. ROUND2 does powers-of-2 and ROUNDX does others */
#define ROUND2(x,y) (((x) + ((y)-1)) & ~((y)-1))
#define ROUNDX(x,y) ((((x) + ((y)-1)) / (y)) * (y))
#define ROUND(x,y) (((y)&((y)-1)) ? ROUNDX((x),(y)) : ROUND2((x),(y)) )

/* compute a value that is a common multiple of x and y */
#define MULTIPLE(x,y) ((x)%(y) == 0 ? (x) : (y)%(x) == 0 ? (y) : (y)*(x))
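
/* Worked examples (illustrative only, not part of the interface):
**	ROUND(22,8)  -> y=8 is a power of two, so ROUND2(22,8)  == (22+7) & ~7     == 24
**	ROUND(22,12) -> y=12 is not,           so ROUNDX(22,12) == ((22+11)/12)*12 == 24
**	MULTIPLE(4,16) == 16 since 16%4 == 0; MULTIPLE(6,16) == 96 since neither divides the other
*/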

#define VM_check 0x0001 /* enable detailed checks */
#define VM_abort 0x0002 /* abort() on assertion failure */
#define VM_region 0x0004 /* enable region segment checks */
#define VM_mmap 0x0010 /* favor mmap allocation */

#if _UWIN
#include <ast_windows.h>
#endif

#ifndef DEBUG
#ifdef _BLD_DEBUG
#define DEBUG 1
#endif /*_BLD_DEBUG*/
#endif /*DEBUG*/
#if DEBUG
extern void _vmmessage _ARG_((const char*, long, const char*, long));
#define ABORT() (_Vmassert & VM_abort)
#define CHECK() (_Vmassert & VM_check)
#define ASSERT(p) ((p) ? 0 : (MESSAGE("Assertion failed"), ABORT() ? (abort(),0) : 0))
#define COUNT(n) ((n) += 1)
#define MESSAGE(s) _vmmessage(__FILE__,__LINE__,s,0)
#else
#define ABORT() (0)
#define ASSERT(p)
#define CHECK() (0)
#define COUNT(n)
#define MESSAGE(s) (0)
#endif /*DEBUG*/

#define VMPAGESIZE 8192

#if _AST_PAGESIZE > VMPAGESIZE
#undef VMPAGESIZE
#define VMPAGESIZE _AST_PAGESIZE
#endif

#if _lib_getpagesize && !defined(_AST_PAGESIZE)
#define GETPAGESIZE(x) ((x) ? (x) : \
	(((x)=getpagesize()) < VMPAGESIZE ? ((x)=VMPAGESIZE) : (x)) )
#else
#define GETPAGESIZE(x) ((x) = VMPAGESIZE)
#endif
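
/* Illustrative use (a sketch, not taken from this header): a cached page size
** such as _Vmpagesize starts at 0 and is filled in on first use, e.g.
**	GETPAGESIZE(_Vmpagesize);
** which leaves the cached value at least VMPAGESIZE; later calls just return
** the cached value without calling getpagesize() again.
*/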

#ifdef _AST_PAGESIZE
#define VMHEAPINCR (_Vmpagesize*1)
#else
#define VMHEAPINCR (_Vmpagesize*sizeof(void*))
#endif

/* Blocks are allocated such that their sizes are 0%(BITS+1)
** This frees up enough low order bits to store state information
*/
#define BUSY (01) /* block is busy */
#define PFREE (02) /* preceding block is free */
#define JUNK (04) /* marked as freed but not yet processed */
#define BITS (07) /* (BUSY|PFREE|JUNK) */
#define ALIGNB (8) /* size must be a multiple of BITS+1 */

#define ISBITS(w) ((w) & BITS)
#define CLRBITS(w) ((w) &= ~BITS)
#define CPYBITS(w,f) ((w) |= ((f)&BITS) )

#define ISBUSY(w) ((w) & BUSY)
#define SETBUSY(w) ((w) |= BUSY)
#define CLRBUSY(w) ((w) &= ~BUSY)

#define ISPFREE(w) ((w) & PFREE)
#define SETPFREE(w) ((w) |= PFREE)
#define CLRPFREE(w) ((w) &= ~PFREE)

#define ISJUNK(w) ((w) & JUNK)
#define SETJUNK(w) ((w) |= JUNK)
#define CLRJUNK(w) ((w) &= ~JUNK)
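
/* Worked example (illustrative): because block sizes are multiples of
** BITS+1 == 8, the low three bits of the size word are free for state.
** A busy block with 48 data bytes whose predecessor is free stores
**	SIZE(b) == 48|BUSY|PFREE == 51
** and the true size is recovered with (SIZE(b) & ~BITS) == 48.
*/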

#define OFFSET(t,e) ((size_t)(&(((t*)0)->e)) )

/* these bits share the "mode" field with the public bits */
#define VM_AGAIN 0010000 /* re-search the arena for space */
#define VM_LOCK 0020000 /* region is locked */
#define VM_LOCAL 0040000 /* local call, bypass lock */
#define VM_INUSE 0004000 /* some operation is running */
#define VM_UNUSED 0100060
#define VMETHOD(vd) ((vd)->mode&VM_METHODS)

/* test/set/clear lock state */
#define SETINUSE(vd,iu) (((iu) = (vd)->mode&VM_INUSE), ((vd)->mode |= VM_INUSE) )
#define CLRINUSE(vd,iu) ((iu) ? 0 : ((vd)->mode &= ~VM_INUSE) )
#define SETLOCAL(vd) ((vd)->mode |= VM_LOCAL)
#define GETLOCAL(vd,l) (((l) = (vd)->mode&VM_LOCAL), ((vd)->mode &= ~VM_LOCAL) )
#define ISLOCK(vd,l) ((l) ? 0 : ((vd)->mode & VM_LOCK) )
#define SETLOCK(vd,l) ((l) ? 0 : ((vd)->mode |= VM_LOCK) )
#define CLRLOCK(vd,l) ((l) ? 0 : ((vd)->mode &= ~VM_LOCK) )

/* announcing entry/exit of allocation calls */
#define ANNOUNCE(lc, vm,ev,dt,dc) \
	(( ((lc)&VM_LOCAL) || !(dc) || !(dc)->exceptf ) ? 0 : \
	 (*(dc)->exceptf)((vm), (ev), (Void_t*)(dt), (dc)) )
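
/* A minimal sketch (not part of this header) of a discipline exception
** handler with the shape ANNOUNCE() invokes; the name show_exceptf is a
** made-up example:
**
**	static int show_exceptf(Vmalloc_t* vm, int ev, Void_t* data, Vmdisc_t* disc)
**	{	MESSAGE("allocation event");
**		return 0;
**	}
**
** ANNOUNCE() skips the handler for local (VM_LOCAL) re-entries and when the
** discipline defines no exceptf; returning 0 lets the operation proceed.
*/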


/* local calls */
#define KPVALLOC(vm,sz,func) (SETLOCAL((vm)->data), func((vm),(sz)) )
#define KPVALIGN(vm,sz,al,func) (SETLOCAL((vm)->data), func((vm),(sz),(al)) )
#define KPVFREE(vm,d,func) (SETLOCAL((vm)->data), func((vm),(d)) )
#define KPVRESIZE(vm,d,sz,mv,func) (SETLOCAL((vm)->data), func((vm),(d),(sz),(mv)) )
#define KPVADDR(vm,addr,func) (SETLOCAL((vm)->data), func((vm),(addr)) )
#define KPVCOMPACT(vm,func) (SETLOCAL((vm)->data), func((vm)) )

/* ALIGN is chosen so that a block can store all primitive types.
** It should also be a multiple of ALIGNB==(BITS+1) so the size field
** of Block_t will always be 0%(BITS+1) as noted above.
** Of paramount importance is the ALIGNA macro below. If the local compile
** environment is strange enough that the below method does not calculate
** ALIGNA right, then the code below should be commented out and ALIGNA
** redefined to the appropriate requirement.
*/
union _align_u
{	char		c, *cp;
	int		i, *ip;
	long		l, *lp;
	double		d, *dp, ***dppp[8];
	size_t		s, *sp;
	void		(*fn)();
	union _align_u*	align;
	Head_t*		head;
	Body_t*		body;
	Block_t*	block;
	Vmuchar_t	a[ALIGNB];
	_ast_fltmax_t	ld, *ldp;
	jmp_buf		jmp;
};
struct _a_s
{	char		c;
	union _align_u	a;
};
#define ALIGNA (sizeof(struct _a_s) - sizeof(union _align_u))
struct _align_s
{	char	data[MULTIPLE(ALIGNA,ALIGNB)];
};
#undef ALIGN /* bsd sys/param.h defines this */
#define ALIGN sizeof(struct _align_s)
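
/* Worked example (illustrative; the numbers assume a typical LP64 ABI where
** the most strictly aligned member of union _align_u needs 16 bytes):
**	sizeof(struct _a_s) == 16 + sizeof(union _align_u)  =>  ALIGNA == 16
**	MULTIPLE(ALIGNA,ALIGNB) == MULTIPLE(16,8) == 16     =>  ALIGN  == 16
** On an ABI where ALIGNA comes out as 8, ALIGN would be MULTIPLE(8,8) == 8.
*/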

/* make sure that the head of a block is a multiple of ALIGN */
struct _head_s
{	union
	{ Seg_t*	seg;	/* the containing segment	*/
	  Block_t*	link;	/* possible link list usage	*/
	  Pfobj_t*	pf;	/* profile structure pointer	*/
	  char*		file;	/* for file name in Vmdebug	*/
	} seg;
	union
	{ size_t	size;	/* size of data area in bytes	*/
	  Block_t*	link;	/* possible link list usage	*/
	  int		line;	/* for line number in Vmdebug	*/
	} size;
};
#define HEADSIZE ROUND(sizeof(struct _head_s),ALIGN)
union _head_u
{	Vmuchar_t	data[HEADSIZE];	/* to standardize size	*/
	struct _head_s	head;
};

/* now make sure that the body of a block is a multiple of ALIGN */
struct _body_s
{	Block_t*	link;	/* next in link list		*/
	Block_t*	left;	/* left child in free tree	*/
	Block_t*	right;	/* right child in free tree	*/
	Block_t**	self;	/* self pointer when free	*/
};
#define BODYSIZE ROUND(sizeof(struct _body_s),ALIGN)
union _body_u
{	Vmuchar_t	data[BODYSIZE];	/* to standardize size	*/
	struct _body_s	body;
};

/* After all the songs and dances, we should now have:
**	sizeof(Head_t)%ALIGN == 0
**	sizeof(Body_t)%ALIGN == 0
**	and sizeof(Block_t) == sizeof(Head_t)+sizeof(Body_t)
*/
struct _block_s
{	Head_t	head;
	Body_t	body;
};
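
/* Illustrative sizes (an assumption, for a 64-bit target with ALIGN == 16):
**	sizeof(struct _head_s) == 16, so HEADSIZE == 16 and sizeof(Head_t) == 16
**	sizeof(struct _body_s) == 32, so BODYSIZE == 32 and sizeof(Body_t) == 32
**	sizeof(Block_t) == 48, which satisfies all three invariants above.
*/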

/* requirements for smallest block type */
struct _tiny_s
{	Block_t*	link;
	Block_t*	self;
};
#define TINYSIZE ROUND(sizeof(struct _tiny_s),ALIGN)
#define S_TINY 1 /* # of tiny blocks */
#define MAXTINY (S_TINY*ALIGN + TINYSIZE)
#define TLEFT(b) ((b)->head.head.seg.link) /* instead of LEFT */
#define TINIEST(b) (SIZE(b) == TINYSIZE) /* this type uses TLEFT */

#define DIV(x,y) ((y) == 8 ? ((x)>>3) : (x)/(y) )
#define INDEX(s) DIV((s)-TINYSIZE,ALIGN)

/* small block types kept in separate caches for quick allocation */
#define S_CACHE 6 /* # of types of small blocks to be cached */
#define N_CACHE 32 /* on allocation, create this many at a time */
#define MAXCACHE (S_CACHE*ALIGN + TINYSIZE)
#define C_INDEX(s) (s < MAXCACHE ? INDEX(s) : S_CACHE)

#define TINY(vd) ((vd)->tiny)
#define CACHE(vd) ((vd)->cache)
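
/* Worked example (illustrative, assuming ALIGN == 16 as sketched above):
**	TINYSIZE == ROUND(2*sizeof(Block_t*),16) == 16,  MAXTINY  == 1*16+16 == 32
**	MAXCACHE == 6*16+16 == 112
**	C_INDEX(16) == 0,  C_INDEX(48) == 2,  C_INDEX(112) == S_CACHE == 6
** so, in the caching scheme these macros support, a freed block of size 48
** would be held in CACHE(vd)[2] until it is processed.
*/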

struct _vmdata_s
{	int		mode;		/* current mode for region	*/
	size_t		incr;		/* allocate in multiple of this	*/
	size_t		pool;		/* size of an elt in a Vmpool region */
	Seg_t*		seg;		/* list of segments		*/
	Block_t*	free;		/* most recent free block	*/
	Block_t*	wild;		/* wilderness block		*/
	Block_t*	root;		/* root of free tree		*/
	Block_t*	tiny[S_TINY];	/* small blocks			*/
	Block_t*	cache[S_CACHE+1]; /* delayed free blocks	*/
};
/* Vmdata_t typedef in <vmalloc.h> */

#include "vmalloc.h"

#if !_PACKAGE_ast
/* we don't use these here and they interfere with some local names */
#undef malloc
#undef free
#undef realloc
#endif

/* segment structure */
struct _seg_s
{	Vmdata_t*	vmdt;	/* the data region holding this	*/
	Seg_t*		next;	/* next segment			*/
	Void_t*		addr;	/* starting segment address	*/
	size_t		extent;	/* extent of segment		*/
	Vmuchar_t*	baddr;	/* bottom of usable memory	*/
	size_t		size;	/* allocable size		*/
	Block_t*	free;	/* recent free blocks		*/
	Block_t*	last;	/* Vmlast last-allocated block	*/
};

/* starting block of a segment */
#define SEGBLOCK(s) ((Block_t*)(((Vmuchar_t*)(s)) + ROUND(sizeof(Seg_t),ALIGN)))

/* short-hands for block data */
#define SEG(b) ((b)->head.head.seg.seg)
#define SEGLINK(b) ((b)->head.head.seg.link)
#define SIZE(b) ((b)->head.head.size.size)
#define SIZELINK(b) ((b)->head.head.size.link)
#define LINK(b) ((b)->body.body.link)
#define LEFT(b) ((b)->body.body.left)
#define RIGHT(b) ((b)->body.body.right)
#define VM(b) (SEG(b)->vm)

#define DATA(b) ((Void_t*)((b)->body.data) )
#define BLOCK(d) ((Block_t*)((char*)(d) - sizeof(Head_t)) )
#define SELF(b) ((Block_t**)((b)->body.data + SIZE(b) - sizeof(Block_t*)) )
#define LAST(b) (*((Block_t**)(((char*)(b)) - sizeof(Block_t*)) ) )
#define NEXT(b) ((Block_t*)((b)->body.data + SIZE(b)) )
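
/* Address arithmetic recap (illustrative, with sizeof(Head_t) == 16 as above):
**	DATA(b) == (Vmuchar_t*)(b) + sizeof(Head_t)   and   BLOCK(DATA(b)) == b
**	NEXT(b) == (Block_t*)(DATA(b) + SIZE(b)), the physically adjacent block
**	SELF(b) occupies the last sizeof(Block_t*) bytes of a free block's data,
**	so LAST(NEXT(b)) can recover b when ISPFREE(SIZE(NEXT(b))) is set.
*/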

/* macros to manipulate link lists of elts of the same size */
#define SETLINK(b) (RIGHT(b) = (b) )
#define ISLINK(b) (RIGHT(b) == (b) )
#define UNLINK(vd,b,i,t) \
	((((t) = LINK(b)) ? (LEFT(t) = LEFT(b)) : NIL(Block_t*) ), \
	 (((t) = LEFT(b)) ? (LINK(t) = LINK(b)) : (TINY(vd)[i] = LINK(b)) ) )

/* delete a block from a link list or the free tree.
** The test in the macro below is worth a bit of head-scratching.
** Even though tiny blocks (size < BODYSIZE) are kept in separate lists,
** only the TINIEST ones require TLEFT(b) for the back link. Since this
** destroys the SEG(b) pointer, it must be carefully restored in bestsearch().
** Other tiny blocks have enough space to use the usual LEFT(b).
** In this case, I have also carefully arranged so that RIGHT(b) and
** SELF(b) can be overlapped and the test ISLINK() will go through.
*/
#define REMOVE(vd,b,i,t,func) \
	((!TINIEST(b) && ISLINK(b)) ? UNLINK((vd),(b),(i),(t)) : \
	 func((vd),SIZE(b),(b)) )

/* see if a block is the wilderness block */
#define SEGWILD(b) (((b)->body.data+SIZE(b)+sizeof(Head_t)) >= SEG(b)->baddr)
#define VMWILD(vd,b) (((b)->body.data+SIZE(b)+sizeof(Head_t)) >= vd->seg->baddr)

#define VMFLF(vm,fi,ln,fn) ((fi) = (vm)->file, (vm)->file = NIL(char*), \
			    (ln) = (vm)->line, (vm)->line = 0, \
			    (fn) = (vm)->func, (vm)->func = NIL(Void_t*) )

/* The lay-out of a Vmprofile block is this:
**	seg_ size	----data----	_pf_ size
**	---------	------------	---------
**	seg_, size: header required by Vmbest.
**	data:       actual data block.
**	_pf_:       pointer to the corresponding Pfobj_t struct
**	size:       the true size of the block.
** So each block requires an extra Head_t.
*/
#define PF_EXTRA sizeof(Head_t)
#define PFDATA(d) ((Head_t*)((Vmuchar_t*)(d)+(SIZE(BLOCK(d))&~BITS)-sizeof(Head_t)) )
#define PFOBJ(d) (PFDATA(d)->head.seg.pf)
#define PFSIZE(d) (PFDATA(d)->head.size.size)
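
/* Worked example (illustrative, with sizeof(Head_t) == 16): for a Vmprofile
** request of 40 bytes, the underlying Vmbest block is asked for 40+PF_EXTRA
** == 56 bytes of data (rounded up by Vmbest as needed); PFDATA(d) then points
** at the trailing Head_t, where PFOBJ(d) holds the Pfobj_t pointer and
** PFSIZE(d) holds the true request size, 40.
*/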

/* The lay-out of a block allocated by Vmdebug is this:
**	seg_ size	file size	seg_ magi	----data----	--magi--	magi line
**	---------	---------	---------	------------	--------	---------
**	seg_,size: header required by Vmbest management.
**	file:      the file where it was created.
**	size:      the true byte count of the block
**	seg_:      should be the same as the previous seg_.
**	           This allows the function vmregion() to work.
**	magi:      magic bytes to detect overwrites.
**	data:      the actual data block.
**	magi:      more magic bytes.
**	line:      the line number in the file where it was created.
** So for each allocated block, we'll need 3 extra Head_t.
*/

/* convenient macros for accessing the above fields */
#define DB_HEAD (2*sizeof(Head_t))
#define DB_TAIL (2*sizeof(Head_t))
#define DB_EXTRA (DB_HEAD+DB_TAIL)
#define DBBLOCK(d) ((Block_t*)((Vmuchar_t*)(d) - 3*sizeof(Head_t)) )
#define DBBSIZE(d) (SIZE(DBBLOCK(d)) & ~BITS)
#define DBSEG(d) (((Head_t*)((Vmuchar_t*)(d) - sizeof(Head_t)))->head.seg.seg )
#define DBSIZE(d) (((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.size.size )
#define DBFILE(d) (((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.seg.file )
#define DBLN(d) (((Head_t*)((Vmuchar_t*)DBBLOCK(d)+DBBSIZE(d)))->head.size.line )
#define DBLINE(d) (DBLN(d) < 0 ? -DBLN(d) : DBLN(d))

/* forward/backward translation for addresses between Vmbest and Vmdebug */
#define DB2BEST(d) ((Vmuchar_t*)(d) - 2*sizeof(Head_t))
#define DB2DEBUG(b) ((Vmuchar_t*)(b) + 2*sizeof(Head_t))

/* set file and line number, note that DBLN > 0 so that DBISBAD will work */
#define DBSETFL(d,f,l) (DBFILE(d) = (f), DBLN(d) = (f) ? (l) : 1)

/* set and test whether a block is known to be corrupted */
#define DBSETBAD(d) (DBLN(d) > 0 ? (DBLN(d) = -DBLN(d)) : -1)
#define DBISBAD(d) (DBLN(d) <= 0)

#define DB_MAGIC 0255 /* 10101101 */

/* compute the bounds of the magic areas */
#define DBHEAD(d,begp,endp) \
	(((begp) = (Vmuchar_t*)(&DBSEG(d)) + sizeof(Seg_t*)), ((endp) = (d)) )
#define DBTAIL(d,begp,endp) \
	(((begp) = (Vmuchar_t*)(d)+DBSIZE(d)), ((endp) = (Vmuchar_t*)(&DBLN(d))) )
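
/* Worked example (illustrative, with sizeof(Head_t) == 16): for a Vmdebug
** pointer d handed back to the caller,
**	DBBLOCK(d) == (Block_t*)((Vmuchar_t*)(d) - 48), the underlying Vmbest block
**	DBSIZE(d)  == the caller's byte count, kept in the Head_t at d - 32
**	DBSEG(d)   == the segment pointer, kept in the Head_t at d - 16
** DBHEAD() and DBTAIL() bound the two DB_MAGIC-filled areas that the debug
** method's integrity checks scan for overwrites.
*/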

/* external symbols for internal use by vmalloc */
typedef Block_t* (*Vmsearch_f)_ARG_((Vmdata_t*, size_t, Block_t*));
typedef struct _vmextern_
{	Block_t*	(*vm_extend)_ARG_((Vmalloc_t*, size_t, Vmsearch_f ));
	ssize_t		(*vm_truncate)_ARG_((Vmalloc_t*, Seg_t*, size_t, int));
	size_t		vm_pagesize;
	char*		(*vm_strcpy)_ARG_((char*, const char*, int));
	char*		(*vm_itoa)_ARG_((Vmulong_t, int));
	void		(*vm_trace)_ARG_((Vmalloc_t*,
					  Vmuchar_t*, Vmuchar_t*, size_t, size_t));
	void		(*vm_pfclose)_ARG_((Vmalloc_t*));
	int		vm_assert;
	int		vm_options;
} Vmextern_t;

#define _Vmextend (_Vmextern.vm_extend)
#define _Vmtruncate (_Vmextern.vm_truncate)
#define _Vmpagesize (_Vmextern.vm_pagesize)
#define _Vmstrcpy (_Vmextern.vm_strcpy)
#define _Vmitoa (_Vmextern.vm_itoa)
#define _Vmtrace (_Vmextern.vm_trace)
#define _Vmpfclose (_Vmextern.vm_pfclose)
#define _Vmassert (_Vmextern.vm_assert)
#define _Vmoptions (_Vmextern.vm_options)

#define VMOPTIONS() do { if (!_Vmoptions) { _vmoptions(); } } while (0)

extern void _vmoptions _ARG_((void));
extern int _vmbestcheck _ARG_((Vmdata_t*, Block_t*));

_BEGIN_EXTERNS_

extern Vmextern_t _Vmextern;

#if _PACKAGE_ast

#if _npt_getpagesize
extern int getpagesize _ARG_((void));
#endif
#if _npt_sbrk
extern int brk _ARG_(( void* ));
extern Void_t* sbrk _ARG_(( ssize_t ));
#endif

#else

#if _hdr_unistd
#include <unistd.h>
#else
extern void abort _ARG_(( void ));
extern ssize_t write _ARG_(( int, const void*, size_t ));
extern int getpagesize _ARG_((void));
extern Void_t* sbrk _ARG_((ssize_t));
#endif

#if !__STDC__ && !_hdr_stdlib
extern size_t strlen _ARG_(( const char* ));
extern char* strcpy _ARG_(( char*, const char* ));
extern int strcmp _ARG_(( const char*, const char* ));
extern int atexit _ARG_(( void(*)(void) ));
extern char* getenv _ARG_(( const char* ));
extern Void_t* memcpy _ARG_(( Void_t*, const Void_t*, size_t ));
extern Void_t* memset _ARG_(( Void_t*, int, size_t ));
#else
#include <stdlib.h>
#include <string.h>
#endif

/* for vmexit.c */
extern int onexit _ARG_(( void(*)(void) ));
extern void _exit _ARG_(( int ));
extern void _cleanup _ARG_(( void ));

#endif /*_PACKAGE_ast*/

_END_EXTERNS_

#if _UWIN
#define abort() (DebugBreak(),abort())
#endif

#endif /* _VMHDR_H */