diff options
author | Igor Pashev <pashev.igor@gmail.com> | 2012-06-24 22:28:35 +0000 |
---|---|---|
committer | Igor Pashev <pashev.igor@gmail.com> | 2012-06-24 22:28:35 +0000 |
commit | 3950ffe2a485479f6561c27364d3d7df5a21d124 (patch) | |
tree | 468c6e14449d1b1e279222ec32f676b0311917d2 /src/lib/libast/vmalloc | |
download | ksh-upstream.tar.gz |
Imported Upstream version 93u+upstream
Diffstat (limited to 'src/lib/libast/vmalloc')
23 files changed, 7723 insertions, 0 deletions
diff --git a/src/lib/libast/vmalloc/malloc.c b/src/lib/libast/vmalloc/malloc.c new file mode 100644 index 0000000..e69523a --- /dev/null +++ b/src/lib/libast/vmalloc/malloc.c @@ -0,0 +1,1438 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2012 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_malloc(){} + +#else + +#if _UWIN + +#define calloc ______calloc +#define _ast_free ______free +#define malloc ______malloc +#define mallinfo ______mallinfo +#define mallopt ______mallopt +#define mstats ______mstats +#define realloc ______realloc + +#define _STDLIB_H_ 1 + +extern int atexit(void(*)(void)); +extern char* getenv(const char*); + +#endif + +#include "vmhdr.h" +#include <errno.h> + +#if _UWIN + +#include <malloc.h> + +#define _map_malloc 1 +#define _mal_alloca 1 + +#undef calloc +#define calloc _ast_calloc +#undef _ast_free +#define free _ast_free +#undef malloc +#define malloc _ast_malloc +#undef mallinfo +typedef struct ______mallinfo Mallinfo_t; +#undef mallopt +#undef mstats +typedef struct ______mstats Mstats_t; +#undef realloc +#define realloc _ast_realloc + +#endif + +#if __STD_C +#define F0(f,t0) f(t0) +#define F1(f,t1,a1) f(t1 a1) +#define F2(f,t1,a1,t2,a2) f(t1 a1, t2 a2) +#else +#define F0(f,t0) f() +#define F1(f,t1,a1) f(a1) t1 a1; +#define F2(f,t1,a1,t2,a2) f(a1, a2) 
t1 a1; t2 a2; +#endif + +/* + * define _AST_std_malloc=1 to force the standard malloc + * if _map_malloc is also defined then _ast_malloc etc. + * will simply call malloc etc. + */ + +#if !defined(_AST_std_malloc) && __CYGWIN__ +#define _AST_std_malloc 1 +#endif + +/* malloc compatibility functions +** +** These are aware of debugging/profiling and are driven by the +** VMALLOC_OPTIONS environment variable which is a comma or space +** separated list of [no]name[=value] options: +** +** abort if Vmregion==Vmdebug then VM_DBABORT is set, +** otherwise _BLD_DEBUG enabled assertions abort() +** on failure +** break try sbrk() block allocator first +** check if Vmregion==Vmbest then the region is checked every op +** free disable addfreelist() +** keep disable free -- if code works with this enabled then it +** probably accesses free'd data +** method=m sets Vmregion=m if not defined, m (Vm prefix optional) +** may be one of { best debug last profile } +** period=n sets Vmregion=Vmdebug if not defined, if +** Vmregion==Vmdebug the region is checked every n ops +** profile=f sets Vmregion=Vmprofile if not set, if +** Vmregion==Vmprofile then profile info printed to file f +** start=n sets Vmregion=Vmdebug if not defined, if +** Vmregion==Vmdebug region checking starts after n ops +** trace=f enables tracing to file f +** warn=f sets Vmregion=Vmdebug if not defined, if +** Vmregion==Vmdebug then warnings printed to file f +** watch=a sets Vmregion=Vmdebug if not defined, if +** Vmregion==Vmdebug then address a is watched +** +** Output files are created if they don't exist. &n and /dev/fd/n name +** the file descriptor n which must be open for writing. The pattern %p +** in a file name is replaced by the process ID. +** +** VMALLOC_OPTIONS combines the features of these previously used env vars: +** { VMCHECK VMDEBUG VMETHOD VMPROFILE VMTRACE } +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/ + +#if _sys_stat +#include <sys/stat.h> +#endif +#include <fcntl.h> + +#ifdef S_IRUSR +#define CREAT_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH) +#else +#define CREAT_MODE 0644 +#endif + +static Vmulong_t _Vmdbstart = 0; +static Vmulong_t _Vmdbcheck = 0; +static Vmulong_t _Vmdbtime = 0; +static int _Vmpffd = -1; + +#if ( !_std_malloc || !_BLD_ast ) && !_AST_std_malloc + +#if !_map_malloc +#undef calloc +#undef cfree +#undef free +#undef mallinfo +#undef malloc +#undef mallopt +#undef memalign +#undef mstats +#undef realloc +#undef valloc + +#if _malloc_hook + +#include <malloc.h> + +#undef calloc +#undef cfree +#undef free +#undef malloc +#undef memalign +#undef realloc + +#define calloc _ast_calloc +#define cfree _ast_cfree +#define free _ast_free +#define malloc _ast_malloc +#define memalign _ast_memalign +#define realloc _ast_realloc + +#endif + +#endif + +#if _WINIX + +#include <ast_windows.h> + +#if _UWIN + +#define VMRECORD(p) _vmrecord(p) +#define VMBLOCK { int _vmblock = _sigblock(); +#define VMUNBLOCK _sigunblock(_vmblock); } + +extern int _sigblock(void); +extern void _sigunblock(int); +extern unsigned long _record[2048]; + +__inline Void_t* _vmrecord(Void_t* p) +{ + register unsigned long v = ((unsigned long)p)>>16; + + _record[v>>5] |= 1<<((v&0x1f)); + return p; +} + +#else + +#define getenv(s) lcl_getenv(s) + +static char* +lcl_getenv(const char* s) +{ + int n; + static char buf[512]; + + if (!(n = GetEnvironmentVariable(s, buf, sizeof(buf))) || n > sizeof(buf)) + return 0; + return buf; +} + +#endif /* _UWIN */ + +#endif /* _WINIX */ + +#ifndef VMRECORD +#define VMRECORD(p) (p) +#define VMBLOCK +#define VMUNBLOCK +#endif + +#if defined(__EXPORT__) +#define extern extern __EXPORT__ +#endif + +static int _Vmflinit = 0; +#define VMFLINIT() \ + { if(!_Vmflinit) vmflinit(); \ + if(_Vmdbcheck) \ + { if(_Vmdbtime < _Vmdbstart) _Vmdbtime += 1; \ + else if((_Vmdbtime += 1) < _Vmdbstart) _Vmdbtime = _Vmdbstart; \ + if(_Vmdbtime >= _Vmdbstart && (_Vmdbtime % 
_Vmdbcheck) == 0 && \ + Vmregion->meth.meth == VM_MTDEBUG) \ + vmdbcheck(Vmregion); \ + } \ + } + +#if __STD_C +static int vmflinit(void) +#else +static int vmflinit() +#endif +{ + char* file; + int line; + Void_t* func; + + /* this must be done now to avoid any inadvertent recursion (more below) */ + _Vmflinit = 1; + VMFLF(Vmregion,file,line,func); + + /* if getenv() calls malloc(), the options may not affect the eventual region */ + VMOPTIONS(); + + /* reset file and line number to correct values for the call */ + Vmregion->file = file; + Vmregion->line = line; + Vmregion->func = func; + + return 0; +} + +/* use multiple regions to reduce blocking by concurrent threads */ +#if _mem_mmap_anon || _mem_mmap_zero +static Vmalloc_t *Region[64]; /* list of concurrent regions */ +static unsigned int Regmax = 64; /* max number of regions */ +#else +static Vmalloc_t* Region[1]; /* list of concurrent regions */ +static unsigned int Regmax = 0; +#endif +static unsigned int Regnum = 0; /* current #concurrent regions */ + +/* statistics */ +static unsigned int Regopen = 0; /* #allocation calls opened */ +static unsigned int Reglock = 0; /* #allocation calls locked */ +static unsigned int Regprobe = 0; /* #probes to find a region */ + +int setregmax(int regmax) +{ + int oldmax = Regmax; + + if(regmax >= Regnum && regmax <= sizeof(Region)/sizeof(Region[0])) + Regmax = regmax; + + return oldmax; +} + +/* return statistics */ +int _mallocstat(Vmstat_t* st) +{ + Vmstat_t vmst; + int k; + + if(vmstat(Vmregion, st) < 0) /* add up all stats */ + return -1; + for(k = 0; k < Regnum; ++k) + { if(!Region[k]) + continue; + if(vmstat(Region[k], &vmst) < 0 ) + return -1; + st->n_busy += vmst.n_busy; + st->n_free += vmst.n_free; + st->s_busy += vmst.s_busy; + st->s_free += vmst.s_free; + st->m_busy += vmst.m_busy; + st->m_free += vmst.m_free; + st->n_seg += vmst.n_seg; + st->extent += vmst.extent; + } + + st->n_region = Regnum+1; + st->n_open = Regopen; + st->n_lock = Reglock; + st->n_probe 
= Regprobe; + + return 0; +} + +/* find the region that a block was allocated from */ +static Vmalloc_t* regionof(Void_t* addr) +{ + int k; + +#if USE_NATIVE +#define CAUTIOUS 1 +#else +#define CAUTIOUS 0 +#endif + if(CAUTIOUS || Vmregion->meth.meth != VM_MTBEST ) + { /* addr will not be dereferenced here */ + if(vmaddr(Vmregion,addr) == 0 ) + return Vmregion; + for(k = 0; k < Regnum; ++k) + if(Region[k] && vmaddr(Region[k], addr) == 0 ) + return Region[k]; + return NIL(Vmalloc_t*); + } + else + { /* fast, but susceptible to bad data */ + Vmdata_t *vd = SEG(BLOCK(addr))->vmdt; + if(Vmregion->data == vd ) + return Vmregion; + for(k = 0; k < Regnum; ++k) + if(Region[k] && Region[k]->data == vd) + return Region[k]; + return NIL(Vmalloc_t*); + } +} + +/* manage a cache of free objects */ +typedef struct _regfree_s +{ struct _regfree_s* next; +} Regfree_t; +static Regfree_t *Regfree; + +static void addfreelist(Regfree_t* data) +{ + unsigned int k; + Regfree_t *head; + + for(k = 0;; ASOLOOP(k) ) + { data->next = head = Regfree; + if(asocasptr(&Regfree, head, data) == (Void_t*)head ) + return; + } +} + +static void clrfreelist() +{ + Regfree_t *list, *next; + Vmalloc_t *vm; + + if(!(list = Regfree) ) + return; /* nothing to do */ + + if(asocasptr(&Regfree, list, NIL(Regfree_t*)) != list ) + return; /* somebody else is doing it */ + + for(; list; list = next) + { next = list->next; + if(vm = regionof((Void_t*)list)) + { if(asocasint(&vm->data->lock, 0, 1) == 0) /* can free this now */ + { (void)(*vm->meth.freef)(vm, (Void_t*)list, 1); + vm->data->lock = 0; + } + else addfreelist(list); /* ah well, back in the queue */ + } + } +} + +/* get a suitable region to allocate from */ +typedef struct _regdisc_s +{ Vmdisc_t disc; + char slop[64]; /* to absorb any extra data in Vmdcsystem */ +} Regdisc_t; + +static int regexcept(Vmalloc_t* vm, int type, Void_t* data, Vmdisc_t* disc) +{ + if(type == VM_OPEN) + { if(data) /* make vmopen allocate all memory using discipline */ + 
*(Void_t**)data = data; /* just make it non-NULL */ + return 0; + } + return 0; +} + +static Vmalloc_t* getregion(int* local) +{ + Vmalloc_t *vm; + int p, pos; + + static unsigned int Rand = 0xdeadbeef; /* a cheap prng */ +#define RAND() (Rand = Rand*16777617 + 3) + + clrfreelist(); + + if(Regmax <= 0 ) + { /* uni-process/thread */ + *local = 1; + Vmregion->data->lock = 1; + return Vmregion; + } + else if(asocasint(&Vmregion->data->lock, 0, 1) == 0 ) + { /* Vmregion is open, so use it */ + *local = 1; + asoincint(&Regopen); + return Vmregion; + } + + asoincint(&Regprobe); /* probe Region[] to find an open region */ + if(Regnum == 0) + pos = 0; + else for(pos = p = RAND()%Regnum;; ) + { if(Region[p] && asocasint(&Region[p]->data->lock, 0, 1) == 0 ) + { *local = 1; + asoincint(&Regopen); + return Region[p]; + } + if((p = (p+1)%Regnum) == pos ) + break; + } + + /* grab the next open slot for a new region */ + while((p = Regnum) < Regmax) + if(asocasint(&Regnum, p, p+1) == p ) + break; + if(p < Regmax) /* this slot is now ours */ + { static Regdisc_t Regdisc; + if(!Regdisc.disc.exceptf) /* one time initialization */ + { GETPAGESIZE(_Vmpagesize); + memcpy(&Regdisc, Vmdcsystem, Vmdcsystem->size); + Regdisc.disc.round = ROUND(_Vmpagesize, 64*1024); + Regdisc.disc.exceptf = regexcept; + } + + /**/ASSERT(Region[p] == NIL(Vmalloc_t*)); + if((vm = vmopen(&Regdisc.disc, Vmbest, VM_SHARE)) != NIL(Vmalloc_t*) ) + { vm->data->lock = 1; /* lock new region now */ + *local = 1; + asoincint(&Regopen); + return (Region[p] = vm); + } + else Region[p] = Vmregion; /* better than nothing */ + } + + /* must return something */ + vm = Region[pos] ? 
Region[pos] : Vmregion; + if(asocasint(&vm->data->lock, 0, 1) == 0) + { *local = 1; + asoincint(&Regopen); + } + else + { *local = 0; + asoincint(&Reglock); + } + return vm; +} + +#if __STD_C +extern Void_t* calloc(reg size_t n_obj, reg size_t s_obj) +#else +extern Void_t* calloc(n_obj, s_obj) +reg size_t n_obj; +reg size_t s_obj; +#endif +{ + Void_t *addr; + Vmalloc_t *vm; + int local = 0; + VMFLINIT(); + + vm = getregion(&local); + addr = (*vm->meth.resizef)(vm, NIL(Void_t*), n_obj*s_obj, VM_RSZERO, local); + if(local) + { /**/ASSERT(vm->data->lock == 1); + vm->data->lock = 0; + } + return VMRECORD(addr); +} + +#if __STD_C +extern Void_t* malloc(reg size_t size) +#else +extern Void_t* malloc(size) +reg size_t size; +#endif +{ + Void_t *addr; + Vmalloc_t *vm; + int local = 0; + VMFLINIT(); + + vm = getregion(&local); + addr = (*vm->meth.allocf)(vm, size, local); + if(local) + { /**/ASSERT(vm->data->lock == 1); + vm->data->lock = 0; + } + return VMRECORD(addr); +} + +#if __STD_C +extern Void_t* realloc(reg Void_t* data, reg size_t size) +#else +extern Void_t* realloc(data,size) +reg Void_t* data; /* block to be reallocated */ +reg size_t size; /* new size */ +#endif +{ + ssize_t copy; + Void_t *addr; + Vmalloc_t *vm; + VMFLINIT(); + + if(!data) + return malloc(size); + else if((vm = regionof(data)) ) + { if(vm == Vmregion && vm != Vmheap) /* no multiple region usage here */ + { addr = (*vm->meth.resizef)(vm, data, size, VM_RSCOPY|VM_RSMOVE, 0); + return VMRECORD(addr); + } + if(asocasint(&vm->data->lock, 0, 1) == 0 ) /* region is open */ + { addr = (*vm->meth.resizef)(vm, data, size, VM_RSCOPY|VM_RSMOVE, 1); + vm->data->lock = 0; + return VMRECORD(addr); + } + else if(Regmax > 0 && Vmregion == Vmheap && (addr = malloc(size)) ) + { if((copy = SIZE(BLOCK(data))&~BITS) > size ) + copy = size; + memcpy(addr, data, copy); + addfreelist((Regfree_t*)data); + return VMRECORD(addr); + } + else /* this may block but it is the best that we can do now */ + { addr = 
(*vm->meth.resizef)(vm, data, size, VM_RSCOPY|VM_RSMOVE, 0); + return VMRECORD(addr); + } + } + else /* not our data */ + { +#if USE_NATIVE +#undef realloc /* let the native realloc() take care of it */ +#if __STD_C + extern Void_t* realloc(Void_t*, size_t); +#else + extern Void_t* realloc(); +#endif + return realloc(data, size); +#else + return NIL(Void_t*); +#endif + } +} + +#if __STD_C +extern void free(reg Void_t* data) +#else +extern void free(data) +reg Void_t* data; +#endif +{ + Vmalloc_t *vm; + VMFLINIT(); + + if(!data || (_Vmassert & VM_keep)) + return; + else if((vm = regionof(data)) ) + { + if(vm == Vmregion && Vmregion != Vmheap || (_Vmassert & VM_free)) + (void)(*vm->meth.freef)(vm, data, 0); + else addfreelist((Regfree_t*)data); + return; + } + else /* not our data */ + { +#if USE_NATIVE +#undef free /* let the native free() take care of it */ +#if __STD_C + extern void free(Void_t*); +#else + extern void free(); +#endif + free(data); +#endif + return; + } +} + +#if __STD_C +extern void cfree(reg Void_t* data) +#else +extern void cfree(data) +reg Void_t* data; +#endif +{ + free(data); +} + +#if __STD_C +extern Void_t* memalign(reg size_t align, reg size_t size) +#else +extern Void_t* memalign(align, size) +reg size_t align; +reg size_t size; +#endif +{ + Void_t *addr; + Vmalloc_t *vm; + int local = 0; + VMFLINIT(); + + vm = getregion(&local); + VMBLOCK + addr = (*vm->meth.alignf)(vm, size, align, local); + if(local) + { /**/ASSERT(vm->data->lock == 1); + vm->data->lock = 0; + } + VMUNBLOCK + return VMRECORD(addr); +} + +#if __STD_C +extern int posix_memalign(reg Void_t **memptr, reg size_t align, reg size_t size) +#else +extern int posix_memalign(memptr, align, size) +reg Void_t** memptr; +reg size_t align; +reg size_t size; +#endif +{ + Void_t *mem; + + if(align == 0 || (align%sizeof(Void_t*)) != 0 || ((align-1)&align) != 0 ) + return EINVAL; + + if(!(mem = memalign(align, size)) ) + return ENOMEM; + + *memptr = mem; + return 0; +} + +#if __STD_C 
+extern Void_t* valloc(reg size_t size) +#else +extern Void_t* valloc(size) +reg size_t size; +#endif +{ + VMFLINIT(); + + GETPAGESIZE(_Vmpagesize); + return VMRECORD(memalign(_Vmpagesize, size)); +} + +#if __STD_C +extern Void_t* pvalloc(reg size_t size) +#else +extern Void_t* pvalloc(size) +reg size_t size; +#endif +{ + VMFLINIT(); + + GETPAGESIZE(_Vmpagesize); + return VMRECORD(memalign(_Vmpagesize, ROUND(size,_Vmpagesize)) ); +} + +#if !_PACKAGE_ast +#if __STD_C +char* strdup(const char* s) +#else +char* strdup(s) +char* s; +#endif +{ + char *ns; + size_t n; + + if(!s) + return NIL(char*); + else + { n = strlen(s); + if((ns = malloc(n+1)) ) + memcpy(ns,s,n+1); + return ns; + } +} +#endif /* _PACKAGE_ast */ + +#if !_lib_alloca || _mal_alloca +#ifndef _stk_down +#define _stk_down 0 +#endif +typedef struct _alloca_s Alloca_t; +union _alloca_u +{ struct + { char* addr; + Alloca_t* next; + } head; + char array[ALIGN]; +}; +struct _alloca_s +{ union _alloca_u head; + Vmuchar_t data[1]; +}; + +#if __STD_C +extern Void_t* alloca(size_t size) +#else +extern Void_t* alloca(size) +size_t size; +#endif +{ char array[ALIGN]; + char* file; + int line; + Void_t* func; + Alloca_t* f; + Vmalloc_t *vm; + static Alloca_t* Frame; + + VMFLINIT(); + + VMFLF(Vmregion,file,line,func); /* save info before freeing frames */ + + while(Frame) /* free unused frames */ + { if(( _stk_down && &array[0] > Frame->head.head.addr) || + (!_stk_down && &array[0] < Frame->head.head.addr) ) + { f = Frame; Frame = f->head.head.next; + if((vm = regionof(f)) ) + (void)(*vm->meth.freef)(vm, f, 0); + /* else: something bad happened. just keep going */ + } + else break; + } + + Vmregion->file = file; /* restore file/line info before allocation */ + Vmregion->line = line; + Vmregion->func = func; + + f = (Alloca_t*)(*Vmregion->meth.allocf)(Vmregion, size+sizeof(Alloca_t)-1, 0); + + /* if f is NULL, this mimics a stack overflow with a memory error! 
*/ + f->head.head.addr = &array[0]; + f->head.head.next = Frame; + Frame = f; + + return (Void_t*)f->data; +} +#endif /*!_lib_alloca || _mal_alloca*/ + +#if _map_malloc + +/* not sure of all the implications -- 0 is conservative for now */ +#define USE_NATIVE 0 /* native free/realloc on non-vmalloc ptrs */ + +#else + +#if _malloc_hook + +static void vm_free_hook(void* ptr, const void* caller) +{ + free(ptr); +} + +static void* vm_malloc_hook(size_t size, const void* caller) +{ + void* r; + + r = malloc(size); + return r; +} + +static void* vm_memalign_hook(size_t align, size_t size, const void* caller) +{ + void* r; + + r = memalign(align, size); + return r; +} + +static void* vm_realloc_hook(void* ptr, size_t size, const void* caller) +{ + void* r; + + r = realloc(ptr, size); + return r; +} + +static void vm_initialize_hook(void) +{ + __free_hook = vm_free_hook; + __malloc_hook = vm_malloc_hook; + __memalign_hook = vm_memalign_hook; + __realloc_hook = vm_realloc_hook; +} + +void (*__malloc_initialize_hook)(void) = vm_initialize_hook; + +#if 0 /* 2012-02-29 this may be needed to cover shared libs */ + +void __attribute__ ((constructor)) vm_initialize_initialize_hook(void) +{ + vm_initialize_hook(); + __malloc_initialize_hook = vm_initialize_hook; +} + +#endif + +#else + +/* intercept _* __* __libc_* variants */ + +#if __lib__malloc +extern Void_t* F2(_calloc, size_t,n, size_t,m) { return calloc(n, m); } +extern Void_t F1(_cfree, Void_t*,p) { free(p); } +extern Void_t F1(_free, Void_t*,p) { free(p); } +extern Void_t* F1(_malloc, size_t,n) { return malloc(n); } +#if _lib_memalign +extern Void_t* F2(_memalign, size_t,a, size_t,n) { return memalign(a, n); } +#endif +#if _lib_pvalloc +extern Void_t* F1(_pvalloc, size_t,n) { return pvalloc(n); } +#endif +extern Void_t* F2(_realloc, Void_t*,p, size_t,n) { return realloc(p, n); } +#if _lib_valloc +extern Void_t* F1(_valloc, size_t,n) { return valloc(n); } +#endif +#endif + +#if _lib___malloc +extern Void_t* F2(__calloc, 
size_t,n, size_t,m) { return calloc(n, m); } +extern Void_t F1(__cfree, Void_t*,p) { free(p); } +extern Void_t F1(__free, Void_t*,p) { free(p); } +extern Void_t* F1(__malloc, size_t,n) { return malloc(n); } +#if _lib_memalign +extern Void_t* F2(__memalign, size_t,a, size_t,n) { return memalign(a, n); } +#endif +#if _lib_pvalloc +extern Void_t* F1(__pvalloc, size_t,n) { return pvalloc(n); } +#endif +extern Void_t* F2(__realloc, Void_t*,p, size_t,n) { return realloc(p, n); } +#if _lib_valloc +extern Void_t* F1(__valloc, size_t,n) { return valloc(n); } +#endif +#endif + +#if _lib___libc_malloc +extern Void_t* F2(__libc_calloc, size_t,n, size_t,m) { return calloc(n, m); } +extern Void_t F1(__libc_cfree, Void_t*,p) { free(p); } +extern Void_t F1(__libc_free, Void_t*,p) { free(p); } +extern Void_t* F1(__libc_malloc, size_t,n) { return malloc(n); } +#if _lib_memalign +extern Void_t* F2(__libc_memalign, size_t,a, size_t,n) { return memalign(a, n); } +#endif +#if _lib_pvalloc +extern Void_t* F1(__libc_pvalloc, size_t,n) { return pvalloc(n); } +#endif +extern Void_t* F2(__libc_realloc, Void_t*,p, size_t,n) { return realloc(p, n); } +#if _lib_valloc +extern Void_t* F1(__libc_valloc, size_t,n) { return valloc(n); } +#endif +#endif + +#endif /* _malloc_hook */ + +#endif /* _map_malloc */ + +#undef extern + +#if _hdr_malloc /* need the mallint interface for statistics, etc. 
*/ + +#undef calloc +#define calloc ______calloc +#undef cfree +#define cfree ______cfree +#undef free +#define free ______free +#undef malloc +#define malloc ______malloc +#undef pvalloc +#define pvalloc ______pvalloc +#undef realloc +#define realloc ______realloc +#undef valloc +#define valloc ______valloc + +#if !_UWIN + +#include <malloc.h> + +typedef struct mallinfo Mallinfo_t; +typedef struct mstats Mstats_t; + +#endif + +#if defined(__EXPORT__) +#define extern __EXPORT__ +#endif + +#if _lib_mallopt +#if __STD_C +extern int mallopt(int cmd, int value) +#else +extern int mallopt(cmd, value) +int cmd; +int value; +#endif +{ + VMFLINIT(); + return 0; +} +#endif /*_lib_mallopt*/ + +#if _lib_mallinfo && _mem_arena_mallinfo +#if __STD_C +extern Mallinfo_t mallinfo(void) +#else +extern Mallinfo_t mallinfo() +#endif +{ + Vmstat_t sb; + Mallinfo_t mi; + + VMFLINIT(); + memset(&mi,0,sizeof(mi)); + if(vmstat(Vmregion,&sb) >= 0) + { mi.arena = sb.extent; + mi.ordblks = sb.n_busy+sb.n_free; + mi.uordblks = sb.s_busy; + mi.fordblks = sb.s_free; + } + return mi; +} +#endif /* _lib_mallinfo */ + +#if _lib_mstats && _mem_bytes_total_mstats +#if __STD_C +extern Mstats_t mstats(void) +#else +extern Mstats_t mstats() +#endif +{ + Vmstat_t sb; + Mstats_t ms; + + VMFLINIT(); + memset(&ms,0,sizeof(ms)); + if(vmstat(Vmregion,&sb) >= 0) + { ms.bytes_total = sb.extent; + ms.chunks_used = sb.n_busy; + ms.bytes_used = sb.s_busy; + ms.chunks_free = sb.n_free; + ms.bytes_free = sb.s_free; + } + return ms; +} +#endif /*_lib_mstats*/ + +#undef extern + +#endif/*_hdr_malloc*/ + +#else + +/* + * even though there is no malloc override, still provide + * _ast_* counterparts for object compatibility + */ + +#define setregmax(n) + +#undef calloc +extern Void_t* calloc _ARG_((size_t, size_t)); + +#undef cfree +extern void cfree _ARG_((Void_t*)); + +#undef free +extern void free _ARG_((Void_t*)); + +#undef malloc +extern Void_t* malloc _ARG_((size_t)); + +#if _lib_memalign +#undef memalign +extern 
Void_t* memalign _ARG_((size_t, size_t)); +#endif + +#if _lib_pvalloc +#undef pvalloc +extern Void_t* pvalloc _ARG_((size_t)); +#endif + +#undef realloc +extern Void_t* realloc _ARG_((Void_t*, size_t)); + +#if _lib_valloc +#undef valloc +extern Void_t* valloc _ARG_((size_t)); +#endif + +#if defined(__EXPORT__) +#define extern __EXPORT__ +#endif + +#if !_malloc_hook + +extern Void_t F1(_ast_free, Void_t*,p) { free(p); } +extern Void_t* F1(_ast_malloc, size_t,n) { return malloc(n); } +#if _lib_memalign +extern Void_t* F2(_ast_memalign, size_t,a, size_t,n) { return memalign(a, n); } +#endif +extern Void_t* F2(_ast_realloc, Void_t*,p, size_t,n) { return realloc(p, n); } + +#endif + +extern Void_t* F2(_ast_calloc, size_t,n, size_t,m) { return calloc(n, m); } +extern Void_t F1(_ast_cfree, Void_t*,p) { free(p); } +#if _lib_pvalloc +extern Void_t* F1(_ast_pvalloc, size_t,n) { return pvalloc(n); } +#endif +#if _lib_valloc +extern Void_t* F1(_ast_valloc, size_t,n) { return valloc(n); } +#endif + +#undef extern + +#if _hdr_malloc + +#undef mallinfo +#undef mallopt +#undef mstats + +#define calloc ______calloc +#define cfree ______cfree +#define free ______free +#define malloc ______malloc +#define pvalloc ______pvalloc +#define realloc ______realloc +#define valloc ______valloc + +#if !_UWIN + +#if !_malloc_hook + +#include <malloc.h> + +#endif + +typedef struct mallinfo Mallinfo_t; +typedef struct mstats Mstats_t; + +#endif + +#if defined(__EXPORT__) +#define extern __EXPORT__ +#endif + +#if _lib_mallopt +extern int F2(_ast_mallopt, int,cmd, int,value) { return mallopt(cmd, value); } +#endif + +#if _lib_mallinfo && _mem_arena_mallinfo +extern Mallinfo_t F0(_ast_mallinfo, void) { return mallinfo(); } +#endif + +#if _lib_mstats && _mem_bytes_total_mstats +extern Mstats_t F0(_ast_mstats, void) { return mstats(); } +#endif + +#undef extern + +#endif /*_hdr_malloc*/ + +#endif /*!_std_malloc*/ + +#if __STD_C +static Vmulong_t atou(char** sp) +#else +static Vmulong_t atou(sp) 
+char** sp; +#endif +{ + char* s = *sp; + Vmulong_t v = 0; + + if(s[0] == '0' && (s[1] == 'x' || s[1] == 'X') ) + { for(s += 2; *s; ++s) + { if(*s >= '0' && *s <= '9') + v = (v << 4) + (*s - '0'); + else if(*s >= 'a' && *s <= 'f') + v = (v << 4) + (*s - 'a') + 10; + else if(*s >= 'A' && *s <= 'F') + v = (v << 4) + (*s - 'A') + 10; + else break; + } + } + else + { for(; *s; ++s) + { if(*s >= '0' && *s <= '9') + v = v*10 + (*s - '0'); + else break; + } + } + + *sp = s; + return v; +} + +#if __STD_C +static char* insertpid(char* begs, char* ends) +#else +static char* insertpid(begs,ends) +char* begs; +char* ends; +#endif +{ int pid; + char* s; + + if((pid = getpid()) < 0) + return NIL(char*); + + s = ends; + do + { if(s == begs) + return NIL(char*); + *--s = '0' + pid%10; + } while((pid /= 10) > 0); + while(s < ends) + *begs++ = *s++; + + return begs; +} + +#if __STD_C +static int createfile(char* file) +#else +static int createfile(file) +char* file; +#endif +{ + char buf[1024]; + char *next, *endb; + int fd; + + next = buf; + endb = buf + sizeof(buf); + while(*file) + { if(*file == '%') + { switch(file[1]) + { + case 'p' : + if(!(next = insertpid(next,endb)) ) + return -1; + file += 2; + break; + default : + goto copy; + } + } + else + { copy: + *next++ = *file++; + } + + if(next >= endb) + return -1; + } + + *next = '\0'; + file = buf; + if (*file == '&' && *(file += 1) || strncmp(file, "/dev/fd/", 8) == 0 && *(file += 8)) + fd = dup((int)atou(&file)); + else if (*file) +#if _PACKAGE_ast + fd = open(file, O_WRONLY|O_CREAT|O_TRUNC, CREAT_MODE); +#else + fd = creat(file, CREAT_MODE); +#endif + else + return -1; +#if _PACKAGE_ast +#ifdef FD_CLOEXEC + if (fd >= 0) + fcntl(fd, F_SETFD, FD_CLOEXEC); +#endif +#endif + return fd; +} + +#if __STD_C +static void pfprint(void) +#else +static void pfprint() +#endif +{ + if(Vmregion->meth.meth == VM_MTPROFILE) + vmprofile(Vmregion,_Vmpffd); +} + +/* + * initialize runtime options from the VMALLOC_OPTIONS env var + */ + +#define 
COPY(t,e,f) while ((*t = *f++) && t < e) t++ + +#if __STD_C +void _vmoptions(void) +#else +void _vmoptions() +#endif +{ + Vmalloc_t* vm = 0; + char* trace = 0; + char* s; + char* t; + char* v; + Vmulong_t n; + int fd; + char buf[1024]; + + _Vmoptions = 1; + t = buf; + v = &buf[sizeof(buf)-1]; + if (s = getenv("VMALLOC_OPTIONS")) + COPY(t, v, s); + if (t > buf) + { + *t = 0; + s = buf; + for (;;) + { + while (*s == ',' || *s == ' ' || *s == '\t' || *s == '\r' || *s == '\n') + s++; + if (!*(t = s)) + break; + v = 0; + while (*s) + if (*s == ',' || *s == ' ' || *s == '\t' || *s == '\r' || *s == '\n') + { + *s++ = 0; + break; + } + else if (!v && *s == '=') + { + *s++ = 0; + if (!*(v = s)) + v = 0; + } + else + s++; + if (t[0] == 'n' && t[1] == 'o') + continue; + switch (t[0]) + { + case 'a': /* abort */ + if (!vm) + vm = vmopen(Vmdcsystem, Vmdebug, 0); + if (vm && vm->meth.meth == VM_MTDEBUG) + vmset(vm, VM_DBABORT, 1); + else + _Vmassert |= VM_abort; + break; + case 'b': /* break */ + _Vmassert |= VM_break; + break; + case 'c': /* check */ + _Vmassert |= VM_check; + break; + case 'f': /* free */ + _Vmassert |= VM_free; + break; + case 'k': /* keep */ + _Vmassert |= VM_keep; + break; + case 'm': + if (v && !vm) + { + if ((v[0] == 'V' || v[0] == 'v') && (v[1] == 'M' || v[1] == 'm')) + v += 2; + if (strcmp(v, "debug") == 0) + vm = vmopen(Vmdcsystem, Vmdebug, 0); + else if (strcmp(v, "profile") == 0) + vm = vmopen(Vmdcsystem, Vmprofile, 0); + else if (strcmp(v, "last") == 0) + vm = vmopen(Vmdcsystem, Vmlast, 0); + else if (strcmp(v, "best") == 0) + vm = Vmheap; + } + break; + case 'p': + if (v) + switch (t[1]) + { + case 'e': /* period=<count> */ + if (!vm) + vm = vmopen(Vmdcsystem, Vmdebug, 0); + if (vm && vm->meth.meth == VM_MTDEBUG) + _Vmdbcheck = atou(&v); + break; + case 'r': /* profile=<path> */ + if (!vm) + vm = vmopen(Vmdcsystem, Vmprofile, 0); + if (v && vm && vm->meth.meth == VM_MTPROFILE) + _Vmpffd = createfile(v); + break; + } + break; + case 's': /* 
start=<count> */ + if (!vm) + vm = vmopen(Vmdcsystem, Vmdebug, 0); + if (v && vm && vm->meth.meth == VM_MTDEBUG) + _Vmdbstart = atou(&v); + break; + case 't': /* trace=<path> */ + trace = v; + break; + case 'w': + if (t[1] == 'a') + switch (t[2]) + { + case 'r': /* warn=<path> */ + if (!vm) + vm = vmopen(Vmdcsystem, Vmdebug, 0); + if (v && vm && vm->meth.meth == VM_MTDEBUG && (fd = createfile(v)) >= 0) + vmdebug(fd); + break; + case 't': /* watch=<addr> */ + if (!vm) + vm = vmopen(Vmdcsystem, Vmdebug, 0); + if (v && vm && vm->meth.meth == VM_MTDEBUG && (n = atou(&v)) >= 0) + vmdbwatch((Void_t*)n); + break; + } + break; + } + } + } + + /* slip in the new region now so that malloc() will work fine */ + + if (vm) + { + if (vm->meth.meth == VM_MTDEBUG) + _Vmdbcheck = 1; + Vmregion = vm; + } + + /* enable tracing -- this currently disables multiple regions */ + + if (trace) + { + setregmax(0); + if ((fd = createfile(trace)) >= 0) + { + vmset(Vmregion, VM_TRACE, 1); + vmtrace(fd); + } + } + else if (Vmregion != Vmheap || asometh(0, 0)->type == ASO_SIGNAL) + setregmax(0); + + /* make sure that profile data is output upon exiting */ + + if (vm && vm->meth.meth == VM_MTPROFILE) + { + if (_Vmpffd < 0) + _Vmpffd = 2; + /* this may wind up calling malloc(), but region is ok now */ + atexit(pfprint); + } + else if (_Vmpffd >= 0) + { + close(_Vmpffd); + _Vmpffd = -1; + } +} + +/* + * ast semi-private workaround for system functions + * that misbehave by passing bogus addresses to free() + * + * not prototyped in any header to keep it ast semi-private + * + * to keep malloc() data by disabling free() + * extern _vmkeep(int); + * int r = _vmkeep(1); + * and to restore to the previous state + * (void)_vmkeep(r); + */ + +int +#if __STD_C +_vmkeep(int v) +#else +_vmkeep(v) +int v; +#endif +{ + int r; + + r = !!(_Vmassert & VM_keep); + if (v) + _Vmassert |= VM_keep; + else + _Vmassert &= ~VM_keep; + return r; +} + +#endif /*_UWIN*/ diff --git a/src/lib/libast/vmalloc/vmbest.c 
b/src/lib/libast/vmalloc/vmbest.c new file mode 100644 index 0000000..553d83a --- /dev/null +++ b/src/lib/libast/vmalloc/vmbest.c @@ -0,0 +1,1390 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2012 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmbest(){} + +#else + +#include "vmhdr.h" + +/* Best-fit allocation method. This is based on a best-fit strategy +** using a splay tree for storage of lists of free blocks of the same +** size. Recent free blocks may be cached for fast reuse. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/ + +#ifdef DEBUG +static int N_free; /* # of free calls */ +static int N_alloc; /* # of alloc calls */ +static int N_resize; /* # of resize calls */ +static int N_wild; /* # allocated from the wild block */ +static int N_last; /* # allocated from last free block */ +static int N_reclaim; /* # of bestreclaim calls */ +#endif /*DEBUG*/ + +#define COMPACT 8 /* factor to decide when to compact */ + +/* Check to see if a block is in the free tree */ +#if __STD_C +static int vmintree(Block_t* node, Block_t* b) +#else +static int vmintree(node,b) +Block_t* node; +Block_t* b; +#endif +{ Block_t* t; + + for(t = node; t; t = LINK(t)) + if(t == b) + return 1; + if(LEFT(node) && vmintree(LEFT(node),b)) + return 1; + if(RIGHT(node) && vmintree(RIGHT(node),b)) + return 1; + return 0; +} + +#if __STD_C +static int vmonlist(Block_t* list, Block_t* b) +#else +static int vmonlist(list,b) +Block_t* list; +Block_t* b; +#endif +{ + for(; list; list = LINK(list)) + if(list == b) + return 1; + return 0; +} + +/* Check to see if a block is known to be free */ +#if __STD_C +static int vmisfree(Vmdata_t* vd, Block_t* b) +#else +static int vmisfree(vd,b) +Vmdata_t* vd; +Block_t* b; +#endif +{ + if(SIZE(b) & (BUSY|JUNK|PFREE)) + return 0; + + if(b == vd->wild) + return 1; + + if(SIZE(b) < MAXTINY) + return vmonlist(TINY(vd)[INDEX(SIZE(b))], b); + + if(vd->root) + return vmintree(vd->root, b); + + return 0; +} + +/* Check to see if a block is known to be junked */ +#if __STD_C +static int vmisjunk(Vmdata_t* vd, Block_t* b) +#else +static int vmisjunk(vd,b) +Vmdata_t* vd; +Block_t* b; +#endif +{ + Block_t* t; + + if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0) + return 0; + + if(b == vd->free) /* recently freed */ + return 1; + + /* check the list that b is supposed to be in */ + for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t)) + if(t == b) + return 1; + + /* on occasions, b may be put onto the catch-all list */ + if(C_INDEX(SIZE(b)) < S_CACHE) + for(t = CACHE(vd)[S_CACHE]; t; t = 
LINK(t)) + if(t == b) + return 1; + + return 0; +} + +/* check to see if the free tree is in good shape */ +#if __STD_C +static int vmchktree(Block_t* node) +#else +static int vmchktree(node) +Block_t* node; +#endif +{ Block_t* t; + + if(SIZE(node) & BITS) + { /**/ASSERT(0); return -1; } + + for(t = LINK(node); t; t = LINK(t)) + if(SIZE(t) != SIZE(node)) + { /**/ASSERT(0); return -1; } + + if((t = LEFT(node)) ) + { if(SIZE(t) >= SIZE(node) ) + { /**/ASSERT(0); return -1; } + else return vmchktree(t); + } + if((t = RIGHT(node)) ) + { if(SIZE(t) <= SIZE(node) ) + { /**/ASSERT(0); return -1; } + else return vmchktree(t); + } + + return 0; +} + +#if __STD_C +int _vmbestcheck(Vmdata_t* vd, Block_t* freeb) +#else +int _vmbestcheck(vd, freeb) +Vmdata_t* vd; +Block_t* freeb; /* known to be free but not on any free list */ +#endif +{ + reg Seg_t *seg; + reg Block_t *b, *endb, *nextb; + int rv = 0; + + if(!CHECK()) + return 0; + + /* make sure the free tree is still in shape */ + if(vd->root && vmchktree(vd->root) < 0 ) + { rv = -1; /**/ASSERT(0); } + + for(seg = vd->seg; seg && rv == 0; seg = seg->next) + { b = SEGBLOCK(seg); + endb = (Block_t*)(seg->baddr - sizeof(Head_t)); + for(; b < endb && rv == 0; b = nextb) + { nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) ); + + if(!ISBUSY(SIZE(b)) ) /* a completely free block */ + { /* there should be no marked bits of any type */ + if(SIZE(b) & (BUSY|JUNK|PFREE) ) + { rv = -1; /**/ASSERT(0); } + + /* next block must be busy and marked PFREE */ + if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) ) + { rv = -1; /**/ASSERT(0); } + + /* must have a self-reference pointer */ + if(SELF(b) != b) + { rv = -1; /**/ASSERT(0); } + + /* segment pointer should be well-defined */ + if(!TINIEST(b) && SEG(b) != seg) + { rv = -1; /**/ASSERT(0); } + + /* must be on a free list */ + if(b != freeb && !vmisfree(vd, b) ) + { rv = -1; /**/ASSERT(0); } + } + else + { /* segment pointer should be well-defined */ + if(SEG(b) != seg) + { rv = -1; 
/**/ASSERT(0); } + + /* next block should not be marked PFREE */ + if(ISPFREE(SIZE(nextb)) ) + { rv = -1; /**/ASSERT(0); } + + /* if PFREE, last block should be free */ + if(ISPFREE(SIZE(b)) && LAST(b) != freeb && + !vmisfree(vd, LAST(b)) ) + { rv = -1; /**/ASSERT(0); } + + /* if free but unreclaimed, should be junk */ + if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b)) + { rv = -1; /**/ASSERT(0); } + } + } + } + + return rv; +} + +/* Tree rotation functions */ +#define RROTATE(x,y) (LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y)) +#define LROTATE(x,y) (RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y)) +#define RLINK(s,x) ((s) = LEFT(s) = (x)) +#define LLINK(s,x) ((s) = RIGHT(s) = (x)) + +/* Find and delete a suitable element in the free tree. */ +#if __STD_C +static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted) +#else +static Block_t* bestsearch(vd, size, wanted) +Vmdata_t* vd; +reg size_t size; +Block_t* wanted; +#endif +{ + reg size_t s; + reg Block_t *t, *root, *l, *r; + Block_t link; + + /* extracting a tiniest block from its list */ + if((root = wanted) && size == TINYSIZE) + { reg Seg_t* seg; + + l = TLEFT(root); + if((r = LINK(root)) ) + TLEFT(r) = l; + if(l) + LINK(l) = r; + else TINY(vd)[0] = r; + + seg = vd->seg; + if(!seg->next) + SEG(root) = seg; + else for(;; seg = seg->next) + { if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr && + (Vmuchar_t*)root < seg->baddr) + { SEG(root) = seg; + break; + } + } + + return root; + } + + /**/ASSERT(!vd->root || vmchktree(vd->root) == 0); + + /* find the right one to delete */ + l = r = &link; + if((root = vd->root) ) do + { /**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root))); + if(size == (s = SIZE(root)) ) + break; + if(size < s) + { if((t = LEFT(root)) ) + { if(size <= (s = SIZE(t)) ) + { RROTATE(root,t); + if(size == s) + break; + t = LEFT(root); + } + else + { LLINK(l,t); + t = RIGHT(t); + } + } + RLINK(r,root); + } + else + { if((t = RIGHT(root)) ) + { if(size >= (s = SIZE(t)) ) + { LROTATE(root,t); + if(size 
== s) + break; + t = RIGHT(root); + } + else + { RLINK(r,t); + t = LEFT(t); + } + } + LLINK(l,root); + } + /**/ ASSERT(root != t); + } while((root = t) ); + + if(root) /* found it, now isolate it */ + { RIGHT(l) = LEFT(root); + LEFT(r) = RIGHT(root); + } + else /* nothing exactly fit */ + { LEFT(r) = NIL(Block_t*); + RIGHT(l) = NIL(Block_t*); + + /* grab the least one from the right tree */ + if((root = LEFT(&link)) ) + { while((t = LEFT(root)) ) + RROTATE(root,t); + LEFT(&link) = RIGHT(root); + } + } + + if(root && (r = LINK(root)) ) + { /* head of a link list, use next one for root */ + LEFT(r) = RIGHT(&link); + RIGHT(r) = LEFT(&link); + } + else if(!(r = LEFT(&link)) ) + r = RIGHT(&link); + else /* graft left tree to right tree */ + { while((t = LEFT(r)) ) + RROTATE(r,t); + LEFT(r) = RIGHT(&link); + } + vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r))); + + /**/ASSERT(!vd->root || vmchktree(vd->root) == 0); + /**/ASSERT(!wanted || wanted == root); + + return root; +} + +/* Reclaim all delayed free blocks into the free tree */ +#if __STD_C +static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c) +#else +static int bestreclaim(vd, wanted, c) +reg Vmdata_t* vd; +Block_t* wanted; +int c; +#endif +{ + reg size_t size, s; + reg Block_t *fp, *np, *t, *list; + reg int n, saw_wanted; + + /**/COUNT(N_reclaim); + /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + + if((fp = vd->free) ) + { LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp; + vd->free = NIL(Block_t*); + } + + saw_wanted = wanted ? 0 : 1; + for(n = S_CACHE; n >= c; --n) + { list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*); + while((fp = list) ) + { /* Note that below here we allow ISJUNK blocks to be + ** forward-merged even though they are not removed from + ** the list immediately. In this way, the list is + ** scanned only once. It works because the LINK and SIZE + ** fields are not destroyed during the merging. 
This can + ** be seen by observing that a tiniest block has a 2-word + ** header and a 2-word body. Merging a tiniest block + ** (1seg) and the next block (2seg) looks like this: + ** 1seg size link left 2seg size link left .... + ** 1seg size link left rite xxxx xxxx .... self + ** After the merge, the 2seg word is replaced by the RIGHT + ** pointer of the new block and somewhere beyond the + ** two xxxx fields, the SELF pointer will replace some + ** other word. The important part is that the two xxxx + ** fields are kept intact. + */ + list = LINK(list); /**/ASSERT(!vmonlist(list,fp)); + + size = SIZE(fp); + if(!ISJUNK(size)) /* already done */ + continue; + + if(ISPFREE(size)) /* backward merge */ + { fp = LAST(fp); + s = SIZE(fp); /**/ASSERT(!(s&BITS)); + REMOVE(vd,fp,INDEX(s),t,bestsearch); + size = (size&~BITS) + s + sizeof(Head_t); + } + else size &= ~BITS; + + for(;;) /* forward merge */ + { np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t)); + s = SIZE(np); /**/ASSERT(s > 0); + if(!ISBUSY(s)) + { /**/ASSERT((s&BITS) == 0); + if(np == vd->wild) + vd->wild = NIL(Block_t*); + else REMOVE(vd,np,INDEX(s),t,bestsearch); + } + else if(ISJUNK(s)) + { /* reclaim any touched junk list */ + if((int)C_INDEX(s) < c) + c = C_INDEX(s); + SIZE(np) = 0; + CLRBITS(s); + } + else break; + size += s + sizeof(Head_t); + } + SIZE(fp) = size; + + /* tell next block that this one is free */ + np = NEXT(fp); /**/ASSERT(ISBUSY(SIZE(np))); + /**/ASSERT(!ISJUNK(SIZE(np))); + SETPFREE(SIZE(np)); + SELF(fp) = fp; + + if(fp == wanted) /* to be consumed soon */ + { /**/ASSERT(!saw_wanted); /* should be seen just once */ + saw_wanted = 1; + continue; + } + + /* wilderness preservation */ + if(np->body.data >= vd->seg->baddr) + { vd->wild = fp; + continue; + } + + /* tiny block goes to tiny list */ + if(size < MAXTINY) + { s = INDEX(size); + np = LINK(fp) = TINY(vd)[s]; + if(s == 0) /* TINIEST block */ + { if(np) + TLEFT(np) = fp; + TLEFT(fp) = NIL(Block_t*); + } + else + { if(np) + 
LEFT(np) = fp; + LEFT(fp) = NIL(Block_t*); + SETLINK(fp); + } + TINY(vd)[s] = fp; + continue; + } + + LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*); + if(!(np = vd->root) ) /* inserting into an empty tree */ + { vd->root = fp; + continue; + } + + size = SIZE(fp); + while(1) /* leaf insertion */ + { /**/ASSERT(np != fp); + if((s = SIZE(np)) > size) + { if((t = LEFT(np)) ) + { /**/ ASSERT(np != t); + np = t; + } + else + { LEFT(np) = fp; + break; + } + } + else if(s < size) + { if((t = RIGHT(np)) ) + { /**/ ASSERT(np != t); + np = t; + } + else + { RIGHT(np) = fp; + break; + } + } + else /* s == size */ + { if((t = LINK(np)) ) + { LINK(fp) = t; + LEFT(t) = fp; + } + LINK(np) = fp; + LEFT(fp) = np; + SETLINK(fp); + break; + } + } + } + } + + /**/ASSERT(!wanted || saw_wanted == 1); + /**/ASSERT(_vmbestcheck(vd, wanted) == 0); + return saw_wanted; +} + +#if __STD_C +static int bestcompact(Vmalloc_t* vm, int local) +#else +static int bestcompact(vm, local) +Vmalloc_t* vm; +int local; +#endif +{ + reg Seg_t *seg, *next; + reg Block_t *bp, *tp; + reg size_t size, segsize, round; + reg Vmdata_t* vd = vm->data; + + SETLOCK(vm, local); + + bestreclaim(vd,NIL(Block_t*),0); + + for(seg = vd->seg; seg; seg = next) + { next = seg->next; + + bp = BLOCK(seg->baddr); + if(!ISPFREE(SIZE(bp)) ) + continue; + + bp = LAST(bp); /**/ASSERT(vmisfree(vd,bp)); + size = SIZE(bp); + if(bp == vd->wild) + { /* During large block allocations, _Vmextend might + ** have been enlarged the rounding factor. Reducing + ** it a bit help avoiding getting large raw memory. + */ + if((round = vm->disc->round) == 0) + round = _Vmpagesize; + if(size > COMPACT*vd->incr && vd->incr > round) + vd->incr /= 2; + + /* for the bottom segment, we don't necessarily want + ** to return raw memory too early. vd->pool has an + ** approximation of the average size of recently freed + ** blocks. 
If this is large, the application is managing + ** large blocks so we throttle back memory chopping + ** to avoid thrashing the underlying memory system. + */ + if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool) + continue; + + vd->wild = NIL(Block_t*); + vd->pool = 0; + } + else REMOVE(vd,bp,INDEX(size),tp,bestsearch); + tp = NEXT(bp); /* avoid strict-aliasing pun */ + CLRPFREE(SIZE(tp)); + + if(size < (segsize = seg->size)) + size += sizeof(Head_t); + + if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0) + { if(size >= segsize) /* entire segment deleted */ + continue; + /**/ASSERT(SEG(BLOCK(seg->baddr)) == seg); + + if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0) + SIZE(bp) = size - sizeof(Head_t); + else bp = NIL(Block_t*); + } + + if(bp) + { /**/ ASSERT(SIZE(bp) >= BODYSIZE); + /**/ ASSERT(SEGWILD(bp)); + /**/ ASSERT(!vd->root || !vmintree(vd->root,bp)); + SIZE(bp) |= BUSY|JUNK; + LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))]; + CACHE(vd)[C_INDEX(SIZE(bp))] = bp; + } + } + + if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST) + (*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0); + + CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + + return 0; +} + +#if __STD_C +static Void_t* bestalloc(Vmalloc_t* vm, size_t size , int local) +#else +static Void_t* bestalloc(vm, size, local) +Vmalloc_t* vm; /* region allocating from */ +size_t size; /* desired block size */ +int local; /* internal call */ +#endif +{ + reg Vmdata_t* vd = vm->data; + reg size_t s; + reg int n; + reg Block_t *tp, *np, *ap; + size_t orgsize = size; + + /**/COUNT(N_alloc); + /**/ASSERT(local ? 
(vd->lock == 1) : 1 ); + + SETLOCK(vm,local); + + /**/ ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + /**/ ASSERT(HEADSIZE == sizeof(Head_t)); + /**/ ASSERT(BODYSIZE == sizeof(Body_t)); + /**/ ASSERT((ALIGN%(BITS+1)) == 0 ); + /**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 ); + /**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 ); + /**/ ASSERT((BODYSIZE%ALIGN) == 0 ); + /**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) ); + + /* for ANSI requirement that malloc(0) returns non-NULL pointer */ + size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN); + + if((tp = vd->free) ) /* reuse last free piece if appropriate */ + { /**/ASSERT(ISBUSY(SIZE(tp)) ); + /**/ASSERT(ISJUNK(SIZE(tp)) ); + /**/COUNT(N_last); + + vd->free = NIL(Block_t*); + if((s = SIZE(tp)) >= size && s < (size << 1) ) + { if(s >= size + (sizeof(Head_t)+BODYSIZE) ) + { SIZE(tp) = size; + np = NEXT(tp); + SEG(np) = SEG(tp); + SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY; + vd->free = np; + SIZE(tp) |= s&BITS; + } + CLRJUNK(SIZE(tp)); + goto done; + } + + LINK(tp) = CACHE(vd)[S_CACHE]; + CACHE(vd)[S_CACHE] = tp; + } + + for(n = S_CACHE; n >= 0; --n) /* best-fit except for coalescing */ + { bestreclaim(vd,NIL(Block_t*),n); + if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) ) + goto got_block; + } + + /**/ASSERT(!vd->free); + if((tp = vd->wild) && SIZE(tp) >= size) + { /**/COUNT(N_wild); + vd->wild = NIL(Block_t*); + goto got_block; + } + + /* need to extend the arena */ + KPVCOMPACT(vm,bestcompact); + if((tp = (*_Vmextend)(vm,size,bestsearch)) ) + { got_block: + /**/ ASSERT(!ISBITS(SIZE(tp))); + /**/ ASSERT(SIZE(tp) >= size); + /**/ ASSERT((SIZE(tp)%ALIGN) == 0); + /**/ ASSERT(!vd->free); + + /* tell next block that we are no longer a free block */ + np = NEXT(tp); + CLRPFREE(SIZE(np)); /**/ ASSERT(ISBUSY(SIZE(np))); + + if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) ) + { SIZE(tp) = size; + + np = NEXT(tp); + SEG(np) = SEG(tp); + SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK; + + 
if(VMWILD(vd,np)) + { SIZE(np) &= ~BITS; + SELF(np) = np; + ap = NEXT(np); /**/ASSERT(ISBUSY(SIZE(ap))); + SETPFREE(SIZE(ap)); + vd->wild = np; + } + else vd->free = np; + } + + SETBUSY(SIZE(tp)); + } + +done: + if(tp && !local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST) + (*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0); + + CLRLOCK(vm,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + + return tp ? DATA(tp) : NIL(Void_t*); +} + +#if __STD_C +static long bestaddr(Vmalloc_t* vm, Void_t* addr, int local ) +#else +static long bestaddr(vm, addr, local) +Vmalloc_t* vm; /* region allocating from */ +Void_t* addr; /* address to check */ +int local; +#endif +{ + reg Seg_t* seg; + reg Block_t *b, *endb; + reg long offset; + reg Vmdata_t* vd = vm->data; + + /**/ASSERT(local ? (vd->lock == 1) : 1 ); + SETLOCK(vm, local); + + offset = -1L; b = endb = NIL(Block_t*); + for(seg = vd->seg; seg; seg = seg->next) + { b = SEGBLOCK(seg); + endb = (Block_t*)(seg->baddr - sizeof(Head_t)); + if((Vmuchar_t*)addr > (Vmuchar_t*)b && + (Vmuchar_t*)addr < (Vmuchar_t*)endb) + break; + } + + if(local ) /* from bestfree or bestresize */ + { b = BLOCK(addr); + if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) ) + offset = 0; + } + else if(seg) + { while(b < endb) + { reg Vmuchar_t* data = (Vmuchar_t*)DATA(b); + reg size_t size = SIZE(b)&~BITS; + + if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size) + { if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b))) + offset = -1L; + else offset = (Vmuchar_t*)addr - data; + goto done; + } + + b = (Block_t*)((Vmuchar_t*)DATA(b) + size); + } + } + +done: + CLRLOCK(vm,local); + return offset; +} + +#if __STD_C +static int bestfree(Vmalloc_t* vm, Void_t* data, int local ) +#else +static int bestfree(vm, data, local ) +Vmalloc_t* vm; +Void_t* data; +int local; +#endif +{ + reg Vmdata_t* vd = vm->data; + reg Block_t *bp; + reg size_t s; + +#ifdef DEBUG + if(((char*)data - (char*)0) <= 1) + { _Vmassert |= 
VM_check; + _vmbestcheck(vd, NIL(Block_t*)); + if (!data) + _Vmassert &= ~VM_check; + return 0; + } +#else + if(!data) /* ANSI-ism */ + return 0; +#endif + + /**/COUNT(N_free); + /**/ASSERT(local ? (vd->lock == 1) : 1 ); + + SETLOCK(vm, local); + + /**/ASSERT(KPVADDR(vm, data, bestaddr) == 0); + /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + bp = BLOCK(data); s = SIZE(bp); + + /* Keep an approximate average free block size. + ** This is used in bestcompact() to decide when to release + ** raw memory back to the underlying memory system. + */ + vd->pool = (vd->pool + (s&~BITS))/2; + + if(ISBUSY(s) && !ISJUNK(s)) + { SETJUNK(SIZE(bp)); + if(s < MAXCACHE) + { /**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) ); + LINK(bp) = CACHE(vd)[INDEX(s)]; + CACHE(vd)[INDEX(s)] = bp; + } + else if(!vd->free) + vd->free = bp; + else + { /**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) ); + LINK(bp) = CACHE(vd)[S_CACHE]; + CACHE(vd)[S_CACHE] = bp; + } + + /* coalesce on freeing large blocks to avoid fragmentation */ + if(SIZE(bp) >= 2*vd->incr) + { bestreclaim(vd,NIL(Block_t*),0); + if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr) + KPVCOMPACT(vm,bestcompact); + } + } + + if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST ) + (*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0); + + CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + + return 0; +} + +#if __STD_C +static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type, int local) +#else +static Void_t* bestresize(vm, data, size, type, local) +Vmalloc_t* vm; /* region allocating from */ +Void_t* data; /* old block of data */ +reg size_t size; /* new size */ +int type; /* !=0 to move, <0 for not copy */ +int local; +#endif +{ + reg Block_t *rp, *np, *t; + size_t s, bs; + size_t oldz = 0, orgsize = size; + Void_t *oldd = 0, *orgdata = data; + Vmdata_t *vd = vm->data; + + /**/COUNT(N_resize); + /**/ASSERT(local ? 
(vd->lock == 1) : 1); + + if(!data) /* resizing a NULL block is the same as allocating */ + { data = bestalloc(vm, size, local); + if(data && (type&VM_RSZERO) ) + memset((Void_t*)data, 0, size); + return data; + } + if(size == 0) /* resizing to zero size is the same as freeing */ + { (void)bestfree(vm, data, local); + return NIL(Void_t*); + } + + SETLOCK(vm, local); + + /**/ASSERT(KPVADDR(vm, data, bestaddr) == 0); + /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN); + rp = BLOCK(data); /**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp))); + oldz = SIZE(rp); CLRBITS(oldz); + if(oldz < size) + { np = (Block_t*)((Vmuchar_t*)rp + oldz + sizeof(Head_t)); + do /* forward merge as much as possible */ + { s = SIZE(np); /**/ASSERT(!ISPFREE(s)); + if(np == vd->free) + { vd->free = NIL(Block_t*); + CLRBITS(s); + } + else if(ISJUNK(s) ) + { if(!bestreclaim(vd,np,C_INDEX(s)) ) + /**/ASSERT(0); /* oops: did not see np! */ + s = SIZE(np); /**/ASSERT(s%ALIGN == 0); + } + else if(!ISBUSY(s) ) + { if(np == vd->wild) + vd->wild = NIL(Block_t*); + else REMOVE(vd,np,INDEX(s),t,bestsearch); + } + else break; + + SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0); + np = (Block_t*)((Vmuchar_t*)np + s); + CLRPFREE(SIZE(np)); + } while(SIZE(rp) < size); + + if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) ) + { reg Seg_t* seg; + + s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr); + seg = SEG(rp); + if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s, + vm->disc) == seg->addr ) + { SIZE(rp) += s; + seg->extent += s; + seg->size += s; + seg->baddr += s; + s = (SIZE(rp)&~BITS) + sizeof(Head_t); + np = (Block_t*)((Vmuchar_t*)rp + s); + SEG(np) = seg; + SIZE(np) = BUSY; + } + } + } + + if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) ) + { SIZE(rp) = size; + np = NEXT(rp); + SEG(np) = SEG(rp); + SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK; + CPYBITS(SIZE(rp),s); + rp = np; + goto 
do_free; + } + else if((bs = s&~BITS) < size) + { if(!(type&(VM_RSMOVE|VM_RSCOPY)) ) + data = NIL(Void_t*); /* old data is not moveable */ + else + { oldd = data; + if((data = KPVALLOC(vm,size,bestalloc)) ) + { if(type&VM_RSCOPY) + memcpy(data, oldd, bs); + + do_free: /* reclaim these right away */ + SETJUNK(SIZE(rp)); + LINK(rp) = CACHE(vd)[S_CACHE]; + CACHE(vd)[S_CACHE] = rp; + bestreclaim(vd, NIL(Block_t*), S_CACHE); + } + } + } + + if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldz ) + memset((Void_t*)((Vmuchar_t*)data + oldz), 0, size-oldz); + + if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST) + (*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0); + + CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + + return data; +} + +#if __STD_C +static long bestsize(Vmalloc_t* vm, Void_t* addr, int local ) +#else +static long bestsize(vm, addr, local) +Vmalloc_t* vm; /* region allocating from */ +Void_t* addr; /* address to check */ +int local; +#endif +{ + Seg_t *seg; + Block_t *b, *endb; + long size; + Vmdata_t *vd = vm->data; + + SETLOCK(vm, local); + + size = -1L; + for(seg = vd->seg; seg; seg = seg->next) + { b = SEGBLOCK(seg); + endb = (Block_t*)(seg->baddr - sizeof(Head_t)); + if((Vmuchar_t*)addr <= (Vmuchar_t*)b || + (Vmuchar_t*)addr >= (Vmuchar_t*)endb) + continue; + while(b < endb) + { if(addr == DATA(b)) + { if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) ) + size = -1L; + else size = (long)SIZE(b)&~BITS; + goto done; + } + else if((Vmuchar_t*)addr <= (Vmuchar_t*)b) + break; + + b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) ); + } + } + +done: + CLRLOCK(vm, local); + return size; +} + +#if __STD_C +static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align, int local) +#else +static Void_t* bestalign(vm, size, align, local) +Vmalloc_t* vm; +size_t size; +size_t align; +int local; +#endif +{ + Vmuchar_t *data; + Block_t *tp, *np; + Seg_t *seg; + size_t s, extra; + 
size_t orgsize = size, orgalign = align; + Vmdata_t *vd = vm->data; + + if(size <= 0 || align <= 0) + return NIL(Void_t*); + + SETLOCK(vm, local); + + /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); + size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN); + align = MULTIPLE(align,ALIGN); + + /* hack so that dbalign() can store header data */ + if(VMETHOD(vd) != VM_MTDEBUG) + extra = 0; + else + { extra = DB_HEAD; + while(align < extra || (align - extra) < sizeof(Block_t)) + align *= 2; + } + + /* reclaim all free blocks now to avoid fragmentation */ + bestreclaim(vd,NIL(Block_t*),0); + + s = size + 2*(align+sizeof(Head_t)+extra); + if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) ) + goto done; + + tp = BLOCK(data); + seg = SEG(tp); + + /* get an aligned address that we can live with */ + if((s = (size_t)((VLONG(data)+extra)%align)) != 0) + data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0); + + if((np = BLOCK(data)) != tp ) /* need to free left part */ + { if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) ) + { data += align; + np = BLOCK(data); + } /**/ASSERT(((VLONG(data)+extra)%align) == 0); + + s = (Vmuchar_t*)np - (Vmuchar_t*)tp; + SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY; + SEG(np) = seg; + + SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK; + /**/ ASSERT(SIZE(tp) >= sizeof(Body_t) ); + LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))]; + CACHE(vd)[C_INDEX(SIZE(tp))] = tp; + } + + /* free left-over if too big */ + if((s = SIZE(np) - size) >= sizeof(Block_t)) + { SIZE(np) = size; + + tp = NEXT(np); + SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK; + SEG(tp) = seg; + LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))]; + CACHE(vd)[C_INDEX(SIZE(tp))] = tp; + + SIZE(np) |= s&BITS; + } + + bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */ + + if(!local && _Vmtrace && (vd->mode&VM_TRACE) ) + (*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign); + +done: + CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 
0); + + return (Void_t*)data; +} + +/* The below implements the discipline Vmdcsbrk and the heap region Vmheap. +** There are 5 alternative ways to get raw memory: +** win32, sbrk, mmap_anon, mmap_zero and reusing the native malloc +** The selection of method done here is to enable our malloc implementation +** to work with concurrent threads. The sbrk/brk interface is unfortunately +** not atomic. Thus, we prefer mmap_anon or mmap_zero if they are available. +*/ +#if _mem_win32 +#undef _mem_mmap_anon +#undef _mem_mmap_zero +#undef _mem_sbrk +#endif +#if _mem_mmap_anon +#undef _mem_mmap_zero +#if !_PACKAGE_ast +#undef _mem_sbrk +#endif +#endif +#if _mem_mmap_zero +#if !_PACKAGE_ast +#undef _mem_sbrk +#endif +#endif + +#if _SUNOS /* sunos guarantees that brk-addresses are valid */ +#define chkaddr(a,n) (0) + +#else /* make sure that allocated memory are addressable */ +#include <signal.h> +typedef void (*Sig_f)(int); +static int Gotsegv = 0; + +static void sigsegv(int sig) +{ + if(sig == SIGSEGV) + Gotsegv = 1; +} +static int chkaddr(Vmuchar_t* addr, size_t nsize) +{ + Sig_f segv; + int rv; + + Gotsegv = 0; /* catch segment fault */ + segv = signal(SIGSEGV, sigsegv); + + rv = *(addr+nsize-1); + rv = Gotsegv ? 
-1 : rv; + + signal(SIGSEGV, segv); /* restore signal catcher */ + Gotsegv = 0; + + return rv; +} +#endif /*_SUNOS*/ + +#if _mem_win32 /* getting memory on a window system */ +#if _PACKAGE_ast +#include <ast_windows.h> +#else +#include <windows.h> +#endif + +static Void_t* win32mem(Void_t* caddr, size_t csize, size_t nsize) +{ /**/ ASSERT(csize > 0 || nsize > 0) + if(csize == 0) + { caddr = (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE); + return caddr; + } + else if(nsize == 0) + { (void)VirtualFree((LPVOID)caddr,0,MEM_RELEASE); + return caddr; + } + else return NIL(Void_t*); +} +#endif /* _mem_win32 */ + +#if _mem_sbrk /* getting space via brk/sbrk - not concurrent-ready */ +static Void_t* sbrkmem(Void_t* caddr, size_t csize, size_t nsize) +{ + Vmuchar_t *addr = (Vmuchar_t*)sbrk(0); + + if(!addr || addr == (Vmuchar_t*)(-1) ) + return NIL(Void_t*); + + if(csize > 0 && addr != (Vmuchar_t*)caddr+csize) + return NIL(Void_t*); + else if(csize == 0) + caddr = addr; + + /**/ASSERT(addr == (Vmuchar_t*)caddr+csize); + if(nsize < csize) + addr -= csize-nsize; + else if((addr += nsize-csize) < (Vmuchar_t*)caddr ) + return NIL(Void_t*); + + if(brk(addr) != 0 ) + return NIL(Void_t*); + else if(nsize > csize && chkaddr(caddr, nsize) < 0 ) + { (void)brk((Vmuchar_t*)caddr+csize); + return NIL(Void_t*); + } + else return caddr; +} +#endif /* _mem_sbrk */ + +#if _mem_mmap_anon || _mem_mmap_zero /* get space using mmap */ +#include <fcntl.h> +#include <sys/mman.h> + +#ifndef MAP_ANON +#ifdef MAP_ANONYMOUS +#define MAP_ANON MAP_ANONYMOUS +#else +#define MAP_ANON 0 +#endif +#endif /*MAP_ANON*/ + +#ifndef OPEN_MAX +#define OPEN_MAX 64 +#endif +#define FD_PRIVATE (3*OPEN_MAX/4) /* private file descriptor */ +#define FD_INIT (-1) /* uninitialized file desc */ +#define FD_NONE (-2) /* no mapping with file desc */ + +typedef struct _mmdisc_s +{ Vmdisc_t disc; + int fd; + off_t offset; +} Mmdisc_t; + +static Void_t* mmapmem(Void_t* caddr, size_t csize, size_t nsize, Mmdisc_t* 
mmdc) +{ +#if _mem_mmap_zero + if(mmdc) /* /dev/zero mapping */ + { if(mmdc->fd == FD_INIT ) /* open /dev/zero for mapping */ + { int fd; + if((fd = open("/dev/zero", O_RDONLY)) < 0 ) + { mmdc->fd = FD_NONE; + return NIL(Void_t*); + } + if(fd >= FD_PRIVATE || (mmdc->fd = dup2(fd, FD_PRIVATE)) < 0 ) + mmdc->fd = fd; + else close(fd); +#ifdef FD_CLOEXEC + fcntl(mmdc->fd, F_SETFD, FD_CLOEXEC); +#endif + } + + if(mmdc->fd == FD_NONE) + return NIL(Void_t*); + } +#endif /* _mem_mmap_zero */ + + /**/ASSERT(csize > 0 || nsize > 0); + if(csize == 0) + { nsize = ROUND(nsize, _Vmpagesize); + caddr = NIL(Void_t*); +#if _mem_mmap_zero + if(mmdc && mmdc->fd >= 0 ) + caddr = mmap(0, nsize, PROT_READ|PROT_WRITE, MAP_PRIVATE, mmdc->fd, mmdc->offset); +#endif +#if _mem_mmap_anon + if(!mmdc ) + caddr = mmap(0, nsize, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0); +#endif + if(!caddr || caddr == (Void_t*)(-1)) + return NIL(Void_t*); + else if(chkaddr((Vmuchar_t*)caddr, nsize) < 0 ) + { (void)munmap(caddr, nsize); + return NIL(Void_t*); + } + else + { if(mmdc) + mmdc->offset += nsize; + return caddr; + } + } + else if(nsize == 0) + { Vmuchar_t *addr = (Vmuchar_t*)sbrk(0); + if(addr < (Vmuchar_t*)caddr ) /* in sbrk space */ + return NIL(Void_t*); + else + { (void)munmap(caddr, csize); + return caddr; + } + } + else return NIL(Void_t*); +} +#endif /* _mem_map_anon || _mem_mmap_zero */ + +#if _std_malloc /* using native malloc as a last resource */ +static Void_t* mallocmem(Void_t* caddr, size_t csize, size_t nsize) +{ + /**/ASSERT(csize > 0 || nsize > 0); + if(csize == 0) + return (Void_t*)malloc(nsize); + else if(nsize == 0) + { free(caddr); + return caddr; + } + else return NIL(Void_t*); +} +#endif + +/* A discipline to get raw memory using VirtualAlloc/mmap/sbrk */ +static Void_t* getmemory(Vmalloc_t* vm, Void_t* caddr, size_t csize, size_t nsize, Vmdisc_t* disc) +{ + Vmuchar_t *addr; + + if((csize > 0 && !caddr) || (csize == 0 && nsize == 0) ) + return NIL(Void_t*); + +#if 
_mem_win32 + if((addr = win32mem(caddr, csize, nsize)) ) + return (Void_t*)addr; +#endif +#if _mem_sbrk + if((_Vmassert & VM_break) && (addr = sbrkmem(caddr, csize, nsize)) ) + return (Void_t*)addr; +#endif +#if _mem_mmap_anon + if((addr = mmapmem(caddr, csize, nsize, (Mmdisc_t*)0)) ) + return (Void_t*)addr; +#endif +#if _mem_mmap_zero + if((addr = mmapmem(caddr, csize, nsize, (Mmdisc_t*)disc)) ) + return (Void_t*)addr; +#endif +#if _mem_sbrk + if(!(_Vmassert & VM_break) && (addr = sbrkmem(caddr, csize, nsize)) ) + return (Void_t*)addr; +#endif +#if _std_malloc + if((addr = mallocmem(caddr, csize, nsize)) ) + return (Void_t*)addr; +#endif + return NIL(Void_t*); +} + +#if _mem_mmap_zero || _mem_mmap_anon +static Mmdisc_t _Vmdcsystem = { { getmemory, NIL(Vmexcept_f), 64*1024, sizeof(Mmdisc_t) }, FD_INIT, 0 }; +#else +static Vmdisc_t _Vmdcsystem = { getmemory, NIL(Vmexcept_f), 0, sizeof(Vmdisc_t) }; +#endif + +static Vmethod_t _Vmbest = +{ + bestalloc, + bestresize, + bestfree, + bestaddr, + bestsize, + bestcompact, + bestalign, + VM_MTBEST +}; + +/* The heap region */ +static Vmdata_t _Vmdata = +{ + 0, /* lock */ + VM_MTBEST|VM_SHARE, /* mode */ + 0, /* incr */ + 0, /* pool */ + NIL(Seg_t*), /* seg */ + NIL(Block_t*), /* free */ + NIL(Block_t*), /* wild */ + NIL(Block_t*) /* root */ + /* tiny[] */ + /* cache[] */ +}; +Vmalloc_t _Vmheap = +{ + { bestalloc, + bestresize, + bestfree, + bestaddr, + bestsize, + bestcompact, + bestalign, + VM_MTBEST + }, + NIL(char*), /* file */ + 0, /* line */ + 0, /* func */ + (Vmdisc_t*)(&_Vmdcsystem), /* disc */ + &_Vmdata, /* data */ + NIL(Vmalloc_t*) /* next */ +}; + +__DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap); +__DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap); +__DEFINE__(Vmethod_t*, Vmbest, &_Vmbest); +__DEFINE__(Vmdisc_t*, Vmdcsystem, (Vmdisc_t*)(&_Vmdcsystem) ); +__DEFINE__(Vmdisc_t*, Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsystem) ); + +#ifdef NoF +NoF(vmbest) +#endif + +#endif diff --git a/src/lib/libast/vmalloc/vmclear.c 
b/src/lib/libast/vmalloc/vmclear.c new file mode 100644 index 0000000..c4b8107 --- /dev/null +++ b/src/lib/libast/vmalloc/vmclear.c @@ -0,0 +1,85 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmclear(){} + +#else + +#include "vmhdr.h" + +/* Clear out all allocated space. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/ +#if __STD_C +int vmclear(Vmalloc_t* vm) +#else +int vmclear(vm) +Vmalloc_t* vm; +#endif +{ + Seg_t *seg, *next; + Block_t *tp; + size_t size, s; + Vmdata_t *vd = vm->data; + + SETLOCK(vm, 0); + + vd->free = vd->wild = NIL(Block_t*); + vd->pool = 0; + + if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) ) + { vd->root = NIL(Block_t*); + for(s = 0; s < S_TINY; ++s) + TINY(vd)[s] = NIL(Block_t*); + for(s = 0; s <= S_CACHE; ++s) + CACHE(vd)[s] = NIL(Block_t*); + } + + for(seg = vd->seg; seg; seg = next) + { next = seg->next; + + tp = SEGBLOCK(seg); + size = seg->baddr - ((Vmuchar_t*)tp) - 2*sizeof(Head_t); + + SEG(tp) = seg; + SIZE(tp) = size; + if((vd->mode&(VM_MTLAST|VM_MTPOOL)) ) + seg->free = tp; + else + { SIZE(tp) |= BUSY|JUNK; + LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))]; + CACHE(vd)[C_INDEX(SIZE(tp))] = tp; + } + + tp = BLOCK(seg->baddr); + SEG(tp) = seg; + SIZE(tp) = BUSY; + } + + CLRLOCK(vm, 0); + + return 0; +} + +#endif diff --git a/src/lib/libast/vmalloc/vmclose.c b/src/lib/libast/vmalloc/vmclose.c new file mode 100644 index 0000000..65a3a7e --- /dev/null +++ b/src/lib/libast/vmalloc/vmclose.c @@ -0,0 +1,91 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmclose(){} + +#else + +#include "vmhdr.h" + +/* Close down a 
region. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. +*/ +#if __STD_C +int vmclose(Vmalloc_t* vm) +#else +int vmclose(vm) +Vmalloc_t* vm; +#endif +{ + Seg_t *seg, *vmseg, *next; + Vmalloc_t *v, *last, vmp; + Vmdata_t* vd = vm->data; + Vmdisc_t* disc = vm->disc; + int mode, rv = 0; + + if(vm == Vmheap) /* the heap is never freed */ + return -1; + + if(vm->disc->exceptf && /* announcing closing event */ + (rv = (*vm->disc->exceptf)(vm,VM_CLOSE,(Void_t*)1,vm->disc)) < 0 ) + return -1; + + mode = vd->mode; /* remember this in case it gets destroyed below */ + + if((mode&VM_MTPROFILE) && _Vmpfclose) + (*_Vmpfclose)(vm); + + /* remove from linked list of regions */ + _vmlock(NIL(Vmalloc_t*), 1); + for(last = Vmheap, v = last->next; v; last = v, v = v->next) + { if(v == vm) + { last->next = v->next; + break; + } + } + _vmlock(NIL(Vmalloc_t*), 0); + + if(rv == 0) /* deallocate memory obtained from the system */ + { /* lock-free because alzheimer can cause deadlocks :) */ + vmseg = NIL(Seg_t*); + for(seg = vd->seg; seg; seg = next) + { next = seg->next; + if(seg->extent == seg->size) /* root segment */ + vmseg = seg; /* don't free this yet */ + else (*disc->memoryf)(vm,seg->addr,seg->extent,0,disc); + } + if(vmseg) /* now safe to free root segment */ + (*disc->memoryf)(vm,vmseg->addr,vmseg->extent,0,disc); + } + + if(disc->exceptf) /* finalizing closing */ + (void)(*disc->exceptf)(vm, VM_ENDCLOSE, (Void_t*)0, disc); + + if(!(mode & VM_MEMORYF) ) + vmfree(Vmheap,vm); + + return 0; +} + +#endif diff --git a/src/lib/libast/vmalloc/vmdcheap.c b/src/lib/libast/vmalloc/vmdcheap.c new file mode 100644 index 0000000..88edc72 --- /dev/null +++ b/src/lib/libast/vmalloc/vmdcheap.c @@ -0,0 +1,63 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T 
Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmdcheap(){} + +#else + +#include "vmhdr.h" + +/* A discipline to get memory from the heap. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. +*/ +#if __STD_C +static Void_t* heapmem(Vmalloc_t* vm, Void_t* caddr, + size_t csize, size_t nsize, + Vmdisc_t* disc) +#else +static Void_t* heapmem(vm, caddr, csize, nsize, disc) +Vmalloc_t* vm; /* region doing allocation from */ +Void_t* caddr; /* current low address */ +size_t csize; /* current size */ +size_t nsize; /* new size */ +Vmdisc_t* disc; /* discipline structure */ +#endif +{ + if(csize == 0 && nsize == 0) + return NIL(Void_t*); + else if(csize == 0) + return vmalloc(Vmheap,nsize); + else if(nsize == 0) + return vmfree(Vmheap,caddr) >= 0 ? 
caddr : NIL(Void_t*); + else return vmresize(Vmheap,caddr,nsize,0); +} + +static Vmdisc_t _Vmdcheap = { heapmem, NIL(Vmexcept_f), 0 }; +__DEFINE__(Vmdisc_t*,Vmdcheap,&_Vmdcheap); + +#ifdef NoF +NoF(vmdcheap) +#endif + +#endif diff --git a/src/lib/libast/vmalloc/vmdebug.c b/src/lib/libast/vmalloc/vmdebug.c new file mode 100644 index 0000000..160c189 --- /dev/null +++ b/src/lib/libast/vmalloc/vmdebug.c @@ -0,0 +1,745 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2012 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmdebug(){} + +#else + +#include "vmhdr.h" + +/* Method to help with debugging. This does rigorous checks on +** addresses and arena integrity. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. +*/ + +/* structure to keep track of file names */ +typedef struct _dbfile_s Dbfile_t; +struct _dbfile_s +{ Dbfile_t* next; + char file[1]; +}; +static Dbfile_t* Dbfile; + +/* global watch list */ +#define S_WATCH 32 +static int Dbnwatch; +static Void_t* Dbwatch[S_WATCH]; + +/* types of warnings reported by dbwarn() */ +#define DB_CHECK 0 +#define DB_ALLOC 1 +#define DB_FREE 2 +#define DB_RESIZE 3 +#define DB_WATCH 4 +#define DB_RESIZED 5 + +static int Dbinit = 0; +#define DBINIT() (Dbinit ? 
0 : (dbinit(), Dbinit=1) ) +static void dbinit() +{ int fd; + if((fd = vmtrace(-1)) >= 0) + vmtrace(fd); +} + +static int Dbfd = 2; /* default warning file descriptor */ +#if __STD_C +int vmdebug(int fd) +#else +int vmdebug(fd) +int fd; +#endif +{ + int old = Dbfd; + Dbfd = fd; + return old; +} + + +/* just an entry point to make it easy to set break point */ +#if __STD_C +static void vmdbwarn(Vmalloc_t* vm, char* mesg, int n) +#else +static void vmdbwarn(vm, mesg, n) +Vmalloc_t* vm; +char* mesg; +int n; +#endif +{ + reg Vmdata_t* vd = vm->data; + + write(Dbfd,mesg,n); + if(vd->mode&VM_DBABORT) + abort(); +} + +/* issue a warning of some type */ +#if __STD_C +static void dbwarn(Vmalloc_t* vm, Void_t* data, int where, + char* file, int line, Void_t* func, int type) +#else +static void dbwarn(vm, data, where, file, line, func, type) +Vmalloc_t* vm; /* region holding the block */ +Void_t* data; /* data block */ +int where; /* byte that was corrupted */ +char* file; /* file where call originates */ +int line; /* line number of call */ +Void_t* func; /* function called from */ +int type; /* operation being done */ +#endif +{ + char buf[1024], *bufp, *endbuf, *s; +#define SLOP 64 /* enough for a message and an int */ + + DBINIT(); + + bufp = buf; + endbuf = buf + sizeof(buf); + + if(type == DB_ALLOC) + bufp = (*_Vmstrcpy)(bufp, "alloc error", ':'); + else if(type == DB_FREE) + bufp = (*_Vmstrcpy)(bufp, "free error", ':'); + else if(type == DB_RESIZE) + bufp = (*_Vmstrcpy)(bufp, "resize error", ':'); + else if(type == DB_CHECK) + bufp = (*_Vmstrcpy)(bufp, "corrupted data", ':'); + else if(type == DB_WATCH) + bufp = (*_Vmstrcpy)(bufp, "alert", ':'); + + /* region info */ + bufp = (*_Vmstrcpy)(bufp, "region", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(vm), 0), ':'); + + if(data) + { bufp = (*_Vmstrcpy)(bufp,"block",'='); + bufp = (*_Vmstrcpy)(bufp,(*_Vmitoa)(VLONG(data),0),':'); + } + + if(!data) + { if(where == DB_ALLOC) + bufp = (*_Vmstrcpy)(bufp, "can't get 
memory", ':'); + else bufp = (*_Vmstrcpy)(bufp, "region is locked", ':'); + } + else if(type == DB_FREE || type == DB_RESIZE) + { if(where == 0) + bufp = (*_Vmstrcpy)(bufp, "unallocated block", ':'); + else bufp = (*_Vmstrcpy)(bufp, "already freed", ':'); + } + else if(type == DB_WATCH) + { bufp = (*_Vmstrcpy)(bufp, "size", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(DBSIZE(data),-1), ':'); + if(where == DB_ALLOC) + bufp = (*_Vmstrcpy)(bufp,"just allocated", ':'); + else if(where == DB_FREE) + bufp = (*_Vmstrcpy)(bufp,"being freed", ':'); + else if(where == DB_RESIZE) + bufp = (*_Vmstrcpy)(bufp,"being resized", ':'); + else if(where == DB_RESIZED) + bufp = (*_Vmstrcpy)(bufp,"just resized", ':'); + } + else if(type == DB_CHECK) + { bufp = (*_Vmstrcpy)(bufp, "bad byte at", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(where),-1), ':'); + if((s = DBFILE(data)) && (bufp + strlen(s) + SLOP) < endbuf) + { bufp = (*_Vmstrcpy)(bufp,"allocated at", '='); + bufp = (*_Vmstrcpy)(bufp, s, ','); + bufp = (*_Vmstrcpy)(bufp,(*_Vmitoa)(VLONG(DBLINE(data)),-1),':'); + } + } + + /* location where offending call originates from */ + if(file && file[0] && line > 0 && (bufp + strlen(file) + SLOP) < endbuf) + { bufp = (*_Vmstrcpy)(bufp, "detected at", '='); + bufp = (*_Vmstrcpy)(bufp, file, ','); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(line),-1), ','); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(func),-1), ':'); + } + + *bufp++ = '\n'; + *bufp = '\0'; + + vmdbwarn(vm,buf,(bufp-buf)); +} + +/* check for watched address and issue warnings */ +#if __STD_C +static void dbwatch(Vmalloc_t* vm, Void_t* data, + char* file, int line, Void_t* func, int type) +#else +static void dbwatch(vm, data, file, line, func, type) +Vmalloc_t* vm; +Void_t* data; +char* file; +int line; +Void_t* func; +int type; +#endif +{ + reg int n; + + for(n = Dbnwatch; n >= 0; --n) + { if(Dbwatch[n] == data) + { dbwarn(vm,data,type,file,line,func,DB_WATCH); + return; + } + } +} + +/* record information about 
the block */ +#if __STD_C +static void dbsetinfo(Vmuchar_t* data, size_t size, char* file, int line) +#else +static void dbsetinfo(data, size, file, line) +Vmuchar_t* data; /* real address not the one from Vmbest */ +size_t size; /* the actual requested size */ +char* file; /* file where the request came from */ +int line; /* and line number */ +#endif +{ + reg Vmuchar_t *begp, *endp; + reg Dbfile_t *last, *db; + + DBINIT(); + + /* find the file structure */ + if(!file || !file[0]) + db = NIL(Dbfile_t*); + else + { for(last = NIL(Dbfile_t*), db = Dbfile; db; last = db, db = db->next) + if(strcmp(db->file,file) == 0) + break; + if(!db) + { db = (Dbfile_t*)vmalloc(Vmheap,sizeof(Dbfile_t)+strlen(file)); + if(db) + { (*_Vmstrcpy)(db->file,file,0); + db->next = Dbfile; + Dbfile = db; + } + } + else if(last) /* move-to-front heuristic */ + { last->next = db->next; + db->next = Dbfile; + Dbfile = db; + } + } + + DBSETFL(data,(db ? db->file : NIL(char*)),line); + DBSIZE(data) = size; + DBSEG(data) = SEG(DBBLOCK(data)); + + DBHEAD(data,begp,endp); + while(begp < endp) + *begp++ = DB_MAGIC; + DBTAIL(data,begp,endp); + while(begp < endp) + *begp++ = DB_MAGIC; +} + +/* Check to see if an address is in some data block of a region. +** This returns -(offset+1) if block is already freed, +(offset+1) +** if block is live, 0 if no match. 
+*/ +#if __STD_C +static long dbaddr(Vmalloc_t* vm, Void_t* addr, int local) +#else +static long dbaddr(vm, addr, local) +Vmalloc_t* vm; +Void_t* addr; +int local; +#endif +{ + reg Block_t *b, *endb; + reg Seg_t *seg; + reg Vmuchar_t *data; + reg long offset = -1L; + reg Vmdata_t *vd = vm->data; + + SETLOCK(vm, local); + + b = endb = NIL(Block_t*); + for(seg = vd->seg; seg; seg = seg->next) + { b = SEGBLOCK(seg); + endb = (Block_t*)(seg->baddr - sizeof(Head_t)); + if((Vmuchar_t*)addr > (Vmuchar_t*)b && + (Vmuchar_t*)addr < (Vmuchar_t*)endb) + break; + } + if(!seg) + goto done; + + if(local) /* must be vmfree or vmresize checking address */ + { if(DBSEG(addr) == seg) + { b = DBBLOCK(addr); + if(ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) ) + offset = 0; + else offset = -2L; + } + goto done; + } + + while(b < endb) + { data = (Vmuchar_t*)DATA(b); + if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+SIZE(b)) + { if(ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) ) + { data = DB2DEBUG(data); + if((Vmuchar_t*)addr >= data && + (Vmuchar_t*)addr < data+DBSIZE(data)) + offset = (Vmuchar_t*)addr - data; + } + goto done; + } + + b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) ); + } + +done: + CLRLOCK(vm, local); + return offset; +} + + +#if __STD_C +static long dbsize(Vmalloc_t* vm, Void_t* addr, int local) +#else +static long dbsize(vm, addr, local) +Vmalloc_t* vm; +Void_t* addr; +int local; +#endif +{ + Block_t *b, *endb; + Seg_t *seg; + long size; + Vmdata_t *vd = vm->data; + + SETLOCK(vm, local); + + size = -1L; + for(seg = vd->seg; seg; seg = seg->next) + { b = SEGBLOCK(seg); + endb = (Block_t*)(seg->baddr - sizeof(Head_t)); + if((Vmuchar_t*)addr <= (Vmuchar_t*)b || + (Vmuchar_t*)addr >= (Vmuchar_t*)endb) + continue; + while(b < endb) + { if(addr == (Void_t*)DB2DEBUG(DATA(b))) + { if(ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) ) + size = (long)DBSIZE(addr); + goto done; + } + + b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) ); + } + } + +done: + CLRLOCK(vm, local); + return size; 
+} + +#if __STD_C +static Void_t* dballoc(Vmalloc_t* vm, size_t size, int local) +#else +static Void_t* dballoc(vm, size, local) +Vmalloc_t* vm; +size_t size; +int local; +#endif +{ + size_t s; + Vmuchar_t *data; + char *file; + int line; + Void_t *func; + Vmdata_t *vd = vm->data; + VMFLF(vm,file,line,func); + + SETLOCK(vm, local); + + if(vd->mode&VM_DBCHECK) + vmdbcheck(vm); + + s = ROUND(size,ALIGN) + DB_EXTRA; + if(s < sizeof(Body_t)) /* no tiny blocks during Vmdebug */ + s = sizeof(Body_t); + + if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,(*(Vmbest->allocf))) ) ) + { dbwarn(vm,NIL(Vmuchar_t*),DB_ALLOC,file,line,func,DB_ALLOC); + goto done; + } + + data = DB2DEBUG(data); + dbsetinfo(data,size,file,line); + + if((vd->mode&VM_TRACE) && _Vmtrace) + { vm->file = file; vm->line = line; vm->func = func; + (*_Vmtrace)(vm,NIL(Vmuchar_t*),data,size,0); + } + + if(Dbnwatch > 0 ) + dbwatch(vm,data,file,line,func,DB_ALLOC); + +done: + CLRLOCK(vm, local); + + return (Void_t*)data; +} + + +#if __STD_C +static int dbfree(Vmalloc_t* vm, Void_t* data, int local ) +#else +static int dbfree(vm, data, local ) +Vmalloc_t* vm; +Void_t* data; +int local; +#endif +{ + char *file; + int line; + Void_t *func; + long offset; + int rv, *ip, *endip; + Vmdata_t *vd = vm->data; + VMFLF(vm,file,line,func); + + if(!data) + return 0; + + SETLOCK(vm, local); + + if(vd->mode&VM_DBCHECK) + vmdbcheck(vm); + + if((offset = KPVADDR(vm,data,dbaddr)) != 0) + { dbwarn(vm,(Vmuchar_t*)data,offset == -1L ? 
0 : 1,file,line,func,DB_FREE); + rv = -1; + } + else + { if(Dbnwatch > 0) + dbwatch(vm,data,file,line,func,DB_FREE); + + if((vd->mode&VM_TRACE) && _Vmtrace) + { vm->file = file; vm->line = line; vm->func = func; + (*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*),DBSIZE(data),0); + } + + /* clear free space */ + ip = (int*)data; + endip = ip + (DBSIZE(data)+sizeof(int)-1)/sizeof(int); + while(ip < endip) + *ip++ = 0; + + rv = KPVFREE((vm), (Void_t*)DB2BEST(data), (*Vmbest->freef)); + } + + CLRLOCK(vm, local); + return rv; +} + +/* Resizing an existing block */ +#if __STD_C +static Void_t* dbresize(Vmalloc_t* vm, Void_t* addr, reg size_t size, int type, int local) +#else +static Void_t* dbresize(vm, addr, size, type, local) +Vmalloc_t* vm; /* region allocating from */ +Void_t* addr; /* old block of data */ +reg size_t size; /* new size */ +int type; /* !=0 for movable, >0 for copy */ +int local; +#endif +{ + Vmuchar_t *data; + long offset; + size_t s, oldsize; + char *file, *oldfile; + int line, oldline; + Void_t *func; + Vmdata_t *vd = vm->data; + VMFLF(vm,file,line,func); + + if(!addr) + { vm->file = file; vm->line = line; + data = (Vmuchar_t*)dballoc(vm, size, local); + if(data && (type&VM_RSZERO) ) + memset((Void_t*)data, 0, size); + return data; + } + if(size == 0) + { vm->file = file; vm->line = line; + (void)dbfree(vm, addr, local); + return NIL(Void_t*); + } + + SETLOCK(vm, local); + + if(vd->mode&VM_DBCHECK) + vmdbcheck(vm); + + if((offset = KPVADDR(vm,addr,dbaddr)) != 0) + { dbwarn(vm,(Vmuchar_t*)addr,offset == -1L ? 
0 : 1,file,line,func,DB_RESIZE); + data = NIL(Vmuchar_t*); + } + else + { if(Dbnwatch > 0) + dbwatch(vm,addr,file,line,func,DB_RESIZE); + + /* Vmbest data block */ + data = DB2BEST(addr); + oldsize = DBSIZE(addr); + oldfile = DBFILE(addr); + oldline = DBLINE(addr); + + /* do the resize */ + s = ROUND(size,ALIGN) + DB_EXTRA; + if(s < sizeof(Body_t)) + s = sizeof(Body_t); + data = (Vmuchar_t*)KPVRESIZE(vm,(Void_t*)data,s, + (type&~VM_RSZERO),(*(Vmbest->resizef)) ); + if(!data) /* failed, reset data for old block */ + { dbwarn(vm,NIL(Vmuchar_t*),DB_ALLOC,file,line,func,DB_RESIZE); + dbsetinfo((Vmuchar_t*)addr,oldsize,oldfile,oldline); + } + else + { data = DB2DEBUG(data); + dbsetinfo(data,size,file,line); + + if((vd->mode&VM_TRACE) && _Vmtrace) + { vm->file = file; vm->line = line; + (*_Vmtrace)(vm,(Vmuchar_t*)addr,data,size,0); + } + if(Dbnwatch > 0) + dbwatch(vm,data,file,line,func,DB_RESIZED); + } + + if(data && (type&VM_RSZERO) && size > oldsize) + { Vmuchar_t *d = data+oldsize, *ed = data+size; + do { *d++ = 0; } while(d < ed); + } + } + + CLRLOCK(vm, local); + + return (Void_t*)data; +} + +/* compact any residual free space */ +#if __STD_C +static int dbcompact(Vmalloc_t* vm, int local) +#else +static int dbcompact(vm, local) +Vmalloc_t* vm; +int local; +#endif +{ + return (*(Vmbest->compactf))(vm, local); +} + +/* check for memory overwrites over all live blocks */ +#if __STD_C +int vmdbcheck(Vmalloc_t* vm) +#else +int vmdbcheck(vm) +Vmalloc_t* vm; +#endif +{ + reg Block_t *b, *endb; + reg Seg_t* seg; + int rv; + reg Vmdata_t* vd = vm->data; + + /* check the meta-data of this region */ + if(vd->mode & (VM_MTDEBUG|VM_MTBEST|VM_MTPROFILE)) + { if(_vmbestcheck(vd, NIL(Block_t*)) < 0) + return -1; + if(!(vd->mode&VM_MTDEBUG) ) + return 0; + } + else return -1; + + rv = 0; + for(seg = vd->seg; seg; seg = seg->next) + { b = SEGBLOCK(seg); + endb = (Block_t*)(seg->baddr - sizeof(Head_t)); + while(b < endb) + { reg Vmuchar_t *data, *begp, *endp; + + if(ISJUNK(SIZE(b)) 
|| !ISBUSY(SIZE(b))) + goto next; + + data = DB2DEBUG(DATA(b)); + if(DBISBAD(data)) /* seen this before */ + { rv += 1; + goto next; + } + + DBHEAD(data,begp,endp); + for(; begp < endp; ++begp) + if(*begp != DB_MAGIC) + goto set_bad; + + DBTAIL(data,begp,endp); + for(; begp < endp; ++begp) + { if(*begp == DB_MAGIC) + continue; + set_bad: + dbwarn(vm,data,begp-data,vm->file,vm->line,0,DB_CHECK); + DBSETBAD(data); + rv += 1; + goto next; + } + + next: b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS)); + } + } + + return rv; +} + +/* set/delete an address to watch */ +#if __STD_C +Void_t* vmdbwatch(Void_t* addr) +#else +Void_t* vmdbwatch(addr) +Void_t* addr; /* address to insert */ +#endif +{ + reg int n; + reg Void_t* out; + + out = NIL(Void_t*); + if(!addr) + Dbnwatch = 0; + else + { for(n = Dbnwatch - 1; n >= 0; --n) + if(Dbwatch[n] == addr) + break; + if(n < 0) /* insert */ + { if(Dbnwatch == S_WATCH) + { /* delete left-most */ + out = Dbwatch[0]; + Dbnwatch -= 1; + for(n = 0; n < Dbnwatch; ++n) + Dbwatch[n] = Dbwatch[n+1]; + } + Dbwatch[Dbnwatch] = addr; + Dbnwatch += 1; + } + } + return out; +} + +#if __STD_C +static Void_t* dbalign(Vmalloc_t* vm, size_t size, size_t align, int local) +#else +static Void_t* dbalign(vm, size, align, local) +Vmalloc_t* vm; +size_t size; +size_t align; +int local; +#endif +{ + Vmuchar_t *data; + size_t s; + char *file; + int line; + Void_t *func; + Vmdata_t *vd = vm->data; + VMFLF(vm,file,line,func); + + if(size <= 0 || align <= 0) + return NIL(Void_t*); + + SETLOCK(vm, local); + + if((s = ROUND(size,ALIGN) + DB_EXTRA) < sizeof(Body_t)) + s = sizeof(Body_t); + + if((data = (Vmuchar_t*)KPVALIGN(vm,s,align,(*(Vmbest->alignf)))) ) + { data += DB_HEAD; + dbsetinfo(data,size,file,line); + + if((vd->mode&VM_TRACE) && _Vmtrace) + { vm->file = file; vm->line = line; vm->func = func; + (*_Vmtrace)(vm,NIL(Vmuchar_t*),data,size,align); + } + } + + CLRLOCK(vm, local); + + return (Void_t*)data; +} + +/* print statistics of region vm. 
If vm is NULL, use Vmregion */ +#if __STD_C +ssize_t vmdbstat(Vmalloc_t* vm) +#else +ssize_t vmdbstat(vm) +Vmalloc_t* vm; +#endif +{ Vmstat_t st; + char buf[1024], *bufp; + + vmstat(vm ? vm : Vmregion, &st); + bufp = buf; + bufp = (*_Vmstrcpy)(bufp, "n_busy", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(st.n_busy),-1), ','); + bufp = (*_Vmstrcpy)(bufp, " s_busy", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(st.s_busy),-1), '\n'); + bufp = (*_Vmstrcpy)(bufp, "n_free", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(st.n_free),-1), ','); + bufp = (*_Vmstrcpy)(bufp, " s_free", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(st.s_free),-1), '\n'); + bufp = (*_Vmstrcpy)(bufp, "m_busy", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(st.m_busy),-1), ','); + bufp = (*_Vmstrcpy)(bufp, " m_free", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(st.m_free),-1), '\n'); + bufp = (*_Vmstrcpy)(bufp, "n_segment", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(st.n_seg),-1), ','); + bufp = (*_Vmstrcpy)(bufp, " extent", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(st.extent),-1), '\n'); + *bufp = 0; + write(Dbfd, buf, strlen(buf)); + return strlen(buf); +} + +static Vmethod_t _Vmdebug = +{ + dballoc, + dbresize, + dbfree, + dbaddr, + dbsize, + dbcompact, + dbalign, + VM_MTDEBUG +}; + +__DEFINE__(Vmethod_t*,Vmdebug,&_Vmdebug); + +#ifdef NoF +NoF(vmdebug) +#endif + +#endif diff --git a/src/lib/libast/vmalloc/vmdisc.c b/src/lib/libast/vmalloc/vmdisc.c new file mode 100644 index 0000000..3f4efbf --- /dev/null +++ b/src/lib/libast/vmalloc/vmdisc.c @@ -0,0 +1,55 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* 
(with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmdisc(){} + +#else + +#include "vmhdr.h" + +/* Change the discipline for a region. The old discipline +** is returned. If the new discipline is NULL then the +** discipline is not changed. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. +*/ +#if __STD_C +Vmdisc_t* vmdisc(Vmalloc_t* vm, Vmdisc_t* disc) +#else +Vmdisc_t* vmdisc(vm, disc) +Vmalloc_t* vm; +Vmdisc_t* disc; +#endif +{ + Vmdisc_t* old = vm->disc; + + if(disc) + { if(old->exceptf && + (*old->exceptf)(vm,VM_DISC,(Void_t*)disc,old) != 0 ) + return NIL(Vmdisc_t*); + vm->disc = disc; + } + return old; +} + +#endif diff --git a/src/lib/libast/vmalloc/vmexit.c b/src/lib/libast/vmalloc/vmexit.c new file mode 100644 index 0000000..c027fd5 --- /dev/null +++ b/src/lib/libast/vmalloc/vmexit.c @@ -0,0 +1,100 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmexit(){} 
+ +#else + +#include "vmhdr.h" + +/* +** Any required functions for process exiting. +** Written by Kiem-Phong Vo, kpv@research.att.com (05/25/93). +*/ +#if _PACKAGE_ast || _lib_atexit + +void _STUB_vmexit(){} + +#else + +#if _lib_onexit + +#if __STD_C +int atexit(void (*exitf)(void)) +#else +int atexit(exitf) +void (*exitf)(); +#endif +{ + return onexit(exitf); +} + +#else /*!_lib_onexit*/ + +typedef struct _exit_s +{ struct _exit_s* next; + void(* exitf)_ARG_((void)); +} Exit_t; +static Exit_t* Exit; + +#if __STD_C +atexit(void (*exitf)(void)) +#else +atexit(exitf) +void (*exitf)(); +#endif +{ Exit_t* e; + + if(!(e = (Exit_t*)malloc(sizeof(Exit_t))) ) + return -1; + e->exitf = exitf; + e->next = Exit; + Exit = e; + return 0; +} + +#if __STD_C +void exit(int type) +#else +void exit(type) +int type; +#endif +{ + Exit_t* e; + + for(e = Exit; e; e = e->next) + (*e->exitf)(); + +#if _exit_cleanup + _cleanup(); +#endif + + _exit(type); + return type; +} + +#endif /* _lib_onexit || _lib_on_exit */ + +#endif /*!PACKAGE_ast*/ + +#endif diff --git a/src/lib/libast/vmalloc/vmgetmem.c b/src/lib/libast/vmalloc/vmgetmem.c new file mode 100644 index 0000000..e205fd3 --- /dev/null +++ b/src/lib/libast/vmalloc/vmgetmem.c @@ -0,0 +1,51 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#include 
<vmalloc.h> + +/* + * vm open/close/resize - a handy default for discipline memory functions + * + * vmgetmem(0,0,0) open new region + * vmgetmem(r,0,0) free region + * vmgetmem(r,0,n) allocate n bytes initialized to 0 + * vmgetmem(r,p,0) free p + * vmgetmem(r,p,n) realloc p to n bytes + * + * Written by Glenn S. Fowler. + */ + +#if __STD_C +Void_t* vmgetmem(Vmalloc_t* vm, Void_t* data, size_t size) +#else +Void_t* vmgetmem(vm, data, size) +Vmalloc_t* vm; +Void_t* data; +size_t size; +#endif +{ + if (!vm) + return vmopen(Vmdcheap, Vmbest, 0); + if (data || size) + return vmresize(vm, data, size, VM_RSMOVE|VM_RSCOPY|VM_RSZERO); + vmclose(vm); + return 0; +} diff --git a/src/lib/libast/vmalloc/vmhdr.h b/src/lib/libast/vmalloc/vmhdr.h new file mode 100644 index 0000000..25997ac --- /dev/null +++ b/src/lib/libast/vmalloc/vmhdr.h @@ -0,0 +1,530 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2012 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#ifndef _VMHDR_H +#define _VMHDR_H 1 +#ifndef _BLD_vmalloc +#define _BLD_vmalloc 1 +#endif + +/* Common types, and macros for vmalloc functions. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/ + +#ifndef __STD_C /* this is normally in vmalloc.h but it's included late here */ +#ifdef __STDC__ +#define __STD_C 1 +#else +#if __cplusplus || c_plusplus +#define __STD_C 1 +#else +#define __STD_C 0 +#endif /*__cplusplus*/ +#endif /*__STDC__*/ +#endif /*__STD_C*/ + +#if _PACKAGE_ast + +#if !_UWIN +#define getpagesize ______getpagesize +#define _npt_getpagesize 1 +#define brk ______brk +#define sbrk ______sbrk +#define _npt_sbrk 1 +#endif + +#include <ast.h> + +#if _npt_getpagesize +#undef getpagesize +#endif +#if _npt_sbrk +#undef brk +#undef sbrk +#endif + +#else + +#include <ast_common.h> + +#if !_UWIN +#define _npt_getpagesize 1 +#define _npt_sbrk 1 +#endif + +#undef free +#undef malloc +#undef realloc + +#endif /*_PACKAGE_ast*/ + +#include "FEATURE/vmalloc" + +#include <aso.h> /* atomic scalor operations */ +#include <setjmp.h> /* use the type jmp_buf for alignment */ + +/* extra information needed about methods to get memory from the system */ +#if defined(_WIN32) +#define _mem_win32 1 /* use the VirtualAlloc interface */ +#endif +#if !_mem_win32 && !_mem_sbrk && !_mem_mmap_anon && !_mem_mmap_zero +#undef _std_malloc +#define _std_malloc 1 /* use native malloc/free/realloc */ +#endif + +typedef unsigned char Vmuchar_t; +typedef unsigned long Vmulong_t; + +typedef union _head_u Head_t; +typedef union _body_u Body_t; +typedef struct _block_s Block_t; +typedef struct _seg_s Seg_t; +typedef struct _pfobj_s Pfobj_t; + +#define NIL(t) ((t)0) +#define reg register +#if __STD_C +#define NOTUSED(x) (void)(x) +#else +#define NOTUSED(x) (&x,1) +#endif + + +/* convert an address to an integral value */ +#define VLONG(addr) ((Vmulong_t)((Vmuchar_t*)((Vmulong_t)addr) - (Vmuchar_t*)0) ) + +/* Round x up to a multiple of y. ROUND2 does powers-of-2 and ROUNDX does others */ +#define ROUND2(x,y) (((x) + ((y)-1)) & ~((y)-1)) +#define ROUNDX(x,y) ((((x) + ((y)-1)) / (y)) * (y)) +#define ROUND(x,y) (((y)&((y)-1)) ? 
ROUNDX((x),(y)) : ROUND2((x),(y)) ) + +/* compute a value that is a common multiple of x and y */ +#define MULTIPLE(x,y) ((x)%(y) == 0 ? (x) : (y)%(x) == 0 ? (y) : (y)*(x)) + +#define VM_abort 0x0001 /* abort() on assertion failure */ +#define VM_break 0x0002 /* try sbrk() block allocator first */ +#define VM_check 0x0004 /* enable detailed checks */ +#define VM_free 0x0008 /* disable addfreelist() */ +#define VM_keep 0x0010 /* disable free() */ + +#if _UWIN +#include <ast_windows.h> +#endif + +#ifndef DEBUG +#ifdef _BLD_DEBUG +#define DEBUG 1 +#endif /*_BLD_DEBUG*/ +#endif /*DEBUG*/ +#if DEBUG +extern void _vmmessage _ARG_((const char*, long, const char*, long)); +#define MESSAGE(s) _vmmessage(__FILE__,__LINE__,s,0) +#define ABORT() (_Vmassert & VM_abort) +#define CHECK() (_Vmassert & VM_check) +#define ASSERT(p) ((p) ? 0 : (MESSAGE("Assertion failed"), ABORT() ? (abort(),0) : 0)) +#define COUNT(n) ((n) += 1) +#else +#define ABORT() (0) +#define ASSERT(p) +#define CHECK() (0) +#define COUNT(n) +#define MESSAGE(s) (0) +#endif /*DEBUG*/ + +#define VMPAGESIZE 8192 +#if _lib_getpagesize +#define GETPAGESIZE(x) ((x) ? 
(x) : ((x)=getpagesize()) ) +#else +#define GETPAGESIZE(x) ((x) = VMPAGESIZE) +#endif + +/* Blocks are allocated such that their sizes are 0%(BITS+1) +** This frees up enough low order bits to store state information +*/ +#define BUSY (01) /* block is busy */ +#define PFREE (02) /* preceding block is free */ +#define JUNK (04) /* marked as freed but not yet processed */ +#define BITS (07) /* (BUSY|PFREE|JUNK) */ +#define ALIGNB (8) /* size must be a multiple of BITS+1 */ + +#define ISBITS(w) ((w) & BITS) +#define CLRBITS(w) ((w) &= ~BITS) +#define CPYBITS(w,f) ((w) |= ((f)&BITS) ) + +#define ISBUSY(w) ((w) & BUSY) +#define SETBUSY(w) ((w) |= BUSY) +#define CLRBUSY(w) ((w) &= ~BUSY) + +#define ISPFREE(w) ((w) & PFREE) +#define SETPFREE(w) ((w) |= PFREE) +#define CLRPFREE(w) ((w) &= ~PFREE) + +#define ISJUNK(w) ((w) & JUNK) +#define SETJUNK(w) ((w) |= JUNK) +#define CLRJUNK(w) ((w) &= ~JUNK) + +#define OFFSET(t,e) ((size_t)(&(((t*)0)->e)) ) + +#define VMETHOD(vd) ((vd)->mode&VM_METHODS) + +/* lock and unlock regions during concurrent accesses */ +#define SETLOCK(vm,l) ((l) ? 0 : _vmlock((vm), 1) ) +#define CLRLOCK(vm,l) ((l) ? 0 : _vmlock((vm), 0) ) + +/* local calls */ +#define KPVALLOC(vm,sz,func) (func((vm),(sz),1) ) +#define KPVRESIZE(vm,dt,sz,mv,func) (func((vm),(dt),(sz),(mv),1) ) +#define KPVFREE(vm,dt,func) (func((vm),(dt),1) ) +#define KPVADDR(vm,addr,func) (func((vm),(addr),1) ) +#define KPVSIZE(vm,addr,func) (func((vm),(addr),1) ) +#define KPVCOMPACT(vm,func) (func((vm),1) ) +#define KPVALIGN(vm,sz,al,func) (func((vm),(sz),(al),1) ) + +/* ALIGN is chosen so that a block can store all primitive types. +** It should also be a multiple of ALIGNB==(BITS+1) so the size field +** of Block_t will always be 0%(BITS+1) as noted above. +** Of paramount importance is the ALIGNA macro below. 
If the local compile +** environment is strange enough that the below method does not calculate +** ALIGNA right, then the code below should be commented out and ALIGNA +** redefined to the appropriate requirement. +*/ +union _align_u +{ char c, *cp; + int i, *ip; + long l, *lp; + double d, *dp, ***dppp[8]; + size_t s, *sp; + void(* fn)(); + union _align_u* align; + Head_t* head; + Body_t* body; + Block_t* block; + Vmuchar_t a[ALIGNB]; + _ast_fltmax_t ld, *ldp; + jmp_buf jmp; +}; +struct _a_s +{ char c; + union _align_u a; +}; +#define ALIGNA (sizeof(struct _a_s) - sizeof(union _align_u)) +struct _align_s +{ char data[MULTIPLE(ALIGNA,ALIGNB)]; +}; +#undef ALIGN /* bsd sys/param.h defines this */ +#define ALIGN sizeof(struct _align_s) + +/* make sure that the head of a block is a multiple of ALIGN */ +struct _head_s +{ union + { Seg_t* seg; /* the containing segment */ + Block_t* link; /* possible link list usage */ + Pfobj_t* pf; /* profile structure pointer */ + char* file; /* for file name in Vmdebug */ + } seg; + union + { size_t size; /* size of data area in bytes */ + Block_t* link; /* possible link list usage */ + int line; /* for line number in Vmdebug */ + } size; +}; +#define HEADSIZE ROUND(sizeof(struct _head_s),ALIGN) +union _head_u +{ Vmuchar_t data[HEADSIZE]; /* to standardize size */ + struct _head_s head; +}; + +/* now make sure that the body of a block is a multiple of ALIGN */ +struct _body_s +{ Block_t* link; /* next in link list */ + Block_t* left; /* left child in free tree */ + Block_t* right; /* right child in free tree */ + Block_t** self; /* self pointer when free */ +}; +#define BODYSIZE ROUND(sizeof(struct _body_s),ALIGN) + +union _body_u +{ Vmuchar_t data[BODYSIZE]; /* to standardize size */ + struct _body_s body; + Block_t* self[1]; +}; + +/* After all the songs and dances, we should now have: +** sizeof(Head_t)%ALIGN == 0 +** sizeof(Body_t)%ALIGN == 0 +** and sizeof(Block_t) = sizeof(Head_t)+sizeof(Body_t) +*/ +struct _block_s +{ Head_t 
head; + Body_t body; +}; + +/* requirements for smallest block type */ +struct _tiny_s +{ Block_t* link; + Block_t* self; +}; +#define TINYSIZE ROUND(sizeof(struct _tiny_s),ALIGN) +#define S_TINY 1 /* # of tiny blocks */ +#define MAXTINY (S_TINY*ALIGN + TINYSIZE) +#define TLEFT(b) ((b)->head.head.seg.link) /* instead of LEFT */ +#define TINIEST(b) (SIZE(b) == TINYSIZE) /* this type uses TLEFT */ + +#define DIV(x,y) ((y) == 8 ? ((x)>>3) : (x)/(y) ) +#define INDEX(s) DIV((s)-TINYSIZE,ALIGN) + +/* small block types kept in separate caches for quick allocation */ +#define S_CACHE 6 /* # of types of small blocks to be cached */ +#define N_CACHE 32 /* on allocation, create this many at a time */ +#define MAXCACHE (S_CACHE*ALIGN + TINYSIZE) +#define C_INDEX(s) (s < MAXCACHE ? INDEX(s) : S_CACHE) + +#define TINY(vd) ((vd)->tiny) +#define CACHE(vd) ((vd)->cache) + +struct _vmdata_s /* core region data - could be in shared/persistent memory */ +{ unsigned int lock; /* lock status */ + int mode; /* current mode for region */ + size_t incr; /* allocate in multiple of this */ + size_t pool; /* size of an elt in a Vmpool region */ + Seg_t* seg; /* list of segments */ + Block_t* free; /* most recent free block */ + Block_t* wild; /* wilderness block */ + Block_t* root; /* root of free tree */ + Block_t* tiny[S_TINY]; /* small blocks */ + Block_t* cache[S_CACHE+1]; /* delayed free blocks */ +}; + +#include "vmalloc.h" + +#if !_PACKAGE_ast +/* we don't use these here and they interfere with some local names */ +#undef malloc +#undef free +#undef realloc +#endif + +/* segment structure */ +struct _seg_s +{ Vmdata_t* vmdt; /* the data region holding this */ + Seg_t* next; /* next segment */ + Void_t* addr; /* starting segment address */ + size_t extent; /* extent of segment */ + Vmuchar_t* baddr; /* bottom of usable memory */ + size_t size; /* allocable size */ + Block_t* free; /* recent free blocks */ + Block_t* last; /* Vmlast last-allocated block */ +}; + +/* starting block of a 
segment */ +#define SEGBLOCK(s) ((Block_t*)(((Vmuchar_t*)(s)) + ROUND(sizeof(Seg_t),ALIGN))) + +/* short-hands for block data */ +#define SEG(b) ((b)->head.head.seg.seg) +#define SEGLINK(b) ((b)->head.head.seg.link) +#define SIZE(b) ((b)->head.head.size.size) +#define SIZELINK(b) ((b)->head.head.size.link) +#define LINK(b) ((b)->body.body.link) +#define LEFT(b) ((b)->body.body.left) +#define RIGHT(b) ((b)->body.body.right) + +#define DATA(b) ((Void_t*)((b)->body.data) ) +#define BLOCK(d) ((Block_t*)((char*)(d) - sizeof(Head_t)) ) +#define SELF(b) (b)->body.self[SIZE(b)/sizeof(Block_t*)-1] +#define LAST(b) (*((Block_t**)(((char*)(b)) - sizeof(Block_t*)) ) ) +#define NEXT(b) ((Block_t*)((b)->body.data + SIZE(b)) ) + +/* functions to manipulate link lists of elts of the same size */ +#define SETLINK(b) (RIGHT(b) = (b) ) +#define ISLINK(b) (RIGHT(b) == (b) ) +#define UNLINK(vd,b,i,t) \ + ((((t) = LINK(b)) ? (LEFT(t) = LEFT(b)) : NIL(Block_t*) ), \ + (((t) = LEFT(b)) ? (LINK(t) = LINK(b)) : (TINY(vd)[i] = LINK(b)) ) ) + +/* delete a block from a link list or the free tree. +** The test in the below macro is worth scratching your head a bit. +** Even though tiny blocks (size < BODYSIZE) are kept in separate lists, +** only the TINIEST ones require TLEFT(b) for the back link. Since this +** destroys the SEG(b) pointer, it must be carefully restored in bestsearch(). +** Other tiny blocks have enough space to use the usual LEFT(b). +** In this case, I have also carefully arranged so that RIGHT(b) and +** SELF(b) can be overlapped and the test ISLINK() will go through. +*/ +#define REMOVE(vd,b,i,t,func) \ + ((!TINIEST(b) && ISLINK(b)) ? 
UNLINK((vd),(b),(i),(t)) : \ + func((vd),SIZE(b),(b)) ) + +/* see if a block is the wilderness block */ +#define SEGWILD(b) (((b)->body.data+SIZE(b)+sizeof(Head_t)) >= SEG(b)->baddr) +#define VMWILD(vd,b) (((b)->body.data+SIZE(b)+sizeof(Head_t)) >= vd->seg->baddr) + +#define VMFLF(vm,fi,ln,fn) ((fi) = (vm)->file, (vm)->file = NIL(char*), \ + (ln) = (vm)->line, (vm)->line = 0 , \ + (fn) = (vm)->func, (vm)->func = NIL(Void_t*) ) + +/* The lay-out of a Vmprofile block is this: +** seg_ size ----data---- _pf_ size +** _________ ____________ _________ +** seg_, size: header required by Vmbest. +** data: actual data block. +** _pf_: pointer to the corresponding Pfobj_t struct +** size: the true size of the block. +** So each block requires an extra Head_t. +*/ +#define PF_EXTRA sizeof(Head_t) +#define PFDATA(d) ((Head_t*)((Vmuchar_t*)(d)+(SIZE(BLOCK(d))&~BITS)-sizeof(Head_t)) ) +#define PFOBJ(d) (PFDATA(d)->head.seg.pf) +#define PFSIZE(d) (PFDATA(d)->head.size.size) + +/* The lay-out of a block allocated by Vmdebug is this: +** seg_ size file size seg_ magi ----data---- --magi-- magi line +** --------- --------- --------- ------------ -------- --------- +** seg_,size: header required by Vmbest management. +** file: the file where it was created. +** size: the true byte count of the block +** seg_: should be the same as the previous seg_. +** This allows the function vmregion() to work. +** magi: magic bytes to detect overwrites. +** data: the actual data block. +** magi: more magic bytes. +** line: the line number in the file where it was created. +** So for each allocated block, we'll need 3 extra Head_t. 
+*/ + +/* convenient macros for accessing the above fields */ +#define DB_HEAD (2*sizeof(Head_t)) +#define DB_TAIL (2*sizeof(Head_t)) +#define DB_EXTRA (DB_HEAD+DB_TAIL) +#define DBBLOCK(d) ((Block_t*)((Vmuchar_t*)(d) - 3*sizeof(Head_t)) ) +#define DBBSIZE(d) (SIZE(DBBLOCK(d)) & ~BITS) +#define DBSEG(d) (((Head_t*)((Vmuchar_t*)(d) - sizeof(Head_t)))->head.seg.seg ) +#define DBSIZE(d) (((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.size.size ) +#define DBFILE(d) (((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.seg.file ) +#define DBLN(d) (((Head_t*)((Vmuchar_t*)DBBLOCK(d)+DBBSIZE(d)))->head.size.line ) +#define DBLINE(d) (DBLN(d) < 0 ? -DBLN(d) : DBLN(d)) + +/* forward/backward translation for addresses between Vmbest and Vmdebug */ +#define DB2BEST(d) ((Vmuchar_t*)(d) - 2*sizeof(Head_t)) +#define DB2DEBUG(b) ((Vmuchar_t*)(b) + 2*sizeof(Head_t)) + +/* set file and line number, note that DBLN > 0 so that DBISBAD will work */ +#define DBSETFL(d,f,l) (DBFILE(d) = (f), DBLN(d) = (f) ? (l) : 1) + +/* set and test the state of known to be corrupted */ +#define DBSETBAD(d) (DBLN(d) > 0 ? 
(DBLN(d) = -DBLN(d)) : -1) +#define DBISBAD(d) (DBLN(d) <= 0) + +#define DB_MAGIC 0255 /* 10101101 */ + +/* compute the bounds of the magic areas */ +#define DBHEAD(d,begp,endp) \ + (((begp) = (Vmuchar_t*)(&DBSEG(d)) + sizeof(Seg_t*)), ((endp) = (d)) ) +#define DBTAIL(d,begp,endp) \ + (((begp) = (Vmuchar_t*)(d)+DBSIZE(d)), ((endp) = (Vmuchar_t*)(&DBLN(d))) ) + + +/* external symbols for use inside vmalloc only */ +typedef Block_t* (*Vmsearch_f)_ARG_((Vmdata_t*, size_t, Block_t*)); +typedef struct _vmextern_s +{ Block_t* (*vm_extend)_ARG_((Vmalloc_t*, size_t, Vmsearch_f )); + ssize_t (*vm_truncate)_ARG_((Vmalloc_t*, Seg_t*, size_t, int)); + size_t vm_pagesize; + char* (*vm_strcpy)_ARG_((char*, const char*, int)); + char* (*vm_itoa)_ARG_((Vmulong_t, int)); + void (*vm_trace)_ARG_((Vmalloc_t*, + Vmuchar_t*, Vmuchar_t*, size_t, size_t)); + void (*vm_pfclose)_ARG_((Vmalloc_t*)); + unsigned int vm_lock; + int vm_assert; + int vm_options; +} Vmextern_t; + +#define _Vmextend (_Vmextern.vm_extend) +#define _Vmtruncate (_Vmextern.vm_truncate) +#define _Vmpagesize (_Vmextern.vm_pagesize) +#define _Vmstrcpy (_Vmextern.vm_strcpy) +#define _Vmitoa (_Vmextern.vm_itoa) +#define _Vmtrace (_Vmextern.vm_trace) +#define _Vmpfclose (_Vmextern.vm_pfclose) +#define _Vmlock (_Vmextern.vm_lock) +#define _Vmassert (_Vmextern.vm_assert) +#define _Vmoptions (_Vmextern.vm_options) + +#define VMOPTIONS() do { if (!_Vmoptions) { _vmoptions(); } } while (0) + +extern int _vmbestcheck _ARG_((Vmdata_t*, Block_t*)); +extern int _vmlock _ARG_((Vmalloc_t*, int)); +extern void _vmoptions _ARG_((void)); + +_BEGIN_EXTERNS_ + +extern Vmextern_t _Vmextern; + +#if _PACKAGE_ast + +#if _npt_getpagesize +extern int getpagesize _ARG_((void)); +#endif +#if _npt_sbrk +extern int brk _ARG_(( void* )); +extern Void_t* sbrk _ARG_(( ssize_t )); +#endif + +#else + +#if _hdr_unistd +#include <unistd.h> +#else +extern void abort _ARG_(( void )); +extern ssize_t write _ARG_(( int, const void*, size_t )); +extern int 
getpagesize _ARG_((void)); +extern Void_t* sbrk _ARG_((ssize_t)); +#endif + +#if !__STDC__ && !_hdr_stdlib +extern size_t strlen _ARG_(( const char* )); +extern char* strcpy _ARG_(( char*, const char* )); +extern int strcmp _ARG_(( const char*, const char* )); +extern int atexit _ARG_(( void(*)(void) )); +extern char* getenv _ARG_(( const char* )); +extern Void_t* memcpy _ARG_(( Void_t*, const Void_t*, size_t )); +extern Void_t* memset _ARG_(( Void_t*, int, size_t )); +#else +#include <stdlib.h> +#include <string.h> +#endif + +/* for vmexit.c */ +extern int onexit _ARG_(( void(*)(void) )); +extern void _exit _ARG_(( int )); +extern void _cleanup _ARG_(( void )); + +#endif /*_PACKAGE_ast*/ + +/* for vmdcsbrk.c */ +#if !_typ_ssize_t +typedef int ssize_t; +#endif + +_END_EXTERNS_ + +#endif /* _VMHDR_H */ diff --git a/src/lib/libast/vmalloc/vmlast.c b/src/lib/libast/vmalloc/vmlast.c new file mode 100644 index 0000000..31f70b7 --- /dev/null +++ b/src/lib/libast/vmalloc/vmlast.c @@ -0,0 +1,431 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmlast(){} + +#else + +#include "vmhdr.h" + +/* Allocation with freeing and reallocing of last allocated block only. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/
+
+/* lastalloc: allocate a block of at least `size` bytes from the Vmlast
+** region.  Only the most recently allocated block (recorded in vd->free
+** and seg->last) can later be freed or resized by this method.
+** Returns the block address, or NIL if no segment has enough free space
+** and the region cannot be extended.
+*/
+#if __STD_C
+static Void_t* lastalloc(Vmalloc_t* vm, size_t size, int local)
+#else
+static Void_t* lastalloc(vm, size, local)
+Vmalloc_t*	vm;
+size_t		size;
+int		local;
+#endif
+{
+	Block_t		*tp, *next;
+	Seg_t		*seg, *last;
+	size_t		s;
+	Vmdata_t	*vd = vm->data;
+	size_t		orgsize = size;	/* unrounded size, reported to tracing */
+
+	SETLOCK(vm, local);
+
+	size = size < ALIGN ? ALIGN : ROUND(size,ALIGN);
+	for(last = NIL(Seg_t*), seg = vd->seg; seg; last = seg, seg = seg->next)
+	{	if(!(tp = seg->free) || (SIZE(tp)+sizeof(Head_t)) < size)
+			continue;
+		/* move the segment with usable space to the head of the
+		** segment list so the next allocation finds it first */
+		if(last)
+		{	last->next = seg->next;
+			seg->next = vd->seg;
+			vd->seg = seg;
+		}
+		goto got_block;
+	}
+
+	/* there is no usable free space in region, try extending */
+	if((tp = (*_Vmextend)(vm,size,NIL(Vmsearch_f))) )
+	{	seg = SEG(tp);
+		goto got_block;
+	}
+	else	goto done;
+
+got_block:
+	/* carve the allocation off the front; any remainder becomes the
+	** segment's new free block */
+	if((s = SIZE(tp)) >= size)
+	{	next = (Block_t*)((Vmuchar_t*)tp+size);
+		SIZE(next) = s - size;
+		SEG(next) = seg;
+		seg->free = next;
+	}
+	else	seg->free = NIL(Block_t*);
+
+	vd->free = seg->last = tp;	/* remember the one resizable/freeable block */
+
+	if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
+		(*_Vmtrace)(vm, NIL(Vmuchar_t*), (Vmuchar_t*)tp, orgsize, 0);
+
+done:
+	CLRLOCK(vm, local);
+
+	return (Void_t*)tp;
+}
+
+/* lastfree: free `data`, which must be the last allocated block
+** (vd->free); any other address is rejected.  The freed space is
+** returned to the containing segment's free block.
+** Returns 0 on success, -1 if data was not the last block.
+*/
+#if __STD_C
+static int lastfree(Vmalloc_t* vm, reg Void_t* data, int local )
+#else
+static int lastfree(vm, data, local)
+Vmalloc_t*	vm;
+Void_t*		data;
+int		local;
+#endif
+{
+	Seg_t		*seg;
+	Block_t		*fp;
+	size_t		s;
+	Vmdata_t	*vd = vm->data;
+
+	if(!data)	/* freeing NIL is a no-op */
+		return 0;
+
+	SETLOCK(vm, local);
+
+	if(data != (Void_t*)vd->free)
+		data = NIL(Void_t*); /* signaling an error */
+	else
+	{	seg = vd->seg;
+		if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
+		{	if(seg->free )
+				s = (Vmuchar_t*)(seg->free) - (Vmuchar_t*)data;
+			else	s = (Vmuchar_t*)BLOCK(seg->baddr) - (Vmuchar_t*)data;
+			(*_Vmtrace)(vm, (Vmuchar_t*)data, NIL(Vmuchar_t*), s, 0);
+		}
+
+		/* the freed block becomes the segment's free block, spanning
+		** to the end of the segment */
+		vd->free = NIL(Block_t*);
+		fp = (Block_t*)data;
+		SEG(fp) = seg;
+		SIZE(fp) = ((Vmuchar_t*)BLOCK(seg->baddr) - (Vmuchar_t*)data) - sizeof(Head_t);
+		seg->free = fp;
+		seg->last = NIL(Block_t*);
+	}
+
+	CLRLOCK(vm, local);
+
+	return data ? 0 : -1;
+}
+
+/* lastresize: resize the last allocated block.  NIL data degenerates to
+** lastalloc; size<=0 degenerates to lastfree (Vmalloc resize convention).
+** `type` carries the VM_RSMOVE|VM_RSCOPY|VM_RSZERO flags controlling
+** whether the block may move, old data is copied, and growth is zeroed.
+*/
+#if __STD_C
+static Void_t* lastresize(Vmalloc_t* vm, reg Void_t* data, size_t size, int type, int local)
+#else
+static Void_t* lastresize(vm, data, size, type, local )
+Vmalloc_t*	vm;
+reg Void_t*	data;
+size_t		size;
+int		type;
+int		local;
+#endif
+{
+	Block_t		*tp;
+	Seg_t		*seg;
+	ssize_t		s, ds;
+	Void_t		*addr;
+	size_t		oldsize = 0;
+	Void_t		*orgdata = data;	/* for tracing */
+	size_t		orgsize = size;		/* for tracing */
+	Vmdata_t	*vd = vm->data;
+
+	if(!data)
+	{	data = lastalloc(vm, size, local);
+		if(data && (type&VM_RSZERO) )
+			memset(data, 0, size);
+		return data;
+	}
+	if(size <= 0)
+	{	(void)lastfree(vm, data, local);
+		return NIL(Void_t*);
+	}
+
+	SETLOCK(vm, local);
+
+	if(data == (Void_t*)vd->free)
+		seg = vd->seg;
+	else
+	{	/* see if it was one of ours */
+		for(seg = vd->seg; seg; seg = seg->next)
+			if(data >= seg->addr && data < (Void_t*)seg->baddr)
+				break;
+		if(!seg || (VLONG(data)%ALIGN) != 0 ||
+		   (seg->last && (Vmuchar_t*)data > (Vmuchar_t*)seg->last) )
+		{	data = NIL(Void_t*);
+			goto done;
+		}
+	}
+
+	/* set 's' to be the current available space */
+	if(data != seg->last)
+	{	/* not the last block: s = -1 forces reallocation below */
+		if(seg->last && (Vmuchar_t*)data < (Vmuchar_t*)seg->last)
+			oldsize = (Vmuchar_t*)seg->last - (Vmuchar_t*)data;
+		else	oldsize = (Vmuchar_t*)BLOCK(seg->baddr) - (Vmuchar_t*)data;
+		s = -1;
+	}
+	else
+	{	s = (Vmuchar_t*)BLOCK(seg->baddr) - (Vmuchar_t*)data;
+		if(!(tp = seg->free) )
+			oldsize = s;
+		else
+		{	oldsize = (Vmuchar_t*)tp - (Vmuchar_t*)data;
+			seg->free = NIL(Block_t*);
+		}
+	}
+
+	size = size < ALIGN ?
ALIGN : ROUND(size,ALIGN);
+	if(s < 0 || (ssize_t)size > s)
+	{	if(s >= 0) /* amount to extend */
+		{	/* in-place growth: ask the discipline to extend the
+			** segment; fall back to reallocation on failure */
+			ds = size-s; ds = ROUND(ds,vd->incr);
+			addr = (*vm->disc->memoryf)(vm, seg->addr, seg->extent,
+						    seg->extent+ds, vm->disc);
+			if(addr == seg->addr)
+			{	s += ds;
+				seg->size += ds;
+				seg->extent += ds;
+				seg->baddr += ds;
+				SIZE(BLOCK(seg->baddr)) = BUSY;
+			}
+			else	goto do_alloc;
+		}
+		else
+		{ do_alloc:
+			if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
+				data = NIL(Void_t*);	/* caller forbids moving */
+			else
+			{	tp = vd->free;
+				if(!(addr = KPVALLOC(vm,size,lastalloc)) )
+				{	vd->free = tp;
+					data = NIL(Void_t*);
+				}
+				else
+				{	if(type&VM_RSCOPY)
+					{	ds = oldsize < size ? oldsize : size;
+						memcpy(addr, data, ds);
+					}
+
+					/* return the old block's space to its segment */
+					if(s >= 0 && seg != vd->seg)
+					{	tp = (Block_t*)data;
+						SEG(tp) = seg;
+						SIZE(tp) = s - sizeof(Head_t);
+						seg->free = tp;
+					}
+
+					/* new block and size */
+					data = addr;
+					seg = vd->seg;
+					s = (Vmuchar_t*)BLOCK(seg->baddr) -
+					    (Vmuchar_t*)data;
+					seg->free = NIL(Block_t*);
+				}
+			}
+		}
+	}
+
+	if(data)
+	{	/* trim the tail beyond `size` back into the segment */
+		if(s >= (ssize_t)(size+sizeof(Head_t)) )
+		{	tp = (Block_t*)((Vmuchar_t*)data + size);
+			SEG(tp) = seg;
+			SIZE(tp) = (s - size) - sizeof(Head_t);
+			seg->free = tp;
+		}
+
+		vd->free = seg->last = (Block_t*)data;
+
+		if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
+			(*_Vmtrace)(vm,(Vmuchar_t*)orgdata,(Vmuchar_t*)data,orgsize,0);
+
+		if((type&VM_RSZERO) && size > oldsize)
+			memset((Void_t*)((Vmuchar_t*)data + oldsize), 0, size-oldsize);
+	}
+
+done:	CLRLOCK(vm, local);
+
+	return data;
+}
+
+
+/* lastaddr: if addr lies inside the last allocated block, return its
+** offset from the start of that block; otherwise return -1L.
+*/
+#if __STD_C
+static long lastaddr(Vmalloc_t* vm, Void_t* addr, int local)
+#else
+static long lastaddr(vm, addr, local)
+Vmalloc_t*	vm;
+Void_t*		addr;
+int		local;
+#endif
+{
+	long	offset;
+	Vmdata_t	*vd = vm->data;
+
+	SETLOCK(vm, local);
+
+	if(!vd->free || addr < (Void_t*)vd->free || addr >= (Void_t*)vd->seg->baddr)
+		offset = -1L;
+	else	offset = (Vmuchar_t*)addr - (Vmuchar_t*)vd->free;
+
+	CLRLOCK(vm, local);
+
+	return offset;
+}
+
+/* lastsize: return the usable size of the last allocated block if addr
+** is exactly its start, else -1L.
+*/
+#if __STD_C
+static long lastsize(Vmalloc_t* vm, Void_t* addr,
int local)
+#else
+static long lastsize(vm, addr, local)
+Vmalloc_t*	vm;
+Void_t*		addr;
+int		local;
+#endif
+{
+	long	size;
+	Vmdata_t	*vd = vm->data;
+
+	SETLOCK(vm, local);
+
+	if(!vd->free || addr != (Void_t*)vd->free )
+		size = -1L;
+	else if(vd->seg->free)
+		size = (Vmuchar_t*)vd->seg->free - (Vmuchar_t*)addr;
+	else	size = (Vmuchar_t*)vd->seg->baddr - (Vmuchar_t*)addr - sizeof(Head_t);
+
+	CLRLOCK(vm, local);
+
+	return size;
+}
+
+/* lastcompact: give each segment's free tail back to the system via
+** _Vmtruncate; a segment keeps its free block only if truncation
+** returned the requested size.  Always returns 0.
+*/
+#if __STD_C
+static int lastcompact(Vmalloc_t* vm, int local)
+#else
+static int lastcompact(vm, local)
+Vmalloc_t*	vm;
+int		local;
+#endif
+{
+	ssize_t	s;
+	Block_t	*fp;
+	Seg_t	*seg, *next;
+	Vmdata_t	*vd = vm->data;
+
+	SETLOCK(vm, local);
+
+	for(seg = vd->seg; seg; seg = next)
+	{	next = seg->next;
+
+		if(!(fp = seg->free))
+			continue;
+
+		seg->free = NIL(Block_t*);
+		if(seg->size == (s = SIZE(fp)&~BITS))
+			s = seg->extent;	/* whole segment is free */
+		else	s += sizeof(Head_t);
+
+		if((*_Vmtruncate)(vm,seg,s,1) == s)
+			seg->free = fp;
+	}
+
+	if((vd->mode&VM_TRACE) && _Vmtrace)
+		(*_Vmtrace)(vm,(Vmuchar_t*)0,(Vmuchar_t*)0,0,0);
+
+	CLRLOCK(vm, local);
+	return 0;
+}
+
+/* lastalign: allocate `size` bytes whose address is 0%align.
+** Over-allocates by align via lastalloc, advances to the aligned
+** address, then returns the unused tail to the segment.
+*/
+#if __STD_C
+static Void_t* lastalign(Vmalloc_t* vm, size_t size, size_t align, int local)
+#else
+static Void_t* lastalign(vm, size, align, local)
+Vmalloc_t*	vm;
+size_t		size;
+size_t		align;
+int		local;
+#endif
+{
+	Vmuchar_t	*data;
+	Seg_t		*seg;
+	Block_t		*next;
+	size_t		s, orgsize = size, orgalign = align;
+	Vmdata_t	*vd = vm->data;
+
+	if(size <= 0 || align <= 0)
+		return NIL(Void_t*);
+
+	SETLOCK(vm, local);
+
+	size = size <= TINYSIZE ?
TINYSIZE : ROUND(size,ALIGN);
+	align = MULTIPLE(align,ALIGN);
+
+	s = size + align;
+	if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,lastalloc)) )
+		goto done;
+
+	/* find the segment containing this block */
+	for(seg = vd->seg; seg; seg = seg->next)
+		if(seg->last == (Block_t*)data)
+			break;
+	/**/ASSERT(seg);
+
+	/* get a suitably aligned address */
+	if((s = (size_t)(VLONG(data)%align)) != 0)
+		data += align-s; /**/ASSERT((VLONG(data)%align) == 0);
+
+	/* free the unused tail */
+	next = (Block_t*)(data+size);
+	if((s = (seg->baddr - (Vmuchar_t*)next)) >= sizeof(Block_t))
+	{	SEG(next) = seg;
+		SIZE(next) = s - sizeof(Head_t);
+		seg->free = next;
+	}
+
+	vd->free = seg->last = (Block_t*)data;
+
+	if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
+		(*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);
+
+done:
+	CLRLOCK(vm, local);
+
+	return (Void_t*)data;
+}
+
+/* Public method for free-1 allocation */
+static Vmethod_t _Vmlast =
+{
+	lastalloc,
+	lastresize,
+	lastfree,
+	lastaddr,
+	lastsize,
+	lastcompact,
+	lastalign,
+	VM_MTLAST
+};
+
+__DEFINE__(Vmethod_t*,Vmlast,&_Vmlast);
+
+#ifdef NoF
+NoF(vmlast)
+#endif
+
+#endif
diff --git a/src/lib/libast/vmalloc/vmmopen.c b/src/lib/libast/vmalloc/vmmopen.c
new file mode 100644
index 0000000..4523cae
--- /dev/null
+++ b/src/lib/libast/vmalloc/vmmopen.c
@@ -0,0 +1,518 @@
+/***********************************************************************
+* *
+* This software is part of the ast package *
+* Copyright (c) 1985-2012 AT&T Intellectual Property *
+* and is licensed under the *
+* Eclipse Public License, Version 1.0 *
+* by AT&T Intellectual Property *
+* *
+* A copy of the License is available at *
+* http://www.eclipse.org/org/documents/epl-v10.html *
+* (with md5 checksum b35adb5213ca9657e911e9befb180842) *
+* *
+* Information and Software Systems Research *
+* AT&T Research *
+* Florham Park NJ *
+* *
+* Glenn Fowler <gsf@research.att.com> *
+* David Korn <dgk@research.att.com> *
+* Phong Vo <kpv@research.att.com> *
+* *
+***********************************************************************/
+#if defined(_UWIN) && defined(_BLD_ast)
+
+void _STUB_vmmapopen(){}
+
+#else
+
+#include "vmhdr.h"
+#include <sys/types.h>
+#include <string.h>
+#if _hdr_unistd
+#include <unistd.h>
+#endif
+
+#undef ALIGN	/* some sys/param.h define this */
+
+#include <sys/mman.h>	/* mmap() headers */
+#include <sys/file.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <sys/shm.h>	/* shm headers */
+#include <sys/ipc.h>
+
+#undef ALIGN
+#define ALIGN	sizeof(struct _align_s)
+
+/* Create a region to allocate based on mmap() or shmget().
+** Both ways provide for share memory allocation.
+** mmap() also allows for allocating persistent data.
+**
+** Written by Kiem-Phong Vo (kpv@research.att.com)
+*/
+
+#define MM_INIT		001	/* initialization mode */
+
+#define MM_RELEASE	010	/* release share mem */
+#define MM_CLEANUP	020	/* clean up resources */
+
+/* magic word signaling region is being initialized */
+#define MM_LETMEDOIT	((unsigned int)(('N'<<24) | ('B'<<16) | ('&'<<8) | ('I')) )
+
+/* magic word signaling file/segment is ready */
+#define MM_MAGIC	((unsigned int)(('P'<<24) | ('&'<<16) | ('N'<<8) | ('8')) )
+
+/* default minimum region size */
+#define MM_MINSIZE	(64*_Vmpagesize)
+
+/* macros to get the data section and size */
+#define MMHEAD(file)	ROUND(sizeof(Mmvm_t)+strlen(file), ALIGN)
+#define MMDATA(mmvm)	((Vmuchar_t*)(mmvm)->base + MMHEAD(mmvm->file))
+#define MMSIZE(mmvm)	((mmvm)->size - MMHEAD(mmvm->file))
+
+#ifdef S_IRUSR
+#define FILE_MODE	(S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)
+#else
+#define FILE_MODE	0644
+#endif
+
+/* to store key/value pairs for application */
+typedef struct _mmuser_s	Mmuser_t;
+struct _mmuser_s
+{	Mmuser_t*	next;	/* link list */
+	int		key;	/* identifying key */
+	Void_t*		val;	/* associated value */
+};
+
+/* header stored at the front of the mapped file/segment, shared by all
+** attaching processes */
+typedef struct _mmvm_s
+{	unsigned int	magic;	/* magic bytes */
+	Void_t*		base;	/* address to map to */
+	ssize_t		size;	/* total data size */
+	ssize_t		busy;	/* amount in use */
+	Mmuser_t*	user;	/* stored (key,val)'s */
+	int		proj;	/* project number */
+	char		file[1];/* file name */
+} Mmvm_t;
+
+/* per-process discipline wrapping the shared map */
+typedef struct _mmdisc_s
+{	Vmdisc_t	disc;	/* Vmalloc discipline */
+	int		flag;	/* various modes */
+	Mmvm_t*		mmvm;	/* shared memory data */
+	ssize_t		size;	/* desired file size */
+	int		shmid;	/* ID of the shared mem */
+	int		proj;	/* shm project ID */
+	char		file[1];/* backing store/ftok() */
+} Mmdisc_t;
+
+#if DEBUG
+#include <stdio.h>
+#include <string.h>
+/* dump discipline and map-header state to fd (debugging aid) */
+int _vmmdump(Vmalloc_t* vm, int fd)
+{
+	char		mesg[1024];
+	Mmdisc_t	*mmdc = (Mmdisc_t*)vm->disc;
+
+	fd = fd < 0 ? 2 : fd;
+	sprintf(mesg, "File: %s\n", mmdc->file ); write(fd, mesg, strlen(mesg));
+	sprintf(mesg, "Project: %10d\n", mmdc->proj); write(fd, mesg, strlen(mesg));
+	sprintf(mesg, "Memory: %#010lx\n", mmdc->mmvm); write(fd, mesg, strlen(mesg));
+	sprintf(mesg, "Size: %10d\n", mmdc->size); write(fd, mesg, strlen(mesg));
+	sprintf(mesg, "Shmid: %10d\n", mmdc->shmid); write(fd, mesg, strlen(mesg));
+
+	sprintf(mesg, "File header:\n"); write(fd, mesg, strlen(mesg));
+	sprintf(mesg, "Magic: %10d\n", mmdc->mmvm->magic); write(fd, mesg, strlen(mesg));
+	sprintf(mesg, "Base: %#010lx\n", mmdc->mmvm->base); write(fd, mesg, strlen(mesg));
+	sprintf(mesg, "Size: %10d\n", mmdc->mmvm->size); write(fd, mesg, strlen(mesg));
+	sprintf(mesg, "Busy: %10d\n", mmdc->mmvm->busy); write(fd, mesg, strlen(mesg));
+	return 0;
+}
+#endif /*DEBUG*/
+
+/* fix the mapped address for a region: remap/reattach at the base
+** address recorded in the shared header so all processes see the map
+** at the same address.  Returns NIL on failure. */
+static Mmvm_t* mmfix(Mmvm_t* mmvm, Mmdisc_t* mmdc, int fd)
+{
+	Void_t	*base = mmvm->base;
+	ssize_t	size = mmvm->size;
+
+	if(base != (Void_t*)mmvm) /* mmvm is not right yet */
+	{	/**/ASSERT(!base || (base && (VLONG(base)%_Vmpagesize) == 0) );
+		if(mmdc->proj < 0)
+		{	munmap((Void_t*)mmvm, size);
+			mmvm = (Mmvm_t*)mmap(base, size, (PROT_READ|PROT_WRITE),
+					     (MAP_FIXED|MAP_SHARED), fd, (off_t)0 );
+		}
+		else
+		{	shmdt((Void_t*)mmvm);
+			mmvm = (Mmvm_t*)shmat(mmdc->shmid, base, 0);
+		}
+		if(!mmvm || mmvm == (Mmvm_t*)(-1) )
+			mmvm = NIL(Mmvm_t*);
+	}
+
+	return mmvm;
+}
+
+/* initialize region data.
+** Returns -1 on error, 0 if this process won the race and initialized a
+** brand-new map, 1 if an already-initialized map was attached.
+*/
+static int mminit(Mmdisc_t* mmdc)
+{
+	struct shmid_ds	shmds;
+	Void_t		*base;
+	int		try, k;
+	int		fd = -1;
+	key_t		key = -1;
+	ssize_t		extent, size = 0;
+	Mmvm_t		*mmvm = NIL(Mmvm_t*);
+	int		rv = -1;
+
+	if(mmdc->mmvm) /* already done this */
+		return 0;
+
+	/* fixed size region so make it reasonably large */
+	if((size = mmdc->size) < MM_MINSIZE )
+		size = MM_MINSIZE;
+	size += MMHEAD(mmdc->file) + ALIGN;
+	size = ROUND(size, _Vmpagesize);
+
+	/* this op can happen simultaneously in different processes */
+	if((fd = open(mmdc->file, O_RDWR|O_CREAT, FILE_MODE)) < 0)
+		return -1;
+
+	/* get/create the initial segment of data */
+	if(mmdc->proj < 0 ) /* proj < 0 means doing mmap() */
+	{	/* Note that the location being written to is always zero! */
+		if((extent = (ssize_t)lseek(fd, (off_t)0, SEEK_END)) < 0)
+			goto done;
+		if(extent < size) /* make the file size large enough */
+			if(lseek(fd, (off_t)size, 0) != (off_t)size || write(fd, "", 1) != 1 )
+				goto done;
+
+		/* map the file into memory */
+		mmvm = (Mmvm_t*)mmap(NIL(Void_t*), size, (PROT_READ|PROT_WRITE),
+				     MAP_SHARED, fd, (off_t)0 );
+	}
+	else
+	{	/* make the key and get/create an id for the share mem segment */
+		if((key = ftok(mmdc->file, mmdc->proj)) < 0 )
+			goto done;
+		if((mmdc->shmid = shmget(key, size, IPC_CREAT|FILE_MODE)) < 0 )
+			goto done;
+
+		/* map the data segment into memory */
+		mmvm = (Mmvm_t*)shmat(mmdc->shmid, NIL(Void_t*), 0);
+	}
+
+	if(!mmvm || mmvm == (Mmvm_t*)(-1) ) /* initial mapping failed */
+		goto done;
+
+	/* all processes compete for the chore to initialize data */
+	if(asocasint(&mmvm->magic, 0, MM_LETMEDOIT) == 0 ) /* lucky winner: us! */
+	{	if(!(base = vmmaddress(size)) ) /* get a suitable base for the map */
+			base = (Void_t*)mmvm;
+		mmdc->flag |= MM_INIT;
+		mmvm->base = base;
+		mmvm->size = size;
+		mmvm->busy = 0;
+		mmvm->proj = mmdc->proj;
+		strcpy(mmvm->file, mmdc->file);
+		if(mmdc->proj < 0 ) /* flush to file */
+			msync((Void_t*)mmvm, MMHEAD(mmvm->file), MS_SYNC);
+
+		if(mmvm->base != (Void_t*)mmvm) /* not yet at the right address */
+			if(!(mmvm = mmfix(mmvm, mmdc, fd)) )
+				goto done;
+		rv = 0; /* success, return this value to indicate a new map */
+	}
+	else /* wait for someone else to finish initialization */
+	{	/**/ASSERT(!(mmdc->flag&MM_INIT));
+		if(mmvm->magic != MM_LETMEDOIT && mmvm->magic != MM_MAGIC)
+			goto done;
+
+		/* spin until the winner flips magic to MM_MAGIC */
+		for(try = 0, k = 0;; ASOLOOP(k) ) /* waiting */
+		{	if(asocasint(&mmvm->magic, MM_MAGIC, MM_MAGIC) == MM_MAGIC )
+				break;
+			else if((try += 1) <= 0 ) /* too many tries */
+				goto done;
+		}
+
+		/* mapped the wrong memory */
+		if(mmvm->proj != mmdc->proj || strcmp(mmvm->file, mmdc->file) != 0 )
+			goto done;
+
+		if(mmvm->base != (Void_t*)mmvm) /* not yet at the right address */
+			if(!(mmvm = mmfix(mmvm, mmdc, fd)) )
+				goto done;
+		rv = 1; /* success, return this value to indicate a finished map */
+	}
+
+done:	(void)close(fd);
+
+	if(rv >= 0 ) /* successful construction of region */
+	{	/**/ASSERT(mmvm && mmvm != (Mmvm_t*)(-1));
+		mmdc->mmvm = mmvm;
+	}
+	else if(mmvm && mmvm != (Mmvm_t*)(-1)) /* error, remove map */
+	{	if(mmdc->proj < 0)
+			(void)munmap((Void_t*)mmvm, size);
+		else	(void)shmdt((Void_t*)mmvm);
+	}
+
+	return rv;
+}
+
+#if __STD_C /* end a file mapping */
+static int mmend(Mmdisc_t* mmdc)
+#else
+static int mmend(mmdc)
+Mmdisc_t*	mmdc;
+#endif
+{
+	Mmvm_t		*mmvm;
+	struct shmid_ds	shmds;
+
+	if(!(mmvm = mmdc->mmvm) )
+		return 0;
+
+	if(mmdc->proj < 0 )
+	{	(void)msync(mmvm->base, mmvm->size, MS_ASYNC);
+		if(mmdc->flag&MM_RELEASE)
+		{	if(mmvm->base )
+				(void)munmap(mmvm->base, mmvm->size);
+		}
+		if(mmdc->flag&MM_CLEANUP)
+			(void)unlink(mmdc->file);
+	}
+	else
+	{	if(mmdc->flag&MM_RELEASE)
+		{	if(mmvm->base )
+				(void)shmdt(mmvm->base);
+		}
+		if(mmdc->flag&MM_CLEANUP)
+		{	if(mmdc->shmid >= 0 )
+				(void)shmctl(mmdc->shmid, IPC_RMID, &shmds);
+		}
+	}
+
+	mmdc->mmvm = NIL(Mmvm_t*);
+	return 0;
+}
+
+/* mmgetmem: the discipline memory function.  The map holds exactly one
+** allocatable area (MMDATA); requests are satisfied only for that area.
+** csize is not consulted beyond the caddr test.
+*/
+#if __STD_C
+static Void_t* mmgetmem(Vmalloc_t* vm, Void_t* caddr,
+			size_t csize, size_t nsize, Vmdisc_t* disc)
+#else
+static Void_t* mmgetmem(vm, caddr, csize, nsize, disc)
+Vmalloc_t*	vm;
+Void_t*		caddr;
+size_t		csize;
+size_t		nsize;
+Vmdisc_t*	disc;
+#endif
+{
+	Mmvm_t		*mmvm;
+	Mmdisc_t	*mmdc = (Mmdisc_t*)disc;
+
+	if(!(mmvm = mmdc->mmvm) ) /* bad data */
+		return NIL(Void_t*);
+
+	/* this region allows only a single busy block! */
+	if(caddr) /* resizing/freeing an existing block */
+	{	if(caddr == MMDATA(mmvm) && nsize <= MMSIZE(mmvm) )
+		{	mmvm->busy = nsize;
+			return MMDATA(mmvm);
+		}
+		else	return NIL(Void_t*);
+	}
+	else /* requesting a new block */
+	{	if(mmvm->busy == 0 )
+		{	mmvm->busy = nsize;
+			return MMDATA(mmvm);
+		}
+		else	return NIL(Void_t*);
+	}
+}
+
+/* mmexcept: the discipline exception handler; coordinates construction
+** and teardown of the shared map across the vmopen()/vmclose() events.
+*/
+#if __STD_C
+static int mmexcept(Vmalloc_t* vm, int type, Void_t* data, Vmdisc_t* disc)
+#else
+static int mmexcept(vm, type, data, disc)
+Vmalloc_t*	vm;
+int		type;
+Void_t*		data;
+Vmdisc_t*	disc;
+#endif
+{
+	int		rv;
+	Void_t		*base;
+	Vmdata_t	*vd = vm->data;
+	Mmdisc_t	*mmdc = (Mmdisc_t*)disc;
+
+	if(type == VM_OPEN)
+	{	if(data) /* VM_OPEN event at start of vmopen() */
+		{	if((rv = mminit(mmdc)) < 0 ) /* initialization failed */
+				return -1;
+			else if(rv == 0) /* just started a new map */
+			{	/**/ASSERT(mmdc->flag&MM_INIT);
+				/**/ASSERT(mmdc->mmvm->magic == MM_LETMEDOIT);
+				return 0;
+			}
+			else /* an existing map was reconstructed */
+			{	/**/ASSERT(!(mmdc->flag&MM_INIT));
+				/**/ASSERT(mmdc->mmvm->magic == MM_MAGIC);
+				*((Void_t**)data) = MMDATA(mmdc->mmvm);
+				return 1;
+			}
+		}
+		else	return 0;
+	}
+	else if(type == VM_ENDOPEN) /* at end of vmopen() */
+	{	if(mmdc->flag&MM_INIT) /* this is the initializing process!
*/ + { /**/ASSERT(mmdc->mmvm->magic == MM_LETMEDOIT); + asocasint(&mmdc->mmvm->magic, MM_LETMEDOIT, MM_MAGIC); + + if(mmdc->proj < 0) /* sync data to file now */ + msync((Void_t*)mmdc->mmvm, MMHEAD(mmdc->file), MS_SYNC); + } /**/ASSERT(mmdc->mmvm->magic == MM_MAGIC); + return 0; + } + else if(type == VM_CLOSE) + return 1; /* tell vmclose not to free memory segments */ + else if(type == VM_ENDCLOSE) /* this is the final closing event */ + { (void)mmend(mmdc); + (void)vmfree(Vmheap, mmdc); + return 0; /* all done */ + } + else return 0; +} + +#if __STD_C +Vmalloc_t* vmmopen(char* file, int proj, ssize_t size ) +#else +Vmalloc_t* vmmopen(file, proj, size ) +char* file; /* file for key or data backing */ +int proj; /* project ID, < 0 doing mmap */ +ssize_t size; /* desired size for mem segment */ +#endif +{ + Vmalloc_t *vm; + Mmdisc_t *mmdc; + + GETPAGESIZE(_Vmpagesize); + + if(!file || !file[0] ) + return NIL(Vmalloc_t*); + + /* create discipline structure for getting memory from mmap */ + if(!(mmdc = vmalloc(Vmheap, sizeof(Mmdisc_t)+strlen(file))) ) + return NIL(Vmalloc_t*); + memset(mmdc, 0, sizeof(Mmdisc_t)); + mmdc->disc.memoryf = mmgetmem; + mmdc->disc.exceptf = mmexcept; + mmdc->disc.round = _Vmpagesize; /* round request to this size */ + mmdc->mmvm = NIL(Mmvm_t*); + mmdc->size = size; + mmdc->shmid = -1; + mmdc->flag = 0; + mmdc->proj = proj; + strcpy(mmdc->file, file); + + /* now open the Vmalloc_t handle to return to application */ + if(!(vm = vmopen(&mmdc->disc, Vmbest, VM_SHARE)) ) + { (void)mmend(mmdc); + (void)vmfree(Vmheap, mmdc); + return NIL(Vmalloc_t*); + } + else + { /**/ASSERT(mmdc->mmvm && mmdc->mmvm->magic == MM_MAGIC); + return vm; + } +} + +/* to store (key,value) data in the map */ +#if __STD_C +Void_t* vmmvalue(Vmalloc_t* vm, int key, Void_t* val, int oper) +#else +Void_t* vmmvalue(vm, key, val, oper) +Vmalloc_t* vm; /* a region based on vmmapopen */ +int key; /* key of data to be set */ +Void_t* val; /* data to be set */ +int oper; /* 
operation type */ +#endif +{ + Mmuser_t *u; + Vmdata_t *vd = vm->data; + Mmdisc_t *mmdc = (Mmdisc_t*)vm->disc; + Mmvm_t *mmvm = mmdc->mmvm; + + /* check to see if operation is well-defined */ + if(oper != VM_MMGET && oper != VM_MMSET && oper != VM_MMADD) + return NIL(Void_t*); + + SETLOCK(vm, 0); + + /* find the key */ + for(u = mmvm->user; u; u = u->next) + if(u->key == key) + break; + + if(!u && (oper == VM_MMSET || oper == VM_MMADD) ) + { if((u = KPVALLOC(vm, sizeof(Mmuser_t), vm->meth.allocf)) ) + { u->val = NIL(Void_t*); + u->key = key; + u->next = mmvm->user; + mmvm->user = u; + } + } + + if(u) /* update data and set value to return */ + { if(oper == VM_MMSET) + u->val = val; + else if(oper == VM_MMADD) + u->val = (Void_t*)((long)(u->val) + (long)(val)); + val = u->val; + } + else val = NIL(Void_t*); + + CLRLOCK(vm, 0); + + return val; +} + +void vmmrelease(Vmalloc_t* vm, int type) +{ + Mmdisc_t *mmdc = (Mmdisc_t*)vm->disc; + + mmdc->flag |= MM_RELEASE; + if(type > 0) + mmdc->flag |= MM_CLEANUP; +} + +/* suggest an address usable for mapping memory */ +Void_t* vmmaddress(size_t size) +{ +#if !defined(_map_min) || !defined(_map_max) || !defined(_map_dir) + return NIL(Void_t*); +#else + Void_t *avail; + static Vmuchar_t *min = (Vmuchar_t*)_map_min; + static Vmuchar_t *max = (Vmuchar_t*)_map_max; + + GETPAGESIZE(_Vmpagesize); + size = ROUND(size, _Vmpagesize); + + if(_map_dir == 0 || (min+size) > max) + avail = NIL(Void_t*); + else if(_map_dir > 0) + { avail = (Void_t*)min; + min += size; + } + else + { max -= size; + avail = (Void_t*)max; + } + + return avail; +#endif +} + +#endif diff --git a/src/lib/libast/vmalloc/vmopen.c b/src/lib/libast/vmalloc/vmopen.c new file mode 100644 index 0000000..5f6912f --- /dev/null +++ b/src/lib/libast/vmalloc/vmopen.c @@ -0,0 +1,180 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2012 AT&T Intellectual Property * +* and is 
licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmopen(){} + +#else + +#include "vmhdr.h" + +/* Opening a new region of allocation. +** Note that because of possible exotic memory types, +** all region data must be stored within the space given +** by the discipline. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. +*/ + +/* this structure lives in the top data segment of the region */ +typedef struct _vminit_s +{ union + { Vmdata_t vd; /* root of usable data space */ + Vmuchar_t a[ROUND(sizeof(Vmdata_t),ALIGN)]; + } vd; + union + { Vmalloc_t vm; /* embedded region if needed */ + Vmuchar_t a[ROUND(sizeof(Vmalloc_t),ALIGN)]; + } vm; + union + { Seg_t seg; /* space for segment */ + Vmuchar_t a[ROUND(sizeof(Seg_t),ALIGN)]; + } seg; + Block_t block[16]; /* space for a few blocks */ +} Vminit_t; + +#if __STD_C +Vmalloc_t* vmopen(Vmdisc_t* disc, Vmethod_t* meth, int mode) +#else +Vmalloc_t* vmopen(disc, meth, mode) +Vmdisc_t* disc; /* discipline to get segments */ +Vmethod_t* meth; /* method to manage space */ +int mode; /* type of region */ +#endif +{ + Vmalloc_t *vm, *vmp, vmproto; + Vmdata_t *vd; + Vminit_t *init; + size_t algn, size, incr; + Block_t *bp, *np; + Seg_t *seg; + Vmuchar_t *addr; + int rv; + + if(!meth || !disc || !disc->memoryf ) + return NIL(Vmalloc_t*); + + GETPAGESIZE(_Vmpagesize); + + vmp = &vmproto; /* avoid memory allocation here! 
*/ + memset(vmp, 0, sizeof(Vmalloc_t)); + memcpy(&vmp->meth, meth, sizeof(Vmethod_t)); + vmp->disc = disc; + + mode &= VM_FLAGS; /* start with user-settable flags */ + size = 0; + + if(disc->exceptf) + { addr = NIL(Vmuchar_t*); + if((rv = (*disc->exceptf)(vmp,VM_OPEN,(Void_t*)(&addr),disc)) < 0) + return NIL(Vmalloc_t*); + else if(rv == 0 ) + { if(addr) /* vm itself is in memory from disc->memoryf */ + mode |= VM_MEMORYF; + } + else if(rv > 0) /* the data section is being restored */ + { if(!(init = (Vminit_t*)addr) ) + return NIL(Vmalloc_t*); + size = -1; /* to tell that addr was not from disc->memoryf */ + vd = &init->vd.vd; /**/ASSERT(VLONG(vd)%ALIGN == 0); + goto done; + } + } + + /* make sure vd->incr is properly rounded and get initial memory */ + incr = disc->round <= 0 ? _Vmpagesize : disc->round; + incr = MULTIPLE(incr,ALIGN); + size = ROUND(sizeof(Vminit_t),incr); /* get initial memory */ + if(!(addr = (Vmuchar_t*)(*disc->memoryf)(vmp, NIL(Void_t*), 0, size, disc)) ) + return NIL(Vmalloc_t*); + memset(addr, 0, size); + + /* initialize region data */ + algn = (size_t)(VLONG(addr)%ALIGN); + init = (Vminit_t*)(addr + (algn ? 
ALIGN-algn : 0)); /**/ASSERT(VLONG(init)%ALIGN == 0); + vd = &init->vd.vd; /**/ASSERT(VLONG(vd)%ALIGN == 0); + vd->mode = mode | meth->meth; + vd->incr = incr; + vd->pool = 0; + vd->free = vd->wild = NIL(Block_t*); + + if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE)) + { int k; + vd->root = NIL(Block_t*); + for(k = S_TINY-1; k >= 0; --k) + TINY(vd)[k] = NIL(Block_t*); + for(k = S_CACHE; k >= 0; --k) + CACHE(vd)[k] = NIL(Block_t*); + } + + vd->seg = &init->seg.seg; /**/ ASSERT(VLONG(vd->seg)%ALIGN == 0); + seg = vd->seg; + seg->next = NIL(Seg_t*); + seg->vmdt = vd; + seg->addr = (Void_t*)addr; + seg->extent = size; + seg->baddr = addr + size; + seg->size = size; /* Note: this size is unusually large to mark seg as + the root segment and can be freed only at closing */ + seg->free = NIL(Block_t*); + + /* make a data block out of the remainder */ + bp = SEGBLOCK(seg); + SEG(bp) = seg; + size = ((seg->baddr - (Vmuchar_t*)bp)/ALIGN) * ALIGN; /**/ ASSERT(size > 0); + SIZE(bp) = size - 2*sizeof(Head_t); /**/ASSERT(SIZE(bp) > 0 && (SIZE(bp)%ALIGN) == 0); + SELF(bp) = bp; + /**/ ASSERT(SIZE(bp)%ALIGN == 0); + /**/ ASSERT(VLONG(bp)%ALIGN == 0); + + /* make a fake header for next block in case of noncontiguous segments */ + np = NEXT(bp); + SEG(np) = seg; + SIZE(np) = BUSY|PFREE; + + if(vd->mode&(VM_MTLAST|VM_MTPOOL)) + seg->free = bp; + else vd->wild = bp; + +done: /* now make the region handle */ + if(vd->mode&VM_MEMORYF) + vm = &init->vm.vm; + else if(!(vm = vmalloc(Vmheap, sizeof(Vmalloc_t))) ) + { if(size > 0) + (void)(*disc->memoryf)(vmp, addr, size, 0, disc); + return NIL(Vmalloc_t*); + } + memcpy(vm, vmp, sizeof(Vmalloc_t)); + vm->data = vd; + + if(disc->exceptf) /* signaling that vmopen succeeded */ + (void)(*disc->exceptf)(vm, VM_ENDOPEN, NIL(Void_t*), disc); + + /* add to the linked list of regions */ + _vmlock(NIL(Vmalloc_t*), 1); + vm->next = Vmheap->next; Vmheap->next = vm; + _vmlock(NIL(Vmalloc_t*), 0); + + return vm; +} + +#endif diff --git 
a/src/lib/libast/vmalloc/vmpool.c b/src/lib/libast/vmalloc/vmpool.c new file mode 100644 index 0000000..2c65a66 --- /dev/null +++ b/src/lib/libast/vmalloc/vmpool.c @@ -0,0 +1,316 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmpool(){} + +#else + +#include "vmhdr.h" + +#define POOLFREE 0x55555555L /* block free indicator */ + +/* Method for pool allocation. +** All elements in a pool have the same size. +** The following fields of Vmdata_t are used as: +** pool: size of a block. +** free: list of free blocks. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/ + +#if __STD_C +static Void_t* poolalloc(Vmalloc_t* vm, reg size_t size, int local) +#else +static Void_t* poolalloc(vm, size, local ) +Vmalloc_t* vm; +reg size_t size; +int local; +#endif +{ + reg Block_t *tp, *next; + reg size_t s; + reg Seg_t *seg; + reg Vmdata_t *vd = vm->data; + + if(size <= 0) + return NIL(Void_t*); + + if(size != vd->pool) + { if(vd->pool <= 0) + vd->pool = size; + else return NIL(Void_t*); + } + + SETLOCK(vm, local); + + if((tp = vd->free) ) /* there is a ready free block */ + { vd->free = SEGLINK(tp); + goto done; + } + + size = ROUND(size,ALIGN); + + /* look thru all segments for a suitable free block */ + for(tp = NIL(Block_t*), seg = vd->seg; seg; seg = seg->next) + { if((tp = seg->free) && + (s = (SIZE(tp) & ~BITS) + sizeof(Head_t)) >= size ) + goto got_blk; + } + + if((tp = (*_Vmextend)(vm,ROUND(size,vd->incr),NIL(Vmsearch_f))) ) + { s = (SIZE(tp) & ~BITS) + sizeof(Head_t); + seg = SEG(tp); + goto got_blk; + } + else goto done; + +got_blk: /* if get here, (tp, s, seg) must be well-defined */ + next = (Block_t*)((Vmuchar_t*)tp+size); + if((s -= size) <= (size + sizeof(Head_t)) ) + { for(; s >= size; s -= size) + { SIZE(next) = POOLFREE; + SEGLINK(next) = vd->free; + vd->free = next; + next = (Block_t*)((Vmuchar_t*)next + size); + } + seg->free = NIL(Block_t*); + } + else + { SIZE(next) = s - sizeof(Head_t); + SEG(next) = seg; + seg->free = next; + } + +done: + if(!local && (vd->mode&VM_TRACE) && _Vmtrace && tp) + (*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)tp,vd->pool,0); + + CLRLOCK(vm, local); + + return (Void_t*)tp; +} + +#if __STD_C +static long pooladdr(Vmalloc_t* vm, reg Void_t* addr, int local) +#else +static long pooladdr(vm, addr, local) +Vmalloc_t* vm; +reg Void_t* addr; +int local; +#endif +{ + Block_t *bp, *tp; + Vmuchar_t *laddr, *baddr; + size_t size; + Seg_t *seg; + long offset; + Vmdata_t* vd = vm->data; + + SETLOCK(vm, local); + + offset = -1L; + for(seg = vd->seg; seg; seg = seg->next) + { laddr = 
(Vmuchar_t*)SEGBLOCK(seg); + baddr = seg->baddr-sizeof(Head_t); + if((Vmuchar_t*)addr < laddr || (Vmuchar_t*)addr >= baddr) + continue; + + /* the block that has this address */ + size = ROUND(vd->pool,ALIGN); + tp = (Block_t*)(laddr + (((Vmuchar_t*)addr-laddr)/size)*size ); + + /* see if this block has been freed */ + if(SIZE(tp) == POOLFREE) /* may be a coincidence - make sure */ + for(bp = vd->free; bp; bp = SEGLINK(bp)) + if(bp == tp) + goto done; + + offset = (Vmuchar_t*)addr - (Vmuchar_t*)tp; + goto done; + } + +done : + CLRLOCK(vm, local); + + return offset; +} + +#if __STD_C +static int poolfree(reg Vmalloc_t* vm, reg Void_t* data, int local ) +#else +static int poolfree(vm, data, local) +Vmalloc_t* vm; +Void_t* data; +int local; +#endif +{ + Block_t *bp; + Vmdata_t *vd = vm->data; + + if(!data) + return 0; + if(vd->pool <= 0) + return -1; + + SETLOCK(vm, local); + + /**/ASSERT(KPVADDR(vm, data, pooladdr) == 0); + bp = (Block_t*)data; + SIZE(bp) = POOLFREE; + SEGLINK(bp) = vd->free; + vd->free = bp; + + if(!local && (vd->mode&VM_TRACE) && _Vmtrace) + (*_Vmtrace)(vm, (Vmuchar_t*)data, NIL(Vmuchar_t*), vd->pool, 0); + + CLRLOCK(vm, local); + + return 0; +} + +#if __STD_C +static Void_t* poolresize(Vmalloc_t* vm, Void_t* data, size_t size, int type, int local ) +#else +static Void_t* poolresize(vm, data, size, type, local ) +Vmalloc_t* vm; +Void_t* data; +size_t size; +int type; +int local; +#endif +{ + Vmdata_t *vd = vm->data; + + NOTUSED(type); + + if(!data) + { data = poolalloc(vm, size, local); + if(data && (type&VM_RSZERO) ) + memset(data, 0, size); + return data; + } + if(size == 0) + { (void)poolfree(vm, data, local); + return NIL(Void_t*); + } + if(size != vd->pool) + return NIL(Void_t*); + + SETLOCK(vm, local); + + /**/ASSERT(KPVADDR(vm, data, pooladdr) == 0); + + if(!local && (vd->mode&VM_TRACE) && _Vmtrace) + (*_Vmtrace)(vm, (Vmuchar_t*)data, (Vmuchar_t*)data, size, 0); + + CLRLOCK(vm, local); + + return data; +} + +#if __STD_C +static long 
poolsize(Vmalloc_t* vm, Void_t* addr, int local) +#else +static long poolsize(vm, addr, local) +Vmalloc_t* vm; +Void_t* addr; +int local; +#endif +{ + return pooladdr(vm, addr, local) == 0 ? (long)vm->data->pool : -1L; +} + +#if __STD_C +static int poolcompact(Vmalloc_t* vm, int local) +#else +static int poolcompact(vm, local) +Vmalloc_t* vm; +int local; +#endif +{ + ssize_t s; + Block_t *fp; + Seg_t *seg, *next; + Vmdata_t *vd = vm->data; + + SETLOCK(vm, local); + + for(seg = vd->seg; seg; seg = next) + { next = seg->next; + + if(!(fp = seg->free)) + continue; + + seg->free = NIL(Block_t*); + if(seg->size == (s = SIZE(fp)&~BITS)) + s = seg->extent; + else s += sizeof(Head_t); + + if((*_Vmtruncate)(vm,seg,s,1) == s) + seg->free = fp; + } + + if(!local && (vd->mode&VM_TRACE) && _Vmtrace) + (*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0); + + CLRLOCK(vm, local); + + return 0; +} + +#if __STD_C +static Void_t* poolalign(Vmalloc_t* vm, size_t size, size_t align, int local) +#else +static Void_t* poolalign(vm, size, align, local) +Vmalloc_t* vm; +size_t size; +size_t align; +int local; +#endif +{ + NOTUSED(vm); + NOTUSED(size); + NOTUSED(align); + return NIL(Void_t*); +} + +/* Public interface */ +static Vmethod_t _Vmpool = +{ + poolalloc, + poolresize, + poolfree, + pooladdr, + poolsize, + poolcompact, + poolalign, + VM_MTPOOL +}; + +__DEFINE__(Vmethod_t*,Vmpool,&_Vmpool); + +#ifdef NoF +NoF(vmpool) +#endif + +#endif diff --git a/src/lib/libast/vmalloc/vmprivate.c b/src/lib/libast/vmalloc/vmprivate.c new file mode 100644 index 0000000..28f3f70 --- /dev/null +++ b/src/lib/libast/vmalloc/vmprivate.c @@ -0,0 +1,292 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2012 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* 
http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmprivate(){} + +#else + +#include "vmhdr.h" + +static char* Version = "\n@(#)$Id: Vmalloc (AT&T Labs - Research) 2011-08-08 $\0\n"; + + +/* Private code used in the vmalloc library +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. +*/ + +/* Get more memory for a region */ +#if __STD_C +static Block_t* _vmextend(reg Vmalloc_t* vm, size_t size, Vmsearch_f searchf ) +#else +static Block_t* _vmextend(vm, size, searchf ) +reg Vmalloc_t* vm; /* region to increase in size */ +size_t size; /* desired amount of space */ +Vmsearch_f searchf; /* tree search function */ +#endif +{ + reg size_t s; + reg Seg_t* seg; + reg Block_t *bp, *tp, *np; + reg Vmuchar_t* addr = (Vmuchar_t*)Version; /* shut compiler warning */ + reg Vmdata_t* vd = vm->data; + + GETPAGESIZE(_Vmpagesize); + + if(vd->incr <= 0) /* this is just _Vmheap on the first call */ + vd->incr = _Vmpagesize*sizeof(Void_t*); + + /* Get slightly more for administrative data */ + s = size + sizeof(Seg_t) + sizeof(Block_t) + sizeof(Head_t) + 2*ALIGN; + if(s <= size) /* size was too large and we have wrapped around */ + return NIL(Block_t*); + if((size = ROUND(s,vd->incr)) < s) + return NIL(Block_t*); + + /* increase the rounding factor to reduce # of future extensions */ + if(size > 2*vd->incr && vm->disc->round < vd->incr) + vd->incr *= 2; + + if(!(seg = vd->seg) ) /* there is no current segment */ + addr = NIL(Vmuchar_t*); + else /* see if we can extend the current segment */ + { addr = (Vmuchar_t*)(*vm->disc->memoryf)(vm,seg->addr,seg->extent, + 
seg->extent+size,vm->disc); + if(addr == (Vmuchar_t*)seg->addr) + addr += seg->extent; /* seg successfully extended */ + else seg = NIL(Seg_t*); /* a new segment was created */ + } + + if(!addr) /* create a new segment */ + { if(!(addr = (Vmuchar_t*)(*vm->disc->memoryf)(vm, NIL(Void_t*), 0, size, vm->disc)) ) + { if(vm->disc->exceptf) /* announce that no more memory is available */ + { + CLRLOCK(vm, 0); + (void)(*vm->disc->exceptf)(vm, VM_NOMEM, (Void_t*)size, vm->disc); + SETLOCK(vm, 0); + } + return NIL(Block_t*); + } + } + + if(seg) + { /* extending current segment */ + bp = BLOCK(seg->baddr); + + if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) ) + { /**/ ASSERT((SIZE(bp)&~BITS) == 0); + /**/ ASSERT(SEG(bp) == seg); + + if(!ISPFREE(SIZE(bp)) ) + SIZE(bp) = size - sizeof(Head_t); + else + { /**/ ASSERT(searchf); + bp = LAST(bp); + if(bp == vd->wild) + vd->wild = NIL(Block_t*); + else REMOVE(vd,bp,INDEX(SIZE(bp)),tp,(*searchf)); + SIZE(bp) += size; + } + } + else + { if(seg->free) + { bp = seg->free; + seg->free = NIL(Block_t*); + SIZE(bp) += size; + } + else + { SEG(bp) = seg; + SIZE(bp) = size - sizeof(Head_t); + } + } + + seg->size += size; + seg->extent += size; + seg->baddr += size; + } + else + { /* creating a new segment */ + reg Seg_t *sp, *lastsp; + + if((s = (size_t)(VLONG(addr)%ALIGN)) != 0) + addr += ALIGN-s; + + seg = (Seg_t*)addr; + seg->vmdt = vd; + seg->addr = (Void_t*)(addr - (s ? ALIGN-s : 0)); + seg->extent = size; + seg->baddr = addr + size - (s ? 2*ALIGN : 0); + seg->free = NIL(Block_t*); + bp = SEGBLOCK(seg); + SEG(bp) = seg; + SIZE(bp) = seg->baddr - (Vmuchar_t*)bp - 2*sizeof(Head_t); + + /* NOTE: for Vmbest, Vmdebug and Vmprofile the region's segment list + is reversely ordered by addresses. This is so that we can easily + check for the wild block. 
+ */ + lastsp = NIL(Seg_t*); + sp = vd->seg; + if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE)) + for(; sp; lastsp = sp, sp = sp->next) + if(seg->addr > sp->addr) + break; + seg->next = sp; + if(lastsp) + lastsp->next = seg; + else vd->seg = seg; + + seg->size = SIZE(bp); + } + + /* make a fake header for possible segmented memory */ + tp = NEXT(bp); + SEG(tp) = seg; + SIZE(tp) = BUSY; + + /* see if the wild block is still wild */ + if((tp = vd->wild) && (seg = SEG(tp)) != vd->seg) + { np = NEXT(tp); + CLRPFREE(SIZE(np)); + if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) ) + { SIZE(tp) |= BUSY|JUNK; + LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))]; + CACHE(vd)[C_INDEX(SIZE(tp))] = tp; + } + else seg->free = tp; + + vd->wild = NIL(Block_t*); + } + + return bp; +} + +/* Truncate a segment if possible */ +#if __STD_C +static ssize_t _vmtruncate(Vmalloc_t* vm, Seg_t* seg, size_t size, int exact) +#else +static ssize_t _vmtruncate(vm, seg, size, exact) +Vmalloc_t* vm; /* containing region */ +Seg_t* seg; /* the one to be truncated */ +size_t size; /* amount of free space */ +int exact; +#endif +{ + reg Void_t* caddr; + reg Seg_t* last; + reg Vmdata_t* vd = vm->data; + reg Vmemory_f memoryf = vm->disc->memoryf; + + caddr = seg->addr; + + if(size < seg->size) + { reg ssize_t less; + + if(exact) + less = size; + else /* keep truncated amount to discipline requirements */ + { if((less = vm->disc->round) <= 0) + less = _Vmpagesize; + less = (size/less)*less; + less = (less/vd->incr)*vd->incr; + if(less > 0 && (ssize_t)size > less && (size-less) < sizeof(Block_t) ) + less = less <= (ssize_t)vd->incr ? 
0 : less - vd->incr; + } + + if(less <= 0 || + (*memoryf)(vm,caddr,seg->extent,seg->extent-less,vm->disc) != caddr) + return 0; + + seg->extent -= less; + seg->size -= less; + seg->baddr -= less; + SEG(BLOCK(seg->baddr)) = seg; + SIZE(BLOCK(seg->baddr)) = BUSY; + + return less; + } + else + { /* unlink segment from region */ + if(seg == vd->seg) + { vd->seg = seg->next; + last = NIL(Seg_t*); + } + else + { for(last = vd->seg; last->next != seg; last = last->next) + ; + last->next = seg->next; + } + + /* now delete it */ + if((*memoryf)(vm,caddr,seg->extent,0,vm->disc) == caddr) + return size; + + /* space reduction failed, reinsert segment */ + if(last) + { seg->next = last->next; + last->next = seg; + } + else + { seg->next = vd->seg; + vd->seg = seg; + } + return 0; + } +} + +int _vmlock(Vmalloc_t* vm, int locking) +{ + int k; + + if(!vm) /* some sort of global locking */ + { if(!locking) /* turn off lock */ + asolock(&_Vmlock, 1, ASO_UNLOCK); + else asolock(&_Vmlock, 1, ASO_SPINLOCK); + } + else if(vm->data->mode&VM_SHARE) + { if(!locking) /* turning off the lock */ + asolock(&vm->data->lock, 1, ASO_UNLOCK); + else asolock(&vm->data->lock, 1, ASO_SPINLOCK); + } + else + { if(!locking) + vm->data->lock = 0; + else vm->data->lock = 1; + } + return 0; +} + + +/* Externally visible names but local to library */ +Vmextern_t _Vmextern = +{ _vmextend, /* _Vmextend */ + _vmtruncate, /* _Vmtruncate */ + 0, /* _Vmpagesize */ + NIL(char*(*)_ARG_((char*,const char*,int))), /* _Vmstrcpy */ + NIL(char*(*)_ARG_((Vmulong_t,int))), /* _Vmitoa */ + NIL(void(*)_ARG_((Vmalloc_t*, + Vmuchar_t*,Vmuchar_t*,size_t,size_t))), /* _Vmtrace */ + NIL(void(*)_ARG_((Vmalloc_t*))) /* _Vmpfclose */ +}; + +#endif diff --git a/src/lib/libast/vmalloc/vmprofile.c b/src/lib/libast/vmalloc/vmprofile.c new file mode 100644 index 0000000..43191ed --- /dev/null +++ b/src/lib/libast/vmalloc/vmprofile.c @@ -0,0 +1,709 @@ +/*********************************************************************** +* * +* 
This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmprofile(){} + +#else + +#include "vmhdr.h" + +/* Method to profile space usage. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 03/23/94. +*/ + +#define PFHASH(pf) ((pf)->data.data.hash) +#define PFVM(pf) ((pf)->data.data.vm) +#define PFFILE(pf) ((pf)->data.data.fm.file) +#define PFLINE(pf) ((pf)->line) +#define PFNAME(pf) ((pf)->data.f) +#define PFNALLOC(pf) ((pf)->data.data.nalloc) +#define PFALLOC(pf) ((pf)->data.data.alloc) +#define PFNFREE(pf) ((pf)->data.data.nfree) +#define PFFREE(pf) ((pf)->data.data.free) +#define PFREGION(pf) ((pf)->data.data.region) +#define PFMAX(pf) ((pf)->data.data.fm.max) + +typedef struct _pfdata_s Pfdata_t; +struct _pfdata_s +{ Vmulong_t hash; /* hash value */ + union + { char* file; /* file name */ + Vmulong_t max; /* max busy space for region */ + } fm; + Vmalloc_t* vm; /* region alloc from */ + Pfobj_t* region; /* pointer to region record */ + Vmulong_t nalloc; /* number of alloc calls */ + Vmulong_t alloc; /* amount allocated */ + Vmulong_t nfree; /* number of free calls */ + Vmulong_t free; /* amount freed */ +}; +struct _pfobj_s +{ Pfobj_t* next; /* next in linked list */ + int line; /* line #, 0 for name holder */ + union + { + Pfdata_t data; + char f[1]; /* actual file name */ + } data; +}; + +static Pfobj_t** 
Pftable; /* hash table */ +#define PFTABLE 1019 /* table size */ +static Vmalloc_t* Vmpf; /* heap for our own use */ + +#if __STD_C +static Pfobj_t* pfsearch(Vmalloc_t* vm, char* file, int line) +#else +static Pfobj_t* pfsearch(vm, file, line) +Vmalloc_t* vm; /* region allocating from */ +char* file; /* the file issuing the allocation request */ +int line; /* line number */ +#endif +{ + reg Pfobj_t *pf, *last; + reg Vmulong_t h; + reg int n; + reg char *cp; + + if(!Vmpf && !(Vmpf = vmopen(Vmdcheap,Vmpool,0)) ) + return NIL(Pfobj_t*); + + /* make hash table; PFTABLE'th slot hold regions' records */ + if(!Pftable) + { if(!(Pftable = (Pfobj_t**)vmalloc(Vmheap,(PFTABLE+1)*sizeof(Pfobj_t*))) ) + return NIL(Pfobj_t*); + for(n = PFTABLE; n >= 0; --n) + Pftable[n] = NIL(Pfobj_t*); + } + + /* see if it's there with a combined hash value of vm,file,line */ + h = line + (((Vmulong_t)vm)>>4); + for(cp = file; *cp; ++cp) + h += (h<<7) + ((*cp)&0377) + 987654321L; + n = (int)(h%PFTABLE); + for(last = NIL(Pfobj_t*), pf = Pftable[n]; pf; last = pf, pf = pf->next) + if(PFLINE(pf) == line && PFVM(pf) == vm && strcmp(PFFILE(pf),file) == 0) + break; + + /* insert if not there yet */ + if(!pf) + { reg Pfobj_t* fn; + reg Pfobj_t* pfvm; + reg Vmulong_t hn; + + /* first get/construct the file name slot */ + hn = 0; + for(cp = file; *cp; ++cp) + hn += (hn<<7) + ((*cp)&0377) + 987654321L; + n = (int)(hn%PFTABLE); + for(fn = Pftable[n]; fn; fn = fn->next) + if(PFLINE(fn) < 0 && strcmp(PFNAME(fn),file) == 0) + break; + if(!fn) + { reg size_t s; + s = sizeof(Pfobj_t) - sizeof(Pfdata_t) + strlen(file) + 1; + if(!(fn = (Pfobj_t*)vmalloc(Vmheap,s)) ) + return NIL(Pfobj_t*); + fn->next = Pftable[n]; + Pftable[n] = fn; + PFLINE(fn) = -1; + strcpy(PFNAME(fn),file); + } + + /* get region record; note that these are ordered by vm */ + last = NIL(Pfobj_t*); + for(pfvm = Pftable[PFTABLE]; pfvm; last = pfvm, pfvm = pfvm->next) + if(vm >= PFVM(pfvm)) + break; + if(!pfvm || PFVM(pfvm) > vm) + { if(!(pfvm = 
(Pfobj_t*)vmalloc(Vmpf,sizeof(Pfobj_t))) ) + return NIL(Pfobj_t*); + if(last) + { pfvm->next = last->next; + last->next = pfvm; + } + else + { pfvm->next = Pftable[PFTABLE]; + Pftable[PFTABLE] = pfvm; + } + PFNALLOC(pfvm) = PFALLOC(pfvm) = 0; + PFNFREE(pfvm) = PFFREE(pfvm) = 0; + PFMAX(pfvm) = 0; + PFVM(pfvm) = vm; + PFLINE(pfvm) = 0; + } + + if(!(pf = (Pfobj_t*)vmalloc(Vmpf,sizeof(Pfobj_t))) ) + return NIL(Pfobj_t*); + n = (int)(h%PFTABLE); + pf->next = Pftable[n]; + Pftable[n] = pf; + PFLINE(pf) = line; + PFFILE(pf) = PFNAME(fn); + PFREGION(pf) = pfvm; + PFVM(pf) = vm; + PFNALLOC(pf) = 0; + PFALLOC(pf) = 0; + PFNFREE(pf) = 0; + PFFREE(pf) = 0; + PFHASH(pf) = h; + } + else if(last) /* do a move-to-front */ + { last->next = pf->next; + pf->next = Pftable[n]; + Pftable[n] = pf; + } + + return pf; +} + +#if __STD_C +static void pfclose(Vmalloc_t* vm) +#else +static void pfclose(vm) +Vmalloc_t* vm; +#endif +{ + reg int n; + reg Pfobj_t *pf, *next, *last; + + /* free all records related to region vm */ + for(n = PFTABLE; n >= 0; --n) + { for(last = NIL(Pfobj_t*), pf = Pftable[n]; pf; ) + { next = pf->next; + + if(PFLINE(pf) >= 0 && PFVM(pf) == vm) + { if(last) + last->next = next; + else Pftable[n] = next; + vmfree(Vmpf,pf); + } + else last = pf; + + pf = next; + } + } +} + +#if __STD_C +static void pfsetinfo(Vmalloc_t* vm, Vmuchar_t* data, size_t size, char* file, int line) +#else +static void pfsetinfo(vm, data, size, file, line) +Vmalloc_t* vm; +Vmuchar_t* data; +size_t size; +char* file; +int line; +#endif +{ + reg Pfobj_t* pf; + reg Vmulong_t s; + + /* let vmclose knows that there are records for region vm */ + _Vmpfclose = pfclose; + + if(!file || line <= 0) + { file = ""; + line = 0; + } + + if((pf = pfsearch(vm,file,line)) ) + { PFALLOC(pf) += size; + PFNALLOC(pf) += 1; + } + PFOBJ(data) = pf; + PFSIZE(data) = size; + + if(pf) + { /* update region statistics */ + pf = PFREGION(pf); + PFALLOC(pf) += size; + PFNALLOC(pf) += 1; + if((s = PFALLOC(pf) - PFFREE(pf)) 
> PFMAX(pf) ) + PFMAX(pf) = s; + } +} + +/* sort by file names and line numbers */ +#if __STD_C +static Pfobj_t* pfsort(Pfobj_t* pf) +#else +static Pfobj_t* pfsort(pf) +Pfobj_t* pf; +#endif +{ + reg Pfobj_t *one, *two, *next; + reg int cmp; + + if(!pf->next) + return pf; + + /* partition to two equal size lists */ + one = two = NIL(Pfobj_t*); + while(pf) + { next = pf->next; + pf->next = one; + one = pf; + + if((pf = next) ) + { next = pf->next; + pf->next = two; + two = pf; + pf = next; + } + } + + /* sort and merge the lists */ + one = pfsort(one); + two = pfsort(two); + for(pf = next = NIL(Pfobj_t*);; ) + { /* make sure that the "<>" file comes first */ + if(PFLINE(one) == 0 && PFLINE(two) == 0) + cmp = PFVM(one) > PFVM(two) ? 1 : -1; + else if(PFLINE(one) == 0) + cmp = -1; + else if(PFLINE(two) == 0) + cmp = 1; + else if((cmp = strcmp(PFFILE(one),PFFILE(two))) == 0) + { cmp = PFLINE(one) - PFLINE(two); + if(cmp == 0) + cmp = PFVM(one) > PFVM(two) ? 1 : -1; + } + + if(cmp < 0) + { if(!pf) + pf = one; + else next->next = one; + next = one; + if(!(one = one->next) ) + { if(two) + next->next = two; + return pf; + } + } + else + { if(!pf) + pf = two; + else next->next = two; + next = two; + if(!(two = two->next) ) + { if(one) + next->next = one; + return pf; + } + } + } +} + +#if __STD_C +static char* pfsummary(char* buf, Vmulong_t na, Vmulong_t sa, + Vmulong_t nf, Vmulong_t sf, Vmulong_t max, Vmulong_t size) +#else +static char* pfsummary(buf, na, sa, nf, sf, max, size) +char* buf; +Vmulong_t na; +Vmulong_t sa; +Vmulong_t nf; +Vmulong_t sf; +Vmulong_t max; +Vmulong_t size; +#endif +{ + buf = (*_Vmstrcpy)(buf,"n_alloc", '='); + buf = (*_Vmstrcpy)(buf, (*_Vmitoa)(na,-1), ':'); + buf = (*_Vmstrcpy)(buf,"n_free", '='); + buf = (*_Vmstrcpy)(buf, (*_Vmitoa)(nf,-1), ':'); + buf = (*_Vmstrcpy)(buf,"s_alloc", '='); + buf = (*_Vmstrcpy)(buf, (*_Vmitoa)(sa,-1), ':'); + buf = (*_Vmstrcpy)(buf,"s_free", '='); + buf = (*_Vmstrcpy)(buf, (*_Vmitoa)(sf,-1), ':'); + if(max > 0) + { 
buf = (*_Vmstrcpy)(buf,"max_busy", '='); + buf = (*_Vmstrcpy)(buf, (*_Vmitoa)(max,-1), ':'); + buf = (*_Vmstrcpy)(buf,"extent", '='); + buf = (*_Vmstrcpy)(buf, (*_Vmitoa)(size,-1), ':'); + } + *buf++ = '\n'; + + return buf; +} + +/* print profile data */ +#if __STD_C +int vmprofile(Vmalloc_t* vm, int fd) +#else +int vmprofile(vm, fd) +Vmalloc_t* vm; +int fd; +#endif +{ + reg Pfobj_t *pf, *list, *next, *last; + reg int n; + reg Vmulong_t nalloc, alloc, nfree, free; + reg Seg_t *seg; + char buf[1024], *bufp, *endbuf; +#define INITBUF() (bufp = buf, endbuf = buf+sizeof(buf)-128) +#define CHKBUF() (bufp >= endbuf ? (write(fd,buf,bufp-buf), bufp=buf) : bufp) +#define FLSBUF() (bufp > buf ? write(fd,buf,bufp-buf) : 0) + + if(fd < 0) + return -1; + + /* initialize functions from vmtrace.c that we use below */ + if((n = vmtrace(-1)) >= 0) + vmtrace(n); + + alloc = free = nalloc = nfree = 0; + list = NIL(Pfobj_t*); + for(n = PFTABLE-1; n >= 0; --n) + { for(pf = Pftable[n], last = NIL(Pfobj_t*); pf; ) + { next = pf->next; + + if(PFLINE(pf) < 0 || (vm && vm != PFVM(pf)) ) + { last = pf; + goto next_pf; + } + + /* remove from hash table */ + if(last) + last->next = next; + else Pftable[n] = next; + + /* put on output list */ + pf->next = list; + list = pf; + nalloc += PFNALLOC(pf); + alloc += PFALLOC(pf); + nfree += PFNFREE(pf); + free += PFFREE(pf); + + next_pf: + pf = next; + } + } + + INITBUF(); + bufp = (*_Vmstrcpy)(bufp,"ALLOCATION USAGE SUMMARY", ':'); + bufp = pfsummary(bufp,nalloc,alloc,nfree,free,0,0); + + /* print regions' summary data */ + for(pf = Pftable[PFTABLE]; pf; pf = pf->next) + { if(vm && PFVM(pf) != vm) + continue; + alloc = 0; + for(seg = PFVM(pf)->data->seg; seg; seg = seg->next) + alloc += seg->extent; + bufp = (*_Vmstrcpy)(bufp,"region", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(PFVM(pf)),0), ':'); + bufp = pfsummary(bufp,PFNALLOC(pf),PFALLOC(pf), + PFNFREE(pf),PFFREE(pf),PFMAX(pf),alloc); + } + + /* sort then output detailed profile */ + list 
= pfsort(list); + for(pf = list; pf; ) + { /* compute summary for file */ + alloc = free = nalloc = nfree = 0; + for(last = pf; last; last = last->next) + { if(strcmp(PFFILE(last),PFFILE(pf)) != 0) + break; + nalloc += PFNALLOC(pf); + alloc += PFALLOC(last); + nfree += PFNFREE(last); + free += PFFREE(last); + } + CHKBUF(); + bufp = (*_Vmstrcpy)(bufp,"file",'='); + bufp = (*_Vmstrcpy)(bufp,PFFILE(pf)[0] ? PFFILE(pf) : "<>" ,':'); + bufp = pfsummary(bufp,nalloc,alloc,nfree,free,0,0); + + while(pf != last) /* detailed data */ + { CHKBUF(); + bufp = (*_Vmstrcpy)(bufp,"\tline",'='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(PFLINE(pf),-1), ':'); + bufp = (*_Vmstrcpy)(bufp, "region", '='); + bufp = (*_Vmstrcpy)(bufp, (*_Vmitoa)(VLONG(PFVM(pf)),0), ':'); + bufp = pfsummary(bufp,PFNALLOC(pf),PFALLOC(pf), + PFNFREE(pf),PFFREE(pf),0,0); + + /* reinsert into hash table */ + next = pf->next; + n = (int)(PFHASH(pf)%PFTABLE); + pf->next = Pftable[n]; + Pftable[n] = pf; + pf = next; + } + } + + FLSBUF(); + return 0; +} + +#if __STD_C +static Void_t* pfalloc(Vmalloc_t* vm, size_t size, int local) +#else +static Void_t* pfalloc(vm, size, local) +Vmalloc_t* vm; +size_t size; +int local; +#endif +{ + reg size_t s; + reg Void_t *data; + reg char *file; + reg int line; + reg Void_t *func; + reg Vmdata_t *vd = vm->data; + + VMFLF(vm,file,line,func); + + SETLOCK(vm, local); + + s = ROUND(size,ALIGN) + PF_EXTRA; + if((data = KPVALLOC(vm,s,(*(Vmbest->allocf))) ) ) + { pfsetinfo(vm,(Vmuchar_t*)data,size,file,line); + + if(!local && (vd->mode&VM_TRACE) && _Vmtrace) + { vm->file = file; vm->line = line; vm->func = func; + (*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)data,size,0); + } + } + + CLRLOCK(vm, local); + + return data; +} + +#if __STD_C +static int pffree(Vmalloc_t* vm, Void_t* data, int local) +#else +static int pffree(vm, data, local) +Vmalloc_t* vm; +Void_t* data; +int local; +#endif +{ + reg Pfobj_t *pf; + reg size_t s; + reg char *file; + reg int line, rv; + reg Void_t *func; + reg 
Vmdata_t *vd = vm->data; + + VMFLF(vm,file,line,func); + + if(!data) + return 0; + + SETLOCK(vm,local); + + /**/ASSERT(KPVADDR(vm, data, Vmbest->addrf) == 0 ); + pf = PFOBJ(data); + s = PFSIZE(data); + if(pf) + { PFNFREE(pf) += 1; + PFFREE(pf) += s; + pf = PFREGION(pf); + PFNFREE(pf) += 1; + PFFREE(pf) += s; + } + + if(!local && (vd->mode&VM_TRACE) && _Vmtrace) + { vm->file = file; vm->line = line; vm->func = func; + (*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*),s,0); + } + + rv = KPVFREE((vm), (Void_t*)data, (*Vmbest->freef)); + + CLRLOCK(vm, local); + + return rv; +} + +#if __STD_C +static Void_t* pfresize(Vmalloc_t* vm, Void_t* data, size_t size, int type, int local) +#else +static Void_t* pfresize(vm, data, size, type, local) +Vmalloc_t* vm; +Void_t* data; +size_t size; +int type; +int local; +#endif +{ + reg Pfobj_t *pf; + reg size_t s, news; + reg Void_t *addr; + reg char *file; + reg int line; + reg Void_t *func; + reg size_t oldsize; + reg Vmdata_t *vd = vm->data; + + if(!data) + { addr = pfalloc(vm, size, local); + if(addr && (type&VM_RSZERO) ) + memset(addr, 0, size); + return addr; + } + if(size == 0) + { (void)pffree(vm, data, local); + return NIL(Void_t*); + } + + VMFLF(vm,file,line,func); + + SETLOCK(vm, local); + + /**/ASSERT(KPVADDR(vm,data,Vmbest->addrf) == 0 ); + pf = PFOBJ(data); + s = oldsize = PFSIZE(data); + + news = ROUND(size,ALIGN) + PF_EXTRA; + if((addr = KPVRESIZE(vm,data,news,(type&~VM_RSZERO),Vmbest->resizef)) ) + { if(pf) + { PFFREE(pf) += s; + PFNFREE(pf) += 1; + pf = PFREGION(pf); + PFFREE(pf) += s; + PFNFREE(pf) += 1; + pfsetinfo(vm,(Vmuchar_t*)addr,size,file,line); + } + + if(!local && (vd->mode&VM_TRACE) && _Vmtrace) + { vm->file = file; vm->line = line; vm->func = func; + (*_Vmtrace)(vm,(Vmuchar_t*)data,(Vmuchar_t*)addr,size,0); + } + } + else if(pf) /* reset old info */ + { PFALLOC(pf) -= s; + PFNALLOC(pf) -= 1; + pf = PFREGION(pf); + PFALLOC(pf) -= s; + PFNALLOC(pf) -= 1; + file = PFFILE(pf); + line = PFLINE(pf); + 
pfsetinfo(vm,(Vmuchar_t*)data,s,file,line); + } + + if(addr && (type&VM_RSZERO) && oldsize < size) + { reg Vmuchar_t *d = (Vmuchar_t*)addr+oldsize, *ed = (Vmuchar_t*)addr+size; + do { *d++ = 0; } while(d < ed); + } + + CLRLOCK(vm, local); + + return addr; +} + +#if __STD_C +static long pfsize(Vmalloc_t* vm, Void_t* addr, int local) +#else +static long pfsize(vm, addr, local) +Vmalloc_t* vm; +Void_t* addr; +int local; +#endif +{ + return (*Vmbest->addrf)(vm, addr, local) != 0 ? -1L : (long)PFSIZE(addr); +} + +#if __STD_C +static long pfaddr(Vmalloc_t* vm, Void_t* addr, int local) +#else +static long pfaddr(vm, addr, local) +Vmalloc_t* vm; +Void_t* addr; +int local; +#endif +{ + return (*Vmbest->addrf)(vm, addr, local); +} + +#if __STD_C +static int pfcompact(Vmalloc_t* vm, int local) +#else +static int pfcompact(vm, local) +Vmalloc_t* vm; +int local; +#endif +{ + return (*Vmbest->compactf)(vm, local); +} + +#if __STD_C +static Void_t* pfalign(Vmalloc_t* vm, size_t size, size_t align, int local) +#else +static Void_t* pfalign(vm, size, align, local) +Vmalloc_t* vm; +size_t size; +size_t align; +int local; +#endif +{ + reg size_t s; + reg Void_t *data; + reg char *file; + reg int line, inuse; + reg Void_t *func; + reg Vmdata_t *vd = vm->data; + + VMFLF(vm,file,line,func); + + SETLOCK(vm, local); + + s = (size <= TINYSIZE ? 
TINYSIZE : ROUND(size,ALIGN)) + PF_EXTRA; + if((data = KPVALIGN(vm,s,align,Vmbest->alignf)) ) + { pfsetinfo(vm,(Vmuchar_t*)data,size,file,line); + + if(!local && (vd->mode&VM_TRACE) && _Vmtrace) + { vm->file = file; vm->line = line; vm->func = func; + (*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)data,size,align); + } + } + + CLRLOCK(vm, local); + + return data; +} + +static Vmethod_t _Vmprofile = +{ + pfalloc, + pfresize, + pffree, + pfaddr, + pfsize, + pfcompact, + pfalign, + VM_MTPROFILE +}; + +__DEFINE__(Vmethod_t*,Vmprofile,&_Vmprofile); + +#ifdef NoF +NoF(vmprofile) +#endif + +#endif diff --git a/src/lib/libast/vmalloc/vmregion.c b/src/lib/libast/vmalloc/vmregion.c new file mode 100644 index 0000000..9aa1172 --- /dev/null +++ b/src/lib/libast/vmalloc/vmregion.c @@ -0,0 +1,61 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmregion(){} + +#else + +#include "vmhdr.h" + +/* Return the containing region of an allocated piece of memory. +** Beware: this only works with Vmbest, Vmdebug and Vmprofile. +** +** 10/31/2009: Add handling of shared/persistent memory regions. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/ +#if __STD_C +Vmalloc_t* vmregion(Void_t* addr) +#else +Vmalloc_t* vmregion(addr) +Void_t* addr; +#endif +{ + Vmalloc_t *vm; + Vmdata_t *vd; + + if(!addr) + return NIL(Vmalloc_t*); + + vd = SEG(BLOCK(addr))->vmdt; + + _vmlock(NIL(Vmalloc_t*), 1); + for(vm = Vmheap; vm; vm = vm->next) + if(vm->data == vd) + break; + _vmlock(NIL(Vmalloc_t*), 0); + + return vm; +} + +#endif diff --git a/src/lib/libast/vmalloc/vmsegment.c b/src/lib/libast/vmalloc/vmsegment.c new file mode 100644 index 0000000..9cf658d --- /dev/null +++ b/src/lib/libast/vmalloc/vmsegment.c @@ -0,0 +1,58 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmsegment(){} + +#else + +#include "vmhdr.h" + +/* Get the segment containing this address +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 02/07/95 +*/ + +#if __STD_C +Void_t* vmsegment(Vmalloc_t* vm, Void_t* addr) +#else +Void_t* vmsegment(vm, addr) +Vmalloc_t* vm; /* region */ +Void_t* addr; /* address */ +#endif +{ + Seg_t *seg; + Vmdata_t *vd = vm->data; + + SETLOCK(vm, 0); + + for(seg = vd->seg; seg; seg = seg->next) + if((Vmuchar_t*)addr >= (Vmuchar_t*)seg->addr && + (Vmuchar_t*)addr < (Vmuchar_t*)seg->baddr ) + break; + + CLRLOCK(vm, 0); + + return seg ? 
(Void_t*)seg->addr : NIL(Void_t*); +} + +#endif diff --git a/src/lib/libast/vmalloc/vmset.c b/src/lib/libast/vmalloc/vmset.c new file mode 100644 index 0000000..c437692 --- /dev/null +++ b/src/lib/libast/vmalloc/vmset.c @@ -0,0 +1,62 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmset(){} + +#else + +#include "vmhdr.h" + + +/* Set the control flags for a region. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/ +#if __STD_C +int vmset(reg Vmalloc_t* vm, int flags, int on) +#else +int vmset(vm, flags, on) +reg Vmalloc_t* vm; /* region being worked on */ +int flags; /* flags must be in VM_FLAGS */ +int on; /* !=0 if turning on, else turning off */ +#endif +{ + int mode; + Vmdata_t *vd = vm->data; + + if(flags == 0 && on == 0) + return vd->mode; + + SETLOCK(vm, 0); + + mode = vd->mode; + if(on) + vd->mode |= (flags&VM_FLAGS); + else vd->mode &= ~(flags&VM_FLAGS); + + CLRLOCK(vm, 0); + + return mode; +} + +#endif diff --git a/src/lib/libast/vmalloc/vmstat.c b/src/lib/libast/vmalloc/vmstat.c new file mode 100644 index 0000000..4c6f6a4 --- /dev/null +++ b/src/lib/libast/vmalloc/vmstat.c @@ -0,0 +1,145 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2012 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmstat(){} + +#else + +#include "vmhdr.h" + +/* Get statistics from a region. +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. +*/ + +#if __STD_C +int vmstat(Vmalloc_t* vm, Vmstat_t* st) +#else +int vmstat(vm, st) +Vmalloc_t* vm; +Vmstat_t* st; +#endif +{ + size_t s; + Seg_t *seg; + Block_t *b, *endb; + int local; + Vmdata_t *vd; + Void_t *d; + + if(!st) /* just checking lock state of region */ + return (vm ? 
vm : Vmregion)->data->lock; + + memset(st, 0, sizeof(Vmstat_t)); + + if(!vm) + { /* getting data for malloc */ +#if ( !_std_malloc || !_BLD_ast ) && !_AST_std_malloc + extern int _mallocstat(Vmstat_t*); + return _mallocstat(st); +#else + return -1; +#endif + } + + SETLOCK(vm, 0); + + st->n_busy = st->n_free = 0; + st->s_busy = st->s_free = st->m_busy = st->m_free = 0; + st->n_seg = 0; + st->extent = 0; + + vd = vm->data; + st->mode = vd->mode; + s = 0; + if(vd->mode&VM_MTLAST) + st->n_busy = 0; + else if((vd->mode&VM_MTPOOL) && (s = vd->pool) > 0) + { s = ROUND(s,ALIGN); + for(b = vd->free; b; b = SEGLINK(b)) + st->n_free += 1; + } + + for(seg = vd->seg; seg; seg = seg->next) + { st->n_seg += 1; + st->extent += seg->extent; + + b = SEGBLOCK(seg); + endb = BLOCK(seg->baddr); + + if(vd->mode&(VM_MTDEBUG|VM_MTBEST|VM_MTPROFILE)) + { while(b < endb) + { s = SIZE(b)&~BITS; + if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b))) + { if(s > st->m_free) + st->m_free = s; + st->s_free += s; + st->n_free += 1; + } + else /* get the real size */ + { d = DATA(b); + if(vd->mode&VM_MTDEBUG) + s = DBSIZE(DB2DEBUG(d)); + else if(vd->mode&VM_MTPROFILE) + s = PFSIZE(d); + if(s > st->m_busy) + st->m_busy = s; + st->s_busy += s; + st->n_busy += 1; + } + + b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) ); + } + /**/ASSERT(st->extent >= (st->s_busy + st->s_free)); + } + else if(vd->mode&VM_MTLAST) + { if((s = seg->free ? 
(SIZE(seg->free) + sizeof(Head_t)) : 0) > 0) + { st->s_free += s; + st->n_free += 1; + } + if((s = ((char*)endb - (char*)b) - s) > 0) + { st->s_busy += s; + st->n_busy += 1; + } + } + else if((vd->mode&VM_MTPOOL) && s > 0) + { if(seg->free) + st->n_free += (SIZE(seg->free)+sizeof(Head_t))/s; + st->n_busy += ((seg->baddr - (Vmuchar_t*)b) - sizeof(Head_t))/s; + } + } + + if((vd->mode&VM_MTPOOL) && s > 0) + { st->n_busy -= st->n_free; + if(st->n_busy > 0) + st->s_busy = (st->m_busy = vd->pool)*st->n_busy; + if(st->n_free > 0) + st->s_free = (st->m_free = vd->pool)*st->n_free; + } + + CLRLOCK(vm, 0); + + return 0; +} + +#endif diff --git a/src/lib/libast/vmalloc/vmstrdup.c b/src/lib/libast/vmalloc/vmstrdup.c new file mode 100644 index 0000000..2172f6e --- /dev/null +++ b/src/lib/libast/vmalloc/vmstrdup.c @@ -0,0 +1,48 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmstrdup(){} + +#else + +#include "vmhdr.h" + +/* + * return a copy of s using vmalloc + */ + +#if __STD_C +char* vmstrdup(Vmalloc_t* v, register const char* s) +#else +char* vmstrdup(v, s) +Vmalloc_t* v; +register char* s; +#endif +{ + register char* t; + register size_t n; + + return (s && (t = vmalloc(v, n = strlen(s) + 1))) ? 
(char*)memcpy(t, s, n) : (char*)0; +} + +#endif diff --git a/src/lib/libast/vmalloc/vmtrace.c b/src/lib/libast/vmalloc/vmtrace.c new file mode 100644 index 0000000..22edba3 --- /dev/null +++ b/src/lib/libast/vmalloc/vmtrace.c @@ -0,0 +1,286 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * +***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmtrace(){} + +#else + +#include "vmhdr.h" + +/* Turn on tracing for regions +** +** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 
+*/ + +static int Trfile = -1; +static char Trbuf[128]; + +#if __STD_C +static char* trstrcpy(char* to, const char* from, int endc) +#else +static char* trstrcpy(to, from, endc) +char* to; +const char* from; +int endc; +#endif +{ reg int n; + + n = strlen(from); + memcpy(to,from,n); + to += n; + if((*to = endc) ) + to += 1; + return to; +} + +/* convert a long value to an ascii representation */ +#if __STD_C +static char* tritoa(Vmulong_t v, int type) +#else +static char* tritoa(v, type) +Vmulong_t v; /* value to convert */ +int type; /* =0 base-16, >0: unsigned base-10, <0: signed base-10 */ +#endif +{ + char* s; + + s = &Trbuf[sizeof(Trbuf) - 1]; + *s-- = '\0'; + + if(type == 0) /* base-16 */ + { reg char* digit = "0123456789abcdef"; + do + { *s-- = digit[v&0xf]; + v >>= 4; + } while(v); + } + else if(type > 0) /* unsigned base-10 */ + { do + { *s-- = (char)('0' + (v%10)); + v /= 10; + } while(v); + } + else /* signed base-10 */ + { int sign = ((long)v < 0); + if(sign) + v = (Vmulong_t)(-((long)v)); + do + { *s-- = (char)('0' + (v%10)); + v /= 10; + } while(v); + if(sign) + *s-- = '-'; + } + + return s+1; +} + +/* generate a trace of some call */ +#if __STD_C +static void trtrace(Vmalloc_t* vm, + Vmuchar_t* oldaddr, Vmuchar_t* newaddr, size_t size, size_t align ) +#else +static void trtrace(vm, oldaddr, newaddr, size, align) +Vmalloc_t* vm; /* region call was made from */ +Vmuchar_t* oldaddr; /* old data address */ +Vmuchar_t* newaddr; /* new data address */ +size_t size; /* size of piece */ +size_t align; /* alignment */ +#endif +{ + char buf[1024], *bufp, *endbuf; + Vmdata_t* vd = vm->data; + const char* file = 0; + int line = 0; + const char* func = 0; + int comma; + int n; + int m; + + int type; +#define SLOP 64 + + if(oldaddr == (Vmuchar_t*)(-1)) /* printing busy blocks */ + { type = 0; + oldaddr = NIL(Vmuchar_t*); + } + else + { type = vd->mode&VM_METHODS; + VMFLF(vm,file,line,func); + } + + if(Trfile < 0) + return; + + bufp = buf; endbuf = buf+sizeof(buf); 
+ bufp = trstrcpy(bufp, tritoa(oldaddr ? VLONG(oldaddr) : 0L, 0), ':'); + bufp = trstrcpy(bufp, tritoa(newaddr ? VLONG(newaddr) : 0L, 0), ':'); + bufp = trstrcpy(bufp, tritoa((Vmulong_t)size, 1), ':'); + bufp = trstrcpy(bufp, tritoa((Vmulong_t)align, 1), ':'); + bufp = trstrcpy(bufp, tritoa(VLONG(vm), 0), ':'); + if(type&VM_MTBEST) + bufp = trstrcpy(bufp, "b", ':'); + else if(type&VM_MTLAST) + bufp = trstrcpy(bufp, "l", ':'); + else if(type&VM_MTPOOL) + bufp = trstrcpy(bufp, "p", ':'); + else if(type&VM_MTPROFILE) + bufp = trstrcpy(bufp, "s", ':'); + else if(type&VM_MTDEBUG) + bufp = trstrcpy(bufp, "d", ':'); + else bufp = trstrcpy(bufp, "u", ':'); + + comma = 0; + if(file && file[0] && line > 0) + { if((bufp + strlen(file) + SLOP) >= endbuf) + { char* f; + for(f = bufp + strlen(file); f > file; --f) + if(f[-1] == '/' || f[-1] == '\\') + break; + file = f; + } + + bufp = trstrcpy(bufp, "file", '='); + n = endbuf - bufp - SLOP - 3; + m = strlen(file); + if(m > n) + { file += (m - n); + bufp = trstrcpy(bufp, "..", '.'); + } + bufp = trstrcpy(bufp, file, ','); + bufp = trstrcpy(bufp, "line", '='); + bufp = trstrcpy(bufp, tritoa((Vmulong_t)line,1), 0); + comma = 1; + } + if(func) + { if(comma) + *bufp++ = ','; + bufp = trstrcpy(bufp, "func", '='); +#if 1 + bufp = trstrcpy(bufp, (const char*)func, 0); +#else + bufp = trstrcpy(bufp, tritoa((Vmulong_t)func,0), 0); +#endif + comma = 1; + } + if(comma) + *bufp++ = ':'; + + *bufp++ = '\n'; + *bufp = '\0'; + + write(Trfile,buf,(bufp-buf)); +} + +#if __STD_C +void _vmmessage(const char* s1, long n1, const char* s2, long n2) +#else +void _vmmessage(s1, n1, s2, n2) +const char* s1; +long n1; +const char* s2; +long n2; +#endif +{ + char buf[1024], *bufp; + + bufp = buf; + bufp = trstrcpy(bufp, "vmalloc", ':'); + if (s1) + { + bufp = trstrcpy(bufp, s1, ':'); + if (n1) + bufp = trstrcpy(bufp, tritoa(n1, 1), ':'); + } + if (s2) + { + bufp = trstrcpy(bufp, s2, ':'); + if (n2) + bufp = trstrcpy(bufp, tritoa(n2, 0), ':'); + } + + bufp 
= trstrcpy(bufp, tritoa((long)getpid(), 1), ':'); + + *bufp++ = '\n'; + write(2,buf,(bufp-buf)); +} + +#if __STD_C +int vmtrace(int file) +#else +int vmtrace(file) +int file; +#endif +{ + int fd; + + _Vmstrcpy = trstrcpy; + _Vmitoa = tritoa; + _Vmtrace = trtrace; + + fd = Trfile; + Trfile = file; + return fd; +} + +#if __STD_C +int vmtrbusy(Vmalloc_t* vm) +#else +int vmtrbusy(vm) +Vmalloc_t* vm; +#endif +{ + Seg_t* seg; + Vmdata_t* vd = vm->data; + + if(Trfile < 0 || !(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE))) + return -1; + + for(seg = vd->seg; seg; seg = seg->next) + { Block_t *b, *endb; + Vmuchar_t* data; + size_t s; + + for(b = SEGBLOCK(seg), endb = BLOCK(seg->baddr); b < endb; ) + { if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b))) + continue; + + data = DATA(b); + if(vd->mode&VM_MTDEBUG) + { data = DB2DEBUG(data); + s = DBSIZE(data); + } + else if(vd->mode&VM_MTPROFILE) + s = PFSIZE(data); + else s = SIZE(b)&~BITS; + + trtrace(vm, (Vmuchar_t*)(-1), data, s, 0); + + b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) ); + } + } + + return 0; +} + +#endif diff --git a/src/lib/libast/vmalloc/vmwalk.c b/src/lib/libast/vmalloc/vmwalk.c new file mode 100644 index 0000000..d7a8e36 --- /dev/null +++ b/src/lib/libast/vmalloc/vmwalk.c @@ -0,0 +1,69 @@ +/*********************************************************************** +* * +* This software is part of the ast package * +* Copyright (c) 1985-2011 AT&T Intellectual Property * +* and is licensed under the * +* Eclipse Public License, Version 1.0 * +* by AT&T Intellectual Property * +* * +* A copy of the License is available at * +* http://www.eclipse.org/org/documents/epl-v10.html * +* (with md5 checksum b35adb5213ca9657e911e9befb180842) * +* * +* Information and Software Systems Research * +* AT&T Research * +* Florham Park NJ * +* * +* Glenn Fowler <gsf@research.att.com> * +* David Korn <dgk@research.att.com> * +* Phong Vo <kpv@research.att.com> * +* * 
+***********************************************************************/ +#if defined(_UWIN) && defined(_BLD_ast) + +void _STUB_vmwalk(){} + +#else + +#include "vmhdr.h" + +/* Walks all segments created in region(s) +** +** Written by Kiem-Phong Vo, kpv@research.att.com (02/08/96) +*/ + +#if __STD_C +int vmwalk(Vmalloc_t* vm, int(*segf)(Vmalloc_t*, Void_t*, size_t, Vmdisc_t*, Void_t*), Void_t* handle ) +#else +int vmwalk(vm, segf, handle) +Vmalloc_t* vm; +int(* segf)(/* Vmalloc_t*, Void_t*, size_t, Vmdisc_t*, Void_t* */); +Void_t* handle; +#endif +{ + reg Seg_t *seg; + reg int rv = 0; + + if(!vm) + { _vmlock(NIL(Vmalloc_t*), 1); + for(vm = Vmheap; vm; vm = vm->next) + { SETLOCK(vm, 0); + for(seg = vm->data->seg; seg; seg = seg->next) + if((rv = (*segf)(vm, seg->addr, seg->extent, vm->disc, handle)) < 0 ) + break; + CLRLOCK(vm, 0); + } + _vmlock(NIL(Vmalloc_t*), 0); + } + else + { SETLOCK(vm, 0); + for(seg = vm->data->seg; seg; seg = seg->next) + if((rv = (*segf)(vm, seg->addr, seg->extent, vm->disc, handle)) < 0 ) + break; + CLRLOCK(vm, 0); + } + + return rv; +} + +#endif |