author | Rob Pike <r@golang.org> | 2009-06-09 09:53:44 -0700
---|---|---
committer | Rob Pike <r@golang.org> | 2009-06-09 09:53:44 -0700
commit | 7249ea4df2b4f12a4e7ed446f270cea87e4ffd34 (patch) |
tree | 7032a11d0cac2ae4d3e90f7a189b575b5a50f848 /src/pkg/runtime/malloc.c |
parent | acf6ef7a82b3fe61516a1bac4563706552bdf078 (diff) |
download | golang-7249ea4df2b4f12a4e7ed446f270cea87e4ffd34.tar.gz |
mv src/lib to src/pkg
tests: all.bash passes, gobuild still works, godoc still works.
R=rsc
OCL=30096
CL=30102
Diffstat (limited to 'src/pkg/runtime/malloc.c')
-rw-r--r-- | src/pkg/runtime/malloc.c | 308
1 file changed, 308 insertions(+), 0 deletions(-)
diff --git a/src/pkg/runtime/malloc.c b/src/pkg/runtime/malloc.c
new file mode 100644
index 000000000..81cdfb300
--- /dev/null
+++ b/src/pkg/runtime/malloc.c
@@ -0,0 +1,308 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See malloc.h for overview.
+//
+// TODO(rsc): double-check stats.
+// TODO(rsc): solve "stack overflow during malloc" problem.
+
+#include "runtime.h"
+#include "malloc.h"
+#include "defs.h"
+
+MHeap mheap;
+MStats mstats;
+
+// Allocate an object of at least size bytes.
+// Small objects are allocated from the per-thread cache's free lists.
+// Large objects (> 32 kB) are allocated straight from the heap.
+void*
+malloc(uintptr size)
+{
+	int32 sizeclass;
+	MCache *c;
+	uintptr npages;
+	MSpan *s;
+	void *v;
+	uint32 *ref;
+
+	if(m->mallocing)
+		throw("malloc/free - deadlock");
+	m->mallocing = 1;
+
+	if(size == 0)
+		size = 1;
+
+	if(size <= MaxSmallSize) {
+		// Allocate from mcache free lists.
+		sizeclass = SizeToClass(size);
+		size = class_to_size[sizeclass];
+		c = m->mcache;
+		v = MCache_Alloc(c, sizeclass, size);
+		if(v == nil)
+			throw("out of memory");
+		mstats.alloc += size;
+	} else {
+		// TODO(rsc): Report tracebacks for very large allocations.
+
+		// Allocate directly from heap.
+		npages = size >> PageShift;
+		if((size & PageMask) != 0)
+			npages++;
+		s = MHeap_Alloc(&mheap, npages, 0);
+		if(s == nil)
+			throw("out of memory");
+		mstats.alloc += npages<<PageShift;
+		v = (void*)(s->start << PageShift);
+	}
+
+	// setup for mark sweep
+	if(!mlookup(v, nil, nil, &ref)) {
+		printf("malloc %D; mlookup failed\n", (uint64)size);
+		throw("malloc mlookup");
+	}
+	*ref = RefNone;
+
+	m->mallocing = 0;
+	return v;
+}
+
+void*
+mallocgc(uintptr size)
+{
+	void *v;
+
+	v = malloc(size);
+	if(mstats.inuse_pages > mstats.next_gc)
+		gc(0);
+	return v;
+}
+
+// Free the object whose base pointer is v.
+void
+free(void *v)
+{
+	int32 sizeclass, size;
+	uintptr page, tmp;
+	MSpan *s;
+	MCache *c;
+	uint32 *ref;
+
+	if(v == nil)
+		return;
+
+	if(m->mallocing)
+		throw("malloc/free - deadlock");
+	m->mallocing = 1;
+
+	if(!mlookup(v, nil, nil, &ref))
+		throw("free mlookup");
+	*ref = RefFree;
+
+	// Find size class for v.
+	page = (uintptr)v >> PageShift;
+	sizeclass = MHeapMapCache_GET(&mheap.mapcache, page, tmp);
+	if(sizeclass == 0) {
+		// Missed in cache.
+		s = MHeap_Lookup(&mheap, page);
+		if(s == nil)
+			throw("free - invalid pointer");
+		sizeclass = s->sizeclass;
+		if(sizeclass == 0) {
+			// Large object.
+			mstats.alloc -= s->npages<<PageShift;
+			sys_memclr(v, s->npages<<PageShift);
+			MHeap_Free(&mheap, s);
+			goto out;
+		}
+		MHeapMapCache_SET(&mheap.mapcache, page, sizeclass);
+	}
+
+	// Small object.
+	c = m->mcache;
+	size = class_to_size[sizeclass];
+	sys_memclr(v, size);
+	mstats.alloc -= size;
+	MCache_Free(c, v, sizeclass, size);
+
+out:
+	m->mallocing = 0;
+}
+
+int32
+mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
+{
+	uintptr n, nobj, i;
+	byte *p;
+	MSpan *s;
+
+	s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
+	if(s == nil) {
+		if(base)
+			*base = nil;
+		if(size)
+			*size = 0;
+		if(ref)
+			*ref = 0;
+		return 0;
+	}
+
+	p = (byte*)((uintptr)s->start<<PageShift);
+	if(s->sizeclass == 0) {
+		// Large object.
+		if(base)
+			*base = p;
+		if(size)
+			*size = s->npages<<PageShift;
+		if(ref)
+			*ref = &s->gcref0;
+		return 1;
+	}
+
+	if((byte*)v >= (byte*)s->gcref) {
+		// pointers into the gc ref counts
+		// do not count as pointers.
+		return 0;
+	}
+
+	n = class_to_size[s->sizeclass];
+	i = ((byte*)v - p)/n;
+	if(base)
+		*base = p + i*n;
+	if(size)
+		*size = n;
+	nobj = (s->npages << PageShift) / (n + RefcountOverhead);
+	if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
+		printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
+			s->state, s, p, s->sizeclass, (uint64)nobj, (uint64)n, (uint64)s->npages);
+		printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
+			s->sizeclass, v, p, s->gcref, (uint64)s->npages<<PageShift,
+			(uint64)nobj, (uint64)n, s->gcref + nobj, p+(s->npages<<PageShift));
+		throw("bad gcref");
+	}
+	if(ref)
+		*ref = &s->gcref[i];
+
+	return 1;
+}
+
+MCache*
+allocmcache(void)
+{
+	return FixAlloc_Alloc(&mheap.cachealloc);
+}
+
+void
+mallocinit(void)
+{
+	InitSizes();
+	MHeap_Init(&mheap, SysAlloc);
+	m->mcache = allocmcache();
+
+	// See if it works.
+	free(malloc(1));
+}
+
+void*
+SysAlloc(uintptr n)
+{
+	mstats.sys += n;
+	return sys_mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
+void
+SysUnused(void *v, uintptr n)
+{
+	USED(v);
+	USED(n);
+	// TODO(rsc): call madvise MADV_DONTNEED
+}
+
+void
+SysFree(void *v, uintptr n)
+{
+	USED(v);
+	USED(n);
+	// TODO(rsc): call munmap
+}
+
+// Runtime stubs.
+
+extern void *oldmal(uint32);
+
+void*
+mal(uint32 n)
+{
+//return oldmal(n);
+	void *v;
+
+	v = mallocgc(n);
+
+	if(0) {
+		byte *p;
+		uint32 i;
+		p = v;
+		for(i=0; i<n; i++) {
+			if(p[i] != 0) {
+				printf("mal %d => %p: byte %d is non-zero\n", n, v, i);
+				throw("mal");
+			}
+		}
+	}
+
+//printf("mal %d %p\n", n, v);	// |checkmal to check for overlapping returns.
+	return v;
+}
+
+// Stack allocator uses malloc/free most of the time,
+// but if we're in the middle of malloc and need stack,
+// we have to do something else to avoid deadlock.
+// In that case, we fall back on a fixed-size free-list
+// allocator, assuming that inside malloc all the stack
+// frames are small, so that all the stack allocations
+// will be a single size, the minimum (right now, 5k).
+struct {
+	Lock;
+	FixAlloc;
+} stacks;
+
+void*
+stackalloc(uint32 n)
+{
+	void *v;
+	uint32 *ref;
+
+//return oldmal(n);
+	if(m->mallocing) {
+		lock(&stacks);
+		if(stacks.size == 0)
+			FixAlloc_Init(&stacks, n, SysAlloc, nil, nil);
+		if(stacks.size != n) {
+			printf("stackalloc: in malloc, size=%D want %d", (uint64)stacks.size, n);
+			throw("stackalloc");
+		}
+		v = FixAlloc_Alloc(&stacks);
+		unlock(&stacks);
+		return v;
+	}
+	v = malloc(n);
+	if(!mlookup(v, nil, nil, &ref))
+		throw("stackalloc mlookup");
+	*ref = RefStack;
+	return v;
+}
+
+void
+stackfree(void *v)
+{
+//return;
+
+	if(m->mallocing) {
+		lock(&stacks);
+		FixAlloc_Free(&stacks, v);
+		unlock(&stacks);
+		return;
+	}
+	free(v);
+}
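
For readers tracing the large-object branch of malloc() above: the request is rounded up to whole pages before MHeap_Alloc is asked for a span. Below is a minimal standalone sketch of just that rounding; PageShift = 12 (4 kB pages) is an assumed value for illustration, the runtime's real constants live in malloc.h.

/*
 * Standalone sketch of the page rounding in malloc()'s
 * large-object branch.  Constants are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

enum {
	PageShift = 12,                    /* assume 4 kB pages */
	PageMask  = (1<<PageShift) - 1,
};

static uintptr_t
pages_for(uintptr_t size)
{
	uintptr_t npages;

	npages = size >> PageShift;
	if((size & PageMask) != 0)
		npages++;                  /* round a partial page up */
	return npages;
}

int
main(void)
{
	/* 40000 bytes = 9 full pages + 3136 bytes -> 10 pages */
	printf("%llu\n", (unsigned long long)pages_for(40000));
	return 0;
}

The shift-and-mask form is equivalent to dividing by the page size and rounding up, without a division.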
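Likewise, the small-object case of mlookup() recovers an object's index and base address from any interior pointer by integer division against the size-class width: i = (v - p)/n, base = p + i*n. A sketch with made-up values (span base, size class, and pointer are all hypothetical):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t p = 0x100000;        /* hypothetical span base */
	uintptr_t n = 48;              /* hypothetical size class: 48-byte objects */
	uintptr_t v = p + 3*n + 7;     /* interior pointer into object 3 */

	uintptr_t i = (v - p)/n;       /* object index: 3 */
	uintptr_t base = p + i*n;      /* start of the enclosing object */

	printf("index=%llu base=%#llx\n",
		(unsigned long long)i, (unsigned long long)base);
	return 0;
}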