 src/lib/malloc.go              |  15
 src/runtime/Makefile           |   1
 src/runtime/malloc.c           |  73
 src/runtime/malloc.h           |  28
 src/runtime/malloc_go.cgo      |   7
 src/runtime/mcentral.c         |  12
 src/runtime/mgc0.c             | 246
 src/runtime/mheap.c            |  53
 src/runtime/msize.c            |   7
 src/runtime/proc.c             |   9
 src/runtime/rt1_amd64_darwin.c |   3
 src/runtime/rt1_amd64_linux.c  |   4
 src/runtime/runtime.c          |  24
 src/runtime/runtime.h          |   4
 test/gc.go                     |  25
 test/gc1.go                    |  13
 16 files changed, 477 insertions(+), 47 deletions(-)
diff --git a/src/lib/malloc.go b/src/lib/malloc.go
index e3896e94f..8e4397a0b 100644
--- a/src/lib/malloc.go
+++ b/src/lib/malloc.go
@@ -11,9 +11,14 @@ package malloc
 type Stats struct {
 	Alloc	uint64;
 	Sys	uint64;
-};
+	Stacks	uint64;
+	InusePages	uint64;
+	NextGC	uint64;
+	EnableGC	bool;
+}
 
-func Alloc(uint64) *byte;
-func Free(*byte);
-func GetStats() *Stats;
-func Lookup(*byte) (*byte, uintptr);
+func Alloc(uint64) *byte
+func Free(*byte)
+func GetStats() *Stats
+func Lookup(*byte) (*byte, uintptr)
+func GC()
diff --git a/src/runtime/Makefile b/src/runtime/Makefile
index 03633a61d..e9c895ab6 100644
--- a/src/runtime/Makefile
+++ b/src/runtime/Makefile
@@ -29,6 +29,7 @@ LIBOFILES=\
 	mcentral.$O\
 	mem.$O\
 	mfixalloc.$O\
+	mgc0.$O\
 	mheap.$O\
 	msize.$O\
 	print.$O\
diff --git a/src/runtime/malloc.c b/src/runtime/malloc.c
index 258291da2..e518b5667 100644
--- a/src/runtime/malloc.c
+++ b/src/runtime/malloc.c
@@ -24,6 +24,7 @@ malloc(uintptr size)
 	uintptr npages;
 	MSpan *s;
 	void *v;
+	uint32 *ref;
 
 	if(m->mallocing)
 		throw("malloc - deadlock");
@@ -55,10 +56,25 @@ malloc(uintptr size)
 		v = (void*)(s->start << PageShift);
 	}
 
+	// setup for mark sweep
+	mlookup(v, nil, nil, &ref);
+	*ref = RefNone;
+
 	m->mallocing = 0;
 	return v;
 }
 
+void*
+mallocgc(uintptr size)
+{
+	void *v;
+
+	v = malloc(size);
+	if(mstats.inuse_pages > mstats.next_gc)
+		gc(0);
+	return v;
+}
+
 // Free the object whose base pointer is v.
 void
 free(void *v)
@@ -67,10 +83,14 @@ free(void *v)
 	uintptr page, tmp;
 	MSpan *s;
 	MCache *c;
+	uint32 *ref;
 
 	if(v == nil)
 		return;
 
+	mlookup(v, nil, nil, &ref);
+	*ref = RefFree;
+
 	// Find size class for v.
 	page = (uintptr)v >> PageShift;
 	sizeclass = MHeapMapCache_GET(&mheap.mapcache, page, tmp);
@@ -98,32 +118,51 @@ free(void *v)
 	MCache_Free(c, v, sizeclass, size);
 }
 
-void
-mlookup(void *v, byte **base, uintptr *size)
+int32
+mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
 {
-	uintptr n, off;
+	uintptr n, i;
 	byte *p;
 	MSpan *s;
 
-	s = MHeap_Lookup(&mheap, (uintptr)v>>PageShift);
+	s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
 	if(s == nil) {
-		*base = nil;
-		*size = 0;
-		return;
+		if(base)
+			*base = nil;
+		if(size)
+			*size = 0;
+		if(ref)
+			*ref = 0;
+		return 0;
 	}
 
 	p = (byte*)((uintptr)s->start<<PageShift);
 	if(s->sizeclass == 0) {
 		// Large object.
-		*base = p;
-		*size = s->npages<<PageShift;
-		return;
+		if(base)
+			*base = p;
+		if(size)
+			*size = s->npages<<PageShift;
+		if(ref)
+			*ref = &s->gcref0;
+		return 1;
 	}
 
 	n = class_to_size[s->sizeclass];
-	off = ((byte*)v - p)/n * n;
-	*base = p+off;
-	*size = n;
+	i = ((byte*)v - p)/n;
+	if(base)
+		*base = p + i*n;
+	if(size)
+		*size = n;
+	if((byte*)s->gcref < p || (byte*)s->gcref >= p+(s->npages<<PageShift)) {
+		printf("s->base sizeclass %d %p gcref %p block %D\n",
+			s->sizeclass, p, s->gcref, s->npages<<PageShift);
+		throw("bad gcref");
+	}
+	if(ref)
+		*ref = &s->gcref[i];
+
+	return 1;
 }
 
 MCache*
@@ -193,7 +232,7 @@ mal(uint32 n)
 //return oldmal(n);
 	void *v;
 
-	v = malloc(n);
+	v = mallocgc(n);
 
 	if(0) {
 		byte *p;
@@ -227,6 +266,7 @@ void*
 stackalloc(uint32 n)
 {
 	void *v;
+	uint32 *ref;
 
 //return oldmal(n);
 	if(m->mallocing) {
@@ -241,7 +281,10 @@ stackalloc(uint32 n)
 		unlock(&stacks);
 		return v;
 	}
-	return malloc(n);
+	v = malloc(n);
+	mlookup(v, nil, nil, &ref);
+	*ref = RefStack;
+	return v;
 }
 
 void
diff --git a/src/runtime/malloc.h b/src/runtime/malloc.h
index ca05f0191..1da9f980d 100644
--- a/src/runtime/malloc.h
+++ b/src/runtime/malloc.h
@@ -91,7 +91,7 @@ typedef uintptr PageID;	// address >> PageShift
 enum
 {
 	// Tunable constants.
-	NumSizeClasses = 133,	// Number of size classes (must match msize.c)
+	NumSizeClasses = 150,	// Number of size classes (must match msize.c)
 	MaxSmallSize = 32<<10,
 
 	FixAllocChunk = 128<<10,	// Chunk size for FixAlloc
@@ -152,6 +152,9 @@ struct MStats
 	uint64	alloc;
 	uint64	sys;
 	uint64	stacks;
+	uint64	inuse_pages;	// protected by mheap.Lock
+	uint64	next_gc;	// protected by mheap.Lock
+	bool	enablegc;
 };
 extern MStats mstats;
 
@@ -212,6 +215,10 @@ struct MSpan
 	uint32	ref;		// number of allocated objects in this span
 	uint32	sizeclass;	// size class
 	uint32	state;		// MSpanInUse or MSpanFree
+	union {
+		uint32	*gcref;	// sizeclass > 0
+		uint32	gcref0;	// sizeclass == 0
+	};
 };
 
 void	MSpan_Init(MSpan *span, PageID start, uintptr npages);
@@ -292,6 +299,7 @@ struct MHeapMapNode3
 void	MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
 bool	MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
 MSpan*	MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan*	MHeapMap_GetMaybe(MHeapMap *m, PageID k);
 void	MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
 
@@ -364,7 +372,19 @@ void	MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
 MSpan*	MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass);
 void	MHeap_Free(MHeap *h, MSpan *s);
 MSpan*	MHeap_Lookup(MHeap *h, PageID p);
+MSpan*	MHeap_LookupMaybe(MHeap *h, PageID p);
+
+int32	mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
+void	gc(int32 force);
+
+enum
+{
+	RefcountOverhead = 4,	// one uint32 per object
+
+	RefFree = 0,	// must be zero
+	RefManual,	// manual allocation - don't free
+	RefStack,	// stack segment - don't free and don't scan for pointers
+	RefNone,	// no references
+	RefSome,	// some references
+};
 
-void*	malloc(uintptr size);
-void	free(void *v);
-void	mlookup(void *v, byte **base, uintptr *size);
diff --git a/src/runtime/malloc_go.cgo b/src/runtime/malloc_go.cgo
index 7c55c1078..6dcdaece2 100644
--- a/src/runtime/malloc_go.cgo
+++ b/src/runtime/malloc_go.cgo
@@ -15,9 +15,14 @@ func Free(p *byte) {
 }
 
 func Lookup(p *byte) (base *byte, size uintptr) {
-	mlookup(p, &base, &size);
+	mlookup(p, &base, &size, nil);
 }
 
 func GetStats() (s *MStats) {
 	s = &mstats;
 }
+
+func GC() {
+	gc(1);
+}
+
diff --git a/src/runtime/mcentral.c b/src/runtime/mcentral.c
index 5b07faf11..5c9f720c0 100644
--- a/src/runtime/mcentral.c
+++ b/src/runtime/mcentral.c
@@ -157,9 +157,9 @@ MCentral_Free(MCentral *c, void *v)
 static bool
 MCentral_Grow(MCentral *c)
 {
-	int32 n, npages, size;
+	int32 i, n, npages, size;
 	MLink **tailp, *v;
-	byte *p, *end;
+	byte *p;
 	MSpan *s;
 
 	unlock(c);
@@ -174,14 +174,14 @@ MCentral_Grow(MCentral *c)
 	// Carve span into sequence of blocks.
 	tailp = &s->freelist;
 	p = (byte*)(s->start << PageShift);
-	end = p + (npages << PageShift);
 	size = class_to_size[c->sizeclass];
-	n = 0;
-	for(; p + size <= end; p += size) {
+	n = (npages << PageShift) / (size + RefcountOverhead);
+	s->gcref = (uint32*)(p + size*n);
+	for(i=0; i<n; i++) {
 		v = (MLink*)p;
 		*tailp = v;
 		tailp = &v->next;
-		n++;
+		p += size;
 	}
 	*tailp = nil;
 
diff --git a/src/runtime/mgc0.c b/src/runtime/mgc0.c
new file mode 100644
index 000000000..3584bf75c
--- /dev/null
+++ b/src/runtime/mgc0.c
@@ -0,0 +1,246 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector -- step 0.
+//
+// Stop the world, mark and sweep garbage collector.
+// NOT INTENDED FOR PRODUCTION USE.
+//
+// A mark and sweep collector provides a way to exercise
+// and test the memory allocator and the stack walking machinery
+// without also needing to get reference counting
+// exactly right.
+
+#include "runtime.h"
+#include "malloc.h"
+
+enum {
+	Debug = 0
+};
+
+extern byte etext[];
+extern byte end[];
+
+static void
+scanblock(int32 depth, byte *b, int64 n)
+{
+	int32 off;
+	void *obj;
+	uintptr size;
+	uint32 *ref;
+	void **vp;
+	int64 i;
+
+	if(Debug)
+		printf("%d scanblock %p %D\n", depth, b, n);
+	off = (uint32)(uintptr)b & 7;
+	if(off) {
+		b += 8 - off;
+		n -= 8 - off;
+	}
+
+	vp = (void**)b;
+	n /= 8;
+	for(i=0; i<n; i++) {
+		if(mlookup(vp[i], &obj, &size, &ref)) {
+			if(*ref == RefFree || *ref == RefStack)
+				continue;
+			if(*ref == RefNone) {
+				if(Debug)
+					printf("%d found at %p: ", depth, &vp[i]);
+				*ref = RefSome;
+				scanblock(depth+1, obj, size);
+			}
+		}
+	}
+}
+
+static void
+scanstack(G *g)
+{
+	Stktop *stk;
+	byte *sp;
+
+	sp = g->sched.SP;
+	stk = (Stktop*)g->stackbase;
+	while(stk) {
+		scanblock(0, sp, (byte*)stk - sp);
+		sp = stk->oldsp;
+		stk = (Stktop*)stk->oldbase;
+	}
+}
+
+static void
+mark(void)
+{
+	G *gp;
+
+	// mark data+bss
+	scanblock(0, etext, end - etext);
+
+	// mark stacks
+	for(gp=allg; gp!=nil; gp=gp->alllink) {
+		switch(gp->status){
+		default:
+			printf("unexpected G.status %d\n", gp->status);
+			throw("mark - bad status");
+		case Gdead:
+			break;
+		case Grunning:
+			if(gp != g)
+				throw("mark - world not stopped");
+			scanstack(gp);
+			break;
+		case Grunnable:
+		case Gsyscall:
+		case Gwaiting:
+			scanstack(gp);
+			break;
+		}
+	}
+}
+
+static void
+sweepspan(MSpan *s)
+{
+	int32 i, n, npages, size;
+	byte *p;
+
+	if(s->state != MSpanInUse)
+		return;
+
+	p = (byte*)(s->start << PageShift);
+	if(s->sizeclass == 0) {
+		// Large block.
+		switch(s->gcref0) {
+		default:
+			throw("bad 'ref count'");
+		case RefFree:
+		case RefManual:
+		case RefStack:
+			break;
+		case RefNone:
+			if(Debug)
+				printf("free %D at %p\n", s->npages<<PageShift, p);
+			free(p);
+			break;
+		case RefSome:
+			s->gcref0 = RefNone;	// set up for next mark phase
+			break;
+		}
+		return;
+	}
+
+	// Chunk full of small blocks.
+	// Must match computation in MCentral_Grow.
+	size = class_to_size[s->sizeclass];
+	npages = class_to_allocnpages[s->sizeclass];
+	n = (npages << PageShift) / (size + RefcountOverhead);
+	for(i=0; i<n; i++) {
+		switch(s->gcref[i]) {
+		default:
+			throw("bad 'ref count'");
+		case RefFree:
+		case RefManual:
+		case RefStack:
+			break;
+		case RefNone:
+			if(Debug)
+				printf("free %d at %p\n", size, p+i*size);
+			free(p + i*size);
+			break;
+		case RefSome:
+			s->gcref[i] = RefNone;	// set up for next mark phase
+			break;
+		}
+	}
+}
+
+static void
+sweepspanlist(MSpan *list)
+{
+	MSpan *s, *next;
+
+	for(s=list->next; s != list; s=next) {
+		next = s->next;	// in case s gets moved
+		sweepspan(s);
+	}
+}
+
+static void
+sweep(void)
+{
+	int32 i;
+
+	// Sweep all the spans.
+	for(i=0; i<nelem(mheap.central); i++) {
+		// Sweep nonempty (has some free blocks available)
+		// before sweeping empty (is completely allocated),
+		// because finding something to free in a span from empty
+		// will move it into nonempty, and we must not sweep
+		// the same span twice.
+		sweepspanlist(&mheap.central[i].nonempty);
+		sweepspanlist(&mheap.central[i].empty);
+	}
+}
+
+// Semaphore, not Lock, so that the goroutine
+// reschedules when there is contention rather
+// than spinning.
+static uint32 gcsema = 1;
+
+// Initialized from $GOGC.  GOGC=off means no gc.
+//
+// Next gc is after we've allocated an extra amount of
+// memory proportional to the amount already in use.
+// If gcpercent=100 and we're using 4M, we'll gc again
+// when we get to 8M.  This keeps the gc cost in linear
+// proportion to the allocation cost.  Adjusting gcpercent
+// just changes the linear constant (and also the amount of
+// extra memory used).
+static int32 gcpercent = -2;
+
+void
+gc(int32 force)
+{
+	byte *p;
+
+	// The gc is turned off (via enablegc) until
+	// the bootstrap has completed.
+	// Also, malloc gets called in the guts
+	// of a number of libraries that might be
+	// holding locks.  To avoid priority inversion
+	// problems, don't bother trying to run gc
+	// while holding a lock.  The next mallocgc
+	// without a lock will do the gc instead.
+	if(!mstats.enablegc || m->locks > 0 || panicking)
+		return;
+
+	if(gcpercent == -2) {	// first time through
+		p = getenv("GOGC");
+		if(p == nil || p[0] == '\0')
+			gcpercent = 100;
+		else if(strcmp(p, (byte*)"off") == 0)
+			gcpercent = -1;
+		else
+			gcpercent = atoi(p);
+	}
+	if(gcpercent < 0)
+		return;
+
+	semacquire(&gcsema);
+	gosave(&g->sched);	// update g's stack pointer for scanstack
+	stoptheworld();
+	if(mheap.Lock.key != 0)
+		throw("mheap locked during gc");
+	if(force || mstats.inuse_pages >= mstats.next_gc) {
+		mark();
+		sweep();
+		mstats.next_gc = mstats.inuse_pages+mstats.inuse_pages*gcpercent/100;
+	}
+	starttheworld();
+	gosave(&g->sched);	// update g's stack pointer for debugging
+	semrelease(&gcsema);
+}
diff --git a/src/runtime/mheap.c b/src/runtime/mheap.c
index 9f8e91614..d1b504ed2 100644
--- a/src/runtime/mheap.c
+++ b/src/runtime/mheap.c
@@ -47,6 +47,8 @@ MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass)
 
 	lock(h);
 	s = MHeap_AllocLocked(h, npage, sizeclass);
+	if(s != nil)
+		mstats.inuse_pages += npage;
 	unlock(h);
 	return s;
 }
@@ -108,6 +110,11 @@ HaveSpan:
 		for(n=0; n<npage; n++)
 			if(MHeapMapCache_GET(&h->mapcache, s->start+n, tmp) != 0)
 				MHeapMapCache_SET(&h->mapcache, s->start+n, 0);
+
+		// Need a list of large allocated spans.
+		// They have sizeclass == 0, so use heap.central[0].empty,
+		// since central[0] is otherwise unused.
+		MSpanList_Insert(&h->central[0].empty, s);
 	} else {
 		// Save cache entries for this span.
 		// If there's a size class, there aren't that many pages.
@@ -191,17 +198,38 @@ MHeap_Grow(MHeap *h, uintptr npage)
 }
 
 // Look up the span at the given page number.
+// Page number is guaranteed to be in map
+// and is guaranteed to be start or end of span.
 MSpan*
 MHeap_Lookup(MHeap *h, PageID p)
 {
 	return MHeapMap_Get(&h->map, p);
 }
 
+// Look up the span at the given page number.
+// Page number is *not* guaranteed to be in map
+// and may be anywhere in the span.
+// Map entries for the middle of a span are only
+// valid for allocated spans.  Free spans may have
+// other garbage in their middles, so we have to
+// check for that.
+MSpan*
+MHeap_LookupMaybe(MHeap *h, PageID p)
+{
+	MSpan *s;
+
+	s = MHeapMap_GetMaybe(&h->map, p);
+	if(s == nil || p < s->start || p - s->start >= s->npages)
+		return nil;
+	return s;
+}
+
 // Free the span back into the heap.
 void
 MHeap_Free(MHeap *h, MSpan *s)
 {
 	lock(h);
+	mstats.inuse_pages -= s->npages;
 	MHeap_FreeLocked(h, s);
 	unlock(h);
 }
@@ -266,6 +294,31 @@ MHeapMap_Get(MHeapMap *m, PageID k)
 	return m->p[i1]->p[i2]->s[i3];
 }
 
+MSpan*
+MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+{
+	int32 i1, i2, i3;
+	MHeapMapNode2 *p2;
+	MHeapMapNode3 *p3;
+
+	i3 = k & MHeapMap_Level3Mask;
+	k >>= MHeapMap_Level3Bits;
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		throw("MHeapMap_Get");
+
+	p2 = m->p[i1];
+	if(p2 == nil)
+		return nil;
+	p3 = p2->p[i2];
+	if(p3 == nil)
+		return nil;
+	return p3->s[i3];
+}
+
 void
 MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
 {
diff --git a/src/runtime/msize.c b/src/runtime/msize.c
index ff1ca7200..62d5c3ad9 100644
--- a/src/runtime/msize.c
+++ b/src/runtime/msize.c
@@ -57,7 +57,7 @@ SizeToClass(int32 size)
 void
 InitSizes(void)
 {
-	int32 align, sizeclass, size, nextsize, n;
+	int32 align, sizeclass, size, osize, nextsize, n;
 	uint32 i;
 	uintptr allocsize, npages;
 
@@ -81,7 +81,8 @@ InitSizes(void)
 		// the leftover is less than 1/8 of the total,
 		// so wasted space is at most 12.5%.
 		allocsize = PageSize;
-		while(allocsize%size > (PageSize/8))
+		osize = size + RefcountOverhead;
+		while(allocsize%osize > (PageSize/8))
 			allocsize += PageSize;
 		npages = allocsize >> PageShift;
 
@@ -92,7 +93,7 @@ InitSizes(void)
 		// different sizes.
 		if(sizeclass > 1
 		&& npages == class_to_allocnpages[sizeclass-1]
-		&& allocsize/size == allocsize/class_to_size[sizeclass-1]) {
+		&& allocsize/osize == allocsize/(class_to_size[sizeclass-1]+RefcountOverhead)) {
 			class_to_size[sizeclass-1] = size;
 			continue;
 		}
diff --git a/src/runtime/proc.c b/src/runtime/proc.c
index 943792f49..7435830ff 100644
--- a/src/runtime/proc.c
+++ b/src/runtime/proc.c
@@ -3,7 +3,7 @@
 // license that can be found in the LICENSE file.
 
 #include "runtime.h"
-#include "malloc.h"	/* so that acid generated from proc.c includes malloc data structures */
+#include "malloc.h"
 
 typedef struct Sched Sched;
 
@@ -118,6 +118,7 @@ initdone(void)
 {
 	// Let's go.
 	sched.predawn = 0;
+	mstats.enablegc = 1;
 
 	// If main·init_function started other goroutines,
 	// kick off new ms to handle them, like ready
@@ -146,7 +147,7 @@ malg(int32 stacksize)
 	byte *stk;
 
 	// 160 is the slop amount known to the stack growth code
-	g = mal(sizeof(G));
+	g = malloc(sizeof(G));
 	stk = stackalloc(160 + stacksize);
 	g->stack0 = stk;
 	g->stackguard = stk + 160;
@@ -444,7 +445,7 @@ matchmg(void)
 		m->nextg = g;
 		notewakeup(&m->havenextg);
 	}else{
-		m = mal(sizeof(M));
+		m = malloc(sizeof(M));
 		m->g0 = malg(8192);
 		m->nextg = g;
 		m->id = sched.mcount++;
@@ -525,6 +526,8 @@ scheduler(void)
 void
 sys·Gosched(void)
 {
+	if(g == m->g0)
+		throw("gosched of g0");
 	if(gosave(&g->sched) == 0){
 		g = m->g0;
 		gogo(&m->sched);
diff --git a/src/runtime/rt1_amd64_darwin.c b/src/runtime/rt1_amd64_darwin.c
index 453bd519c..b61475672 100644
--- a/src/runtime/rt1_amd64_darwin.c
+++ b/src/runtime/rt1_amd64_darwin.c
@@ -130,6 +130,7 @@ sighandler(int32 sig, struct siginfo *info, void *context)
 {
 	if(panicking)	// traceback already printed
 		sys_Exit(2);
+	panicking = 1;
 
 	_STRUCT_MCONTEXT64 *uc_mcontext = get_uc_mcontext(context);
 	_STRUCT_X86_THREAD_STATE64 *ss = get___ss(uc_mcontext);
@@ -282,11 +283,13 @@ lock(Lock *l)
 	if(xadd(&l->key, 1) > 1)	// someone else has it; wait
 		mach_semacquire(l->sema);
+	m->locks++;
 }
 
 void
 unlock(Lock *l)
 {
+	m->locks--;
 	if(xadd(&l->key, -1) > 0)	// someone else is waiting
 		mach_semrelease(l->sema);
 }
 
diff --git a/src/runtime/rt1_amd64_linux.c b/src/runtime/rt1_amd64_linux.c
index 74032e4c1..c0c203805 100644
--- a/src/runtime/rt1_amd64_linux.c
+++ b/src/runtime/rt1_amd64_linux.c
@@ -306,6 +306,8 @@ lock(Lock *l)
 {
 	uint32 v;
 
+	m->locks++;
+
 again:
 	v = l->key;
 	if((v&1) == 0){
@@ -349,6 +351,8 @@ unlock(Lock *l)
 {
 	uint32 v;
 
+	m->locks--;
+
 	// Atomically get value and clear lock bit.
 again:
 	v = l->key;
diff --git a/src/runtime/runtime.c b/src/runtime/runtime.c
index 00e3638ab..a972b753f 100644
--- a/src/runtime/runtime.c
+++ b/src/runtime/runtime.c
@@ -147,21 +147,25 @@ args(int32 c, uint8 **v)
 void
 goargs(void)
 {
-	string* goargv;
-	string* envv;
+	string *gargv;
+	string *genvv;
 	int32 i, envc;
 
-	goargv = (string*)argv;
-	for (i=0; i<argc; i++)
-		goargv[i] = gostring(argv[i]);
-	sys·Args.array = (byte*)argv;
+	for(envc=0; argv[argc+1+envc] != 0; envc++)
+		;
+
+	gargv = malloc(argc*sizeof gargv[0]);
+	genvv = malloc(envc*sizeof genvv[0]);
+
+	for(i=0; i<argc; i++)
+		gargv[i] = gostring(argv[i]);
+	sys·Args.array = (byte*)gargv;
 	sys·Args.nel = argc;
 	sys·Args.cap = argc;
 
-	envv = goargv + argc + 1;  // skip 0 at end of argv
-	for (envc = 0; envv[envc] != 0; envc++)
-		envv[envc] = gostring((uint8*)envv[envc]);
-	sys·Envs.array = (byte*)envv;
+	for(i=0; i<envc; i++)
+		genvv[i] = gostring(argv[argc+1+i]);
+	sys·Envs.array = (byte*)genvv;
 	sys·Envs.nel = envc;
 	sys·Envs.cap = envc;
 }
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 170657d86..5552c9e94 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -162,6 +162,7 @@ struct M
 	int32	siz2;
 	int32	id;
 	int32	mallocing;
+	int32	locks;
 	Note	havenextg;
 	G*	nextg;
 	M*	schedlink;
@@ -304,6 +305,9 @@ bool	ifaceeq(Iface, Iface);
 uint64	ifacehash(Iface);
 uint64	nohash(uint32, void*);
 uint32	noequal(uint32, void*, void*);
+void*	malloc(uintptr size);
+void*	mallocgc(uintptr size);
+void	free(void *v);
 
 #pragma	varargck	argpos	printf	1
diff --git a/test/gc.go b/test/gc.go
new file mode 100644
index 000000000..df9d05e18
--- /dev/null
+++ b/test/gc.go
@@ -0,0 +1,25 @@
+// $G $F.go && $L $F.$A && ./$A.out
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "malloc"
+
+func mk2() {
+	b := new([10000]byte);
+//	println(b, "stored at", &b);
+}
+
+func mk1() {
+	mk2();
+}
+
+func main() {
+	for i := 0; i < 10; i++ {
+		mk1();
+		malloc.GC();
+	}
+}
diff --git a/test/gc1.go b/test/gc1.go
new file mode 100644
index 000000000..d746e9c62
--- /dev/null
+++ b/test/gc1.go
@@ -0,0 +1,13 @@
+// $G $F.go && $L $F.$A && ./$A.out
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+	for i := 0; i < 1000000; i++ {
+		x := new([100]byte);
+	}
+}
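
MCentral_Grow and sweepspan must agree on how a span is carved up: n blocks of size bytes, followed by an array of n uint32 ref words at the end of the span. The sketch below is a hypothetical standalone program (not runtime code) that reproduces that layout arithmetic with illustrative npages and size values:

#include <stdio.h>

enum {
	PageShift = 12,		// 4K pages, as in this runtime
	RefcountOverhead = 4,	// one uint32 ref word per object
};

int
main(void)
{
	// Illustrative values: a one-page span of 48-byte blocks.
	int npages = 1, size = 48;
	long span = (long)npages << PageShift;

	// MCentral_Grow: n blocks fit once each is charged for its ref word.
	int n = (int)(span / (size + RefcountOverhead));

	// Blocks occupy [0, size*n); the ref array sits at offset size*n,
	// exactly where s->gcref = (uint32*)(p + size*n) points.
	long gcref = (long)size * n;
	printf("n=%d blocks, gcref array at offset %ld, %ld bytes left over\n",
		n, gcref, span - gcref - (long)RefcountOverhead*n);
	return 0;
}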
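
The pacing rule in gc() can be checked in isolation. This sketch, a hypothetical program whose names (nextgc, pages) are not from the patch, mirrors mstats.next_gc = inuse_pages + inuse_pages*gcpercent/100 and reproduces the 4M-to-8M example from the mgc0.c comment:

#include <stdio.h>

enum { PageShift = 12 };	// page size in this runtime is 4K

static long gcpercent = 100;	// default when $GOGC is unset

// Next gc fires once the in-use heap has grown by gcpercent percent.
static long
nextgc(long inuse_pages)
{
	return inuse_pages + inuse_pages*gcpercent/100;
}

int
main(void)
{
	long pages = (4L<<20) >> PageShift;	// 4M in use = 1024 pages
	// Prints 2048 pages = 8M, matching the comment in mgc0.c.
	printf("next gc at %ld pages (%ldM)\n",
		nextgc(pages), (nextgc(pages) << PageShift) >> 20);
	return 0;
}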
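
MHeapMap_GetMaybe exists because scanblock probes arbitrary word values, so the page-map lookup must return nil for garbage addresses instead of faulting. A toy three-level radix map in the same style (the bit widths here are made up for illustration, not the runtime's actual MHeapMap constants):

#include <stdio.h>
#include <stdlib.h>

enum {
	L1Bits = 8, L2Bits = 8, L3Bits = 8,
	L1Mask = (1<<L1Bits)-1, L2Mask = (1<<L2Bits)-1, L3Mask = (1<<L3Bits)-1,
};

typedef struct Node3 { void *s[1<<L3Bits]; } Node3;
typedef struct Node2 { Node3 *p[1<<L2Bits]; } Node2;
typedef struct Map   { Node2 *p[1<<L1Bits]; } Map;

// Like MHeapMap_GetMaybe: a missing interior node means "no span",
// so probing a bogus page number is always safe.
static void*
getmaybe(Map *m, unsigned long k)
{
	Node2 *p2;
	Node3 *p3;
	int i3 = k & L3Mask;  k >>= L3Bits;
	int i2 = k & L2Mask;  k >>= L2Bits;
	int i1 = k & L1Mask;  k >>= L1Bits;
	if(k != 0)
		return NULL;	// page number out of range entirely
	if((p2 = m->p[i1]) == NULL || (p3 = p2->p[i2]) == NULL)
		return NULL;	// interior node never allocated
	return p3->s[i3];
}

int
main(void)
{
	static Map m;	// zeroed: every lookup misses safely
	printf("%p %p\n", getmaybe(&m, 0x1234), getmaybe(&m, ~0UL));
	return 0;
}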