summaryrefslogtreecommitdiff
path: root/src/pkg/runtime/mgc0.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/pkg/runtime/mgc0.c')
-rw-r--r--  src/pkg/runtime/mgc0.c  227
1 files changed, 136 insertions, 91 deletions
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 2324eff29..6dcb61091 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -19,6 +19,13 @@ enum {
Debug = 0
};
+typedef struct BlockList BlockList;
+struct BlockList
+{
+ byte *obj;
+ uintptr size;
+};
+
extern byte data[];
extern byte etext[];
extern byte end[];
@@ -26,8 +33,8 @@ extern byte end[];
static G *fing;
static Finalizer *finq;
static int32 fingwait;
+static BlockList *bl, *ebl;
-static void sweepblock(byte*, int64, uint32*, int32);
static void runfinq(void);
enum {
@@ -35,7 +42,7 @@ enum {
};
static void
-scanblock(int32 depth, byte *b, int64 n)
+scanblock(byte *b, int64 n)
{
int32 off;
void *obj;
@@ -43,48 +50,49 @@ scanblock(int32 depth, byte *b, int64 n)
uint32 *refp, ref;
void **vp;
int64 i;
-
- if(Debug > 1)
- printf("%d scanblock %p %D\n", depth, b, n);
- off = (uint32)(uintptr)b & (PtrSize-1);
- if(off) {
- b += PtrSize - off;
- n -= PtrSize - off;
- }
-
- vp = (void**)b;
- n /= PtrSize;
- for(i=0; i<n; i++) {
- obj = vp[i];
- if(obj == nil)
- continue;
- if(mheap.closure_min != nil && mheap.closure_min <= (byte*)obj && (byte*)obj < mheap.closure_max) {
- if((((uintptr)obj) & 63) != 0)
- continue;
-
- // Looks like a Native Client closure.
- // Actual pointer is pointed at by address in first instruction.
- // Embedded pointer starts at byte 2.
- // If it is f4f4f4f4 then that space hasn't been
- // used for a closure yet (f4 is the HLT instruction).
- // See nacl/386/closure.c for more.
- void **pp;
- pp = *(void***)((byte*)obj+2);
- if(pp == (void**)0xf4f4f4f4) // HLT... - not a closure after all
- continue;
- obj = *pp;
+ BlockList *w;
+
+ w = bl;
+ w->obj = b;
+ w->size = n;
+ w++;
+
+ while(w > bl) {
+ w--;
+ b = w->obj;
+ n = w->size;
+
+ if(Debug > 1)
+ runtime·printf("scanblock %p %D\n", b, n);
+ off = (uint32)(uintptr)b & (PtrSize-1);
+ if(off) {
+ b += PtrSize - off;
+ n -= PtrSize - off;
}
- if(mheap.min <= (byte*)obj && (byte*)obj < mheap.max) {
- if(mlookup(obj, &obj, &size, nil, &refp)) {
- ref = *refp;
- switch(ref & ~RefFlags) {
- case RefNone:
- if(Debug > 1)
- printf("%d found at %p: ", depth, &vp[i]);
- *refp = RefSome | (ref & RefFlags);
- if(!(ref & RefNoPointers))
- scanblock(depth+1, obj, size);
- break;
+
+ vp = (void**)b;
+ n /= PtrSize;
+ for(i=0; i<n; i++) {
+ obj = vp[i];
+ if(obj == nil)
+ continue;
+ if(runtime·mheap.min <= (byte*)obj && (byte*)obj < runtime·mheap.max) {
+ if(runtime·mlookup(obj, &obj, &size, nil, &refp)) {
+ ref = *refp;
+ switch(ref & ~RefFlags) {
+ case RefNone:
+ if(Debug > 1)
+ runtime·printf("found at %p: ", &vp[i]);
+ *refp = RefSome | (ref & RefFlags);
+ if(!(ref & RefNoPointers)) {
+ if(w >= ebl)
+ runtime·throw("scanblock: garbage collection stack overflow");
+ w->obj = obj;
+ w->size = size;
+ w++;
+ }
+ break;
+ }
}
}
}
@@ -102,10 +110,10 @@ scanstack(G *gp)
else
sp = gp->sched.sp;
if(Debug > 1)
- printf("scanstack %d %p\n", gp->goid, sp);
+ runtime·printf("scanstack %d %p\n", gp->goid, sp);
stk = (Stktop*)gp->stackbase;
while(stk) {
- scanblock(0, sp, (byte*)stk - sp);
+ scanblock(sp, (byte*)stk - sp);
sp = stk->gobuf.sp;
stk = (Stktop*)stk->stackbase;
}
@@ -119,36 +127,57 @@ markfin(void *v)
size = 0;
refp = nil;
- if(!mlookup(v, &v, &size, nil, &refp) || !(*refp & RefHasFinalizer))
- throw("mark - finalizer inconsistency");
+ if(!runtime·mlookup(v, &v, &size, nil, &refp) || !(*refp & RefHasFinalizer))
+ runtime·throw("mark - finalizer inconsistency");
// do not mark the finalizer block itself. just mark the things it points at.
- scanblock(1, v, size);
+ scanblock(v, size);
}
static void
mark(void)
{
G *gp;
+ uintptr blsize, nobj;
+
+ // Figure out how big an object stack we need.
+ // Get a new one if we need more than we have
+ // or we need significantly less than we have.
+ nobj = mstats.heap_objects;
+ if(nobj > ebl - bl || nobj < (ebl-bl)/4) {
+ if(bl != nil)
+ runtime·SysFree(bl, (byte*)ebl - (byte*)bl);
+
+ // While we're allocating a new object stack,
+ // add 20% headroom and also round up to
+ // the nearest page boundary, since mmap
+ // will round up anyway.
+ nobj = nobj * 12/10;
+ blsize = nobj * sizeof *bl;
+ blsize = (blsize + 4095) & ~4095;
+ nobj = blsize / sizeof *bl;
+ bl = runtime·SysAlloc(blsize);
+ ebl = bl + nobj;
+ }
// mark data+bss.
- // skip mheap itself, which has no interesting pointers
+ // skip runtime·mheap itself, which has no interesting pointers
// and is mostly zeroed and would not otherwise be paged in.
- scanblock(0, data, (byte*)&mheap - data);
- scanblock(0, (byte*)(&mheap+1), end - (byte*)(&mheap+1));
+ scanblock(data, (byte*)&runtime·mheap - data);
+ scanblock((byte*)(&runtime·mheap+1), end - (byte*)(&runtime·mheap+1));
// mark stacks
- for(gp=allg; gp!=nil; gp=gp->alllink) {
+ for(gp=runtime·allg; gp!=nil; gp=gp->alllink) {
switch(gp->status){
default:
- printf("unexpected G.status %d\n", gp->status);
- throw("mark - bad status");
+ runtime·printf("unexpected G.status %d\n", gp->status);
+ runtime·throw("mark - bad status");
case Gdead:
break;
case Grunning:
case Grecovery:
if(gp != g)
- throw("mark - world not stopped");
+ runtime·throw("mark - world not stopped");
scanstack(gp);
break;
case Grunnable:
@@ -160,7 +189,7 @@ mark(void)
}
// mark things pointed at by objects with finalizers
- walkfintab(markfin);
+ runtime·walkfintab(markfin);
}
// free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
@@ -181,16 +210,16 @@ sweepspan(MSpan *s)
case RefNone:
// Free large object.
mstats.alloc -= s->npages<<PageShift;
- runtime_memclr(p, s->npages<<PageShift);
+ runtime·memclr(p, s->npages<<PageShift);
if(ref & RefProfiled)
- MProf_Free(p, s->npages<<PageShift);
+ runtime·MProf_Free(p, s->npages<<PageShift);
s->gcref0 = RefFree;
- MHeap_Free(&mheap, s, 1);
+ runtime·MHeap_Free(&runtime·mheap, s, 1);
break;
case RefNone|RefHasFinalizer:
- f = getfinalizer(p, 1);
+ f = runtime·getfinalizer(p, 1);
if(f == nil)
- throw("finalizer inconsistency");
+ runtime·throw("finalizer inconsistency");
f->arg = p;
f->next = finq;
finq = f;
@@ -205,7 +234,7 @@ sweepspan(MSpan *s)
}
// Chunk full of small blocks.
- MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+ runtime·MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
gcrefp = s->gcref;
gcrefep = s->gcref + n;
for(; gcrefp < gcrefep; gcrefp++, p += size) {
@@ -216,19 +245,19 @@ sweepspan(MSpan *s)
case RefNone:
// Free small object.
if(ref & RefProfiled)
- MProf_Free(p, size);
+ runtime·MProf_Free(p, size);
*gcrefp = RefFree;
c = m->mcache;
if(size > sizeof(uintptr))
((uintptr*)p)[1] = 1; // mark as "needs to be zeroed"
mstats.alloc -= size;
mstats.by_size[s->sizeclass].nfree++;
- MCache_Free(c, p, s->sizeclass, size);
+ runtime·MCache_Free(c, p, s->sizeclass, size);
break;
case RefNone|RefHasFinalizer:
- f = getfinalizer(p, 1);
+ f = runtime·getfinalizer(p, 1);
if(f == nil)
- throw("finalizer inconsistency");
+ runtime·throw("finalizer inconsistency");
f->arg = p;
f->next = finq;
finq = f;
@@ -247,7 +276,7 @@ sweep(void)
{
MSpan *s;
- for(s = mheap.allspans; s != nil; s = s->allnext)
+ for(s = runtime·mheap.allspans; s != nil; s = s->allnext)
if(s->state == MSpanInUse)
sweepspan(s);
}
@@ -273,12 +302,27 @@ stealcache(void)
{
M *m;
- for(m=allm; m; m=m->alllink)
- MCache_ReleaseAll(m->mcache);
+ for(m=runtime·allm; m; m=m->alllink)
+ runtime·MCache_ReleaseAll(m->mcache);
+}
+
+static void
+cachestats(void)
+{
+ M *m;
+ MCache *c;
+
+ for(m=runtime·allm; m; m=m->alllink) {
+ c = m->mcache;
+ mstats.heap_alloc += c->local_alloc;
+ c->local_alloc = 0;
+ mstats.heap_objects += c->local_objects;
+ c->local_objects = 0;
+ }
}
void
-gc(int32 force)
+runtime·gc(int32 force)
{
int64 t0, t1;
byte *p;
@@ -292,28 +336,29 @@ gc(int32 force)
// problems, don't bother trying to run gc
// while holding a lock. The next mallocgc
// without a lock will do the gc instead.
- if(!mstats.enablegc || m->locks > 0 || panicking)
+ if(!mstats.enablegc || m->locks > 0 || runtime·panicking)
return;
if(gcpercent == -2) { // first time through
- p = getenv("GOGC");
+ p = runtime·getenv("GOGC");
if(p == nil || p[0] == '\0')
gcpercent = 100;
- else if(strcmp(p, (byte*)"off") == 0)
+ else if(runtime·strcmp(p, (byte*)"off") == 0)
gcpercent = -1;
else
- gcpercent = atoi(p);
+ gcpercent = runtime·atoi(p);
}
if(gcpercent < 0)
return;
- semacquire(&gcsema);
- t0 = nanotime();
+ runtime·semacquire(&gcsema);
+ t0 = runtime·nanotime();
m->gcing = 1;
- stoptheworld();
- if(mheap.Lock.key != 0)
- throw("mheap locked during gc");
+ runtime·stoptheworld();
+ if(runtime·mheap.Lock.key != 0)
+ runtime·throw("runtime·mheap locked during gc");
if(force || mstats.heap_alloc >= mstats.next_gc) {
+ cachestats();
mark();
sweep();
stealcache();
@@ -326,25 +371,25 @@ gc(int32 force)
if(fp != nil) {
// kick off or wake up goroutine to run queued finalizers
if(fing == nil)
- fing = newproc1((byte*)runfinq, nil, 0, 0);
+ fing = runtime·newproc1((byte*)runfinq, nil, 0, 0);
else if(fingwait) {
fingwait = 0;
- ready(fing);
+ runtime·ready(fing);
}
}
m->locks--;
- t1 = nanotime();
+ t1 = runtime·nanotime();
mstats.numgc++;
mstats.pause_ns += t1 - t0;
if(mstats.debuggc)
- printf("pause %D\n", t1-t0);
- semrelease(&gcsema);
- starttheworld();
+ runtime·printf("pause %D\n", t1-t0);
+ runtime·semrelease(&gcsema);
+ runtime·starttheworld();
// give the queued finalizers, if any, a chance to run
if(fp != nil)
- gosched();
+ runtime·gosched();
}
static void
@@ -365,20 +410,20 @@ runfinq(void)
if(f == nil) {
fingwait = 1;
g->status = Gwaiting;
- gosched();
+ runtime·gosched();
continue;
}
for(; f; f=next) {
next = f->next;
- frame = mal(sizeof(uintptr) + f->nret);
+ frame = runtime·mal(sizeof(uintptr) + f->nret);
*(void**)frame = f->arg;
reflect·call((byte*)f->fn, frame, sizeof(uintptr) + f->nret);
- free(frame);
+ runtime·free(frame);
f->fn = nil;
f->arg = nil;
f->next = nil;
- free(f);
+ runtime·free(f);
}
- gc(1); // trigger another gc to clean up the finalized objects, if possible
+ runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible
}
}