Diffstat (limited to 'src/pkg/runtime/mheap.c')
-rw-r--r--  src/pkg/runtime/mheap.c  145
1 file changed, 108 insertions, 37 deletions
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index c877bfca9..f45149d63 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -27,11 +27,26 @@ RecordSpan(void *vh, byte *p)
 {
 	MHeap *h;
 	MSpan *s;
+	MSpan **all;
+	uint32 cap;
 
 	h = vh;
 	s = (MSpan*)p;
-	s->allnext = h->allspans;
-	h->allspans = s;
+	if(h->nspan >= h->nspancap) {
+		cap = 64*1024/sizeof(all[0]);
+		if(cap < h->nspancap*3/2)
+			cap = h->nspancap*3/2;
+		all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]));
+		if(all == nil)
+			runtime·throw("runtime: cannot allocate memory");
+		if(h->allspans) {
+			runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
+			runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]));
+		}
+		h->allspans = all;
+		h->nspancap = cap;
+	}
+	h->allspans[h->nspan++] = s;
 }
 
 // Initialize the heap; fetch memory using alloc.
@@ -53,12 +68,12 @@ runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
 // Allocate a new span of npage pages from the heap
 // and record its size class in the HeapMap and HeapMapCache.
 MSpan*
-runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
+runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
 {
 	MSpan *s;
 
 	runtime·lock(h);
-	runtime·purgecachedstats(m);
+	runtime·purgecachedstats(m->mcache);
 	s = MHeap_AllocLocked(h, npage, sizeclass);
 	if(s != nil) {
 		mstats.heap_inuse += npage<<PageShift;
@@ -68,6 +83,8 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
 		}
 	}
 	runtime·unlock(h);
+	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
+		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
 	return s;
 }
 
@@ -123,14 +140,15 @@ HaveSpan:
 		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
 		t->state = MSpanInUse;
 		MHeap_FreeLocked(h, t);
+		t->unusedsince = s->unusedsince; // preserve age
 	}
-
-	if(*(uintptr*)(s->start<<PageShift) != 0)
-		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+	s->unusedsince = 0;
 
 	// Record span info, because gc needs to be
 	// able to map interior pointer to containing span.
 	s->sizeclass = sizeclass;
+	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime·class_to_size[sizeclass]);
+	s->types.compression = MTypes_Empty;
 	p = s->start;
 	if(sizeof(void*) == 8)
 		p -= ((uintptr)h->arena_start>>PageShift);
@@ -259,7 +277,7 @@ void
 runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
 	runtime·lock(h);
-	runtime·purgecachedstats(m);
+	runtime·purgecachedstats(m->mcache);
 	mstats.heap_inuse -= s->npages<<PageShift;
 	if(acct) {
 		mstats.heap_alloc -= s->npages<<PageShift;
@@ -276,16 +294,22 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 	MSpan *t;
 	PageID p;
 
+	if(s->types.sysalloc)
+		runtime·settype_sysfree(s);
+	s->types.compression = MTypes_Empty;
+
 	if(s->state != MSpanInUse || s->ref != 0) {
 		runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
 		runtime·throw("MHeap_FreeLocked - invalid free");
 	}
 	mstats.heap_idle += s->npages<<PageShift;
 	s->state = MSpanFree;
-	s->unusedsince = 0;
-	s->npreleased = 0;
 	runtime·MSpanList_Remove(s);
 	sp = (uintptr*)(s->start<<PageShift);
+	// Stamp newly unused spans. The scavenger will use that
+	// info to potentially give back some pages to the OS.
+	s->unusedsince = runtime·nanotime();
+	s->npreleased = 0;
 
 	// Coalesce with earlier, later spans.
 	p = s->start;
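[Editor's note] The RecordSpan hunk above drops the old allspans linked list in favor of a flat, growable array, so the collector can index every span directly. The growth policy is a 64KB floor plus 1.5x growth, which keeps appends amortized O(1). Below is a minimal standalone sketch of the same pattern in plain C, with malloc/memcpy/free standing in for runtime·SysAlloc, runtime·memmove, and runtime·SysFree; SpanArray and its field names are illustrative, not runtime types.

/* Sketch of the growable-array pattern RecordSpan now uses. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	void **items;
	uint32_t len;   /* number of recorded spans (h->nspan) */
	uint32_t cap;   /* allocated slots (h->nspancap) */
} SpanArray;

static void
spanarray_append(SpanArray *a, void *s)
{
	void **all;
	uint32_t cap;

	if(a->len >= a->cap) {
		/* Same policy as RecordSpan: at least 64KB worth of
		 * pointers, then grow by 1.5x. */
		cap = 64*1024/sizeof(all[0]);
		if(cap < a->cap*3/2)
			cap = a->cap*3/2;
		all = malloc(cap*sizeof(all[0]));
		if(all == NULL) {
			fprintf(stderr, "cannot allocate memory\n");
			exit(1);
		}
		if(a->items != NULL) {
			/* Copy the old contents, then free the old array. */
			memcpy(all, a->items, a->len*sizeof(all[0]));
			free(a->items);
		}
		a->items = all;
		a->cap = cap;
	}
	a->items[a->len++] = s;
}

int
main(void)
{
	SpanArray a = {0};
	int dummy;
	uint32_t i;

	for(i = 0; i < 100000; i++)
		spanarray_append(&a, &dummy);
	printf("len=%u cap=%u\n", a.len, a.cap);
	free(a.items);
	return 0;
}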
@@ -325,6 +349,52 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 		runtime·MSpanList_Insert(&h->large, s);
 }
 
+static void
+forcegchelper(Note *note)
+{
+	runtime·gc(1);
+	runtime·notewakeup(note);
+}
+
+static uintptr
+scavengelist(MSpan *list, uint64 now, uint64 limit)
+{
+	uintptr released, sumreleased;
+	MSpan *s;
+
+	if(runtime·MSpanList_IsEmpty(list))
+		return 0;
+
+	sumreleased = 0;
+	for(s=list->next; s != list; s=s->next) {
+		if((now - s->unusedsince) > limit) {
+			released = (s->npages - s->npreleased) << PageShift;
+			mstats.heap_released += released;
+			sumreleased += released;
+			s->npreleased = s->npages;
+			runtime·SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
+		}
+	}
+	return sumreleased;
+}
+
+static uintptr
+scavenge(uint64 now, uint64 limit)
+{
+	uint32 i;
+	uintptr sumreleased;
+	MHeap *h;
+
+	h = runtime·mheap;
+	sumreleased = 0;
+	for(i=0; i < nelem(h->free); i++)
+		sumreleased += scavengelist(&h->free[i], now, limit);
+	sumreleased += scavengelist(&h->large, now, limit);
+	return sumreleased;
+}
+
+static FuncVal forcegchelperv = {(void(*)(void))forcegchelper};
+
 // Release (part of) unused memory to OS.
 // Goroutine created at startup.
 // Loop forever.
@@ -332,13 +402,12 @@ void
 runtime·MHeap_Scavenger(void)
 {
 	MHeap *h;
-	MSpan *s, *list;
 	uint64 tick, now, forcegc, limit;
-	uint32 k, i;
-	uintptr released, sumreleased;
+	uint32 k;
+	uintptr sumreleased;
 	byte *env;
 	bool trace;
-	Note note;
+	Note note, *notep;
 
 	// If we go two minutes without a garbage collection, force one to run.
 	forcegc = 2*60*1e9;
@@ -356,10 +425,10 @@ runtime·MHeap_Scavenger(void)
 	if(env != nil)
 		trace = runtime·atoi(env) > 0;
 
-	h = &runtime·mheap;
+	h = runtime·mheap;
 	for(k=0;; k++) {
 		runtime·noteclear(&note);
-		runtime·entersyscall();
+		runtime·entersyscallblock();
 		runtime·notetsleep(&note, tick);
 		runtime·exitsyscall();
 
@@ -367,30 +436,21 @@
 		now = runtime·nanotime();
 		if(now - mstats.last_gc > forcegc) {
 			runtime·unlock(h);
-			runtime·gc(1);
+			// The scavenger can not block other goroutines,
+			// otherwise deadlock detector can fire spuriously.
+			// GC blocks other goroutines via the runtime·worldsema.
+			runtime·noteclear(&note);
+			notep = &note;
+			runtime·newproc1(&forcegchelperv, (byte*)&notep, sizeof(notep), 0, runtime·MHeap_Scavenger);
+			runtime·entersyscallblock();
+			runtime·notesleep(&note);
+			runtime·exitsyscall();
+			if(trace)
+				runtime·printf("scvg%d: GC forced\n", k);
 			runtime·lock(h);
 			now = runtime·nanotime();
-			if (trace)
-				runtime·printf("scvg%d: GC forced\n", k);
-		}
-		sumreleased = 0;
-		for(i=0; i < nelem(h->free)+1; i++) {
-			if(i < nelem(h->free))
-				list = &h->free[i];
-			else
-				list = &h->large;
-			if(runtime·MSpanList_IsEmpty(list))
-				continue;
-			for(s=list->next; s != list; s=s->next) {
-				if(s->unusedsince != 0 && (now - s->unusedsince) > limit) {
-					released = (s->npages - s->npreleased) << PageShift;
-					mstats.heap_released += released;
-					sumreleased += released;
-					s->npreleased = s->npages;
-					runtime·SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
-				}
-			}
 		}
+		sumreleased = scavenge(now, limit);
 		runtime·unlock(h);
 
 		if(trace) {
@@ -403,6 +463,15 @@
 	}
 }
 
+void
+runtime∕debug·freeOSMemory(void)
+{
+	runtime·gc(1);
+	runtime·lock(runtime·mheap);
+	scavenge(~(uintptr)0, 0);
+	runtime·unlock(runtime·mheap);
+}
+
 // Initialize a new span with the given start and npages.
 void
 runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
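[Editor's note] The rewritten scavenger above factors the free-list walk into scavengelist/scavenge, and the new unusedsince stamp in MHeap_FreeLocked is what makes the age test meaningful: a span qualifies once now - unusedsince exceeds limit, and only the pages not yet returned (npages - npreleased) count toward heap_released. Below is a standalone sketch of that walk in plain C, with madvise(MADV_DONTNEED) standing in for runtime·SysUnused and an illustrative Span struct in place of the runtime's MSpan; PageShift = 12 (4KB pages) is an assumption.

/* Release pages of spans unused for longer than `limit` nanoseconds,
 * mirroring scavengelist() above. */
#include <stdint.h>
#include <sys/mman.h>

enum { PageShift = 12 };                /* assumed 4KB pages */

typedef struct Span Span;
struct Span {
	Span *next, *prev;              /* circular doubly-linked list */
	uintptr_t start;                /* first page number of the span */
	uintptr_t npages;               /* span length in pages */
	uintptr_t npreleased;           /* pages already given back */
	uint64_t unusedsince;           /* when the span became free, in ns */
};

uintptr_t
scavenge_list(Span *list, uint64_t now, uint64_t limit)
{
	uintptr_t released, sumreleased;
	Span *s;

	sumreleased = 0;
	/* `list` is a sentinel node: iterate until we loop back to it. */
	for(s = list->next; s != list; s = s->next) {
		if(now - s->unusedsince > limit) {
			/* Count only pages not already released; a span can
			 * be scanned again before it is reused. */
			released = (s->npages - s->npreleased) << PageShift;
			sumreleased += released;
			s->npreleased = s->npages;
			madvise((void*)(s->start << PageShift),
			        s->npages << PageShift, MADV_DONTNEED);
		}
	}
	return sumreleased;
}

Note how runtime∕debug·freeOSMemory above reuses the same path with now = ~(uintptr)0 and limit = 0, so every free span qualifies immediately.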
@@ -414,9 +483,11 @@ runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
 	span->freelist = nil;
 	span->ref = 0;
 	span->sizeclass = 0;
+	span->elemsize = 0;
 	span->state = 0;
 	span->unusedsince = 0;
 	span->npreleased = 0;
+	span->types.compression = MTypes_Empty;
 }
 
 // Initialize an empty doubly-linked list.
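[Editor's note] MSpan_Init now zeroes the new elemsize field, which MHeap_AllocLocked (the hunk at -123,14 above) fills in at allocation time: size class 0 marks a large span holding one object covering all its pages, while any other class uses the fixed per-class object size. A small sketch of that rule in plain C; the class_to_size values below are illustrative stand-ins for the runtime's runtime·class_to_size table.

/* Sketch of the elemsize rule from MHeap_AllocLocked above. */
#include <stdint.h>

enum { PageShift = 12 };                /* assumed 4KB pages */

/* First few small size classes, for illustration only. */
static const uintptr_t class_to_size[] = { 0, 8, 16, 32, 48, 64 };

uintptr_t
span_elemsize(int32_t sizeclass, uintptr_t npages)
{
	if(sizeclass == 0)
		return npages << PageShift;     /* large span: one object */
	return class_to_size[sizeclass];        /* small span: fixed size */
}

/* e.g. span_elemsize(0, 2) == 8192 for a two-page large span,
 * span_elemsize(3, 1) == 32 for size class 3. */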