summary refs log tree commit diff
path: root/src/pkg/runtime/mheap.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/pkg/runtime/mheap.c')
-rw-r--r-- src/pkg/runtime/mheap.c 140
1 file changed, 69 insertions, 71 deletions
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index f4fbbee7a..fc80c2600 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -36,12 +36,12 @@ RecordSpan(void *vh, byte *p)
cap = 64*1024/sizeof(all[0]);
if(cap < h->nspancap*3/2)
cap = h->nspancap*3/2;
- all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]));
+ all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
if(all == nil)
runtime·throw("runtime: cannot allocate memory");
if(h->allspans) {
runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
- runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]));
+ runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
}
h->allspans = all;
h->nspancap = cap;
@@ -51,12 +51,12 @@ RecordSpan(void *vh, byte *p)
// Initialize the heap; fetch memory using alloc.
void
-runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
+runtime·MHeap_Init(MHeap *h)
{
uint32 i;
- runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
- runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
+ runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
+ runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
// h->mapcache needs no init
for(i=0; i<nelem(h->free); i++)
runtime·MSpanList_Init(&h->free[i]);
@@ -65,6 +65,23 @@ runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
runtime·MCentral_Init(&h->central[i], i);
}
+void
+runtime·MHeap_MapSpans(MHeap *h)
+{
+ uintptr n;
+
+ // Map spans array, PageSize at a time.
+ n = (uintptr)h->arena_used;
+ if(sizeof(void*) == 8)
+ n -= (uintptr)h->arena_start;
+ n = n / PageSize * sizeof(h->spans[0]);
+ n = ROUND(n, PageSize);
+ if(h->spans_mapped >= n)
+ return;
+ runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, &mstats.other_sys);
+ h->spans_mapped = n;
+}
+
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
@@ -73,7 +90,8 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32
MSpan *s;
runtime·lock(h);
- runtime·purgecachedstats(m->mcache);
+ mstats.heap_alloc += m->mcache->local_cachealloc;
+ m->mcache->local_cachealloc = 0;
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
mstats.heap_inuse += npage<<PageShift;
@@ -138,6 +156,7 @@ HaveSpan:
// is just a unique constant not seen elsewhere in the
// runtime, as a clue in case it turns up unexpectedly in
// memory or in a stack trace.
+ runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
}
s->npreleased = 0;
@@ -145,17 +164,15 @@ HaveSpan:
if(s->npages > npage) {
// Trim extra and put it back in the heap.
t = runtime·FixAlloc_Alloc(&h->spanalloc);
- mstats.mspan_inuse = h->spanalloc.inuse;
- mstats.mspan_sys = h->spanalloc.sys;
runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
s->npages = npage;
p = t->start;
if(sizeof(void*) == 8)
p -= ((uintptr)h->arena_start>>PageShift);
if(p > 0)
- h->map[p-1] = s;
- h->map[p] = t;
- h->map[p+t->npages-1] = t;
+ h->spans[p-1] = s;
+ h->spans[p] = t;
+ h->spans[p+t->npages-1] = t;
*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift); // copy "needs zeroing" mark
t->state = MSpanInUse;
MHeap_FreeLocked(h, t);
@@ -172,7 +189,7 @@ HaveSpan:
if(sizeof(void*) == 8)
p -= ((uintptr)h->arena_start>>PageShift);
for(n=0; n<npage; n++)
- h->map[p+n] = s;
+ h->spans[p+n] = s;
return s;
}
@@ -232,19 +249,16 @@ MHeap_Grow(MHeap *h, uintptr npage)
return false;
}
}
- mstats.heap_sys += ask;
// Create a fake "in use" span and free it, so that the
// right coalescing happens.
s = runtime·FixAlloc_Alloc(&h->spanalloc);
- mstats.mspan_inuse = h->spanalloc.inuse;
- mstats.mspan_sys = h->spanalloc.sys;
runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
p = s->start;
if(sizeof(void*) == 8)
p -= ((uintptr)h->arena_start>>PageShift);
- h->map[p] = s;
- h->map[p + s->npages - 1] = s;
+ h->spans[p] = s;
+ h->spans[p + s->npages - 1] = s;
s->state = MSpanInUse;
MHeap_FreeLocked(h, s);
return true;
@@ -261,7 +275,7 @@ runtime·MHeap_Lookup(MHeap *h, void *v)
p = (uintptr)v;
if(sizeof(void*) == 8)
p -= (uintptr)h->arena_start;
- return h->map[p >> PageShift];
+ return h->spans[p >> PageShift];
}
// Look up the span at the given address.
@@ -283,10 +297,8 @@ runtime·MHeap_LookupMaybe(MHeap *h, void *v)
q = p;
if(sizeof(void*) == 8)
q -= (uintptr)h->arena_start >> PageShift;
- s = h->map[q];
- if(s == nil || p < s->start || p - s->start >= s->npages)
- return nil;
- if(s->state != MSpanInUse)
+ s = h->spans[q];
+ if(s == nil || p < s->start || v >= s->limit || s->state != MSpanInUse)
return nil;
return s;
}
@@ -296,7 +308,8 @@ void
runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
runtime·lock(h);
- runtime·purgecachedstats(m->mcache);
+ mstats.heap_alloc += m->mcache->local_cachealloc;
+ m->mcache->local_cachealloc = 0;
mstats.heap_inuse -= s->npages<<PageShift;
if(acct) {
mstats.heap_alloc -= s->npages<<PageShift;
@@ -313,8 +326,6 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
MSpan *t;
PageID p;
- if(s->types.sysalloc)
- runtime·settype_sysfree(s);
s->types.compression = MTypes_Empty;
if(s->state != MSpanInUse || s->ref != 0) {
@@ -334,31 +345,31 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
p = s->start;
if(sizeof(void*) == 8)
p -= (uintptr)h->arena_start >> PageShift;
- if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
- tp = (uintptr*)(t->start<<PageShift);
- *tp |= *sp; // propagate "needs zeroing" mark
+ if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
+ if(t->npreleased == 0) { // cant't touch this otherwise
+ tp = (uintptr*)(t->start<<PageShift);
+ *tp |= *sp; // propagate "needs zeroing" mark
+ }
s->start = t->start;
s->npages += t->npages;
s->npreleased = t->npreleased; // absorb released pages
p -= t->npages;
- h->map[p] = s;
+ h->spans[p] = s;
runtime·MSpanList_Remove(t);
t->state = MSpanDead;
runtime·FixAlloc_Free(&h->spanalloc, t);
- mstats.mspan_inuse = h->spanalloc.inuse;
- mstats.mspan_sys = h->spanalloc.sys;
}
- if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
- tp = (uintptr*)(t->start<<PageShift);
- *sp |= *tp; // propagate "needs zeroing" mark
+ if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
+ if(t->npreleased == 0) { // cant't touch this otherwise
+ tp = (uintptr*)(t->start<<PageShift);
+ *sp |= *tp; // propagate "needs zeroing" mark
+ }
s->npages += t->npages;
s->npreleased += t->npreleased;
- h->map[p + s->npages - 1] = s;
+ h->spans[p + s->npages - 1] = s;
runtime·MSpanList_Remove(t);
t->state = MSpanDead;
runtime·FixAlloc_Free(&h->spanalloc, t);
- mstats.mspan_inuse = h->spanalloc.inuse;
- mstats.mspan_sys = h->spanalloc.sys;
}
// Insert s into appropriate list.
@@ -386,7 +397,7 @@ scavengelist(MSpan *list, uint64 now, uint64 limit)
sumreleased = 0;
for(s=list->next; s != list; s=s->next) {
- if((now - s->unusedsince) > limit) {
+ if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
released = (s->npages - s->npreleased) << PageShift;
mstats.heap_released += released;
sumreleased += released;
@@ -397,19 +408,26 @@ scavengelist(MSpan *list, uint64 now, uint64 limit)
return sumreleased;
}
-static uintptr
-scavenge(uint64 now, uint64 limit)
+static void
+scavenge(int32 k, uint64 now, uint64 limit)
{
uint32 i;
uintptr sumreleased;
MHeap *h;
- h = runtime·mheap;
+ h = &runtime·mheap;
sumreleased = 0;
for(i=0; i < nelem(h->free); i++)
sumreleased += scavengelist(&h->free[i], now, limit);
sumreleased += scavengelist(&h->large, now, limit);
- return sumreleased;
+
+ if(runtime·debug.gctrace > 0) {
+ if(sumreleased > 0)
+ runtime·printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
+ runtime·printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
+ k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
+ mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
+ }
}
static FuncVal forcegchelperv = {(void(*)(void))forcegchelper};
@@ -422,10 +440,7 @@ runtime·MHeap_Scavenger(void)
{
MHeap *h;
uint64 tick, now, forcegc, limit;
- uint32 k;
- uintptr sumreleased;
- byte *env;
- bool trace;
+ int32 k;
Note note, *notep;
g->issystem = true;
@@ -442,17 +457,10 @@ runtime·MHeap_Scavenger(void)
else
tick = limit/2;
- trace = false;
- env = runtime·getenv("GOGCTRACE");
- if(env != nil)
- trace = runtime·atoi(env) > 0;
-
- h = runtime·mheap;
+ h = &runtime·mheap;
for(k=0;; k++) {
runtime·noteclear(&note);
- runtime·entersyscallblock();
- runtime·notetsleep(&note, tick);
- runtime·exitsyscall();
+ runtime·notetsleepg(&note, tick);
runtime·lock(h);
now = runtime·nanotime();
@@ -464,24 +472,14 @@ runtime·MHeap_Scavenger(void)
runtime·noteclear(&note);
notep = &note;
runtime·newproc1(&forcegchelperv, (byte*)&notep, sizeof(notep), 0, runtime·MHeap_Scavenger);
- runtime·entersyscallblock();
- runtime·notesleep(&note);
- runtime·exitsyscall();
- if(trace)
+ runtime·notetsleepg(&note, -1);
+ if(runtime·debug.gctrace > 0)
runtime·printf("scvg%d: GC forced\n", k);
runtime·lock(h);
now = runtime·nanotime();
}
- sumreleased = scavenge(now, limit);
+ scavenge(k, now, limit);
runtime·unlock(h);
-
- if(trace) {
- if(sumreleased > 0)
- runtime·printf("scvg%d: %p MB released\n", k, sumreleased>>20);
- runtime·printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
- k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
- mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
- }
}
}
@@ -489,9 +487,9 @@ void
runtime∕debug·freeOSMemory(void)
{
runtime·gc(1);
- runtime·lock(runtime·mheap);
- scavenge(~(uintptr)0, 0);
- runtime·unlock(runtime·mheap);
+ runtime·lock(&runtime·mheap);
+ scavenge(-1, ~(uintptr)0, 0);
+ runtime·unlock(&runtime·mheap);
}
// Initialize a new span with the given start and npages.