Diffstat (limited to 'src/pkg/runtime/mheap.c')
-rw-r--r--  src/pkg/runtime/mheap.c  96
1 file changed, 63 insertions, 33 deletions
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index 4bb7f14e3..0c9ac0a09 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -41,7 +41,6 @@ runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
- runtime·MHeapMap_Init(&h->map, alloc);
// h->mapcache needs no init
for(i=0; i<nelem(h->free); i++)
runtime·MSpanList_Init(&h->free[i]);
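The hunk above drops the separate MHeapMap structure: with the heap confined to a single tracked arena, span lookup becomes a plain array indexed by page number. A minimal sketch of that flat map in standard C (hypothetical names; PAGE_SHIFT, page_map, and arena_start are assumptions of the sketch, not the runtime's identifiers):

/*
 * Sketch only: a flat span map replacing the separate MHeapMap.
 * One slot per heap page; on 64-bit the index is taken relative to
 * arena_start so the array stays small.
 */
#include <stdint.h>

#define PAGE_SHIFT 12            /* assumed page size: 4 KB */
#define MAP_SLOTS  (1u << 20)    /* enough slots for a 4 GB arena */

typedef struct Span Span;        /* stand-in for the runtime's MSpan */

static Span     *page_map[MAP_SLOTS];   /* analogue of h->map */
static uintptr_t arena_start;           /* analogue of h->arena_start */

/* Point every page in [pageid, pageid+npages) at span s. */
static void
map_set(uintptr_t pageid, uintptr_t npages, Span *s)
{
	uintptr_t p = pageid;
	if (sizeof(void *) == 8)     /* same 64-bit offset as in the diff */
		p -= arena_start >> PAGE_SHIFT;
	for (uintptr_t n = 0; n < npages; n++)
		page_map[p + n] = s;
}

Setting an entry is then a single store, which is why the MHeapMap_Set calls in the hunks below turn into plain h->map[...] assignments.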
@@ -79,6 +78,7 @@ MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
uintptr n;
MSpan *s, *t;
+ PageID p;
// Try in fixed-size lists up to max.
for(n=npage; n < nelem(h->free); n++) {
@@ -112,18 +112,29 @@ HaveSpan:
mstats.mspan_sys = h->spanalloc.sys;
runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
s->npages = npage;
- runtime·MHeapMap_Set(&h->map, t->start - 1, s);
- runtime·MHeapMap_Set(&h->map, t->start, t);
- runtime·MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
+ p = t->start;
+ if(sizeof(void*) == 8)
+ p -= ((uintptr)h->arena_start>>PageShift);
+ if(p > 0)
+ h->map[p-1] = s;
+ h->map[p] = t;
+ h->map[p+t->npages-1] = t;
+ *(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift); // copy "needs zeroing" mark
t->state = MSpanInUse;
MHeap_FreeLocked(h, t);
}
+ if(*(uintptr*)(s->start<<PageShift) != 0)
+ runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+
// Record span info, because gc needs to be
// able to map interior pointer to containing span.
s->sizeclass = sizeclass;
+ p = s->start;
+ if(sizeof(void*) == 8)
+ p -= ((uintptr)h->arena_start>>PageShift);
for(n=0; n<npage; n++)
- runtime·MHeapMap_Set(&h->map, s->start+n, s);
+ h->map[p+n] = s;
return s;
}
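Two things happen in this hunk besides the map rewrite: when the allocator splits a span, the "needs zeroing" mark stored in the first word of the span's memory is copied into the freed tail, and a span is cleared on allocation only if that mark is nonzero. A sketch of the trick under the same assumptions as above (standard C, hypothetical names):

/*
 * Sketch only: the first word of a free span's memory doubles as a
 * dirty mark. It is copied to the tail when a span is split, and the
 * span is zeroed before reuse only when the mark is set.
 */
#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT 12            /* assumed page size: 4 KB */

/* Copy the dirty mark from the head of a split span to its new tail. */
static void
copy_mark(uintptr_t head_page, uintptr_t tail_page)
{
	*(uintptr_t *)(tail_page << PAGE_SHIFT) =
		*(uintptr_t *)(head_page << PAGE_SHIFT);
}

/* Zero a span's memory only if its first word marks it as dirty. */
static void
zero_if_needed(uintptr_t start_page, uintptr_t npages)
{
	uintptr_t *first = (uintptr_t *)(start_page << PAGE_SHIFT);
	if (*first != 0)
		memset(first, 0, npages << PAGE_SHIFT);
}

Freshly mapped memory arrives zeroed, so a zero first word means the span has never been dirtied and the memclr can be skipped.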
@@ -161,6 +172,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
uintptr ask;
void *v;
MSpan *s;
+ PageID p;
// Ask for a big chunk, to reduce the number of mappings
// the operating system needs to track; also amortizes
@@ -171,29 +183,21 @@ MHeap_Grow(MHeap *h, uintptr npage)
if(ask < HeapAllocChunk)
ask = HeapAllocChunk;
- v = runtime·SysAlloc(ask);
+ v = runtime·MHeap_SysAlloc(h, ask);
if(v == nil) {
if(ask > (npage<<PageShift)) {
ask = npage<<PageShift;
- v = runtime·SysAlloc(ask);
+ v = runtime·MHeap_SysAlloc(h, ask);
}
if(v == nil)
return false;
}
mstats.heap_sys += ask;
- if((byte*)v < h->min || h->min == nil)
- h->min = v;
- if((byte*)v+ask > h->max)
- h->max = (byte*)v+ask;
-
- // NOTE(rsc): In tcmalloc, if we've accumulated enough
- // system allocations, the heap map gets entirely allocated
- // in 32-bit mode. (In 64-bit mode that's not practical.)
- if(!runtime·MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
- runtime·SysFree(v, ask);
- return false;
- }
+ if((byte*)v < h->arena_start || h->arena_start == nil)
+ h->arena_start = v;
+ if((byte*)v+ask > h->arena_end)
+ h->arena_end = (byte*)v+ask;
// Create a fake "in use" span and free it, so that the
// right coalescing happens.
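MHeap_Grow now allocates through runtime·MHeap_SysAlloc rather than raw runtime·SysAlloc, and records the arena bounds in h->arena_start/h->arena_end instead of h->min/h->max; those bounds are what MHeap_LookupMaybe checks below before touching the map. A sketch of the bookkeeping (hypothetical names):

/*
 * Sketch only: after each system allocation, widen the recorded arena
 * bounds so lookups can bounds-check candidate pointers against them.
 */
#include <stddef.h>
#include <stdint.h>

static uint8_t *arena_start, *arena_end;

static void
record_arena(void *v, size_t ask)
{
	if ((uint8_t *)v < arena_start || arena_start == NULL)
		arena_start = v;
	if ((uint8_t *)v + ask > arena_end)
		arena_end = (uint8_t *)v + ask;
}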
@@ -201,35 +205,50 @@ MHeap_Grow(MHeap *h, uintptr npage)
mstats.mspan_inuse = h->spanalloc.inuse;
mstats.mspan_sys = h->spanalloc.sys;
runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
- runtime·MHeapMap_Set(&h->map, s->start, s);
- runtime·MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+ p = s->start;
+ if(sizeof(void*) == 8)
+ p -= ((uintptr)h->arena_start>>PageShift);
+ h->map[p] = s;
+ h->map[p + s->npages - 1] = s;
s->state = MSpanInUse;
MHeap_FreeLocked(h, s);
return true;
}
-// Look up the span at the given page number.
-// Page number is guaranteed to be in map
+// Look up the span at the given address.
+// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
-runtime·MHeap_Lookup(MHeap *h, PageID p)
+runtime·MHeap_Lookup(MHeap *h, void *v)
{
- return runtime·MHeapMap_Get(&h->map, p);
+ uintptr p;
+
+ p = (uintptr)v;
+ if(sizeof(void*) == 8)
+ p -= (uintptr)h->arena_start;
+ return h->map[p >> PageShift];
}
-// Look up the span at the given page number.
-// Page number is *not* guaranteed to be in map
+// Look up the span at the given address.
+// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
-runtime·MHeap_LookupMaybe(MHeap *h, PageID p)
+runtime·MHeap_LookupMaybe(MHeap *h, void *v)
{
MSpan *s;
+ PageID p, q;
- s = runtime·MHeapMap_GetMaybe(&h->map, p);
+ if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
+ return nil;
+ p = (uintptr)v>>PageShift;
+ q = p;
+ if(sizeof(void*) == 8)
+ q -= (uintptr)h->arena_start >> PageShift;
+ s = h->map[q];
if(s == nil || p < s->start || p - s->start >= s->npages)
return nil;
if(s->state != MSpanInUse)
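With addresses instead of page numbers as arguments, MHeap_LookupMaybe can reject out-of-arena pointers up front and then validate the map entry, since slots in the middle of a free span may hold stale garbage. A standalone sketch of that defensive lookup (standard C, hypothetical names; in_use stands in for s->state == MSpanInUse, arena_used mirrors h->arena_used from the diff):

/*
 * Sketch only: bounds-check against the arena, then verify the map
 * entry really covers the page and belongs to an in-use span.
 */
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define MAP_SLOTS  (1u << 20)

typedef struct Span {
	uintptr_t start;    /* first page number of the span */
	uintptr_t npages;   /* length in pages */
	int       in_use;   /* stand-in for s->state == MSpanInUse */
} Span;

static Span    *page_map[MAP_SLOTS];
static uint8_t *arena_start, *arena_used;

static Span *
lookup_maybe(void *v)
{
	if ((uint8_t *)v < arena_start || (uint8_t *)v >= arena_used)
		return NULL;
	uintptr_t p = (uintptr_t)v >> PAGE_SHIFT;
	uintptr_t q = p;
	if (sizeof(void *) == 8)
		q -= (uintptr_t)arena_start >> PAGE_SHIFT;
	Span *s = page_map[q];
	if (s == NULL || p < s->start || p - s->start >= s->npages)
		return NULL;
	if (!s->in_use)
		return NULL;
	return s;
}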
@@ -258,7 +277,9 @@ runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
+ uintptr *sp, *tp;
MSpan *t;
+ PageID p;
if(s->state != MSpanInUse || s->ref != 0) {
runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
@@ -266,21 +287,30 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
}
s->state = MSpanFree;
runtime·MSpanList_Remove(s);
+ sp = (uintptr*)(s->start<<PageShift);
// Coalesce with earlier, later spans.
- if((t = runtime·MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
+ p = s->start;
+ if(sizeof(void*) == 8)
+ p -= (uintptr)h->arena_start >> PageShift;
+ if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
+ tp = (uintptr*)(t->start<<PageShift);
+ *tp |= *sp; // propagate "needs zeroing" mark
s->start = t->start;
s->npages += t->npages;
- runtime·MHeapMap_Set(&h->map, s->start, s);
+ p -= t->npages;
+ h->map[p] = s;
runtime·MSpanList_Remove(t);
t->state = MSpanDead;
runtime·FixAlloc_Free(&h->spanalloc, t);
mstats.mspan_inuse = h->spanalloc.inuse;
mstats.mspan_sys = h->spanalloc.sys;
}
- if((t = runtime·MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
+ if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
+ tp = (uintptr*)(t->start<<PageShift);
+ *sp |= *tp; // propagate "needs zeroing" mark
s->npages += t->npages;
- runtime·MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+ h->map[p + s->npages - 1] = s;
runtime·MSpanList_Remove(t);
t->state = MSpanDead;
runtime·FixAlloc_Free(&h->spanalloc, t);
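When MHeap_FreeLocked coalesces with a neighbor, the surviving span must still say "needs zeroing" if either half did, so the first-word marks are OR-ed: *tp |= *sp when merging into the earlier span, *sp |= *tp when absorbing the later one. A sketch of the propagation (hypothetical names, same assumptions as above):

/*
 * Sketch only: OR the dirty mark of the span starting at page `from`
 * into the span starting at page `into`; both marks live in the
 * spans' first words.
 */
#include <stdint.h>

#define PAGE_SHIFT 12

static void
propagate_mark(uintptr_t into, uintptr_t from)
{
	uintptr_t *ip = (uintptr_t *)(into << PAGE_SHIFT);
	uintptr_t *fp = (uintptr_t *)(from << PAGE_SHIFT);
	*ip |= *fp;
}

In the diff's terms, the backward merge is propagate_mark(t->start, s->start) before s->start is rewound to t->start, and the forward merge is propagate_mark(s->start, t->start).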