Diffstat (limited to 'src/pkg/runtime/mheap.c')
-rw-r--r--	src/pkg/runtime/mheap.c	506
1 file changed, 443 insertions(+), 63 deletions(-)
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index fc80c2600..7e83eb283 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -41,7 +41,10 @@ RecordSpan(void *vh, byte *p)
runtime·throw("runtime: cannot allocate memory");
if(h->allspans) {
runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
- runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
+ // Don't free the old array if it's referenced by sweep.
+ // See the comment in mgc0.c.
+ if(h->allspans != runtime·mheap.sweepspans)
+ runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
}
h->allspans = all;
h->nspancap = cap;
@@ -57,10 +60,15 @@ runtime·MHeap_Init(MHeap *h)
runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
+ runtime·FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
+ runtime·FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
// h->mapcache needs no init
- for(i=0; i<nelem(h->free); i++)
+ for(i=0; i<nelem(h->free); i++) {
runtime·MSpanList_Init(&h->free[i]);
- runtime·MSpanList_Init(&h->large);
+ runtime·MSpanList_Init(&h->busy[i]);
+ }
+ runtime·MSpanList_Init(&h->freelarge);
+ runtime·MSpanList_Init(&h->busylarge);
for(i=0; i<nelem(h->central); i++)
runtime·MCentral_Init(&h->central[i], i);
}
@@ -72,20 +80,95 @@ runtime·MHeap_MapSpans(MHeap *h)
// Map spans array, PageSize at a time.
n = (uintptr)h->arena_used;
- if(sizeof(void*) == 8)
- n -= (uintptr)h->arena_start;
+ n -= (uintptr)h->arena_start;
n = n / PageSize * sizeof(h->spans[0]);
- n = ROUND(n, PageSize);
+ n = ROUND(n, PhysPageSize);
if(h->spans_mapped >= n)
return;
- runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, &mstats.other_sys);
+ runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
h->spans_mapped = n;
}
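
The hunk above switches the spans-array mapping granularity from the runtime's PageSize to PhysPageSize. A minimal standalone sketch of the round-up this relies on, assuming ROUND is the usual power-of-two round-up macro from the runtime headers (the page-size constant below is illustrative):

#include <assert.h>
#include <stdint.h>

/* Round x up to a multiple of n, where n is a power of two. */
#define ROUND(x, n) (((x) + (n) - 1) & ~((uintptr_t)(n) - 1))

int main(void)
{
	uintptr_t physpage = 4096;	/* illustrative physical page size */

	assert(ROUND((uintptr_t)1, physpage) == 4096);
	assert(ROUND((uintptr_t)4096, physpage) == 4096);
	assert(ROUND((uintptr_t)4097, physpage) == 8192);
	return 0;
}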
+// Sweeps spans in list until it reclaims at least npages into the heap.
+// Returns the actual number of pages reclaimed.
+static uintptr
+MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
+{
+ MSpan *s;
+ uintptr n;
+ uint32 sg;
+
+ n = 0;
+ sg = runtime·mheap.sweepgen;
+retry:
+ for(s = list->next; s != list; s = s->next) {
+ if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
+ runtime·MSpanList_Remove(s);
+ // swept spans are at the end of the list
+ runtime·MSpanList_InsertBack(list, s);
+ runtime·unlock(h);
+ n += runtime·MSpan_Sweep(s);
+ runtime·lock(h);
+ if(n >= npages)
+ return n;
+ // the span could have been moved elsewhere
+ goto retry;
+ }
+ if(s->sweepgen == sg-1) {
+ // the span is being swept by the background sweeper, skip
+ continue;
+ }
+ // already swept an empty span;
+ // all subsequent ones must also be either swept or in the process of being swept
+ break;
+ }
+ return n;
+}
+
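
The sweepgen comparisons and the cas in MHeap_ReclaimList follow the span sweep-generation convention documented alongside the MSpan definition in this runtime; restated here for reference as a comment-only sketch:

/* Sweep generation convention (h->sweepgen is incremented by 2 at each GC):
 *   s->sweepgen == h->sweepgen - 2   the span needs sweeping
 *   s->sweepgen == h->sweepgen - 1   the span is currently being swept
 *   s->sweepgen == h->sweepgen       the span is swept and ready to use
 * The cas(&s->sweepgen, sg-2, sg-1) above is therefore a claim: whoever
 * wins the CAS owns sweeping that span and later advances it to sg. */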
+// Sweeps and reclaims at least npage pages into the heap.
+// Called before allocating npage pages.
+static void
+MHeap_Reclaim(MHeap *h, uintptr npage)
+{
+ uintptr reclaimed, n;
+
+ // First try to sweep busy spans with large objects of size >= npage;
+ // this has a good chance of reclaiming the necessary space.
+ for(n=npage; n < nelem(h->busy); n++) {
+ if(MHeap_ReclaimList(h, &h->busy[n], npage))
+ return; // Bingo!
+ }
+
+ // Then -- even larger objects.
+ if(MHeap_ReclaimList(h, &h->busylarge, npage))
+ return; // Bingo!
+
+ // Now try smaller objects.
+ // One such object is not enough, so we need to reclaim several of them.
+ reclaimed = 0;
+ for(n=0; n < npage && n < nelem(h->busy); n++) {
+ reclaimed += MHeap_ReclaimList(h, &h->busy[n], npage-reclaimed);
+ if(reclaimed >= npage)
+ return;
+ }
+
+ // Now sweep everything that is not yet swept.
+ runtime·unlock(h);
+ for(;;) {
+ n = runtime·sweepone();
+ if(n == -1) // all spans are swept
+ break;
+ reclaimed += n;
+ if(reclaimed >= npage)
+ break;
+ }
+ runtime·lock(h);
+}
+
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
-runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
+runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
MSpan *s;
@@ -95,14 +178,22 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
mstats.heap_inuse += npage<<PageShift;
- if(acct) {
+ if(large) {
mstats.heap_objects++;
mstats.heap_alloc += npage<<PageShift;
+ // Swept spans are at the end of lists.
+ if(s->npages < nelem(h->free))
+ runtime·MSpanList_InsertBack(&h->busy[s->npages], s);
+ else
+ runtime·MSpanList_InsertBack(&h->busylarge, s);
}
}
runtime·unlock(h);
- if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
- runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ if(s != nil) {
+ if(needzero && s->needzero)
+ runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ s->needzero = 0;
+ }
return s;
}
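
This hunk replaces the old first-word "needs zeroing" stamp with the explicit per-span needzero flag. A minimal standalone model of that protocol (the ToySpan type and toy_* names are illustrative, not the runtime's; setting the flag on free is done by callers outside this file):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Toy span: a buffer plus the dirty flag the runtime calls needzero. */
typedef struct ToySpan {
	unsigned char *mem;
	size_t len;
	bool needzero;
} ToySpan;

/* Mirrors MHeap_Alloc above: zero only when the caller asked for zeroed
 * memory AND the span may be dirty, then clear the flag. */
static void toy_alloc(ToySpan *s, bool needzero)
{
	if(needzero && s->needzero)
		memset(s->mem, 0, s->len);
	s->needzero = false;
}

/* Returning a span marks its contents as garbage again (done by the
 * runtime's free paths, outside this file). */
static void toy_free(ToySpan *s)
{
	s->needzero = true;
}

/* Coalescing adjacent free spans ORs the flags, as in MHeap_FreeLocked. */
static void toy_coalesce(ToySpan *dst, const ToySpan *src)
{
	dst->needzero |= src->needzero;
}

int main(void)
{
	unsigned char buf[16] = "dirty data here";
	ToySpan s = { buf, sizeof buf, true };

	toy_alloc(&s, true);	/* caller wants zeroed memory: buf is cleared */
	toy_free(&s);
	toy_alloc(&s, false);	/* caller will overwrite everything: no clearing */
	toy_coalesce(&s, &s);
	return 0;
}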
@@ -113,6 +204,11 @@ MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
MSpan *s, *t;
PageID p;
+ // To prevent excessive heap growth, before allocating n pages
+ // we need to sweep and reclaim at least n pages.
+ if(!h->sweepdone)
+ MHeap_Reclaim(h, npage);
+
// Try in fixed-size lists up to max.
for(n=npage; n < nelem(h->free); n++) {
if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
@@ -136,29 +232,12 @@ HaveSpan:
if(s->npages < npage)
runtime·throw("MHeap_AllocLocked - bad npages");
runtime·MSpanList_Remove(s);
+ runtime·atomicstore(&s->sweepgen, h->sweepgen);
s->state = MSpanInUse;
mstats.heap_idle -= s->npages<<PageShift;
mstats.heap_released -= s->npreleased<<PageShift;
- if(s->npreleased > 0) {
- // We have called runtime·SysUnused with these pages, and on
- // Unix systems it called madvise. At this point at least
- // some BSD-based kernels will return these pages either as
- // zeros or with the old data. For our caller, the first word
- // in the page indicates whether the span contains zeros or
- // not (this word was set when the span was freed by
- // MCentral_Free or runtime·MCentral_FreeSpan). If the first
- // page in the span is returned as zeros, and some subsequent
- // page is returned with the old data, then we will be
- // returning a span that is assumed to be all zeros, but the
- // actual data will not be all zeros. Avoid that problem by
- // explicitly marking the span as not being zeroed, just in
- // case. The beadbead constant we use here means nothing, it
- // is just a unique constant not seen elsewhere in the
- // runtime, as a clue in case it turns up unexpectedly in
- // memory or in a stack trace.
+ if(s->npreleased > 0)
runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
- *(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
- }
s->npreleased = 0;
if(s->npages > npage) {
@@ -167,13 +246,13 @@ HaveSpan:
runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
s->npages = npage;
p = t->start;
- if(sizeof(void*) == 8)
- p -= ((uintptr)h->arena_start>>PageShift);
+ p -= ((uintptr)h->arena_start>>PageShift);
if(p > 0)
h->spans[p-1] = s;
h->spans[p] = t;
h->spans[p+t->npages-1] = t;
- *(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift); // copy "needs zeroing" mark
+ t->needzero = s->needzero;
+ runtime·atomicstore(&t->sweepgen, h->sweepgen);
t->state = MSpanInUse;
MHeap_FreeLocked(h, t);
t->unusedsince = s->unusedsince; // preserve age
@@ -186,8 +265,7 @@ HaveSpan:
s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime·class_to_size[sizeclass]);
s->types.compression = MTypes_Empty;
p = s->start;
- if(sizeof(void*) == 8)
- p -= ((uintptr)h->arena_start>>PageShift);
+ p -= ((uintptr)h->arena_start>>PageShift);
for(n=0; n<npage; n++)
h->spans[p+n] = s;
return s;
@@ -197,7 +275,7 @@ HaveSpan:
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
- return BestFit(&h->large, npage, nil);
+ return BestFit(&h->freelarge, npage, nil);
}
// Search list for smallest span with >= npage pages.
@@ -255,10 +333,10 @@ MHeap_Grow(MHeap *h, uintptr npage)
s = runtime·FixAlloc_Alloc(&h->spanalloc);
runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
p = s->start;
- if(sizeof(void*) == 8)
- p -= ((uintptr)h->arena_start>>PageShift);
+ p -= ((uintptr)h->arena_start>>PageShift);
h->spans[p] = s;
h->spans[p + s->npages - 1] = s;
+ runtime·atomicstore(&s->sweepgen, h->sweepgen);
s->state = MSpanInUse;
MHeap_FreeLocked(h, s);
return true;
@@ -273,8 +351,7 @@ runtime·MHeap_Lookup(MHeap *h, void *v)
uintptr p;
p = (uintptr)v;
- if(sizeof(void*) == 8)
- p -= (uintptr)h->arena_start;
+ p -= (uintptr)h->arena_start;
return h->spans[p >> PageShift];
}
@@ -295,8 +372,7 @@ runtime·MHeap_LookupMaybe(MHeap *h, void *v)
return nil;
p = (uintptr)v>>PageShift;
q = p;
- if(sizeof(void*) == 8)
- q -= (uintptr)h->arena_start >> PageShift;
+ q -= (uintptr)h->arena_start >> PageShift;
s = h->spans[q];
if(s == nil || p < s->start || v >= s->limit || s->state != MSpanInUse)
return nil;
@@ -322,20 +398,19 @@ runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
- uintptr *sp, *tp;
MSpan *t;
PageID p;
s->types.compression = MTypes_Empty;
- if(s->state != MSpanInUse || s->ref != 0) {
- runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
+ if(s->state != MSpanInUse || s->ref != 0 || s->sweepgen != h->sweepgen) {
+ runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d sweepgen %d/%d\n",
+ s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
runtime·throw("MHeap_FreeLocked - invalid free");
}
mstats.heap_idle += s->npages<<PageShift;
s->state = MSpanFree;
runtime·MSpanList_Remove(s);
- sp = (uintptr*)(s->start<<PageShift);
// Stamp newly unused spans. The scavenger will use that
// info to potentially give back some pages to the OS.
s->unusedsince = runtime·nanotime();
@@ -343,16 +418,12 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
// Coalesce with earlier, later spans.
p = s->start;
- if(sizeof(void*) == 8)
- p -= (uintptr)h->arena_start >> PageShift;
+ p -= (uintptr)h->arena_start >> PageShift;
if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
- if(t->npreleased == 0) { // cant't touch this otherwise
- tp = (uintptr*)(t->start<<PageShift);
- *tp |= *sp; // propagate "needs zeroing" mark
- }
s->start = t->start;
s->npages += t->npages;
s->npreleased = t->npreleased; // absorb released pages
+ s->needzero |= t->needzero;
p -= t->npages;
h->spans[p] = s;
runtime·MSpanList_Remove(t);
@@ -360,12 +431,9 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
runtime·FixAlloc_Free(&h->spanalloc, t);
}
if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
- if(t->npreleased == 0) { // cant't touch this otherwise
- tp = (uintptr*)(t->start<<PageShift);
- *sp |= *tp; // propagate "needs zeroing" mark
- }
s->npages += t->npages;
s->npreleased += t->npreleased;
+ s->needzero |= t->needzero;
h->spans[p + s->npages - 1] = s;
runtime·MSpanList_Remove(t);
t->state = MSpanDead;
@@ -376,7 +444,7 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
if(s->npages < nelem(h->free))
runtime·MSpanList_Insert(&h->free[s->npages], s);
else
- runtime·MSpanList_Insert(&h->large, s);
+ runtime·MSpanList_Insert(&h->freelarge, s);
}
static void
@@ -419,7 +487,7 @@ scavenge(int32 k, uint64 now, uint64 limit)
sumreleased = 0;
for(i=0; i < nelem(h->free); i++)
sumreleased += scavengelist(&h->free[i], now, limit);
- sumreleased += scavengelist(&h->large, now, limit);
+ sumreleased += scavengelist(&h->freelarge, now, limit);
if(runtime·debug.gctrace > 0) {
if(sumreleased > 0)
@@ -440,6 +508,7 @@ runtime·MHeap_Scavenger(void)
{
MHeap *h;
uint64 tick, now, forcegc, limit;
+ int64 unixnow;
int32 k;
Note note, *notep;
@@ -463,8 +532,8 @@ runtime·MHeap_Scavenger(void)
runtime·notetsleepg(&note, tick);
runtime·lock(h);
- now = runtime·nanotime();
- if(now - mstats.last_gc > forcegc) {
+ unixnow = runtime·unixnanotime();
+ if(unixnow - mstats.last_gc > forcegc) {
runtime·unlock(h);
// The scavenger can not block other goroutines,
// otherwise deadlock detector can fire spuriously.
@@ -476,8 +545,8 @@ runtime·MHeap_Scavenger(void)
if(runtime·debug.gctrace > 0)
runtime·printf("scvg%d: GC forced\n", k);
runtime·lock(h);
- now = runtime·nanotime();
}
+ now = runtime·nanotime();
scavenge(k, now, limit);
runtime·unlock(h);
}
@@ -486,7 +555,7 @@ runtime·MHeap_Scavenger(void)
void
runtime∕debug·freeOSMemory(void)
{
- runtime·gc(1);
+ runtime·gc(2); // force GC and do eager sweep
runtime·lock(&runtime·mheap);
scavenge(-1, ~(uintptr)0, 0);
runtime·unlock(&runtime·mheap);
@@ -503,11 +572,16 @@ runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
span->freelist = nil;
span->ref = 0;
span->sizeclass = 0;
+ span->incache = false;
span->elemsize = 0;
- span->state = 0;
+ span->state = MSpanDead;
span->unusedsince = 0;
span->npreleased = 0;
span->types.compression = MTypes_Empty;
+ span->specialLock.key = 0;
+ span->specials = nil;
+ span->needzero = 0;
+ span->freebuf = nil;
}
// Initialize an empty doubly-linked list.
@@ -549,4 +623,310 @@ runtime·MSpanList_Insert(MSpan *list, MSpan *span)
span->prev->next = span;
}
+void
+runtime·MSpanList_InsertBack(MSpan *list, MSpan *span)
+{
+ if(span->next != nil || span->prev != nil) {
+ runtime·printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
+ runtime·throw("MSpanList_Insert");
+ }
+ span->next = list;
+ span->prev = list->prev;
+ span->next->prev = span;
+ span->prev->next = span;
+}
+// Adds the special record s to the list of special records for
+// the object p. All fields of s should be filled in except for
+// offset & next, which this routine will fill in.
+// Returns true if the special was successfully added, false otherwise.
+// (The add will fail only if a record with the same p and s->kind
+// already exists.)
+static bool
+addspecial(void *p, Special *s)
+{
+ MSpan *span;
+ Special **t, *x;
+ uintptr offset;
+ byte kind;
+
+ span = runtime·MHeap_LookupMaybe(&runtime·mheap, p);
+ if(span == nil)
+ runtime·throw("addspecial on invalid pointer");
+
+ // Ensure that the span is swept.
+ // The GC accesses the specials list without locks, and it's just much safer this way.
+ m->locks++;
+ runtime·MSpan_EnsureSwept(span);
+
+ offset = (uintptr)p - (span->start << PageShift);
+ kind = s->kind;
+
+ runtime·lock(&span->specialLock);
+
+ // Find splice point, check for existing record.
+ t = &span->specials;
+ while((x = *t) != nil) {
+ if(offset == x->offset && kind == x->kind) {
+ runtime·unlock(&span->specialLock);
+ m->locks--;
+ return false; // already exists
+ }
+ if(offset < x->offset || (offset == x->offset && kind < x->kind))
+ break;
+ t = &x->next;
+ }
+ // Splice in record, fill in offset.
+ s->offset = offset;
+ s->next = x;
+ *t = s;
+ runtime·unlock(&span->specialLock);
+ m->locks--;
+ return true;
+}
+
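
addspecial keeps each span's specials list sorted by (offset, kind) and splices new records in through a pointer to the link field; that ordering is what lets freeallspecials below stop scanning early at offset + size <= s->offset. A standalone sketch of the splice idiom with illustrative types (not the runtime's):

#include <stdbool.h>
#include <stddef.h>

typedef struct Rec {
	size_t offset;
	unsigned char kind;
	struct Rec *next;
} Rec;

/* Insert r into a list kept sorted by (offset, kind); refuse duplicates,
 * mirroring addspecial's return value. */
static bool
sorted_insert(Rec **head, Rec *r)
{
	Rec **t, *x;

	for(t = head; (x = *t) != NULL; t = &x->next) {
		if(r->offset == x->offset && r->kind == x->kind)
			return false;	/* already present */
		if(r->offset < x->offset || (r->offset == x->offset && r->kind < x->kind))
			break;		/* splice point found */
	}
	r->next = x;
	*t = r;
	return true;
}

int main(void)
{
	Rec a = { 0, 1, NULL }, b = { 8, 0, NULL };
	Rec *head = NULL;

	sorted_insert(&head, &b);
	sorted_insert(&head, &a);	/* a lands before b: list stays sorted */
	return 0;
}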
+// Removes the Special record of the given kind for the object p.
+// Returns the record if the record existed, nil otherwise.
+// The caller must FixAlloc_Free the result.
+static Special*
+removespecial(void *p, byte kind)
+{
+ MSpan *span;
+ Special *s, **t;
+ uintptr offset;
+
+ span = runtime·MHeap_LookupMaybe(&runtime·mheap, p);
+ if(span == nil)
+ runtime·throw("removespecial on invalid pointer");
+
+ // Ensure that the span is swept.
+ // The GC accesses the specials list without locks, and it's just much safer this way.
+ m->locks++;
+ runtime·MSpan_EnsureSwept(span);
+
+ offset = (uintptr)p - (span->start << PageShift);
+
+ runtime·lock(&span->specialLock);
+ t = &span->specials;
+ while((s = *t) != nil) {
+ // This function is used for finalizers only, so we don't check for
+ // "interior" specials (p must be exactly equal to s->offset).
+ if(offset == s->offset && kind == s->kind) {
+ *t = s->next;
+ runtime·unlock(&span->specialLock);
+ m->locks--;
+ return s;
+ }
+ t = &s->next;
+ }
+ runtime·unlock(&span->specialLock);
+ m->locks--;
+ return nil;
+}
+
+// Adds a finalizer to the object p. Returns true if it succeeded.
+bool
+runtime·addfinalizer(void *p, FuncVal *f, uintptr nret, Type *fint, PtrType *ot)
+{
+ SpecialFinalizer *s;
+
+ runtime·lock(&runtime·mheap.speciallock);
+ s = runtime·FixAlloc_Alloc(&runtime·mheap.specialfinalizeralloc);
+ runtime·unlock(&runtime·mheap.speciallock);
+ s->kind = KindSpecialFinalizer;
+ s->fn = f;
+ s->nret = nret;
+ s->fint = fint;
+ s->ot = ot;
+ if(addspecial(p, s))
+ return true;
+
+ // There was an old finalizer
+ runtime·lock(&runtime·mheap.speciallock);
+ runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, s);
+ runtime·unlock(&runtime·mheap.speciallock);
+ return false;
+}
+
+// Removes the finalizer (if any) from the object p.
+void
+runtime·removefinalizer(void *p)
+{
+ SpecialFinalizer *s;
+
+ s = (SpecialFinalizer*)removespecial(p, KindSpecialFinalizer);
+ if(s == nil)
+ return; // there wasn't a finalizer to remove
+ runtime·lock(&runtime·mheap.speciallock);
+ runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, s);
+ runtime·unlock(&runtime·mheap.speciallock);
+}
+
+// Set the heap profile bucket associated with addr to b.
+void
+runtime·setprofilebucket(void *p, Bucket *b)
+{
+ SpecialProfile *s;
+
+ runtime·lock(&runtime·mheap.speciallock);
+ s = runtime·FixAlloc_Alloc(&runtime·mheap.specialprofilealloc);
+ runtime·unlock(&runtime·mheap.speciallock);
+ s->kind = KindSpecialProfile;
+ s->b = b;
+ if(!addspecial(p, s))
+ runtime·throw("setprofilebucket: profile already set");
+}
+
+// Do whatever cleanup needs to be done to deallocate s. It has
+// already been unlinked from the MSpan specials list.
+// Returns true if we should keep working on deallocating p.
+bool
+runtime·freespecial(Special *s, void *p, uintptr size, bool freed)
+{
+ SpecialFinalizer *sf;
+ SpecialProfile *sp;
+
+ switch(s->kind) {
+ case KindSpecialFinalizer:
+ sf = (SpecialFinalizer*)s;
+ runtime·queuefinalizer(p, sf->fn, sf->nret, sf->fint, sf->ot);
+ runtime·lock(&runtime·mheap.speciallock);
+ runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, sf);
+ runtime·unlock(&runtime·mheap.speciallock);
+ return false; // don't free p until finalizer is done
+ case KindSpecialProfile:
+ sp = (SpecialProfile*)s;
+ runtime·MProf_Free(sp->b, size, freed);
+ runtime·lock(&runtime·mheap.speciallock);
+ runtime·FixAlloc_Free(&runtime·mheap.specialprofilealloc, sp);
+ runtime·unlock(&runtime·mheap.speciallock);
+ return true;
+ default:
+ runtime·throw("bad special kind");
+ return true;
+ }
+}
+
+// Free all special records for p.
+void
+runtime·freeallspecials(MSpan *span, void *p, uintptr size)
+{
+ Special *s, **t, *list;
+ uintptr offset;
+
+ if(span->sweepgen != runtime·mheap.sweepgen)
+ runtime·throw("runtime: freeallspecials: unswept span");
+ // First collect all matching specials into the local list, then free them;
+ // this is required to avoid a deadlock between span->specialLock and proflock.
+ list = nil;
+ offset = (uintptr)p - (span->start << PageShift);
+ runtime·lock(&span->specialLock);
+ t = &span->specials;
+ while((s = *t) != nil) {
+ if(offset + size <= s->offset)
+ break;
+ if(offset <= s->offset) {
+ *t = s->next;
+ s->next = list;
+ list = s;
+ } else
+ t = &s->next;
+ }
+ runtime·unlock(&span->specialLock);
+
+ while(list != nil) {
+ s = list;
+ list = s->next;
+ if(!runtime·freespecial(s, p, size, true))
+ runtime·throw("can't explicitly free an object with a finalizer");
+ }
+}
+
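
freeallspecials illustrates a detach-then-process pattern: matching records are unlinked while span->specialLock is held, and only processed after it is released, so the later work (which may take proflock via freespecial) never nests inside the span lock. A standalone sketch of the pattern using pthreads (illustrative; the runtime uses its own locks):

#include <pthread.h>
#include <stddef.h>

typedef struct Node {
	struct Node *next;
	int flagged;
} Node;

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static Node *list_head;

/* Runs outside list_lock, so it may take other locks without creating a
 * lock-ordering cycle. */
static void
process(Node *n)
{
	(void)n;
}

static void
detach_then_process(void)
{
	Node **t, *n, *batch;

	batch = NULL;

	/* Phase 1: under the list lock, unlink everything to be handled. */
	pthread_mutex_lock(&list_lock);
	for(t = &list_head; (n = *t) != NULL; ) {
		if(n->flagged) {
			*t = n->next;		/* unlink from the shared list */
			n->next = batch;	/* collect on a private batch */
			batch = n;
		} else
			t = &n->next;
	}
	pthread_mutex_unlock(&list_lock);

	/* Phase 2: no list lock held; process the private batch. */
	while(batch != NULL) {
		n = batch;
		batch = n->next;
		process(n);
	}
}

int main(void)
{
	detach_then_process();
	return 0;
}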
+// Split an allocated span into two equal parts.
+void
+runtime·MHeap_SplitSpan(MHeap *h, MSpan *s)
+{
+ MSpan *t;
+ MCentral *c;
+ uintptr i;
+ uintptr npages;
+ PageID p;
+
+ if(s->state != MSpanInUse)
+ runtime·throw("MHeap_SplitSpan on a free span");
+ if(s->sizeclass != 0 && s->ref != 1)
+ runtime·throw("MHeap_SplitSpan doesn't have an allocated object");
+ npages = s->npages;
+
+ // remove the span from whatever list it is in now
+ if(s->sizeclass > 0) {
+ // must be in h->central[x].empty
+ c = &h->central[s->sizeclass];
+ runtime·lock(c);
+ runtime·MSpanList_Remove(s);
+ runtime·unlock(c);
+ runtime·lock(h);
+ } else {
+ // must be in h->busy/busylarge
+ runtime·lock(h);
+ runtime·MSpanList_Remove(s);
+ }
+ // heap is locked now
+
+ if(npages == 1) {
+ // convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
+ s->ref = 2;
+ s->sizeclass = runtime·SizeToClass(PageSize/2);
+ s->elemsize = PageSize/2;
+ } else {
+ // convert span of n>1 pages into two spans of n/2 pages each.
+ if((s->npages & 1) != 0)
+ runtime·throw("MHeap_SplitSpan on an odd size span");
+
+ // compute position in h->spans
+ p = s->start;
+ p -= (uintptr)h->arena_start >> PageShift;
+
+ // Allocate a new span for the first half.
+ t = runtime·FixAlloc_Alloc(&h->spanalloc);
+ runtime·MSpan_Init(t, s->start, npages/2);
+ t->limit = (byte*)((t->start + npages/2) << PageShift);
+ t->state = MSpanInUse;
+ t->elemsize = npages << (PageShift - 1);
+ t->sweepgen = s->sweepgen;
+ if(t->elemsize <= MaxSmallSize) {
+ t->sizeclass = runtime·SizeToClass(t->elemsize);
+ t->ref = 1;
+ }
+
+ // the old span holds the second half.
+ s->start += npages/2;
+ s->npages = npages/2;
+ s->elemsize = npages << (PageShift - 1);
+ if(s->elemsize <= MaxSmallSize) {
+ s->sizeclass = runtime·SizeToClass(s->elemsize);
+ s->ref = 1;
+ }
+
+ // update span lookup table
+ for(i = p; i < p + npages/2; i++)
+ h->spans[i] = t;
+ }
+
+ // place the span into a new list
+ if(s->sizeclass > 0) {
+ runtime·unlock(h);
+ c = &h->central[s->sizeclass];
+ runtime·lock(c);
+ // swept spans are at the end of the list
+ runtime·MSpanList_InsertBack(&c->empty, s);
+ runtime·unlock(c);
+ } else {
+ // Swept spans are at the end of lists.
+ if(s->npages < nelem(h->free))
+ runtime·MSpanList_InsertBack(&h->busy[s->npages], s);
+ else
+ runtime·MSpanList_InsertBack(&h->busylarge, s);
+ runtime·unlock(h);
+ }
+}
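
In the npages > 1 branch above both halves get elemsize = npages << (PageShift - 1); a standalone check that this is exactly half the original span, i.e. (npages/2) << PageShift (the PageShift value below is illustrative, not the runtime's constant):

#include <assert.h>
#include <stdint.h>

enum { PageShift = 12 };	/* illustrative only */

int main(void)
{
	uintptr_t npages;

	for(npages = 2; npages <= 64; npages += 2) {
		uintptr_t elemsize = npages << (PageShift - 1);	/* as in MHeap_SplitSpan */
		assert(elemsize == (npages / 2) << PageShift);	/* half the span */
	}
	return 0;
}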