summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorRuss Cox <rsc@golang.org>2010-02-03 16:31:34 -0800
committerRuss Cox <rsc@golang.org>2010-02-03 16:31:34 -0800
commit3732daa8e7c635ba306dd923a4342650524320df (patch)
tree31d454f4c7a1d0189ddbd9b1e771c8716655dc72 /src
parent9c681e46bfdf7a2745d2df35412592e8c13e0fce (diff)
downloadgolang-3732daa8e7c635ba306dd923a4342650524320df.tar.gz
finalizers; merge package malloc into package runtime
R=r, cw CC=golang-dev http://codereview.appspot.com/198085
Diffstat (limited to 'src')
-rw-r--r--src/pkg/Makefile2
-rw-r--r--src/pkg/container/vector/numbers_test.go38
-rw-r--r--src/pkg/fmt/fmt_test.go18
-rw-r--r--src/pkg/malloc/Makefile11
-rw-r--r--src/pkg/malloc/malloc.go29
-rw-r--r--src/pkg/runtime/Makefile1
-rw-r--r--src/pkg/runtime/cgo2c.c3
-rw-r--r--src/pkg/runtime/extern.go68
-rw-r--r--src/pkg/runtime/malloc.cgo46
-rw-r--r--src/pkg/runtime/malloc.h6
-rw-r--r--src/pkg/runtime/mfinal.c127
-rw-r--r--src/pkg/runtime/mgc0.c130
-rw-r--r--src/pkg/runtime/runtime.h1
-rw-r--r--src/pkg/runtime/type.h9
14 files changed, 370 insertions, 119 deletions
diff --git a/src/pkg/Makefile b/src/pkg/Makefile
index 034a66bb6..f057769ec 100644
--- a/src/pkg/Makefile
+++ b/src/pkg/Makefile
@@ -86,7 +86,6 @@ DIRS=\
io/ioutil\
json\
log\
- malloc\
math\
net\
once\
@@ -128,7 +127,6 @@ NOTEST=\
hash\
image\
image/jpeg\
- malloc\
rand\
runtime\
syscall\
diff --git a/src/pkg/container/vector/numbers_test.go b/src/pkg/container/vector/numbers_test.go
index 9a7e2780e..a44242f67 100644
--- a/src/pkg/container/vector/numbers_test.go
+++ b/src/pkg/container/vector/numbers_test.go
@@ -6,7 +6,7 @@ package vector
import (
"fmt"
- "malloc"
+ "runtime"
"strings"
"testing"
)
@@ -35,16 +35,16 @@ func s(n uint64) string {
func TestVectorNums(t *testing.T) {
var v Vector
c := int(0)
- malloc.GC()
- m0 := *malloc.GetStats()
+ runtime.GC()
+ m0 := runtime.MemStats
v.Resize(memTestN, memTestN)
for i := 0; i < memTestN; i++ {
v.Set(i, c)
}
- malloc.GC()
- m := *malloc.GetStats()
+ runtime.GC()
+ m := runtime.MemStats
v.Resize(0, 0)
- malloc.GC()
+ runtime.GC()
n := m.Alloc - m0.Alloc
t.Logf("%T.Push(%#v), n = %s: Alloc/n = %.2f\n", v, c, s(memTestN), float(n)/memTestN)
}
@@ -53,16 +53,16 @@ func TestVectorNums(t *testing.T) {
func TestIntVectorNums(t *testing.T) {
var v IntVector
c := int(0)
- malloc.GC()
- m0 := *malloc.GetStats()
+ runtime.GC()
+ m0 := runtime.MemStats
v.Resize(memTestN, memTestN)
for i := 0; i < memTestN; i++ {
v.Set(i, c)
}
- malloc.GC()
- m := *malloc.GetStats()
+ runtime.GC()
+ m := runtime.MemStats
v.Resize(0, 0)
- malloc.GC()
+ runtime.GC()
n := m.Alloc - m0.Alloc
t.Logf("%T.Push(%#v), n = %s: Alloc/n = %.2f\n", v, c, s(memTestN), float(n)/memTestN)
}
@@ -71,16 +71,16 @@ func TestIntVectorNums(t *testing.T) {
func TestStringVectorNums(t *testing.T) {
var v StringVector
c := ""
- malloc.GC()
- m0 := *malloc.GetStats()
+ runtime.GC()
+ m0 := runtime.MemStats
v.Resize(memTestN, memTestN)
for i := 0; i < memTestN; i++ {
v.Set(i, c)
}
- malloc.GC()
- m := *malloc.GetStats()
+ runtime.GC()
+ m := runtime.MemStats
v.Resize(0, 0)
- malloc.GC()
+ runtime.GC()
n := m.Alloc - m0.Alloc
t.Logf("%T.Push(%#v), n = %s: Alloc/n = %.2f\n", v, c, s(memTestN), float(n)/memTestN)
}
@@ -90,7 +90,7 @@ func BenchmarkVectorNums(b *testing.B) {
c := int(0)
var v Vector
b.StopTimer()
- malloc.GC()
+ runtime.GC()
b.StartTimer()
for i := 0; i < b.N; i++ {
v.Push(c)
@@ -102,7 +102,7 @@ func BenchmarkIntVectorNums(b *testing.B) {
c := int(0)
var v IntVector
b.StopTimer()
- malloc.GC()
+ runtime.GC()
b.StartTimer()
for i := 0; i < b.N; i++ {
v.Push(c)
@@ -114,7 +114,7 @@ func BenchmarkStringVectorNums(b *testing.B) {
c := ""
var v StringVector
b.StopTimer()
- malloc.GC()
+ runtime.GC()
b.StartTimer()
for i := 0; i < b.N; i++ {
v.Push(c)
diff --git a/src/pkg/fmt/fmt_test.go b/src/pkg/fmt/fmt_test.go
index c89a6acac..ecceeb09c 100644
--- a/src/pkg/fmt/fmt_test.go
+++ b/src/pkg/fmt/fmt_test.go
@@ -7,8 +7,8 @@ package fmt_test
import (
. "fmt"
"io"
- "malloc" // for the malloc count test only
"math"
+ "runtime" // for the malloc count test only
"strings"
"testing"
)
@@ -281,29 +281,29 @@ func BenchmarkSprintfIntInt(b *testing.B) {
}
func TestCountMallocs(t *testing.T) {
- mallocs := 0 - malloc.GetStats().Mallocs
+ mallocs := 0 - runtime.MemStats.Mallocs
for i := 0; i < 100; i++ {
Sprintf("")
}
- mallocs += malloc.GetStats().Mallocs
+ mallocs += runtime.MemStats.Mallocs
Printf("mallocs per Sprintf(\"\"): %d\n", mallocs/100)
- mallocs = 0 - malloc.GetStats().Mallocs
+ mallocs = 0 - runtime.MemStats.Mallocs
for i := 0; i < 100; i++ {
Sprintf("xxx")
}
- mallocs += malloc.GetStats().Mallocs
+ mallocs += runtime.MemStats.Mallocs
Printf("mallocs per Sprintf(\"xxx\"): %d\n", mallocs/100)
- mallocs = 0 - malloc.GetStats().Mallocs
+ mallocs = 0 - runtime.MemStats.Mallocs
for i := 0; i < 100; i++ {
Sprintf("%x", i)
}
- mallocs += malloc.GetStats().Mallocs
+ mallocs += runtime.MemStats.Mallocs
Printf("mallocs per Sprintf(\"%%x\"): %d\n", mallocs/100)
- mallocs = 0 - malloc.GetStats().Mallocs
+ mallocs = 0 - runtime.MemStats.Mallocs
for i := 0; i < 100; i++ {
Sprintf("%x %x", i, i)
}
- mallocs += malloc.GetStats().Mallocs
+ mallocs += runtime.MemStats.Mallocs
Printf("mallocs per Sprintf(\"%%x %%x\"): %d\n", mallocs/100)
}
diff --git a/src/pkg/malloc/Makefile b/src/pkg/malloc/Makefile
deleted file mode 100644
index d7c39c0cf..000000000
--- a/src/pkg/malloc/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../Make.$(GOARCH)
-
-TARG=malloc
-GOFILES=\
- malloc.go\
-
-include ../../Make.pkg
diff --git a/src/pkg/malloc/malloc.go b/src/pkg/malloc/malloc.go
deleted file mode 100644
index c66b6237d..000000000
--- a/src/pkg/malloc/malloc.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Go declarations for malloc.
-// The actual functions are written in C
-// and part of the runtime library.
-
-// The malloc package exposes statistics and other low-level details about
-// the run-time memory allocator and collector. It is intended for debugging
-// purposes only; other uses are discouraged.
-package malloc
-
-type Stats struct {
- Alloc uint64
- Sys uint64
- Stacks uint64
- InusePages uint64
- NextGC uint64
- Lookups uint64
- Mallocs uint64
- EnableGC bool
-}
-
-func Alloc(uintptr) *byte
-func Free(*byte)
-func GetStats() *Stats
-func Lookup(*byte) (*byte, uintptr)
-func GC()
diff --git a/src/pkg/runtime/Makefile b/src/pkg/runtime/Makefile
index d65fea543..b6e4eed70 100644
--- a/src/pkg/runtime/Makefile
+++ b/src/pkg/runtime/Makefile
@@ -55,6 +55,7 @@ OFILES=\
mcentral.$O\
mem.$O\
memmove.$O\
+ mfinal.$O\
mfixalloc.$O\
mgc0.$O\
mheap.$O\
diff --git a/src/pkg/runtime/cgo2c.c b/src/pkg/runtime/cgo2c.c
index 3b452b78b..a4489213f 100644
--- a/src/pkg/runtime/cgo2c.c
+++ b/src/pkg/runtime/cgo2c.c
@@ -46,6 +46,7 @@ enum {
Uintptr,
String,
Slice,
+ Eface,
};
static struct {
@@ -62,6 +63,7 @@ static struct {
"uintptr", 4,
"String", 8,
"Slice", 12,
+ "Eface", 8,
/* fixed size */
"float32", 4,
@@ -711,6 +713,7 @@ main(int argc, char **argv)
type_table[Uintptr].size = 8;
type_table[String].size = 16;
type_table[Slice].size = 8+4+4;
+ type_table[Eface].size = 8+8;
structround = 8;
}
}
diff --git a/src/pkg/runtime/extern.go b/src/pkg/runtime/extern.go
index 85b165922..53b86b764 100644
--- a/src/pkg/runtime/extern.go
+++ b/src/pkg/runtime/extern.go
@@ -70,3 +70,71 @@ func Signame(sig int32) string
// Siginit enables receipt of signals via Sigrecv. It should typically
// be called during initialization.
func Siginit()
+
+// MemStats holds statistics about the memory system.
+// The statistics are only approximate, as they are not interlocked on update.
+var MemStats struct {
+ Alloc uint64
+ Sys uint64
+ Stacks uint64
+ InusePages uint64
+ NextGC uint64
+ Lookups uint64
+ Mallocs uint64
+ EnableGC bool
+}
+
+// Alloc allocates a block of the given size.
+// FOR TESTING AND DEBUGGING ONLY.
+func Alloc(uintptr) *byte
+
+// Free frees the block starting at the given pointer.
+// FOR TESTING AND DEBUGGING ONLY.
+func Free(*byte)
+
+// Lookup returns the base and size of the block containing the given pointer.
+// FOR TESTING AND DEBUGGING ONLY.
+func Lookup(*byte) (*byte, uintptr)
+
+// GC runs a garbage collection.
+func GC()
+
+// SetFinalizer sets the finalizer associated with x to f.
+// When the garbage collector finds an unreachable block
+// with an associated finalizer, it clears the association and creates
+// a new goroutine running f(x). Creating the new goroutine makes
+// x reachable again, but now without an associated finalizer.
+// Assuming that SetFinalizer is not called again, the next time
+// the garbage collector sees that x is unreachable, it will free x.
+//
+// SetFinalizer(x, nil) clears any finalizer associated with x.
+//
+// The argument x must be a pointer to an object allocated by
+// calling new or by taking the address of a composite literal.
+// The argument f must be a function that takes a single argument
+// of x's type and returns no values. If either of these is not
+// true, SetFinalizer aborts the program.
+//
+// Finalizers are run in dependency order: if A points at B, both have
+// finalizers, and they are otherwise unreachable, only the finalizer
+// for A runs; once A is freed, the finalizer for B can run.
+// If a cyclic structure includes a block with a finalizer, that
+// cycle is not guaranteed to be garbage collected and the finalizer
+// is not guaranteed to run, because there is no ordering that
+// respects the dependencies.
+//
+// The finalizer for x is scheduled to run at some arbitrary time after
+// x becomes unreachable.
+// There is no guarantee that finalizers will run before a program exits,
+// so typically they are useful only for releasing non-memory resources
+// associated with an object during a long-running program.
+// For example, an os.File object could use a finalizer to close the
+// associated operating system file descriptor when a program discards
+// an os.File without calling Close, but it would be a mistake
+// to depend on a finalizer to flush an in-memory I/O buffer such as a
+// bufio.Writer, because the buffer would not be flushed at program exit.
+//
+// TODO(rsc): make os.File use SetFinalizer
+// TODO(rsc): allow f to have (ignored) return values
+//
+func SetFinalizer(x, f interface{})
diff --git a/src/pkg/runtime/malloc.cgo b/src/pkg/runtime/malloc.cgo
index 6acbac2eb..d7e3e4151 100644
--- a/src/pkg/runtime/malloc.cgo
+++ b/src/pkg/runtime/malloc.cgo
@@ -6,10 +6,11 @@
//
// TODO(rsc): double-check stats.
-package malloc
+package runtime
#include "runtime.h"
#include "malloc.h"
#include "defs.h"
+#include "type.h"
MHeap mheap;
MStats mstats;
@@ -96,8 +97,10 @@ free(void *v)
throw("malloc/free - deadlock");
m->mallocing = 1;
- if(!mlookup(v, nil, nil, &ref))
+ if(!mlookup(v, nil, nil, &ref)) {
+ printf("free %p: not an allocated block\n", v);
throw("free mlookup");
+ }
*ref = RefFree;
// Find size class for v.
@@ -274,10 +277,41 @@ func Lookup(p *byte) (base *byte, size uintptr) {
mlookup(p, &base, &size, nil);
}
-func GetStats() (s *MStats) {
- s = &mstats;
-}
-
func GC() {
gc(1);
}
+
+func SetFinalizer(obj Eface, finalizer Eface) {
+ byte *base;
+ uintptr size;
+ FuncType *ft;
+
+ if(obj.type == nil) {
+ printf("runtime.SetFinalizer: first argument is nil interface\n");
+ throw:
+ throw("runtime.SetFinalizer");
+ }
+ if(obj.type->kind != KindPtr) {
+ printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
+ goto throw;
+ }
+ if(!mlookup(obj.data, &base, &size, nil) || obj.data != base) {
+ printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
+ goto throw;
+ }
+ if(finalizer.type != nil) {
+ if(finalizer.type->kind != KindFunc) {
+ badfunc:
+ printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
+ goto throw;
+ }
+ ft = (FuncType*)finalizer.type;
+ if(ft->dotdotdot || ft->out.len != 0 || ft->in.len != 1 || *(Type**)ft->in.array != obj.type)
+ goto badfunc;
+ if(getfinalizer(obj.data, 0)) {
+ printf("runtime.SetFinalizer: finalizer already set");
+ goto throw;
+ }
+ }
+ addfinalizer(obj.data, finalizer.data);
+}
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index e07faf39f..133ed0232 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -168,6 +168,8 @@ struct MStats
uint64 nmalloc; // unprotected (approximate)
bool enablegc;
};
+
+#define mstats ·MemStats /* name shared with Go */
extern MStats mstats;
@@ -307,6 +309,9 @@ void* SysAlloc(uintptr);
void SysUnused(void*, uintptr);
void SysFree(void*, uintptr);
+void addfinalizer(void*, void*);
+void* getfinalizer(void*, bool);
+
enum
{
RefcountOverhead = 4, // one uint32 per object
@@ -315,5 +320,6 @@ enum
RefStack, // stack segment - don't free and don't scan for pointers
RefNone, // no references
RefSome, // some references
+ RefFinalize, // ready to be finalized
RefNoPointers = 0x80000000U, // flag - no pointers here
};
diff --git a/src/pkg/runtime/mfinal.c b/src/pkg/runtime/mfinal.c
new file mode 100644
index 000000000..083a53068
--- /dev/null
+++ b/src/pkg/runtime/mfinal.c
@@ -0,0 +1,127 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "malloc.h"
+
+// Finalizer hash table. Direct hash, linear scan, at most 3/4 full.
+// Table size is power of 3 so that hash can be key % max.
+// Key[i] == (void*)-1 denotes free but formerly occupied entry
+// (doesn't stop the linear scan).
+// Key and val are separate tables because the garbage collector
+// must be instructed to ignore the pointers in key but follow the
+// pointers in val.
+typedef struct Fintab Fintab;
+struct Fintab
+{
+ void **key;
+ void **val;
+ int32 nkey; // number of non-nil entries in key
+ int32 ndead; // number of dead (-1) entries in key
+ int32 max; // size of key, val allocations
+};
+
+static void
+addfintab(Fintab *t, void *k, void *v)
+{
+ int32 i, j;
+
+ i = (uintptr)k % (uintptr)t->max;
+ for(j=0; j<t->max; j++) {
+ if(t->key[i] == nil) {
+ t->nkey++;
+ goto ret;
+ }
+ if(t->key[i] == (void*)-1) {
+ t->ndead--;
+ goto ret;
+ }
+ if(++i == t->max)
+ i = 0;
+ }
+
+ // cannot happen - table is known to be non-full
+ throw("finalizer table inconsistent");
+
+ret:
+ t->key[i] = k;
+ t->val[i] = v;
+}
+
+static void*
+lookfintab(Fintab *t, void *k, bool del)
+{
+ int32 i, j;
+ void *v;
+
+ if(t->max == 0)
+ return nil;
+ i = (uintptr)k % (uintptr)t->max;
+ for(j=0; j<t->max; j++) {
+ if(t->key[i] == nil)
+ return nil;
+ if(t->key[i] == k) {
+ v = t->val[i];
+ if(del) {
+ t->key[i] = (void*)-1;
+ t->val[i] = nil;
+ t->ndead++;
+ }
+ return v;
+ }
+ if(++i == t->max)
+ i = 0;
+ }
+
+ // cannot happen - table is known to be non-full
+ throw("finalizer table inconsistent");
+ return nil;
+}
+
+static Fintab fintab;
+
+// add finalizer; caller is responsible for making sure not already in table
+void
+addfinalizer(void *p, void *f)
+{
+ Fintab newtab;
+ int32 i;
+
+ if(fintab.nkey >= fintab.max/2+fintab.max/4) {
+ // keep table at most 3/4 full:
+ // allocate new table and rehash.
+
+ runtime_memclr((byte*)&newtab, sizeof newtab);
+ newtab.max = fintab.max;
+ if(newtab.max == 0)
+ newtab.max = 3*3*3;
+ else if(fintab.ndead < fintab.nkey/2) {
+ // grow table if not many dead values.
+ // otherwise just rehash into table of same size.
+ newtab.max *= 3;
+ }
+
+ newtab.key = mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0);
+ newtab.val = mallocgc(newtab.max*sizeof newtab.val[0], 0, 0);
+
+ for(i=0; i<fintab.max; i++) {
+ void *k;
+
+ k = fintab.key[i];
+ if(k != nil && k != (void*)-1)
+ addfintab(&newtab, k, fintab.val[i]);
+ }
+ free(fintab.key);
+ free(fintab.val);
+ fintab = newtab;
+ }
+
+ addfintab(&fintab, p, f);
+}
+
+void*
+getfinalizer(void *p, bool del)
+{
+ return lookfintab(&fintab, p, del);
+}
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 2a050d378..d8a943e2a 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -23,6 +23,12 @@ extern byte data[];
extern byte etext[];
extern byte end[];
+static void *finq[128]; // finalizer queue - two elements per entry
+static void **pfinq = finq;
+static void **efinq = finq+nelem(finq);
+
+static void sweepblock(byte*, int64, uint32*, int32);
+
enum {
PtrSize = sizeof(void*)
};
@@ -37,7 +43,7 @@ scanblock(int32 depth, byte *b, int64 n)
void **vp;
int64 i;
- if(Debug)
+ if(Debug > 1)
printf("%d scanblock %p %D\n", depth, b, n);
off = (uint32)(uintptr)b & (PtrSize-1);
if(off) {
@@ -54,12 +60,18 @@ scanblock(int32 depth, byte *b, int64 n)
if(mlookup(obj, &obj, &size, &ref)) {
if(*ref == RefFree || *ref == RefStack)
continue;
- if(*ref == (RefNone|RefNoPointers)) {
+
+ // If marked for finalization already, some other finalization-ready
+ // object has a pointer: turn off finalization until that object is gone.
+ // This means that cyclic finalizer loops never get collected,
+ // so don't do that.
+
+ if(*ref == (RefNone|RefNoPointers) || *ref == (RefFinalize|RefNoPointers)) {
*ref = RefSome|RefNoPointers;
continue;
}
- if(*ref == RefNone) {
- if(Debug)
+ if(*ref == RefNone || *ref == RefFinalize) {
+ if(Debug > 1)
printf("%d found at %p: ", depth, &vp[i]);
*ref = RefSome;
scanblock(depth+1, obj, size);
@@ -78,6 +90,8 @@ scanstack(G *gp)
sp = (byte*)&gp;
else
sp = gp->sched.sp;
+ if(Debug > 1)
+ printf("scanstack %d %p\n", gp->goid, sp);
stk = (Stktop*)gp->stackbase;
while(stk) {
scanblock(0, sp, (byte*)stk - sp);
@@ -120,7 +134,7 @@ mark(void)
}
static void
-sweepspan(MSpan *s)
+sweepspan(MSpan *s, int32 pass)
{
int32 i, n, npages, size;
byte *p;
@@ -131,24 +145,7 @@ sweepspan(MSpan *s)
p = (byte*)(s->start << PageShift);
if(s->sizeclass == 0) {
// Large block.
- switch(s->gcref0) {
- default:
- throw("bad 'ref count'");
- case RefFree:
- case RefStack:
- break;
- case RefNone:
- case RefNone|RefNoPointers:
- if(Debug)
- printf("free %D at %p\n", (uint64)s->npages<<PageShift, p);
- free(p);
- break;
- case RefSome:
- case RefSome|RefNoPointers:
-//printf("gc-mem 1 %D\n", (uint64)s->npages<<PageShift);
- s->gcref0 = RefNone; // set up for next mark phase
- break;
- }
+ sweepblock(p, (uint64)s->npages<<PageShift, &s->gcref0, pass);
return;
}
@@ -157,26 +154,57 @@ sweepspan(MSpan *s)
size = class_to_size[s->sizeclass];
npages = class_to_allocnpages[s->sizeclass];
n = (npages << PageShift) / (size + RefcountOverhead);
- for(i=0; i<n; i++) {
- switch(s->gcref[i]) {
- default:
- throw("bad 'ref count'");
- case RefFree:
- case RefStack:
- break;
- case RefNone:
- case RefNone|RefNoPointers:
- if(Debug)
- printf("free %d at %p\n", size, p+i*size);
- free(p + i*size);
- break;
- case RefSome:
- case RefSome|RefNoPointers:
- s->gcref[i] = RefNone; // set up for next mark phase
- break;
+ for(i=0; i<n; i++)
+ sweepblock(p+i*size, size, &s->gcref[i], pass);
+}
+
+static void
+sweepblock(byte *p, int64 n, uint32 *gcrefp, int32 pass)
+{
+ uint32 gcref;
+
+ gcref = *gcrefp;
+ switch(gcref) {
+ default:
+ throw("bad 'ref count'");
+ case RefFree:
+ case RefStack:
+ break;
+ case RefNone:
+ case RefNone|RefNoPointers:
+ if(pass == 0 && getfinalizer(p, 0)) {
+ // Tentatively mark as finalizable.
+ // Make sure anything it points at will not be collected.
+ if(Debug > 0)
+ printf("maybe finalize %p+%D\n", p, n);
+ *gcrefp = RefFinalize | (gcref&RefNoPointers);
+ scanblock(100, p, n);
+ } else if(pass == 1) {
+ if(Debug > 0)
+ printf("free %p+%D\n", p, n);
+ free(p);
+ }
+ break;
+ case RefFinalize:
+ case RefFinalize|RefNoPointers:
+ if(pass != 1)
+ throw("sweepspan pass 0 RefFinalize");
+ if(pfinq < efinq) {
+ if(Debug > 0)
+ printf("finalize %p+%D\n", p, n);
+ *pfinq++ = getfinalizer(p, 1);
+ *pfinq++ = p;
}
+ // Reset for next mark+sweep.
+ *gcrefp = RefNone | (gcref&RefNoPointers);
+ break;
+ case RefSome:
+ case RefSome|RefNoPointers:
+ // Reset for next mark+sweep.
+ if(pass == 1)
+ *gcrefp = RefNone | (gcref&RefNoPointers);
+ break;
}
-//printf("gc-mem %d %d\n", s->ref, size);
}
static void
@@ -184,9 +212,13 @@ sweep(void)
{
MSpan *s;
- // Sweep all the spans.
+ // Sweep all the spans marking blocks to be finalized.
for(s = mheap.allspans; s != nil; s = s->allnext)
- sweepspan(s);
+ sweepspan(s, 0);
+
+ // Sweep again queueing finalizers and freeing the others.
+ for(s = mheap.allspans; s != nil; s = s->allnext)
+ sweepspan(s, 1);
}
// Semaphore, not Lock, so that the goroutine
@@ -209,6 +241,7 @@ void
gc(int32 force)
{
byte *p;
+ void **fp;
// The gc is turned off (via enablegc) until
// the bootstrap has completed.
@@ -245,6 +278,17 @@ gc(int32 force)
mstats.next_gc = mstats.inuse_pages+mstats.inuse_pages*gcpercent/100;
}
m->gcing = 0;
+
+ // kick off goroutines to run queued finalizers
+ m->locks++; // disable gc during the mallocs in newproc
+ for(fp=finq; fp<pfinq; fp+=2) {
+ ·newproc(sizeof(void*), fp[0], fp[1]);
+ fp[0] = nil;
+ fp[1] = nil;
+ }
+ pfinq = finq;
+ m->locks--;
+
semrelease(&gcsema);
starttheworld();
}
diff --git a/src/pkg/runtime/runtime.h b/src/pkg/runtime/runtime.h
index 03b54fc26..2182ef319 100644
--- a/src/pkg/runtime/runtime.h
+++ b/src/pkg/runtime/runtime.h
@@ -390,6 +390,7 @@ void goexit(void);
void runcgo(void (*fn)(void*), void*);
void ·entersyscall(void);
void ·exitsyscall(void);
+void ·newproc(int32, byte*, byte*);
void siginit(void);
bool sigsend(int32 sig);
diff --git a/src/pkg/runtime/type.h b/src/pkg/runtime/type.h
index 36a3b6acf..69036f112 100644
--- a/src/pkg/runtime/type.h
+++ b/src/pkg/runtime/type.h
@@ -14,6 +14,7 @@ typedef struct IMethod IMethod;
typedef struct MapType MapType;
typedef struct ChanType ChanType;
typedef struct SliceType SliceType;
+typedef struct FuncType FuncType;
struct CommonType
{
@@ -115,3 +116,11 @@ struct SliceType
Type;
Type *elem;
};
+
+struct FuncType
+{
+ Type;
+ bool dotdotdot;
+ Slice in;
+ Slice out;
+};