Diffstat (limited to 'src/pkg/runtime')
-rw-r--r--  src/pkg/runtime/alg.goc (renamed from src/pkg/runtime/alg.c)  |  40
-rw-r--r--  src/pkg/runtime/append_test.go  |  19
-rw-r--r--  src/pkg/runtime/arch_386.h  |  5
-rw-r--r--  src/pkg/runtime/arch_amd64.h  |  9
-rw-r--r--  src/pkg/runtime/arch_amd64p32.h  |  16
-rw-r--r--  src/pkg/runtime/arch_arm.h  |  1
-rw-r--r--  src/pkg/runtime/asm_386.s  |  849
-rw-r--r--  src/pkg/runtime/asm_amd64.s  |  893
-rw-r--r--  src/pkg/runtime/asm_amd64p32.s  |  1073
-rw-r--r--  src/pkg/runtime/asm_arm.s  |  477
-rw-r--r--  src/pkg/runtime/atomic_amd64x.c (renamed from src/pkg/runtime/atomic_amd64.c)  |  2
-rw-r--r--  src/pkg/runtime/atomic_arm.c  |  13
-rw-r--r--  src/pkg/runtime/callback_windows.c  |  3
-rw-r--r--  src/pkg/runtime/cgo/asm_nacl_amd64p32.s  |  13
-rw-r--r--  src/pkg/runtime/cgo/gcc_dragonfly_386.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_dragonfly_amd64.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_freebsd_386.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_freebsd_amd64.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_freebsd_arm.c  |  9
-rw-r--r--  src/pkg/runtime/cgo/gcc_linux_386.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_linux_amd64.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_linux_arm.c  |  8
-rw-r--r--  src/pkg/runtime/cgo/gcc_netbsd_386.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_netbsd_amd64.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_netbsd_arm.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_openbsd_386.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_openbsd_amd64.c  |  4
-rw-r--r--  src/pkg/runtime/cgo/gcc_windows_386.c  |  16
-rw-r--r--  src/pkg/runtime/cgo/gcc_windows_amd64.c  |  14
-rw-r--r--  src/pkg/runtime/cgo/libcgo.h  |  1
-rw-r--r--  src/pkg/runtime/cgocall.c  |  53
-rw-r--r--  src/pkg/runtime/chan.goc (renamed from src/pkg/runtime/chan.c)  |  528
-rw-r--r--  src/pkg/runtime/chan.h  |  75
-rw-r--r--  src/pkg/runtime/chan_test.go  |  637
-rw-r--r--  src/pkg/runtime/complex.goc (renamed from src/pkg/runtime/complex.c)  |  8
-rw-r--r--  src/pkg/runtime/cpuprof.goc (renamed from src/pkg/runtime/cpuprof.c)  |  11
-rw-r--r--  src/pkg/runtime/crash_test.go  |  96
-rw-r--r--  src/pkg/runtime/debug/garbage.go  |  20
-rw-r--r--  src/pkg/runtime/debug/heapdump_test.go  |  33
-rw-r--r--  src/pkg/runtime/debug/stack.go  |  6
-rw-r--r--  src/pkg/runtime/defs.c (renamed from src/pkg/runtime/export_test.c)  |  15
-rw-r--r--  src/pkg/runtime/defs_freebsd.go  |  6
-rw-r--r--  src/pkg/runtime/defs_freebsd_386.h  |  6
-rw-r--r--  src/pkg/runtime/defs_freebsd_amd64.h  |  6
-rw-r--r--  src/pkg/runtime/defs_freebsd_arm.h  |  36
-rw-r--r--  src/pkg/runtime/defs_nacl_386.h  |  63
-rw-r--r--  src/pkg/runtime/defs_nacl_amd64p32.h  |  90
-rw-r--r--  src/pkg/runtime/defs_openbsd_386.h  |  8
-rw-r--r--  src/pkg/runtime/defs_openbsd_amd64.h  |  9
-rw-r--r--  src/pkg/runtime/defs_solaris.go  |  156
-rw-r--r--  src/pkg/runtime/defs_solaris_amd64.go  |  48
-rw-r--r--  src/pkg/runtime/defs_solaris_amd64.h  |  254
-rw-r--r--  src/pkg/runtime/env_plan9.c  |  10
-rw-r--r--  src/pkg/runtime/env_posix.c  |  17
-rw-r--r--  src/pkg/runtime/error.go  |  7
-rw-r--r--  src/pkg/runtime/export_test.go  |  34
-rw-r--r--  src/pkg/runtime/extern.go  |  28
-rw-r--r--  src/pkg/runtime/funcdata.h  |  6
-rw-r--r--  src/pkg/runtime/futex_test.go  |  75
-rw-r--r--  src/pkg/runtime/gc_test.go  |  83
-rw-r--r--  src/pkg/runtime/hash_test.go  |  6
-rw-r--r--  src/pkg/runtime/hashmap.goc (renamed from src/pkg/runtime/hashmap.c)  |  707
-rw-r--r--  src/pkg/runtime/hashmap.h  |  147
-rw-r--r--  src/pkg/runtime/hashmap_fast.c  |  90
-rw-r--r--  src/pkg/runtime/heapdump.c  |  981
-rw-r--r--  src/pkg/runtime/iface.goc (renamed from src/pkg/runtime/iface.c)  |  239
-rw-r--r--  src/pkg/runtime/lfstack.goc (renamed from src/pkg/runtime/lfstack.c)  |  24
-rw-r--r--  src/pkg/runtime/lfstack_test.go  |  12
-rw-r--r--  src/pkg/runtime/lock_futex.c  |  12
-rw-r--r--  src/pkg/runtime/lock_sema.c  |  11
-rw-r--r--  src/pkg/runtime/malloc.goc  |  604
-rw-r--r--  src/pkg/runtime/malloc.h  |  215
-rw-r--r--  src/pkg/runtime/map_test.go  |  66
-rw-r--r--  src/pkg/runtime/mapspeed_test.go  |  30
-rw-r--r--  src/pkg/runtime/mcache.c  |  123
-rw-r--r--  src/pkg/runtime/mcentral.c  |  205
-rw-r--r--  src/pkg/runtime/mem.go  |  3
-rw-r--r--  src/pkg/runtime/mem_darwin.c  |  13
-rw-r--r--  src/pkg/runtime/mem_dragonfly.c  |  19
-rw-r--r--  src/pkg/runtime/mem_freebsd.c  |  19
-rw-r--r--  src/pkg/runtime/mem_linux.c  |  30
-rw-r--r--  src/pkg/runtime/mem_nacl.c  |  118
-rw-r--r--  src/pkg/runtime/mem_netbsd.c  |  17
-rw-r--r--  src/pkg/runtime/mem_openbsd.c  |  17
-rw-r--r--  src/pkg/runtime/mem_plan9.c  |  13
-rw-r--r--  src/pkg/runtime/mem_solaris.c  |  99
-rw-r--r--  src/pkg/runtime/mem_windows.c  |  43
-rw-r--r--  src/pkg/runtime/memclr_386.s  |  127
-rw-r--r--  src/pkg/runtime/memclr_amd64.s  |  116
-rw-r--r--  src/pkg/runtime/memclr_arm.s  |  6
-rw-r--r--  src/pkg/runtime/memclr_plan9_386.s  |  50
-rw-r--r--  src/pkg/runtime/memclr_plan9_amd64.s  |  48
-rw-r--r--  src/pkg/runtime/memmove_386.s  |  2
-rw-r--r--  src/pkg/runtime/memmove_amd64.s  |  2
-rw-r--r--  src/pkg/runtime/memmove_nacl_amd64p32.s  |  46
-rw-r--r--  src/pkg/runtime/memmove_plan9_386.s  |  127
-rw-r--r--  src/pkg/runtime/memmove_plan9_amd64.s  |  126
-rw-r--r--  src/pkg/runtime/memmove_test.go  |  179
-rw-r--r--  src/pkg/runtime/mfinal.c  |  219
-rw-r--r--  src/pkg/runtime/mfinal_test.go  |  175
-rw-r--r--  src/pkg/runtime/mgc0.c  |  1814
-rw-r--r--  src/pkg/runtime/mgc0.go  |  12
-rw-r--r--  src/pkg/runtime/mgc0.h  |  41
-rw-r--r--  src/pkg/runtime/mheap.c  |  506
-rw-r--r--  src/pkg/runtime/mknacl.sh  |  15
-rw-r--r--  src/pkg/runtime/mprof.goc  |  271
-rw-r--r--  src/pkg/runtime/msize.c  |  36
-rw-r--r--  src/pkg/runtime/netpoll.goc  |  169
-rw-r--r--  src/pkg/runtime/netpoll_epoll.c  |  7
-rw-r--r--  src/pkg/runtime/netpoll_kqueue.c  |  7
-rw-r--r--  src/pkg/runtime/netpoll_nacl.c  |  37
-rw-r--r--  src/pkg/runtime/netpoll_solaris.c  |  268
-rw-r--r--  src/pkg/runtime/netpoll_windows.c  |  15
-rw-r--r--  src/pkg/runtime/norace_test.go  |  36
-rw-r--r--  src/pkg/runtime/os_darwin.c  |  14
-rw-r--r--  src/pkg/runtime/os_darwin.h  |  2
-rw-r--r--  src/pkg/runtime/os_dragonfly.c  |  14
-rw-r--r--  src/pkg/runtime/os_dragonfly.h  |  1
-rw-r--r--  src/pkg/runtime/os_freebsd.c  |  20
-rw-r--r--  src/pkg/runtime/os_freebsd.h  |  1
-rw-r--r--  src/pkg/runtime/os_linux.c  |  13
-rw-r--r--  src/pkg/runtime/os_linux.h  |  1
-rw-r--r--  src/pkg/runtime/os_linux_arm.c  |  2
-rw-r--r--  src/pkg/runtime/os_nacl.c  |  278
-rw-r--r--  src/pkg/runtime/os_nacl.h  |  162
-rw-r--r--  src/pkg/runtime/os_netbsd.c  |  14
-rw-r--r--  src/pkg/runtime/os_netbsd.h  |  1
-rw-r--r--  src/pkg/runtime/os_openbsd.c  |  16
-rw-r--r--  src/pkg/runtime/os_openbsd.h  |  1
-rw-r--r--  src/pkg/runtime/os_plan9.c  |  101
-rw-r--r--  src/pkg/runtime/os_plan9.h  |  9
-rw-r--r--  src/pkg/runtime/os_plan9_386.c  |  135
-rw-r--r--  src/pkg/runtime/os_plan9_amd64.c  |  106
-rw-r--r--  src/pkg/runtime/os_solaris.c  |  583
-rw-r--r--  src/pkg/runtime/os_solaris.h  |  51
-rw-r--r--  src/pkg/runtime/os_windows.c  |  101
-rw-r--r--  src/pkg/runtime/os_windows_386.c  |  48
-rw-r--r--  src/pkg/runtime/os_windows_amd64.c  |  44
-rw-r--r--  src/pkg/runtime/panic.c  |  326
-rw-r--r--  src/pkg/runtime/parfor.c  |  28
-rw-r--r--  src/pkg/runtime/pprof/pprof.go  |  2
-rw-r--r--  src/pkg/runtime/pprof/pprof_test.go  |  29
-rw-r--r--  src/pkg/runtime/print.c  |  63
-rw-r--r--  src/pkg/runtime/proc.c  |  717
-rw-r--r--  src/pkg/runtime/proc.p  |  526
-rw-r--r--  src/pkg/runtime/proc_test.go  |  66
-rw-r--r--  src/pkg/runtime/race.c  |  394
-rw-r--r--  src/pkg/runtime/race.h  |  3
-rw-r--r--  src/pkg/runtime/race/README  |  2
-rw-r--r--  src/pkg/runtime/race/race.go  |  118
-rw-r--r--  src/pkg/runtime/race/race_darwin_amd64.syso  |  bin 192988 -> 222964 bytes
-rw-r--r--  src/pkg/runtime/race/race_linux_amd64.syso  |  bin 195144 -> 243208 bytes
-rw-r--r--  src/pkg/runtime/race/race_test.go  |  17
-rw-r--r--  src/pkg/runtime/race/race_windows_amd64.syso  |  bin 161295 -> 210859 bytes
-rw-r--r--  src/pkg/runtime/race/testdata/chan_test.go  |  207
-rw-r--r--  src/pkg/runtime/race/testdata/finalizer_test.go  |  22
-rw-r--r--  src/pkg/runtime/race/testdata/map_test.go  |  79
-rw-r--r--  src/pkg/runtime/race/testdata/mop_test.go  |  22
-rw-r--r--  src/pkg/runtime/race0.c  |  6
-rw-r--r--  src/pkg/runtime/race_amd64.s  |  240
-rw-r--r--  src/pkg/runtime/rdebug.goc  |  27
-rw-r--r--  src/pkg/runtime/rt0_freebsd_arm.s  |  5
-rw-r--r--  src/pkg/runtime/rt0_nacl_386.s  |  22
-rw-r--r--  src/pkg/runtime/rt0_nacl_amd64p32.s  |  30
-rw-r--r--  src/pkg/runtime/rt0_solaris_amd64.s  |  18
-rw-r--r--  src/pkg/runtime/runtime-gdb.py  |  160
-rw-r--r--  src/pkg/runtime/runtime.c  |  120
-rw-r--r--  src/pkg/runtime/runtime.h  |  254
-rw-r--r--  src/pkg/runtime/runtime1.goc  |  114
-rw-r--r--  src/pkg/runtime/runtime_test.go  |  82
-rw-r--r--  src/pkg/runtime/runtime_unix_test.go  |  56
-rw-r--r--  src/pkg/runtime/sema.goc  |  6
-rw-r--r--  src/pkg/runtime/signal_386.c  |  9
-rw-r--r--  src/pkg/runtime/signal_amd64x.c (renamed from src/pkg/runtime/signal_amd64.c)  |  33
-rw-r--r--  src/pkg/runtime/signal_arm.c  |  7
-rw-r--r--  src/pkg/runtime/signal_nacl_386.h  |  23
-rw-r--r--  src/pkg/runtime/signal_nacl_amd64p32.h  |  31
-rw-r--r--  src/pkg/runtime/signal_solaris_amd64.h  |  31
-rw-r--r--  src/pkg/runtime/signal_unix.c  |  3
-rw-r--r--  src/pkg/runtime/signals_freebsd.h  |  2
-rw-r--r--  src/pkg/runtime/signals_linux.h  |  2
-rw-r--r--  src/pkg/runtime/signals_nacl.h  |  50
-rw-r--r--  src/pkg/runtime/signals_plan9.h  |  60
-rw-r--r--  src/pkg/runtime/signals_solaris.h  |  94
-rw-r--r--  src/pkg/runtime/slice.goc (renamed from src/pkg/runtime/slice.c)  |  74
-rw-r--r--  src/pkg/runtime/softfloat_arm.c  |  4
-rw-r--r--  src/pkg/runtime/sqrt.go  |  150
-rw-r--r--  src/pkg/runtime/stack.c  |  711
-rw-r--r--  src/pkg/runtime/stack.h  |  5
-rw-r--r--  src/pkg/runtime/stack_gen_test.go  |  1473
-rw-r--r--  src/pkg/runtime/stack_test.go  |  1661
-rw-r--r--  src/pkg/runtime/string.goc  |  102
-rw-r--r--  src/pkg/runtime/symtab.goc (renamed from src/pkg/runtime/symtab.c)  |  27
-rw-r--r--  src/pkg/runtime/sys_darwin_386.s  |  3
-rw-r--r--  src/pkg/runtime/sys_dragonfly_386.s  |  4
-rw-r--r--  src/pkg/runtime/sys_dragonfly_amd64.s  |  4
-rw-r--r--  src/pkg/runtime/sys_freebsd_386.s  |  11
-rw-r--r--  src/pkg/runtime/sys_freebsd_amd64.s  |  6
-rw-r--r--  src/pkg/runtime/sys_freebsd_arm.s  |  9
-rw-r--r--  src/pkg/runtime/sys_linux_386.s  |  6
-rw-r--r--  src/pkg/runtime/sys_linux_amd64.s  |  4
-rw-r--r--  src/pkg/runtime/sys_linux_arm.s  |  2
-rw-r--r--  src/pkg/runtime/sys_nacl_386.s  |  243
-rw-r--r--  src/pkg/runtime/sys_nacl_amd64p32.s  |  413
-rw-r--r--  src/pkg/runtime/sys_openbsd_386.s  |  56
-rw-r--r--  src/pkg/runtime/sys_openbsd_amd64.s  |  22
-rw-r--r--  src/pkg/runtime/sys_plan9_386.s  |  15
-rw-r--r--  src/pkg/runtime/sys_plan9_amd64.s  |  2
-rw-r--r--  src/pkg/runtime/sys_solaris_amd64.s  |  267
-rw-r--r--  src/pkg/runtime/sys_windows_386.s  |  123
-rw-r--r--  src/pkg/runtime/sys_windows_amd64.s  |  126
-rw-r--r--  src/pkg/runtime/sys_x86.c  |  30
-rw-r--r--  src/pkg/runtime/syscall_nacl.h  |  71
-rw-r--r--  src/pkg/runtime/syscall_solaris.goc  |  374
-rw-r--r--  src/pkg/runtime/syscall_windows.goc  |  19
-rw-r--r--  src/pkg/runtime/syscall_windows_test.go  |  249
-rw-r--r--  src/pkg/runtime/time.goc  |  32
-rw-r--r--  src/pkg/runtime/traceback_arm.c  |  116
-rw-r--r--  src/pkg/runtime/traceback_x86.c  |  207
-rw-r--r--  src/pkg/runtime/type.go  |  1
-rw-r--r--  src/pkg/runtime/type.h  |  8
-rw-r--r--  src/pkg/runtime/typekind.h  |  3
-rw-r--r--  src/pkg/runtime/vlop_arm.s  |  13
-rw-r--r--  src/pkg/runtime/vlrt_386.c  |  12
-rw-r--r--  src/pkg/runtime/vlrt_arm.c  |  7
225 files changed, 19753 insertions(+), 6985 deletions(-)
diff --git a/src/pkg/runtime/alg.c b/src/pkg/runtime/alg.goc
index c3a839695..f1b8d5982 100644
--- a/src/pkg/runtime/alg.c
+++ b/src/pkg/runtime/alg.goc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package runtime
#include "runtime.h"
#include "type.h"
#include "../../cmd/ld/textflag.h"
@@ -20,7 +21,7 @@ runtime·memhash(uintptr *h, uintptr s, void *a)
{
byte *b;
uintptr hash;
- if(use_aeshash) {
+ if(!NaCl && use_aeshash) {
runtime·aeshash(h, s, a);
return;
}
@@ -464,11 +465,15 @@ runtime·algarray[] =
// Runtime helpers.
// used in asm_{386,amd64}.s
+#pragma dataflag NOPTR
byte runtime·aeskeysched[HashRandomBytes];
void
runtime·hashinit(void)
{
+ if(NaCl)
+ return;
+
// Install aes hash algorithm if we have the instructions we need
if((runtime·cpuid_ecx & (1 << 25)) != 0 && // aes (aesenc)
(runtime·cpuid_ecx & (1 << 9)) != 0 && // sse3 (pshufb)
@@ -505,37 +510,40 @@ void
runtime·equal(Type *t, ...)
{
byte *x, *y;
- uintptr ret;
+ bool *ret;
- x = (byte*)(&t+1);
+ x = (byte*)ROUND((uintptr)(&t+1), t->align);
y = x + t->size;
- ret = (uintptr)(y + t->size);
- ret = ROUND(ret, Structrnd);
- t->alg->equal((bool*)ret, t->size, x, y);
+ ret = (bool*)ROUND((uintptr)(y+t->size), Structrnd);
+ t->alg->equal(ret, t->size, x, y);
+}
+
+// Testing adapter for memclr
+func memclrBytes(s Slice) {
+ runtime·memclr(s.array, s.len);
}
// Testing adapters for hash quality tests (see hash_test.go)
-void runtime·haveGoodHash(bool res) {
+func haveGoodHash() (res bool) {
res = use_aeshash;
- FLUSH(&res);
}
-void runtime·stringHash(String s, uintptr seed, uintptr res) {
+
+func stringHash(s String, seed uintptr) (res uintptr) {
runtime·algarray[ASTRING].hash(&seed, sizeof(String), &s);
res = seed;
- FLUSH(&res);
}
-void runtime·bytesHash(Slice s, uintptr seed, uintptr res) {
+
+func bytesHash(s Slice, seed uintptr) (res uintptr) {
runtime·algarray[AMEM].hash(&seed, s.len, s.array);
res = seed;
- FLUSH(&res);
}
-void runtime·int32Hash(uint32 i, uintptr seed, uintptr res) {
+
+func int32Hash(i uint32, seed uintptr) (res uintptr) {
runtime·algarray[AMEM32].hash(&seed, sizeof(uint32), &i);
res = seed;
- FLUSH(&res);
}
-void runtime·int64Hash(uint64 i, uintptr seed, uintptr res) {
+
+func int64Hash(i uint64, seed uintptr) (res uintptr) {
runtime·algarray[AMEM64].hash(&seed, sizeof(uint64), &i);
res = seed;
- FLUSH(&res);
}
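
The rename from alg.c to alg.goc lets these exported helpers be written in the func name(args) (results) form: the goc2c translator expands that form back into C and inserts the result flush automatically, which is why the explicit FLUSH(&res) calls disappear above. A sketch of roughly what the translator emits for stringHash, inferred from the removed lines (shape only):

	void runtime·stringHash(String s, uintptr seed, uintptr res)
	{
		runtime·algarray[ASTRING].hash(&seed, sizeof(String), &s);
		res = seed;
		FLUSH(&res);	/* generated by goc2c; no longer hand-written */
	}
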
diff --git a/src/pkg/runtime/append_test.go b/src/pkg/runtime/append_test.go
index 937c8259f..a67dc9b49 100644
--- a/src/pkg/runtime/append_test.go
+++ b/src/pkg/runtime/append_test.go
@@ -19,6 +19,25 @@ func BenchmarkAppend(b *testing.B) {
}
}
+func BenchmarkAppendGrowByte(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x []byte
+ for j := 0; j < 1<<20; j++ {
+ x = append(x, byte(j))
+ }
+ }
+}
+
+func BenchmarkAppendGrowString(b *testing.B) {
+ var s string
+ for i := 0; i < b.N; i++ {
+ var x []string
+ for j := 0; j < 1<<20; j++ {
+ x = append(x, s)
+ }
+ }
+}
+
func benchmarkAppendBytes(b *testing.B, length int) {
b.StopTimer()
x := make([]byte, 0, N)
diff --git a/src/pkg/runtime/arch_386.h b/src/pkg/runtime/arch_386.h
index ebdb3ff4e..5c0a54f8c 100644
--- a/src/pkg/runtime/arch_386.h
+++ b/src/pkg/runtime/arch_386.h
@@ -7,5 +7,10 @@ enum {
BigEndian = 0,
CacheLineSize = 64,
RuntimeGogoBytes = 64,
+#ifdef GOOS_nacl
+ PhysPageSize = 65536,
+#else
+ PhysPageSize = 4096,
+#endif
PCQuantum = 1
};
diff --git a/src/pkg/runtime/arch_amd64.h b/src/pkg/runtime/arch_amd64.h
index 2bddf0796..c8a21847c 100644
--- a/src/pkg/runtime/arch_amd64.h
+++ b/src/pkg/runtime/arch_amd64.h
@@ -6,6 +6,15 @@ enum {
thechar = '6',
BigEndian = 0,
CacheLineSize = 64,
+#ifdef GOOS_solaris
+ RuntimeGogoBytes = 80,
+#else
+#ifdef GOOS_windows
+ RuntimeGogoBytes = 80,
+#else
RuntimeGogoBytes = 64,
+#endif // Windows
+#endif // Solaris
+ PhysPageSize = 4096,
PCQuantum = 1
};
diff --git a/src/pkg/runtime/arch_amd64p32.h b/src/pkg/runtime/arch_amd64p32.h
new file mode 100644
index 000000000..073a9e30e
--- /dev/null
+++ b/src/pkg/runtime/arch_amd64p32.h
@@ -0,0 +1,16 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+enum {
+ thechar = '6',
+ BigEndian = 0,
+ CacheLineSize = 64,
+ RuntimeGogoBytes = 64,
+#ifdef GOOS_nacl
+ PhysPageSize = 65536,
+#else
+ PhysPageSize = 4096,
+#endif
+ PCQuantum = 1
+};
diff --git a/src/pkg/runtime/arch_arm.h b/src/pkg/runtime/arch_arm.h
index e5da01c60..b9711289f 100644
--- a/src/pkg/runtime/arch_arm.h
+++ b/src/pkg/runtime/arch_arm.h
@@ -7,5 +7,6 @@ enum {
BigEndian = 0,
CacheLineSize = 32,
RuntimeGogoBytes = 80,
+ PhysPageSize = 4096,
PCQuantum = 4
};
diff --git a/src/pkg/runtime/asm_386.s b/src/pkg/runtime/asm_386.s
index 5c642c0ed..95312089d 100644
--- a/src/pkg/runtime/asm_386.s
+++ b/src/pkg/runtime/asm_386.s
@@ -247,6 +247,10 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
MOVL $0, 0x1003 // crash if newstack returns
RET
+TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
+ MOVL $0, DX
+ JMP runtime·morestack(SB)
+
// Called from panic. Mimics morestack,
// reuses stack growth code to create a frame
// with the desired args running the desired function.
@@ -307,7 +311,7 @@ TEXT runtime·newstackcall(SB), NOSPLIT, $0-12
JMP AX
// Note: can't just "JMP runtime·NAME(SB)" - bad inlining results.
-TEXT reflect·call(SB), NOSPLIT, $0-12
+TEXT reflect·call(SB), NOSPLIT, $0-16
MOVL argsize+8(FP), CX
DISPATCH(call16, 16)
DISPATCH(call32, 32)
@@ -339,8 +343,22 @@ TEXT reflect·call(SB), NOSPLIT, $0-12
MOVL $runtime·badreflectcall(SB), AX
JMP AX
+// Argument map for the callXX frames. Each has one
+// stack map (for the single call) with 3 arguments.
+DATA gcargs_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
+DATA gcargs_reflectcall<>+0x04(SB)/4, $6 // 3 args
+DATA gcargs_reflectcall<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsScalar<<4))
+GLOBL gcargs_reflectcall<>(SB),RODATA,$12
+
+// callXX frames have no locals
+DATA gclocals_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
+DATA gclocals_reflectcall<>+0x04(SB)/4, $0 // 0 locals
+GLOBL gclocals_reflectcall<>(SB),RODATA,$8
+
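+// Worked encoding of the gcargs bitmap above, assuming the two-bit
+// pointer-map classes from mgc0.h of this era (BitsScalar=1, BitsPointer=2):
+//
+//	enum { BitsScalar = 1, BitsPointer = 2 };	/* assumed values */
+//	/* fn *FuncVal (pointer), argptr *byte (pointer), sizes (scalar) */
+//	int bits = BitsPointer + (BitsPointer<<2) + (BitsScalar<<4);	/* 2+8+16 = 26 */
+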
#define CALLFN(NAME,MAXSIZE) \
-TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-12; \
+TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-16; \
+ FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_reflectcall<>(SB); \
+ FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_reflectcall<>(SB);\
/* copy arguments to stack */ \
MOVL argptr+4(FP), SI; \
MOVL argsize+8(FP), CX; \
@@ -348,11 +366,17 @@ TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-12; \
REP;MOVSB; \
/* call function */ \
MOVL f+0(FP), DX; \
- CALL (DX); \
+ MOVL (DX), AX; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ CALL AX; \
/* copy return values back */ \
MOVL argptr+4(FP), DI; \
MOVL argsize+8(FP), CX; \
+ MOVL retoffset+12(FP), BX; \
MOVL SP, SI; \
+ ADDL BX, DI; \
+ ADDL BX, SI; \
+ SUBL BX, CX; \
REP;MOVSB; \
RET
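
The new retoffset argument narrows the copy-back to just the result words: only the frame suffix starting at retoffset holds return values, so the ADDL/ADDL/SUBL triple is the assembly form of this one-line C model:

	/* copy only the results back, not the argument words */
	runtime·memmove(argptr+retoffset, sp+retoffset, argsize-retoffset);
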
@@ -483,6 +507,12 @@ TEXT runtime·xchg(SB), NOSPLIT, $0-8
XCHGL AX, 0(BX)
RET
+TEXT runtime·xchgp(SB), NOSPLIT, $0-8
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ XCHGL AX, 0(BX)
+ RET
+
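+// xchgp is an unconditional atomic pointer swap; XCHGL leaves the previous
+// pointer in AX, so the C-level contract is old = runtime·xchgp(&p, new).
+// A hypothetical use (recycle is an invented helper, for illustration only):
+//
+//	void *old;
+//	old = runtime·xchgp(&sharedbuf, newbuf);	/* hand off the buffer */
+//	if(old != nil)
+//		recycle(old);
+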
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL 4(SP), AX
again:
@@ -646,7 +676,6 @@ havem:
// Save current sp in m->g0->sched.sp in preparation for
// switch back to m->curg stack.
// NOTE: unwindm knows that the saved g->sched.sp is at 0(SP).
- // On Windows, the SEH is at 4(SP) and 8(SP).
MOVL m_g0(BP), SI
MOVL (g_sched+gobuf_sp)(SI), AX
MOVL AX, 0(SP)
@@ -747,21 +776,6 @@ TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
INT $3
RET
-TEXT runtime·memclr(SB),NOSPLIT,$0-8
- MOVL 4(SP), DI // arg 1 addr
- MOVL 8(SP), CX // arg 2 count
- MOVL CX, BX
- ANDL $3, BX
- SHRL $2, CX
- MOVL $0, AX
- CLD
- REP
- STOSL
- MOVL BX, CX
- REP
- STOSB
- RET
-
TEXT runtime·getcallerpc(SB),NOSPLIT,$0-4
MOVL x+0(FP),AX // addr of first arg
MOVL -4(AX),AX // get calling pc
@@ -1353,3 +1367,800 @@ cmp_allsame:
SETEQ CX // 1 if alen == blen
LEAL -1(CX)(AX*2), AX // 1,0,-1 result
RET
+
+// A Duff's device for zeroing memory.
+// The compiler jumps to computed addresses within
+// this routine to zero chunks of memory. Do not
+// change this code without also changing the code
+// in ../../cmd/8g/ggen.c:clearfat.
+// AX: zero
+// DI: ptr to memory to be zeroed
+// DI is updated as a side effect.
+TEXT runtime·duffzero(SB), NOSPLIT, $0-0
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ STOSL
+ RET
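
For readers unfamiliar with the idiom the comment names: the routine above is a fully unrolled variant, entered at a computed offset so that exactly the right number of STOSL instructions run before the RET, with no loop counter at all. The original C formulation of Duff's device instead interleaves a switch with a loop; a minimal sketch (requires count > 0):

	/* Jump into the middle of an unrolled loop: the first pass handles
	   the remainder, later passes zero 8 words at a time. */
	void
	duffzero_c(uint32 *p, int count)
	{
		int n = (count + 7) / 8;
		switch(count % 8) {
		case 0:	do {	*p++ = 0;
		case 7:		*p++ = 0;
		case 6:		*p++ = 0;
		case 5:		*p++ = 0;
		case 4:		*p++ = 0;
		case 3:		*p++ = 0;
		case 2:		*p++ = 0;
		case 1:		*p++ = 0;
			} while(--n > 0);
		}
	}
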
+
+// A Duff's device for copying memory.
+// The compiler jumps to computed addresses within
+// this routine to copy chunks of memory. Source
+// and destination must not overlap. Do not
+// change this code without also changing the code
+// in ../../cmd/6g/cgen.c:sgen.
+// SI: ptr to source memory
+// DI: ptr to destination memory
+// SI and DI are updated as a side effect.
+
+// NOTE: this is equivalent to a sequence of MOVSL but
+// for some reason MOVSL is really slow.
+TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ MOVL (SI),CX
+ ADDL $4,SI
+ MOVL CX,(DI)
+ ADDL $4,DI
+
+ RET
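
The compiler never calls duffcopy from its start; it jumps to a computed address inside it. Because every 4-byte copy is the same fixed-size MOVL/ADDL/MOVL/ADDL group, copying n units means entering n groups before the final RET. A sketch of the address arithmetic (UNITSIZE stands for the machine-code byte length of one group; the value here is an invented placeholder, the real 8g derives it from the instruction encodings):

	enum { UNITSIZE = 10 };	/* hypothetical bytes per copy group */
	uintptr
	duffcopy_entry(uintptr duffcopy_end, int n)
	{
		return duffcopy_end - 1/*RET*/ - (uintptr)n*UNITSIZE;
	}
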
+
+TEXT runtime·timenow(SB), NOSPLIT, $0-0
+ JMP time·now(SB)
diff --git a/src/pkg/runtime/asm_amd64.s b/src/pkg/runtime/asm_amd64.s
index 2c2ffedd1..3c7eaf343 100644
--- a/src/pkg/runtime/asm_amd64.s
+++ b/src/pkg/runtime/asm_amd64.s
@@ -53,6 +53,9 @@ needtls:
// skip TLS setup on Plan 9
CMPL runtime·isplan9(SB), $1
JEQ ok
+ // skip TLS setup on Solaris
+ CMPL runtime·issolaris(SB), $1
+ JEQ ok
LEAQ runtime·tls0(SB), DI
CALL runtime·settls(SB)
@@ -286,7 +289,7 @@ TEXT runtime·newstackcall(SB), NOSPLIT, $0-20
JMP AX
// Note: can't just "JMP runtime·NAME(SB)" - bad inlining results.
-TEXT reflect·call(SB), NOSPLIT, $0-20
+TEXT reflect·call(SB), NOSPLIT, $0-24
MOVLQZX argsize+16(FP), CX
DISPATCH(call16, 16)
DISPATCH(call32, 32)
@@ -318,8 +321,22 @@ TEXT reflect·call(SB), NOSPLIT, $0-20
MOVQ $runtime·badreflectcall(SB), AX
JMP AX
+// Argument map for the callXX frames. Each has one
+// stack map (for the single call) with 3 arguments.
+DATA gcargs_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
+DATA gcargs_reflectcall<>+0x04(SB)/4, $6 // 3 args
+DATA gcargs_reflectcall<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsScalar<<4))
+GLOBL gcargs_reflectcall<>(SB),RODATA,$12
+
+// callXX frames have no locals
+DATA gclocals_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
+DATA gclocals_reflectcall<>+0x04(SB)/4, $0 // 0 locals
+GLOBL gclocals_reflectcall<>(SB),RODATA,$8
+
#define CALLFN(NAME,MAXSIZE) \
-TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-20; \
+TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-24; \
+ FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_reflectcall<>(SB); \
+ FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_reflectcall<>(SB);\
/* copy arguments to stack */ \
MOVQ argptr+8(FP), SI; \
MOVLQZX argsize+16(FP), CX; \
@@ -327,11 +344,16 @@ TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-20; \
REP;MOVSB; \
/* call function */ \
MOVQ f+0(FP), DX; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
CALL (DX); \
/* copy return values back */ \
MOVQ argptr+8(FP), DI; \
MOVLQZX argsize+16(FP), CX; \
+ MOVLQZX retoffset+20(FP), BX; \
MOVQ SP, SI; \
+ ADDQ BX, DI; \
+ ADDQ BX, SI; \
+ SUBQ BX, CX; \
REP;MOVSB; \
RET
@@ -453,6 +475,46 @@ TEXT morestack<>(SB),NOSPLIT,$0
MOVQ $runtime·morestack(SB), AX
JMP AX
+TEXT runtime·morestack00_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack00(SB)
+
+TEXT runtime·morestack01_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack01(SB)
+
+TEXT runtime·morestack10_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack10(SB)
+
+TEXT runtime·morestack11_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack11(SB)
+
+TEXT runtime·morestack8_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack8(SB)
+
+TEXT runtime·morestack16_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack16(SB)
+
+TEXT runtime·morestack24_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack24(SB)
+
+TEXT runtime·morestack32_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack32(SB)
+
+TEXT runtime·morestack40_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack40(SB)
+
+TEXT runtime·morestack48_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack48(SB)
+
// bool cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
@@ -546,6 +608,12 @@ TEXT runtime·xchg64(SB), NOSPLIT, $0-16
XCHGQ AX, 0(BX)
RET
+TEXT runtime·xchgp(SB), NOSPLIT, $0-16
+ MOVQ 8(SP), BX
+ MOVQ 16(SP), AX
+ XCHGQ AX, 0(BX)
+ RET
+
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL 8(SP), AX
again:
@@ -614,10 +682,16 @@ TEXT runtime·asmcgocall(SB),NOSPLIT,$0-16
MOVQ m_g0(BP), SI
MOVQ g(CX), DI
CMPQ SI, DI
- JEQ 4(PC)
+ JEQ nosave
+ MOVQ m_gsignal(BP), SI
+ CMPQ SI, DI
+ JEQ nosave
+
+ MOVQ m_g0(BP), SI
CALL gosave<>(SB)
MOVQ SI, g(CX)
MOVQ (g_sched+gobuf_sp)(SI), SP
+nosave:
// Now on a scheduling stack (a pthread-created stack).
// Make sure we have enough room for 4 stack-backed fast-call
@@ -779,21 +853,6 @@ TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
INT $3
RET
-TEXT runtime·memclr(SB),NOSPLIT,$0-16
- MOVQ 8(SP), DI // arg 1 addr
- MOVQ 16(SP), CX // arg 2 count
- MOVQ CX, BX
- ANDQ $7, BX
- SHRQ $3, CX
- MOVQ $0, AX
- CLD
- REP
- STOSQ
- MOVQ BX, CX
- REP
- STOSB
- RET
-
TEXT runtime·getcallerpc(SB),NOSPLIT,$0-8
MOVQ x+0(FP),AX // addr of first arg
MOVQ -8(AX),AX // get calling pc
@@ -1340,3 +1399,801 @@ TEXT bytes·Equal(SB),NOSPLIT,$0-49
eqret:
MOVB AX, ret+48(FP)
RET
+
+// A Duff's device for zeroing memory.
+// The compiler jumps to computed addresses within
+// this routine to zero chunks of memory. Do not
+// change this code without also changing the code
+// in ../../cmd/6g/ggen.c:clearfat.
+// AX: zero
+// DI: ptr to memory to be zeroed
+// DI is updated as a side effect.
+TEXT runtime·duffzero(SB), NOSPLIT, $0-0
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ STOSQ
+ RET
+
+// A Duff's device for copying memory.
+// The compiler jumps to computed addresses within
+// this routine to copy chunks of memory. Source
+// and destination must not overlap. Do not
+// change this code without also changing the code
+// in ../../cmd/6g/cgen.c:sgen.
+// SI: ptr to source memory
+// DI: ptr to destination memory
+// SI and DI are updated as a side effect.
+
+// NOTE: this is equivalent to a sequence of MOVSQ but
+// for some reason that is 3.5x slower than this code.
+// The STOSQ above seem fine, though.
+TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ MOVQ (SI),CX
+ ADDQ $8,SI
+ MOVQ CX,(DI)
+ ADDQ $8,DI
+
+ RET
+
+TEXT runtime·timenow(SB), NOSPLIT, $0-0
+ JMP time·now(SB)
diff --git a/src/pkg/runtime/asm_amd64p32.s b/src/pkg/runtime/asm_amd64p32.s
new file mode 100644
index 000000000..d47f12283
--- /dev/null
+++ b/src/pkg/runtime/asm_amd64p32.s
@@ -0,0 +1,1073 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "zasm_GOOS_GOARCH.h"
+#include "funcdata.h"
+#include "../../cmd/ld/textflag.h"
+
+TEXT _rt0_go(SB),NOSPLIT,$0
+ // copy arguments forward on an even stack
+ MOVL argc+0(FP), AX
+ MOVL argv+4(FP), BX
+ MOVL SP, CX
+ SUBL $128, SP // plenty of scratch
+ ANDL $~15, CX
+ MOVL CX, SP
+
+ MOVL AX, 16(SP)
+ MOVL BX, 24(SP)
+
+ // create istack out of the given (operating system) stack.
+ MOVL $runtime·g0(SB), DI
+ LEAL (-64*1024+104)(SP), DI
+ MOVL BX, g_stackguard(DI)
+ MOVL BX, g_stackguard0(DI)
+ MOVL SP, g_stackbase(DI)
+
+ // find out information about the processor we're on
+ MOVQ $0, AX
+ CPUID
+ CMPQ AX, $0
+ JE nocpuinfo
+ MOVQ $1, AX
+ CPUID
+ MOVL CX, runtime·cpuid_ecx(SB)
+ MOVL DX, runtime·cpuid_edx(SB)
+nocpuinfo:
+
+needtls:
+ LEAL runtime·tls0(SB), DI
+ CALL runtime·settls(SB)
+
+ // store through it, to make sure it works
+ get_tls(BX)
+ MOVQ $0x123, g(BX)
+ MOVQ runtime·tls0(SB), AX
+ CMPQ AX, $0x123
+ JEQ 2(PC)
+ MOVL AX, 0 // abort
+ok:
+ // set the per-goroutine and per-mach "registers"
+ get_tls(BX)
+ LEAL runtime·g0(SB), CX
+ MOVL CX, g(BX)
+ LEAL runtime·m0(SB), AX
+ MOVL AX, m(BX)
+
+ // save m->g0 = g0
+ MOVL CX, m_g0(AX)
+
+ CLD // convention is D is always left cleared
+ CALL runtime·check(SB)
+
+ MOVL 16(SP), AX // copy argc
+ MOVL AX, 0(SP)
+ MOVL 24(SP), AX // copy argv
+ MOVL AX, 4(SP)
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·hashinit(SB)
+ CALL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ MOVL $runtime·main·f(SB), AX // entry
+ MOVL $0, 0(SP)
+ MOVL AX, 4(SP)
+ ARGSIZE(8)
+ CALL runtime·newproc(SB)
+ ARGSIZE(-1)
+
+ // start this M
+ CALL runtime·mstart(SB)
+
+ MOVL $0xf1, 0xf1 // crash
+ RET
+
+DATA runtime·main·f+0(SB)/4,$runtime·main(SB)
+GLOBL runtime·main·f(SB),RODATA,$4
+
+TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
+ INT $3
+ RET
+
+TEXT runtime·asminit(SB),NOSPLIT,$0-0
+ // No per-thread init.
+ RET
+
+/*
+ * go-routine
+ */
+
+// void gosave(Gobuf*)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), NOSPLIT, $0-4
+ MOVL b+0(FP), AX // gobuf
+ LEAL b+0(FP), BX // caller's SP
+ MOVL BX, gobuf_sp(AX)
+ MOVL 0(SP), BX // caller's PC
+ MOVL BX, gobuf_pc(AX)
+ MOVL $0, gobuf_ctxt(AX)
+ MOVQ $0, gobuf_ret(AX)
+ get_tls(CX)
+ MOVL g(CX), BX
+ MOVL BX, gobuf_g(AX)
+ RET
+
+// void gogo(Gobuf*)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), NOSPLIT, $0-4
+ MOVL b+0(FP), BX // gobuf
+ MOVL gobuf_g(BX), DX
+ MOVL 0(DX), CX // make sure g != nil
+ get_tls(CX)
+ MOVL DX, g(CX)
+ MOVL gobuf_sp(BX), SP // restore SP
+ MOVL gobuf_ctxt(BX), DX
+ MOVQ gobuf_ret(BX), AX
+ MOVL $0, gobuf_sp(BX) // clear to help garbage collector
+ MOVQ $0, gobuf_ret(BX)
+ MOVL $0, gobuf_ctxt(BX)
+ MOVL gobuf_pc(BX), BX
+ JMP BX
+
+// void mcall(void (*fn)(G*))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall(SB), NOSPLIT, $0-4
+ MOVL fn+0(FP), DI
+
+ get_tls(CX)
+ MOVL g(CX), AX // save state in g->sched
+ MOVL 0(SP), BX // caller's PC
+ MOVL BX, (g_sched+gobuf_pc)(AX)
+ LEAL fn+0(FP), BX // caller's SP
+ MOVL BX, (g_sched+gobuf_sp)(AX)
+ MOVL AX, (g_sched+gobuf_g)(AX)
+
+ // switch to m->g0 & its stack, call fn
+ MOVL m(CX), BX
+ MOVL m_g0(BX), SI
+ CMPL SI, AX // if g == m->g0 call badmcall
+ JNE 3(PC)
+ MOVL $runtime·badmcall(SB), AX
+ JMP AX
+ MOVL SI, g(CX) // g = m->g0
+ MOVL (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp
+ PUSHQ AX
+ ARGSIZE(8)
+ CALL DI
+ POPQ AX
+ MOVL $runtime·badmcall2(SB), AX
+ JMP AX
+ RET
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+// Caller has already done get_tls(CX); MOVQ m(CX), BX.
+//
+// The traceback routines see morestack on a g0 as being
+// the top of a stack (for example, morestack calling newstack
+// calling the scheduler calling newm calling gc), so we must
+// record an argument size. For that purpose, it has no arguments.
+TEXT runtime·morestack(SB),NOSPLIT,$0-0
+ // Cannot grow scheduler stack (m->g0).
+ MOVL m_g0(BX), SI
+ CMPL g(CX), SI
+ JNE 2(PC)
+ MOVL 0, AX
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVL 8(SP), AX // f's caller's PC
+ MOVL AX, (m_morebuf+gobuf_pc)(BX)
+ LEAL 16(SP), AX // f's caller's SP
+ MOVL AX, (m_morebuf+gobuf_sp)(BX)
+ MOVL AX, m_moreargp(BX)
+ get_tls(CX)
+ MOVL g(CX), SI
+ MOVL SI, (m_morebuf+gobuf_g)(BX)
+
+ // Set g->sched to context in f.
+ MOVL 0(SP), AX // f's PC
+ MOVL AX, (g_sched+gobuf_pc)(SI)
+ MOVL SI, (g_sched+gobuf_g)(SI)
+ LEAL 8(SP), AX // f's SP
+ MOVL AX, (g_sched+gobuf_sp)(SI)
+ MOVL DX, (g_sched+gobuf_ctxt)(SI)
+
+ // Call newstack on m->g0's stack.
+ MOVL m_g0(BX), BX
+ MOVL BX, g(CX)
+ MOVL (g_sched+gobuf_sp)(BX), SP
+ CALL runtime·newstack(SB)
+ MOVL $0, 0x1003 // crash if newstack returns
+ RET
+
+// Called from panic. Mimics morestack,
+// reuses stack growth code to create a frame
+// with the desired args running the desired function.
+//
+// func call(fn *byte, arg *byte, argsize uint32).
+TEXT runtime·newstackcall(SB), NOSPLIT, $0-20
+ get_tls(CX)
+ MOVL m(CX), BX
+
+ // Save our caller's state as the PC and SP to
+ // restore when returning from f.
+ MOVL 0(SP), AX // our caller's PC
+ MOVL AX, (m_morebuf+gobuf_pc)(BX)
+ LEAL 8(SP), AX // our caller's SP
+ MOVL AX, (m_morebuf+gobuf_sp)(BX)
+ MOVL g(CX), AX
+ MOVL AX, (m_morebuf+gobuf_g)(BX)
+
+ // Save our own state as the PC and SP to restore
+ // if this goroutine needs to be restarted.
+ MOVL $runtime·newstackcall(SB), DI
+ MOVL DI, (g_sched+gobuf_pc)(AX)
+ MOVL SP, (g_sched+gobuf_sp)(AX)
+
+ // Set up morestack arguments to call f on a new stack.
+ // We set f's frame size to 1, as a hint to newstack
+ // that this is a call from runtime·newstackcall.
+ // If it turns out that f needs a larger frame than
+ // the default stack, f's usual stack growth prolog will
+ // allocate a new segment (and recopy the arguments).
+ MOVL 8(SP), AX // fn
+ MOVL 12(SP), DX // arg frame
+ MOVL 16(SP), CX // arg size
+
+ MOVQ AX, m_cret(BX) // f's PC
+ MOVL DX, m_moreargp(BX) // argument frame pointer
+ MOVL CX, m_moreargsize(BX) // f's argument size
+ MOVL $1, m_moreframesize(BX) // f's frame size
+
+ // Call newstack on m->g0's stack.
+ MOVL m_g0(BX), BX
+ get_tls(CX)
+ MOVL BX, g(CX)
+ MOVL (g_sched+gobuf_sp)(BX), SP
+ CALL runtime·newstack(SB)
+ MOVL $0, 0x1103 // crash if newstack returns
+ RET
+
+// reflect·call: call a function with the given argument list
+// func call(f *FuncVal, arg *byte, argsize uint32).
+// we don't have variable-sized frames, so we use a small number
+// of constant-sized-frame functions to encode a few bits of size in the pc.
+// Caution: ugly multiline assembly macros in your future!
+
+#define DISPATCH(NAME,MAXSIZE) \
+ CMPL CX, $MAXSIZE; \
+ JA 3(PC); \
+ MOVL $runtime·NAME(SB), AX; \
+ JMP AX
+// Note: can't just "JMP runtime·NAME(SB)" - bad inlining results.
+
+TEXT reflect·call(SB), NOSPLIT, $0-20
+ MOVLQZX argsize+8(FP), CX
+ DISPATCH(call16, 16)
+ DISPATCH(call32, 32)
+ DISPATCH(call64, 64)
+ DISPATCH(call128, 128)
+ DISPATCH(call256, 256)
+ DISPATCH(call512, 512)
+ DISPATCH(call1024, 1024)
+ DISPATCH(call2048, 2048)
+ DISPATCH(call4096, 4096)
+ DISPATCH(call8192, 8192)
+ DISPATCH(call16384, 16384)
+ DISPATCH(call32768, 32768)
+ DISPATCH(call65536, 65536)
+ DISPATCH(call131072, 131072)
+ DISPATCH(call262144, 262144)
+ DISPATCH(call524288, 524288)
+ DISPATCH(call1048576, 1048576)
+ DISPATCH(call2097152, 2097152)
+ DISPATCH(call4194304, 4194304)
+ DISPATCH(call8388608, 8388608)
+ DISPATCH(call16777216, 16777216)
+ DISPATCH(call33554432, 33554432)
+ DISPATCH(call67108864, 67108864)
+ DISPATCH(call134217728, 134217728)
+ DISPATCH(call268435456, 268435456)
+ DISPATCH(call536870912, 536870912)
+ DISPATCH(call1073741824, 1073741824)
+ MOVL $runtime·badreflectcall(SB), AX
+ JMP AX
+
+#define CALLFN(NAME,MAXSIZE) \
+TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-16; \
+ /* copy arguments to stack */ \
+ MOVL argptr+4(FP), SI; \
+ MOVL argsize+8(FP), CX; \
+ MOVL SP, DI; \
+ REP;MOVSB; \
+ /* call function */ \
+ MOVL f+0(FP), DX; \
+ MOVL (DX), AX; \
+ CALL AX; \
+ /* copy return values back */ \
+ MOVL argptr+4(FP), DI; \
+ MOVL argsize+8(FP), CX; \
+ MOVL retoffset+12(FP), BX; \
+ MOVL SP, SI; \
+ ADDL BX, DI; \
+ ADDL BX, SI; \
+ SUBL BX, CX; \
+ REP;MOVSB; \
+ RET
+
+CALLFN(call16, 16)
+CALLFN(call32, 32)
+CALLFN(call64, 64)
+CALLFN(call128, 128)
+CALLFN(call256, 256)
+CALLFN(call512, 512)
+CALLFN(call1024, 1024)
+CALLFN(call2048, 2048)
+CALLFN(call4096, 4096)
+CALLFN(call8192, 8192)
+CALLFN(call16384, 16384)
+CALLFN(call32768, 32768)
+CALLFN(call65536, 65536)
+CALLFN(call131072, 131072)
+CALLFN(call262144, 262144)
+CALLFN(call524288, 524288)
+CALLFN(call1048576, 1048576)
+CALLFN(call2097152, 2097152)
+CALLFN(call4194304, 4194304)
+CALLFN(call8388608, 8388608)
+CALLFN(call16777216, 16777216)
+CALLFN(call33554432, 33554432)
+CALLFN(call67108864, 67108864)
+CALLFN(call134217728, 134217728)
+CALLFN(call268435456, 268435456)
+CALLFN(call536870912, 536870912)
+CALLFN(call1073741824, 1073741824)
+
+// Return point when leaving stack.
+//
+// Lessstack can appear in stack traces for the same reason
+// as morestack; in that context, it has 0 arguments.
+TEXT runtime·lessstack(SB), NOSPLIT, $0-0
+ // Save return value in m->cret
+ get_tls(CX)
+ MOVL m(CX), BX
+ MOVQ AX, m_cret(BX) // MOVQ, to save all 64 bits
+
+ // Call oldstack on m->g0's stack.
+ MOVL m_g0(BX), BX
+ MOVL BX, g(CX)
+ MOVL (g_sched+gobuf_sp)(BX), SP
+ CALL runtime·oldstack(SB)
+ MOVL $0, 0x1004 // crash if oldstack returns
+ RET
+
+// morestack trampolines
+TEXT runtime·morestack00(SB),NOSPLIT,$0
+ get_tls(CX)
+ MOVL m(CX), BX
+ MOVQ $0, AX
+ MOVQ AX, m_moreframesize(BX)
+ MOVL $runtime·morestack(SB), AX
+ JMP AX
+
+TEXT runtime·morestack01(SB),NOSPLIT,$0
+ get_tls(CX)
+ MOVL m(CX), BX
+ SHLQ $32, AX
+ MOVQ AX, m_moreframesize(BX)
+ MOVL $runtime·morestack(SB), AX
+ JMP AX
+
+TEXT runtime·morestack10(SB),NOSPLIT,$0
+ get_tls(CX)
+ MOVL m(CX), BX
+ MOVLQZX AX, AX
+ MOVQ AX, m_moreframesize(BX)
+ MOVL $runtime·morestack(SB), AX
+ JMP AX
+
+TEXT runtime·morestack11(SB),NOSPLIT,$0
+ get_tls(CX)
+ MOVL m(CX), BX
+ MOVQ AX, m_moreframesize(BX)
+ MOVL $runtime·morestack(SB), AX
+ JMP AX
+
+// subcases of morestack01
+// with const of 8,16,...48
+TEXT runtime·morestack8(SB),NOSPLIT,$0
+ MOVQ $1, R8
+ MOVL $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack16(SB),NOSPLIT,$0
+ MOVQ $2, R8
+ MOVL $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack24(SB),NOSPLIT,$0
+ MOVQ $3, R8
+ MOVL $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack32(SB),NOSPLIT,$0
+ MOVQ $4, R8
+ MOVL $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack40(SB),NOSPLIT,$0
+ MOVQ $5, R8
+ MOVL $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack48(SB),NOSPLIT,$0
+ MOVQ $6, R8
+ MOVL $morestack<>(SB), AX
+ JMP AX
+
+TEXT morestack<>(SB),NOSPLIT,$0
+ get_tls(CX)
+ MOVL m(CX), BX
+ SHLQ $35, R8
+ MOVQ R8, m_moreframesize(BX)
+ MOVL $runtime·morestack(SB), AX
+ JMP AX
+
+TEXT runtime·morestack00_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack00(SB)
+
+TEXT runtime·morestack01_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack01(SB)
+
+TEXT runtime·morestack10_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack10(SB)
+
+TEXT runtime·morestack11_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack11(SB)
+
+TEXT runtime·morestack8_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack8(SB)
+
+TEXT runtime·morestack16_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack16(SB)
+
+TEXT runtime·morestack24_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack24(SB)
+
+TEXT runtime·morestack32_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack32(SB)
+
+TEXT runtime·morestack40_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack40(SB)
+
+TEXT runtime·morestack48_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack48(SB)
+
+// bool cas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime·cas(SB), NOSPLIT, $0-12
+ MOVL val+0(FP), BX
+ MOVL old+4(FP), AX
+ MOVL new+8(FP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ JZ 3(PC)
+ MOVL $0, AX
+ RET
+ MOVL $1, AX
+ RET
+
+// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
+// Atomically:
+// if(*val == *old){
+// *val = new;
+// return 1;
+// } else {
+// return 0;
+// }
+TEXT runtime·cas64(SB), NOSPLIT, $0-24
+ MOVL val+0(FP), BX
+ MOVQ old+8(FP), AX
+ MOVQ new+16(FP), CX
+ LOCK
+ CMPXCHGQ CX, 0(BX)
+ JNZ cas64_fail
+ MOVL $1, AX
+ RET
+cas64_fail:
+ MOVL $0, AX
+ RET
+
+// bool casp(void **val, void *old, void *new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime·casp(SB), NOSPLIT, $0-12
+ MOVL val+0(FP), BX
+ MOVL old+4(FP), AX
+ MOVL new+8(FP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ JZ 3(PC)
+ MOVL $0, AX
+ RET
+ MOVL $1, AX
+ RET
+
+// uint32 xadd(uint32 volatile *val, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime·xadd(SB), NOSPLIT, $0-8
+ MOVL val+0(FP), BX
+ MOVL delta+4(FP), AX
+ MOVL AX, CX
+ LOCK
+ XADDL AX, 0(BX)
+ ADDL CX, AX
+ RET
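+
+// One subtlety in xadd above: LOCK XADD leaves the *old* value in AX, while
+// xadd's contract returns the *new* value, hence the trailing ADDL of the
+// saved delta. In C terms (GCC builtin as a stand-in for LOCK XADD):
+//
+//	uint32
+//	xadd_model(uint32 volatile *val, int32 delta)
+//	{
+//		uint32 old = __sync_fetch_and_add(val, delta);	/* LOCK XADD */
+//		return old + delta;	/* return the new value */
+//	}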
+
+TEXT runtime·xadd64(SB), NOSPLIT, $0-16
+ MOVL val+0(FP), BX
+ MOVQ delta+8(FP), AX
+ MOVQ AX, CX
+ LOCK
+ XADDQ AX, 0(BX)
+ ADDQ CX, AX
+ RET
+
+TEXT runtime·xchg(SB), NOSPLIT, $0-8
+ MOVL val+0(FP), BX
+ MOVL new+4(FP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime·xchg64(SB), NOSPLIT, $0-16
+ MOVL val+0(FP), BX
+ MOVQ new+8(FP), AX
+ XCHGQ AX, 0(BX)
+ RET
+
+TEXT runtime·procyield(SB),NOSPLIT,$0-0
+ MOVL val+0(FP), AX
+again:
+ PAUSE
+ SUBL $1, AX
+ JNZ again
+ RET
+
+TEXT runtime·atomicstorep(SB), NOSPLIT, $0-8
+ MOVL ptr+0(FP), BX
+ MOVL val+4(FP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime·atomicstore(SB), NOSPLIT, $0-8
+ MOVL ptr+0(FP), BX
+ MOVL val+4(FP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
+ MOVL ptr+0(FP), BX
+ MOVQ val+8(FP), AX
+ XCHGQ AX, 0(BX)
+ RET
+
+// void jmpdefer(fn, sp);
+// called from deferreturn.
+// 1. pop the caller
+// 2. sub 5 bytes from the callers return
+// 3. jmp to the argument
+TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
+ MOVL fn+0(FP), DX
+ MOVL callersp+4(FP), BX
+ LEAL -8(BX), SP // caller sp after CALL
+ SUBL $5, (SP) // return to CALL again
+ MOVL 0(DX), BX
+ JMP BX // but first run the deferred function
+
+// asmcgocall(void(*fn)(void*), void *arg)
+// Not implemented.
+TEXT runtime·asmcgocall(SB),NOSPLIT,$0-8
+ MOVL 0, AX
+ RET
+
+// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
+// Not implemented.
+TEXT runtime·cgocallback(SB),NOSPLIT,$0-12
+ MOVL 0, AX
+ RET
+
+// void setmg(M*, G*); set m and g. for use by needm.
+// Not implemented.
+TEXT runtime·setmg(SB), NOSPLIT, $0-8
+ MOVL 0, AX
+ RET
+
+// check that SP is in range [g->stackbase, g->stackguard)
+TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
+ get_tls(CX)
+ MOVL g(CX), AX
+ CMPL g_stackbase(AX), SP
+ JHI 2(PC)
+ MOVL 0, AX
+ CMPL SP, g_stackguard(AX)
+ JHI 2(PC)
+ MOVL 0, AX
+ RET
+
+TEXT runtime·memclr(SB),NOSPLIT,$0-8
+ MOVL addr+0(FP), DI
+ MOVL count+4(FP), CX
+ MOVQ CX, BX
+ ANDQ $7, BX
+ SHRQ $3, CX
+ MOVQ $0, AX
+ CLD
+ REP
+ STOSQ
+ MOVQ BX, CX
+ REP
+ STOSB
+ RET
+
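The body splits the count in two: count/8 quadwords cleared with REP STOSQ, then the 0-7 leftover bytes with REP STOSB. Equivalent logic in plain C, as a sketch (strict C would additionally want the quadword stores aligned; x86 tolerates the unaligned case):

	#include <stdint.h>
	#include <stddef.h>

	// Sketch of memclr's quadword/byte split.
	static void
	memclr(unsigned char *addr, size_t count)
	{
		size_t nq = count >> 3;	// SHRQ $3, CX: whole quadwords
		size_t nb = count & 7;	// ANDQ $7, BX: leftover bytes
		uint64_t *q = (uint64_t*)addr;
		while(nq-- > 0)
			*q++ = 0;	// REP STOSQ
		unsigned char *b = (unsigned char*)q;
		while(nb-- > 0)
			*b++ = 0;	// REP STOSB
	}
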
+TEXT runtime·getcallerpc(SB),NOSPLIT,$0-8
+ MOVL x+0(FP),AX // addr of first arg
+ MOVL -8(AX),AX // get calling pc
+ RET
+
+TEXT runtime·setcallerpc(SB),NOSPLIT,$0-16
+ MOVL x+0(FP),AX // addr of first arg
+ MOVL pc+4(FP), BX // pc to set
+ MOVQ BX, -8(AX) // set calling pc
+ RET
+
+TEXT runtime·getcallersp(SB),NOSPLIT,$0-8
+ MOVL sp+0(FP), AX
+ RET
+
+// int64 runtime·cputicks(void)
+TEXT runtime·cputicks(SB),NOSPLIT,$0-0
+ RDTSC
+ SHLQ $32, DX
+ ADDQ DX, AX
+ RET
+
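RDTSC returns the 64-bit timestamp counter split across EDX (high half) and EAX (low half); the SHLQ/ADDQ pair reassembles it. The same in C with inline assembly, as a sketch:

	#include <stdint.h>

	// Sketch of cputicks: combine RDTSC's EDX:EAX halves.
	static uint64_t
	cputicks(void)
	{
		uint32_t lo, hi;
		__asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
		return ((uint64_t)hi << 32) | lo;
	}
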
+TEXT runtime·stackguard(SB),NOSPLIT,$0-16
+ MOVL SP, DX
+ MOVL DX, sp+0(FP)
+ get_tls(CX)
+ MOVL g(CX), BX
+ MOVL g_stackguard(BX), DX
+ MOVL DX, limit+4(FP)
+ RET
+
+GLOBL runtime·tls0(SB), $64
+
+// hash function using AES hardware instructions
+// For now, our one amd64p32 system (NaCl) does not
+// support using AES instructions, so we have not bothered
+// to write the implementations. They can be copied and
+// adjusted from the ones in asm_amd64.s when the time comes.
+
+TEXT runtime·aeshash(SB),NOSPLIT,$0-24
+ RET
+
+TEXT runtime·aeshashstr(SB),NOSPLIT,$0-24
+ RET
+
+TEXT runtime·aeshash32(SB),NOSPLIT,$0-24
+ RET
+
+TEXT runtime·aeshash64(SB),NOSPLIT,$0-24
+ RET
+
+TEXT runtime·memeq(SB),NOSPLIT,$0-12
+ MOVL a+0(FP), SI
+ MOVL b+4(FP), DI
+ MOVL count+8(FP), BX
+ JMP runtime·memeqbody(SB)
+
+// a in SI
+// b in DI
+// count in BX
+TEXT runtime·memeqbody(SB),NOSPLIT,$0-0
+ XORQ AX, AX
+
+ CMPQ BX, $8
+ JB small
+
+ // 64 bytes at a time using xmm registers
+hugeloop:
+ CMPQ BX, $64
+ JB bigloop
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ MOVOU 16(SI), X2
+ MOVOU 16(DI), X3
+ MOVOU 32(SI), X4
+ MOVOU 32(DI), X5
+ MOVOU 48(SI), X6
+ MOVOU 48(DI), X7
+ PCMPEQB X1, X0
+ PCMPEQB X3, X2
+ PCMPEQB X5, X4
+ PCMPEQB X7, X6
+ PAND X2, X0
+ PAND X6, X4
+ PAND X4, X0
+ PMOVMSKB X0, DX
+ ADDQ $64, SI
+ ADDQ $64, DI
+ SUBQ $64, BX
+ CMPL DX, $0xffff
+ JEQ hugeloop
+ RET
+
+ // 8 bytes at a time using 64-bit register
+bigloop:
+ CMPQ BX, $8
+ JBE leftover
+ MOVQ (SI), CX
+ MOVQ (DI), DX
+ ADDQ $8, SI
+ ADDQ $8, DI
+ SUBQ $8, BX
+ CMPQ CX, DX
+ JEQ bigloop
+ RET
+
+ // remaining 0-8 bytes
+leftover:
+ ADDQ BX, SI
+ ADDQ BX, DI
+ MOVQ -8(SI), CX
+ MOVQ -8(DI), DX
+ CMPQ CX, DX
+ SETEQ AX
+ RET
+
+small:
+ CMPQ BX, $0
+ JEQ equal
+
+ LEAQ 0(BX*8), CX
+ NEGQ CX
+
+ CMPB SI, $0xf8
+ JA si_high
+
+ // load at SI won't cross a page boundary.
+ MOVQ (SI), SI
+ JMP si_finish
+si_high:
+	// address ends in 11111xxx. Load the 8 bytes ending at the last byte we want, then shift into position.
+ MOVQ BX, DX
+ ADDQ SI, DX
+ MOVQ -8(DX), SI
+ SHRQ CX, SI
+si_finish:
+
+ // same for DI.
+ CMPB DI, $0xf8
+ JA di_high
+ MOVQ (DI), DI
+ JMP di_finish
+di_high:
+ MOVQ BX, DX
+ ADDQ DI, DX
+ MOVQ -8(DX), DI
+ SHRQ CX, DI
+di_finish:
+
+ SUBQ SI, DI
+ SHLQ CX, DI
+equal:
+ SETEQ AX
+ RET
+
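The 0xf8 test in the small case guards against faulting: if an address ends in 11111xxx, a naive 8-byte load could cross into an unmapped page, so the code instead loads the 8 bytes ending at the last byte it needs and shifts the extras away. Once both words are loaded, equality of the low n bytes reduces to one subtract-and-shift, as in this C sketch:

	#include <stdint.h>

	// Sketch: compare the low n (1..7) bytes of two loaded words.
	// CX above holds -(8*n), i.e. 64 - 8*n mod 64.
	static int
	memeq_small(uint64_t a, uint64_t b, unsigned n)
	{
		unsigned shift = 64 - 8*n;
		// SUBQ SI, DI; SHLQ CX, DI: the low n bytes of b-a are
		// zero exactly when the low n bytes of a and b match.
		return ((b - a) << shift) == 0;
	}
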
+TEXT runtime·cmpstring(SB),NOSPLIT,$0-20
+ MOVL s1+0(FP), SI
+ MOVL s1+4(FP), BX
+ MOVL s2+8(FP), DI
+ MOVL s2+12(FP), DX
+ CALL runtime·cmpbody(SB)
+ MOVL AX, res+16(FP)
+ RET
+
+TEXT bytes·Compare(SB),NOSPLIT,$0-28
+ MOVL s1+0(FP), SI
+ MOVL s1+4(FP), BX
+ MOVL s2+12(FP), DI
+ MOVL s2+16(FP), DX
+ CALL runtime·cmpbody(SB)
+ MOVQ AX, res+24(FP)
+ RET
+
+// input:
+// SI = a
+// DI = b
+// BX = alen
+// DX = blen
+// output:
+// AX = 1/0/-1
+TEXT runtime·cmpbody(SB),NOSPLIT,$0-0
+ CMPQ SI, DI
+ JEQ cmp_allsame
+ CMPQ BX, DX
+ MOVQ DX, R8
+ CMOVQLT BX, R8 // R8 = min(alen, blen) = # of bytes to compare
+ CMPQ R8, $8
+ JB cmp_small
+
+cmp_loop:
+ CMPQ R8, $16
+ JBE cmp_0through16
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, AX
+ XORQ $0xffff, AX // convert EQ to NE
+ JNE cmp_diff16 // branch if at least one byte is not equal
+ ADDQ $16, SI
+ ADDQ $16, DI
+ SUBQ $16, R8
+ JMP cmp_loop
+
+ // AX = bit mask of differences
+cmp_diff16:
+ BSFQ AX, BX // index of first byte that differs
+ XORQ AX, AX
+ ADDQ BX, SI
+ MOVB (SI), CX
+ ADDQ BX, DI
+ CMPB CX, (DI)
+ SETHI AX
+ LEAQ -1(AX*2), AX // convert 1/0 to +1/-1
+ RET
+
+ // 0 through 16 bytes left, alen>=8, blen>=8
+cmp_0through16:
+ CMPQ R8, $8
+ JBE cmp_0through8
+ MOVQ (SI), AX
+ MOVQ (DI), CX
+ CMPQ AX, CX
+ JNE cmp_diff8
+cmp_0through8:
+ ADDQ R8, SI
+ ADDQ R8, DI
+ MOVQ -8(SI), AX
+ MOVQ -8(DI), CX
+ CMPQ AX, CX
+ JEQ cmp_allsame
+
+ // AX and CX contain parts of a and b that differ.
+cmp_diff8:
+ BSWAPQ AX // reverse order of bytes
+ BSWAPQ CX
+ XORQ AX, CX
+ BSRQ CX, CX // index of highest bit difference
+ SHRQ CX, AX // move a's bit to bottom
+ ANDQ $1, AX // mask bit
+ LEAQ -1(AX*2), AX // 1/0 => +1/-1
+ RET
+
+ // 0-7 bytes in common
+cmp_small:
+ LEAQ (R8*8), CX // bytes left -> bits left
+	NEGQ CX // - bits left (== 64 - bits left mod 64)
+ JEQ cmp_allsame
+
+	// load bytes of a into high bytes of SI
+ CMPB SI, $0xf8
+ JA cmp_si_high
+ MOVQ (SI), SI
+ JMP cmp_si_finish
+cmp_si_high:
+ ADDQ R8, SI
+ MOVQ -8(SI), SI
+ SHRQ CX, SI
+cmp_si_finish:
+ SHLQ CX, SI
+
+	// load bytes of b into high bytes of DI
+ CMPB DI, $0xf8
+ JA cmp_di_high
+ MOVQ (DI), DI
+ JMP cmp_di_finish
+cmp_di_high:
+ ADDQ R8, DI
+ MOVQ -8(DI), DI
+ SHRQ CX, DI
+cmp_di_finish:
+ SHLQ CX, DI
+
+ BSWAPQ SI // reverse order of bytes
+ BSWAPQ DI
+ XORQ SI, DI // find bit differences
+ JEQ cmp_allsame
+ BSRQ DI, CX // index of highest bit difference
+ SHRQ CX, SI // move a's bit to bottom
+ ANDQ $1, SI // mask bit
+ LEAQ -1(SI*2), AX // 1/0 => +1/-1
+ RET
+
+cmp_allsame:
+ XORQ AX, AX
+ XORQ CX, CX
+ CMPQ BX, DX
+ SETGT AX // 1 if alen > blen
+ SETEQ CX // 1 if alen == blen
+ LEAQ -1(CX)(AX*2), AX // 1,0,-1 result
+ RET
+
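The cmp_diff8 sequence relies on a byte-swap trick: after BSWAPQ, numeric order of the words matches lexicographic order of their bytes, so the highest differing bit (BSRQ) identifies the first differing byte, and a's bit at that position decides the sign. That step in C, as a sketch assuming the two chunks are known to differ:

	#include <stdint.h>

	// Sketch of cmp_diff8: which of two differing 8-byte chunks
	// is lexicographically greater? Requires a != b.
	static int
	cmp8(uint64_t a, uint64_t b)
	{
		a = __builtin_bswap64(a);              // BSWAPQ
		b = __builtin_bswap64(b);
		uint64_t diff = a ^ b;                 // XORQ
		int bit = 63 - __builtin_clzll(diff);  // BSRQ: highest differing bit
		return ((a >> bit) & 1) ? 1 : -1;      // a's bit set: a > b
	}
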
+TEXT bytes·IndexByte(SB),NOSPLIT,$0
+ MOVL s+0(FP), SI
+ MOVL s_len+4(FP), BX
+ MOVB c+12(FP), AL
+ CALL runtime·indexbytebody(SB)
+ MOVL AX, ret+16(FP)
+ RET
+
+TEXT strings·IndexByte(SB),NOSPLIT,$0
+ MOVL s+0(FP), SI
+ MOVL s_len+4(FP), BX
+ MOVB c+8(FP), AL
+ CALL runtime·indexbytebody(SB)
+ MOVL AX, ret+16(FP)
+ RET
+
+// input:
+// SI: data
+// BX: data len
+// AL: byte sought
+// output:
+// AX: index of the first occurrence of the byte, or -1 if not found
+TEXT runtime·indexbytebody(SB),NOSPLIT,$0
+ MOVL SI, DI
+
+ CMPL BX, $16
+ JLT indexbyte_small
+
+ // round up to first 16-byte boundary
+ TESTL $15, SI
+ JZ aligned
+ MOVL SI, CX
+ ANDL $~15, CX
+ ADDL $16, CX
+
+ // search the beginning
+ SUBL SI, CX
+ REPN; SCASB
+ JZ success
+
+// DI is 16-byte aligned; get ready to search using SSE instructions
+aligned:
+ // round down to last 16-byte boundary
+ MOVL BX, R11
+ ADDL SI, R11
+ ANDL $~15, R11
+
+ // shuffle X0 around so that each byte contains c
+ MOVD AX, X0
+ PUNPCKLBW X0, X0
+ PUNPCKLBW X0, X0
+ PSHUFL $0, X0, X0
+ JMP condition
+
+sse:
+ // move the next 16-byte chunk of the buffer into X1
+ MOVO (DI), X1
+ // compare bytes in X0 to X1
+ PCMPEQB X0, X1
+ // take the top bit of each byte in X1 and put the result in DX
+ PMOVMSKB X1, DX
+ TESTL DX, DX
+ JNZ ssesuccess
+ ADDL $16, DI
+
+condition:
+ CMPL DI, R11
+ JLT sse
+
+ // search the end
+ MOVL SI, CX
+ ADDL BX, CX
+ SUBL R11, CX
+ // if CX == 0, the zero flag will be set and we'll end up
+ // returning a false success
+ JZ failure
+ REPN; SCASB
+ JZ success
+
+failure:
+ MOVL $-1, AX
+ RET
+
+// handle lengths < 16
+indexbyte_small:
+ MOVL BX, CX
+ REPN; SCASB
+ JZ success
+ MOVL $-1, AX
+ RET
+
+// we've found the chunk containing the byte
+// now just figure out which specific byte it is
+ssesuccess:
+ // get the index of the least significant set bit
+ BSFW DX, DX
+ SUBL SI, DI
+ ADDL DI, DX
+ MOVL DX, AX
+ RET
+
+success:
+ SUBL SI, DI
+ SUBL $1, DI
+ MOVL DI, AX
+ RET
+
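The SSE loop checks 16 bytes per iteration: PCMPEQB compares each byte against the broadcast needle, PMOVMSKB collapses the per-byte results into a 16-bit mask, and BSF picks out the first match. One iteration in C intrinsics, as a sketch (requires SSE2):

	#include <emmintrin.h>

	// Sketch: scan one 16-byte-aligned chunk for byte c.
	// Returns the offset of the first match, or -1.
	static int
	index16(const unsigned char *p, unsigned char c)
	{
		__m128i needle = _mm_set1_epi8((char)c);            // the PUNPCK/PSHUFL broadcast
		__m128i chunk  = _mm_load_si128((const __m128i*)p); // MOVO
		__m128i eq     = _mm_cmpeq_epi8(chunk, needle);     // PCMPEQB
		int mask = _mm_movemask_epi8(eq);                   // PMOVMSKB
		return mask ? __builtin_ctz(mask) : -1;             // BSFW
	}
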
+TEXT bytes·Equal(SB),NOSPLIT,$0-25
+ MOVL a_len+4(FP), BX
+ MOVL b_len+16(FP), CX
+ XORL AX, AX
+ CMPL BX, CX
+ JNE eqret
+ MOVL a+0(FP), SI
+ MOVL b+12(FP), DI
+ CALL runtime·memeqbody(SB)
+eqret:
+ MOVB AX, ret+24(FP)
+ RET
+
+TEXT runtime·timenow(SB), NOSPLIT, $0-0
+ JMP time·now(SB)
diff --git a/src/pkg/runtime/asm_arm.s b/src/pkg/runtime/asm_arm.s
index f483e6fc8..1aea9036a 100644
--- a/src/pkg/runtime/asm_arm.s
+++ b/src/pkg/runtime/asm_arm.s
@@ -89,11 +89,9 @@ TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
WORD $0xe1200071 // BKPT 0x0001
RET
-GLOBL runtime·goarm(SB), $4
-
TEXT runtime·asminit(SB),NOSPLIT,$0-0
// disable runfast (flush-to-zero) mode of vfp if runtime.goarm > 5
- MOVW runtime·goarm(SB), R11
+ MOVB runtime·goarm(SB), R11
CMP $5, R11
BLE 4(PC)
WORD $0xeef1ba10 // vmrs r11, fpscr
@@ -215,6 +213,10 @@ TEXT runtime·morestack(SB),NOSPLIT,$-4-0
// is still in this function, and not the beginning of the next.
RET
+TEXT runtime·morestack_noctxt(SB),NOSPLIT,$-4-0
+ MOVW $0, R7
+ B runtime·morestack(SB)
+
// Called from panic. Mimics morestack,
// reuses stack growth code to create a frame
// with the desired args running the desired function.
@@ -267,7 +269,7 @@ TEXT runtime·newstackcall(SB), NOSPLIT, $-4-12
MOVW $runtime·NAME(SB), R1; \
B (R1)
-TEXT reflect·call(SB), NOSPLIT, $-4-12
+TEXT reflect·call(SB), NOSPLIT, $-4-16
MOVW argsize+8(FP), R0
DISPATCH(call16, 16)
DISPATCH(call32, 32)
@@ -299,8 +301,22 @@ TEXT reflect·call(SB), NOSPLIT, $-4-12
MOVW $runtime·badreflectcall(SB), R1
B (R1)
+// Argument map for the callXX frames. Each has one
+// stack map (for the single call) with 3 arguments.
+DATA gcargs_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
+DATA gcargs_reflectcall<>+0x04(SB)/4, $6 // 3 args
+DATA gcargs_reflectcall<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsScalar<<4))
+GLOBL gcargs_reflectcall<>(SB),RODATA,$12
+
+// callXX frames have no locals
+DATA gclocals_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
+DATA gclocals_reflectcall<>+0x04(SB)/4, $0 // 0 locals
+GLOBL gclocals_reflectcall<>(SB),RODATA,$8
+
#define CALLFN(NAME,MAXSIZE) \
-TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-12; \
+TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-16; \
+ FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_reflectcall<>(SB); \
+ FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_reflectcall<>(SB);\
/* copy arguments to stack */ \
MOVW argptr+4(FP), R0; \
MOVW argsize+8(FP), R2; \
@@ -314,11 +330,16 @@ TEXT runtime·NAME(SB), WRAPPER, $MAXSIZE-12; \
/* call function */ \
MOVW f+0(FP), R7; \
MOVW (R7), R0; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
BL (R0); \
/* copy return values back */ \
MOVW argptr+4(FP), R0; \
MOVW argsize+8(FP), R2; \
+ MOVW retoffset+12(FP), R3; \
ADD $4, SP, R1; \
+ ADD R3, R1; \
+ ADD R3, R0; \
+ SUB R3, R2; \
CMP $0, R2; \
RET.EQ ; \
MOVBU.P 1(R1), R5; \
@@ -373,6 +394,10 @@ TEXT runtime·lessstack(SB), NOSPLIT, $-4-0
// 1. grab stored LR for caller
// 2. sub 4 bytes to get back to BL deferreturn
// 3. B to fn
+// TODO(rsc): Push things on stack and then use pop
+// to load all registers simultaneously, so that a profiling
+// interrupt can never see mismatched SP/LR/PC.
+// (And double-check that pop is atomic in that way.)
TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
MOVW 0(SP), LR
MOVW $-4(LR), LR // BL deferreturn
@@ -629,17 +654,33 @@ _next:
// Note: all three functions will clobber R0, and the last
// two can be called from 5c ABI code.
-// g (R10) at 8(TP), m (R9) at 12(TP)
+// save_gm saves the g and m registers into pthread-provided
+// thread-local memory, so that we can call externally compiled
+// ARM code that will overwrite those registers.
+// NOTE: runtime.gogo assumes that R1 is preserved by this function.
TEXT runtime·save_gm(SB),NOSPLIT,$0
- MRC 15, 0, R0, C13, C0, 3 // Fetch TLS register
- MOVW g, 8(R0)
- MOVW m, 12(R0)
+ MRC 15, 0, R0, C13, C0, 3 // fetch TLS base pointer
+ // $runtime.tlsgm(SB) is a special linker symbol.
+ // It is the offset from the TLS base pointer to our
+ // thread-local storage for g and m.
+ MOVW $runtime·tlsgm(SB), R11
+ ADD R11, R0
+ MOVW g, 0(R0)
+ MOVW m, 4(R0)
RET
+// load_gm loads the g and m registers from pthread-provided
+// thread-local memory, for use after calling externally compiled
+// ARM code that overwrote those registers.
TEXT runtime·load_gm(SB),NOSPLIT,$0
- MRC 15, 0, R0, C13, C0, 3 // Fetch TLS register
- MOVW 8(R0), g
- MOVW 12(R0), m
+ MRC 15, 0, R0, C13, C0, 3 // fetch TLS base pointer
+ // $runtime.tlsgm(SB) is a special linker symbol.
+ // It is the offset from the TLS base pointer to our
+ // thread-local storage for g and m.
+ MOVW $runtime·tlsgm(SB), R11
+ ADD R11, R0
+ MOVW 0(R0), g
+ MOVW 4(R0), m
RET
// void setmg_gcc(M*, G*); set m and g called from gcc.
@@ -648,7 +689,6 @@ TEXT setmg_gcc<>(SB),NOSPLIT,$0
MOVW R1, g
B runtime·save_gm(SB)
-
// TODO: share code with memeq?
TEXT bytes·Equal(SB),NOSPLIT,$0
MOVW a_len+4(FP), R1
@@ -726,3 +766,414 @@ _sib_notfound:
MOVW $-1, R0
MOVW R0, ret+12(FP)
RET
+
+TEXT runtime·timenow(SB), NOSPLIT, $0-0
+ B time·now(SB)
+
+// A Duff's device for zeroing memory.
+// The compiler jumps to computed addresses within
+// this routine to zero chunks of memory. Do not
+// change this code without also changing the code
+// in ../../cmd/5g/ggen.c:clearfat.
+// R0: zero
+// R1: ptr to memory to be zeroed
+// R1 is updated as a side effect.
+TEXT runtime·duffzero(SB), NOSPLIT, $0-0
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ MOVW.P R0, 4(R1)
+ RET
+
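The compiler never enters duffzero from the top: to zero n words it branches to an address n stores before the RET, so exactly n of the MOVW.P instructions execute. The classic C shape of the same idea, as a sketch (the real entry points are computed by 5g, not by a switch):

	// Duff's device in C: zero nwords words starting at p.
	static void
	duffzero_c(unsigned *p, int nwords)
	{
		if(nwords <= 0)
			return;
		int n = (nwords + 7) / 8;
		switch(nwords % 8) {
		case 0: do { *p++ = 0;
		case 7:      *p++ = 0;
		case 6:      *p++ = 0;
		case 5:      *p++ = 0;
		case 4:      *p++ = 0;
		case 3:      *p++ = 0;
		case 2:      *p++ = 0;
		case 1:      *p++ = 0;
			} while(--n > 0);
		}
	}
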
+// A Duff's device for copying memory.
+// The compiler jumps to computed addresses within
+// this routine to copy chunks of memory. Source
+// and destination must not overlap. Do not
+// change this code without also changing the code
+// in ../../cmd/5g/cgen.c:sgen.
+// R0: scratch space
+// R1: ptr to source memory
+// R2: ptr to destination memory
+// R1 and R2 are updated as a side effect
+TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ MOVW.P 4(R1), R0
+ MOVW.P R0, 4(R2)
+ RET
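duffcopy is entered the same way: the compiler branches into the middle so that exactly n load/store pairs run before the RET. A hedged sketch of the entry-point arithmetic, assuming each MOVW.P pair occupies 8 bytes of code (two 4-byte ARM instructions; the helper name is illustrative):

	// Hypothetical helper: branch target for copying nwords words.
	static unsigned long
	duffcopy_entry(unsigned long duffcopy_base, int nwords)
	{
		enum { NCOPIES = 128, PAIRBYTES = 8 };
		return duffcopy_base + (NCOPIES - nwords) * PAIRBYTES;
	}
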
diff --git a/src/pkg/runtime/atomic_amd64.c b/src/pkg/runtime/atomic_amd64x.c
index 0bd4d906b..11b578936 100644
--- a/src/pkg/runtime/atomic_amd64.c
+++ b/src/pkg/runtime/atomic_amd64x.c
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build amd64 amd64p32
+
#include "runtime.h"
#include "../../cmd/ld/textflag.h"
diff --git a/src/pkg/runtime/atomic_arm.c b/src/pkg/runtime/atomic_arm.c
index b1e97b27d..d914475c7 100644
--- a/src/pkg/runtime/atomic_arm.c
+++ b/src/pkg/runtime/atomic_arm.c
@@ -42,6 +42,19 @@ runtime·xchg(uint32 volatile* addr, uint32 v)
}
#pragma textflag NOSPLIT
+void*
+runtime·xchgp(void* volatile* addr, void* v)
+{
+ void *old;
+
+ for(;;) {
+ old = *addr;
+ if(runtime·casp(addr, old, v))
+ return old;
+ }
+}
+
+#pragma textflag NOSPLIT
void
runtime·procyield(uint32 cnt)
{
diff --git a/src/pkg/runtime/callback_windows.c b/src/pkg/runtime/callback_windows.c
index 88ee53bb5..285678fba 100644
--- a/src/pkg/runtime/callback_windows.c
+++ b/src/pkg/runtime/callback_windows.c
@@ -49,7 +49,7 @@ runtime·compilecallback(Eface fn, bool cleanstack)
runtime·cbctxts = &(cbs.ctxt[0]);
n = cbs.n;
for(i=0; i<n; i++) {
- if(cbs.ctxt[i]->gobody == fn.data) {
+ if(cbs.ctxt[i]->gobody == fn.data && cbs.ctxt[i]->cleanstack == cleanstack) {
runtime·unlock(&cbs);
// runtime·callbackasm is just a series of CALL instructions
// (each is 5 bytes long), and we want callback to arrive at
@@ -63,6 +63,7 @@ runtime·compilecallback(Eface fn, bool cleanstack)
c = runtime·mal(sizeof *c);
c->gobody = fn.data;
c->argsize = argsize;
+ c->cleanstack = cleanstack;
if(cleanstack && argsize!=0)
c->restorestack = argsize;
else
diff --git a/src/pkg/runtime/cgo/asm_nacl_amd64p32.s b/src/pkg/runtime/cgo/asm_nacl_amd64p32.s
new file mode 100644
index 000000000..377cf72a3
--- /dev/null
+++ b/src/pkg/runtime/cgo/asm_nacl_amd64p32.s
@@ -0,0 +1,13 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../../../cmd/ld/textflag.h"
+
+/*
+ * void crosscall2(void (*fn)(void*, int32), void*, int32)
+ * Save registers and call fn with two arguments.
+ */
+TEXT crosscall2(SB),NOSPLIT,$0
+ INT $3
+ RET
diff --git a/src/pkg/runtime/cgo/gcc_dragonfly_386.c b/src/pkg/runtime/cgo/gcc_dragonfly_386.c
index 6797824c6..695c16634 100644
--- a/src/pkg/runtime/cgo/gcc_dragonfly_386.c
+++ b/src/pkg/runtime/cgo/gcc_dragonfly_386.c
@@ -36,14 +36,14 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
SIGFILLSET(ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_dragonfly_amd64.c b/src/pkg/runtime/cgo/gcc_dragonfly_amd64.c
index eb342a2ff..a46c121ad 100644
--- a/src/pkg/runtime/cgo/gcc_dragonfly_amd64.c
+++ b/src/pkg/runtime/cgo/gcc_dragonfly_amd64.c
@@ -35,7 +35,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
SIGFILLSET(ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
@@ -43,7 +43,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_freebsd_386.c b/src/pkg/runtime/cgo/gcc_freebsd_386.c
index 6797824c6..695c16634 100644
--- a/src/pkg/runtime/cgo/gcc_freebsd_386.c
+++ b/src/pkg/runtime/cgo/gcc_freebsd_386.c
@@ -36,14 +36,14 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
SIGFILLSET(ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_freebsd_amd64.c b/src/pkg/runtime/cgo/gcc_freebsd_amd64.c
index eb342a2ff..a46c121ad 100644
--- a/src/pkg/runtime/cgo/gcc_freebsd_amd64.c
+++ b/src/pkg/runtime/cgo/gcc_freebsd_amd64.c
@@ -35,7 +35,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
SIGFILLSET(ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
@@ -43,7 +43,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_freebsd_arm.c b/src/pkg/runtime/cgo/gcc_freebsd_arm.c
index 211dca75c..6175e1d9c 100644
--- a/src/pkg/runtime/cgo/gcc_freebsd_arm.c
+++ b/src/pkg/runtime/cgo/gcc_freebsd_arm.c
@@ -4,7 +4,9 @@
#include <sys/types.h>
#include <machine/sysarch.h>
+#include <sys/signalvar.h>
#include <pthread.h>
+#include <signal.h>
#include <string.h>
#include "libcgo.h"
@@ -39,10 +41,14 @@ void
_cgo_sys_thread_start(ThreadStart *ts)
{
pthread_attr_t attr;
+ sigset_t ign, oset;
pthread_t p;
size_t size;
int err;
+ SIGFILLSET(ign);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
// Not sure why the memset is necessary here,
// but without it, we get a bogus stack size
// out of pthread_attr_getstacksize. C'est la Linux.
@@ -52,6 +58,9 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
+
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
abort();
diff --git a/src/pkg/runtime/cgo/gcc_linux_386.c b/src/pkg/runtime/cgo/gcc_linux_386.c
index c25c7b70f..0a46c9b7a 100644
--- a/src/pkg/runtime/cgo/gcc_linux_386.c
+++ b/src/pkg/runtime/cgo/gcc_linux_386.c
@@ -34,7 +34,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
sigfillset(&ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
// Not sure why the memset is necessary here,
// but without it, we get a bogus stack size
@@ -46,7 +46,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_linux_amd64.c b/src/pkg/runtime/cgo/gcc_linux_amd64.c
index bd7c88d99..c530183b7 100644
--- a/src/pkg/runtime/cgo/gcc_linux_amd64.c
+++ b/src/pkg/runtime/cgo/gcc_linux_amd64.c
@@ -34,14 +34,14 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
sigfillset(&ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_linux_arm.c b/src/pkg/runtime/cgo/gcc_linux_arm.c
index 9a6e58594..032568155 100644
--- a/src/pkg/runtime/cgo/gcc_linux_arm.c
+++ b/src/pkg/runtime/cgo/gcc_linux_arm.c
@@ -4,6 +4,7 @@
#include <pthread.h>
#include <string.h>
+#include <signal.h>
#include "libcgo.h"
static void *threadentry(void*);
@@ -28,10 +29,14 @@ void
_cgo_sys_thread_start(ThreadStart *ts)
{
pthread_attr_t attr;
+ sigset_t ign, oset;
pthread_t p;
size_t size;
int err;
+ sigfillset(&ign);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
// Not sure why the memset is necessary here,
// but without it, we get a bogus stack size
// out of pthread_attr_getstacksize. C'est la Linux.
@@ -41,6 +46,9 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
+
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
abort();
diff --git a/src/pkg/runtime/cgo/gcc_netbsd_386.c b/src/pkg/runtime/cgo/gcc_netbsd_386.c
index b399e16dc..28690ccbd 100644
--- a/src/pkg/runtime/cgo/gcc_netbsd_386.c
+++ b/src/pkg/runtime/cgo/gcc_netbsd_386.c
@@ -35,14 +35,14 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
sigfillset(&ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_netbsd_amd64.c b/src/pkg/runtime/cgo/gcc_netbsd_amd64.c
index f27e142ce..6e0482d5b 100644
--- a/src/pkg/runtime/cgo/gcc_netbsd_amd64.c
+++ b/src/pkg/runtime/cgo/gcc_netbsd_amd64.c
@@ -35,7 +35,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
sigfillset(&ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
@@ -43,7 +43,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_netbsd_arm.c b/src/pkg/runtime/cgo/gcc_netbsd_arm.c
index 68c8b6e71..ba2ae2568 100644
--- a/src/pkg/runtime/cgo/gcc_netbsd_arm.c
+++ b/src/pkg/runtime/cgo/gcc_netbsd_arm.c
@@ -36,14 +36,14 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
sigfillset(&ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
err = pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_openbsd_386.c b/src/pkg/runtime/cgo/gcc_openbsd_386.c
index 6422d1b93..e682c3725 100644
--- a/src/pkg/runtime/cgo/gcc_openbsd_386.c
+++ b/src/pkg/runtime/cgo/gcc_openbsd_386.c
@@ -122,14 +122,14 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
sigfillset(&ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
err = sys_pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_openbsd_amd64.c b/src/pkg/runtime/cgo/gcc_openbsd_amd64.c
index 5a5a17114..64d29a935 100644
--- a/src/pkg/runtime/cgo/gcc_openbsd_amd64.c
+++ b/src/pkg/runtime/cgo/gcc_openbsd_amd64.c
@@ -122,7 +122,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
int err;
sigfillset(&ign);
- sigprocmask(SIG_SETMASK, &ign, &oset);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
@@ -130,7 +130,7 @@ _cgo_sys_thread_start(ThreadStart *ts)
ts->g->stackguard = size;
err = sys_pthread_create(&p, &attr, threadentry, ts);
- sigprocmask(SIG_SETMASK, &oset, nil);
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
if (err != 0) {
fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
diff --git a/src/pkg/runtime/cgo/gcc_windows_386.c b/src/pkg/runtime/cgo/gcc_windows_386.c
index 02eab12e5..cdc866468 100644
--- a/src/pkg/runtime/cgo/gcc_windows_386.c
+++ b/src/pkg/runtime/cgo/gcc_windows_386.c
@@ -5,6 +5,8 @@
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
+#include <stdlib.h>
+#include <stdio.h>
#include "libcgo.h"
static void threadentry(void*);
@@ -25,14 +27,19 @@ x_cgo_init(G *g)
void
_cgo_sys_thread_start(ThreadStart *ts)
{
- _beginthread(threadentry, 0, ts);
+ uintptr_t thandle;
+
+ thandle = _beginthread(threadentry, 0, ts);
+ if(thandle == -1) {
+ fprintf(stderr, "runtime: failed to create new OS thread (%d)\n", errno);
+ abort();
+ }
}
static void
threadentry(void *v)
{
ThreadStart ts;
- void *tls0;
ts = *(ThreadStart*)v;
free(v);
@@ -43,16 +50,13 @@ threadentry(void *v)
/*
* Set specific keys in thread local storage.
*/
- tls0 = (void*)LocalAlloc(LPTR, 32);
asm volatile (
"movl %0, %%fs:0x14\n" // MOVL tls0, 0x14(FS)
"movl %%fs:0x14, %%eax\n" // MOVL 0x14(FS), tmp
"movl %1, 0(%%eax)\n" // MOVL g, 0(FS)
"movl %2, 4(%%eax)\n" // MOVL m, 4(FS)
- :: "r"(tls0), "r"(ts.g), "r"(ts.m) : "%eax"
+ :: "r"(ts.tls), "r"(ts.g), "r"(ts.m) : "%eax"
);
crosscall_386(ts.fn);
-
- LocalFree(tls0);
}
diff --git a/src/pkg/runtime/cgo/gcc_windows_amd64.c b/src/pkg/runtime/cgo/gcc_windows_amd64.c
index f7695a1cc..d8dd69b4a 100644
--- a/src/pkg/runtime/cgo/gcc_windows_amd64.c
+++ b/src/pkg/runtime/cgo/gcc_windows_amd64.c
@@ -5,6 +5,8 @@
#define WIN64_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
+#include <stdlib.h>
+#include <stdio.h>
#include "libcgo.h"
static void threadentry(void*);
@@ -25,14 +27,19 @@ x_cgo_init(G *g)
void
_cgo_sys_thread_start(ThreadStart *ts)
{
- _beginthread(threadentry, 0, ts);
+ uintptr_t thandle;
+
+ thandle = _beginthread(threadentry, 0, ts);
+ if(thandle == -1) {
+ fprintf(stderr, "runtime: failed to create new OS thread (%d)\n", errno);
+ abort();
+ }
}
static void
threadentry(void *v)
{
ThreadStart ts;
- void *tls0;
ts = *(ThreadStart*)v;
free(v);
@@ -43,13 +50,12 @@ threadentry(void *v)
/*
* Set specific keys in thread local storage.
*/
- tls0 = (void*)LocalAlloc(LPTR, 64);
asm volatile (
"movq %0, %%gs:0x28\n" // MOVL tls0, 0x28(GS)
"movq %%gs:0x28, %%rax\n" // MOVQ 0x28(GS), tmp
"movq %1, 0(%%rax)\n" // MOVQ g, 0(GS)
"movq %2, 8(%%rax)\n" // MOVQ m, 8(GS)
- :: "r"(tls0), "r"(ts.g), "r"(ts.m) : "%rax"
+ :: "r"(ts.tls), "r"(ts.g), "r"(ts.m) : "%rax"
);
crosscall_amd64(ts.fn);
diff --git a/src/pkg/runtime/cgo/libcgo.h b/src/pkg/runtime/cgo/libcgo.h
index 41a371c27..65ea3f372 100644
--- a/src/pkg/runtime/cgo/libcgo.h
+++ b/src/pkg/runtime/cgo/libcgo.h
@@ -34,6 +34,7 @@ struct ThreadStart
{
uintptr m;
G *g;
+ uintptr *tls;
void (*fn)(void);
};
diff --git a/src/pkg/runtime/cgocall.c b/src/pkg/runtime/cgocall.c
index 2a04453fd..7b2ec26f3 100644
--- a/src/pkg/runtime/cgocall.c
+++ b/src/pkg/runtime/cgocall.c
@@ -99,12 +99,7 @@ runtime·cgocall(void (*fn)(void*), void *arg)
{
Defer d;
- if(m->racecall) {
- runtime·asmcgocall(fn, arg);
- return;
- }
-
- if(!runtime·iscgo && !Windows)
+ if(!runtime·iscgo && !Solaris && !Windows)
runtime·throw("cgocall unavailable");
if(fn == 0)
@@ -127,11 +122,10 @@ runtime·cgocall(void (*fn)(void*), void *arg)
d.fn = &endcgoV;
d.siz = 0;
d.link = g->defer;
- d.argp = (void*)-1; // unused because unlockm never recovers
+ d.argp = NoArgs;
d.special = true;
- d.free = false;
g->defer = &d;
-
+
m->ncgo++;
/*
@@ -171,17 +165,6 @@ endcgo(void)
runtime·raceacquire(&cgosync);
}
-void
-runtime·NumCgoCall(int64 ret)
-{
- M *mp;
-
- ret = 0;
- for(mp=runtime·atomicloadp(&runtime·allm); mp; mp=mp->alllink)
- ret += mp->ncgocall;
- FLUSH(&ret);
-}
-
// Helper functions for cgo code.
void (*_cgo_malloc)(void*);
@@ -235,6 +218,11 @@ struct CallbackArgs
#define CBARGS (CallbackArgs*)((byte*)m->g0->sched.sp+2*sizeof(void*))
#endif
+// Unimplemented on amd64p32
+#ifdef GOARCH_amd64p32
+#define CBARGS (CallbackArgs*)(nil)
+#endif
+
// On 386, stack frame is three words, plus caller PC.
#ifdef GOARCH_386
#define CBARGS (CallbackArgs*)((byte*)m->g0->sched.sp+4*sizeof(void*))
@@ -251,21 +239,9 @@ runtime·cgocallbackg(void)
runtime·exit(2);
}
- if(m->racecall) {
- // We were not in syscall, so no need to call runtime·exitsyscall.
- // However we must set m->locks for the following reason.
- // Race detector runtime makes __tsan_symbolize cgo callback
- // holding internal mutexes. The mutexes are not cooperative with Go scheduler.
- // So if we deschedule a goroutine that holds race detector internal mutex
- // (e.g. preempt it), another goroutine will deadlock trying to acquire the same mutex.
- m->locks++;
- runtime·cgocallbackg1();
- m->locks--;
- } else {
- runtime·exitsyscall(); // coming out of cgo call
- runtime·cgocallbackg1();
- runtime·entersyscall(); // going back to cgo call
- }
+ runtime·exitsyscall(); // coming out of cgo call
+ runtime·cgocallbackg1();
+ runtime·entersyscall(); // going back to cgo call
}
void
@@ -283,19 +259,18 @@ runtime·cgocallbackg1(void)
d.fn = &unwindmf;
d.siz = 0;
d.link = g->defer;
- d.argp = (void*)-1; // unused because unwindm never recovers
+ d.argp = NoArgs;
d.special = true;
- d.free = false;
g->defer = &d;
- if(raceenabled && !m->racecall)
+ if(raceenabled)
runtime·raceacquire(&cgosync);
// Invoke callback.
cb = CBARGS;
runtime·newstackcall(cb->fn, cb->arg, cb->argsize);
- if(raceenabled && !m->racecall)
+ if(raceenabled)
runtime·racereleasemerge(&cgosync);
// Pop defer.
diff --git a/src/pkg/runtime/chan.c b/src/pkg/runtime/chan.goc
index 48cc41e20..7a584717b 100644
--- a/src/pkg/runtime/chan.c
+++ b/src/pkg/runtime/chan.goc
@@ -2,96 +2,25 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "type.h"
#include "race.h"
#include "malloc.h"
+#include "chan.h"
#include "../../cmd/ld/textflag.h"
-#define MAXALIGN 8
-#define NOSELGEN 1
-
-typedef struct WaitQ WaitQ;
-typedef struct SudoG SudoG;
-typedef struct Select Select;
-typedef struct Scase Scase;
-
-struct SudoG
-{
- G* g; // g and selgen constitute
- uint32 selgen; // a weak pointer to g
- SudoG* link;
- int64 releasetime;
- byte* elem; // data element
-};
-
-struct WaitQ
-{
- SudoG* first;
- SudoG* last;
-};
-
-// The garbage collector is assuming that Hchan can only contain pointers into the stack
-// and cannot contain pointers into the heap.
-struct Hchan
-{
- uintgo qcount; // total data in the q
- uintgo dataqsiz; // size of the circular q
- uint16 elemsize;
- uint16 pad; // ensures proper alignment of the buffer that follows Hchan in memory
- bool closed;
- Alg* elemalg; // interface for element type
- uintgo sendx; // send index
- uintgo recvx; // receive index
- WaitQ recvq; // list of recv waiters
- WaitQ sendq; // list of send waiters
- Lock;
-};
-
uint32 runtime·Hchansize = sizeof(Hchan);
-// Buffer follows Hchan immediately in memory.
-// chanbuf(c, i) is pointer to the i'th slot in the buffer.
-#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i))
-
-enum
-{
- debug = 0,
-
- // Scase.kind
- CaseRecv,
- CaseSend,
- CaseDefault,
-};
-
-struct Scase
-{
- SudoG sg; // must be first member (cast to Scase)
- Hchan* chan; // chan
- byte* pc; // return pc
- uint16 kind;
- uint16 so; // vararg of selected bool
- bool* receivedp; // pointer to received bool (recv2)
-};
-
-struct Select
-{
- uint16 tcase; // total count of scase[]
- uint16 ncase; // currently filled scase[]
- uint16* pollorder; // case poll order
- Hchan** lockorder; // channel lock order
- Scase scase[1]; // one per case (in order of appearance)
-};
-
static void dequeueg(WaitQ*);
static SudoG* dequeue(WaitQ*);
static void enqueue(WaitQ*, SudoG*);
static void destroychan(Hchan*);
static void racesync(Hchan*, SudoG*);
-Hchan*
-runtime·makechan_c(ChanType *t, int64 hint)
+static Hchan*
+makechan(ChanType *t, int64 hint)
{
Hchan *c;
Type *elem;
@@ -104,13 +33,13 @@ runtime·makechan_c(ChanType *t, int64 hint)
if((sizeof(*c)%MAXALIGN) != 0 || elem->align > MAXALIGN)
runtime·throw("makechan: bad alignment");
- if(hint < 0 || (intgo)hint != hint || (elem->size > 0 && hint > MaxMem / elem->size))
+ if(hint < 0 || (intgo)hint != hint || (elem->size > 0 && hint > (MaxMem - sizeof(*c)) / elem->size))
runtime·panicstring("makechan: size out of range");
// allocate memory in one call
c = (Hchan*)runtime·mallocgc(sizeof(*c) + hint*elem->size, (uintptr)t | TypeInfo_Chan, 0);
c->elemsize = elem->size;
- c->elemalg = elem->alg;
+ c->elemtype = elem;
c->dataqsiz = hint;
if(debug)
@@ -120,21 +49,12 @@ runtime·makechan_c(ChanType *t, int64 hint)
return c;
}
-// For reflect
-// func makechan(typ *ChanType, size uint64) (chan)
-void
-reflect·makechan(ChanType *t, uint64 size, Hchan *c)
-{
- c = runtime·makechan_c(t, size);
- FLUSH(&c);
+func reflect·makechan(t *ChanType, size uint64) (c *Hchan) {
+ c = makechan(t, size);
}
-// makechan(t *ChanType, hint int64) (hchan *chan any);
-void
-runtime·makechan(ChanType *t, int64 hint, Hchan *ret)
-{
- ret = runtime·makechan_c(t, hint);
- FLUSH(&ret);
+func makechan(t *ChanType, size int64) (c *Hchan) {
+ c = makechan(t, size);
}
/*
@@ -151,27 +71,28 @@ runtime·makechan(ChanType *t, int64 hint, Hchan *ret)
* been closed. it is easiest to loop and re-run
* the operation; we'll see that it's now closed.
*/
-void
-runtime·chansend(ChanType *t, Hchan *c, byte *ep, bool *pres, void *pc)
+static bool
+chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
{
SudoG *sg;
SudoG mysg;
G* gp;
int64 t0;
+ if(raceenabled)
+ runtime·racereadobjectpc(ep, t->elem, runtime·getcallerpc(&t), chansend);
+
if(c == nil) {
USED(t);
- if(pres != nil) {
- *pres = false;
- return;
- }
+ if(!block)
+ return false;
runtime·park(nil, nil, "chan send (nil chan)");
- return; // not reached
+ return false; // not reached
}
if(debug) {
runtime·printf("chansend: chan=%p; elem=", c);
- c->elemalg->print(c->elemsize, ep);
+ c->elemtype->alg->print(c->elemsize, ep);
runtime·prints("\n");
}
@@ -184,7 +105,7 @@ runtime·chansend(ChanType *t, Hchan *c, byte *ep, bool *pres, void *pc)
runtime·lock(c);
if(raceenabled)
- runtime·racereadpc(c, pc, runtime·chansend);
+ runtime·racereadpc(c, pc, chansend);
if(c->closed)
goto closed;
@@ -200,28 +121,24 @@ runtime·chansend(ChanType *t, Hchan *c, byte *ep, bool *pres, void *pc)
gp = sg->g;
gp->param = sg;
if(sg->elem != nil)
- c->elemalg->copy(c->elemsize, sg->elem, ep);
+ c->elemtype->alg->copy(c->elemsize, sg->elem, ep);
if(sg->releasetime)
sg->releasetime = runtime·cputicks();
runtime·ready(gp);
-
- if(pres != nil)
- *pres = true;
- return;
+ return true;
}
- if(pres != nil) {
+ if(!block) {
runtime·unlock(c);
- *pres = false;
- return;
+ return false;
}
mysg.elem = ep;
mysg.g = g;
- mysg.selgen = NOSELGEN;
+ mysg.selectdone = nil;
g->param = nil;
enqueue(&c->sendq, &mysg);
- runtime·park(runtime·unlock, c, "chan send");
+ runtime·parkunlock(c, "chan send");
if(g->param == nil) {
runtime·lock(c);
@@ -233,32 +150,33 @@ runtime·chansend(ChanType *t, Hchan *c, byte *ep, bool *pres, void *pc)
if(mysg.releasetime > 0)
runtime·blockevent(mysg.releasetime - t0, 2);
- return;
+ return true;
asynch:
if(c->closed)
goto closed;
if(c->qcount >= c->dataqsiz) {
- if(pres != nil) {
+ if(!block) {
runtime·unlock(c);
- *pres = false;
- return;
+ return false;
}
mysg.g = g;
mysg.elem = nil;
- mysg.selgen = NOSELGEN;
+ mysg.selectdone = nil;
enqueue(&c->sendq, &mysg);
- runtime·park(runtime·unlock, c, "chan send");
+ runtime·parkunlock(c, "chan send");
runtime·lock(c);
goto asynch;
}
- if(raceenabled)
+ if(raceenabled) {
+ runtime·raceacquire(chanbuf(c, c->sendx));
runtime·racerelease(chanbuf(c, c->sendx));
+ }
- c->elemalg->copy(c->elemsize, chanbuf(c, c->sendx), ep);
+ c->elemtype->alg->copy(c->elemsize, chanbuf(c, c->sendx), ep);
if(++c->sendx == c->dataqsiz)
c->sendx = 0;
c->qcount++;
@@ -272,37 +190,36 @@ asynch:
runtime·ready(gp);
} else
runtime·unlock(c);
- if(pres != nil)
- *pres = true;
if(mysg.releasetime > 0)
runtime·blockevent(mysg.releasetime - t0, 2);
- return;
+ return true;
closed:
runtime·unlock(c);
runtime·panicstring("send on closed channel");
+ return false; // not reached
}
-void
-runtime·chanrecv(ChanType *t, Hchan* c, byte *ep, bool *selected, bool *received)
+static bool
+chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
{
SudoG *sg;
SudoG mysg;
G *gp;
int64 t0;
+ // raceenabled: don't need to check ep, as it is always on the stack.
+
if(debug)
runtime·printf("chanrecv: chan=%p\n", c);
if(c == nil) {
USED(t);
- if(selected != nil) {
- *selected = false;
- return;
- }
+ if(!block)
+ return false;
runtime·park(nil, nil, "chan receive (nil chan)");
- return; // not reached
+ return false; // not reached
}
t0 = 0;
@@ -326,32 +243,29 @@ runtime·chanrecv(ChanType *t, Hchan* c, byte *ep, bool *selected, bool *receive
runtime·unlock(c);
if(ep != nil)
- c->elemalg->copy(c->elemsize, ep, sg->elem);
+ c->elemtype->alg->copy(c->elemsize, ep, sg->elem);
gp = sg->g;
gp->param = sg;
if(sg->releasetime)
sg->releasetime = runtime·cputicks();
runtime·ready(gp);
- if(selected != nil)
- *selected = true;
if(received != nil)
*received = true;
- return;
+ return true;
}
- if(selected != nil) {
+ if(!block) {
runtime·unlock(c);
- *selected = false;
- return;
+ return false;
}
mysg.elem = ep;
mysg.g = g;
- mysg.selgen = NOSELGEN;
+ mysg.selectdone = nil;
g->param = nil;
enqueue(&c->recvq, &mysg);
- runtime·park(runtime·unlock, c, "chan receive");
+ runtime·parkunlock(c, "chan receive");
if(g->param == nil) {
runtime·lock(c);
@@ -364,36 +278,37 @@ runtime·chanrecv(ChanType *t, Hchan* c, byte *ep, bool *selected, bool *receive
*received = true;
if(mysg.releasetime > 0)
runtime·blockevent(mysg.releasetime - t0, 2);
- return;
+ return true;
asynch:
if(c->qcount <= 0) {
if(c->closed)
goto closed;
- if(selected != nil) {
+ if(!block) {
runtime·unlock(c);
- *selected = false;
if(received != nil)
*received = false;
- return;
+ return false;
}
mysg.g = g;
mysg.elem = nil;
- mysg.selgen = NOSELGEN;
+ mysg.selectdone = nil;
enqueue(&c->recvq, &mysg);
- runtime·park(runtime·unlock, c, "chan receive");
+ runtime·parkunlock(c, "chan receive");
runtime·lock(c);
goto asynch;
}
- if(raceenabled)
+ if(raceenabled) {
runtime·raceacquire(chanbuf(c, c->recvx));
+ runtime·racerelease(chanbuf(c, c->recvx));
+ }
if(ep != nil)
- c->elemalg->copy(c->elemsize, ep, chanbuf(c, c->recvx));
- c->elemalg->copy(c->elemsize, chanbuf(c, c->recvx), nil);
+ c->elemtype->alg->copy(c->elemsize, ep, chanbuf(c, c->recvx));
+ c->elemtype->alg->copy(c->elemsize, chanbuf(c, c->recvx), nil);
if(++c->recvx == c->dataqsiz)
c->recvx = 0;
c->qcount--;
@@ -408,19 +323,15 @@ asynch:
} else
runtime·unlock(c);
- if(selected != nil)
- *selected = true;
if(received != nil)
*received = true;
if(mysg.releasetime > 0)
runtime·blockevent(mysg.releasetime - t0, 2);
- return;
+ return true;
closed:
if(ep != nil)
- c->elemalg->copy(c->elemsize, ep, nil);
- if(selected != nil)
- *selected = true;
+ c->elemtype->alg->copy(c->elemsize, ep, nil);
if(received != nil)
*received = false;
if(raceenabled)
@@ -428,38 +339,25 @@ closed:
runtime·unlock(c);
if(mysg.releasetime > 0)
runtime·blockevent(mysg.releasetime - t0, 2);
+ return true;
}
-// chansend1(hchan *chan any, elem any);
#pragma textflag NOSPLIT
-void
-runtime·chansend1(ChanType *t, Hchan* c, ...)
-{
- runtime·chansend(t, c, (byte*)(&c+1), nil, runtime·getcallerpc(&t));
+func chansend1(t *ChanType, c *Hchan, elem *byte) {
+ chansend(t, c, elem, true, runtime·getcallerpc(&t));
}
-// chanrecv1(hchan *chan any) (elem any);
#pragma textflag NOSPLIT
-void
-runtime·chanrecv1(ChanType *t, Hchan* c, ...)
-{
- runtime·chanrecv(t, c, (byte*)(&c+1), nil, nil);
+func chanrecv1(t *ChanType, c *Hchan, elem *byte) {
+ chanrecv(t, c, elem, true, nil);
}
-// chanrecv2(hchan *chan any) (elem any, received bool);
+// chanrecv2(hchan *chan any, elem *any) (received bool);
#pragma textflag NOSPLIT
-void
-runtime·chanrecv2(ChanType *t, Hchan* c, ...)
-{
- byte *ae, *ap;
-
- ae = (byte*)(&c+1);
- ap = ae + t->elem->size;
- runtime·chanrecv(t, c, ae, nil, ap);
+func chanrecv2(t *ChanType, c *Hchan, elem *byte) (received bool) {
+ chanrecv(t, c, elem, true, &received);
}
-// func selectnbsend(c chan any, elem any) bool
-//
// compiler implements
//
// select {
@@ -478,18 +376,10 @@ runtime·chanrecv2(ChanType *t, Hchan* c, ...)
// }
//
#pragma textflag NOSPLIT
-void
-runtime·selectnbsend(ChanType *t, Hchan *c, ...)
-{
- byte *ae, *ap;
-
- ae = (byte*)(&c + 1);
- ap = ae + ROUND(t->elem->size, Structrnd);
- runtime·chansend(t, c, ae, ap, runtime·getcallerpc(&t));
+func selectnbsend(t *ChanType, c *Hchan, elem *byte) (selected bool) {
+ selected = chansend(t, c, elem, false, runtime·getcallerpc(&t));
}
-// func selectnbrecv(elem *any, c chan any) bool
-//
// compiler implements
//
// select {
@@ -508,14 +398,10 @@ runtime·selectnbsend(ChanType *t, Hchan *c, ...)
// }
//
#pragma textflag NOSPLIT
-void
-runtime·selectnbrecv(ChanType *t, byte *v, Hchan *c, bool selected)
-{
- runtime·chanrecv(t, c, v, &selected, nil);
+func selectnbrecv(t *ChanType, elem *byte, c *Hchan) (selected bool) {
+ selected = chanrecv(t, c, elem, false, nil);
}
-// func selectnbrecv2(elem *any, ok *bool, c chan any) bool
-//
// compiler implements
//
// select {
@@ -534,89 +420,29 @@ runtime·selectnbrecv(ChanType *t, byte *v, Hchan *c, bool selected)
// }
//
#pragma textflag NOSPLIT
-void
-runtime·selectnbrecv2(ChanType *t, byte *v, bool *received, Hchan *c, bool selected)
-{
- runtime·chanrecv(t, c, v, &selected, received);
+func selectnbrecv2(t *ChanType, elem *byte, received *bool, c *Hchan) (selected bool) {
+ selected = chanrecv(t, c, elem, false, received);
}
-// For reflect:
-// func chansend(c chan, val iword, nb bool) (selected bool)
-// where an iword is the same word an interface value would use:
-// the actual data if it fits, or else a pointer to the data.
-//
-// The "uintptr selected" is really "bool selected" but saying
-// uintptr gets us the right alignment for the output parameter block.
#pragma textflag NOSPLIT
-void
-reflect·chansend(ChanType *t, Hchan *c, uintptr val, bool nb, uintptr selected)
-{
- bool *sp;
- byte *vp;
-
- if(nb) {
- selected = false;
- sp = (bool*)&selected;
- } else {
- *(bool*)&selected = true;
- FLUSH(&selected);
- sp = nil;
- }
- if(t->elem->size <= sizeof(val))
- vp = (byte*)&val;
- else
- vp = (byte*)val;
- runtime·chansend(t, c, vp, sp, runtime·getcallerpc(&t));
+func reflect·chansend(t *ChanType, c *Hchan, elem *byte, nb bool) (selected bool) {
+ selected = chansend(t, c, elem, !nb, runtime·getcallerpc(&t));
}
-// For reflect:
-// func chanrecv(c chan, nb bool) (val iword, selected, received bool)
-// where an iword is the same word an interface value would use:
-// the actual data if it fits, or else a pointer to the data.
-void
-reflect·chanrecv(ChanType *t, Hchan *c, bool nb, uintptr val, bool selected, bool received)
-{
- byte *vp;
- bool *sp;
-
- if(nb) {
- selected = false;
- sp = &selected;
- } else {
- selected = true;
- FLUSH(&selected);
- sp = nil;
- }
+func reflect·chanrecv(t *ChanType, c *Hchan, nb bool, elem *byte) (selected bool, received bool) {
received = false;
- FLUSH(&received);
- if(t->elem->size <= sizeof(val)) {
- val = 0;
- vp = (byte*)&val;
- } else {
- vp = runtime·mal(t->elem->size);
- val = (uintptr)vp;
- FLUSH(&val);
- }
- runtime·chanrecv(t, c, vp, sp, &received);
+ selected = chanrecv(t, c, elem, !nb, &received);
}
-static void newselect(int32, Select**);
+static Select* newselect(int32);
-// newselect(size uint32) (sel *byte);
#pragma textflag NOSPLIT
-void
-runtime·newselect(int32 size, ...)
-{
- int32 o;
- Select **selp;
-
- o = ROUND(sizeof(size), Structrnd);
- selp = (Select**)((byte*)&size + o);
- newselect(size, selp);
+func newselect(size int32) (sel *byte) {
+ sel = (byte*)newselect(size);
}
-static void
-newselect(int32 size, Select **selp)
+static Select*
+newselect(int32 size)
{
int32 n;
Select *sel;
@@ -638,28 +464,22 @@ newselect(int32 size, Select **selp)
sel->ncase = 0;
sel->lockorder = (void*)(sel->scase + size);
sel->pollorder = (void*)(sel->lockorder + size);
- *selp = sel;
if(debug)
runtime·printf("newselect s=%p size=%d\n", sel, size);
+ return sel;
}
// cut in half to give stack a chance to split
static void selectsend(Select *sel, Hchan *c, void *pc, void *elem, int32 so);
-// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
#pragma textflag NOSPLIT
-void
-runtime·selectsend(Select *sel, Hchan *c, void *elem, bool selected)
-{
+func selectsend(sel *Select, c *Hchan, elem *byte) (selected bool) {
selected = false;
- FLUSH(&selected);
// nil cases do not compete
- if(c == nil)
- return;
-
- selectsend(sel, c, runtime·getcallerpc(&sel), elem, (byte*)&selected - (byte*)&sel);
+ if(c != nil)
+ selectsend(sel, c, runtime·getcallerpc(&sel), elem, (byte*)&selected - (byte*)&sel);
}
static void
@@ -688,34 +508,22 @@ selectsend(Select *sel, Hchan *c, void *pc, void *elem, int32 so)
// cut in half to give stack a chance to split
static void selectrecv(Select *sel, Hchan *c, void *pc, void *elem, bool*, int32 so);
-// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
#pragma textflag NOSPLIT
-void
-runtime·selectrecv(Select *sel, Hchan *c, void *elem, bool selected)
-{
+func selectrecv(sel *Select, c *Hchan, elem *byte) (selected bool) {
selected = false;
- FLUSH(&selected);
// nil cases do not compete
- if(c == nil)
- return;
-
- selectrecv(sel, c, runtime·getcallerpc(&sel), elem, nil, (byte*)&selected - (byte*)&sel);
+ if(c != nil)
+ selectrecv(sel, c, runtime·getcallerpc(&sel), elem, nil, (byte*)&selected - (byte*)&sel);
}
-// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
#pragma textflag NOSPLIT
-void
-runtime·selectrecv2(Select *sel, Hchan *c, void *elem, bool *received, bool selected)
-{
+func selectrecv2(sel *Select, c *Hchan, elem *byte, received *bool) (selected bool) {
selected = false;
- FLUSH(&selected);
// nil cases do not compete
- if(c == nil)
- return;
-
- selectrecv(sel, c, runtime·getcallerpc(&sel), elem, received, (byte*)&selected - (byte*)&sel);
+ if(c != nil)
+ selectrecv(sel, c, runtime·getcallerpc(&sel), elem, received, (byte*)&selected - (byte*)&sel);
}
static void
@@ -745,14 +553,9 @@ selectrecv(Select *sel, Hchan *c, void *pc, void *elem, bool *received, int32 so
// cut in half to give stack a chance to split
static void selectdefault(Select*, void*, int32);
-// selectdefault(sel *byte) (selected bool);
#pragma textflag NOSPLIT
-void
-runtime·selectdefault(Select *sel, bool selected)
-{
+func selectdefault(sel *Select) (selected bool) {
selected = false;
- FLUSH(&selected);
-
selectdefault(sel, runtime·getcallerpc(&sel), (byte*)&selected - (byte*)&sel);
}
@@ -821,9 +624,15 @@ selunlock(Select *sel)
}
}
-void
-runtime·block(void)
+static bool
+selparkcommit(G *gp, void *sel)
{
+ USED(gp);
+ selunlock(sel);
+ return true;
+}
+
+func block() {
runtime·park(nil, nil, "select (no cases)"); // forever
}
@@ -834,9 +643,7 @@ static void* selectgo(Select**);
// overwrites return pc on stack to signal which case of the select
// to run, so cannot appear at the top of a split stack.
#pragma textflag NOSPLIT
-void
-runtime·selectgo(Select *sel)
-{
+func selectgo(sel *Select) {
runtime·setcallerpc(&sel, selectgo(&sel));
}
@@ -844,7 +651,7 @@ static void*
selectgo(Select **selp)
{
Select *sel;
- uint32 o, i, j, k;
+ uint32 o, i, j, k, done;
int64 t0;
Scase *cas, *dfl;
Hchan *c;
@@ -946,7 +753,7 @@ loop:
case CaseSend:
if(raceenabled)
- runtime·racereadpc(c, cas->pc, runtime·chansend);
+ runtime·racereadpc(c, cas->pc, chansend);
if(c->closed)
goto sclose;
if(c->dataqsiz > 0) {
@@ -973,13 +780,14 @@ loop:
// pass 2 - enqueue on all chans
+ done = 0;
for(i=0; i<sel->ncase; i++) {
o = sel->pollorder[i];
cas = &sel->scase[o];
c = cas->chan;
sg = &cas->sg;
sg->g = g;
- sg->selgen = g->selgen;
+ sg->selectdone = &done;
switch(cas->kind) {
case CaseRecv:
@@ -993,7 +801,7 @@ loop:
}
g->param = nil;
- runtime·park((void(*)(Lock*))selunlock, (Lock*)sel, "select");
+ runtime·park(selparkcommit, sel, "select");
sellock(sel);
sg = g->param;
@@ -1029,18 +837,29 @@ loop:
*cas->receivedp = true;
}
+ if(raceenabled) {
+ if(cas->kind == CaseRecv && cas->sg.elem != nil)
+ runtime·racewriteobjectpc(cas->sg.elem, c->elemtype, cas->pc, chanrecv);
+ else if(cas->kind == CaseSend)
+ runtime·racereadobjectpc(cas->sg.elem, c->elemtype, cas->pc, chansend);
+ }
+
selunlock(sel);
goto retc;
asyncrecv:
// can receive from buffer
- if(raceenabled)
+ if(raceenabled) {
+ if(cas->sg.elem != nil)
+ runtime·racewriteobjectpc(cas->sg.elem, c->elemtype, cas->pc, chanrecv);
runtime·raceacquire(chanbuf(c, c->recvx));
+ runtime·racerelease(chanbuf(c, c->recvx));
+ }
if(cas->receivedp != nil)
*cas->receivedp = true;
if(cas->sg.elem != nil)
- c->elemalg->copy(c->elemsize, cas->sg.elem, chanbuf(c, c->recvx));
- c->elemalg->copy(c->elemsize, chanbuf(c, c->recvx), nil);
+ c->elemtype->alg->copy(c->elemsize, cas->sg.elem, chanbuf(c, c->recvx));
+ c->elemtype->alg->copy(c->elemsize, chanbuf(c, c->recvx), nil);
if(++c->recvx == c->dataqsiz)
c->recvx = 0;
c->qcount--;
@@ -1058,9 +877,12 @@ asyncrecv:
asyncsend:
// can send to buffer
- if(raceenabled)
+ if(raceenabled) {
+ runtime·raceacquire(chanbuf(c, c->sendx));
runtime·racerelease(chanbuf(c, c->sendx));
- c->elemalg->copy(c->elemsize, chanbuf(c, c->sendx), cas->sg.elem);
+ runtime·racereadobjectpc(cas->sg.elem, c->elemtype, cas->pc, chansend);
+ }
+ c->elemtype->alg->copy(c->elemsize, chanbuf(c, c->sendx), cas->sg.elem);
if(++c->sendx == c->dataqsiz)
c->sendx = 0;
c->qcount++;
@@ -1078,15 +900,18 @@ asyncsend:
syncrecv:
// can receive from sleeping sender (sg)
- if(raceenabled)
+ if(raceenabled) {
+ if(cas->sg.elem != nil)
+ runtime·racewriteobjectpc(cas->sg.elem, c->elemtype, cas->pc, chanrecv);
racesync(c, sg);
+ }
selunlock(sel);
if(debug)
runtime·printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o);
if(cas->receivedp != nil)
*cas->receivedp = true;
if(cas->sg.elem != nil)
- c->elemalg->copy(c->elemsize, cas->sg.elem, sg->elem);
+ c->elemtype->alg->copy(c->elemsize, cas->sg.elem, sg->elem);
gp = sg->g;
gp->param = sg;
if(sg->releasetime)
@@ -1100,20 +925,22 @@ rclose:
if(cas->receivedp != nil)
*cas->receivedp = false;
if(cas->sg.elem != nil)
- c->elemalg->copy(c->elemsize, cas->sg.elem, nil);
+ c->elemtype->alg->copy(c->elemsize, cas->sg.elem, nil);
if(raceenabled)
runtime·raceacquire(c);
goto retc;
syncsend:
// can send to sleeping receiver (sg)
- if(raceenabled)
+ if(raceenabled) {
+ runtime·racereadobjectpc(cas->sg.elem, c->elemtype, cas->pc, chansend);
racesync(c, sg);
+ }
selunlock(sel);
if(debug)
runtime·printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o);
if(sg->elem != nil)
- c->elemalg->copy(c->elemsize, sg->elem, cas->sg.elem);
+ c->elemtype->alg->copy(c->elemsize, sg->elem, cas->sg.elem);
gp = sg->g;
gp->param = sg;
if(sg->releasetime)
@@ -1150,7 +977,7 @@ struct runtimeSelect
uintptr dir;
ChanType *typ;
Hchan *ch;
- uintptr val;
+ byte *val;
};
// This enum must match ../reflect/value.go:/SelectDir.
@@ -1160,34 +987,17 @@ enum SelectDir {
SelectDefault,
};
-// func rselect(cases []runtimeSelect) (chosen int, word uintptr, recvOK bool)
-void
-reflect·rselect(Slice cases, intgo chosen, uintptr word, bool recvOK)
-{
+func reflect·rselect(cases Slice) (chosen int, recvOK bool) {
int32 i;
Select *sel;
runtimeSelect* rcase, *rc;
- void *elem;
- void *recvptr;
- uintptr maxsize;
chosen = -1;
- word = 0;
recvOK = false;
- maxsize = 0;
rcase = (runtimeSelect*)cases.array;
- for(i=0; i<cases.len; i++) {
- rc = &rcase[i];
- if(rc->dir == SelectRecv && rc->ch != nil && maxsize < rc->typ->elem->size)
- maxsize = rc->typ->elem->size;
- }
-
- recvptr = nil;
- if(maxsize > sizeof(void*))
- recvptr = runtime·mal(maxsize);
- newselect(cases.len, &sel);
+ sel = newselect(cases.len);
for(i=0; i<cases.len; i++) {
rc = &rcase[i];
switch(rc->dir) {
@@ -1197,49 +1007,28 @@ reflect·rselect(Slice cases, intgo chosen, uintptr word, bool recvOK)
case SelectSend:
if(rc->ch == nil)
break;
- if(rc->typ->elem->size > sizeof(void*))
- elem = (void*)rc->val;
- else
- elem = (void*)&rc->val;
- selectsend(sel, rc->ch, (void*)i, elem, 0);
+ selectsend(sel, rc->ch, (void*)i, rc->val, 0);
break;
case SelectRecv:
if(rc->ch == nil)
break;
- if(rc->typ->elem->size > sizeof(void*))
- elem = recvptr;
- else
- elem = &word;
- selectrecv(sel, rc->ch, (void*)i, elem, &recvOK, 0);
+ selectrecv(sel, rc->ch, (void*)i, rc->val, &recvOK, 0);
break;
}
}
chosen = (intgo)(uintptr)selectgo(&sel);
- if(rcase[chosen].dir == SelectRecv && rcase[chosen].typ->elem->size > sizeof(void*))
- word = (uintptr)recvptr;
-
- FLUSH(&chosen);
- FLUSH(&word);
- FLUSH(&recvOK);
}
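
The rewritten reflect·rselect drops the old word/recvptr indirection: reflect now passes the destination buffer directly in rc->val. The user-facing API that reaches this code is reflect.Select; a minimal sketch of a call that exercises the receive path (the channel values here are illustrative):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        c1 := make(chan int, 1)
        c2 := make(chan string) // never ready in this run
        c1 <- 42

        // Each SelectCase becomes one runtimeSelect entry handed to rselect.
        cases := []reflect.SelectCase{
            {Dir: reflect.SelectRecv, Chan: reflect.ValueOf(c1)},
            {Dir: reflect.SelectRecv, Chan: reflect.ValueOf(c2)},
        }
        chosen, v, recvOK := reflect.Select(cases)
        fmt.Println(chosen, v.Interface(), recvOK) // 0 42 true
    }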
static void closechan(Hchan *c, void *pc);
-// closechan(sel *byte);
#pragma textflag NOSPLIT
-void
-runtime·closechan(Hchan *c)
-{
+func closechan(c *Hchan) {
closechan(c, runtime·getcallerpc(&c));
}
-// For reflect
-// func chanclose(c chan)
#pragma textflag NOSPLIT
-void
-reflect·chanclose(Hchan *c)
-{
+func reflect·chanclose(c *Hchan) {
closechan(c, runtime·getcallerpc(&c));
}
@@ -1292,28 +1081,18 @@ closechan(Hchan *c, void *pc)
runtime·unlock(c);
}
-// For reflect
-// func chanlen(c chan) (len int)
-void
-reflect·chanlen(Hchan *c, intgo len)
-{
+func reflect·chanlen(c *Hchan) (len int) {
if(c == nil)
len = 0;
else
len = c->qcount;
- FLUSH(&len);
}
-// For reflect
-// func chancap(c chan) int
-void
-reflect·chancap(Hchan *c, intgo cap)
-{
+func reflect·chancap(c *Hchan) (cap int) {
if(c == nil)
cap = 0;
else
cap = c->dataqsiz;
- FLUSH(&cap);
}
static SudoG*
@@ -1327,12 +1106,11 @@ loop:
return nil;
q->first = sgp->link;
- // if sgp is stale, ignore it
- if(sgp->selgen != NOSELGEN &&
- (sgp->selgen != sgp->g->selgen ||
- !runtime·cas(&sgp->g->selgen, sgp->selgen, sgp->selgen + 2))) {
- //prints("INVALID PSEUDOG POINTER\n");
- goto loop;
+ // if sgp participates in a select and is already signaled, ignore it
+ if(sgp->selectdone != nil) {
+ // claim the right to signal
+ if(*sgp->selectdone != 0 || !runtime·cas(sgp->selectdone, 0, 1))
+ goto loop;
}
return sgp;
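
The selgen generation counter is retired: every case of a given select now shares one selectdone word, and whichever dequeuer first CASes it from 0 to 1 claims the select; every other enqueued case is skipped. A self-contained Go sketch of that claim protocol, with illustrative names:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    func main() {
        var done uint32 // shared by all cases of one select, like SudoG.selectdone
        var winners int32
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ { // four channels racing to complete the same select
            wg.Add(1)
            go func() {
                defer wg.Done()
                // claim the right to signal: exactly one CAS can succeed
                if atomic.LoadUint32(&done) == 0 && atomic.CompareAndSwapUint32(&done, 0, 1) {
                    atomic.AddInt32(&winners, 1)
                }
            }()
        }
        wg.Wait()
        fmt.Println(winners) // always 1
    }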
diff --git a/src/pkg/runtime/chan.h b/src/pkg/runtime/chan.h
new file mode 100644
index 000000000..ce2eb9f4e
--- /dev/null
+++ b/src/pkg/runtime/chan.h
@@ -0,0 +1,75 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define MAXALIGN 8
+
+typedef struct WaitQ WaitQ;
+typedef struct SudoG SudoG;
+typedef struct Select Select;
+typedef struct Scase Scase;
+
+struct SudoG
+{
+ G* g;
+ uint32* selectdone;
+ SudoG* link;
+ int64 releasetime;
+ byte* elem; // data element
+};
+
+struct WaitQ
+{
+ SudoG* first;
+ SudoG* last;
+};
+
+// The garbage collector assumes that Hchan can only contain pointers into the stack
+// and cannot contain pointers into the heap.
+struct Hchan
+{
+ uintgo qcount; // total data in the q
+ uintgo dataqsiz; // size of the circular q
+ uint16 elemsize;
+ uint16 pad; // ensures proper alignment of the buffer that follows Hchan in memory
+ bool closed;
+ Type* elemtype; // element type
+ uintgo sendx; // send index
+ uintgo recvx; // receive index
+ WaitQ recvq; // list of recv waiters
+ WaitQ sendq; // list of send waiters
+ Lock;
+};
+
+// Buffer follows Hchan immediately in memory.
+// chanbuf(c, i) is a pointer to the i'th slot in the buffer.
+#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i))
+
+enum
+{
+ debug = 0,
+
+ // Scase.kind
+ CaseRecv,
+ CaseSend,
+ CaseDefault,
+};
+
+struct Scase
+{
+ SudoG sg; // must be first member (cast to Scase)
+ Hchan* chan; // chan
+ byte* pc; // return pc
+ uint16 kind;
+ uint16 so; // vararg of selected bool
+ bool* receivedp; // pointer to received bool (recv2)
+};
+
+struct Select
+{
+ uint16 tcase; // total count of scase[]
+ uint16 ncase; // currently filled scase[]
+ uint16* pollorder; // case poll order
+ Hchan** lockorder; // channel lock order
+ Scase scase[1]; // one per case (in order of appearance)
+};
diff --git a/src/pkg/runtime/chan_test.go b/src/pkg/runtime/chan_test.go
index eb2c7c60d..ce4b39627 100644
--- a/src/pkg/runtime/chan_test.go
+++ b/src/pkg/runtime/chan_test.go
@@ -9,8 +9,327 @@ import (
"sync"
"sync/atomic"
"testing"
+ "time"
)
+func TestChan(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := 200
+ if testing.Short() {
+ N = 20
+ }
+ for chanCap := 0; chanCap < N; chanCap++ {
+ {
+ // Ensure that receive from empty chan blocks.
+ c := make(chan int, chanCap)
+ recv1 := false
+ go func() {
+ _ = <-c
+ recv1 = true
+ }()
+ recv2 := false
+ go func() {
+ _, _ = <-c
+ recv2 = true
+ }()
+ time.Sleep(time.Millisecond)
+ if recv1 || recv2 {
+ t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+ }
+ // Ensure that non-blocking receive does not block.
+ select {
+ case _ = <-c:
+ t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+ default:
+ }
+ select {
+ case _, _ = <-c:
+ t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+ default:
+ }
+ c <- 0
+ c <- 0
+ }
+
+ {
+ // Ensure that send to full chan blocks.
+ c := make(chan int, chanCap)
+ for i := 0; i < chanCap; i++ {
+ c <- i
+ }
+ sent := uint32(0)
+ go func() {
+ c <- 0
+ atomic.StoreUint32(&sent, 1)
+ }()
+ time.Sleep(time.Millisecond)
+ if atomic.LoadUint32(&sent) != 0 {
+ t.Fatalf("chan[%d]: send to full chan", chanCap)
+ }
+ // Ensure that non-blocking send does not block.
+ select {
+ case c <- 0:
+ t.Fatalf("chan[%d]: send to full chan", chanCap)
+ default:
+ }
+ <-c
+ }
+
+ {
+ // Ensure that we receive 0 from closed chan.
+ c := make(chan int, chanCap)
+ for i := 0; i < chanCap; i++ {
+ c <- i
+ }
+ close(c)
+ for i := 0; i < chanCap; i++ {
+ v := <-c
+ if v != i {
+ t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
+ }
+ }
+ if v := <-c; v != 0 {
+ t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
+ }
+ if v, ok := <-c; v != 0 || ok {
+ t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
+ }
+ }
+
+ {
+ // Ensure that close unblocks receive.
+ c := make(chan int, chanCap)
+ done := make(chan bool)
+ go func() {
+ v, ok := <-c
+ done <- v == 0 && ok == false
+ }()
+ time.Sleep(time.Millisecond)
+ close(c)
+ if !<-done {
+ t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
+ }
+ }
+
+ {
+ // Send 100 integers,
+ // ensure that we receive them non-corrupted in FIFO order.
+ c := make(chan int, chanCap)
+ go func() {
+ for i := 0; i < 100; i++ {
+ c <- i
+ }
+ }()
+ for i := 0; i < 100; i++ {
+ v := <-c
+ if v != i {
+ t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
+ }
+ }
+
+ // Same, but using recv2.
+ go func() {
+ for i := 0; i < 100; i++ {
+ c <- i
+ }
+ }()
+ for i := 0; i < 100; i++ {
+ v, ok := <-c
+ if !ok {
+ t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
+ }
+ if v != i {
+ t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
+ }
+ }
+
+ // Send 1000 integers in 4 goroutines,
+ // ensure that we receive what we send.
+ const P = 4
+ const L = 1000
+ for p := 0; p < P; p++ {
+ go func() {
+ for i := 0; i < L; i++ {
+ c <- i
+ }
+ }()
+ }
+ done := make(chan map[int]int)
+ for p := 0; p < P; p++ {
+ go func() {
+ recv := make(map[int]int)
+ for i := 0; i < L; i++ {
+ v := <-c
+ recv[v] = recv[v] + 1
+ }
+ done <- recv
+ }()
+ }
+ recv := make(map[int]int)
+ for p := 0; p < P; p++ {
+ for k, v := range <-done {
+ recv[k] = recv[k] + v
+ }
+ }
+ if len(recv) != L {
+ t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
+ }
+ for _, v := range recv {
+ if v != P {
+ t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
+ }
+ }
+ }
+
+ {
+ // Test len/cap.
+ c := make(chan int, chanCap)
+ if len(c) != 0 || cap(c) != chanCap {
+ t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
+ }
+ for i := 0; i < chanCap; i++ {
+ c <- i
+ }
+ if len(c) != chanCap || cap(c) != chanCap {
+ t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
+ }
+ }
+
+ }
+}
+
+func TestSelfSelect(t *testing.T) {
+ // Ensure that send/recv on the same chan in select
+ // does not crash nor deadlock.
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
+ for _, chanCap := range []int{0, 10} {
+ var wg sync.WaitGroup
+ wg.Add(2)
+ c := make(chan int, chanCap)
+ for p := 0; p < 2; p++ {
+ p := p
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 1000; i++ {
+ if p == 0 || i%2 == 0 {
+ select {
+ case c <- p:
+ case v := <-c:
+ if chanCap == 0 && v == p {
+ t.Fatalf("self receive")
+ }
+ }
+ } else {
+ select {
+ case v := <-c:
+ if chanCap == 0 && v == p {
+ t.Fatalf("self receive")
+ }
+ case c <- p:
+ }
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ }
+}
+
+func TestSelectStress(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
+ var c [4]chan int
+ c[0] = make(chan int)
+ c[1] = make(chan int)
+ c[2] = make(chan int, 2)
+ c[3] = make(chan int, 3)
+ N := int(1e5)
+ if testing.Short() {
+ N /= 10
+ }
+ // There are 4 goroutines that send N values on each of the chans,
+ // + 4 goroutines that receive N values on each of the chans,
+ // + 1 goroutine that sends N values on each of the chans in a single select,
+ // + 1 goroutine that receives N values on each of the chans in a single select.
+ // All these sends, receives and selects interact chaotically at runtime,
+ // but we are careful that this whole construct does not deadlock.
+ var wg sync.WaitGroup
+ wg.Add(10)
+ for k := 0; k < 4; k++ {
+ k := k
+ go func() {
+ for i := 0; i < N; i++ {
+ c[k] <- 0
+ }
+ wg.Done()
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ <-c[k]
+ }
+ wg.Done()
+ }()
+ }
+ go func() {
+ var n [4]int
+ c1 := c
+ for i := 0; i < 4*N; i++ {
+ select {
+ case c1[3] <- 0:
+ n[3]++
+ if n[3] == N {
+ c1[3] = nil
+ }
+ case c1[2] <- 0:
+ n[2]++
+ if n[2] == N {
+ c1[2] = nil
+ }
+ case c1[0] <- 0:
+ n[0]++
+ if n[0] == N {
+ c1[0] = nil
+ }
+ case c1[1] <- 0:
+ n[1]++
+ if n[1] == N {
+ c1[1] = nil
+ }
+ }
+ }
+ wg.Done()
+ }()
+ go func() {
+ var n [4]int
+ c1 := c
+ for i := 0; i < 4*N; i++ {
+ select {
+ case <-c1[0]:
+ n[0]++
+ if n[0] == N {
+ c1[0] = nil
+ }
+ case <-c1[1]:
+ n[1]++
+ if n[1] == N {
+ c1[1] = nil
+ }
+ case <-c1[2]:
+ n[2]++
+ if n[2] == N {
+ c1[2] = nil
+ }
+ case <-c1[3]:
+ n[3]++
+ if n[3] == N {
+ c1[3] = nil
+ }
+ }
+ }
+ wg.Done()
+ }()
+ wg.Wait()
+}
+
func TestChanSendInterface(t *testing.T) {
type mt struct{}
m := &mt{}
@@ -29,34 +348,35 @@ func TestChanSendInterface(t *testing.T) {
func TestPseudoRandomSend(t *testing.T) {
n := 100
- c := make(chan int)
- l := make([]int, n)
- var m sync.Mutex
- m.Lock()
- go func() {
+ for _, chanCap := range []int{0, n} {
+ c := make(chan int, chanCap)
+ l := make([]int, n)
+ var m sync.Mutex
+ m.Lock()
+ go func() {
+ for i := 0; i < n; i++ {
+ runtime.Gosched()
+ l[i] = <-c
+ }
+ m.Unlock()
+ }()
for i := 0; i < n; i++ {
- runtime.Gosched()
- l[i] = <-c
+ select {
+ case c <- 1:
+ case c <- 0:
+ }
}
- m.Unlock()
- }()
- for i := 0; i < n; i++ {
- select {
- case c <- 0:
- case c <- 1:
+ m.Lock() // wait
+ n0 := 0
+ n1 := 0
+ for _, i := range l {
+ n0 += (i + 1) % 2
+ n1 += i
}
- }
- m.Lock() // wait
- n0 := 0
- n1 := 0
- for _, i := range l {
- n0 += (i + 1) % 2
- n1 += i
- if n0 > n/10 && n1 > n/10 {
- return
+ if n0 <= n/10 || n1 <= n/10 {
+ t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
}
}
- t.Errorf("Want pseudo random, got %d zeros and %d ones", n0, n1)
}
func TestMultiConsumer(t *testing.T) {
@@ -110,147 +430,106 @@ func TestMultiConsumer(t *testing.T) {
}
}
+func BenchmarkChanNonblocking(b *testing.B) {
+ myc := make(chan int)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ select {
+ case <-myc:
+ default:
+ }
+ }
+ })
+}
+
func BenchmarkSelectUncontended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- myc1 := make(chan int, 1)
- myc2 := make(chan int, 1)
- myc1 <- 0
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- select {
- case <-myc1:
- myc2 <- 0
- case <-myc2:
- myc1 <- 0
- }
- }
+ b.RunParallel(func(pb *testing.PB) {
+ myc1 := make(chan int, 1)
+ myc2 := make(chan int, 1)
+ myc1 <- 0
+ for pb.Next() {
+ select {
+ case <-myc1:
+ myc2 <- 0
+ case <-myc2:
+ myc1 <- 0
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ }
+ })
}
func BenchmarkSelectContended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
+ procs := runtime.GOMAXPROCS(0)
myc1 := make(chan int, procs)
myc2 := make(chan int, procs)
- for p := 0; p < procs; p++ {
+ b.RunParallel(func(pb *testing.PB) {
myc1 <- 0
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- select {
- case <-myc1:
- myc2 <- 0
- case <-myc2:
- myc1 <- 0
- }
- }
+ for pb.Next() {
+ select {
+ case <-myc1:
+ myc2 <- 0
+ case <-myc2:
+ myc1 <- 0
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ }
+ })
}
func BenchmarkSelectNonblock(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- myc1 := make(chan int)
- myc2 := make(chan int)
- myc3 := make(chan int, 1)
- myc4 := make(chan int, 1)
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- select {
- case <-myc1:
- default:
- }
- select {
- case myc2 <- 0:
- default:
- }
- select {
- case <-myc3:
- default:
- }
- select {
- case myc4 <- 0:
- default:
- }
- }
+ b.RunParallel(func(pb *testing.PB) {
+ myc1 := make(chan int)
+ myc2 := make(chan int)
+ myc3 := make(chan int, 1)
+ myc4 := make(chan int, 1)
+ for pb.Next() {
+ select {
+ case <-myc1:
+ default:
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ select {
+ case myc2 <- 0:
+ default:
+ }
+ select {
+ case <-myc3:
+ default:
+ }
+ select {
+ case myc4 <- 0:
+ default:
+ }
+ }
+ })
}
func BenchmarkChanUncontended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- myc := make(chan int, CallsPerSched)
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- myc <- 0
- }
- for g := 0; g < CallsPerSched; g++ {
- <-myc
- }
+ const C = 100
+ b.RunParallel(func(pb *testing.PB) {
+ myc := make(chan int, C)
+ for pb.Next() {
+ for i := 0; i < C; i++ {
+ myc <- 0
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ for i := 0; i < C; i++ {
+ <-myc
+ }
+ }
+ })
}
func BenchmarkChanContended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- myc := make(chan int, procs*CallsPerSched)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- myc <- 0
- }
- for g := 0; g < CallsPerSched; g++ {
- <-myc
- }
+ const C = 100
+ myc := make(chan int, C*runtime.GOMAXPROCS(0))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ for i := 0; i < C; i++ {
+ myc <- 0
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ for i := 0; i < C; i++ {
+ <-myc
+ }
+ }
+ })
}
func BenchmarkChanSync(b *testing.B) {
@@ -350,33 +629,83 @@ func BenchmarkChanProdConsWork100(b *testing.B) {
benchmarkChanProdCons(b, 100, 100)
}
-func BenchmarkChanCreation(b *testing.B) {
+func BenchmarkSelectProdCons(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
+ c := make(chan bool, 2*procs)
+ myc := make(chan int, 128)
+ myclose := make(chan bool)
for p := 0; p < procs; p++ {
go func() {
+ // Producer: sends to myc.
+ foo := 0
+ // Intended to not fire during benchmarking.
+ mytimer := time.After(time.Hour)
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
- myc := make(chan int, 1)
- myc <- 0
- <-myc
+ // Model some local work.
+ for i := 0; i < 100; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ select {
+ case myc <- 1:
+ case <-mytimer:
+ case <-myclose:
+ }
}
}
- c <- true
+ myc <- 0
+ c <- foo == 42
+ }()
+ go func() {
+ // Consumer: receives from myc.
+ foo := 0
+ // Intended to not fire during benchmarking.
+ mytimer := time.After(time.Hour)
+ loop:
+ for {
+ select {
+ case v := <-myc:
+ if v == 0 {
+ break loop
+ }
+ case <-mytimer:
+ case <-myclose:
+ }
+ // Model some local work.
+ for i := 0; i < 100; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ c <- foo == 42
}()
}
for p := 0; p < procs; p++ {
<-c
+ <-c
}
}
+func BenchmarkChanCreation(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ myc := make(chan int, 1)
+ myc <- 0
+ <-myc
+ }
+ })
+}
+
func BenchmarkChanSem(b *testing.B) {
type Empty struct{}
- c := make(chan Empty, 1)
- for i := 0; i < b.N; i++ {
- c <- Empty{}
- <-c
- }
+ myc := make(chan Empty, runtime.GOMAXPROCS(0))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ myc <- Empty{}
+ <-myc
+ }
+ })
}
diff --git a/src/pkg/runtime/complex.c b/src/pkg/runtime/complex.goc
index 395e70fe3..40935cf1c 100644
--- a/src/pkg/runtime/complex.c
+++ b/src/pkg/runtime/complex.goc
@@ -2,13 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package runtime
#include "runtime.h"
-typedef struct Complex128 Complex128;
-
-void
-runtime·complex128div(Complex128 n, Complex128 d, Complex128 q)
-{
+func complex128div(n Complex128, d Complex128) (q Complex128) {
int32 ninf, dinf, nnan, dnan;
float64 a, b, ratio, denom;
@@ -58,5 +55,4 @@ runtime·complex128div(Complex128 n, Complex128 d, Complex128 q)
q.imag = (n.imag - n.real*ratio) / denom;
}
}
- FLUSH(&q);
}
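
complex128div uses Smith's method: divide through by the dominant component of the denominator so that denom = d.real + d.imag*ratio neither overflows nor underflows. A Go transliteration of the |real(d)| >= |imag(d)| branch visible above:

    package main

    import (
        "fmt"
        "math"
    )

    // smithDiv mirrors the |real(d)| >= |imag(d)| branch of complex128div.
    func smithDiv(n, d complex128) complex128 {
        a, b := real(d), imag(d)
        if math.Abs(a) < math.Abs(b) {
            return n / d // the other branch swaps the roles of a and b
        }
        ratio := b / a
        denom := a + b*ratio
        return complex(
            (real(n)+imag(n)*ratio)/denom, // q.real
            (imag(n)-real(n)*ratio)/denom, // q.imag
        )
    }

    func main() {
        fmt.Println(smithDiv(1+2i, 4+3i)) // (0.4+0.2i), same as (1+2i)/(4+3i)
    }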
diff --git a/src/pkg/runtime/cpuprof.c b/src/pkg/runtime/cpuprof.goc
index 1c34b9e6f..faaea2943 100644
--- a/src/pkg/runtime/cpuprof.c
+++ b/src/pkg/runtime/cpuprof.goc
@@ -48,6 +48,7 @@
// in order to let the log closer set the high bit to indicate "EOF" safely
// in the situation when normally the goroutine "owns" handoff.
+package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
@@ -80,7 +81,6 @@ struct Profile {
uintptr count; // tick count
uintptr evicts; // eviction count
uintptr lost; // lost ticks that need to be logged
- uintptr totallost; // total lost ticks
// Active recent stack traces.
Bucket hash[HashSize];
@@ -168,7 +168,7 @@ runtime·SetCPUProfileRate(intgo hz)
runtime·noteclear(&prof->wait);
runtime·setcpuprofilerate(tick, hz);
- } else if(prof->on) {
+ } else if(prof != nil && prof->on) {
runtime·setcpuprofilerate(nil, 0);
prof->on = false;
@@ -243,7 +243,6 @@ add(Profile *p, uintptr *pc, int32 n)
if(!evict(p, e)) {
// Could not evict entry. Record lost stack.
p->lost++;
- p->totallost++;
return;
}
p->evicts++;
@@ -307,6 +306,7 @@ flushlog(Profile *p)
*q++ = p->lost;
*q++ = 1;
*q++ = (uintptr)LostProfileData;
+ p->lost = 0;
}
p->nlog = q - log;
return true;
@@ -428,9 +428,6 @@ breakflush:
// CPUProfile returns the next cpu profile block as a []byte.
// The user documentation is in debug.go.
-void
-runtime·CPUProfile(Slice ret)
-{
+func CPUProfile() (ret Slice) {
ret = getprofile(prof);
- FLUSH(&ret);
}
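
CPUProfile and SetCPUProfileRate are usually driven through runtime/pprof rather than called directly; a typical entry point (the output file name is illustrative):

    package main

    import (
        "log"
        "os"
        "runtime/pprof"
    )

    func main() {
        f, err := os.Create("cpu.prof")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        // StartCPUProfile sets the profiling rate and drains the
        // runtime's profile blocks into f on a background goroutine.
        if err := pprof.StartCPUProfile(f); err != nil {
            log.Fatal(err)
        }
        defer pprof.StopCPUProfile()

        work()
    }

    func work() {
        s := 0
        for i := 0; i < 1e8; i++ {
            s += i
        }
        _ = s
    }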
diff --git a/src/pkg/runtime/crash_test.go b/src/pkg/runtime/crash_test.go
index 5476924bb..b0277f293 100644
--- a/src/pkg/runtime/crash_test.go
+++ b/src/pkg/runtime/crash_test.go
@@ -9,6 +9,7 @@ import (
"os"
"os/exec"
"path/filepath"
+ "runtime"
"strings"
"testing"
"text/template"
@@ -31,6 +32,10 @@ func testEnv(cmd *exec.Cmd) *exec.Cmd {
}
func executeTest(t *testing.T, templ string, data interface{}) string {
+ if runtime.GOOS == "nacl" {
+ t.Skip("skipping on nacl")
+ }
+
checkStaleRuntime(t)
st := template.Must(template.New("crashSource").Parse(templ))
@@ -111,8 +116,9 @@ func TestLockedDeadlock2(t *testing.T) {
func TestGoexitDeadlock(t *testing.T) {
output := executeTest(t, goexitDeadlockSource, nil)
- if output != "" {
- t.Fatalf("expected no output, got:\n%s", output)
+ want := "no goroutines (main called runtime.Goexit) - deadlock!"
+ if !strings.Contains(output, want) {
+ t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
@@ -132,6 +138,34 @@ func TestThreadExhaustion(t *testing.T) {
}
}
+func TestRecursivePanic(t *testing.T) {
+ output := executeTest(t, recursivePanicSource, nil)
+ want := `wrap: bad
+panic: again
+
+`
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
+ }
+
+}
+
+func TestGoexitCrash(t *testing.T) {
+ output := executeTest(t, goexitExitSource, nil)
+ want := "no goroutines (main called runtime.Goexit) - deadlock!"
+ if !strings.Contains(output, want) {
+ t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
+ }
+}
+
+func TestGoNil(t *testing.T) {
+ output := executeTest(t, goNilSource, nil)
+ want := "go of nil func value"
+ if !strings.Contains(output, want) {
+ t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
+ }
+}
+
const crashSource = `
package main
@@ -272,3 +306,61 @@ func main() {
}
}
`
+
+const recursivePanicSource = `
+package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ func() {
+ defer func() {
+ fmt.Println(recover())
+ }()
+ var x [8192]byte
+ func(x [8192]byte) {
+ defer func() {
+ if err := recover(); err != nil {
+ panic("wrap: " + err.(string))
+ }
+ }()
+ panic("bad")
+ }(x)
+ }()
+ panic("again")
+}
+`
+
+const goexitExitSource = `
+package main
+
+import (
+ "runtime"
+ "time"
+)
+
+func main() {
+ go func() {
+ time.Sleep(time.Millisecond)
+ }()
+ i := 0
+ runtime.SetFinalizer(&i, func(p *int) {})
+ runtime.GC()
+ runtime.Goexit()
+}
+`
+
+const goNilSource = `
+package main
+
+func main() {
+ defer func() {
+ recover()
+ }()
+ var f func()
+ go f()
+ select{}
+}
+`
diff --git a/src/pkg/runtime/debug/garbage.go b/src/pkg/runtime/debug/garbage.go
index 8337d5d5b..edb364387 100644
--- a/src/pkg/runtime/debug/garbage.go
+++ b/src/pkg/runtime/debug/garbage.go
@@ -91,7 +91,9 @@ func (x byDuration) Less(i, j int) bool { return x[i] < x[j] }
// at startup, or 100 if the variable is not set.
// A negative percentage disables garbage collection.
func SetGCPercent(percent int) int {
- return setGCPercent(percent)
+ old := setGCPercent(percent)
+ runtime.GC()
+ return old
}
// FreeOSMemory forces a garbage collection followed by an
@@ -133,3 +135,19 @@ func SetMaxStack(bytes int) int {
func SetMaxThreads(threads int) int {
return setMaxThreads(threads)
}
+
+// SetPanicOnFault controls the runtime's behavior when a program faults
+// at an unexpected (non-nil) address. Such faults are typically caused by
+// bugs such as runtime memory corruption, so the default response is to crash
+// the program. Programs working with memory-mapped files or unsafe
+// manipulation of memory may cause faults at non-nil addresses in less
+// dramatic situations; SetPanicOnFault allows such programs to request
+// that the runtime trigger only a panic, not a crash.
+// SetPanicOnFault applies only to the current goroutine.
+// It returns the previous setting.
+func SetPanicOnFault(enabled bool) bool
+
+// WriteHeapDump writes a description of the heap and the objects in
+// it to the given file descriptor.
+// The heap dump format is defined at http://golang.org/s/go13heapdump.
+func WriteHeapDump(fd uintptr)
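
A hedged sketch of the new SetPanicOnFault API in use: with the mode enabled, dereferencing a bad non-nil address surfaces as a recoverable runtime panic instead of killing the process. The fault address below is illustrative and assumed to be unmapped:

    package main

    import (
        "fmt"
        "runtime/debug"
        "unsafe"
    )

    func main() {
        old := debug.SetPanicOnFault(true)
        defer debug.SetPanicOnFault(old)

        defer func() {
            // with panic-on-fault set, the SIGSEGV arrives here as a runtime error
            fmt.Println("recovered:", recover())
        }()
        p := (*int)(unsafe.Pointer(uintptr(0xdeadbeef))) // deliberately bad, non-nil address
        fmt.Println(*p)                                  // faults; would crash the process otherwise
    }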
diff --git a/src/pkg/runtime/debug/heapdump_test.go b/src/pkg/runtime/debug/heapdump_test.go
new file mode 100644
index 000000000..920190115
--- /dev/null
+++ b/src/pkg/runtime/debug/heapdump_test.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "io/ioutil"
+ "os"
+ "runtime"
+ "testing"
+)
+
+func TestWriteHeapDumpNonempty(t *testing.T) {
+ if runtime.GOOS == "nacl" {
+ t.Skip("WriteHeapDump is not available on NaCl.")
+ }
+ f, err := ioutil.TempFile("", "heapdumptest")
+ if err != nil {
+ t.Fatalf("TempFile failed: %v", err)
+ }
+ defer os.Remove(f.Name())
+ defer f.Close()
+ WriteHeapDump(f.Fd())
+ fi, err := f.Stat()
+ if err != nil {
+ t.Fatalf("Stat failed: %v", err)
+ }
+ const minSize = 1
+ if size := fi.Size(); size < minSize {
+ t.Fatalf("Heap dump size %d bytes, expected at least %d bytes", size, minSize)
+ }
+}
diff --git a/src/pkg/runtime/debug/stack.go b/src/pkg/runtime/debug/stack.go
index 2896b2141..c29b0a226 100644
--- a/src/pkg/runtime/debug/stack.go
+++ b/src/pkg/runtime/debug/stack.go
@@ -18,6 +18,7 @@ var (
dunno = []byte("???")
centerDot = []byte("·")
dot = []byte(".")
+ slash = []byte("/")
)
// PrintStack prints to standard error the stack trace returned by Stack.
@@ -84,6 +85,11 @@ func function(pc uintptr) []byte {
// runtime/debug.*T·ptrmethod
// and want
// *T.ptrmethod
+	// Since the package path might contain dots (e.g. code.google.com/...),
+ // we first remove the path prefix if there is one.
+ if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
+ name = name[lastslash+1:]
+ }
if period := bytes.Index(name, dot); period >= 0 {
name = name[period+1:]
}
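
A worked example of the trimming above, with a hypothetical dotted import path (the real function also splits on the centerDot rune, omitted here):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        // hypothetical fully qualified symbol, as the runtime reports it
        name := []byte("code.google.com/p/foo/debug.(*T).ptrmethod")
        if i := bytes.LastIndex(name, []byte("/")); i >= 0 {
            name = name[i+1:] // "debug.(*T).ptrmethod"
        }
        if i := bytes.Index(name, []byte(".")); i >= 0 {
            name = name[i+1:] // "(*T).ptrmethod"
        }
        fmt.Printf("%s\n", name)
    }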
diff --git a/src/pkg/runtime/export_test.c b/src/pkg/runtime/defs.c
index 5ad1a7007..1c76198fc 100644
--- a/src/pkg/runtime/export_test.c
+++ b/src/pkg/runtime/defs.c
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// This file is compiled by cmd/dist to obtain debug information
+// about the given header files.
+
#include "runtime.h"
#include "arch_GOARCH.h"
-
-void
-·GogoBytes(int32 x)
-{
- x = RuntimeGogoBytes;
- FLUSH(&x);
-}
+#include "malloc.h"
+#include "type.h"
+#include "race.h"
+#include "hashmap.h"
+#include "chan.h"
diff --git a/src/pkg/runtime/defs_freebsd.go b/src/pkg/runtime/defs_freebsd.go
index dad20f16d..2832583e0 100644
--- a/src/pkg/runtime/defs_freebsd.go
+++ b/src/pkg/runtime/defs_freebsd.go
@@ -49,8 +49,10 @@ const (
SA_RESTART = C.SA_RESTART
SA_ONSTACK = C.SA_ONSTACK
- UMTX_OP_WAIT_UINT = C.UMTX_OP_WAIT_UINT
- UMTX_OP_WAKE = C.UMTX_OP_WAKE
+ UMTX_OP_WAIT_UINT = C.UMTX_OP_WAIT_UINT
+ UMTX_OP_WAIT_UINT_PRIVATE = C.UMTX_OP_WAIT_UINT_PRIVATE
+ UMTX_OP_WAKE = C.UMTX_OP_WAKE
+ UMTX_OP_WAKE_PRIVATE = C.UMTX_OP_WAKE_PRIVATE
SIGHUP = C.SIGHUP
SIGINT = C.SIGINT
diff --git a/src/pkg/runtime/defs_freebsd_386.h b/src/pkg/runtime/defs_freebsd_386.h
index cf9c76eb1..fab938526 100644
--- a/src/pkg/runtime/defs_freebsd_386.h
+++ b/src/pkg/runtime/defs_freebsd_386.h
@@ -21,8 +21,10 @@ enum {
SA_RESTART = 0x2,
SA_ONSTACK = 0x1,
- UMTX_OP_WAIT_UINT = 0xb,
- UMTX_OP_WAKE = 0x3,
+ UMTX_OP_WAIT_UINT = 0xb,
+ UMTX_OP_WAIT_UINT_PRIVATE = 0xf,
+ UMTX_OP_WAKE = 0x3,
+ UMTX_OP_WAKE_PRIVATE = 0x10,
SIGHUP = 0x1,
SIGINT = 0x2,
diff --git a/src/pkg/runtime/defs_freebsd_amd64.h b/src/pkg/runtime/defs_freebsd_amd64.h
index 3fb33f38a..c1db91803 100644
--- a/src/pkg/runtime/defs_freebsd_amd64.h
+++ b/src/pkg/runtime/defs_freebsd_amd64.h
@@ -21,8 +21,10 @@ enum {
SA_RESTART = 0x2,
SA_ONSTACK = 0x1,
- UMTX_OP_WAIT_UINT = 0xb,
- UMTX_OP_WAKE = 0x3,
+ UMTX_OP_WAIT_UINT = 0xb,
+ UMTX_OP_WAIT_UINT_PRIVATE = 0xf,
+ UMTX_OP_WAKE = 0x3,
+ UMTX_OP_WAKE_PRIVATE = 0x10,
SIGHUP = 0x1,
SIGINT = 0x2,
diff --git a/src/pkg/runtime/defs_freebsd_arm.h b/src/pkg/runtime/defs_freebsd_arm.h
index d321f4249..4fc452e45 100644
--- a/src/pkg/runtime/defs_freebsd_arm.h
+++ b/src/pkg/runtime/defs_freebsd_arm.h
@@ -4,7 +4,7 @@
enum {
EINTR = 0x4,
- EFAULT = 0xe,
+ EFAULT = 0xe,
PROT_NONE = 0x0,
PROT_READ = 0x1,
@@ -21,8 +21,10 @@ enum {
SA_RESTART = 0x2,
SA_ONSTACK = 0x1,
- UMTX_OP_WAIT_UINT = 0xb,
- UMTX_OP_WAKE = 0x3,
+ UMTX_OP_WAIT_UINT = 0xb,
+ UMTX_OP_WAIT_UINT_PRIVATE = 0xf,
+ UMTX_OP_WAKE = 0x3,
+ UMTX_OP_WAKE_PRIVATE = 0x10,
SIGHUP = 0x1,
SIGINT = 0x2,
@@ -76,13 +78,13 @@ enum {
ITIMER_VIRTUAL = 0x1,
ITIMER_PROF = 0x2,
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0x40,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
+ EV_ADD = 0x1,
+ EV_DELETE = 0x2,
+ EV_CLEAR = 0x20,
+ EV_RECEIPT = 0x40,
+ EV_ERROR = 0x4000,
+ EVFILT_READ = -0x1,
+ EVFILT_WRITE = -0x2,
};
typedef struct Rtprio Rtprio;
@@ -159,10 +161,12 @@ struct Ucontext {
struct Timespec {
int64 tv_sec;
int32 tv_nsec;
+ byte Pad_cgo_0[4];
};
struct Timeval {
int64 tv_sec;
int32 tv_usec;
+ byte Pad_cgo_0[4];
};
struct Itimerval {
Timeval it_interval;
@@ -170,12 +174,12 @@ struct Itimerval {
};
struct Kevent {
- uint32 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int32 data;
- byte *udata;
+ uint32 ident;
+ int16 filter;
+ uint16 flags;
+ uint32 fflags;
+ int32 data;
+ byte *udata;
};
diff --git a/src/pkg/runtime/defs_nacl_386.h b/src/pkg/runtime/defs_nacl_386.h
new file mode 100644
index 000000000..e8fbb38e1
--- /dev/null
+++ b/src/pkg/runtime/defs_nacl_386.h
@@ -0,0 +1,63 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Created by hand, not machine generated.
+
+enum
+{
+ // These values are referred to in the source code
+ // but really don't matter. Even so, use the standard numbers.
+ SIGSEGV = 11,
+ SIGPROF = 27,
+};
+
+typedef struct Siginfo Siginfo;
+
+// native_client/src/trusted/service_runtime/include/machine/_types.h
+typedef struct Timespec Timespec;
+
+struct Timespec
+{
+ int64 tv_sec;
+ int32 tv_nsec;
+};
+
+// native_client/src/trusted/service_runtime/nacl_exception.h
+// native_client/src/include/nacl/nacl_exception.h
+
+typedef struct ExcContext ExcContext;
+typedef struct ExcPortable ExcPortable;
+typedef struct ExcRegs386 ExcRegs386;
+
+struct ExcRegs386
+{
+ uint32 eax;
+ uint32 ecx;
+ uint32 edx;
+ uint32 ebx;
+ uint32 esp;
+ uint32 ebp;
+ uint32 esi;
+ uint32 edi;
+ uint32 eip;
+ uint32 eflags;
+};
+
+struct ExcContext
+{
+ uint32 size;
+ uint32 portable_context_offset;
+ uint32 portable_context_size;
+ uint32 arch;
+ uint32 regs_size;
+ uint32 reserved[11];
+ ExcRegs386 regs;
+};
+
+struct ExcPortableContext
+{
+ uint32 pc;
+ uint32 sp;
+ uint32 fp;
+};
diff --git a/src/pkg/runtime/defs_nacl_amd64p32.h b/src/pkg/runtime/defs_nacl_amd64p32.h
new file mode 100644
index 000000000..8d3068bf8
--- /dev/null
+++ b/src/pkg/runtime/defs_nacl_amd64p32.h
@@ -0,0 +1,90 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Created by hand, not machine generated.
+
+enum
+{
+ // These values are referred to in the source code
+ // but really don't matter. Even so, use the standard numbers.
+ SIGSEGV = 11,
+ SIGPROF = 27,
+};
+
+typedef struct Siginfo Siginfo;
+
+
+// native_client/src/trusted/service_runtime/include/machine/_types.h
+typedef struct Timespec Timespec;
+
+struct Timespec
+{
+ int64 tv_sec;
+ int32 tv_nsec;
+};
+
+// native_client/src/trusted/service_runtime/nacl_exception.h
+// native_client/src/include/nacl/nacl_exception.h
+
+typedef struct ExcContext ExcContext;
+typedef struct ExcPortable ExcPortable;
+typedef struct ExcRegs386 ExcRegs386;
+typedef struct ExcRegsAmd64 ExcRegsAmd64;
+
+struct ExcRegs386
+{
+ uint32 eax;
+ uint32 ecx;
+ uint32 edx;
+ uint32 ebx;
+ uint32 esp;
+ uint32 ebp;
+ uint32 esi;
+ uint32 edi;
+ uint32 eip;
+ uint32 eflags;
+};
+
+struct ExcRegsAmd64
+{
+ uint64 rax;
+ uint64 rcx;
+ uint64 rdx;
+ uint64 rbx;
+ uint64 rsp;
+ uint64 rbp;
+ uint64 rsi;
+ uint64 rdi;
+ uint64 r8;
+ uint64 r9;
+ uint64 r10;
+ uint64 r11;
+ uint64 r12;
+ uint64 r13;
+ uint64 r14;
+ uint64 r15;
+ uint64 rip;
+ uint32 rflags;
+};
+
+struct ExcContext
+{
+ uint32 size;
+ uint32 portable_context_offset;
+ uint32 portable_context_size;
+ uint32 arch;
+ uint32 regs_size;
+ uint32 reserved[11];
+ union {
+ ExcRegs386 regs;
+ ExcRegsAmd64 regs64;
+ };
+};
+
+struct ExcPortableContext
+{
+ uint32 pc;
+ uint32 sp;
+ uint32 fp;
+};
diff --git a/src/pkg/runtime/defs_openbsd_386.h b/src/pkg/runtime/defs_openbsd_386.h
index a5b7f04b5..b8f993e2b 100644
--- a/src/pkg/runtime/defs_openbsd_386.h
+++ b/src/pkg/runtime/defs_openbsd_386.h
@@ -121,7 +121,7 @@ struct Sigcontext {
int32 sc_eflags;
int32 sc_esp;
int32 sc_ss;
- int32 sc_onstack;
+ int32 __sc_unused;
int32 sc_mask;
int32 sc_trapno;
int32 sc_err;
@@ -143,11 +143,11 @@ struct StackT {
};
struct Timespec {
- int32 tv_sec;
+ int64 tv_sec;
int32 tv_nsec;
};
struct Timeval {
- int32 tv_sec;
+ int64 tv_sec;
int32 tv_usec;
};
struct Itimerval {
@@ -160,7 +160,7 @@ struct Kevent {
int16 filter;
uint16 flags;
uint32 fflags;
- int32 data;
+ int64 data;
byte *udata;
};
diff --git a/src/pkg/runtime/defs_openbsd_amd64.h b/src/pkg/runtime/defs_openbsd_amd64.h
index eb47ec892..a1ae2ef65 100644
--- a/src/pkg/runtime/defs_openbsd_amd64.h
+++ b/src/pkg/runtime/defs_openbsd_amd64.h
@@ -133,7 +133,7 @@ struct Sigcontext {
int64 sc_rsp;
int64 sc_ss;
void *sc_fpstate;
- int32 sc_onstack;
+ int32 __sc_unused;
int32 sc_mask;
};
struct Siginfo {
@@ -154,8 +154,7 @@ struct StackT {
};
struct Timespec {
- int32 tv_sec;
- byte Pad_cgo_0[4];
+ int64 tv_sec;
int64 tv_nsec;
};
struct Timeval {
@@ -168,11 +167,11 @@ struct Itimerval {
};
struct Kevent {
- uint32 ident;
+ uint64 ident;
int16 filter;
uint16 flags;
uint32 fflags;
- int32 data;
+ int64 data;
byte *udata;
};
diff --git a/src/pkg/runtime/defs_solaris.go b/src/pkg/runtime/defs_solaris.go
new file mode 100644
index 000000000..8dcbb08b7
--- /dev/null
+++ b/src/pkg/runtime/defs_solaris.go
@@ -0,0 +1,156 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo.
+
+GOARCH=amd64 go tool cgo -cdefs defs_solaris.go >defs_solaris_amd64.h
+*/
+
+package runtime
+
+/*
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/select.h>
+#include <sys/siginfo.h>
+#include <sys/signal.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/ucontext.h>
+#include <sys/regset.h>
+#include <sys/unistd.h>
+#include <sys/fork.h>
+#include <sys/port.h>
+#include <semaphore.h>
+#include <errno.h>
+#include <signal.h>
+#include <pthread.h>
+#include <netdb.h>
+*/
+import "C"
+
+const (
+ EINTR = C.EINTR
+ EBADF = C.EBADF
+ EFAULT = C.EFAULT
+ EAGAIN = C.EAGAIN
+ ETIMEDOUT = C.ETIMEDOUT
+ EWOULDBLOCK = C.EWOULDBLOCK
+ EINPROGRESS = C.EINPROGRESS
+
+ PROT_NONE = C.PROT_NONE
+ PROT_READ = C.PROT_READ
+ PROT_WRITE = C.PROT_WRITE
+ PROT_EXEC = C.PROT_EXEC
+
+ MAP_ANON = C.MAP_ANON
+ MAP_PRIVATE = C.MAP_PRIVATE
+ MAP_FIXED = C.MAP_FIXED
+
+ MADV_FREE = C.MADV_FREE
+
+ SA_SIGINFO = C.SA_SIGINFO
+ SA_RESTART = C.SA_RESTART
+ SA_ONSTACK = C.SA_ONSTACK
+
+ SIGHUP = C.SIGHUP
+ SIGINT = C.SIGINT
+ SIGQUIT = C.SIGQUIT
+ SIGILL = C.SIGILL
+ SIGTRAP = C.SIGTRAP
+ SIGABRT = C.SIGABRT
+ SIGEMT = C.SIGEMT
+ SIGFPE = C.SIGFPE
+ SIGKILL = C.SIGKILL
+ SIGBUS = C.SIGBUS
+ SIGSEGV = C.SIGSEGV
+ SIGSYS = C.SIGSYS
+ SIGPIPE = C.SIGPIPE
+ SIGALRM = C.SIGALRM
+ SIGTERM = C.SIGTERM
+ SIGURG = C.SIGURG
+ SIGSTOP = C.SIGSTOP
+ SIGTSTP = C.SIGTSTP
+ SIGCONT = C.SIGCONT
+ SIGCHLD = C.SIGCHLD
+ SIGTTIN = C.SIGTTIN
+ SIGTTOU = C.SIGTTOU
+ SIGIO = C.SIGIO
+ SIGXCPU = C.SIGXCPU
+ SIGXFSZ = C.SIGXFSZ
+ SIGVTALRM = C.SIGVTALRM
+ SIGPROF = C.SIGPROF
+ SIGWINCH = C.SIGWINCH
+ SIGUSR1 = C.SIGUSR1
+ SIGUSR2 = C.SIGUSR2
+
+ FPE_INTDIV = C.FPE_INTDIV
+ FPE_INTOVF = C.FPE_INTOVF
+ FPE_FLTDIV = C.FPE_FLTDIV
+ FPE_FLTOVF = C.FPE_FLTOVF
+ FPE_FLTUND = C.FPE_FLTUND
+ FPE_FLTRES = C.FPE_FLTRES
+ FPE_FLTINV = C.FPE_FLTINV
+ FPE_FLTSUB = C.FPE_FLTSUB
+
+ BUS_ADRALN = C.BUS_ADRALN
+ BUS_ADRERR = C.BUS_ADRERR
+ BUS_OBJERR = C.BUS_OBJERR
+
+ SEGV_MAPERR = C.SEGV_MAPERR
+ SEGV_ACCERR = C.SEGV_ACCERR
+
+ ITIMER_REAL = C.ITIMER_REAL
+ ITIMER_VIRTUAL = C.ITIMER_VIRTUAL
+ ITIMER_PROF = C.ITIMER_PROF
+
+ _SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN
+
+ PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED
+
+ FORK_NOSIGCHLD = C.FORK_NOSIGCHLD
+ FORK_WAITPID = C.FORK_WAITPID
+
+ MAXHOSTNAMELEN = C.MAXHOSTNAMELEN
+
+ O_NONBLOCK = C.O_NONBLOCK
+ FD_CLOEXEC = C.FD_CLOEXEC
+ F_GETFL = C.F_GETFL
+ F_SETFL = C.F_SETFL
+ F_SETFD = C.F_SETFD
+
+ POLLIN = C.POLLIN
+ POLLOUT = C.POLLOUT
+ POLLHUP = C.POLLHUP
+ POLLERR = C.POLLERR
+
+ PORT_SOURCE_FD = C.PORT_SOURCE_FD
+)
+
+type SemT C.sem_t
+
+type Sigaltstack C.struct_sigaltstack
+type Sigset C.sigset_t
+type StackT C.stack_t
+
+type Siginfo C.siginfo_t
+type Sigaction C.struct_sigaction
+
+type Fpregset C.fpregset_t
+type Mcontext C.mcontext_t
+type Ucontext C.ucontext_t
+
+type Timespec C.struct_timespec
+type Timeval C.struct_timeval
+type Itimerval C.struct_itimerval
+
+type PortEvent C.port_event_t
+type Pthread C.pthread_t
+type PthreadAttr C.pthread_attr_t
+
+// depends on Timespec, must appear below
+type Stat C.struct_stat
diff --git a/src/pkg/runtime/defs_solaris_amd64.go b/src/pkg/runtime/defs_solaris_amd64.go
new file mode 100644
index 000000000..049317888
--- /dev/null
+++ b/src/pkg/runtime/defs_solaris_amd64.go
@@ -0,0 +1,48 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo.
+
+GOARCH=amd64 go tool cgo -cdefs defs_solaris.go defs_solaris_amd64.go >defs_solaris_amd64.h
+*/
+
+package runtime
+
+/*
+#include <sys/types.h>
+#include <sys/regset.h>
+*/
+import "C"
+
+const (
+ REG_RDI = C.REG_RDI
+ REG_RSI = C.REG_RSI
+ REG_RDX = C.REG_RDX
+ REG_RCX = C.REG_RCX
+ REG_R8 = C.REG_R8
+ REG_R9 = C.REG_R9
+ REG_R10 = C.REG_R10
+ REG_R11 = C.REG_R11
+ REG_R12 = C.REG_R12
+ REG_R13 = C.REG_R13
+ REG_R14 = C.REG_R14
+ REG_R15 = C.REG_R15
+ REG_RBP = C.REG_RBP
+ REG_RBX = C.REG_RBX
+ REG_RAX = C.REG_RAX
+ REG_GS = C.REG_GS
+ REG_FS = C.REG_FS
+ REG_ES = C.REG_ES
+ REG_DS = C.REG_DS
+ REG_TRAPNO = C.REG_TRAPNO
+ REG_ERR = C.REG_ERR
+ REG_RIP = C.REG_RIP
+ REG_CS = C.REG_CS
+ REG_RFLAGS = C.REG_RFL
+ REG_RSP = C.REG_RSP
+ REG_SS = C.REG_SS
+)
diff --git a/src/pkg/runtime/defs_solaris_amd64.h b/src/pkg/runtime/defs_solaris_amd64.h
new file mode 100644
index 000000000..799724fad
--- /dev/null
+++ b/src/pkg/runtime/defs_solaris_amd64.h
@@ -0,0 +1,254 @@
+// Created by cgo -cdefs - DO NOT EDIT
+// cgo -cdefs defs_solaris.go defs_solaris_amd64.go
+
+
+enum {
+ EINTR = 0x4,
+ EBADF = 0x9,
+ EFAULT = 0xe,
+ EAGAIN = 0xb,
+ ETIMEDOUT = 0x91,
+ EWOULDBLOCK = 0xb,
+ EINPROGRESS = 0x96,
+
+ PROT_NONE = 0x0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+
+ MAP_ANON = 0x100,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+
+ MADV_FREE = 0x5,
+
+ SA_SIGINFO = 0x8,
+ SA_RESTART = 0x4,
+ SA_ONSTACK = 0x1,
+
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGEMT = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGBUS = 0xa,
+ SIGSEGV = 0xb,
+ SIGSYS = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGTERM = 0xf,
+ SIGURG = 0x15,
+ SIGSTOP = 0x17,
+ SIGTSTP = 0x18,
+ SIGCONT = 0x19,
+ SIGCHLD = 0x12,
+ SIGTTIN = 0x1a,
+ SIGTTOU = 0x1b,
+ SIGIO = 0x16,
+ SIGXCPU = 0x1e,
+ SIGXFSZ = 0x1f,
+ SIGVTALRM = 0x1c,
+ SIGPROF = 0x1d,
+ SIGWINCH = 0x14,
+ SIGUSR1 = 0x10,
+ SIGUSR2 = 0x11,
+
+ FPE_INTDIV = 0x1,
+ FPE_INTOVF = 0x2,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+
+ ITIMER_REAL = 0x0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+
+ _SC_NPROCESSORS_ONLN = 0xf,
+
+ PTHREAD_CREATE_DETACHED = 0x40,
+
+ FORK_NOSIGCHLD = 0x1,
+ FORK_WAITPID = 0x2,
+
+ MAXHOSTNAMELEN = 0x100,
+
+ O_NONBLOCK = 0x80,
+ FD_CLOEXEC = 0x1,
+ F_GETFL = 0x3,
+ F_SETFL = 0x4,
+ F_SETFD = 0x2,
+
+ POLLIN = 0x1,
+ POLLOUT = 0x4,
+ POLLHUP = 0x10,
+ POLLERR = 0x8,
+
+ PORT_SOURCE_FD = 0x4,
+};
+
+typedef struct SemT SemT;
+typedef struct Sigaltstack Sigaltstack;
+typedef struct Sigset Sigset;
+typedef struct StackT StackT;
+typedef struct Siginfo Siginfo;
+typedef struct Sigaction Sigaction;
+typedef struct Fpregset Fpregset;
+typedef struct Mcontext Mcontext;
+typedef struct Ucontext Ucontext;
+typedef struct Timespec Timespec;
+typedef struct Timeval Timeval;
+typedef struct Itimerval Itimerval;
+typedef struct PortEvent PortEvent;
+typedef struct PthreadAttr PthreadAttr;
+typedef struct Stat Stat;
+
+#pragma pack on
+
+struct SemT {
+ uint32 sem_count;
+ uint16 sem_type;
+ uint16 sem_magic;
+ uint64 sem_pad1[3];
+ uint64 sem_pad2[2];
+};
+
+struct Sigaltstack {
+ byte *ss_sp;
+ uint64 ss_size;
+ int32 ss_flags;
+ byte Pad_cgo_0[4];
+};
+struct Sigset {
+ uint32 __sigbits[4];
+};
+struct StackT {
+ byte *ss_sp;
+ uint64 ss_size;
+ int32 ss_flags;
+ byte Pad_cgo_0[4];
+};
+
+struct Siginfo {
+ int32 si_signo;
+ int32 si_code;
+ int32 si_errno;
+ int32 si_pad;
+ byte __data[240];
+};
+struct Sigaction {
+ int32 sa_flags;
+ byte Pad_cgo_0[4];
+ byte _funcptr[8];
+ Sigset sa_mask;
+};
+
+struct Fpregset {
+ byte fp_reg_set[528];
+};
+struct Mcontext {
+ int64 gregs[28];
+ Fpregset fpregs;
+};
+struct Ucontext {
+ uint64 uc_flags;
+ Ucontext *uc_link;
+ Sigset uc_sigmask;
+ StackT uc_stack;
+ byte Pad_cgo_0[8];
+ Mcontext uc_mcontext;
+ int64 uc_filler[5];
+ byte Pad_cgo_1[8];
+};
+
+struct Timespec {
+ int64 tv_sec;
+ int64 tv_nsec;
+};
+struct Timeval {
+ int64 tv_sec;
+ int64 tv_usec;
+};
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+
+struct PortEvent {
+ int32 portev_events;
+ uint16 portev_source;
+ uint16 portev_pad;
+ uint64 portev_object;
+ byte *portev_user;
+};
+typedef uint32 Pthread;
+struct PthreadAttr {
+ byte *__pthread_attrp;
+};
+
+struct Stat {
+ uint64 st_dev;
+ uint64 st_ino;
+ uint32 st_mode;
+ uint32 st_nlink;
+ uint32 st_uid;
+ uint32 st_gid;
+ uint64 st_rdev;
+ int64 st_size;
+ Timespec st_atim;
+ Timespec st_mtim;
+ Timespec st_ctim;
+ int32 st_blksize;
+ byte Pad_cgo_0[4];
+ int64 st_blocks;
+ int8 st_fstype[16];
+};
+
+
+#pragma pack off
+// Created by cgo -cdefs - DO NOT EDIT
+// cgo -cdefs defs_solaris.go defs_solaris_amd64.go
+
+
+enum {
+ REG_RDI = 0x8,
+ REG_RSI = 0x9,
+ REG_RDX = 0xc,
+ REG_RCX = 0xd,
+ REG_R8 = 0x7,
+ REG_R9 = 0x6,
+ REG_R10 = 0x5,
+ REG_R11 = 0x4,
+ REG_R12 = 0x3,
+ REG_R13 = 0x2,
+ REG_R14 = 0x1,
+ REG_R15 = 0x0,
+ REG_RBP = 0xa,
+ REG_RBX = 0xb,
+ REG_RAX = 0xe,
+ REG_GS = 0x17,
+ REG_FS = 0x16,
+ REG_ES = 0x18,
+ REG_DS = 0x19,
+ REG_TRAPNO = 0xf,
+ REG_ERR = 0x10,
+ REG_RIP = 0x11,
+ REG_CS = 0x12,
+ REG_RFLAGS = 0x13,
+ REG_RSP = 0x14,
+ REG_SS = 0x15,
+};
+
diff --git a/src/pkg/runtime/env_plan9.c b/src/pkg/runtime/env_plan9.c
index 599319c75..f732c9f29 100644
--- a/src/pkg/runtime/env_plan9.c
+++ b/src/pkg/runtime/env_plan9.c
@@ -12,6 +12,7 @@ runtime·getenv(int8 *s)
intgo len;
byte file[128];
byte *p;
+ static byte b[128];
len = runtime·findnull((byte*)s);
if(len > sizeof file-6)
@@ -25,7 +26,14 @@ runtime·getenv(int8 *s)
if(fd < 0)
return nil;
n = runtime·seek(fd, 0, 2);
- p = runtime·malloc(n+1);
+ if(runtime·strcmp((byte*)s, (byte*)"GOTRACEBACK") == 0){
+ // should not call malloc
+ if(n >= sizeof b)
+ return nil;
+ runtime·memclr(b, sizeof b);
+ p = b;
+ }else
+ p = runtime·malloc(n+1);
r = runtime·pread(fd, p, n, 0);
runtime·close(fd);
if(r < 0)
diff --git a/src/pkg/runtime/env_posix.c b/src/pkg/runtime/env_posix.c
index 00ce577d0..4c8288f6b 100644
--- a/src/pkg/runtime/env_posix.c
+++ b/src/pkg/runtime/env_posix.c
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd windows
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
#include "runtime.h"
+#include "arch_GOARCH.h"
+#include "malloc.h"
Slice syscall·envs;
@@ -44,15 +46,24 @@ void
syscall·setenv_c(String k, String v)
{
byte *arg[2];
+ uintptr len;
if(_cgo_setenv == nil)
return;
- arg[0] = runtime·malloc(k.len + 1);
+ // Objects that are explicitly freed must be at least 16 bytes in size,
+ // so that they are not allocated using tiny alloc.
+ len = k.len + 1;
+ if(len < TinySize)
+ len = TinySize;
+ arg[0] = runtime·malloc(len);
runtime·memmove(arg[0], k.str, k.len);
arg[0][k.len] = 0;
- arg[1] = runtime·malloc(v.len + 1);
+ len = v.len + 1;
+ if(len < TinySize)
+ len = TinySize;
+ arg[1] = runtime·malloc(len);
runtime·memmove(arg[1], v.str, v.len);
arg[1][v.len] = 0;
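
The constraint being enforced: objects that will be freed explicitly must not come from the tiny allocator, which packs several small objects into one block, so requests are rounded up to TinySize (16 bytes in this tree's malloc.h). As a standalone sketch:

    package main

    import "fmt"

    const tinySize = 16 // mirrors TinySize in malloc.h

    // allocSize pads a C-string allocation that will later be freed
    // explicitly, so it can never land in a shared tiny-alloc block.
    func allocSize(strlen int) int {
        n := strlen + 1 // room for the NUL terminator
        if n < tinySize {
            n = tinySize
        }
        return n
    }

    func main() {
        fmt.Println(allocSize(3), allocSize(40)) // 16 41
    }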
diff --git a/src/pkg/runtime/error.go b/src/pkg/runtime/error.go
index bd7090883..e704ff872 100644
--- a/src/pkg/runtime/error.go
+++ b/src/pkg/runtime/error.go
@@ -75,19 +75,20 @@ func newErrorString(s string, ret *interface{}) {
}
// An errorCString represents a runtime error described by a single C string.
-type errorCString uintptr
+// Not "type errorCString uintptr" because of http://golang.org/issue/7084.
+type errorCString struct{ cstr uintptr }
func (e errorCString) RuntimeError() {}
func cstringToGo(uintptr) string
func (e errorCString) Error() string {
- return "runtime error: " + cstringToGo(uintptr(e))
+ return "runtime error: " + cstringToGo(e.cstr)
}
// For calling from C.
func newErrorCString(s uintptr, ret *interface{}) {
- *ret = errorCString(s)
+ *ret = errorCString{s}
}
type stringer interface {
diff --git a/src/pkg/runtime/export_test.go b/src/pkg/runtime/export_test.go
index d170fa72a..7a31b63b3 100644
--- a/src/pkg/runtime/export_test.go
+++ b/src/pkg/runtime/export_test.go
@@ -31,11 +31,11 @@ type LFNode struct {
Pushcnt uintptr
}
-func lfstackpush(head *uint64, node *LFNode)
-func lfstackpop2(head *uint64) *LFNode
+func lfstackpush_go(head *uint64, node *LFNode)
+func lfstackpop_go(head *uint64) *LFNode
-var LFStackPush = lfstackpush
-var LFStackPop = lfstackpop2
+var LFStackPush = lfstackpush_go
+var LFStackPop = lfstackpop_go
type ParFor struct {
body *byte
@@ -48,17 +48,17 @@ type ParFor struct {
wait bool
}
-func parforalloc2(nthrmax uint32) *ParFor
-func parforsetup2(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32))
-func parfordo(desc *ParFor)
-func parforiters(desc *ParFor, tid uintptr) (uintptr, uintptr)
+func newParFor(nthrmax uint32) *ParFor
+func parForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32))
+func parForDo(desc *ParFor)
+func parForIters(desc *ParFor, tid uintptr) (uintptr, uintptr)
-var NewParFor = parforalloc2
-var ParForSetup = parforsetup2
-var ParForDo = parfordo
+var NewParFor = newParFor
+var ParForSetup = parForSetup
+var ParForDo = parForDo
func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
- begin, end := parforiters(desc, uintptr(tid))
+ begin, end := parForIters(desc, uintptr(tid))
return uint32(begin), uint32(end)
}
@@ -80,7 +80,13 @@ var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
-func GogoBytes() int32
-
var hashLoad float64 // declared in hashmap.c
var HashLoad = &hashLoad
+
+func memclrBytes(b []byte)
+
+var MemclrBytes = memclrBytes
+
+func gogoBytes() int32
+
+var GogoBytes = gogoBytes
diff --git a/src/pkg/runtime/extern.go b/src/pkg/runtime/extern.go
index 527e9cdf8..053dc1014 100644
--- a/src/pkg/runtime/extern.go
+++ b/src/pkg/runtime/extern.go
@@ -24,18 +24,28 @@ percentage at run time. See http://golang.org/pkg/runtime/debug/#SetGCPercent.
The GODEBUG variable controls debug output from the runtime. GODEBUG value is
a comma-separated list of name=val pairs. Supported names are:
+ allocfreetrace: setting allocfreetrace=1 causes every allocation to be
+ profiled and a stack trace printed on each object's allocation and free.
+
+ efence: setting efence=1 causes the allocator to run in a mode
+ where each object is allocated on a unique page and addresses are
+ never recycled.
+
gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard
error at each collection, summarizing the amount of memory collected and the
length of the pause. Setting gctrace=2 emits the same summary but also
repeats each collection.
- schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard
- error every X milliseconds, summarizing the scheduler state.
+ gcdead: setting gcdead=1 causes the garbage collector to clobber all stack slots
+ that it thinks are dead.
scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
detailed multiline info every X milliseconds, describing state of the scheduler,
processors, threads and goroutines.
+ schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard
+ error every X milliseconds, summarizing the scheduler state.
+
The GOMAXPROCS variable limits the number of operating system threads that
can execute user-level Go code simultaneously. There is no limit to the number of threads
that can be blocked in system calls on behalf of Go code; those do not count against
@@ -69,6 +79,11 @@ func Gosched()
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine.
+//
+// Calling Goexit from the main goroutine terminates that goroutine
+// without func main returning. Since func main has not returned,
+// the program continues execution of other goroutines.
+// If all other goroutines exit, the program crashes.
func Goexit()
// Caller reports file and line number information about function invocations on
@@ -153,6 +168,9 @@ func funcentry_go(*Func) uintptr
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// bufio.Writer, because the buffer would not be flushed at program exit.
//
+// It is not guaranteed that a finalizer will run if the size of *x is
+// zero bytes.
+//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
@@ -172,10 +190,8 @@ func GOROOT() string {
}
// Version returns the Go tree's version string.
-// It is either a sequence number or, when possible,
-// a release tag like "release.2010-03-04".
-// A trailing + indicates that the tree had local modifications
-// at the time of the build.
+// It is either the commit hash and date at the time of the build or,
+// when possible, a release tag like "go1.3".
func Version() string {
return theVersion
}
diff --git a/src/pkg/runtime/funcdata.h b/src/pkg/runtime/funcdata.h
index 166263ef9..e20b6ae25 100644
--- a/src/pkg/runtime/funcdata.h
+++ b/src/pkg/runtime/funcdata.h
@@ -8,9 +8,11 @@
// as well as the compilers.
#define PCDATA_ArgSize 0 /* argument size at CALL instruction */
+#define PCDATA_StackMapIndex 1
-#define FUNCDATA_GCArgs 0 /* garbage collector blocks */
-#define FUNCDATA_GCLocals 1
+#define FUNCDATA_ArgsPointerMaps 2 /* garbage collector blocks */
+#define FUNCDATA_LocalsPointerMaps 3
+#define FUNCDATA_DeadValueMaps 4
// To be used in assembly.
#define ARGSIZE(n) PCDATA $PCDATA_ArgSize, $n
diff --git a/src/pkg/runtime/futex_test.go b/src/pkg/runtime/futex_test.go
index f4054b7e7..f57fc52b8 100644
--- a/src/pkg/runtime/futex_test.go
+++ b/src/pkg/runtime/futex_test.go
@@ -2,33 +2,76 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Futex is only available on Dragonfly, FreeBSD and Linux.
-// The race detector emits calls to split stack functions so it breaks the test.
+// Futex is only available on DragonFly BSD, FreeBSD and Linux.
+// The race detector emits calls to split stack functions so it breaks
+// the test.
+
// +build dragonfly freebsd linux
// +build !race
package runtime_test
import (
- . "runtime"
+ "runtime"
"testing"
"time"
)
+type futexsleepTest struct {
+ mtx uint32
+ ns int64
+ msg string
+ ch chan futexsleepTest
+}
+
+var futexsleepTests = []futexsleepTest{
+ beforeY2038: {mtx: 0, ns: 86400 * 1e9, msg: "before the year 2038", ch: make(chan futexsleepTest, 1)},
+ afterY2038: {mtx: 0, ns: (1<<31 + 100) * 1e9, msg: "after the year 2038", ch: make(chan futexsleepTest, 1)},
+}
+
+const (
+ beforeY2038 = iota
+ afterY2038
+)
+
func TestFutexsleep(t *testing.T) {
- ch := make(chan bool, 1)
- var dummy uint32
+ if runtime.GOMAXPROCS(0) > 1 {
+ // futexsleep doesn't handle EINTR or other signals,
+ // so spurious wakeups may happen.
+ t.Skip("skipping; GOMAXPROCS>1")
+ }
+
start := time.Now()
- go func() {
- Entersyscall()
- Futexsleep(&dummy, 0, (1<<31+100)*1e9)
- Exitsyscall()
- ch <- true
- }()
- select {
- case <-ch:
- t.Errorf("futexsleep finished early after %s!", time.Since(start))
- case <-time.After(time.Second):
- Futexwakeup(&dummy, 1)
+ for _, tt := range futexsleepTests {
+ go func(tt futexsleepTest) {
+ runtime.Entersyscall()
+ runtime.Futexsleep(&tt.mtx, tt.mtx, tt.ns)
+ runtime.Exitsyscall()
+ tt.ch <- tt
+ }(tt)
+ }
+loop:
+ for {
+ select {
+ case tt := <-futexsleepTests[beforeY2038].ch:
+ t.Errorf("futexsleep test %q finished early after %s", tt.msg, time.Since(start))
+ break loop
+ case tt := <-futexsleepTests[afterY2038].ch:
+ // Looks like FreeBSD 10 kernel has changed
+ // the semantics of timedwait on userspace
+ // mutex to make broken stuff look broken.
+ switch {
+ case runtime.GOOS == "freebsd" && runtime.GOARCH == "386":
+ t.Log("freebsd/386 may not work correctly after the year 2038, see golang.org/issue/7194")
+ default:
+ t.Errorf("futexsleep test %q finished early after %s", tt.msg, time.Since(start))
+ break loop
+ }
+ case <-time.After(time.Second):
+ break loop
+ }
+ }
+ for _, tt := range futexsleepTests {
+ runtime.Futexwakeup(&tt.mtx, 1)
}
}
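
The magic constant in the test encodes the year-2038 boundary: a 32-bit signed time_t overflows 2^31 seconds after the Unix epoch, and the "afterY2038" case sleeps just past it. A quick check of the arithmetic:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// 2^31 seconds after the epoch is where 32-bit signed time_t overflows.
		fmt.Println(time.Unix(1<<31, 0).UTC()) // 2038-01-19 03:14:08 +0000 UTC
		// The test passes (1<<31 + 100) * 1e9 nanoseconds, i.e. a timeout
		// 100 seconds past that boundary.
	}
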
diff --git a/src/pkg/runtime/gc_test.go b/src/pkg/runtime/gc_test.go
index dbd68c1c7..58717ecf7 100644
--- a/src/pkg/runtime/gc_test.go
+++ b/src/pkg/runtime/gc_test.go
@@ -9,6 +9,7 @@ import (
"runtime"
"runtime/debug"
"testing"
+ "time"
)
func TestGcSys(t *testing.T) {
@@ -151,3 +152,85 @@ func TestGcRescan(t *testing.T) {
}
}
}
+
+func TestGcLastTime(t *testing.T) {
+ ms := new(runtime.MemStats)
+ t0 := time.Now().UnixNano()
+ runtime.GC()
+ t1 := time.Now().UnixNano()
+ runtime.ReadMemStats(ms)
+ last := int64(ms.LastGC)
+ if t0 > last || last > t1 {
+ t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
+ }
+}
+
+func BenchmarkSetTypeNoPtr1(b *testing.B) {
+ type NoPtr1 struct {
+ p uintptr
+ }
+ var p *NoPtr1
+ for i := 0; i < b.N; i++ {
+ p = &NoPtr1{}
+ }
+ _ = p
+}
+func BenchmarkSetTypeNoPtr2(b *testing.B) {
+ type NoPtr2 struct {
+ p, q uintptr
+ }
+ var p *NoPtr2
+ for i := 0; i < b.N; i++ {
+ p = &NoPtr2{}
+ }
+ _ = p
+}
+func BenchmarkSetTypePtr1(b *testing.B) {
+ type Ptr1 struct {
+ p *byte
+ }
+ var p *Ptr1
+ for i := 0; i < b.N; i++ {
+ p = &Ptr1{}
+ }
+ _ = p
+}
+func BenchmarkSetTypePtr2(b *testing.B) {
+ type Ptr2 struct {
+ p, q *byte
+ }
+ var p *Ptr2
+ for i := 0; i < b.N; i++ {
+ p = &Ptr2{}
+ }
+ _ = p
+}
+
+func BenchmarkAllocation(b *testing.B) {
+ type T struct {
+ x, y *byte
+ }
+ ngo := runtime.GOMAXPROCS(0)
+ work := make(chan bool, b.N+ngo)
+ result := make(chan *T)
+ for i := 0; i < b.N; i++ {
+ work <- true
+ }
+ for i := 0; i < ngo; i++ {
+ work <- false
+ }
+ for i := 0; i < ngo; i++ {
+ go func() {
+ var x *T
+ for <-work {
+ for i := 0; i < 1000; i++ {
+ x = &T{}
+ }
+ }
+ result <- x
+ }()
+ }
+ for i := 0; i < ngo; i++ {
+ <-result
+ }
+}
diff --git a/src/pkg/runtime/hash_test.go b/src/pkg/runtime/hash_test.go
index 312c4be8e..1c11e0538 100644
--- a/src/pkg/runtime/hash_test.go
+++ b/src/pkg/runtime/hash_test.go
@@ -397,10 +397,10 @@ func avalancheTest1(t *testing.T, k Key) {
// all sums inside those bounds with 99% probability.
N := n * hashSize
var c float64
- // find c such that Prob(mean-c*stddev < x < mean+c*stddev)^N > .99
- for c = 0.0; math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .99; c += .1 {
+ // find c such that Prob(mean-c*stddev < x < mean+c*stddev)^N > .9999
+ for c = 0.0; math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .9999; c += .1 {
}
- c *= 2.0 // allowed slack - we don't need to be perfectly random
+ c *= 4.0 // allowed slack - we don't need to be perfectly random
mean := .5 * REP
stddev := .5 * math.Sqrt(REP)
low := int(mean - c*stddev)
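
The loop above relies on the normal approximation P(|X - mean| < c*stddev) = erf(c/sqrt(2)), raised to the Nth power because all N bucket sums must stay within bounds simultaneously. A standalone sketch (the N below is illustrative, not the test's actual n*hashSize):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		N := 1024 // illustrative; the test uses n * hashSize
		c := 0.0
		for math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .9999 {
			c += .1
		}
		fmt.Printf("need c = %.1f standard deviations\n", c)
	}
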
diff --git a/src/pkg/runtime/hashmap.c b/src/pkg/runtime/hashmap.goc
index 6d2ab2168..3327bed65 100644
--- a/src/pkg/runtime/hashmap.c
+++ b/src/pkg/runtime/hashmap.goc
@@ -2,125 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "type.h"
#include "race.h"
+#include "hashmap.h"
+#include "typekind.h"
#include "../../cmd/ld/textflag.h"
-// This file contains the implementation of Go's map type.
-//
-// The map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/value pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index). To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times). When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Maximum number of key/value pairs a bucket can hold.
-#define BUCKETSIZE 8
-
-// Maximum average load of a bucket that triggers growth.
-#define LOAD 6.5
-
-// Picking LOAD: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and values)
-// LOAD %overflow bytes/entry hitprobe missprobe
-// 4.00 2.13 20.77 3.00 4.00
-// 4.50 4.05 17.30 3.25 4.50
-// 5.00 6.85 14.77 3.50 5.00
-// 5.50 10.55 12.94 3.75 5.50
-// 6.00 15.27 11.67 4.00 6.00
-// 6.50 20.90 10.79 4.25 6.50
-// 7.00 27.14 10.15 4.50 7.00
-// 7.50 34.03 9.73 4.75 7.50
-// 8.00 41.10 9.40 5.00 8.00
-//
-// %overflow = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/value pair
-// hitprobe = # of entries to check when looking up a present key
-// missprobe = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
-
-// Maximum key or value size to keep inline (instead of mallocing per element).
-// Must fit in a uint8.
-// Fast versions cannot handle big values - the cutoff size for
-// fast versions in ../../cmd/gc/walk.c must be at most this value.
-#define MAXKEYSIZE 128
-#define MAXVALUESIZE 128
-
-typedef struct Bucket Bucket;
-struct Bucket
-{
- // Note: the format of the Bucket is encoded in ../../cmd/gc/reflect.c and
- // ../reflect/type.go. Don't change this structure without also changing that code!
- uint8 tophash[BUCKETSIZE]; // top 8 bits of hash of each entry (0 = empty)
- Bucket *overflow; // overflow bucket, if any
- byte data[1]; // BUCKETSIZE keys followed by BUCKETSIZE values
-};
-// NOTE: packing all the keys together and then all the values together makes the
-// code a bit more complicated than alternating key/value/key/value/... but it allows
-// us to eliminate padding which would be needed for, e.g., map[int64]int8.
-
-// Low-order bit of overflow field is used to mark a bucket as already evacuated
-// without destroying the overflow pointer.
-// Only buckets in oldbuckets will be marked as evacuated.
-// Evacuated bit will be set identically on the base bucket and any overflow buckets.
-#define evacuated(b) (((uintptr)(b)->overflow & 1) != 0)
-#define overflowptr(b) ((Bucket*)((uintptr)(b)->overflow & ~(uintptr)1))
-
-struct Hmap
-{
- // Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
- // ../reflect/type.go. Don't change this structure without also changing that code!
- uintgo count; // # live cells == size of map. Must be first (used by len() builtin)
- uint32 flags;
- uint32 hash0; // hash seed
- uint8 B; // log_2 of # of buckets (can hold up to LOAD * 2^B items)
- uint8 keysize; // key size in bytes
- uint8 valuesize; // value size in bytes
- uint16 bucketsize; // bucket size in bytes
-
- byte *buckets; // array of 2^B Buckets. may be nil if count==0.
- byte *oldbuckets; // previous bucket array of half the size, non-nil only when growing
- uintptr nevacuate; // progress counter for evacuation (buckets less than this have been evacuated)
-};
-
-// possible flags
-enum
-{
- IndirectKey = 1, // storing pointers to keys
- IndirectValue = 2, // storing pointers to values
- Iterator = 4, // there may be an iterator using buckets
- OldIterator = 8, // there may be an iterator using oldbuckets
-};
-
-// Macros for dereferencing indirect keys
-#define IK(h, p) (((h)->flags & IndirectKey) != 0 ? *(byte**)(p) : (p))
-#define IV(h, p) (((h)->flags & IndirectValue) != 0 ? *(byte**)(p) : (p))
-
enum
{
docheck = 0, // check invariants before and after every op. Slow!!!
@@ -143,16 +34,12 @@ check(MapType *t, Hmap *h)
// check buckets
for(bucket = 0; bucket < (uintptr)1 << h->B; bucket++) {
- if(h->oldbuckets != nil) {
- oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
- b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
- if(!evacuated(b))
- continue; // b is still uninitialized
- }
for(b = (Bucket*)(h->buckets + bucket * h->bucketsize); b != nil; b = b->overflow) {
- for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
- if(b->tophash[i] == 0)
+ for(i = 0, k = (byte*)b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
+ if(b->tophash[i] == Empty)
continue;
+ if(b->tophash[i] > Empty && b->tophash[i] < MinTopHash)
+ runtime·throw("evacuated cell in buckets");
cnt++;
t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
if(!eq)
@@ -160,8 +47,8 @@ check(MapType *t, Hmap *h)
hash = h->hash0;
t->key->alg->hash(&hash, t->key->size, IK(h, k));
top = hash >> (8*sizeof(uintptr) - 8);
- if(top == 0)
- top = 1;
+ if(top < MinTopHash)
+ top += MinTopHash;
if(top != b->tophash[i])
runtime·throw("bad hash");
}
@@ -172,14 +59,12 @@ check(MapType *t, Hmap *h)
if(h->oldbuckets != nil) {
for(oldbucket = 0; oldbucket < (uintptr)1 << (h->B - 1); oldbucket++) {
b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
- if(evacuated(b))
- continue;
- if(oldbucket < h->nevacuate)
- runtime·throw("bucket became unevacuated");
- for(; b != nil; b = overflowptr(b)) {
- for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
- if(b->tophash[i] == 0)
+ for(; b != nil; b = b->overflow) {
+ for(i = 0, k = (byte*)b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
+ if(b->tophash[i] < MinTopHash)
continue;
+ if(oldbucket < h->nevacuate)
+ runtime·throw("unevacuated entry in an evacuated bucket");
cnt++;
t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
if(!eq)
@@ -187,8 +72,8 @@ check(MapType *t, Hmap *h)
hash = h->hash0;
t->key->alg->hash(&hash, t->key->size, IK(h, k));
top = hash >> (8*sizeof(uintptr) - 8);
- if(top == 0)
- top = 1;
+ if(top < MinTopHash)
+ top += MinTopHash;
if(top != b->tophash[i])
runtime·throw("bad hash (old)");
}
@@ -224,6 +109,10 @@ hash_init(MapType *t, Hmap *h, uint32 hint)
valuesize = sizeof(byte*);
}
bucketsize = offsetof(Bucket, data[0]) + (keysize + valuesize) * BUCKETSIZE;
+ if(bucketsize != t->bucket->size) {
+ runtime·printf("runtime: bucketsize=%p but t->bucket->size=%p; t=%S\n", bucketsize, t->bucket->size, *t->string);
+ runtime·throw("bucketsize wrong");
+ }
// invariants we depend on. We should probably check these at compile time
// somewhere, but for now we'll do it here.
@@ -237,9 +126,9 @@ hash_init(MapType *t, Hmap *h, uint32 hint)
runtime·throw("value size not a multiple of value align");
if(BUCKETSIZE < 8)
runtime·throw("bucketsize too small for proper alignment");
- if(sizeof(void*) == 4 && t->key->align > 4)
+ if((offsetof(Bucket, data[0]) & (t->key->align-1)) != 0)
runtime·throw("need padding in bucket (key)");
- if(sizeof(void*) == 4 && t->elem->align > 4)
+ if((offsetof(Bucket, data[0]) & (t->elem->align-1)) != 0)
runtime·throw("need padding in bucket (value)");
// find size parameter which will hold the requested # of elements
@@ -273,13 +162,12 @@ hash_init(MapType *t, Hmap *h, uint32 hint)
}
// Moves entries in oldbuckets[i] to buckets[i] and buckets[i+2^k].
-// We leave the original bucket intact, except for the evacuated marks, so that
-// iterators can still iterate through the old buckets.
+// We leave the original bucket intact, except for marking the topbits
+// entries as evacuated, so that iterators can still iterate through the old buckets.
static void
evacuate(MapType *t, Hmap *h, uintptr oldbucket)
{
Bucket *b;
- Bucket *nextb;
Bucket *x, *y;
Bucket *newx, *newy;
uintptr xi, yi;
@@ -302,15 +190,19 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
y = (Bucket*)(h->buckets + (oldbucket + newbit) * h->bucketsize);
xi = 0;
yi = 0;
- xk = x->data;
- yk = y->data;
+ xk = (byte*)x->data;
+ yk = (byte*)y->data;
xv = xk + h->keysize * BUCKETSIZE;
yv = yk + h->keysize * BUCKETSIZE;
- do {
- for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
+ for(; b != nil; b = b->overflow) {
+ for(i = 0, k = (byte*)b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
top = b->tophash[i];
- if(top == 0)
+ if(top == Empty) {
+ b->tophash[i] = EvacuatedEmpty;
continue;
+ }
+ if(top < MinTopHash)
+ runtime·throw("bad state");
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
@@ -335,19 +227,20 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
else
hash &= ~newbit;
top = hash >> (8*sizeof(uintptr)-8);
- if(top == 0)
- top = 1;
+ if(top < MinTopHash)
+ top += MinTopHash;
}
}
if((hash & newbit) == 0) {
+ b->tophash[i] = EvacuatedX;
if(xi == BUCKETSIZE) {
if(checkgc) mstats.next_gc = mstats.heap_alloc;
newx = runtime·cnew(t->bucket);
x->overflow = newx;
x = newx;
xi = 0;
- xk = x->data;
+ xk = (byte*)x->data;
xv = xk + h->keysize * BUCKETSIZE;
}
x->tophash[xi] = top;
@@ -365,13 +258,14 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
xk += h->keysize;
xv += h->valuesize;
} else {
+ b->tophash[i] = EvacuatedY;
if(yi == BUCKETSIZE) {
if(checkgc) mstats.next_gc = mstats.heap_alloc;
newy = runtime·cnew(t->bucket);
y->overflow = newy;
y = newy;
yi = 0;
- yk = y->data;
+ yk = (byte*)y->data;
yv = yk + h->keysize * BUCKETSIZE;
}
y->tophash[yi] = top;
@@ -390,26 +284,21 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
yv += h->valuesize;
}
}
+ }
- // mark as evacuated so we don't do it again.
- // this also tells any iterators that this data isn't golden anymore.
- nextb = b->overflow;
- b->overflow = (Bucket*)((uintptr)nextb + 1);
-
- b = nextb;
- } while(b != nil);
-
- // Unlink the overflow buckets to help GC.
- b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
- if((h->flags & OldIterator) == 0)
- b->overflow = (Bucket*)1;
+ // Unlink the overflow buckets & clear key/value to help GC.
+ if((h->flags & OldIterator) == 0) {
+ b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
+ b->overflow = nil;
+ runtime·memclr((byte*)b->data, h->bucketsize - offsetof(Bucket, data[0]));
+ }
}
- // advance evacuation mark
+ // Advance evacuation mark
if(oldbucket == h->nevacuate) {
h->nevacuate = oldbucket + 1;
if(oldbucket + 1 == newbit) // newbit == # of oldbuckets
- // free main bucket array
+ // Growing is all done. Free old main bucket array.
h->oldbuckets = nil;
}
if(docheck)
@@ -443,7 +332,6 @@ hash_grow(MapType *t, Hmap *h)
if(h->oldbuckets != nil)
runtime·throw("evacuation not done in time");
old_buckets = h->buckets;
- // NOTE: this could be a big malloc, but since we don't need zeroing it is probably fast.
if(checkgc) mstats.next_gc = mstats.heap_alloc;
new_buckets = runtime·cnewarray(t->bucket, (uintptr)1 << (h->B + 1));
flags = (h->flags & ~(Iterator | OldIterator));
@@ -495,10 +383,10 @@ hash_lookup(MapType *t, Hmap *h, byte **keyp)
b = (Bucket*)(h->buckets + bucket * h->bucketsize);
}
top = hash >> (sizeof(uintptr)*8 - 8);
- if(top == 0)
- top = 1;
+ if(top < MinTopHash)
+ top += MinTopHash;
do {
- for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
+ for(i = 0, k = (byte*)b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
if(b->tophash[i] == top) {
k2 = IK(h, k);
t->key->alg->equal(&eq, t->key->size, key, k2);
@@ -513,10 +401,6 @@ hash_lookup(MapType *t, Hmap *h, byte **keyp)
return nil;
}
-// When an item is not found, fast versions return a pointer to this zeroed memory.
-#pragma dataflag RODATA
-static uint8 empty_value[MAXVALUESIZE];
-
// Specialized versions of mapaccess1 for specific types.
// See ./hashmap_fast.c and ../../cmd/gc/walk.c.
#define HASH_LOOKUP1 runtime·mapaccess1_fast32
@@ -564,6 +448,9 @@ static uint8 empty_value[MAXVALUESIZE];
#ifdef GOARCH_amd64
#define CHECKTYPE uint64
#endif
+#ifdef GOARCH_amd64p32
+#define CHECKTYPE uint32
+#endif
#ifdef GOARCH_386
#define CHECKTYPE uint32
#endif
@@ -612,15 +499,15 @@ hash_insert(MapType *t, Hmap *h, void *key, void *value)
grow_work(t, h, bucket);
b = (Bucket*)(h->buckets + bucket * h->bucketsize);
top = hash >> (sizeof(uintptr)*8 - 8);
- if(top == 0)
- top = 1;
+ if(top < MinTopHash)
+ top += MinTopHash;
inserti = nil;
insertk = nil;
insertv = nil;
while(true) {
- for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
+ for(i = 0, k = (byte*)b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
if(b->tophash[i] != top) {
- if(b->tophash[i] == 0 && inserti == nil) {
+ if(b->tophash[i] == Empty && inserti == nil) {
inserti = &b->tophash[i];
insertk = k;
insertv = v;
@@ -654,7 +541,7 @@ hash_insert(MapType *t, Hmap *h, void *key, void *value)
newb = runtime·cnew(t->bucket);
b->overflow = newb;
inserti = newb->tophash;
- insertk = newb->data;
+ insertk = (byte*)newb->data;
insertv = insertk + h->keysize * BUCKETSIZE;
}
@@ -701,10 +588,10 @@ hash_remove(MapType *t, Hmap *h, void *key)
grow_work(t, h, bucket);
b = (Bucket*)(h->buckets + bucket * h->bucketsize);
top = hash >> (sizeof(uintptr)*8 - 8);
- if(top == 0)
- top = 1;
+ if(top < MinTopHash)
+ top += MinTopHash;
do {
- for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
+ for(i = 0, k = (byte*)b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
if(b->tophash[i] != top)
continue;
t->key->alg->equal(&eq, t->key->size, key, IK(h, k));
@@ -722,7 +609,7 @@ hash_remove(MapType *t, Hmap *h, void *key)
t->elem->alg->copy(t->elem->size, v, nil);
}
- b->tophash[i] = 0;
+ b->tophash[i] = Empty;
h->count--;
// TODO: consolidate buckets if they are mostly empty
@@ -737,42 +624,17 @@ hash_remove(MapType *t, Hmap *h, void *key)
// TODO: shrink the map, the same way we grow it.
-// If you modify hash_iter, also change cmd/gc/range.c to indicate
-// the size of this structure.
-struct hash_iter
-{
- uint8* key; // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c).
- uint8* value;
-
- MapType *t;
- Hmap *h;
-
- // end point for iteration
- uintptr endbucket;
- bool wrapped;
-
- // state of table at time iterator is initialized
- uint8 B;
- byte *buckets;
-
- // iter state
- uintptr bucket;
- struct Bucket *bptr;
- uintptr i;
- intptr check_bucket;
-};
-
// iterator state:
// bucket: the current bucket ID
// b: the current Bucket in the chain
// i: the next offset to check in the current bucket
static void
-hash_iter_init(MapType *t, Hmap *h, struct hash_iter *it)
+hash_iter_init(MapType *t, Hmap *h, Hiter *it)
{
uint32 old;
- if(sizeof(struct hash_iter) / sizeof(uintptr) != 11) {
- runtime·throw("hash_iter size incorrect"); // see ../../cmd/gc/range.c
+ if(sizeof(Hiter) / sizeof(uintptr) != 10) {
+ runtime·throw("hash_iter size incorrect"); // see ../../cmd/gc/reflect.c
}
it->t = t;
it->h = h;
@@ -782,8 +644,9 @@ hash_iter_init(MapType *t, Hmap *h, struct hash_iter *it)
it->buckets = h->buckets;
// iterator state
- it->bucket = it->endbucket = runtime·fastrand1() & (((uintptr)1 << h->B) - 1);
- it->wrapped = false;
+ it->bucket = 0;
+ it->offset = runtime·fastrand1() & (BUCKETSIZE - 1);
+ it->done = false;
it->bptr = nil;
// Remember we have an iterator.
@@ -798,22 +661,22 @@ hash_iter_init(MapType *t, Hmap *h, struct hash_iter *it)
if(h->buckets == nil) {
// Empty map. Force next hash_next to exit without
- // evalulating h->bucket.
- it->wrapped = true;
+ // evaluating h->bucket.
+ it->done = true;
}
}
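
With this change each iterator scans every bucket starting from a random intra-bucket offset, so iteration order differs between iterators even over an unchanged map. The effect is observable from ordinary Go code:

	package main

	import "fmt"

	func main() {
		m := map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}
		// Each range statement creates a fresh iterator with a fresh
		// random offset, so the two printed orders may differ.
		for k := range m {
			fmt.Print(k, " ")
		}
		fmt.Println()
		for k := range m {
			fmt.Print(k, " ")
		}
		fmt.Println()
	}
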
// initializes it->key and it->value to the next key/value pair
// in the iteration, or nil if we've reached the end.
static void
-hash_next(struct hash_iter *it)
+hash_next(Hiter *it)
{
Hmap *h;
MapType *t;
uintptr bucket, oldbucket;
uintptr hash;
Bucket *b;
- uintptr i;
+ uintptr i, offi;
intptr check_bucket;
bool eq;
byte *k, *v;
@@ -828,7 +691,7 @@ hash_next(struct hash_iter *it)
next:
if(b == nil) {
- if(bucket == it->endbucket && it->wrapped) {
+ if(it->done) {
// end of iteration
it->key = nil;
it->value = nil;
@@ -854,18 +717,20 @@ next:
bucket++;
if(bucket == ((uintptr)1 << it->B)) {
bucket = 0;
- it->wrapped = true;
+ it->done = true;
}
i = 0;
}
- k = b->data + h->keysize * i;
- v = b->data + h->keysize * BUCKETSIZE + h->valuesize * i;
- for(; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
- if(b->tophash[i] != 0) {
+ for(; i < BUCKETSIZE; i++) {
+ offi = (i + it->offset) & (BUCKETSIZE - 1);
+ k = (byte*)b->data + h->keysize * offi;
+ v = (byte*)b->data + h->keysize * BUCKETSIZE + h->valuesize * offi;
+ if(b->tophash[offi] != Empty && b->tophash[offi] != EvacuatedEmpty) {
if(check_bucket >= 0) {
// Special case: iterator was started during a grow and the
// grow is not done yet. We're working on a bucket whose
- // oldbucket has not been evacuated yet. So we're iterating
+ // oldbucket has not been evacuated yet. Or at least, it wasn't
+ // evacuated when we started the bucket. So we're iterating
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
@@ -883,12 +748,15 @@ next:
// repeatable and randomish choice of which direction
// to send NaNs during evacuation. We'll use the low
// bit of tophash to decide which way NaNs go.
- if(check_bucket >> (it->B - 1) != (b->tophash[i] & 1)) {
+ // NOTE: this case is why we need two evacuate tophash
+ // values, evacuatedX and evacuatedY, that differ in
+ // their low bit.
+ if(check_bucket >> (it->B - 1) != (b->tophash[offi] & 1)) {
continue;
}
}
}
- if(!evacuated(b)) {
+ if(b->tophash[offi] != EvacuatedX && b->tophash[offi] != EvacuatedY) {
// this is the golden data, we can return it.
it->key = IK(h, k);
it->value = IV(h, v);
@@ -924,7 +792,7 @@ next:
return;
}
}
- b = overflowptr(b);
+ b = b->overflow;
i = 0;
goto next;
}
@@ -933,15 +801,12 @@ next:
/// interfaces to go runtime
//
-void
-reflect·ismapkey(Type *typ, bool ret)
-{
+func reflect·ismapkey(typ *Type) (ret bool) {
ret = typ != nil && typ->alg->hash != runtime·nohash;
- FLUSH(&ret);
}
-Hmap*
-runtime·makemap_c(MapType *typ, int64 hint)
+static Hmap*
+makemap_c(MapType *typ, int64 hint)
{
Hmap *h;
Type *key;
@@ -971,235 +836,186 @@ runtime·makemap_c(MapType *typ, int64 hint)
return h;
}
-// makemap(key, val *Type, hint int64) (hmap *map[any]any);
-void
-runtime·makemap(MapType *typ, int64 hint, Hmap *ret)
-{
- ret = runtime·makemap_c(typ, hint);
- FLUSH(&ret);
+func makemap(typ *MapType, hint int64) (ret *Hmap) {
+ ret = makemap_c(typ, hint);
}
-// For reflect:
-// func makemap(Type *mapType) (hmap *map)
-void
-reflect·makemap(MapType *t, Hmap *ret)
-{
- ret = runtime·makemap_c(t, 0);
- FLUSH(&ret);
+func reflect·makemap(t *MapType) (ret *Hmap) {
+ ret = makemap_c(t, 0);
}
-void
-runtime·mapaccess(MapType *t, Hmap *h, byte *ak, byte *av, bool *pres)
-{
- byte *res;
- Type *elem;
-
- elem = t->elem;
- if(h == nil || h->count == 0) {
- elem->alg->copy(elem->size, av, nil);
- *pres = false;
- return;
- }
-
- res = hash_lookup(t, h, &ak);
-
- if(res != nil) {
- *pres = true;
- elem->alg->copy(elem->size, av, res);
- } else {
- *pres = false;
- elem->alg->copy(elem->size, av, nil);
- }
-}
-
-// mapaccess1(hmap *map[any]any, key any) (val any);
+// NOTE: The returned pointer may keep the whole map live, so don't
+// hold onto it for very long.
#pragma textflag NOSPLIT
-void
-runtime·mapaccess1(MapType *t, Hmap *h, ...)
-{
- byte *ak, *av;
- byte *res;
-
- if(raceenabled && h != nil)
+func mapaccess1(t *MapType, h *Hmap, key *byte) (val *byte) {
+ if(raceenabled && h != nil) {
runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapaccess1);
-
- ak = (byte*)(&h + 1);
- av = ak + ROUND(t->key->size, Structrnd);
-
+ runtime·racereadobjectpc(key, t->key, runtime·getcallerpc(&t), runtime·mapaccess1);
+ }
if(h == nil || h->count == 0) {
- t->elem->alg->copy(t->elem->size, av, nil);
+ val = t->elem->zero;
} else {
- res = hash_lookup(t, h, &ak);
- t->elem->alg->copy(t->elem->size, av, res);
+ val = hash_lookup(t, h, &key);
+ if(val == nil)
+ val = t->elem->zero;
}
if(debug) {
runtime·prints("runtime.mapaccess1: map=");
runtime·printpointer(h);
runtime·prints("; key=");
- t->key->alg->print(t->key->size, ak);
+ t->key->alg->print(t->key->size, key);
runtime·prints("; val=");
- t->elem->alg->print(t->elem->size, av);
+ t->elem->alg->print(t->elem->size, val);
runtime·prints("\n");
}
}
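
The rewritten lookup no longer copies zeros into caller-provided space on a miss; it hands back t->elem->zero, a pointer into a shared zeroed buffer. At the language level the behavior is unchanged:

	package main

	import "fmt"

	func main() {
		m := map[string]int{"a": 1}
		v, ok := m["missing"]
		fmt.Println(v, ok) // 0 false — the element type's zero value
	}
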
-// mapaccess2(hmap *map[any]any, key any) (val any, pres bool);
+// NOTE: The returned pointer keeps the whole map live, so don't
+// hold onto it for very long.
#pragma textflag NOSPLIT
-void
-runtime·mapaccess2(MapType *t, Hmap *h, ...)
-{
- byte *ak, *av, *ap;
-
- if(raceenabled && h != nil)
+func mapaccess2(t *MapType, h *Hmap, key *byte) (val *byte, pres bool) {
+ if(raceenabled && h != nil) {
runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapaccess2);
+ runtime·racereadobjectpc(key, t->key, runtime·getcallerpc(&t), runtime·mapaccess2);
+ }
- ak = (byte*)(&h + 1);
- av = ak + ROUND(t->key->size, Structrnd);
- ap = av + t->elem->size;
-
- runtime·mapaccess(t, h, ak, av, ap);
+ if(h == nil || h->count == 0) {
+ val = t->elem->zero;
+ pres = false;
+ } else {
+ val = hash_lookup(t, h, &key);
+ if(val == nil) {
+ val = t->elem->zero;
+ pres = false;
+ } else {
+ pres = true;
+ }
+ }
if(debug) {
runtime·prints("runtime.mapaccess2: map=");
runtime·printpointer(h);
runtime·prints("; key=");
- t->key->alg->print(t->key->size, ak);
+ t->key->alg->print(t->key->size, key);
runtime·prints("; val=");
- t->elem->alg->print(t->elem->size, av);
+ t->elem->alg->print(t->elem->size, val);
runtime·prints("; pres=");
- runtime·printbool(*ap);
+ runtime·printbool(pres);
runtime·prints("\n");
}
}
-// For reflect:
-// func mapaccess(t type, h map, key iword) (val iword, pres bool)
-// where an iword is the same word an interface value would use:
-// the actual data if it fits, or else a pointer to the data.
-void
-reflect·mapaccess(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
-{
- byte *ak, *av;
-
- if(raceenabled && h != nil)
- runtime·racereadpc(h, runtime·getcallerpc(&t), reflect·mapaccess);
-
- if(t->key->size <= sizeof(key))
- ak = (byte*)&key;
- else
- ak = (byte*)key;
- val = 0;
- pres = false;
- if(t->elem->size <= sizeof(val))
- av = (byte*)&val;
+#pragma textflag NOSPLIT
+func reflect·mapaccess(t *MapType, h *Hmap, key *byte) (val *byte) {
+ if(h == nil)
+ val = nil;
else {
- av = runtime·mal(t->elem->size);
- val = (uintptr)av;
+ if(raceenabled) {
+ runtime·racereadpc(h, runtime·getcallerpc(&t), reflect·mapaccess);
+ runtime·racereadobjectpc(key, t->key, runtime·getcallerpc(&t), reflect·mapaccess);
+ }
+ val = hash_lookup(t, h, &key);
}
- runtime·mapaccess(t, h, ak, av, &pres);
- FLUSH(&val);
- FLUSH(&pres);
}
-void
-runtime·mapassign(MapType *t, Hmap *h, byte *ak, byte *av)
-{
+#pragma textflag NOSPLIT
+func mapassign1(t *MapType, h *Hmap, key *byte, val *byte) {
if(h == nil)
runtime·panicstring("assignment to entry in nil map");
- if(av == nil) {
- hash_remove(t, h, ak);
- } else {
- hash_insert(t, h, ak, av);
+ if(raceenabled) {
+ runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapassign1);
+ runtime·racereadobjectpc(key, t->key, runtime·getcallerpc(&t), runtime·mapassign1);
+ runtime·racereadobjectpc(val, t->elem, runtime·getcallerpc(&t), runtime·mapassign1);
}
+ hash_insert(t, h, key, val);
+
if(debug) {
- runtime·prints("mapassign: map=");
+ runtime·prints("mapassign1: map=");
runtime·printpointer(h);
runtime·prints("; key=");
- t->key->alg->print(t->key->size, ak);
+ t->key->alg->print(t->key->size, key);
runtime·prints("; val=");
- if(av)
- t->elem->alg->print(t->elem->size, av);
- else
- runtime·prints("nil");
+ t->elem->alg->print(t->elem->size, val);
runtime·prints("\n");
}
}
-// mapassign1(mapType *type, hmap *map[any]any, key any, val any);
#pragma textflag NOSPLIT
-void
-runtime·mapassign1(MapType *t, Hmap *h, ...)
-{
- byte *ak, *av;
-
+func mapdelete(t *MapType, h *Hmap, key *byte) {
if(h == nil)
- runtime·panicstring("assignment to entry in nil map");
+ return;
- if(raceenabled)
- runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapassign1);
- ak = (byte*)(&h + 1);
- av = ak + ROUND(t->key->size, t->elem->align);
+ if(raceenabled) {
+ runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapdelete);
+ runtime·racereadobjectpc(key, t->key, runtime·getcallerpc(&t), runtime·mapdelete);
+ }
+
+ hash_remove(t, h, key);
- runtime·mapassign(t, h, ak, av);
+ if(debug) {
+ runtime·prints("mapdelete: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
+ t->key->alg->print(t->key->size, key);
+ runtime·prints("\n");
+ }
}
-// mapdelete(mapType *type, hmap *map[any]any, key any)
#pragma textflag NOSPLIT
-void
-runtime·mapdelete(MapType *t, Hmap *h, ...)
-{
- byte *ak;
-
+func reflect·mapassign(t *MapType, h *Hmap, key *byte, val *byte) {
if(h == nil)
- return;
+ runtime·panicstring("assignment to entry in nil map");
+ if(raceenabled) {
+ runtime·racewritepc(h, runtime·getcallerpc(&t), reflect·mapassign);
+ runtime·racereadobjectpc(key, t->key, runtime·getcallerpc(&t), reflect·mapassign);
+ runtime·racereadobjectpc(val, t->elem, runtime·getcallerpc(&t), reflect·mapassign);
+ }
- if(raceenabled)
- runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapdelete);
- ak = (byte*)(&h + 1);
- runtime·mapassign(t, h, ak, nil);
+ hash_insert(t, h, key, val);
if(debug) {
- runtime·prints("mapdelete: map=");
+ runtime·prints("mapassign: map=");
runtime·printpointer(h);
runtime·prints("; key=");
- t->key->alg->print(t->key->size, ak);
+ t->key->alg->print(t->key->size, key);
+ runtime·prints("; val=");
+ t->elem->alg->print(t->elem->size, val);
runtime·prints("\n");
}
}
-// For reflect:
-// func mapassign(t type h map, key, val iword, pres bool)
-// where an iword is the same word an interface value would use:
-// the actual data if it fits, or else a pointer to the data.
-void
-reflect·mapassign(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
-{
- byte *ak, *av;
-
+#pragma textflag NOSPLIT
+func reflect·mapdelete(t *MapType, h *Hmap, key *byte) {
if(h == nil)
- runtime·panicstring("assignment to entry in nil map");
- if(raceenabled)
- runtime·racewritepc(h, runtime·getcallerpc(&t), reflect·mapassign);
- if(t->key->size <= sizeof(key))
- ak = (byte*)&key;
- else
- ak = (byte*)key;
- if(t->elem->size <= sizeof(val))
- av = (byte*)&val;
- else
- av = (byte*)val;
- if(!pres)
- av = nil;
- runtime·mapassign(t, h, ak, av);
+ return; // see bug 8051
+ if(raceenabled) {
+ runtime·racewritepc(h, runtime·getcallerpc(&t), reflect·mapdelete);
+ runtime·racereadobjectpc(key, t->key, runtime·getcallerpc(&t), reflect·mapdelete);
+ }
+ hash_remove(t, h, key);
+
+ if(debug) {
+ runtime·prints("mapdelete: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
+ t->key->alg->print(t->key->size, key);
+ runtime·prints("\n");
+ }
}
-// mapiterinit(mapType *type, hmap *map[any]any, hiter *any);
-void
-runtime·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
-{
+#pragma textflag NOSPLIT
+func mapiterinit(t *MapType, h *Hmap, it *Hiter) {
+ // Clear pointer fields so garbage collector does not complain.
+ it->key = nil;
+ it->value = nil;
+ it->t = nil;
+ it->h = nil;
+ it->buckets = nil;
+ it->bptr = nil;
+
if(h == nil || h->count == 0) {
it->key = nil;
return;
@@ -1219,20 +1035,13 @@ runtime·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
}
}
-// For reflect:
-// func mapiterinit(h map) (it iter)
-void
-reflect·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
-{
+func reflect·mapiterinit(t *MapType, h *Hmap) (it *Hiter) {
it = runtime·mal(sizeof *it);
- FLUSH(&it);
runtime·mapiterinit(t, h, it);
}
-// mapiternext(hiter *any);
-void
-runtime·mapiternext(struct hash_iter *it)
-{
+#pragma textflag NOSPLIT
+func mapiternext(it *Hiter) {
if(raceenabled)
runtime·racereadpc(it->h, runtime·getcallerpc(&it), runtime·mapiternext);
@@ -1246,85 +1055,16 @@ runtime·mapiternext(struct hash_iter *it)
}
}
-// For reflect:
-// func mapiternext(it iter)
-void
-reflect·mapiternext(struct hash_iter *it)
-{
+func reflect·mapiternext(it *Hiter) {
runtime·mapiternext(it);
}
-// mapiter1(hiter *any) (key any);
-#pragma textflag NOSPLIT
-void
-runtime·mapiter1(struct hash_iter *it, ...)
-{
- byte *ak, *res;
- Type *key;
-
- ak = (byte*)(&it + 1);
-
- res = it->key;
- if(res == nil)
- runtime·throw("runtime.mapiter1: key:val nil pointer");
-
- key = it->t->key;
- key->alg->copy(key->size, ak, res);
-
- if(debug) {
- runtime·prints("mapiter1: iter=");
- runtime·printpointer(it);
- runtime·prints("; map=");
- runtime·printpointer(it->h);
- runtime·prints("\n");
- }
-}
-
-bool
-runtime·mapiterkey(struct hash_iter *it, void *ak)
-{
- byte *res;
- Type *key;
-
- res = it->key;
- if(res == nil)
- return false;
- key = it->t->key;
- key->alg->copy(key->size, ak, res);
- return true;
-}
-
-// For reflect:
-// func mapiterkey(h map) (key iword, ok bool)
-// where an iword is the same word an interface value would use:
-// the actual data if it fits, or else a pointer to the data.
-void
-reflect·mapiterkey(struct hash_iter *it, uintptr key, bool ok)
-{
- byte *res;
- Type *tkey;
-
- key = 0;
- ok = false;
- res = it->key;
- if(res != nil) {
- tkey = it->t->key;
- if(tkey->size <= sizeof(key))
- tkey->alg->copy(tkey->size, (byte*)&key, res);
- else
- key = (uintptr)res;
- ok = true;
- }
- FLUSH(&key);
- FLUSH(&ok);
+func reflect·mapiterkey(it *Hiter) (key *byte) {
+ key = it->key;
}
-// For reflect:
-// func maplen(h map) (len int)
-// Like len(m) in the actual language, we treat the nil map as length 0.
-void
-reflect·maplen(Hmap *h, intgo len)
-{
+#pragma textflag NOSPLIT
+func reflect·maplen(h *Hmap) (len int) {
if(h == nil)
len = 0;
else {
@@ -1332,35 +1072,6 @@ reflect·maplen(Hmap *h, intgo len)
if(raceenabled)
runtime·racereadpc(h, runtime·getcallerpc(&h), reflect·maplen);
}
- FLUSH(&len);
-}
-
-// mapiter2(hiter *any) (key any, val any);
-#pragma textflag NOSPLIT
-void
-runtime·mapiter2(struct hash_iter *it, ...)
-{
- byte *ak, *av, *res;
- MapType *t;
-
- t = it->t;
- ak = (byte*)(&it + 1);
- av = ak + ROUND(t->key->size, t->elem->align);
-
- res = it->key;
- if(res == nil)
- runtime·throw("runtime.mapiter2: key:val nil pointer");
-
- t->key->alg->copy(t->key->size, ak, res);
- t->elem->alg->copy(t->elem->size, av, it->value);
-
- if(debug) {
- runtime·prints("mapiter2: iter=");
- runtime·printpointer(it);
- runtime·prints("; map=");
- runtime·printpointer(it->h);
- runtime·prints("\n");
- }
}
// exported value for testing
diff --git a/src/pkg/runtime/hashmap.h b/src/pkg/runtime/hashmap.h
new file mode 100644
index 000000000..522d1ad01
--- /dev/null
+++ b/src/pkg/runtime/hashmap.h
@@ -0,0 +1,147 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the implementation of Go's map type.
+//
+// The map is just a hash table. The data is arranged
+// into an array of buckets. Each bucket contains up to
+// 8 key/value pairs. The low-order bits of the hash are
+// used to select a bucket. Each bucket contains a few
+// high-order bits of each hash to distinguish the entries
+// within a single bucket.
+//
+// If more than 8 keys hash to a bucket, we chain on
+// extra buckets.
+//
+// When the hashtable grows, we allocate a new array
+// of buckets twice as big. Buckets are incrementally
+// copied from the old bucket array to the new bucket array.
+//
+// Map iterators walk through the array of buckets and
+// return the keys in walk order (bucket #, then overflow
+// chain order, then bucket index). To maintain iteration
+// semantics, we never move keys within their bucket (if
+// we did, keys might be returned 0 or 2 times). When
+// growing the table, iterators remain iterating through the
+// old table and must check the new table if the bucket
+// they are iterating through has been moved ("evacuated")
+// to the new table.
+
+// Maximum number of key/value pairs a bucket can hold.
+#define BUCKETSIZE 8
+
+// Maximum average load of a bucket that triggers growth.
+#define LOAD 6.5
+
+// Picking LOAD: too large and we have lots of overflow
+// buckets, too small and we waste a lot of space. I wrote
+// a simple program to check some stats for different loads:
+// (64-bit, 8 byte keys and values)
+// LOAD %overflow bytes/entry hitprobe missprobe
+// 4.00 2.13 20.77 3.00 4.00
+// 4.50 4.05 17.30 3.25 4.50
+// 5.00 6.85 14.77 3.50 5.00
+// 5.50 10.55 12.94 3.75 5.50
+// 6.00 15.27 11.67 4.00 6.00
+// 6.50 20.90 10.79 4.25 6.50
+// 7.00 27.14 10.15 4.50 7.00
+// 7.50 34.03 9.73 4.75 7.50
+// 8.00 41.10 9.40 5.00 8.00
+//
+// %overflow = percentage of buckets which have an overflow bucket
+// bytes/entry = overhead bytes used per key/value pair
+// hitprobe = # of entries to check when looking up a present key
+// missprobe = # of entries to check when looking up an absent key
+//
+// Keep in mind this data is for maximally loaded tables, i.e. just
+// before the table grows. Typical tables will be somewhat less loaded.
+
+// Maximum key or value size to keep inline (instead of mallocing per element).
+// Must fit in a uint8.
+// Fast versions cannot handle big values - the cutoff size for
+// fast versions in ../../cmd/gc/walk.c must be at most this value.
+#define MAXKEYSIZE 128
+#define MAXVALUESIZE 128
+
+typedef struct Bucket Bucket;
+struct Bucket
+{
+ // Note: the format of the Bucket is encoded in ../../cmd/gc/reflect.c and
+ // ../reflect/type.go. Don't change this structure without also changing that code!
+ uint8 tophash[BUCKETSIZE]; // top 8 bits of hash of each entry (or special mark below)
+ Bucket *overflow; // overflow bucket, if any
+ uint64 data[1]; // BUCKETSIZE keys followed by BUCKETSIZE values
+};
+// NOTE: packing all the keys together and then all the values together makes the
+// code a bit more complicated than alternating key/value/key/value/... but it allows
+// us to eliminate padding which would be needed for, e.g., map[int64]int8.
+
+// tophash values. We reserve a few possibilities for special marks.
+// Each bucket (including its overflow buckets, if any) will have either all or none of its
+// entries in the Evacuated* states (except during the evacuate() method, which only happens
+// during map writes and thus no one else can observe the map during that time).
+enum
+{
+ Empty = 0, // cell is empty
+ EvacuatedEmpty = 1, // cell is empty, bucket is evacuated.
+ EvacuatedX = 2, // key/value is valid. Entry has been evacuated to first half of larger table.
+ EvacuatedY = 3, // same as above, but evacuated to second half of larger table.
+ MinTopHash = 4, // minimum tophash for a normal filled cell.
+};
+#define evacuated(b) ((b)->tophash[0] > Empty && (b)->tophash[0] < MinTopHash)
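
A sketch of the tophash computation that the `top < MinTopHash` checks throughout this patch implement: the top byte of the hash is used, but values below MinTopHash are bumped up so they cannot collide with the sentinel states above. (Go translation for illustration; the authoritative code is the C in this patch.)

	package main

	import (
		"fmt"
		"unsafe"
	)

	const minTopHash = 4 // mirrors MinTopHash

	func tophash(hash uintptr) uint8 {
		top := uint8(hash >> (8*unsafe.Sizeof(hash) - 8))
		if top < minTopHash {
			top += minTopHash // keep clear of Empty/Evacuated* sentinels
		}
		return top
	}

	func main() {
		// Literals assume a 64-bit platform.
		fmt.Println(tophash(0xab00000000000000)) // 171: top byte used as-is
		fmt.Println(tophash(0x02))               // top byte 0 -> bumped to 4
	}
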
+
+struct Hmap
+{
+ // Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
+ // ../reflect/type.go. Don't change this structure without also changing that code!
+ uintgo count; // # live cells == size of map. Must be first (used by len() builtin)
+ uint32 flags;
+ uint32 hash0; // hash seed
+ uint8 B; // log_2 of # of buckets (can hold up to LOAD * 2^B items)
+ uint8 keysize; // key size in bytes
+ uint8 valuesize; // value size in bytes
+ uint16 bucketsize; // bucket size in bytes
+
+ byte *buckets; // array of 2^B Buckets. may be nil if count==0.
+ byte *oldbuckets; // previous bucket array of half the size, non-nil only when growing
+ uintptr nevacuate; // progress counter for evacuation (buckets less than this have been evacuated)
+};
+
+// possible flags
+enum
+{
+ IndirectKey = 1, // storing pointers to keys
+ IndirectValue = 2, // storing pointers to values
+ Iterator = 4, // there may be an iterator using buckets
+ OldIterator = 8, // there may be an iterator using oldbuckets
+};
+
+// Macros for dereferencing indirect keys
+#define IK(h, p) (((h)->flags & IndirectKey) != 0 ? *(byte**)(p) : (p))
+#define IV(h, p) (((h)->flags & IndirectValue) != 0 ? *(byte**)(p) : (p))
+
+// If you modify Hiter, also change cmd/gc/reflect.c to indicate
+// the layout of this structure.
+struct Hiter
+{
+ uint8* key; // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c).
+ uint8* value; // Must be in second position (see cmd/gc/range.c).
+
+ MapType *t;
+ Hmap *h;
+ byte *buckets; // bucket ptr at hash_iter initialization time
+ struct Bucket *bptr; // current bucket
+
+ uint8 offset; // intra-bucket offset to start from during iteration (should be big enough to hold BUCKETSIZE-1)
+ bool done;
+
+ // state of table at time iterator is initialized
+ uint8 B;
+
+ // iter state
+ uintptr bucket;
+ uintptr i;
+ intptr check_bucket;
+};
+
diff --git a/src/pkg/runtime/hashmap_fast.c b/src/pkg/runtime/hashmap_fast.c
index 796582e2d..83bf6feb5 100644
--- a/src/pkg/runtime/hashmap_fast.c
+++ b/src/pkg/runtime/hashmap_fast.c
@@ -5,24 +5,23 @@
// Fast hashmap lookup specialized to a specific key type.
// Included by hashmap.c once for each specialized type.
-// Note that this code differs from hash_lookup in that
-// it returns a pointer to the result, not the result itself.
-// The returned pointer is only valid until the next GC
-// point, so the caller must dereference it before then.
-
// +build ignore
+// Because this file is #included, it cannot be processed by goc2c,
+// so we have to handle the Go results ourselves.
REPLACED_BY_GR_REPLACE
+
#pragma textflag NOSPLIT
void
-HASH_LOOKUP1(MapType *t, Hmap *h, KEYTYPE key, byte *value)
+HASH_LOOKUP1(MapType *t, Hmap *h, KEYTYPE key, GoOutput base, ...)
{
uintptr bucket, i;
Bucket *b;
KEYTYPE *k;
- byte *v;
+ byte *v, **valueptr;
uint8 top;
int8 keymaybe;
+ valueptr = (byte**)&base;
if(debug) {
runtime·prints("runtime.mapaccess1_fastXXX: map=");
runtime·printpointer(h);
@@ -31,8 +30,7 @@ HASH_LOOKUP1(MapType *t, Hmap *h, KEYTYPE key, byte *value)
runtime·prints("\n");
}
if(h == nil || h->count == 0) {
- value = empty_value;
- FLUSH(&value);
+ *valueptr = t->elem->zero;
return;
}
if(raceenabled)
@@ -45,26 +43,24 @@ HASH_LOOKUP1(MapType *t, Hmap *h, KEYTYPE key, byte *value)
b = (Bucket*)h->buckets;
if(FASTKEY(key)) {
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
- if(b->tophash[i] == 0)
+ if(b->tophash[i] == Empty)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k) || SLOW_EQ(key, *k)) {
- value = v;
- FLUSH(&value);
+ *valueptr = v;
return;
}
}
} else {
keymaybe = -1;
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
- if(b->tophash[i] == 0)
+ if(b->tophash[i] == Empty)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k)) {
- value = v;
- FLUSH(&value);
+ *valueptr = v;
return;
}
if(MAYBE_EQ(key, *k)) {
@@ -82,8 +78,7 @@ HASH_LOOKUP1(MapType *t, Hmap *h, KEYTYPE key, byte *value)
if(keymaybe >= 0) {
k = (KEYTYPE*)b->data + keymaybe;
if(SLOW_EQ(key, *k)) {
- value = (byte*)((KEYTYPE*)b->data + BUCKETSIZE) + keymaybe * h->valuesize;
- FLUSH(&value);
+ *valueptr = (byte*)((KEYTYPE*)b->data + BUCKETSIZE) + keymaybe * h->valuesize;
return;
}
}
@@ -93,8 +88,8 @@ dohash:
bucket = h->hash0;
HASHFUNC(&bucket, sizeof(KEYTYPE), &key);
top = bucket >> (sizeof(uintptr)*8 - 8);
- if(top == 0)
- top = 1;
+ if(top < MinTopHash)
+ top += MinTopHash;
bucket &= (((uintptr)1 << h->B) - 1);
if(h->oldbuckets != nil) {
i = bucket & (((uintptr)1 << (h->B - 1)) - 1);
@@ -112,29 +107,30 @@ dohash:
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k) || SLOW_EQ(key, *k)) {
- value = v;
- FLUSH(&value);
+ *valueptr = v;
return;
}
}
b = b->overflow;
} while(b != nil);
}
- value = empty_value;
- FLUSH(&value);
+ *valueptr = t->elem->zero;
}
#pragma textflag NOSPLIT
void
-HASH_LOOKUP2(MapType *t, Hmap *h, KEYTYPE key, byte *value, bool res)
+HASH_LOOKUP2(MapType *t, Hmap *h, KEYTYPE key, GoOutput base, ...)
{
uintptr bucket, i;
Bucket *b;
KEYTYPE *k;
- byte *v;
+ byte *v, **valueptr;
uint8 top;
int8 keymaybe;
+ bool *okptr;
+ valueptr = (byte**)&base;
+ okptr = (bool*)(valueptr+1);
if(debug) {
runtime·prints("runtime.mapaccess2_fastXXX: map=");
runtime·printpointer(h);
@@ -143,10 +139,8 @@ HASH_LOOKUP2(MapType *t, Hmap *h, KEYTYPE key, byte *value, bool res)
runtime·prints("\n");
}
if(h == nil || h->count == 0) {
- value = empty_value;
- res = false;
- FLUSH(&value);
- FLUSH(&res);
+ *valueptr = t->elem->zero;
+ *okptr = false;
return;
}
if(raceenabled)
@@ -159,30 +153,26 @@ HASH_LOOKUP2(MapType *t, Hmap *h, KEYTYPE key, byte *value, bool res)
b = (Bucket*)h->buckets;
if(FASTKEY(key)) {
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
- if(b->tophash[i] == 0)
+ if(b->tophash[i] == Empty)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k) || SLOW_EQ(key, *k)) {
- value = v;
- res = true;
- FLUSH(&value);
- FLUSH(&res);
+ *valueptr = v;
+ *okptr = true;
return;
}
}
} else {
keymaybe = -1;
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
- if(b->tophash[i] == 0)
+ if(b->tophash[i] == Empty)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k)) {
- value = v;
- res = true;
- FLUSH(&value);
- FLUSH(&res);
+ *valueptr = v;
+ *okptr = true;
return;
}
if(MAYBE_EQ(key, *k)) {
@@ -200,10 +190,8 @@ HASH_LOOKUP2(MapType *t, Hmap *h, KEYTYPE key, byte *value, bool res)
if(keymaybe >= 0) {
k = (KEYTYPE*)b->data + keymaybe;
if(SLOW_EQ(key, *k)) {
- value = (byte*)((KEYTYPE*)b->data + BUCKETSIZE) + keymaybe * h->valuesize;
- res = true;
- FLUSH(&value);
- FLUSH(&res);
+ *valueptr = (byte*)((KEYTYPE*)b->data + BUCKETSIZE) + keymaybe * h->valuesize;
+ *okptr = true;
return;
}
}
@@ -213,8 +201,8 @@ dohash:
bucket = h->hash0;
HASHFUNC(&bucket, sizeof(KEYTYPE), &key);
top = bucket >> (sizeof(uintptr)*8 - 8);
- if(top == 0)
- top = 1;
+ if(top < MinTopHash)
+ top += MinTopHash;
bucket &= (((uintptr)1 << h->B) - 1);
if(h->oldbuckets != nil) {
i = bucket & (((uintptr)1 << (h->B - 1)) - 1);
@@ -232,18 +220,14 @@ dohash:
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k) || SLOW_EQ(key, *k)) {
- value = v;
- res = true;
- FLUSH(&value);
- FLUSH(&res);
+ *valueptr = v;
+ *okptr = true;
return;
}
}
b = b->overflow;
} while(b != nil);
}
- value = empty_value;
- res = false;
- FLUSH(&value);
- FLUSH(&res);
+ *valueptr = t->elem->zero;
+ *okptr = false;
}
diff --git a/src/pkg/runtime/heapdump.c b/src/pkg/runtime/heapdump.c
new file mode 100644
index 000000000..744c59f9b
--- /dev/null
+++ b/src/pkg/runtime/heapdump.c
@@ -0,0 +1,981 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implementation of runtime/debug.WriteHeapDump. Writes all
+// objects in the heap plus additional info (roots, threads,
+// finalizers, etc.) to a file.
+
+// The format of the dumped file is described at
+// http://code.google.com/p/go-wiki/wiki/heapdump13
+
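
For context, the Go-level entry point this file implements is called like this (the output path is illustrative):

	package main

	import (
		"os"
		"runtime/debug"
	)

	func main() {
		f, err := os.Create("heap.dump") // illustrative path
		if err != nil {
			panic(err)
		}
		defer f.Close()
		debug.WriteHeapDump(f.Fd()) // writes the format described above
	}
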
+#include "runtime.h"
+#include "arch_GOARCH.h"
+#include "malloc.h"
+#include "mgc0.h"
+#include "type.h"
+#include "typekind.h"
+#include "funcdata.h"
+#include "zaexperiment.h"
+#include "../../cmd/ld/textflag.h"
+
+extern byte data[];
+extern byte edata[];
+extern byte bss[];
+extern byte ebss[];
+extern byte gcdata[];
+extern byte gcbss[];
+
+enum {
+ FieldKindEol = 0,
+ FieldKindPtr = 1,
+ FieldKindString = 2,
+ FieldKindSlice = 3,
+ FieldKindIface = 4,
+ FieldKindEface = 5,
+
+ TagEOF = 0,
+ TagObject = 1,
+ TagOtherRoot = 2,
+ TagType = 3,
+ TagGoRoutine = 4,
+ TagStackFrame = 5,
+ TagParams = 6,
+ TagFinalizer = 7,
+ TagItab = 8,
+ TagOSThread = 9,
+ TagMemStats = 10,
+ TagQueuedFinalizer = 11,
+ TagData = 12,
+ TagBss = 13,
+ TagDefer = 14,
+ TagPanic = 15,
+ TagMemProf = 16,
+ TagAllocSample = 17,
+
+ TypeInfo_Conservative = 127,
+};
+
+static uintptr* playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg);
+static void dumpfields(uintptr *prog);
+static void dumpefacetypes(void *obj, uintptr size, Type *type, uintptr kind);
+static void dumpbvtypes(BitVector *bv, byte *base);
+
+// fd to write the dump to.
+static uintptr dumpfd;
+
+// buffer of pending write data
+enum {
+ BufSize = 4096,
+};
+#pragma dataflag NOPTR
+static byte buf[BufSize];
+static uintptr nbuf;
+
+static void
+write(byte *data, uintptr len)
+{
+ if(len + nbuf <= BufSize) {
+ runtime·memmove(buf + nbuf, data, len);
+ nbuf += len;
+ return;
+ }
+ runtime·write(dumpfd, buf, nbuf);
+ if(len >= BufSize) {
+ runtime·write(dumpfd, data, len);
+ nbuf = 0;
+ } else {
+ runtime·memmove(buf, data, len);
+ nbuf = len;
+ }
+}
+
+static void
+flush(void)
+{
+ runtime·write(dumpfd, buf, nbuf);
+ nbuf = 0;
+}
+
+// Cache of types that have been serialized already.
+// We use a type's hash field to pick a bucket.
+// Inside a bucket, we keep a list of types that
+// have been serialized so far, most recently used first.
+// Note: when a bucket overflows we may end up
+// serializing a type more than once. That's ok.
+enum {
+ TypeCacheBuckets = 256, // must be a power of 2
+ TypeCacheAssoc = 4,
+};
+typedef struct TypeCacheBucket TypeCacheBucket;
+struct TypeCacheBucket {
+ Type *t[TypeCacheAssoc];
+};
+static TypeCacheBucket typecache[TypeCacheBuckets];
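
A sketch of the move-to-front, 4-way set-associative caching policy in Go (names are illustrative, not the runtime's):

	package main

	import "fmt"

	const assoc = 4

	type bucket struct {
		t [assoc]string
	}

	// seen reports whether x was already cached, moving it (or inserting
	// it) to the front so recently used entries are evicted last.
	func (b *bucket) seen(x string) bool {
		if x == b.t[0] {
			return true
		}
		for i := 1; i < assoc; i++ {
			if x == b.t[i] {
				copy(b.t[1:i+1], b.t[:i]) // move-to-front
				b.t[0] = x
				return true
			}
		}
		copy(b.t[1:], b.t[:assoc-1]) // evict the least recently used slot
		b.t[0] = x
		return false
	}

	func main() {
		var b bucket
		fmt.Println(b.seen("int")) // false: first time, gets cached
		fmt.Println(b.seen("int")) // true: cache hit
	}
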
+
+// dump a uint64 in a varint format parseable by encoding/binary
+static void
+dumpint(uint64 v)
+{
+ byte buf[10];
+ int32 n;
+ n = 0;
+ while(v >= 0x80) {
+ buf[n++] = v | 0x80;
+ v >>= 7;
+ }
+ buf[n++] = v;
+ write(buf, n);
+}
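
dumpint emits a standard base-128 uvarint, so a heap-dump reader written in Go can decode it directly with encoding/binary. A round-trip sketch:

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	func main() {
		tmp := make([]byte, binary.MaxVarintLen64)
		n := binary.PutUvarint(tmp, 300) // same bytes dumpint(300) writes: 0xac 0x02
		v, err := binary.ReadUvarint(bytes.NewReader(tmp[:n]))
		fmt.Println(v, err) // 300 <nil>
	}
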
+
+static void
+dumpbool(bool b)
+{
+ dumpint(b ? 1 : 0);
+}
+
+// dump varint uint64 length followed by memory contents
+static void
+dumpmemrange(byte *data, uintptr len)
+{
+ dumpint(len);
+ write(data, len);
+}
+
+static void
+dumpstr(String s)
+{
+ dumpmemrange(s.str, s.len);
+}
+
+static void
+dumpcstr(int8 *c)
+{
+ dumpmemrange((byte*)c, runtime·findnull((byte*)c));
+}
+
+// dump information for a type
+static void
+dumptype(Type *t)
+{
+ TypeCacheBucket *b;
+ int32 i, j;
+
+ if(t == nil) {
+ return;
+ }
+
+ // If we've definitely serialized the type before,
+ // no need to do it again.
+ b = &typecache[t->hash & (TypeCacheBuckets-1)];
+ if(t == b->t[0]) return;
+ for(i = 1; i < TypeCacheAssoc; i++) {
+ if(t == b->t[i]) {
+ // Move-to-front
+ for(j = i; j > 0; j--) {
+ b->t[j] = b->t[j-1];
+ }
+ b->t[0] = t;
+ return;
+ }
+ }
+ // Might not have been dumped yet. Dump it and
+ // remember we did so.
+ for(j = TypeCacheAssoc-1; j > 0; j--) {
+ b->t[j] = b->t[j-1];
+ }
+ b->t[0] = t;
+
+ // dump the type
+ dumpint(TagType);
+ dumpint((uintptr)t);
+ dumpint(t->size);
+ if(t->x == nil || t->x->pkgPath == nil || t->x->name == nil) {
+ dumpstr(*t->string);
+ } else {
+ dumpint(t->x->pkgPath->len + 1 + t->x->name->len);
+ write(t->x->pkgPath->str, t->x->pkgPath->len);
+ write((byte*)".", 1);
+ write(t->x->name->str, t->x->name->len);
+ }
+ dumpbool(t->size > PtrSize || (t->kind & KindNoPointers) == 0);
+ dumpfields((uintptr*)t->gc + 1);
+}
+
+// returns true if object is scannable
+static bool
+scannable(byte *obj)
+{
+ uintptr *b, off, shift;
+
+ off = (uintptr*)obj - (uintptr*)runtime·mheap.arena_start; // word offset
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ return ((*b >> shift) & bitScan) != 0;
+}
+
+// dump an object
+static void
+dumpobj(byte *obj, uintptr size, Type *type, uintptr kind)
+{
+ if(type != nil) {
+ dumptype(type);
+ dumpefacetypes(obj, size, type, kind);
+ }
+
+ dumpint(TagObject);
+ dumpint((uintptr)obj);
+ dumpint((uintptr)type);
+ dumpint(kind);
+ dumpmemrange(obj, size);
+}
+
+static void
+dumpotherroot(int8 *description, byte *to)
+{
+ dumpint(TagOtherRoot);
+ dumpcstr(description);
+ dumpint((uintptr)to);
+}
+
+static void
+dumpfinalizer(byte *obj, FuncVal *fn, Type* fint, PtrType *ot)
+{
+ dumpint(TagFinalizer);
+ dumpint((uintptr)obj);
+ dumpint((uintptr)fn);
+ dumpint((uintptr)fn->fn);
+ dumpint((uintptr)fint);
+ dumpint((uintptr)ot);
+}
+
+typedef struct ChildInfo ChildInfo;
+struct ChildInfo {
+ // Information passed up from the callee frame about
+ // the layout of the outargs region.
+ uintptr argoff; // where the arguments start in the frame
+ uintptr arglen; // size of args region
+ BitVector args; // if args.n >= 0, pointer map of args region
+
+ byte *sp; // callee sp
+ uintptr depth; // depth in call stack (0 == most recent)
+};
+
+// dump kinds & offsets of interesting fields in bv
+static void
+dumpbv(BitVector *bv, uintptr offset)
+{
+ uintptr i;
+
+ for(i = 0; i < bv->n; i += BitsPerPointer) {
+ switch(bv->data[i/32] >> i%32 & 3) {
+ case BitsDead:
+ case BitsScalar:
+ break;
+ case BitsPointer:
+ dumpint(FieldKindPtr);
+ dumpint(offset + i / BitsPerPointer * PtrSize);
+ break;
+ case BitsMultiWord:
+ switch(bv->data[(i+BitsPerPointer)/32] >> (i+BitsPerPointer)%32 & 3) {
+ case BitsString:
+ dumpint(FieldKindString);
+ dumpint(offset + i / BitsPerPointer * PtrSize);
+ i += BitsPerPointer;
+ break;
+ case BitsSlice:
+ dumpint(FieldKindSlice);
+ dumpint(offset + i / BitsPerPointer * PtrSize);
+ i += 2 * BitsPerPointer;
+ break;
+ case BitsIface:
+ dumpint(FieldKindIface);
+ dumpint(offset + i / BitsPerPointer * PtrSize);
+ i += BitsPerPointer;
+ break;
+ case BitsEface:
+ dumpint(FieldKindEface);
+ dumpint(offset + i / BitsPerPointer * PtrSize);
+ i += BitsPerPointer;
+ break;
+ }
+ }
+ }
+}
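
A sketch of the two-bits-per-pointer-word encoding dumpbv walks; the constants mirror the mgc0.h values it consults (assumed here for illustration):

	package main

	import "fmt"

	const (
		BitsDead       = 0
		BitsScalar     = 1
		BitsPointer    = 2
		BitsPerPointer = 2
	)

	func main() {
		// One uint32 packs 16 two-bit entries; word w occupies bits [2w, 2w+1].
		// 0x92 encodes: word0=pointer, word1=dead, word2=scalar, word3=pointer.
		data := []uint32{0x92}
		for i := uint(0); i < 4*BitsPerPointer; i += BitsPerPointer {
			if data[i/32]>>(i%32)&3 == BitsPointer {
				fmt.Printf("pointer at word offset %d\n", i/BitsPerPointer)
			}
		}
	}
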
+
+static bool
+dumpframe(Stkframe *s, void *arg)
+{
+ Func *f;
+ ChildInfo *child;
+ uintptr pc, off, size;
+ int32 pcdata;
+ StackMap *stackmap;
+ int8 *name;
+ BitVector bv;
+
+ child = (ChildInfo*)arg;
+ f = s->fn;
+
+ // Figure out what we can about our stack map
+ pc = s->pc;
+ if(pc != f->entry)
+ pc--;
+ pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, pc);
+ if(pcdata == -1) {
+ // We do not have a valid pcdata value but there might be a
+ // stackmap for this function. It is likely that we are looking
+	// at the function prologue; assume so and hope for the best.
+ pcdata = 0;
+ }
+ stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
+
+ // Dump any types we will need to resolve Efaces.
+ if(child->args.n >= 0)
+ dumpbvtypes(&child->args, (byte*)s->sp + child->argoff);
+ if(stackmap != nil && stackmap->n > 0) {
+ bv = runtime·stackmapdata(stackmap, pcdata);
+ dumpbvtypes(&bv, s->varp - bv.n / BitsPerPointer * PtrSize);
+ } else {
+ bv.n = -1;
+ }
+
+ // Dump main body of stack frame.
+ dumpint(TagStackFrame);
+ dumpint(s->sp); // lowest address in frame
+ dumpint(child->depth); // # of frames deep on the stack
+ dumpint((uintptr)child->sp); // sp of child, or 0 if bottom of stack
+ dumpmemrange((byte*)s->sp, s->fp - s->sp); // frame contents
+ dumpint(f->entry);
+ dumpint(s->pc);
+ dumpint(s->continpc);
+ name = runtime·funcname(f);
+ if(name == nil)
+ name = "unknown function";
+ dumpcstr(name);
+
+ // Dump fields in the outargs section
+ if(child->args.n >= 0) {
+ dumpbv(&child->args, child->argoff);
+ } else {
+ // conservative - everything might be a pointer
+ for(off = child->argoff; off < child->argoff + child->arglen; off += PtrSize) {
+ dumpint(FieldKindPtr);
+ dumpint(off);
+ }
+ }
+
+ // Dump fields in the local vars section
+ if(stackmap == nil) {
+ // No locals information, dump everything.
+ for(off = child->arglen; off < s->varp - (byte*)s->sp; off += PtrSize) {
+ dumpint(FieldKindPtr);
+ dumpint(off);
+ }
+ } else if(stackmap->n < 0) {
+ // Locals size information, dump just the locals.
+ size = -stackmap->n;
+ for(off = s->varp - size - (byte*)s->sp; off < s->varp - (byte*)s->sp; off += PtrSize) {
+ dumpint(FieldKindPtr);
+ dumpint(off);
+ }
+ } else if(stackmap->n > 0) {
+ // Locals bitmap information, scan just the pointers in
+ // locals.
+ dumpbv(&bv, s->varp - bv.n / BitsPerPointer * PtrSize - (byte*)s->sp);
+ }
+ dumpint(FieldKindEol);
+
+ // Record arg info for parent.
+ child->argoff = s->argp - (byte*)s->fp;
+ child->arglen = s->arglen;
+ child->sp = (byte*)s->sp;
+ child->depth++;
+ stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
+ if(stackmap != nil)
+ child->args = runtime·stackmapdata(stackmap, pcdata);
+ else
+ child->args.n = -1;
+ return true;
+}
+
+static void
+dumpgoroutine(G *gp)
+{
+ uintptr sp, pc, lr;
+ ChildInfo child;
+ Defer *d;
+ Panic *p;
+
+ if(gp->syscallstack != (uintptr)nil) {
+ sp = gp->syscallsp;
+ pc = gp->syscallpc;
+ lr = 0;
+ } else {
+ sp = gp->sched.sp;
+ pc = gp->sched.pc;
+ lr = gp->sched.lr;
+ }
+
+ dumpint(TagGoRoutine);
+ dumpint((uintptr)gp);
+ dumpint((uintptr)sp);
+ dumpint(gp->goid);
+ dumpint(gp->gopc);
+ dumpint(gp->status);
+ dumpbool(gp->issystem);
+ dumpbool(gp->isbackground);
+ dumpint(gp->waitsince);
+ dumpcstr(gp->waitreason);
+ dumpint((uintptr)gp->sched.ctxt);
+ dumpint((uintptr)gp->m);
+ dumpint((uintptr)gp->defer);
+ dumpint((uintptr)gp->panic);
+
+ // dump stack
+ child.args.n = -1;
+ child.arglen = 0;
+ child.sp = nil;
+ child.depth = 0;
+ if(!ScanStackByFrames)
+ runtime·throw("need frame info to dump stacks");
+ runtime·gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);
+
+ // dump defer & panic records
+ for(d = gp->defer; d != nil; d = d->link) {
+ dumpint(TagDefer);
+ dumpint((uintptr)d);
+ dumpint((uintptr)gp);
+ dumpint((uintptr)d->argp);
+ dumpint((uintptr)d->pc);
+ dumpint((uintptr)d->fn);
+ dumpint((uintptr)d->fn->fn);
+ dumpint((uintptr)d->link);
+ }
+ for (p = gp->panic; p != nil; p = p->link) {
+ dumpint(TagPanic);
+ dumpint((uintptr)p);
+ dumpint((uintptr)gp);
+ dumpint((uintptr)p->arg.type);
+ dumpint((uintptr)p->arg.data);
+ dumpint((uintptr)p->defer);
+ dumpint((uintptr)p->link);
+ }
+}
+
+static void
+dumpgs(void)
+{
+ G *gp;
+ uint32 i;
+
+ // goroutines & stacks
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
+ switch(gp->status){
+ default:
+ runtime·printf("unexpected G.status %d\n", gp->status);
+ runtime·throw("mark - bad status");
+ case Gdead:
+ break;
+ case Grunnable:
+ case Gsyscall:
+ case Gwaiting:
+ dumpgoroutine(gp);
+ break;
+ }
+ }
+}
+
+static void
+finq_callback(FuncVal *fn, byte *obj, uintptr nret, Type *fint, PtrType *ot)
+{
+ dumpint(TagQueuedFinalizer);
+ dumpint((uintptr)obj);
+ dumpint((uintptr)fn);
+ dumpint((uintptr)fn->fn);
+ dumpint((uintptr)fint);
+ dumpint((uintptr)ot);
+ USED(&nret);
+}
+
+static void
+dumproots(void)
+{
+ MSpan *s, **allspans;
+ uint32 spanidx;
+ Special *sp;
+ SpecialFinalizer *spf;
+ byte *p;
+
+ // data segment
+ dumpint(TagData);
+ dumpint((uintptr)data);
+ dumpmemrange(data, edata - data);
+ dumpfields((uintptr*)gcdata + 1);
+
+ // bss segment
+ dumpint(TagBss);
+ dumpint((uintptr)bss);
+ dumpmemrange(bss, ebss - bss);
+ dumpfields((uintptr*)gcbss + 1);
+
+ // MSpan.types
+ allspans = runtime·mheap.allspans;
+ for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
+ s = allspans[spanidx];
+ if(s->state == MSpanInUse) {
+ // The garbage collector ignores type pointers stored in MSpan.types:
+ //  - Compiler-generated types are stored outside the heap.
+ //  - The reflect package has runtime-generated types cached in its data structures.
+ //    The garbage collector relies on finding the references via that cache.
+ switch(s->types.compression) {
+ case MTypes_Empty:
+ case MTypes_Single:
+ break;
+ case MTypes_Words:
+ case MTypes_Bytes:
+ dumpotherroot("runtime type info", (byte*)s->types.data);
+ break;
+ }
+
+ // Finalizers
+ for(sp = s->specials; sp != nil; sp = sp->next) {
+ if(sp->kind != KindSpecialFinalizer)
+ continue;
+ spf = (SpecialFinalizer*)sp;
+ p = (byte*)((s->start << PageShift) + spf->offset);
+ dumpfinalizer(p, spf->fn, spf->fint, spf->ot);
+ }
+ }
+ }
+
+ // Finalizer queue
+ runtime·iterate_finq(finq_callback);
+}
+
+// Byte-per-object vector of free marks.
+// Needs to be as big as the largest number of objects per span
+// (at most PageSize/8; dumpobjs throws if a span has more).
+static byte free[PageSize/8];
+
+static void
+dumpobjs(void)
+{
+ uintptr i, j, size, n, off, shift, *bitp, bits, ti, kind;
+ MSpan *s;
+ MLink *l;
+ byte *p;
+ Type *t;
+
+ for(i = 0; i < runtime·mheap.nspan; i++) {
+ s = runtime·mheap.allspans[i];
+ if(s->state != MSpanInUse)
+ continue;
+ p = (byte*)(s->start << PageShift);
+ size = s->elemsize;
+ n = (s->npages << PageShift) / size;
+ if(n > PageSize/8)
+ runtime·throw("free array doesn't have enough entries");
+ for(l = s->freelist; l != nil; l = l->next) {
+ free[((byte*)l - p) / size] = true;
+ }
+ for(j = 0; j < n; j++, p += size) {
+ if(free[j]) {
+ free[j] = false;
+ continue;
+ }
+ off = (uintptr*)p - (uintptr*)runtime·mheap.arena_start;
+ bitp = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ bits = *bitp >> shift;
+
+ // Skip FlagNoGC allocations (stacks)
+ if((bits & bitAllocated) == 0)
+ continue;
+
+ // extract type and kind
+ ti = runtime·gettype(p);
+ t = (Type*)(ti & ~(uintptr)(PtrSize-1));
+ kind = ti & (PtrSize-1);
+
+ // dump it
+ if(kind == TypeInfo_Chan)
+ t = ((ChanType*)t)->elem; // use element type for chan encoding
+ if(t == nil && scannable(p))
+ kind = TypeInfo_Conservative; // special kind for conservatively scanned objects
+ dumpobj(p, size, t, kind);
+ }
+ }
+}
+
+static void
+dumpparams(void)
+{
+ byte *x;
+
+ dumpint(TagParams);
+ x = (byte*)1;
+ if(*(byte*)&x == 1)
+ dumpbool(false); // little-endian ptrs
+ else
+ dumpbool(true); // big-endian ptrs
+ dumpint(PtrSize);
+ dumpint(runtime·Hchansize);
+ dumpint((uintptr)runtime·mheap.arena_start);
+ dumpint((uintptr)runtime·mheap.arena_used);
+ dumpint(thechar);
+ dumpcstr(GOEXPERIMENT);
+ dumpint(runtime·ncpu);
+}
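
The byte-order probe above stores the pointer value 1 into x and inspects the first byte of its in-memory representation: little-endian machines put the low-order byte first, so that byte reads back as 1. The same trick in isolation:

    #include <stdio.h>

    int
    main(void)
    {
        void *x = (void*)1;

        if(*(unsigned char*)&x == 1)
            printf("little-endian pointers\n");
        else
            printf("big-endian pointers\n");
        return 0;
    }
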
+
+static void
+itab_callback(Itab *tab)
+{
+ Type *t;
+
+ dumpint(TagItab);
+ dumpint((uintptr)tab);
+ t = tab->type;
+ dumpbool(t->size > PtrSize || (t->kind & KindNoPointers) == 0);
+}
+
+static void
+dumpitabs(void)
+{
+ runtime·iterate_itabs(itab_callback);
+}
+
+static void
+dumpms(void)
+{
+ M *mp;
+
+ for(mp = runtime·allm; mp != nil; mp = mp->alllink) {
+ dumpint(TagOSThread);
+ dumpint((uintptr)mp);
+ dumpint(mp->id);
+ dumpint(mp->procid);
+ }
+}
+
+static void
+dumpmemstats(void)
+{
+ int32 i;
+
+ dumpint(TagMemStats);
+ dumpint(mstats.alloc);
+ dumpint(mstats.total_alloc);
+ dumpint(mstats.sys);
+ dumpint(mstats.nlookup);
+ dumpint(mstats.nmalloc);
+ dumpint(mstats.nfree);
+ dumpint(mstats.heap_alloc);
+ dumpint(mstats.heap_sys);
+ dumpint(mstats.heap_idle);
+ dumpint(mstats.heap_inuse);
+ dumpint(mstats.heap_released);
+ dumpint(mstats.heap_objects);
+ dumpint(mstats.stacks_inuse);
+ dumpint(mstats.stacks_sys);
+ dumpint(mstats.mspan_inuse);
+ dumpint(mstats.mspan_sys);
+ dumpint(mstats.mcache_inuse);
+ dumpint(mstats.mcache_sys);
+ dumpint(mstats.buckhash_sys);
+ dumpint(mstats.gc_sys);
+ dumpint(mstats.other_sys);
+ dumpint(mstats.next_gc);
+ dumpint(mstats.last_gc);
+ dumpint(mstats.pause_total_ns);
+ for(i = 0; i < 256; i++)
+ dumpint(mstats.pause_ns[i]);
+ dumpint(mstats.numgc);
+}
+
+static void
+dumpmemprof_callback(Bucket *b, uintptr nstk, uintptr *stk, uintptr size, uintptr allocs, uintptr frees)
+{
+ uintptr i, pc;
+ Func *f;
+ byte buf[20];
+ String file;
+ int32 line;
+
+ dumpint(TagMemProf);
+ dumpint((uintptr)b);
+ dumpint(size);
+ dumpint(nstk);
+ for(i = 0; i < nstk; i++) {
+ pc = stk[i];
+ f = runtime·findfunc(pc);
+ if(f == nil) {
+ runtime·snprintf(buf, sizeof(buf), "%X", (uint64)pc);
+ dumpcstr((int8*)buf);
+ dumpcstr("?");
+ dumpint(0);
+ } else {
+ dumpcstr(runtime·funcname(f));
+ // TODO: Why do we need to back up to a call instruction here?
+ // Maybe profiler should do this.
+ if(i > 0 && pc > f->entry) {
+ if(thechar == '6' || thechar == '8')
+ pc--;
+ else
+ pc -= 4; // arm, etc
+ }
+ line = runtime·funcline(f, pc, &file);
+ dumpstr(file);
+ dumpint(line);
+ }
+ }
+ dumpint(allocs);
+ dumpint(frees);
+}
+
+static void
+dumpmemprof(void)
+{
+ MSpan *s, **allspans;
+ uint32 spanidx;
+ Special *sp;
+ SpecialProfile *spp;
+ byte *p;
+
+ runtime·iterate_memprof(dumpmemprof_callback);
+
+ allspans = runtime·mheap.allspans;
+ for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
+ s = allspans[spanidx];
+ if(s->state != MSpanInUse)
+ continue;
+ for(sp = s->specials; sp != nil; sp = sp->next) {
+ if(sp->kind != KindSpecialProfile)
+ continue;
+ spp = (SpecialProfile*)sp;
+ p = (byte*)((s->start << PageShift) + spp->offset);
+ dumpint(TagAllocSample);
+ dumpint((uintptr)p);
+ dumpint((uintptr)spp->b);
+ }
+ }
+}
+
+static void
+mdump(G *gp)
+{
+ byte *hdr;
+ uintptr i;
+ MSpan *s;
+
+ // make sure we're done sweeping
+ for(i = 0; i < runtime·mheap.nspan; i++) {
+ s = runtime·mheap.allspans[i];
+ if(s->state == MSpanInUse)
+ runtime·MSpan_EnsureSwept(s);
+ }
+
+ runtime·memclr((byte*)&typecache[0], sizeof(typecache));
+ hdr = (byte*)"go1.3 heap dump\n";
+ write(hdr, runtime·findnull(hdr));
+ dumpparams();
+ dumpitabs();
+ dumpobjs();
+ dumpgs();
+ dumpms();
+ dumproots();
+ dumpmemstats();
+ dumpmemprof();
+ dumpint(TagEOF);
+ flush();
+
+ gp->param = nil;
+ gp->status = Grunning;
+ runtime·gogo(&gp->sched);
+}
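
A dump reader can validate the literal header before parsing anything else; the record stream that follows ends with a TagEOF record. A hypothetical reader-side check (only the header string is taken from this code; the encoding of the individual records produced by dumpint and friends is not shown in this hunk and is not assumed here):

    #include <stdio.h>
    #include <string.h>

    /* Return 1 if f starts with the go1.3 heap dump header. */
    static int
    checkheader(FILE *f)
    {
        static const char hdr[] = "go1.3 heap dump\n";
        char buf[sizeof hdr - 1];

        if(fread(buf, 1, sizeof buf, f) != sizeof buf)
            return 0;
        return memcmp(buf, hdr, sizeof buf) == 0;
    }
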
+
+void
+runtime∕debug·WriteHeapDump(uintptr fd)
+{
+ // Stop the world.
+ runtime·semacquire(&runtime·worldsema, false);
+ m->gcing = 1;
+ m->locks++;
+ runtime·stoptheworld();
+
+ // Update stats so we can dump them.
+ // As a side effect, flushes all the MCaches so the MSpan.freelist
+ // lists contain all the free objects.
+ runtime·updatememstats(nil);
+
+ // Set dump file.
+ dumpfd = fd;
+
+ // Call dump routine on M stack.
+ g->status = Gwaiting;
+ g->waitreason = "dumping heap";
+ runtime·mcall(mdump);
+
+ // Reset dump file.
+ dumpfd = 0;
+
+ // Start up the world again.
+ m->gcing = 0;
+ runtime·semrelease(&runtime·worldsema);
+ runtime·starttheworld();
+ m->locks--;
+}
+
+// Runs the specified gc program. Calls the callback for every
+// pointer-like field specified by the program and passes to the
+// callback the kind and offset of that field within the object.
+// offset is the offset in the object of the start of the program.
+// Returns a pointer to the opcode that ended the gc program (either
+// GC_END or GC_ARRAY_NEXT).
+static uintptr*
+playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg)
+{
+ uintptr len, elemsize, i, *end;
+
+ for(;;) {
+ switch(prog[0]) {
+ case GC_END:
+ return prog;
+ case GC_PTR:
+ callback(arg, FieldKindPtr, offset + prog[1]);
+ prog += 3;
+ break;
+ case GC_APTR:
+ callback(arg, FieldKindPtr, offset + prog[1]);
+ prog += 2;
+ break;
+ case GC_ARRAY_START:
+ len = prog[2];
+ elemsize = prog[3];
+ end = nil;
+ for(i = 0; i < len; i++) {
+ end = playgcprog(offset + prog[1] + i * elemsize, prog + 4, callback, arg);
+ if(end[0] != GC_ARRAY_NEXT)
+ runtime·throw("GC_ARRAY_START did not have matching GC_ARRAY_NEXT");
+ }
+ prog = end + 1;
+ break;
+ case GC_ARRAY_NEXT:
+ return prog;
+ case GC_CALL:
+ playgcprog(offset + prog[1], (uintptr*)((byte*)prog + *(int32*)&prog[2]), callback, arg);
+ prog += 3;
+ break;
+ case GC_CHAN_PTR:
+ callback(arg, FieldKindPtr, offset + prog[1]);
+ prog += 3;
+ break;
+ case GC_STRING:
+ callback(arg, FieldKindString, offset + prog[1]);
+ prog += 2;
+ break;
+ case GC_EFACE:
+ callback(arg, FieldKindEface, offset + prog[1]);
+ prog += 2;
+ break;
+ case GC_IFACE:
+ callback(arg, FieldKindIface, offset + prog[1]);
+ prog += 2;
+ break;
+ case GC_SLICE:
+ callback(arg, FieldKindSlice, offset + prog[1]);
+ prog += 3;
+ break;
+ case GC_REGION:
+ playgcprog(offset + prog[1], (uintptr*)prog[3] + 1, callback, arg);
+ prog += 4;
+ break;
+ default:
+ runtime·printf("%D\n", (uint64)prog[0]);
+ runtime·throw("bad gc op");
+ }
+ }
+}
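
A GC program is a flat array of words: an opcode followed by its inline operands, terminated by GC_END (or, inside an array body, GC_ARRAY_NEXT). GC_PTR, for example, carries an offset operand and the interpreter advances three words past it. A toy program and interpreter in the same shape; the opcode values and the meaning of the third GC_PTR word are placeholders, not the runtime's real encoding:

    #include <stdint.h>
    #include <stdio.h>

    enum { GC_END = 0, GC_PTR = 1, FieldKindPtr = 1 };	/* placeholder values */

    static void
    printfield(void *arg, uintptr_t kind, uintptr_t offset)
    {
        (void)arg;
        printf("kind %lu at offset %lu\n",
            (unsigned long)kind, (unsigned long)offset);
    }

    static void
    play(uintptr_t *prog, void (*cb)(void*, uintptr_t, uintptr_t), void *arg)
    {
        for(;;) {
            switch(prog[0]) {
            case GC_END:
                return;
            case GC_PTR:
                cb(arg, FieldKindPtr, prog[1]);
                prog += 3;	/* opcode, offset, one more operand word */
                break;
            default:
                return;	/* bad gc op */
            }
        }
    }

    int
    main(void)
    {
        /* "pointer at offset 0, pointer at offset 16, end" */
        uintptr_t prog[] = { GC_PTR, 0, 0, GC_PTR, 16, 0, GC_END };

        play(prog, printfield, 0);
        return 0;
    }
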
+
+static void
+dump_callback(void *p, uintptr kind, uintptr offset)
+{
+ USED(&p);
+ dumpint(kind);
+ dumpint(offset);
+}
+
+// dumpint() the kind & offset of each field in an object.
+static void
+dumpfields(uintptr *prog)
+{
+ playgcprog(0, prog, dump_callback, nil);
+ dumpint(FieldKindEol);
+}
+
+static void
+dumpeface_callback(void *p, uintptr kind, uintptr offset)
+{
+ Eface *e;
+
+ if(kind != FieldKindEface)
+ return;
+ e = (Eface*)((byte*)p + offset);
+ dumptype(e->type);
+}
+
+// The heap dump reader needs to be able to disambiguate
+// Eface entries. So it needs to know every type that might
+// appear in such an entry. The following two routines accomplish
+// that.
+
+// Dump all the types that appear in the type field of
+// any Eface contained in obj.
+static void
+dumpefacetypes(void *obj, uintptr size, Type *type, uintptr kind)
+{
+ uintptr i;
+
+ switch(kind) {
+ case TypeInfo_SingleObject:
+ playgcprog(0, (uintptr*)type->gc + 1, dumpeface_callback, obj);
+ break;
+ case TypeInfo_Array:
+ for(i = 0; i <= size - type->size; i += type->size)
+ playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
+ break;
+ case TypeInfo_Chan:
+ if(type->size == 0) // channels may have zero-sized objects in them
+ break;
+ for(i = runtime·Hchansize; i <= size - type->size; i += type->size)
+ playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
+ break;
+ }
+}
+
+// Dump all the types that appear in the type field of
+// any Eface described by this bit vector.
+static void
+dumpbvtypes(BitVector *bv, byte *base)
+{
+ uintptr i;
+
+ for(i = 0; i < bv->n; i += BitsPerPointer) {
+ if((bv->data[i/32] >> i%32 & 3) != BitsMultiWord)
+ continue;
+ switch(bv->data[(i+BitsPerPointer)/32] >> (i+BitsPerPointer)%32 & 3) {
+ case BitsString:
+ case BitsIface:
+ i += BitsPerPointer;
+ break;
+ case BitsSlice:
+ i += 2 * BitsPerPointer;
+ break;
+ case BitsEface:
+ dumptype(*(Type**)(base + i / BitsPerPointer * PtrSize));
+ i += BitsPerPointer;
+ break;
+ }
+ }
+}
diff --git a/src/pkg/runtime/iface.c b/src/pkg/runtime/iface.goc
index ecbdcc707..c0a17e303 100644
--- a/src/pkg/runtime/iface.c
+++ b/src/pkg/runtime/iface.goc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "type.h"
@@ -9,15 +10,11 @@
#include "malloc.h"
#include "../../cmd/ld/textflag.h"
-void
-runtime·printiface(Iface i)
-{
+func printiface(i Iface) {
runtime·printf("(%p,%p)", i.tab, i.data);
}
-void
-runtime·printeface(Eface e)
-{
+func printeface(e Eface) {
runtime·printf("(%p,%p)", e.type, e.data);
}
@@ -137,6 +134,20 @@ out:
return m;
}
+// Call the callback for every itab that is currently allocated.
+void
+runtime·iterate_itabs(void (*callback)(Itab*))
+{
+ int32 i;
+ Itab *tab;
+
+ for(i = 0; i < nelem(hash); i++) {
+ for(tab = hash[i]; tab != nil; tab = tab->link) {
+ callback(tab);
+ }
+ }
+}
+
static void
copyin(Type *t, void *src, void **dst)
{
@@ -150,7 +161,7 @@ copyin(Type *t, void *src, void **dst)
if(size <= sizeof(*dst))
alg->copy(size, dst, src);
else {
- p = runtime·mal(size);
+ p = runtime·cnew(t);
alg->copy(size, p, src);
*dst = p;
}
@@ -172,66 +183,45 @@ copyout(Type *t, void **src, void *dst)
}
#pragma textflag NOSPLIT
-void
-runtime·typ2Itab(Type *t, InterfaceType *inter, Itab **cache, Itab *ret)
-{
- Itab *tab;
-
+func typ2Itab(t *Type, inter *InterfaceType, cache **Itab) (tab *Itab) {
tab = itab(inter, t, 0);
runtime·atomicstorep(cache, tab);
- ret = tab;
- FLUSH(&ret);
}
-// func convT2I(typ *byte, typ2 *byte, cache **byte, elem any) (ret any)
#pragma textflag NOSPLIT
-void
-runtime·convT2I(Type *t, InterfaceType *inter, Itab **cache, ...)
-{
- byte *elem;
- Iface *ret;
+func convT2I(t *Type, inter *InterfaceType, cache **Itab, elem *byte) (ret Iface) {
Itab *tab;
- int32 wid;
- elem = (byte*)(&cache+1);
- wid = t->size;
- ret = (Iface*)(elem + ROUND(wid, Structrnd));
tab = runtime·atomicloadp(cache);
if(!tab) {
tab = itab(inter, t, 0);
runtime·atomicstorep(cache, tab);
}
- ret->tab = tab;
- copyin(t, elem, &ret->data);
+ ret.tab = tab;
+ copyin(t, elem, &ret.data);
}
-// func convT2E(typ *byte, elem any) (ret any)
#pragma textflag NOSPLIT
-void
-runtime·convT2E(Type *t, ...)
-{
- byte *elem;
- Eface *ret;
- int32 wid;
-
- elem = (byte*)(&t+1);
- wid = t->size;
- ret = (Eface*)(elem + ROUND(wid, Structrnd));
- ret->type = t;
- copyin(t, elem, &ret->data);
+func convT2E(t *Type, elem *byte) (ret Eface) {
+ ret.type = t;
+ copyin(t, elem, &ret.data);
}
static void assertI2Tret(Type *t, Iface i, byte *ret);
-// func ifaceI2T(typ *byte, iface any) (ret any)
+/*
+ * NOTE: Cannot use 'func' here: we would have to declare a return
+ * value, and the smallest types we can declare are 1 byte large.
+ * goc2c will zero the return value, but the actual return value
+ * might have size 0 bytes, in which case zeroing 1 or more bytes
+ * would be wrong.
+ * Using C lets us control (avoid) the initial zeroing.
+ */
#pragma textflag NOSPLIT
void
-runtime·assertI2T(Type *t, Iface i, ...)
+runtime·assertI2T(Type *t, Iface i, GoOutput retbase)
{
- byte *ret;
-
- ret = (byte*)(&i+1);
- assertI2Tret(t, i, ret);
+ assertI2Tret(t, i, (byte*)&retbase);
}
static void
@@ -256,47 +246,38 @@ assertI2Tret(Type *t, Iface i, byte *ret)
copyout(t, &i.data, ret);
}
-// func ifaceI2T2(typ *byte, iface any) (ret any, ok bool)
#pragma textflag NOSPLIT
-void
-runtime·assertI2T2(Type *t, Iface i, ...)
-{
- byte *ret;
+func assertI2T2(t *Type, i Iface) (ret byte, ...) {
bool *ok;
int32 wid;
- ret = (byte*)(&i+1);
wid = t->size;
- ok = (bool*)(ret + wid);
+ ok = (bool*)(&ret + wid);
if(i.tab == nil || i.tab->type != t) {
*ok = false;
- runtime·memclr(ret, wid);
+ runtime·memclr(&ret, wid);
return;
}
*ok = true;
- copyout(t, &i.data, ret);
+ copyout(t, &i.data, &ret);
}
-void
-runtime·assertI2TOK(Type *t, Iface i, bool ok)
-{
+func assertI2TOK(t *Type, i Iface) (ok bool) {
ok = i.tab!=nil && i.tab->type==t;
- FLUSH(&ok);
}
static void assertE2Tret(Type *t, Eface e, byte *ret);
-// func ifaceE2T(typ *byte, iface any) (ret any)
+/*
+ * NOTE: Cannot use 'func' here. See assertI2T above.
+ */
#pragma textflag NOSPLIT
void
-runtime·assertE2T(Type *t, Eface e, ...)
+runtime·assertE2T(Type *t, Eface e, GoOutput retbase)
{
- byte *ret;
-
- ret = (byte*)(&e+1);
- assertE2Tret(t, e, ret);
+ assertE2Tret(t, e, (byte*)&retbase);
}
static void
@@ -319,40 +300,29 @@ assertE2Tret(Type *t, Eface e, byte *ret)
copyout(t, &e.data, ret);
}
-// func ifaceE2T2(sigt *byte, iface any) (ret any, ok bool);
#pragma textflag NOSPLIT
-void
-runtime·assertE2T2(Type *t, Eface e, ...)
-{
- byte *ret;
+func assertE2T2(t *Type, e Eface) (ret byte, ...) {
bool *ok;
int32 wid;
- ret = (byte*)(&e+1);
wid = t->size;
- ok = (bool*)(ret + wid);
+ ok = (bool*)(&ret + wid);
if(t != e.type) {
*ok = false;
- runtime·memclr(ret, wid);
+ runtime·memclr(&ret, wid);
return;
}
*ok = true;
- copyout(t, &e.data, ret);
+ copyout(t, &e.data, &ret);
}
-void
-runtime·assertE2TOK(Type *t, Eface e, bool ok)
-{
+func assertE2TOK(t *Type, e Eface) (ok bool) {
ok = t==e.type;
- FLUSH(&ok);
}
-// func convI2E(elem any) (ret any)
-void
-runtime·convI2E(Iface i, Eface ret)
-{
+func convI2E(i Iface) (ret Eface) {
Itab *tab;
ret.data = i.data;
@@ -360,13 +330,9 @@ runtime·convI2E(Iface i, Eface ret)
ret.type = nil;
else
ret.type = tab->type;
- FLUSH(&ret);
}
-// func ifaceI2E(typ *byte, iface any) (ret any)
-void
-runtime·assertI2E(InterfaceType* inter, Iface i, Eface ret)
-{
+func assertI2E(inter *InterfaceType, i Iface) (ret Eface) {
Itab *tab;
Eface err;
@@ -380,13 +346,9 @@ runtime·assertI2E(InterfaceType* inter, Iface i, Eface ret)
}
ret.data = i.data;
ret.type = tab->type;
- FLUSH(&ret);
}
-// func ifaceI2E2(typ *byte, iface any) (ret any, ok bool)
-void
-runtime·assertI2E2(InterfaceType* inter, Iface i, Eface ret, bool ok)
-{
+func assertI2E2(inter *InterfaceType, i Iface) (ret Eface, ok bool) {
Itab *tab;
USED(inter);
@@ -399,14 +361,9 @@ runtime·assertI2E2(InterfaceType* inter, Iface i, Eface ret, bool ok)
ok = 1;
}
ret.data = i.data;
- FLUSH(&ret);
- FLUSH(&ok);
}
-// func convI2I(typ *byte, elem any) (ret any)
-void
-runtime·convI2I(InterfaceType* inter, Iface i, Iface ret)
-{
+func convI2I(inter *InterfaceType, i Iface) (ret Iface) {
Itab *tab;
ret.data = i.data;
@@ -416,7 +373,6 @@ runtime·convI2I(InterfaceType* inter, Iface i, Iface ret)
ret.tab = tab;
else
ret.tab = itab(inter, tab->type, 0);
- FLUSH(&ret);
}
void
@@ -437,17 +393,11 @@ runtime·ifaceI2I(InterfaceType *inter, Iface i, Iface *ret)
ret->tab = itab(inter, tab->type, 0);
}
-// func ifaceI2I(sigi *byte, iface any) (ret any)
-void
-runtime·assertI2I(InterfaceType* inter, Iface i, Iface ret)
-{
+func assertI2I(inter *InterfaceType, i Iface) (ret Iface) {
runtime·ifaceI2I(inter, i, &ret);
}
-// func ifaceI2I2(sigi *byte, iface any) (ret any, ok bool)
-void
-runtime·assertI2I2(InterfaceType *inter, Iface i, Iface ret, bool ok)
-{
+func assertI2I2(inter *InterfaceType, i Iface) (ret Iface, ok bool) {
Itab *tab;
tab = i.tab;
@@ -460,8 +410,6 @@ runtime·assertI2I2(InterfaceType *inter, Iface i, Iface ret, bool ok)
ret.tab = 0;
ok = 0;
}
- FLUSH(&ret);
- FLUSH(&ok);
}
void
@@ -492,25 +440,15 @@ runtime·ifaceE2I2(InterfaceType *inter, Eface e, Iface *ret)
return true;
}
-// For reflect
-// func ifaceE2I(t *InterfaceType, e interface{}, dst *Iface)
-void
-reflect·ifaceE2I(InterfaceType *inter, Eface e, Iface *dst)
-{
+func reflect·ifaceE2I(inter *InterfaceType, e Eface, dst *Iface) {
runtime·ifaceE2I(inter, e, dst);
}
-// func ifaceE2I(sigi *byte, iface any) (ret any)
-void
-runtime·assertE2I(InterfaceType* inter, Eface e, Iface ret)
-{
+func assertE2I(inter *InterfaceType, e Eface) (ret Iface) {
runtime·ifaceE2I(inter, e, &ret);
}
-// ifaceE2I2(sigi *byte, iface any) (ret any, ok bool)
-void
-runtime·assertE2I2(InterfaceType *inter, Eface e, Iface ret, bool ok)
-{
+func assertE2I2(inter *InterfaceType, e Eface) (ret Iface, ok bool) {
if(e.type == nil) {
ok = 0;
ret.data = nil;
@@ -522,14 +460,9 @@ runtime·assertE2I2(InterfaceType *inter, Eface e, Iface ret, bool ok)
ok = 1;
ret.data = e.data;
}
- FLUSH(&ret);
- FLUSH(&ok);
}
-// func ifaceE2E(typ *byte, iface any) (ret any)
-void
-runtime·assertE2E(InterfaceType* inter, Eface e, Eface ret)
-{
+func assertE2E(inter *InterfaceType, e Eface) (ret Eface) {
Type *t;
Eface err;
@@ -542,18 +475,12 @@ runtime·assertE2E(InterfaceType* inter, Eface e, Eface ret)
runtime·panic(err);
}
ret = e;
- FLUSH(&ret);
}
-// func ifaceE2E2(iface any) (ret any, ok bool)
-void
-runtime·assertE2E2(InterfaceType* inter, Eface e, Eface ret, bool ok)
-{
+func assertE2E2(inter *InterfaceType, e Eface) (ret Eface, ok bool) {
USED(inter);
ret = e;
ok = e.type != nil;
- FLUSH(&ret);
- FLUSH(&ok);
}
static uintptr
@@ -641,81 +568,53 @@ runtime·efaceeq_c(Eface e1, Eface e2)
return ifaceeq1(e1.data, e2.data, e1.type);
}
-// ifaceeq(i1 any, i2 any) (ret bool);
-void
-runtime·ifaceeq(Iface i1, Iface i2, bool ret)
-{
+func ifaceeq(i1 Iface, i2 Iface) (ret bool) {
ret = runtime·ifaceeq_c(i1, i2);
- FLUSH(&ret);
}
-// efaceeq(i1 any, i2 any) (ret bool)
-void
-runtime·efaceeq(Eface e1, Eface e2, bool ret)
-{
+func efaceeq(e1 Eface, e2 Eface) (ret bool) {
ret = runtime·efaceeq_c(e1, e2);
- FLUSH(&ret);
}
-// ifacethash(i1 any) (ret uint32);
-void
-runtime·ifacethash(Iface i1, uint32 ret)
-{
+func ifacethash(i1 Iface) (ret uint32) {
Itab *tab;
ret = 0;
tab = i1.tab;
if(tab != nil)
ret = tab->type->hash;
- FLUSH(&ret);
}
-// efacethash(e1 any) (ret uint32)
-void
-runtime·efacethash(Eface e1, uint32 ret)
-{
+func efacethash(e1 Eface) (ret uint32) {
Type *t;
ret = 0;
t = e1.type;
if(t != nil)
ret = t->hash;
- FLUSH(&ret);
}
-void
-reflect·unsafe_Typeof(Eface e, Eface ret)
-{
+func reflect·unsafe_Typeof(e Eface) (ret Eface) {
if(e.type == nil) {
ret.type = nil;
ret.data = nil;
} else {
ret = *(Eface*)(e.type);
}
- FLUSH(&ret);
}
-void
-reflect·unsafe_New(Type *t, void *ret)
-{
+func reflect·unsafe_New(t *Type) (ret *byte) {
ret = runtime·cnew(t);
- FLUSH(&ret);
}
-void
-reflect·unsafe_NewArray(Type *t, intgo n, void *ret)
-{
+func reflect·unsafe_NewArray(t *Type, n int) (ret *byte) {
ret = runtime·cnewarray(t, n);
- FLUSH(&ret);
}
-void
-reflect·typelinks(Slice ret)
-{
+func reflect·typelinks() (ret Slice) {
extern Type *typelink[], *etypelink[];
static int32 first = 1;
ret.array = (byte*)typelink;
ret.len = etypelink - typelink;
ret.cap = ret.len;
- FLUSH(&ret);
}
diff --git a/src/pkg/runtime/lfstack.c b/src/pkg/runtime/lfstack.goc
index 140384d3d..f7b8effa0 100644
--- a/src/pkg/runtime/lfstack.c
+++ b/src/pkg/runtime/lfstack.goc
@@ -4,6 +4,7 @@
// Lock-free stack.
+package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
@@ -17,6 +18,20 @@
#define PTR_MASK ((1ull<<PTR_BITS)-1)
#define CNT_MASK (0ull-1)
+#ifdef _64BIT
+#ifdef GOOS_solaris
+// SPARC64 and Solaris on AMD64 use all 64 bits of virtual addresses.
+// Use low-order three bits as ABA counter.
+// http://docs.oracle.com/cd/E19120-01/open.solaris/816-5138/6mba6ua5p/index.html
+#undef PTR_BITS
+#undef CNT_MASK
+#undef PTR_MASK
+#define PTR_BITS 0
+#define CNT_MASK 7
+#define PTR_MASK ((0ull-1)<<3)
+#endif
+#endif
+
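The lock-free stack packs a node pointer and an ABA counter into a single uint64. With PTR_BITS 0 on Solaris the pointer keeps all of its bits, so the counter is squeezed into the low three bits instead, which assumes nodes are at least 8-byte aligned. A pack/unpack sketch under exactly these masks:

    #include <stdint.h>

    #define CNT_MASK 7ull
    #define PTR_MASK ((0ull-1)<<3)

    /* Combine an 8-byte-aligned node pointer with a 3-bit ABA counter. */
    static uint64_t
    pack(void *node, uint64_t cnt)
    {
        return ((uint64_t)(uintptr_t)node & PTR_MASK) | (cnt & CNT_MASK);
    }

    static void*
    unpack(uint64_t v)
    {
        return (void*)(uintptr_t)(v & PTR_MASK);
    }
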
void
runtime·lfstackpush(uint64 *head, LFNode *node)
{
@@ -57,9 +72,10 @@ runtime·lfstackpop(uint64 *head)
}
}
-void
-runtime·lfstackpop2(uint64 *head, LFNode *node)
-{
+func lfstackpush_go(head *uint64, node *LFNode) {
+ runtime·lfstackpush(head, node);
+}
+
+func lfstackpop_go(head *uint64) (node *LFNode) {
node = runtime·lfstackpop(head);
- FLUSH(&node);
}
diff --git a/src/pkg/runtime/lfstack_test.go b/src/pkg/runtime/lfstack_test.go
index 505aae605..e51877704 100644
--- a/src/pkg/runtime/lfstack_test.go
+++ b/src/pkg/runtime/lfstack_test.go
@@ -71,6 +71,8 @@ func TestLFStack(t *testing.T) {
}
}
+var stress []*MyNode
+
func TestLFStackStress(t *testing.T) {
const K = 100
P := 4 * GOMAXPROCS(-1)
@@ -80,14 +82,15 @@ func TestLFStackStress(t *testing.T) {
}
// Create 2 stacks.
stacks := [2]*uint64{new(uint64), new(uint64)}
- // Need to keep additional referenfces to nodes, the stack is not all that type-safe.
- var nodes []*MyNode
+ // Need to keep additional references to nodes,
+ // the lock-free stack is not type-safe.
+ stress = nil
// Push K elements randomly onto the stacks.
sum := 0
for i := 0; i < K; i++ {
sum += i
node := &MyNode{data: i}
- nodes = append(nodes, node)
+ stress = append(stress, node)
LFStackPush(stacks[i%2], fromMyNode(node))
}
c := make(chan bool, P)
@@ -127,4 +130,7 @@ func TestLFStackStress(t *testing.T) {
if sum2 != sum {
t.Fatalf("Wrong sum %d/%d", sum2, sum)
}
+
+ // Let nodes be collected now.
+ stress = nil
}
diff --git a/src/pkg/runtime/lock_futex.c b/src/pkg/runtime/lock_futex.c
index e6e9be923..c16ac905d 100644
--- a/src/pkg/runtime/lock_futex.c
+++ b/src/pkg/runtime/lock_futex.c
@@ -130,8 +130,11 @@ runtime·notesleep(Note *n)
{
if(g != m->g0)
runtime·throw("notesleep not on g0");
- while(runtime·atomicload((uint32*)&n->key) == 0)
+ while(runtime·atomicload((uint32*)&n->key) == 0) {
+ m->blocked = true;
runtime·futexsleep((uint32*)&n->key, 0, -1);
+ m->blocked = false;
+ }
}
#pragma textflag NOSPLIT
@@ -143,8 +146,11 @@ notetsleep(Note *n, int64 ns, int64 deadline, int64 now)
// does not count against our nosplit stack sequence.
if(ns < 0) {
- while(runtime·atomicload((uint32*)&n->key) == 0)
+ while(runtime·atomicload((uint32*)&n->key) == 0) {
+ m->blocked = true;
runtime·futexsleep((uint32*)&n->key, 0, -1);
+ m->blocked = false;
+ }
return true;
}
@@ -153,7 +159,9 @@ notetsleep(Note *n, int64 ns, int64 deadline, int64 now)
deadline = runtime·nanotime() + ns;
for(;;) {
+ m->blocked = true;
runtime·futexsleep((uint32*)&n->key, 0, ns);
+ m->blocked = false;
if(runtime·atomicload((uint32*)&n->key) != 0)
break;
now = runtime·nanotime();
diff --git a/src/pkg/runtime/lock_sema.c b/src/pkg/runtime/lock_sema.c
index 3d58cc87f..ff8fdfd42 100644
--- a/src/pkg/runtime/lock_sema.c
+++ b/src/pkg/runtime/lock_sema.c
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin netbsd openbsd plan9 windows
+// +build darwin nacl netbsd openbsd plan9 solaris windows
#include "runtime.h"
#include "stack.h"
@@ -161,7 +161,9 @@ runtime·notesleep(Note *n)
return;
}
// Queued. Sleep.
+ m->blocked = true;
runtime·semasleep(-1);
+ m->blocked = false;
}
#pragma textflag NOSPLIT
@@ -181,18 +183,23 @@ notetsleep(Note *n, int64 ns, int64 deadline, M *mp)
if(ns < 0) {
// Queued. Sleep.
+ m->blocked = true;
runtime·semasleep(-1);
+ m->blocked = false;
return true;
}
deadline = runtime·nanotime() + ns;
for(;;) {
// Registered. Sleep.
+ m->blocked = true;
if(runtime·semasleep(ns) >= 0) {
+ m->blocked = false;
// Acquired semaphore, semawakeup unregistered us.
// Done.
return true;
}
+ m->blocked = false;
// Interrupted or timed out. Still registered. Semaphore not acquired.
ns = deadline - runtime·nanotime();
@@ -214,8 +221,10 @@ notetsleep(Note *n, int64 ns, int64 deadline, M *mp)
} else if(mp == (M*)LOCKED) {
// Wakeup happened so semaphore is available.
// Grab it to avoid getting out of sync.
+ m->blocked = true;
if(runtime·semasleep(-1) < 0)
runtime·throw("runtime: unable to acquire - semaphore out of sync");
+ m->blocked = false;
return true;
} else
runtime·throw("runtime: unexpected waitm - semaphore out of sync");
diff --git a/src/pkg/runtime/malloc.goc b/src/pkg/runtime/malloc.goc
index c3ede4abd..7b7e350d8 100644
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
@@ -19,6 +19,8 @@ package runtime
// Mark mheap as 'no pointers'; it does not contain interesting pointers but occupies ~45K.
#pragma dataflag NOPTR
MHeap runtime·mheap;
+#pragma dataflag NOPTR
+MStats mstats;
int32 runtime·checking;
@@ -26,6 +28,10 @@ extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
extern volatile intgo runtime·MemProfileRate;
+static MSpan* largealloc(uint32, uintptr*);
+static void profilealloc(void *v, uintptr size);
+static void settype(MSpan *s, void *v, uintptr typ);
+
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
@@ -34,12 +40,12 @@ void*
runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
{
int32 sizeclass;
+ uintptr tinysize, size1;
intgo rate;
MCache *c;
- MCacheList *l;
- uintptr npages;
MSpan *s;
- MLink *v;
+ MLink *v, *next;
+ byte *tiny;
if(size == 0) {
// All 0-length allocations use this pointer.
@@ -49,8 +55,8 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
}
if(m->mallocing)
runtime·throw("malloc/free - deadlock");
- // Disable preemption during settype_flush.
- // We can not use m->mallocing for this, because settype_flush calls mallocgc.
+ // Disable preemption during settype.
+ // We can not use m->mallocing for this, because settype calls mallocgc.
m->locks++;
m->mallocing = 1;
@@ -58,7 +64,82 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
size += sizeof(uintptr);
c = m->mcache;
- if(size <= MaxSmallSize) {
+ if(!runtime·debug.efence && size <= MaxSmallSize) {
+ if((flag&(FlagNoScan|FlagNoGC)) == FlagNoScan && size < TinySize) {
+ // Tiny allocator.
+ //
+ // The tiny allocator combines several tiny allocation requests
+ // into a single memory block. The resulting memory block
+ // is freed when all subobjects are unreachable. The subobjects
+ // must be FlagNoScan (have no pointers); this ensures that
+ // the amount of potentially wasted memory is bounded.
+ //
+ // The size of the memory block used for combining (TinySize) is tunable.
+ // The current setting is 16 bytes, which corresponds to 2x worst-case
+ // memory wastage (when all but one of the subobjects are unreachable).
+ // 8 bytes would result in no wastage at all, but provides fewer
+ // opportunities for combining.
+ // 32 bytes provides more opportunities for combining,
+ // but can lead to 4x worst-case wastage.
+ // The best-case saving is 8x regardless of block size.
+ //
+ // Objects obtained from the tiny allocator must not be freed explicitly.
+ // So when an object is to be freed explicitly, we ensure that
+ // its size >= TinySize.
+ //
+ // SetFinalizer has a special case for objects potentially coming
+ // from the tiny allocator; in that case it allows setting a finalizer
+ // on an inner byte of a memory block.
+ //
+ // The main targets of the tiny allocator are small strings and
+ // standalone escaping variables. On a json benchmark
+ // the allocator reduces the number of allocations by ~12% and
+ // the heap size by ~20%.
+
+ tinysize = c->tinysize;
+ if(size <= tinysize) {
+ tiny = c->tiny;
+ // Align tiny pointer for required (conservative) alignment.
+ if((size&7) == 0)
+ tiny = (byte*)ROUND((uintptr)tiny, 8);
+ else if((size&3) == 0)
+ tiny = (byte*)ROUND((uintptr)tiny, 4);
+ else if((size&1) == 0)
+ tiny = (byte*)ROUND((uintptr)tiny, 2);
+ size1 = size + (tiny - c->tiny);
+ if(size1 <= tinysize) {
+ // The object fits into existing tiny block.
+ v = (MLink*)tiny;
+ c->tiny += size1;
+ c->tinysize -= size1;
+ m->mallocing = 0;
+ m->locks--;
+ if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
+ g->stackguard0 = StackPreempt;
+ return v;
+ }
+ }
+ // Allocate a new TinySize block.
+ s = c->alloc[TinySizeClass];
+ if(s->freelist == nil)
+ s = runtime·MCache_Refill(c, TinySizeClass);
+ v = s->freelist;
+ next = v->next;
+ s->freelist = next;
+ s->ref++;
+ if(next != nil) // prefetching nil leads to a DTLB miss
+ PREFETCH(next);
+ ((uint64*)v)[0] = 0;
+ ((uint64*)v)[1] = 0;
+ // See if we need to replace the existing tiny block with the new one
+ // based on the amount of remaining free space.
+ if(TinySize-size > tinysize) {
+ c->tiny = (byte*)v + size;
+ c->tinysize = TinySize - size;
+ }
+ size = TinySize;
+ goto done;
+ }
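
The fast path above is a bump allocator with a twist: the cursor is rounded only as far as the request implies (a size that is a multiple of 8 gets 8-byte alignment, a multiple of 4 gets 4-byte alignment, and so on), so oddly sized requests waste less room. A stripped-down sketch of just that path, with a single static block and no refill; TINY mirrors TinySize = 16, everything else is simplified:

    #include <stddef.h>
    #include <stdint.h>

    #define TINY 16
    #define ROUNDUP(x, n) (((x)+(n)-1) & ~(uintptr_t)((n)-1))

    static unsigned char block[TINY];
    static uintptr_t cursor;

    /* Carve size bytes out of block, aligning the cursor to the
     * conservative alignment implied by the low bits of size. */
    static void*
    tinyalloc(size_t size)
    {
        uintptr_t p = cursor;

        if((size&7) == 0)
            p = ROUNDUP(p, 8);
        else if((size&3) == 0)
            p = ROUNDUP(p, 4);
        else if((size&1) == 0)
            p = ROUNDUP(p, 2);
        if(p+size > TINY)
            return 0;	/* the real allocator grabs a fresh block here */
        cursor = p + size;
        return block + p;
    }
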
// Allocate from mcache free lists.
// Inlined version of SizeToClass().
if(size <= 1024-8)
@@ -66,87 +147,117 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
else
sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
size = runtime·class_to_size[sizeclass];
- l = &c->list[sizeclass];
- if(l->list == nil)
- runtime·MCache_Refill(c, sizeclass);
- v = l->list;
- l->list = v->next;
- l->nlist--;
+ s = c->alloc[sizeclass];
+ if(s->freelist == nil)
+ s = runtime·MCache_Refill(c, sizeclass);
+ v = s->freelist;
+ next = v->next;
+ s->freelist = next;
+ s->ref++;
+ if(next != nil) // prefetching nil leads to a DTLB miss
+ PREFETCH(next);
if(!(flag & FlagNoZero)) {
v->next = nil;
// block is zeroed iff second word is zero ...
- if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
+ if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0)
runtime·memclr((byte*)v, size);
}
+ done:
c->local_cachealloc += size;
} else {
- // TODO(rsc): Report tracebacks for very large allocations.
-
// Allocate directly from heap.
- npages = size >> PageShift;
- if((size & PageMask) != 0)
- npages++;
- s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero));
- if(s == nil)
- runtime·throw("out of memory");
- s->limit = (byte*)(s->start<<PageShift) + size;
- size = npages<<PageShift;
+ s = largealloc(flag, &size);
v = (void*)(s->start << PageShift);
-
- // setup for mark sweep
- runtime·markspan(v, 0, 0, true);
}
- if(!(flag & FlagNoGC))
- runtime·markallocated(v, size, (flag&FlagNoScan) != 0);
+ if(flag & FlagNoGC)
+ runtime·marknogc(v);
+ else if(!(flag & FlagNoScan))
+ runtime·markscan(v);
if(DebugTypeAtBlockEnd)
*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;
+ m->mallocing = 0;
// TODO: save type even if FlagNoScan? Potentially expensive but might help
// heap profiling/tracing.
- if(UseSpanType && !(flag & FlagNoScan) && typ != 0) {
- uintptr *buf, i;
-
- buf = m->settype_buf;
- i = m->settype_bufsize;
- buf[i++] = (uintptr)v;
- buf[i++] = typ;
- m->settype_bufsize = i;
+ if(UseSpanType && !(flag & FlagNoScan) && typ != 0)
+ settype(s, v, typ);
+
+ if(raceenabled)
+ runtime·racemalloc(v, size);
+
+ if(runtime·debug.allocfreetrace)
+ runtime·tracealloc(v, size, typ);
+
+ if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
+ if(size < rate && size < c->next_sample)
+ c->next_sample -= size;
+ else
+ profilealloc(v, size);
}
- m->mallocing = 0;
- if(UseSpanType && !(flag & FlagNoScan) && typ != 0 && m->settype_bufsize == nelem(m->settype_buf))
- runtime·settype_flush(m);
m->locks--;
if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
g->stackguard0 = StackPreempt;
- if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
- if(size >= rate)
- goto profile;
- if(m->mcache->next_sample > size)
- m->mcache->next_sample -= size;
- else {
- // pick next profile time
- // If you change this, also change allocmcache.
- if(rate > 0x3fffffff) // make 2*rate not overflow
- rate = 0x3fffffff;
- m->mcache->next_sample = runtime·fastrand1() % (2*rate);
- profile:
- runtime·setblockspecial(v, true);
- runtime·MProf_Malloc(v, size);
- }
- }
-
if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
runtime·gc(0);
- if(raceenabled)
- runtime·racemalloc(v, size);
return v;
}
+static MSpan*
+largealloc(uint32 flag, uintptr *sizep)
+{
+ uintptr npages, size;
+ MSpan *s;
+ void *v;
+
+ // Allocate directly from heap.
+ size = *sizep;
+ if(size + PageSize < size)
+ runtime·throw("out of memory");
+ npages = size >> PageShift;
+ if((size & PageMask) != 0)
+ npages++;
+ s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero));
+ if(s == nil)
+ runtime·throw("out of memory");
+ s->limit = (byte*)(s->start<<PageShift) + size;
+ *sizep = npages<<PageShift;
+ v = (void*)(s->start << PageShift);
+ // setup for mark sweep
+ runtime·markspan(v, 0, 0, true);
+ return s;
+}
+
+static void
+profilealloc(void *v, uintptr size)
+{
+ uintptr rate;
+ int32 next;
+ MCache *c;
+
+ c = m->mcache;
+ rate = runtime·MemProfileRate;
+ if(size < rate) {
+ // pick next profile time
+ // If you change this, also change allocmcache.
+ if(rate > 0x3fffffff) // make 2*rate not overflow
+ rate = 0x3fffffff;
+ next = runtime·fastrand1() % (2*rate);
+ // Subtract the "remainder" of the current allocation.
+ // Otherwise objects that are close in size to the sampling rate
+ // will be under-sampled, because we consistently discard this remainder.
+ next -= (size - c->next_sample);
+ if(next < 0)
+ next = 0;
+ c->next_sample = next;
+ }
+ runtime·MProf_Malloc(v, size);
+}
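
The threshold arithmetic above draws the next sample point uniformly from [0, 2*rate), which has mean rate, and then subtracts the unspent remainder of the triggering allocation so that objects close in size to the rate are not systematically under-sampled. The same computation in isolation, with rand() standing in for the runtime's fastrand1:

    #include <stdint.h>
    #include <stdlib.h>

    /* remaining is the budget left before this allocation of size
     * bytes tripped the sampler; return the next sampling budget. */
    static int32_t
    nextsample(uintptr_t rate, uintptr_t size, int32_t remaining)
    {
        int32_t next;

        if(rate > 0x3fffffff)	/* make 2*rate not overflow */
            rate = 0x3fffffff;
        next = rand() % (2*rate);
        next -= (int32_t)(size - remaining);
        if(next < 0)
            next = 0;
        return next;
    }
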
+
void*
runtime·malloc(uintptr size)
{
@@ -160,7 +271,6 @@ runtime·free(void *v)
int32 sizeclass;
MSpan *s;
MCache *c;
- uint32 prof;
uintptr size;
if(v == nil)
@@ -177,39 +287,73 @@ runtime·free(void *v)
runtime·printf("free %p: not an allocated block\n", v);
runtime·throw("free runtime·mlookup");
}
- prof = runtime·blockspecial(v);
+ size = s->elemsize;
+ sizeclass = s->sizeclass;
+ // Objects that are smaller than TinySize can be allocated using the tiny allocator;
+ // if such an object were combined with an object that has a finalizer, we would crash.
+ if(size < TinySize)
+ runtime·throw("freeing too small block");
- if(raceenabled)
- runtime·racefree(v);
+ if(runtime·debug.allocfreetrace)
+ runtime·tracefree(v, size);
+
+ // Ensure that the span is swept.
+ // If we free into an unswept span, we will corrupt GC bitmaps.
+ runtime·MSpan_EnsureSwept(s);
+
+ if(s->specials != nil)
+ runtime·freeallspecials(s, v, size);
- // Find size class for v.
- sizeclass = s->sizeclass;
c = m->mcache;
if(sizeclass == 0) {
// Large object.
- size = s->npages<<PageShift;
- *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
+ s->needzero = 1;
// Must mark v freed before calling unmarkspan and MHeap_Free:
// they might coalesce v into other spans and change the bitmap further.
- runtime·markfreed(v, size);
+ runtime·markfreed(v);
runtime·unmarkspan(v, 1<<PageShift);
- runtime·MHeap_Free(&runtime·mheap, s, 1);
+ // NOTE(rsc,dvyukov): The original implementation of efence
+ // in CL 22060046 used SysFree instead of SysFault, so that
+ // the operating system would eventually give the memory
+ // back to us again, so that an efence program could run
+ // longer without running out of memory. Unfortunately,
+ // calling SysFree here without any kind of adjustment of the
+ // heap data structures means that when the memory does
+ // come back to us, we have the wrong metadata for it, either in
+ // the MSpan structures or in the garbage collection bitmap.
+ // Using SysFault here means that the program will run out of
+ // memory fairly quickly in efence mode, but at least it won't
+ // have mysterious crashes due to confused memory reuse.
+ // It should be possible to switch back to SysFree if we also
+ // implement and then call some kind of MHeap_DeleteSpan.
+ if(runtime·debug.efence)
+ runtime·SysFault((void*)(s->start<<PageShift), size);
+ else
+ runtime·MHeap_Free(&runtime·mheap, s, 1);
c->local_nlargefree++;
c->local_largefree += size;
} else {
// Small object.
- size = runtime·class_to_size[sizeclass];
- if(size > sizeof(uintptr))
+ if(size > 2*sizeof(uintptr))
((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
+ else if(size > sizeof(uintptr))
+ ((uintptr*)v)[1] = 0;
// Must mark v freed before calling MCache_Free:
// it might coalesce v and other blocks into a bigger span
// and change the bitmap further.
- runtime·markfreed(v, size);
c->local_nsmallfree[sizeclass]++;
- runtime·MCache_Free(c, v, sizeclass, size);
+ c->local_cachealloc -= size;
+ if(c->alloc[sizeclass] == s) {
+ // We own the span, so we can just add v to the freelist
+ runtime·markfreed(v);
+ ((MLink*)v)->next = s->freelist;
+ s->freelist = v;
+ s->ref--;
+ } else {
+ // Someone else owns this span. Add to free queue.
+ runtime·MCache_Free(c, v, sizeclass, size);
+ }
}
- if(prof)
- runtime·MProf_Free(v, size);
m->mallocing = 0;
}
@@ -261,37 +405,6 @@ runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
return 1;
}
-MCache*
-runtime·allocmcache(void)
-{
- intgo rate;
- MCache *c;
-
- runtime·lock(&runtime·mheap);
- c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
- runtime·unlock(&runtime·mheap);
- runtime·memclr((byte*)c, sizeof(*c));
-
- // Set first allocation sample size.
- rate = runtime·MemProfileRate;
- if(rate > 0x3fffffff) // make 2*rate not overflow
- rate = 0x3fffffff;
- if(rate != 0)
- c->next_sample = runtime·fastrand1() % (2*rate);
-
- return c;
-}
-
-void
-runtime·freemcache(MCache *c)
-{
- runtime·MCache_ReleaseAll(c);
- runtime·lock(&runtime·mheap);
- runtime·purgecachedstats(c);
- runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
- runtime·unlock(&runtime·mheap);
-}
-
void
runtime·purgecachedstats(MCache *c)
{
@@ -314,33 +427,42 @@ runtime·purgecachedstats(MCache *c)
}
}
-uintptr runtime·sizeof_C_MStats = sizeof(MStats);
+// The size of the trailing by_size array differs between Go and C:
+// NumSizeClasses was changed, but we cannot change the Go struct for backward-compatibility reasons.
+// sizeof_C_MStats is what C thinks the size of the Go struct is
+// (with NumSizeClasses now 67, the six extra by_size entries are subtracted).
+uintptr runtime·sizeof_C_MStats = sizeof(MStats) - (NumSizeClasses - 61) * sizeof(mstats.by_size[0]);
#define MaxArena32 (2U<<30)
void
runtime·mallocinit(void)
{
- byte *p;
- uintptr arena_size, bitmap_size, spans_size;
+ byte *p, *p1;
+ uintptr arena_size, bitmap_size, spans_size, p_size;
extern byte end[];
- byte *want;
uintptr limit;
uint64 i;
+ bool reserved;
p = nil;
+ p_size = 0;
arena_size = 0;
bitmap_size = 0;
spans_size = 0;
+ reserved = false;
// for 64-bit build
USED(p);
+ USED(p_size);
USED(arena_size);
USED(bitmap_size);
USED(spans_size);
runtime·InitSizes();
+ if(runtime·class_to_size[TinySizeClass] != TinySize)
+ runtime·throw("bad TinySizeClass");
+
// limit = runtime·memlimit();
// See https://code.google.com/p/go/issues/detail?id=5049
// TODO(rsc): Fix after 1.1.
@@ -380,7 +502,8 @@ runtime·mallocinit(void)
spans_size = ROUND(spans_size, PageSize);
for(i = 0; i <= 0x7f; i++) {
p = (void*)(i<<40 | 0x00c0ULL<<32);
- p = runtime·SysReserve(p, bitmap_size + spans_size + arena_size);
+ p_size = bitmap_size + spans_size + arena_size + PageSize;
+ p = runtime·SysReserve(p, p_size, &reserved);
if(p != nil)
break;
}
@@ -422,91 +545,119 @@ runtime·mallocinit(void)
// So adjust it upward a little bit ourselves: 1/4 MB to get
// away from the running binary image and then round up
// to a MB boundary.
- want = (byte*)ROUND((uintptr)end + (1<<18), 1<<20);
- p = runtime·SysReserve(want, bitmap_size + spans_size + arena_size);
+ p = (byte*)ROUND((uintptr)end + (1<<18), 1<<20);
+ p_size = bitmap_size + spans_size + arena_size + PageSize;
+ p = runtime·SysReserve(p, p_size, &reserved);
if(p == nil)
runtime·throw("runtime: cannot reserve arena virtual address space");
- if((uintptr)p & (((uintptr)1<<PageShift)-1))
- runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p,
- bitmap_size+spans_size+arena_size);
}
- if((uintptr)p & (((uintptr)1<<PageShift)-1))
- runtime·throw("runtime: SysReserve returned unaligned address");
- runtime·mheap.spans = (MSpan**)p;
- runtime·mheap.bitmap = p + spans_size;
- runtime·mheap.arena_start = p + spans_size + bitmap_size;
+ // PageSize can be larger than the OS's definition of page size,
+ // so SysReserve can give us a PageSize-unaligned pointer.
+ // To overcome this we ask for PageSize more and round up the pointer.
+ p1 = (byte*)ROUND((uintptr)p, PageSize);
+
+ runtime·mheap.spans = (MSpan**)p1;
+ runtime·mheap.bitmap = p1 + spans_size;
+ runtime·mheap.arena_start = p1 + spans_size + bitmap_size;
runtime·mheap.arena_used = runtime·mheap.arena_start;
- runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size;
+ runtime·mheap.arena_end = p + p_size;
+ runtime·mheap.arena_reserved = reserved;
+
+ if(((uintptr)runtime·mheap.arena_start & (PageSize-1)) != 0)
+ runtime·throw("misrounded allocation in mallocinit");
// Initialize the rest of the allocator.
runtime·MHeap_Init(&runtime·mheap);
m->mcache = runtime·allocmcache();
// See if it works.
- runtime·free(runtime·malloc(1));
+ runtime·free(runtime·malloc(TinySize));
}
void*
runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
{
- byte *p;
+ byte *p, *p_end;
+ uintptr p_size;
+ bool reserved;
if(n > h->arena_end - h->arena_used) {
// We are in 32-bit mode, maybe we didn't use all possible address space yet.
// Reserve some more space.
byte *new_end;
- uintptr needed;
- needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
- needed = ROUND(needed, 256<<20);
- new_end = h->arena_end + needed;
+ p_size = ROUND(n + PageSize, 256<<20);
+ new_end = h->arena_end + p_size;
if(new_end <= h->arena_start + MaxArena32) {
- p = runtime·SysReserve(h->arena_end, new_end - h->arena_end);
- if(p == h->arena_end)
+ // TODO: It would be bad if part of the arena
+ // is reserved and part is not.
+ p = runtime·SysReserve(h->arena_end, p_size, &reserved);
+ if(p == h->arena_end) {
h->arena_end = new_end;
+ h->arena_reserved = reserved;
+ }
+ else if(p+p_size <= h->arena_start + MaxArena32) {
+ // Keep everything page-aligned.
+ // Our pages are bigger than hardware pages.
+ h->arena_end = p+p_size;
+ h->arena_used = p + (-(uintptr)p&(PageSize-1));
+ h->arena_reserved = reserved;
+ } else {
+ uint64 stat;
+ stat = 0;
+ runtime·SysFree(p, p_size, &stat);
+ }
}
}
if(n <= h->arena_end - h->arena_used) {
// Keep taking from our reservation.
p = h->arena_used;
- runtime·SysMap(p, n, &mstats.heap_sys);
+ runtime·SysMap(p, n, h->arena_reserved, &mstats.heap_sys);
h->arena_used += n;
runtime·MHeap_MapBits(h);
runtime·MHeap_MapSpans(h);
if(raceenabled)
runtime·racemapshadow(p, n);
+
+ if(((uintptr)p & (PageSize-1)) != 0)
+ runtime·throw("misrounded allocation in MHeap_SysAlloc");
return p;
}
// If using 64-bit, our reservation is all we have.
- if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
+ if(h->arena_end - h->arena_start >= MaxArena32)
return nil;
// On 32-bit, once the reservation is gone we can
// try to get memory at a location chosen by the OS
// and hope that it is in the range we allocated bitmap for.
- p = runtime·SysAlloc(n, &mstats.heap_sys);
+ p_size = ROUND(n, PageSize) + PageSize;
+ p = runtime·SysAlloc(p_size, &mstats.heap_sys);
if(p == nil)
return nil;
- if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
+ if(p < h->arena_start || p+p_size - h->arena_start >= MaxArena32) {
runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
p, h->arena_start, h->arena_start+MaxArena32);
- runtime·SysFree(p, n, &mstats.heap_sys);
+ runtime·SysFree(p, p_size, &mstats.heap_sys);
return nil;
}
-
+
+ p_end = p + p_size;
+ p += -(uintptr)p & (PageSize-1);
if(p+n > h->arena_used) {
h->arena_used = p+n;
- if(h->arena_used > h->arena_end)
- h->arena_end = h->arena_used;
+ if(p_end > h->arena_end)
+ h->arena_end = p_end;
runtime·MHeap_MapBits(h);
runtime·MHeap_MapSpans(h);
if(raceenabled)
runtime·racemapshadow(p, n);
}
+ if(((uintptr)p & (PageSize-1)) != 0)
+ runtime·throw("misrounded allocation in MHeap_SysAlloc");
return p;
}
@@ -534,7 +685,7 @@ runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat)
if(align != 0) {
if(align&(align-1))
- runtime·throw("persistentalloc: align is now a power of 2");
+ runtime·throw("persistentalloc: align is not a power of 2");
if(align > PageSize)
runtime·throw("persistentalloc: align is too large");
} else
@@ -562,95 +713,67 @@ runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat)
return p;
}
-static Lock settype_lock;
-
-void
-runtime·settype_flush(M *mp)
+static void
+settype(MSpan *s, void *v, uintptr typ)
{
- uintptr *buf, *endbuf;
uintptr size, ofs, j, t;
uintptr ntypes, nbytes2, nbytes3;
uintptr *data2;
byte *data3;
- void *v;
- uintptr typ, p;
- MSpan *s;
- buf = mp->settype_buf;
- endbuf = buf + mp->settype_bufsize;
-
- runtime·lock(&settype_lock);
- while(buf < endbuf) {
- v = (void*)*buf;
- *buf = 0;
- buf++;
- typ = *buf;
- buf++;
-
- // (Manually inlined copy of runtime·MHeap_Lookup)
- p = (uintptr)v>>PageShift;
- if(sizeof(void*) == 8)
- p -= (uintptr)runtime·mheap.arena_start >> PageShift;
- s = runtime·mheap.spans[p];
-
- if(s->sizeclass == 0) {
- s->types.compression = MTypes_Single;
- s->types.data = typ;
- continue;
+ if(s->sizeclass == 0) {
+ s->types.compression = MTypes_Single;
+ s->types.data = typ;
+ return;
+ }
+ size = s->elemsize;
+ ofs = ((uintptr)v - (s->start<<PageShift)) / size;
+
+ switch(s->types.compression) {
+ case MTypes_Empty:
+ ntypes = (s->npages << PageShift) / size;
+ nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
+ data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
+ s->types.compression = MTypes_Bytes;
+ s->types.data = (uintptr)data3;
+ ((uintptr*)data3)[1] = typ;
+ data3[8*sizeof(uintptr) + ofs] = 1;
+ break;
+
+ case MTypes_Words:
+ ((uintptr*)s->types.data)[ofs] = typ;
+ break;
+
+ case MTypes_Bytes:
+ data3 = (byte*)s->types.data;
+ for(j=1; j<8; j++) {
+ if(((uintptr*)data3)[j] == typ) {
+ break;
+ }
+ if(((uintptr*)data3)[j] == 0) {
+ ((uintptr*)data3)[j] = typ;
+ break;
+ }
}
-
- size = s->elemsize;
- ofs = ((uintptr)v - (s->start<<PageShift)) / size;
-
- switch(s->types.compression) {
- case MTypes_Empty:
+ if(j < 8) {
+ data3[8*sizeof(uintptr) + ofs] = j;
+ } else {
ntypes = (s->npages << PageShift) / size;
- nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
- data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
- s->types.compression = MTypes_Bytes;
- s->types.data = (uintptr)data3;
- ((uintptr*)data3)[1] = typ;
- data3[8*sizeof(uintptr) + ofs] = 1;
- break;
-
- case MTypes_Words:
- ((uintptr*)s->types.data)[ofs] = typ;
- break;
-
- case MTypes_Bytes:
- data3 = (byte*)s->types.data;
- for(j=1; j<8; j++) {
- if(((uintptr*)data3)[j] == typ) {
- break;
- }
- if(((uintptr*)data3)[j] == 0) {
- ((uintptr*)data3)[j] = typ;
- break;
- }
+ nbytes2 = ntypes * sizeof(uintptr);
+ data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
+ s->types.compression = MTypes_Words;
+ s->types.data = (uintptr)data2;
+
+ // Move the contents of data3 to data2. Then deallocate data3.
+ for(j=0; j<ntypes; j++) {
+ t = data3[8*sizeof(uintptr) + j];
+ t = ((uintptr*)data3)[t];
+ data2[j] = t;
}
- if(j < 8) {
- data3[8*sizeof(uintptr) + ofs] = j;
- } else {
- ntypes = (s->npages << PageShift) / size;
- nbytes2 = ntypes * sizeof(uintptr);
- data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
- s->types.compression = MTypes_Words;
- s->types.data = (uintptr)data2;
-
- // Move the contents of data3 to data2. Then deallocate data3.
- for(j=0; j<ntypes; j++) {
- t = data3[8*sizeof(uintptr) + j];
- t = ((uintptr*)data3)[t];
- data2[j] = t;
- }
- data2[ofs] = typ;
- }
- break;
+ data2[ofs] = typ;
}
+ break;
}
- runtime·unlock(&settype_lock);
-
- mp->settype_bufsize = 0;
}
uintptr
@@ -683,9 +806,7 @@ runtime·gettype(void *v)
runtime·throw("runtime·gettype: invalid compression kind");
}
if(0) {
- runtime·lock(&settype_lock);
runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
- runtime·unlock(&settype_lock);
}
return t;
}
@@ -701,11 +822,8 @@ runtime·mal(uintptr n)
}
#pragma textflag NOSPLIT
-void
-runtime·new(Type *typ, uint8 *ret)
-{
+func new(typ *Type) (ret *uint8) {
ret = runtime·mallocgc(typ->size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
- FLUSH(&ret);
}
static void*
@@ -732,7 +850,7 @@ runtime·cnewarray(Type *typ, intgo n)
}
func GC() {
- runtime·gc(1);
+ runtime·gc(2); // force GC and do eager sweep
}
func SetFinalizer(obj Eface, finalizer Eface) {
@@ -754,14 +872,30 @@ func SetFinalizer(obj Eface, finalizer Eface) {
runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
goto throw;
}
+ ot = (PtrType*)obj.type;
+ // As an implementation detail we do not run finalizers for zero-sized objects,
+ // because we use &runtime·zerobase for all such allocations.
+ if(ot->elem != nil && ot->elem->size == 0)
+ return;
+ // The following check is required for cases when a user passes a pointer to a composite
+ // literal, but the compiler makes it a pointer to a global. For example:
+ // var Foo = &Object{}
+ // func main() {
+ // runtime.SetFinalizer(Foo, nil)
+ // }
+ // See issue 7656.
+ if((byte*)obj.data < runtime·mheap.arena_start || runtime·mheap.arena_used <= (byte*)obj.data)
+ return;
if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
- runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
- goto throw;
+ // As an implementation detail we allow setting finalizers for an inner byte
+ // of an object if it could come from the tiny allocator (see mallocgc for details).
+ if(ot->elem == nil || (ot->elem->kind&KindNoPointers) == 0 || ot->elem->size >= TinySize) {
+ runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.data);
+ goto throw;
+ }
}
- nret = 0;
- ot = (PtrType*)obj.type;
- fint = nil;
if(finalizer.type != nil) {
+ runtime·createfing();
if(finalizer.type->kind != KindFunc)
goto badfunc;
ft = (FuncType*)finalizer.type;
@@ -781,16 +915,20 @@ func SetFinalizer(obj Eface, finalizer Eface) {
goto badfunc;
// compute size needed for return parameters
+ nret = 0;
for(i=0; i<ft->out.len; i++) {
t = ((Type**)ft->out.array)[i];
nret = ROUND(nret, t->align) + t->size;
}
nret = ROUND(nret, sizeof(void*));
- }
-
- if(!runtime·addfinalizer(obj.data, finalizer.data, nret, fint, ot)) {
- runtime·printf("runtime.SetFinalizer: finalizer already set\n");
- goto throw;
+ ot = (PtrType*)obj.type;
+ if(!runtime·addfinalizer(obj.data, finalizer.data, nret, fint, ot)) {
+ runtime·printf("runtime.SetFinalizer: finalizer already set\n");
+ goto throw;
+ }
+ } else {
+ // NOTE: asking to remove a finalizer when there currently isn't one set is OK.
+ runtime·removefinalizer(obj.data);
}
return;
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index 2c66c6fa7..798c130ad 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -20,7 +20,7 @@
// MHeap: the malloc heap, managed at page (4096-byte) granularity.
// MSpan: a run of pages managed by the MHeap.
// MCentral: a shared free list for a given size class.
-// MCache: a per-thread (in Go, per-M) cache for small objects.
+// MCache: a per-thread (in Go, per-P) cache for small objects.
// MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
@@ -66,14 +66,14 @@
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
-// the second word of the object is zero. The spans in the
-// page heap are always zeroed. When a span full of objects
-// is returned to the page heap, the objects that need to be
-// are zeroed first. There are two main benefits to delaying the
+// the second word of the object is zero. A span in the
+// page heap is zeroed unless s->needzero is set. When a span
+// is allocated to break into small objects, it is zeroed if needed
+// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
// 1. stack frames allocated from the small object lists
-// can avoid zeroing altogether.
+// or the page heap can avoid zeroing altogether.
// 2. the cost of zeroing when reusing a small object is
// charged to the mutator, not the garbage collector.
//
@@ -90,7 +90,7 @@ typedef struct GCStats GCStats;
enum
{
- PageShift = 12,
+ PageShift = 13,
PageSize = 1<<PageShift,
PageMask = PageSize - 1,
};
@@ -103,11 +103,15 @@ enum
// size classes. NumSizeClasses is that number. It's needed here
// because there are static arrays of this length; when msize runs its
// size choosing algorithm it double-checks that NumSizeClasses agrees.
- NumSizeClasses = 61,
+ NumSizeClasses = 67,
// Tunable constants.
MaxSmallSize = 32<<10,
+ // Tiny allocator parameters, see "Tiny allocator" comment in malloc.goc.
+ TinySize = 16,
+ TinySizeClass = 2,
+
FixAllocChunk = 16<<10, // Chunk size for FixAlloc
MaxMHeapList = 1<<(20 - PageShift), // Maximum page length for fixed-size list in MHeap.
HeapAllocChunk = 1<<20, // Chunk size for heap growth
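
TinySize and TinySizeClass parameterize the new tiny allocator referenced above. A hedged sketch of the bump-pointer idea behind the MCache tiny/tinysize fields (all names here are stand-ins; the real logic, including alignment rounding, lives in mallocgc in malloc.goc):

	package main

	import "fmt"

	const tinySize = 16 // TinySize: pack tiny noscan allocations into one block

	type tinyCache struct {
		tiny []byte  // current tiny block, if any
		off  uintptr // bytes already handed out from tiny
	}

	// alloc carves n bytes (n < tinySize) out of the cached block,
	// refilling from the size class when the block is exhausted.
	// The real allocator also rounds off up for alignment.
	func (c *tinyCache) alloc(n uintptr) []byte {
		if c.tiny != nil && c.off+n <= tinySize {
			p := c.tiny[c.off : c.off+n]
			c.off += n
			return p
		}
		c.tiny = make([]byte, tinySize) // stand-in for a size-class allocation
		c.off = n
		return c.tiny[:n]
	}

	func main() {
		var c tinyCache
		a, b := c.alloc(8), c.alloc(8) // both fit in one 16-byte block
		fmt.Println(len(a), len(b), c.off)
	}
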
@@ -154,6 +158,9 @@ struct MLink
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
+// NOTE: SysAlloc returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by SysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
@@ -168,16 +175,29 @@ struct MLink
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
-// location if that one is unavailable.
+// location if that one is unavailable. On some systems and in some
+// cases SysReserve will simply check that the address space is
+// available and not actually reserve it. If SysReserve returns
+// non-nil, it sets *reserved to true if the address space is
+// reserved, false if it has merely been checked.
+// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
+// The reserved argument is true if the address space was really
+// reserved, not merely checked.
+//
+// SysFault marks a (already SysAlloc'd) region to fault
+// if accessed. Used only for debugging the runtime.
void* runtime·SysAlloc(uintptr nbytes, uint64 *stat);
void runtime·SysFree(void *v, uintptr nbytes, uint64 *stat);
void runtime·SysUnused(void *v, uintptr nbytes);
void runtime·SysUsed(void *v, uintptr nbytes);
-void runtime·SysMap(void *v, uintptr nbytes, uint64 *stat);
-void* runtime·SysReserve(void *v, uintptr nbytes);
+void runtime·SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
+void* runtime·SysReserve(void *v, uintptr nbytes, bool *reserved);
+void runtime·SysFault(void *v, uintptr nbytes);
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
@@ -255,8 +275,9 @@ struct MStats
} by_size[NumSizeClasses];
};
-#define mstats runtime·memStats /* name shared with Go */
+#define mstats runtime·memStats
extern MStats mstats;
+void runtime·updatememstats(GCStats *stats);
// Size classes. Computed and initialized by InitSizes.
//
@@ -269,6 +290,7 @@ extern MStats mstats;
// making new objects in class i
int32 runtime·SizeToClass(int32);
+uintptr runtime·roundupsize(uintptr);
extern int32 runtime·class_to_size[NumSizeClasses];
extern int32 runtime·class_to_allocnpages[NumSizeClasses];
extern int8 runtime·size_to_class8[1024/8 + 1];
@@ -276,8 +298,6 @@ extern int8 runtime·size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern void runtime·InitSizes(void);
-// Per-thread (in Go, per-M) cache for small objects.
-// No locking needed because it is per-thread (per-M).
typedef struct MCacheList MCacheList;
struct MCacheList
{
@@ -285,14 +305,21 @@ struct MCacheList
uint32 nlist;
};
+// Per-thread (in Go, per-P) cache for small objects.
+// No locking needed because it is per-thread (per-P).
struct MCache
{
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
int32 next_sample; // trigger heap sample after allocating this many bytes
intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
+ // Allocator cache for tiny objects w/o pointers.
+ // See "Tiny allocator" comment in malloc.goc.
+ byte* tiny;
+ uintptr tinysize;
// The rest is not accessed on every malloc.
- MCacheList list[NumSizeClasses];
+ MSpan* alloc[NumSizeClasses]; // spans to allocate from
+ MCacheList free[NumSizeClasses];// lists of explicitly freed objects
// Local allocator stats, flushed during GC.
uintptr local_nlookup; // number of pointer lookups
uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize)
@@ -300,8 +327,8 @@ struct MCache
uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
};
-void runtime·MCache_Refill(MCache *c, int32 sizeclass);
-void runtime·MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
+MSpan* runtime·MCache_Refill(MCache *c, int32 sizeclass);
+void runtime·MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size);
void runtime·MCache_ReleaseAll(MCache *c);
// MTypes describes the types of blocks allocated within a span.
@@ -341,6 +368,44 @@ struct MTypes
uintptr data;
};
+enum
+{
+ KindSpecialFinalizer = 1,
+ KindSpecialProfile = 2,
+ // Note: The finalizer special must be first because if we're freeing
+ // an object, a finalizer special will cause the freeing operation
+ // to abort, and we want to keep the other special records around
+ // if that happens.
+};
+
+typedef struct Special Special;
+struct Special
+{
+ Special* next; // linked list in span
+ uint16 offset; // span offset of object
+ byte kind; // kind of Special
+};
+
+// The described object has a finalizer set for it.
+typedef struct SpecialFinalizer SpecialFinalizer;
+struct SpecialFinalizer
+{
+ Special;
+ FuncVal* fn;
+ uintptr nret;
+ Type* fint;
+ PtrType* ot;
+};
+
+// The described object is being heap profiled.
+typedef struct Bucket Bucket; // from mprof.goc
+typedef struct SpecialProfile SpecialProfile;
+struct SpecialProfile
+{
+ Special;
+ Bucket* b;
+};
+
// An MSpan is a run of pages.
enum
{
@@ -356,17 +421,30 @@ struct MSpan
PageID start; // starting page number
uintptr npages; // number of pages in span
MLink *freelist; // list of free objects
- uint32 ref; // number of allocated objects in this span
- int32 sizeclass; // size class
+ // sweep generation:
+ // if sweepgen == h->sweepgen - 2, the span needs sweeping
+ // if sweepgen == h->sweepgen - 1, the span is currently being swept
+ // if sweepgen == h->sweepgen, the span is swept and ready to use
+ // h->sweepgen is incremented by 2 after every GC
+ uint32 sweepgen;
+ uint16 ref; // capacity - number of objects in freelist
+ uint8 sizeclass; // size class
+ bool incache; // being used by an MCache
+ uint8 state; // MSpanInUse etc
+ uint8 needzero; // needs to be zeroed before allocation
uintptr elemsize; // computed from sizeclass or from npages
- uint32 state; // MSpanInUse etc
int64 unusedsince; // First time spotted by GC in MSpanFree state
uintptr npreleased; // number of pages released to the OS
byte *limit; // end of data in span
MTypes types; // types of allocated objects in this span
+ Lock specialLock; // guards specials list
+ Special *specials; // linked list of special records sorted by offset.
+ MLink *freebuf; // objects freed explicitly, not incorporated into freelist yet
};
void runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages);
+void runtime·MSpan_EnsureSwept(MSpan *span);
+bool runtime·MSpan_Sweep(MSpan *span);
// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
@@ -374,6 +452,7 @@ void runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages);
void runtime·MSpanList_Init(MSpan *list);
bool runtime·MSpanList_IsEmpty(MSpan *list);
void runtime·MSpanList_Insert(MSpan *list, MSpan *span);
+void runtime·MSpanList_InsertBack(MSpan *list, MSpan *span);
void runtime·MSpanList_Remove(MSpan *span); // from whatever list it is in
@@ -382,15 +461,16 @@ struct MCentral
{
Lock;
int32 sizeclass;
- MSpan nonempty;
- MSpan empty;
- int32 nfree;
+ MSpan nonempty; // list of spans with a free object
+ MSpan empty; // list of spans with no free objects (or cached in an MCache)
+ int32 nfree; // # of objects available in nonempty spans
};
void runtime·MCentral_Init(MCentral *c, int32 sizeclass);
-int32 runtime·MCentral_AllocList(MCentral *c, MLink **first);
-void runtime·MCentral_FreeList(MCentral *c, MLink *first);
-void runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
+MSpan* runtime·MCentral_CacheSpan(MCentral *c);
+void runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s);
+bool runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
+void runtime·MCentral_FreeList(MCentral *c, MLink *start); // TODO: need this?
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
@@ -399,10 +479,15 @@ struct MHeap
{
Lock;
MSpan free[MaxMHeapList]; // free lists of given length
- MSpan large; // free lists length >= MaxMHeapList
- MSpan **allspans;
+ MSpan freelarge; // free lists length >= MaxMHeapList
+ MSpan busy[MaxMHeapList]; // busy lists of large objects of given length
+ MSpan busylarge; // busy lists of large objects length >= MaxMHeapList
+ MSpan **allspans; // all spans out there
+ MSpan **sweepspans; // copy of allspans referenced by sweeper
uint32 nspan;
uint32 nspancap;
+ uint32 sweepgen; // sweep generation, see comment in MSpan
+ uint32 sweepdone; // all spans are swept
// span lookup
MSpan** spans;
@@ -414,6 +499,7 @@ struct MHeap
byte *arena_start;
byte *arena_used;
byte *arena_end;
+ bool arena_reserved;
// central free lists for small size classes.
// the padding makes sure that the MCentrals are
@@ -426,6 +512,9 @@ struct MHeap
FixAlloc spanalloc; // allocator for Span*
FixAlloc cachealloc; // allocator for MCache*
+ FixAlloc specialfinalizeralloc; // allocator for SpecialFinalizer*
+ FixAlloc specialprofilealloc; // allocator for SpecialProfile*
+	Lock	speciallock;	// lock for special record allocators.
// Malloc stats.
uint64 largefree; // bytes freed for large objects (>MaxSmallSize)
@@ -435,7 +524,7 @@ struct MHeap
extern MHeap runtime·mheap;
void runtime·MHeap_Init(MHeap *h);
-MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed);
+MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
void runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan* runtime·MHeap_Lookup(MHeap *h, void *v);
MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v);
@@ -444,26 +533,28 @@ void* runtime·MHeap_SysAlloc(MHeap *h, uintptr n);
void runtime·MHeap_MapBits(MHeap *h);
void runtime·MHeap_MapSpans(MHeap *h);
void runtime·MHeap_Scavenger(void);
+void runtime·MHeap_SplitSpan(MHeap *h, MSpan *s);
void* runtime·mallocgc(uintptr size, uintptr typ, uint32 flag);
void* runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat);
int32 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void runtime·gc(int32 force);
-void runtime·markallocated(void *v, uintptr n, bool noptr);
+uintptr runtime·sweepone(void);
+void runtime·markscan(void *v);
+void runtime·marknogc(void *v);
void runtime·checkallocated(void *v, uintptr n);
-void runtime·markfreed(void *v, uintptr n);
+void runtime·markfreed(void *v);
void runtime·checkfreed(void *v, uintptr n);
extern int32 runtime·checking;
void runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime·unmarkspan(void *v, uintptr size);
-bool runtime·blockspecial(void*);
-void runtime·setblockspecial(void*, bool);
void runtime·purgecachedstats(MCache*);
void* runtime·cnew(Type*);
void* runtime·cnewarray(Type*, intgo);
+void runtime·tracealloc(void*, uintptr, uintptr);
+void runtime·tracefree(void*, uintptr);
+void runtime·tracegc(void);
-void runtime·settype_flush(M*);
-void runtime·settype_sysfree(MSpan*);
uintptr runtime·gettype(void*);
enum
@@ -477,13 +568,25 @@ enum
};
void runtime·MProf_Malloc(void*, uintptr);
-void runtime·MProf_Free(void*, uintptr);
+void runtime·MProf_Free(Bucket*, uintptr, bool);
void runtime·MProf_GC(void);
+void runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr));
int32 runtime·gcprocs(void);
void runtime·helpgc(int32 nproc);
void runtime·gchelper(void);
+void runtime·createfing(void);
+G* runtime·wakefing(void);
+extern bool runtime·fingwait;
+extern bool runtime·fingwake;
-void runtime·walkfintab(void (*fn)(void*));
+void runtime·setprofilebucket(void *p, Bucket *b);
+
+bool runtime·addfinalizer(void*, FuncVal *fn, uintptr, Type*, PtrType*);
+void runtime·removefinalizer(void*);
+void runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot);
+
+void runtime·freeallspecials(MSpan *span, void *p, uintptr size);
+bool runtime·freespecial(Special *s, void *p, uintptr size, bool freed);
enum
{
@@ -495,8 +598,46 @@ enum
DebugTypeAtBlockEnd = 0,
};
+// Information from the compiler about the layout of stack frames.
+typedef struct BitVector BitVector;
+struct BitVector
+{
+ int32 n; // # of bits
+ uint32 *data;
+};
+typedef struct StackMap StackMap;
+struct StackMap
+{
+ int32 n; // number of bitmaps
+ int32 nbit; // number of bits in each bitmap
+ uint32 data[];
+};
+enum {
+ // Pointer map
+ BitsPerPointer = 2,
+ BitsDead = 0,
+ BitsScalar = 1,
+ BitsPointer = 2,
+ BitsMultiWord = 3,
+ // BitsMultiWord will be set for the first word of a multi-word item.
+ // When it is set, one of the following will be set for the second word.
+ BitsString = 0,
+ BitsSlice = 1,
+ BitsIface = 2,
+ BitsEface = 3,
+};
+// Returns pointer map data for the given stackmap index
+// (the index is encoded in PCDATA_StackMapIndex).
+BitVector runtime·stackmapdata(StackMap *stackmap, int32 n);
+
// defined in mgc0.go
void runtime·gc_m_ptr(Eface*);
+void runtime·gc_g_ptr(Eface*);
void runtime·gc_itab_ptr(Eface*);
void runtime·memorydump(void);
+int32 runtime·setgcpercent(int32);
+
+// Value we use to mark dead pointers when GODEBUG=gcdead=1.
+#define PoisonGC ((uintptr)0xf969696969696969ULL)
+#define PoisonStack ((uintptr)0x6868686868686868ULL)
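
The sweepgen comment in MSpan above defines a three-state protocol between each span and the heap's sweep generation. A small Go sketch of how a reader classifies a span (the helper is hypothetical; the state encoding is straight from the comment):

	package main

	import "fmt"

	// classify mirrors the sweepgen comment in MSpan:
	// h->sweepgen is incremented by 2 after every GC, so a span
	// lags by 0, 1, or 2 depending on its sweep status.
	func classify(sweepgen, hSweepgen uint32) string {
		switch sweepgen {
		case hSweepgen - 2:
			return "needs sweeping"
		case hSweepgen - 1:
			return "being swept"
		case hSweepgen:
			return "swept, ready to use"
		}
		return "invalid"
	}

	func main() {
		h := uint32(6) // heap sweepgen after three GCs
		fmt.Println(classify(4, h), classify(5, h), classify(6, h))
	}
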
diff --git a/src/pkg/runtime/map_test.go b/src/pkg/runtime/map_test.go
index a221cb28c..e4e838349 100644
--- a/src/pkg/runtime/map_test.go
+++ b/src/pkg/runtime/map_test.go
@@ -409,3 +409,69 @@ func TestMapNanGrowIterator(t *testing.T) {
t.Fatalf("missing value")
}
}
+
+func TestMapIterOrder(t *testing.T) {
+ for _, n := range [...]int{3, 7, 9, 15} {
+ // Make m be {0: true, 1: true, ..., n-1: true}.
+ m := make(map[int]bool)
+ for i := 0; i < n; i++ {
+ m[i] = true
+ }
+ // Check that iterating over the map produces at least two different orderings.
+ ord := func() []int {
+ var s []int
+ for key := range m {
+ s = append(s, key)
+ }
+ return s
+ }
+ first := ord()
+ ok := false
+ for try := 0; try < 100; try++ {
+ if !reflect.DeepEqual(first, ord()) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
+ }
+ }
+}
+
+func TestMapStringBytesLookup(t *testing.T) {
+ // Use large string keys to avoid small-allocation coalescing,
+ // which can cause AllocsPerRun to report lower counts than it should.
+ m := map[string]int{
+ "1000000000000000000000000000000000000000000000000": 1,
+ "2000000000000000000000000000000000000000000000000": 2,
+ }
+ buf := []byte("1000000000000000000000000000000000000000000000000")
+ if x := m[string(buf)]; x != 1 {
+ t.Errorf(`m[string([]byte("1"))] = %d, want 1`, x)
+ }
+ buf[0] = '2'
+ if x := m[string(buf)]; x != 2 {
+ t.Errorf(`m[string([]byte("2"))] = %d, want 2`, x)
+ }
+
+ var x int
+ n := testing.AllocsPerRun(100, func() {
+ x += m[string(buf)]
+ })
+ if n != 0 {
+ t.Errorf("AllocsPerRun for m[string(buf)] = %v, want 0", n)
+ }
+
+ x = 0
+ n = testing.AllocsPerRun(100, func() {
+ y, ok := m[string(buf)]
+ if !ok {
+ panic("!ok")
+ }
+ x += y
+ })
+ if n != 0 {
+ t.Errorf("AllocsPerRun for x,ok = m[string(buf)] = %v, want 0", n)
+ }
+}
diff --git a/src/pkg/runtime/mapspeed_test.go b/src/pkg/runtime/mapspeed_test.go
index d643d9898..da45ea11e 100644
--- a/src/pkg/runtime/mapspeed_test.go
+++ b/src/pkg/runtime/mapspeed_test.go
@@ -268,3 +268,33 @@ func BenchmarkSameLengthMap(b *testing.B) {
_ = m[s1]
}
}
+
+type BigKey [3]int64
+
+func BenchmarkBigKeyMap(b *testing.B) {
+ m := make(map[BigKey]bool)
+ k := BigKey{3, 4, 5}
+ m[k] = true
+ for i := 0; i < b.N; i++ {
+ _ = m[k]
+ }
+}
+
+type BigVal [3]int64
+
+func BenchmarkBigValMap(b *testing.B) {
+ m := make(map[BigKey]BigVal)
+ k := BigKey{3, 4, 5}
+ m[k] = BigVal{6, 7, 8}
+ for i := 0; i < b.N; i++ {
+ _ = m[k]
+ }
+}
+
+func BenchmarkSmallKeyMap(b *testing.B) {
+ m := make(map[int16]bool)
+ m[5] = true
+ for i := 0; i < b.N; i++ {
+ _ = m[5]
+ }
+}
diff --git a/src/pkg/runtime/mcache.c b/src/pkg/runtime/mcache.c
index 863030e74..26e3db2dc 100644
--- a/src/pkg/runtime/mcache.c
+++ b/src/pkg/runtime/mcache.c
@@ -10,69 +10,118 @@
#include "arch_GOARCH.h"
#include "malloc.h"
+extern volatile intgo runtime·MemProfileRate;
+
+// dummy MSpan that contains no free objects.
+static MSpan emptymspan;
+
+MCache*
+runtime·allocmcache(void)
+{
+ intgo rate;
+ MCache *c;
+ int32 i;
+
+ runtime·lock(&runtime·mheap);
+ c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
+ runtime·unlock(&runtime·mheap);
+ runtime·memclr((byte*)c, sizeof(*c));
+ for(i = 0; i < NumSizeClasses; i++)
+ c->alloc[i] = &emptymspan;
+
+ // Set first allocation sample size.
+ rate = runtime·MemProfileRate;
+ if(rate > 0x3fffffff) // make 2*rate not overflow
+ rate = 0x3fffffff;
+ if(rate != 0)
+ c->next_sample = runtime·fastrand1() % (2*rate);
+
+ return c;
+}
+
void
+runtime·freemcache(MCache *c)
+{
+ runtime·MCache_ReleaseAll(c);
+ runtime·lock(&runtime·mheap);
+ runtime·purgecachedstats(c);
+ runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
+ runtime·unlock(&runtime·mheap);
+}
+
+// Gets a span that has a free object in it and assigns it
+// to be the cached span for the given sizeclass. Returns this span.
+MSpan*
runtime·MCache_Refill(MCache *c, int32 sizeclass)
{
MCacheList *l;
+ MSpan *s;
- // Replenish using central lists.
- l = &c->list[sizeclass];
- if(l->list)
- runtime·throw("MCache_Refill: the list is not empty");
- l->nlist = runtime·MCentral_AllocList(&runtime·mheap.central[sizeclass], &l->list);
- if(l->list == nil)
- runtime·throw("out of memory");
-}
+ m->locks++;
+ // Return the current cached span to the central lists.
+ s = c->alloc[sizeclass];
+ if(s->freelist != nil)
+ runtime·throw("refill on a nonempty span");
+ if(s != &emptymspan)
+ runtime·MCentral_UncacheSpan(&runtime·mheap.central[sizeclass], s);
-// Take n elements off l and return them to the central free list.
-static void
-ReleaseN(MCacheList *l, int32 n, int32 sizeclass)
-{
- MLink *first, **lp;
- int32 i;
+ // Push any explicitly freed objects to the central lists.
+ // Not required, but it seems like a good time to do it.
+ l = &c->free[sizeclass];
+ if(l->nlist > 0) {
+ runtime·MCentral_FreeList(&runtime·mheap.central[sizeclass], l->list);
+ l->list = nil;
+ l->nlist = 0;
+ }
- // Cut off first n elements.
- first = l->list;
- lp = &l->list;
- for(i=0; i<n; i++)
- lp = &(*lp)->next;
- l->list = *lp;
- *lp = nil;
- l->nlist -= n;
-
- // Return them to central free list.
- runtime·MCentral_FreeList(&runtime·mheap.central[sizeclass], first);
+ // Get a new cached span from the central lists.
+ s = runtime·MCentral_CacheSpan(&runtime·mheap.central[sizeclass]);
+ if(s == nil)
+ runtime·throw("out of memory");
+ if(s->freelist == nil) {
+ runtime·printf("%d %d\n", s->ref, (int32)((s->npages << PageShift) / s->elemsize));
+ runtime·throw("empty span");
+ }
+ c->alloc[sizeclass] = s;
+ m->locks--;
+ return s;
}
void
-runtime·MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
+runtime·MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size)
{
MCacheList *l;
- MLink *p;
- // Put back on list.
- l = &c->list[sizeclass];
- p = v;
+ // Put on free list.
+ l = &c->free[sizeclass];
p->next = l->list;
l->list = p;
l->nlist++;
- c->local_cachealloc -= size;
- // We transfer span at a time from MCentral to MCache,
- // if we have 2 times more than that, release a half back.
- if(l->nlist >= 2*(runtime·class_to_allocnpages[sizeclass]<<PageShift)/size)
- ReleaseN(l, l->nlist/2, sizeclass);
+ // We transfer a span at a time from MCentral to MCache,
+ // so we'll do the same in the other direction.
+ if(l->nlist >= (runtime·class_to_allocnpages[sizeclass]<<PageShift)/size) {
+ runtime·MCentral_FreeList(&runtime·mheap.central[sizeclass], l->list);
+ l->list = nil;
+ l->nlist = 0;
+ }
}
void
runtime·MCache_ReleaseAll(MCache *c)
{
int32 i;
+ MSpan *s;
MCacheList *l;
for(i=0; i<NumSizeClasses; i++) {
- l = &c->list[i];
- if(l->list) {
+ s = c->alloc[i];
+ if(s != &emptymspan) {
+ runtime·MCentral_UncacheSpan(&runtime·mheap.central[i], s);
+ c->alloc[i] = &emptymspan;
+ }
+ l = &c->free[i];
+ if(l->nlist > 0) {
runtime·MCentral_FreeList(&runtime·mheap.central[i], l->list);
l->list = nil;
l->nlist = 0;
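
Condensed, the new refill path exchanges whole spans with MCentral instead of transferring object lists. A compilable Go sketch of the sequence (every type and method here is a stand-in for the C code above, not the runtime's API):

	package main

	type mlink struct{ next *mlink }

	type mspan struct{ freelist *mlink }

	type mcentral struct{}

	func (c *mcentral) uncacheSpan(s *mspan) {}
	func (c *mcentral) freeList(l *mlink)    {}
	func (c *mcentral) cacheSpan() *mspan    { return &mspan{freelist: new(mlink)} }

	var emptySpan = &mspan{} // dummy span with no free objects

	type freeBuf struct {
		head *mlink
		n    int
	}

	type mcache struct {
		alloc [67]*mspan // one cached span per size class (NumSizeClasses = 67)
		free  [67]freeBuf
	}

	// refill mirrors runtime·MCache_Refill: return the old cached span to
	// MCentral, flush explicitly freed objects, then cache a span that
	// still has a free object in it.
	func refill(c *mcache, central *mcentral, sizeclass int) *mspan {
		if s := c.alloc[sizeclass]; s != emptySpan {
			central.uncacheSpan(s)
		}
		if l := &c.free[sizeclass]; l.n > 0 {
			central.freeList(l.head)
			l.head, l.n = nil, 0
		}
		s := central.cacheSpan()
		if s == nil || s.freelist == nil {
			panic("out of memory")
		}
		c.alloc[sizeclass] = s
		return s
	}

	func main() {
		c := &mcache{}
		for i := range c.alloc {
			c.alloc[i] = emptySpan
		}
		refill(c, &mcentral{}, 2)
	}
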
diff --git a/src/pkg/runtime/mcentral.c b/src/pkg/runtime/mcentral.c
index 735a7e6a9..203558fca 100644
--- a/src/pkg/runtime/mcentral.c
+++ b/src/pkg/runtime/mcentral.c
@@ -19,7 +19,8 @@
#include "malloc.h"
static bool MCentral_Grow(MCentral *c);
-static void MCentral_Free(MCentral *c, void *v);
+static void MCentral_Free(MCentral *c, MLink *v);
+static void MCentral_ReturnToHeap(MCentral *c, MSpan *s);
// Initialize a single central free list.
void
@@ -30,39 +31,115 @@ runtime·MCentral_Init(MCentral *c, int32 sizeclass)
runtime·MSpanList_Init(&c->empty);
}
-// Allocate a list of objects from the central free list.
-// Return the number of objects allocated.
-// The objects are linked together by their first words.
-// On return, *pfirst points at the first object.
-int32
-runtime·MCentral_AllocList(MCentral *c, MLink **pfirst)
+// Allocate a span to use in an MCache.
+MSpan*
+runtime·MCentral_CacheSpan(MCentral *c)
{
MSpan *s;
int32 cap, n;
+ uint32 sg;
runtime·lock(c);
- // Replenish central list if empty.
- if(runtime·MSpanList_IsEmpty(&c->nonempty)) {
- if(!MCentral_Grow(c)) {
+ sg = runtime·mheap.sweepgen;
+retry:
+ for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
+ if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
+ runtime·unlock(c);
+ runtime·MSpan_Sweep(s);
+ runtime·lock(c);
+			// the span could have been moved to the heap; retry
+ goto retry;
+ }
+ if(s->sweepgen == sg-1) {
+ // the span is being swept by background sweeper, skip
+ continue;
+ }
+ // we have a nonempty span that does not require sweeping, allocate from it
+ goto havespan;
+ }
+
+ for(s = c->empty.next; s != &c->empty; s = s->next) {
+ if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
+ // we have an empty span that requires sweeping,
+ // sweep it and see if we can free some space in it
+ runtime·MSpanList_Remove(s);
+ // swept spans are at the end of the list
+ runtime·MSpanList_InsertBack(&c->empty, s);
runtime·unlock(c);
- *pfirst = nil;
- return 0;
+ runtime·MSpan_Sweep(s);
+ runtime·lock(c);
+			// the span could have been moved to nonempty or to the heap; retry
+ goto retry;
}
+ if(s->sweepgen == sg-1) {
+ // the span is being swept by background sweeper, skip
+ continue;
+ }
+ // already swept empty span,
+ // all subsequent ones must also be either swept or in process of sweeping
+ break;
}
- s = c->nonempty.next;
+
+ // Replenish central list if empty.
+ if(!MCentral_Grow(c)) {
+ runtime·unlock(c);
+ return nil;
+ }
+ goto retry;
+
+havespan:
cap = (s->npages << PageShift) / s->elemsize;
n = cap - s->ref;
- *pfirst = s->freelist;
- s->freelist = nil;
- s->ref += n;
+ if(n == 0)
+ runtime·throw("empty span");
+ if(s->freelist == nil)
+ runtime·throw("freelist empty");
c->nfree -= n;
runtime·MSpanList_Remove(s);
- runtime·MSpanList_Insert(&c->empty, s);
+ runtime·MSpanList_InsertBack(&c->empty, s);
+ s->incache = true;
runtime·unlock(c);
- return n;
+ return s;
}
-// Free the list of objects back into the central free list.
+// Return span from an MCache.
+void
+runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
+{
+ MLink *v;
+ int32 cap, n;
+
+ runtime·lock(c);
+
+ s->incache = false;
+
+ // Move any explicitly freed items from the freebuf to the freelist.
+ while((v = s->freebuf) != nil) {
+ s->freebuf = v->next;
+ runtime·markfreed(v);
+ v->next = s->freelist;
+ s->freelist = v;
+ s->ref--;
+ }
+
+ if(s->ref == 0) {
+ // Free back to heap. Unlikely, but possible.
+ MCentral_ReturnToHeap(c, s); // unlocks c
+ return;
+ }
+
+ cap = (s->npages << PageShift) / s->elemsize;
+ n = cap - s->ref;
+ if(n > 0) {
+ c->nfree += n;
+ runtime·MSpanList_Remove(s);
+ runtime·MSpanList_Insert(&c->nonempty, s);
+ }
+ runtime·unlock(c);
+}
+
+// Free the list of objects back into the central free list c.
+// Called from runtime·free.
void
runtime·MCentral_FreeList(MCentral *c, MLink *start)
{
@@ -77,51 +154,58 @@ runtime·MCentral_FreeList(MCentral *c, MLink *start)
}
// Helper: free one object back into the central free list.
+// Caller must hold lock on c on entry. Holds lock on exit.
static void
-MCentral_Free(MCentral *c, void *v)
+MCentral_Free(MCentral *c, MLink *v)
{
MSpan *s;
- MLink *p;
- int32 size;
// Find span for v.
s = runtime·MHeap_Lookup(&runtime·mheap, v);
if(s == nil || s->ref == 0)
runtime·throw("invalid free");
+ if(s->sweepgen != runtime·mheap.sweepgen)
+ runtime·throw("free into unswept span");
+
+ // If the span is currently being used unsynchronized by an MCache,
+ // we can't modify the freelist. Add to the freebuf instead. The
+ // items will get moved to the freelist when the span is returned
+ // by the MCache.
+ if(s->incache) {
+ v->next = s->freebuf;
+ s->freebuf = v;
+ return;
+ }
- // Move to nonempty if necessary.
+ // Move span to nonempty if necessary.
if(s->freelist == nil) {
runtime·MSpanList_Remove(s);
runtime·MSpanList_Insert(&c->nonempty, s);
}
- // Add v back to s's free list.
- p = v;
- p->next = s->freelist;
- s->freelist = p;
+ // Add the object to span's free list.
+ runtime·markfreed(v);
+ v->next = s->freelist;
+ s->freelist = v;
+ s->ref--;
c->nfree++;
// If s is completely freed, return it to the heap.
- if(--s->ref == 0) {
- size = runtime·class_to_size[c->sizeclass];
- runtime·MSpanList_Remove(s);
- runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
- *(uintptr*)(s->start<<PageShift) = 1; // needs zeroing
- s->freelist = nil;
- c->nfree -= (s->npages << PageShift) / size;
- runtime·unlock(c);
- runtime·MHeap_Free(&runtime·mheap, s, 0);
+ if(s->ref == 0) {
+ MCentral_ReturnToHeap(c, s); // unlocks c
runtime·lock(c);
}
}
// Free n objects from a span s back into the central free list c.
-// Called from GC.
-void
+// Called during sweep.
+// Returns true if the span was returned to the heap. Sets sweepgen to
+// the latest generation.
+bool
runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
- int32 size;
-
+ if(s->incache)
+ runtime·throw("freespan into cached span");
runtime·lock(c);
// Move to nonempty if necessary.
@@ -135,20 +219,21 @@ runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *
s->freelist = start;
s->ref -= n;
c->nfree += n;
+
+	// Delay updating sweepgen until here. This is the signal that
+	// the span may be used in an MCache, so it must come after the
+	// linked list operations above (actually, just after the
+	// lock of c above).
+ runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);
- // If s is completely freed, return it to the heap.
- if(s->ref == 0) {
- size = runtime·class_to_size[c->sizeclass];
- runtime·MSpanList_Remove(s);
- *(uintptr*)(s->start<<PageShift) = 1; // needs zeroing
- s->freelist = nil;
- c->nfree -= (s->npages << PageShift) / size;
- runtime·unlock(c);
- runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
- runtime·MHeap_Free(&runtime·mheap, s, 0);
- } else {
+ if(s->ref != 0) {
runtime·unlock(c);
+ return false;
}
+
+ // s is completely freed, return it to the heap.
+ MCentral_ReturnToHeap(c, s); // unlocks c
+ return true;
}
void
@@ -202,3 +287,21 @@ MCentral_Grow(MCentral *c)
runtime·MSpanList_Insert(&c->nonempty, s);
return true;
}
+
+// Return s to the heap. s must be unused (s->ref == 0). Unlocks c.
+static void
+MCentral_ReturnToHeap(MCentral *c, MSpan *s)
+{
+ int32 size;
+
+ size = runtime·class_to_size[c->sizeclass];
+ runtime·MSpanList_Remove(s);
+ s->needzero = 1;
+ s->freelist = nil;
+ if(s->ref != 0)
+ runtime·throw("ref wrong");
+ c->nfree -= (s->npages << PageShift) / size;
+ runtime·unlock(c);
+ runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ runtime·MHeap_Free(&runtime·mheap, s, 0);
+}
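
The incache/freebuf pair above is the subtle part of the new scheme: frees into a span currently owned by an MCache cannot touch the freelist, so they are parked and merged later. A self-contained Go sketch of that deferral (stand-in types again, omitting the markfreed and list-move bookkeeping):

	package main

	type mlink struct{ next *mlink }

	type mspan struct {
		incache  bool
		freebuf  *mlink // explicit frees parked while an MCache owns the span
		freelist *mlink
		ref      int
	}

	// free mirrors the incache branch of MCentral_Free: while an MCache is
	// using the span unsynchronized, frees go to freebuf; otherwise they
	// go straight onto the freelist.
	func free(s *mspan, v *mlink) {
		if s.incache {
			v.next = s.freebuf
			s.freebuf = v
			return
		}
		v.next = s.freelist
		s.freelist = v
		s.ref--
	}

	// uncache mirrors MCentral_UncacheSpan: merge the parked frees into
	// the freelist once the span is returned by the MCache.
	func uncache(s *mspan) {
		s.incache = false
		for s.freebuf != nil {
			v := s.freebuf
			s.freebuf = v.next
			v.next = s.freelist
			s.freelist = v
			s.ref--
		}
	}

	func main() {
		s := &mspan{incache: true, ref: 2}
		free(s, &mlink{}) // parked on freebuf
		uncache(s)        // merged into freelist; ref drops to 1
		free(s, &mlink{}) // goes straight to freelist; ref drops to 0
	}
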
diff --git a/src/pkg/runtime/mem.go b/src/pkg/runtime/mem.go
index dc735e4a6..fa308b5d9 100644
--- a/src/pkg/runtime/mem.go
+++ b/src/pkg/runtime/mem.go
@@ -60,9 +60,8 @@ type MemStats struct {
var sizeof_C_MStats uintptr // filled in by malloc.goc
-var memStats MemStats
-
func init() {
+ var memStats MemStats
if sizeof_C_MStats != unsafe.Sizeof(memStats) {
println(sizeof_C_MStats, unsafe.Sizeof(memStats))
panic("MStats vs MemStatsType size mismatch")
diff --git a/src/pkg/runtime/mem_darwin.c b/src/pkg/runtime/mem_darwin.c
index a75c46d9d..878c4e1c5 100644
--- a/src/pkg/runtime/mem_darwin.c
+++ b/src/pkg/runtime/mem_darwin.c
@@ -41,11 +41,18 @@ runtime·SysFree(void *v, uintptr n, uint64 *stat)
runtime·munmap(v, n);
}
+void
+runtime·SysFault(void *v, uintptr n)
+{
+ runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+}
+
void*
-runtime·SysReserve(void *v, uintptr n)
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
void *p;
+ *reserved = true;
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p < (void*)4096)
return nil;
@@ -58,10 +65,12 @@ enum
};
void
-runtime·SysMap(void *v, uintptr n, uint64 *stat)
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
+ USED(reserved);
+
runtime·xadd64(stat, n);
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
if(p == (void*)ENOMEM)
diff --git a/src/pkg/runtime/mem_dragonfly.c b/src/pkg/runtime/mem_dragonfly.c
index 025b62ea6..c270332cb 100644
--- a/src/pkg/runtime/mem_dragonfly.c
+++ b/src/pkg/runtime/mem_dragonfly.c
@@ -45,17 +45,26 @@ runtime·SysFree(void *v, uintptr n, uint64 *stat)
runtime·munmap(v, n);
}
+void
+runtime·SysFault(void *v, uintptr n)
+{
+ runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+}
+
void*
-runtime·SysReserve(void *v, uintptr n)
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
void *p;
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// and check the assumption in SysMap.
- if(sizeof(void*) == 8)
+ if(sizeof(void*) == 8 && n > 1LL<<32) {
+ *reserved = false;
return v;
-
+ }
+
+ *reserved = true;
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p < (void*)4096)
return nil;
@@ -63,14 +72,14 @@ runtime·SysReserve(void *v, uintptr n)
}
void
-runtime·SysMap(void *v, uintptr n, uint64 *stat)
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
runtime·xadd64(stat, n);
// On 64-bit, we don't actually have v reserved, so tread carefully.
- if(sizeof(void*) == 8) {
+ if(!reserved) {
// TODO(jsing): For some reason DragonFly seems to return
// memory at a different address than we requested, even when
// there should be no reason for it to do so. This can be
diff --git a/src/pkg/runtime/mem_freebsd.c b/src/pkg/runtime/mem_freebsd.c
index 1ee2a555e..586947a2d 100644
--- a/src/pkg/runtime/mem_freebsd.c
+++ b/src/pkg/runtime/mem_freebsd.c
@@ -45,17 +45,26 @@ runtime·SysFree(void *v, uintptr n, uint64 *stat)
runtime·munmap(v, n);
}
+void
+runtime·SysFault(void *v, uintptr n)
+{
+ runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+}
+
void*
-runtime·SysReserve(void *v, uintptr n)
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
void *p;
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// and check the assumption in SysMap.
- if(sizeof(void*) == 8)
+ if(sizeof(void*) == 8 && n > 1LL<<32) {
+ *reserved = false;
return v;
-
+ }
+
+ *reserved = true;
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p < (void*)4096)
return nil;
@@ -63,14 +72,14 @@ runtime·SysReserve(void *v, uintptr n)
}
void
-runtime·SysMap(void *v, uintptr n, uint64 *stat)
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
runtime·xadd64(stat, n);
// On 64-bit, we don't actually have v reserved, so tread carefully.
- if(sizeof(void*) == 8) {
+ if(!reserved) {
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p == (void*)ENOMEM)
runtime·throw("runtime: out of memory");
diff --git a/src/pkg/runtime/mem_linux.c b/src/pkg/runtime/mem_linux.c
index b0f295633..635594c36 100644
--- a/src/pkg/runtime/mem_linux.c
+++ b/src/pkg/runtime/mem_linux.c
@@ -11,6 +11,7 @@
enum
{
_PAGE_SIZE = 4096,
+ EACCES = 13,
};
static int32
@@ -19,15 +20,22 @@ addrspace_free(void *v, uintptr n)
int32 errval;
uintptr chunk;
uintptr off;
- static byte vec[4096];
+
+ // NOTE: vec must be just 1 byte long here.
+ // Mincore returns ENOMEM if any of the pages are unmapped,
+ // but we want to know that all of the pages are unmapped.
+ // To make these the same, we can only ask about one page
+ // at a time. See golang.org/issue/7476.
+ static byte vec[1];
for(off = 0; off < n; off += chunk) {
chunk = _PAGE_SIZE * sizeof vec;
if(chunk > (n - off))
chunk = n - off;
errval = runtime·mincore((int8*)v + off, chunk, vec);
- // errval is 0 if success, or -(error_code) if error.
- if (errval == 0 || errval != -ENOMEM)
+ // ENOMEM means unmapped, which is what we want.
+ // Anything else we assume means the pages are mapped.
+ if (errval != -ENOMEM)
return 0;
}
return 1;
@@ -91,8 +99,14 @@ runtime·SysFree(void *v, uintptr n, uint64 *stat)
runtime·munmap(v, n);
}
+void
+runtime·SysFault(void *v, uintptr n)
+{
+ runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+}
+
void*
-runtime·SysReserve(void *v, uintptr n)
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
void *p;
@@ -100,7 +114,7 @@ runtime·SysReserve(void *v, uintptr n)
// much address space. Instead, assume that the reservation is okay
// if we can reserve at least 64K and check the assumption in SysMap.
// Only user-mode Linux (UML) rejects these requests.
- if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+ if(sizeof(void*) == 8 && n > 1LL<<32) {
p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if (p != v) {
if(p >= (void*)4096)
@@ -108,24 +122,26 @@ runtime·SysReserve(void *v, uintptr n)
return nil;
}
runtime·munmap(p, 64<<10);
+ *reserved = false;
return v;
}
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if((uintptr)p < 4096)
return nil;
+ *reserved = true;
return p;
}
void
-runtime·SysMap(void *v, uintptr n, uint64 *stat)
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
runtime·xadd64(stat, n);
// On 64-bit, we don't actually have v reserved, so tread carefully.
- if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+ if(!reserved) {
p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p == (void*)ENOMEM)
runtime·throw("runtime: out of memory");
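
All of the mem_*.c changes implement one calling convention: SysReserve reports through *reserved whether the address space was really reserved or merely checked, and SysMap adjusts accordingly. A hedged Go model of the contract (the 4GB threshold comes from the code above; the function names and bodies are stand-ins):

	package main

	import "fmt"

	// sysReserve models the new contract: on 64-bit, reservations larger
	// than 4GB are only checked (or assumed) available, not actually
	// reserved, and *reserved reports which of the two happened.
	func sysReserve(v uintptr, n uint64, reserved *bool) uintptr {
		if n > 1<<32 {
			*reserved = false // merely checked
			return v
		}
		*reserved = true // really reserved, e.g. a PROT_NONE mapping
		return v
	}

	// sysMap must tread carefully when the space was not really reserved:
	// it maps without MAP_FIXED and verifies the kernel returned v,
	// throwing on an address space conflict.
	func sysMap(v uintptr, n uint64, reserved bool) {
		if !reserved {
			fmt.Println("mmap without MAP_FIXED, then check the address")
			return
		}
		fmt.Println("mmap with MAP_FIXED into the reservation")
	}

	func main() {
		var reserved bool
		v := sysReserve(0x1000, 1<<33, &reserved)
		sysMap(v, 1<<33, reserved)
	}
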
diff --git a/src/pkg/runtime/mem_nacl.c b/src/pkg/runtime/mem_nacl.c
new file mode 100644
index 000000000..e2bca40a4
--- /dev/null
+++ b/src/pkg/runtime/mem_nacl.c
@@ -0,0 +1,118 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "arch_GOARCH.h"
+#include "defs_GOOS_GOARCH.h"
+#include "os_GOOS.h"
+#include "malloc.h"
+
+enum
+{
+ Debug = 0,
+};
+
+void*
+runtime·SysAlloc(uintptr n, uint64 *stat)
+{
+ void *v;
+
+ v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(v < (void*)4096) {
+ if(Debug)
+ runtime·printf("SysAlloc(%p): %p\n", n, v);
+ return nil;
+ }
+ runtime·xadd64(stat, n);
+ if(Debug)
+ runtime·printf("SysAlloc(%p) = %p\n", n, v);
+ return v;
+}
+
+void
+runtime·SysUnused(void *v, uintptr n)
+{
+ if(Debug)
+ runtime·printf("SysUnused(%p, %p)\n", v, n);
+}
+
+void
+runtime·SysUsed(void *v, uintptr n)
+{
+ USED(v);
+ USED(n);
+}
+
+void
+runtime·SysFree(void *v, uintptr n, uint64 *stat)
+{
+ if(Debug)
+ runtime·printf("SysFree(%p, %p)\n", v, n);
+ runtime·xadd64(stat, -(uint64)n);
+ runtime·munmap(v, n);
+}
+
+void
+runtime·SysFault(void *v, uintptr n)
+{
+ runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+}
+
+void*
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
+{
+ void *p;
+
+ // On 64-bit, people with ulimit -v set complain if we reserve too
+ // much address space. Instead, assume that the reservation is okay
+ // and check the assumption in SysMap.
+ if(NaCl || sizeof(void*) == 8) {
+ *reserved = false;
+ return v;
+ }
+
+ p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p < (void*)4096)
+ return nil;
+ *reserved = true;
+ return p;
+}
+
+void
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
+{
+ void *p;
+
+ runtime·xadd64(stat, n);
+
+ // On 64-bit, we don't actually have v reserved, so tread carefully.
+ if(!reserved) {
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p == (void*)ENOMEM) {
+ runtime·printf("SysMap(%p, %p): %p\n", v, n, p);
+ runtime·throw("runtime: out of memory");
+ }
+ if(p != v) {
+ runtime·printf("SysMap(%p, %p): %p\n", v, n, p);
+ runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
+ runtime·throw("runtime: address space conflict");
+ }
+ if(Debug)
+ runtime·printf("SysMap(%p, %p) = %p\n", v, n, p);
+ return;
+ }
+
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+ if(p == (void*)ENOMEM) {
+ runtime·printf("SysMap(%p, %p): %p\n", v, n, p);
+ runtime·throw("runtime: out of memory");
+ }
+ if(p != v) {
+ runtime·printf("SysMap(%p, %p): %p\n", v, n, p);
+ runtime·printf("mmap MAP_FIXED %p returned %p\n", v, p);
+ runtime·throw("runtime: cannot map pages in arena address space");
+ }
+ if(Debug)
+ runtime·printf("SysMap(%p, %p) = %p\n", v, n, p);
+}
diff --git a/src/pkg/runtime/mem_netbsd.c b/src/pkg/runtime/mem_netbsd.c
index 91e36eb60..861ae90c7 100644
--- a/src/pkg/runtime/mem_netbsd.c
+++ b/src/pkg/runtime/mem_netbsd.c
@@ -45,32 +45,41 @@ runtime·SysFree(void *v, uintptr n, uint64 *stat)
runtime·munmap(v, n);
}
+void
+runtime·SysFault(void *v, uintptr n)
+{
+ runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+}
+
void*
-runtime·SysReserve(void *v, uintptr n)
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
void *p;
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// and check the assumption in SysMap.
- if(sizeof(void*) == 8)
+ if(sizeof(void*) == 8 && n > 1LL<<32) {
+ *reserved = false;
return v;
+ }
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p < (void*)4096)
return nil;
+ *reserved = true;
return p;
}
void
-runtime·SysMap(void *v, uintptr n, uint64 *stat)
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
runtime·xadd64(stat, n);
// On 64-bit, we don't actually have v reserved, so tread carefully.
- if(sizeof(void*) == 8) {
+ if(!reserved) {
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p == (void*)ENOMEM)
runtime·throw("runtime: out of memory");
diff --git a/src/pkg/runtime/mem_openbsd.c b/src/pkg/runtime/mem_openbsd.c
index 91e36eb60..861ae90c7 100644
--- a/src/pkg/runtime/mem_openbsd.c
+++ b/src/pkg/runtime/mem_openbsd.c
@@ -45,32 +45,41 @@ runtime·SysFree(void *v, uintptr n, uint64 *stat)
runtime·munmap(v, n);
}
+void
+runtime·SysFault(void *v, uintptr n)
+{
+ runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+}
+
void*
-runtime·SysReserve(void *v, uintptr n)
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
void *p;
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// and check the assumption in SysMap.
- if(sizeof(void*) == 8)
+ if(sizeof(void*) == 8 && n > 1LL<<32) {
+ *reserved = false;
return v;
+ }
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p < (void*)4096)
return nil;
+ *reserved = true;
return p;
}
void
-runtime·SysMap(void *v, uintptr n, uint64 *stat)
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
runtime·xadd64(stat, n);
// On 64-bit, we don't actually have v reserved, so tread carefully.
- if(sizeof(void*) == 8) {
+ if(!reserved) {
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p == (void*)ENOMEM)
runtime·throw("runtime: out of memory");
diff --git a/src/pkg/runtime/mem_plan9.c b/src/pkg/runtime/mem_plan9.c
index edf970b2f..bbf04c7ed 100644
--- a/src/pkg/runtime/mem_plan9.c
+++ b/src/pkg/runtime/mem_plan9.c
@@ -62,14 +62,21 @@ runtime·SysUsed(void *v, uintptr nbytes)
}
void
-runtime·SysMap(void *v, uintptr nbytes, uint64 *stat)
+runtime·SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat)
{
- USED(v, nbytes, stat);
+ USED(v, nbytes, reserved, stat);
+}
+
+void
+runtime·SysFault(void *v, uintptr nbytes)
+{
+ USED(v, nbytes);
}
void*
-runtime·SysReserve(void *v, uintptr nbytes)
+runtime·SysReserve(void *v, uintptr nbytes, bool *reserved)
{
USED(v);
+ *reserved = true;
return runtime·SysAlloc(nbytes, &mstats.heap_sys);
}
diff --git a/src/pkg/runtime/mem_solaris.c b/src/pkg/runtime/mem_solaris.c
new file mode 100644
index 000000000..034222887
--- /dev/null
+++ b/src/pkg/runtime/mem_solaris.c
@@ -0,0 +1,99 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "arch_GOARCH.h"
+#include "defs_GOOS_GOARCH.h"
+#include "os_GOOS.h"
+#include "malloc.h"
+
+enum
+{
+ ENOMEM = 12,
+};
+
+void*
+runtime·SysAlloc(uintptr n, uint64 *stat)
+{
+ void *v;
+
+ v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(v < (void*)4096)
+ return nil;
+ runtime·xadd64(stat, n);
+ return v;
+}
+
+void
+runtime·SysUnused(void *v, uintptr n)
+{
+ USED(v);
+ USED(n);
+}
+
+void
+runtime·SysUsed(void *v, uintptr n)
+{
+ USED(v);
+ USED(n);
+}
+
+void
+runtime·SysFree(void *v, uintptr n, uint64 *stat)
+{
+ runtime·xadd64(stat, -(uint64)n);
+ runtime·munmap(v, n);
+}
+
+void
+runtime·SysFault(void *v, uintptr n)
+{
+ runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+}
+
+void*
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
+{
+ void *p;
+
+ // On 64-bit, people with ulimit -v set complain if we reserve too
+ // much address space. Instead, assume that the reservation is okay
+ // and check the assumption in SysMap.
+ if(sizeof(void*) == 8 && n > 1LL<<32) {
+ *reserved = false;
+ return v;
+ }
+
+ p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p < (void*)4096)
+ return nil;
+ *reserved = true;
+ return p;
+}
+
+void
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
+{
+ void *p;
+
+ runtime·xadd64(stat, n);
+
+ // On 64-bit, we don't actually have v reserved, so tread carefully.
+ if(!reserved) {
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p == (void*)ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v) {
+ runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
+ runtime·throw("runtime: address space conflict");
+ }
+ return;
+ }
+
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+ if(p == (void*)ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v)
+ runtime·throw("runtime: cannot map pages in arena address space");
+}
diff --git a/src/pkg/runtime/mem_windows.c b/src/pkg/runtime/mem_windows.c
index abdc72ad8..77ec6e926 100644
--- a/src/pkg/runtime/mem_windows.c
+++ b/src/pkg/runtime/mem_windows.c
@@ -15,12 +15,15 @@ enum {
MEM_RELEASE = 0x8000,
PAGE_READWRITE = 0x0004,
+ PAGE_NOACCESS = 0x0001,
};
#pragma dynimport runtime·VirtualAlloc VirtualAlloc "kernel32.dll"
#pragma dynimport runtime·VirtualFree VirtualFree "kernel32.dll"
+#pragma dynimport runtime·VirtualProtect VirtualProtect "kernel32.dll"
extern void *runtime·VirtualAlloc;
extern void *runtime·VirtualFree;
+extern void *runtime·VirtualProtect;
void*
runtime·SysAlloc(uintptr n, uint64 *stat)
@@ -33,10 +36,30 @@ void
runtime·SysUnused(void *v, uintptr n)
{
void *r;
+ uintptr small;
r = runtime·stdcall(runtime·VirtualFree, 3, v, n, (uintptr)MEM_DECOMMIT);
- if(r == nil)
- runtime·throw("runtime: failed to decommit pages");
+ if(r != nil)
+ return;
+
+ // Decommit failed. Usual reason is that we've merged memory from two different
+ // VirtualAlloc calls, and Windows will only let each VirtualFree handle pages from
+ // a single VirtualAlloc. It is okay to specify a subset of the pages from a single alloc,
+ // just not pages from multiple allocs. This is a rare case, arising only when we're
+ // trying to give memory back to the operating system, which happens on a time
+ // scale of minutes. It doesn't have to be terribly fast. Instead of extra bookkeeping
+ // on all our VirtualAlloc calls, try freeing successively smaller pieces until
+ // we manage to free something, and then repeat. This ends up being O(n log n)
+ // in the worst case, but that's fast enough.
+ while(n > 0) {
+ small = n;
+ while(small >= 4096 && runtime·stdcall(runtime·VirtualFree, 3, v, small, (uintptr)MEM_DECOMMIT) == nil)
+ small = (small / 2) & ~(4096-1);
+ if(small < 4096)
+ runtime·throw("runtime: failed to decommit pages");
+ v = (byte*)v + small;
+ n -= small;
+ }
}
void
@@ -60,9 +83,17 @@ runtime·SysFree(void *v, uintptr n, uint64 *stat)
runtime·throw("runtime: failed to release pages");
}
+void
+runtime·SysFault(void *v, uintptr n)
+{
+	// SysUnused makes the memory inaccessible and prevents its reuse.
+ runtime·SysUnused(v, n);
+}
+
void*
-runtime·SysReserve(void *v, uintptr n)
+runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
+ *reserved = true;
// v is just a hint.
// First try at v.
v = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_RESERVE, (uintptr)PAGE_READWRITE);
@@ -74,10 +105,12 @@ runtime·SysReserve(void *v, uintptr n)
}
void
-runtime·SysMap(void *v, uintptr n, uint64 *stat)
+runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
-
+
+ USED(reserved);
+
runtime·xadd64(stat, n);
p = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_COMMIT, (uintptr)PAGE_READWRITE);
if(p != v)
diff --git a/src/pkg/runtime/memclr_386.s b/src/pkg/runtime/memclr_386.s
new file mode 100644
index 000000000..4b7580cb4
--- /dev/null
+++ b/src/pkg/runtime/memclr_386.s
@@ -0,0 +1,127 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+#include "../../cmd/ld/textflag.h"
+
+// void runtime·memclr(void*, uintptr)
+TEXT runtime·memclr(SB), NOSPLIT, $0-8
+ MOVL ptr+0(FP), DI
+ MOVL n+4(FP), BX
+ XORL AX, AX
+
+ // MOVOU seems always faster than REP STOSL.
+clr_tail:
+ TESTL BX, BX
+ JEQ clr_0
+ CMPL BX, $2
+ JBE clr_1or2
+ CMPL BX, $4
+ JBE clr_3or4
+ CMPL BX, $8
+ JBE clr_5through8
+ CMPL BX, $16
+ JBE clr_9through16
+ TESTL $0x4000000, runtime·cpuid_edx(SB) // check for sse2
+ JEQ nosse2
+ PXOR X0, X0
+ CMPL BX, $32
+ JBE clr_17through32
+ CMPL BX, $64
+ JBE clr_33through64
+ CMPL BX, $128
+ JBE clr_65through128
+ CMPL BX, $256
+ JBE clr_129through256
+ // TODO: use branch table and BSR to make this just a single dispatch
+
+clr_loop:
+ MOVOU X0, 0(DI)
+ MOVOU X0, 16(DI)
+ MOVOU X0, 32(DI)
+ MOVOU X0, 48(DI)
+ MOVOU X0, 64(DI)
+ MOVOU X0, 80(DI)
+ MOVOU X0, 96(DI)
+ MOVOU X0, 112(DI)
+ MOVOU X0, 128(DI)
+ MOVOU X0, 144(DI)
+ MOVOU X0, 160(DI)
+ MOVOU X0, 176(DI)
+ MOVOU X0, 192(DI)
+ MOVOU X0, 208(DI)
+ MOVOU X0, 224(DI)
+ MOVOU X0, 240(DI)
+ SUBL $256, BX
+ ADDL $256, DI
+ CMPL BX, $256
+ JAE clr_loop
+ JMP clr_tail
+
+clr_1or2:
+ MOVB AX, (DI)
+ MOVB AX, -1(DI)(BX*1)
+clr_0:
+ RET
+clr_3or4:
+ MOVW AX, (DI)
+ MOVW AX, -2(DI)(BX*1)
+ RET
+clr_5through8:
+ MOVL AX, (DI)
+ MOVL AX, -4(DI)(BX*1)
+ RET
+clr_9through16:
+ MOVL AX, (DI)
+ MOVL AX, 4(DI)
+ MOVL AX, -8(DI)(BX*1)
+ MOVL AX, -4(DI)(BX*1)
+ RET
+clr_17through32:
+ MOVOU X0, (DI)
+ MOVOU X0, -16(DI)(BX*1)
+ RET
+clr_33through64:
+ MOVOU X0, (DI)
+ MOVOU X0, 16(DI)
+ MOVOU X0, -32(DI)(BX*1)
+ MOVOU X0, -16(DI)(BX*1)
+ RET
+clr_65through128:
+ MOVOU X0, (DI)
+ MOVOU X0, 16(DI)
+ MOVOU X0, 32(DI)
+ MOVOU X0, 48(DI)
+ MOVOU X0, -64(DI)(BX*1)
+ MOVOU X0, -48(DI)(BX*1)
+ MOVOU X0, -32(DI)(BX*1)
+ MOVOU X0, -16(DI)(BX*1)
+ RET
+clr_129through256:
+ MOVOU X0, (DI)
+ MOVOU X0, 16(DI)
+ MOVOU X0, 32(DI)
+ MOVOU X0, 48(DI)
+ MOVOU X0, 64(DI)
+ MOVOU X0, 80(DI)
+ MOVOU X0, 96(DI)
+ MOVOU X0, 112(DI)
+ MOVOU X0, -128(DI)(BX*1)
+ MOVOU X0, -112(DI)(BX*1)
+ MOVOU X0, -96(DI)(BX*1)
+ MOVOU X0, -80(DI)(BX*1)
+ MOVOU X0, -64(DI)(BX*1)
+ MOVOU X0, -48(DI)(BX*1)
+ MOVOU X0, -32(DI)(BX*1)
+ MOVOU X0, -16(DI)(BX*1)
+ RET
+nosse2:
+ MOVL BX, CX
+ SHRL $2, CX
+ REP
+ STOSL
+ ANDL $3, BX
+ JNE clr_tail
+ RET
diff --git a/src/pkg/runtime/memclr_amd64.s b/src/pkg/runtime/memclr_amd64.s
new file mode 100644
index 000000000..6b79363b2
--- /dev/null
+++ b/src/pkg/runtime/memclr_amd64.s
@@ -0,0 +1,116 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+#include "../../cmd/ld/textflag.h"
+
+// void runtime·memclr(void*, uintptr)
+TEXT runtime·memclr(SB), NOSPLIT, $0-16
+ MOVQ ptr+0(FP), DI
+ MOVQ n+8(FP), BX
+ XORQ AX, AX
+
+ // MOVOU seems always faster than REP STOSQ.
+clr_tail:
+ TESTQ BX, BX
+ JEQ clr_0
+ CMPQ BX, $2
+ JBE clr_1or2
+ CMPQ BX, $4
+ JBE clr_3or4
+ CMPQ BX, $8
+ JBE clr_5through8
+ CMPQ BX, $16
+ JBE clr_9through16
+ PXOR X0, X0
+ CMPQ BX, $32
+ JBE clr_17through32
+ CMPQ BX, $64
+ JBE clr_33through64
+ CMPQ BX, $128
+ JBE clr_65through128
+ CMPQ BX, $256
+ JBE clr_129through256
+ // TODO: use branch table and BSR to make this just a single dispatch
+ // TODO: for really big clears, use MOVNTDQ.
+
+clr_loop:
+ MOVOU X0, 0(DI)
+ MOVOU X0, 16(DI)
+ MOVOU X0, 32(DI)
+ MOVOU X0, 48(DI)
+ MOVOU X0, 64(DI)
+ MOVOU X0, 80(DI)
+ MOVOU X0, 96(DI)
+ MOVOU X0, 112(DI)
+ MOVOU X0, 128(DI)
+ MOVOU X0, 144(DI)
+ MOVOU X0, 160(DI)
+ MOVOU X0, 176(DI)
+ MOVOU X0, 192(DI)
+ MOVOU X0, 208(DI)
+ MOVOU X0, 224(DI)
+ MOVOU X0, 240(DI)
+ SUBQ $256, BX
+ ADDQ $256, DI
+ CMPQ BX, $256
+ JAE clr_loop
+ JMP clr_tail
+
+clr_1or2:
+ MOVB AX, (DI)
+ MOVB AX, -1(DI)(BX*1)
+clr_0:
+ RET
+clr_3or4:
+ MOVW AX, (DI)
+ MOVW AX, -2(DI)(BX*1)
+ RET
+clr_5through8:
+ MOVL AX, (DI)
+ MOVL AX, -4(DI)(BX*1)
+ RET
+clr_9through16:
+ MOVQ AX, (DI)
+ MOVQ AX, -8(DI)(BX*1)
+ RET
+clr_17through32:
+ MOVOU X0, (DI)
+ MOVOU X0, -16(DI)(BX*1)
+ RET
+clr_33through64:
+ MOVOU X0, (DI)
+ MOVOU X0, 16(DI)
+ MOVOU X0, -32(DI)(BX*1)
+ MOVOU X0, -16(DI)(BX*1)
+ RET
+clr_65through128:
+ MOVOU X0, (DI)
+ MOVOU X0, 16(DI)
+ MOVOU X0, 32(DI)
+ MOVOU X0, 48(DI)
+ MOVOU X0, -64(DI)(BX*1)
+ MOVOU X0, -48(DI)(BX*1)
+ MOVOU X0, -32(DI)(BX*1)
+ MOVOU X0, -16(DI)(BX*1)
+ RET
+clr_129through256:
+ MOVOU X0, (DI)
+ MOVOU X0, 16(DI)
+ MOVOU X0, 32(DI)
+ MOVOU X0, 48(DI)
+ MOVOU X0, 64(DI)
+ MOVOU X0, 80(DI)
+ MOVOU X0, 96(DI)
+ MOVOU X0, 112(DI)
+ MOVOU X0, -128(DI)(BX*1)
+ MOVOU X0, -112(DI)(BX*1)
+ MOVOU X0, -96(DI)(BX*1)
+ MOVOU X0, -80(DI)(BX*1)
+ MOVOU X0, -64(DI)(BX*1)
+ MOVOU X0, -48(DI)(BX*1)
+ MOVOU X0, -32(DI)(BX*1)
+ MOVOU X0, -16(DI)(BX*1)
+ RET
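
The tail cases above avoid per-byte loops by issuing a pair of overlapping stores that together cover any length in the range; the same trick written out in Go for the 5-through-8 case (a sketch of the idea, not the runtime's actual code path):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// clear5through8 zeroes a buffer of length 5..8 with exactly two
	// overlapping 4-byte stores, the same pattern as clr_5through8:
	//	MOVL AX, (DI)
	//	MOVL AX, -4(DI)(BX*1)
	func clear5through8(b []byte) {
		binary.LittleEndian.PutUint32(b[0:4], 0)
		binary.LittleEndian.PutUint32(b[len(b)-4:], 0)
	}

	func main() {
		b := []byte{1, 2, 3, 4, 5, 6, 7}
		clear5through8(b)
		fmt.Println(b) // [0 0 0 0 0 0 0]
	}
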
diff --git a/src/pkg/runtime/memclr_arm.s b/src/pkg/runtime/memclr_arm.s
index d5ff75d7a..b19ea72a3 100644
--- a/src/pkg/runtime/memclr_arm.s
+++ b/src/pkg/runtime/memclr_arm.s
@@ -40,12 +40,6 @@ TEXT runtime·memclr(SB),NOSPLIT,$0-8
CMP $4, R(N) /* need at least 4 bytes to copy */
BLT _1tail
- AND $0xFF, R(0) /* it's a byte */
- SLL $8, R(0), R(TMP) /* replicate to a word */
- ORR R(TMP), R(0)
- SLL $16, R(0), R(TMP)
- ORR R(TMP), R(0)
-
_4align: /* align on 4 */
AND.S $3, R(TO), R(TMP)
BEQ _4aligned
diff --git a/src/pkg/runtime/memclr_plan9_386.s b/src/pkg/runtime/memclr_plan9_386.s
new file mode 100644
index 000000000..9b496785a
--- /dev/null
+++ b/src/pkg/runtime/memclr_plan9_386.s
@@ -0,0 +1,50 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../../cmd/ld/textflag.h"
+
+// void runtime·memclr(void*, uintptr)
+TEXT runtime·memclr(SB), NOSPLIT, $0-8
+ MOVL ptr+0(FP), DI
+ MOVL n+4(FP), BX
+ XORL AX, AX
+
+clr_tail:
+ TESTL BX, BX
+ JEQ clr_0
+ CMPL BX, $2
+ JBE clr_1or2
+ CMPL BX, $4
+ JBE clr_3or4
+ CMPL BX, $8
+ JBE clr_5through8
+ CMPL BX, $16
+ JBE clr_9through16
+ MOVL BX, CX
+ SHRL $2, CX
+ REP
+ STOSL
+ ANDL $3, BX
+ JNE clr_tail
+ RET
+
+clr_1or2:
+ MOVB AX, (DI)
+ MOVB AX, -1(DI)(BX*1)
+clr_0:
+ RET
+clr_3or4:
+ MOVW AX, (DI)
+ MOVW AX, -2(DI)(BX*1)
+ RET
+clr_5through8:
+ MOVL AX, (DI)
+ MOVL AX, -4(DI)(BX*1)
+ RET
+clr_9through16:
+ MOVL AX, (DI)
+ MOVL AX, 4(DI)
+ MOVL AX, -8(DI)(BX*1)
+ MOVL AX, -4(DI)(BX*1)
+ RET
diff --git a/src/pkg/runtime/memclr_plan9_amd64.s b/src/pkg/runtime/memclr_plan9_amd64.s
new file mode 100644
index 000000000..6b33054f5
--- /dev/null
+++ b/src/pkg/runtime/memclr_plan9_amd64.s
@@ -0,0 +1,48 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../../cmd/ld/textflag.h"
+
+// void runtime·memclr(void*, uintptr)
+TEXT runtime·memclr(SB), NOSPLIT, $0-16
+ MOVQ ptr+0(FP), DI
+ MOVQ n+8(FP), BX
+ XORQ AX, AX
+
+clr_tail:
+ TESTQ BX, BX
+ JEQ clr_0
+ CMPQ BX, $2
+ JBE clr_1or2
+ CMPQ BX, $4
+ JBE clr_3or4
+ CMPQ BX, $8
+ JBE clr_5through8
+ CMPQ BX, $16
+ JBE clr_9through16
+ MOVQ BX, CX
+ SHRQ $2, CX
+ REP
+ STOSQ
+ ANDQ $3, BX
+ JNE clr_tail
+ RET
+
+clr_1or2:
+ MOVB AX, (DI)
+ MOVB AX, -1(DI)(BX*1)
+clr_0:
+ RET
+clr_3or4:
+ MOVW AX, (DI)
+ MOVW AX, -2(DI)(BX*1)
+ RET
+clr_5through8:
+ MOVL AX, (DI)
+ MOVL AX, -4(DI)(BX*1)
+ RET
+clr_9through16:
+ MOVQ AX, (DI)
+ MOVQ AX, -8(DI)(BX*1)
+ RET
diff --git a/src/pkg/runtime/memmove_386.s b/src/pkg/runtime/memmove_386.s
index 13d575973..3aed8ad07 100644
--- a/src/pkg/runtime/memmove_386.s
+++ b/src/pkg/runtime/memmove_386.s
@@ -23,6 +23,8 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
+// +build !plan9
+
#include "../../cmd/ld/textflag.h"
TEXT runtime·memmove(SB), NOSPLIT, $0-12
diff --git a/src/pkg/runtime/memmove_amd64.s b/src/pkg/runtime/memmove_amd64.s
index f1641cdb2..5895846db 100644
--- a/src/pkg/runtime/memmove_amd64.s
+++ b/src/pkg/runtime/memmove_amd64.s
@@ -23,6 +23,8 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
+// +build !plan9
+
#include "../../cmd/ld/textflag.h"
// void runtime·memmove(void*, void*, uintptr)
diff --git a/src/pkg/runtime/memmove_nacl_amd64p32.s b/src/pkg/runtime/memmove_nacl_amd64p32.s
new file mode 100644
index 000000000..1b5733112
--- /dev/null
+++ b/src/pkg/runtime/memmove_nacl_amd64p32.s
@@ -0,0 +1,46 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../../cmd/ld/textflag.h"
+
+TEXT runtime·memmove(SB), NOSPLIT, $0-12
+ MOVL to+0(FP), DI
+ MOVL fr+4(FP), SI
+ MOVL n+8(FP), BX
+
+ CMPL SI, DI
+ JLS back
+
+forward:
+ MOVL BX, CX
+ SHRL $3, CX
+ ANDL $7, BX
+ REP; MOVSQ
+ MOVL BX, CX
+ REP; MOVSB
+ RET
+
+back:
+ MOVL SI, CX
+ ADDL BX, CX
+ CMPL CX, DI
+ JLS forward
+
+ ADDL BX, DI
+ ADDL BX, SI
+ STD
+
+ MOVL BX, CX
+ SHRL $3, CX
+ ANDL $7, BX
+ SUBL $8, DI
+ SUBL $8, SI
+ REP; MOVSQ
+ ADDL $7, DI
+ ADDL $7, SI
+ MOVL BX, CX
+ REP; MOVSB
+ CLD
+
+ RET
diff --git a/src/pkg/runtime/memmove_plan9_386.s b/src/pkg/runtime/memmove_plan9_386.s
new file mode 100644
index 000000000..187616cd0
--- /dev/null
+++ b/src/pkg/runtime/memmove_plan9_386.s
@@ -0,0 +1,127 @@
+// Inferno's libkern/memmove-386.s
+// http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+#include "../../cmd/ld/textflag.h"
+
+TEXT runtime·memmove(SB), NOSPLIT, $0-12
+ MOVL to+0(FP), DI
+ MOVL fr+4(FP), SI
+ MOVL n+8(FP), BX
+
+ // REP instructions have a high startup cost, so we handle small sizes
+ // with some straightline code. The REP MOVSL instruction is really fast
+ // for large sizes. The cutover is approximately 1K.
+tail:
+ TESTL BX, BX
+ JEQ move_0
+ CMPL BX, $2
+ JBE move_1or2
+ CMPL BX, $4
+ JBE move_3or4
+ CMPL BX, $8
+ JBE move_5through8
+ CMPL BX, $16
+ JBE move_9through16
+
+/*
+ * check and set for backwards
+ */
+ CMPL SI, DI
+ JLS back
+
+/*
+ * forward copy loop
+ */
+forward:
+ MOVL BX, CX
+ SHRL $2, CX
+ ANDL $3, BX
+
+ REP; MOVSL
+ JMP tail
+/*
+ * check overlap
+ */
+back:
+ MOVL SI, CX
+ ADDL BX, CX
+ CMPL CX, DI
+ JLS forward
+/*
+ * whole thing backwards has
+ * adjusted addresses
+ */
+
+ ADDL BX, DI
+ ADDL BX, SI
+ STD
+
+/*
+ * copy
+ */
+ MOVL BX, CX
+ SHRL $2, CX
+ ANDL $3, BX
+
+ SUBL $4, DI
+ SUBL $4, SI
+ REP; MOVSL
+
+ CLD
+ ADDL $4, DI
+ ADDL $4, SI
+ SUBL BX, DI
+ SUBL BX, SI
+ JMP tail
+
+move_1or2:
+ MOVB (SI), AX
+ MOVB -1(SI)(BX*1), CX
+ MOVB AX, (DI)
+ MOVB CX, -1(DI)(BX*1)
+move_0:
+ RET
+move_3or4:
+ MOVW (SI), AX
+ MOVW -2(SI)(BX*1), CX
+ MOVW AX, (DI)
+ MOVW CX, -2(DI)(BX*1)
+ RET
+move_5through8:
+ MOVL (SI), AX
+ MOVL -4(SI)(BX*1), CX
+ MOVL AX, (DI)
+ MOVL CX, -4(DI)(BX*1)
+ RET
+move_9through16:
+ MOVL (SI), AX
+ MOVL 4(SI), CX
+ MOVL -8(SI)(BX*1), DX
+ MOVL -4(SI)(BX*1), BP
+ MOVL AX, (DI)
+ MOVL CX, 4(DI)
+ MOVL DX, -8(DI)(BX*1)
+ MOVL BP, -4(DI)(BX*1)
+ RET
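The forward/backward decision documented in the comments above is the heart of memmove's overlap handling, shared by the nacl and amd64 variants as well: copy front-to-back unless the destination starts at or above the source and the two ranges actually overlap, in which case set the direction flag (STD) and copy back-to-front. A small runnable Go sketch (overlapSafeForward is a hypothetical name, not part of the change); Go's built-in copy has the same memmove semantics:

	package main

	import "fmt"

	// overlapSafeForward reports whether a front-to-back copy is safe,
	// mirroring the assembly: forward when src > dst (the JLS to back is
	// not taken), or when the ranges are disjoint (src+n <= dst, the
	// "check overlap" test at the back label).
	func overlapSafeForward(dst, src, n uintptr) bool {
		return src > dst || src+n <= dst
	}

	func main() {
		b := []byte("abcdef")
		copy(b[2:], b[:4])     // overlapping move with dst above src
		fmt.Println(string(b)) // "ababcd": memmove semantics preserved
	}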
diff --git a/src/pkg/runtime/memmove_plan9_amd64.s b/src/pkg/runtime/memmove_plan9_amd64.s
new file mode 100644
index 000000000..60108273c
--- /dev/null
+++ b/src/pkg/runtime/memmove_plan9_amd64.s
@@ -0,0 +1,126 @@
+// Derived from Inferno's libkern/memmove-386.s (adapted for amd64)
+// http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+#include "../../cmd/ld/textflag.h"
+
+// void runtime·memmove(void*, void*, uintptr)
+TEXT runtime·memmove(SB), NOSPLIT, $0-24
+
+ MOVQ to+0(FP), DI
+ MOVQ fr+8(FP), SI
+ MOVQ n+16(FP), BX
+
+ // REP instructions have a high startup cost, so we handle small sizes
+ // with some straightline code. The REP MOVSQ instruction is really fast
+ // for large sizes. The cutover is approximately 1K.
+tail:
+ TESTQ BX, BX
+ JEQ move_0
+ CMPQ BX, $2
+ JBE move_1or2
+ CMPQ BX, $4
+ JBE move_3or4
+ CMPQ BX, $8
+ JBE move_5through8
+ CMPQ BX, $16
+ JBE move_9through16
+
+/*
+ * check and set for backwards
+ */
+ CMPQ SI, DI
+ JLS back
+
+/*
+ * forward copy loop
+ */
+forward:
+ MOVQ BX, CX
+ SHRQ $3, CX
+ ANDQ $7, BX
+
+ REP; MOVSQ
+ JMP tail
+
+back:
+/*
+ * check overlap
+ */
+ MOVQ SI, CX
+ ADDQ BX, CX
+ CMPQ CX, DI
+ JLS forward
+
+/*
+ * whole thing backwards has
+ * adjusted addresses
+ */
+ ADDQ BX, DI
+ ADDQ BX, SI
+ STD
+
+/*
+ * copy
+ */
+ MOVQ BX, CX
+ SHRQ $3, CX
+ ANDQ $7, BX
+
+ SUBQ $8, DI
+ SUBQ $8, SI
+ REP; MOVSQ
+
+ CLD
+ ADDQ $8, DI
+ ADDQ $8, SI
+ SUBQ BX, DI
+ SUBQ BX, SI
+ JMP tail
+
+move_1or2:
+ MOVB (SI), AX
+ MOVB -1(SI)(BX*1), CX
+ MOVB AX, (DI)
+ MOVB CX, -1(DI)(BX*1)
+move_0:
+ RET
+move_3or4:
+ MOVW (SI), AX
+ MOVW -2(SI)(BX*1), CX
+ MOVW AX, (DI)
+ MOVW CX, -2(DI)(BX*1)
+ RET
+move_5through8:
+ MOVL (SI), AX
+ MOVL -4(SI)(BX*1), CX
+ MOVL AX, (DI)
+ MOVL CX, -4(DI)(BX*1)
+ RET
+move_9through16:
+ MOVQ (SI), AX
+ MOVQ -8(SI)(BX*1), CX
+ MOVQ AX, (DI)
+ MOVQ CX, -8(DI)(BX*1)
+ RET
diff --git a/src/pkg/runtime/memmove_test.go b/src/pkg/runtime/memmove_test.go
index 9525f0682..540f0feb5 100644
--- a/src/pkg/runtime/memmove_test.go
+++ b/src/pkg/runtime/memmove_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ . "runtime"
"testing"
)
@@ -80,7 +81,7 @@ func TestMemmoveAlias(t *testing.T) {
}
}
-func bmMemmove(n int, b *testing.B) {
+func bmMemmove(b *testing.B, n int) {
x := make([]byte, n)
y := make([]byte, n)
b.SetBytes(int64(n))
@@ -89,28 +90,154 @@ func bmMemmove(n int, b *testing.B) {
}
}
-func BenchmarkMemmove0(b *testing.B) { bmMemmove(0, b) }
-func BenchmarkMemmove1(b *testing.B) { bmMemmove(1, b) }
-func BenchmarkMemmove2(b *testing.B) { bmMemmove(2, b) }
-func BenchmarkMemmove3(b *testing.B) { bmMemmove(3, b) }
-func BenchmarkMemmove4(b *testing.B) { bmMemmove(4, b) }
-func BenchmarkMemmove5(b *testing.B) { bmMemmove(5, b) }
-func BenchmarkMemmove6(b *testing.B) { bmMemmove(6, b) }
-func BenchmarkMemmove7(b *testing.B) { bmMemmove(7, b) }
-func BenchmarkMemmove8(b *testing.B) { bmMemmove(8, b) }
-func BenchmarkMemmove9(b *testing.B) { bmMemmove(9, b) }
-func BenchmarkMemmove10(b *testing.B) { bmMemmove(10, b) }
-func BenchmarkMemmove11(b *testing.B) { bmMemmove(11, b) }
-func BenchmarkMemmove12(b *testing.B) { bmMemmove(12, b) }
-func BenchmarkMemmove13(b *testing.B) { bmMemmove(13, b) }
-func BenchmarkMemmove14(b *testing.B) { bmMemmove(14, b) }
-func BenchmarkMemmove15(b *testing.B) { bmMemmove(15, b) }
-func BenchmarkMemmove16(b *testing.B) { bmMemmove(16, b) }
-func BenchmarkMemmove32(b *testing.B) { bmMemmove(32, b) }
-func BenchmarkMemmove64(b *testing.B) { bmMemmove(64, b) }
-func BenchmarkMemmove128(b *testing.B) { bmMemmove(128, b) }
-func BenchmarkMemmove256(b *testing.B) { bmMemmove(256, b) }
-func BenchmarkMemmove512(b *testing.B) { bmMemmove(512, b) }
-func BenchmarkMemmove1024(b *testing.B) { bmMemmove(1024, b) }
-func BenchmarkMemmove2048(b *testing.B) { bmMemmove(2048, b) }
-func BenchmarkMemmove4096(b *testing.B) { bmMemmove(4096, b) }
+func BenchmarkMemmove0(b *testing.B) { bmMemmove(b, 0) }
+func BenchmarkMemmove1(b *testing.B) { bmMemmove(b, 1) }
+func BenchmarkMemmove2(b *testing.B) { bmMemmove(b, 2) }
+func BenchmarkMemmove3(b *testing.B) { bmMemmove(b, 3) }
+func BenchmarkMemmove4(b *testing.B) { bmMemmove(b, 4) }
+func BenchmarkMemmove5(b *testing.B) { bmMemmove(b, 5) }
+func BenchmarkMemmove6(b *testing.B) { bmMemmove(b, 6) }
+func BenchmarkMemmove7(b *testing.B) { bmMemmove(b, 7) }
+func BenchmarkMemmove8(b *testing.B) { bmMemmove(b, 8) }
+func BenchmarkMemmove9(b *testing.B) { bmMemmove(b, 9) }
+func BenchmarkMemmove10(b *testing.B) { bmMemmove(b, 10) }
+func BenchmarkMemmove11(b *testing.B) { bmMemmove(b, 11) }
+func BenchmarkMemmove12(b *testing.B) { bmMemmove(b, 12) }
+func BenchmarkMemmove13(b *testing.B) { bmMemmove(b, 13) }
+func BenchmarkMemmove14(b *testing.B) { bmMemmove(b, 14) }
+func BenchmarkMemmove15(b *testing.B) { bmMemmove(b, 15) }
+func BenchmarkMemmove16(b *testing.B) { bmMemmove(b, 16) }
+func BenchmarkMemmove32(b *testing.B) { bmMemmove(b, 32) }
+func BenchmarkMemmove64(b *testing.B) { bmMemmove(b, 64) }
+func BenchmarkMemmove128(b *testing.B) { bmMemmove(b, 128) }
+func BenchmarkMemmove256(b *testing.B) { bmMemmove(b, 256) }
+func BenchmarkMemmove512(b *testing.B) { bmMemmove(b, 512) }
+func BenchmarkMemmove1024(b *testing.B) { bmMemmove(b, 1024) }
+func BenchmarkMemmove2048(b *testing.B) { bmMemmove(b, 2048) }
+func BenchmarkMemmove4096(b *testing.B) { bmMemmove(b, 4096) }
+
+func TestMemclr(t *testing.T) {
+ size := 512
+ if testing.Short() {
+ size = 128 + 16
+ }
+ mem := make([]byte, size)
+ for i := 0; i < size; i++ {
+ mem[i] = 0xee
+ }
+ for n := 0; n < size; n++ {
+ for x := 0; x <= size-n; x++ { // offset in mem
+ MemclrBytes(mem[x : x+n])
+ for i := 0; i < x; i++ {
+ if mem[i] != 0xee {
+ t.Fatalf("overwrite prefix mem[%d] = %d", i, mem[i])
+ }
+ }
+ for i := x; i < x+n; i++ {
+ if mem[i] != 0 {
+ t.Fatalf("failed clear mem[%d] = %d", i, mem[i])
+ }
+ mem[i] = 0xee
+ }
+ for i := x + n; i < size; i++ {
+ if mem[i] != 0xee {
+ t.Fatalf("overwrite suffix mem[%d] = %d", i, mem[i])
+ }
+ }
+ }
+ }
+}
+
+func bmMemclr(b *testing.B, n int) {
+ x := make([]byte, n)
+ b.SetBytes(int64(n))
+ for i := 0; i < b.N; i++ {
+ MemclrBytes(x)
+ }
+}
+func BenchmarkMemclr5(b *testing.B) { bmMemclr(b, 5) }
+func BenchmarkMemclr16(b *testing.B) { bmMemclr(b, 16) }
+func BenchmarkMemclr64(b *testing.B) { bmMemclr(b, 64) }
+func BenchmarkMemclr256(b *testing.B) { bmMemclr(b, 256) }
+func BenchmarkMemclr4096(b *testing.B) { bmMemclr(b, 4096) }
+func BenchmarkMemclr65536(b *testing.B) { bmMemclr(b, 65536) }
+
+func BenchmarkClearFat32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [32]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [64]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat128(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [128]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat256(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [256]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat512(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [512]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat1024(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [1024]byte
+ _ = x
+ }
+}
+
+func BenchmarkCopyFat32(b *testing.B) {
+ var x [32 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat64(b *testing.B) {
+ var x [64 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat128(b *testing.B) {
+ var x [128 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat256(b *testing.B) {
+ var x [256 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat512(b *testing.B) {
+ var x [512 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat1024(b *testing.B) {
+ var x [1024 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
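The new TestMemclr and bmMemclr reach runtime·memclr through MemclrBytes, which the dot-import `. "runtime"` added above brings into scope unqualified. MemclrBytes itself comes from the runtime's export_test.go (also touched in this change); a minimal sketch of the pattern, assuming a hypothetical internal memclrBytes helper (the real file differs in detail):

	// export_test.go compiles as part of package runtime, so it can see
	// unexported internals and re-export them for the external
	// runtime_test package.
	package runtime

	func MemclrBytes(b []byte) {
		memclrBytes(b) // hypothetical internal helper name
	}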
diff --git a/src/pkg/runtime/mfinal.c b/src/pkg/runtime/mfinal.c
deleted file mode 100644
index 3e524d3e0..000000000
--- a/src/pkg/runtime/mfinal.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "type.h"
-
-enum { debug = 0 };
-
-typedef struct Fin Fin;
-struct Fin
-{
- FuncVal *fn;
- uintptr nret;
- Type *fint;
- PtrType *ot;
-};
-
-// Finalizer hash table. Direct hash, linear scan, at most 3/4 full.
-// Table size is power of 3 so that hash can be key % max.
-// Key[i] == (void*)-1 denotes free but formerly occupied entry
-// (doesn't stop the linear scan).
-// Key and val are separate tables because the garbage collector
-// must be instructed to ignore the pointers in key but follow the
-// pointers in val.
-typedef struct Fintab Fintab;
-struct Fintab
-{
- Lock;
- void **key;
- Fin *val;
- int32 nkey; // number of non-nil entries in key
- int32 ndead; // number of dead (-1) entries in key
- int32 max; // size of key, val allocations
-};
-
-#define TABSZ 17
-#define TAB(p) (&fintab[((uintptr)(p)>>3)%TABSZ])
-
-static struct {
- Fintab;
- uint8 pad[CacheLineSize - sizeof(Fintab)];
-} fintab[TABSZ];
-
-static void
-addfintab(Fintab *t, void *k, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot)
-{
- int32 i, j;
-
- i = (uintptr)k % (uintptr)t->max;
- for(j=0; j<t->max; j++) {
- if(t->key[i] == nil) {
- t->nkey++;
- goto ret;
- }
- if(t->key[i] == (void*)-1) {
- t->ndead--;
- goto ret;
- }
- if(++i == t->max)
- i = 0;
- }
-
- // cannot happen - table is known to be non-full
- runtime·throw("finalizer table inconsistent");
-
-ret:
- t->key[i] = k;
- t->val[i].fn = fn;
- t->val[i].nret = nret;
- t->val[i].fint = fint;
- t->val[i].ot = ot;
-}
-
-static bool
-lookfintab(Fintab *t, void *k, bool del, Fin *f)
-{
- int32 i, j;
-
- if(t->max == 0)
- return false;
- i = (uintptr)k % (uintptr)t->max;
- for(j=0; j<t->max; j++) {
- if(t->key[i] == nil)
- return false;
- if(t->key[i] == k) {
- if(f)
- *f = t->val[i];
- if(del) {
- t->key[i] = (void*)-1;
- t->val[i].fn = nil;
- t->val[i].nret = 0;
- t->val[i].ot = nil;
- t->ndead++;
- }
- return true;
- }
- if(++i == t->max)
- i = 0;
- }
-
- // cannot happen - table is known to be non-full
- runtime·throw("finalizer table inconsistent");
- return false;
-}
-
-static void
-resizefintab(Fintab *tab)
-{
- Fintab newtab;
- void *k;
- int32 i;
-
- runtime·memclr((byte*)&newtab, sizeof newtab);
- newtab.max = tab->max;
- if(newtab.max == 0)
- newtab.max = 3*3*3;
- else if(tab->ndead < tab->nkey/2) {
- // grow table if not many dead values.
- // otherwise just rehash into table of same size.
- newtab.max *= 3;
- }
-
- newtab.key = runtime·mallocgc(newtab.max*sizeof newtab.key[0], 0, FlagNoInvokeGC|FlagNoScan);
- newtab.val = runtime·mallocgc(newtab.max*sizeof newtab.val[0], 0, FlagNoInvokeGC);
-
- for(i=0; i<tab->max; i++) {
- k = tab->key[i];
- if(k != nil && k != (void*)-1)
- addfintab(&newtab, k, tab->val[i].fn, tab->val[i].nret, tab->val[i].fint, tab->val[i].ot);
- }
-
- runtime·free(tab->key);
- runtime·free(tab->val);
-
- tab->key = newtab.key;
- tab->val = newtab.val;
- tab->nkey = newtab.nkey;
- tab->ndead = newtab.ndead;
- tab->max = newtab.max;
-}
-
-bool
-runtime·addfinalizer(void *p, FuncVal *f, uintptr nret, Type *fint, PtrType *ot)
-{
- Fintab *tab;
- byte *base;
-
- if(debug) {
- if(!runtime·mlookup(p, &base, nil, nil) || p != base)
- runtime·throw("addfinalizer on invalid pointer");
- }
-
- tab = TAB(p);
- runtime·lock(tab);
- if(f == nil) {
- lookfintab(tab, p, true, nil);
- runtime·unlock(tab);
- return true;
- }
-
- if(lookfintab(tab, p, false, nil)) {
- runtime·unlock(tab);
- return false;
- }
-
- if(tab->nkey >= tab->max/2+tab->max/4) {
- // keep table at most 3/4 full:
- // allocate new table and rehash.
- resizefintab(tab);
- }
-
- addfintab(tab, p, f, nret, fint, ot);
- runtime·setblockspecial(p, true);
- runtime·unlock(tab);
- return true;
-}
-
-// get finalizer; if del, delete finalizer.
-// caller is responsible for updating RefHasFinalizer (special) bit.
-bool
-runtime·getfinalizer(void *p, bool del, FuncVal **fn, uintptr *nret, Type **fint, PtrType **ot)
-{
- Fintab *tab;
- bool res;
- Fin f;
-
- tab = TAB(p);
- runtime·lock(tab);
- res = lookfintab(tab, p, del, &f);
- runtime·unlock(tab);
- if(res==false)
- return false;
- *fn = f.fn;
- *nret = f.nret;
- *fint = f.fint;
- *ot = f.ot;
- return true;
-}
-
-void
-runtime·walkfintab(void (*fn)(void*))
-{
- void **key;
- void **ekey;
- int32 i;
-
- for(i=0; i<TABSZ; i++) {
- runtime·lock(&fintab[i]);
- key = fintab[i].key;
- ekey = key + fintab[i].max;
- for(; key < ekey; key++)
- if(*key != nil && *key != ((void*)-1))
- fn(*key);
- runtime·unlock(&fintab[i]);
- }
-}
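The deleted mfinal.c kept finalizers in a hand-rolled open-addressing table: direct hash, linear probing, kept at most 3/4 full, with (void*)-1 as a tombstone so probe chains over deleted slots stay intact. A minimal Go sketch of that probing discipline (names are illustrative, not from the original):

	// tombstone plays the role of (void*)-1: the slot was occupied once,
	// so a lookup must probe past it rather than stop.
	const tombstone = ^uintptr(0)

	type finTable struct {
		key []uintptr // 0 = never used; tombstone = deleted
		val []func()
	}

	// lookup finds k by linear scan from its hash slot, as lookfintab
	// did: a never-used slot ends the probe, a tombstone or mismatch
	// continues it, and del turns a hit into a tombstone.
	func (t *finTable) lookup(k uintptr, del bool) (func(), bool) {
		n := uintptr(len(t.key))
		if n == 0 {
			return nil, false
		}
		for j, i := uintptr(0), k%n; j < n; j, i = j+1, (i+1)%n {
			switch t.key[i] {
			case 0:
				return nil, false // free and never occupied: not present
			case k:
				f := t.val[i]
				if del {
					t.key[i] = tombstone // keep later probe chains reachable
					t.val[i] = nil
				}
				return f, true
			}
		}
		return nil, false // unreachable while the table stays under 3/4 full
	}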
diff --git a/src/pkg/runtime/mfinal_test.go b/src/pkg/runtime/mfinal_test.go
index 6efef9bb0..6b53888ab 100644
--- a/src/pkg/runtime/mfinal_test.go
+++ b/src/pkg/runtime/mfinal_test.go
@@ -6,10 +6,9 @@ package runtime_test
import (
"runtime"
- "sync"
- "sync/atomic"
"testing"
"time"
+ "unsafe"
)
type Tintptr *int // assignable to *int
@@ -46,13 +45,15 @@ func TestFinalizerType(t *testing.T) {
}
for _, tt := range finalizerTests {
+ done := make(chan bool, 1)
go func() {
v := new(int)
*v = 97531
runtime.SetFinalizer(tt.convert(v), tt.finalizer)
v = nil
+ done <- true
}()
- time.Sleep(1 * time.Second)
+ <-done
runtime.GC()
select {
case <-ch:
@@ -73,6 +74,7 @@ func TestFinalizerInterfaceBig(t *testing.T) {
t.Skipf("Skipping on non-amd64 machine")
}
ch := make(chan bool)
+ done := make(chan bool, 1)
go func() {
v := &bigValue{0xDEADBEEFDEADBEEF, true, "It matters not how strait the gate"}
old := *v
@@ -87,8 +89,9 @@ func TestFinalizerInterfaceBig(t *testing.T) {
close(ch)
})
v = nil
+ done <- true
}()
- time.Sleep(1 * time.Second)
+ <-done
runtime.GC()
select {
case <-ch:
@@ -100,51 +103,137 @@ func TestFinalizerInterfaceBig(t *testing.T) {
func fin(v *int) {
}
+// Verify that we at least don't crash. See golang.org/issue/6857.
+func TestFinalizerZeroSizedStruct(t *testing.T) {
+ type Z struct{}
+ z := new(Z)
+ runtime.SetFinalizer(z, func(*Z) {})
+}
+
func BenchmarkFinalizer(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- var wg sync.WaitGroup
- wg.Add(procs)
- for p := 0; p < procs; p++ {
- go func() {
- var data [CallsPerSched]*int
- for i := 0; i < CallsPerSched; i++ {
- data[i] = new(int)
+ const Batch = 1000
+ b.RunParallel(func(pb *testing.PB) {
+ var data [Batch]*int
+ for i := 0; i < Batch; i++ {
+ data[i] = new(int)
+ }
+ for pb.Next() {
+ for i := 0; i < Batch; i++ {
+ runtime.SetFinalizer(data[i], fin)
}
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for i := 0; i < CallsPerSched; i++ {
- runtime.SetFinalizer(data[i], fin)
- }
- for i := 0; i < CallsPerSched; i++ {
- runtime.SetFinalizer(data[i], nil)
- }
+ for i := 0; i < Batch; i++ {
+ runtime.SetFinalizer(data[i], nil)
}
- wg.Done()
- }()
- }
- wg.Wait()
+ }
+ })
}
func BenchmarkFinalizerRun(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- var wg sync.WaitGroup
- wg.Add(procs)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for i := 0; i < CallsPerSched; i++ {
- v := new(int)
- runtime.SetFinalizer(v, fin)
- }
- runtime.GC()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v := new(int)
+ runtime.SetFinalizer(v, fin)
+ }
+ })
+}
+
+// One chunk must be exactly one sizeclass in size.
+// It should be a sizeclass not used much by others, so we
+// have a greater chance of finding adjacent ones.
+// size class 19: 320 byte objects, 25 per page, 1 page alloc at a time
+const objsize = 320
+
+type objtype [objsize]byte
+
+func adjChunks() (*objtype, *objtype) {
+ var s []*objtype
+
+ for {
+ c := new(objtype)
+ for _, d := range s {
+ if uintptr(unsafe.Pointer(c))+unsafe.Sizeof(*c) == uintptr(unsafe.Pointer(d)) {
+ return c, d
}
- wg.Done()
- }()
+ if uintptr(unsafe.Pointer(d))+unsafe.Sizeof(*c) == uintptr(unsafe.Pointer(c)) {
+ return d, c
+ }
+ }
+ s = append(s, c)
+ }
+}
+
+// Make sure an empty slice on the stack doesn't pin the next object in memory.
+func TestEmptySlice(t *testing.T) {
+ if true { // disable until bug 7564 is fixed.
+ return
+ }
+ x, y := adjChunks()
+
+ // the pointer inside xs points to y.
+ xs := x[objsize:] // change objsize to objsize-1 and the test passes
+
+ fin := make(chan bool, 1)
+ runtime.SetFinalizer(y, func(z *objtype) { fin <- true })
+ runtime.GC()
+ select {
+ case <-fin:
+ case <-time.After(4 * time.Second):
+ t.Errorf("finalizer of next object in memory didn't run")
+ }
+ xsglobal = xs // keep empty slice alive until here
+}
+
+var xsglobal []byte
+
+func adjStringChunk() (string, *objtype) {
+ b := make([]byte, objsize)
+ for {
+ s := string(b)
+ t := new(objtype)
+ p := *(*uintptr)(unsafe.Pointer(&s))
+ q := uintptr(unsafe.Pointer(t))
+ if p+objsize == q {
+ return s, t
+ }
+ }
+}
+
+// Make sure an empty string on the stack doesn't pin the next object in memory.
+func TestEmptyString(t *testing.T) {
+ x, y := adjStringChunk()
+
+ ss := x[objsize:] // change objsize to objsize-1 and the test passes
+ fin := make(chan bool, 1)
+ // set finalizer on string contents of y
+ runtime.SetFinalizer(y, func(z *objtype) { fin <- true })
+ runtime.GC()
+ select {
+ case <-fin:
+ case <-time.After(4 * time.Second):
+ t.Errorf("finalizer of next string in memory didn't run")
}
- wg.Wait()
+ ssglobal = ss // keep 0-length string live until here
+}
+
+var ssglobal string
+
+// Test for issue 7656.
+func TestFinalizerOnGlobal(t *testing.T) {
+ runtime.SetFinalizer(Foo1, func(p *Object1) {})
+ runtime.SetFinalizer(Foo2, func(p *Object2) {})
+ runtime.SetFinalizer(Foo1, nil)
+ runtime.SetFinalizer(Foo2, nil)
}
+
+type Object1 struct {
+ Something []byte
+}
+
+type Object2 struct {
+ Something byte
+}
+
+var (
+ Foo2 = &Object2{}
+ Foo1 = &Object1{}
+)
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 761f128a8..392da535b 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -2,13 +2,60 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Garbage collector.
+// Garbage collector (GC).
+//
+// GC is:
+// - mark&sweep
+// - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc.)
+// - parallel (up to MaxGcproc threads)
+// - partially concurrent (mark is stop-the-world, while sweep is concurrent)
+// - non-moving/non-compacting
+// - full (non-partial)
+//
+// GC rate.
+// Next GC is after we've allocated an extra amount of memory proportional to
+// the amount already in use. The proportion is controlled by GOGC environment variable
+// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
+// (this mark is tracked in next_gc variable). This keeps the GC cost in linear
+// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
+// (and also the amount of extra memory used).
+//
+// Concurrent sweep.
+// The sweep phase proceeds concurrently with normal program execution.
+// The heap is swept span-by-span both lazily (when a goroutine needs another span)
+// and concurrently in a background goroutine (this helps programs that are not CPU bound).
+// However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
+// and so the next_gc calculation is tricky; it happens as follows.
+// At the end of the stop-the-world phase next_gc is conservatively set based on total
+// heap size; all spans are marked as "needs sweeping".
+// Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
+// The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
+// closer to the target value. However, this is not enough to avoid over-allocating memory.
+// Consider that a goroutine wants to allocate a new span for a large object and
+// there are no free swept spans, but there are small-object unswept spans.
+// If the goroutine naively allocates a new span, it can surpass the yet-unknown
+// target next_gc value. In order to prevent such cases (1) when a goroutine needs
+// to allocate a new small-object span, it sweeps small-object spans for the same
+// object size until it frees at least one object; (2) when a goroutine needs to
+// allocate a large-object span from the heap, it sweeps spans until it frees at least
+// that many pages into the heap. Together these two measures ensure that we don't surpass
+// target next_gc value by a large margin. There is an exception: if a goroutine sweeps
+// and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
+// but there can still be other one-page unswept spans which could be combined into a two-page span.
+// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
+// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
+// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
+// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
+// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
+// The finalizer goroutine is kicked off only when all spans are swept.
+// When the next GC starts, it sweeps all not-yet-swept spans (if any).
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "stack.h"
#include "mgc0.h"
+#include "chan.h"
#include "race.h"
#include "type.h"
#include "typekind.h"
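The pacing rule in the new comment block above reduces to simple arithmetic; a sketch of the two accounting steps it describes (nextGC and sweepCredit are illustrative names, not runtime functions):

	// nextGC computes the heap size that triggers the next collection:
	// the live heap plus a GOGC-percent margin. With GOGC=100 and 4 MB
	// live, nextGC returns 8 MB, matching the example in the comment.
	func nextGC(live, gogc uint64) uint64 {
		return live + live*gogc/100
	}

	// sweepCredit models the concurrent-sweep adjustment: each swept span
	// lowers next_gc by GOGC-percent of the memory it freed, walking the
	// conservative post-mark estimate down toward the true target.
	func sweepCredit(nextgc, freed, gogc uint64) uint64 {
		return nextgc - freed*gogc/100
	}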
@@ -17,14 +64,11 @@
enum {
Debug = 0,
- DebugMark = 0, // run second pass to check mark
CollectStats = 0,
- ScanStackByFrames = 0,
- IgnorePreciseGC = 0,
+ ConcurrentSweep = 1,
- // Four bits per word (see #defines below).
- wordsPerBitmapWord = sizeof(void*)*8/4,
- bitShift = sizeof(void*)*8/4,
+ WorkbufSize = 16*1024,
+ FinBlockSize = 4*1024,
handoffThreshold = 4,
IntermediateBufferCapacity = 64,
@@ -34,46 +78,50 @@ enum {
LOOP = 2,
PC_BITS = PRECISE | LOOP,
- // Pointer map
- BitsPerPointer = 2,
- BitsNoPointer = 0,
- BitsPointer = 1,
- BitsIface = 2,
- BitsEface = 3,
+ RootData = 0,
+ RootBss = 1,
+ RootFinalizers = 2,
+ RootSpanTypes = 3,
+ RootFlushCaches = 4,
+ RootCount = 5,
};
-// Bits in per-word bitmap.
-// #defines because enum might not be able to hold the values.
-//
-// Each word in the bitmap describes wordsPerBitmapWord words
-// of heap memory. There are 4 bitmap bits dedicated to each heap word,
-// so on a 64-bit system there is one bitmap word per 16 heap words.
-// The bits in the word are packed together by type first, then by
-// heap location, so each 64-bit bitmap word consists of, from top to bottom,
-// the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits,
-// then the 16 bitNoScan/bitBlockBoundary bits, then the 16 bitAllocated bits.
-// This layout makes it easier to iterate over the bits of a given type.
-//
-// The bitmap starts at mheap.arena_start and extends *backward* from
-// there. On a 64-bit system the off'th word in the arena is tracked by
-// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
-// the only difference is that the divisor is 8.)
-//
-// To pull out the bits corresponding to a given pointer p, we use:
-//
-// off = p - (uintptr*)mheap.arena_start; // word offset
-// b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
-// shift = off % wordsPerBitmapWord
-// bits = *b >> shift;
-// /* then test bits & bitAllocated, bits & bitMarked, etc. */
-//
-#define bitAllocated ((uintptr)1<<(bitShift*0))
-#define bitNoScan ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */
-#define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */
-#define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */
-#define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set */
+#define GcpercentUnknown (-2)
+
+// Initialized from $GOGC. GOGC=off means no gc.
+static int32 gcpercent = GcpercentUnknown;
+
+static FuncVal* poolcleanup;
+
+void
+sync·runtime_registerPoolCleanup(FuncVal *f)
+{
+ poolcleanup = f;
+}
+
+static void
+clearpools(void)
+{
+ P *p, **pp;
+ MCache *c;
+ int32 i;
+
+ // clear sync.Pool's
+ if(poolcleanup != nil)
+ reflect·call(poolcleanup, nil, 0, 0);
-#define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
+ for(pp=runtime·allp; p=*pp; pp++) {
+ // clear tinyalloc pool
+ c = p->mcache;
+ if(c != nil) {
+ c->tiny = nil;
+ c->tinysize = 0;
+ }
+ // clear defer pools
+ for(i=0; i<nelem(p->deferpool); i++)
+ p->deferpool[i] = nil;
+ }
+}
// Holding worldsema grants an M the right to try to stop the world.
// The procedure is:
@@ -98,11 +146,10 @@ struct Obj
uintptr ti; // type info
};
-// The size of Workbuf is N*PageSize.
typedef struct Workbuf Workbuf;
struct Workbuf
{
-#define SIZE (2*PageSize-sizeof(LFNode)-sizeof(uintptr))
+#define SIZE (WorkbufSize-sizeof(LFNode)-sizeof(uintptr))
LFNode node; // must be first
uintptr nobj;
Obj obj[SIZE/sizeof(Obj) - 1];
@@ -138,39 +185,44 @@ extern byte ebss[];
extern byte gcdata[];
extern byte gcbss[];
-static G *fing;
-static FinBlock *finq; // list of finalizers that are to be executed
-static FinBlock *finc; // cache of free blocks
-static FinBlock *allfin; // list of all blocks
-static Lock finlock;
-static int32 fingwait;
+static Lock finlock; // protects the following variables
+static FinBlock *finq; // list of finalizers that are to be executed
+static FinBlock *finc; // cache of free blocks
+static FinBlock *allfin; // list of all blocks
+bool runtime·fingwait;
+bool runtime·fingwake;
+
+static Lock gclock;
+static G* fing;
-static void runfinq(void);
+static void runfinq(void);
+static void bgsweep(void);
static Workbuf* getempty(Workbuf*);
static Workbuf* getfull(Workbuf*);
static void putempty(Workbuf*);
static Workbuf* handoff(Workbuf*);
static void gchelperstart(void);
+static void flushallmcaches(void);
+static bool scanframe(Stkframe *frame, void *wbufp);
+static void addstackroots(G *gp, Workbuf **wbufp);
+
+static FuncVal runfinqv = {runfinq};
+static FuncVal bgsweepv = {bgsweep};
static struct {
uint64 full; // lock-free list of full blocks
uint64 empty; // lock-free list of empty blocks
byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
uint32 nproc;
+ int64 tstart;
volatile uint32 nwait;
volatile uint32 ndone;
- volatile uint32 debugmarkdone;
Note alldone;
ParFor *markfor;
- ParFor *sweepfor;
Lock;
byte *chunk;
uintptr nchunk;
-
- Obj *roots;
- uint32 nroot;
- uint32 rootcap;
} work;
enum {
@@ -207,6 +259,8 @@ static struct {
uint64 foundword;
uint64 foundspan;
} markonly;
+ uint32 nbgsweep;
+ uint32 npausesweep;
} gcstats;
// markonly marks an object. It returns true if the object
@@ -260,8 +314,7 @@ markonly(void *obj)
// (Manually inlined copy of MHeap_LookupMaybe.)
k = (uintptr)obj>>PageShift;
x = k;
- if(sizeof(void*) == 8)
- x -= (uintptr)runtime·mheap.arena_start>>PageShift;
+ x -= (uintptr)runtime·mheap.arena_start>>PageShift;
s = runtime·mheap.spans[x];
if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse)
return false;
@@ -317,6 +370,24 @@ struct PtrTarget
uintptr ti;
};
+typedef struct Scanbuf Scanbuf;
+struct Scanbuf
+{
+ struct {
+ PtrTarget *begin;
+ PtrTarget *end;
+ PtrTarget *pos;
+ } ptr;
+ struct {
+ Obj *begin;
+ Obj *end;
+ Obj *pos;
+ } obj;
+ Workbuf *wbuf;
+ Obj *wp;
+ uintptr nobj;
+};
+
typedef struct BufferList BufferList;
struct BufferList
{
@@ -350,7 +421,7 @@ static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
// flushptrbuf
// (find block start, mark and enqueue)
static void
-flushptrbuf(PtrTarget *ptrbuf, PtrTarget **ptrbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj)
+flushptrbuf(Scanbuf *sbuf)
{
byte *p, *arena_start, *obj;
uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti, n;
@@ -358,17 +429,19 @@ flushptrbuf(PtrTarget *ptrbuf, PtrTarget **ptrbufpos, Obj **_wp, Workbuf **_wbuf
PageID k;
Obj *wp;
Workbuf *wbuf;
+ PtrTarget *ptrbuf;
PtrTarget *ptrbuf_end;
arena_start = runtime·mheap.arena_start;
- wp = *_wp;
- wbuf = *_wbuf;
- nobj = *_nobj;
+ wp = sbuf->wp;
+ wbuf = sbuf->wbuf;
+ nobj = sbuf->nobj;
- ptrbuf_end = *ptrbufpos;
- n = ptrbuf_end - ptrbuf;
- *ptrbufpos = ptrbuf;
+ ptrbuf = sbuf->ptr.begin;
+ ptrbuf_end = sbuf->ptr.pos;
+ n = ptrbuf_end - sbuf->ptr.begin;
+ sbuf->ptr.pos = sbuf->ptr.begin;
if(CollectStats) {
runtime·xadd64(&gcstats.ptr.sum, n);
@@ -387,152 +460,146 @@ flushptrbuf(PtrTarget *ptrbuf, PtrTarget **ptrbufpos, Obj **_wp, Workbuf **_wbuf
runtime·throw("ptrbuf has to be smaller than WorkBuf");
}
- // TODO(atom): This block is a branch of an if-then-else statement.
- // The single-threaded branch may be added in a next CL.
- {
- // Multi-threaded version.
+ while(ptrbuf < ptrbuf_end) {
+ obj = ptrbuf->p;
+ ti = ptrbuf->ti;
+ ptrbuf++;
+
+ // obj belongs to interval [mheap.arena_start, mheap.arena_used).
+ if(Debug > 1) {
+ if(obj < runtime·mheap.arena_start || obj >= runtime·mheap.arena_used)
+ runtime·throw("object is outside of mheap");
+ }
- while(ptrbuf < ptrbuf_end) {
- obj = ptrbuf->p;
- ti = ptrbuf->ti;
- ptrbuf++;
+ // obj may be a pointer to a live object.
+ // Try to find the beginning of the object.
- // obj belongs to interval [mheap.arena_start, mheap.arena_used).
- if(Debug > 1) {
- if(obj < runtime·mheap.arena_start || obj >= runtime·mheap.arena_used)
- runtime·throw("object is outside of mheap");
- }
+ // Round down to word boundary.
+ if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
+ obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
+ ti = 0;
+ }
- // obj may be a pointer to a live object.
- // Try to find the beginning of the object.
+ // Find bits for this word.
+ off = (uintptr*)obj - (uintptr*)arena_start;
+ bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ xbits = *bitp;
+ bits = xbits >> shift;
- // Round down to word boundary.
- if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
- obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
- ti = 0;
- }
+ // Pointing at the beginning of a block?
+ if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
+ if(CollectStats)
+ runtime·xadd64(&gcstats.flushptrbuf.foundbit, 1);
+ goto found;
+ }
- // Find bits for this word.
- off = (uintptr*)obj - (uintptr*)arena_start;
- bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
- shift = off % wordsPerBitmapWord;
- xbits = *bitp;
- bits = xbits >> shift;
+ ti = 0;
- // Pointing at the beginning of a block?
- if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
+ // Pointing just past the beginning?
+ // Scan backward a little to find a block boundary.
+ for(j=shift; j-->0; ) {
+ if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
+ obj = (byte*)obj - (shift-j)*PtrSize;
+ shift = j;
+ bits = xbits>>shift;
if(CollectStats)
- runtime·xadd64(&gcstats.flushptrbuf.foundbit, 1);
+ runtime·xadd64(&gcstats.flushptrbuf.foundword, 1);
goto found;
}
+ }
- ti = 0;
-
- // Pointing just past the beginning?
- // Scan backward a little to find a block boundary.
- for(j=shift; j-->0; ) {
- if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
- obj = (byte*)obj - (shift-j)*PtrSize;
- shift = j;
- bits = xbits>>shift;
- if(CollectStats)
- runtime·xadd64(&gcstats.flushptrbuf.foundword, 1);
- goto found;
- }
- }
-
- // Otherwise consult span table to find beginning.
- // (Manually inlined copy of MHeap_LookupMaybe.)
- k = (uintptr)obj>>PageShift;
- x = k;
- if(sizeof(void*) == 8)
- x -= (uintptr)arena_start>>PageShift;
- s = runtime·mheap.spans[x];
- if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse)
- continue;
- p = (byte*)((uintptr)s->start<<PageShift);
- if(s->sizeclass == 0) {
- obj = p;
- } else {
- size = s->elemsize;
- int32 i = ((byte*)obj - p)/size;
- obj = p+i*size;
- }
+ // Otherwise consult span table to find beginning.
+ // (Manually inlined copy of MHeap_LookupMaybe.)
+ k = (uintptr)obj>>PageShift;
+ x = k;
+ x -= (uintptr)arena_start>>PageShift;
+ s = runtime·mheap.spans[x];
+ if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse)
+ continue;
+ p = (byte*)((uintptr)s->start<<PageShift);
+ if(s->sizeclass == 0) {
+ obj = p;
+ } else {
+ size = s->elemsize;
+ int32 i = ((byte*)obj - p)/size;
+ obj = p+i*size;
+ }
- // Now that we know the object header, reload bits.
- off = (uintptr*)obj - (uintptr*)arena_start;
- bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
- shift = off % wordsPerBitmapWord;
- xbits = *bitp;
- bits = xbits >> shift;
- if(CollectStats)
- runtime·xadd64(&gcstats.flushptrbuf.foundspan, 1);
+ // Now that we know the object header, reload bits.
+ off = (uintptr*)obj - (uintptr*)arena_start;
+ bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ xbits = *bitp;
+ bits = xbits >> shift;
+ if(CollectStats)
+ runtime·xadd64(&gcstats.flushptrbuf.foundspan, 1);
- found:
- // Now we have bits, bitp, and shift correct for
- // obj pointing at the base of the object.
- // Only care about allocated and not marked.
- if((bits & (bitAllocated|bitMarked)) != bitAllocated)
- continue;
- if(work.nproc == 1)
- *bitp |= bitMarked<<shift;
- else {
- for(;;) {
- x = *bitp;
- if(x & (bitMarked<<shift))
- goto continue_obj;
- if(runtime·casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))
- break;
- }
+ found:
+ // Now we have bits, bitp, and shift correct for
+ // obj pointing at the base of the object.
+ // Only care about allocated and not marked.
+ if((bits & (bitAllocated|bitMarked)) != bitAllocated)
+ continue;
+ if(work.nproc == 1)
+ *bitp |= bitMarked<<shift;
+ else {
+ for(;;) {
+ x = *bitp;
+ if(x & (bitMarked<<shift))
+ goto continue_obj;
+ if(runtime·casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))
+ break;
}
+ }
- // If object has no pointers, don't need to scan further.
- if((bits & bitNoScan) != 0)
- continue;
+ // If object has no pointers, don't need to scan further.
+ if((bits & bitScan) == 0)
+ continue;
- // Ask span about size class.
- // (Manually inlined copy of MHeap_Lookup.)
- x = (uintptr)obj >> PageShift;
- if(sizeof(void*) == 8)
- x -= (uintptr)arena_start>>PageShift;
- s = runtime·mheap.spans[x];
+ // Ask span about size class.
+ // (Manually inlined copy of MHeap_Lookup.)
+ x = (uintptr)obj >> PageShift;
+ x -= (uintptr)arena_start>>PageShift;
+ s = runtime·mheap.spans[x];
- PREFETCH(obj);
+ PREFETCH(obj);
- *wp = (Obj){obj, s->elemsize, ti};
- wp++;
- nobj++;
- continue_obj:;
- }
+ *wp = (Obj){obj, s->elemsize, ti};
+ wp++;
+ nobj++;
+ continue_obj:;
+ }
- // If another proc wants a pointer, give it some.
- if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
- wbuf->nobj = nobj;
- wbuf = handoff(wbuf);
- nobj = wbuf->nobj;
- wp = wbuf->obj + nobj;
- }
+ // If another proc wants a pointer, give it some.
+ if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
+ wbuf->nobj = nobj;
+ wbuf = handoff(wbuf);
+ nobj = wbuf->nobj;
+ wp = wbuf->obj + nobj;
}
- *_wp = wp;
- *_wbuf = wbuf;
- *_nobj = nobj;
+ sbuf->wp = wp;
+ sbuf->wbuf = wbuf;
+ sbuf->nobj = nobj;
}
static void
-flushobjbuf(Obj *objbuf, Obj **objbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj)
+flushobjbuf(Scanbuf *sbuf)
{
uintptr nobj, off;
Obj *wp, obj;
Workbuf *wbuf;
+ Obj *objbuf;
Obj *objbuf_end;
- wp = *_wp;
- wbuf = *_wbuf;
- nobj = *_nobj;
+ wp = sbuf->wp;
+ wbuf = sbuf->wbuf;
+ nobj = sbuf->nobj;
- objbuf_end = *objbufpos;
- *objbufpos = objbuf;
+ objbuf = sbuf->obj.begin;
+ objbuf_end = sbuf->obj.pos;
+ sbuf->obj.pos = sbuf->obj.begin;
while(objbuf < objbuf_end) {
obj = *objbuf++;
@@ -570,9 +637,9 @@ flushobjbuf(Obj *objbuf, Obj **objbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_
wp = wbuf->obj + nobj;
}
- *_wp = wp;
- *_wbuf = wbuf;
- *_nobj = nobj;
+ sbuf->wp = wp;
+ sbuf->wbuf = wbuf;
+ sbuf->nobj = nobj;
}
// Program that scans the whole block and treats every block element as a potential pointer
@@ -607,8 +674,7 @@ checkptr(void *obj, uintptr objti)
if(t == nil)
return;
x = (uintptr)obj >> PageShift;
- if(sizeof(void*) == 8)
- x -= (uintptr)(runtime·mheap.arena_start)>>PageShift;
+ x -= (uintptr)(runtime·mheap.arena_start)>>PageShift;
s = runtime·mheap.spans[x];
objstart = (byte*)((uintptr)s->start<<PageShift);
if(s->sizeclass != 0) {
@@ -636,8 +702,8 @@ checkptr(void *obj, uintptr objti)
// A simple best-effort check until first GC_END.
for(j = 1; pc1[j] != GC_END && pc2[j] != GC_END; j++) {
if(pc1[j] != pc2[j]) {
- runtime·printf("invalid gc type info for '%s' at %p, type info %p, block info %p\n",
- t->string ? (int8*)t->string->str : (int8*)"?", j, pc1[j], pc2[j]);
+ runtime·printf("invalid gc type info for '%s', type info %p [%d]=%p, block info %p [%d]=%p\n",
+ t->string ? (int8*)t->string->str : (int8*)"?", pc1, (int32)j, pc1[j], pc2, (int32)j, pc2[j]);
runtime·throw("invalid gc type info");
}
}
@@ -650,30 +716,27 @@ checkptr(void *obj, uintptr objti)
// a work list in the Workbuf* structures and loops in the main function
// body. Keeping an explicit work list is easier on the stack allocator and
// more efficient.
-//
-// wbuf: current work buffer
-// wp: storage for next queued pointer (write pointer)
-// nobj: number of queued objects
static void
-scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
+scanblock(Workbuf *wbuf, bool keepworking)
{
byte *b, *arena_start, *arena_used;
- uintptr n, i, end_b, elemsize, size, ti, objti, count, type;
+ uintptr n, i, end_b, elemsize, size, ti, objti, count, type, nobj;
uintptr *pc, precise_type, nominal_size;
uintptr *chan_ret, chancap;
void *obj;
- Type *t;
+ Type *t, *et;
Slice *sliceptr;
+ String *stringptr;
Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
BufferList *scanbuffers;
- PtrTarget *ptrbuf, *ptrbuf_end, *ptrbufpos;
- Obj *objbuf, *objbuf_end, *objbufpos;
+ Scanbuf sbuf;
Eface *eface;
Iface *iface;
Hchan *chan;
ChanType *chantype;
+ Obj *wp;
- if(sizeof(Workbuf) % PageSize != 0)
+ if(sizeof(Workbuf) % WorkbufSize != 0)
runtime·throw("scanblock: size of Workbuf is suboptimal");
// Memory arena parameters.
@@ -681,21 +744,30 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
arena_used = runtime·mheap.arena_used;
stack_ptr = stack+nelem(stack)-1;
-
+
precise_type = false;
nominal_size = 0;
- // Allocate ptrbuf
- {
- scanbuffers = &bufferList[m->helpgc];
- ptrbuf = &scanbuffers->ptrtarget[0];
- ptrbuf_end = &scanbuffers->ptrtarget[0] + nelem(scanbuffers->ptrtarget);
- objbuf = &scanbuffers->obj[0];
- objbuf_end = &scanbuffers->obj[0] + nelem(scanbuffers->obj);
+ if(wbuf) {
+ nobj = wbuf->nobj;
+ wp = &wbuf->obj[nobj];
+ } else {
+ nobj = 0;
+ wp = nil;
}
- ptrbufpos = ptrbuf;
- objbufpos = objbuf;
+ // Initialize sbuf
+ scanbuffers = &bufferList[m->helpgc];
+
+ sbuf.ptr.begin = sbuf.ptr.pos = &scanbuffers->ptrtarget[0];
+ sbuf.ptr.end = sbuf.ptr.begin + nelem(scanbuffers->ptrtarget);
+
+ sbuf.obj.begin = sbuf.obj.pos = &scanbuffers->obj[0];
+ sbuf.obj.end = sbuf.obj.begin + nelem(scanbuffers->obj);
+
+ sbuf.wbuf = wbuf;
+ sbuf.wp = wp;
+ sbuf.nobj = nobj;
// (Silence the compiler)
chan = nil;
@@ -707,17 +779,17 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
for(;;) {
// Each iteration scans the block b of length n, queueing pointers in
// the work buffer.
- if(Debug > 1) {
- runtime·printf("scanblock %p %D\n", b, (int64)n);
- }
if(CollectStats) {
runtime·xadd64(&gcstats.nbytes, n);
- runtime·xadd64(&gcstats.obj.sum, nobj);
+ runtime·xadd64(&gcstats.obj.sum, sbuf.nobj);
runtime·xadd64(&gcstats.obj.cnt, 1);
}
if(ti != 0) {
+ if(Debug > 1) {
+ runtime·printf("scanblock %p %D ti %p\n", b, (int64)n, ti);
+ }
pc = (uintptr*)(ti & ~(uintptr)PC_BITS);
precise_type = (ti & PRECISE);
stack_top.elemsize = pc[0];
@@ -773,14 +845,22 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
pc = chanProg;
break;
default:
+ if(Debug > 1)
+ runtime·printf("scanblock %p %D type %p %S\n", b, (int64)n, type, *t->string);
runtime·throw("scanblock: invalid type");
return;
}
+ if(Debug > 1)
+ runtime·printf("scanblock %p %D type %p %S pc=%p\n", b, (int64)n, type, *t->string, pc);
} else {
pc = defaultProg;
+ if(Debug > 1)
+ runtime·printf("scanblock %p %D unknown type\n", b, (int64)n);
}
} else {
pc = defaultProg;
+ if(Debug > 1)
+ runtime·printf("scanblock %p %D no span types\n", b, (int64)n);
}
if(IgnorePreciseGC)
@@ -788,7 +868,6 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
pc++;
stack_top.b = (uintptr)b;
-
end_b = (uintptr)b + n - PtrSize;
for(;;) {
@@ -801,6 +880,8 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
case GC_PTR:
obj = *(void**)(stack_top.b + pc[1]);
objti = pc[2];
+ if(Debug > 2)
+ runtime·printf("gc_ptr @%p: %p ti=%p\n", stack_top.b+pc[1], obj, objti);
pc += 3;
if(Debug)
checkptr(obj, objti);
@@ -808,6 +889,8 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
case GC_SLICE:
sliceptr = (Slice*)(stack_top.b + pc[1]);
+ if(Debug > 2)
+ runtime·printf("gc_slice @%p: %p/%D/%D\n", sliceptr, sliceptr->array, (int64)sliceptr->len, (int64)sliceptr->cap);
if(sliceptr->cap != 0) {
obj = sliceptr->array;
// Can't use slice element type for scanning,
@@ -821,27 +904,34 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
case GC_APTR:
obj = *(void**)(stack_top.b + pc[1]);
+ if(Debug > 2)
+ runtime·printf("gc_aptr @%p: %p\n", stack_top.b+pc[1], obj);
pc += 2;
break;
case GC_STRING:
- obj = *(void**)(stack_top.b + pc[1]);
- markonly(obj);
+ stringptr = (String*)(stack_top.b + pc[1]);
+ if(Debug > 2)
+ runtime·printf("gc_string @%p: %p/%D\n", stack_top.b+pc[1], stringptr->str, (int64)stringptr->len);
+ if(stringptr->len != 0)
+ markonly(stringptr->str);
pc += 2;
continue;
case GC_EFACE:
eface = (Eface*)(stack_top.b + pc[1]);
pc += 2;
+ if(Debug > 2)
+ runtime·printf("gc_eface @%p: %p %p\n", stack_top.b+pc[1], eface->type, eface->data);
if(eface->type == nil)
continue;
// eface->type
t = eface->type;
if((void*)t >= arena_start && (void*)t < arena_used) {
- *ptrbufpos++ = (PtrTarget){t, 0};
- if(ptrbufpos == ptrbuf_end)
- flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
+ *sbuf.ptr.pos++ = (PtrTarget){t, 0};
+ if(sbuf.ptr.pos == sbuf.ptr.end)
+ flushptrbuf(&sbuf);
}
// eface->data
@@ -851,8 +941,14 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
continue;
obj = eface->data;
- if((t->kind & ~KindNoPointers) == KindPtr)
- objti = (uintptr)((PtrType*)t)->elem->gc;
+ if((t->kind & ~KindNoPointers) == KindPtr) {
+ // Only use type information if it is a pointer-containing type.
+ // This matches the GC programs written by cmd/gc/reflect.c's
+ // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
+ et = ((PtrType*)t)->elem;
+ if(!(et->kind & KindNoPointers))
+ objti = (uintptr)((PtrType*)t)->elem->gc;
+ }
} else {
obj = eface->data;
objti = (uintptr)t->gc;
@@ -863,14 +959,16 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
case GC_IFACE:
iface = (Iface*)(stack_top.b + pc[1]);
pc += 2;
+ if(Debug > 2)
+ runtime·printf("gc_iface @%p: %p/%p %p\n", stack_top.b+pc[1], iface->tab, nil, iface->data);
if(iface->tab == nil)
continue;
// iface->tab
if((void*)iface->tab >= arena_start && (void*)iface->tab < arena_used) {
- *ptrbufpos++ = (PtrTarget){iface->tab, (uintptr)itabtype->gc};
- if(ptrbufpos == ptrbuf_end)
- flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
+ *sbuf.ptr.pos++ = (PtrTarget){iface->tab, (uintptr)itabtype->gc};
+ if(sbuf.ptr.pos == sbuf.ptr.end)
+ flushptrbuf(&sbuf);
}
// iface->data
@@ -881,8 +979,14 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
continue;
obj = iface->data;
- if((t->kind & ~KindNoPointers) == KindPtr)
- objti = (uintptr)((PtrType*)t)->elem->gc;
+ if((t->kind & ~KindNoPointers) == KindPtr) {
+ // Only use type information if it is a pointer-containing type.
+ // This matches the GC programs written by cmd/gc/reflect.c's
+ // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
+ et = ((PtrType*)t)->elem;
+ if(!(et->kind & KindNoPointers))
+ objti = (uintptr)((PtrType*)t)->elem->gc;
+ }
} else {
obj = iface->data;
objti = (uintptr)t->gc;
@@ -893,11 +997,13 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
case GC_DEFAULT_PTR:
while(stack_top.b <= end_b) {
obj = *(byte**)stack_top.b;
+ if(Debug > 2)
+ runtime·printf("gc_default_ptr @%p: %p\n", stack_top.b, obj);
stack_top.b += PtrSize;
if(obj >= arena_start && obj < arena_used) {
- *ptrbufpos++ = (PtrTarget){obj, 0};
- if(ptrbufpos == ptrbuf_end)
- flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
+ *sbuf.ptr.pos++ = (PtrTarget){obj, 0};
+ if(sbuf.ptr.pos == sbuf.ptr.end)
+ flushptrbuf(&sbuf);
}
}
goto next_block;
@@ -926,7 +1032,7 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
if(*(byte**)i != nil) {
// Found a value that may be a pointer.
// Do a rescan of the entire block.
- enqueue((Obj){b, n, 0}, &wbuf, &wp, &nobj);
+ enqueue((Obj){b, n, 0}, &sbuf.wbuf, &sbuf.wp, &sbuf.nobj);
if(CollectStats) {
runtime·xadd64(&gcstats.rescan, 1);
runtime·xadd64(&gcstats.rescanbytes, n);
@@ -972,13 +1078,17 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
objti = pc[3];
pc += 4;
- *objbufpos++ = (Obj){obj, size, objti};
- if(objbufpos == objbuf_end)
- flushobjbuf(objbuf, &objbufpos, &wp, &wbuf, &nobj);
+ if(Debug > 2)
+ runtime·printf("gc_region @%p: %D %p\n", stack_top.b+pc[1], (int64)size, objti);
+ *sbuf.obj.pos++ = (Obj){obj, size, objti};
+ if(sbuf.obj.pos == sbuf.obj.end)
+ flushobjbuf(&sbuf);
continue;
case GC_CHAN_PTR:
chan = *(Hchan**)(stack_top.b + pc[1]);
+ if(Debug > 2 && chan != nil)
+ runtime·printf("gc_chan_ptr @%p: %p/%D/%D %p\n", stack_top.b+pc[1], chan, (int64)chan->qcount, (int64)chan->dataqsiz, pc[2]);
if(chan == nil) {
pc += 3;
continue;
@@ -1007,10 +1117,10 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
// in-use part of the circular buffer is scanned.
// (Channel routines zero the unused part, so the current
// code does not lead to leaks, it's just a little inefficient.)
- *objbufpos++ = (Obj){(byte*)chan+runtime·Hchansize, chancap*chantype->elem->size,
+ *sbuf.obj.pos++ = (Obj){(byte*)chan+runtime·Hchansize, chancap*chantype->elem->size,
(uintptr)chantype->elem->gc | PRECISE | LOOP};
- if(objbufpos == objbuf_end)
- flushobjbuf(objbuf, &objbufpos, &wp, &wbuf, &nobj);
+ if(sbuf.obj.pos == sbuf.obj.end)
+ flushobjbuf(&sbuf);
}
}
if(chan_ret == nil)
@@ -1019,14 +1129,15 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
continue;
default:
+ runtime·printf("runtime: invalid GC instruction %p at %p\n", pc[0], pc);
runtime·throw("scanblock: invalid GC instruction");
return;
}
if(obj >= arena_start && obj < arena_used) {
- *ptrbufpos++ = (PtrTarget){obj, objti};
- if(ptrbufpos == ptrbuf_end)
- flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
+ *sbuf.ptr.pos++ = (PtrTarget){obj, objti};
+ if(sbuf.ptr.pos == sbuf.ptr.end)
+ flushptrbuf(&sbuf);
}
}
@@ -1034,109 +1145,31 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
// Done scanning [b, b+n). Prepare for the next iteration of
// the loop by setting b, n, ti to the parameters for the next block.
- if(nobj == 0) {
- flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
- flushobjbuf(objbuf, &objbufpos, &wp, &wbuf, &nobj);
+ if(sbuf.nobj == 0) {
+ flushptrbuf(&sbuf);
+ flushobjbuf(&sbuf);
- if(nobj == 0) {
+ if(sbuf.nobj == 0) {
if(!keepworking) {
- if(wbuf)
- putempty(wbuf);
- goto endscan;
+ if(sbuf.wbuf)
+ putempty(sbuf.wbuf);
+ return;
}
// Emptied our buffer: refill.
- wbuf = getfull(wbuf);
- if(wbuf == nil)
- goto endscan;
- nobj = wbuf->nobj;
- wp = wbuf->obj + wbuf->nobj;
+ sbuf.wbuf = getfull(sbuf.wbuf);
+ if(sbuf.wbuf == nil)
+ return;
+ sbuf.nobj = sbuf.wbuf->nobj;
+ sbuf.wp = sbuf.wbuf->obj + sbuf.wbuf->nobj;
}
}
// Fetch b from the work buffer.
- --wp;
- b = wp->p;
- n = wp->n;
- ti = wp->ti;
- nobj--;
- }
-
-endscan:;
-}
-
-// debug_scanblock is the debug copy of scanblock.
-// it is simpler, slower, single-threaded, recursive,
-// and uses bitSpecial as the mark bit.
-static void
-debug_scanblock(byte *b, uintptr n)
-{
- byte *obj, *p;
- void **vp;
- uintptr size, *bitp, bits, shift, i, xbits, off;
- MSpan *s;
-
- if(!DebugMark)
- runtime·throw("debug_scanblock without DebugMark");
-
- if((intptr)n < 0) {
- runtime·printf("debug_scanblock %p %D\n", b, (int64)n);
- runtime·throw("debug_scanblock");
- }
-
- // Align b to a word boundary.
- off = (uintptr)b & (PtrSize-1);
- if(off != 0) {
- b += PtrSize - off;
- n -= PtrSize - off;
- }
-
- vp = (void**)b;
- n /= PtrSize;
- for(i=0; i<n; i++) {
- obj = (byte*)vp[i];
-
- // Words outside the arena cannot be pointers.
- if((byte*)obj < runtime·mheap.arena_start || (byte*)obj >= runtime·mheap.arena_used)
- continue;
-
- // Round down to word boundary.
- obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
-
- // Consult span table to find beginning.
- s = runtime·MHeap_LookupMaybe(&runtime·mheap, obj);
- if(s == nil)
- continue;
-
- p = (byte*)((uintptr)s->start<<PageShift);
- size = s->elemsize;
- if(s->sizeclass == 0) {
- obj = p;
- } else {
- int32 i = ((byte*)obj - p)/size;
- obj = p+i*size;
- }
-
- // Now that we know the object header, reload bits.
- off = (uintptr*)obj - (uintptr*)runtime·mheap.arena_start;
- bitp = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
- shift = off % wordsPerBitmapWord;
- xbits = *bitp;
- bits = xbits >> shift;
-
- // Now we have bits, bitp, and shift correct for
- // obj pointing at the base of the object.
- // If not allocated or already marked, done.
- if((bits & bitAllocated) == 0 || (bits & bitSpecial) != 0) // NOTE: bitSpecial not bitMarked
- continue;
- *bitp |= bitSpecial<<shift;
- if(!(bits & bitMarked))
- runtime·printf("found unmarked block %p in %p\n", obj, vp+i);
-
- // If object has no pointers, don't need to scan further.
- if((bits & bitNoScan) != 0)
- continue;
-
- debug_scanblock(obj, size);
+ --sbuf.wp;
+ b = sbuf.wp->p;
+ n = sbuf.wp->n;
+ ti = sbuf.wp->ti;
+ sbuf.nobj--;
}
}
@@ -1196,18 +1229,102 @@ enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj)
}
static void
+enqueue1(Workbuf **wbufp, Obj obj)
+{
+ Workbuf *wbuf;
+
+ wbuf = *wbufp;
+ if(wbuf->nobj >= nelem(wbuf->obj))
+ *wbufp = wbuf = getempty(wbuf);
+ wbuf->obj[wbuf->nobj++] = obj;
+}
+
+static void
markroot(ParFor *desc, uint32 i)
{
- Obj *wp;
Workbuf *wbuf;
- uintptr nobj;
+ FinBlock *fb;
+ MHeap *h;
+ MSpan **allspans, *s;
+ uint32 spanidx, sg;
+ G *gp;
+ void *p;
USED(&desc);
- wp = nil;
- wbuf = nil;
- nobj = 0;
- enqueue(work.roots[i], &wbuf, &wp, &nobj);
- scanblock(wbuf, wp, nobj, false);
+ wbuf = getempty(nil);
+ // Note: if you add a case here, please also update heapdump.c:dumproots.
+ switch(i) {
+ case RootData:
+ enqueue1(&wbuf, (Obj){data, edata - data, (uintptr)gcdata});
+ break;
+
+ case RootBss:
+ enqueue1(&wbuf, (Obj){bss, ebss - bss, (uintptr)gcbss});
+ break;
+
+ case RootFinalizers:
+ for(fb=allfin; fb; fb=fb->alllink)
+ enqueue1(&wbuf, (Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
+ break;
+
+ case RootSpanTypes:
+ // mark span types and MSpan.specials (to walk spans only once)
+ h = &runtime·mheap;
+ sg = h->sweepgen;
+ allspans = h->allspans;
+ for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
+ Special *sp;
+ SpecialFinalizer *spf;
+
+ s = allspans[spanidx];
+ if(s->sweepgen != sg) {
+ runtime·printf("sweep %d %d\n", s->sweepgen, sg);
+ runtime·throw("gc: unswept span");
+ }
+ if(s->state != MSpanInUse)
+ continue;
+ // The garbage collector ignores type pointers stored in MSpan.types:
+ // - Compiler-generated types are stored outside of heap.
+ // - The reflect package has runtime-generated types cached in its data structures.
+ // The garbage collector relies on finding the references via that cache.
+ if(s->types.compression == MTypes_Words || s->types.compression == MTypes_Bytes)
+ markonly((byte*)s->types.data);
+ for(sp = s->specials; sp != nil; sp = sp->next) {
+ if(sp->kind != KindSpecialFinalizer)
+ continue;
+ // Don't mark the finalized object, but scan it so we
+ // retain everything it points to.
+ spf = (SpecialFinalizer*)sp;
+ // A finalizer can be set for an inner byte of an object; find the object's beginning.
+ p = (void*)((s->start << PageShift) + spf->offset/s->elemsize*s->elemsize);
+ enqueue1(&wbuf, (Obj){p, s->elemsize, 0});
+ enqueue1(&wbuf, (Obj){(void*)&spf->fn, PtrSize, 0});
+ enqueue1(&wbuf, (Obj){(void*)&spf->fint, PtrSize, 0});
+ enqueue1(&wbuf, (Obj){(void*)&spf->ot, PtrSize, 0});
+ }
+ }
+ break;
+
+ case RootFlushCaches:
+ flushallmcaches();
+ break;
+
+ default:
+ // the rest is scanning goroutine stacks
+ if(i - RootCount >= runtime·allglen)
+ runtime·throw("markroot: bad index");
+ gp = runtime·allg[i - RootCount];
+ // remember when we first observed the G blocked;
+ // needed only for traceback output
+ if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince == 0)
+ gp->waitsince = work.tstart;
+ addstackroots(gp, &wbuf);
+ break;
+ }
+
+ if(wbuf)
+ scanblock(wbuf, false);
}
// Get an empty work buffer off the work.empty list,
@@ -1304,43 +1421,20 @@ handoff(Workbuf *b)
return b1;
}
-static void
-addroot(Obj obj)
-{
- uint32 cap;
- Obj *new;
-
- if(work.nroot >= work.rootcap) {
- cap = PageSize/sizeof(Obj);
- if(cap < 2*work.rootcap)
- cap = 2*work.rootcap;
- new = (Obj*)runtime·SysAlloc(cap*sizeof(Obj), &mstats.gc_sys);
- if(new == nil)
- runtime·throw("runtime: cannot allocate memory");
- if(work.roots != nil) {
- runtime·memmove(new, work.roots, work.rootcap*sizeof(Obj));
- runtime·SysFree(work.roots, work.rootcap*sizeof(Obj), &mstats.gc_sys);
- }
- work.roots = new;
- work.rootcap = cap;
- }
- work.roots[work.nroot] = obj;
- work.nroot++;
-}
-
extern byte pclntab[]; // base for f->ptrsoff
-typedef struct BitVector BitVector;
-struct BitVector
+BitVector
+runtime·stackmapdata(StackMap *stackmap, int32 n)
{
- int32 n;
- uint32 data[];
-};
+ if(n < 0 || n >= stackmap->n)
+ runtime·throw("stackmapdata: index out of range");
+ return (BitVector){stackmap->nbit, stackmap->data + n*((stackmap->nbit+31)/32)};
+}
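+
+// Layout note for the indexing above (illustrative): each of the stackmap->n
+// entries is a bit vector of stackmap->nbit bits packed into uint32 words, so
+// an entry occupies (nbit+31)/32 words. For example, with nbit=40 each entry
+// takes 2 words and entry n starts at stackmap->data + 2*n.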
// Scans an interface data value when the interface type indicates
// that it is a pointer.
static void
-scaninterfacedata(uintptr bits, byte *scanp, bool afterprologue)
+scaninterfacedata(uintptr bits, byte *scanp, bool afterprologue, void *wbufp)
{
Itab *tab;
Type *type;
@@ -1356,16 +1450,17 @@ scaninterfacedata(uintptr bits, byte *scanp, bool afterprologue)
return;
}
}
- addroot((Obj){scanp+PtrSize, PtrSize, 0});
+ enqueue1(wbufp, (Obj){scanp+PtrSize, PtrSize, 0});
}
// Starting from scanp, scans words corresponding to set bits.
static void
-scanbitvector(byte *scanp, BitVector *bv, bool afterprologue)
+scanbitvector(Func *f, bool precise, byte *scanp, BitVector *bv, bool afterprologue, void *wbufp)
{
uintptr word, bits;
uint32 *wordp;
int32 i, remptrs;
+ byte *p;
wordp = bv->data;
for(remptrs = bv->n; remptrs > 0; remptrs -= 32) {
@@ -1377,11 +1472,88 @@ scanbitvector(byte *scanp, BitVector *bv, bool afterprologue)
i /= BitsPerPointer;
for(; i > 0; i--) {
bits = word & 3;
- if(bits != BitsNoPointer && *(void**)scanp != nil)
- if(bits == BitsPointer)
- addroot((Obj){scanp, PtrSize, 0});
- else
- scaninterfacedata(bits, scanp, afterprologue);
+ switch(bits) {
+ case BitsDead:
+ if(runtime·debug.gcdead)
+ *(uintptr*)scanp = PoisonGC;
+ break;
+ case BitsScalar:
+ break;
+ case BitsPointer:
+ p = *(byte**)scanp;
+ if(p != nil) {
+ if(Debug > 2)
+ runtime·printf("frame %s @%p: ptr %p\n", runtime·funcname(f), scanp, p);
+ if(precise && (p < (byte*)PageSize || (uintptr)p == PoisonGC || (uintptr)p == PoisonStack)) {
+ // Looks like a junk value in a pointer slot.
+ // Liveness analysis wrong?
+ m->traceback = 2;
+ runtime·printf("bad pointer in frame %s at %p: %p\n", runtime·funcname(f), scanp, p);
+ runtime·throw("bad pointer in scanbitvector");
+ }
+ enqueue1(wbufp, (Obj){scanp, PtrSize, 0});
+ }
+ break;
+ case BitsMultiWord:
+ p = scanp;
+ word >>= BitsPerPointer;
+ scanp += PtrSize;
+ i--;
+ if(i == 0) {
+ // Get next chunk of bits
+ remptrs -= 32;
+ word = *wordp++;
+ if(remptrs < 32)
+ i = remptrs;
+ else
+ i = 32;
+ i /= BitsPerPointer;
+ }
+ switch(word & 3) {
+ case BitsString:
+ if(Debug > 2)
+ runtime·printf("frame %s @%p: string %p/%D\n", runtime·funcname(f), p, ((String*)p)->str, (int64)((String*)p)->len);
+ if(((String*)p)->len != 0)
+ markonly(((String*)p)->str);
+ break;
+ case BitsSlice:
+ word >>= BitsPerPointer;
+ scanp += PtrSize;
+ i--;
+ if(i == 0) {
+ // Get next chunk of bits
+ remptrs -= 32;
+ word = *wordp++;
+ if(remptrs < 32)
+ i = remptrs;
+ else
+ i = 32;
+ i /= BitsPerPointer;
+ }
+ if(Debug > 2)
+ runtime·printf("frame %s @%p: slice %p/%D/%D\n", runtime·funcname(f), p, ((Slice*)p)->array, (int64)((Slice*)p)->len, (int64)((Slice*)p)->cap);
+ if(((Slice*)p)->cap < ((Slice*)p)->len) {
+ m->traceback = 2;
+ runtime·printf("bad slice in frame %s at %p: %p/%p/%p\n", runtime·funcname(f), p, ((byte**)p)[0], ((byte**)p)[1], ((byte**)p)[2]);
+ runtime·throw("slice capacity smaller than length");
+ }
+ if(((Slice*)p)->cap != 0)
+ enqueue1(wbufp, (Obj){p, PtrSize, 0});
+ break;
+ case BitsIface:
+ case BitsEface:
+ if(*(byte**)p != nil) {
+ if(Debug > 2) {
+ if((word&3) == BitsEface)
+ runtime·printf("frame %s @%p: eface %p %p\n", runtime·funcname(f), p, ((uintptr*)p)[0], ((uintptr*)p)[1]);
+ else
+ runtime·printf("frame %s @%p: iface %p %p\n", runtime·funcname(f), p, ((uintptr*)p)[0], ((uintptr*)p)[1]);
+ }
+ scaninterfacedata(word & 3, p, afterprologue, wbufp);
+ }
+ break;
+ }
+ }
word >>= BitsPerPointer;
scanp += PtrSize;
}
@@ -1389,64 +1561,111 @@ scanbitvector(byte *scanp, BitVector *bv, bool afterprologue)
}
// Scan a stack frame: local variables and function arguments/results.
-static void
-addframeroots(Stkframe *frame, void*)
+static bool
+scanframe(Stkframe *frame, void *wbufp)
{
Func *f;
- BitVector *args, *locals;
+ StackMap *stackmap;
+ BitVector bv;
uintptr size;
+ uintptr targetpc;
+ int32 pcdata;
bool afterprologue;
+ bool precise;
f = frame->fn;
+ targetpc = frame->continpc;
+ if(targetpc == 0) {
+ // Frame is dead.
+ return true;
+ }
+ if(targetpc != f->entry)
+ targetpc--;
+ pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
+ if(pcdata == -1) {
+ // We do not have a valid pcdata value, but there might be a
+ // stackmap for this function. It is likely that we are looking
+ // at the function prologue; assume so and hope for the best.
+ pcdata = 0;
+ }
// Scan local variables if stack frame has been allocated.
// Use pointer information if known.
afterprologue = (frame->varp > (byte*)frame->sp);
+ precise = false;
if(afterprologue) {
- locals = runtime·funcdata(f, FUNCDATA_GCLocals);
- if(locals == nil) {
+ stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
+ if(stackmap == nil) {
// No locals information, scan everything.
size = frame->varp - (byte*)frame->sp;
- addroot((Obj){frame->varp - size, size, 0});
- } else if(locals->n < 0) {
- // Locals size information, scan just the
+ if(Debug > 2)
+ runtime·printf("frame %s unsized locals %p+%p\n", runtime·funcname(f), frame->varp-size, size);
+ enqueue1(wbufp, (Obj){frame->varp - size, size, 0});
+ } else if(stackmap->n < 0) {
+ // Locals size information, scan just the locals.
+ size = -stackmap->n;
+ if(Debug > 2)
+ runtime·printf("frame %s conservative locals %p+%p\n", runtime·funcname(f), frame->varp-size, size);
+ enqueue1(wbufp, (Obj){frame->varp - size, size, 0});
+ } else if(stackmap->n > 0) {
+ // Locals bitmap information, scan just the pointers in
// locals.
- size = -locals->n;
- addroot((Obj){frame->varp - size, size, 0});
- } else if(locals->n > 0) {
- // Locals bitmap information, scan just the
- // pointers in locals.
- size = (locals->n*PtrSize) / BitsPerPointer;
- scanbitvector(frame->varp - size, locals, afterprologue);
+ if(pcdata < 0 || pcdata >= stackmap->n) {
+ // don't know where we are
+ runtime·printf("pcdata is %d and %d stack map entries for %s (targetpc=%p)\n",
+ pcdata, stackmap->n, runtime·funcname(f), targetpc);
+ runtime·throw("scanframe: bad symbol table");
+ }
+ bv = runtime·stackmapdata(stackmap, pcdata);
+ size = (bv.n * PtrSize) / BitsPerPointer;
+ precise = true;
+ scanbitvector(f, true, frame->varp - size, &bv, afterprologue, wbufp);
}
}
// Scan arguments.
// Use pointer information if known.
- args = runtime·funcdata(f, FUNCDATA_GCArgs);
- if(args != nil && args->n > 0)
- scanbitvector(frame->argp, args, false);
- else
- addroot((Obj){frame->argp, frame->arglen, 0});
+ stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
+ if(stackmap != nil) {
+ bv = runtime·stackmapdata(stackmap, pcdata);
+ scanbitvector(f, precise, frame->argp, &bv, true, wbufp);
+ } else {
+ if(Debug > 2)
+ runtime·printf("frame %s conservative args %p+%p\n", runtime·funcname(f), frame->argp, (uintptr)frame->arglen);
+ enqueue1(wbufp, (Obj){frame->argp, frame->arglen, 0});
+ }
+ return true;
}
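+
+// Sketch of the lookup chain used by scanframe above (illustrative):
+//
+//	targetpc = frame->continpc;                                // pc the frame will resume at
+//	pcdata   = pcdatavalue(f, PCDATA_StackMapIndex, targetpc); // which stack map entry applies
+//	stackmap = funcdata(f, FUNCDATA_LocalsPointerMaps);        // per-function pointer maps
+//	bv       = stackmapdata(stackmap, pcdata);                 // pointer bits for this pc
+//
+// When any of these pieces is missing, the code falls back to scanning the
+// frame conservatively.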
static void
-addstackroots(G *gp)
+addstackroots(G *gp, Workbuf **wbufp)
{
M *mp;
int32 n;
Stktop *stk;
- uintptr sp, guard, pc, lr;
+ uintptr sp, guard;
void *base;
uintptr size;
- stk = (Stktop*)gp->stackbase;
- guard = gp->stackguard;
+ switch(gp->status){
+ default:
+ runtime·printf("unexpected G.status %d (goroutine %p %D)\n", gp->status, gp, gp->goid);
+ runtime·throw("mark - bad status");
+ case Gdead:
+ return;
+ case Grunning:
+ runtime·throw("mark - world not stopped");
+ case Grunnable:
+ case Gsyscall:
+ case Gwaiting:
+ break;
+ }
if(gp == g)
runtime·throw("can't scan our own stack");
if((mp = gp->m) != nil && mp->helpgc)
runtime·throw("can't scan gchelper stack");
+
if(gp->syscallstack != (uintptr)nil) {
// Scanning another goroutine that is about to enter or might
// have just exited a system call. It may be executing code such
@@ -1454,35 +1673,33 @@ addstackroots(G *gp)
// Use the stack segment and stack pointer at the time of
// the system call instead, since that won't change underfoot.
sp = gp->syscallsp;
- pc = gp->syscallpc;
- lr = 0;
stk = (Stktop*)gp->syscallstack;
guard = gp->syscallguard;
} else {
// Scanning another goroutine's stack.
// The goroutine is usually asleep (the world is stopped).
sp = gp->sched.sp;
- pc = gp->sched.pc;
- lr = gp->sched.lr;
-
+ stk = (Stktop*)gp->stackbase;
+ guard = gp->stackguard;
// For function about to start, context argument is a root too.
if(gp->sched.ctxt != 0 && runtime·mlookup(gp->sched.ctxt, &base, &size, nil))
- addroot((Obj){base, size, 0});
+ enqueue1(wbufp, (Obj){base, size, 0});
}
if(ScanStackByFrames) {
+ USED(sp);
USED(stk);
USED(guard);
- runtime·gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, addframeroots, nil, false);
+ runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, scanframe, wbufp, false);
} else {
- USED(lr);
- USED(pc);
n = 0;
while(stk) {
if(sp < guard-StackGuard || (uintptr)stk < sp) {
runtime·printf("scanstack inconsistent: g%D#%d sp=%p not in [%p,%p]\n", gp->goid, n, sp, guard-StackGuard, stk);
runtime·throw("scanstack");
}
- addroot((Obj){(byte*)sp, (uintptr)stk - sp, (uintptr)defaultProg | PRECISE | LOOP});
+ if(Debug > 2)
+ runtime·printf("conservative stack %p+%p\n", (byte*)sp, (uintptr)stk-sp);
+ enqueue1(wbufp, (Obj){(byte*)sp, (uintptr)stk - sp, (uintptr)defaultProg | PRECISE | LOOP});
sp = stk->gobuf.sp;
guard = stk->stackguard;
stk = (Stktop*)stk->stackbase;
@@ -1491,101 +1708,17 @@ addstackroots(G *gp)
}
}
-static void
-addfinroots(void *v)
-{
- uintptr size;
- void *base;
-
- size = 0;
- if(!runtime·mlookup(v, &base, &size, nil) || !runtime·blockspecial(base))
- runtime·throw("mark - finalizer inconsistency");
-
- // do not mark the finalizer block itself. just mark the things it points at.
- addroot((Obj){base, size, 0});
-}
-
-static void
-addroots(void)
-{
- G *gp;
- FinBlock *fb;
- MSpan *s, **allspans;
- uint32 spanidx;
-
- work.nroot = 0;
-
- // data & bss
- // TODO(atom): load balancing
- addroot((Obj){data, edata - data, (uintptr)gcdata});
- addroot((Obj){bss, ebss - bss, (uintptr)gcbss});
-
- // MSpan.types
- allspans = runtime·mheap.allspans;
- for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
- s = allspans[spanidx];
- if(s->state == MSpanInUse) {
- // The garbage collector ignores type pointers stored in MSpan.types:
- // - Compiler-generated types are stored outside of heap.
- // - The reflect package has runtime-generated types cached in its data structures.
- // The garbage collector relies on finding the references via that cache.
- switch(s->types.compression) {
- case MTypes_Empty:
- case MTypes_Single:
- break;
- case MTypes_Words:
- case MTypes_Bytes:
- markonly((byte*)s->types.data);
- break;
- }
- }
- }
-
- // stacks
- for(gp=runtime·allg; gp!=nil; gp=gp->alllink) {
- switch(gp->status){
- default:
- runtime·printf("unexpected G.status %d\n", gp->status);
- runtime·throw("mark - bad status");
- case Gdead:
- break;
- case Grunning:
- runtime·throw("mark - world not stopped");
- case Grunnable:
- case Gsyscall:
- case Gwaiting:
- addstackroots(gp);
- break;
- }
- }
-
- runtime·walkfintab(addfinroots);
-
- for(fb=allfin; fb; fb=fb->alllink)
- addroot((Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
-}
-
-static bool
-handlespecial(byte *p, uintptr size)
+void
+runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot)
{
- FuncVal *fn;
- uintptr nret;
- PtrType *ot;
- Type *fint;
FinBlock *block;
Finalizer *f;
- if(!runtime·getfinalizer(p, true, &fn, &nret, &fint, &ot)) {
- runtime·setblockspecial(p, false);
- runtime·MProf_Free(p, size);
- return false;
- }
-
runtime·lock(&finlock);
if(finq == nil || finq->cnt == finq->cap) {
if(finc == nil) {
- finc = runtime·persistentalloc(PageSize, 0, &mstats.gc_sys);
- finc->cap = (PageSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
+ finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
+ finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
finc->alllink = allfin;
allfin = finc;
}
@@ -1601,33 +1734,79 @@ handlespecial(byte *p, uintptr size)
f->fint = fint;
f->ot = ot;
f->arg = p;
+ runtime·fingwake = true;
runtime·unlock(&finlock);
- return true;
+}
+
+void
+runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*))
+{
+ FinBlock *fb;
+ Finalizer *f;
+ uintptr i;
+
+ for(fb = allfin; fb; fb = fb->alllink) {
+ for(i = 0; i < fb->cnt; i++) {
+ f = &fb->fin[i];
+ callback(f->fn, f->arg, f->nret, f->fint, f->ot);
+ }
+ }
+}
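+
+// iterate_finq walks every queued-but-not-yet-run finalizer and hands
+// (fn, arg, nret, fint, ot) to the callback; it exists for introspection
+// such as heap dumping (see the heapdump.c reference in markroot above).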
+
+void
+runtime·MSpan_EnsureSwept(MSpan *s)
+{
+ uint32 sg;
+
+ // Caller must disable preemption.
+ // Otherwise when this function returns the span can become unswept again
+ // (if GC is triggered on another goroutine).
+ if(m->locks == 0 && m->mallocing == 0 && g != m->g0)
+ runtime·throw("MSpan_EnsureSwept: m is not locked");
+
+ sg = runtime·mheap.sweepgen;
+ if(runtime·atomicload(&s->sweepgen) == sg)
+ return;
+ if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
+ runtime·MSpan_Sweep(s);
+ return;
+ }
+ // Unfortunate condition, and we don't have an efficient means to wait; yield until the sweep completes.
+ while(runtime·atomicload(&s->sweepgen) != sg)
+ runtime·osyield();
}
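+
+// Summary of the sweepgen protocol implied by the cas transitions above
+// (h->sweepgen is incremented by 2 at the start of each GC, see gc() below):
+//
+//	s->sweepgen == h->sweepgen - 2: the span needs sweeping
+//	s->sweepgen == h->sweepgen - 1: the span is being swept
+//	s->sweepgen == h->sweepgen:     the span is swept and ready to use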
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
-static void
-sweepspan(ParFor *desc, uint32 idx)
+// Returns true if the span was returned to heap.
+bool
+runtime·MSpan_Sweep(MSpan *s)
{
- int32 cl, n, npages;
- uintptr size;
+ int32 cl, n, npages, nfree;
+ uintptr size, off, *bitp, shift, bits;
+ uint32 sweepgen;
byte *p;
MCache *c;
byte *arena_start;
MLink head, *end;
- int32 nfree;
byte *type_data;
byte compression;
uintptr type_data_inc;
- MSpan *s;
-
- USED(&desc);
- s = runtime·mheap.allspans[idx];
- if(s->state != MSpanInUse)
- return;
+ MLink *x;
+ Special *special, **specialp, *y;
+ bool res, sweepgenset;
+
+ // It's critical that we enter this function with preemption disabled;
+ // GC must not start while we are in the middle of this function.
+ if(m->locks == 0 && m->mallocing == 0 && g != m->g0)
+ runtime·throw("MSpan_Sweep: m is not locked");
+ sweepgen = runtime·mheap.sweepgen;
+ if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
+ runtime·printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
+ s->state, s->sweepgen, sweepgen);
+ runtime·throw("MSpan_Sweep: bad span state");
+ }
arena_start = runtime·mheap.arena_start;
- p = (byte*)(s->start << PageShift);
cl = s->sizeclass;
size = s->elemsize;
if(cl == 0) {
@@ -1637,10 +1816,52 @@ sweepspan(ParFor *desc, uint32 idx)
npages = runtime·class_to_allocnpages[cl];
n = (npages << PageShift) / size;
}
+ res = false;
nfree = 0;
end = &head;
c = m->mcache;
-
+ sweepgenset = false;
+
+ // mark any free objects in this span so we don't collect them
+ for(x = s->freelist; x != nil; x = x->next) {
+ // This is markonly(x) but faster because we don't need
+ // atomic access and we're guaranteed to be pointing at
+ // the head of a valid object.
+ off = (uintptr*)x - (uintptr*)runtime·mheap.arena_start;
+ bitp = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ *bitp |= bitMarked<<shift;
+ }
+
+ // Unlink & free special records for any objects we're about to free.
+ specialp = &s->specials;
+ special = *specialp;
+ while(special != nil) {
+ // A finalizer can be set for an inner byte of an object; find the object's beginning.
+ p = (byte*)(s->start << PageShift) + special->offset/size*size;
+ off = (uintptr*)p - (uintptr*)arena_start;
+ bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ bits = *bitp>>shift;
+ if((bits & (bitAllocated|bitMarked)) == bitAllocated) {
+ // Find the exact byte for which the special was set up
+ // (as opposed to the object's beginning).
+ p = (byte*)(s->start << PageShift) + special->offset;
+ // about to free object: splice out special record
+ y = special;
+ special = special->next;
+ *specialp = special;
+ if(!runtime·freespecial(y, p, size, false)) {
+ // stop freeing of object if it has a finalizer
+ *bitp |= bitMarked << shift;
+ }
+ } else {
+ // object is still live: keep special record
+ specialp = &special->next;
+ special = *specialp;
+ }
+ }
+
type_data = (byte*)s->types.data;
type_data_inc = sizeof(uintptr);
compression = s->types.compression;
@@ -1654,9 +1875,8 @@ sweepspan(ParFor *desc, uint32 idx)
// Sweep through n objects of given size starting at p.
// This thread owns the span now, so it can manipulate
// the block bitmap without atomic operations.
+ p = (byte*)(s->start << PageShift);
for(; n > 0; n--, p += size, type_data+=type_data_inc) {
- uintptr off, *bitp, shift, bits;
-
off = (uintptr*)p - (uintptr*)arena_start;
bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
@@ -1666,33 +1886,32 @@ sweepspan(ParFor *desc, uint32 idx)
continue;
if((bits & bitMarked) != 0) {
- if(DebugMark) {
- if(!(bits & bitSpecial))
- runtime·printf("found spurious mark on %p\n", p);
- *bitp &= ~(bitSpecial<<shift);
- }
*bitp &= ~(bitMarked<<shift);
continue;
}
- // Special means it has a finalizer or is being profiled.
- // In DebugMark mode, the bit has been coopted so
- // we have to assume all blocks are special.
- if(DebugMark || (bits & bitSpecial) != 0) {
- if(handlespecial(p, size))
- continue;
- }
+ if(runtime·debug.allocfreetrace)
+ runtime·tracefree(p, size);
- // Mark freed; restore block boundary bit.
- *bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
+ // Clear mark and scan bits.
+ *bitp &= ~((bitScan|bitMarked)<<shift);
if(cl == 0) {
// Free large span.
runtime·unmarkspan(p, 1<<PageShift);
- *(uintptr*)p = (uintptr)0xdeaddeaddeaddeadll; // needs zeroing
- runtime·MHeap_Free(&runtime·mheap, s, 1);
+ s->needzero = 1;
+ // important to set sweepgen before returning the span to the heap
+ runtime·atomicstore(&s->sweepgen, sweepgen);
+ sweepgenset = true;
+ // See note about SysFault vs SysFree in malloc.goc.
+ if(runtime·debug.efence)
+ runtime·SysFault(p, size);
+ else
+ runtime·MHeap_Free(&runtime·mheap, s, 1);
c->local_nlargefree++;
c->local_largefree += size;
+ runtime·xadd64(&mstats.next_gc, -(uint64)(size * (gcpercent + 100)/100));
+ res = true;
} else {
// Free small object.
switch(compression) {
@@ -1703,19 +1922,113 @@ sweepspan(ParFor *desc, uint32 idx)
*(byte*)type_data = 0;
break;
}
- if(size > sizeof(uintptr))
+ if(size > 2*sizeof(uintptr))
((uintptr*)p)[1] = (uintptr)0xdeaddeaddeaddeadll; // mark as "needs to be zeroed"
-
+ else if(size > sizeof(uintptr))
+ ((uintptr*)p)[1] = 0;
+
end->next = (MLink*)p;
end = (MLink*)p;
nfree++;
}
}
- if(nfree) {
+ // We need to set s->sweepgen = h->sweepgen only when all blocks are swept,
+ // because of the potential for a concurrent free/SetFinalizer.
+ // But we need to set it before we make the span available for allocation
+ // (return it to heap or mcentral), because allocation code assumes that a
+ // span is already swept if available for allocation.
+
+ if(!sweepgenset && nfree == 0) {
+ // The span must be in our exclusive ownership until we update sweepgen;
+ // check for potential races.
+ if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
+ runtime·printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
+ s->state, s->sweepgen, sweepgen);
+ runtime·throw("MSpan_Sweep: bad span state after sweep");
+ }
+ runtime·atomicstore(&s->sweepgen, sweepgen);
+ }
+ if(nfree > 0) {
c->local_nsmallfree[cl] += nfree;
c->local_cachealloc -= nfree * size;
- runtime·MCentral_FreeSpan(&runtime·mheap.central[cl], s, nfree, head.next, end);
+ runtime·xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
+ res = runtime·MCentral_FreeSpan(&runtime·mheap.central[cl], s, nfree, head.next, end);
+ // MCentral_FreeSpan updates sweepgen.
+ }
+ return res;
+}
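+
+// Note on the next_gc adjustments above (illustrative arithmetic): freeing
+// size bytes lowers the trigger by size*(gcpercent+100)/100 bytes. With the
+// default gcpercent=100, every freed byte lowers next_gc by 2 bytes, matching
+// next_gc = heap_alloc + heap_alloc*gcpercent/100 as recomputed in gc().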
+
+// State of background sweep.
+// Protected by gclock.
+static struct
+{
+ G* g;
+ bool parked;
+
+ MSpan** spans;
+ uint32 nspan;
+ uint32 spanidx;
+} sweep;
+
+// Background sweeping goroutine.
+static void
+bgsweep(void)
+{
+ g->issystem = 1;
+ for(;;) {
+ while(runtime·sweepone() != -1) {
+ gcstats.nbgsweep++;
+ runtime·gosched();
+ }
+ runtime·lock(&gclock);
+ if(!runtime·mheap.sweepdone) {
+ // This is possible if a GC has happened between sweepone
+ // returning -1 and our acquiring gclock.
+ runtime·unlock(&gclock);
+ continue;
+ }
+ sweep.parked = true;
+ g->isbackground = true;
+ runtime·parkunlock(&gclock, "GC sweep wait");
+ g->isbackground = false;
+ }
+}
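+
+// Wakeup protocol (sketch): bgsweep parks itself under gclock with
+// sweep.parked set; gc() either creates the goroutine on first use or, if
+// sweep.parked is set, clears the flag and calls runtime·ready(sweep.g).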
+
+// Sweeps one span.
+// Returns the number of pages returned to the heap, or -1 if there is nothing to sweep.
+uintptr
+runtime·sweepone(void)
+{
+ MSpan *s;
+ uint32 idx, sg;
+ uintptr npages;
+
+ // Increment locks to ensure that the goroutine is not preempted
+ // in the middle of the sweep, which would leave the span in an inconsistent state for the next GC.
+ m->locks++;
+ sg = runtime·mheap.sweepgen;
+ for(;;) {
+ idx = runtime·xadd(&sweep.spanidx, 1) - 1;
+ if(idx >= sweep.nspan) {
+ runtime·mheap.sweepdone = true;
+ m->locks--;
+ return -1;
+ }
+ s = sweep.spans[idx];
+ if(s->state != MSpanInUse) {
+ s->sweepgen = sg;
+ continue;
+ }
+ if(s->sweepgen != sg-2 || !runtime·cas(&s->sweepgen, sg-2, sg-1))
+ continue;
+ if(s->incache)
+ runtime·throw("sweep of incache span");
+ npages = s->npages;
+ if(!runtime·MSpan_Sweep(s))
+ npages = 0;
+ m->locks--;
+ return npages;
}
}
@@ -1727,7 +2040,7 @@ dumpspan(uint32 idx)
byte *p;
byte *arena_start;
MSpan *s;
- bool allocated, special;
+ bool allocated;
s = runtime·mheap.allspans[idx];
if(s->state != MSpanInUse)
@@ -1754,7 +2067,6 @@ dumpspan(uint32 idx)
bits = *bitp>>shift;
allocated = ((bits & bitAllocated) != 0);
- special = ((bits & bitSpecial) != 0);
for(i=0; i<size; i+=sizeof(void*)) {
if(column == 0) {
@@ -1762,7 +2074,6 @@ dumpspan(uint32 idx)
}
if(i == 0) {
runtime·printf(allocated ? "(" : "[");
- runtime·printf(special ? "@" : "");
runtime·printf("%p: ", p+i);
} else {
runtime·printf(" ");
@@ -1798,42 +2109,24 @@ runtime·memorydump(void)
void
runtime·gchelper(void)
{
- int32 nproc;
+ uint32 nproc;
+ m->traceback = 2;
gchelperstart();
// parallel mark over gc roots
runtime·parfordo(work.markfor);
// help other threads scan secondary blocks
- scanblock(nil, nil, 0, true);
-
- if(DebugMark) {
- // wait while the main thread executes mark(debug_scanblock)
- while(runtime·atomicload(&work.debugmarkdone) == 0)
- runtime·usleep(10);
- }
+ scanblock(nil, true);
- runtime·parfordo(work.sweepfor);
bufferList[m->helpgc].busy = 0;
nproc = work.nproc; // work.nproc can change right after we increment work.ndone
if(runtime·xadd(&work.ndone, +1) == nproc-1)
runtime·notewakeup(&work.alldone);
+ m->traceback = 0;
}
-#define GcpercentUnknown (-2)
-
-// Initialized from $GOGC. GOGC=off means no gc.
-//
-// Next gc is after we've allocated an extra amount of
-// memory proportional to the amount already in use.
-// If gcpercent=100 and we're using 4M, we'll gc again
-// when we get to 8M. This keeps the gc cost in linear
-// proportion to the allocation cost. Adjusting gcpercent
-// just changes the linear constant (and also the amount of
-// extra memory used).
-static int32 gcpercent = GcpercentUnknown;
-
static void
cachestats(void)
{
@@ -1849,12 +2142,25 @@ cachestats(void)
}
static void
-updatememstats(GCStats *stats)
+flushallmcaches(void)
+{
+ P *p, **pp;
+ MCache *c;
+
+ // Flush MCache's to MCentral.
+ for(pp=runtime·allp; p=*pp; pp++) {
+ c = p->mcache;
+ if(c==nil)
+ continue;
+ runtime·MCache_ReleaseAll(c);
+ }
+}
+
+void
+runtime·updatememstats(GCStats *stats)
{
M *mp;
MSpan *s;
- MCache *c;
- P *p, **pp;
int32 i;
uint64 stacks_inuse, smallfree;
uint64 *src, *dst;
@@ -1895,12 +2201,7 @@ updatememstats(GCStats *stats)
}
// Flush MCache's to MCentral.
- for(pp=runtime·allp; p=*pp; pp++) {
- c = p->mcache;
- if(c==nil)
- continue;
- runtime·MCache_ReleaseAll(c);
- }
+ flushallmcaches();
// Aggregate local stats.
cachestats();
@@ -1942,6 +2243,7 @@ updatememstats(GCStats *stats)
struct gc_args
{
int64 start_time; // start time of GC in ns (just before stoptheworld)
+ bool eagersweep;
};
static void gc(struct gc_args *args);
@@ -1960,8 +2262,8 @@ readgogc(void)
return runtime·atoi(p);
}
-static FuncVal runfinqv = {runfinq};
-
+// force = 1 - do GC regardless of current heap usage
+// force = 2 - do GC and eager sweep
void
runtime·gc(int32 force)
{
@@ -1997,7 +2299,7 @@ runtime·gc(int32 force)
return;
runtime·semacquire(&runtime·worldsema, false);
- if(!force && mstats.heap_alloc < mstats.next_gc) {
+ if(force==0 && mstats.heap_alloc < mstats.next_gc) {
// typically threads which lost the race to grab
// worldsema exit here when gc is done.
runtime·semrelease(&runtime·worldsema);
@@ -2006,22 +2308,25 @@ runtime·gc(int32 force)
// Ok, we're doing it! Stop everybody else
a.start_time = runtime·nanotime();
+ a.eagersweep = force >= 2;
m->gcing = 1;
runtime·stoptheworld();
+ clearpools();
+
// Run gc on the g0 stack. We do this so that the g stack
// we're currently running on will no longer change. Cuts
// the root set down a bit (g0 stacks are not scanned, and
// we don't need to scan gc's internal state). Also an
// enabler for copyable stacks.
for(i = 0; i < (runtime·debug.gctrace > 1 ? 2 : 1); i++) {
+ if(i > 0)
+ a.start_time = runtime·nanotime();
// switch to g0, call gc(&a), then switch back
g->param = &a;
g->status = Gwaiting;
g->waitreason = "garbage collection";
runtime·mcall(mgc);
- // record a new start time in case we're going around again
- a.start_time = runtime·nanotime();
}
// all done
@@ -2032,19 +2337,10 @@ runtime·gc(int32 force)
m->locks--;
// now that gc is done, kick off finalizer thread if needed
- if(finq != nil) {
- runtime·lock(&finlock);
- // kick off or wake up goroutine to run queued finalizers
- if(fing == nil)
- fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc);
- else if(fingwait) {
- fingwait = 0;
- runtime·ready(fing);
- }
- runtime·unlock(&finlock);
+ if(!ConcurrentSweep) {
+ // give the queued finalizers, if any, a chance to run
+ runtime·gosched();
}
- // give the queued finalizers, if any, a chance to run
- runtime·gosched();
}
static void
@@ -2060,33 +2356,24 @@ static void
gc(struct gc_args *args)
{
int64 t0, t1, t2, t3, t4;
- uint64 heap0, heap1, obj0, obj1, ninstr;
+ uint64 heap0, heap1, obj, ninstr;
GCStats stats;
- M *mp;
uint32 i;
Eface eface;
+ if(runtime·debug.allocfreetrace)
+ runtime·tracegc();
+
+ m->traceback = 2;
t0 = args->start_time;
+ work.tstart = args->start_time;
if(CollectStats)
runtime·memclr((byte*)&gcstats, sizeof(gcstats));
- for(mp=runtime·allm; mp; mp=mp->alllink)
- runtime·settype_flush(mp);
-
- heap0 = 0;
- obj0 = 0;
- if(runtime·debug.gctrace) {
- updatememstats(nil);
- heap0 = mstats.heap_alloc;
- obj0 = mstats.nmalloc - mstats.nfree;
- }
-
m->locks++; // disable gc during mallocs in parforalloc
if(work.markfor == nil)
work.markfor = runtime·parforalloc(MaxGcproc);
- if(work.sweepfor == nil)
- work.sweepfor = runtime·parforalloc(MaxGcproc);
m->locks--;
if(itabtype == nil) {
@@ -2095,43 +2382,49 @@ gc(struct gc_args *args)
itabtype = ((PtrType*)eface.type)->elem;
}
+ t1 = 0;
+ if(runtime·debug.gctrace)
+ t1 = runtime·nanotime();
+
+ // Sweep whatever is not swept by bgsweep.
+ while(runtime·sweepone() != -1)
+ gcstats.npausesweep++;
+
work.nwait = 0;
work.ndone = 0;
- work.debugmarkdone = 0;
work.nproc = runtime·gcprocs();
- addroots();
- runtime·parforsetup(work.markfor, work.nproc, work.nroot, nil, false, markroot);
- runtime·parforsetup(work.sweepfor, work.nproc, runtime·mheap.nspan, nil, true, sweepspan);
+ runtime·parforsetup(work.markfor, work.nproc, RootCount + runtime·allglen, nil, false, markroot);
if(work.nproc > 1) {
runtime·noteclear(&work.alldone);
runtime·helpgc(work.nproc);
}
- t1 = runtime·nanotime();
+ t2 = 0;
+ if(runtime·debug.gctrace)
+ t2 = runtime·nanotime();
gchelperstart();
runtime·parfordo(work.markfor);
- scanblock(nil, nil, 0, true);
+ scanblock(nil, true);
- if(DebugMark) {
- for(i=0; i<work.nroot; i++)
- debug_scanblock(work.roots[i].p, work.roots[i].n);
- runtime·atomicstore(&work.debugmarkdone, 1);
- }
- t2 = runtime·nanotime();
+ t3 = 0;
+ if(runtime·debug.gctrace)
+ t3 = runtime·nanotime();
- runtime·parfordo(work.sweepfor);
bufferList[m->helpgc].busy = 0;
- t3 = runtime·nanotime();
-
if(work.nproc > 1)
runtime·notesleep(&work.alldone);
cachestats();
+ // The next_gc calculation is tricky with concurrent sweep since we don't know the size of the live heap.
+ // Estimate what the live heap size was after the previous GC (for tracing only).
+ heap0 = mstats.next_gc*100/(gcpercent+100);
+ // Conservatively set next_gc to a high value assuming that everything is live;
+ // concurrent/lazy sweep will reduce this number while discovering new garbage.
mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
t4 = runtime·nanotime();
- mstats.last_gc = t4;
+ mstats.last_gc = runtime·unixnanotime(); // must be Unix time to make sense to user
mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
mstats.pause_total_ns += t4 - t0;
mstats.numgc++;
@@ -2139,22 +2432,29 @@ gc(struct gc_args *args)
runtime·printf("pause %D\n", t4-t0);
if(runtime·debug.gctrace) {
- updatememstats(&stats);
heap1 = mstats.heap_alloc;
- obj1 = mstats.nmalloc - mstats.nfree;
+ runtime·updatememstats(&stats);
+ if(heap1 != mstats.heap_alloc) {
+ runtime·printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc);
+ runtime·throw("mstats skew");
+ }
+ obj = mstats.nmalloc - mstats.nfree;
- stats.nprocyield += work.sweepfor->nprocyield;
- stats.nosyield += work.sweepfor->nosyield;
- stats.nsleep += work.sweepfor->nsleep;
+ stats.nprocyield += work.markfor->nprocyield;
+ stats.nosyield += work.markfor->nosyield;
+ stats.nsleep += work.markfor->nsleep;
- runtime·printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB %D -> %D (%D-%D) objects,"
+ runtime·printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
+ " %d/%d/%d sweeps,"
" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, work.nproc, (t2-t1)/1000000, (t3-t2)/1000000, (t1-t0+t4-t3)/1000000,
- heap0>>20, heap1>>20, obj0, obj1,
+ mstats.numgc, work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
+ heap0>>20, heap1>>20, obj,
mstats.nmalloc, mstats.nfree,
+ sweep.nspan, gcstats.nbgsweep, gcstats.npausesweep,
stats.nhandoff, stats.nhandoffcnt,
- work.sweepfor->nsteal, work.sweepfor->nstealcnt,
+ work.markfor->nsteal, work.markfor->nstealcnt,
stats.nprocyield, stats.nosyield, stats.nsleep);
+ gcstats.nbgsweep = gcstats.npausesweep = 0;
if(CollectStats) {
runtime·printf("scan: %D bytes, %D objects, %D untyped, %D types from MSpan\n",
gcstats.nbytes, gcstats.obj.cnt, gcstats.obj.notype, gcstats.obj.typelookup);
@@ -2181,9 +2481,49 @@ gc(struct gc_args *args)
}
}
+ // We cache the current runtime·mheap.allspans array in sweep.spans
+ // because the former can be resized and freed.
+ // Otherwise we would need to take the heap lock every time
+ // we want to convert a span index to a span pointer.
+
+ // Free the old cached array if necessary.
+ if(sweep.spans && sweep.spans != runtime·mheap.allspans)
+ runtime·SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &mstats.other_sys);
+ // Cache the current array.
+ runtime·mheap.sweepspans = runtime·mheap.allspans;
+ runtime·mheap.sweepgen += 2;
+ runtime·mheap.sweepdone = false;
+ sweep.spans = runtime·mheap.allspans;
+ sweep.nspan = runtime·mheap.nspan;
+ sweep.spanidx = 0;
+
+ // Concurrent sweep can be temporarily disabled (via ConcurrentSweep) when we see failures on builders.
+ if(ConcurrentSweep && !args->eagersweep) {
+ runtime·lock(&gclock);
+ if(sweep.g == nil)
+ sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, runtime·gc);
+ else if(sweep.parked) {
+ sweep.parked = false;
+ runtime·ready(sweep.g);
+ }
+ runtime·unlock(&gclock);
+ } else {
+ // Sweep all spans eagerly.
+ while(runtime·sweepone() != -1)
+ gcstats.npausesweep++;
+ }
+
+ // Shrink a stack if not much of it is being used.
+ // TODO: do in a parfor
+ for(i = 0; i < runtime·allglen; i++)
+ runtime·shrinkstack(runtime·allg[i]);
+
runtime·MProf_GC();
+ m->traceback = 0;
}
+extern uintptr runtime·sizeof_C_MStats;
+
void
runtime·ReadMemStats(MStats *stats)
{
@@ -2194,8 +2534,10 @@ runtime·ReadMemStats(MStats *stats)
runtime·semacquire(&runtime·worldsema, false);
m->gcing = 1;
runtime·stoptheworld();
- updatememstats(nil);
- *stats = mstats;
+ runtime·updatememstats(nil);
+ // The size of the trailing by_size array differs between Go and C:
+ // NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
+ runtime·memcopy(runtime·sizeof_C_MStats, stats, &mstats);
m->gcing = 0;
m->locks++;
runtime·semrelease(&runtime·worldsema);
@@ -2234,9 +2576,10 @@ runtime∕debug·readGCStats(Slice *pauses)
pauses->len = n+3;
}
-void
-runtime∕debug·setGCPercent(intgo in, intgo out)
-{
+int32
+runtime·setgcpercent(int32 in)
+{
+ int32 out;
+
runtime·lock(&runtime·mheap);
if(gcpercent == GcpercentUnknown)
gcpercent = readgogc();
@@ -2245,7 +2588,7 @@ runtime∕debug·setGCPercent(intgo in, intgo out)
in = -1;
gcpercent = in;
runtime·unlock(&runtime·mheap);
- FLUSH(&out);
+ return out;
}
static void
@@ -2268,15 +2611,38 @@ runfinq(void)
uint32 framesz, framecap, i;
Eface *ef, ef1;
+ // This function blocks for long periods of time, and because it is written in C
+ // we have no liveness information. Zero everything so that uninitialized pointers
+ // do not cause memory leaks.
+ f = nil;
+ fb = nil;
+ next = nil;
frame = nil;
framecap = 0;
+ framesz = 0;
+ i = 0;
+ ef = nil;
+ ef1.type = nil;
+ ef1.data = nil;
+
+ // force flush to memory
+ USED(&f);
+ USED(&fb);
+ USED(&next);
+ USED(&framesz);
+ USED(&i);
+ USED(&ef);
+ USED(&ef1);
+
for(;;) {
runtime·lock(&finlock);
fb = finq;
finq = nil;
if(fb == nil) {
- fingwait = 1;
- runtime·park(runtime·unlock, &finlock, "finalizer wait");
+ runtime·fingwait = true;
+ g->isbackground = true;
+ runtime·parkunlock(&finlock, "finalizer wait");
+ g->isbackground = false;
continue;
}
runtime·unlock(&finlock);
@@ -2290,7 +2656,7 @@ runfinq(void)
if(framecap < framesz) {
runtime·free(frame);
// The frame does not contain pointers interesting for GC,
- // all not yet finalized objects are stored in finc.
+ // all not yet finalized objects are stored in finq.
// If we do not mark it as FlagNoScan,
// the last finalized object is not collected.
frame = runtime·mallocgc(framesz, 0, FlagNoScan|FlagNoInvokeGC);
@@ -2313,80 +2679,99 @@ runfinq(void)
if(!runtime·ifaceE2I2((InterfaceType*)f->fint, ef1, (Iface*)frame))
runtime·throw("invalid type conversion in runfinq");
}
- reflect·call(f->fn, frame, framesz);
+ reflect·call(f->fn, frame, framesz, framesz);
f->fn = nil;
f->arg = nil;
f->ot = nil;
}
fb->cnt = 0;
+ runtime·lock(&finlock);
fb->next = finc;
finc = fb;
+ runtime·unlock(&finlock);
}
+
+ // Zero everything that's dead, to avoid memory leaks.
+ // See comment at top of function.
+ f = nil;
+ fb = nil;
+ next = nil;
+ i = 0;
+ ef = nil;
+ ef1.type = nil;
+ ef1.data = nil;
runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible
}
}
-// mark the block at v of size n as allocated.
-// If noscan is true, mark it as not needing scanning.
void
-runtime·markallocated(void *v, uintptr n, bool noscan)
+runtime·createfing(void)
{
- uintptr *b, obits, bits, off, shift;
+ if(fing != nil)
+ return;
+ // Here we use gclock instead of finlock,
+ // because newproc1 can allocate, which can cause on-demand span sweep,
+ // which can queue finalizers, which would deadlock.
+ runtime·lock(&gclock);
+ if(fing == nil)
+ fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc);
+ runtime·unlock(&gclock);
+}
- if(0)
- runtime·printf("markallocated %p+%p\n", v, n);
+G*
+runtime·wakefing(void)
+{
+ G *res;
- if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
- runtime·throw("markallocated: bad pointer");
+ res = nil;
+ runtime·lock(&finlock);
+ if(runtime·fingwait && runtime·fingwake) {
+ runtime·fingwait = false;
+ runtime·fingwake = false;
+ res = fing;
+ }
+ runtime·unlock(&finlock);
+ return res;
+}
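+
+// wakefing pairs with queuefinalizer (which sets fingwake) and runfinq (which
+// sets fingwait before parking); presumably the scheduler calls it to pick up
+// the finalizer goroutine to ready once both flags are set.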
+
+void
+runtime·marknogc(void *v)
+{
+ uintptr *b, off, shift;
off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
+ *b = (*b & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift;
+}
- for(;;) {
- obits = *b;
- bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
- if(noscan)
- bits |= bitNoScan<<shift;
- if(runtime·gomaxprocs == 1) {
- *b = bits;
- break;
- } else {
- // more than one goroutine is potentially running: use atomic op
- if(runtime·casp((void**)b, (void*)obits, (void*)bits))
- break;
- }
- }
+void
+runtime·markscan(void *v)
+{
+ uintptr *b, off, shift;
+
+ off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ *b |= bitScan<<shift;
}
-// mark the block at v of size n as freed.
+// mark the block at v as freed.
void
-runtime·markfreed(void *v, uintptr n)
+runtime·markfreed(void *v)
{
- uintptr *b, obits, bits, off, shift;
+ uintptr *b, off, shift;
if(0)
- runtime·printf("markfreed %p+%p\n", v, n);
+ runtime·printf("markfreed %p\n", v);
- if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
+ if((byte*)v > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
runtime·throw("markfreed: bad pointer");
off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
-
- for(;;) {
- obits = *b;
- bits = (obits & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
- if(runtime·gomaxprocs == 1) {
- *b = bits;
- break;
- } else {
- // more than one goroutine is potentially running: use atomic op
- if(runtime·casp((void**)b, (void*)obits, (void*)bits))
- break;
- }
- }
+ *b = (*b & ~(bitMask<<shift)) | (bitAllocated<<shift);
}
// check that the block at v of size n is marked freed.
@@ -2418,15 +2803,28 @@ runtime·checkfreed(void *v, uintptr n)
void
runtime·markspan(void *v, uintptr size, uintptr n, bool leftover)
{
- uintptr *b, off, shift;
+ uintptr *b, *b0, off, shift, i, x;
byte *p;
if((byte*)v+size*n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
runtime·throw("markspan: bad pointer");
+ if(runtime·checking) {
+ // bits should be all zero at the start
+ off = (byte*)v + size - runtime·mheap.arena_start;
+ b = (uintptr*)(runtime·mheap.arena_start - off/wordsPerBitmapWord);
+ for(i = 0; i < size/PtrSize/wordsPerBitmapWord; i++) {
+ if(b[i] != 0)
+ runtime·throw("markspan: span bits not zero");
+ }
+ }
+
p = v;
if(leftover) // mark a boundary just past end of last block too
n++;
+
+ b0 = nil;
+ x = 0;
for(; n-- > 0; p += size) {
// Okay to use non-atomic ops here, because we control
// the entire span, and each bitmap word has bits for only
@@ -2435,8 +2833,15 @@ runtime·markspan(void *v, uintptr size, uintptr n, bool leftover)
off = (uintptr*)p - (uintptr*)runtime·mheap.arena_start; // word offset
b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
- *b = (*b & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
+ if(b0 != b) {
+ if(b0 != nil)
+ *b0 = x;
+ b0 = b;
+ x = 0;
+ }
+ x |= bitAllocated<<shift;
}
+ *b0 = x;
}
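+
+// The b0/x accumulation above batches bitmap updates (illustrative): since
+// wordsPerBitmapWord heap words share one bitmap word, the bits are OR-ed
+// into x and flushed once per bitmap word rather than read-modify-written
+// once per object.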
// unmark the span of memory at v of length n bytes.
@@ -2465,50 +2870,6 @@ runtime·unmarkspan(void *v, uintptr n)
*b-- = 0;
}
-bool
-runtime·blockspecial(void *v)
-{
- uintptr *b, off, shift;
-
- if(DebugMark)
- return true;
-
- off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start;
- b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
- shift = off % wordsPerBitmapWord;
-
- return (*b & (bitSpecial<<shift)) != 0;
-}
-
-void
-runtime·setblockspecial(void *v, bool s)
-{
- uintptr *b, off, shift, bits, obits;
-
- if(DebugMark)
- return;
-
- off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start;
- b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
- shift = off % wordsPerBitmapWord;
-
- for(;;) {
- obits = *b;
- if(s)
- bits = obits | (bitSpecial<<shift);
- else
- bits = obits & ~(bitSpecial<<shift);
- if(runtime·gomaxprocs == 1) {
- *b = bits;
- break;
- } else {
- // more than one goroutine is potentially running: use atomic op
- if(runtime·casp((void**)b, (void*)obits, (void*)bits))
- break;
- }
- }
-}
-
void
runtime·MHeap_MapBits(MHeap *h)
{
@@ -2522,9 +2883,10 @@ runtime·MHeap_MapBits(MHeap *h)
n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
n = ROUND(n, bitmapChunk);
+ n = ROUND(n, PhysPageSize);
if(h->bitmap_mapped >= n)
return;
- runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, &mstats.gc_sys);
+ runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys);
h->bitmap_mapped = n;
}
diff --git a/src/pkg/runtime/mgc0.go b/src/pkg/runtime/mgc0.go
index b15054662..624485d18 100644
--- a/src/pkg/runtime/mgc0.go
+++ b/src/pkg/runtime/mgc0.go
@@ -9,7 +9,19 @@ func gc_m_ptr(ret *interface{}) {
*ret = (*m)(nil)
}
+// Called from C. Returns the Go type *g.
+func gc_g_ptr(ret *interface{}) {
+ *ret = (*g)(nil)
+}
+
// Called from C. Returns the Go type *itab.
func gc_itab_ptr(ret *interface{}) {
*ret = (*itab)(nil)
}
+
+func timenow() (sec int64, nsec int32)
+
+func gc_unixnanotime(now *int64) {
+ sec, nsec := timenow()
+ *now = sec*1e9 + int64(nsec)
+}
diff --git a/src/pkg/runtime/mgc0.h b/src/pkg/runtime/mgc0.h
index f8abe6c9c..16000d1ae 100644
--- a/src/pkg/runtime/mgc0.h
+++ b/src/pkg/runtime/mgc0.h
@@ -44,3 +44,44 @@ enum {
// - at most GC_STACK_CAPACITY allocations because of GC_ARRAY_START
GC_STACK_CAPACITY = 8,
};
+
+enum {
+ ScanStackByFrames = 1,
+ IgnorePreciseGC = 0,
+
+ // Four bits per word (see #defines below).
+ wordsPerBitmapWord = sizeof(void*)*8/4,
+ bitShift = sizeof(void*)*8/4,
+};
+
+// Bits in per-word bitmap.
+// #defines because enum might not be able to hold the values.
+//
+// Each word in the bitmap describes wordsPerBitmapWord words
+// of heap memory. There are 4 bitmap bits dedicated to each heap word,
+// so on a 64-bit system there is one bitmap word per 16 heap words.
+// The bits in the word are packed together by type first, then by
+// heap location, so each 64-bit bitmap word consists of, from top to bottom,
+// the 16 bitMarked bits for the corresponding heap words,
+// then the 16 bitScan/bitBlockBoundary bits, then the 16 bitAllocated bits.
+// This layout makes it easier to iterate over the bits of a given type.
+//
+// The bitmap starts at mheap.arena_start and extends *backward* from
+// there. On a 64-bit system the off'th word in the arena is tracked by
+// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
+// the only difference is that the divisor is 8.)
+//
+// To pull out the bits corresponding to a given pointer p, we use:
+//
+// off = p - (uintptr*)mheap.arena_start; // word offset
+// b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
+// shift = off % wordsPerBitmapWord
+// bits = *b >> shift;
+// /* then test bits & bitAllocated, bits & bitMarked, etc. */
+//
+#define bitAllocated ((uintptr)1<<(bitShift*0)) /* block start; eligible for garbage collection */
+#define bitScan ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */
+#define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */
+#define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set - mark for FlagNoGC objects */
+
+#define bitMask (bitAllocated | bitScan | bitMarked)
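+
+// Worked example of the formulas above (illustrative; 64-bit system, so
+// wordsPerBitmapWord = bitShift = 16): for a pointer p at word offset
+// off = 35 from arena_start,
+//
+//	b     = (uintptr*)arena_start - 35/16 - 1;  // 3rd bitmap word before arena_start
+//	shift = 35 % 16;                            // = 3
+//	bits  = *b >> shift;
+//
+// and bits & bitAllocated, bits & bitScan, bits & bitMarked test the three
+// flags, which sit 16 bits apart within the bitmap word.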
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index fc80c2600..7e83eb283 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -41,7 +41,10 @@ RecordSpan(void *vh, byte *p)
runtime·throw("runtime: cannot allocate memory");
if(h->allspans) {
runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
- runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
+ // Don't free the old array if it's referenced by sweep.
+ // See the comment in mgc0.c.
+ if(h->allspans != runtime·mheap.sweepspans)
+ runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
}
h->allspans = all;
h->nspancap = cap;
@@ -57,10 +60,15 @@ runtime·MHeap_Init(MHeap *h)
runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
+ runtime·FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
+ runtime·FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
// h->mapcache needs no init
- for(i=0; i<nelem(h->free); i++)
+ for(i=0; i<nelem(h->free); i++) {
runtime·MSpanList_Init(&h->free[i]);
- runtime·MSpanList_Init(&h->large);
+ runtime·MSpanList_Init(&h->busy[i]);
+ }
+ runtime·MSpanList_Init(&h->freelarge);
+ runtime·MSpanList_Init(&h->busylarge);
for(i=0; i<nelem(h->central); i++)
runtime·MCentral_Init(&h->central[i], i);
}
@@ -72,20 +80,95 @@ runtime·MHeap_MapSpans(MHeap *h)
// Map spans array, PageSize at a time.
n = (uintptr)h->arena_used;
- if(sizeof(void*) == 8)
- n -= (uintptr)h->arena_start;
+ n -= (uintptr)h->arena_start;
n = n / PageSize * sizeof(h->spans[0]);
- n = ROUND(n, PageSize);
+ n = ROUND(n, PhysPageSize);
if(h->spans_mapped >= n)
return;
- runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, &mstats.other_sys);
+ runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
h->spans_mapped = n;
}
+// Sweeps spans in list until it reclaims at least npages into the heap.
+// Returns the actual number of pages reclaimed.
+static uintptr
+MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
+{
+ MSpan *s;
+ uintptr n;
+ uint32 sg;
+
+ n = 0;
+ sg = runtime·mheap.sweepgen;
+retry:
+ for(s = list->next; s != list; s = s->next) {
+ if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
+ runtime·MSpanList_Remove(s);
+ // swept spans are at the end of the list
+ runtime·MSpanList_InsertBack(list, s);
+ runtime·unlock(h);
+ n += runtime·MSpan_Sweep(s);
+ runtime·lock(h);
+ if(n >= npages)
+ return n;
+ // the span could have been moved elsewhere
+ goto retry;
+ }
+ if(s->sweepgen == sg-1) {
+ // the span is being swept by the background sweeper; skip it
+ continue;
+ }
+ // An already-swept empty span;
+ // all subsequent ones must also be either swept or in the process of being swept.
+ break;
+ }
+ return n;
+}
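+
+// The early break above is valid because MSpanList_InsertBack moves swept
+// spans to the tail of the list: once an already-swept span is reached,
+// everything after it is swept or being swept as well.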
+
+// Sweeps and reclaims at least npage pages into heap.
+// Called before allocating npage pages.
+static void
+MHeap_Reclaim(MHeap *h, uintptr npage)
+{
+ uintptr reclaimed, n;
+
+ // First try to sweep busy spans with large objects of size >= npage,
+ // this has good chances of reclaiming the necessary space.
+ for(n=npage; n < nelem(h->busy); n++) {
+ if(MHeap_ReclaimList(h, &h->busy[n], npage))
+ return; // Bingo!
+ }
+
+ // Then try even larger objects.
+ if(MHeap_ReclaimList(h, &h->busylarge, npage))
+ return; // Bingo!
+
+ // Now try smaller objects.
+ // One such object is not enough, so we need to reclaim several of them.
+ reclaimed = 0;
+ for(n=0; n < npage && n < nelem(h->busy); n++) {
+ reclaimed += MHeap_ReclaimList(h, &h->busy[n], npage-reclaimed);
+ if(reclaimed >= npage)
+ return;
+ }
+
+ // Now sweep everything that is not yet swept.
+ runtime·unlock(h);
+ for(;;) {
+ n = runtime·sweepone();
+ if(n == -1) // all spans are swept
+ break;
+ reclaimed += n;
+ if(reclaimed >= npage)
+ break;
+ }
+ runtime·lock(h);
+}
+
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
-runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
+runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
MSpan *s;
@@ -95,14 +178,22 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
mstats.heap_inuse += npage<<PageShift;
- if(acct) {
+ if(large) {
mstats.heap_objects++;
mstats.heap_alloc += npage<<PageShift;
+ // Swept spans are at the end of lists.
+ if(s->npages < nelem(h->free))
+ runtime·MSpanList_InsertBack(&h->busy[s->npages], s);
+ else
+ runtime·MSpanList_InsertBack(&h->busylarge, s);
}
}
runtime·unlock(h);
- if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
- runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ if(s != nil) {
+ if(needzero && s->needzero)
+ runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ s->needzero = 0;
+ }
return s;
}
@@ -113,6 +204,11 @@ MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
MSpan *s, *t;
PageID p;
+ // To prevent excessive heap growth, before allocating n pages
+ // we need to sweep and reclaim at least n pages.
+ if(!h->sweepdone)
+ MHeap_Reclaim(h, npage);
+
// Try in fixed-size lists up to max.
for(n=npage; n < nelem(h->free); n++) {
if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
@@ -136,29 +232,12 @@ HaveSpan:
if(s->npages < npage)
runtime·throw("MHeap_AllocLocked - bad npages");
runtime·MSpanList_Remove(s);
+ runtime·atomicstore(&s->sweepgen, h->sweepgen);
s->state = MSpanInUse;
mstats.heap_idle -= s->npages<<PageShift;
mstats.heap_released -= s->npreleased<<PageShift;
- if(s->npreleased > 0) {
- // We have called runtime·SysUnused with these pages, and on
- // Unix systems it called madvise. At this point at least
- // some BSD-based kernels will return these pages either as
- // zeros or with the old data. For our caller, the first word
- // in the page indicates whether the span contains zeros or
- // not (this word was set when the span was freed by
- // MCentral_Free or runtime·MCentral_FreeSpan). If the first
- // page in the span is returned as zeros, and some subsequent
- // page is returned with the old data, then we will be
- // returning a span that is assumed to be all zeros, but the
- // actual data will not be all zeros. Avoid that problem by
- // explicitly marking the span as not being zeroed, just in
- // case. The beadbead constant we use here means nothing, it
- // is just a unique constant not seen elsewhere in the
- // runtime, as a clue in case it turns up unexpectedly in
- // memory or in a stack trace.
+ if(s->npreleased > 0)
runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
- *(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
- }
s->npreleased = 0;
if(s->npages > npage) {
@@ -167,13 +246,13 @@ HaveSpan:
runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
s->npages = npage;
p = t->start;
- if(sizeof(void*) == 8)
- p -= ((uintptr)h->arena_start>>PageShift);
+ p -= ((uintptr)h->arena_start>>PageShift);
if(p > 0)
h->spans[p-1] = s;
h->spans[p] = t;
h->spans[p+t->npages-1] = t;
- *(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift); // copy "needs zeroing" mark
+ t->needzero = s->needzero;
+ runtime·atomicstore(&t->sweepgen, h->sweepgen);
t->state = MSpanInUse;
MHeap_FreeLocked(h, t);
t->unusedsince = s->unusedsince; // preserve age
@@ -186,8 +265,7 @@ HaveSpan:
s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime·class_to_size[sizeclass]);
s->types.compression = MTypes_Empty;
p = s->start;
- if(sizeof(void*) == 8)
- p -= ((uintptr)h->arena_start>>PageShift);
+ p -= ((uintptr)h->arena_start>>PageShift);
for(n=0; n<npage; n++)
h->spans[p+n] = s;
return s;
@@ -197,7 +275,7 @@ HaveSpan:
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
- return BestFit(&h->large, npage, nil);
+ return BestFit(&h->freelarge, npage, nil);
}
// Search list for smallest span with >= npage pages.
@@ -255,10 +333,10 @@ MHeap_Grow(MHeap *h, uintptr npage)
s = runtime·FixAlloc_Alloc(&h->spanalloc);
runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
p = s->start;
- if(sizeof(void*) == 8)
- p -= ((uintptr)h->arena_start>>PageShift);
+ p -= ((uintptr)h->arena_start>>PageShift);
h->spans[p] = s;
h->spans[p + s->npages - 1] = s;
+ runtime·atomicstore(&s->sweepgen, h->sweepgen);
s->state = MSpanInUse;
MHeap_FreeLocked(h, s);
return true;
@@ -273,8 +351,7 @@ runtime·MHeap_Lookup(MHeap *h, void *v)
uintptr p;
p = (uintptr)v;
- if(sizeof(void*) == 8)
- p -= (uintptr)h->arena_start;
+ p -= (uintptr)h->arena_start;
return h->spans[p >> PageShift];
}
@@ -295,8 +372,7 @@ runtime·MHeap_LookupMaybe(MHeap *h, void *v)
return nil;
p = (uintptr)v>>PageShift;
q = p;
- if(sizeof(void*) == 8)
- q -= (uintptr)h->arena_start >> PageShift;
+ q -= (uintptr)h->arena_start >> PageShift;
s = h->spans[q];
if(s == nil || p < s->start || v >= s->limit || s->state != MSpanInUse)
return nil;
@@ -322,20 +398,19 @@ runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
- uintptr *sp, *tp;
MSpan *t;
PageID p;
s->types.compression = MTypes_Empty;
- if(s->state != MSpanInUse || s->ref != 0) {
- runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
+ if(s->state != MSpanInUse || s->ref != 0 || s->sweepgen != h->sweepgen) {
+ runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d sweepgen %d/%d\n",
+ s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
runtime·throw("MHeap_FreeLocked - invalid free");
}
mstats.heap_idle += s->npages<<PageShift;
s->state = MSpanFree;
runtime·MSpanList_Remove(s);
- sp = (uintptr*)(s->start<<PageShift);
// Stamp newly unused spans. The scavenger will use that
// info to potentially give back some pages to the OS.
s->unusedsince = runtime·nanotime();
@@ -343,16 +418,12 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
// Coalesce with earlier, later spans.
p = s->start;
- if(sizeof(void*) == 8)
- p -= (uintptr)h->arena_start >> PageShift;
+ p -= (uintptr)h->arena_start >> PageShift;
if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
- if(t->npreleased == 0) { // cant't touch this otherwise
- tp = (uintptr*)(t->start<<PageShift);
- *tp |= *sp; // propagate "needs zeroing" mark
- }
s->start = t->start;
s->npages += t->npages;
s->npreleased = t->npreleased; // absorb released pages
+ s->needzero |= t->needzero;
p -= t->npages;
h->spans[p] = s;
runtime·MSpanList_Remove(t);
@@ -360,12 +431,9 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
runtime·FixAlloc_Free(&h->spanalloc, t);
}
if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
- if(t->npreleased == 0) { // cant't touch this otherwise
- tp = (uintptr*)(t->start<<PageShift);
- *sp |= *tp; // propagate "needs zeroing" mark
- }
s->npages += t->npages;
s->npreleased += t->npreleased;
+ s->needzero |= t->needzero;
h->spans[p + s->npages - 1] = s;
runtime·MSpanList_Remove(t);
t->state = MSpanDead;
@@ -376,7 +444,7 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
if(s->npages < nelem(h->free))
runtime·MSpanList_Insert(&h->free[s->npages], s);
else
- runtime·MSpanList_Insert(&h->large, s);
+ runtime·MSpanList_Insert(&h->freelarge, s);
}
static void
@@ -419,7 +487,7 @@ scavenge(int32 k, uint64 now, uint64 limit)
sumreleased = 0;
for(i=0; i < nelem(h->free); i++)
sumreleased += scavengelist(&h->free[i], now, limit);
- sumreleased += scavengelist(&h->large, now, limit);
+ sumreleased += scavengelist(&h->freelarge, now, limit);
if(runtime·debug.gctrace > 0) {
if(sumreleased > 0)
@@ -440,6 +508,7 @@ runtime·MHeap_Scavenger(void)
{
MHeap *h;
uint64 tick, now, forcegc, limit;
+ int64 unixnow;
int32 k;
Note note, *notep;
@@ -463,8 +532,8 @@ runtime·MHeap_Scavenger(void)
runtime·notetsleepg(&note, tick);
runtime·lock(h);
- now = runtime·nanotime();
- if(now - mstats.last_gc > forcegc) {
+ unixnow = runtime·unixnanotime();
+ if(unixnow - mstats.last_gc > forcegc) {
runtime·unlock(h);
// The scavenger can not block other goroutines,
// otherwise deadlock detector can fire spuriously.
@@ -476,8 +545,8 @@ runtime·MHeap_Scavenger(void)
if(runtime·debug.gctrace > 0)
runtime·printf("scvg%d: GC forced\n", k);
runtime·lock(h);
- now = runtime·nanotime();
}
+ now = runtime·nanotime();
scavenge(k, now, limit);
runtime·unlock(h);
}
@@ -486,7 +555,7 @@ runtime·MHeap_Scavenger(void)
void
runtime∕debug·freeOSMemory(void)
{
- runtime·gc(1);
+ runtime·gc(2); // force GC and do eager sweep
runtime·lock(&runtime·mheap);
scavenge(-1, ~(uintptr)0, 0);
runtime·unlock(&runtime·mheap);
@@ -503,11 +572,16 @@ runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
span->freelist = nil;
span->ref = 0;
span->sizeclass = 0;
+ span->incache = false;
span->elemsize = 0;
- span->state = 0;
+ span->state = MSpanDead;
span->unusedsince = 0;
span->npreleased = 0;
span->types.compression = MTypes_Empty;
+ span->specialLock.key = 0;
+ span->specials = nil;
+ span->needzero = 0;
+ span->freebuf = nil;
}
// Initialize an empty doubly-linked list.
@@ -549,4 +623,310 @@ runtime·MSpanList_Insert(MSpan *list, MSpan *span)
span->prev->next = span;
}
+void
+runtime·MSpanList_InsertBack(MSpan *list, MSpan *span)
+{
+ if(span->next != nil || span->prev != nil) {
+ runtime·printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
+ runtime·throw("MSpanList_Insert");
+ }
+ span->next = list;
+ span->prev = list->prev;
+ span->next->prev = span;
+ span->prev->next = span;
+}
+// Adds the special record s to the list of special records for
+// the object p. All fields of s should be filled in except for
+// offset & next, which this routine will fill in.
+// Returns true if the special was successfully added, false otherwise.
+// (The add will fail only if a record with the same p and s->kind
+// already exists.)
+static bool
+addspecial(void *p, Special *s)
+{
+ MSpan *span;
+ Special **t, *x;
+ uintptr offset;
+ byte kind;
+
+ span = runtime·MHeap_LookupMaybe(&runtime·mheap, p);
+ if(span == nil)
+ runtime·throw("addspecial on invalid pointer");
+
+ // Ensure that the span is swept.
+ // GC accesses specials list w/o locks. And it's just much safer.
+ m->locks++;
+ runtime·MSpan_EnsureSwept(span);
+
+ offset = (uintptr)p - (span->start << PageShift);
+ kind = s->kind;
+
+ runtime·lock(&span->specialLock);
+
+ // Find splice point, check for existing record.
+ t = &span->specials;
+ while((x = *t) != nil) {
+ if(offset == x->offset && kind == x->kind) {
+ runtime·unlock(&span->specialLock);
+ m->locks--;
+ return false; // already exists
+ }
+ if(offset < x->offset || (offset == x->offset && kind < x->kind))
+ break;
+ t = &x->next;
+ }
+ // Splice in record, fill in offset.
+ s->offset = offset;
+ s->next = x;
+ *t = s;
+ runtime·unlock(&span->specialLock);
+ m->locks--;
+ return true;
+}
+
+// Removes the Special record of the given kind for the object p.
+// Returns the record if the record existed, nil otherwise.
+// The caller must FixAlloc_Free the result.
+static Special*
+removespecial(void *p, byte kind)
+{
+ MSpan *span;
+ Special *s, **t;
+ uintptr offset;
+
+ span = runtime·MHeap_LookupMaybe(&runtime·mheap, p);
+ if(span == nil)
+ runtime·throw("removespecial on invalid pointer");
+
+ // Ensure that the span is swept.
+ // GC accesses specials list w/o locks. And it's just much safer.
+ m->locks++;
+ runtime·MSpan_EnsureSwept(span);
+
+ offset = (uintptr)p - (span->start << PageShift);
+
+ runtime·lock(&span->specialLock);
+ t = &span->specials;
+ while((s = *t) != nil) {
+ // This function is used for finalizers only, so we don't check for
+ // "interior" specials (p must be exactly equal to s->offset).
+ if(offset == s->offset && kind == s->kind) {
+ *t = s->next;
+ runtime·unlock(&span->specialLock);
+ m->locks--;
+ return s;
+ }
+ t = &s->next;
+ }
+ runtime·unlock(&span->specialLock);
+ m->locks--;
+ return nil;
+}
+
+// Adds a finalizer to the object p. Returns true if it succeeded.
+bool
+runtime·addfinalizer(void *p, FuncVal *f, uintptr nret, Type *fint, PtrType *ot)
+{
+ SpecialFinalizer *s;
+
+ runtime·lock(&runtime·mheap.speciallock);
+ s = runtime·FixAlloc_Alloc(&runtime·mheap.specialfinalizeralloc);
+ runtime·unlock(&runtime·mheap.speciallock);
+ s->kind = KindSpecialFinalizer;
+ s->fn = f;
+ s->nret = nret;
+ s->fint = fint;
+ s->ot = ot;
+ if(addspecial(p, s))
+ return true;
+
+ // There was an old finalizer
+ runtime·lock(&runtime·mheap.speciallock);
+ runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, s);
+ runtime·unlock(&runtime·mheap.speciallock);
+ return false;
+}
+
+// Removes the finalizer (if any) from the object p.
+void
+runtime·removefinalizer(void *p)
+{
+ SpecialFinalizer *s;
+
+ s = (SpecialFinalizer*)removespecial(p, KindSpecialFinalizer);
+ if(s == nil)
+ return; // there wasn't a finalizer to remove
+ runtime·lock(&runtime·mheap.speciallock);
+ runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, s);
+ runtime·unlock(&runtime·mheap.speciallock);
+}
+
+// Set the heap profile bucket associated with addr to b.
+void
+runtime·setprofilebucket(void *p, Bucket *b)
+{
+ SpecialProfile *s;
+
+ runtime·lock(&runtime·mheap.speciallock);
+ s = runtime·FixAlloc_Alloc(&runtime·mheap.specialprofilealloc);
+ runtime·unlock(&runtime·mheap.speciallock);
+ s->kind = KindSpecialProfile;
+ s->b = b;
+ if(!addspecial(p, s))
+ runtime·throw("setprofilebucket: profile already set");
+}
+
+// Do whatever cleanup needs to be done to deallocate s. It has
+// already been unlinked from the MSpan specials list.
+// Returns true if we should keep working on deallocating p.
+bool
+runtime·freespecial(Special *s, void *p, uintptr size, bool freed)
+{
+ SpecialFinalizer *sf;
+ SpecialProfile *sp;
+
+ switch(s->kind) {
+ case KindSpecialFinalizer:
+ sf = (SpecialFinalizer*)s;
+ runtime·queuefinalizer(p, sf->fn, sf->nret, sf->fint, sf->ot);
+ runtime·lock(&runtime·mheap.speciallock);
+ runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, sf);
+ runtime·unlock(&runtime·mheap.speciallock);
+ return false; // don't free p until finalizer is done
+ case KindSpecialProfile:
+ sp = (SpecialProfile*)s;
+ runtime·MProf_Free(sp->b, size, freed);
+ runtime·lock(&runtime·mheap.speciallock);
+ runtime·FixAlloc_Free(&runtime·mheap.specialprofilealloc, sp);
+ runtime·unlock(&runtime·mheap.speciallock);
+ return true;
+ default:
+ runtime·throw("bad special kind");
+ return true;
+ }
+}
+
+// Free all special records for p.
+void
+runtime·freeallspecials(MSpan *span, void *p, uintptr size)
+{
+ Special *s, **t, *list;
+ uintptr offset;
+
+ if(span->sweepgen != runtime·mheap.sweepgen)
+ runtime·throw("runtime: freeallspecials: unswept span");
+ // first, collect all specials into the list; then, free them
+ // this is required to not cause deadlock between span->specialLock and proflock
+ list = nil;
+ offset = (uintptr)p - (span->start << PageShift);
+ runtime·lock(&span->specialLock);
+ t = &span->specials;
+ while((s = *t) != nil) {
+ if(offset + size <= s->offset)
+ break;
+ if(offset <= s->offset) {
+ *t = s->next;
+ s->next = list;
+ list = s;
+ } else
+ t = &s->next;
+ }
+ runtime·unlock(&span->specialLock);
+
+ while(list != nil) {
+ s = list;
+ list = s->next;
+ if(!runtime·freespecial(s, p, size, true))
+ runtime·throw("can't explicitly free an object with a finalizer");
+ }
+}
+
+// Split an allocated span into two equal parts.
+void
+runtime·MHeap_SplitSpan(MHeap *h, MSpan *s)
+{
+ MSpan *t;
+ MCentral *c;
+ uintptr i;
+ uintptr npages;
+ PageID p;
+
+ if(s->state != MSpanInUse)
+ runtime·throw("MHeap_SplitSpan on a free span");
+ if(s->sizeclass != 0 && s->ref != 1)
+ runtime·throw("MHeap_SplitSpan doesn't have an allocated object");
+ npages = s->npages;
+
+ // remove the span from whatever list it is in now
+ if(s->sizeclass > 0) {
+ // must be in h->central[x].empty
+ c = &h->central[s->sizeclass];
+ runtime·lock(c);
+ runtime·MSpanList_Remove(s);
+ runtime·unlock(c);
+ runtime·lock(h);
+ } else {
+ // must be in h->busy/busylarge
+ runtime·lock(h);
+ runtime·MSpanList_Remove(s);
+ }
+ // heap is locked now
+
+ if(npages == 1) {
+ // convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
+ s->ref = 2;
+ s->sizeclass = runtime·SizeToClass(PageSize/2);
+ s->elemsize = PageSize/2;
+ } else {
+ // convert span of n>1 pages into two spans of n/2 pages each.
+ if((s->npages & 1) != 0)
+ runtime·throw("MHeap_SplitSpan on an odd size span");
+
+ // compute position in h->spans
+ p = s->start;
+ p -= (uintptr)h->arena_start >> PageShift;
+
+ // Allocate a new span for the first half.
+ t = runtime·FixAlloc_Alloc(&h->spanalloc);
+ runtime·MSpan_Init(t, s->start, npages/2);
+ t->limit = (byte*)((t->start + npages/2) << PageShift);
+ t->state = MSpanInUse;
+ t->elemsize = npages << (PageShift - 1);
+ t->sweepgen = s->sweepgen;
+ if(t->elemsize <= MaxSmallSize) {
+ t->sizeclass = runtime·SizeToClass(t->elemsize);
+ t->ref = 1;
+ }
+
+ // the old span holds the second half.
+ s->start += npages/2;
+ s->npages = npages/2;
+ s->elemsize = npages << (PageShift - 1);
+ if(s->elemsize <= MaxSmallSize) {
+ s->sizeclass = runtime·SizeToClass(s->elemsize);
+ s->ref = 1;
+ }
+
+ // update span lookup table
+ for(i = p; i < p + npages/2; i++)
+ h->spans[i] = t;
+ }
+
+ // place the span into a new list
+ if(s->sizeclass > 0) {
+ runtime·unlock(h);
+ c = &h->central[s->sizeclass];
+ runtime·lock(c);
+ // swept spans are at the end of the list
+ runtime·MSpanList_InsertBack(&c->empty, s);
+ runtime·unlock(c);
+ } else {
+ // Swept spans are at the end of lists.
+ if(s->npages < nelem(h->free))
+ runtime·MSpanList_InsertBack(&h->busy[s->npages], s);
+ else
+ runtime·MSpanList_InsertBack(&h->busylarge, s);
+ runtime·unlock(h);
+ }
+}
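
The specials code above keeps each span's list sorted by (offset, kind) so sweeping can walk objects and their specials in one pass. The following is a minimal standalone C sketch of just that splice logic — the names and test values are invented for illustration, and all of the runtime's locking, sweeping, and FixAlloc machinery is omitted:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct Special Special;
struct Special {
	Special *next;        // list per span, sorted by (offset, kind)
	size_t offset;        // object's offset within the span
	unsigned char kind;
};

// Splice s into the sorted list, refusing a duplicate with the
// same (offset, kind) — the same walk addspecial performs.
static bool
addspecial(Special **head, Special *s)
{
	Special **t, *x;

	for(t = head; (x = *t) != NULL; t = &x->next) {
		if(s->offset == x->offset && s->kind == x->kind)
			return false; // already exists
		if(s->offset < x->offset ||
		   (s->offset == x->offset && s->kind < x->kind))
			break;
	}
	s->next = x;
	*t = s;
	return true;
}

int
main(void)
{
	Special *head = NULL, *p;
	Special a = {NULL, 64, 1}, b = {NULL, 32, 1}, dup = {NULL, 64, 1};

	printf("%d\n", addspecial(&head, &a));   // 1
	printf("%d\n", addspecial(&head, &b));   // 1
	printf("%d\n", addspecial(&head, &dup)); // 0: duplicate rejected
	for(p = head; p != NULL; p = p->next)
		printf("offset=%zu kind=%d\n", p->offset, p->kind);
	return 0;
}

The sorted order is what lets freeallspecials stop its scan early, at the first special past the object's extent.
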
diff --git a/src/pkg/runtime/mknacl.sh b/src/pkg/runtime/mknacl.sh
new file mode 100644
index 000000000..47fb7bd88
--- /dev/null
+++ b/src/pkg/runtime/mknacl.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Copyright 2013 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+cat /Users/rsc/pub/native_client/src/trusted/service_runtime/include/bits/nacl_syscalls.h |
+ awk '
+ BEGIN {
+ printf("// generated by mknacl.sh - do not edit\n")
+ }
+ NF==3 && $1=="#define" && $2~/^NACL_sys_/ {
+ name=$2
+ sub(/^NACL_sys_/, "SYS_", name)
+ printf("#define %s %s\n", name, $3)
+ }' >syscall_nacl.h
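
As a purely hypothetical illustration of the rewrite the awk program performs (this syscall name and number are invented, not taken from the real NaCl header), an input line such as

	#define NACL_sys_example_call 42

comes out in syscall_nacl.h as

	#define SYS_example_call 42
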
diff --git a/src/pkg/runtime/mprof.goc b/src/pkg/runtime/mprof.goc
index 4ae74f0c2..9c23a16f8 100644
--- a/src/pkg/runtime/mprof.goc
+++ b/src/pkg/runtime/mprof.goc
@@ -22,7 +22,6 @@ enum { MProf, BProf }; // profile types
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
-typedef struct Bucket Bucket;
struct Bucket
{
Bucket *next; // next in hash list
@@ -34,14 +33,33 @@ struct Bucket
{
struct // typ == MProf
{
+ // The following complex 3-stage scheme of stats accumulation
+ // is required to obtain a consistent picture of mallocs and frees
+ // for some point in time.
+ // The problem is that mallocs come in real time, while frees
+ // come only after a GC during concurrent sweeping. So if we
+ // naively counted them, we would get a skew toward mallocs.
+ //
+ // Mallocs are accounted in recent stats.
+ // Explicit frees are accounted in recent stats.
+ // GC frees are accounted in prev stats.
+ // After GC prev stats are added to final stats and
+ // recent stats are moved into prev stats.
uintptr allocs;
uintptr frees;
uintptr alloc_bytes;
uintptr free_bytes;
- uintptr recent_allocs; // since last gc
+
+ uintptr prev_allocs; // from the GC before last until the last gc
+ uintptr prev_frees;
+ uintptr prev_alloc_bytes;
+ uintptr prev_free_bytes;
+
+ uintptr recent_allocs; // since last gc till now
uintptr recent_frees;
uintptr recent_alloc_bytes;
uintptr recent_free_bytes;
+
};
struct // typ == BProf
{
@@ -49,7 +67,8 @@ struct Bucket
int64 cycles;
};
};
- uintptr hash;
+ uintptr hash; // hash of size + stk
+ uintptr size;
uintptr nstk;
uintptr stk[1];
};
@@ -63,7 +82,7 @@ static uintptr bucketmem;
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
static Bucket*
-stkbucket(int32 typ, uintptr *stk, int32 nstk, bool alloc)
+stkbucket(int32 typ, uintptr size, uintptr *stk, int32 nstk, bool alloc)
{
int32 i;
uintptr h;
@@ -82,12 +101,17 @@ stkbucket(int32 typ, uintptr *stk, int32 nstk, bool alloc)
h += h<<10;
h ^= h>>6;
}
+ // hash in size
+ h += size;
+ h += h<<10;
+ h ^= h>>6;
+ // finalize
h += h<<3;
h ^= h>>11;
i = h%BuckHashSize;
for(b = buckhash[i]; b; b=b->next)
- if(b->typ == typ && b->hash == h && b->nstk == nstk &&
+ if(b->typ == typ && b->hash == h && b->size == size && b->nstk == nstk &&
runtime·mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
return b;
@@ -99,6 +123,7 @@ stkbucket(int32 typ, uintptr *stk, int32 nstk, bool alloc)
runtime·memmove(b->stk, stk, nstk*sizeof stk[0]);
b->typ = typ;
b->hash = h;
+ b->size = size;
b->nstk = nstk;
b->next = buckhash[i];
buckhash[i] = b;
@@ -118,10 +143,16 @@ MProf_GC(void)
Bucket *b;
for(b=mbuckets; b; b=b->allnext) {
- b->allocs += b->recent_allocs;
- b->frees += b->recent_frees;
- b->alloc_bytes += b->recent_alloc_bytes;
- b->free_bytes += b->recent_free_bytes;
+ b->allocs += b->prev_allocs;
+ b->frees += b->prev_frees;
+ b->alloc_bytes += b->prev_alloc_bytes;
+ b->free_bytes += b->prev_free_bytes;
+
+ b->prev_allocs = b->recent_allocs;
+ b->prev_frees = b->recent_frees;
+ b->prev_alloc_bytes = b->recent_alloc_bytes;
+ b->prev_free_bytes = b->recent_free_bytes;
+
b->recent_allocs = 0;
b->recent_frees = 0;
b->recent_alloc_bytes = 0;
@@ -138,143 +169,39 @@ runtime·MProf_GC(void)
runtime·unlock(&proflock);
}
-// Map from pointer to Bucket* that allocated it.
-// Three levels:
-// Linked-list hash table for top N-AddrHashShift bits.
-// Array index for next AddrDenseBits bits.
-// Linked list for next AddrHashShift-AddrDenseBits bits.
-// This is more efficient than using a general map,
-// because of the typical clustering of the pointer keys.
-
-typedef struct AddrHash AddrHash;
-typedef struct AddrEntry AddrEntry;
-
-enum {
- AddrHashBits = 12, // good for 4GB of used address space
- AddrHashShift = 20, // each AddrHash knows about 1MB of address space
- AddrDenseBits = 8, // good for a profiling rate of 4096 bytes
-};
-
-struct AddrHash
-{
- AddrHash *next; // next in top-level hash table linked list
- uintptr addr; // addr>>20
- AddrEntry *dense[1<<AddrDenseBits];
-};
-
-struct AddrEntry
-{
- AddrEntry *next; // next in bottom-level linked list
- uint32 addr;
- Bucket *b;
-};
-
-static AddrHash **addrhash; // points to (AddrHash*)[1<<AddrHashBits]
-static AddrEntry *addrfree;
-static uintptr addrmem;
-
-// Multiplicative hash function:
-// hashMultiplier is the bottom 32 bits of int((sqrt(5)-1)/2 * (1<<32)).
-// This is a good multiplier as suggested in CLR, Knuth. The hash
-// value is taken to be the top AddrHashBits bits of the bottom 32 bits
-// of the multiplied value.
-enum {
- HashMultiplier = 2654435769U
-};
-
-// Set the bucket associated with addr to b.
-static void
-setaddrbucket(uintptr addr, Bucket *b)
-{
- int32 i;
- uint32 h;
- AddrHash *ah;
- AddrEntry *e;
-
- h = (uint32)((addr>>AddrHashShift)*HashMultiplier) >> (32-AddrHashBits);
- for(ah=addrhash[h]; ah; ah=ah->next)
- if(ah->addr == (addr>>AddrHashShift))
- goto found;
-
- ah = runtime·persistentalloc(sizeof *ah, 0, &mstats.buckhash_sys);
- addrmem += sizeof *ah;
- ah->next = addrhash[h];
- ah->addr = addr>>AddrHashShift;
- addrhash[h] = ah;
-
-found:
- if((e = addrfree) == nil) {
- e = runtime·persistentalloc(64*sizeof *e, 0, &mstats.buckhash_sys);
- addrmem += 64*sizeof *e;
- for(i=0; i+1<64; i++)
- e[i].next = &e[i+1];
- e[63].next = nil;
- }
- addrfree = e->next;
- e->addr = (uint32)~(addr & ((1<<AddrHashShift)-1));
- e->b = b;
- h = (addr>>(AddrHashShift-AddrDenseBits))&(nelem(ah->dense)-1); // entry in dense is top 8 bits of low 20.
- e->next = ah->dense[h];
- ah->dense[h] = e;
-}
-
-// Get the bucket associated with addr and clear the association.
-static Bucket*
-getaddrbucket(uintptr addr)
-{
- uint32 h;
- AddrHash *ah;
- AddrEntry *e, **l;
- Bucket *b;
-
- h = (uint32)((addr>>AddrHashShift)*HashMultiplier) >> (32-AddrHashBits);
- for(ah=addrhash[h]; ah; ah=ah->next)
- if(ah->addr == (addr>>AddrHashShift))
- goto found;
- return nil;
-
-found:
- h = (addr>>(AddrHashShift-AddrDenseBits))&(nelem(ah->dense)-1); // entry in dense is top 8 bits of low 20.
- for(l=&ah->dense[h]; (e=*l) != nil; l=&e->next) {
- if(e->addr == (uint32)~(addr & ((1<<AddrHashShift)-1))) {
- *l = e->next;
- b = e->b;
- e->next = addrfree;
- addrfree = e;
- return b;
- }
- }
- return nil;
-}
-
// Called by malloc to record a profiled block.
void
runtime·MProf_Malloc(void *p, uintptr size)
{
- int32 nstk;
uintptr stk[32];
Bucket *b;
+ int32 nstk;
- nstk = runtime·callers(1, stk, 32);
+ nstk = runtime·callers(1, stk, nelem(stk));
runtime·lock(&proflock);
- b = stkbucket(MProf, stk, nstk, true);
+ b = stkbucket(MProf, size, stk, nstk, true);
b->recent_allocs++;
b->recent_alloc_bytes += size;
- setaddrbucket((uintptr)p, b);
runtime·unlock(&proflock);
+
+ // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
+ // This reduces potential contention and chances of deadlocks.
+ // Since the object must be alive during the call to MProf_Malloc,
+ // it's fine to do this non-atomically.
+ runtime·setprofilebucket(p, b);
}
// Called when freeing a profiled block.
void
-runtime·MProf_Free(void *p, uintptr size)
+runtime·MProf_Free(Bucket *b, uintptr size, bool freed)
{
- Bucket *b;
-
runtime·lock(&proflock);
- b = getaddrbucket((uintptr)p);
- if(b != nil) {
+ if(freed) {
b->recent_frees++;
b->recent_free_bytes += size;
+ } else {
+ b->prev_frees++;
+ b->prev_free_bytes += size;
}
runtime·unlock(&proflock);
}
@@ -311,9 +238,9 @@ runtime·blockevent(int64 cycles, int32 skip)
if(rate <= 0 || (rate > cycles && runtime·fastrand1()%rate > cycles))
return;
- nstk = runtime·callers(skip, stk, 32);
+ nstk = runtime·callers(skip, stk, nelem(stk));
runtime·lock(&proflock);
- b = stkbucket(BProf, stk, nstk, true);
+ b = stkbucket(BProf, 0, stk, nstk, true);
b->count++;
b->cycles += cycles;
runtime·unlock(&proflock);
@@ -365,6 +292,7 @@ func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
// garbage collection is disabled from the beginning of execution,
// accumulate stats as if a GC just happened, and recount buckets.
MProf_GC();
+ MProf_GC();
n = 0;
for(b=mbuckets; b; b=b->allnext)
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
@@ -381,6 +309,18 @@ func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
runtime·unlock(&proflock);
}
+void
+runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr))
+{
+ Bucket *b;
+
+ runtime·lock(&proflock);
+ for(b=mbuckets; b; b=b->allnext) {
+ callback(b, b->nstk, b->stk, b->size, b->allocs, b->frees);
+ }
+ runtime·unlock(&proflock);
+}
+
// Must match BlockProfileRecord in debug.go.
typedef struct BRecord BRecord;
struct BRecord {
@@ -483,7 +423,7 @@ saveg(uintptr pc, uintptr sp, G *gp, TRecord *r)
}
func GoroutineProfile(b Slice) (n int, ok bool) {
- uintptr pc, sp;
+ uintptr pc, sp, i;
TRecord *r;
G *gp;
@@ -502,7 +442,8 @@ func GoroutineProfile(b Slice) (n int, ok bool) {
ok = true;
r = (TRecord*)b.array;
saveg(pc, sp, g, r++);
- for(gp = runtime·allg; gp != nil; gp = gp->alllink) {
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
if(gp == g || gp->status == Gdead)
continue;
saveg(~(uintptr)0, ~(uintptr)0, gp, r++);
@@ -515,8 +456,72 @@ func GoroutineProfile(b Slice) (n int, ok bool) {
}
}
+// Tracing of alloc/free/gc.
+
+static Lock tracelock;
+
+static int8*
+typeinfoname(int32 typeinfo)
+{
+ if(typeinfo == TypeInfo_SingleObject)
+ return "single object";
+ else if(typeinfo == TypeInfo_Array)
+ return "array";
+ else if(typeinfo == TypeInfo_Chan)
+ return "channel";
+ runtime·throw("typinfoname: unknown type info");
+ return nil;
+}
+
+void
+runtime·tracealloc(void *p, uintptr size, uintptr typ)
+{
+ int8 *name;
+ Type *type;
+
+ runtime·lock(&tracelock);
+ m->traceback = 2;
+ type = (Type*)(typ & ~3);
+ name = typeinfoname(typ & 3);
+ if(type == nil)
+ runtime·printf("tracealloc(%p, %p, %s)\n", p, size, name);
+ else
+ runtime·printf("tracealloc(%p, %p, %s of %S)\n", p, size, name, *type->string);
+ if(m->curg == nil || g == m->curg) {
+ runtime·goroutineheader(g);
+ runtime·traceback((uintptr)runtime·getcallerpc(&p), (uintptr)runtime·getcallersp(&p), 0, g);
+ } else {
+ runtime·goroutineheader(m->curg);
+ runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, m->curg);
+ }
+ runtime·printf("\n");
+ m->traceback = 0;
+ runtime·unlock(&tracelock);
+}
+
+void
+runtime·tracefree(void *p, uintptr size)
+{
+ runtime·lock(&tracelock);
+ m->traceback = 2;
+ runtime·printf("tracefree(%p, %p)\n", p, size);
+ runtime·goroutineheader(g);
+ runtime·traceback((uintptr)runtime·getcallerpc(&p), (uintptr)runtime·getcallersp(&p), 0, g);
+ runtime·printf("\n");
+ m->traceback = 0;
+ runtime·unlock(&tracelock);
+}
+
void
-runtime·mprofinit(void)
+runtime·tracegc(void)
{
- addrhash = runtime·persistentalloc((1<<AddrHashBits)*sizeof *addrhash, 0, &mstats.buckhash_sys);
+ runtime·lock(&tracelock);
+ m->traceback = 2;
+ runtime·printf("tracegc()\n");
+ // running on m->g0 stack; show all non-g0 goroutines
+ runtime·tracebackothers(g);
+ runtime·printf("end tracegc\n");
+ runtime·printf("\n");
+ m->traceback = 0;
+ runtime·unlock(&tracelock);
}
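
The three-stage scheme documented in the Bucket comment above boils down to a counter rotation at each GC: recent (fed by mallocs and explicit frees) shifts into prev, and prev (fed by GC frees) folds into the final totals. Here is a toy C model of that rotation alone — the field names follow the diff, but the locking, byte counters, and bucket lists are stripped out:

#include <stdio.h>

typedef struct {
	unsigned long allocs, frees;               // final, reported stats
	unsigned long prev_allocs, prev_frees;     // from the GC before last until the last GC
	unsigned long recent_allocs, recent_frees; // since the last GC till now
} Bucket;

// Mirrors MProf_GC: fold prev into the final stats, then shift
// recent into prev.
static void
profile_gc(Bucket *b)
{
	b->allocs += b->prev_allocs;
	b->frees += b->prev_frees;
	b->prev_allocs = b->recent_allocs;
	b->prev_frees = b->recent_frees;
	b->recent_allocs = 0;
	b->recent_frees = 0;
}

int
main(void)
{
	Bucket b = {0};

	b.recent_allocs = 10; // mallocs are accounted in recent
	profile_gc(&b);
	b.prev_frees += 4;    // GC frees are accounted in prev
	profile_gc(&b);       // both meet in the final stats
	printf("allocs=%lu frees=%lu\n", b.allocs, b.frees); // allocs=10 frees=4
	return 0;
}

This also explains the doubled MProf_GC() call in MemProfile above: the first rotation moves recent into prev, and the second folds it into the final stats.
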
diff --git a/src/pkg/runtime/msize.c b/src/pkg/runtime/msize.c
index 50b372b61..2fbd5e104 100644
--- a/src/pkg/runtime/msize.c
+++ b/src/pkg/runtime/msize.c
@@ -28,8 +28,11 @@
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
+#include "../../cmd/ld/textflag.h"
+#pragma dataflag NOPTR
int32 runtime·class_to_size[NumSizeClasses];
+#pragma dataflag NOPTR
int32 runtime·class_to_allocnpages[NumSizeClasses];
// The SizeToClass lookup is implemented using two arrays,
@@ -41,11 +44,15 @@ int32 runtime·class_to_allocnpages[NumSizeClasses];
// size divided by 128 (rounded up). The arrays are filled in
// by InitSizes.
+#pragma dataflag NOPTR
int8 runtime·size_to_class8[1024/8 + 1];
+#pragma dataflag NOPTR
int8 runtime·size_to_class128[(MaxSmallSize-1024)/128 + 1];
-static int32
-SizeToClass(int32 size)
+void runtime·testdefersizes(void);
+
+int32
+runtime·SizeToClass(int32 size)
{
if(size > MaxSmallSize)
runtime·throw("SizeToClass - invalid size");
@@ -90,9 +97,9 @@ runtime·InitSizes(void)
// objects into the page, we might as well
// use just this size instead of having two
// different sizes.
- if(sizeclass > 1
- && npages == runtime·class_to_allocnpages[sizeclass-1]
- && allocsize/size == allocsize/runtime·class_to_size[sizeclass-1]) {
+ if(sizeclass > 1 &&
+ npages == runtime·class_to_allocnpages[sizeclass-1] &&
+ allocsize/size == allocsize/runtime·class_to_size[sizeclass-1]) {
runtime·class_to_size[sizeclass-1] = size;
continue;
}
@@ -119,7 +126,7 @@ runtime·InitSizes(void)
// Double-check SizeToClass.
if(0) {
for(n=0; n < MaxSmallSize; n++) {
- sizeclass = SizeToClass(n);
+ sizeclass = runtime·SizeToClass(n);
if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime·class_to_size[sizeclass] < n) {
runtime·printf("size=%d sizeclass=%d runtime·class_to_size=%d\n", n, sizeclass, runtime·class_to_size[sizeclass]);
runtime·printf("incorrect SizeToClass");
@@ -133,6 +140,8 @@ runtime·InitSizes(void)
}
}
+ runtime·testdefersizes();
+
// Copy out for statistics table.
for(i=0; i<nelem(runtime·class_to_size); i++)
mstats.by_size[i].size = runtime·class_to_size[i];
@@ -158,3 +167,18 @@ dump:
}
runtime·throw("InitSizes failed");
}
+
+// Returns the size of the memory block that mallocgc will allocate if you ask for size bytes.
+uintptr
+runtime·roundupsize(uintptr size)
+{
+ if(size < MaxSmallSize) {
+ if(size <= 1024-8)
+ return runtime·class_to_size[runtime·size_to_class8[(size+7)>>3]];
+ else
+ return runtime·class_to_size[runtime·size_to_class128[(size-1024+127) >> 7]];
+ }
+ if(size + PageSize < size)
+ return size;
+ return ROUND(size, PageSize);
+}
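
roundupsize above leans on the two lookup tables described in the earlier comment block: sizes up to 1024-8 are binned in 8-byte steps via size_to_class8, larger small sizes in 128-byte steps via size_to_class128. A quick C sketch of only the index arithmetic (the class tables indexed by these values are built by InitSizes and are not reproduced here):

#include <stdio.h>

// Index arithmetic behind the lookup: sizes <= 1016 are binned in
// 8-byte steps, sizes from 1025 up to MaxSmallSize in 128-byte steps.
static int
class8_index(int size)
{
	return (size + 7) >> 3;
}

static int
class128_index(int size)
{
	return (size - 1024 + 127) >> 7;
}

int
main(void)
{
	printf("%d\n", class8_index(1));      // 1: rounds up to the 8-byte bin
	printf("%d\n", class8_index(1016));   // 127
	printf("%d\n", class128_index(1025)); // 1: first 128-byte bin past 1024
	return 0;
}
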
diff --git a/src/pkg/runtime/netpoll.goc b/src/pkg/runtime/netpoll.goc
index d27bef167..7b3d16d02 100644
--- a/src/pkg/runtime/netpoll.goc
+++ b/src/pkg/runtime/netpoll.goc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd windows
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
package net
@@ -19,21 +19,46 @@ package net
// An implementation must call the following function to denote that the pd is ready.
// void runtime·netpollready(G **gpp, PollDesc *pd, int32 mode);
+// PollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
+// goroutines respectively. The semaphore can be in the following states:
+// READY - io readiness notification is pending;
+// a goroutine consumes the notification by changing the state to nil.
+// WAIT - a goroutine prepares to park on the semaphore, but not yet parked;
+// the goroutine commits to park by changing the state to G pointer,
+// or, alternatively, concurrent io notification changes the state to READY,
+// or, alternatively, concurrent timeout/close changes the state to nil.
+// G pointer - the goroutine is blocked on the semaphore;
+// io notification or timeout/close changes the state to READY or nil respectively
+// and unparks the goroutine.
+// nil - nothing of the above.
#define READY ((G*)1)
+#define WAIT ((G*)2)
+
+enum
+{
+ PollBlockSize = 4*1024,
+};
struct PollDesc
{
PollDesc* link; // in pollcache, protected by pollcache.Lock
+
+ // The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
+ // This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
+// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
+ // proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
+ // in a lock-free way by all operations.
Lock; // protects the following fields
uintptr fd;
bool closing;
uintptr seq; // protects from stale timers and ready notifications
- G* rg; // G waiting for read or READY (binary semaphore)
+ G* rg; // READY, WAIT, G waiting for read or nil
Timer rt; // read deadline timer (set if rt.fv != nil)
int64 rd; // read deadline
- G* wg; // the same for writes
- Timer wt;
- int64 wd;
+ G* wg; // READY, WAIT, G waiting for write or nil
+ Timer wt; // write deadline timer
+ int64 wd; // write deadline
+ void* user; // user settable cookie
};
static struct
@@ -47,7 +72,7 @@ static struct
// seq is incremented when deadlines are changed or descriptor is reused.
} pollcache;
-static bool netpollblock(PollDesc*, int32);
+static bool netpollblock(PollDesc*, int32, bool);
static G* netpollunblock(PollDesc*, int32, bool);
static void deadline(int64, Eface);
static void readDeadline(int64, Eface);
@@ -59,6 +84,11 @@ static FuncVal deadlineFn = {(void(*)(void))deadline};
static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
+// runtimeNano returns the current value of the runtime clock in nanoseconds.
+func runtimeNano() (ns int64) {
+ ns = runtime·nanotime();
+}
+
func runtime_pollServerInit() {
runtime·netpollinit();
}
@@ -97,7 +127,6 @@ func runtime_pollClose(pd *PollDesc) {
}
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
- runtime·lock(pd);
err = checkerr(pd, mode);
if(err)
goto ret;
@@ -106,14 +135,15 @@ func runtime_pollReset(pd *PollDesc, mode int) (err int) {
else if(mode == 'w')
pd->wg = nil;
ret:
- runtime·unlock(pd);
}
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
- runtime·lock(pd);
err = checkerr(pd, mode);
if(err == 0) {
- while(!netpollblock(pd, mode)) {
+ // For now, only Solaris uses level-triggered IO.
+ if(Solaris)
+ runtime·netpollarm(pd, mode);
+ while(!netpollblock(pd, mode, false)) {
err = checkerr(pd, mode);
if(err != 0)
break;
@@ -122,15 +152,13 @@ func runtime_pollWait(pd *PollDesc, mode int) (err int) {
// Pretend it has not happened and retry.
}
}
- runtime·unlock(pd);
}
func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
- runtime·lock(pd);
- // wait for ioready, ignore closing or timeouts.
- while(!netpollblock(pd, mode))
+ // This function is used only on windows after a failed attempt to cancel
+ // a pending async IO operation. Wait for ioready, ignore closing or timeouts.
+ while(!netpollblock(pd, mode, true))
;
- runtime·unlock(pd);
}
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
@@ -185,7 +213,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
}
// If we set the new deadline in the past, unblock currently pending IO if any.
rg = nil;
- wg = nil;
+ runtime·atomicstorep(&wg, nil); // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
if(pd->rd < 0)
rg = netpollunblock(pd, 'r', false);
if(pd->wd < 0)
@@ -205,6 +233,7 @@ func runtime_pollUnblock(pd *PollDesc) {
runtime·throw("runtime_pollUnblock: already closing");
pd->closing = true;
pd->seq++;
+ runtime·atomicstorep(&rg, nil); // full memory barrier between store to closing and read of rg/wg in netpollunblock
rg = netpollunblock(pd, 'r', false);
wg = netpollunblock(pd, 'w', false);
if(pd->rt.fv) {
@@ -228,6 +257,30 @@ runtime·netpollfd(PollDesc *pd)
return pd->fd;
}
+void**
+runtime·netpolluser(PollDesc *pd)
+{
+ return &pd->user;
+}
+
+bool
+runtime·netpollclosing(PollDesc *pd)
+{
+ return pd->closing;
+}
+
+void
+runtime·netpolllock(PollDesc *pd)
+{
+ runtime·lock(pd);
+}
+
+void
+runtime·netpollunlock(PollDesc *pd)
+{
+ runtime·unlock(pd);
+}
+
// make pd ready; newly runnable goroutines (if any) are enqueued into the gpp list
void
runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
@@ -235,12 +288,10 @@ runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
G *rg, *wg;
rg = wg = nil;
- runtime·lock(pd);
if(mode == 'r' || mode == 'r'+'w')
rg = netpollunblock(pd, 'r', true);
if(mode == 'w' || mode == 'r'+'w')
wg = netpollunblock(pd, 'w', true);
- runtime·unlock(pd);
if(rg) {
rg->schedlink = *gpp;
*gpp = rg;
@@ -261,51 +312,75 @@ checkerr(PollDesc *pd, int32 mode)
return 0;
}
+static bool
+blockcommit(G *gp, G **gpp)
+{
+ return runtime·casp(gpp, WAIT, gp);
+}
+
// returns true if IO is ready, or false if timedout or closed
+// waitio - wait only for completed IO, ignore errors
static bool
-netpollblock(PollDesc *pd, int32 mode)
+netpollblock(PollDesc *pd, int32 mode, bool waitio)
{
- G **gpp;
+ G **gpp, *old;
gpp = &pd->rg;
if(mode == 'w')
gpp = &pd->wg;
- if(*gpp == READY) {
- *gpp = nil;
- return true;
+
+ // set the gpp semaphore to WAIT
+ for(;;) {
+ old = *gpp;
+ if(old == READY) {
+ *gpp = nil;
+ return true;
+ }
+ if(old != nil)
+ runtime·throw("netpollblock: double wait");
+ if(runtime·casp(gpp, nil, WAIT))
+ break;
}
- if(*gpp != nil)
- runtime·throw("netpollblock: double wait");
- *gpp = g;
- runtime·park(runtime·unlock, &pd->Lock, "IO wait");
- runtime·lock(pd);
- if(g->param)
- return true;
- return false;
+
+ // need to recheck error states after setting gpp to WAIT
+ // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
+ // do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
+ if(waitio || checkerr(pd, mode) == 0)
+ runtime·park((bool(*)(G*, void*))blockcommit, gpp, "IO wait");
+ // be careful to not lose concurrent READY notification
+ old = runtime·xchgp(gpp, nil);
+ if(old > WAIT)
+ runtime·throw("netpollblock: corrupted state");
+ return old == READY;
}
static G*
netpollunblock(PollDesc *pd, int32 mode, bool ioready)
{
- G **gpp, *old;
+ G **gpp, *old, *new;
gpp = &pd->rg;
if(mode == 'w')
gpp = &pd->wg;
- if(*gpp == READY)
- return nil;
- if(*gpp == nil) {
- // Only set READY for ioready. runtime_pollWait
- // will check for timeout/cancel before waiting.
+
+ for(;;) {
+ old = *gpp;
+ if(old == READY)
+ return nil;
+ if(old == nil && !ioready) {
+ // Only set READY for ioready. runtime_pollWait
+ // will check for timeout/cancel before waiting.
+ return nil;
+ }
+ new = nil;
if(ioready)
- *gpp = READY;
- return nil;
+ new = READY;
+ if(runtime·casp(gpp, old, new))
+ break;
}
- old = *gpp;
- // pass unblock reason onto blocked g
- old->param = (void*)ioready;
- *gpp = nil;
- return old;
+ if(old > WAIT)
+ return old; // must be G*
+ return nil;
}
static void
@@ -331,14 +406,14 @@ deadlineimpl(int64 now, Eface arg, bool read, bool write)
if(pd->rd <= 0 || pd->rt.fv == nil)
runtime·throw("deadlineimpl: inconsistent read deadline");
pd->rd = -1;
- pd->rt.fv = nil;
+ runtime·atomicstorep(&pd->rt.fv, nil); // full memory barrier between store to rd and load of rg in netpollunblock
rg = netpollunblock(pd, 'r', false);
}
if(write) {
if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
runtime·throw("deadlineimpl: inconsistent write deadline");
pd->wd = -1;
- pd->wt.fv = nil;
+ runtime·atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
wg = netpollunblock(pd, 'w', false);
}
runtime·unlock(pd);
@@ -374,7 +449,7 @@ allocPollDesc(void)
runtime·lock(&pollcache);
if(pollcache.first == nil) {
- n = PageSize/sizeof(*pd);
+ n = PollBlockSize/sizeof(*pd);
if(n == 0)
n = 1;
// Must be in non-GC memory because can be referenced
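
The rg/wg fields described above act as a four-state, lock-free binary semaphore: nil, WAIT, READY, or a pointer to the parked G. Below is a compressed C11 model of those transitions, assuming <stdatomic.h>; G pointers are modeled as integers above WAIT, and parking is reduced to the blockcommit CAS:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define READY ((uintptr_t)1)
#define WAIT  ((uintptr_t)2)
// 0 models nil; values above WAIT model a parked G pointer.

static _Atomic uintptr_t gpp;

// Blocking side, mirroring netpollblock: move nil -> WAIT, "park"
// by publishing the G with a CAS (blockcommit), then take whatever
// state is left. READY means an io notification arrived.
static bool
block(uintptr_t g)
{
	uintptr_t old;

	for(;;) {
		old = atomic_load(&gpp);
		if(old == READY) {
			atomic_store(&gpp, 0);
			return true;
		}
		if(old != 0)
			return false; // "netpollblock: double wait"
		if(atomic_compare_exchange_strong(&gpp, &old, WAIT))
			break;
	}
	old = WAIT;
	atomic_compare_exchange_strong(&gpp, &old, g); // blockcommit
	old = atomic_exchange(&gpp, 0); // don't lose a racing READY
	return old == READY;
}

// Notification side, mirroring netpollunblock with ioready=true:
// publish READY and hand back the parked "G", if any.
static uintptr_t
unblock(void)
{
	uintptr_t old;

	for(;;) {
		old = atomic_load(&gpp);
		if(old == READY)
			return 0;
		if(atomic_compare_exchange_strong(&gpp, &old, READY))
			return old > WAIT ? old : 0;
	}
}

int
main(void)
{
	unblock();                  // io arrives before anyone waits
	printf("%d\n", block(100)); // 1: the notification is consumed
	return 0;
}

The final exchange to nil in block mirrors the runtime's care not to lose a READY notification that raced with the park.
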
diff --git a/src/pkg/runtime/netpoll_epoll.c b/src/pkg/runtime/netpoll_epoll.c
index 885ac5e4d..9ea5e1a59 100644
--- a/src/pkg/runtime/netpoll_epoll.c
+++ b/src/pkg/runtime/netpoll_epoll.c
@@ -52,6 +52,13 @@ runtime·netpollclose(uintptr fd)
return -res;
}
+void
+runtime·netpollarm(PollDesc* pd, int32 mode)
+{
+ USED(pd, mode);
+ runtime·throw("unused");
+}
+
// polls for ready network connections
// returns list of goroutines that become runnable
G*
diff --git a/src/pkg/runtime/netpoll_kqueue.c b/src/pkg/runtime/netpoll_kqueue.c
index afc8d6859..171346cce 100644
--- a/src/pkg/runtime/netpoll_kqueue.c
+++ b/src/pkg/runtime/netpoll_kqueue.c
@@ -59,6 +59,13 @@ runtime·netpollclose(uintptr fd)
return 0;
}
+void
+runtime·netpollarm(PollDesc* pd, int32 mode)
+{
+ USED(pd, mode);
+ runtime·throw("unused");
+}
+
// Polls for ready network connections.
// Returns list of goroutines that become runnable.
G*
diff --git a/src/pkg/runtime/netpoll_nacl.c b/src/pkg/runtime/netpoll_nacl.c
new file mode 100644
index 000000000..b75753a23
--- /dev/null
+++ b/src/pkg/runtime/netpoll_nacl.c
@@ -0,0 +1,37 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs_GOOS_GOARCH.h"
+#include "os_GOOS.h"
+
+// Fake network poller for NaCl.
+// Should never be used, because NaCl network connections do not honor "SetNonblock".
+
+void
+runtime·netpollinit(void)
+{
+}
+
+int32
+runtime·netpollopen(uintptr fd, PollDesc *pd)
+{
+ USED(fd);
+ USED(pd);
+ return 0;
+}
+
+int32
+runtime·netpollclose(uintptr fd)
+{
+ USED(fd);
+ return 0;
+}
+
+G*
+runtime·netpoll(bool block)
+{
+ USED(block);
+ return nil;
+}
diff --git a/src/pkg/runtime/netpoll_solaris.c b/src/pkg/runtime/netpoll_solaris.c
new file mode 100644
index 000000000..a2631a8ab
--- /dev/null
+++ b/src/pkg/runtime/netpoll_solaris.c
@@ -0,0 +1,268 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "arch_GOARCH.h"
+#include "defs_GOOS_GOARCH.h"
+#include "os_GOOS.h"
+
+// Solaris runtime-integrated network poller.
+//
+// Solaris uses event ports for scalable network I/O. Event
+// ports are level-triggered, unlike epoll and kqueue which
+// can be configured in both level-triggered and edge-triggered
+// mode. Level triggering means we have to keep track of a few things
+// ourselves. After we receive an event for a file descriptor,
+// it's our responsibility to ask again to be notified for future
+// events for that descriptor. When doing this we must keep track of
+// what kind of events the goroutines are currently interested in,
+// for example a fd may be open both for reading and writing.
+//
+// A description of the high level operation of this code
+// follows. Networking code will get a file descriptor by some means
+// and will register it with the netpolling mechanism by a code path
+// that eventually calls runtime·netpollopen. runtime·netpollopen
+// calls port_associate with an empty event set. That means that we
+// will not receive any events at this point. The association needs
+// to be done at this early point because we need to process the I/O
+// readiness notification at some point in the future. If I/O becomes
+// ready when nobody is listening, when we finally care about it,
+// nobody will tell us anymore.
+//
+// Besides calling runtime·netpollopen, the networking code paths
+// will call runtime·netpollarm each time goroutines are interested
+// in doing network I/O. Because we now know what kind of I/O we
+// are interested in (reading/writing), we can call port_associate
+// passing the correct type of event set (POLLIN/POLLOUT). As we made
+// sure to have already associated the file descriptor with the port,
+// when we now call port_associate, we will unblock the main poller
+// loop (in runtime·netpoll) right away if the socket is actually
+// ready for I/O.
+//
+// The main poller loop runs in its own thread waiting for events
+// using port_getn. When an event happens, it will tell the scheduler
+// about it using runtime·netpollready. Besides doing this, it must
+// also re-associate the events that were not part of this current
+// notification with the file descriptor. Failing to do this would
+// mean each notification prevents concurrent code from using the
+// same file descriptor in parallel.
+//
+// The logic dealing with re-associations is encapsulated in
+// runtime·netpollupdate. This function takes care to associate the
+// descriptor only with the subset of events that were previously
+// part of the association, except the one that just happened. We
+// can't re-associate with that right away, because event ports
+// are level triggered so it would cause a busy loop. Instead, that
+// association is effected only by the runtime·netpollarm code path,
+// when Go code actually asks for I/O.
+//
+// The open and arming mechanisms are serialized using the lock
+// inside PollDesc. This is required because the netpoll loop runs
+// asynchronously with respect to other Go code and by the time we get
+// to call port_associate to update the association in the loop, the
+// file descriptor might have been closed and reopened already. The
+// lock allows runtime·netpollupdate to be called synchronously from
+// the loop thread while preventing other threads from operating on the
+// same PollDesc, so once we unblock in the main loop, until we loop
+// again we know for sure we are always talking about the same file
+// descriptor and can safely access the data we want (the event set).
+
+#pragma dynimport libc·fcntl fcntl "libc.so"
+#pragma dynimport libc·port_create port_create "libc.so"
+#pragma dynimport libc·port_associate port_associate "libc.so"
+#pragma dynimport libc·port_dissociate port_dissociate "libc.so"
+#pragma dynimport libc·port_getn port_getn "libc.so"
+extern uintptr libc·fcntl;
+extern uintptr libc·port_create;
+extern uintptr libc·port_associate;
+extern uintptr libc·port_dissociate;
+extern uintptr libc·port_getn;
+
+#define errno (*m->perrno)
+
+int32
+runtime·fcntl(int32 fd, int32 cmd, uintptr arg)
+{
+ return runtime·sysvicall6(libc·fcntl, 3,
+ (uintptr)fd, (uintptr)cmd, (uintptr)arg);
+}
+
+int32
+runtime·port_create(void)
+{
+ return runtime·sysvicall6(libc·port_create, 0);
+}
+
+int32
+runtime·port_associate(int32 port, int32 source, uintptr object, int32 events, uintptr user)
+{
+ return runtime·sysvicall6(libc·port_associate,
+ 5, (uintptr)port, (uintptr)source, object, (uintptr)events, user);
+}
+
+int32
+runtime·port_dissociate(int32 port, int32 source, uintptr object)
+{
+ return runtime·sysvicall6(libc·port_dissociate,
+ 3, (uintptr)port, (uintptr)source, object);
+}
+
+int32
+runtime·port_getn(int32 port, PortEvent *evs, uint32 max, uint32 *nget, Timespec *timeout)
+{
+ return runtime·sysvicall6(libc·port_getn, 5, (uintptr)port,
+ (uintptr)evs, (uintptr)max, (uintptr)nget, (uintptr)timeout);
+}
+
+static int32 portfd = -1;
+
+void
+runtime·netpollinit(void)
+{
+ if((portfd = runtime·port_create()) >= 0) {
+ runtime·fcntl(portfd, F_SETFD, FD_CLOEXEC);
+ return;
+ }
+
+ runtime·printf("netpollinit: failed to create port (%d)\n", errno);
+ runtime·throw("netpollinit: failed to create port");
+}
+
+int32
+runtime·netpollopen(uintptr fd, PollDesc *pd)
+{
+ int32 r;
+
+ runtime·netpolllock(pd);
+ // We don't register for any specific type of events yet, that's
+ // netpollarm's job. We merely ensure we call port_associate before
+ // asynchronous connect/accept completes, so when we actually want
+ // to do any I/O, the call to port_associate (from netpollarm,
+ // with the interested event set) will unblock port_getn right away
+ // because of the I/O readiness notification.
+ *runtime·netpolluser(pd) = 0;
+ r = runtime·port_associate(portfd, PORT_SOURCE_FD, fd, 0, (uintptr)pd);
+ runtime·netpollunlock(pd);
+ return r;
+}
+
+int32
+runtime·netpollclose(uintptr fd)
+{
+ return runtime·port_dissociate(portfd, PORT_SOURCE_FD, fd);
+}
+
+// Updates the association with a new set of interested events. After
+// this call, port_getn will return one and only one event for that
+// particular descriptor, so this function needs to be called again.
+void
+runtime·netpollupdate(PollDesc* pd, uint32 set, uint32 clear)
+{
+ uint32 *ep, old, events;
+ uintptr fd = runtime·netpollfd(pd);
+ ep = (uint32*)runtime·netpolluser(pd);
+
+ if(runtime·netpollclosing(pd))
+ return;
+
+ old = *ep;
+ events = (old & ~clear) | set;
+ if(old == events)
+ return;
+
+ if(events && runtime·port_associate(portfd, PORT_SOURCE_FD, fd, events, (uintptr)pd) != 0) {
+ runtime·printf("netpollupdate: failed to associate (%d)\n", errno);
+ runtime·throw("netpollupdate: failed to associate");
+ }
+ *ep = events;
+}
+
+// subscribe the fd to the port such that port_getn will return one event.
+void
+runtime·netpollarm(PollDesc* pd, int32 mode)
+{
+ runtime·netpolllock(pd);
+ switch(mode) {
+ case 'r':
+ runtime·netpollupdate(pd, POLLIN, 0);
+ break;
+ case 'w':
+ runtime·netpollupdate(pd, POLLOUT, 0);
+ break;
+ default:
+ runtime·throw("netpollarm: bad mode");
+ }
+ runtime·netpollunlock(pd);
+}
+
+// polls for ready network connections
+// returns list of goroutines that become runnable
+G*
+runtime·netpoll(bool block)
+{
+ static int32 lasterr;
+ PortEvent events[128], *ev;
+ PollDesc *pd;
+ int32 i, mode, clear;
+ uint32 n;
+ Timespec *wait = nil, zero;
+ G *gp;
+
+ if(portfd == -1)
+ return (nil);
+
+ if(!block) {
+ zero.tv_sec = 0;
+ zero.tv_nsec = 0;
+ wait = &zero;
+ }
+
+retry:
+ n = 1;
+ if(runtime·port_getn(portfd, events, nelem(events), &n, wait) < 0) {
+ if(errno != EINTR && errno != lasterr) {
+ lasterr = errno;
+ runtime·printf("runtime: port_getn on fd %d failed with %d\n", portfd, errno);
+ }
+ goto retry;
+ }
+
+ gp = nil;
+ for(i = 0; i < n; i++) {
+ ev = &events[i];
+
+ if(ev->portev_events == 0)
+ continue;
+ pd = (PollDesc *)ev->portev_user;
+
+ mode = 0;
+ clear = 0;
+ if(ev->portev_events & (POLLIN|POLLHUP|POLLERR)) {
+ mode += 'r';
+ clear |= POLLIN;
+ }
+ if(ev->portev_events & (POLLOUT|POLLHUP|POLLERR)) {
+ mode += 'w';
+ clear |= POLLOUT;
+ }
+ // To effect edge-triggered events, we need to be sure to
+ // update our association with whatever events were not
+ // set with the event. For example, if we are registered
+ // for POLLIN|POLLOUT and we get POLLIN, besides waking
+ // the goroutine interested in POLLIN we must not forget
+ // the one interested in POLLOUT.
+ if(clear != 0) {
+ runtime·netpolllock(pd);
+ runtime·netpollupdate(pd, 0, clear);
+ runtime·netpollunlock(pd);
+ }
+
+ if(mode)
+ runtime·netpollready(&gp, pd, mode);
+ }
+
+ if(block && gp == nil)
+ goto retry;
+ return gp;
+}
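
netpollupdate's bookkeeping reduces to recomputing the interest set as (old & ~clear) | set and calling port_associate only when it changed. A tiny worked example of that rule — the POLLIN/POLLOUT values here are illustrative, not Solaris's actual constants:

#include <stdio.h>

enum { POLLIN = 0x1, POLLOUT = 0x4 }; // illustrative values only

// The update rule at the heart of netpollupdate.
static unsigned
update(unsigned old, unsigned set, unsigned clear)
{
	return (old & ~clear) | set;
}

int
main(void)
{
	unsigned ev = POLLIN | POLLOUT; // armed for read and write

	// POLLIN fires; the level-triggered port dropped the whole
	// association, so re-associate with only what did not fire.
	ev = update(ev, 0, POLLIN);
	printf("0x%x\n", ev); // 0x4: write interest survives

	// netpollarm('r') later restores read interest.
	ev = update(ev, POLLIN, 0);
	printf("0x%x\n", ev); // 0x5
	return 0;
}
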
diff --git a/src/pkg/runtime/netpoll_windows.c b/src/pkg/runtime/netpoll_windows.c
index b510a41e2..f3cd15c7a 100644
--- a/src/pkg/runtime/netpoll_windows.c
+++ b/src/pkg/runtime/netpoll_windows.c
@@ -72,6 +72,13 @@ runtime·netpollclose(uintptr fd)
return 0;
}
+void
+runtime·netpollarm(PollDesc* pd, int32 mode)
+{
+ USED(pd, mode);
+ runtime·throw("unused");
+}
+
// Polls for completed network IO.
// Returns list of goroutines that become runnable.
G*
@@ -94,13 +101,17 @@ retry:
n = nelem(entries) / runtime·gomaxprocs;
if(n < 8)
n = 8;
+ if(block)
+ m->blocked = true;
if(runtime·stdcall(runtime·GetQueuedCompletionStatusEx, 6, iocphandle, entries, (uintptr)n, &n, (uintptr)wait, (uintptr)0) == 0) {
+ m->blocked = false;
errno = runtime·getlasterror();
if(!block && errno == WAIT_TIMEOUT)
return nil;
runtime·printf("netpoll: GetQueuedCompletionStatusEx failed (errno=%d)\n", errno);
runtime·throw("netpoll: GetQueuedCompletionStatusEx failed");
}
+ m->blocked = false;
for(i = 0; i < n; i++) {
op = entries[i].op;
errno = 0;
@@ -113,7 +124,10 @@ retry:
op = nil;
errno = 0;
qty = 0;
+ if(block)
+ m->blocked = true;
if(runtime·stdcall(runtime·GetQueuedCompletionStatus, 5, iocphandle, &qty, &key, &op, (uintptr)wait) == 0) {
+ m->blocked = false;
errno = runtime·getlasterror();
if(!block && errno == WAIT_TIMEOUT)
return nil;
@@ -123,6 +137,7 @@ retry:
}
// dequeued failed IO packet, so report that
}
+ m->blocked = false;
handlecompletion(&gp, op, errno, qty);
}
if(block && gp == nil)
diff --git a/src/pkg/runtime/norace_test.go b/src/pkg/runtime/norace_test.go
index a3d5b0086..3b171877a 100644
--- a/src/pkg/runtime/norace_test.go
+++ b/src/pkg/runtime/norace_test.go
@@ -9,7 +9,6 @@ package runtime_test
import (
"runtime"
- "sync/atomic"
"testing"
)
@@ -31,28 +30,17 @@ func BenchmarkSyscallExcessWork(b *testing.B) {
}
func benchmarkSyscall(b *testing.B, work, excess int) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1) * excess
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- foo := 42
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- runtime.Entersyscall()
- for i := 0; i < work; i++ {
- foo *= 2
- foo /= 2
- }
- runtime.Exitsyscall()
- }
+ b.SetParallelism(excess)
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 42
+ for pb.Next() {
+ runtime.Entersyscall()
+ for i := 0; i < work; i++ {
+ foo *= 2
+ foo /= 2
}
- c <- foo == 42
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ runtime.Exitsyscall()
+ }
+ _ = foo
+ })
}
diff --git a/src/pkg/runtime/os_darwin.c b/src/pkg/runtime/os_darwin.c
index 9eb1b4626..33a2df958 100644
--- a/src/pkg/runtime/os_darwin.c
+++ b/src/pkg/runtime/os_darwin.c
@@ -59,6 +59,7 @@ runtime·osinit(void)
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
+ #pragma dataflag NOPTR
static byte urandom_data[HashRandomBytes];
int32 fd;
fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
@@ -434,9 +435,12 @@ runtime·mach_semrelease(uint32 sem)
void
runtime·sigpanic(void)
{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
switch(g->sig) {
case SIGBUS:
- if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000) {
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -444,7 +448,7 @@ runtime·sigpanic(void)
runtime·printf("unexpected fault address %p\n", g->sigcode1);
runtime·throw("fault");
case SIGSEGV:
- if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000) {
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -517,3 +521,9 @@ runtime·signalstack(byte *p, int32 n)
st.ss_flags = SS_DISABLE;
runtime·sigaltstack(&st, nil);
}
+
+void
+runtime·unblocksignals(void)
+{
+ runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
+}
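
The widened fault conditions above rely on && binding tighter than ||: sigpanic panics either when the fault address looks like a nil-pointer dereference or, for any address, when the goroutine opted in with paniconfault. A one-line C sanity check of that precedence, with invented variable names:

#include <stdio.h>

int
main(void)
{
	int adrerr = 0, smalladdr = 0, paniconfault = 1;

	// Parses as (adrerr && smalladdr) || paniconfault,
	// matching the sigpanic conditions above.
	printf("%d\n", adrerr && smalladdr || paniconfault); // 1
	return 0;
}
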
diff --git a/src/pkg/runtime/os_darwin.h b/src/pkg/runtime/os_darwin.h
index b4f49e023..91a405f21 100644
--- a/src/pkg/runtime/os_darwin.h
+++ b/src/pkg/runtime/os_darwin.h
@@ -23,6 +23,7 @@ int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
typedef uint32 Sigset;
void runtime·sigprocmask(int32, Sigset*, Sigset*);
+void runtime·unblocksignals(void);
struct Sigaction;
void runtime·sigaction(uintptr, struct Sigaction*, struct Sigaction*);
@@ -39,4 +40,3 @@ void runtime·setitimer(int32, Itimerval*, Itimerval*);
#define SIG_BLOCK 1
#define SIG_UNBLOCK 2
#define SIG_SETMASK 3
-
diff --git a/src/pkg/runtime/os_dragonfly.c b/src/pkg/runtime/os_dragonfly.c
index cf427b78c..e7fd2cc06 100644
--- a/src/pkg/runtime/os_dragonfly.c
+++ b/src/pkg/runtime/os_dragonfly.c
@@ -122,6 +122,7 @@ runtime·osinit(void)
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
+ #pragma dataflag NOPTR
static byte urandom_data[HashRandomBytes];
int32 fd;
fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
@@ -169,9 +170,12 @@ runtime·unminit(void)
void
runtime·sigpanic(void)
{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
switch(g->sig) {
case SIGBUS:
- if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000) {
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -179,7 +183,7 @@ runtime·sigpanic(void)
runtime·printf("unexpected fault address %p\n", g->sigcode1);
runtime·throw("fault");
case SIGSEGV:
- if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000) {
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -280,3 +284,9 @@ runtime·signalstack(byte *p, int32 n)
st.ss_flags = SS_DISABLE;
runtime·sigaltstack(&st, nil);
}
+
+void
+runtime·unblocksignals(void)
+{
+ runtime·sigprocmask(&sigset_none, nil);
+}
diff --git a/src/pkg/runtime/os_dragonfly.h b/src/pkg/runtime/os_dragonfly.h
index ebbd0eb15..fddeede85 100644
--- a/src/pkg/runtime/os_dragonfly.h
+++ b/src/pkg/runtime/os_dragonfly.h
@@ -12,6 +12,7 @@ void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
struct sigaction;
void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
void runtime·sigprocmask(Sigset *, Sigset *);
+void runtime·unblocksignals(void);
void runtime·setitimer(int32, Itimerval*, Itimerval*);
int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
diff --git a/src/pkg/runtime/os_freebsd.c b/src/pkg/runtime/os_freebsd.c
index 042097bdd..02b13472c 100644
--- a/src/pkg/runtime/os_freebsd.c
+++ b/src/pkg/runtime/os_freebsd.c
@@ -50,7 +50,7 @@ runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
Timespec ts;
if(ns < 0) {
- ret = runtime·sys_umtx_op(addr, UMTX_OP_WAIT_UINT, val, nil, nil);
+ ret = runtime·sys_umtx_op(addr, UMTX_OP_WAIT_UINT_PRIVATE, val, nil, nil);
if(ret >= 0 || ret == -EINTR)
return;
goto fail;
@@ -58,7 +58,7 @@ runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
// NOTE: tv_nsec is int64 on amd64, so this assumes a little-endian system.
ts.tv_nsec = 0;
ts.tv_sec = runtime·timediv(ns, 1000000000, (int32*)&ts.tv_nsec);
- ret = runtime·sys_umtx_op(addr, UMTX_OP_WAIT_UINT, val, nil, &ts);
+ ret = runtime·sys_umtx_op(addr, UMTX_OP_WAIT_UINT_PRIVATE, val, nil, &ts);
if(ret >= 0 || ret == -EINTR)
return;
@@ -78,7 +78,7 @@ runtime·futexwakeup(uint32 *addr, uint32 cnt)
{
int32 ret;
- ret = runtime·sys_umtx_op(addr, UMTX_OP_WAKE, cnt, nil, nil);
+ ret = runtime·sys_umtx_op(addr, UMTX_OP_WAKE_PRIVATE, cnt, nil, nil);
if(ret >= 0)
return;
@@ -130,6 +130,7 @@ runtime·osinit(void)
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
+ #pragma dataflag NOPTR
static byte urandom_data[HashRandomBytes];
int32 fd;
fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
@@ -177,9 +178,12 @@ runtime·unminit(void)
void
runtime·sigpanic(void)
{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
switch(g->sig) {
case SIGBUS:
- if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000) {
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -187,7 +191,7 @@ runtime·sigpanic(void)
runtime·printf("unexpected fault address %p\n", g->sigcode1);
runtime·throw("fault");
case SIGSEGV:
- if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000) {
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -288,3 +292,9 @@ runtime·signalstack(byte *p, int32 n)
st.ss_flags = SS_DISABLE;
runtime·sigaltstack(&st, nil);
}
+
+void
+runtime·unblocksignals(void)
+{
+ runtime·sigprocmask(&sigset_none, nil);
+}
diff --git a/src/pkg/runtime/os_freebsd.h b/src/pkg/runtime/os_freebsd.h
index c1853e65d..4b2c25330 100644
--- a/src/pkg/runtime/os_freebsd.h
+++ b/src/pkg/runtime/os_freebsd.h
@@ -12,6 +12,7 @@ void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
struct sigaction;
void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
void runtime·sigprocmask(Sigset *, Sigset *);
+void runtime·unblocksignals(void);
void runtime·setitimer(int32, Itimerval*, Itimerval*);
int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
diff --git a/src/pkg/runtime/os_linux.c b/src/pkg/runtime/os_linux.c
index cb45fe8ce..8a945242b 100644
--- a/src/pkg/runtime/os_linux.c
+++ b/src/pkg/runtime/os_linux.c
@@ -218,9 +218,12 @@ runtime·unminit(void)
void
runtime·sigpanic(void)
{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
switch(g->sig) {
case SIGBUS:
- if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000) {
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -228,7 +231,7 @@ runtime·sigpanic(void)
runtime·printf("unexpected fault address %p\n", g->sigcode1);
runtime·throw("fault");
case SIGSEGV:
- if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000) {
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -331,3 +334,9 @@ runtime·signalstack(byte *p, int32 n)
st.ss_flags = SS_DISABLE;
runtime·sigaltstack(&st, nil);
}
+
+void
+runtime·unblocksignals(void)
+{
+ runtime·rtsigprocmask(SIG_SETMASK, &sigset_none, nil, sizeof sigset_none);
+}
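
runtime·unblocksignals, added per-OS in this change, clears the thread's signal mask so that a signal the runtime is about to re-raise (e.g. from runtime·crash) is actually delivered. On Linux it goes through the raw rt_sigprocmask syscall; the portable POSIX shape of the same operation is a sketch like this:

#include <signal.h>

void
unblock_all_signals(void)
{
    sigset_t none;

    sigemptyset(&none);
    /* pthread_sigmask is the thread-scoped form; plain sigprocmask
       has unspecified behavior in multithreaded programs. */
    pthread_sigmask(SIG_SETMASK, &none, NULL);
}
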
diff --git a/src/pkg/runtime/os_linux.h b/src/pkg/runtime/os_linux.h
index b2d3f6f2a..d4b1902c3 100644
--- a/src/pkg/runtime/os_linux.h
+++ b/src/pkg/runtime/os_linux.h
@@ -28,6 +28,7 @@ struct Sigset
uint32 mask[2];
};
void runtime·rtsigprocmask(int32, Sigset*, Sigset*, int32);
+void runtime·unblocksignals(void);
#define SIG_SETMASK 2
#define RLIMIT_AS 9
diff --git a/src/pkg/runtime/os_linux_arm.c b/src/pkg/runtime/os_linux_arm.c
index 570b3f0be..aad08b989 100644
--- a/src/pkg/runtime/os_linux_arm.c
+++ b/src/pkg/runtime/os_linux_arm.c
@@ -16,7 +16,7 @@
static uint32 runtime·randomNumber;
uint8 runtime·armArch = 6; // we default to ARMv6
uint32 runtime·hwcap; // set by setup_auxv
-uint8 runtime·goarm; // set by 5l
+extern uint8 runtime·goarm; // set by 5l
void
runtime·checkgoarm(void)
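
The one-word fix above is easy to miss: without extern, this file would emit its own zero-initialized definition of runtime·goarm instead of referring to the byte that 5l injects at link time. An illustrative sketch of the distinction, with a hypothetical name:

/* Wrong: "unsigned char goarm;" here would be a second definition. */
extern unsigned char goarm;   /* declaration only; storage comes from the linker */

int
is_at_least_armv7(void)
{
    return goarm >= 7;
}
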
diff --git a/src/pkg/runtime/os_nacl.c b/src/pkg/runtime/os_nacl.c
new file mode 100644
index 000000000..3196e2ce3
--- /dev/null
+++ b/src/pkg/runtime/os_nacl.c
@@ -0,0 +1,278 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs_GOOS_GOARCH.h"
+#include "os_GOOS.h"
+#include "arch_GOARCH.h"
+#include "../../cmd/ld/textflag.h"
+#include "stack.h"
+
+int8 *goos = "nacl";
+extern SigTab runtime·sigtab[];
+
+void runtime·sigtramp(void);
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+void
+runtime·mpreinit(M *mp)
+{
+ mp->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, can not allocate memory.
+void
+runtime·minit(void)
+{
+ int32 ret;
+
+ // Initialize signal handling
+ ret = runtime·nacl_exception_stack((byte*)m->gsignal->stackguard - StackGuard, 32*1024);
+ if(ret < 0)
+ runtime·printf("runtime: nacl_exception_stack: error %d\n", -ret);
+
+ ret = runtime·nacl_exception_handler(runtime·sigtramp, nil);
+ if(ret < 0)
+ runtime·printf("runtime: nacl_exception_handler: error %d\n", -ret);
+}
+
+// Called from dropm to undo the effect of an minit.
+void
+runtime·unminit(void)
+{
+}
+
+int8 runtime·sigtrampf[] = "runtime: signal at PC=%X AX=%X CX=%X DX=%X BX=%X DI=%X R15=%X *SP=%X\n";
+int8 runtime·sigtrampp[] = "runtime: sigtramp";
+
+extern byte runtime·tls0[];
+
+void
+runtime·osinit(void)
+{
+ runtime·ncpu = 1;
+ m->procid = 2;
+//runtime·nacl_exception_handler(runtime·sigtramp, nil);
+}
+
+void
+runtime·crash(void)
+{
+ *(int32*)0 = 0;
+}
+
+void
+runtime·get_random_data(byte **rnd, int32 *rnd_len)
+{
+ *rnd = nil;
+ *rnd_len = 0;
+}
+
+void
+runtime·goenvs(void)
+{
+ runtime·goenvs_unix();
+}
+
+void
+runtime·initsig(void)
+{
+}
+
+#pragma textflag NOSPLIT
+void
+runtime·usleep(uint32 us)
+{
+ Timespec ts;
+
+ ts.tv_sec = us/1000000;
+ ts.tv_nsec = (us%1000000)*1000;
+ runtime·nacl_nanosleep(&ts, nil);
+}
+
+void runtime·mstart_nacl(void);
+
+void
+runtime·newosproc(M *mp, void *stk)
+{
+ int32 ret;
+ void **tls;
+
+ tls = (void**)mp->tls;
+ tls[0] = mp->g0;
+ tls[1] = mp;
+ ret = runtime·nacl_thread_create(runtime·mstart_nacl, stk, tls+2, 0);
+ if(ret < 0) {
+ runtime·printf("nacl_thread_create: error %d\n", -ret);
+ runtime·throw("newosproc");
+ }
+}
+
+uintptr
+runtime·semacreate(void)
+{
+ int32 mu, cond;
+
+ mu = runtime·nacl_mutex_create(0);
+ if(mu < 0) {
+ runtime·printf("nacl_mutex_create: error %d\n", -mu);
+ runtime·throw("semacreate");
+ }
+ cond = runtime·nacl_cond_create(0);
+ if(cond < 0) {
+ runtime·printf("nacl_cond_create: error %d\n", -cond);
+ runtime·throw("semacreate");
+ }
+ m->waitsemalock = mu;
+ return cond; // assigned to m->waitsema
+}
+
+#pragma textflag NOSPLIT
+int32
+runtime·semasleep(int64 ns)
+{
+ int32 ret;
+
+ ret = runtime·nacl_mutex_lock(m->waitsemalock);
+ if(ret < 0) {
+ //runtime·printf("nacl_mutex_lock: error %d\n", -ret);
+ runtime·throw("semasleep");
+ }
+ if(m->waitsemacount > 0) {
+ m->waitsemacount = 0;
+ runtime·nacl_mutex_unlock(m->waitsemalock);
+ return 0;
+ }
+
+ while(m->waitsemacount == 0) {
+ if(ns < 0) {
+ ret = runtime·nacl_cond_wait(m->waitsema, m->waitsemalock);
+ if(ret < 0) {
+ //runtime·printf("nacl_cond_wait: error %d\n", -ret);
+ runtime·throw("semasleep");
+ }
+ } else {
+ Timespec ts;
+
+ ns += runtime·nanotime();
+ ts.tv_sec = runtime·timediv(ns, 1000000000, (int32*)&ts.tv_nsec);
+ ret = runtime·nacl_cond_timed_wait_abs(m->waitsema, m->waitsemalock, &ts);
+ if(ret == -ETIMEDOUT) {
+ runtime·nacl_mutex_unlock(m->waitsemalock);
+ return -1;
+ }
+ if(ret < 0) {
+ //runtime·printf("nacl_cond_timed_wait_abs: error %d\n", -ret);
+ runtime·throw("semasleep");
+ }
+ }
+ }
+
+ m->waitsemacount = 0;
+ runtime·nacl_mutex_unlock(m->waitsemalock);
+ return 0;
+}
+
+void
+runtime·semawakeup(M *mp)
+{
+ int32 ret;
+
+ ret = runtime·nacl_mutex_lock(mp->waitsemalock);
+ if(ret < 0) {
+ //runtime·printf("nacl_mutex_lock: error %d\n", -ret);
+ runtime·throw("semawakeup");
+ }
+ if(mp->waitsemacount != 0) {
+ //runtime·printf("semawakeup: double wakeup\n");
+ runtime·throw("semawakeup");
+ }
+ mp->waitsemacount = 1;
+ runtime·nacl_cond_signal(mp->waitsema);
+ runtime·nacl_mutex_unlock(mp->waitsemalock);
+}
+
+void
+os·sigpipe(void)
+{
+ runtime·throw("too many writes on closed pipe");
+}
+
+uintptr
+runtime·memlimit(void)
+{
+ runtime·printf("memlimit\n");
+ return 0;
+}
+
+#pragma dataflag NOPTR
+static int8 badsignal[] = "runtime: signal received on thread not created by Go.\n";
+
+// This runs on a foreign stack, without an m or a g. No stack split.
+#pragma textflag NOSPLIT
+void
+runtime·badsignal2(void)
+{
+ runtime·write(2, badsignal, sizeof badsignal - 1);
+ runtime·exit(2);
+}
+
+void runtime·madvise(byte*, uintptr, int32) { }
+void runtime·munmap(byte*, uintptr) {}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ USED(hz);
+}
+
+void
+runtime·sigdisable(uint32)
+{
+}
+
+void
+runtime·sigenable(uint32)
+{
+}
+
+void
+runtime·closeonexec(int32)
+{
+}
+
+void
+runtime·sigpanic(void)
+{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
+ // Native Client only invokes the exception handler for memory faults.
+ g->sig = SIGSEGV;
+ if(g->sigpc == 0)
+ runtime·panicstring("call of nil func value");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+}
+
+uint32 runtime·writelock; // test-and-set spin lock for runtime.write
+
+/*
+An attempt at IRT. Doesn't work. See end of sys_nacl_amd64.s.
+
+void (*runtime·nacl_irt_query)(void);
+
+int8 runtime·nacl_irt_basic_v0_1_str[] = "nacl-irt-basic-0.1";
+void *runtime·nacl_irt_basic_v0_1[6]; // exit, gettod, clock, nanosleep, sched_yield, sysconf
+int32 runtime·nacl_irt_basic_v0_1_size = sizeof(runtime·nacl_irt_basic_v0_1);
+
+int8 runtime·nacl_irt_memory_v0_3_str[] = "nacl-irt-memory-0.3";
+void *runtime·nacl_irt_memory_v0_3[3]; // mmap, munmap, mprotect
+int32 runtime·nacl_irt_memory_v0_3_size = sizeof(runtime·nacl_irt_memory_v0_3);
+
+int8 runtime·nacl_irt_thread_v0_1_str[] = "nacl-irt-thread-0.1";
+void *runtime·nacl_irt_thread_v0_1[3]; // thread_create, thread_exit, thread_nice
+int32 runtime·nacl_irt_thread_v0_1_size = sizeof(runtime·nacl_irt_thread_v0_1);
+*/ \ No newline at end of file
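
semacreate/semasleep/semawakeup above assemble a binary semaphore out of a NaCl mutex, a condition variable, and a count that is only ever 0 or 1. The same pattern in portable pthreads, as a hedged sketch (the runtime additionally converts its relative ns timeout into an absolute Timespec, as semasleep does with runtime·timediv):

#include <errno.h>
#include <pthread.h>
#include <time.h>

typedef struct {
    pthread_mutex_t mu;
    pthread_cond_t  cond;
    int             count;   /* 0 or 1, like m->waitsemacount */
} BinSem;

/* Sleep until posted; with a deadline, return -1 on timeout. */
int
binsem_sleep(BinSem *s, const struct timespec *abs_deadline)
{
    int r = 0;

    pthread_mutex_lock(&s->mu);
    while (s->count == 0) {
        if (abs_deadline == NULL)
            pthread_cond_wait(&s->cond, &s->mu);
        else if (pthread_cond_timedwait(&s->cond, &s->mu, abs_deadline) == ETIMEDOUT) {
            r = -1;
            break;
        }
    }
    if (r == 0)
        s->count = 0;   /* consume the wakeup */
    pthread_mutex_unlock(&s->mu);
    return r;
}

void
binsem_wakeup(BinSem *s)
{
    pthread_mutex_lock(&s->mu);
    s->count = 1;
    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->mu);
}
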
diff --git a/src/pkg/runtime/os_nacl.h b/src/pkg/runtime/os_nacl.h
new file mode 100644
index 000000000..7c9d9c242
--- /dev/null
+++ b/src/pkg/runtime/os_nacl.h
@@ -0,0 +1,162 @@
+enum {
+ NSIG = 32,
+ SI_USER = 1,
+
+ // native_client/src/trusted/service_runtime/include/sys/errno.h
+ // The errors are mainly copied from Linux.
+ EPERM = 1, /* Operation not permitted */
+ ENOENT = 2, /* No such file or directory */
+ ESRCH = 3, /* No such process */
+ EINTR = 4, /* Interrupted system call */
+ EIO = 5, /* I/O error */
+ ENXIO = 6, /* No such device or address */
+ E2BIG = 7, /* Argument list too long */
+ ENOEXEC = 8, /* Exec format error */
+ EBADF = 9, /* Bad file number */
+ ECHILD = 10, /* No child processes */
+ EAGAIN = 11, /* Try again */
+ ENOMEM = 12, /* Out of memory */
+ EACCES = 13, /* Permission denied */
+ EFAULT = 14, /* Bad address */
+ EBUSY = 16, /* Device or resource busy */
+ EEXIST = 17, /* File exists */
+ EXDEV = 18, /* Cross-device link */
+ ENODEV = 19, /* No such device */
+ ENOTDIR = 20, /* Not a directory */
+ EISDIR = 21, /* Is a directory */
+ EINVAL = 22, /* Invalid argument */
+ ENFILE = 23, /* File table overflow */
+ EMFILE = 24, /* Too many open files */
+ ENOTTY = 25, /* Not a typewriter */
+ EFBIG = 27, /* File too large */
+ ENOSPC = 28, /* No space left on device */
+ ESPIPE = 29, /* Illegal seek */
+ EROFS = 30, /* Read-only file system */
+ EMLINK = 31, /* Too many links */
+ EPIPE = 32, /* Broken pipe */
+ ENAMETOOLONG = 36, /* File name too long */
+ ENOSYS = 38, /* Function not implemented */
+ EDQUOT = 122, /* Quota exceeded */
+ EDOM = 33, /* Math arg out of domain of func */
+ ERANGE = 34, /* Math result not representable */
+ EDEADLK = 35, /* Deadlock condition */
+ ENOLCK = 37, /* No record locks available */
+ ENOTEMPTY = 39, /* Directory not empty */
+ ELOOP = 40, /* Too many symbolic links */
+ ENOMSG = 42, /* No message of desired type */
+ EIDRM = 43, /* Identifier removed */
+ ECHRNG = 44, /* Channel number out of range */
+ EL2NSYNC = 45, /* Level 2 not synchronized */
+ EL3HLT = 46, /* Level 3 halted */
+ EL3RST = 47, /* Level 3 reset */
+ ELNRNG = 48, /* Link number out of range */
+ EUNATCH = 49, /* Protocol driver not attached */
+ ENOCSI = 50, /* No CSI structure available */
+ EL2HLT = 51, /* Level 2 halted */
+ EBADE = 52, /* Invalid exchange */
+ EBADR = 53, /* Invalid request descriptor */
+ EXFULL = 54, /* Exchange full */
+ ENOANO = 55, /* No anode */
+ EBADRQC = 56, /* Invalid request code */
+ EBADSLT = 57, /* Invalid slot */
+ EDEADLOCK = EDEADLK, /* File locking deadlock error */
+ EBFONT = 59, /* Bad font file fmt */
+ ENOSTR = 60, /* Device not a stream */
+ ENODATA = 61, /* No data (for no delay io) */
+ ETIME = 62, /* Timer expired */
+ ENOSR = 63, /* Out of streams resources */
+ ENONET = 64, /* Machine is not on the network */
+ ENOPKG = 65, /* Package not installed */
+ EREMOTE = 66, /* The object is remote */
+ ENOLINK = 67, /* The link has been severed */
+ EADV = 68, /* Advertise error */
+ ESRMNT = 69, /* Srmount error */
+ ECOMM = 70, /* Communication error on send */
+ EPROTO = 71, /* Protocol error */
+ EMULTIHOP = 72, /* Multihop attempted */
+ EDOTDOT = 73, /* Cross mount point (not really error) */
+ EBADMSG = 74, /* Trying to read unreadable message */
+ EOVERFLOW = 75, /* Value too large for defined data type */
+ ENOTUNIQ = 76, /* Given log. name not unique */
+ EBADFD = 77, /* f.d. invalid for this operation */
+ EREMCHG = 78, /* Remote address changed */
+ ELIBACC = 79, /* Can't access a needed shared lib */
+ ELIBBAD = 80, /* Accessing a corrupted shared lib */
+ ELIBSCN = 81, /* .lib section in a.out corrupted */
+ ELIBMAX = 82, /* Attempting to link in too many libs */
+ ELIBEXEC = 83, /* Attempting to exec a shared library */
+ EILSEQ = 84,
+ EUSERS = 87,
+ ENOTSOCK = 88, /* Socket operation on non-socket */
+ EDESTADDRREQ = 89, /* Destination address required */
+ EMSGSIZE = 90, /* Message too long */
+ EPROTOTYPE = 91, /* Protocol wrong type for socket */
+ ENOPROTOOPT = 92, /* Protocol not available */
+ EPROTONOSUPPORT = 93, /* Unknown protocol */
+ ESOCKTNOSUPPORT = 94, /* Socket type not supported */
+ EOPNOTSUPP = 95, /* Operation not supported on transport endpoint */
+ EPFNOSUPPORT = 96, /* Protocol family not supported */
+ EAFNOSUPPORT = 97, /* Address family not supported by protocol family */
+ EADDRINUSE = 98, /* Address already in use */
+ EADDRNOTAVAIL = 99, /* Address not available */
+ ENETDOWN = 100, /* Network interface is not configured */
+ ENETUNREACH = 101, /* Network is unreachable */
+ ENETRESET = 102,
+ ECONNABORTED = 103, /* Connection aborted */
+ ECONNRESET = 104, /* Connection reset by peer */
+ ENOBUFS = 105, /* No buffer space available */
+ EISCONN = 106, /* Socket is already connected */
+ ENOTCONN = 107, /* Socket is not connected */
+ ESHUTDOWN = 108, /* Can't send after socket shutdown */
+ ETOOMANYREFS = 109,
+ ETIMEDOUT = 110, /* Connection timed out */
+ ECONNREFUSED = 111, /* Connection refused */
+ EHOSTDOWN = 112, /* Host is down */
+ EHOSTUNREACH = 113, /* Host is unreachable */
+ EALREADY = 114, /* Socket already connected */
+ EINPROGRESS = 115, /* Connection already in progress */
+ ESTALE = 116,
+ ENOTSUP = EOPNOTSUPP, /* Not supported */
+ ENOMEDIUM = 123, /* No medium (in tape drive) */
+ ECANCELED = 125, /* Operation canceled. */
+ ELBIN = 2048, /* Inode is remote (not really error) */
+ EFTYPE = 2049, /* Inappropriate file type or format */
+ ENMFILE = 2050, /* No more files */
+ EPROCLIM = 2051,
+ ENOSHARE = 2052, /* No such host or network path */
+ ECASECLASH = 2053, /* Filename exists with different case */
+ EWOULDBLOCK = EAGAIN, /* Operation would block */
+
+ // native_client/src/trusted/service_runtime/include/bits/mman.h.
+ // NOTE: DO NOT USE native_client/src/shared/imc/nacl_imc_c.h.
+ // Those MAP_* values are different from these.
+ PROT_NONE = 0x0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+
+ MAP_SHARED = 0x1,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ MAP_ANON = 0x20,
+};
+typedef byte* kevent_udata;
+
+int32 runtime·nacl_exception_stack(byte*, int32);
+int32 runtime·nacl_exception_handler(void*, void*);
+int32 runtime·nacl_sem_create(int32);
+int32 runtime·nacl_sem_wait(int32);
+int32 runtime·nacl_sem_post(int32);
+int32 runtime·nacl_mutex_create(int32);
+int32 runtime·nacl_mutex_lock(int32);
+int32 runtime·nacl_mutex_trylock(int32);
+int32 runtime·nacl_mutex_unlock(int32);
+int32 runtime·nacl_cond_create(int32);
+int32 runtime·nacl_cond_wait(int32, int32);
+int32 runtime·nacl_cond_signal(int32);
+int32 runtime·nacl_cond_broadcast(int32);
+int32 runtime·nacl_cond_timed_wait_abs(int32, int32, Timespec*);
+int32 runtime·nacl_thread_create(void*, void*, void*, void*);
+int32 runtime·nacl_nanosleep(Timespec*, Timespec*);
+
+void runtime·sigpanic(void);
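
These errno constants live in the runtime because NaCl system calls, like raw Linux syscalls, report failure by returning -errno directly rather than setting a thread-local errno — hence the ret == -ETIMEDOUT test and the -ret in the error prints earlier in os_nacl.c. A small sketch of handling that convention, with a hypothetical raw wrapper:

#include <errno.h>
#include <time.h>

int raw_nanosleep(const struct timespec *req);  /* assumed: returns 0 or -errno */

int
sleep_checked(const struct timespec *req)
{
    int ret = raw_nanosleep(req);

    if (ret < 0) {
        if (ret == -EINTR)
            return 0;   /* interrupted; caller may retry */
        return -1;      /* real failure; -ret is the errno value */
    }
    return 0;
}
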
diff --git a/src/pkg/runtime/os_netbsd.c b/src/pkg/runtime/os_netbsd.c
index a49dca295..93229bffe 100644
--- a/src/pkg/runtime/os_netbsd.c
+++ b/src/pkg/runtime/os_netbsd.c
@@ -188,6 +188,7 @@ runtime·osinit(void)
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
+ #pragma dataflag NOPTR
static byte urandom_data[HashRandomBytes];
int32 fd;
fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
@@ -237,9 +238,12 @@ runtime·unminit(void)
void
runtime·sigpanic(void)
{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
switch(g->sig) {
case SIGBUS:
- if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000) {
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -247,7 +251,7 @@ runtime·sigpanic(void)
runtime·printf("unexpected fault address %p\n", g->sigcode1);
runtime·throw("fault");
case SIGSEGV:
- if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000) {
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -326,3 +330,9 @@ runtime·signalstack(byte *p, int32 n)
st.ss_flags = SS_DISABLE;
runtime·sigaltstack(&st, nil);
}
+
+void
+runtime·unblocksignals(void)
+{
+ runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
+}
diff --git a/src/pkg/runtime/os_netbsd.h b/src/pkg/runtime/os_netbsd.h
index 55743c8d5..16e9833af 100644
--- a/src/pkg/runtime/os_netbsd.h
+++ b/src/pkg/runtime/os_netbsd.h
@@ -18,6 +18,7 @@ void runtime·setitimer(int32, Itimerval*, Itimerval*);
void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
void runtime·sigprocmask(int32, Sigset*, Sigset*);
+void runtime·unblocksignals(void);
int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
extern void runtime·lwp_tramp(void);
diff --git a/src/pkg/runtime/os_openbsd.c b/src/pkg/runtime/os_openbsd.c
index 18377a047..08a290a05 100644
--- a/src/pkg/runtime/os_openbsd.c
+++ b/src/pkg/runtime/os_openbsd.c
@@ -82,7 +82,7 @@ runtime·semasleep(int64 ns)
// NOTE: tv_nsec is int64 on amd64, so this assumes a little-endian system.
ts.tv_nsec = 0;
ts.tv_sec = runtime·timediv(ns, 1000000000, (int32*)&ts.tv_nsec);
- runtime·thrsleep(&m->waitsemacount, CLOCK_REALTIME, &ts, &m->waitsemalock, nil);
+ runtime·thrsleep(&m->waitsemacount, CLOCK_MONOTONIC, &ts, &m->waitsemalock, nil);
}
// reacquire lock
while(runtime·xchg(&m->waitsemalock, 1))
@@ -167,6 +167,7 @@ runtime·osinit(void)
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
+ #pragma dataflag NOPTR
static byte urandom_data[HashRandomBytes];
int32 fd;
fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
@@ -214,9 +215,12 @@ runtime·unminit(void)
void
runtime·sigpanic(void)
{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
switch(g->sig) {
case SIGBUS:
- if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000) {
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -224,7 +228,7 @@ runtime·sigpanic(void)
runtime·printf("unexpected fault address %p\n", g->sigcode1);
runtime·throw("fault");
case SIGSEGV:
- if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000) {
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -300,3 +304,9 @@ runtime·signalstack(byte *p, int32 n)
st.ss_flags = SS_DISABLE;
runtime·sigaltstack(&st, nil);
}
+
+void
+runtime·unblocksignals(void)
+{
+ runtime·sigprocmask(SIG_SETMASK, sigset_none);
+}
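
Moving thrsleep from CLOCK_REALTIME to CLOCK_MONOTONIC keeps the semaphore timeout immune to wall-clock steps (NTP corrections, manual date changes). When a wait primitive wants an absolute timespec against the monotonic clock, the usual construction from a relative ns value looks like this (sketch only — semasleep above passes a relative value through runtime·timediv):

#include <time.h>

struct timespec
monotonic_deadline(long long ns_from_now)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    ts.tv_sec  += ns_from_now / 1000000000LL;
    ts.tv_nsec += ns_from_now % 1000000000LL;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }
    return ts;
}
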
diff --git a/src/pkg/runtime/os_openbsd.h b/src/pkg/runtime/os_openbsd.h
index 4746b314f..bbfde39e2 100644
--- a/src/pkg/runtime/os_openbsd.h
+++ b/src/pkg/runtime/os_openbsd.h
@@ -18,6 +18,7 @@ void runtime·setitimer(int32, Itimerval*, Itimerval*);
void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
Sigset runtime·sigprocmask(int32, Sigset);
+void runtime·unblocksignals(void);
int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
#define NSIG 33
diff --git a/src/pkg/runtime/os_plan9.c b/src/pkg/runtime/os_plan9.c
index 07db2c305..14d4fae48 100644
--- a/src/pkg/runtime/os_plan9.c
+++ b/src/pkg/runtime/os_plan9.c
@@ -102,8 +102,18 @@ runtime·crash(void)
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
- *rnd = nil;
- *rnd_len = 0;
+ static byte random_data[HashRandomBytes];
+ int32 fd;
+
+ fd = runtime·open("/dev/random", 0 /* O_RDONLY */, 0);
+ if(runtime·read(fd, random_data, HashRandomBytes) == HashRandomBytes) {
+ *rnd = random_data;
+ *rnd_len = HashRandomBytes;
+ } else {
+ *rnd = nil;
+ *rnd_len = 0;
+ }
+ runtime·close(fd);
}
void
@@ -183,13 +193,15 @@ runtime·itoa(int32 n, byte *p, uint32 len)
void
runtime·goexitsall(int8 *status)
{
+ int8 buf[ERRMAX];
M *mp;
int32 pid;
+ runtime·snprintf((byte*)buf, sizeof buf, "go: exit %s", status);
pid = getpid();
for(mp=runtime·atomicloadp(&runtime·allm); mp; mp=mp->alllink)
if(mp->procid != pid)
- runtime·postnote(mp->procid, status);
+ runtime·postnote(mp->procid, buf);
}
int32
@@ -271,6 +283,8 @@ runtime·semasleep(int64 ns)
if(ns >= 0) {
ms = runtime·timediv(ns, 1000000, nil);
+ if(ms == 0)
+ ms = 1;
ret = runtime·plan9_tsemacquire(&m->waitsemacount, ms);
if(ret == 1)
return 0; // success
@@ -295,15 +309,82 @@ os·sigpipe(void)
runtime·throw("too many writes on closed pipe");
}
+static int64
+atolwhex(byte *p)
+{
+ int64 n;
+ int32 f;
+
+ n = 0;
+ f = 0;
+ while(*p == ' ' || *p == '\t')
+ p++;
+ if(*p == '-' || *p == '+') {
+ if(*p++ == '-')
+ f = 1;
+ while(*p == ' ' || *p == '\t')
+ p++;
+ }
+ if(p[0] == '0' && p[1]) {
+ if(p[1] == 'x' || p[1] == 'X') {
+ p += 2;
+ for(;;) {
+ if('0' <= *p && *p <= '9')
+ n = n*16 + *p++ - '0';
+ else if('a' <= *p && *p <= 'f')
+ n = n*16 + *p++ - 'a' + 10;
+ else if('A' <= *p && *p <= 'F')
+ n = n*16 + *p++ - 'A' + 10;
+ else
+ break;
+ }
+ } else
+ while('0' <= *p && *p <= '7')
+ n = n*8 + *p++ - '0';
+ } else
+ while('0' <= *p && *p <= '9')
+ n = n*10 + *p++ - '0';
+ if(f)
+ n = -n;
+ return n;
+}
+
void
runtime·sigpanic(void)
{
- if(g->sigpc == 0)
- runtime·panicstring("call of nil func value");
- runtime·panicstring(m->notesig);
-
- if(g->sig == 1 || g->sig == 2)
+ byte *p;
+
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
+ switch(g->sig) {
+ case SIGRFAULT:
+ case SIGWFAULT:
+ p = runtime·strstr((byte*)m->notesig, (byte*)"addr=")+5;
+ g->sigcode1 = atolwhex(p);
+ if(g->sigcode1 < 0x1000 || g->paniconfault) {
+ if(g->sigpc == 0)
+ runtime·panicstring("call of nil func value");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ }
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
runtime·throw("fault");
+ break;
+ case SIGTRAP:
+ if(g->paniconfault)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·throw(m->notesig);
+ break;
+ case SIGINTDIV:
+ runtime·panicstring("integer divide by zero");
+ break;
+ case SIGFLOAT:
+ runtime·panicstring("floating point error");
+ break;
+ default:
+ runtime·panicstring(m->notesig);
+ break;
+ }
}
int32
@@ -313,9 +394,9 @@ runtime·read(int32 fd, void *buf, int32 nbytes)
}
int32
-runtime·write(int32 fd, void *buf, int32 nbytes)
+runtime·write(uintptr fd, void *buf, int32 nbytes)
{
- return runtime·pwrite(fd, buf, nbytes, -1LL);
+ return runtime·pwrite((int32)fd, buf, nbytes, -1LL);
}
uintptr
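
atolwhex above exists because the runtime cannot call into libc; it re-implements "leading 0x means hex, leading 0 means octal, else decimal" with an optional sign so sigpanic can pull the fault address out of a Plan 9 note. In ordinary C the equivalent parse is strstr plus strtoll with base 0 (sketch; the note text is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    const char *note = "sys: trap: fault read addr=0xdeadbeef pc=0x1042";
    const char *p = strstr(note, "addr=");
    long long addr;

    if (p == NULL)
        return 1;
    addr = strtoll(p + 5, NULL, 0);   /* base 0: 0x.. hex, 0.. octal, else decimal */
    printf("fault address: %#llx\n", addr);
    return 0;
}
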
diff --git a/src/pkg/runtime/os_plan9.h b/src/pkg/runtime/os_plan9.h
index f0474cda5..00ea8366d 100644
--- a/src/pkg/runtime/os_plan9.h
+++ b/src/pkg/runtime/os_plan9.h
@@ -78,5 +78,12 @@ struct Tos {
/* top of stack is here */
};
-#define NSIG 5 /* number of signals in runtime·SigTab array */
+#define NSIG 14 /* number of signals in runtime·SigTab array */
#define ERRMAX 128 /* max length of note string */
+
+/* Notes in runtime·sigtab that are handled by runtime·sigpanic. */
+#define SIGRFAULT 2
+#define SIGWFAULT 3
+#define SIGINTDIV 4
+#define SIGFLOAT 5
+#define SIGTRAP 6
diff --git a/src/pkg/runtime/os_plan9_386.c b/src/pkg/runtime/os_plan9_386.c
index 0844d726b..80d711f33 100644
--- a/src/pkg/runtime/os_plan9_386.c
+++ b/src/pkg/runtime/os_plan9_386.c
@@ -10,71 +10,80 @@
void
runtime·dumpregs(Ureg *u)
{
- runtime·printf("ax %X\n", u->ax);
- runtime·printf("bx %X\n", u->bx);
- runtime·printf("cx %X\n", u->cx);
- runtime·printf("dx %X\n", u->dx);
- runtime·printf("di %X\n", u->di);
- runtime·printf("si %X\n", u->si);
- runtime·printf("bp %X\n", u->bp);
- runtime·printf("sp %X\n", u->sp);
- runtime·printf("pc %X\n", u->pc);
- runtime·printf("flags %X\n", u->flags);
- runtime·printf("cs %X\n", u->cs);
- runtime·printf("fs %X\n", u->fs);
- runtime·printf("gs %X\n", u->gs);
+ runtime·printf("ax %x\n", u->ax);
+ runtime·printf("bx %x\n", u->bx);
+ runtime·printf("cx %x\n", u->cx);
+ runtime·printf("dx %x\n", u->dx);
+ runtime·printf("di %x\n", u->di);
+ runtime·printf("si %x\n", u->si);
+ runtime·printf("bp %x\n", u->bp);
+ runtime·printf("sp %x\n", u->sp);
+ runtime·printf("pc %x\n", u->pc);
+ runtime·printf("flags %x\n", u->flags);
+ runtime·printf("cs %x\n", u->cs);
+ runtime·printf("fs %x\n", u->fs);
+ runtime·printf("gs %x\n", u->gs);
}
int32
-runtime·sighandler(void *v, int8 *s, G *gp)
+runtime·sighandler(void *v, int8 *note, G *gp)
{
+ uintptr *sp;
+ SigTab *t;
bool crash;
Ureg *ureg;
- uintptr *sp;
- SigTab *sig, *nsig;
- intgo len, i;
+ intgo len, n;
+ int32 sig, flags;
- if(!s)
- return NCONT;
-
- len = runtime·findnull((byte*)s);
- if(len <= 4 || runtime·mcmp((byte*)s, (byte*)"sys:", 4) != 0)
- return NDFLT;
-
- nsig = nil;
- sig = runtime·sigtab;
- for(i=0; i < NSIG; i++) {
- if(runtime·strstr((byte*)s, (byte*)sig->name)) {
- nsig = sig;
+ ureg = (Ureg*)v;
+
+ // The kernel will never pass us a nil note or ureg, so we probably
+ // made a mistake somewhere in runtime·sigtramp.
+ if(ureg == nil || note == nil) {
+ runtime·printf("sighandler: ureg %p note %p\n", ureg, note);
+ goto Throw;
+ }
+
+ // Check that the note is no more than ERRMAX bytes (including
+ // the trailing NUL). We should never receive a longer note.
+ len = runtime·findnull((byte*)note);
+ if(len > ERRMAX-1) {
+ runtime·printf("sighandler: note is longer than ERRMAX\n");
+ goto Throw;
+ }
+
+ // See if the note matches one of the patterns in runtime·sigtab.
+ // Notes that do not match any pattern can be handled at a higher
+ // level by the program but will otherwise be ignored.
+ flags = SigNotify;
+ for(sig = 0; sig < nelem(runtime·sigtab); sig++) {
+ t = &runtime·sigtab[sig];
+ n = runtime·findnull((byte*)t->name);
+ if(len < n)
+ continue;
+ if(runtime·strncmp((byte*)note, (byte*)t->name, n) == 0) {
+ flags = t->flags;
break;
}
- sig++;
}
- if(nsig == nil)
- return NDFLT;
-
- ureg = v;
- if(nsig->flags & SigPanic) {
- if(gp == nil || m->notesig == 0)
- goto Throw;
+ if(flags & SigGoExit)
+ runtime·exits(note+9); // Strip "go: exit " prefix.
- // Save error string from sigtramp's stack,
- // into gsignal->sigcode0, so we can reliably
- // access it from the panic routines.
- if(len > ERRMAX)
- len = ERRMAX;
- runtime·memmove((void*)m->notesig, (void*)s, len);
+ if(flags & SigPanic) {
+ // Copy the error string from sigtramp's stack into m->notesig so
+ // we can reliably access it from the panic routines.
+ runtime·memmove(m->notesig, note, len+1);
- gp->sig = i;
+ gp->sig = sig;
gp->sigpc = ureg->pc;
- // Only push runtime·sigpanic if ureg->pc != 0.
- // If ureg->pc == 0, probably panicked because of a
- // call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to runtime·sigpanic instead.
- // (Otherwise the trace will end at runtime·sigpanic and we
- // won't get to see who faulted.)
+ // Only push runtime·sigpanic if PC != 0.
+ //
+ // If PC == 0, probably panicked because of a call to a nil func.
+ // Not pushing that onto SP will make the trace look like a call
+ // to runtime·sigpanic instead. (Otherwise the trace will end at
+ // runtime·sigpanic and we won't get to see who faulted).
if(ureg->pc != 0) {
sp = (uintptr*)ureg->sp;
*--sp = ureg->pc;
@@ -84,34 +93,42 @@ runtime·sighandler(void *v, int8 *s, G *gp)
return NCONT;
}
- if(!(nsig->flags & SigThrow))
- return NDFLT;
+ if(flags & SigNotify) {
+ // TODO(ality): See if os/signal wants it.
+ //if(runtime·sigsend(...))
+ // return NCONT;
+ }
+ if(flags & SigKill)
+ goto Exit;
+ if(!(flags & SigThrow))
+ return NCONT;
Throw:
m->throwing = 1;
m->caughtsig = gp;
runtime·startpanic();
- runtime·printf("%s\n", s);
- runtime·printf("PC=%X\n", ureg->pc);
+ runtime·printf("%s\n", note);
+ runtime·printf("PC=%x\n", ureg->pc);
runtime·printf("\n");
if(runtime·gotraceback(&crash)) {
+ runtime·goroutineheader(gp);
runtime·traceback(ureg->pc, ureg->sp, 0, gp);
runtime·tracebackothers(gp);
+ runtime·printf("\n");
runtime·dumpregs(ureg);
}
if(crash)
runtime·crash();
- runtime·goexitsall("");
- runtime·exits(s);
-
- return 0;
+Exit:
+ runtime·goexitsall(note);
+ runtime·exits(note);
+ return NDFLT; // not reached
}
-
void
runtime·sigenable(uint32 sig)
{
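
The rewritten sighandler no longer substring-searches the note with strstr; it prefix-matches against each runtime·sigtab pattern with strncmp and defaults to SigNotify when nothing matches. A standalone sketch of that lookup (flag values and patterns are illustrative, not the runtime's):

#include <string.h>

enum { SigNotify = 1, SigPanic = 2 };   /* illustrative values */

typedef struct {
    int         flags;
    const char *name;   /* note prefix */
} SigTab;

static const SigTab sigtab[] = {
    { SigPanic,  "sys: trap: fault read" },
    { SigPanic,  "sys: trap: fault write" },
    { SigNotify, "sys: write on closed pipe" },
};

int
noteflags(const char *note)
{
    size_t i, n, len = strlen(note);

    for (i = 0; i < sizeof sigtab / sizeof sigtab[0]; i++) {
        n = strlen(sigtab[i].name);
        if (len >= n && strncmp(note, sigtab[i].name, n) == 0)
            return sigtab[i].flags;
    }
    return SigNotify;   /* unmatched notes are merely notified */
}
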
diff --git a/src/pkg/runtime/os_plan9_amd64.c b/src/pkg/runtime/os_plan9_amd64.c
index 58822ff84..a4e5ba819 100644
--- a/src/pkg/runtime/os_plan9_amd64.c
+++ b/src/pkg/runtime/os_plan9_amd64.c
@@ -34,55 +34,64 @@ runtime·dumpregs(Ureg *u)
}
int32
-runtime·sighandler(void *v, int8 *s, G *gp)
+runtime·sighandler(void *v, int8 *note, G *gp)
{
+ uintptr *sp;
+ SigTab *t;
bool crash;
Ureg *ureg;
- uintptr *sp;
- SigTab *sig, *nsig;
- intgo i, len;
+ intgo len, n;
+ int32 sig, flags;
- if(!s)
- return NCONT;
-
- len = runtime·findnull((byte*)s);
- if(len <= 4 || runtime·mcmp((byte*)s, (byte*)"sys:", 4) != 0)
- return NDFLT;
-
- nsig = nil;
- sig = runtime·sigtab;
- for(i=0; i < NSIG; i++) {
- if(runtime·strstr((byte*)s, (byte*)sig->name)) {
- nsig = sig;
+ ureg = (Ureg*)v;
+
+ // The kernel will never pass us a nil note or ureg, so we probably
+ // made a mistake somewhere in runtime·sigtramp.
+ if(ureg == nil || note == nil) {
+ runtime·printf("sighandler: ureg %p note %p\n", ureg, note);
+ goto Throw;
+ }
+
+ // Check that the note is no more than ERRMAX bytes (including
+ // the trailing NUL). We should never receive a longer note.
+ len = runtime·findnull((byte*)note);
+ if(len > ERRMAX-1) {
+ runtime·printf("sighandler: note is longer than ERRMAX\n");
+ goto Throw;
+ }
+
+ // See if the note matches one of the patterns in runtime·sigtab.
+ // Notes that do not match any pattern can be handled at a higher
+ // level by the program but will otherwise be ignored.
+ flags = SigNotify;
+ for(sig = 0; sig < nelem(runtime·sigtab); sig++) {
+ t = &runtime·sigtab[sig];
+ n = runtime·findnull((byte*)t->name);
+ if(len < n)
+ continue;
+ if(runtime·strncmp((byte*)note, (byte*)t->name, n) == 0) {
+ flags = t->flags;
break;
}
- sig++;
}
- if(nsig == nil)
- return NDFLT;
-
- ureg = v;
- if(nsig->flags & SigPanic) {
- if(gp == nil || m->notesig == 0)
- goto Throw;
+ if(flags & SigGoExit)
+ runtime·exits(note+9); // Strip "go: exit " prefix.
- // Save error string from sigtramp's stack,
- // into gsignal->sigcode0, so we can reliably
- // access it from the panic routines.
- if(len > ERRMAX)
- len = ERRMAX;
- runtime·memmove((void*)m->notesig, (void*)s, len);
+ if(flags & SigPanic) {
+ // Copy the error string from sigtramp's stack into m->notesig so
+ // we can reliably access it from the panic routines.
+ runtime·memmove(m->notesig, note, len+1);
- gp->sig = i;
+ gp->sig = sig;
gp->sigpc = ureg->ip;
- // Only push runtime·sigpanic if ureg->ip != 0.
- // If ureg->ip == 0, probably panicked because of a
- // call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to runtime·sigpanic instead.
- // (Otherwise the trace will end at runtime·sigpanic and we
- // won't get to see who faulted.)
+ // Only push runtime·sigpanic if PC != 0.
+ //
+ // If PC == 0, probably panicked because of a call to a nil func.
+ // Not pushing that onto SP will make the trace look like a call
+ // to runtime·sigpanic instead. (Otherwise the trace will end at
+ // runtime·sigpanic and we won't get to see who faulted).
if(ureg->ip != 0) {
sp = (uintptr*)ureg->sp;
*--sp = ureg->ip;
@@ -92,31 +101,40 @@ runtime·sighandler(void *v, int8 *s, G *gp)
return NCONT;
}
- if(!(nsig->flags & SigThrow))
- return NDFLT;
+ if(flags & SigNotify) {
+ // TODO(ality): See if os/signal wants it.
+ //if(runtime·sigsend(...))
+ // return NCONT;
+ }
+ if(flags & SigKill)
+ goto Exit;
+ if(!(flags & SigThrow))
+ return NCONT;
Throw:
m->throwing = 1;
m->caughtsig = gp;
runtime·startpanic();
- runtime·printf("%s\n", s);
+ runtime·printf("%s\n", note);
runtime·printf("PC=%X\n", ureg->ip);
runtime·printf("\n");
if(runtime·gotraceback(&crash)) {
+ runtime·goroutineheader(gp);
runtime·traceback(ureg->ip, ureg->sp, 0, gp);
runtime·tracebackothers(gp);
+ runtime·printf("\n");
runtime·dumpregs(ureg);
}
if(crash)
runtime·crash();
- runtime·goexitsall("");
- runtime·exits(s);
-
- return 0;
+Exit:
+ runtime·goexitsall(note);
+ runtime·exits(note);
+ return NDFLT; // not reached
}
void
diff --git a/src/pkg/runtime/os_solaris.c b/src/pkg/runtime/os_solaris.c
new file mode 100644
index 000000000..c6bbea311
--- /dev/null
+++ b/src/pkg/runtime/os_solaris.c
@@ -0,0 +1,583 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs_GOOS_GOARCH.h"
+#include "os_GOOS.h"
+#include "signal_unix.h"
+#include "stack.h"
+#include "../../cmd/ld/textflag.h"
+
+#pragma dynexport end _end
+#pragma dynexport etext _etext
+#pragma dynexport edata _edata
+
+#pragma dynimport libc·___errno ___errno "libc.so"
+#pragma dynimport libc·clock_gettime clock_gettime "libc.so"
+#pragma dynimport libc·close close "libc.so"
+#pragma dynimport libc·exit exit "libc.so"
+#pragma dynimport libc·fstat fstat "libc.so"
+#pragma dynimport libc·getcontext getcontext "libc.so"
+#pragma dynimport libc·getrlimit getrlimit "libc.so"
+#pragma dynimport libc·malloc malloc "libc.so"
+#pragma dynimport libc·mmap mmap "libc.so"
+#pragma dynimport libc·munmap munmap "libc.so"
+#pragma dynimport libc·open open "libc.so"
+#pragma dynimport libc·pthread_attr_destroy pthread_attr_destroy "libc.so"
+#pragma dynimport libc·pthread_attr_getstack pthread_attr_getstack "libc.so"
+#pragma dynimport libc·pthread_attr_init pthread_attr_init "libc.so"
+#pragma dynimport libc·pthread_attr_setdetachstate pthread_attr_setdetachstate "libc.so"
+#pragma dynimport libc·pthread_attr_setstack pthread_attr_setstack "libc.so"
+#pragma dynimport libc·pthread_create pthread_create "libc.so"
+#pragma dynimport libc·raise raise "libc.so"
+#pragma dynimport libc·read read "libc.so"
+#pragma dynimport libc·select select "libc.so"
+#pragma dynimport libc·sched_yield sched_yield "libc.so"
+#pragma dynimport libc·sem_init sem_init "libc.so"
+#pragma dynimport libc·sem_post sem_post "libc.so"
+#pragma dynimport libc·sem_reltimedwait_np sem_reltimedwait_np "libc.so"
+#pragma dynimport libc·sem_wait sem_wait "libc.so"
+#pragma dynimport libc·setitimer setitimer "libc.so"
+#pragma dynimport libc·sigaction sigaction "libc.so"
+#pragma dynimport libc·sigaltstack sigaltstack "libc.so"
+#pragma dynimport libc·sigprocmask sigprocmask "libc.so"
+#pragma dynimport libc·sysconf sysconf "libc.so"
+#pragma dynimport libc·usleep usleep "libc.so"
+#pragma dynimport libc·write write "libc.so"
+
+extern uintptr libc·___errno;
+extern uintptr libc·clock_gettime;
+extern uintptr libc·close;
+extern uintptr libc·exit;
+extern uintptr libc·fstat;
+extern uintptr libc·getcontext;
+extern uintptr libc·getrlimit;
+extern uintptr libc·malloc;
+extern uintptr libc·mmap;
+extern uintptr libc·munmap;
+extern uintptr libc·open;
+extern uintptr libc·pthread_attr_destroy;
+extern uintptr libc·pthread_attr_getstack;
+extern uintptr libc·pthread_attr_init;
+extern uintptr libc·pthread_attr_setdetachstate;
+extern uintptr libc·pthread_attr_setstack;
+extern uintptr libc·pthread_create;
+extern uintptr libc·raise;
+extern uintptr libc·read;
+extern uintptr libc·sched_yield;
+extern uintptr libc·select;
+extern uintptr libc·sem_init;
+extern uintptr libc·sem_post;
+extern uintptr libc·sem_reltimedwait_np;
+extern uintptr libc·sem_wait;
+extern uintptr libc·setitimer;
+extern uintptr libc·sigaction;
+extern uintptr libc·sigaltstack;
+extern uintptr libc·sigprocmask;
+extern uintptr libc·sysconf;
+extern uintptr libc·usleep;
+extern uintptr libc·write;
+
+void runtime·getcontext(Ucontext *context);
+int32 runtime·pthread_attr_destroy(PthreadAttr* attr);
+int32 runtime·pthread_attr_init(PthreadAttr* attr);
+int32 runtime·pthread_attr_getstack(PthreadAttr* attr, void** addr, uint64* size);
+int32 runtime·pthread_attr_setdetachstate(PthreadAttr* attr, int32 state);
+int32 runtime·pthread_attr_setstack(PthreadAttr* attr, void* addr, uint64 size);
+int32 runtime·pthread_create(Pthread* thread, PthreadAttr* attr, void(*fn)(void), void *arg);
+uint32 runtime·tstart_sysvicall(M *newm);
+int32 runtime·sem_init(SemT* sem, int32 pshared, uint32 value);
+int32 runtime·sem_post(SemT* sem);
+int32 runtime·sem_reltimedwait_np(SemT* sem, Timespec* timeout);
+int32 runtime·sem_wait(SemT* sem);
+int64 runtime·sysconf(int32 name);
+
+extern SigTab runtime·sigtab[];
+static Sigset sigset_none;
+static Sigset sigset_all = { ~(uint32)0, ~(uint32)0, ~(uint32)0, ~(uint32)0, };
+
+// Calling sysvicall6 on the OS stack.
+#pragma textflag NOSPLIT
+uintptr
+runtime·sysvicall6(uintptr fn, int32 count, ...)
+{
+ runtime·memclr((byte*)&m->scratch, sizeof(m->scratch));
+ m->libcall.fn = (void*)fn;
+ m->libcall.n = (uintptr)count;
+ for(;count; count--)
+ m->scratch.v[count - 1] = *((uintptr*)&count + count);
+ m->libcall.args = (uintptr*)&m->scratch.v[0];
+ runtime·asmcgocall(runtime·asmsysvicall6, &m->libcall);
+ return m->libcall.r1;
+}
+
+static int32
+getncpu(void)
+{
+ int32 n;
+
+ n = (int32)runtime·sysconf(_SC_NPROCESSORS_ONLN);
+ if(n < 1)
+ return 1;
+ return n;
+}
+
+void
+runtime·osinit(void)
+{
+ runtime·ncpu = getncpu();
+}
+
+void
+runtime·newosproc(M *mp, void *stk)
+{
+ PthreadAttr attr;
+ Sigset oset;
+ Pthread tid;
+ int32 ret;
+
+ USED(stk);
+ if(runtime·pthread_attr_init(&attr) != 0)
+ runtime·throw("pthread_attr_init");
+ if(runtime·pthread_attr_setstack(&attr, 0, 0x200000) != 0)
+ runtime·throw("pthread_attr_setstack");
+ if(runtime·pthread_attr_getstack(&attr, (void**)&mp->g0->stackbase, &mp->g0->stacksize) != 0)
+ runtime·throw("pthread_attr_getstack");
+ if(runtime·pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
+ runtime·throw("pthread_attr_setdetachstate");
+
+ // Disable signals during create, so that the new thread starts
+ // with signals disabled. It will enable them in minit.
+ runtime·sigprocmask(SIG_SETMASK, &sigset_all, &oset);
+ ret = runtime·pthread_create(&tid, &attr, (void (*)(void))runtime·tstart_sysvicall, mp);
+ runtime·sigprocmask(SIG_SETMASK, &oset, nil);
+ if(ret != 0) {
+ runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), ret);
+ runtime·throw("runtime.newosproc");
+ }
+}
+
+void
+runtime·get_random_data(byte **rnd, int32 *rnd_len)
+{
+ #pragma dataflag NOPTR
+ static byte urandom_data[HashRandomBytes];
+ int32 fd;
+ fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
+ if(runtime·read(fd, urandom_data, HashRandomBytes) == HashRandomBytes) {
+ *rnd = urandom_data;
+ *rnd_len = HashRandomBytes;
+ } else {
+ *rnd = nil;
+ *rnd_len = 0;
+ }
+ runtime·close(fd);
+}
+
+void
+runtime·goenvs(void)
+{
+ runtime·goenvs_unix();
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+void
+runtime·mpreinit(M *mp)
+{
+ mp->gsignal = runtime·malg(32*1024);
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, can not allocate memory.
+void
+runtime·minit(void)
+{
+ runtime·asmcgocall(runtime·miniterrno, (void *)libc·___errno);
+ // Initialize signal handling
+ runtime·signalstack((byte*)m->gsignal->stackguard - StackGuard, 32*1024);
+ runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
+}
+
+// Called from dropm to undo the effect of an minit.
+void
+runtime·unminit(void)
+{
+ runtime·signalstack(nil, 0);
+}
+
+void
+runtime·sigpanic(void)
+{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
+ switch(g->sig) {
+ case SIGBUS:
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000 || g->paniconfault) {
+ if(g->sigpc == 0)
+ runtime·panicstring("call of nil func value");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ }
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGSEGV:
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000 || g->paniconfault) {
+ if(g->sigpc == 0)
+ runtime·panicstring("call of nil func value");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ }
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGFPE:
+ switch(g->sigcode0) {
+ case FPE_INTDIV:
+ runtime·panicstring("integer divide by zero");
+ case FPE_INTOVF:
+ runtime·panicstring("integer overflow");
+ }
+ runtime·panicstring("floating point error");
+ }
+ runtime·panicstring(runtime·sigtab[g->sig].name);
+}
+
+uintptr
+runtime·memlimit(void)
+{
+ Rlimit rl;
+ extern byte text[], end[];
+ uintptr used;
+
+ if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
+ return 0;
+ if(rl.rlim_cur >= 0x7fffffff)
+ return 0;
+
+ // Estimate our VM footprint excluding the heap.
+ // Not an exact science: use size of binary plus
+ // some room for thread stacks.
+ used = end - text + (64<<20);
+ if(used >= rl.rlim_cur)
+ return 0;
+
+ // If there's not at least 16 MB left, we're probably
+ // not going to be able to do much. Treat as no limit.
+ rl.rlim_cur -= used;
+ if(rl.rlim_cur < (16<<20))
+ return 0;
+
+ return rl.rlim_cur - used;
+}
+
+void
+runtime·setprof(bool on)
+{
+ USED(on);
+}
+
+extern void runtime·sigtramp(void);
+
+void
+runtime·setsig(int32 i, GoSighandler *fn, bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask.__sigbits[0] = ~(uint32)0;
+ sa.sa_mask.__sigbits[1] = ~(uint32)0;
+ sa.sa_mask.__sigbits[2] = ~(uint32)0;
+ sa.sa_mask.__sigbits[3] = ~(uint32)0;
+ if(fn == runtime·sighandler)
+ fn = (void*)runtime·sigtramp;
+ *((void**)&sa._funcptr[0]) = (void*)fn;
+ runtime·sigaction(i, &sa, nil);
+}
+
+GoSighandler*
+runtime·getsig(int32 i)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ runtime·sigaction(i, nil, &sa);
+ if(*((void**)&sa._funcptr[0]) == runtime·sigtramp)
+ return runtime·sighandler;
+ return *((void**)&sa._funcptr[0]);
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ StackT st;
+
+ st.ss_sp = (void*)p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ if(p == nil)
+ st.ss_flags = SS_DISABLE;
+ runtime·sigaltstack(&st, nil);
+}
+
+void
+runtime·unblocksignals(void)
+{
+ runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
+}
+
+#pragma textflag NOSPLIT
+uintptr
+runtime·semacreate(void)
+{
+ SemT* sem;
+
+ // Call libc's malloc rather than runtime·malloc. This will
+ // allocate space on the C heap. We can't call runtime·malloc
+ // here because it could cause a deadlock.
+ m->libcall.fn = (void*)libc·malloc;
+ m->libcall.n = 1;
+ runtime·memclr((byte*)&m->scratch, sizeof(m->scratch));
+ m->scratch.v[0] = (uintptr)sizeof(*sem);
+ m->libcall.args = (uintptr*)&m->scratch;
+ runtime·asmcgocall(runtime·asmsysvicall6, &m->libcall);
+ sem = (void*)m->libcall.r1;
+ if(runtime·sem_init(sem, 0, 0) != 0)
+ runtime·throw("sem_init");
+ return (uintptr)sem;
+}
+
+#pragma textflag NOSPLIT
+int32
+runtime·semasleep(int64 ns)
+{
+ if(ns >= 0) {
+ m->ts.tv_sec = ns / 1000000000LL;
+ m->ts.tv_nsec = ns % 1000000000LL;
+
+ m->libcall.fn = (void*)libc·sem_reltimedwait_np;
+ m->libcall.n = 2;
+ runtime·memclr((byte*)&m->scratch, sizeof(m->scratch));
+ m->scratch.v[0] = m->waitsema;
+ m->scratch.v[1] = (uintptr)&m->ts;
+ m->libcall.args = (uintptr*)&m->scratch;
+ runtime·asmcgocall(runtime·asmsysvicall6, &m->libcall);
+ if(*m->perrno != 0) {
+ if(*m->perrno == ETIMEDOUT || *m->perrno == EAGAIN || *m->perrno == EINTR)
+ return -1;
+ runtime·throw("sem_reltimedwait_np");
+ }
+ return 0;
+ }
+ for(;;) {
+ m->libcall.fn = (void*)libc·sem_wait;
+ m->libcall.n = 1;
+ runtime·memclr((byte*)&m->scratch, sizeof(m->scratch));
+ m->scratch.v[0] = m->waitsema;
+ m->libcall.args = (uintptr*)&m->scratch;
+ runtime·asmcgocall(runtime·asmsysvicall6, &m->libcall);
+ if(m->libcall.r1 == 0)
+ break;
+ if(*m->perrno == EINTR)
+ continue;
+ runtime·throw("sem_wait");
+ }
+ return 0;
+}
+
+#pragma textflag NOSPLIT
+void
+runtime·semawakeup(M *mp)
+{
+ SemT* sem = (SemT*)mp->waitsema;
+ if(runtime·sem_post(sem) != 0)
+ runtime·throw("sem_post");
+}
+
+int32
+runtime·close(int32 fd)
+{
+ return runtime·sysvicall6(libc·close, 1, (uintptr)fd);
+}
+
+void
+runtime·exit(int32 r)
+{
+ runtime·sysvicall6(libc·exit, 1, (uintptr)r);
+}
+
+/* int32 */ void
+runtime·getcontext(Ucontext* context)
+{
+ runtime·sysvicall6(libc·getcontext, 1, (uintptr)context);
+}
+
+int32
+runtime·getrlimit(int32 res, Rlimit* rlp)
+{
+ return runtime·sysvicall6(libc·getrlimit, 2, (uintptr)res, (uintptr)rlp);
+}
+
+uint8*
+runtime·mmap(byte* addr, uintptr len, int32 prot, int32 flags, int32 fildes, uint32 off)
+{
+ return (uint8*)runtime·sysvicall6(libc·mmap, 6, (uintptr)addr, (uintptr)len, (uintptr)prot, (uintptr)flags, (uintptr)fildes, (uintptr)off);
+}
+
+void
+runtime·munmap(byte* addr, uintptr len)
+{
+ runtime·sysvicall6(libc·munmap, 2, (uintptr)addr, (uintptr)len);
+}
+
+extern int64 runtime·nanotime1(void);
+#pragma textflag NOSPLIT
+int64
+runtime·nanotime(void)
+{
+ return runtime·sysvicall6((uintptr)runtime·nanotime1, 0);
+}
+
+void
+time·now(int64 sec, int32 usec)
+{
+ int64 ns;
+
+ ns = runtime·nanotime();
+ sec = ns / 1000000000LL;
+ usec = ns - sec * 1000000000LL;
+ FLUSH(&sec);
+ FLUSH(&usec);
+}
+
+int32
+runtime·open(int8* path, int32 oflag, int32 mode)
+{
+ return runtime·sysvicall6(libc·open, 3, (uintptr)path, (uintptr)oflag, (uintptr)mode);
+}
+
+int32
+runtime·pthread_attr_destroy(PthreadAttr* attr)
+{
+ return runtime·sysvicall6(libc·pthread_attr_destroy, 1, (uintptr)attr);
+}
+
+int32
+runtime·pthread_attr_getstack(PthreadAttr* attr, void** addr, uint64* size)
+{
+ return runtime·sysvicall6(libc·pthread_attr_getstack, 3, (uintptr)attr, (uintptr)addr, (uintptr)size);
+}
+
+int32
+runtime·pthread_attr_init(PthreadAttr* attr)
+{
+ return runtime·sysvicall6(libc·pthread_attr_init, 1, (uintptr)attr);
+}
+
+int32
+runtime·pthread_attr_setdetachstate(PthreadAttr* attr, int32 state)
+{
+ return runtime·sysvicall6(libc·pthread_attr_setdetachstate, 2, (uintptr)attr, (uintptr)state);
+}
+
+int32
+runtime·pthread_attr_setstack(PthreadAttr* attr, void* addr, uint64 size)
+{
+ return runtime·sysvicall6(libc·pthread_attr_setstack, 3, (uintptr)attr, (uintptr)addr, (uintptr)size);
+}
+
+int32
+runtime·pthread_create(Pthread* thread, PthreadAttr* attr, void(*fn)(void), void *arg)
+{
+ return runtime·sysvicall6(libc·pthread_create, 4, (uintptr)thread, (uintptr)attr, (uintptr)fn, (uintptr)arg);
+}
+
+/* int32 */ void
+runtime·raise(int32 sig)
+{
+ runtime·sysvicall6(libc·raise, 1, (uintptr)sig);
+}
+
+int32
+runtime·read(int32 fd, void* buf, int32 nbyte)
+{
+ return runtime·sysvicall6(libc·read, 3, (uintptr)fd, (uintptr)buf, (uintptr)nbyte);
+}
+
+#pragma textflag NOSPLIT
+int32
+runtime·sem_init(SemT* sem, int32 pshared, uint32 value)
+{
+ return runtime·sysvicall6(libc·sem_init, 3, (uintptr)sem, (uintptr)pshared, (uintptr)value);
+}
+
+#pragma textflag NOSPLIT
+int32
+runtime·sem_post(SemT* sem)
+{
+ return runtime·sysvicall6(libc·sem_post, 1, (uintptr)sem);
+}
+
+#pragma textflag NOSPLIT
+int32
+runtime·sem_reltimedwait_np(SemT* sem, Timespec* timeout)
+{
+ return runtime·sysvicall6(libc·sem_reltimedwait_np, 2, (uintptr)sem, (uintptr)timeout);
+}
+
+#pragma textflag NOSPLIT
+int32
+runtime·sem_wait(SemT* sem)
+{
+ return runtime·sysvicall6(libc·sem_wait, 1, (uintptr)sem);
+}
+
+/* int32 */ void
+runtime·setitimer(int32 which, Itimerval* value, Itimerval* ovalue)
+{
+ runtime·sysvicall6(libc·setitimer, 3, (uintptr)which, (uintptr)value, (uintptr)ovalue);
+}
+
+/* int32 */ void
+runtime·sigaction(int32 sig, struct Sigaction* act, struct Sigaction* oact)
+{
+ runtime·sysvicall6(libc·sigaction, 3, (uintptr)sig, (uintptr)act, (uintptr)oact);
+}
+
+/* int32 */ void
+runtime·sigaltstack(Sigaltstack* ss, Sigaltstack* oss)
+{
+ runtime·sysvicall6(libc·sigaltstack, 2, (uintptr)ss, (uintptr)oss);
+}
+
+/* int32 */ void
+runtime·sigprocmask(int32 how, Sigset* set, Sigset* oset)
+{
+ runtime·sysvicall6(libc·sigprocmask, 3, (uintptr)how, (uintptr)set, (uintptr)oset);
+}
+
+int64
+runtime·sysconf(int32 name)
+{
+ return runtime·sysvicall6(libc·sysconf, 1, (uintptr)name);
+}
+
+void
+runtime·usleep(uint32 us)
+{
+ runtime·sysvicall6(libc·usleep, 1, (uintptr)us);
+}
+
+int32
+runtime·write(uintptr fd, void* buf, int32 nbyte)
+{
+ return runtime·sysvicall6(libc·write, 3, (uintptr)fd, (uintptr)buf, (uintptr)nbyte);
+}
+
+void
+runtime·osyield(void)
+{
+ runtime·sysvicall6(libc·sched_yield, 0);
+}
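
runtime·sysvicall6 above gathers its variadic arguments with pointer arithmetic that is only valid under 6c's calling convention, then ships them through asmcgocall so the libc call runs on the OS stack. The portable shape of "collect up to six integer arguments, make one indirect call" is plain stdarg (sketch; the scratch/libcall plumbing and the stack switch are runtime-specific and omitted):

#include <stdarg.h>
#include <stdint.h>

typedef uintptr_t (*fn6)(uintptr_t, uintptr_t, uintptr_t,
                         uintptr_t, uintptr_t, uintptr_t);

uintptr_t
callv6(fn6 fn, int count, ...)
{
    uintptr_t a[6] = {0};
    va_list ap;
    int i;

    va_start(ap, count);
    for (i = 0; i < count && i < 6; i++)
        a[i] = va_arg(ap, uintptr_t);
    va_end(ap);
    return fn(a[0], a[1], a[2], a[3], a[4], a[5]);
}
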
diff --git a/src/pkg/runtime/os_solaris.h b/src/pkg/runtime/os_solaris.h
new file mode 100644
index 000000000..f3fae5da2
--- /dev/null
+++ b/src/pkg/runtime/os_solaris.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define SS_DISABLE 2
+
+#define SIG_BLOCK 1
+#define SIG_UNBLOCK 2
+#define SIG_SETMASK 3
+
+typedef uintptr kevent_udata;
+
+struct sigaction;
+
+void runtime·sigpanic(void);
+
+void runtime·setitimer(int32, Itimerval*, Itimerval*);
+void runtime·sigaction(int32, struct Sigaction*, struct Sigaction*);
+void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
+void runtime·sigprocmask(int32, Sigset*, Sigset*);
+void runtime·unblocksignals(void);
+int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
+
+#define NSIG 73 /* number of signals in runtime·SigTab array */
+#define SI_USER 0
+
+void runtime·raisesigpipe(void);
+void runtime·setsig(int32, void(*)(int32, Siginfo*, void*, G*), bool);
+void runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp);
+void runtime·sigpanic(void);
+
+#define _UC_SIGMASK 0x01
+#define _UC_CPU 0x04
+
+#define RLIMIT_AS 10
+typedef struct Rlimit Rlimit;
+struct Rlimit {
+ int64 rlim_cur;
+ int64 rlim_max;
+};
+int32 runtime·getrlimit(int32, Rlimit*);
+
+// Call a library function with SysV conventions,
+// and switch to os stack during the call.
+#pragma varargck countpos runtime·sysvicall6 2
+#pragma varargck type runtime·sysvicall6 uintptr
+#pragma varargck type runtime·sysvicall6 int32
+void runtime·asmsysvicall6(void *c);
+uintptr runtime·sysvicall6(uintptr fn, int32 count, ...);
+
+void runtime·miniterrno(void *fn);
diff --git a/src/pkg/runtime/os_windows.c b/src/pkg/runtime/os_windows.c
index c3e296aa6..0dd44ed1b 100644
--- a/src/pkg/runtime/os_windows.c
+++ b/src/pkg/runtime/os_windows.c
@@ -8,6 +8,7 @@
#include "os_GOOS.h"
#include "../../cmd/ld/textflag.h"
+#pragma dynimport runtime·AddVectoredExceptionHandler AddVectoredExceptionHandler "kernel32.dll"
#pragma dynimport runtime·CloseHandle CloseHandle "kernel32.dll"
#pragma dynimport runtime·CreateEvent CreateEventA "kernel32.dll"
#pragma dynimport runtime·CreateThread CreateThread "kernel32.dll"
@@ -26,20 +27,20 @@
#pragma dynimport runtime·GetThreadContext GetThreadContext "kernel32.dll"
#pragma dynimport runtime·LoadLibrary LoadLibraryW "kernel32.dll"
#pragma dynimport runtime·LoadLibraryA LoadLibraryA "kernel32.dll"
+#pragma dynimport runtime·NtWaitForSingleObject NtWaitForSingleObject "ntdll.dll"
#pragma dynimport runtime·ResumeThread ResumeThread "kernel32.dll"
#pragma dynimport runtime·SetConsoleCtrlHandler SetConsoleCtrlHandler "kernel32.dll"
#pragma dynimport runtime·SetEvent SetEvent "kernel32.dll"
+#pragma dynimport runtime·SetProcessPriorityBoost SetProcessPriorityBoost "kernel32.dll"
#pragma dynimport runtime·SetThreadPriority SetThreadPriority "kernel32.dll"
#pragma dynimport runtime·SetWaitableTimer SetWaitableTimer "kernel32.dll"
#pragma dynimport runtime·Sleep Sleep "kernel32.dll"
#pragma dynimport runtime·SuspendThread SuspendThread "kernel32.dll"
-#pragma dynimport runtime·timeBeginPeriod timeBeginPeriod "winmm.dll"
#pragma dynimport runtime·WaitForSingleObject WaitForSingleObject "kernel32.dll"
#pragma dynimport runtime·WriteFile WriteFile "kernel32.dll"
-#pragma dynimport runtime·NtWaitForSingleObject NtWaitForSingleObject "ntdll.dll"
-
-extern void *runtime·NtWaitForSingleObject;
+#pragma dynimport runtime·timeBeginPeriod timeBeginPeriod "winmm.dll"
+extern void *runtime·AddVectoredExceptionHandler;
extern void *runtime·CloseHandle;
extern void *runtime·CreateEvent;
extern void *runtime·CreateThread;
@@ -58,19 +59,25 @@ extern void *runtime·GetSystemTimeAsFileTime;
extern void *runtime·GetThreadContext;
extern void *runtime·LoadLibrary;
extern void *runtime·LoadLibraryA;
+extern void *runtime·NtWaitForSingleObject;
extern void *runtime·ResumeThread;
extern void *runtime·SetConsoleCtrlHandler;
extern void *runtime·SetEvent;
+extern void *runtime·SetProcessPriorityBoost;
extern void *runtime·SetThreadPriority;
extern void *runtime·SetWaitableTimer;
extern void *runtime·Sleep;
extern void *runtime·SuspendThread;
-extern void *runtime·timeBeginPeriod;
extern void *runtime·WaitForSingleObject;
extern void *runtime·WriteFile;
+extern void *runtime·timeBeginPeriod;
void *runtime·GetQueuedCompletionStatusEx;
+extern uintptr runtime·externalthreadhandlerp;
+void runtime·externalthreadhandler(void);
+void runtime·sigtramp(void);
+
static int32
getproccount(void)
{
@@ -84,25 +91,22 @@ void
runtime·osinit(void)
{
void *kernel32;
- void *SetProcessPriorityBoost;
- // -1 = current process, -2 = current thread
- runtime·stdcall(runtime·DuplicateHandle, 7,
- (uintptr)-1, (uintptr)-2, (uintptr)-1, &m->thread,
- (uintptr)0, (uintptr)0, (uintptr)DUPLICATE_SAME_ACCESS);
+ runtime·externalthreadhandlerp = (uintptr)runtime·externalthreadhandler;
+
+ runtime·stdcall(runtime·AddVectoredExceptionHandler, 2, (uintptr)1, (uintptr)runtime·sigtramp);
runtime·stdcall(runtime·SetConsoleCtrlHandler, 2, runtime·ctrlhandler, (uintptr)1);
runtime·stdcall(runtime·timeBeginPeriod, 1, (uintptr)1);
runtime·ncpu = getproccount();
+
+ // Windows dynamic priority boosting assumes that a process has different types
+ // of dedicated threads -- GUI, IO, computational, etc. Go processes use
+ // equivalent threads that all do a mix of GUI, IO, computations, etc.
+ // In such context dynamic priority boosting does nothing but harm, so we turn it off.
+ runtime·stdcall(runtime·SetProcessPriorityBoost, 2, (uintptr)-1, (uintptr)1);
kernel32 = runtime·stdcall(runtime·LoadLibraryA, 1, "kernel32.dll");
if(kernel32 != nil) {
- // Windows dynamic priority boosting assumes that a process has different types
- // of dedicated threads -- GUI, IO, computational, etc. Go processes use
- // equivalent threads that all do a mix of GUI, IO, computations, etc.
- // In such context dynamic priority boosting does nothing but harm, so we turn it off.
- SetProcessPriorityBoost = runtime·stdcall(runtime·GetProcAddress, 2, kernel32, "SetProcessPriorityBoost");
- if(SetProcessPriorityBoost != nil) // supported since Windows XP
- runtime·stdcall(SetProcessPriorityBoost, 2, (uintptr)-1, (uintptr)1);
runtime·GetQueuedCompletionStatusEx = runtime·stdcall(runtime·GetProcAddress, 2, kernel32, "GetQueuedCompletionStatusEx");
}
}
@@ -162,7 +166,7 @@ runtime·exit(int32 code)
}
int32
-runtime·write(int32 fd, void *buf, int32 n)
+runtime·write(uintptr fd, void *buf, int32 n)
{
void *handle;
uint32 written;
@@ -176,7 +180,9 @@ runtime·write(int32 fd, void *buf, int32 n)
handle = runtime·stdcall(runtime·GetStdHandle, 1, (uintptr)-12);
break;
default:
- return -1;
+ // assume fd is a real Windows handle.
+ handle = (void*)fd;
+ break;
}
runtime·stdcall(runtime·WriteFile, 5, handle, buf, (uintptr)n, &written, (uintptr)0);
return written;
@@ -229,7 +235,6 @@ runtime·newosproc(M *mp, void *stk)
runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), runtime·getlasterror());
runtime·throw("runtime.newosproc");
}
- runtime·atomicstorep(&mp->thread, thandle);
}
// Called to initialize a new m (including the bootstrap m).
@@ -245,14 +250,19 @@ runtime·mpreinit(M *mp)
void
runtime·minit(void)
{
- runtime·install_exception_handler();
+ void *thandle;
+
+ // -1 = current process, -2 = current thread
+ runtime·stdcall(runtime·DuplicateHandle, 7,
+ (uintptr)-1, (uintptr)-2, (uintptr)-1, &thandle,
+ (uintptr)0, (uintptr)0, (uintptr)DUPLICATE_SAME_ACCESS);
+ runtime·atomicstorep(&m->thread, thandle);
}
// Called from dropm to undo the effect of an minit.
void
runtime·unminit(void)
{
- runtime·remove_exception_handler();
}
#pragma textflag NOSPLIT
@@ -285,11 +295,20 @@ time·now(int64 sec, int32 usec)
void *
runtime·stdcall(void *fn, int32 count, ...)
{
- m->wincall.fn = fn;
- m->wincall.n = count;
- m->wincall.args = (uintptr*)&count + 1;
- runtime·asmcgocall(runtime·asmstdcall, &m->wincall);
- return (void*)m->wincall.r1;
+ m->libcall.fn = fn;
+ m->libcall.n = count;
+ m->libcall.args = (uintptr*)&count + 1;
+ if(m->profilehz != 0) {
+ // leave pc/sp for cpu profiler
+ m->libcallg = g;
+ m->libcallpc = (uintptr)runtime·getcallerpc(&fn);
+ // sp must be set last, because once the async CPU profiler finds
+ // all three values non-zero, it will use them.
+ m->libcallsp = (uintptr)runtime·getcallersp(&fn);
+ }
+ runtime·asmcgocall(runtime·asmstdcall, &m->libcall);
+ m->libcallsp = 0;
+ return (void*)m->libcall.r1;
}
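The comment about sp being stored last is load-bearing: the asynchronous profiler treats (libcallg, libcallpc, libcallsp) as a consistent triple only once all three are non-zero, so sp acts as the publication flag. A hedged Go analogue of that publish-last pattern (type and field names are illustrative):

	package main

	import "sync/atomic"

	type libcall struct {
		g, pc uint64        // written first
		sp    atomic.Uint64 // written last; non-zero publishes g and pc
	}

	// begin records the call site; a reader that observes sp != 0 is
	// guaranteed to see the matching g and pc.
	func begin(lc *libcall, g, pc, sp uint64) {
		lc.g = g
		lc.pc = pc
		lc.sp.Store(sp)
	}

	// end withdraws the record before the results are consumed.
	func end(lc *libcall) { lc.sp.Store(0) }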
extern void runtime·usleep1(uint32);
@@ -329,9 +348,12 @@ runtime·issigpanic(uint32 code)
void
runtime·sigpanic(void)
{
+ if(!runtime·canpanic(g))
+ runtime·throw("unexpected signal during runtime execution");
+
switch(g->sig) {
case EXCEPTION_ACCESS_VIOLATION:
- if(g->sigcode1 < 0x1000) {
+ if(g->sigcode1 < 0x1000 || g->paniconfault) {
if(g->sigpc == 0)
runtime·panicstring("call of nil func value");
runtime·panicstring("invalid memory address or nil pointer dereference");
@@ -352,8 +374,6 @@ runtime·sigpanic(void)
runtime·throw("fault");
}
-extern void *runtime·sigtramp;
-
void
runtime·initsig(void)
{
@@ -383,7 +403,7 @@ runtime·ctrlhandler1(uint32 type)
return 0;
}
-extern void runtime·dosigprof(Context *r, G *gp);
+extern void runtime·dosigprof(Context *r, G *gp, M *mp);
extern void runtime·profileloop(void);
static void *profiletimer;
@@ -402,13 +422,11 @@ profilem(M *mp)
tls = runtime·tls0;
gp = *(G**)tls;
- if(gp != nil && gp != mp->g0 && gp->status != Gsyscall) {
- // align Context to 16 bytes
- r = (Context*)((uintptr)(&rbuf[15]) & ~15);
- r->ContextFlags = CONTEXT_CONTROL;
- runtime·stdcall(runtime·GetThreadContext, 2, mp->thread, r);
- runtime·dosigprof(r, gp);
- }
+ // align Context to 16 bytes
+ r = (Context*)((uintptr)(&rbuf[15]) & ~15);
+ r->ContextFlags = CONTEXT_CONTROL;
+ runtime·stdcall(runtime·GetThreadContext, 2, mp->thread, r);
+ runtime·dosigprof(r, gp, mp);
}
void
@@ -425,10 +443,13 @@ runtime·profileloop1(void)
allm = runtime·atomicloadp(&runtime·allm);
for(mp = allm; mp != nil; mp = mp->alllink) {
thread = runtime·atomicloadp(&mp->thread);
- if(thread == nil)
+ // Do not profile threads blocked on Notes;
+ // these include idle worker threads,
+ // the idle timer thread, the idle heap scavenger, etc.
+ if(thread == nil || mp->profilehz == 0 || mp->blocked)
continue;
runtime·stdcall(runtime·SuspendThread, 1, thread);
- if(mp->profilehz != 0)
+ if(mp->profilehz != 0 && !mp->blocked)
profilem(mp);
runtime·stdcall(runtime·ResumeThread, 1, thread);
}
diff --git a/src/pkg/runtime/os_windows_386.c b/src/pkg/runtime/os_windows_386.c
index c377e5b6c..c36a00114 100644
--- a/src/pkg/runtime/os_windows_386.c
+++ b/src/pkg/runtime/os_windows_386.c
@@ -24,16 +24,47 @@ runtime·dumpregs(Context *r)
runtime·printf("gs %x\n", r->SegGs);
}
+#define DBG_PRINTEXCEPTION_C 0x40010006
+
+// Called by sigtramp from Windows VEH handler.
+// Return value signals whether the exception has been handled (-1)
+// or should be made available to other handlers in the chain (0).
uint32
runtime·sighandler(ExceptionRecord *info, Context *r, G *gp)
{
bool crash;
uintptr *sp;
+ extern byte text[], etext[];
+
+ if(info->ExceptionCode == DBG_PRINTEXCEPTION_C) {
+ // This exception is intended to be caught by debuggers.
+ // There is a not-very-informative message like
+ // "Invalid parameter passed to C runtime function"
+ // sitting at info->ExceptionInformation[0] (a wchar_t*),
+ // with length info->ExceptionInformation[1].
+ // The default behavior is to ignore this exception,
+ // but somehow returning 0 here (meaning keep going)
+ // makes the program crash instead. Maybe Windows has no
+ // other handler registered? In any event, ignore it.
+ return -1;
+ }
+
+ // Only handle exception if executing instructions in Go binary
+ // (not Windows library code).
+ if(r->Eip < (uint32)text || (uint32)etext < r->Eip)
+ return 0;
switch(info->ExceptionCode) {
case EXCEPTION_BREAKPOINT:
- r->Eip--; // because 8l generates 2 bytes for INT3
- return 1;
+ // It is unclear whether this is needed, unclear whether it
+ // would work, and unclear how to test it. Leave out for now.
+ // This only handles breakpoint instructions written in the
+ // assembly sources, not breakpoints set by a debugger, and
+ // there are very few of the former.
+ //
+ // r->Eip--; // because 8l generates 2 bytes for INT3
+ // return 0;
+ break;
}
if(gp != nil && runtime·issigpanic(info->ExceptionCode)) {
@@ -58,15 +89,15 @@ runtime·sighandler(ExceptionRecord *info, Context *r, G *gp)
r->Esp = (uintptr)sp;
}
r->Eip = (uintptr)runtime·sigpanic;
- return 0;
+ return -1;
}
if(runtime·panicking) // traceback already printed
runtime·exit(2);
runtime·panicking = 1;
- runtime·printf("Exception %x %p %p\n", info->ExceptionCode,
- info->ExceptionInformation[0], info->ExceptionInformation[1]);
+ runtime·printf("Exception %x %p %p %p\n", info->ExceptionCode,
+ info->ExceptionInformation[0], info->ExceptionInformation[1], r->Eip);
runtime·printf("PC=%x\n", r->Eip);
if(m->lockedg != nil && m->ncgo > 0 && gp == m->g0) {
@@ -84,9 +115,8 @@ runtime·sighandler(ExceptionRecord *info, Context *r, G *gp)
if(crash)
runtime·crash();
-
runtime·exit(2);
- return 0;
+ return -1; // not reached
}
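Both the 386 and amd64 handlers now open with the same guard: claim the exception only when the faulting PC lies within the Go text segment, and otherwise pass it down the handler chain. A hedged sketch of that range check, with the text/etext symbol addresses passed in as plain integers:

	// inGoText reports whether pc points at an instruction compiled
	// into the Go binary, mirroring the r->Eip / r->Rip tests above.
	func inGoText(pc, text, etext uintptr) bool {
		return text <= pc && pc <= etext
	}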
void
@@ -102,7 +132,7 @@ runtime·sigdisable(uint32 sig)
}
void
-runtime·dosigprof(Context *r, G *gp)
+runtime·dosigprof(Context *r, G *gp, M *mp)
{
- runtime·sigprof((uint8*)r->Eip, (uint8*)r->Esp, nil, gp);
+ runtime·sigprof((uint8*)r->Eip, (uint8*)r->Esp, nil, gp, mp);
}
diff --git a/src/pkg/runtime/os_windows_amd64.c b/src/pkg/runtime/os_windows_amd64.c
index 97c48feb0..7fb973cde 100644
--- a/src/pkg/runtime/os_windows_amd64.c
+++ b/src/pkg/runtime/os_windows_amd64.c
@@ -32,15 +32,44 @@ runtime·dumpregs(Context *r)
runtime·printf("gs %X\n", (uint64)r->SegGs);
}
+#define DBG_PRINTEXCEPTION_C 0x40010006
+
+// Called by sigtramp from Windows VEH handler.
+// Return value signals whether the exception has been handled (-1)
+// or should be made available to other handlers in the chain (0).
uint32
runtime·sighandler(ExceptionRecord *info, Context *r, G *gp)
{
bool crash;
uintptr *sp;
+ extern byte text[], etext[];
+
+ if(info->ExceptionCode == DBG_PRINTEXCEPTION_C) {
+ // This exception is intended to be caught by debuggers.
+ // There is a not-very-informative message like
+ // "Invalid parameter passed to C runtime function"
+ // sitting at info->ExceptionInformation[0] (a wchar_t*),
+ // with length info->ExceptionInformation[1].
+ // The default behavior is to ignore this exception,
+ // but somehow returning 0 here (meaning keep going)
+ // makes the program crash instead. Maybe Windows has no
+ // other handler registered? In any event, ignore it.
+ return -1;
+ }
+
+ // Only handle exception if executing instructions in Go binary
+ // (not Windows library code).
+ if(r->Rip < (uint64)text || (uint64)etext < r->Rip)
+ return 0;
switch(info->ExceptionCode) {
case EXCEPTION_BREAKPOINT:
- return 1;
+ // It is unclear whether this is needed, unclear whether it
+ // would work, and unclear how to test it. Leave out for now.
+ // This only handles breakpoint instructions written in the
+ // assembly sources, not breakpoints set by a debugger, and
+ // there are very few of the former.
+ break;
}
if(gp != nil && runtime·issigpanic(info->ExceptionCode)) {
@@ -65,15 +94,16 @@ runtime·sighandler(ExceptionRecord *info, Context *r, G *gp)
r->Rsp = (uintptr)sp;
}
r->Rip = (uintptr)runtime·sigpanic;
- return 0;
+ return -1;
}
if(runtime·panicking) // traceback already printed
runtime·exit(2);
runtime·panicking = 1;
- runtime·printf("Exception %x %p %p\n", info->ExceptionCode,
- info->ExceptionInformation[0], info->ExceptionInformation[1]);
+ runtime·printf("Exception %x %p %p %p\n", info->ExceptionCode,
+ info->ExceptionInformation[0], info->ExceptionInformation[1], r->Rip);
+
runtime·printf("PC=%X\n", r->Rip);
if(m->lockedg != nil && m->ncgo > 0 && gp == m->g0) {
@@ -92,7 +122,7 @@ runtime·sighandler(ExceptionRecord *info, Context *r, G *gp)
runtime·crash();
runtime·exit(2);
- return 0;
+ return -1; // not reached
}
void
@@ -108,7 +138,7 @@ runtime·sigdisable(uint32 sig)
}
void
-runtime·dosigprof(Context *r, G *gp)
+runtime·dosigprof(Context *r, G *gp, M *mp)
{
- runtime·sigprof((uint8*)r->Rip, (uint8*)r->Rsp, nil, gp);
+ runtime·sigprof((uint8*)r->Rip, (uint8*)r->Rsp, nil, gp, mp);
}
diff --git a/src/pkg/runtime/panic.c b/src/pkg/runtime/panic.c
index 8227a444d..f577b37b5 100644
--- a/src/pkg/runtime/panic.c
+++ b/src/pkg/runtime/panic.c
@@ -13,108 +13,63 @@
uint32 runtime·panicking;
static Lock paniclk;
-enum
-{
- DeferChunkSize = 2048
-};
+// Each P holds a pool of defers with arg sizes of 8, 24, 40, 56 and 72 bytes.
+// The memory block is 40 bytes (24 on 32-bit systems) larger due to the Defer header.
+// This maps exactly to malloc size classes.
+
+// defer size class for arg size sz
+#define DEFERCLASS(sz) (((sz)+7)>>4)
+// total size of memory block for defer with arg size sz
+#define TOTALSIZE(sz) (sizeof(Defer) - sizeof(((Defer*)nil)->args) + ROUND(sz, sizeof(uintptr)))
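Concretely, DEFERCLASS buckets arg sizes in 16-byte steps (after adding 7), so the sizes 8, 24, 40, 56 and 72 named above are exactly the largest arg sizes in classes 0 through 4. A small Go rendering of the macro, for illustration only:

	// deferclass mirrors DEFERCLASS(sz) above: ((sz)+7)>>4.
	func deferclass(siz uintptr) uintptr { return (siz + 7) >> 4 }

	// deferclass(8) == 0, deferclass(24) == 1, deferclass(40) == 2,
	// deferclass(56) == 3, deferclass(72) == 4; deferclass(73) == 5
	// falls outside the pool and is malloc'ed directly.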
-// Allocate a Defer, usually as part of the larger frame of deferred functions.
-// Each defer must be released with both popdefer and freedefer.
+// Allocate a Defer, usually using per-P pool.
+// Each defer must be released with freedefer.
static Defer*
newdefer(int32 siz)
{
- int32 total;
- DeferChunk *c;
+ int32 total, sc;
Defer *d;
-
- c = g->dchunk;
- total = sizeof(*d) + ROUND(siz, sizeof(uintptr)) - sizeof(d->args);
- if(c == nil || total > DeferChunkSize - c->off) {
- if(total > DeferChunkSize / 2) {
- // Not worth putting in any chunk.
- // Allocate a separate block.
- d = runtime·malloc(total);
- d->siz = siz;
- d->special = 1;
- d->free = 1;
- d->link = g->defer;
- g->defer = d;
- return d;
- }
-
- // Cannot fit in current chunk.
- // Switch to next chunk, allocating if necessary.
- c = g->dchunknext;
- if(c == nil)
- c = runtime·malloc(DeferChunkSize);
- c->prev = g->dchunk;
- c->off = sizeof(*c);
- g->dchunk = c;
- g->dchunknext = nil;
+ P *p;
+
+ d = nil;
+ sc = DEFERCLASS(siz);
+ if(sc < nelem(p->deferpool)) {
+ p = m->p;
+ d = p->deferpool[sc];
+ if(d)
+ p->deferpool[sc] = d->link;
+ }
+ if(d == nil) {
+ // The deferpool is empty, or this defer is too big for the pool.
+ total = TOTALSIZE(siz);
+ d = runtime·malloc(total);
}
-
- d = (Defer*)((byte*)c + c->off);
- c->off += total;
d->siz = siz;
d->special = 0;
- d->free = 0;
d->link = g->defer;
g->defer = d;
- return d;
-}
-
-// Pop the current defer from the defer stack.
-// Its contents are still valid until the goroutine begins executing again.
-// In particular it is safe to call reflect.call(d->fn, d->argp, d->siz) after
-// popdefer returns.
-static void
-popdefer(void)
-{
- Defer *d;
- DeferChunk *c;
- int32 total;
-
- d = g->defer;
- if(d == nil)
- runtime·throw("runtime: popdefer nil");
- g->defer = d->link;
- if(d->special) {
- // Nothing else to do.
- return;
- }
- total = sizeof(*d) + ROUND(d->siz, sizeof(uintptr)) - sizeof(d->args);
- c = g->dchunk;
- if(c == nil || (byte*)d+total != (byte*)c+c->off)
- runtime·throw("runtime: popdefer phase error");
- c->off -= total;
- if(c->off == sizeof(*c)) {
- // Chunk now empty, so pop from stack.
- // Save in dchunknext both to help with pingponging between frames
- // and to make sure d is still valid on return.
- if(g->dchunknext != nil)
- runtime·free(g->dchunknext);
- g->dchunknext = c;
- g->dchunk = c->prev;
- }
+ return d;
}
// Free the given defer.
-// For defers in the per-goroutine chunk this just clears the saved arguments.
-// For large defers allocated on the heap, this frees them.
// The defer cannot be used after this call.
static void
freedefer(Defer *d)
{
- int32 total;
+ int32 sc;
+ P *p;
- if(d->special) {
- if(d->free)
- runtime·free(d);
- } else {
- // Wipe out any possible pointers in argp/pc/fn/args.
- total = sizeof(*d) + ROUND(d->siz, sizeof(uintptr)) - sizeof(d->args);
- runtime·memclr((byte*)d, total);
- }
+ if(d->special)
+ return;
+ sc = DEFERCLASS(d->siz);
+ if(sc < nelem(p->deferpool)) {
+ p = m->p;
+ d->link = p->deferpool[sc];
+ p->deferpool[sc] = d;
+ // No need to wipe out pointers in argp/pc/fn/args,
+ // because we empty the pool before GC.
+ } else
+ runtime·free(d);
}
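newdefer and freedefer together form a per-P, size-classed free list: get pops from the class's list, put pushes back, and anything too large bypasses the pool. A hedged, self-contained Go analogue (names are illustrative; the real pool hangs off the runtime's P and is emptied before GC, which is why freed entries need no scrubbing):

	package main

	type deferRec struct {
		siz  int
		link *deferRec
	}

	// deferPool keeps one free list per defer size class, like
	// p->deferpool above.
	type deferPool struct {
		free [5]*deferRec
	}

	// get pops a record from class sc; nil means the caller allocates.
	func (p *deferPool) get(sc int) *deferRec {
		d := p.free[sc]
		if d != nil {
			p.free[sc] = d.link
		}
		return d
	}

	// put returns a record to class sc for reuse.
	func (p *deferPool) put(sc int, d *deferRec) {
		d.link = p.free[sc]
		p.free[sc] = d
	}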
// Create a new deferred function fn with siz bytes of arguments.
@@ -157,14 +112,12 @@ runtime·deferproc(int32 siz, FuncVal *fn, ...)
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.
-//
-// The ... in the prototype keeps the compiler from declaring
-// an argument frame size. deferreturn is a very special function,
-// and if the runtime ever asks for its frame size, that means
-// the traceback routines are probably broken.
+
+// The single argument isn't actually used; it just has its address
+// taken so it can be matched against pending defers.
#pragma textflag NOSPLIT
void
-runtime·deferreturn(uintptr arg0, ...)
+runtime·deferreturn(uintptr arg0)
{
Defer *d;
byte *argp;
@@ -184,7 +137,7 @@ runtime·deferreturn(uintptr arg0, ...)
m->locks++;
runtime·memmove(argp, d->args, d->siz);
fn = d->fn;
- popdefer();
+ g->defer = d->link;
freedefer(d);
m->locks--;
if(m->locks == 0 && g->preempt)
@@ -192,6 +145,37 @@ runtime·deferreturn(uintptr arg0, ...)
runtime·jmpdefer(fn, argp);
}
+// Ensure that defer arg sizes that map to the same defer size class
+// also map to the same malloc size class.
+void
+runtime·testdefersizes(void)
+{
+ P *p;
+ int32 i, siz, defersc, mallocsc;
+ int32 map[nelem(p->deferpool)];
+
+ for(i=0; i<nelem(p->deferpool); i++)
+ map[i] = -1;
+ for(i=0;; i++) {
+ defersc = DEFERCLASS(i);
+ if(defersc >= nelem(p->deferpool))
+ break;
+ siz = TOTALSIZE(i);
+ mallocsc = runtime·SizeToClass(siz);
+ siz = runtime·class_to_size[mallocsc];
+ // runtime·printf("defer class %d: arg size %d, block size %d(%d)\n", defersc, i, siz, mallocsc);
+ if(map[defersc] < 0) {
+ map[defersc] = mallocsc;
+ continue;
+ }
+ if(map[defersc] != mallocsc) {
+ runtime·printf("bad defer size class: i=%d siz=%d mallocsc=%d/%d\n",
+ i, siz, map[defersc], mallocsc);
+ runtime·throw("bad defer size class");
+ }
+ }
+}
+
// Run all deferred functions for the current goroutine.
static void
rundefer(void)
@@ -199,8 +183,8 @@ rundefer(void)
Defer *d;
while((d = g->defer) != nil) {
- popdefer();
- reflect·call(d->fn, (byte*)d->args, d->siz);
+ g->defer = d->link;
+ reflect·call(d->fn, (byte*)d->args, d->siz, d->siz);
freedefer(d);
}
}
@@ -221,37 +205,65 @@ printpanics(Panic *p)
}
static void recovery(G*);
+static void abortpanic(Panic*);
+static FuncVal abortpanicV = { (void(*)(void))abortpanic };
// The implementation of the predeclared function panic.
void
runtime·panic(Eface e)
{
- Defer *d;
- Panic *p;
+ Defer *d, dabort;
+ Panic p;
void *pc, *argp;
-
- p = runtime·mal(sizeof *p);
- p->arg = e;
- p->link = g->panic;
- p->stackbase = g->stackbase;
- g->panic = p;
+
+ runtime·memclr((byte*)&p, sizeof p);
+ p.arg = e;
+ p.link = g->panic;
+ p.stackbase = g->stackbase;
+ g->panic = &p;
+
+ dabort.fn = &abortpanicV;
+ dabort.siz = sizeof(&p);
+ dabort.args[0] = &p;
+ dabort.argp = NoArgs;
+ dabort.special = true;
for(;;) {
d = g->defer;
if(d == nil)
break;
// take defer off list in case of recursive panic
- popdefer();
- g->ispanic = true; // rock for newstack, where reflect.newstackcall ends up
+ g->defer = d->link;
+ g->ispanic = true; // rock for runtime·newstack, where runtime·newstackcall ends up
argp = d->argp;
pc = d->pc;
+
+ // The deferred function may cause another panic,
+ // so newstackcall may not return. Set up a defer
+ // to mark this panic aborted if that happens.
+ dabort.link = g->defer;
+ g->defer = &dabort;
+ p.defer = d;
+
runtime·newstackcall(d->fn, (byte*)d->args, d->siz);
+
+ // Newstackcall did not panic. Remove dabort.
+ if(g->defer != &dabort)
+ runtime·throw("bad defer entry in panic");
+ g->defer = dabort.link;
+
freedefer(d);
- if(p->recovered) {
- g->panic = p->link;
+ if(p.recovered) {
+ g->panic = p.link;
+ // Aborted panics are marked but remain on the g->panic list.
+ // Recovery will unwind the stack frames containing their Panic structs.
+ // Remove them from the list and free the associated defers.
+ while(g->panic && g->panic->aborted) {
+ freedefer(g->panic->defer);
+ g->panic = g->panic->link;
+ }
if(g->panic == nil) // must be done with signal
g->sig = 0;
- runtime·free(p);
// Pass information about recovering frame to recovery.
g->sigcode0 = (uintptr)argp;
g->sigcode1 = (uintptr)pc;
@@ -263,7 +275,14 @@ runtime·panic(Eface e)
// ran out of deferred calls - old-school panic now
runtime·startpanic();
printpanics(g->panic);
- runtime·dopanic(0);
+ runtime·dopanic(0); // should not return
+ runtime·exit(1); // not reached
+}
+
+static void
+abortpanic(Panic *p)
+{
+ p->aborted = true;
}
// Unwind the stack after a deferred function calls recover
@@ -320,10 +339,7 @@ runtime·unwindstack(G *gp, byte *sp)
gp->stackbase = top->stackbase;
gp->stackguard = top->stackguard;
gp->stackguard0 = gp->stackguard;
- if(top->free != 0) {
- gp->stacksize -= top->free;
- runtime·stackfree(stk, top->free);
- }
+ runtime·stackfree(gp, stk, top);
}
if(sp != nil && (sp < (byte*)gp->stackguard - StackGuard || (byte*)gp->stackbase < sp)) {
@@ -337,10 +353,11 @@ runtime·unwindstack(G *gp, byte *sp)
// find the stack segment of its caller.
#pragma textflag NOSPLIT
void
-runtime·recover(byte *argp, Eface ret)
+runtime·recover(byte *argp, GoOutput retbase, ...)
{
Panic *p;
Stktop *top;
+ Eface *ret;
// Must be an unrecovered panic in progress.
// Must be on a stack segment created for a deferred call during a panic.
@@ -351,16 +368,16 @@ runtime·recover(byte *argp, Eface ret)
// do not count as official calls to adjust what we consider the top frame
// while they are active on the stack. The linker emits adjustments of
// g->panicwrap in the prologue and epilogue of functions marked as wrappers.
+ ret = (Eface*)&retbase;
top = (Stktop*)g->stackbase;
p = g->panic;
if(p != nil && !p->recovered && top->panic && argp == (byte*)top - top->argsize - g->panicwrap) {
p->recovered = 1;
- ret = p->arg;
+ *ret = p->arg;
} else {
- ret.type = nil;
- ret.data = nil;
+ ret->type = nil;
+ ret->data = nil;
}
- FLUSH(&ret);
}
void
@@ -371,18 +388,34 @@ runtime·startpanic(void)
m->mallocing = 1; // tell rest of panic not to try to malloc
} else if(m->mcache == nil) // can happen if called from signal handler or throw
m->mcache = runtime·allocmcache();
- if(m->dying) {
+ switch(m->dying) {
+ case 0:
+ m->dying = 1;
+ if(g != nil)
+ g->writebuf = nil;
+ runtime·xadd(&runtime·panicking, 1);
+ runtime·lock(&paniclk);
+ if(runtime·debug.schedtrace > 0 || runtime·debug.scheddetail > 0)
+ runtime·schedtrace(true);
+ runtime·freezetheworld();
+ return;
+ case 1:
+ // Something failed while panicking, probably the print of the
+ // argument to panic(). Just print a stack trace and exit.
+ m->dying = 2;
runtime·printf("panic during panic\n");
+ runtime·dopanic(0);
runtime·exit(3);
+ case 2:
+ // This is a genuine bug in the runtime: we couldn't even
+ // print the stack trace successfully.
+ m->dying = 3;
+ runtime·printf("stack trace unavailable\n");
+ runtime·exit(4);
+ default:
+ // Can't even print! Just exit.
+ runtime·exit(5);
}
- m->dying = 1;
- if(g != nil)
- g->writebuf = nil;
- runtime·xadd(&runtime·panicking, 1);
- runtime·lock(&paniclk);
- if(runtime·debug.schedtrace > 0 || runtime·debug.scheddetail > 0)
- runtime·schedtrace(true);
- runtime·freezetheworld();
}
void
@@ -453,6 +486,29 @@ runtime·throwinit(void)
runtime·throw("recursive call during initialization - linker skew");
}
+bool
+runtime·canpanic(G *gp)
+{
+ byte g;
+
+ USED(&g); // don't use the global g; it points at gsignal
+
+ // Is it okay for gp to panic instead of crashing the program?
+ // Yes, as long as it is running Go code, not runtime code,
+ // and not stuck in a system call.
+ if(gp == nil || gp != m->curg)
+ return false;
+ if(m->locks-m->softfloat != 0 || m->mallocing != 0 || m->throwing != 0 || m->gcing != 0 || m->dying != 0)
+ return false;
+ if(gp->status != Grunning || gp->syscallsp != 0)
+ return false;
+#ifdef GOOS_windows
+ if(m->libcallsp != 0)
+ return false;
+#endif
+ return true;
+}
+
void
runtime·throw(int8 *s)
{
@@ -470,6 +526,16 @@ runtime·panicstring(int8 *s)
{
Eface err;
+ // m->softfloat is set during software floating point,
+ // which might cause a fault during a memory load.
+ // It increments m->locks to avoid preemption.
+ // If we're panicking, the software floating point frames
+ // will be unwound, so decrement m->locks as they would.
+ if(m->softfloat) {
+ m->locks--;
+ m->softfloat = 0;
+ }
+
if(m->mallocing) {
runtime·printf("panic: %s\n", s);
runtime·throw("panic during malloc");
@@ -478,6 +544,10 @@ runtime·panicstring(int8 *s)
runtime·printf("panic: %s\n", s);
runtime·throw("panic during gc");
}
+ if(m->locks) {
+ runtime·printf("panic: %s\n", s);
+ runtime·throw("panic holding locks");
+ }
runtime·newErrorCString(s, &err);
runtime·panic(err);
}
@@ -488,3 +558,9 @@ runtime·Goexit(void)
rundefer();
runtime·goexit();
}
+
+void
+runtime·panicdivide(void)
+{
+ runtime·panicstring("integer divide by zero");
+}
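runtime·panicdivide gives the compiler a single runtime entry point for the integer divide-by-zero check. From user code the observable behavior is the usual run-time panic; for example:

	// div(1, 0) panics with "integer divide by zero", the exact
	// string runtime·panicdivide passes to panicstring above.
	func div(a, b int) int { return a / b }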
diff --git a/src/pkg/runtime/parfor.c b/src/pkg/runtime/parfor.c
index ceaac8bc9..4706e0a43 100644
--- a/src/pkg/runtime/parfor.c
+++ b/src/pkg/runtime/parfor.c
@@ -33,15 +33,6 @@ runtime·parforalloc(uint32 nthrmax)
return desc;
}
-// For testing from Go
-// func parforalloc2(nthrmax uint32) *ParFor
-void
-runtime·parforalloc2(uint32 nthrmax, ParFor *desc)
-{
- desc = runtime·parforalloc(nthrmax);
- FLUSH(&desc);
-}
-
void
runtime·parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32))
{
@@ -75,14 +66,6 @@ runtime·parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait,
}
}
-// For testing from Go
-// func parforsetup2(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32))
-void
-runtime·parforsetup2(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void *body)
-{
- runtime·parforsetup(desc, nthr, n, ctx, wait, *(void(**)(ParFor*, uint32))body);
-}
-
void
runtime·parfordo(ParFor *desc)
{
@@ -207,13 +190,10 @@ exit:
me->nsleep = 0;
}
-// For testing from Go
-// func parforiters(desc *ParFor, tid uintptr) (uintptr, uintptr)
+// For testing from Go.
void
-runtime·parforiters(ParFor *desc, uintptr tid, uintptr start, uintptr end)
+runtime·parforiters(ParFor *desc, uintptr tid, uintptr *start, uintptr *end)
{
- start = (uint32)desc->thr[tid].pos;
- end = (uint32)(desc->thr[tid].pos>>32);
- FLUSH(&start);
- FLUSH(&end);
+ *start = (uint32)desc->thr[tid].pos;
+ *end = (uint32)(desc->thr[tid].pos>>32);
}
diff --git a/src/pkg/runtime/pprof/pprof.go b/src/pkg/runtime/pprof/pprof.go
index 3b8428519..26aa0b8be 100644
--- a/src/pkg/runtime/pprof/pprof.go
+++ b/src/pkg/runtime/pprof/pprof.go
@@ -20,7 +20,7 @@ import (
"text/tabwriter"
)
-// BUG(rsc): Profiles are incomplete and inaccuate on NetBSD, OpenBSD, and OS X.
+// BUG(rsc): Profiles are incomplete and inaccurate on NetBSD and OS X.
// See http://golang.org/issue/6047 for details.
// A Profile is a collection of stack traces showing the call sequences
diff --git a/src/pkg/runtime/pprof/pprof_test.go b/src/pkg/runtime/pprof/pprof_test.go
index eb76b93c4..aba538e75 100644
--- a/src/pkg/runtime/pprof/pprof_test.go
+++ b/src/pkg/runtime/pprof/pprof_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !nacl
+
package pprof_test
import (
@@ -138,7 +140,11 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
t.Logf("no CPU profile samples collected")
ok = false
}
- min := total / uintptr(len(have)) / 3
+ // We'd like to check a reasonable minimum, like
+ // total / len(have) / smallconstant, but this test is
+ // pretty flaky (see bug 7095). So we'll just test to
+ // make sure we got at least one sample.
+ min := uintptr(1)
for i, name := range need {
if have[i] < min {
t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
@@ -189,9 +195,6 @@ func TestCPUProfileWithFork(t *testing.T) {
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("flaky test; see http://golang.org/issue/6417")
- }
// How much to try. These defaults take about 1 second
// on a 2012 MacBook Pro. The ones in short mode take
// about 0.1 seconds.
@@ -217,7 +220,7 @@ func TestGoroutineSwitch(t *testing.T) {
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
f := runtime.FuncForPC(stk[1])
- if f != nil && f.Name() == "System" {
+ if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode") {
return
}
}
@@ -264,9 +267,9 @@ func TestMathBigDivide(t *testing.T) {
// Operating systems that are expected to fail the tests. See issue 6047.
var badOS = map[string]bool{
- "darwin": true,
- "netbsd": true,
- "openbsd": true,
+ "darwin": true,
+ "netbsd": true,
+ "plan9": true,
}
func TestBlockProfile(t *testing.T) {
@@ -278,31 +281,31 @@ func TestBlockProfile(t *testing.T) {
tests := [...]TestCase{
{"chan recv", blockChanRecv, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
{"chan send", blockChanSend, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
{"chan close", blockChanClose, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
{"select recv async", blockSelectRecvAsync, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
{"select send sync", blockSelectSendSync, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
diff --git a/src/pkg/runtime/print.c b/src/pkg/runtime/print.c
index 8de3ae4fa..a04708fae 100644
--- a/src/pkg/runtime/print.c
+++ b/src/pkg/runtime/print.c
@@ -63,67 +63,85 @@ runtime·printf(int8 *s, ...)
vprintf(s, arg);
}
+#pragma textflag NOSPLIT
+int32
+runtime·snprintf(byte *buf, int32 n, int8 *s, ...)
+{
+ byte *arg;
+ int32 m;
+
+ arg = (byte*)(&s+1);
+ g->writebuf = buf;
+ g->writenbuf = n-1;
+ vprintf(s, arg);
+ *g->writebuf = '\0';
+ m = g->writebuf - buf;
+ g->writenbuf = 0;
+ g->writebuf = nil;
+ return m;
+}
+
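runtime·snprintf reuses the g->writebuf redirection that print already supports, reserving one byte for the terminating NUL. A hedged Go analogue of the same contract, using fmt (which the runtime itself cannot):

	package main

	import "fmt"

	// snprintf formats into buf (len(buf) must be >= 1), truncates if
	// necessary, always NUL-terminates, and returns the number of
	// bytes written excluding the NUL, like runtime·snprintf above.
	func snprintf(buf []byte, format string, args ...any) int {
		s := fmt.Sprintf(format, args...)
		n := copy(buf[:len(buf)-1], s)
		buf[n] = 0
		return n
	}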
// Very simple printf. Only for debugging prints.
// Do not add to this without checking with Rob.
static void
vprintf(int8 *s, byte *base)
{
int8 *p, *lp;
- uintptr arg, narg;
+ uintptr arg, siz;
byte *v;
//runtime·lock(&debuglock);
lp = p = s;
- arg = 0;
+ arg = (uintptr)base;
for(; *p; p++) {
if(*p != '%')
continue;
if(p > lp)
gwrite(lp, p-lp);
p++;
- narg = 0;
+ siz = 0;
switch(*p) {
case 't':
case 'c':
- narg = arg + 1;
+ siz = 1;
break;
case 'd': // 32-bit
case 'x':
arg = ROUND(arg, 4);
- narg = arg + 4;
+ siz = 4;
break;
case 'D': // 64-bit
case 'U':
case 'X':
case 'f':
- arg = ROUND(arg, sizeof(uintptr));
- narg = arg + 8;
+ arg = ROUND(arg, sizeof(uintreg));
+ siz = 8;
break;
case 'C':
- arg = ROUND(arg, sizeof(uintptr));
- narg = arg + 16;
+ arg = ROUND(arg, sizeof(uintreg));
+ siz = 16;
break;
case 'p': // pointer-sized
case 's':
arg = ROUND(arg, sizeof(uintptr));
- narg = arg + sizeof(uintptr);
+ siz = sizeof(uintptr);
break;
case 'S': // pointer-aligned but bigger
arg = ROUND(arg, sizeof(uintptr));
- narg = arg + sizeof(String);
+ siz = sizeof(String);
break;
case 'a': // pointer-aligned but bigger
arg = ROUND(arg, sizeof(uintptr));
- narg = arg + sizeof(Slice);
+ siz = sizeof(Slice);
break;
case 'i': // pointer-aligned but bigger
case 'e':
arg = ROUND(arg, sizeof(uintptr));
- narg = arg + sizeof(Eface);
+ siz = sizeof(Eface);
break;
}
- v = base+arg;
+ v = (byte*)arg;
switch(*p) {
case 'a':
runtime·printslice(*(Slice*)v);
@@ -171,7 +189,7 @@ vprintf(int8 *s, byte *base)
runtime·printhex(*(uint64*)v);
break;
}
- arg = narg;
+ arg += siz;
lp = p+1;
}
if(p > lp)
@@ -236,7 +254,10 @@ runtime·printfloat(float64 v)
n = 7; // digits printed
e = 0; // exp
s = 0; // sign
- if(v != 0) {
+ if(v == 0) {
+ if(1/v == runtime·neginf)
+ s = 1;
+ } else {
// sign
if(v < 0) {
v = -v;
@@ -345,7 +366,7 @@ runtime·printhex(uint64 v)
void
runtime·printpointer(void *p)
{
- runtime·printhex((uint64)p);
+ runtime·printhex((uintptr)p);
}
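The printfloat hunk above fixes the sign of negative zero: when v == 0, the expression 1/v evaluates to -Inf exactly when v is IEEE -0. The same trick in plain Go, as a hedged sketch:

	package main

	import "math"

	// isNegZero reports whether v is negative zero, using the 1/v
	// test that runtime·printfloat now applies above.
	func isNegZero(v float64) bool {
		return v == 0 && math.IsInf(1/v, -1)
	}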
void
@@ -370,11 +391,3 @@ runtime·printnl(void)
{
gwrite("\n", 1);
}
-
-void
-runtime·typestring(Eface e, String s)
-{
- s = *e.type->string;
- FLUSH(&s);
-}
-
diff --git a/src/pkg/runtime/proc.c b/src/pkg/runtime/proc.c
index ed3e1e73e..914a02e0b 100644
--- a/src/pkg/runtime/proc.c
+++ b/src/pkg/runtime/proc.c
@@ -58,17 +58,23 @@ struct Sched {
int32 profilehz; // cpu profiling rate
};
-// The max value of GOMAXPROCS.
-// There are no fundamental restrictions on the value.
-enum { MaxGomaxprocs = 1<<8 };
+enum
+{
+ // The max value of GOMAXPROCS.
+ // There are no fundamental restrictions on the value.
+ MaxGomaxprocs = 1<<8,
+
+ // Number of goroutine ids to grab from runtime·sched.goidgen to the local per-P cache at once.
+ // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
+ GoidCacheBatch = 16,
+};
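GoidCacheBatch trades one shared atomic increment per goroutine for one per sixteen: each P reserves a batch of ids and hands them out without further synchronization. A hedged stand-alone analogue (names illustrative; atomic.Uint64 is from sync/atomic):

	package main

	import "sync/atomic"

	var goidgen atomic.Uint64 // shared counter, like runtime·sched.goidgen

	type idCache struct{ next, end uint64 }

	// nextID returns a unique id, refilling from the shared counter
	// in batches of 16, as newproc1 does with p->goidcache below.
	func (c *idCache) nextID() uint64 {
		if c.next == c.end {
			c.end = goidgen.Add(16)
			c.next = c.end - 16
		}
		id := c.next
		c.next++
		return id
	}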
Sched runtime·sched;
int32 runtime·gomaxprocs;
uint32 runtime·needextram;
bool runtime·iscgo;
M runtime·m0;
-G runtime·g0; // idle goroutine for m0
-G* runtime·allg;
+G runtime·g0; // idle goroutine for m0
G* runtime·lastg;
M* runtime·allm;
M* runtime·extram;
@@ -76,10 +82,15 @@ int8* runtime·goos;
int32 runtime·ncpu;
static int32 newprocs;
+static Lock allglock; // the following vars are protected by this lock or by stoptheworld
+G** runtime·allg;
+uintptr runtime·allglen;
+static uintptr allgcap;
+
void runtime·mstart(void);
static void runqput(P*, G*);
static G* runqget(P*);
-static void runqgrow(P*);
+static bool runqputslow(P*, G*, uint32, uint32);
static G* runqsteal(P*, P*);
static void mput(M*);
static M* mget(void);
@@ -106,6 +117,7 @@ static void gfput(P*, G*);
static G* gfget(P*);
static void gfpurge(P*);
static void globrunqput(G*);
+static void globrunqputbatch(G*, G*, int32);
static G* globrunqget(P*, int32);
static P* pidleget(void);
static void pidleput(P*);
@@ -114,6 +126,7 @@ static bool preemptall(void);
static bool preemptone(P*);
static bool exitsyscallfast(void);
static bool haveexperiment(int8*);
+static void allgadd(G*);
// The bootstrap sequence is:
//
@@ -131,9 +144,9 @@ runtime·schedinit(void)
Eface i;
runtime·sched.maxmcount = 10000;
- runtime·precisestack = haveexperiment("precisestack");
+ runtime·precisestack = true; // haveexperiment("precisestack");
- runtime·mprofinit();
+ runtime·symtabinit();
runtime·mallocinit();
mcommoninit(m);
@@ -142,14 +155,15 @@ runtime·schedinit(void)
// in a fault during a garbage collection, it will not
// need to allocated memory.
runtime·newErrorCString(0, &i);
+
+ // Initialize the cached gotraceback value, since
+ // gotraceback calls getenv, which mallocs on Plan 9.
+ runtime·gotraceback(nil);
runtime·goargs();
runtime·goenvs();
runtime·parsedebugvars();
- // Allocate internal symbol table representation now, we need it for GC anyway.
- runtime·symtabinit();
-
runtime·sched.lastpoll = runtime·nanotime();
procs = 1;
p = runtime·getenv("GOMAXPROCS");
@@ -161,6 +175,11 @@ runtime·schedinit(void)
runtime·allp = runtime·malloc((MaxGomaxprocs+1)*sizeof(runtime·allp[0]));
procresize(procs);
+ runtime·copystack = runtime·precisestack;
+ p = runtime·getenv("GOCOPYSTACK");
+ if(p != nil && !runtime·strcmp(p, (byte*)"0"))
+ runtime·copystack = false;
+
mstats.enablegc = 1;
if(raceenabled)
@@ -175,6 +194,15 @@ static FuncVal scavenger = {runtime·MHeap_Scavenger};
static FuncVal initDone = { runtime·unlockOSThread };
// The main goroutine.
+// Note: C frames in general are not copyable during stack growth, for two reasons:
+// 1) We don't know where in a frame to find pointers to other stack locations.
+// 2) There's no guarantee that globals or heap values do not point into the frame.
+//
+// The C frame for runtime.main is copyable, because:
+// 1) There are no pointers to other stack locations in the frame
+// (d.fn points at a global, d.link is nil, d.argp is -1).
+// 2) The only pointer into this frame is from the defer chain,
+// which is explicitly handled during stack copying.
void
runtime·main(void)
{
@@ -202,9 +230,8 @@ runtime·main(void)
d.fn = &initDone;
d.siz = 0;
d.link = g->defer;
- d.argp = (void*)-1;
+ d.argp = NoArgs;
d.special = true;
- d.free = false;
g->defer = &d;
if(m != &runtime·m0)
@@ -237,6 +264,7 @@ void
runtime·goroutineheader(G *gp)
{
int8 *status;
+ int64 waitfor;
switch(gp->status) {
case Gidle:
@@ -261,7 +289,16 @@ runtime·goroutineheader(G *gp)
status = "???";
break;
}
- runtime·printf("goroutine %D [%s]:\n", gp->goid, status);
+
+ // approx time the G is blocked, in minutes
+ waitfor = 0;
+ if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince != 0)
+ waitfor = (runtime·nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
+
+ if(waitfor < 1)
+ runtime·printf("goroutine %D [%s]:\n", gp->goid, status);
+ else
+ runtime·printf("goroutine %D [%s, %D minutes]:\n", gp->goid, status, waitfor);
}
void
@@ -269,6 +306,7 @@ runtime·tracebackothers(G *me)
{
G *gp;
int32 traceback;
+ uintptr i;
traceback = runtime·gotraceback(nil);
@@ -279,7 +317,9 @@ runtime·tracebackothers(G *me)
runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
}
- for(gp = runtime·allg; gp != nil; gp = gp->alllink) {
+ runtime·lock(&allglock);
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
if(gp == me || gp == m->curg || gp->status == Gdead)
continue;
if(gp->issystem && traceback < 2)
@@ -292,6 +332,7 @@ runtime·tracebackothers(G *me)
} else
runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
}
+ runtime·unlock(&allglock);
}
static void
@@ -562,15 +603,6 @@ runtime·starttheworld(void)
void
runtime·mstart(void)
{
-#ifdef GOOS_windows
-#ifdef GOARCH_386
- // It is used by windows-386 only. Unfortunately, seh needs
- // to be located on os stack, and mstart runs on os stack
- // for both m0 and m.
- SEH seh;
-#endif
-#endif
-
if(g != m->g0)
runtime·throw("bad runtime·mstart");
@@ -580,11 +612,6 @@ runtime·mstart(void)
runtime·gosave(&m->g0->sched);
m->g0->sched.pc = (uintptr)-1; // make sure it is never used
m->g0->stackguard = m->g0->stackguard0; // cgo sets only stackguard0, copy it to stackguard
-#ifdef GOOS_windows
-#ifdef GOARCH_386
- m->seh = &seh;
-#endif
-#endif
runtime·asminit();
runtime·minit();
@@ -620,6 +647,7 @@ struct CgoThreadStart
{
M *m;
G *g;
+ uintptr *tls;
void (*fn)(void);
};
@@ -643,9 +671,9 @@ runtime·allocm(P *p)
mp = runtime·cnew(mtype);
mcommoninit(mp);
- // In case of cgo, pthread_create will make us a stack.
+ // In case of cgo or Solaris, pthread_create will make us a stack.
// Windows will layout sched stack on OS stack.
- if(runtime·iscgo || Windows)
+ if(runtime·iscgo || Solaris || Windows)
mp->g0 = runtime·malg(-1);
else
mp->g0 = runtime·malg(8192);
@@ -659,6 +687,21 @@ runtime·allocm(P *p)
return mp;
}
+static G*
+allocg(void)
+{
+ G *gp;
+ static Type *gtype;
+
+ if(gtype == nil) {
+ Eface e;
+ runtime·gc_g_ptr(&e);
+ gtype = ((PtrType*)e.type)->elem;
+ }
+ gp = runtime·cnew(gtype);
+ return gp;
+}
+
static M* lockextra(bool nilokay);
static void unlockextra(M*);
@@ -735,16 +778,6 @@ runtime·needm(byte x)
g->stackguard = (uintptr)(&x - 32*1024);
g->stackguard0 = g->stackguard;
-#ifdef GOOS_windows
-#ifdef GOARCH_386
- // On windows/386, we need to put an SEH frame (two words)
- // somewhere on the current stack. We are called from cgocallback_gofunc
- // and we know that it will leave two unused words below m->curg->sched.sp.
- // Use those.
- m->seh = (SEH*)((uintptr*)&x + 1);
-#endif
-#endif
-
// Initialize this thread to use the m.
runtime·asminit();
runtime·minit();
@@ -783,13 +816,7 @@ runtime·newextram(void)
if(raceenabled)
gp->racectx = runtime·racegostart(runtime·newextram);
// put on allg for garbage collector
- runtime·lock(&runtime·sched);
- if(runtime·lastg == nil)
- runtime·allg = gp;
- else
- runtime·lastg->alllink = gp;
- runtime·lastg = gp;
- runtime·unlock(&runtime·sched);
+ allgadd(gp);
// Add m to the extra list.
mnext = lockextra(true);
@@ -828,12 +855,6 @@ runtime·dropm(void)
// Undo whatever initialization minit did during needm.
runtime·unminit();
-#ifdef GOOS_windows
-#ifdef GOARCH_386
- m->seh = nil; // reset dangling typed pointer
-#endif
-#endif
-
// Clear m and g, and return m to the extra list.
// After the call to setmg we can only call nosplit functions.
mp = m;
@@ -904,6 +925,7 @@ newm(void(*fn)(void), P *p)
runtime·throw("_cgo_thread_start missing");
ts.m = mp;
ts.g = mp->g0;
+ ts.tls = mp->tls;
ts.fn = runtime·mstart;
runtime·asmcgocall(_cgo_thread_start, &ts);
return;
@@ -948,7 +970,7 @@ mspinning(void)
}
// Schedules some M to run the p (creates an M if necessary).
-// If p==nil, tries to get an idle P, if no idle P's returns false.
+// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
static void
startm(P *p, bool spinning)
{
@@ -1112,6 +1134,7 @@ execute(G *gp)
runtime·throw("execute: bad g status");
}
gp->status = Grunning;
+ gp->waitsince = 0;
gp->preempt = false;
gp->stackguard0 = gp->stackguard;
m->p->schedtick++;
@@ -1140,6 +1163,8 @@ top:
gcstopm();
goto top;
}
+ if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
+ runtime·ready(gp);
// local runq
gp = runqget(m->p);
if(gp)
@@ -1331,28 +1356,52 @@ top:
execute(gp);
}
-// Puts the current goroutine into a waiting state and unlocks the lock.
-// The goroutine can be made runnable again by calling runtime·ready(gp).
+// Puts the current goroutine into a waiting state and calls unlockf.
+// If unlockf returns false, the goroutine is resumed.
void
-runtime·park(void(*unlockf)(Lock*), Lock *lock, int8 *reason)
+runtime·park(bool(*unlockf)(G*, void*), void *lock, int8 *reason)
{
+ if(g->status != Grunning)
+ runtime·throw("bad g status");
m->waitlock = lock;
m->waitunlockf = unlockf;
g->waitreason = reason;
runtime·mcall(park0);
}
+static bool
+parkunlock(G *gp, void *lock)
+{
+ USED(gp);
+ runtime·unlock(lock);
+ return true;
+}
+
+// Puts the current goroutine into a waiting state and unlocks the lock.
+// The goroutine can be made runnable again by calling runtime·ready(gp).
+void
+runtime·parkunlock(Lock *lock, int8 *reason)
+{
+ runtime·park(parkunlock, lock, reason);
+}
+
// runtime·park continuation on g0.
static void
park0(G *gp)
{
+ bool ok;
+
gp->status = Gwaiting;
gp->m = nil;
m->curg = nil;
if(m->waitunlockf) {
- m->waitunlockf(m->waitlock);
+ ok = m->waitunlockf(gp, m->waitlock);
m->waitunlockf = nil;
m->waitlock = nil;
+ if(!ok) {
+ gp->status = Grunnable;
+ execute(gp); // Schedule it back, never returns.
+ }
}
if(m->lockedg) {
stoplockedm();
@@ -1365,6 +1414,8 @@ park0(G *gp)
void
runtime·gosched(void)
{
+ if(g->status != Grunning)
+ runtime·throw("bad g status");
runtime·mcall(runtime·gosched0);
}
@@ -1393,6 +1444,8 @@ runtime·gosched0(G *gp)
void
runtime·goexit(void)
{
+ if(g->status != Grunning)
+ runtime·throw("bad g status");
if(raceenabled)
runtime·racegoend();
runtime·mcall(goexit0);
@@ -1405,6 +1458,13 @@ goexit0(G *gp)
gp->status = Gdead;
gp->m = nil;
gp->lockedm = nil;
+ gp->paniconfault = 0;
+ gp->defer = nil; // should be nil already, but just in case.
+ gp->panic = nil; // non-nil for Goexit during panic; points at stack-allocated data.
+ gp->writenbuf = 0;
+ gp->writebuf = nil;
+ gp->waitreason = nil;
+ gp->param = nil;
m->curg = nil;
m->lockedg = nil;
if(m->locked & ~LockExternal) {
@@ -1535,6 +1595,7 @@ runtime·exitsyscall(void)
if(g->isbackground) // do not consider blocked scavenger for deadlock detection
incidlelocked(-1);
+ g->waitsince = 0;
if(exitsyscallfast()) {
// There's a cpu for us, so we can run.
m->p->syscalltick++;
@@ -1640,6 +1701,7 @@ exitsyscall0(G *gp)
}
// Called from syscall package before fork.
+#pragma textflag NOSPLIT
void
syscall·runtime_BeforeFork(void)
{
@@ -1648,14 +1710,28 @@ syscall·runtime_BeforeFork(void)
m->locks++;
if(m->profilehz != 0)
runtime·resetcpuprofiler(0);
+
+ // This function is called before fork in the syscall package.
+ // Code between fork and exec must not allocate memory nor even try to grow the stack.
+ // Here we spoil g->stackguard to reliably detect any attempt to grow the stack.
+ // runtime_AfterFork will undo this in the parent process, but not in the child.
+ m->forkstackguard = g->stackguard;
+ g->stackguard0 = StackPreempt-1;
+ g->stackguard = StackPreempt-1;
}
// Called from syscall package after fork in parent.
+#pragma textflag NOSPLIT
void
syscall·runtime_AfterFork(void)
{
int32 hz;
+ // See the comment in runtime_BeforeFork.
+ g->stackguard0 = m->forkstackguard;
+ g->stackguard = m->forkstackguard;
+ m->forkstackguard = 0;
+
hz = runtime·sched.profilehz;
if(hz != 0)
runtime·resetcpuprofiler(hz);
@@ -1669,7 +1745,13 @@ syscall·runtime_AfterFork(void)
static void
mstackalloc(G *gp)
{
- gp->param = runtime·stackalloc((uintptr)gp->param);
+ G *newg;
+ uintptr size;
+
+ newg = (G*)gp->param;
+ size = newg->stacksize;
+ newg->stacksize = 0;
+ gp->param = runtime·stackalloc(newg, size);
runtime·gogo(&gp->sched);
}
@@ -1685,24 +1767,24 @@ runtime·malg(int32 stacksize)
runtime·throw("runtime: bad stack.h");
}
- newg = runtime·malloc(sizeof(G));
+ newg = allocg();
if(stacksize >= 0) {
+ stacksize = runtime·round2(StackSystem + stacksize);
if(g == m->g0) {
// running on scheduler stack already.
- stk = runtime·stackalloc(StackSystem + stacksize);
+ stk = runtime·stackalloc(newg, stacksize);
} else {
// have to call stackalloc on scheduler stack.
- g->param = (void*)(StackSystem + stacksize);
+ newg->stacksize = stacksize;
+ g->param = newg;
runtime·mcall(mstackalloc);
stk = g->param;
g->param = nil;
}
- newg->stacksize = StackSystem + stacksize;
newg->stack0 = (uintptr)stk;
newg->stackguard = (uintptr)stk + StackGuard;
newg->stackguard0 = newg->stackguard;
- newg->stackbase = (uintptr)stk + StackSystem + stacksize - sizeof(Stktop);
- runtime·memclr((byte*)newg->stackbase, sizeof(Stktop));
+ newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop);
}
return newg;
}
@@ -1736,9 +1818,14 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerp
{
byte *sp;
G *newg;
+ P *p;
int32 siz;
//runtime·printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
+ if(fn == nil) {
+ m->throwing = -1; // do not dump full stacks
+ runtime·throw("go of nil func value");
+ }
m->locks++; // disable preemption because it can be holding p in a local var
siz = narg + nret;
siz = (siz+7) & ~7;
@@ -1750,18 +1837,13 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerp
if(siz > StackMin - 1024)
runtime·throw("runtime.newproc: function arguments too large for new goroutine");
- if((newg = gfget(m->p)) != nil) {
+ p = m->p;
+ if((newg = gfget(p)) != nil) {
if(newg->stackguard - StackGuard != newg->stack0)
runtime·throw("invalid stack in newg");
} else {
newg = runtime·malg(StackMin);
- runtime·lock(&runtime·sched);
- if(runtime·lastg == nil)
- runtime·allg = newg;
- else
- runtime·lastg->alllink = newg;
- runtime·lastg = newg;
- runtime·unlock(&runtime·sched);
+ allgadd(newg);
}
sp = (byte*)newg->stackbase;
@@ -1780,11 +1862,15 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerp
runtime·gostartcallfn(&newg->sched, fn);
newg->gopc = (uintptr)callerpc;
newg->status = Grunnable;
- newg->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
+ if(p->goidcache == p->goidcacheend) {
+ p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
+ p->goidcacheend = p->goidcache + GoidCacheBatch;
+ }
+ newg->goid = p->goidcache++;
newg->panicwrap = 0;
if(raceenabled)
newg->racectx = runtime·racegostart((void*)callerpc);
- runqput(m->p, newg);
+ runqput(p, newg);
if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
wakep();
@@ -1794,13 +1880,56 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerp
return newg;
}
+static void
+allgadd(G *gp)
+{
+ G **new;
+ uintptr cap;
+
+ runtime·lock(&allglock);
+ if(runtime·allglen >= allgcap) {
+ cap = 4096/sizeof(new[0]);
+ if(cap < 2*allgcap)
+ cap = 2*allgcap;
+ new = runtime·malloc(cap*sizeof(new[0]));
+ if(new == nil)
+ runtime·throw("runtime: cannot allocate memory");
+ if(runtime·allg != nil) {
+ runtime·memmove(new, runtime·allg, runtime·allglen*sizeof(new[0]));
+ runtime·free(runtime·allg);
+ }
+ runtime·allg = new;
+ allgcap = cap;
+ }
+ runtime·allg[runtime·allglen++] = gp;
+ runtime·unlock(&allglock);
+}
+
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
static void
gfput(P *p, G *gp)
{
+ uintptr stksize;
+ Stktop *top;
+
if(gp->stackguard - StackGuard != gp->stack0)
runtime·throw("invalid stack in gfput");
+ stksize = gp->stackbase + sizeof(Stktop) - gp->stack0;
+ if(stksize != gp->stacksize) {
+ runtime·printf("runtime: bad stacksize, goroutine %D, remain=%d, last=%d\n",
+ gp->goid, (int32)gp->stacksize, (int32)stksize);
+ runtime·throw("gfput: bad stacksize");
+ }
+ top = (Stktop*)gp->stackbase;
+ if(top->malloced) {
+ // non-standard stack size - free it.
+ runtime·stackfree(gp, (void*)gp->stack0, top);
+ gp->stack0 = 0;
+ gp->stackguard = 0;
+ gp->stackguard0 = 0;
+ gp->stackbase = 0;
+ }
gp->schedlink = p->gfree;
p->gfree = gp;
p->gfreecnt++;
@@ -1823,6 +1952,7 @@ static G*
gfget(P *p)
{
G *gp;
+ byte *stk;
retry:
gp = p->gfree;
@@ -1841,6 +1971,23 @@ retry:
if(gp) {
p->gfree = gp->schedlink;
p->gfreecnt--;
+
+ if(gp->stack0 == 0) {
+ // Stack was deallocated in gfput. Allocate a new one.
+ if(g == m->g0) {
+ stk = runtime·stackalloc(gp, FixedStack);
+ } else {
+ gp->stacksize = FixedStack;
+ g->param = gp;
+ runtime·mcall(mstackalloc);
+ stk = g->param;
+ g->param = nil;
+ }
+ gp->stack0 = (uintptr)stk;
+ gp->stackbase = (uintptr)stk + FixedStack - sizeof(Stktop);
+ gp->stackguard = (uintptr)stk + StackGuard;
+ gp->stackguard0 = gp->stackguard;
+ }
}
return gp;
}
@@ -1963,39 +2110,26 @@ runtime·lockedOSThread(void)
return g->lockedm != nil && m->lockedg != nil;
}
-// for testing of callbacks
-void
-runtime·golockedOSThread(bool ret)
-{
- ret = runtime·lockedOSThread();
- FLUSH(&ret);
-}
-
-void
-runtime·NumGoroutine(intgo ret)
-{
- ret = runtime·gcount();
- FLUSH(&ret);
-}
-
int32
runtime·gcount(void)
{
G *gp;
int32 n, s;
+ uintptr i;
n = 0;
- runtime·lock(&runtime·sched);
+ runtime·lock(&allglock);
// TODO(dvyukov): runtime.NumGoroutine() is O(N).
// We do not want to increment/decrement centralized counter in newproc/goexit,
// just to make runtime.NumGoroutine() faster.
// Compromise solution is to introduce per-P counters of active goroutines.
- for(gp = runtime·allg; gp; gp = gp->alllink) {
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
s = gp->status;
if(s == Grunnable || s == Grunning || s == Gsyscall || s == Gwaiting)
n++;
}
- runtime·unlock(&runtime·sched);
+ runtime·unlock(&allglock);
return n;
}
@@ -2032,25 +2166,30 @@ static struct {
uintptr pcbuf[100];
} prof;
-static void
-System(void)
-{
-}
+static void System(void) {}
+static void ExternalCode(void) {}
+static void GC(void) {}
+extern byte etext[];
// Called if we receive a SIGPROF signal.
void
-runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
+runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
{
int32 n;
bool traceback;
+ // Do not use the global m in this function; use mp instead.
+ // On Windows, one m sends reports about all the g's, so the global m would be the wrong one.
+ byte m;
+
+ m = 0;
+ USED(m);
if(prof.fn == nil || prof.hz == 0)
return;
- traceback = true;
- // Windows does profiling in a dedicated thread w/o m.
- if(!Windows && (m == nil || m->mcache == nil))
- traceback = false;
-
+
+ // Profiling runs concurrently with GC, so it must not allocate.
+ mp->mallocing++;
+
// Define that a "user g" is a user-created goroutine, and a "system g"
// is one that is m->g0 or m->gsignal. We've only made sure that we
// can unwind user g's, so exclude the system g's.
@@ -2123,36 +2262,55 @@ runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
// To recap, there are no constraints on the assembly being used for the
// transition. We simply require that g and SP match and that the PC is not
// in runtime.gogo.
- //
- // On Windows, one m is sending reports about all the g's, so gp == m->curg
- // is not a useful comparison. The profilem function in os_windows.c has
- // already checked that gp is a user g.
- if(gp == nil ||
- (!Windows && gp != m->curg) ||
+ traceback = true;
+ if(gp == nil || gp != mp->curg ||
(uintptr)sp < gp->stackguard - StackGuard || gp->stackbase < (uintptr)sp ||
((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
traceback = false;
- // Race detector calls asmcgocall w/o entersyscall/exitsyscall,
- // we can not currently unwind through asmcgocall.
- if(m != nil && m->racecall)
- traceback = false;
-
runtime·lock(&prof);
if(prof.fn == nil) {
runtime·unlock(&prof);
+ mp->mallocing--;
return;
}
n = 0;
if(traceback)
n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false);
if(!traceback || n <= 0) {
- n = 2;
- prof.pcbuf[0] = (uintptr)pc;
- prof.pcbuf[1] = (uintptr)System + 1;
+ // Normal traceback is impossible or has failed.
+ // See if it falls into several common cases.
+ n = 0;
+ if(mp->ncgo > 0 && mp->curg != nil &&
+ mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
+ // Cgo: we can't unwind and symbolize arbitrary C code,
+ // so instead collect the Go stack that leads to the cgo call.
+ // This is especially important on Windows, since all syscalls are cgo calls.
+ n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false);
+ }
+#ifdef GOOS_windows
+ if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
+ // Libcall, i.e. a runtime syscall on Windows.
+ // Collect the Go stack that leads to the call.
+ n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false);
+ }
+#endif
+ if(n == 0) {
+ // If all of the above has failed, account it against abstract "System" or "GC".
+ n = 2;
+ // "ExternalCode" is better than "etext".
+ if((uintptr)pc > (uintptr)etext)
+ pc = (byte*)ExternalCode + PCQuantum;
+ prof.pcbuf[0] = (uintptr)pc;
+ if(mp->gcing || mp->helpgc)
+ prof.pcbuf[1] = (uintptr)GC + PCQuantum;
+ else
+ prof.pcbuf[1] = (uintptr)System + PCQuantum;
+ }
}
prof.fn(prof.pcbuf, n);
runtime·unlock(&prof);
+ mp->mallocing--;
}
// Arrange to call fn with a traceback hz times a second.
@@ -2195,6 +2353,7 @@ static void
procresize(int32 new)
{
int32 i, old;
+ bool empty;
G *gp;
P *p;
@@ -2216,27 +2375,42 @@ procresize(int32 new)
else
p->mcache = runtime·allocmcache();
}
- if(p->runq == nil) {
- p->runqsize = 128;
- p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*), 0, FlagNoInvokeGC);
- }
}
// redistribute runnable G's evenly
- for(i = 0; i < old; i++) {
- p = runtime·allp[i];
- while(gp = runqget(p))
- globrunqput(gp);
+ // Collect all runnable goroutines in the global queue, preserving FIFO order.
+ // FIFO order is required to ensure fairness even during frequent GCs;
+ // see http://golang.org/issue/7126.
+ empty = false;
+ while(!empty) {
+ empty = true;
+ for(i = 0; i < old; i++) {
+ p = runtime·allp[i];
+ if(p->runqhead == p->runqtail)
+ continue;
+ empty = false;
+ // pop from tail of local queue
+ p->runqtail--;
+ gp = p->runq[p->runqtail%nelem(p->runq)];
+ // push onto head of global queue
+ gp->schedlink = runtime·sched.runqhead;
+ runtime·sched.runqhead = gp;
+ if(runtime·sched.runqtail == nil)
+ runtime·sched.runqtail = gp;
+ runtime·sched.runqsize++;
+ }
}
+ // fill local queues with at most nelem(p->runq)/2 goroutines
// start at 1 because current M already executes some G and will acquire allp[0] below,
// so if we have a spare G we want to put it into allp[1].
- for(i = 1; runtime·sched.runqhead; i++) {
+ for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
gp = runtime·sched.runqhead;
runtime·sched.runqhead = gp->schedlink;
+ if(runtime·sched.runqhead == nil)
+ runtime·sched.runqtail = nil;
+ runtime·sched.runqsize--;
runqput(runtime·allp[i%new], gp);
}
- runtime·sched.runqtail = nil;
- runtime·sched.runqsize = 0;
// free unused P's
for(i = new; i < old; i++) {
@@ -2318,30 +2492,41 @@ checkdead(void)
{
G *gp;
int32 run, grunning, s;
+ uintptr i;
// -1 for sysmon
run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
if(run > 0)
return;
+ // If we are dying because of a signal caught on an already idle thread,
+ // freezetheworld will cause all running threads to block.
+ // The runtime will then essentially be in a deadlock state,
+ // except that there is a thread that will call runtime·exit soon.
+ if(runtime·panicking > 0)
+ return;
if(run < 0) {
- runtime·printf("checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
+ runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
runtime·throw("checkdead: inconsistent counts");
}
grunning = 0;
- for(gp = runtime·allg; gp; gp = gp->alllink) {
+ runtime·lock(&allglock);
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
if(gp->isbackground)
continue;
s = gp->status;
if(s == Gwaiting)
grunning++;
else if(s == Grunnable || s == Grunning || s == Gsyscall) {
- runtime·printf("checkdead: find g %D in status %d\n", gp->goid, s);
+ runtime·unlock(&allglock);
+ runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
runtime·throw("checkdead: runnable g");
}
}
+ runtime·unlock(&allglock);
if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
- runtime·exit(0);
+ runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
m->throwing = -1; // do not dump full stacks
runtime·throw("all goroutines are asleep - deadlock!");
}
@@ -2418,6 +2603,7 @@ struct Pdesc
uint32 syscalltick;
int64 syscallwhen;
};
+#pragma dataflag NOPTR
static Pdesc pdesc[MaxGomaxprocs];
static uint32
@@ -2436,16 +2622,19 @@ retake(int64 now)
pd = &pdesc[i];
s = p->status;
if(s == Psyscall) {
- // Retake P from syscall if it's there for more than 1 sysmon tick (20us).
- // But only if there is other work to do.
+ // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
t = p->syscalltick;
if(pd->syscalltick != t) {
pd->syscalltick = t;
pd->syscallwhen = now;
continue;
}
+ // On the one hand we don't want to retake Ps if there is no other work to do,
+ // but on the other hand we want to retake them eventually
+ // because they can prevent the sysmon thread from entering deep sleep.
if(p->runqhead == p->runqtail &&
- runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0)
+ runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
+ pd->syscallwhen + 10*1000*1000 > now)
continue;
// Need to decrement number of idle locked M's
// (pretending that one more is running) before the CAS.
@@ -2525,7 +2714,8 @@ runtime·schedtrace(bool detailed)
static int64 starttime;
int64 now;
int64 id1, id2, id3;
- int32 i, q, t, h, s;
+ int32 i, t, h;
+ uintptr gi;
int8 *fmt;
M *mp, *lockedm;
G *gp, *lockedg;
@@ -2552,15 +2742,11 @@ runtime·schedtrace(bool detailed)
if(p == nil)
continue;
mp = p->m;
- t = p->runqtail;
- h = p->runqhead;
- s = p->runqsize;
- q = t - h;
- if(q < 0)
- q += s;
+ h = runtime·atomicload(&p->runqhead);
+ t = runtime·atomicload(&p->runqtail);
if(detailed)
- runtime·printf(" P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d/%d gfreecnt=%d\n",
- i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, q, s, p->gfreecnt);
+ runtime·printf(" P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
+ i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
else {
// In non-detailed mode format lengths of per-P run queues as:
// [len1 len2 len3 len4]
@@ -2571,7 +2757,7 @@ runtime·schedtrace(bool detailed)
fmt = " [%d";
else if(i == runtime·gomaxprocs-1)
fmt = " %d]\n";
- runtime·printf(fmt, q);
+ runtime·printf(fmt, t-h);
}
}
if(!detailed) {
@@ -2592,18 +2778,21 @@ runtime·schedtrace(bool detailed)
if(lockedg)
id3 = lockedg->goid;
runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
- " locks=%d dying=%d helpgc=%d spinning=%d lockedg=%D\n",
+ " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
mp->id, id1, id2,
mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
- mp->spinning, id3);
+ mp->spinning, mp->blocked, id3);
}
- for(gp = runtime·allg; gp; gp = gp->alllink) {
+ runtime·lock(&allglock);
+ for(gi = 0; gi < runtime·allglen; gi++) {
+ gp = runtime·allg[gi];
mp = gp->m;
lockedm = gp->lockedm;
runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n",
gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1,
lockedm ? lockedm->id : -1);
}
+ runtime·unlock(&allglock);
runtime·unlock(&runtime·sched);
}
@@ -2646,6 +2835,20 @@ globrunqput(G *gp)
runtime·sched.runqsize++;
}
+// Put a batch of runnable goroutines on the global runnable queue.
+// Sched must be locked.
+static void
+globrunqputbatch(G *ghead, G *gtail, int32 n)
+{
+ gtail->schedlink = nil;
+ if(runtime·sched.runqtail)
+ runtime·sched.runqtail->schedlink = ghead;
+ else
+ runtime·sched.runqhead = ghead;
+ runtime·sched.runqtail = gtail;
+ runtime·sched.runqsize += n;
+}
+
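
Because the batch arrives already linked through schedlink, the splice is O(1) regardless of n. A self-contained Go sketch of the same list append (type names illustrative):

    package main

    type g struct{ schedlink *g }

    type gQueue struct {
        head, tail *g
        size       int32
    }

    // putBatch mirrors globrunqputbatch: terminate the batch, splice it
    // onto the tail (or make it the whole queue), and bump the size.
    func (q *gQueue) putBatch(ghead, gtail *g, n int32) {
        gtail.schedlink = nil
        if q.tail != nil {
            q.tail.schedlink = ghead
        } else {
            q.head = ghead
        }
        q.tail = gtail
        q.size += n
    }

    func main() {}
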
// Try get a batch of G's from the global runnable queue.
// Sched must be locked.
static G*
@@ -2661,6 +2864,8 @@ globrunqget(P *p, int32 max)
n = runtime·sched.runqsize;
if(max > 0 && n > max)
n = max;
+ if(n > nelem(p->runq)/2)
+ n = nelem(p->runq)/2;
runtime·sched.runqsize -= n;
if(runtime·sched.runqsize == 0)
runtime·sched.runqtail = nil;
@@ -2700,78 +2905,98 @@ pidleget(void)
return p;
}
-// Put g on local runnable queue.
-// TODO(dvyukov): consider using lock-free queue.
+// Try to put g on local runnable queue.
+// If it's full, put onto global queue.
+// Executed only by the owner P.
static void
runqput(P *p, G *gp)
{
- int32 h, t, s;
+ uint32 h, t;
- runtime·lock(p);
retry:
- h = p->runqhead;
+ h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
t = p->runqtail;
- s = p->runqsize;
- if(t == h-1 || (h == 0 && t == s-1)) {
- runqgrow(p);
- goto retry;
+ if(t - h < nelem(p->runq)) {
+ p->runq[t%nelem(p->runq)] = gp;
+ runtime·atomicstore(&p->runqtail, t+1); // store-release, makes the item available for consumption
+ return;
}
- p->runq[t++] = gp;
- if(t == s)
- t = 0;
- p->runqtail = t;
- runtime·unlock(p);
+ if(runqputslow(p, gp, h, t))
+ return;
+ // the queue is not full, now the put above must succeed
+ goto retry;
+}
+
+// Put g and a batch of work from local runnable queue on global queue.
+// Executed only by the owner P.
+static bool
+runqputslow(P *p, G *gp, uint32 h, uint32 t)
+{
+ G *batch[nelem(p->runq)/2+1];
+ uint32 n, i;
+
+ // First, grab a batch from local queue.
+ n = t-h;
+ n = n/2;
+ if(n != nelem(p->runq)/2)
+ runtime·throw("runqputslow: queue is not full");
+ for(i=0; i<n; i++)
+ batch[i] = p->runq[(h+i)%nelem(p->runq)];
+ if(!runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
+ return false;
+ batch[n] = gp;
+ // Link the goroutines.
+ for(i=0; i<n; i++)
+ batch[i]->schedlink = batch[i+1];
+ // Now put the batch on global queue.
+ runtime·lock(&runtime·sched);
+ globrunqputbatch(batch[0], batch[n], n+1);
+ runtime·unlock(&runtime·sched);
+ return true;
}
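
runqput and runqputslow together form the producer side of a fixed-size single-producer/multi-consumer ring: only the owner advances tail, stealers advance head with CAS, and overflow spills half the ring to the global queue. A minimal Go sketch of the scheme, assuming a 256-entry ring and a caller-supplied spill function (both illustrative, not the runtime's API):

    package main

    import "sync/atomic"

    const runqSize = 256 // stands in for nelem(p->runq)

    type g struct{ schedlink *g }

    type pRunq struct {
        head uint32 // advanced by any thread via CAS
        tail uint32 // advanced only by the owner
        runq [runqSize]*g
    }

    // Owner-only put. Fast path: one atomic load and one atomic store.
    // Slow path: move half the ring, plus gp, out through spill.
    func (p *pRunq) runqput(gp *g, spill func([]*g)) {
        for {
            h := atomic.LoadUint32(&p.head) // load-acquire
            t := p.tail
            if t-h < runqSize { // room in the ring
                p.runq[t%runqSize] = gp
                atomic.StoreUint32(&p.tail, t+1) // store-release: publish gp
                return
            }
            n := (t - h) / 2
            batch := make([]*g, 0, n+1)
            for i := uint32(0); i < n; i++ {
                batch = append(batch, p.runq[(h+i)%runqSize])
            }
            if !atomic.CompareAndSwapUint32(&p.head, h, h+n) {
                continue // a stealer raced us; the ring has room again, retry
            }
            spill(append(batch, gp))
            return
        }
    }

    func main() {}
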
// Get g from local runnable queue.
+// Executed only by the owner P.
static G*
runqget(P *p)
{
G *gp;
- int32 t, h, s;
+ uint32 t, h;
- if(p->runqhead == p->runqtail)
- return nil;
- runtime·lock(p);
- h = p->runqhead;
- t = p->runqtail;
- s = p->runqsize;
- if(t == h) {
- runtime·unlock(p);
- return nil;
+ for(;;) {
+ h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
+ t = p->runqtail;
+ if(t == h)
+ return nil;
+ gp = p->runq[h%nelem(p->runq)];
+ if(runtime·cas(&p->runqhead, h, h+1)) // cas-release, commits consume
+ return gp;
}
- gp = p->runq[h++];
- if(h == s)
- h = 0;
- p->runqhead = h;
- runtime·unlock(p);
- return gp;
}
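
Continuing the pRunq sketch above: the owner's get must still CAS on head, because stealers consume from the same end concurrently.

    func (p *pRunq) runqget() *g {
        for {
            h := atomic.LoadUint32(&p.head) // load-acquire, sync with other consumers
            t := p.tail                     // owner-only field, plain read is fine
            if t == h {
                return nil // empty
            }
            gp := p.runq[h%runqSize]
            if atomic.CompareAndSwapUint32(&p.head, h, h+1) { // commit the consume
                return gp
            }
        }
    }
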
-// Grow local runnable queue.
-// TODO(dvyukov): consider using fixed-size array
-// and transfer excess to the global list (local queue can grow way too big).
-static void
-runqgrow(P *p)
+// Grabs a batch of goroutines from local runnable queue.
+// batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
+// Can be executed by any P.
+static uint32
+runqgrab(P *p, G **batch)
{
- G **q;
- int32 s, t, h, t2;
+ uint32 t, h, n, i;
- h = p->runqhead;
- t = p->runqtail;
- s = p->runqsize;
- t2 = 0;
- q = runtime·malloc(2*s*sizeof(*q));
- while(t != h) {
- q[t2++] = p->runq[h++];
- if(h == s)
- h = 0;
+ for(;;) {
+ h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
+ t = runtime·atomicload(&p->runqtail); // load-acquire, synchronize with the producer
+ n = t-h;
+ n = n - n/2;
+ if(n == 0)
+ break;
+ if(n > nelem(p->runq)/2) // read inconsistent h and t
+ continue;
+ for(i=0; i<n; i++)
+ batch[i] = p->runq[(h+i)%nelem(p->runq)];
+ if(runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
+ break;
}
- runtime·free(p->runq);
- p->runq = q;
- p->runqhead = 0;
- p->runqtail = t2;
- p->runqsize = 2*s;
+ return n;
}
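
Continuing the sketch: a grab by a non-owner must load both indices atomically, and an apparently oversized n means head and tail were read around a concurrent update, so the loop simply retries.

    func (p2 *pRunq) runqgrab(batch []*g) uint32 {
        for {
            h := atomic.LoadUint32(&p2.head) // load-acquire, sync with other consumers
            t := atomic.LoadUint32(&p2.tail) // load-acquire, sync with the producer
            n := t - h
            n = n - n/2 // the larger half, rounding up
            if n == 0 {
                return 0
            }
            if n > runqSize/2 {
                continue // inconsistent h and t; retry
            }
            for i := uint32(0); i < n; i++ {
                batch[i] = p2.runq[(h+i)%runqSize]
            }
            if atomic.CompareAndSwapUint32(&p2.head, h, h+n) { // commit the grab
                return n
            }
        }
    }
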
// Steal half of elements from local runnable queue of p2
@@ -2780,57 +3005,24 @@ runqgrow(P *p)
static G*
runqsteal(P *p, P *p2)
{
- G *gp, *gp1;
- int32 t, h, s, t2, h2, s2, c, i;
+ G *gp;
+ G *batch[nelem(p->runq)/2];
+ uint32 t, h, n, i;
- if(p2->runqhead == p2->runqtail)
+ n = runqgrab(p2, batch);
+ if(n == 0)
return nil;
- // sort locks to prevent deadlocks
- if(p < p2)
- runtime·lock(p);
- runtime·lock(p2);
- if(p2->runqhead == p2->runqtail) {
- runtime·unlock(p2);
- if(p < p2)
- runtime·unlock(p);
- return nil;
- }
- if(p >= p2)
- runtime·lock(p);
- // now we've locked both queues and know the victim is not empty
- h = p->runqhead;
+ n--;
+ gp = batch[n];
+ if(n == 0)
+ return gp;
+ h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
t = p->runqtail;
- s = p->runqsize;
- h2 = p2->runqhead;
- t2 = p2->runqtail;
- s2 = p2->runqsize;
- gp = p2->runq[h2++]; // return value
- if(h2 == s2)
- h2 = 0;
- // steal roughly half
- if(t2 > h2)
- c = (t2 - h2) / 2;
- else
- c = (s2 - h2 + t2) / 2;
- // copy
- for(i = 0; i != c; i++) {
- // the target queue is full?
- if(t == h-1 || (h == 0 && t == s-1))
- break;
- // the victim queue is empty?
- if(t2 == h2)
- break;
- gp1 = p2->runq[h2++];
- if(h2 == s2)
- h2 = 0;
- p->runq[t++] = gp1;
- if(t == s)
- t = 0;
- }
- p->runqtail = t;
- p2->runqhead = h2;
- runtime·unlock(p2);
- runtime·unlock(p);
+ if(t - h + n >= nelem(p->runq))
+ runtime·throw("runqsteal: runq overflow");
+ for(i=0; i<n; i++, t++)
+ p->runq[t%nelem(p->runq)] = batch[i];
+ runtime·atomicstore(&p->runqtail, t); // store-release, makes the item available for consumption
return gp;
}
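
And the thief's side, completing the sketch: one G is handed straight back to run, and the rest are published into the thief's own ring with a single store-release of tail.

    func runqsteal(p, p2 *pRunq) *g {
        var batch [runqSize / 2]*g
        n := p2.runqgrab(batch[:])
        if n == 0 {
            return nil
        }
        n--
        gp := batch[n] // run this one immediately
        if n == 0 {
            return gp
        }
        h := atomic.LoadUint32(&p.head) // load-acquire
        t := p.tail
        if t-h+n >= runqSize {
            panic("runqsteal: runq overflow") // mirrors the runtime·throw above
        }
        for i := uint32(0); i < n; i++ {
            p.runq[(t+i)%runqSize] = batch[i]
        }
        atomic.StoreUint32(&p.tail, t+n) // store-release: publish the batch
        return gp
    }
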
@@ -2838,14 +3030,10 @@ void
runtime·testSchedLocalQueue(void)
{
P p;
- G gs[1000];
+ G gs[nelem(p.runq)];
int32 i, j;
runtime·memclr((byte*)&p, sizeof(p));
- p.runqsize = 1;
- p.runqhead = 0;
- p.runqtail = 0;
- p.runq = runtime·malloc(p.runqsize*sizeof(*p.runq));
for(i = 0; i < nelem(gs); i++) {
if(runqget(&p) != nil)
@@ -2867,20 +3055,11 @@ void
runtime·testSchedLocalQueueSteal(void)
{
P p1, p2;
- G gs[1000], *gp;
+ G gs[nelem(p1.runq)], *gp;
int32 i, j, s;
runtime·memclr((byte*)&p1, sizeof(p1));
- p1.runqsize = 1;
- p1.runqhead = 0;
- p1.runqtail = 0;
- p1.runq = runtime·malloc(p1.runqsize*sizeof(*p1.runq));
-
runtime·memclr((byte*)&p2, sizeof(p2));
- p2.runqsize = nelem(gs);
- p2.runqhead = 0;
- p2.runqtail = 0;
- p2.runq = runtime·malloc(p2.runqsize*sizeof(*p2.runq));
for(i = 0; i < nelem(gs); i++) {
for(j = 0; j < i; j++) {
@@ -2914,6 +3093,7 @@ runtime·testSchedLocalQueueSteal(void)
}
extern void runtime·morestack(void);
+uintptr runtime·externalthreadhandlerp;
// Does f mark the top of a goroutine stack?
bool
@@ -2924,18 +3104,21 @@ runtime·topofstack(Func *f)
f->entry == (uintptr)runtime·mcall ||
f->entry == (uintptr)runtime·morestack ||
f->entry == (uintptr)runtime·lessstack ||
- f->entry == (uintptr)_rt0_go;
+ f->entry == (uintptr)_rt0_go ||
+ (runtime·externalthreadhandlerp != 0 && f->entry == runtime·externalthreadhandlerp);
}
-void
-runtime∕debug·setMaxThreads(intgo in, intgo out)
+int32
+runtime·setmaxthreads(int32 in)
{
+ int32 out;
+
runtime·lock(&runtime·sched);
out = runtime·sched.maxmcount;
runtime·sched.maxmcount = in;
checkmcount();
runtime·unlock(&runtime·sched);
- FLUSH(&out);
+ return out;
}
static int8 experiment[] = GOEXPERIMENT; // defined in zaexperiment.h
diff --git a/src/pkg/runtime/proc.p b/src/pkg/runtime/proc.p
deleted file mode 100644
index f0b46de61..000000000
--- a/src/pkg/runtime/proc.p
+++ /dev/null
@@ -1,526 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-model for proc.c as of 2011/07/22.
-
-takes 4900 seconds to explore 1189070 states
-with G=3, var_gomaxprocs=1
-on a Core i7 L640 2.13 GHz Lenovo X201s.
-
-rm -f proc.p.trail pan.* pan
-spin -a proc.p
-gcc -DSAFETY -DREACH -DMEMLIM'='4000 -o pan pan.c
-pan -w28 -n -i -m500000
-test -f proc.p.trail && pan -r proc.p.trail
-*/
-
-/*
- * scheduling parameters
- */
-
-/*
- * the number of goroutines G doubles as the maximum
- * number of OS threads; the max is reachable when all
- * the goroutines are blocked in system calls.
- */
-#define G 3
-
-/*
- * whether to allow gomaxprocs to vary during execution.
- * enabling this checks the scheduler even when code is
- * calling GOMAXPROCS, but it also slows down the verification
- * by about 10x.
- */
-#define var_gomaxprocs 1 /* allow gomaxprocs to vary */
-
-/* gomaxprocs */
-#if var_gomaxprocs
-byte gomaxprocs = 3;
-#else
-#define gomaxprocs 3
-#endif
-
-/* queue of waiting M's: sched_mhead[:mwait] */
-byte mwait;
-byte sched_mhead[G];
-
-/* garbage collector state */
-bit gc_lock, gcwaiting;
-
-/* goroutines sleeping, waiting to run */
-byte gsleep, gwait;
-
-/* scheduler state */
-bit sched_lock;
-bit sched_stopped;
-bit atomic_gwaiting, atomic_waitstop;
-byte atomic_mcpu, atomic_mcpumax;
-
-/* M struct fields - state for handing off g to m. */
-bit m_waitnextg[G];
-bit m_havenextg[G];
-bit m_nextg[G];
-
-/*
- * opt_atomic/opt_dstep mark atomic/deterministics
- * sequences that are marked only for reasons of
- * optimization, not for correctness of the algorithms.
- *
- * in general any code that runs while holding the
- * schedlock and does not refer to or modify the atomic_*
- * fields can be marked atomic/dstep without affecting
- * the usefulness of the model. since we trust the lock
- * implementation, what we really want to test is the
- * interleaving of the atomic fast paths with entersyscall
- * and exitsyscall.
- */
-#define opt_atomic atomic
-#define opt_dstep d_step
-
-/* locks */
-inline lock(x) {
- d_step { x == 0; x = 1 }
-}
-
-inline unlock(x) {
- d_step { assert x == 1; x = 0 }
-}
-
-/* notes */
-inline noteclear(x) {
- x = 0
-}
-
-inline notesleep(x) {
- x == 1
-}
-
-inline notewakeup(x) {
- opt_dstep { assert x == 0; x = 1 }
-}
-
-/*
- * scheduler
- */
-inline schedlock() {
- lock(sched_lock)
-}
-
-inline schedunlock() {
- unlock(sched_lock)
-}
-
-/*
- * canaddmcpu is like the C function but takes
- * an extra argument to include in the test, to model
- * "cannget() && canaddmcpu()" as "canaddmcpu(cangget())"
- */
-inline canaddmcpu(g) {
- d_step {
- g && atomic_mcpu < atomic_mcpumax;
- atomic_mcpu++;
- }
-}
-
-/*
- * gput is like the C function.
- * instead of tracking goroutines explicitly we
- * maintain only the count of the number of
- * waiting goroutines.
- */
-inline gput() {
- /* omitted: lockedm, idlem concerns */
- opt_dstep {
- gwait++;
- if
- :: gwait == 1 ->
- atomic_gwaiting = 1
- :: else
- fi
- }
-}
-
-/*
- * cangget is a macro so it can be passed to
- * canaddmcpu (see above).
- */
-#define cangget() (gwait>0)
-
-/*
- * gget is like the C function.
- */
-inline gget() {
- opt_dstep {
- assert gwait > 0;
- gwait--;
- if
- :: gwait == 0 ->
- atomic_gwaiting = 0
- :: else
- fi
- }
-}
-
-/*
- * mput is like the C function.
- * here we do keep an explicit list of waiting M's,
- * so that we know which ones can be awakened.
- * we use _pid-1 because the monitor is proc 0.
- */
-inline mput() {
- opt_dstep {
- sched_mhead[mwait] = _pid - 1;
- mwait++
- }
-}
-
-/*
- * mnextg is like the C function mnextg(m, g).
- * it passes an unspecified goroutine to m to start running.
- */
-inline mnextg(m) {
- opt_dstep {
- m_nextg[m] = 1;
- if
- :: m_waitnextg[m] ->
- m_waitnextg[m] = 0;
- notewakeup(m_havenextg[m])
- :: else
- fi
- }
-}
-
-/*
- * mgetnextg handles the main m handoff in matchmg.
- * it is like mget() || new M followed by mnextg(m, g),
- * but combined to avoid a local variable.
- * unlike the C code, a new M simply assumes it is
- * running a g instead of using the mnextg coordination
- * to obtain one.
- */
-inline mgetnextg() {
- opt_atomic {
- if
- :: mwait > 0 ->
- mwait--;
- mnextg(sched_mhead[mwait]);
- sched_mhead[mwait] = 0;
- :: else ->
- run mstart();
- fi
- }
-}
-
-/*
- * nextgandunlock is like the C function.
- * it pulls a g off the queue or else waits for one.
- */
-inline nextgandunlock() {
- assert atomic_mcpu <= G;
-
- if
- :: m_nextg[_pid-1] ->
- m_nextg[_pid-1] = 0;
- schedunlock();
- :: canaddmcpu(!m_nextg[_pid-1] && cangget()) ->
- gget();
- schedunlock();
- :: else ->
- opt_dstep {
- mput();
- m_nextg[_pid-1] = 0;
- m_waitnextg[_pid-1] = 1;
- noteclear(m_havenextg[_pid-1]);
- }
- if
- :: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
- atomic_waitstop = 0;
- notewakeup(sched_stopped)
- :: else
- fi;
- schedunlock();
- opt_dstep {
- notesleep(m_havenextg[_pid-1]);
- assert m_nextg[_pid-1];
- m_nextg[_pid-1] = 0;
- }
- fi
-}
-
-/*
- * stoptheworld is like the C function.
- */
-inline stoptheworld() {
- schedlock();
- gcwaiting = 1;
- atomic_mcpumax = 1;
- do
- :: d_step { atomic_mcpu > 1 ->
- noteclear(sched_stopped);
- assert !atomic_waitstop;
- atomic_waitstop = 1 }
- schedunlock();
- notesleep(sched_stopped);
- schedlock();
- :: else ->
- break
- od;
- schedunlock();
-}
-
-/*
- * starttheworld is like the C function.
- */
-inline starttheworld() {
- schedlock();
- gcwaiting = 0;
- atomic_mcpumax = gomaxprocs;
- matchmg();
- schedunlock();
-}
-
-/*
- * matchmg is like the C function.
- */
-inline matchmg() {
- do
- :: canaddmcpu(cangget()) ->
- gget();
- mgetnextg();
- :: else -> break
- od
-}
-
-/*
- * ready is like the C function.
- * it puts a g on the run queue.
- */
-inline ready() {
- schedlock();
- gput()
- matchmg()
- schedunlock()
-}
-
-/*
- * schedule simulates the C scheduler.
- * it assumes that there is always a goroutine
- * running already, and the goroutine has entered
- * the scheduler for an unspecified reason,
- * either to yield or to block.
- */
-inline schedule() {
- schedlock();
-
- mustsched = 0;
- atomic_mcpu--;
- assert atomic_mcpu <= G;
- if
- :: skip ->
- // goroutine yields, still runnable
- gput();
- :: gsleep+1 < G ->
- // goroutine goes to sleep (but there is another that can wake it)
- gsleep++
- fi;
-
- // Find goroutine to run.
- nextgandunlock()
-}
-
-/*
- * schedpend is > 0 if a goroutine is about to committed to
- * entering the scheduler but has not yet done so.
- * Just as we don't test for the undesirable conditions when a
- * goroutine is in the scheduler, we don't test for them when
- * a goroutine will be in the scheduler shortly.
- * Modeling this state lets us replace mcpu cas loops with
- * simpler mcpu atomic adds.
- */
-byte schedpend;
-
-/*
- * entersyscall is like the C function.
- */
-inline entersyscall() {
- bit willsched;
-
- /*
- * Fast path. Check all the conditions tested during schedlock/schedunlock
- * below, and if we can get through the whole thing without stopping, run it
- * in one atomic cas-based step.
- */
- atomic {
- atomic_mcpu--;
- if
- :: atomic_gwaiting ->
- skip
- :: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
- skip
- :: else ->
- goto Lreturn_entersyscall;
- fi;
- willsched = 1;
- schedpend++;
- }
-
- /*
- * Normal path.
- */
- schedlock()
- opt_dstep {
- if
- :: willsched ->
- schedpend--;
- willsched = 0
- :: else
- fi
- }
- if
- :: atomic_gwaiting ->
- matchmg()
- :: else
- fi;
- if
- :: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
- atomic_waitstop = 0;
- notewakeup(sched_stopped)
- :: else
- fi;
- schedunlock();
-Lreturn_entersyscall:
- skip
-}
-
-/*
- * exitsyscall is like the C function.
- */
-inline exitsyscall() {
- /*
- * Fast path. If there's a cpu available, use it.
- */
- atomic {
- // omitted profilehz check
- atomic_mcpu++;
- if
- :: atomic_mcpu >= atomic_mcpumax ->
- skip
- :: else ->
- goto Lreturn_exitsyscall
- fi
- }
-
- /*
- * Normal path.
- */
- schedlock();
- d_step {
- if
- :: atomic_mcpu <= atomic_mcpumax ->
- skip
- :: else ->
- mustsched = 1
- fi
- }
- schedunlock()
-Lreturn_exitsyscall:
- skip
-}
-
-#if var_gomaxprocs
-inline gomaxprocsfunc() {
- schedlock();
- opt_atomic {
- if
- :: gomaxprocs != 1 -> gomaxprocs = 1
- :: gomaxprocs != 2 -> gomaxprocs = 2
- :: gomaxprocs != 3 -> gomaxprocs = 3
- fi;
- }
- if
- :: gcwaiting != 0 ->
- assert atomic_mcpumax == 1
- :: else ->
- atomic_mcpumax = gomaxprocs;
- if
- :: atomic_mcpu > gomaxprocs ->
- mustsched = 1
- :: else ->
- matchmg()
- fi
- fi;
- schedunlock();
-}
-#endif
-
-/*
- * mstart is the entry point for a new M.
- * our model of an M is always running some
- * unspecified goroutine.
- */
-proctype mstart() {
- /*
- * mustsched is true if the goroutine must enter the
- * scheduler instead of continuing to execute.
- */
- bit mustsched;
-
- do
- :: skip ->
- // goroutine reschedules.
- schedule()
- :: !mustsched ->
- // goroutine does something.
- if
- :: skip ->
- // goroutine executes system call
- entersyscall();
- exitsyscall()
- :: atomic { gsleep > 0; gsleep-- } ->
- // goroutine wakes another goroutine
- ready()
- :: lock(gc_lock) ->
- // goroutine runs a garbage collection
- stoptheworld();
- starttheworld();
- unlock(gc_lock)
-#if var_gomaxprocs
- :: skip ->
- // goroutine picks a new gomaxprocs
- gomaxprocsfunc()
-#endif
- fi
- od;
-
- assert 0;
-}
-
-/*
- * monitor initializes the scheduler state
- * and then watches for impossible conditions.
- */
-active proctype monitor() {
- opt_dstep {
- byte i = 1;
- do
- :: i < G ->
- gput();
- i++
- :: else -> break
- od;
- atomic_mcpu = 1;
- atomic_mcpumax = 1;
- }
- run mstart();
-
- do
- // Should never have goroutines waiting with procs available.
- :: !sched_lock && schedpend==0 && gwait > 0 && atomic_mcpu < atomic_mcpumax ->
- assert 0
- // Should never have gc waiting for stop if things have already stopped.
- :: !sched_lock && schedpend==0 && atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
- assert 0
- od
-}
diff --git a/src/pkg/runtime/proc_test.go b/src/pkg/runtime/proc_test.go
index dd70ed97d..5be355195 100644
--- a/src/pkg/runtime/proc_test.go
+++ b/src/pkg/runtime/proc_test.go
@@ -244,6 +244,49 @@ func TestPreemptionGC(t *testing.T) {
atomic.StoreUint32(&stop, 1)
}
+func TestGCFairness(t *testing.T) {
+ output := executeTest(t, testGCFairnessSource, nil)
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("want %s, got %s\n", want, output)
+ }
+}
+
+const testGCFairnessSource = `
+package main
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+)
+
+func main() {
+ runtime.GOMAXPROCS(1)
+ f, err := os.Open("/dev/null")
+ if os.IsNotExist(err) {
+ // The test is meaningful only if writes to /dev/null are fast.
+ // If there is no /dev/null, we just don't execute the test.
+ fmt.Println("OK")
+ return
+ }
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ for i := 0; i < 2; i++ {
+ go func() {
+ for {
+ f.Write([]byte("."))
+ }
+ }()
+ }
+ time.Sleep(10 * time.Millisecond)
+ fmt.Println("OK")
+}
+`
+
func stackGrowthRecursive(i int) {
var pad [128]uint64
if i != 0 && pad[0] == 0 {
@@ -327,24 +370,11 @@ func TestSchedLocalQueueSteal(t *testing.T) {
}
func benchmarkStackGrowth(b *testing.B, rec int) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- stackGrowthRecursive(rec)
- }
- }
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ stackGrowthRecursive(rec)
+ }
+ })
}
func BenchmarkStackGrowth(b *testing.B) {
diff --git a/src/pkg/runtime/race.c b/src/pkg/runtime/race.c
index 6ee55beff..eb0be7fa6 100644
--- a/src/pkg/runtime/race.c
+++ b/src/pkg/runtime/race.c
@@ -9,178 +9,96 @@
#include "arch_GOARCH.h"
#include "malloc.h"
#include "race.h"
-#include "../../cmd/ld/textflag.h"
-
-void runtime∕race·Initialize(uintptr *racectx);
-void runtime∕race·MapShadow(void *addr, uintptr size);
-void runtime∕race·Finalize(void);
-void runtime∕race·FinalizerGoroutine(uintptr racectx);
-void runtime∕race·Read(uintptr racectx, void *addr, void *pc);
-void runtime∕race·Write(uintptr racectx, void *addr, void *pc);
-void runtime∕race·ReadRange(uintptr racectx, void *addr, uintptr sz, void *pc);
-void runtime∕race·WriteRange(uintptr racectx, void *addr, uintptr sz, void *pc);
-void runtime∕race·FuncEnter(uintptr racectx, void *pc);
-void runtime∕race·FuncExit(uintptr racectx);
-void runtime∕race·Malloc(uintptr racectx, void *p, uintptr sz, void *pc);
-void runtime∕race·Free(void *p);
-void runtime∕race·GoStart(uintptr racectx, uintptr *chracectx, void *pc);
-void runtime∕race·GoEnd(uintptr racectx);
-void runtime∕race·Acquire(uintptr racectx, void *addr);
-void runtime∕race·Release(uintptr racectx, void *addr);
-void runtime∕race·ReleaseMerge(uintptr racectx, void *addr);
+#include "type.h"
+#include "typekind.h"
+
+// Race runtime functions called via runtime·racecall.
+void __tsan_init(void);
+void __tsan_fini(void);
+void __tsan_map_shadow(void);
+void __tsan_finalizer_goroutine(void);
+void __tsan_go_start(void);
+void __tsan_go_end(void);
+void __tsan_malloc(void);
+void __tsan_acquire(void);
+void __tsan_release(void);
+void __tsan_release_merge(void);
+
+// Mimic what cmd/cgo would do.
+#pragma cgo_import_static __tsan_init
+#pragma cgo_import_static __tsan_fini
+#pragma cgo_import_static __tsan_map_shadow
+#pragma cgo_import_static __tsan_finalizer_goroutine
+#pragma cgo_import_static __tsan_go_start
+#pragma cgo_import_static __tsan_go_end
+#pragma cgo_import_static __tsan_malloc
+#pragma cgo_import_static __tsan_acquire
+#pragma cgo_import_static __tsan_release
+#pragma cgo_import_static __tsan_release_merge
+
+// These are called from race_amd64.s.
+#pragma cgo_import_static __tsan_read
+#pragma cgo_import_static __tsan_read_pc
+#pragma cgo_import_static __tsan_read_range
+#pragma cgo_import_static __tsan_write
+#pragma cgo_import_static __tsan_write_pc
+#pragma cgo_import_static __tsan_write_range
+#pragma cgo_import_static __tsan_func_enter
+#pragma cgo_import_static __tsan_func_exit
extern byte noptrdata[];
extern byte enoptrbss[];
+
+// start/end of heap for race_amd64.s
+uintptr runtime·racearenastart;
+uintptr runtime·racearenaend;
-static bool onstack(uintptr argp);
+void runtime·racefuncenter(void *callpc);
+void runtime·racefuncexit(void);
+void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc);
+void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc);
+void runtime·racesymbolizethunk(void*);
-// We set m->racecall around all calls into race library to trigger fast path in cgocall.
-// Also we increment m->locks to disable preemption and potential rescheduling
-// to ensure that we reset m->racecall on the correct m.
+// racecall allows calling an arbitrary function f of the C race runtime
+// with up to 4 uintptr arguments.
+void runtime·racecall(void(*f)(void), ...);
uintptr
runtime·raceinit(void)
{
uintptr racectx, start, size;
- m->racecall = true;
- m->locks++;
- runtime∕race·Initialize(&racectx);
+ // cgo is required to initialize libc, which is used by race runtime
+ if(!runtime·iscgo)
+ runtime·throw("raceinit: race build must use cgo");
+ runtime·racecall(__tsan_init, &racectx, runtime·racesymbolizethunk);
// Round data segment to page boundaries, because it's used in mmap().
start = (uintptr)noptrdata & ~(PageSize-1);
size = ROUND((uintptr)enoptrbss - start, PageSize);
- runtime∕race·MapShadow((void*)start, size);
- m->locks--;
- m->racecall = false;
+ runtime·racecall(__tsan_map_shadow, start, size);
return racectx;
}
void
runtime·racefini(void)
{
- m->racecall = true;
- m->locks++;
- runtime∕race·Finalize();
- m->locks--;
- m->racecall = false;
+ runtime·racecall(__tsan_fini);
}
void
runtime·racemapshadow(void *addr, uintptr size)
{
- m->racecall = true;
- m->locks++;
- runtime∕race·MapShadow(addr, size);
- m->locks--;
- m->racecall = false;
-}
-
-// Called from instrumented code.
-// If we split stack, getcallerpc() can return runtime·lessstack().
-#pragma textflag NOSPLIT
-void
-runtime·racewrite(uintptr addr)
-{
- if(!onstack(addr)) {
- m->racecall = true;
- m->locks++;
- runtime∕race·Write(g->racectx, (void*)addr, runtime·getcallerpc(&addr));
- m->locks--;
- m->racecall = false;
- }
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racewriterange(uintptr addr, uintptr sz)
-{
- if(!onstack(addr)) {
- m->racecall = true;
- m->locks++;
- runtime∕race·WriteRange(g->racectx, (void*)addr, sz, runtime·getcallerpc(&addr));
- m->locks--;
- m->racecall = false;
- }
-}
-
-// Called from instrumented code.
-// If we split stack, getcallerpc() can return runtime·lessstack().
-#pragma textflag NOSPLIT
-void
-runtime·raceread(uintptr addr)
-{
- if(!onstack(addr)) {
- m->racecall = true;
- m->locks++;
- runtime∕race·Read(g->racectx, (void*)addr, runtime·getcallerpc(&addr));
- m->locks--;
- m->racecall = false;
- }
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racereadrange(uintptr addr, uintptr sz)
-{
- if(!onstack(addr)) {
- m->racecall = true;
- m->locks++;
- runtime∕race·ReadRange(g->racectx, (void*)addr, sz, runtime·getcallerpc(&addr));
- m->locks--;
- m->racecall = false;
- }
-}
-
-// Called from runtime·racefuncenter (assembly).
-#pragma textflag NOSPLIT
-void
-runtime·racefuncenter1(uintptr pc)
-{
- // If the caller PC is lessstack, use slower runtime·callers
- // to walk across the stack split to find the real caller.
- if(pc == (uintptr)runtime·lessstack)
- runtime·callers(2, &pc, 1);
-
- m->racecall = true;
- m->locks++;
- runtime∕race·FuncEnter(g->racectx, (void*)pc);
- m->locks--;
- m->racecall = false;
-}
-
-// Called from instrumented code.
-#pragma textflag NOSPLIT
-void
-runtime·racefuncexit(void)
-{
- m->racecall = true;
- m->locks++;
- runtime∕race·FuncExit(g->racectx);
- m->locks--;
- m->racecall = false;
+ if(runtime·racearenastart == 0)
+ runtime·racearenastart = (uintptr)addr;
+ if(runtime·racearenaend < (uintptr)addr+size)
+ runtime·racearenaend = (uintptr)addr+size;
+ runtime·racecall(__tsan_map_shadow, addr, size);
}
void
runtime·racemalloc(void *p, uintptr sz)
{
- // use m->curg because runtime·stackalloc() is called from g0
- if(m->curg == nil)
- return;
- m->racecall = true;
- m->locks++;
- runtime∕race·Malloc(m->curg->racectx, p, sz, /* unused pc */ 0);
- m->locks--;
- m->racecall = false;
-}
-
-void
-runtime·racefree(void *p)
-{
- m->racecall = true;
- m->locks++;
- runtime∕race·Free(p);
- m->locks--;
- m->racecall = false;
+ runtime·racecall(__tsan_malloc, p, sz);
}
uintptr
@@ -188,96 +106,58 @@ runtime·racegostart(void *pc)
{
uintptr racectx;
- m->racecall = true;
- m->locks++;
- runtime∕race·GoStart(g->racectx, &racectx, pc);
- m->locks--;
- m->racecall = false;
+ runtime·racecall(__tsan_go_start, g->racectx, &racectx, pc);
return racectx;
}
void
runtime·racegoend(void)
{
- m->racecall = true;
- m->locks++;
- runtime∕race·GoEnd(g->racectx);
- m->locks--;
- m->racecall = false;
-}
-
-static void
-memoryaccess(void *addr, uintptr callpc, uintptr pc, bool write)
-{
- uintptr racectx;
-
- if(!onstack((uintptr)addr)) {
- m->racecall = true;
- m->locks++;
- racectx = g->racectx;
- if(callpc) {
- if(callpc == (uintptr)runtime·lessstack)
- runtime·callers(3, &callpc, 1);
- runtime∕race·FuncEnter(racectx, (void*)callpc);
- }
- if(write)
- runtime∕race·Write(racectx, addr, (void*)pc);
- else
- runtime∕race·Read(racectx, addr, (void*)pc);
- if(callpc)
- runtime∕race·FuncExit(racectx);
- m->locks--;
- m->racecall = false;
- }
+ runtime·racecall(__tsan_go_end, g->racectx);
}
void
-runtime·racewritepc(void *addr, void *callpc, void *pc)
+runtime·racewriterangepc(void *addr, uintptr sz, void *callpc, void *pc)
{
- memoryaccess(addr, (uintptr)callpc, (uintptr)pc, true);
+ if(callpc != nil)
+ runtime·racefuncenter(callpc);
+ runtime·racewriterangepc1(addr, sz, pc);
+ if(callpc != nil)
+ runtime·racefuncexit();
}
void
-runtime·racereadpc(void *addr, void *callpc, void *pc)
+runtime·racereadrangepc(void *addr, uintptr sz, void *callpc, void *pc)
{
- memoryaccess(addr, (uintptr)callpc, (uintptr)pc, false);
+ if(callpc != nil)
+ runtime·racefuncenter(callpc);
+ runtime·racereadrangepc1(addr, sz, pc);
+ if(callpc != nil)
+ runtime·racefuncexit();
}
-static void
-rangeaccess(void *addr, uintptr size, uintptr callpc, uintptr pc, bool write)
+void
+runtime·racewriteobjectpc(void *addr, Type *t, void *callpc, void *pc)
{
- uintptr racectx;
+ uint8 kind;
- if(!onstack((uintptr)addr)) {
- m->racecall = true;
- m->locks++;
- racectx = g->racectx;
- if(callpc) {
- if(callpc == (uintptr)runtime·lessstack)
- runtime·callers(3, &callpc, 1);
- runtime∕race·FuncEnter(racectx, (void*)callpc);
- }
- if(write)
- runtime∕race·WriteRange(racectx, addr, size, (void*)pc);
- else
- runtime∕race·ReadRange(racectx, addr, size, (void*)pc);
- if(callpc)
- runtime∕race·FuncExit(racectx);
- m->locks--;
- m->racecall = false;
- }
+ kind = t->kind & ~KindNoPointers;
+ if(kind == KindArray || kind == KindStruct)
+ runtime·racewriterangepc(addr, t->size, callpc, pc);
+ else
+ runtime·racewritepc(addr, callpc, pc);
}
void
-runtime·racewriterangepc(void *addr, uintptr sz, void *callpc, void *pc)
+runtime·racereadobjectpc(void *addr, Type *t, void *callpc, void *pc)
{
- rangeaccess(addr, sz, (uintptr)callpc, (uintptr)pc, true);
-}
+ uint8 kind;
-void
-runtime·racereadrangepc(void *addr, uintptr sz, void *callpc, void *pc)
-{
- rangeaccess(addr, sz, (uintptr)callpc, (uintptr)pc, false);
+ kind = t->kind & ~KindNoPointers;
+ if(kind == KindArray || kind == KindStruct)
+ runtime·racereadrangepc(addr, t->size, callpc, pc);
+ else
+ runtime·racereadpc(addr, callpc, pc);
}
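
The object variants dispatch on type kind: composite types (arrays and structs) are checked as address ranges, everything else as a single access. A hedged Go restatement of that dispatch (names and stubs are illustrative, not the runtime's API):

    package main

    import "unsafe"

    func raceRead(addr unsafe.Pointer)                  {} // stand-in for the
    func raceReadRange(addr unsafe.Pointer, sz uintptr) {} // runtime hooks

    type kind uint8

    const (
        kindArray kind = iota
        kindStruct
        kindOther
    )

    // Composite kinds get range instrumentation, scalars a single access,
    // mirroring racereadobjectpc above.
    func raceReadObject(k kind, addr unsafe.Pointer, size uintptr) {
        if k == kindArray || k == kindStruct {
            raceReadRange(addr, size)
        } else {
            raceRead(addr)
        }
    }

    func main() {}
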
void
@@ -291,11 +171,7 @@ runtime·raceacquireg(G *gp, void *addr)
{
if(g->raceignore)
return;
- m->racecall = true;
- m->locks++;
- runtime∕race·Acquire(gp->racectx, addr);
- m->locks--;
- m->racecall = false;
+ runtime·racecall(__tsan_acquire, gp->racectx, addr);
}
void
@@ -309,11 +185,7 @@ runtime·racereleaseg(G *gp, void *addr)
{
if(g->raceignore)
return;
- m->racecall = true;
- m->locks++;
- runtime∕race·Release(gp->racectx, addr);
- m->locks--;
- m->racecall = false;
+ runtime·racecall(__tsan_release, gp->racectx, addr);
}
void
@@ -327,21 +199,13 @@ runtime·racereleasemergeg(G *gp, void *addr)
{
if(g->raceignore)
return;
- m->racecall = true;
- m->locks++;
- runtime∕race·ReleaseMerge(gp->racectx, addr);
- m->locks--;
- m->racecall = false;
+ runtime·racecall(__tsan_release_merge, gp->racectx, addr);
}
void
runtime·racefingo(void)
{
- m->racecall = true;
- m->locks++;
- runtime∕race·FinalizerGoroutine(g->racectx);
- m->locks--;
- m->racecall = false;
+ runtime·racecall(__tsan_finalizer_goroutine, g->racectx);
}
// func RaceAcquire(addr unsafe.Pointer)
@@ -379,38 +243,6 @@ runtime·RaceSemrelease(uint32 *s)
runtime·semrelease(s);
}
-// func RaceRead(addr unsafe.Pointer)
-#pragma textflag NOSPLIT
-void
-runtime·RaceRead(void *addr)
-{
- memoryaccess(addr, 0, (uintptr)runtime·getcallerpc(&addr), false);
-}
-
-// func RaceWrite(addr unsafe.Pointer)
-#pragma textflag NOSPLIT
-void
-runtime·RaceWrite(void *addr)
-{
- memoryaccess(addr, 0, (uintptr)runtime·getcallerpc(&addr), true);
-}
-
-// func RaceReadRange(addr unsafe.Pointer, len int)
-#pragma textflag NOSPLIT
-void
-runtime·RaceReadRange(void *addr, intgo len)
-{
- rangeaccess(addr, len, 0, (uintptr)runtime·getcallerpc(&addr), false);
-}
-
-// func RaceWriteRange(addr unsafe.Pointer, len int)
-#pragma textflag NOSPLIT
-void
-runtime·RaceWriteRange(void *addr, intgo len)
-{
- rangeaccess(addr, len, 0, (uintptr)runtime·getcallerpc(&addr), true);
-}
-
// func RaceDisable()
void
runtime·RaceDisable(void)
@@ -425,14 +257,36 @@ runtime·RaceEnable(void)
g->raceignore--;
}
-static bool
-onstack(uintptr argp)
+typedef struct SymbolizeContext SymbolizeContext;
+struct SymbolizeContext
+{
+ uintptr pc;
+ int8* func;
+ int8* file;
+ uintptr line;
+ uintptr off;
+ uintptr res;
+};
+
+// Callback from C into Go, runs on g0.
+void
+runtime·racesymbolize(SymbolizeContext *ctx)
{
- // noptrdata, data, bss, noptrbss
- // the layout is in ../../cmd/ld/data.c
- if((byte*)argp >= noptrdata && (byte*)argp < enoptrbss)
- return false;
- if((byte*)argp >= runtime·mheap.arena_start && (byte*)argp < runtime·mheap.arena_used)
- return false;
- return true;
+ Func *f;
+ String file;
+
+ f = runtime·findfunc(ctx->pc);
+ if(f == nil) {
+ ctx->func = "??";
+ ctx->file = "-";
+ ctx->line = 0;
+ ctx->off = ctx->pc;
+ ctx->res = 1;
+ return;
+ }
+ ctx->func = runtime·funcname(f);
+ ctx->line = runtime·funcline(f, ctx->pc, &file);
+ ctx->file = (int8*)file.str; // assume zero-terminated
+ ctx->off = ctx->pc - f->entry;
+ ctx->res = 1;
}
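
All of this machinery is what a -race build links in; end to end, it turns a program like the following deliberate data race (assuming it is saved as racy.go) into a report with stacks symbolized via runtime·racesymbolize:

    // go run -race racy.go
    package main

    import "time"

    func main() {
        x := 0
        go func() { x = 1 }() // unsynchronized write...
        x = 2                 // ...races with this one
        time.Sleep(10 * time.Millisecond)
        _ = x
    }
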
diff --git a/src/pkg/runtime/race.h b/src/pkg/runtime/race.h
index f7aa99dc2..fee31e09f 100644
--- a/src/pkg/runtime/race.h
+++ b/src/pkg/runtime/race.h
@@ -17,13 +17,14 @@ void runtime·racefini(void);
void runtime·racemapshadow(void *addr, uintptr size);
void runtime·racemalloc(void *p, uintptr sz);
-void runtime·racefree(void *p);
uintptr runtime·racegostart(void *pc);
void runtime·racegoend(void);
void runtime·racewritepc(void *addr, void *callpc, void *pc);
void runtime·racereadpc(void *addr, void *callpc, void *pc);
void runtime·racewriterangepc(void *addr, uintptr sz, void *callpc, void *pc);
void runtime·racereadrangepc(void *addr, uintptr sz, void *callpc, void *pc);
+void runtime·racereadobjectpc(void *addr, Type *t, void *callpc, void *pc);
+void runtime·racewriteobjectpc(void *addr, Type *t, void *callpc, void *pc);
void runtime·racefingo(void);
void runtime·raceacquire(void *addr);
void runtime·raceacquireg(G *gp, void *addr);
diff --git a/src/pkg/runtime/race/README b/src/pkg/runtime/race/README
index 0b73bd857..785640607 100644
--- a/src/pkg/runtime/race/README
+++ b/src/pkg/runtime/race/README
@@ -9,4 +9,4 @@ $ ./buildgo.sh
Tested with gcc 4.6.1 and 4.7.0. On Windows it's built with 64-bit MinGW.
-Current runtime is built on rev 191161.
+Current runtime is built on rev 203116.
diff --git a/src/pkg/runtime/race/race.go b/src/pkg/runtime/race/race.go
index 5b44bde83..e53cacf4a 100644
--- a/src/pkg/runtime/race/race.go
+++ b/src/pkg/runtime/race/race.go
@@ -6,116 +6,10 @@
package race
-/*
-void __tsan_init(void **racectx);
-void __tsan_fini(void);
-void __tsan_map_shadow(void *addr, void *size);
-void __tsan_go_start(void *racectx, void **chracectx, void *pc);
-void __tsan_go_end(void *racectx);
-void __tsan_read(void *racectx, void *addr, void *pc);
-void __tsan_write(void *racectx, void *addr, void *pc);
-void __tsan_read_range(void *racectx, void *addr, long sz, long step, void *pc);
-void __tsan_write_range(void *racectx, void *addr, long sz, long step, void *pc);
-void __tsan_func_enter(void *racectx, void *pc);
-void __tsan_func_exit(void *racectx);
-void __tsan_malloc(void *racectx, void *p, long sz, void *pc);
-void __tsan_free(void *p);
-void __tsan_acquire(void *racectx, void *addr);
-void __tsan_release(void *racectx, void *addr);
-void __tsan_release_merge(void *racectx, void *addr);
-void __tsan_finalizer_goroutine(void *racectx);
-*/
-import "C"
-
-import (
- "runtime"
- "unsafe"
-)
-
-func Initialize(racectx *uintptr) {
- C.__tsan_init((*unsafe.Pointer)(unsafe.Pointer(racectx)))
-}
-
-func Finalize() {
- C.__tsan_fini()
-}
-
-func MapShadow(addr, size uintptr) {
- C.__tsan_map_shadow(unsafe.Pointer(addr), unsafe.Pointer(size))
-}
-
-func FinalizerGoroutine(racectx uintptr) {
- C.__tsan_finalizer_goroutine(unsafe.Pointer(racectx))
-}
-
-func Read(racectx uintptr, addr, pc uintptr) {
- C.__tsan_read(unsafe.Pointer(racectx), unsafe.Pointer(addr), unsafe.Pointer(pc))
-}
-
-func Write(racectx uintptr, addr, pc uintptr) {
- C.__tsan_write(unsafe.Pointer(racectx), unsafe.Pointer(addr), unsafe.Pointer(pc))
-}
-
-func ReadRange(racectx uintptr, addr, sz, pc uintptr) {
- C.__tsan_read_range(unsafe.Pointer(racectx), unsafe.Pointer(addr),
- C.long(sz), 0 /*step is unused*/, unsafe.Pointer(pc))
-}
-
-func WriteRange(racectx uintptr, addr, sz, pc uintptr) {
- C.__tsan_write_range(unsafe.Pointer(racectx), unsafe.Pointer(addr),
- C.long(sz), 0 /*step is unused*/, unsafe.Pointer(pc))
-}
+// This file merely ensures that we link in runtime/cgo in the race build;
+// this in turn ensures that the runtime uses pthread_create to create threads.
+// The prebuilt race runtime lives in race_GOOS_GOARCH.syso.
+// Calls to the runtime are done directly from src/pkg/runtime/race.c.
-func FuncEnter(racectx uintptr, pc uintptr) {
- C.__tsan_func_enter(unsafe.Pointer(racectx), unsafe.Pointer(pc))
-}
-
-func FuncExit(racectx uintptr) {
- C.__tsan_func_exit(unsafe.Pointer(racectx))
-}
-
-func Malloc(racectx uintptr, p, sz, pc uintptr) {
- C.__tsan_malloc(unsafe.Pointer(racectx), unsafe.Pointer(p), C.long(sz), unsafe.Pointer(pc))
-}
-
-func Free(p uintptr) {
- C.__tsan_free(unsafe.Pointer(p))
-}
-
-func GoStart(racectx uintptr, chracectx *uintptr, pc uintptr) {
- C.__tsan_go_start(unsafe.Pointer(racectx), (*unsafe.Pointer)(unsafe.Pointer(chracectx)), unsafe.Pointer(pc))
-}
-
-func GoEnd(racectx uintptr) {
- C.__tsan_go_end(unsafe.Pointer(racectx))
-}
-
-func Acquire(racectx uintptr, addr uintptr) {
- C.__tsan_acquire(unsafe.Pointer(racectx), unsafe.Pointer(addr))
-}
-
-func Release(racectx uintptr, addr uintptr) {
- C.__tsan_release(unsafe.Pointer(racectx), unsafe.Pointer(addr))
-}
-
-func ReleaseMerge(racectx uintptr, addr uintptr) {
- C.__tsan_release_merge(unsafe.Pointer(racectx), unsafe.Pointer(addr))
-}
-
-//export __tsan_symbolize
-func __tsan_symbolize(pc uintptr, fun, file **C.char, line, off *C.int) C.int {
- f := runtime.FuncForPC(pc)
- if f == nil {
- *fun = C.CString("??")
- *file = C.CString("-")
- *line = 0
- *off = C.int(pc)
- return 1
- }
- fi, l := f.FileLine(pc)
- *fun = C.CString(f.Name())
- *file = C.CString(fi)
- *line = C.int(l)
- *off = C.int(pc - f.Entry())
- return 1
-}
+// void __race_unused_func(void);
+import "C"
diff --git a/src/pkg/runtime/race/race_darwin_amd64.syso b/src/pkg/runtime/race/race_darwin_amd64.syso
index 96a43c9a9..249a878ef 100644
--- a/src/pkg/runtime/race/race_darwin_amd64.syso
+++ b/src/pkg/runtime/race/race_darwin_amd64.syso
Binary files differ
diff --git a/src/pkg/runtime/race/race_linux_amd64.syso b/src/pkg/runtime/race/race_linux_amd64.syso
index 50bde9648..8120484d4 100644
--- a/src/pkg/runtime/race/race_linux_amd64.syso
+++ b/src/pkg/runtime/race/race_linux_amd64.syso
Binary files differ
diff --git a/src/pkg/runtime/race/race_test.go b/src/pkg/runtime/race/race_test.go
index 4776ae22d..7e0ee866a 100644
--- a/src/pkg/runtime/race/race_test.go
+++ b/src/pkg/runtime/race/race_test.go
@@ -44,7 +44,7 @@ func TestRace(t *testing.T) {
if err != nil {
t.Fatalf("Failed to run tests: %v\n%v", err, string(testOutput))
}
- reader := bufio.NewReader(bytes.NewBuffer(testOutput))
+ reader := bufio.NewReader(bytes.NewReader(testOutput))
funcName := ""
var tsanLog []string
@@ -155,3 +155,18 @@ func runTests() ([]byte, error) {
cmd.Env = append(cmd.Env, `GORACE="suppress_equal_stacks=0 suppress_equal_addresses=0 exitcode=0"`)
return cmd.CombinedOutput()
}
+
+func TestIssue8102(t *testing.T) {
+ // If this compiles with -race, the test passes.
+ type S struct {
+ x interface{}
+ i int
+ }
+ c := make(chan int)
+ a := [2]*int{}
+ for ; ; c <- *a[S{}.i] {
+ if t != nil {
+ break
+ }
+ }
+}
diff --git a/src/pkg/runtime/race/race_windows_amd64.syso b/src/pkg/runtime/race/race_windows_amd64.syso
index 46eb1274f..67db40f21 100644
--- a/src/pkg/runtime/race/race_windows_amd64.syso
+++ b/src/pkg/runtime/race/race_windows_amd64.syso
Binary files differ
diff --git a/src/pkg/runtime/race/testdata/chan_test.go b/src/pkg/runtime/race/testdata/chan_test.go
index 614ba4a4e..4a3d5290f 100644
--- a/src/pkg/runtime/race/testdata/chan_test.go
+++ b/src/pkg/runtime/race/testdata/chan_test.go
@@ -347,6 +347,119 @@ func TestRaceChanSendSelectClose(t *testing.T) {
<-compl
}
+func TestRaceSelectReadWriteAsync(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ c1 := make(chan int, 10)
+ c2 := make(chan int, 10)
+ c3 := make(chan int)
+ c2 <- 1
+ go func() {
+ select {
+ case c1 <- x: // read of x races with...
+ case c3 <- 1:
+ }
+ done <- true
+ }()
+ select {
+ case x = <-c2: // ... write to x here
+ case c3 <- 1:
+ }
+ <-done
+}
+
+func TestRaceSelectReadWriteSync(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ c1 := make(chan int)
+ c2 := make(chan int)
+ c3 := make(chan int)
+ // make c1 and c2 ready for communication
+ go func() {
+ <-c1
+ }()
+ go func() {
+ c2 <- 1
+ }()
+ go func() {
+ select {
+ case c1 <- x: // read of x races with...
+ case c3 <- 1:
+ }
+ done <- true
+ }()
+ select {
+ case x = <-c2: // ... write to x here
+ case c3 <- 1:
+ }
+ <-done
+}
+
+func TestNoRaceSelectReadWriteAsync(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ c1 := make(chan int)
+ c2 := make(chan int)
+ go func() {
+ select {
+ case c1 <- x: // read of x does not race with...
+ case c2 <- 1:
+ }
+ done <- true
+ }()
+ select {
+ case x = <-c1: // ... write to x here
+ case c2 <- 1:
+ }
+ <-done
+}
+
+func TestRaceChanReadWriteAsync(t *testing.T) {
+ done := make(chan bool)
+ c1 := make(chan int, 10)
+ c2 := make(chan int, 10)
+ c2 <- 10
+ x := 0
+ go func() {
+ c1 <- x // read of x races with...
+ done <- true
+ }()
+ x = <-c2 // ... write to x here
+ <-done
+}
+
+func TestRaceChanReadWriteSync(t *testing.T) {
+ done := make(chan bool)
+ c1 := make(chan int)
+ c2 := make(chan int)
+ // make c1 and c2 ready for communication
+ go func() {
+ <-c1
+ }()
+ go func() {
+ c2 <- 10
+ }()
+ x := 0
+ go func() {
+ c1 <- x // read of x races with...
+ done <- true
+ }()
+ x = <-c2 // ... write to x here
+ <-done
+}
+
+func TestNoRaceChanReadWriteAsync(t *testing.T) {
+ done := make(chan bool)
+ c1 := make(chan int, 10)
+ x := 0
+ go func() {
+ c1 <- x // read of x does not race with...
+ done <- true
+ }()
+ x = <-c1 // ... write to x here
+ <-done
+}
+
func TestNoRaceProducerConsumerUnbuffered(t *testing.T) {
type Task struct {
f func()
@@ -454,20 +567,6 @@ func TestRaceChanCloseLen(t *testing.T) {
v = 2
}
-func TestRaceChanSameCell(t *testing.T) {
- c := make(chan int, 1)
- v := 0
- go func() {
- v = 1
- c <- 42
- <-c
- }()
- time.Sleep(1e7)
- c <- 43
- <-c
- _ = v
-}
-
func TestRaceChanCloseSend(t *testing.T) {
compl := make(chan bool, 1)
c := make(chan int, 10)
@@ -478,3 +577,83 @@ func TestRaceChanCloseSend(t *testing.T) {
c <- 0
<-compl
}
+
+func TestNoRaceChanMutex(t *testing.T) {
+ done := make(chan struct{})
+ mtx := make(chan struct{}, 1)
+ data := 0
+ go func() {
+ mtx <- struct{}{}
+ data = 42
+ <-mtx
+ done <- struct{}{}
+ }()
+ mtx <- struct{}{}
+ data = 43
+ <-mtx
+ <-done
+}
+
+func TestNoRaceSelectMutex(t *testing.T) {
+ done := make(chan struct{})
+ mtx := make(chan struct{}, 1)
+ aux := make(chan bool)
+ data := 0
+ go func() {
+ select {
+ case mtx <- struct{}{}:
+ case <-aux:
+ }
+ data = 42
+ select {
+ case <-mtx:
+ case <-aux:
+ }
+ done <- struct{}{}
+ }()
+ select {
+ case mtx <- struct{}{}:
+ case <-aux:
+ }
+ data = 43
+ select {
+ case <-mtx:
+ case <-aux:
+ }
+ <-done
+}
+
+func TestRaceChanSem(t *testing.T) {
+ done := make(chan struct{})
+ mtx := make(chan bool, 2)
+ data := 0
+ go func() {
+ mtx <- true
+ data = 42
+ <-mtx
+ done <- struct{}{}
+ }()
+ mtx <- true
+ data = 43
+ <-mtx
+ <-done
+}
+
+func TestNoRaceChanWaitGroup(t *testing.T) {
+ const N = 10
+ chanWg := make(chan bool, N/2)
+ data := make([]int, N)
+ for i := 0; i < N; i++ {
+ chanWg <- true
+ go func(i int) {
+ data[i] = 42
+ <-chanWg
+ }(i)
+ }
+ for i := 0; i < cap(chanWg); i++ {
+ chanWg <- true
+ }
+ for i := 0; i < N; i++ {
+ _ = data[i]
+ }
+}
diff --git a/src/pkg/runtime/race/testdata/finalizer_test.go b/src/pkg/runtime/race/testdata/finalizer_test.go
index 2b2607689..222cbf67a 100644
--- a/src/pkg/runtime/race/testdata/finalizer_test.go
+++ b/src/pkg/runtime/race/testdata/finalizer_test.go
@@ -14,16 +14,16 @@ import (
func TestNoRaceFin(t *testing.T) {
c := make(chan bool)
go func() {
- x := new(int)
- runtime.SetFinalizer(x, func(x *int) {
- *x = 42
+ x := new(string)
+ runtime.SetFinalizer(x, func(x *string) {
+ *x = "foo"
})
- *x = 66
+ *x = "bar"
c <- true
}()
<-c
runtime.GC()
- time.Sleep(1e8)
+ time.Sleep(100 * time.Millisecond)
}
var finVar struct {
@@ -34,8 +34,8 @@ var finVar struct {
func TestNoRaceFinGlobal(t *testing.T) {
c := make(chan bool)
go func() {
- x := new(int)
- runtime.SetFinalizer(x, func(x *int) {
+ x := new(string)
+ runtime.SetFinalizer(x, func(x *string) {
finVar.Lock()
finVar.cnt++
finVar.Unlock()
@@ -44,7 +44,7 @@ func TestNoRaceFinGlobal(t *testing.T) {
}()
<-c
runtime.GC()
- time.Sleep(1e8)
+ time.Sleep(100 * time.Millisecond)
finVar.Lock()
finVar.cnt++
finVar.Unlock()
@@ -54,14 +54,14 @@ func TestRaceFin(t *testing.T) {
c := make(chan bool)
y := 0
go func() {
- x := new(int)
- runtime.SetFinalizer(x, func(x *int) {
+ x := new(string)
+ runtime.SetFinalizer(x, func(x *string) {
y = 42
})
c <- true
}()
<-c
runtime.GC()
- time.Sleep(1e8)
+ time.Sleep(100 * time.Millisecond)
y = 66
}
diff --git a/src/pkg/runtime/race/testdata/map_test.go b/src/pkg/runtime/race/testdata/map_test.go
index 35db8db69..98e2a5f10 100644
--- a/src/pkg/runtime/race/testdata/map_test.go
+++ b/src/pkg/runtime/race/testdata/map_test.go
@@ -159,3 +159,82 @@ func TestRaceMapVariable3(t *testing.T) {
m = make(map[int]int)
<-ch
}
+
+type Big struct {
+ x [17]int32
+}
+
+func TestRaceMapLookupPartKey(t *testing.T) {
+ k := &Big{}
+ m := make(map[Big]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ k.x[8] = 1
+ ch <- true
+ }()
+ _ = m[*k]
+ <-ch
+}
+
+func TestRaceMapLookupPartKey2(t *testing.T) {
+ k := &Big{}
+ m := make(map[Big]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ k.x[8] = 1
+ ch <- true
+ }()
+ _, _ = m[*k]
+ <-ch
+}
+func TestRaceMapDeletePartKey(t *testing.T) {
+ k := &Big{}
+ m := make(map[Big]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ k.x[8] = 1
+ ch <- true
+ }()
+ delete(m, *k)
+ <-ch
+}
+
+func TestRaceMapInsertPartKey(t *testing.T) {
+ k := &Big{}
+ m := make(map[Big]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ k.x[8] = 1
+ ch <- true
+ }()
+ m[*k] = true
+ <-ch
+}
+
+func TestRaceMapInsertPartVal(t *testing.T) {
+ v := &Big{}
+ m := make(map[int]Big)
+ ch := make(chan bool, 1)
+ go func() {
+ v.x[8] = 1
+ ch <- true
+ }()
+ m[1] = *v
+ <-ch
+}
+
+// Test for issue 7561.
+func TestRaceMapAssignMultipleReturn(t *testing.T) {
+ connect := func() (int, error) { return 42, nil }
+ conns := make(map[int][]int)
+ conns[1] = []int{0}
+ ch := make(chan bool, 1)
+ var err error
+ go func() {
+ conns[1][0], err = connect()
+ ch <- true
+ }()
+ x := conns[1][0]
+ _ = x
+ <-ch
+}
diff --git a/src/pkg/runtime/race/testdata/mop_test.go b/src/pkg/runtime/race/testdata/mop_test.go
index b0b66562c..14591b184 100644
--- a/src/pkg/runtime/race/testdata/mop_test.go
+++ b/src/pkg/runtime/race/testdata/mop_test.go
@@ -1933,3 +1933,25 @@ func TestRaceMethodThunk4(t *testing.T) {
*(*int)(d.Base) = 42
<-done
}
+
+func TestNoRaceTinyAlloc(t *testing.T) {
+ const P = 4
+ const N = 1e6
+ var tinySink *byte
+ done := make(chan bool)
+ for p := 0; p < P; p++ {
+ go func() {
+ for i := 0; i < N; i++ {
+ var b byte
+ if b != 0 {
+ tinySink = &b // make it heap allocated
+ }
+ b = 42
+ }
+ done <- true
+ }()
+ }
+ for p := 0; p < P; p++ {
+ <-done
+ }
+}
diff --git a/src/pkg/runtime/race0.c b/src/pkg/runtime/race0.c
index b74b03583..eddb0be79 100644
--- a/src/pkg/runtime/race0.c
+++ b/src/pkg/runtime/race0.c
@@ -111,12 +111,6 @@ runtime·racemalloc(void *p, uintptr sz)
USED(sz);
}
-void
-runtime·racefree(void *p)
-{
- USED(p);
-}
-
uintptr
runtime·racegostart(void *pc)
{
diff --git a/src/pkg/runtime/race_amd64.s b/src/pkg/runtime/race_amd64.s
index a33b77a50..d60cf899b 100644
--- a/src/pkg/runtime/race_amd64.s
+++ b/src/pkg/runtime/race_amd64.s
@@ -4,13 +4,241 @@
// +build race
+#include "zasm_GOOS_GOARCH.h"
+#include "funcdata.h"
#include "../../cmd/ld/textflag.h"
+// The following thunks allow calling the gcc-compiled race runtime directly
+// from Go code without going all the way through cgo.
+// First, it's much faster (up to 50% speedup for real Go programs).
+// Second, it eliminates race-related special cases from cgocall and scheduler.
+// Third, in the long term it will allow us to remove the cyclic runtime/race dependency on cmd/go.
+
+// A brief recap of the amd64 calling convention.
+// Arguments are passed in DI, SI, DX, CX, R8, R9, the rest is on stack.
+// Callee-saved registers are: BX, BP, R12-R15.
+// SP must be 16-byte aligned.
+// On Windows:
+// Arguments are passed in CX, DX, R8, R9, the rest is on stack.
+// Callee-saved registers are: BX, BP, DI, SI, R12-R15.
+// SP must be 16-byte aligned. Windows also requires "stack-backing" for the 4 register arguments:
+// http://msdn.microsoft.com/en-us/library/ms235286.aspx
+// We do not do this, because it seems to be intended for vararg/unprototyped functions.
+// Gcc-compiled race runtime does not try to use that space.
+
+#ifdef GOOS_windows
+#define RARG0 CX
+#define RARG1 DX
+#define RARG2 R8
+#define RARG3 R9
+#else
+#define RARG0 DI
+#define RARG1 SI
+#define RARG2 DX
+#define RARG3 CX
+#endif
+
+// func runtime·raceread(addr uintptr)
+// Called from instrumented code.
+TEXT runtime·raceread(SB), NOSPLIT, $0-8
+ MOVQ addr+0(FP), RARG1
+ MOVQ (SP), RARG2
+ // void __tsan_read(ThreadState *thr, void *addr, void *pc);
+ MOVQ $__tsan_read(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceRead(addr uintptr)
+TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
+ // This needs to be a tail call, because raceread reads caller pc.
+ JMP runtime·raceread(SB)
+
+// void runtime·racereadpc(void *addr, void *callpc, void *pc)
+TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
+ MOVQ addr+0(FP), RARG1
+ MOVQ callpc+8(FP), RARG2
+ MOVQ pc+16(FP), RARG3
+ // void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVQ $__tsan_read_pc(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·racewrite(addr uintptr)
+// Called from instrumented code.
+TEXT runtime·racewrite(SB), NOSPLIT, $0-8
+ MOVQ addr+0(FP), RARG1
+ MOVQ (SP), RARG2
+ // void __tsan_write(ThreadState *thr, void *addr, void *pc);
+ MOVQ $__tsan_write(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceWrite(addr uintptr)
+TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
+ // This needs to be a tail call, because racewrite reads caller pc.
+ JMP runtime·racewrite(SB)
+
+// void runtime·racewritepc(void *addr, void *callpc, void *pc)
+TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
+ MOVQ addr+0(FP), RARG1
+ MOVQ callpc+8(FP), RARG2
+ MOVQ pc+16(FP), RARG3
+ // void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVQ $__tsan_write_pc(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·racereadrange(addr, size uintptr)
+// Called from instrumented code.
+TEXT runtime·racereadrange(SB), NOSPLIT, $0-16
+ MOVQ addr+0(FP), RARG1
+ MOVQ size+8(FP), RARG2
+ MOVQ (SP), RARG3
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVQ $__tsan_read_range(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceReadRange(addr, size uintptr)
+TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
+ // This needs to be a tail call, because racereadrange reads caller pc.
+ JMP runtime·racereadrange(SB)
+
+// void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
+ MOVQ addr+0(FP), RARG1
+ MOVQ size+8(FP), RARG2
+ MOVQ pc+16(FP), RARG3
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVQ $__tsan_read_range(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·racewriterange(addr, size uintptr)
+// Called from instrumented code.
+TEXT runtime·racewriterange(SB), NOSPLIT, $0-16
+ MOVQ addr+0(FP), RARG1
+ MOVQ size+8(FP), RARG2
+ MOVQ (SP), RARG3
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVQ $__tsan_write_range(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceWriteRange(addr, size uintptr)
+TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
+ // This needs to be a tail call, because racewriterange reads caller pc.
+ JMP runtime·racewriterange(SB)
+
+// void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
+ MOVQ addr+0(FP), RARG1
+ MOVQ size+8(FP), RARG2
+ MOVQ pc+16(FP), RARG3
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVQ $__tsan_write_range(SB), AX
+ JMP racecalladdr<>(SB)
+
+// If addr (RARG1) is out of range, do nothing.
+// Otherwise, setup goroutine context and invoke racecall. Other arguments already set.
+TEXT racecalladdr<>(SB), NOSPLIT, $0-0
+ get_tls(R12)
+ MOVQ g(R12), R14
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ // Check that addr is within [arenastart, arenaend) or within [noptrdata, enoptrbss).
+ CMPQ RARG1, runtime·racearenastart(SB)
+ JB racecalladdr_data
+ CMPQ RARG1, runtime·racearenaend(SB)
+ JB racecalladdr_call
+racecalladdr_data:
+ CMPQ RARG1, $noptrdata(SB)
+ JB racecalladdr_ret
+ CMPQ RARG1, $enoptrbss(SB)
+ JAE racecalladdr_ret
+racecalladdr_call:
+ MOVQ AX, AX // w/o this 6a miscompiles this function
+ JMP racecall<>(SB)
+racecalladdr_ret:
+ RET
+
// func runtime·racefuncenter(pc uintptr)
-TEXT runtime·racefuncenter(SB), NOSPLIT, $16-8
- MOVQ DX, saved-8(SP) // save function entry context (for closures)
- MOVQ pc+0(FP), DX
- MOVQ DX, arg-16(SP)
- CALL runtime·racefuncenter1(SB)
- MOVQ saved-8(SP), DX
+// Called from instrumented code.
+TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
+ MOVQ DX, R15 // save function entry context (for closures)
+ get_tls(R12)
+ MOVQ g(R12), R14
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ MOVQ callpc+0(FP), RARG1
+ // void __tsan_func_enter(ThreadState *thr, void *pc);
+ MOVQ $__tsan_func_enter(SB), AX
+ CALL racecall<>(SB)
+ MOVQ R15, DX // restore function entry context
+ RET
+
+// func runtime·racefuncexit()
+// Called from instrumented code.
+TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
+ get_tls(R12)
+ MOVQ g(R12), R14
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ // void __tsan_func_exit(ThreadState *thr);
+ MOVQ $__tsan_func_exit(SB), AX
+ JMP racecall<>(SB)
+
+// void runtime·racecall(void(*f)(...), ...)
+// Calls C function f from race runtime and passes up to 4 arguments to it.
+// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
+TEXT runtime·racecall(SB), NOSPLIT, $0-0
+ MOVQ fn+0(FP), AX
+ MOVQ arg0+8(FP), RARG0
+ MOVQ arg1+16(FP), RARG1
+ MOVQ arg2+24(FP), RARG2
+ MOVQ arg3+32(FP), RARG3
+ JMP racecall<>(SB)
+
+// Switches SP to g0 stack and calls (AX). Arguments already set.
+TEXT racecall<>(SB), NOSPLIT, $0-0
+ get_tls(R12)
+ MOVQ m(R12), R13
+ MOVQ g(R12), R14
+ // Switch to g0 stack.
+ MOVQ SP, R12 // callee-saved, preserved across the CALL
+ MOVQ m_g0(R13), R10
+ CMPQ R10, R14
+ JE racecall_cont // already on g0
+ MOVQ (g_sched+gobuf_sp)(R10), SP
+racecall_cont:
+ ANDQ $~15, SP // alignment for gcc ABI
+ CALL AX
+ MOVQ R12, SP
+ RET
+
+// C->Go callback thunk that allows C code to call runtime·racesymbolize.
+// The direct Go->C race call has only switched SP; finish the g->g0 switch by setting the correct g.
+// The overall effect of the Go->C->Go call chain is similar to that of mcall.
+TEXT runtime·racesymbolizethunk(SB), NOSPLIT, $56-8
+ // Save callee-saved registers; Go code will not preserve them.
+ // This is a superset of the darwin/linux/windows callee-saved registers.
+ PUSHQ BX
+ PUSHQ BP
+ PUSHQ DI
+ PUSHQ SI
+ PUSHQ R12
+ PUSHQ R13
+ PUSHQ R14
+ PUSHQ R15
+ // Set g = g0.
+ get_tls(R12)
+ MOVQ m(R12), R13
+ MOVQ m_g0(R13), R14
+ MOVQ R14, g(R12) // g = m->g0
+ MOVQ RARG0, 0(SP) // func arg
+ CALL runtime·racesymbolize(SB)
+ // All registers are smashed after Go code, reload.
+ get_tls(R12)
+ MOVQ m(R12), R13
+ MOVQ m_curg(R13), R14
+ MOVQ R14, g(R12) // g = m->curg
+ // Restore callee-saved registers.
+ POPQ R15
+ POPQ R14
+ POPQ R13
+ POPQ R12
+ POPQ SI
+ POPQ DI
+ POPQ BP
+ POPQ BX
RET
diff --git a/src/pkg/runtime/rdebug.goc b/src/pkg/runtime/rdebug.goc
new file mode 100644
index 000000000..042b30ace
--- /dev/null
+++ b/src/pkg/runtime/rdebug.goc
@@ -0,0 +1,27 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime∕debug
+#include "runtime.h"
+#include "arch_GOARCH.h"
+#include "malloc.h"
+#include "stack.h"
+
+func setMaxStack(in int) (out int) {
+ out = runtime·maxstacksize;
+ runtime·maxstacksize = in;
+}
+
+func setGCPercent(in int) (out int) {
+ out = runtime·setgcpercent(in);
+}
+
+func setMaxThreads(in int) (out int) {
+ out = runtime·setmaxthreads(in);
+}
+
+func SetPanicOnFault(enabled bool) (old bool) {
+ old = g->paniconfault;
+ g->paniconfault = enabled;
+}
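These stubs surface as the exported setters in package runtime/debug. A brief usage sketch (the values chosen here are arbitrary):

	package main

	import (
		"fmt"
		"runtime/debug"
	)

	func main() {
		oldGC := debug.SetGCPercent(50) // collect once heap grows 50% past the live set
		fmt.Println("previous GOGC:", oldGC)

		oldStack := debug.SetMaxStack(64 << 20) // cap a single goroutine stack at 64 MB
		fmt.Println("previous stack limit:", oldStack)

		oldFault := debug.SetPanicOnFault(true) // unexpected faults panic instead of crashing
		defer debug.SetPanicOnFault(oldFault)   // restore the old setting on exit
	}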
diff --git a/src/pkg/runtime/rt0_freebsd_arm.s b/src/pkg/runtime/rt0_freebsd_arm.s
index d11087639..56219f899 100644
--- a/src/pkg/runtime/rt0_freebsd_arm.s
+++ b/src/pkg/runtime/rt0_freebsd_arm.s
@@ -11,3 +11,8 @@ TEXT _rt0_arm_freebsd(SB),NOSPLIT,$-4
MOVW $4(R13), R1 // argv
MOVM.DB.W [R0-R1], (R13)
B _rt0_go(SB)
+
+TEXT main(SB),NOSPLIT,$-4
+ MOVM.DB.W [R0-R1], (R13)
+ MOVW $_rt0_go(SB), R4
+ B (R4)
diff --git a/src/pkg/runtime/rt0_nacl_386.s b/src/pkg/runtime/rt0_nacl_386.s
new file mode 100644
index 000000000..8b713548f
--- /dev/null
+++ b/src/pkg/runtime/rt0_nacl_386.s
@@ -0,0 +1,22 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../../cmd/ld/textflag.h"
+
+// NaCl entry has:
+// 0(FP) - arg block == SP+8
+// 4(FP) - cleanup function pointer, always 0
+// 8(FP) - envc
+// 12(FP) - argc
+// 16(FP) - argv, then 0, then envv, then 0, then auxv
+TEXT _rt0_386_nacl(SB),NOSPLIT,$8
+ MOVL argc+12(FP), AX
+ LEAL argv+16(FP), BX
+ MOVL AX, 0(SP)
+ MOVL BX, 4(SP)
+ CALL main(SB)
+ INT $3
+
+TEXT main(SB),NOSPLIT,$0
+ JMP _rt0_go(SB)
diff --git a/src/pkg/runtime/rt0_nacl_amd64p32.s b/src/pkg/runtime/rt0_nacl_amd64p32.s
new file mode 100644
index 000000000..502d2e2bf
--- /dev/null
+++ b/src/pkg/runtime/rt0_nacl_amd64p32.s
@@ -0,0 +1,30 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../../cmd/ld/textflag.h"
+
+// NaCl entry on 32-bit x86 has DI pointing at the arg block, which contains:
+//
+// 0(DI) - cleanup function pointer, always 0
+// 4(DI) - envc
+// 8(DI) - argc
+// 12(DI) - argv, then 0, then envv, then 0, then auxv
+// NaCl entry here is almost the same, except that there
+// is no saved caller PC, so 0(FP) is -8(FP) and so on.
+TEXT _rt0_amd64p32_nacl(SB),NOSPLIT,$16
+ MOVL DI, 0(SP)
+ CALL runtime·nacl_sysinfo(SB)
+ MOVL 0(SP), DI
+ MOVL 8(DI), AX
+ LEAL 12(DI), BX
+ MOVL AX, 0(SP)
+ MOVL BX, 4(SP)
+ CALL main(SB)
+ INT $3
+
+TEXT main(SB),NOSPLIT,$0
+ // Uncomment for fake time like on Go Playground.
+ //MOVQ $1257894000000000000, AX
+ //MOVQ AX, runtime·timens(SB)
+ JMP _rt0_go(SB)
diff --git a/src/pkg/runtime/rt0_solaris_amd64.s b/src/pkg/runtime/rt0_solaris_amd64.s
new file mode 100644
index 000000000..4aca991f0
--- /dev/null
+++ b/src/pkg/runtime/rt0_solaris_amd64.s
@@ -0,0 +1,18 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../../cmd/ld/textflag.h"
+
+TEXT _rt0_amd64_solaris(SB),NOSPLIT,$-8
+ LEAQ 8(SP), SI // argv
+ MOVQ 0(SP), DI // argc
+ MOVQ $main(SB), AX
+ JMP AX
+
+TEXT main(SB),NOSPLIT,$-8
+ MOVQ $_rt0_go(SB), AX
+ JMP AX
+
+DATA runtime·issolaris(SB)/4, $1
+GLOBL runtime·issolaris(SB), $4
diff --git a/src/pkg/runtime/runtime-gdb.py b/src/pkg/runtime/runtime-gdb.py
index e704f4c4b..eedac7cf4 100644
--- a/src/pkg/runtime/runtime-gdb.py
+++ b/src/pkg/runtime/runtime-gdb.py
@@ -15,10 +15,14 @@ path to this file based on the path to the runtime package.
# circumventing the pretty print triggering.
-import sys, re
-
-print >>sys.stderr, "Loading Go Runtime support."
-
+from __future__ import print_function
+import re
+import sys
+
+print("Loading Go Runtime support.", file=sys.stderr)
+# http://python3porting.com/differences.html
+if sys.version > '3':
+ xrange = range
# allow to manually reload while developing
goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
goobjfile.pretty_printers = []
@@ -27,6 +31,7 @@ goobjfile.pretty_printers = []
# Pretty Printers
#
+
class StringTypePrinter:
"Pretty print Go strings."
@@ -61,8 +66,8 @@ class SliceTypePrinter:
if self.val["len"] > self.val["cap"]:
return
ptr = self.val["array"]
- for idx in range(self.val["len"]):
- yield ('[%d]' % idx, (ptr + idx).dereference())
+ for idx in range(int(self.val["len"])):
+ yield ('[{0}]'.format(idx), (ptr + idx).dereference())
class MapTypePrinter:
@@ -72,7 +77,7 @@ class MapTypePrinter:
to inspect their contents with this pretty printer.
"""
- pattern = re.compile(r'^struct hash<.*>$')
+ pattern = re.compile(r'^map\[.*\].*$')
def __init__(self, val):
self.val = val
@@ -90,14 +95,15 @@ class MapTypePrinter:
flags = self.val['flags']
inttype = self.val['hash0'].type
cnt = 0
- for bucket in xrange(2 ** B):
+ for bucket in xrange(2 ** int(B)):
bp = buckets + bucket
if oldbuckets:
oldbucket = bucket & (2 ** (B - 1) - 1)
oldbp = oldbuckets + oldbucket
oldb = oldbp.dereference()
- if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
- if bucket >= 2 ** (B - 1): continue # already did old bucket
+ if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
+ if bucket >= 2 ** (B - 1):
+ continue # already did old bucket
bp = oldbp
while bp:
b = bp.dereference()
@@ -109,11 +115,12 @@ class MapTypePrinter:
k = k.dereference()
if flags & 2:
v = v.dereference()
- yield '%d' % cnt, k
- yield '%d' % (cnt + 1), v
+ yield str(cnt), k
+ yield str(cnt + 1), v
cnt += 2
bp = b['overflow']
+
class ChanTypePrinter:
"""Pretty print chan[T] types.
@@ -138,7 +145,7 @@ class ChanTypePrinter:
ptr = (self.val.address + 1).cast(et.pointer())
for i in range(self.val["qcount"]):
j = (self.val["recvx"] + i) % self.val["dataqsiz"]
- yield ('[%d]' % i, (ptr + j).dereference())
+ yield ('[{0}]'.format(i), (ptr + j).dereference())
#
@@ -150,11 +157,11 @@ def makematcher(klass):
try:
if klass.pattern.match(str(val.type)):
return klass(val)
- except:
+ except Exception:
pass
return matcher
-goobjfile.pretty_printers.extend([makematcher(k) for k in vars().values() if hasattr(k, 'pattern')])
+goobjfile.pretty_printers.extend([makematcher(var) for var in vars().values() if hasattr(var, 'pattern')])
#
# For reference, this is what we're trying to do:
@@ -169,34 +176,35 @@ goobjfile.pretty_printers.extend([makematcher(k) for k in vars().values() if has
def is_iface(val):
try:
- return str(val['tab'].type) == "struct runtime.itab *" \
- and str(val['data'].type) == "void *"
- except:
+ return str(val['tab'].type) == "struct runtime.itab *" and str(val['data'].type) == "void *"
+ except gdb.error:
pass
+
def is_eface(val):
try:
- return str(val['_type'].type) == "struct runtime._type *" \
- and str(val['data'].type) == "void *"
- except:
+ return str(val['_type'].type) == "struct runtime._type *" and str(val['data'].type) == "void *"
+ except gdb.error:
pass
+
def lookup_type(name):
try:
return gdb.lookup_type(name)
- except:
+ except gdb.error:
pass
try:
return gdb.lookup_type('struct ' + name)
- except:
+ except gdb.error:
pass
try:
return gdb.lookup_type('struct ' + name[1:]).pointer()
- except:
+ except gdb.error:
pass
_rctp_type = gdb.lookup_type("struct runtime.rtype").pointer()
+
def iface_commontype(obj):
if is_iface(obj):
go_type_ptr = obj['tab']['_type']
@@ -204,9 +212,9 @@ def iface_commontype(obj):
go_type_ptr = obj['_type']
else:
return
-
+
return go_type_ptr.cast(_rctp_type).dereference()
-
+
def iface_dtype(obj):
"Decode type of the data field of an eface or iface struct."
@@ -221,7 +229,7 @@ def iface_dtype(obj):
dynamic_gdb_type = lookup_type(dtype_name)
if dynamic_gdb_type is None:
return
-
+
type_size = int(dynamic_go_type['size'])
uintptr_size = int(dynamic_go_type['size'].type.sizeof) # size is itself an uintptr
if type_size > uintptr_size:
@@ -229,6 +237,7 @@ def iface_dtype(obj):
return dynamic_gdb_type
+
def iface_dtype_name(obj):
"Decode type name of the data field of an eface or iface struct."
@@ -254,15 +263,15 @@ class IfacePrinter:
return 0x0
try:
dtype = iface_dtype(self.val)
- except:
+ except Exception:
return "<bad dynamic type>"
if dtype is None: # trouble looking up, print something reasonable
- return "(%s)%s" % (iface_dtype_name(self.val), self.val['data'])
+ return "({0}){0}".format(iface_dtype_name(self.val), self.val['data'])
try:
return self.val['data'].cast(dtype).dereference()
- except:
+ except Exception:
pass
return self.val['data'].cast(dtype)
@@ -277,16 +286,14 @@ goobjfile.pretty_printers.append(ifacematcher)
# Convenience Functions
#
+
class GoLenFunc(gdb.Function):
"Length of strings, slices, maps or channels"
- how = ((StringTypePrinter, 'len'),
- (SliceTypePrinter, 'len'),
- (MapTypePrinter, 'count'),
- (ChanTypePrinter, 'qcount'))
+ how = ((StringTypePrinter, 'len'), (SliceTypePrinter, 'len'), (MapTypePrinter, 'count'), (ChanTypePrinter, 'qcount'))
def __init__(self):
- super(GoLenFunc, self).__init__("len")
+ gdb.Function.__init__(self, "len")
def invoke(self, obj):
typename = str(obj.type)
@@ -294,14 +301,14 @@ class GoLenFunc(gdb.Function):
if klass.pattern.match(typename):
return obj[fld]
+
class GoCapFunc(gdb.Function):
"Capacity of slices or channels"
- how = ((SliceTypePrinter, 'cap'),
- (ChanTypePrinter, 'dataqsiz'))
+ how = ((SliceTypePrinter, 'cap'), (ChanTypePrinter, 'dataqsiz'))
def __init__(self):
- super(GoCapFunc, self).__init__("cap")
+ gdb.Function.__init__(self, "cap")
def invoke(self, obj):
typename = str(obj.type)
@@ -309,6 +316,7 @@ class GoCapFunc(gdb.Function):
if klass.pattern.match(typename):
return obj[fld]
+
class DTypeFunc(gdb.Function):
"""Cast Interface values to their dynamic type.
@@ -316,12 +324,12 @@ class DTypeFunc(gdb.Function):
"""
def __init__(self):
- super(DTypeFunc, self).__init__("dtype")
+ gdb.Function.__init__(self, "dtype")
def invoke(self, obj):
try:
return obj['data'].cast(iface_dtype(obj))
- except:
+ except gdb.error:
pass
return obj
@@ -331,6 +339,7 @@ class DTypeFunc(gdb.Function):
sts = ('idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')
+
def linked_list(ptr, linkfield):
while ptr:
yield ptr
@@ -341,29 +350,47 @@ class GoroutinesCmd(gdb.Command):
"List all goroutines."
def __init__(self):
- super(GoroutinesCmd, self).__init__("info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
+ gdb.Command.__init__(self, "info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
- def invoke(self, arg, from_tty):
+ def invoke(self, _arg, _from_tty):
# args = gdb.string_to_argv(arg)
vp = gdb.lookup_type('void').pointer()
for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
- if ptr['status'] == 6: # 'gdead'
+ if ptr['status'] == 6: # 'gdead'
continue
s = ' '
if ptr['m']:
s = '*'
pc = ptr['sched']['pc'].cast(vp)
- sp = ptr['sched']['sp'].cast(vp)
- blk = gdb.block_for_pc(long((pc)))
- print s, ptr['goid'], "%8s" % sts[long((ptr['status']))], blk.function
+ # Python 2 will not cast pc (type void*) to an int cleanly;
+ # instead, both Python 2 and Python 3 can work with the hex string
+ # representation of the void pointer, which we parse back into an int.
+ # A plain int(pc) will not work.
+ try:
+ # Python 3 / newer versions of gdb
+ pc = int(pc)
+ except gdb.error:
+ pc = int(str(pc), 16)
+ blk = gdb.block_for_pc(pc)
+ print(s, ptr['goid'], "{0:8s}".format(sts[int(ptr['status'])]), blk.function)
+
def find_goroutine(goid):
+ """
+ find_goroutine attempts to find the goroutine identified by goid.
+ It returns a tuple of gdb.Value objects representing the program
+ counter and stack pointer for the goroutine.
+
+ @param int goid
+
+ @return tuple (gdb.Value, gdb.Value)
+ """
vp = gdb.lookup_type('void').pointer()
for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
- if ptr['status'] == 6: # 'gdead'
+ if ptr['status'] == 6: # 'gdead'
continue
if ptr['goid'] == goid:
- return [ptr['sched'][x].cast(vp) for x in 'pc', 'sp']
+ return (ptr['sched'][x].cast(vp) for x in ('pc', 'sp'))
return None, None
@@ -380,20 +407,25 @@ class GoroutineCmd(gdb.Command):
"""
def __init__(self):
- super(GoroutineCmd, self).__init__("goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
+ gdb.Command.__init__(self, "goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
- def invoke(self, arg, from_tty):
+ def invoke(self, arg, _from_tty):
goid, cmd = arg.split(None, 1)
goid = gdb.parse_and_eval(goid)
pc, sp = find_goroutine(int(goid))
if not pc:
- print "No such goroutine: ", goid
+ print("No such goroutine: ", goid)
return
+ try:
+ # Python 3 / newer versions of gdb
+ pc = int(pc)
+ except gdb.error:
+ pc = int(str(pc), 16)
save_frame = gdb.selected_frame()
gdb.parse_and_eval('$save_pc = $pc')
gdb.parse_and_eval('$save_sp = $sp')
- gdb.parse_and_eval('$pc = 0x%x' % long(pc))
- gdb.parse_and_eval('$sp = 0x%x' % long(sp))
+ gdb.parse_and_eval('$pc = {0}'.format(str(pc)))
+ gdb.parse_and_eval('$sp = {0}'.format(str(sp)))
try:
gdb.execute(cmd)
finally:
@@ -406,31 +438,33 @@ class GoIfaceCmd(gdb.Command):
"Print Static and dynamic interface types"
def __init__(self):
- super(GoIfaceCmd, self).__init__("iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
+ gdb.Command.__init__(self, "iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
- def invoke(self, arg, from_tty):
+ def invoke(self, arg, _from_tty):
for obj in gdb.string_to_argv(arg):
try:
#TODO fix quoting for qualified variable names
- obj = gdb.parse_and_eval("%s" % obj)
- except Exception, e:
- print "Can't parse ", obj, ": ", e
+ obj = gdb.parse_and_eval(str(obj))
+ except Exception as e:
+ print("Can't parse ", obj, ": ", e)
continue
if obj['data'] == 0:
dtype = "nil"
else:
dtype = iface_dtype(obj)
-
+
if dtype is None:
- print "Not an interface: ", obj.type
+ print("Not an interface: ", obj.type)
continue
- print "%s: %s" % (obj.type, dtype)
+ print("{0}: {1}".format(obj.type, dtype))
# TODO: print interface's methods and dynamic type's func pointers thereof.
-#rsc: "to find the number of entries in the itab's Fn field look at itab.inter->numMethods
-#i am sure i have the names wrong but look at the interface type and its method count"
+#rsc: "to find the number of entries in the itab's Fn field look at
+# itab.inter->numMethods
+# i am sure i have the names wrong but look at the interface type
+# and its method count"
# so Itype will start with a commontype which has kind = interface
#
diff --git a/src/pkg/runtime/runtime.c b/src/pkg/runtime/runtime.c
index ab9fed805..3a4f7199e 100644
--- a/src/pkg/runtime/runtime.c
+++ b/src/pkg/runtime/runtime.c
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
#include "runtime.h"
+#include "stack.h"
#include "arch_GOARCH.h"
#include "../../cmd/ld/textflag.h"
@@ -10,13 +11,12 @@ enum {
maxround = sizeof(uintptr),
};
-/*
- * We assume that all architectures turn faults and the like
- * into apparent calls to runtime.sigpanic. If we see a "call"
- * to runtime.sigpanic, we do not back up the PC to find the
- * line number of the CALL instruction, because there is no CALL.
- */
-void runtime·sigpanic(void);
+// Keep a cached value to make gotraceback fast,
+// since we call it on every call to gentraceback.
+// The cached value is a uint32 in which the low bit
+// is the "crash" setting and the top 31 bits are the
+// gotraceback value.
+static uint32 traceback_cache = ~(uint32)0;
// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
@@ -28,18 +28,28 @@ int32
runtime·gotraceback(bool *crash)
{
byte *p;
+ uint32 x;
if(crash != nil)
*crash = false;
- p = runtime·getenv("GOTRACEBACK");
- if(p == nil || p[0] == '\0')
- return 1; // default is on
- if(runtime·strcmp(p, (byte*)"crash") == 0) {
- if(crash != nil)
- *crash = true;
- return 2; // extra information
+ if(m->traceback != 0)
+ return m->traceback;
+ x = runtime·atomicload(&traceback_cache);
+ if(x == ~(uint32)0) {
+ p = runtime·getenv("GOTRACEBACK");
+ if(p == nil)
+ p = (byte*)"";
+ if(p[0] == '\0')
+ x = 1<<1;
+ else if(runtime·strcmp(p, (byte*)"crash") == 0)
+ x = (2<<1) | 1;
+ else
+ x = runtime·atoi(p)<<1;
+ runtime·atomicstore(&traceback_cache, x);
}
- return runtime·atoi(p);
+ if(crash != nil)
+ *crash = x&1;
+ return x>>1;
}
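The cached encoding is worth spelling out: a single uint32 holds both settings, with the crash flag in bit 0 and the traceback level in bits 1-31, so one atomic load recovers both. A sketch of the packing in Go:

	package main

	import "fmt"

	// pack mirrors the encoding above: GOTRACEBACK=crash stores (2<<1)|1,
	// an unset variable stores 1<<1, and a numeric value stores value<<1.
	func pack(level uint32, crash bool) uint32 {
		x := level << 1
		if crash {
			x |= 1
		}
		return x
	}

	func unpack(x uint32) (level uint32, crash bool) {
		return x >> 1, x&1 != 0
	}

	func main() {
		x := pack(2, true)     // GOTRACEBACK=crash
		fmt.Println(unpack(x)) // 2 true
	}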
int32
@@ -87,6 +97,7 @@ runtime·args(int32 c, uint8 **v)
}
int32 runtime·isplan9;
+int32 runtime·issolaris;
int32 runtime·iswindows;
// Information about what cpu features are available.
@@ -127,16 +138,8 @@ runtime·goenvs_unix(void)
syscall·envs.array = (byte*)s;
syscall·envs.len = n;
syscall·envs.cap = n;
-}
-void
-runtime·getgoroot(String out)
-{
- byte *p;
-
- p = runtime·getenv("GOROOT");
- out = runtime·gostringnocopy(p);
- FLUSH(&out);
+ traceback_cache = ~(uint32)0;
}
int32
@@ -273,62 +276,9 @@ runtime·check(void)
runtime·throw("float32nan3");
TestAtomic64();
-}
-
-void
-runtime·Caller(intgo skip, uintptr retpc, String retfile, intgo retline, bool retbool)
-{
- Func *f, *g;
- uintptr pc;
- uintptr rpc[2];
-
- /*
- * Ask for two PCs: the one we were asked for
- * and what it called, so that we can see if it
- * "called" sigpanic.
- */
- retpc = 0;
- if(runtime·callers(1+skip-1, rpc, 2) < 2) {
- retfile = runtime·emptystring;
- retline = 0;
- retbool = false;
- } else if((f = runtime·findfunc(rpc[1])) == nil) {
- retfile = runtime·emptystring;
- retline = 0;
- retbool = true; // have retpc at least
- } else {
- retpc = rpc[1];
- pc = retpc;
- g = runtime·findfunc(rpc[0]);
- if(pc > f->entry && (g == nil || g->entry != (uintptr)runtime·sigpanic))
- pc--;
- retline = runtime·funcline(f, pc, &retfile);
- retbool = true;
- }
- FLUSH(&retpc);
- FLUSH(&retfile);
- FLUSH(&retline);
- FLUSH(&retbool);
-}
-void
-runtime·Callers(intgo skip, Slice pc, intgo retn)
-{
- // runtime.callers uses pc.array==nil as a signal
- // to print a stack trace. Pick off 0-length pc here
- // so that we don't let a nil pc slice get to it.
- if(pc.len == 0)
- retn = 0;
- else
- retn = runtime·callers(skip, (uintptr*)pc.array, pc.len);
- FLUSH(&retn);
-}
-
-void
-runtime·FuncForPC(uintptr pc, void *retf)
-{
- retf = runtime·findfunc(pc);
- FLUSH(&retf);
+ if(FixedStack != runtime·round2(FixedStack))
+ runtime·throw("FixedStack is not power-of-2");
}
uint32
@@ -374,22 +324,18 @@ runtime·tickspersecond(void)
return res;
}
-void
-runtime∕pprof·runtime_cyclesPerSecond(int64 res)
-{
- res = runtime·tickspersecond();
- FLUSH(&res);
-}
-
DebugVars runtime·debug;
static struct {
int8* name;
int32* value;
} dbgvar[] = {
+ {"allocfreetrace", &runtime·debug.allocfreetrace},
+ {"efence", &runtime·debug.efence},
{"gctrace", &runtime·debug.gctrace},
- {"schedtrace", &runtime·debug.schedtrace},
+ {"gcdead", &runtime·debug.gcdead},
{"scheddetail", &runtime·debug.scheddetail},
+ {"schedtrace", &runtime·debug.schedtrace},
};
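GODEBUG is matched against this table as a comma-separated list of name=value pairs (e.g. GODEBUG=gctrace=1,schedtrace=1000). A hedged Go sketch of the matching loop, not the runtime's actual parser:

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func parseDebugVars(godebug string, vars map[string]*int32) {
		for _, kv := range strings.Split(godebug, ",") {
			name, val, ok := strings.Cut(kv, "=")
			if !ok {
				continue
			}
			if p, found := vars[name]; found {
				n, _ := strconv.Atoi(val)
				*p = int32(n)
			}
		}
	}

	func main() {
		var gctrace, schedtrace int32
		parseDebugVars("gctrace=1,schedtrace=1000", map[string]*int32{
			"gctrace":    &gctrace,
			"schedtrace": &schedtrace,
		})
		fmt.Println(gctrace, schedtrace) // 1 1000
	}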
void
diff --git a/src/pkg/runtime/runtime.h b/src/pkg/runtime/runtime.h
index f7c2adb12..511550378 100644
--- a/src/pkg/runtime/runtime.h
+++ b/src/pkg/runtime/runtime.h
@@ -28,6 +28,12 @@ typedef int32 intgo; // Go's int
typedef uint32 uintgo; // Go's uint
#endif
+#ifdef _64BITREG
+typedef uint64 uintreg;
+#else
+typedef uint32 uintreg;
+#endif
+
/*
* get rid of C types
* the / / / forces a syntax error immediately,
@@ -66,17 +72,17 @@ typedef struct Itab Itab;
typedef struct InterfaceType InterfaceType;
typedef struct Eface Eface;
typedef struct Type Type;
+typedef struct PtrType PtrType;
typedef struct ChanType ChanType;
typedef struct MapType MapType;
typedef struct Defer Defer;
-typedef struct DeferChunk DeferChunk;
typedef struct Panic Panic;
typedef struct Hmap Hmap;
+typedef struct Hiter Hiter;
typedef struct Hchan Hchan;
typedef struct Complex64 Complex64;
typedef struct Complex128 Complex128;
-typedef struct WinCall WinCall;
-typedef struct SEH SEH;
+typedef struct LibCall LibCall;
typedef struct WinCallbackContext WinCallbackContext;
typedef struct Timers Timers;
typedef struct Timer Timer;
@@ -93,10 +99,10 @@ typedef struct DebugVars DebugVars;
*
* "extern register" is a special storage class implemented by 6c, 8c, etc.
* On the ARM, it is an actual register; elsewhere it is a slot in thread-
- * local storage indexed by a segment register. See zasmhdr in
+ * local storage indexed by a pseudo-register TLS. See zasmhdr in
* src/cmd/dist/buildruntime.c for details, and be aware that the linker may
* make further OS-specific changes to the compiler's output. For example,
- * 6l/linux rewrites 0(GS) as -16(FS).
+ * 6l/linux rewrites 0(TLS) as -16(FS).
*
* Every C file linked into a Go program must include runtime.h so that the
* C compiler (6c, 8c, etc.) knows to avoid other uses of these dedicated
@@ -208,8 +214,8 @@ struct Gobuf
uintptr sp;
uintptr pc;
G* g;
- uintptr ret;
void* ctxt;
+ uintreg ret;
uintptr lr;
};
struct GCStats
@@ -223,7 +229,7 @@ struct GCStats
uint64 nsleep;
};
-struct WinCall
+struct LibCall
{
void (*fn)(void*);
uintptr n; // number of parameters
@@ -232,17 +238,14 @@ struct WinCall
uintptr r2;
uintptr err; // error number
};
-struct SEH
-{
- void* prev;
- void* handler;
-};
+
// describes how to handle callback
struct WinCallbackContext
{
void* gobody; // Go function to call
uintptr argsize; // callback arguments size (in bytes)
uintptr restorestack; // adjust stack on return by (in bytes) (386 only)
+ bool cleanstack;
};
struct G
@@ -251,7 +254,6 @@ struct G
uintptr stackguard0; // cannot move - also known to linker, libmach, runtime/cgo
uintptr stackbase; // cannot move - also known to libmach, runtime/cgo
uint32 panicwrap; // cannot move - also known to linker
- uint32 selgen; // valid sudog pointer
Defer* defer;
Panic* panic;
Gobuf sched;
@@ -262,24 +264,23 @@ struct G
uintptr stackguard; // same as stackguard0, but not set to StackPreempt
uintptr stack0;
uintptr stacksize;
- G* alllink; // on allg
void* param; // passed parameter on wakeup
int16 status;
int64 goid;
+ int64 waitsince; // approx time when the G became blocked
int8* waitreason; // if status==Gwaiting
G* schedlink;
bool ispanic;
bool issystem; // do not output in stack dump
bool isbackground; // ignore in deadlock detector
bool preempt; // preemption signal, duplicates stackguard0 = StackPreempt
+ bool paniconfault; // panic (instead of crash) on unexpected fault address
int8 raceignore; // ignore race detection events
M* m; // for debuggers, but offset not hard-coded
M* lockedm;
int32 sig;
int32 writenbuf;
byte* writebuf;
- DeferChunk* dchunk;
- DeferChunk* dchunknext;
uintptr sigcode0;
uintptr sigcode1;
uintptr sigpc;
@@ -287,6 +288,7 @@ struct G
uintptr racectx;
uintptr end[];
};
+
struct M
{
G* g0; // goroutine with scheduling stack
@@ -295,8 +297,8 @@ struct M
// Fields not known to debuggers.
uint32 moreframesize; // size arguments to morestack
- uint32 moreargsize;
- uintptr cret; // return value from C
+ uint32 moreargsize; // known by amd64 asm to follow moreframesize
+ uintreg cret; // return value from C
uint64 procid; // for debuggers, but offset not hard-coded
G* gsignal; // signal-handling G
uintptr tls[4]; // thread-local storage (for x86 extern register)
@@ -310,10 +312,12 @@ struct M
int32 throwing;
int32 gcing;
int32 locks;
+ int32 softfloat;
int32 dying;
int32 profilehz;
int32 helpgc;
- bool spinning;
+ bool spinning; // M is out of work and is actively looking for work
+ bool blocked; // M is blocked on a Note
uint32 fastrand;
uint64 ncgocall; // number of cgo calls in total
int32 ncgo; // number of cgo calls currently in progress
@@ -338,23 +342,37 @@ struct M
uint32 waitsemacount;
uint32 waitsemalock;
GCStats gcstats;
- bool racecall;
bool needextram;
- void (*waitunlockf)(Lock*);
+ uint8 traceback;
+ bool (*waitunlockf)(G*, void*);
void* waitlock;
-
- uintptr settype_buf[1024];
- uintptr settype_bufsize;
-
+ uintptr forkstackguard;
#ifdef GOOS_windows
void* thread; // thread handle
- WinCall wincall;
+ // these are here because they are too large to be on the stack
+ // of low-level NOSPLIT functions.
+ LibCall libcall;
+ uintptr libcallpc; // for cpu profiler
+ uintptr libcallsp;
+ G* libcallg;
+#endif
+#ifdef GOOS_solaris
+ int32* perrno; // pointer to TLS errno
+ // these are here because they are too large to be on the stack
+ // of low-level NOSPLIT functions.
+ LibCall libcall;
+ struct {
+ int64 tv_sec;
+ int64 tv_nsec;
+ } ts;
+ struct {
+ uintptr v[6];
+ } scratch;
#endif
#ifdef GOOS_plan9
int8* notesig;
byte* errstr;
#endif
- SEH* seh;
uintptr end[];
};
@@ -369,12 +387,16 @@ struct P
uint32 syscalltick; // incremented on every system call
M* m; // back-link to associated M (nil if idle)
MCache* mcache;
+ Defer* deferpool[5]; // pool of available Defer structs of different sizes (see panic.c)
+
+ // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
+ uint64 goidcache;
+ uint64 goidcacheend;
// Queue of runnable goroutines.
- G** runq;
- int32 runqhead;
- int32 runqtail;
- int32 runqsize;
+ uint32 runqhead;
+ uint32 runqtail;
+ G* runq[256];
// Available G's (status == Gdead)
G* gfree;
@@ -406,8 +428,8 @@ struct Stktop
uint32 panicwrap;
uint8* argp; // pointer to arguments in old frame
- uintptr free; // if free>0, call stackfree using free as size
bool panic; // is this frame the top of a panic?
+ bool malloced;
};
struct SigTab
{
@@ -423,6 +445,7 @@ enum
SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
SigHandling = 1<<5, // our signal handler is registered
SigIgnored = 1<<6, // the signal was ignored before we registered for it
+ SigGoExit = 1<<7, // cause all runtime procs to exit (only used on Plan 9).
};
// Layout of in-memory per-function information prepared by linker
@@ -456,6 +479,16 @@ struct Itab
void (*fun[])(void);
};
+#ifdef GOOS_nacl
+enum {
+ NaCl = 1,
+};
+#else
+enum {
+ NaCl = 0,
+};
+#endif
+
#ifdef GOOS_windows
enum {
Windows = 1
@@ -465,6 +498,15 @@ enum {
Windows = 0
};
#endif
+#ifdef GOOS_solaris
+enum {
+ Solaris = 1
+};
+#else
+enum {
+ Solaris = 0
+};
+#endif
struct Timers
{
@@ -480,6 +522,8 @@ struct Timers
// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
+// For GOOS=nacl, package syscall knows the layout of this structure.
+// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
struct Timer
{
int32 i; // heap index
@@ -533,12 +577,16 @@ struct CgoMal
// Holds variables parsed from GODEBUG env var.
struct DebugVars
{
+ int32 allocfreetrace;
+ int32 efence;
int32 gctrace;
- int32 schedtrace;
+ int32 gcdead;
int32 scheddetail;
+ int32 schedtrace;
};
extern bool runtime·precisestack;
+extern bool runtime·copystack;
/*
* defined macros
@@ -548,13 +596,13 @@ extern bool runtime·precisestack;
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
#define nil ((void*)0)
#define offsetof(s,m) (uint32)(&(((s*)0)->m))
-#define ROUND(x, n) (((x)+(n)-1)&~((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
+#define ROUND(x, n) (((x)+(n)-1)&~(uintptr)((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
/*
* known to compiler
*/
enum {
- Structrnd = sizeof(uintptr)
+ Structrnd = sizeof(uintreg),
};
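The ROUND change casts the mask through uintptr so that rounding a pointer-sized x by a narrower n cannot truncate or sign-extend the mask. The same power-of-two round-up expressed in Go:

	package main

	import "fmt"

	// roundUp mirrors ROUND(x, n): round x up to a multiple of n,
	// where n must be a power of two.
	func roundUp(x, n uintptr) uintptr {
		return (x + n - 1) &^ (n - 1)
	}

	func main() {
		fmt.Println(roundUp(13, 8)) // 16
		fmt.Println(roundUp(16, 8)) // 16
	}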
/*
@@ -648,7 +696,6 @@ struct Defer
{
int32 siz;
bool special; // not part of defer frame
- bool free; // if special, free when done
byte* argp; // where args were copied from
byte* pc;
FuncVal* fn;
@@ -656,11 +703,10 @@ struct Defer
void* args[1]; // padded to actual size
};
-struct DeferChunk
-{
- DeferChunk *prev;
- uintptr off;
-};
+// argp used in Defer structs when there is no argp.
+// TODO(rsc): Maybe we could use nil instead, but we've always used -1
+// and I don't want to change this days before the Go 1.3 release.
+#define NoArgs ((byte*)-1)
/*
* panics
@@ -670,7 +716,9 @@ struct Panic
Eface arg; // argument to panic
uintptr stackbase; // g->stackbase in panic
Panic* link; // link to earlier panic
+ Defer* defer; // current executing defer
bool recovered; // whether this panic is over
+ bool aborted; // the panic was aborted
};
/*
@@ -681,6 +729,7 @@ struct Stkframe
{
Func* fn; // function being run
uintptr pc; // program counter within fn
+ uintptr continpc; // program counter where execution can continue, or 0 if not
uintptr lr; // program counter at caller aka link register
uintptr sp; // stack pointer at pc
uintptr fp; // stack pointer at caller aka frame pointer
@@ -689,18 +738,24 @@ struct Stkframe
uintptr arglen; // number of bytes at argp
};
-int32 runtime·gentraceback(uintptr, uintptr, uintptr, G*, int32, uintptr*, int32, void(*)(Stkframe*, void*), void*, bool);
+int32 runtime·gentraceback(uintptr, uintptr, uintptr, G*, int32, uintptr*, int32, bool(*)(Stkframe*, void*), void*, bool);
void runtime·traceback(uintptr pc, uintptr sp, uintptr lr, G* gp);
void runtime·tracebackothers(G*);
bool runtime·haszeroargs(uintptr pc);
bool runtime·topofstack(Func*);
+enum
+{
+ // The maximum number of frames we print for a traceback
+ TracebackMaxFrames = 100,
+};
/*
* external data
*/
extern String runtime·emptystring;
extern uintptr runtime·zerobase;
-extern G* runtime·allg;
+extern G** runtime·allg;
+extern uintptr runtime·allglen;
extern G* runtime·lastg;
extern M* runtime·allm;
extern P** runtime·allp;
@@ -722,6 +777,7 @@ extern uintptr runtime·maxstacksize;
* common functions and data
*/
int32 runtime·strcmp(byte*, byte*);
+int32 runtime·strncmp(byte*, byte*, uintptr);
byte* runtime·strstr(byte*, byte*);
intgo runtime·findnull(byte*);
intgo runtime·findnullw(uint16*);
@@ -729,11 +785,33 @@ void runtime·dump(byte*, int32);
int32 runtime·runetochar(byte*, int32);
int32 runtime·charntorune(int32*, uint8*, int32);
+
/*
- * very low level c-called
- */
+ * This macro is used when writing C functions
+ * called as if they were Go functions.
+ * Passed the address of a result before a return statement,
+ * it makes sure the result has been flushed to memory
+ * before the return.
+ *
+ * It is difficult to write such functions portably, because
+ * of the varying requirements on the alignment of the
+ * first output value. Almost all code should write such
+ * functions in .goc files, where goc2c (part of cmd/dist)
+ * can arrange the correct alignment for the target system.
+ * Goc2c also takes care of conveying to the garbage collector
+ * which parts of the argument list are inputs vs outputs.
+ *
+ * Therefore, do NOT use this macro if at all possible.
+ */
#define FLUSH(x) USED(x)
+/*
+ * GoOutput is a type with the same alignment requirements as the
+ * initial output argument from a Go function. Only for use in cases
+ * where using goc2c is not possible. See comment on FLUSH above.
+ */
+typedef uint64 GoOutput;
+
void runtime·gogo(Gobuf*);
void runtime·gostartcall(Gobuf*, void(*)(void), void*);
void runtime·gostartcallfn(Gobuf*, FuncVal*);
@@ -745,8 +823,10 @@ void runtime·goenvs_unix(void);
void* runtime·getu(void);
void runtime·throw(int8*);
void runtime·panicstring(int8*);
+bool runtime·canpanic(G*);
void runtime·prints(int8*);
void runtime·printf(int8*, ...);
+int32 runtime·snprintf(byte*, int32, int8*, ...);
byte* runtime·mchr(byte*, byte, byte*);
int32 runtime·mcmp(byte*, byte*, uintptr);
void runtime·memmove(void*, void*, uintptr);
@@ -764,24 +844,9 @@ int32 runtime·gotraceback(bool *crash);
void runtime·goroutineheader(G*);
int32 runtime·open(int8*, int32, int32);
int32 runtime·read(int32, void*, int32);
-int32 runtime·write(int32, void*, int32);
+int32 runtime·write(uintptr, void*, int32); // use uintptr to accommodate windows.
int32 runtime·close(int32);
int32 runtime·mincore(void*, uintptr, byte*);
-bool runtime·cas(uint32*, uint32, uint32);
-bool runtime·cas64(uint64*, uint64, uint64);
-bool runtime·casp(void**, void*, void*);
-// Don't confuse with XADD x86 instruction,
-// this one is actually 'addx', that is, add-and-fetch.
-uint32 runtime·xadd(uint32 volatile*, int32);
-uint64 runtime·xadd64(uint64 volatile*, int64);
-uint32 runtime·xchg(uint32 volatile*, uint32);
-uint64 runtime·xchg64(uint64 volatile*, uint64);
-uint32 runtime·atomicload(uint32 volatile*);
-void runtime·atomicstore(uint32 volatile*, uint32);
-void runtime·atomicstore64(uint64 volatile*, uint64);
-uint64 runtime·atomicload64(uint64 volatile*);
-void* runtime·atomicloadp(void* volatile*);
-void runtime·atomicstorep(void* volatile*, void*);
void runtime·jmpdefer(FuncVal*, void*);
void runtime·exit1(int32);
void runtime·ready(G*);
@@ -802,12 +867,12 @@ int32 runtime·funcarglen(Func*, uintptr);
int32 runtime·funcspdelta(Func*, uintptr);
int8* runtime·funcname(Func*);
int32 runtime·pcdatavalue(Func*, int32, uintptr);
-void* runtime·stackalloc(uint32);
-void runtime·stackfree(void*, uintptr);
+void* runtime·stackalloc(G*, uint32);
+void runtime·stackfree(G*, void*, Stktop*);
+void runtime·shrinkstack(G*);
MCache* runtime·allocmcache(void);
void runtime·freemcache(MCache*);
void runtime·mallocinit(void);
-void runtime·mprofinit(void);
bool runtime·ifaceeq_c(Iface, Iface);
bool runtime·efaceeq_c(Eface, Eface);
uintptr runtime·ifacehash(Iface, uintptr);
@@ -822,15 +887,35 @@ void runtime·mcall(void(*)(G*));
uint32 runtime·fastrand1(void);
void runtime·rewindmorestack(Gobuf*);
int32 runtime·timediv(int64, int32, int32*);
+int32 runtime·round2(int32 x); // round x up to a power of 2.
+
+// atomic operations
+bool runtime·cas(uint32*, uint32, uint32);
+bool runtime·cas64(uint64*, uint64, uint64);
+bool runtime·casp(void**, void*, void*);
+// Don't confuse with XADD x86 instruction,
+// this one is actually 'addx', that is, add-and-fetch.
+uint32 runtime·xadd(uint32 volatile*, int32);
+uint64 runtime·xadd64(uint64 volatile*, int64);
+uint32 runtime·xchg(uint32 volatile*, uint32);
+uint64 runtime·xchg64(uint64 volatile*, uint64);
+void* runtime·xchgp(void* volatile*, void*);
+uint32 runtime·atomicload(uint32 volatile*);
+void runtime·atomicstore(uint32 volatile*, uint32);
+void runtime·atomicstore64(uint64 volatile*, uint64);
+uint64 runtime·atomicload64(uint64 volatile*);
+void* runtime·atomicloadp(void* volatile*);
+void runtime·atomicstorep(void* volatile*, void*);
-void runtime·setmg(M*, G*);
-void runtime·newextram(void);
+void runtime·setmg(M*, G*);
+void runtime·newextram(void);
void runtime·exit(int32);
void runtime·breakpoint(void);
void runtime·gosched(void);
void runtime·gosched0(G*);
void runtime·schedtrace(bool);
-void runtime·park(void(*)(Lock*), Lock*, int8*);
+void runtime·park(bool(*)(G*, void*), void*, int8*);
+void runtime·parkunlock(Lock*, int8*);
void runtime·tsleep(int64, int8*);
M* runtime·newm(void);
void runtime·goexit(void);
@@ -841,12 +926,13 @@ void runtime·exitsyscall(void);
G* runtime·newproc1(FuncVal*, byte*, int32, int32, void*);
bool runtime·sigsend(int32 sig);
int32 runtime·callers(int32, uintptr*, int32);
-int64 runtime·nanotime(void);
+int64 runtime·nanotime(void); // monotonic time
+int64 runtime·unixnanotime(void); // real time, can skip
void runtime·dopanic(int32);
void runtime·startpanic(void);
void runtime·freezetheworld(void);
void runtime·unwindstack(G*, byte*);
-void runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp);
+void runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp);
void runtime·resetcpuprofiler(int32);
void runtime·setcpuprofilerate(void(*)(uintptr*, int32), int32);
void runtime·usleep(uint32);
@@ -862,10 +948,19 @@ int32 runtime·netpollopen(uintptr, PollDesc*);
int32 runtime·netpollclose(uintptr);
void runtime·netpollready(G**, PollDesc*, int32);
uintptr runtime·netpollfd(PollDesc*);
+void runtime·netpollarm(PollDesc*, int32);
+void** runtime·netpolluser(PollDesc*);
+bool runtime·netpollclosing(PollDesc*);
+void runtime·netpolllock(PollDesc*);
+void runtime·netpollunlock(PollDesc*);
void runtime·crash(void);
void runtime·parsedebugvars(void);
void _rt0_go(void);
void* runtime·funcdata(Func*, int32);
+int32 runtime·setmaxthreads(int32);
+G* runtime·timejump(void);
+void runtime·iterate_itabs(void (*callback)(Itab*));
+void runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*));
#pragma varargck argpos runtime·printf 1
#pragma varargck type "c" int32
@@ -954,13 +1049,7 @@ LFNode* runtime·lfstackpop(uint64 *head);
ParFor* runtime·parforalloc(uint32 nthrmax);
void runtime·parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32));
void runtime·parfordo(ParFor *desc);
-
-/*
- * This is consistent across Linux and BSD.
- * If a new OS is added that is different, move this to
- * $GOOS/$GOARCH/defs.h.
- */
-#define EACCES 13
+void runtime·parforiters(ParFor*, uintptr, uintptr*, uintptr*);
/*
* low level C-called
@@ -992,10 +1081,11 @@ void runtime·printhex(uint64);
void runtime·printslice(Slice);
void runtime·printcomplex(Complex128);
void runtime·newstackcall(FuncVal*, byte*, uint32);
-void reflect·call(FuncVal*, byte*, uint32);
+void reflect·call(FuncVal*, byte*, uint32, uint32);
void runtime·panic(Eface);
void runtime·panicindex(void);
void runtime·panicslice(void);
+void runtime·panicdivide(void);
/*
* runtime c-called (but written in Go)
@@ -1036,16 +1126,8 @@ void runtime·procyield(uint32);
void runtime·osyield(void);
void runtime·lockOSThread(void);
void runtime·unlockOSThread(void);
+bool runtime·lockedOSThread(void);
-void runtime·mapassign(MapType*, Hmap*, byte*, byte*);
-void runtime·mapaccess(MapType*, Hmap*, byte*, byte*, bool*);
-void runtime·mapiternext(struct hash_iter*);
-bool runtime·mapiterkey(struct hash_iter*, void*);
-Hmap* runtime·makemap_c(MapType*, int64);
-
-Hchan* runtime·makechan_c(ChanType*, int64);
-void runtime·chansend(ChanType*, Hchan*, byte*, bool*, void*);
-void runtime·chanrecv(ChanType*, Hchan*, byte*, bool*, bool*);
bool runtime·showframe(Func*, G*);
void runtime·printcreatedby(G*);
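One subtlety in the atomic declarations gathered above: as the comment notes, runtime·xadd is add-and-fetch, returning the new value, unlike the x86 XADD instruction, which yields the old one. sync/atomic exposes the same convention:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var n uint32 = 41
		// AddUint32 is add-and-fetch: it returns the NEW value,
		// matching runtime·xadd, not x86 XADD's old-value semantics.
		fmt.Println(atomic.AddUint32(&n, 1)) // 42
	}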
diff --git a/src/pkg/runtime/runtime1.goc b/src/pkg/runtime/runtime1.goc
index d2c38dfef..c6f6b626a 100644
--- a/src/pkg/runtime/runtime1.goc
+++ b/src/pkg/runtime/runtime1.goc
@@ -4,6 +4,8 @@
package runtime
#include "runtime.h"
+#include "arch_GOARCH.h"
+#include "type.h"
func GOMAXPROCS(n int) (ret int) {
ret = runtime·gomaxprocsfunc(n);
@@ -12,3 +14,115 @@ func GOMAXPROCS(n int) (ret int) {
func NumCPU() (ret int) {
ret = runtime·ncpu;
}
+
+func NumCgoCall() (ret int64) {
+ M *mp;
+
+ ret = 0;
+ for(mp=runtime·atomicloadp(&runtime·allm); mp; mp=mp->alllink)
+ ret += mp->ncgocall;
+}
+
+func newParFor(nthrmax uint32) (desc *ParFor) {
+ desc = runtime·parforalloc(nthrmax);
+}
+
+func parForSetup(desc *ParFor, nthr uint32, n uint32, ctx *byte, wait bool, body *byte) {
+ runtime·parforsetup(desc, nthr, n, ctx, wait, *(void(**)(ParFor*, uint32))body);
+}
+
+func parForDo(desc *ParFor) {
+ runtime·parfordo(desc);
+}
+
+func parForIters(desc *ParFor, tid uintptr) (start uintptr, end uintptr) {
+ runtime·parforiters(desc, tid, &start, &end);
+}
+
+func gogoBytes() (x int32) {
+ x = RuntimeGogoBytes;
+}
+
+func typestring(e Eface) (s String) {
+ s = *e.type->string;
+}
+
+func golockedOSThread() (ret bool) {
+ ret = runtime·lockedOSThread();
+}
+
+func NumGoroutine() (ret int) {
+ ret = runtime·gcount();
+}
+
+func getgoroot() (out String) {
+ byte *p;
+
+ p = runtime·getenv("GOROOT");
+ out = runtime·gostringnocopy(p);
+}
+
+/*
+ * We assume that all architectures turn faults and the like
+ * into apparent calls to runtime.sigpanic. If we see a "call"
+ * to runtime.sigpanic, we do not back up the PC to find the
+ * line number of the CALL instruction, because there is no CALL.
+ */
+void runtime·sigpanic(void);
+
+func Caller(skip int) (retpc uintptr, retfile String, retline int, retbool bool) {
+ Func *f, *g;
+ uintptr pc;
+ uintptr rpc[2];
+
+ /*
+ * Ask for two PCs: the one we were asked for
+ * and what it called, so that we can see if it
+ * "called" sigpanic.
+ */
+ retpc = 0;
+ if(runtime·callers(1+skip-1, rpc, 2) < 2) {
+ retfile = runtime·emptystring;
+ retline = 0;
+ retbool = false;
+ } else if((f = runtime·findfunc(rpc[1])) == nil) {
+ retfile = runtime·emptystring;
+ retline = 0;
+ retbool = true; // have retpc at least
+ } else {
+ retpc = rpc[1];
+ pc = retpc;
+ g = runtime·findfunc(rpc[0]);
+ if(pc > f->entry && (g == nil || g->entry != (uintptr)runtime·sigpanic))
+ pc--;
+ retline = runtime·funcline(f, pc, &retfile);
+ retbool = true;
+ }
+}
+
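From ordinary Go code this machinery is reached through runtime.Caller; a small usage example:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		pc, file, line, ok := runtime.Caller(0) // 0 = this call site
		if !ok {
			return
		}
		fmt.Printf("%s:%d in %s\n", file, line, runtime.FuncForPC(pc).Name())
	}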
+func Callers(skip int, pc Slice) (retn int) {
+ // runtime.callers uses pc.array==nil as a signal
+ // to print a stack trace. Pick off 0-length pc here
+ // so that we don't let a nil pc slice get to it.
+ if(pc.len == 0)
+ retn = 0;
+ else
+ retn = runtime·callers(skip, (uintptr*)pc.array, pc.len);
+}
+
+func runtime∕pprof·runtime_cyclesPerSecond() (res int64) {
+ res = runtime·tickspersecond();
+}
+
+func sync·runtime_procPin() (p int) {
+ M *mp;
+
+ mp = m;
+ // Disable preemption.
+ mp->locks++;
+ p = mp->p->id;
+}
+
+func sync·runtime_procUnpin() {
+ m->locks--;
+}
diff --git a/src/pkg/runtime/runtime_test.go b/src/pkg/runtime/runtime_test.go
index de6e5498e..5a9f52fe0 100644
--- a/src/pkg/runtime/runtime_test.go
+++ b/src/pkg/runtime/runtime_test.go
@@ -10,9 +10,11 @@ import (
"os"
"os/exec"
. "runtime"
+ "runtime/debug"
"strconv"
"strings"
"testing"
+ "unsafe"
)
var errf error
@@ -93,6 +95,10 @@ func BenchmarkDeferMany(b *testing.B) {
// The value reported will include the padding between runtime.gogo and the
// next function in memory. That's fine.
func TestRuntimeGogoBytes(t *testing.T) {
+ if GOOS == "nacl" {
+ t.Skip("skipping on nacl")
+ }
+
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
@@ -104,7 +110,7 @@ func TestRuntimeGogoBytes(t *testing.T) {
t.Fatalf("building hello world: %v\n%s", err, out)
}
- out, err = exec.Command("go", "tool", "nm", "-S", dir+"/hello").CombinedOutput()
+ out, err = exec.Command("go", "tool", "nm", "-size", dir+"/hello").CombinedOutput()
if err != nil {
t.Fatalf("go tool nm: %v\n%s", err, out)
}
@@ -122,3 +128,77 @@ func TestRuntimeGogoBytes(t *testing.T) {
t.Fatalf("go tool nm did not report size for runtime.gogo")
}
+
+// golang.org/issue/7063
+func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
+ SetCPUProfileRate(0)
+}
+
+// Addresses to test for faulting behavior.
+// This is less a test of SetPanicOnFault and more a check that
+// the operating system and the runtime can process these faults
+// correctly. That is, we're indirectly testing that without SetPanicOnFault
+// these would manage to turn into ordinary crashes.
+// Note that these are truncated on 32-bit systems, so the bottom 32 bits
+// of the larger addresses must themselves be invalid addresses.
+// We might get unlucky and the OS might have mapped one of these
+// addresses, but probably not: they're all in the first page, very high
+// addresses that normally an OS would reserve for itself, or malformed
+// addresses. Even so, we might have to remove one or two on different
+// systems. We will see.
+
+var faultAddrs = []uint64{
+ // low addresses
+ 0,
+ 1,
+ 0xfff,
+ // high (kernel) addresses
+ // or else malformed.
+ 0xffffffffffffffff,
+ 0xfffffffffffff001,
+ // no 0xffffffffffff0001; 0xffff0001 is mapped for 32-bit user space on OS X
+ // no 0xfffffffffff00001; 0xfff00001 is mapped for 32-bit user space sometimes on Linux
+ 0xffffffffff000001,
+ 0xfffffffff0000001,
+ 0xffffffff00000001,
+ 0xfffffff000000001,
+ 0xffffff0000000001,
+ 0xfffff00000000001,
+ 0xffff000000000001,
+ 0xfff0000000000001,
+ 0xff00000000000001,
+ 0xf000000000000001,
+ 0x8000000000000001,
+}
+
+func TestSetPanicOnFault(t *testing.T) {
+ // This currently results in a fault in the signal trampoline on
+ // dragonfly/386 - see issue 7421.
+ if GOOS == "dragonfly" && GOARCH == "386" {
+ t.Skip("skipping test on dragonfly/386")
+ }
+
+ old := debug.SetPanicOnFault(true)
+ defer debug.SetPanicOnFault(old)
+
+ for _, addr := range faultAddrs {
+ testSetPanicOnFault(t, uintptr(addr))
+ }
+}
+
+func testSetPanicOnFault(t *testing.T, addr uintptr) {
+ if GOOS == "nacl" {
+ t.Skip("nacl doesn't seem to fault on high addresses")
+ }
+
+ defer func() {
+ if err := recover(); err == nil {
+ t.Fatalf("did not find error in recover")
+ }
+ }()
+
+ var p *int
+ p = (*int)(unsafe.Pointer(addr))
+ println(*p)
+ t.Fatalf("still here - should have faulted on address %#x", addr)
+}
diff --git a/src/pkg/runtime/runtime_unix_test.go b/src/pkg/runtime/runtime_unix_test.go
new file mode 100644
index 000000000..963de8cdb
--- /dev/null
+++ b/src/pkg/runtime/runtime_unix_test.go
@@ -0,0 +1,56 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Only works on systems with syscall.Close.
+// We need a fast system call to provoke the race,
+// and Close(-1) is nearly universally fast.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd plan9
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "testing"
+)
+
+func TestGoroutineProfile(t *testing.T) {
+ // GoroutineProfile used to use the wrong starting sp for
+ // goroutines coming out of system calls, causing possible
+ // crashes.
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(100))
+
+ var stop uint32
+ defer atomic.StoreUint32(&stop, 1) // in case of panic
+
+ var wg sync.WaitGroup
+ for i := 0; i < 4; i++ {
+ wg.Add(1)
+ go func() {
+ for atomic.LoadUint32(&stop) == 0 {
+ syscall.Close(-1)
+ }
+ wg.Done()
+ }()
+ }
+
+ max := 10000
+ if testing.Short() {
+ max = 100
+ }
+ stk := make([]runtime.StackRecord, 100)
+ for n := 0; n < max; n++ {
+ _, ok := runtime.GoroutineProfile(stk)
+ if !ok {
+ t.Fatalf("GoroutineProfile failed")
+ }
+ }
+
+ // If the program didn't crash, we passed.
+ atomic.StoreUint32(&stop, 1)
+ wg.Wait()
+}
diff --git a/src/pkg/runtime/sema.goc b/src/pkg/runtime/sema.goc
index 57f32a0dd..c1e8e4e18 100644
--- a/src/pkg/runtime/sema.goc
+++ b/src/pkg/runtime/sema.goc
@@ -137,7 +137,7 @@ runtime·semacquire(uint32 volatile *addr, bool profile)
// Any semrelease after the cansemacquire knows we're waiting
// (we set nwait above), so go to sleep.
semqueue(root, addr, &s);
- runtime·park(runtime·unlock, root, "semacquire");
+ runtime·parkunlock(root, "semacquire");
if(cansemacquire(addr)) {
if(t0)
runtime·blockevent(s.releasetime - t0, 3);
@@ -254,7 +254,7 @@ func runtime_Syncsemacquire(s *SyncSema) {
else
s->tail->next = &w;
s->tail = &w;
- runtime·park(runtime·unlock, s, "semacquire");
+ runtime·parkunlock(s, "semacquire");
if(t0)
runtime·blockevent(w.releasetime - t0, 2);
}
@@ -288,7 +288,7 @@ func runtime_Syncsemrelease(s *SyncSema, n uint32) {
else
s->tail->next = &w;
s->tail = &w;
- runtime·park(runtime·unlock, s, "semarelease");
+ runtime·parkunlock(s, "semarelease");
} else
runtime·unlock(s);
}
diff --git a/src/pkg/runtime/signal_386.c b/src/pkg/runtime/signal_386.c
index 5a913c646..70fcc6a63 100644
--- a/src/pkg/runtime/signal_386.c
+++ b/src/pkg/runtime/signal_386.c
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd
#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
@@ -39,15 +39,12 @@ runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
bool crash;
if(sig == SIGPROF) {
- runtime·sigprof((byte*)SIG_EIP(info, ctxt), (byte*)SIG_ESP(info, ctxt), nil, gp);
+ runtime·sigprof((byte*)SIG_EIP(info, ctxt), (byte*)SIG_ESP(info, ctxt), nil, gp, m);
return;
}
t = &runtime·sigtab[sig];
if(SIG_CODE0(info, ctxt) != SI_USER && (t->flags & SigPanic)) {
- if(gp == nil || gp == m->g0)
- goto Throw;
-
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
@@ -94,7 +91,6 @@ runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
if(!(t->flags & SigThrow))
return;
-Throw:
m->throwing = 1;
m->caughtsig = gp;
runtime·startpanic();
@@ -112,6 +108,7 @@ Throw:
runtime·printf("\n");
if(runtime·gotraceback(&crash)){
+ runtime·goroutineheader(gp);
runtime·traceback(SIG_EIP(info, ctxt), SIG_ESP(info, ctxt), 0, gp);
runtime·tracebackothers(gp);
runtime·printf("\n");
diff --git a/src/pkg/runtime/signal_amd64.c b/src/pkg/runtime/signal_amd64x.c
index f0cbb1f8c..04026f32f 100644
--- a/src/pkg/runtime/signal_amd64.c
+++ b/src/pkg/runtime/signal_amd64x.c
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd
+// +build amd64 amd64p32
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
@@ -47,15 +48,33 @@ runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
bool crash;
if(sig == SIGPROF) {
- runtime·sigprof((byte*)SIG_RIP(info, ctxt), (byte*)SIG_RSP(info, ctxt), nil, gp);
+ runtime·sigprof((byte*)SIG_RIP(info, ctxt), (byte*)SIG_RSP(info, ctxt), nil, gp, m);
return;
}
+#ifdef GOOS_darwin
+ // x86-64 has 48-bit virtual addresses. The top 16 bits must echo bit 47.
+ // The hardware delivers a different kind of fault for a malformed address
+ // than it does for an attempt to access a valid but unmapped address.
+ // OS X 10.9.2 mishandles the malformed address case, making it look like
+ // a user-generated signal (like someone ran kill -SEGV ourpid).
+ // We pass user-generated signals to os/signal, or else ignore them.
+ // Doing that here - and returning to the faulting code - results in an
+ // infinite loop. It appears the best we can do is rewrite what the kernel
+ // delivers into something more like the truth. The address used below
+ // has very little chance of being the one that caused the fault, but it is
+ // malformed, it is clearly not a real pointer, and if it does get printed
+ // in real life, people will probably search for it and find this code.
+ // There are no Google hits for b01dfacedebac1e or 0xb01dfacedebac1e
+ // as I type this comment.
+ if(sig == SIGSEGV && SIG_CODE0(info, ctxt) == SI_USER) {
+ SIG_CODE0(info, ctxt) = SI_USER+1;
+ info->si_addr = (void*)(uintptr)0xb01dfacedebac1eULL;
+ }
+#endif
+
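The canonical-address rule behind the darwin workaround is easy to state: on x86-64, bits 63-48 of a valid virtual address must all equal bit 47. A Go sketch of that check (illustrative only):

	package main

	import "fmt"

	// isCanonical reports whether addr is a well-formed x86-64 virtual
	// address: sign-extending from bit 47 must reproduce the address.
	func isCanonical(addr uint64) bool {
		top := int64(addr) >> 47
		return top == 0 || top == -1
	}

	func main() {
		fmt.Println(isCanonical(0x00007fffffffffff)) // true: top of user space
		fmt.Println(isCanonical(0xffff800000000000)) // true: kernel space
		fmt.Println(isCanonical(0xb01dfacedebac1e))  // false: malformed
	}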
t = &runtime·sigtab[sig];
if(SIG_CODE0(info, ctxt) != SI_USER && (t->flags & SigPanic)) {
- if(gp == nil || gp == m->g0)
- goto Throw;
-
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
@@ -89,6 +108,8 @@ runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
// won't get to see who faulted.)
if(SIG_RIP(info, ctxt) != 0) {
sp = (uintptr*)SIG_RSP(info, ctxt);
+ if(sizeof(uintreg) > sizeof(uintptr))
+ *--sp = 0;
*--sp = SIG_RIP(info, ctxt);
SIG_RSP(info, ctxt) = (uintptr)sp;
}
@@ -104,7 +125,6 @@ runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
if(!(t->flags & SigThrow))
return;
-Throw:
m->throwing = 1;
m->caughtsig = gp;
runtime·startpanic();
@@ -122,6 +142,7 @@ Throw:
runtime·printf("\n");
if(runtime·gotraceback(&crash)){
+ runtime·goroutineheader(gp);
runtime·traceback(SIG_RIP(info, ctxt), SIG_RSP(info, ctxt), 0, gp);
runtime·tracebackothers(gp);
runtime·printf("\n");
diff --git a/src/pkg/runtime/signal_arm.c b/src/pkg/runtime/signal_arm.c
index a6e239601..9b2a43d9b 100644
--- a/src/pkg/runtime/signal_arm.c
+++ b/src/pkg/runtime/signal_arm.c
@@ -46,15 +46,12 @@ runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
bool crash;
if(sig == SIGPROF) {
- runtime·sigprof((uint8*)SIG_PC(info, ctxt), (uint8*)SIG_SP(info, ctxt), (uint8*)SIG_LR(info, ctxt), gp);
+ runtime·sigprof((uint8*)SIG_PC(info, ctxt), (uint8*)SIG_SP(info, ctxt), (uint8*)SIG_LR(info, ctxt), gp, m);
return;
}
t = &runtime·sigtab[sig];
if(SIG_CODE0(info, ctxt) != SI_USER && (t->flags & SigPanic)) {
- if(gp == nil || gp == m->g0)
- goto Throw;
-
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
@@ -92,7 +89,6 @@ runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
if(!(t->flags & SigThrow))
return;
-Throw:
m->throwing = 1;
m->caughtsig = gp;
if(runtime·panicking) // traceback already printed
@@ -112,6 +108,7 @@ Throw:
runtime·printf("\n");
if(runtime·gotraceback(&crash)){
+ runtime·goroutineheader(gp);
runtime·traceback(SIG_PC(info, ctxt), SIG_SP(info, ctxt), SIG_LR(info, ctxt), gp);
runtime·tracebackothers(gp);
runtime·printf("\n");
diff --git a/src/pkg/runtime/signal_nacl_386.h b/src/pkg/runtime/signal_nacl_386.h
new file mode 100644
index 000000000..c9481b5f4
--- /dev/null
+++ b/src/pkg/runtime/signal_nacl_386.h
@@ -0,0 +1,23 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define SIG_REGS(ctxt) (((ExcContext*)(ctxt))->regs)
+
+#define SIG_EAX(info, ctxt) (SIG_REGS(ctxt).eax)
+#define SIG_EBX(info, ctxt) (SIG_REGS(ctxt).ebx)
+#define SIG_ECX(info, ctxt) (SIG_REGS(ctxt).ecx)
+#define SIG_EDX(info, ctxt) (SIG_REGS(ctxt).edx)
+#define SIG_EDI(info, ctxt) (SIG_REGS(ctxt).edi)
+#define SIG_ESI(info, ctxt) (SIG_REGS(ctxt).esi)
+#define SIG_EBP(info, ctxt) (SIG_REGS(ctxt).ebp)
+#define SIG_ESP(info, ctxt) (SIG_REGS(ctxt).esp)
+#define SIG_EIP(info, ctxt) (SIG_REGS(ctxt).eip)
+#define SIG_EFLAGS(info, ctxt) (SIG_REGS(ctxt).eflags)
+
+#define SIG_CS(info, ctxt) (~0)
+#define SIG_FS(info, ctxt) (~0)
+#define SIG_GS(info, ctxt) (~0)
+
+#define SIG_CODE0(info, ctxt) (~0)
+#define SIG_CODE1(info, ctxt) (0)
diff --git a/src/pkg/runtime/signal_nacl_amd64p32.h b/src/pkg/runtime/signal_nacl_amd64p32.h
new file mode 100644
index 000000000..c58593a29
--- /dev/null
+++ b/src/pkg/runtime/signal_nacl_amd64p32.h
@@ -0,0 +1,31 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define SIG_REGS(ctxt) (((ExcContext*)(ctxt))->regs64)
+
+#define SIG_RAX(info, ctxt) (SIG_REGS(ctxt).rax)
+#define SIG_RBX(info, ctxt) (SIG_REGS(ctxt).rbx)
+#define SIG_RCX(info, ctxt) (SIG_REGS(ctxt).rcx)
+#define SIG_RDX(info, ctxt) (SIG_REGS(ctxt).rdx)
+#define SIG_RDI(info, ctxt) (SIG_REGS(ctxt).rdi)
+#define SIG_RSI(info, ctxt) (SIG_REGS(ctxt).rsi)
+#define SIG_RBP(info, ctxt) (SIG_REGS(ctxt).rbp)
+#define SIG_RSP(info, ctxt) (SIG_REGS(ctxt).rsp)
+#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).r8)
+#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).r9)
+#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).r10)
+#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).r11)
+#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).r12)
+#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).r13)
+#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).r14)
+#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).r15)
+#define SIG_RIP(info, ctxt) (SIG_REGS(ctxt).rip)
+#define SIG_RFLAGS(info, ctxt) (SIG_REGS(ctxt).rflags)
+
+#define SIG_CS(info, ctxt) (~0)
+#define SIG_FS(info, ctxt) (~0)
+#define SIG_GS(info, ctxt) (~0)
+
+#define SIG_CODE0(info, ctxt) (~0)
+#define SIG_CODE1(info, ctxt) (0)
diff --git a/src/pkg/runtime/signal_solaris_amd64.h b/src/pkg/runtime/signal_solaris_amd64.h
new file mode 100644
index 000000000..c2e0a1549
--- /dev/null
+++ b/src/pkg/runtime/signal_solaris_amd64.h
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define SIG_REGS(ctxt) (((Ucontext*)(ctxt))->uc_mcontext)
+
+#define SIG_RAX(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RAX])
+#define SIG_RBX(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RBX])
+#define SIG_RCX(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RCX])
+#define SIG_RDX(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RDX])
+#define SIG_RDI(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RDI])
+#define SIG_RSI(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RSI])
+#define SIG_RBP(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RBP])
+#define SIG_RSP(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RSP])
+#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R8])
+#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R9])
+#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R10])
+#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R11])
+#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R12])
+#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R13])
+#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R14])
+#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R15])
+#define SIG_RIP(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RIP])
+#define SIG_RFLAGS(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RFLAGS])
+
+#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).gregs[REG_CS])
+#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).gregs[REG_FS])
+#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).gregs[REG_GS])
+
+#define SIG_CODE0(info, ctxt) ((info)->si_code)
+#define SIG_CODE1(info, ctxt) (*(uintptr*)&(info)->__data[0])
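Each of these headers gives the shared signal code one uniform register vocabulary over a platform-specific context layout. A minimal Go sketch of the same indirection, with illustrative names and register indices (sigctxt, regRIP, regRSP are hypothetical here, not the runtime's API; the real REG_* values come from the system headers via defs_solaris_amd64.h):

	package main

	import "fmt"

	// sigctxt is the uniform view the shared signal code programs against.
	type sigctxt interface {
		rip() uint64 // instruction pointer at the time of the signal
		rsp() uint64 // stack pointer at the time of the signal
	}

	// solarisCtxt mirrors the Ucontext.uc_mcontext.gregs layout above.
	type solarisCtxt struct{ gregs [32]uint64 }

	// Illustrative indices only.
	const (
		regRIP = 17
		regRSP = 16
	)

	func (c *solarisCtxt) rip() uint64 { return c.gregs[regRIP] }
	func (c *solarisCtxt) rsp() uint64 { return c.gregs[regRSP] }

	func dump(c sigctxt) { fmt.Printf("sig at pc=%#x sp=%#x\n", c.rip(), c.rsp()) }

	func main() {
		var c solarisCtxt
		c.gregs[regRIP] = 0x401000
		c.gregs[regRSP] = 0xc000
		dump(&c) // sig at pc=0x401000 sp=0xc000
	}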
diff --git a/src/pkg/runtime/signal_unix.c b/src/pkg/runtime/signal_unix.c
index 4d14b2208..246a1eb25 100644
--- a/src/pkg/runtime/signal_unix.c
+++ b/src/pkg/runtime/signal_unix.c
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux openbsd netbsd
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
@@ -113,6 +113,7 @@ runtime·crash(void)
return;
#endif
+ runtime·unblocksignals();
runtime·setsig(SIGABRT, SIG_DFL, false);
runtime·raise(SIGABRT);
}
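The added runtime·unblocksignals call matters because SIGABRT may still be blocked while a signal handler runs; a blocked signal would stay pending instead of killing the process. A rough user-level Go sketch of the reset-and-re-raise idiom (Unix-only; Go exposes no public way to unblock signals, so that step has no equivalent here):

	package main

	import (
		"os/signal"
		"syscall"
	)

	// crash restores the default SIGABRT disposition and re-delivers
	// the signal so the process dies with SIGABRT as the recorded cause.
	func crash() {
		signal.Reset(syscall.SIGABRT)
		syscall.Kill(syscall.Getpid(), syscall.SIGABRT)
	}

	func main() { crash() }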
diff --git a/src/pkg/runtime/signals_freebsd.h b/src/pkg/runtime/signals_freebsd.h
index 4d27e050d..8d45c50c3 100644
--- a/src/pkg/runtime/signals_freebsd.h
+++ b/src/pkg/runtime/signals_freebsd.h
@@ -21,7 +21,7 @@ SigTab runtime·sigtab[] = {
/* 9 */ 0, "SIGKILL: kill",
/* 10 */ P, "SIGBUS: bus error",
/* 11 */ P, "SIGSEGV: segmentation violation",
- /* 12 */ T, "SIGSYS: bad system call",
+ /* 12 */ N, "SIGSYS: bad system call",
/* 13 */ N, "SIGPIPE: write to broken pipe",
/* 14 */ N, "SIGALRM: alarm clock",
/* 15 */ N+K, "SIGTERM: termination",
diff --git a/src/pkg/runtime/signals_linux.h b/src/pkg/runtime/signals_linux.h
index 9c3567007..368afc1c8 100644
--- a/src/pkg/runtime/signals_linux.h
+++ b/src/pkg/runtime/signals_linux.h
@@ -41,7 +41,7 @@ SigTab runtime·sigtab[] = {
/* 29 */ N, "SIGIO: i/o now possible",
/* 30 */ N, "SIGPWR: power failure restart",
/* 31 */ N, "SIGSYS: bad system call",
- /* 32 */ N, "signal 32",
+ /* 32 */ 0, "signal 32", /* SIGCANCEL; see issue 6997 */
/* 33 */ 0, "signal 33", /* SIGSETXID; see issue 3871 */
/* 34 */ N, "signal 34",
/* 35 */ N, "signal 35",
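The single-letter sigtab markers are bit flags, so N+K in these tables is really a bitwise OR of two independent behaviors. A sketch with illustrative flag values (the real constants live in runtime.h):

	package main

	import "fmt"

	const (
		SigNotify  = 1 << iota // allow signal.Notify to observe it
		SigKill                // if unhandled, exit quietly
		SigThrow               // if unhandled, crash with a traceback
		SigPanic               // convert the signal into a Go panic
		SigDefault             // keep the OS default action
	)

	type sigTabEntry struct {
		flags int32
		name  string
	}

	// Because the flags are distinct bits, the C table's N+K equals N|K.
	var sigtab = []sigTabEntry{
		31: {SigNotify, "SIGSYS: bad system call"},
		32: {0, "signal 32"}, // SIGCANCEL: reserved for the C library, never user-visible
	}

	func main() {
		e := sigtab[31]
		fmt.Println(e.name, "notify:", e.flags&SigNotify != 0) // notify: true
		fmt.Println(sigtab[32].name, "ignored by Go:", sigtab[32].flags == 0)
	}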
diff --git a/src/pkg/runtime/signals_nacl.h b/src/pkg/runtime/signals_nacl.h
new file mode 100644
index 000000000..229b58590
--- /dev/null
+++ b/src/pkg/runtime/signals_nacl.h
@@ -0,0 +1,50 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define N SigNotify
+#define K SigKill
+#define T SigThrow
+#define P SigPanic
+#define D SigDefault
+
+SigTab runtime·sigtab[] = {
+ /* 0 */ 0, "SIGNONE: no trap",
+ /* 1 */ N+K, "SIGHUP: terminal line hangup",
+ /* 2 */ N+K, "SIGINT: interrupt",
+ /* 3 */ N+T, "SIGQUIT: quit",
+ /* 4 */ T, "SIGILL: illegal instruction",
+ /* 5 */ T, "SIGTRAP: trace trap",
+ /* 6 */ N+T, "SIGABRT: abort",
+ /* 7 */ T, "SIGEMT: emulate instruction executed",
+ /* 8 */ P, "SIGFPE: floating-point exception",
+ /* 9 */ 0, "SIGKILL: kill",
+ /* 10 */ P, "SIGBUS: bus error",
+ /* 11 */ P, "SIGSEGV: segmentation violation",
+ /* 12 */ T, "SIGSYS: bad system call",
+ /* 13 */ N, "SIGPIPE: write to broken pipe",
+ /* 14 */ N, "SIGALRM: alarm clock",
+ /* 15 */ N+K, "SIGTERM: termination",
+ /* 16 */ N, "SIGURG: urgent condition on socket",
+ /* 17 */ 0, "SIGSTOP: stop",
+ /* 18 */ N+D, "SIGTSTP: keyboard stop",
+ /* 19 */ 0, "SIGCONT: continue after stop",
+ /* 20 */ N, "SIGCHLD: child status has changed",
+ /* 21 */ N+D, "SIGTTIN: background read from tty",
+ /* 22 */ N+D, "SIGTTOU: background write to tty",
+ /* 23 */ N, "SIGIO: i/o now possible",
+ /* 24 */ N, "SIGXCPU: cpu limit exceeded",
+ /* 25 */ N, "SIGXFSZ: file size limit exceeded",
+ /* 26 */ N, "SIGVTALRM: virtual alarm clock",
+ /* 27 */ N, "SIGPROF: profiling alarm clock",
+ /* 28 */ N, "SIGWINCH: window size change",
+ /* 29 */ N, "SIGINFO: status request from keyboard",
+ /* 30 */ N, "SIGUSR1: user-defined signal 1",
+ /* 31 */ N, "SIGUSR2: user-defined signal 2",
+};
+
+#undef N
+#undef K
+#undef T
+#undef P
+#undef D
diff --git a/src/pkg/runtime/signals_plan9.h b/src/pkg/runtime/signals_plan9.h
index f9bec65fc..818f508cf 100644
--- a/src/pkg/runtime/signals_plan9.h
+++ b/src/pkg/runtime/signals_plan9.h
@@ -3,26 +3,58 @@
// license that can be found in the LICENSE file.
#define N SigNotify
+#define K SigKill
#define T SigThrow
#define P SigPanic
+#define E SigGoExit
+
+// Incoming notes are compared against this table using strncmp, so the
+// order matters: longer patterns must appear before their prefixes.
+// There are #defined SIG constants in os_plan9.h for the table index of
+// some of these.
+//
+// If you add entries to this table, you must respect the prefix ordering
+// and also update the constant values in os_plan9.h.
SigTab runtime·sigtab[] = {
- P, "sys: fp:",
-
- // Go libraries expect to be able
- // to recover from memory
- // read/write errors, so we flag
- // those as panics. All other traps
- // are generally more serious and
- // should immediately throw an
- // exception.
- P, "sys: trap: fault read addr",
- P, "sys: trap: fault write addr",
- T, "sys: trap:",
-
- N, "sys: bad sys call",
+	// Traps from which we cannot recover.
+ T, "sys: trap: debug exception",
+ T, "sys: trap: invalid opcode",
+
+ // We can recover from some memory errors in runtime·sigpanic.
+ P, "sys: trap: fault read addr", // SIGRFAULT
+ P, "sys: trap: fault write addr", // SIGWFAULT
+
+ // We can also recover from math errors.
+ P, "sys: trap: divide error", // SIGINTDIV
+ P, "sys: fp:", // SIGFLOAT
+
+ // All other traps are normally handled as if they were marked SigThrow.
+ // We mark them SigPanic here so that debug.SetPanicOnFault will work.
+ P, "sys: trap:", // SIGTRAP
+
+ // Writes to a closed pipe can be handled if desired, otherwise they're ignored.
+ N, "sys: write on closed pipe",
+
+ // Other system notes are more serious and cannot be recovered.
+ T, "sys:",
+
+ // Issued to all other procs when calling runtime·exit.
+ E, "go: exit ",
+
+ // Kill is sent by external programs to cause an exit.
+ K, "kill",
+
+ // Interrupts can be handled if desired, otherwise they cause an exit.
+ N+K, "interrupt",
+ N+K, "hangup",
+
+ // Alarms can be handled if desired, otherwise they're ignored.
+ N, "alarm",
};
#undef N
+#undef K
#undef T
#undef P
+#undef E
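Unlike the numbered Unix tables, Plan 9 notes are strings matched against this table with strncmp, i.e. a prefix match in table order, which is why the specific "fault read" entries must precede the generic "sys: trap:" entry. A standalone sketch of that lookup:

	package main

	import (
		"fmt"
		"strings"
	)

	const (
		N = 1 << iota
		K
		T
		P
		E
	)

	type noteEntry struct {
		flags int
		pat   string
	}

	var tab = []noteEntry{
		{P, "sys: trap: fault read addr"}, // must precede the generic "sys: trap:"
		{P, "sys: trap: fault write addr"},
		{P, "sys: trap:"},
		{T, "sys:"},
		{N + K, "interrupt"},
	}

	// lookup scans in order; strings.HasPrefix expresses the C code's
	// strncmp(note, pat, len(pat)) comparison.
	func lookup(note string) int {
		for _, e := range tab {
			if strings.HasPrefix(note, e.pat) {
				return e.flags
			}
		}
		return 0
	}

	func main() {
		// The fault note matches its specific entry, not the generic
		// one, only because the specific entry comes first.
		fmt.Println(lookup("sys: trap: fault read addr 0x0") == P) // true
	}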
diff --git a/src/pkg/runtime/signals_solaris.h b/src/pkg/runtime/signals_solaris.h
new file mode 100644
index 000000000..c272cad29
--- /dev/null
+++ b/src/pkg/runtime/signals_solaris.h
@@ -0,0 +1,94 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define N SigNotify
+#define K SigKill
+#define T SigThrow
+#define P SigPanic
+#define D SigDefault
+
+SigTab runtime·sigtab[] = {
+ /* 0 */ 0, "SIGNONE: no trap",
+ /* 1 */ N+K, "SIGHUP: hangup",
+ /* 2 */ N+K, "SIGINT: interrupt (rubout)",
+ /* 3 */ N+T, "SIGQUIT: quit (ASCII FS)",
+ /* 4 */ T, "SIGILL: illegal instruction (not reset when caught)",
+ /* 5 */ T, "SIGTRAP: trace trap (not reset when caught)",
+ /* 6 */ N+T, "SIGABRT: used by abort, replace SIGIOT in the future",
+ /* 7 */ T, "SIGEMT: EMT instruction",
+ /* 8 */ P, "SIGFPE: floating point exception",
+ /* 9 */ 0, "SIGKILL: kill (cannot be caught or ignored)",
+ /* 10 */ P, "SIGBUS: bus error",
+ /* 11 */ P, "SIGSEGV: segmentation violation",
+ /* 12 */ T, "SIGSYS: bad argument to system call",
+ /* 13 */ N, "SIGPIPE: write on a pipe with no one to read it",
+ /* 14 */ N, "SIGALRM: alarm clock",
+ /* 15 */ N+K, "SIGTERM: software termination signal from kill",
+ /* 16 */ N, "SIGUSR1: user defined signal 1",
+ /* 17 */ N, "SIGUSR2: user defined signal 2",
+ /* 18 */ N, "SIGCLD: child status change",
+ /* 18 */ N, "SIGCHLD: child status change alias (POSIX)",
+ /* 19 */ N, "SIGPWR: power-fail restart",
+ /* 20 */ N, "SIGWINCH: window size change",
+ /* 21 */ N, "SIGURG: urgent socket condition",
+	/* 22 */	N, "SIGPOLL: pollable event occurred",
+ /* 23 */ N+D, "SIGSTOP: stop (cannot be caught or ignored)",
+ /* 24 */ 0, "SIGTSTP: user stop requested from tty",
+ /* 25 */ 0, "SIGCONT: stopped process has been continued",
+ /* 26 */ N+D, "SIGTTIN: background tty read attempted",
+ /* 27 */ N+D, "SIGTTOU: background tty write attempted",
+ /* 28 */ N, "SIGVTALRM: virtual timer expired",
+ /* 29 */ N, "SIGPROF: profiling timer expired",
+ /* 30 */ N, "SIGXCPU: exceeded cpu limit",
+ /* 31 */ N, "SIGXFSZ: exceeded file size limit",
+ /* 32 */ N, "SIGWAITING: reserved signal no longer used by",
+ /* 33 */ N, "SIGLWP: reserved signal no longer used by",
+ /* 34 */ N, "SIGFREEZE: special signal used by CPR",
+ /* 35 */ N, "SIGTHAW: special signal used by CPR",
+ /* 36 */ 0, "SIGCANCEL: reserved signal for thread cancellation",
+ /* 37 */ N, "SIGLOST: resource lost (eg, record-lock lost)",
+ /* 38 */ N, "SIGXRES: resource control exceeded",
+ /* 39 */ N, "SIGJVM1: reserved signal for Java Virtual Machine",
+ /* 40 */ N, "SIGJVM2: reserved signal for Java Virtual Machine",
+
+	/* TODO(aram): what should we do about these signals? D or N? Is this set static? */
+ /* 41 */ N, "real time signal",
+ /* 42 */ N, "real time signal",
+ /* 43 */ N, "real time signal",
+ /* 44 */ N, "real time signal",
+ /* 45 */ N, "real time signal",
+ /* 46 */ N, "real time signal",
+ /* 47 */ N, "real time signal",
+ /* 48 */ N, "real time signal",
+ /* 49 */ N, "real time signal",
+ /* 50 */ N, "real time signal",
+ /* 51 */ N, "real time signal",
+ /* 52 */ N, "real time signal",
+ /* 53 */ N, "real time signal",
+ /* 54 */ N, "real time signal",
+ /* 55 */ N, "real time signal",
+ /* 56 */ N, "real time signal",
+ /* 57 */ N, "real time signal",
+ /* 58 */ N, "real time signal",
+ /* 59 */ N, "real time signal",
+ /* 60 */ N, "real time signal",
+ /* 61 */ N, "real time signal",
+ /* 62 */ N, "real time signal",
+ /* 63 */ N, "real time signal",
+ /* 64 */ N, "real time signal",
+ /* 65 */ N, "real time signal",
+ /* 66 */ N, "real time signal",
+ /* 67 */ N, "real time signal",
+ /* 68 */ N, "real time signal",
+ /* 69 */ N, "real time signal",
+ /* 70 */ N, "real time signal",
+ /* 71 */ N, "real time signal",
+ /* 72 */ N, "real time signal",
+};
+
+#undef N
+#undef K
+#undef T
+#undef P
+#undef D
diff --git a/src/pkg/runtime/slice.c b/src/pkg/runtime/slice.goc
index ef8ab7fe0..2a14dafab 100644
--- a/src/pkg/runtime/slice.c
+++ b/src/pkg/runtime/slice.goc
@@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "type.h"
#include "typekind.h"
#include "malloc.h"
#include "race.h"
+#include "stack.h"
#include "../../cmd/ld/textflag.h"
enum
@@ -17,13 +19,9 @@ enum
static void makeslice1(SliceType*, intgo, intgo, Slice*);
static void growslice1(SliceType*, Slice, intgo, Slice *);
- void runtime·copy(Slice to, Slice fm, uintptr width, intgo ret);
// see also unsafe·NewArray
-// makeslice(typ *Type, len, cap int64) (ary []any);
-void
-runtime·makeslice(SliceType *t, int64 len, int64 cap, Slice ret)
-{
+func makeslice(t *SliceType, len int64, cap int64) (ret Slice) {
// NOTE: The len > MaxMem/elemsize check here is not strictly necessary,
// but it produces a 'len out of range' error instead of a 'cap out of range' error
// when someone does make([]T, bignumber). 'cap out of range' is true too,
@@ -58,9 +56,7 @@ makeslice1(SliceType *t, intgo len, intgo cap, Slice *ret)
}
// growslice(type *Type, x, []T, n int64) []T
-void
-runtime·growslice(SliceType *t, Slice old, int64 n, Slice ret)
-{
+func growslice(t *SliceType, old Slice, n int64) (ret Slice) {
int64 cap;
void *pc;
@@ -69,7 +65,7 @@ runtime·growslice(SliceType *t, Slice old, int64 n, Slice ret)
cap = old.cap + n;
- if((intgo)cap != cap || cap < old.cap || (t->elem->size > 0 && cap > MaxMem/t->elem->size))
+ if((intgo)cap != cap || cap < (int64)old.cap || (t->elem->size > 0 && cap > MaxMem/t->elem->size))
runtime·panicstring("growslice: cap out of range");
if(raceenabled) {
@@ -79,8 +75,6 @@ runtime·growslice(SliceType *t, Slice old, int64 n, Slice ret)
growslice1(t, old, cap, &ret);
- FLUSH(&ret);
-
if(debug) {
runtime·printf("growslice(%S,", *t->string);
runtime·printslice(old);
@@ -92,33 +86,53 @@ runtime·growslice(SliceType *t, Slice old, int64 n, Slice ret)
static void
growslice1(SliceType *t, Slice x, intgo newcap, Slice *ret)
{
- intgo m;
+ intgo newcap1;
+ uintptr capmem, lenmem;
+ int32 flag;
+ Type *typ;
+
+ typ = t->elem;
+ if(typ->size == 0) {
+ *ret = x;
+ ret->cap = newcap;
+ return;
+ }
- m = x.cap;
+ newcap1 = x.cap;
	// Using newcap directly for newcap1+newcap1 < newcap handles
	// both the case where newcap1 == 0 and also the case where
	// newcap1+newcap1/4 wraps around, in which case the loop
	// below might never terminate.
- if(m+m < newcap)
- m = newcap;
+ if(newcap1+newcap1 < newcap)
+ newcap1 = newcap;
else {
do {
if(x.len < 1024)
- m += m;
+ newcap1 += newcap1;
else
- m += m/4;
- } while(m < newcap);
+ newcap1 += newcap1/4;
+ } while(newcap1 < newcap);
}
- makeslice1(t, x.len, m, ret);
- runtime·memmove(ret->array, x.array, ret->len * t->elem->size);
+
+ if(newcap1 > MaxMem/typ->size)
+ runtime·panicstring("growslice: cap out of range");
+ capmem = runtime·roundupsize(newcap1*typ->size);
+ flag = 0;
+	// Can't use FlagNoZero w/o FlagNoScan, because otherwise GC can scan uninitialized memory.
+ if(typ->kind&KindNoPointers)
+ flag = FlagNoScan|FlagNoZero;
+ ret->array = runtime·mallocgc(capmem, (uintptr)typ|TypeInfo_Array, flag);
+ ret->len = x.len;
+ ret->cap = capmem/typ->size;
+ lenmem = x.len*typ->size;
+ runtime·memmove(ret->array, x.array, lenmem);
+ if(typ->kind&KindNoPointers)
+ runtime·memclr(ret->array+lenmem, capmem-lenmem);
}
-// copy(to any, fr any, wid uintptr) int
#pragma textflag NOSPLIT
-void
-runtime·copy(Slice to, Slice fm, uintptr width, intgo ret)
-{
+func copy(to Slice, fm Slice, width uintptr) (ret int) {
void *pc;
if(fm.len == 0 || to.len == 0 || width == 0) {
@@ -143,7 +157,6 @@ runtime·copy(Slice to, Slice fm, uintptr width, intgo ret)
}
out:
- FLUSH(&ret);
if(debug) {
runtime·prints("main·copy: to=");
@@ -159,9 +172,7 @@ out:
}
#pragma textflag NOSPLIT
-void
-runtime·slicestringcopy(Slice to, String fm, intgo ret)
-{
+func slicestringcopy(to Slice, fm String) (ret int) {
void *pc;
if(fm.len == 0 || to.len == 0) {
@@ -180,13 +191,10 @@ runtime·slicestringcopy(Slice to, String fm, intgo ret)
runtime·memmove(to.array, fm.str, ret);
-out:
- FLUSH(&ret);
+out:;
}
-void
-runtime·printslice(Slice a)
-{
+func printslice(a Slice) {
runtime·prints("[");
runtime·printint(a.len);
runtime·prints("/");
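The rewritten growslice1 keeps the long-standing growth policy: small slices (fewer than 1024 elements) double their capacity, larger ones grow by a quarter per step, and a request that one doubling cannot reach is taken verbatim. A standalone sketch of just that policy (the real code additionally rounds the result up to a malloc size class via roundupsize):

	package main

	import "fmt"

	// grow mirrors growslice1's capacity policy. The c+c < newcap
	// check in the C code also guards against integer wraparound.
	func grow(oldlen, oldcap, newcap int) int {
		c := oldcap
		if c+c < newcap {
			return newcap
		}
		for c < newcap {
			if oldlen < 1024 {
				c += c
			} else {
				c += c / 4
			}
		}
		return c
	}

	func main() {
		fmt.Println(grow(4, 4, 5))          // 8: small slices double
		fmt.Println(grow(2000, 2000, 2001)) // 2500: large slices grow by 1/4
	}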
diff --git a/src/pkg/runtime/softfloat_arm.c b/src/pkg/runtime/softfloat_arm.c
index f5801dde4..29a52bd0e 100644
--- a/src/pkg/runtime/softfloat_arm.c
+++ b/src/pkg/runtime/softfloat_arm.c
@@ -16,7 +16,7 @@
#define FLAGS_V (1U << 28)
void runtime·abort(void);
-void math·sqrtC(uint64, uint64*);
+void runtime·sqrtC(uint64, uint64*);
static uint32 trace = 0;
@@ -413,7 +413,7 @@ stage3: // regd, regm are 4bit variables
break;
case 0xeeb10bc0: // D[regd] = sqrt D[regm]
- math·sqrtC(getd(regm), &uval);
+ runtime·sqrtC(getd(regm), &uval);
putd(regd, uval);
if(trace)
diff --git a/src/pkg/runtime/sqrt.go b/src/pkg/runtime/sqrt.go
new file mode 100644
index 000000000..34a8c3806
--- /dev/null
+++ b/src/pkg/runtime/sqrt.go
@@ -0,0 +1,150 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copy of math/sqrt.go, here for use by ARM softfloat.
+
+package runtime
+
+import "unsafe"
+
+// The original C code and the long comment below are
+// from FreeBSD's /usr/src/lib/msun/src/e_sqrt.c and
+// came with this notice. The go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// __ieee754_sqrt(x)
+// Return correctly rounded sqrt.
+// -----------------------------------------
+// | Use the hardware sqrt if you have one |
+// -----------------------------------------
+// Method:
+// Bit by bit method using integer arithmetic. (Slow, but portable)
+// 1. Normalization
+// Scale x to y in [1,4) with even powers of 2:
+// find an integer k such that 1 <= (y=x*2**(2k)) < 4, then
+// sqrt(x) = 2**k * sqrt(y)
+// 2. Bit by bit computation
+// Let q = sqrt(y) truncated to i bit after binary point (q = 1),
+// i 0
+// i+1 2
+// s = 2*q , and y = 2 * ( y - q ). (1)
+// i i i i
+//
+// To compute q from q , one checks whether
+// i+1 i
+//
+// -(i+1) 2
+// (q + 2 ) <= y. (2)
+// i
+// -(i+1)
+// If (2) is false, then q = q ; otherwise q = q + 2 .
+// i+1 i i+1 i
+//
+// With some algebraic manipulation, it is not difficult to see
+// that (2) is equivalent to
+// -(i+1)
+// s + 2 <= y (3)
+// i i
+//
+// The advantage of (3) is that s and y can be computed by
+// i i
+// the following recurrence formula:
+// if (3) is false
+//
+// s = s , y = y ; (4)
+// i+1 i i+1 i
+//
+// otherwise,
+// -i -(i+1)
+// s = s + 2 , y = y - s - 2 (5)
+// i+1 i i+1 i i
+//
+// One may easily use induction to prove (4) and (5).
+// Note. Since the left hand side of (3) contains only i+2 bits,
+// it is not necessary to do a full (53-bit) comparison
+// in (3).
+// 3. Final rounding
+// After generating the 53 bits result, we compute one more bit.
+// Together with the remainder, we can decide whether the
+// result is exact, bigger than 1/2ulp, or less than 1/2ulp
+// (it will never equal 1/2ulp).
+// The rounding mode can be detected by checking whether
+// huge + tiny is equal to huge, and whether huge - tiny is
+// equal to huge for some floating point number "huge" and "tiny".
+//
+//
+// Notes: Rounding mode detection omitted.
+
+const (
+ uvnan = 0x7FF8000000000001
+ uvinf = 0x7FF0000000000000
+ uvneginf = 0xFFF0000000000000
+ mask = 0x7FF
+ shift = 64 - 11 - 1
+ bias = 1023
+ maxFloat64 = 1.797693134862315708145274237317043567981e+308 // 2**1023 * (2**53 - 1) / 2**52
+)
+
+func float64bits(f float64) uint64 { return *(*uint64)(unsafe.Pointer(&f)) }
+func float64frombits(b uint64) float64 { return *(*float64)(unsafe.Pointer(&b)) }
+
+func sqrt(x float64) float64 {
+ // special cases
+ switch {
+ case x == 0 || x != x || x > maxFloat64:
+ return x
+ case x < 0:
+		return float64frombits(uvnan) // NaN
+ }
+ ix := float64bits(x)
+ // normalize x
+ exp := int((ix >> shift) & mask)
+ if exp == 0 { // subnormal x
+ for ix&1<<shift == 0 {
+ ix <<= 1
+ exp--
+ }
+ exp++
+ }
+ exp -= bias // unbias exponent
+ ix &^= mask << shift
+ ix |= 1 << shift
+ if exp&1 == 1 { // odd exp, double x to make it even
+ ix <<= 1
+ }
+ exp >>= 1 // exp = exp/2, exponent of square root
+ // generate sqrt(x) bit by bit
+ ix <<= 1
+ var q, s uint64 // q = sqrt(x)
+ r := uint64(1 << (shift + 1)) // r = moving bit from MSB to LSB
+ for r != 0 {
+ t := s + r
+ if t <= ix {
+ s = t + r
+ ix -= t
+ q += r
+ }
+ ix <<= 1
+ r >>= 1
+ }
+ // final rounding
+ if ix != 0 { // remainder, result not exact
+ q += q & 1 // round according to extra bit
+ }
+ ix = q>>1 + uint64(exp-1+bias)<<shift // significand + biased exponent
+ return float64frombits(ix)
+}
+
+func sqrtC(f float64, r *float64) {
+ *r = sqrt(f)
+}
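The normalization step above is the subtle part: the exponent is forced even (doubling the significand when it is odd) so that halving it yields the exponent of the square root. A worked sketch of that step for x = 8, using only the standard library (constants duplicated from the file above):

	package main

	import (
		"fmt"
		"math"
	)

	const (
		mask  = 0x7FF
		shift = 64 - 11 - 1
		bias  = 1023
	)

	func main() {
		x := 8.0 // unbiased exponent 3 (odd), so the significand gets doubled
		ix := math.Float64bits(x)
		exp := int(ix>>shift&mask) - bias
		ix &^= mask << shift
		ix |= 1 << shift // restore the implicit leading 1 bit
		if exp&1 == 1 {
			ix <<= 1 // make the exponent even: 2^3 * m == 2^2 * (2m)
		}
		exp >>= 1 // the square root halves the exponent
		fmt.Println("exponent of sqrt(8):", exp) // 1, since sqrt(8) ≈ 2.83 is in [2,4)
	}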
diff --git a/src/pkg/runtime/stack.c b/src/pkg/runtime/stack.c
index 634706051..1680f004e 100644
--- a/src/pkg/runtime/stack.c
+++ b/src/pkg/runtime/stack.c
@@ -6,10 +6,21 @@
#include "arch_GOARCH.h"
#include "malloc.h"
#include "stack.h"
+#include "funcdata.h"
+#include "typekind.h"
+#include "type.h"
+#include "../../cmd/ld/textflag.h"
enum
{
+ // StackDebug == 0: no logging
+ // == 1: logging of per-stack operations
+ // == 2: logging of per-frame operations
+ // == 3: logging of per-word updates
+ // == 4: logging of per-word reads
StackDebug = 0,
+ StackFromSystem = 0, // allocate stacks from system memory instead of the heap
+ StackFaultOnFree = 0, // old stacks are mapped noaccess to detect use after free
};
typedef struct StackCacheNode StackCacheNode;
@@ -74,22 +85,37 @@ stackcacherelease(void)
}
void*
-runtime·stackalloc(uint32 n)
+runtime·stackalloc(G *gp, uint32 n)
{
uint32 pos;
void *v;
+ bool malloced;
+ Stktop *top;
// Stackalloc must be called on scheduler stack, so that we
// never try to grow the stack during the code that stackalloc runs.
// Doing so would cause a deadlock (issue 1547).
if(g != m->g0)
runtime·throw("stackalloc not on scheduler stack");
+ if((n & (n-1)) != 0)
+ runtime·throw("stack size not a power of 2");
+ if(StackDebug >= 1)
+ runtime·printf("stackalloc %d\n", n);
+
+ gp->stacksize += n;
+ if(runtime·debug.efence || StackFromSystem) {
+ v = runtime·SysAlloc(ROUND(n, PageSize), &mstats.stacks_sys);
+ if(v == nil)
+ runtime·throw("out of memory (stackalloc)");
+ return v;
+ }
- // Stacks are usually allocated with a fixed-size free-list allocator,
- // but if we need a stack of non-standard size, we fall back on malloc
- // (assuming that inside malloc and GC all the stack frames are small,
+ // Minimum-sized stacks are allocated with a fixed-size free-list allocator,
+ // but if we need a stack of a bigger size, we fall back on malloc
+ // (assuming that inside malloc all the stack frames are small,
// so that we do not deadlock).
- if(n == FixedStack || m->mallocing || m->gcing) {
+ malloced = true;
+ if(n == FixedStack || m->mallocing) {
if(n != FixedStack) {
runtime·printf("stackalloc: in malloc, size=%d want %d\n", FixedStack, n);
runtime·throw("stackalloc");
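stackalloc's new precondition uses the classic single-bit test: n is a power of two exactly when n&(n-1) clears its only set bit. A one-liner sketch (the runtime skips the n != 0 guard because stack sizes are never zero):

	package main

	import "fmt"

	// isPow2 reports whether n has exactly one set bit.
	func isPow2(n uint32) bool { return n != 0 && n&(n-1) == 0 }

	func main() {
		fmt.Println(isPow2(8192), isPow2(12288)) // true false
	}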
@@ -102,27 +128,46 @@ runtime·stackalloc(uint32 n)
m->stackcachepos = pos;
m->stackcachecnt--;
m->stackinuse++;
- return v;
- }
- return runtime·mallocgc(n, 0, FlagNoProfiling|FlagNoGC|FlagNoZero|FlagNoInvokeGC);
+ malloced = false;
+ } else
+ v = runtime·mallocgc(n, 0, FlagNoProfiling|FlagNoGC|FlagNoZero|FlagNoInvokeGC);
+
+ top = (Stktop*)((byte*)v+n-sizeof(Stktop));
+ runtime·memclr((byte*)top, sizeof(*top));
+ top->malloced = malloced;
+ return v;
}
void
-runtime·stackfree(void *v, uintptr n)
+runtime·stackfree(G *gp, void *v, Stktop *top)
{
uint32 pos;
-
- if(n == FixedStack || m->mallocing || m->gcing) {
- if(m->stackcachecnt == StackCacheSize)
- stackcacherelease();
- pos = m->stackcachepos;
- m->stackcache[pos] = v;
- m->stackcachepos = (pos + 1) % StackCacheSize;
- m->stackcachecnt++;
- m->stackinuse--;
+ uintptr n;
+
+ n = (uintptr)(top+1) - (uintptr)v;
+ if(StackDebug >= 1)
+ runtime·printf("stackfree %p %d\n", v, (int32)n);
+ gp->stacksize -= n;
+ if(runtime·debug.efence || StackFromSystem) {
+ if(runtime·debug.efence || StackFaultOnFree)
+ runtime·SysFault(v, n);
+ else
+ runtime·SysFree(v, n, &mstats.stacks_sys);
+ return;
+ }
+ if(top->malloced) {
+ runtime·free(v);
return;
}
- runtime·free(v);
+ if(n != FixedStack)
+ runtime·throw("stackfree: bad fixed size");
+ if(m->stackcachecnt == StackCacheSize)
+ stackcacherelease();
+ pos = m->stackcachepos;
+ m->stackcache[pos] = v;
+ m->stackcachepos = (pos + 1) % StackCacheSize;
+ m->stackcachecnt++;
+ m->stackinuse--;
}
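stackalloc and stackfree now share a per-M cache of FixedStack-sized segments: a small circular LIFO that spills to a global pool when full. A sketch of that shape with illustrative names (stackCacheSize stands in for StackCacheSize; the refill/release paths are reduced to booleans):

	package main

	import "fmt"

	const stackCacheSize = 32

	type mcache struct {
		buf [stackCacheSize]uintptr
		pos int
		cnt int
	}

	// alloc pops the most recently freed segment, if any.
	func (c *mcache) alloc() (uintptr, bool) {
		if c.cnt == 0 {
			return 0, false // caller refills from the global pool
		}
		c.pos = (c.pos - 1 + stackCacheSize) % stackCacheSize
		c.cnt--
		return c.buf[c.pos], true
	}

	// free pushes a segment back onto the cache.
	func (c *mcache) free(v uintptr) bool {
		if c.cnt == stackCacheSize {
			return false // caller releases a batch to the global pool
		}
		c.buf[c.pos] = v
		c.pos = (c.pos + 1) % stackCacheSize
		c.cnt++
		return true
	}

	func main() {
		var c mcache
		c.free(0x1000)
		v, ok := c.alloc()
		fmt.Printf("%#x %v\n", v, ok) // 0x1000 true
	}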
// Called from runtime·lessstack when returning from a function which
@@ -145,12 +190,12 @@ runtime·oldstack(void)
sp = (byte*)top;
argsize = top->argsize;
- if(StackDebug) {
+ if(StackDebug >= 1) {
runtime·printf("runtime: oldstack gobuf={pc:%p sp:%p lr:%p} cret=%p argsize=%p\n",
- top->gobuf.pc, top->gobuf.sp, top->gobuf.lr, m->cret, (uintptr)argsize);
+ top->gobuf.pc, top->gobuf.sp, top->gobuf.lr, (uintptr)m->cret, (uintptr)argsize);
}
- // gp->status is usually Grunning, but it could be Gsyscall if a stack split
+ // gp->status is usually Grunning, but it could be Gsyscall if a stack overflow
// happens during a function call inside entersyscall.
oldstatus = gp->status;
@@ -175,11 +220,7 @@ runtime·oldstack(void)
gp->stackguard = top->stackguard;
gp->stackguard0 = gp->stackguard;
gp->panicwrap = top->panicwrap;
-
- if(top->free != 0) {
- gp->stacksize -= top->free;
- runtime·stackfree(old, top->free);
- }
+ runtime·stackfree(gp, old, top);
gp->status = oldstatus;
runtime·gogo(&gp->sched);
@@ -187,6 +228,435 @@ runtime·oldstack(void)
uintptr runtime·maxstacksize = 1<<20; // enough until runtime.main sets it for real
+static uint8*
+mapnames[] = {
+ (uint8*)"---",
+ (uint8*)"scalar",
+ (uint8*)"ptr",
+ (uint8*)"multi",
+};
+
+// Stack frame layout
+//
+// (x86)
+// +------------------+
+// | args from caller |
+// +------------------+ <- frame->argp
+// | return address |
+// +------------------+ <- frame->varp
+// | locals |
+// +------------------+
+// | args to callee |
+// +------------------+ <- frame->sp
+//
+// (arm: TODO)
+
+typedef struct CopyableInfo CopyableInfo;
+struct CopyableInfo {
+ byte *stk; // bottom address of segment
+ byte *base; // top address of segment (including Stktop)
+ int32 frames; // count of copyable frames (-1 = not copyable)
+};
+
+void runtime·main(void);
+
+static bool
+checkframecopy(Stkframe *frame, void *arg)
+{
+ CopyableInfo *cinfo;
+ Func *f;
+ StackMap *stackmap;
+
+ cinfo = arg;
+ f = frame->fn;
+ if(StackDebug >= 2)
+ runtime·printf(" checking %s frame=[%p,%p] stk=[%p,%p]\n", runtime·funcname(f), frame->sp, frame->fp, cinfo->stk, cinfo->base);
+ // if we're not in the segment any more, return immediately.
+ if(frame->varp < cinfo->stk || frame->varp >= cinfo->base) {
+ if(StackDebug >= 2)
+ runtime·printf(" <next segment>\n");
+ return false; // stop traceback
+ }
+ if(f->entry == (uintptr)runtime·main) {
+ // A special routine at the TOS of the main routine.
+ // We will allow it to be copied even though we don't
+ // have full GC info for it (because it is written in C).
+ cinfo->frames++;
+ return false; // stop traceback
+ }
+ if(frame->varp != (byte*)frame->sp) { // not in prologue (and has at least one local or outarg)
+ stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
+ if(stackmap == nil) {
+ cinfo->frames = -1;
+ if(StackDebug >= 1)
+ runtime·printf("copystack: no locals info for %s\n", runtime·funcname(f));
+ return false;
+ }
+ if(stackmap->n <= 0) {
+ cinfo->frames = -1;
+ if(StackDebug >= 1)
+ runtime·printf("copystack: locals size info only for %s\n", runtime·funcname(f));
+ return false;
+ }
+ }
+ if(frame->arglen != 0) {
+ stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
+ if(stackmap == nil) {
+ cinfo->frames = -1;
+ if(StackDebug >= 1)
+ runtime·printf("copystack: no arg info for %s\n", runtime·funcname(f));
+ return false;
+ }
+ }
+ cinfo->frames++;
+ return true; // this frame is ok; keep going
+}
+
+// If the top segment of the stack contains an uncopyable
+// frame, return -1. Otherwise return the number of frames
+// in the top segment, all of which are copyable.
+static int32
+copyabletopsegment(G *gp)
+{
+ CopyableInfo cinfo;
+ Defer *d;
+ Func *f;
+ FuncVal *fn;
+ StackMap *stackmap;
+
+ cinfo.stk = (byte*)gp->stackguard - StackGuard;
+ cinfo.base = (byte*)gp->stackbase + sizeof(Stktop);
+ cinfo.frames = 0;
+
+ // Check that each frame is copyable. As a side effect,
+ // count the frames.
+ runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, checkframecopy, &cinfo, false);
+ if(StackDebug >= 1 && cinfo.frames != -1)
+ runtime·printf("copystack: %d copyable frames\n", cinfo.frames);
+
+ // Check to make sure all Defers are copyable
+ for(d = gp->defer; d != nil; d = d->link) {
+ if(cinfo.stk <= (byte*)d && (byte*)d < cinfo.base) {
+ // Defer is on the stack. Its copyableness has
+ // been established during stack walking.
+ // For now, this only happens with the Defer in runtime.main.
+ continue;
+ }
+ if(d->argp < cinfo.stk || cinfo.base <= d->argp)
+ break; // a defer for the next segment
+ fn = d->fn;
+ if(fn == nil) // See issue 8047
+ continue;
+ f = runtime·findfunc((uintptr)fn->fn);
+ if(f == nil)
+ return -1;
+
+ // Check to make sure we have an args pointer map for the defer's args.
+ // We only need the args map, but we check
+ // for the locals map also, because when the locals map
+ // isn't provided it means the ptr map came from C and
+ // C (particularly, cgo) lies to us. See issue 7695.
+ stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
+ if(stackmap == nil || stackmap->n <= 0)
+ return -1;
+ stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
+ if(stackmap == nil || stackmap->n <= 0)
+ return -1;
+
+ if(cinfo.stk <= (byte*)fn && (byte*)fn < cinfo.base) {
+ // FuncVal is on the stack. Again, its copyableness
+ // was established during stack walking.
+ continue;
+ }
+ // The FuncVal may have pointers in it, but fortunately for us
+ // the compiler won't put pointers into the stack in a
+ // heap-allocated FuncVal.
+ // One day if we do need to check this, we'll need maps of the
+ // pointerness of the closure args. The only place we have that map
+ // right now is in the gc program for the FuncVal. Ugh.
+ }
+
+ return cinfo.frames;
+}
+
+typedef struct AdjustInfo AdjustInfo;
+struct AdjustInfo {
+ byte *oldstk; // bottom address of segment
+ byte *oldbase; // top address of segment (after Stktop)
+ uintptr delta; // ptr distance from old to new stack (newbase - oldbase)
+};
+
+// bv describes the memory starting at address scanp.
+// Adjust any pointers contained therein.
+static void
+adjustpointers(byte **scanp, BitVector *bv, AdjustInfo *adjinfo, Func *f)
+{
+ uintptr delta;
+ int32 num, i;
+ byte *p, *minp, *maxp;
+ Type *t;
+ Itab *tab;
+
+ minp = adjinfo->oldstk;
+ maxp = adjinfo->oldbase;
+ delta = adjinfo->delta;
+ num = bv->n / BitsPerPointer;
+ for(i = 0; i < num; i++) {
+ if(StackDebug >= 4)
+ runtime·printf(" %p:%s:%p\n", &scanp[i], mapnames[bv->data[i / (32 / BitsPerPointer)] >> (i * BitsPerPointer & 31) & 3], scanp[i]);
+ switch(bv->data[i / (32 / BitsPerPointer)] >> (i * BitsPerPointer & 31) & 3) {
+ case BitsDead:
+ if(runtime·debug.gcdead)
+ scanp[i] = (byte*)PoisonStack;
+ break;
+ case BitsScalar:
+ break;
+ case BitsPointer:
+ p = scanp[i];
+ if(f != nil && (byte*)0 < p && (p < (byte*)PageSize || (uintptr)p == PoisonGC || (uintptr)p == PoisonStack)) {
+ // Looks like a junk value in a pointer slot.
+ // Live analysis wrong?
+ m->traceback = 2;
+ runtime·printf("runtime: bad pointer in frame %s at %p: %p\n", runtime·funcname(f), &scanp[i], p);
+ runtime·throw("bad pointer!");
+ }
+ if(minp <= p && p < maxp) {
+ if(StackDebug >= 3)
+ runtime·printf("adjust ptr %p %s\n", p, runtime·funcname(f));
+ scanp[i] = p + delta;
+ }
+ break;
+ case BitsMultiWord:
+ switch(bv->data[(i+1) / (32 / BitsPerPointer)] >> ((i+1) * BitsPerPointer & 31) & 3) {
+ case BitsString:
+ // string referents are never on the stack, never need to be adjusted
+ i++; // skip len
+ break;
+ case BitsSlice:
+ p = scanp[i];
+ if(minp <= p && p < maxp) {
+ if(StackDebug >= 3)
+ runtime·printf("adjust slice %p\n", p);
+ scanp[i] = p + delta;
+ }
+ i += 2; // skip len, cap
+ break;
+ case BitsEface:
+ t = (Type*)scanp[i];
+ if(t != nil && (t->size > PtrSize || (t->kind & KindNoPointers) == 0)) {
+ p = scanp[i+1];
+ if(minp <= p && p < maxp) {
+ if(StackDebug >= 3)
+ runtime·printf("adjust eface %p\n", p);
+ if(t->size > PtrSize) // currently we always allocate such objects on the heap
+ runtime·throw("large interface value found on stack");
+ scanp[i+1] = p + delta;
+ }
+ }
+ i++;
+ break;
+ case BitsIface:
+ tab = (Itab*)scanp[i];
+ if(tab != nil) {
+ t = tab->type;
+ //runtime·printf(" type=%p\n", t);
+ if(t->size > PtrSize || (t->kind & KindNoPointers) == 0) {
+ p = scanp[i+1];
+ if(minp <= p && p < maxp) {
+ if(StackDebug >= 3)
+ runtime·printf("adjust iface %p\n", p);
+ if(t->size > PtrSize) // currently we always allocate such objects on the heap
+ runtime·throw("large interface value found on stack");
+ scanp[i+1] = p + delta;
+ }
+ }
+ }
+ i++;
+ break;
+ }
+ break;
+ }
+ }
+}
+
+// Note: the argument/return area is adjusted by the callee.
+static bool
+adjustframe(Stkframe *frame, void *arg)
+{
+ AdjustInfo *adjinfo;
+ Func *f;
+ StackMap *stackmap;
+ int32 pcdata;
+ BitVector bv;
+ uintptr targetpc;
+
+ adjinfo = arg;
+ f = frame->fn;
+ if(StackDebug >= 2)
+ runtime·printf(" adjusting %s frame=[%p,%p] pc=%p continpc=%p\n", runtime·funcname(f), frame->sp, frame->fp, frame->pc, frame->continpc);
+ if(f->entry == (uintptr)runtime·main)
+ return true;
+ targetpc = frame->continpc;
+ if(targetpc == 0) {
+ // Frame is dead.
+ return true;
+ }
+ if(targetpc != f->entry)
+ targetpc--;
+ pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
+ if(pcdata == -1)
+ pcdata = 0; // in prologue
+
+ // adjust local pointers
+ if(frame->varp != (byte*)frame->sp) {
+ stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
+ if(stackmap == nil)
+ runtime·throw("no locals info");
+ if(stackmap->n <= 0)
+ runtime·throw("locals size info only");
+ bv = runtime·stackmapdata(stackmap, pcdata);
+ if(StackDebug >= 3)
+ runtime·printf(" locals\n");
+ adjustpointers((byte**)frame->varp - bv.n / BitsPerPointer, &bv, adjinfo, f);
+ }
+ // adjust inargs and outargs
+ if(frame->arglen != 0) {
+ stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
+ if(stackmap == nil)
+ runtime·throw("no arg info");
+ bv = runtime·stackmapdata(stackmap, pcdata);
+ if(StackDebug >= 3)
+ runtime·printf(" args\n");
+ adjustpointers((byte**)frame->argp, &bv, adjinfo, nil);
+ }
+ return true;
+}
+
+static void
+adjustctxt(G *gp, AdjustInfo *adjinfo)
+{
+ if(adjinfo->oldstk <= (byte*)gp->sched.ctxt && (byte*)gp->sched.ctxt < adjinfo->oldbase)
+ gp->sched.ctxt = (byte*)gp->sched.ctxt + adjinfo->delta;
+}
+
+static void
+adjustdefers(G *gp, AdjustInfo *adjinfo)
+{
+ Defer *d, **dp;
+ Func *f;
+ FuncVal *fn;
+ StackMap *stackmap;
+ BitVector bv;
+
+ for(dp = &gp->defer, d = *dp; d != nil; dp = &d->link, d = *dp) {
+ if(adjinfo->oldstk <= (byte*)d && (byte*)d < adjinfo->oldbase) {
+ // The Defer record is on the stack. Its fields will
+ // get adjusted appropriately.
+ // This only happens for runtime.main now, but a compiler
+ // optimization could do more of this.
+ *dp = (Defer*)((byte*)d + adjinfo->delta);
+ continue;
+ }
+ if(d->argp < adjinfo->oldstk || adjinfo->oldbase <= d->argp)
+ break; // a defer for the next segment
+ fn = d->fn;
+ if(fn == nil) {
+ // Defer of nil function. It will panic when run, and there
+ // aren't any args to adjust. See issue 8047.
+ d->argp += adjinfo->delta;
+ continue;
+ }
+ f = runtime·findfunc((uintptr)fn->fn);
+ if(f == nil)
+ runtime·throw("can't adjust unknown defer");
+ if(StackDebug >= 4)
+ runtime·printf(" checking defer %s\n", runtime·funcname(f));
+ // Defer's FuncVal might be on the stack
+ if(adjinfo->oldstk <= (byte*)fn && (byte*)fn < adjinfo->oldbase) {
+ if(StackDebug >= 3)
+ runtime·printf(" adjust defer fn %s\n", runtime·funcname(f));
+ d->fn = (FuncVal*)((byte*)fn + adjinfo->delta);
+ } else {
+ // deferred function's args might point into the stack.
+ if(StackDebug >= 3)
+ runtime·printf(" adjust deferred args for %s\n", runtime·funcname(f));
+ stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
+ if(stackmap == nil)
+ runtime·throw("runtime: deferred function has no arg ptr map");
+ bv = runtime·stackmapdata(stackmap, 0);
+ adjustpointers(d->args, &bv, adjinfo, f);
+ }
+ d->argp += adjinfo->delta;
+ }
+}
+
+// Copies the top stack segment of gp to a new stack segment of a
+// different size. The top segment must contain nframes frames.
+static void
+copystack(G *gp, uintptr nframes, uintptr newsize)
+{
+ byte *oldstk, *oldbase, *newstk, *newbase;
+ uintptr oldsize, used;
+ AdjustInfo adjinfo;
+ Stktop *oldtop, *newtop;
+ bool malloced;
+
+ if(gp->syscallstack != 0)
+ runtime·throw("can't handle stack copy in syscall yet");
+ oldstk = (byte*)gp->stackguard - StackGuard;
+ oldbase = (byte*)gp->stackbase + sizeof(Stktop);
+ oldsize = oldbase - oldstk;
+ used = oldbase - (byte*)gp->sched.sp;
+ oldtop = (Stktop*)gp->stackbase;
+
+ // allocate new stack
+ newstk = runtime·stackalloc(gp, newsize);
+ newbase = newstk + newsize;
+ newtop = (Stktop*)(newbase - sizeof(Stktop));
+ malloced = newtop->malloced;
+
+ if(StackDebug >= 1)
+ runtime·printf("copystack [%p %p]/%d -> [%p %p]/%d\n", oldstk, oldbase, (int32)oldsize, newstk, newbase, (int32)newsize);
+ USED(oldsize);
+
+ // adjust pointers in the to-be-copied frames
+ adjinfo.oldstk = oldstk;
+ adjinfo.oldbase = oldbase;
+ adjinfo.delta = newbase - oldbase;
+ runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, nframes, adjustframe, &adjinfo, false);
+
+ // adjust other miscellaneous things that have pointers into stacks.
+ adjustctxt(gp, &adjinfo);
+ adjustdefers(gp, &adjinfo);
+
+ // copy the stack (including Stktop) to the new location
+ runtime·memmove(newbase - used, oldbase - used, used);
+ newtop->malloced = malloced;
+
+ // Swap out old stack for new one
+ gp->stackbase = (uintptr)newtop;
+ gp->stackguard = (uintptr)newstk + StackGuard;
+ gp->stackguard0 = (uintptr)newstk + StackGuard; // NOTE: might clobber a preempt request
+ if(gp->stack0 == (uintptr)oldstk)
+ gp->stack0 = (uintptr)newstk;
+ gp->sched.sp = (uintptr)(newbase - used);
+
+ // free old stack
+ runtime·stackfree(gp, oldstk, oldtop);
+}
+
+// round x up to a power of 2.
+int32
+runtime·round2(int32 x)
+{
+ int32 s;
+
+ s = 0;
+ while((1 << s) < x)
+ s++;
+ return 1 << s;
+}
+
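All of the adjust* helpers above apply one rule: a word is retargeted by delta only if it currently points into the old segment [oldstk, oldbase); pointers to the heap or to other segments are left untouched. (When shrinking, delta "goes negative" via unsigned wraparound.) A minimal sketch of that predicate with made-up addresses:

	package main

	import "fmt"

	// adjust retargets p only when it points into the old segment.
	func adjust(p, oldstk, oldbase, delta uintptr) uintptr {
		if oldstk <= p && p < oldbase {
			return p + delta
		}
		return p
	}

	func main() {
		const oldstk, oldbase, newbase = 0x1000, 0x3000, 0x7000
		delta := uintptr(newbase - oldbase) // 0x4000
		fmt.Printf("%#x\n", adjust(0x2000, oldstk, oldbase, delta)) // 0x6000: inside, moved
		fmt.Printf("%#x\n", adjust(0x9000, oldstk, oldbase, delta)) // 0x9000: outside, untouched
	}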
// Called from runtime·newstackcall or from runtime·morestack when a new
// stack segment is needed. Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
@@ -195,16 +665,18 @@ uintptr runtime·maxstacksize = 1<<20; // enough until runtime.main sets it for
void
runtime·newstack(void)
{
- int32 framesize, argsize, oldstatus;
+ int32 framesize, argsize, oldstatus, oldsize, newsize, nframes;
Stktop *top, *oldtop;
- byte *stk;
+ byte *stk, *oldstk, *oldbase;
uintptr sp;
uintptr *src, *dst, *dstend;
G *gp;
- Gobuf label;
+ Gobuf label, morebuf;
+ void *moreargp;
bool newstackcall;
- uintptr free;
+ if(m->forkstackguard)
+ runtime·throw("split stack after fork");
if(m->morebuf.g != m->curg) {
runtime·printf("runtime: newstack called from g=%p\n"
"\tm=%p m->curg=%p m->g0=%p m->gsignal=%p\n",
@@ -212,15 +684,21 @@ runtime·newstack(void)
runtime·throw("runtime: wrong goroutine in newstack");
}
- // gp->status is usually Grunning, but it could be Gsyscall if a stack split
+ // gp->status is usually Grunning, but it could be Gsyscall if a stack overflow
// happens during a function call inside entersyscall.
gp = m->curg;
oldstatus = gp->status;
framesize = m->moreframesize;
argsize = m->moreargsize;
+ moreargp = m->moreargp;
+ m->moreargp = nil;
+ morebuf = m->morebuf;
+ m->morebuf.pc = (uintptr)nil;
+ m->morebuf.lr = (uintptr)nil;
+ m->morebuf.sp = (uintptr)nil;
gp->status = Gwaiting;
- gp->waitreason = "stack split";
+ gp->waitreason = "stack growth";
newstackcall = framesize==1;
if(newstackcall)
framesize = 0;
@@ -234,7 +712,7 @@ runtime·newstack(void)
// The call to morestack cost a word.
sp -= sizeof(uintptr);
}
- if(StackDebug || sp < gp->stackguard - StackGuard) {
+ if(StackDebug >= 1 || sp < gp->stackguard - StackGuard) {
runtime·printf("runtime: newstack framesize=%p argsize=%p sp=%p stack=[%p, %p]\n"
"\tmorebuf={pc:%p sp:%p lr:%p}\n"
"\tsched={pc:%p sp:%p lr:%p ctxt:%p}\n",
@@ -248,8 +726,8 @@ runtime·newstack(void)
}
if(argsize % sizeof(uintptr) != 0) {
- runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
- runtime·throw("runtime: stack split argsize");
+ runtime·printf("runtime: stack growth with misaligned argsize %d\n", argsize);
+ runtime·throw("runtime: stack growth argsize");
}
if(gp->stackguard0 == (uintptr)StackPreempt) {
@@ -258,7 +736,7 @@ runtime·newstack(void)
if(oldstatus == Grunning && m->p == nil && m->locks == 0)
runtime·throw("runtime: g is running but p is not");
if(oldstatus == Gsyscall && m->locks == 0)
- runtime·throw("runtime: stack split during syscall");
+ runtime·throw("runtime: stack growth during syscall");
// Be conservative about where we preempt.
// We are interested in preempting user Go code, not runtime code.
if(oldstatus != Grunning || m->locks || m->mallocing || m->gcing || m->p->status != Prunning) {
@@ -273,46 +751,55 @@ runtime·newstack(void)
runtime·gosched0(gp); // never return
}
- if(newstackcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > gp->stackguard) {
- // special case: called from runtime.newstackcall (framesize==1)
- // to call code with an arbitrary argument size,
- // and we have enough space on the current stack.
- // the new Stktop* is necessary to unwind, but
- // we don't need to create a new segment.
- top = (Stktop*)(m->morebuf.sp - sizeof(*top));
- stk = (byte*)gp->stackguard - StackGuard;
- free = 0;
- } else {
- // allocate new segment.
- framesize += argsize;
- framesize += StackExtra; // room for more functions, Stktop.
- if(framesize < StackMin)
- framesize = StackMin;
- framesize += StackSystem;
- gp->stacksize += framesize;
- if(gp->stacksize > runtime·maxstacksize) {
- runtime·printf("runtime: goroutine stack exceeds %D-byte limit\n", (uint64)runtime·maxstacksize);
- runtime·throw("stack overflow");
+ // If every frame on the top segment is copyable, allocate a bigger segment
+ // and move the segment instead of allocating a new segment.
+ if(runtime·copystack) {
+ if(!runtime·precisestack)
+ runtime·throw("can't copy stacks without precise stacks");
+ nframes = copyabletopsegment(gp);
+ if(nframes != -1) {
+ oldstk = (byte*)gp->stackguard - StackGuard;
+ oldbase = (byte*)gp->stackbase + sizeof(Stktop);
+ oldsize = oldbase - oldstk;
+ newsize = oldsize * 2;
+ copystack(gp, nframes, newsize);
+ if(StackDebug >= 1)
+ runtime·printf("stack grow done\n");
+ if(gp->stacksize > runtime·maxstacksize) {
+ runtime·printf("runtime: goroutine stack exceeds %D-byte limit\n", (uint64)runtime·maxstacksize);
+ runtime·throw("stack overflow");
+ }
+ gp->status = oldstatus;
+ runtime·gogo(&gp->sched);
}
- stk = runtime·stackalloc(framesize);
- top = (Stktop*)(stk+framesize-sizeof(*top));
- free = framesize;
+ // TODO: if stack is uncopyable because we're in C code, patch return value at
+ // end of C code to trigger a copy as soon as C code exits. That way, we'll
+ // have stack available if we get this deep again.
}
- if(StackDebug) {
+ // allocate new segment.
+ framesize += argsize;
+ framesize += StackExtra; // room for more functions, Stktop.
+ if(framesize < StackMin)
+ framesize = StackMin;
+ framesize += StackSystem;
+ framesize = runtime·round2(framesize);
+ stk = runtime·stackalloc(gp, framesize);
+ if(gp->stacksize > runtime·maxstacksize) {
+ runtime·printf("runtime: goroutine stack exceeds %D-byte limit\n", (uint64)runtime·maxstacksize);
+ runtime·throw("stack overflow");
+ }
+ top = (Stktop*)(stk+framesize-sizeof(*top));
+
+ if(StackDebug >= 1) {
runtime·printf("\t-> new stack [%p, %p]\n", stk, top);
}
top->stackbase = gp->stackbase;
top->stackguard = gp->stackguard;
- top->gobuf = m->morebuf;
- top->argp = m->moreargp;
+ top->gobuf = morebuf;
+ top->argp = moreargp;
top->argsize = argsize;
- top->free = free;
- m->moreargp = nil;
- m->morebuf.pc = (uintptr)nil;
- m->morebuf.lr = (uintptr)nil;
- m->morebuf.sp = (uintptr)nil;
// copy flag from panic
top->panic = gp->ispanic;
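The fallback segment path rounds framesize with runtime·round2 so that stackalloc's new power-of-2 precondition holds. A direct Go transcription of round2 (same loop, Go types):

	package main

	import "fmt"

	// round2 returns the smallest power of 2 that is >= x,
	// mirroring runtime·round2 above.
	func round2(x int32) int32 {
		var s uint
		for int32(1)<<s < x {
			s++
		}
		return int32(1) << s
	}

	func main() {
		fmt.Println(round2(9000), round2(8192)) // 16384 8192
	}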
@@ -365,18 +852,96 @@ runtime·newstack(void)
*(int32*)345 = 123; // never return
}
+#pragma textflag NOSPLIT
+void
+runtime·nilfunc(void)
+{
+ *(byte*)0 = 0;
+}
+
// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
void
runtime·gostartcallfn(Gobuf *gobuf, FuncVal *fv)
{
- runtime·gostartcall(gobuf, fv->fn, fv);
+ void *fn;
+
+ if(fv != nil)
+ fn = fv->fn;
+ else
+ fn = runtime·nilfunc;
+ runtime·gostartcall(gobuf, fn, fv);
}
+// Maybe shrink the stack being used by gp.
+// Called at garbage collection time.
void
-runtime∕debug·setMaxStack(intgo in, intgo out)
+runtime·shrinkstack(G *gp)
{
- out = runtime·maxstacksize;
- runtime·maxstacksize = in;
- FLUSH(&out);
+ int32 nframes;
+ byte *oldstk, *oldbase;
+ uintptr used, oldsize, newsize;
+ MSpan *span;
+
+ if(!runtime·copystack)
+ return;
+ oldstk = (byte*)gp->stackguard - StackGuard;
+ oldbase = (byte*)gp->stackbase + sizeof(Stktop);
+ oldsize = oldbase - oldstk;
+ newsize = oldsize / 2;
+ if(newsize < FixedStack)
+ return; // don't shrink below the minimum-sized stack
+ used = oldbase - (byte*)gp->sched.sp;
+ if(used >= oldsize / 4)
+ return; // still using at least 1/4 of the segment.
+
+ // To shrink to less than 1/2 a page, we need to copy.
+ if(newsize < PageSize/2) {
+ if(gp->syscallstack != (uintptr)nil) // TODO: can we handle this case?
+ return;
+#ifdef GOOS_windows
+ if(gp->m != nil && gp->m->libcallsp != 0)
+ return;
+#endif
+ nframes = copyabletopsegment(gp);
+ if(nframes == -1)
+ return;
+ copystack(gp, nframes, newsize);
+ return;
+ }
+
+ // To shrink a stack of one page size or more, we can shrink it
+ // without copying. Just deallocate the lower half.
+ span = runtime·MHeap_LookupMaybe(&runtime·mheap, oldstk);
+ if(span == nil)
+ return; // stack allocated outside heap. Can't shrink it. Can happen if stack is allocated while inside malloc. TODO: shrink by copying?
+ if(span->elemsize != oldsize)
+ runtime·throw("span element size doesn't match stack size");
+ if((uintptr)oldstk != span->start << PageShift)
+ runtime·throw("stack not at start of span");
+
+ if(StackDebug)
+ runtime·printf("shrinking stack in place %p %X->%X\n", oldstk, oldsize, newsize);
+
+ // new stack guard for smaller stack
+ gp->stackguard = (uintptr)oldstk + newsize + StackGuard;
+ gp->stackguard0 = (uintptr)oldstk + newsize + StackGuard;
+ if(gp->stack0 == (uintptr)oldstk)
+ gp->stack0 = (uintptr)oldstk + newsize;
+ gp->stacksize -= oldsize - newsize;
+
+ // Free bottom half of the stack.
+ if(runtime·debug.efence || StackFromSystem) {
+ if(runtime·debug.efence || StackFaultOnFree)
+ runtime·SysFault(oldstk, newsize);
+ else
+ runtime·SysFree(oldstk, newsize, &mstats.stacks_sys);
+ return;
+ }
+ // First, we trick malloc into thinking
+ // we allocated the stack as two separate half-size allocs. Then the
+ // free() call does the rest of the work for us.
+ runtime·MSpan_EnsureSwept(span);
+ runtime·MHeap_SplitSpan(&runtime·mheap, span);
+ runtime·free(oldstk);
}
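The shrink decision above is deliberately conservative: halve the stack only when less than a quarter of it is in use, and never go below the minimum fixed-size stack. A sketch of just that decision (fixedStack is an illustrative stand-in for FixedStack in stack.h):

	package main

	import "fmt"

	const fixedStack = 8192 // illustrative value

	// shrinkTarget reports whether a stack of oldsize bytes with
	// `used` bytes in use should shrink, and to what size.
	func shrinkTarget(oldsize, used uintptr) (uintptr, bool) {
		newsize := oldsize / 2
		if newsize < fixedStack {
			return 0, false // already minimum-sized
		}
		if used >= oldsize/4 {
			return 0, false // still using at least 1/4 of the segment
		}
		return newsize, true
	}

	func main() {
		fmt.Println(shrinkTarget(65536, 4096))  // 32768 true
		fmt.Println(shrinkTarget(65536, 20000)) // 0 false
	}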
diff --git a/src/pkg/runtime/stack.h b/src/pkg/runtime/stack.h
index 296eb688d..18ab30b69 100644
--- a/src/pkg/runtime/stack.h
+++ b/src/pkg/runtime/stack.h
@@ -77,7 +77,8 @@ enum {
// If the amount needed for the splitting frame + StackExtra
// is less than this number, the stack will have this size instead.
StackMin = 8192,
- FixedStack = StackMin + StackSystem,
+ StackSystemRounded = StackSystem + (-StackSystem & (StackMin-1)),
+ FixedStack = StackMin + StackSystemRounded,
// Functions that need frames bigger than this use an extra
// instruction to do the stack split check, to avoid overflow
@@ -102,7 +103,7 @@ enum {
// The assumed size of the top-of-stack data block.
// The actual size can be smaller than this but cannot be larger.
// Checked in proc.c's runtime.malg.
- StackTop = 96,
+ StackTop = 88,
};
// Goroutine preemption request.
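StackSystemRounded uses the standard round-up trick for a power-of-2 modulus m: -x & (m-1) is exactly the padding needed to reach the next multiple of m, so FixedStack stays aligned to StackMin on systems where StackSystem is nonzero. A worked sketch:

	package main

	import "fmt"

	// roundUp returns x rounded up to a multiple of m,
	// which must be a power of 2 (unsigned negation wraps).
	func roundUp(x, m uint32) uint32 {
		return x + (-x & (m - 1))
	}

	func main() {
		const stackMin = 8192
		fmt.Println(roundUp(0, stackMin))    // 0: no system overhead, nothing added
		fmt.Println(roundUp(512, stackMin))  // 8192: rounded up to one StackMin
		fmt.Println(roundUp(8192, stackMin)) // 8192: already a multiple
	}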
diff --git a/src/pkg/runtime/stack_gen_test.go b/src/pkg/runtime/stack_gen_test.go
new file mode 100644
index 000000000..28101062c
--- /dev/null
+++ b/src/pkg/runtime/stack_gen_test.go
@@ -0,0 +1,1473 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ . "runtime"
+)
+
+var splitTests = []func() (uintptr, uintptr){
+ // Edit .+1,/^}/-1|seq 4 4 5000 | sed 's/.*/ stack&,/' | fmt
+ stack4, stack8, stack12, stack16, stack20, stack24, stack28,
+ stack32, stack36, stack40, stack44, stack48, stack52, stack56,
+ stack60, stack64, stack68, stack72, stack76, stack80, stack84,
+ stack88, stack92, stack96, stack100, stack104, stack108, stack112,
+ stack116, stack120, stack124, stack128, stack132, stack136,
+ stack140, stack144, stack148, stack152, stack156, stack160,
+ stack164, stack168, stack172, stack176, stack180, stack184,
+ stack188, stack192, stack196, stack200, stack204, stack208,
+ stack212, stack216, stack220, stack224, stack228, stack232,
+ stack236, stack240, stack244, stack248, stack252, stack256,
+ stack260, stack264, stack268, stack272, stack276, stack280,
+ stack284, stack288, stack292, stack296, stack300, stack304,
+ stack308, stack312, stack316, stack320, stack324, stack328,
+ stack332, stack336, stack340, stack344, stack348, stack352,
+ stack356, stack360, stack364, stack368, stack372, stack376,
+ stack380, stack384, stack388, stack392, stack396, stack400,
+ stack404, stack408, stack412, stack416, stack420, stack424,
+ stack428, stack432, stack436, stack440, stack444, stack448,
+ stack452, stack456, stack460, stack464, stack468, stack472,
+ stack476, stack480, stack484, stack488, stack492, stack496,
+ stack500, stack504, stack508, stack512, stack516, stack520,
+ stack524, stack528, stack532, stack536, stack540, stack544,
+ stack548, stack552, stack556, stack560, stack564, stack568,
+ stack572, stack576, stack580, stack584, stack588, stack592,
+ stack596, stack600, stack604, stack608, stack612, stack616,
+ stack620, stack624, stack628, stack632, stack636, stack640,
+ stack644, stack648, stack652, stack656, stack660, stack664,
+ stack668, stack672, stack676, stack680, stack684, stack688,
+ stack692, stack696, stack700, stack704, stack708, stack712,
+ stack716, stack720, stack724, stack728, stack732, stack736,
+ stack740, stack744, stack748, stack752, stack756, stack760,
+ stack764, stack768, stack772, stack776, stack780, stack784,
+ stack788, stack792, stack796, stack800, stack804, stack808,
+ stack812, stack816, stack820, stack824, stack828, stack832,
+ stack836, stack840, stack844, stack848, stack852, stack856,
+ stack860, stack864, stack868, stack872, stack876, stack880,
+ stack884, stack888, stack892, stack896, stack900, stack904,
+ stack908, stack912, stack916, stack920, stack924, stack928,
+ stack932, stack936, stack940, stack944, stack948, stack952,
+ stack956, stack960, stack964, stack968, stack972, stack976,
+ stack980, stack984, stack988, stack992, stack996, stack1000,
+ stack1004, stack1008, stack1012, stack1016, stack1020, stack1024,
+ stack1028, stack1032, stack1036, stack1040, stack1044, stack1048,
+ stack1052, stack1056, stack1060, stack1064, stack1068, stack1072,
+ stack1076, stack1080, stack1084, stack1088, stack1092, stack1096,
+ stack1100, stack1104, stack1108, stack1112, stack1116, stack1120,
+ stack1124, stack1128, stack1132, stack1136, stack1140, stack1144,
+ stack1148, stack1152, stack1156, stack1160, stack1164, stack1168,
+ stack1172, stack1176, stack1180, stack1184, stack1188, stack1192,
+ stack1196, stack1200, stack1204, stack1208, stack1212, stack1216,
+ stack1220, stack1224, stack1228, stack1232, stack1236, stack1240,
+ stack1244, stack1248, stack1252, stack1256, stack1260, stack1264,
+ stack1268, stack1272, stack1276, stack1280, stack1284, stack1288,
+ stack1292, stack1296, stack1300, stack1304, stack1308, stack1312,
+ stack1316, stack1320, stack1324, stack1328, stack1332, stack1336,
+ stack1340, stack1344, stack1348, stack1352, stack1356, stack1360,
+ stack1364, stack1368, stack1372, stack1376, stack1380, stack1384,
+ stack1388, stack1392, stack1396, stack1400, stack1404, stack1408,
+ stack1412, stack1416, stack1420, stack1424, stack1428, stack1432,
+ stack1436, stack1440, stack1444, stack1448, stack1452, stack1456,
+ stack1460, stack1464, stack1468, stack1472, stack1476, stack1480,
+ stack1484, stack1488, stack1492, stack1496, stack1500, stack1504,
+ stack1508, stack1512, stack1516, stack1520, stack1524, stack1528,
+ stack1532, stack1536, stack1540, stack1544, stack1548, stack1552,
+ stack1556, stack1560, stack1564, stack1568, stack1572, stack1576,
+ stack1580, stack1584, stack1588, stack1592, stack1596, stack1600,
+ stack1604, stack1608, stack1612, stack1616, stack1620, stack1624,
+ stack1628, stack1632, stack1636, stack1640, stack1644, stack1648,
+ stack1652, stack1656, stack1660, stack1664, stack1668, stack1672,
+ stack1676, stack1680, stack1684, stack1688, stack1692, stack1696,
+ stack1700, stack1704, stack1708, stack1712, stack1716, stack1720,
+ stack1724, stack1728, stack1732, stack1736, stack1740, stack1744,
+ stack1748, stack1752, stack1756, stack1760, stack1764, stack1768,
+ stack1772, stack1776, stack1780, stack1784, stack1788, stack1792,
+ stack1796, stack1800, stack1804, stack1808, stack1812, stack1816,
+ stack1820, stack1824, stack1828, stack1832, stack1836, stack1840,
+ stack1844, stack1848, stack1852, stack1856, stack1860, stack1864,
+ stack1868, stack1872, stack1876, stack1880, stack1884, stack1888,
+ stack1892, stack1896, stack1900, stack1904, stack1908, stack1912,
+ stack1916, stack1920, stack1924, stack1928, stack1932, stack1936,
+ stack1940, stack1944, stack1948, stack1952, stack1956, stack1960,
+ stack1964, stack1968, stack1972, stack1976, stack1980, stack1984,
+ stack1988, stack1992, stack1996, stack2000, stack2004, stack2008,
+ stack2012, stack2016, stack2020, stack2024, stack2028, stack2032,
+ stack2036, stack2040, stack2044, stack2048, stack2052, stack2056,
+ stack2060, stack2064, stack2068, stack2072, stack2076, stack2080,
+ stack2084, stack2088, stack2092, stack2096, stack2100, stack2104,
+ stack2108, stack2112, stack2116, stack2120, stack2124, stack2128,
+ stack2132, stack2136, stack2140, stack2144, stack2148, stack2152,
+ stack2156, stack2160, stack2164, stack2168, stack2172, stack2176,
+ stack2180, stack2184, stack2188, stack2192, stack2196, stack2200,
+ stack2204, stack2208, stack2212, stack2216, stack2220, stack2224,
+ stack2228, stack2232, stack2236, stack2240, stack2244, stack2248,
+ stack2252, stack2256, stack2260, stack2264, stack2268, stack2272,
+ stack2276, stack2280, stack2284, stack2288, stack2292, stack2296,
+ stack2300, stack2304, stack2308, stack2312, stack2316, stack2320,
+ stack2324, stack2328, stack2332, stack2336, stack2340, stack2344,
+ stack2348, stack2352, stack2356, stack2360, stack2364, stack2368,
+ stack2372, stack2376, stack2380, stack2384, stack2388, stack2392,
+ stack2396, stack2400, stack2404, stack2408, stack2412, stack2416,
+ stack2420, stack2424, stack2428, stack2432, stack2436, stack2440,
+ stack2444, stack2448, stack2452, stack2456, stack2460, stack2464,
+ stack2468, stack2472, stack2476, stack2480, stack2484, stack2488,
+ stack2492, stack2496, stack2500, stack2504, stack2508, stack2512,
+ stack2516, stack2520, stack2524, stack2528, stack2532, stack2536,
+ stack2540, stack2544, stack2548, stack2552, stack2556, stack2560,
+ stack2564, stack2568, stack2572, stack2576, stack2580, stack2584,
+ stack2588, stack2592, stack2596, stack2600, stack2604, stack2608,
+ stack2612, stack2616, stack2620, stack2624, stack2628, stack2632,
+ stack2636, stack2640, stack2644, stack2648, stack2652, stack2656,
+ stack2660, stack2664, stack2668, stack2672, stack2676, stack2680,
+ stack2684, stack2688, stack2692, stack2696, stack2700, stack2704,
+ stack2708, stack2712, stack2716, stack2720, stack2724, stack2728,
+ stack2732, stack2736, stack2740, stack2744, stack2748, stack2752,
+ stack2756, stack2760, stack2764, stack2768, stack2772, stack2776,
+ stack2780, stack2784, stack2788, stack2792, stack2796, stack2800,
+ stack2804, stack2808, stack2812, stack2816, stack2820, stack2824,
+ stack2828, stack2832, stack2836, stack2840, stack2844, stack2848,
+ stack2852, stack2856, stack2860, stack2864, stack2868, stack2872,
+ stack2876, stack2880, stack2884, stack2888, stack2892, stack2896,
+ stack2900, stack2904, stack2908, stack2912, stack2916, stack2920,
+ stack2924, stack2928, stack2932, stack2936, stack2940, stack2944,
+ stack2948, stack2952, stack2956, stack2960, stack2964, stack2968,
+ stack2972, stack2976, stack2980, stack2984, stack2988, stack2992,
+ stack2996, stack3000, stack3004, stack3008, stack3012, stack3016,
+ stack3020, stack3024, stack3028, stack3032, stack3036, stack3040,
+ stack3044, stack3048, stack3052, stack3056, stack3060, stack3064,
+ stack3068, stack3072, stack3076, stack3080, stack3084, stack3088,
+ stack3092, stack3096, stack3100, stack3104, stack3108, stack3112,
+ stack3116, stack3120, stack3124, stack3128, stack3132, stack3136,
+ stack3140, stack3144, stack3148, stack3152, stack3156, stack3160,
+ stack3164, stack3168, stack3172, stack3176, stack3180, stack3184,
+ stack3188, stack3192, stack3196, stack3200, stack3204, stack3208,
+ stack3212, stack3216, stack3220, stack3224, stack3228, stack3232,
+ stack3236, stack3240, stack3244, stack3248, stack3252, stack3256,
+ stack3260, stack3264, stack3268, stack3272, stack3276, stack3280,
+ stack3284, stack3288, stack3292, stack3296, stack3300, stack3304,
+ stack3308, stack3312, stack3316, stack3320, stack3324, stack3328,
+ stack3332, stack3336, stack3340, stack3344, stack3348, stack3352,
+ stack3356, stack3360, stack3364, stack3368, stack3372, stack3376,
+ stack3380, stack3384, stack3388, stack3392, stack3396, stack3400,
+ stack3404, stack3408, stack3412, stack3416, stack3420, stack3424,
+ stack3428, stack3432, stack3436, stack3440, stack3444, stack3448,
+ stack3452, stack3456, stack3460, stack3464, stack3468, stack3472,
+ stack3476, stack3480, stack3484, stack3488, stack3492, stack3496,
+ stack3500, stack3504, stack3508, stack3512, stack3516, stack3520,
+ stack3524, stack3528, stack3532, stack3536, stack3540, stack3544,
+ stack3548, stack3552, stack3556, stack3560, stack3564, stack3568,
+ stack3572, stack3576, stack3580, stack3584, stack3588, stack3592,
+ stack3596, stack3600, stack3604, stack3608, stack3612, stack3616,
+ stack3620, stack3624, stack3628, stack3632, stack3636, stack3640,
+ stack3644, stack3648, stack3652, stack3656, stack3660, stack3664,
+ stack3668, stack3672, stack3676, stack3680, stack3684, stack3688,
+ stack3692, stack3696, stack3700, stack3704, stack3708, stack3712,
+ stack3716, stack3720, stack3724, stack3728, stack3732, stack3736,
+ stack3740, stack3744, stack3748, stack3752, stack3756, stack3760,
+ stack3764, stack3768, stack3772, stack3776, stack3780, stack3784,
+ stack3788, stack3792, stack3796, stack3800, stack3804, stack3808,
+ stack3812, stack3816, stack3820, stack3824, stack3828, stack3832,
+ stack3836, stack3840, stack3844, stack3848, stack3852, stack3856,
+ stack3860, stack3864, stack3868, stack3872, stack3876, stack3880,
+ stack3884, stack3888, stack3892, stack3896, stack3900, stack3904,
+ stack3908, stack3912, stack3916, stack3920, stack3924, stack3928,
+ stack3932, stack3936, stack3940, stack3944, stack3948, stack3952,
+ stack3956, stack3960, stack3964, stack3968, stack3972, stack3976,
+ stack3980, stack3984, stack3988, stack3992, stack3996, stack4000,
+ stack4004, stack4008, stack4012, stack4016, stack4020, stack4024,
+ stack4028, stack4032, stack4036, stack4040, stack4044, stack4048,
+ stack4052, stack4056, stack4060, stack4064, stack4068, stack4072,
+ stack4076, stack4080, stack4084, stack4088, stack4092, stack4096,
+ stack4100, stack4104, stack4108, stack4112, stack4116, stack4120,
+ stack4124, stack4128, stack4132, stack4136, stack4140, stack4144,
+ stack4148, stack4152, stack4156, stack4160, stack4164, stack4168,
+ stack4172, stack4176, stack4180, stack4184, stack4188, stack4192,
+ stack4196, stack4200, stack4204, stack4208, stack4212, stack4216,
+ stack4220, stack4224, stack4228, stack4232, stack4236, stack4240,
+ stack4244, stack4248, stack4252, stack4256, stack4260, stack4264,
+ stack4268, stack4272, stack4276, stack4280, stack4284, stack4288,
+ stack4292, stack4296, stack4300, stack4304, stack4308, stack4312,
+ stack4316, stack4320, stack4324, stack4328, stack4332, stack4336,
+ stack4340, stack4344, stack4348, stack4352, stack4356, stack4360,
+ stack4364, stack4368, stack4372, stack4376, stack4380, stack4384,
+ stack4388, stack4392, stack4396, stack4400, stack4404, stack4408,
+ stack4412, stack4416, stack4420, stack4424, stack4428, stack4432,
+ stack4436, stack4440, stack4444, stack4448, stack4452, stack4456,
+ stack4460, stack4464, stack4468, stack4472, stack4476, stack4480,
+ stack4484, stack4488, stack4492, stack4496, stack4500, stack4504,
+ stack4508, stack4512, stack4516, stack4520, stack4524, stack4528,
+ stack4532, stack4536, stack4540, stack4544, stack4548, stack4552,
+ stack4556, stack4560, stack4564, stack4568, stack4572, stack4576,
+ stack4580, stack4584, stack4588, stack4592, stack4596, stack4600,
+ stack4604, stack4608, stack4612, stack4616, stack4620, stack4624,
+ stack4628, stack4632, stack4636, stack4640, stack4644, stack4648,
+ stack4652, stack4656, stack4660, stack4664, stack4668, stack4672,
+ stack4676, stack4680, stack4684, stack4688, stack4692, stack4696,
+ stack4700, stack4704, stack4708, stack4712, stack4716, stack4720,
+ stack4724, stack4728, stack4732, stack4736, stack4740, stack4744,
+ stack4748, stack4752, stack4756, stack4760, stack4764, stack4768,
+ stack4772, stack4776, stack4780, stack4784, stack4788, stack4792,
+ stack4796, stack4800, stack4804, stack4808, stack4812, stack4816,
+ stack4820, stack4824, stack4828, stack4832, stack4836, stack4840,
+ stack4844, stack4848, stack4852, stack4856, stack4860, stack4864,
+ stack4868, stack4872, stack4876, stack4880, stack4884, stack4888,
+ stack4892, stack4896, stack4900, stack4904, stack4908, stack4912,
+ stack4916, stack4920, stack4924, stack4928, stack4932, stack4936,
+ stack4940, stack4944, stack4948, stack4952, stack4956, stack4960,
+ stack4964, stack4968, stack4972, stack4976, stack4980, stack4984,
+ stack4988, stack4992, stack4996, stack5000,
+}
+
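+// The "Edit" line below is an acme/sam editor command, kept as a comment so
+// the functions after it can be regenerated mechanically: piped through the
+// shell, it replaces everything from the next line to the end of the file
+// with one function per multiple of 4 from 4 to 5000. Each stackN forces an
+// N-byte stack frame (use keeps the buffer live so it is not optimized away)
+// and returns the two values from Stackguard, which the stack-split tests
+// that reference the table above presumably compare against the expected
+// guard at that frame size.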
+// Edit .+1,$ | seq 4 4 5000 | sed 's/.*/func stack&()(uintptr, uintptr) { var buf [&]byte; use(buf[:]); return Stackguard() }/'
+func stack4() (uintptr, uintptr) { var buf [4]byte; use(buf[:]); return Stackguard() }
+func stack8() (uintptr, uintptr) { var buf [8]byte; use(buf[:]); return Stackguard() }
+func stack12() (uintptr, uintptr) { var buf [12]byte; use(buf[:]); return Stackguard() }
+func stack16() (uintptr, uintptr) { var buf [16]byte; use(buf[:]); return Stackguard() }
+func stack20() (uintptr, uintptr) { var buf [20]byte; use(buf[:]); return Stackguard() }
+func stack24() (uintptr, uintptr) { var buf [24]byte; use(buf[:]); return Stackguard() }
+func stack28() (uintptr, uintptr) { var buf [28]byte; use(buf[:]); return Stackguard() }
+func stack32() (uintptr, uintptr) { var buf [32]byte; use(buf[:]); return Stackguard() }
+func stack36() (uintptr, uintptr) { var buf [36]byte; use(buf[:]); return Stackguard() }
+func stack40() (uintptr, uintptr) { var buf [40]byte; use(buf[:]); return Stackguard() }
+func stack44() (uintptr, uintptr) { var buf [44]byte; use(buf[:]); return Stackguard() }
+func stack48() (uintptr, uintptr) { var buf [48]byte; use(buf[:]); return Stackguard() }
+func stack52() (uintptr, uintptr) { var buf [52]byte; use(buf[:]); return Stackguard() }
+func stack56() (uintptr, uintptr) { var buf [56]byte; use(buf[:]); return Stackguard() }
+func stack60() (uintptr, uintptr) { var buf [60]byte; use(buf[:]); return Stackguard() }
+func stack64() (uintptr, uintptr) { var buf [64]byte; use(buf[:]); return Stackguard() }
+func stack68() (uintptr, uintptr) { var buf [68]byte; use(buf[:]); return Stackguard() }
+func stack72() (uintptr, uintptr) { var buf [72]byte; use(buf[:]); return Stackguard() }
+func stack76() (uintptr, uintptr) { var buf [76]byte; use(buf[:]); return Stackguard() }
+func stack80() (uintptr, uintptr) { var buf [80]byte; use(buf[:]); return Stackguard() }
+func stack84() (uintptr, uintptr) { var buf [84]byte; use(buf[:]); return Stackguard() }
+func stack88() (uintptr, uintptr) { var buf [88]byte; use(buf[:]); return Stackguard() }
+func stack92() (uintptr, uintptr) { var buf [92]byte; use(buf[:]); return Stackguard() }
+func stack96() (uintptr, uintptr) { var buf [96]byte; use(buf[:]); return Stackguard() }
+func stack100() (uintptr, uintptr) { var buf [100]byte; use(buf[:]); return Stackguard() }
+func stack104() (uintptr, uintptr) { var buf [104]byte; use(buf[:]); return Stackguard() }
+func stack108() (uintptr, uintptr) { var buf [108]byte; use(buf[:]); return Stackguard() }
+func stack112() (uintptr, uintptr) { var buf [112]byte; use(buf[:]); return Stackguard() }
+func stack116() (uintptr, uintptr) { var buf [116]byte; use(buf[:]); return Stackguard() }
+func stack120() (uintptr, uintptr) { var buf [120]byte; use(buf[:]); return Stackguard() }
+func stack124() (uintptr, uintptr) { var buf [124]byte; use(buf[:]); return Stackguard() }
+func stack128() (uintptr, uintptr) { var buf [128]byte; use(buf[:]); return Stackguard() }
+func stack132() (uintptr, uintptr) { var buf [132]byte; use(buf[:]); return Stackguard() }
+func stack136() (uintptr, uintptr) { var buf [136]byte; use(buf[:]); return Stackguard() }
+func stack140() (uintptr, uintptr) { var buf [140]byte; use(buf[:]); return Stackguard() }
+func stack144() (uintptr, uintptr) { var buf [144]byte; use(buf[:]); return Stackguard() }
+func stack148() (uintptr, uintptr) { var buf [148]byte; use(buf[:]); return Stackguard() }
+func stack152() (uintptr, uintptr) { var buf [152]byte; use(buf[:]); return Stackguard() }
+func stack156() (uintptr, uintptr) { var buf [156]byte; use(buf[:]); return Stackguard() }
+func stack160() (uintptr, uintptr) { var buf [160]byte; use(buf[:]); return Stackguard() }
+func stack164() (uintptr, uintptr) { var buf [164]byte; use(buf[:]); return Stackguard() }
+func stack168() (uintptr, uintptr) { var buf [168]byte; use(buf[:]); return Stackguard() }
+func stack172() (uintptr, uintptr) { var buf [172]byte; use(buf[:]); return Stackguard() }
+func stack176() (uintptr, uintptr) { var buf [176]byte; use(buf[:]); return Stackguard() }
+func stack180() (uintptr, uintptr) { var buf [180]byte; use(buf[:]); return Stackguard() }
+func stack184() (uintptr, uintptr) { var buf [184]byte; use(buf[:]); return Stackguard() }
+func stack188() (uintptr, uintptr) { var buf [188]byte; use(buf[:]); return Stackguard() }
+func stack192() (uintptr, uintptr) { var buf [192]byte; use(buf[:]); return Stackguard() }
+func stack196() (uintptr, uintptr) { var buf [196]byte; use(buf[:]); return Stackguard() }
+func stack200() (uintptr, uintptr) { var buf [200]byte; use(buf[:]); return Stackguard() }
+func stack204() (uintptr, uintptr) { var buf [204]byte; use(buf[:]); return Stackguard() }
+func stack208() (uintptr, uintptr) { var buf [208]byte; use(buf[:]); return Stackguard() }
+func stack212() (uintptr, uintptr) { var buf [212]byte; use(buf[:]); return Stackguard() }
+func stack216() (uintptr, uintptr) { var buf [216]byte; use(buf[:]); return Stackguard() }
+func stack220() (uintptr, uintptr) { var buf [220]byte; use(buf[:]); return Stackguard() }
+func stack224() (uintptr, uintptr) { var buf [224]byte; use(buf[:]); return Stackguard() }
+func stack228() (uintptr, uintptr) { var buf [228]byte; use(buf[:]); return Stackguard() }
+func stack232() (uintptr, uintptr) { var buf [232]byte; use(buf[:]); return Stackguard() }
+func stack236() (uintptr, uintptr) { var buf [236]byte; use(buf[:]); return Stackguard() }
+func stack240() (uintptr, uintptr) { var buf [240]byte; use(buf[:]); return Stackguard() }
+func stack244() (uintptr, uintptr) { var buf [244]byte; use(buf[:]); return Stackguard() }
+func stack248() (uintptr, uintptr) { var buf [248]byte; use(buf[:]); return Stackguard() }
+func stack252() (uintptr, uintptr) { var buf [252]byte; use(buf[:]); return Stackguard() }
+func stack256() (uintptr, uintptr) { var buf [256]byte; use(buf[:]); return Stackguard() }
+func stack260() (uintptr, uintptr) { var buf [260]byte; use(buf[:]); return Stackguard() }
+func stack264() (uintptr, uintptr) { var buf [264]byte; use(buf[:]); return Stackguard() }
+func stack268() (uintptr, uintptr) { var buf [268]byte; use(buf[:]); return Stackguard() }
+func stack272() (uintptr, uintptr) { var buf [272]byte; use(buf[:]); return Stackguard() }
+func stack276() (uintptr, uintptr) { var buf [276]byte; use(buf[:]); return Stackguard() }
+func stack280() (uintptr, uintptr) { var buf [280]byte; use(buf[:]); return Stackguard() }
+func stack284() (uintptr, uintptr) { var buf [284]byte; use(buf[:]); return Stackguard() }
+func stack288() (uintptr, uintptr) { var buf [288]byte; use(buf[:]); return Stackguard() }
+func stack292() (uintptr, uintptr) { var buf [292]byte; use(buf[:]); return Stackguard() }
+func stack296() (uintptr, uintptr) { var buf [296]byte; use(buf[:]); return Stackguard() }
+func stack300() (uintptr, uintptr) { var buf [300]byte; use(buf[:]); return Stackguard() }
+func stack304() (uintptr, uintptr) { var buf [304]byte; use(buf[:]); return Stackguard() }
+func stack308() (uintptr, uintptr) { var buf [308]byte; use(buf[:]); return Stackguard() }
+func stack312() (uintptr, uintptr) { var buf [312]byte; use(buf[:]); return Stackguard() }
+func stack316() (uintptr, uintptr) { var buf [316]byte; use(buf[:]); return Stackguard() }
+func stack320() (uintptr, uintptr) { var buf [320]byte; use(buf[:]); return Stackguard() }
+func stack324() (uintptr, uintptr) { var buf [324]byte; use(buf[:]); return Stackguard() }
+func stack328() (uintptr, uintptr) { var buf [328]byte; use(buf[:]); return Stackguard() }
+func stack332() (uintptr, uintptr) { var buf [332]byte; use(buf[:]); return Stackguard() }
+func stack336() (uintptr, uintptr) { var buf [336]byte; use(buf[:]); return Stackguard() }
+func stack340() (uintptr, uintptr) { var buf [340]byte; use(buf[:]); return Stackguard() }
+func stack344() (uintptr, uintptr) { var buf [344]byte; use(buf[:]); return Stackguard() }
+func stack348() (uintptr, uintptr) { var buf [348]byte; use(buf[:]); return Stackguard() }
+func stack352() (uintptr, uintptr) { var buf [352]byte; use(buf[:]); return Stackguard() }
+func stack356() (uintptr, uintptr) { var buf [356]byte; use(buf[:]); return Stackguard() }
+func stack360() (uintptr, uintptr) { var buf [360]byte; use(buf[:]); return Stackguard() }
+func stack364() (uintptr, uintptr) { var buf [364]byte; use(buf[:]); return Stackguard() }
+func stack368() (uintptr, uintptr) { var buf [368]byte; use(buf[:]); return Stackguard() }
+func stack372() (uintptr, uintptr) { var buf [372]byte; use(buf[:]); return Stackguard() }
+func stack376() (uintptr, uintptr) { var buf [376]byte; use(buf[:]); return Stackguard() }
+func stack380() (uintptr, uintptr) { var buf [380]byte; use(buf[:]); return Stackguard() }
+func stack384() (uintptr, uintptr) { var buf [384]byte; use(buf[:]); return Stackguard() }
+func stack388() (uintptr, uintptr) { var buf [388]byte; use(buf[:]); return Stackguard() }
+func stack392() (uintptr, uintptr) { var buf [392]byte; use(buf[:]); return Stackguard() }
+func stack396() (uintptr, uintptr) { var buf [396]byte; use(buf[:]); return Stackguard() }
+func stack400() (uintptr, uintptr) { var buf [400]byte; use(buf[:]); return Stackguard() }
+func stack404() (uintptr, uintptr) { var buf [404]byte; use(buf[:]); return Stackguard() }
+func stack408() (uintptr, uintptr) { var buf [408]byte; use(buf[:]); return Stackguard() }
+func stack412() (uintptr, uintptr) { var buf [412]byte; use(buf[:]); return Stackguard() }
+func stack416() (uintptr, uintptr) { var buf [416]byte; use(buf[:]); return Stackguard() }
+func stack420() (uintptr, uintptr) { var buf [420]byte; use(buf[:]); return Stackguard() }
+func stack424() (uintptr, uintptr) { var buf [424]byte; use(buf[:]); return Stackguard() }
+func stack428() (uintptr, uintptr) { var buf [428]byte; use(buf[:]); return Stackguard() }
+func stack432() (uintptr, uintptr) { var buf [432]byte; use(buf[:]); return Stackguard() }
+func stack436() (uintptr, uintptr) { var buf [436]byte; use(buf[:]); return Stackguard() }
+func stack440() (uintptr, uintptr) { var buf [440]byte; use(buf[:]); return Stackguard() }
+func stack444() (uintptr, uintptr) { var buf [444]byte; use(buf[:]); return Stackguard() }
+func stack448() (uintptr, uintptr) { var buf [448]byte; use(buf[:]); return Stackguard() }
+func stack452() (uintptr, uintptr) { var buf [452]byte; use(buf[:]); return Stackguard() }
+func stack456() (uintptr, uintptr) { var buf [456]byte; use(buf[:]); return Stackguard() }
+func stack460() (uintptr, uintptr) { var buf [460]byte; use(buf[:]); return Stackguard() }
+func stack464() (uintptr, uintptr) { var buf [464]byte; use(buf[:]); return Stackguard() }
+func stack468() (uintptr, uintptr) { var buf [468]byte; use(buf[:]); return Stackguard() }
+func stack472() (uintptr, uintptr) { var buf [472]byte; use(buf[:]); return Stackguard() }
+func stack476() (uintptr, uintptr) { var buf [476]byte; use(buf[:]); return Stackguard() }
+func stack480() (uintptr, uintptr) { var buf [480]byte; use(buf[:]); return Stackguard() }
+func stack484() (uintptr, uintptr) { var buf [484]byte; use(buf[:]); return Stackguard() }
+func stack488() (uintptr, uintptr) { var buf [488]byte; use(buf[:]); return Stackguard() }
+func stack492() (uintptr, uintptr) { var buf [492]byte; use(buf[:]); return Stackguard() }
+func stack496() (uintptr, uintptr) { var buf [496]byte; use(buf[:]); return Stackguard() }
+func stack500() (uintptr, uintptr) { var buf [500]byte; use(buf[:]); return Stackguard() }
+func stack504() (uintptr, uintptr) { var buf [504]byte; use(buf[:]); return Stackguard() }
+func stack508() (uintptr, uintptr) { var buf [508]byte; use(buf[:]); return Stackguard() }
+func stack512() (uintptr, uintptr) { var buf [512]byte; use(buf[:]); return Stackguard() }
+func stack516() (uintptr, uintptr) { var buf [516]byte; use(buf[:]); return Stackguard() }
+func stack520() (uintptr, uintptr) { var buf [520]byte; use(buf[:]); return Stackguard() }
+func stack524() (uintptr, uintptr) { var buf [524]byte; use(buf[:]); return Stackguard() }
+func stack528() (uintptr, uintptr) { var buf [528]byte; use(buf[:]); return Stackguard() }
+func stack532() (uintptr, uintptr) { var buf [532]byte; use(buf[:]); return Stackguard() }
+func stack536() (uintptr, uintptr) { var buf [536]byte; use(buf[:]); return Stackguard() }
+func stack540() (uintptr, uintptr) { var buf [540]byte; use(buf[:]); return Stackguard() }
+func stack544() (uintptr, uintptr) { var buf [544]byte; use(buf[:]); return Stackguard() }
+func stack548() (uintptr, uintptr) { var buf [548]byte; use(buf[:]); return Stackguard() }
+func stack552() (uintptr, uintptr) { var buf [552]byte; use(buf[:]); return Stackguard() }
+func stack556() (uintptr, uintptr) { var buf [556]byte; use(buf[:]); return Stackguard() }
+func stack560() (uintptr, uintptr) { var buf [560]byte; use(buf[:]); return Stackguard() }
+func stack564() (uintptr, uintptr) { var buf [564]byte; use(buf[:]); return Stackguard() }
+func stack568() (uintptr, uintptr) { var buf [568]byte; use(buf[:]); return Stackguard() }
+func stack572() (uintptr, uintptr) { var buf [572]byte; use(buf[:]); return Stackguard() }
+func stack576() (uintptr, uintptr) { var buf [576]byte; use(buf[:]); return Stackguard() }
+func stack580() (uintptr, uintptr) { var buf [580]byte; use(buf[:]); return Stackguard() }
+func stack584() (uintptr, uintptr) { var buf [584]byte; use(buf[:]); return Stackguard() }
+func stack588() (uintptr, uintptr) { var buf [588]byte; use(buf[:]); return Stackguard() }
+func stack592() (uintptr, uintptr) { var buf [592]byte; use(buf[:]); return Stackguard() }
+func stack596() (uintptr, uintptr) { var buf [596]byte; use(buf[:]); return Stackguard() }
+func stack600() (uintptr, uintptr) { var buf [600]byte; use(buf[:]); return Stackguard() }
+func stack604() (uintptr, uintptr) { var buf [604]byte; use(buf[:]); return Stackguard() }
+func stack608() (uintptr, uintptr) { var buf [608]byte; use(buf[:]); return Stackguard() }
+func stack612() (uintptr, uintptr) { var buf [612]byte; use(buf[:]); return Stackguard() }
+func stack616() (uintptr, uintptr) { var buf [616]byte; use(buf[:]); return Stackguard() }
+func stack620() (uintptr, uintptr) { var buf [620]byte; use(buf[:]); return Stackguard() }
+func stack624() (uintptr, uintptr) { var buf [624]byte; use(buf[:]); return Stackguard() }
+func stack628() (uintptr, uintptr) { var buf [628]byte; use(buf[:]); return Stackguard() }
+func stack632() (uintptr, uintptr) { var buf [632]byte; use(buf[:]); return Stackguard() }
+func stack636() (uintptr, uintptr) { var buf [636]byte; use(buf[:]); return Stackguard() }
+func stack640() (uintptr, uintptr) { var buf [640]byte; use(buf[:]); return Stackguard() }
+func stack644() (uintptr, uintptr) { var buf [644]byte; use(buf[:]); return Stackguard() }
+func stack648() (uintptr, uintptr) { var buf [648]byte; use(buf[:]); return Stackguard() }
+func stack652() (uintptr, uintptr) { var buf [652]byte; use(buf[:]); return Stackguard() }
+func stack656() (uintptr, uintptr) { var buf [656]byte; use(buf[:]); return Stackguard() }
+func stack660() (uintptr, uintptr) { var buf [660]byte; use(buf[:]); return Stackguard() }
+func stack664() (uintptr, uintptr) { var buf [664]byte; use(buf[:]); return Stackguard() }
+func stack668() (uintptr, uintptr) { var buf [668]byte; use(buf[:]); return Stackguard() }
+func stack672() (uintptr, uintptr) { var buf [672]byte; use(buf[:]); return Stackguard() }
+func stack676() (uintptr, uintptr) { var buf [676]byte; use(buf[:]); return Stackguard() }
+func stack680() (uintptr, uintptr) { var buf [680]byte; use(buf[:]); return Stackguard() }
+func stack684() (uintptr, uintptr) { var buf [684]byte; use(buf[:]); return Stackguard() }
+func stack688() (uintptr, uintptr) { var buf [688]byte; use(buf[:]); return Stackguard() }
+func stack692() (uintptr, uintptr) { var buf [692]byte; use(buf[:]); return Stackguard() }
+func stack696() (uintptr, uintptr) { var buf [696]byte; use(buf[:]); return Stackguard() }
+func stack700() (uintptr, uintptr) { var buf [700]byte; use(buf[:]); return Stackguard() }
+func stack704() (uintptr, uintptr) { var buf [704]byte; use(buf[:]); return Stackguard() }
+func stack708() (uintptr, uintptr) { var buf [708]byte; use(buf[:]); return Stackguard() }
+func stack712() (uintptr, uintptr) { var buf [712]byte; use(buf[:]); return Stackguard() }
+func stack716() (uintptr, uintptr) { var buf [716]byte; use(buf[:]); return Stackguard() }
+func stack720() (uintptr, uintptr) { var buf [720]byte; use(buf[:]); return Stackguard() }
+func stack724() (uintptr, uintptr) { var buf [724]byte; use(buf[:]); return Stackguard() }
+func stack728() (uintptr, uintptr) { var buf [728]byte; use(buf[:]); return Stackguard() }
+func stack732() (uintptr, uintptr) { var buf [732]byte; use(buf[:]); return Stackguard() }
+func stack736() (uintptr, uintptr) { var buf [736]byte; use(buf[:]); return Stackguard() }
+func stack740() (uintptr, uintptr) { var buf [740]byte; use(buf[:]); return Stackguard() }
+func stack744() (uintptr, uintptr) { var buf [744]byte; use(buf[:]); return Stackguard() }
+func stack748() (uintptr, uintptr) { var buf [748]byte; use(buf[:]); return Stackguard() }
+func stack752() (uintptr, uintptr) { var buf [752]byte; use(buf[:]); return Stackguard() }
+func stack756() (uintptr, uintptr) { var buf [756]byte; use(buf[:]); return Stackguard() }
+func stack760() (uintptr, uintptr) { var buf [760]byte; use(buf[:]); return Stackguard() }
+func stack764() (uintptr, uintptr) { var buf [764]byte; use(buf[:]); return Stackguard() }
+func stack768() (uintptr, uintptr) { var buf [768]byte; use(buf[:]); return Stackguard() }
+func stack772() (uintptr, uintptr) { var buf [772]byte; use(buf[:]); return Stackguard() }
+func stack776() (uintptr, uintptr) { var buf [776]byte; use(buf[:]); return Stackguard() }
+func stack780() (uintptr, uintptr) { var buf [780]byte; use(buf[:]); return Stackguard() }
+func stack784() (uintptr, uintptr) { var buf [784]byte; use(buf[:]); return Stackguard() }
+func stack788() (uintptr, uintptr) { var buf [788]byte; use(buf[:]); return Stackguard() }
+func stack792() (uintptr, uintptr) { var buf [792]byte; use(buf[:]); return Stackguard() }
+func stack796() (uintptr, uintptr) { var buf [796]byte; use(buf[:]); return Stackguard() }
+func stack800() (uintptr, uintptr) { var buf [800]byte; use(buf[:]); return Stackguard() }
+func stack804() (uintptr, uintptr) { var buf [804]byte; use(buf[:]); return Stackguard() }
+func stack808() (uintptr, uintptr) { var buf [808]byte; use(buf[:]); return Stackguard() }
+func stack812() (uintptr, uintptr) { var buf [812]byte; use(buf[:]); return Stackguard() }
+func stack816() (uintptr, uintptr) { var buf [816]byte; use(buf[:]); return Stackguard() }
+func stack820() (uintptr, uintptr) { var buf [820]byte; use(buf[:]); return Stackguard() }
+func stack824() (uintptr, uintptr) { var buf [824]byte; use(buf[:]); return Stackguard() }
+func stack828() (uintptr, uintptr) { var buf [828]byte; use(buf[:]); return Stackguard() }
+func stack832() (uintptr, uintptr) { var buf [832]byte; use(buf[:]); return Stackguard() }
+func stack836() (uintptr, uintptr) { var buf [836]byte; use(buf[:]); return Stackguard() }
+func stack840() (uintptr, uintptr) { var buf [840]byte; use(buf[:]); return Stackguard() }
+func stack844() (uintptr, uintptr) { var buf [844]byte; use(buf[:]); return Stackguard() }
+func stack848() (uintptr, uintptr) { var buf [848]byte; use(buf[:]); return Stackguard() }
+func stack852() (uintptr, uintptr) { var buf [852]byte; use(buf[:]); return Stackguard() }
+func stack856() (uintptr, uintptr) { var buf [856]byte; use(buf[:]); return Stackguard() }
+func stack860() (uintptr, uintptr) { var buf [860]byte; use(buf[:]); return Stackguard() }
+func stack864() (uintptr, uintptr) { var buf [864]byte; use(buf[:]); return Stackguard() }
+func stack868() (uintptr, uintptr) { var buf [868]byte; use(buf[:]); return Stackguard() }
+func stack872() (uintptr, uintptr) { var buf [872]byte; use(buf[:]); return Stackguard() }
+func stack876() (uintptr, uintptr) { var buf [876]byte; use(buf[:]); return Stackguard() }
+func stack880() (uintptr, uintptr) { var buf [880]byte; use(buf[:]); return Stackguard() }
+func stack884() (uintptr, uintptr) { var buf [884]byte; use(buf[:]); return Stackguard() }
+func stack888() (uintptr, uintptr) { var buf [888]byte; use(buf[:]); return Stackguard() }
+func stack892() (uintptr, uintptr) { var buf [892]byte; use(buf[:]); return Stackguard() }
+func stack896() (uintptr, uintptr) { var buf [896]byte; use(buf[:]); return Stackguard() }
+func stack900() (uintptr, uintptr) { var buf [900]byte; use(buf[:]); return Stackguard() }
+func stack904() (uintptr, uintptr) { var buf [904]byte; use(buf[:]); return Stackguard() }
+func stack908() (uintptr, uintptr) { var buf [908]byte; use(buf[:]); return Stackguard() }
+func stack912() (uintptr, uintptr) { var buf [912]byte; use(buf[:]); return Stackguard() }
+func stack916() (uintptr, uintptr) { var buf [916]byte; use(buf[:]); return Stackguard() }
+func stack920() (uintptr, uintptr) { var buf [920]byte; use(buf[:]); return Stackguard() }
+func stack924() (uintptr, uintptr) { var buf [924]byte; use(buf[:]); return Stackguard() }
+func stack928() (uintptr, uintptr) { var buf [928]byte; use(buf[:]); return Stackguard() }
+func stack932() (uintptr, uintptr) { var buf [932]byte; use(buf[:]); return Stackguard() }
+func stack936() (uintptr, uintptr) { var buf [936]byte; use(buf[:]); return Stackguard() }
+func stack940() (uintptr, uintptr) { var buf [940]byte; use(buf[:]); return Stackguard() }
+func stack944() (uintptr, uintptr) { var buf [944]byte; use(buf[:]); return Stackguard() }
+func stack948() (uintptr, uintptr) { var buf [948]byte; use(buf[:]); return Stackguard() }
+func stack952() (uintptr, uintptr) { var buf [952]byte; use(buf[:]); return Stackguard() }
+func stack956() (uintptr, uintptr) { var buf [956]byte; use(buf[:]); return Stackguard() }
+func stack960() (uintptr, uintptr) { var buf [960]byte; use(buf[:]); return Stackguard() }
+func stack964() (uintptr, uintptr) { var buf [964]byte; use(buf[:]); return Stackguard() }
+func stack968() (uintptr, uintptr) { var buf [968]byte; use(buf[:]); return Stackguard() }
+func stack972() (uintptr, uintptr) { var buf [972]byte; use(buf[:]); return Stackguard() }
+func stack976() (uintptr, uintptr) { var buf [976]byte; use(buf[:]); return Stackguard() }
+func stack980() (uintptr, uintptr) { var buf [980]byte; use(buf[:]); return Stackguard() }
+func stack984() (uintptr, uintptr) { var buf [984]byte; use(buf[:]); return Stackguard() }
+func stack988() (uintptr, uintptr) { var buf [988]byte; use(buf[:]); return Stackguard() }
+func stack992() (uintptr, uintptr) { var buf [992]byte; use(buf[:]); return Stackguard() }
+func stack996() (uintptr, uintptr) { var buf [996]byte; use(buf[:]); return Stackguard() }
+func stack1000() (uintptr, uintptr) { var buf [1000]byte; use(buf[:]); return Stackguard() }
+func stack1004() (uintptr, uintptr) { var buf [1004]byte; use(buf[:]); return Stackguard() }
+func stack1008() (uintptr, uintptr) { var buf [1008]byte; use(buf[:]); return Stackguard() }
+func stack1012() (uintptr, uintptr) { var buf [1012]byte; use(buf[:]); return Stackguard() }
+func stack1016() (uintptr, uintptr) { var buf [1016]byte; use(buf[:]); return Stackguard() }
+func stack1020() (uintptr, uintptr) { var buf [1020]byte; use(buf[:]); return Stackguard() }
+func stack1024() (uintptr, uintptr) { var buf [1024]byte; use(buf[:]); return Stackguard() }
+func stack1028() (uintptr, uintptr) { var buf [1028]byte; use(buf[:]); return Stackguard() }
+func stack1032() (uintptr, uintptr) { var buf [1032]byte; use(buf[:]); return Stackguard() }
+func stack1036() (uintptr, uintptr) { var buf [1036]byte; use(buf[:]); return Stackguard() }
+func stack1040() (uintptr, uintptr) { var buf [1040]byte; use(buf[:]); return Stackguard() }
+func stack1044() (uintptr, uintptr) { var buf [1044]byte; use(buf[:]); return Stackguard() }
+func stack1048() (uintptr, uintptr) { var buf [1048]byte; use(buf[:]); return Stackguard() }
+func stack1052() (uintptr, uintptr) { var buf [1052]byte; use(buf[:]); return Stackguard() }
+func stack1056() (uintptr, uintptr) { var buf [1056]byte; use(buf[:]); return Stackguard() }
+func stack1060() (uintptr, uintptr) { var buf [1060]byte; use(buf[:]); return Stackguard() }
+func stack1064() (uintptr, uintptr) { var buf [1064]byte; use(buf[:]); return Stackguard() }
+func stack1068() (uintptr, uintptr) { var buf [1068]byte; use(buf[:]); return Stackguard() }
+func stack1072() (uintptr, uintptr) { var buf [1072]byte; use(buf[:]); return Stackguard() }
+func stack1076() (uintptr, uintptr) { var buf [1076]byte; use(buf[:]); return Stackguard() }
+func stack1080() (uintptr, uintptr) { var buf [1080]byte; use(buf[:]); return Stackguard() }
+func stack1084() (uintptr, uintptr) { var buf [1084]byte; use(buf[:]); return Stackguard() }
+func stack1088() (uintptr, uintptr) { var buf [1088]byte; use(buf[:]); return Stackguard() }
+func stack1092() (uintptr, uintptr) { var buf [1092]byte; use(buf[:]); return Stackguard() }
+func stack1096() (uintptr, uintptr) { var buf [1096]byte; use(buf[:]); return Stackguard() }
+func stack1100() (uintptr, uintptr) { var buf [1100]byte; use(buf[:]); return Stackguard() }
+func stack1104() (uintptr, uintptr) { var buf [1104]byte; use(buf[:]); return Stackguard() }
+func stack1108() (uintptr, uintptr) { var buf [1108]byte; use(buf[:]); return Stackguard() }
+func stack1112() (uintptr, uintptr) { var buf [1112]byte; use(buf[:]); return Stackguard() }
+func stack1116() (uintptr, uintptr) { var buf [1116]byte; use(buf[:]); return Stackguard() }
+func stack1120() (uintptr, uintptr) { var buf [1120]byte; use(buf[:]); return Stackguard() }
+func stack1124() (uintptr, uintptr) { var buf [1124]byte; use(buf[:]); return Stackguard() }
+func stack1128() (uintptr, uintptr) { var buf [1128]byte; use(buf[:]); return Stackguard() }
+func stack1132() (uintptr, uintptr) { var buf [1132]byte; use(buf[:]); return Stackguard() }
+func stack1136() (uintptr, uintptr) { var buf [1136]byte; use(buf[:]); return Stackguard() }
+func stack1140() (uintptr, uintptr) { var buf [1140]byte; use(buf[:]); return Stackguard() }
+func stack1144() (uintptr, uintptr) { var buf [1144]byte; use(buf[:]); return Stackguard() }
+func stack1148() (uintptr, uintptr) { var buf [1148]byte; use(buf[:]); return Stackguard() }
+func stack1152() (uintptr, uintptr) { var buf [1152]byte; use(buf[:]); return Stackguard() }
+func stack1156() (uintptr, uintptr) { var buf [1156]byte; use(buf[:]); return Stackguard() }
+func stack1160() (uintptr, uintptr) { var buf [1160]byte; use(buf[:]); return Stackguard() }
+func stack1164() (uintptr, uintptr) { var buf [1164]byte; use(buf[:]); return Stackguard() }
+func stack1168() (uintptr, uintptr) { var buf [1168]byte; use(buf[:]); return Stackguard() }
+func stack1172() (uintptr, uintptr) { var buf [1172]byte; use(buf[:]); return Stackguard() }
+func stack1176() (uintptr, uintptr) { var buf [1176]byte; use(buf[:]); return Stackguard() }
+func stack1180() (uintptr, uintptr) { var buf [1180]byte; use(buf[:]); return Stackguard() }
+func stack1184() (uintptr, uintptr) { var buf [1184]byte; use(buf[:]); return Stackguard() }
+func stack1188() (uintptr, uintptr) { var buf [1188]byte; use(buf[:]); return Stackguard() }
+func stack1192() (uintptr, uintptr) { var buf [1192]byte; use(buf[:]); return Stackguard() }
+func stack1196() (uintptr, uintptr) { var buf [1196]byte; use(buf[:]); return Stackguard() }
+func stack1200() (uintptr, uintptr) { var buf [1200]byte; use(buf[:]); return Stackguard() }
+func stack1204() (uintptr, uintptr) { var buf [1204]byte; use(buf[:]); return Stackguard() }
+func stack1208() (uintptr, uintptr) { var buf [1208]byte; use(buf[:]); return Stackguard() }
+func stack1212() (uintptr, uintptr) { var buf [1212]byte; use(buf[:]); return Stackguard() }
+func stack1216() (uintptr, uintptr) { var buf [1216]byte; use(buf[:]); return Stackguard() }
+func stack1220() (uintptr, uintptr) { var buf [1220]byte; use(buf[:]); return Stackguard() }
+func stack1224() (uintptr, uintptr) { var buf [1224]byte; use(buf[:]); return Stackguard() }
+func stack1228() (uintptr, uintptr) { var buf [1228]byte; use(buf[:]); return Stackguard() }
+func stack1232() (uintptr, uintptr) { var buf [1232]byte; use(buf[:]); return Stackguard() }
+func stack1236() (uintptr, uintptr) { var buf [1236]byte; use(buf[:]); return Stackguard() }
+func stack1240() (uintptr, uintptr) { var buf [1240]byte; use(buf[:]); return Stackguard() }
+func stack1244() (uintptr, uintptr) { var buf [1244]byte; use(buf[:]); return Stackguard() }
+func stack1248() (uintptr, uintptr) { var buf [1248]byte; use(buf[:]); return Stackguard() }
+func stack1252() (uintptr, uintptr) { var buf [1252]byte; use(buf[:]); return Stackguard() }
+func stack1256() (uintptr, uintptr) { var buf [1256]byte; use(buf[:]); return Stackguard() }
+func stack1260() (uintptr, uintptr) { var buf [1260]byte; use(buf[:]); return Stackguard() }
+func stack1264() (uintptr, uintptr) { var buf [1264]byte; use(buf[:]); return Stackguard() }
+func stack1268() (uintptr, uintptr) { var buf [1268]byte; use(buf[:]); return Stackguard() }
+func stack1272() (uintptr, uintptr) { var buf [1272]byte; use(buf[:]); return Stackguard() }
+func stack1276() (uintptr, uintptr) { var buf [1276]byte; use(buf[:]); return Stackguard() }
+func stack1280() (uintptr, uintptr) { var buf [1280]byte; use(buf[:]); return Stackguard() }
+func stack1284() (uintptr, uintptr) { var buf [1284]byte; use(buf[:]); return Stackguard() }
+func stack1288() (uintptr, uintptr) { var buf [1288]byte; use(buf[:]); return Stackguard() }
+func stack1292() (uintptr, uintptr) { var buf [1292]byte; use(buf[:]); return Stackguard() }
+func stack1296() (uintptr, uintptr) { var buf [1296]byte; use(buf[:]); return Stackguard() }
+func stack1300() (uintptr, uintptr) { var buf [1300]byte; use(buf[:]); return Stackguard() }
+func stack1304() (uintptr, uintptr) { var buf [1304]byte; use(buf[:]); return Stackguard() }
+func stack1308() (uintptr, uintptr) { var buf [1308]byte; use(buf[:]); return Stackguard() }
+func stack1312() (uintptr, uintptr) { var buf [1312]byte; use(buf[:]); return Stackguard() }
+func stack1316() (uintptr, uintptr) { var buf [1316]byte; use(buf[:]); return Stackguard() }
+func stack1320() (uintptr, uintptr) { var buf [1320]byte; use(buf[:]); return Stackguard() }
+func stack1324() (uintptr, uintptr) { var buf [1324]byte; use(buf[:]); return Stackguard() }
+func stack1328() (uintptr, uintptr) { var buf [1328]byte; use(buf[:]); return Stackguard() }
+func stack1332() (uintptr, uintptr) { var buf [1332]byte; use(buf[:]); return Stackguard() }
+func stack1336() (uintptr, uintptr) { var buf [1336]byte; use(buf[:]); return Stackguard() }
+func stack1340() (uintptr, uintptr) { var buf [1340]byte; use(buf[:]); return Stackguard() }
+func stack1344() (uintptr, uintptr) { var buf [1344]byte; use(buf[:]); return Stackguard() }
+func stack1348() (uintptr, uintptr) { var buf [1348]byte; use(buf[:]); return Stackguard() }
+func stack1352() (uintptr, uintptr) { var buf [1352]byte; use(buf[:]); return Stackguard() }
+func stack1356() (uintptr, uintptr) { var buf [1356]byte; use(buf[:]); return Stackguard() }
+func stack1360() (uintptr, uintptr) { var buf [1360]byte; use(buf[:]); return Stackguard() }
+func stack1364() (uintptr, uintptr) { var buf [1364]byte; use(buf[:]); return Stackguard() }
+func stack1368() (uintptr, uintptr) { var buf [1368]byte; use(buf[:]); return Stackguard() }
+func stack1372() (uintptr, uintptr) { var buf [1372]byte; use(buf[:]); return Stackguard() }
+func stack1376() (uintptr, uintptr) { var buf [1376]byte; use(buf[:]); return Stackguard() }
+func stack1380() (uintptr, uintptr) { var buf [1380]byte; use(buf[:]); return Stackguard() }
+func stack1384() (uintptr, uintptr) { var buf [1384]byte; use(buf[:]); return Stackguard() }
+func stack1388() (uintptr, uintptr) { var buf [1388]byte; use(buf[:]); return Stackguard() }
+func stack1392() (uintptr, uintptr) { var buf [1392]byte; use(buf[:]); return Stackguard() }
+func stack1396() (uintptr, uintptr) { var buf [1396]byte; use(buf[:]); return Stackguard() }
+func stack1400() (uintptr, uintptr) { var buf [1400]byte; use(buf[:]); return Stackguard() }
+func stack1404() (uintptr, uintptr) { var buf [1404]byte; use(buf[:]); return Stackguard() }
+func stack1408() (uintptr, uintptr) { var buf [1408]byte; use(buf[:]); return Stackguard() }
+func stack1412() (uintptr, uintptr) { var buf [1412]byte; use(buf[:]); return Stackguard() }
+func stack1416() (uintptr, uintptr) { var buf [1416]byte; use(buf[:]); return Stackguard() }
+func stack1420() (uintptr, uintptr) { var buf [1420]byte; use(buf[:]); return Stackguard() }
+func stack1424() (uintptr, uintptr) { var buf [1424]byte; use(buf[:]); return Stackguard() }
+func stack1428() (uintptr, uintptr) { var buf [1428]byte; use(buf[:]); return Stackguard() }
+func stack1432() (uintptr, uintptr) { var buf [1432]byte; use(buf[:]); return Stackguard() }
+func stack1436() (uintptr, uintptr) { var buf [1436]byte; use(buf[:]); return Stackguard() }
+func stack1440() (uintptr, uintptr) { var buf [1440]byte; use(buf[:]); return Stackguard() }
+func stack1444() (uintptr, uintptr) { var buf [1444]byte; use(buf[:]); return Stackguard() }
+func stack1448() (uintptr, uintptr) { var buf [1448]byte; use(buf[:]); return Stackguard() }
+func stack1452() (uintptr, uintptr) { var buf [1452]byte; use(buf[:]); return Stackguard() }
+func stack1456() (uintptr, uintptr) { var buf [1456]byte; use(buf[:]); return Stackguard() }
+func stack1460() (uintptr, uintptr) { var buf [1460]byte; use(buf[:]); return Stackguard() }
+func stack1464() (uintptr, uintptr) { var buf [1464]byte; use(buf[:]); return Stackguard() }
+func stack1468() (uintptr, uintptr) { var buf [1468]byte; use(buf[:]); return Stackguard() }
+func stack1472() (uintptr, uintptr) { var buf [1472]byte; use(buf[:]); return Stackguard() }
+func stack1476() (uintptr, uintptr) { var buf [1476]byte; use(buf[:]); return Stackguard() }
+func stack1480() (uintptr, uintptr) { var buf [1480]byte; use(buf[:]); return Stackguard() }
+func stack1484() (uintptr, uintptr) { var buf [1484]byte; use(buf[:]); return Stackguard() }
+func stack1488() (uintptr, uintptr) { var buf [1488]byte; use(buf[:]); return Stackguard() }
+func stack1492() (uintptr, uintptr) { var buf [1492]byte; use(buf[:]); return Stackguard() }
+func stack1496() (uintptr, uintptr) { var buf [1496]byte; use(buf[:]); return Stackguard() }
+func stack1500() (uintptr, uintptr) { var buf [1500]byte; use(buf[:]); return Stackguard() }
+func stack1504() (uintptr, uintptr) { var buf [1504]byte; use(buf[:]); return Stackguard() }
+func stack1508() (uintptr, uintptr) { var buf [1508]byte; use(buf[:]); return Stackguard() }
+func stack1512() (uintptr, uintptr) { var buf [1512]byte; use(buf[:]); return Stackguard() }
+func stack1516() (uintptr, uintptr) { var buf [1516]byte; use(buf[:]); return Stackguard() }
+func stack1520() (uintptr, uintptr) { var buf [1520]byte; use(buf[:]); return Stackguard() }
+func stack1524() (uintptr, uintptr) { var buf [1524]byte; use(buf[:]); return Stackguard() }
+func stack1528() (uintptr, uintptr) { var buf [1528]byte; use(buf[:]); return Stackguard() }
+func stack1532() (uintptr, uintptr) { var buf [1532]byte; use(buf[:]); return Stackguard() }
+func stack1536() (uintptr, uintptr) { var buf [1536]byte; use(buf[:]); return Stackguard() }
+func stack1540() (uintptr, uintptr) { var buf [1540]byte; use(buf[:]); return Stackguard() }
+func stack1544() (uintptr, uintptr) { var buf [1544]byte; use(buf[:]); return Stackguard() }
+func stack1548() (uintptr, uintptr) { var buf [1548]byte; use(buf[:]); return Stackguard() }
+func stack1552() (uintptr, uintptr) { var buf [1552]byte; use(buf[:]); return Stackguard() }
+func stack1556() (uintptr, uintptr) { var buf [1556]byte; use(buf[:]); return Stackguard() }
+func stack1560() (uintptr, uintptr) { var buf [1560]byte; use(buf[:]); return Stackguard() }
+func stack1564() (uintptr, uintptr) { var buf [1564]byte; use(buf[:]); return Stackguard() }
+func stack1568() (uintptr, uintptr) { var buf [1568]byte; use(buf[:]); return Stackguard() }
+func stack1572() (uintptr, uintptr) { var buf [1572]byte; use(buf[:]); return Stackguard() }
+func stack1576() (uintptr, uintptr) { var buf [1576]byte; use(buf[:]); return Stackguard() }
+func stack1580() (uintptr, uintptr) { var buf [1580]byte; use(buf[:]); return Stackguard() }
+func stack1584() (uintptr, uintptr) { var buf [1584]byte; use(buf[:]); return Stackguard() }
+func stack1588() (uintptr, uintptr) { var buf [1588]byte; use(buf[:]); return Stackguard() }
+func stack1592() (uintptr, uintptr) { var buf [1592]byte; use(buf[:]); return Stackguard() }
+func stack1596() (uintptr, uintptr) { var buf [1596]byte; use(buf[:]); return Stackguard() }
+func stack1600() (uintptr, uintptr) { var buf [1600]byte; use(buf[:]); return Stackguard() }
+func stack1604() (uintptr, uintptr) { var buf [1604]byte; use(buf[:]); return Stackguard() }
+func stack1608() (uintptr, uintptr) { var buf [1608]byte; use(buf[:]); return Stackguard() }
+func stack1612() (uintptr, uintptr) { var buf [1612]byte; use(buf[:]); return Stackguard() }
+func stack1616() (uintptr, uintptr) { var buf [1616]byte; use(buf[:]); return Stackguard() }
+func stack1620() (uintptr, uintptr) { var buf [1620]byte; use(buf[:]); return Stackguard() }
+func stack1624() (uintptr, uintptr) { var buf [1624]byte; use(buf[:]); return Stackguard() }
+func stack1628() (uintptr, uintptr) { var buf [1628]byte; use(buf[:]); return Stackguard() }
+func stack1632() (uintptr, uintptr) { var buf [1632]byte; use(buf[:]); return Stackguard() }
+func stack1636() (uintptr, uintptr) { var buf [1636]byte; use(buf[:]); return Stackguard() }
+func stack1640() (uintptr, uintptr) { var buf [1640]byte; use(buf[:]); return Stackguard() }
+func stack1644() (uintptr, uintptr) { var buf [1644]byte; use(buf[:]); return Stackguard() }
+func stack1648() (uintptr, uintptr) { var buf [1648]byte; use(buf[:]); return Stackguard() }
+func stack1652() (uintptr, uintptr) { var buf [1652]byte; use(buf[:]); return Stackguard() }
+func stack1656() (uintptr, uintptr) { var buf [1656]byte; use(buf[:]); return Stackguard() }
+func stack1660() (uintptr, uintptr) { var buf [1660]byte; use(buf[:]); return Stackguard() }
+func stack1664() (uintptr, uintptr) { var buf [1664]byte; use(buf[:]); return Stackguard() }
+func stack1668() (uintptr, uintptr) { var buf [1668]byte; use(buf[:]); return Stackguard() }
+func stack1672() (uintptr, uintptr) { var buf [1672]byte; use(buf[:]); return Stackguard() }
+func stack1676() (uintptr, uintptr) { var buf [1676]byte; use(buf[:]); return Stackguard() }
+func stack1680() (uintptr, uintptr) { var buf [1680]byte; use(buf[:]); return Stackguard() }
+func stack1684() (uintptr, uintptr) { var buf [1684]byte; use(buf[:]); return Stackguard() }
+func stack1688() (uintptr, uintptr) { var buf [1688]byte; use(buf[:]); return Stackguard() }
+func stack1692() (uintptr, uintptr) { var buf [1692]byte; use(buf[:]); return Stackguard() }
+func stack1696() (uintptr, uintptr) { var buf [1696]byte; use(buf[:]); return Stackguard() }
+func stack1700() (uintptr, uintptr) { var buf [1700]byte; use(buf[:]); return Stackguard() }
+func stack1704() (uintptr, uintptr) { var buf [1704]byte; use(buf[:]); return Stackguard() }
+func stack1708() (uintptr, uintptr) { var buf [1708]byte; use(buf[:]); return Stackguard() }
+func stack1712() (uintptr, uintptr) { var buf [1712]byte; use(buf[:]); return Stackguard() }
+func stack1716() (uintptr, uintptr) { var buf [1716]byte; use(buf[:]); return Stackguard() }
+func stack1720() (uintptr, uintptr) { var buf [1720]byte; use(buf[:]); return Stackguard() }
+func stack1724() (uintptr, uintptr) { var buf [1724]byte; use(buf[:]); return Stackguard() }
+func stack1728() (uintptr, uintptr) { var buf [1728]byte; use(buf[:]); return Stackguard() }
+func stack1732() (uintptr, uintptr) { var buf [1732]byte; use(buf[:]); return Stackguard() }
+func stack1736() (uintptr, uintptr) { var buf [1736]byte; use(buf[:]); return Stackguard() }
+func stack1740() (uintptr, uintptr) { var buf [1740]byte; use(buf[:]); return Stackguard() }
+func stack1744() (uintptr, uintptr) { var buf [1744]byte; use(buf[:]); return Stackguard() }
+func stack1748() (uintptr, uintptr) { var buf [1748]byte; use(buf[:]); return Stackguard() }
+func stack1752() (uintptr, uintptr) { var buf [1752]byte; use(buf[:]); return Stackguard() }
+func stack1756() (uintptr, uintptr) { var buf [1756]byte; use(buf[:]); return Stackguard() }
+func stack1760() (uintptr, uintptr) { var buf [1760]byte; use(buf[:]); return Stackguard() }
+func stack1764() (uintptr, uintptr) { var buf [1764]byte; use(buf[:]); return Stackguard() }
+func stack1768() (uintptr, uintptr) { var buf [1768]byte; use(buf[:]); return Stackguard() }
+func stack1772() (uintptr, uintptr) { var buf [1772]byte; use(buf[:]); return Stackguard() }
+func stack1776() (uintptr, uintptr) { var buf [1776]byte; use(buf[:]); return Stackguard() }
+func stack1780() (uintptr, uintptr) { var buf [1780]byte; use(buf[:]); return Stackguard() }
+func stack1784() (uintptr, uintptr) { var buf [1784]byte; use(buf[:]); return Stackguard() }
+func stack1788() (uintptr, uintptr) { var buf [1788]byte; use(buf[:]); return Stackguard() }
+func stack1792() (uintptr, uintptr) { var buf [1792]byte; use(buf[:]); return Stackguard() }
+func stack1796() (uintptr, uintptr) { var buf [1796]byte; use(buf[:]); return Stackguard() }
+func stack1800() (uintptr, uintptr) { var buf [1800]byte; use(buf[:]); return Stackguard() }
+func stack1804() (uintptr, uintptr) { var buf [1804]byte; use(buf[:]); return Stackguard() }
+func stack1808() (uintptr, uintptr) { var buf [1808]byte; use(buf[:]); return Stackguard() }
+func stack1812() (uintptr, uintptr) { var buf [1812]byte; use(buf[:]); return Stackguard() }
+func stack1816() (uintptr, uintptr) { var buf [1816]byte; use(buf[:]); return Stackguard() }
+func stack1820() (uintptr, uintptr) { var buf [1820]byte; use(buf[:]); return Stackguard() }
+func stack1824() (uintptr, uintptr) { var buf [1824]byte; use(buf[:]); return Stackguard() }
+func stack1828() (uintptr, uintptr) { var buf [1828]byte; use(buf[:]); return Stackguard() }
+func stack1832() (uintptr, uintptr) { var buf [1832]byte; use(buf[:]); return Stackguard() }
+func stack1836() (uintptr, uintptr) { var buf [1836]byte; use(buf[:]); return Stackguard() }
+func stack1840() (uintptr, uintptr) { var buf [1840]byte; use(buf[:]); return Stackguard() }
+func stack1844() (uintptr, uintptr) { var buf [1844]byte; use(buf[:]); return Stackguard() }
+func stack1848() (uintptr, uintptr) { var buf [1848]byte; use(buf[:]); return Stackguard() }
+func stack1852() (uintptr, uintptr) { var buf [1852]byte; use(buf[:]); return Stackguard() }
+func stack1856() (uintptr, uintptr) { var buf [1856]byte; use(buf[:]); return Stackguard() }
+func stack1860() (uintptr, uintptr) { var buf [1860]byte; use(buf[:]); return Stackguard() }
+func stack1864() (uintptr, uintptr) { var buf [1864]byte; use(buf[:]); return Stackguard() }
+func stack1868() (uintptr, uintptr) { var buf [1868]byte; use(buf[:]); return Stackguard() }
+func stack1872() (uintptr, uintptr) { var buf [1872]byte; use(buf[:]); return Stackguard() }
+func stack1876() (uintptr, uintptr) { var buf [1876]byte; use(buf[:]); return Stackguard() }
+func stack1880() (uintptr, uintptr) { var buf [1880]byte; use(buf[:]); return Stackguard() }
+func stack1884() (uintptr, uintptr) { var buf [1884]byte; use(buf[:]); return Stackguard() }
+func stack1888() (uintptr, uintptr) { var buf [1888]byte; use(buf[:]); return Stackguard() }
+func stack1892() (uintptr, uintptr) { var buf [1892]byte; use(buf[:]); return Stackguard() }
+func stack1896() (uintptr, uintptr) { var buf [1896]byte; use(buf[:]); return Stackguard() }
+func stack1900() (uintptr, uintptr) { var buf [1900]byte; use(buf[:]); return Stackguard() }
+func stack1904() (uintptr, uintptr) { var buf [1904]byte; use(buf[:]); return Stackguard() }
+func stack1908() (uintptr, uintptr) { var buf [1908]byte; use(buf[:]); return Stackguard() }
+func stack1912() (uintptr, uintptr) { var buf [1912]byte; use(buf[:]); return Stackguard() }
+func stack1916() (uintptr, uintptr) { var buf [1916]byte; use(buf[:]); return Stackguard() }
+func stack1920() (uintptr, uintptr) { var buf [1920]byte; use(buf[:]); return Stackguard() }
+func stack1924() (uintptr, uintptr) { var buf [1924]byte; use(buf[:]); return Stackguard() }
+func stack1928() (uintptr, uintptr) { var buf [1928]byte; use(buf[:]); return Stackguard() }
+func stack1932() (uintptr, uintptr) { var buf [1932]byte; use(buf[:]); return Stackguard() }
+func stack1936() (uintptr, uintptr) { var buf [1936]byte; use(buf[:]); return Stackguard() }
+func stack1940() (uintptr, uintptr) { var buf [1940]byte; use(buf[:]); return Stackguard() }
+func stack1944() (uintptr, uintptr) { var buf [1944]byte; use(buf[:]); return Stackguard() }
+func stack1948() (uintptr, uintptr) { var buf [1948]byte; use(buf[:]); return Stackguard() }
+func stack1952() (uintptr, uintptr) { var buf [1952]byte; use(buf[:]); return Stackguard() }
+func stack1956() (uintptr, uintptr) { var buf [1956]byte; use(buf[:]); return Stackguard() }
+func stack1960() (uintptr, uintptr) { var buf [1960]byte; use(buf[:]); return Stackguard() }
+func stack1964() (uintptr, uintptr) { var buf [1964]byte; use(buf[:]); return Stackguard() }
+func stack1968() (uintptr, uintptr) { var buf [1968]byte; use(buf[:]); return Stackguard() }
+func stack1972() (uintptr, uintptr) { var buf [1972]byte; use(buf[:]); return Stackguard() }
+func stack1976() (uintptr, uintptr) { var buf [1976]byte; use(buf[:]); return Stackguard() }
+func stack1980() (uintptr, uintptr) { var buf [1980]byte; use(buf[:]); return Stackguard() }
+func stack1984() (uintptr, uintptr) { var buf [1984]byte; use(buf[:]); return Stackguard() }
+func stack1988() (uintptr, uintptr) { var buf [1988]byte; use(buf[:]); return Stackguard() }
+func stack1992() (uintptr, uintptr) { var buf [1992]byte; use(buf[:]); return Stackguard() }
+func stack1996() (uintptr, uintptr) { var buf [1996]byte; use(buf[:]); return Stackguard() }
+func stack2000() (uintptr, uintptr) { var buf [2000]byte; use(buf[:]); return Stackguard() }
+func stack2004() (uintptr, uintptr) { var buf [2004]byte; use(buf[:]); return Stackguard() }
+func stack2008() (uintptr, uintptr) { var buf [2008]byte; use(buf[:]); return Stackguard() }
+func stack2012() (uintptr, uintptr) { var buf [2012]byte; use(buf[:]); return Stackguard() }
+func stack2016() (uintptr, uintptr) { var buf [2016]byte; use(buf[:]); return Stackguard() }
+func stack2020() (uintptr, uintptr) { var buf [2020]byte; use(buf[:]); return Stackguard() }
+func stack2024() (uintptr, uintptr) { var buf [2024]byte; use(buf[:]); return Stackguard() }
+func stack2028() (uintptr, uintptr) { var buf [2028]byte; use(buf[:]); return Stackguard() }
+func stack2032() (uintptr, uintptr) { var buf [2032]byte; use(buf[:]); return Stackguard() }
+func stack2036() (uintptr, uintptr) { var buf [2036]byte; use(buf[:]); return Stackguard() }
+func stack2040() (uintptr, uintptr) { var buf [2040]byte; use(buf[:]); return Stackguard() }
+func stack2044() (uintptr, uintptr) { var buf [2044]byte; use(buf[:]); return Stackguard() }
+func stack2048() (uintptr, uintptr) { var buf [2048]byte; use(buf[:]); return Stackguard() }
+func stack2052() (uintptr, uintptr) { var buf [2052]byte; use(buf[:]); return Stackguard() }
+func stack2056() (uintptr, uintptr) { var buf [2056]byte; use(buf[:]); return Stackguard() }
+func stack2060() (uintptr, uintptr) { var buf [2060]byte; use(buf[:]); return Stackguard() }
+func stack2064() (uintptr, uintptr) { var buf [2064]byte; use(buf[:]); return Stackguard() }
+func stack2068() (uintptr, uintptr) { var buf [2068]byte; use(buf[:]); return Stackguard() }
+func stack2072() (uintptr, uintptr) { var buf [2072]byte; use(buf[:]); return Stackguard() }
+func stack2076() (uintptr, uintptr) { var buf [2076]byte; use(buf[:]); return Stackguard() }
+func stack2080() (uintptr, uintptr) { var buf [2080]byte; use(buf[:]); return Stackguard() }
+func stack2084() (uintptr, uintptr) { var buf [2084]byte; use(buf[:]); return Stackguard() }
+func stack2088() (uintptr, uintptr) { var buf [2088]byte; use(buf[:]); return Stackguard() }
+func stack2092() (uintptr, uintptr) { var buf [2092]byte; use(buf[:]); return Stackguard() }
+func stack2096() (uintptr, uintptr) { var buf [2096]byte; use(buf[:]); return Stackguard() }
+func stack2100() (uintptr, uintptr) { var buf [2100]byte; use(buf[:]); return Stackguard() }
+func stack2104() (uintptr, uintptr) { var buf [2104]byte; use(buf[:]); return Stackguard() }
+func stack2108() (uintptr, uintptr) { var buf [2108]byte; use(buf[:]); return Stackguard() }
+func stack2112() (uintptr, uintptr) { var buf [2112]byte; use(buf[:]); return Stackguard() }
+func stack2116() (uintptr, uintptr) { var buf [2116]byte; use(buf[:]); return Stackguard() }
+func stack2120() (uintptr, uintptr) { var buf [2120]byte; use(buf[:]); return Stackguard() }
+func stack2124() (uintptr, uintptr) { var buf [2124]byte; use(buf[:]); return Stackguard() }
+func stack2128() (uintptr, uintptr) { var buf [2128]byte; use(buf[:]); return Stackguard() }
+func stack2132() (uintptr, uintptr) { var buf [2132]byte; use(buf[:]); return Stackguard() }
+func stack2136() (uintptr, uintptr) { var buf [2136]byte; use(buf[:]); return Stackguard() }
+func stack2140() (uintptr, uintptr) { var buf [2140]byte; use(buf[:]); return Stackguard() }
+func stack2144() (uintptr, uintptr) { var buf [2144]byte; use(buf[:]); return Stackguard() }
+func stack2148() (uintptr, uintptr) { var buf [2148]byte; use(buf[:]); return Stackguard() }
+func stack2152() (uintptr, uintptr) { var buf [2152]byte; use(buf[:]); return Stackguard() }
+func stack2156() (uintptr, uintptr) { var buf [2156]byte; use(buf[:]); return Stackguard() }
+func stack2160() (uintptr, uintptr) { var buf [2160]byte; use(buf[:]); return Stackguard() }
+func stack2164() (uintptr, uintptr) { var buf [2164]byte; use(buf[:]); return Stackguard() }
+func stack2168() (uintptr, uintptr) { var buf [2168]byte; use(buf[:]); return Stackguard() }
+func stack2172() (uintptr, uintptr) { var buf [2172]byte; use(buf[:]); return Stackguard() }
+func stack2176() (uintptr, uintptr) { var buf [2176]byte; use(buf[:]); return Stackguard() }
+func stack2180() (uintptr, uintptr) { var buf [2180]byte; use(buf[:]); return Stackguard() }
+func stack2184() (uintptr, uintptr) { var buf [2184]byte; use(buf[:]); return Stackguard() }
+func stack2188() (uintptr, uintptr) { var buf [2188]byte; use(buf[:]); return Stackguard() }
+func stack2192() (uintptr, uintptr) { var buf [2192]byte; use(buf[:]); return Stackguard() }
+func stack2196() (uintptr, uintptr) { var buf [2196]byte; use(buf[:]); return Stackguard() }
+func stack2200() (uintptr, uintptr) { var buf [2200]byte; use(buf[:]); return Stackguard() }
+func stack2204() (uintptr, uintptr) { var buf [2204]byte; use(buf[:]); return Stackguard() }
+func stack2208() (uintptr, uintptr) { var buf [2208]byte; use(buf[:]); return Stackguard() }
+func stack2212() (uintptr, uintptr) { var buf [2212]byte; use(buf[:]); return Stackguard() }
+func stack2216() (uintptr, uintptr) { var buf [2216]byte; use(buf[:]); return Stackguard() }
+func stack2220() (uintptr, uintptr) { var buf [2220]byte; use(buf[:]); return Stackguard() }
+func stack2224() (uintptr, uintptr) { var buf [2224]byte; use(buf[:]); return Stackguard() }
+func stack2228() (uintptr, uintptr) { var buf [2228]byte; use(buf[:]); return Stackguard() }
+func stack2232() (uintptr, uintptr) { var buf [2232]byte; use(buf[:]); return Stackguard() }
+func stack2236() (uintptr, uintptr) { var buf [2236]byte; use(buf[:]); return Stackguard() }
+func stack2240() (uintptr, uintptr) { var buf [2240]byte; use(buf[:]); return Stackguard() }
+func stack2244() (uintptr, uintptr) { var buf [2244]byte; use(buf[:]); return Stackguard() }
+func stack2248() (uintptr, uintptr) { var buf [2248]byte; use(buf[:]); return Stackguard() }
+func stack2252() (uintptr, uintptr) { var buf [2252]byte; use(buf[:]); return Stackguard() }
+func stack2256() (uintptr, uintptr) { var buf [2256]byte; use(buf[:]); return Stackguard() }
+func stack2260() (uintptr, uintptr) { var buf [2260]byte; use(buf[:]); return Stackguard() }
+func stack2264() (uintptr, uintptr) { var buf [2264]byte; use(buf[:]); return Stackguard() }
+func stack2268() (uintptr, uintptr) { var buf [2268]byte; use(buf[:]); return Stackguard() }
+func stack2272() (uintptr, uintptr) { var buf [2272]byte; use(buf[:]); return Stackguard() }
+func stack2276() (uintptr, uintptr) { var buf [2276]byte; use(buf[:]); return Stackguard() }
+func stack2280() (uintptr, uintptr) { var buf [2280]byte; use(buf[:]); return Stackguard() }
+func stack2284() (uintptr, uintptr) { var buf [2284]byte; use(buf[:]); return Stackguard() }
+func stack2288() (uintptr, uintptr) { var buf [2288]byte; use(buf[:]); return Stackguard() }
+func stack2292() (uintptr, uintptr) { var buf [2292]byte; use(buf[:]); return Stackguard() }
+func stack2296() (uintptr, uintptr) { var buf [2296]byte; use(buf[:]); return Stackguard() }
+func stack2300() (uintptr, uintptr) { var buf [2300]byte; use(buf[:]); return Stackguard() }
+func stack2304() (uintptr, uintptr) { var buf [2304]byte; use(buf[:]); return Stackguard() }
+func stack2308() (uintptr, uintptr) { var buf [2308]byte; use(buf[:]); return Stackguard() }
+func stack2312() (uintptr, uintptr) { var buf [2312]byte; use(buf[:]); return Stackguard() }
+func stack2316() (uintptr, uintptr) { var buf [2316]byte; use(buf[:]); return Stackguard() }
+func stack2320() (uintptr, uintptr) { var buf [2320]byte; use(buf[:]); return Stackguard() }
+func stack2324() (uintptr, uintptr) { var buf [2324]byte; use(buf[:]); return Stackguard() }
+func stack2328() (uintptr, uintptr) { var buf [2328]byte; use(buf[:]); return Stackguard() }
+func stack2332() (uintptr, uintptr) { var buf [2332]byte; use(buf[:]); return Stackguard() }
+func stack2336() (uintptr, uintptr) { var buf [2336]byte; use(buf[:]); return Stackguard() }
+func stack2340() (uintptr, uintptr) { var buf [2340]byte; use(buf[:]); return Stackguard() }
+func stack2344() (uintptr, uintptr) { var buf [2344]byte; use(buf[:]); return Stackguard() }
+func stack2348() (uintptr, uintptr) { var buf [2348]byte; use(buf[:]); return Stackguard() }
+func stack2352() (uintptr, uintptr) { var buf [2352]byte; use(buf[:]); return Stackguard() }
+func stack2356() (uintptr, uintptr) { var buf [2356]byte; use(buf[:]); return Stackguard() }
+func stack2360() (uintptr, uintptr) { var buf [2360]byte; use(buf[:]); return Stackguard() }
+func stack2364() (uintptr, uintptr) { var buf [2364]byte; use(buf[:]); return Stackguard() }
+func stack2368() (uintptr, uintptr) { var buf [2368]byte; use(buf[:]); return Stackguard() }
+func stack2372() (uintptr, uintptr) { var buf [2372]byte; use(buf[:]); return Stackguard() }
+func stack2376() (uintptr, uintptr) { var buf [2376]byte; use(buf[:]); return Stackguard() }
+func stack2380() (uintptr, uintptr) { var buf [2380]byte; use(buf[:]); return Stackguard() }
+func stack2384() (uintptr, uintptr) { var buf [2384]byte; use(buf[:]); return Stackguard() }
+func stack2388() (uintptr, uintptr) { var buf [2388]byte; use(buf[:]); return Stackguard() }
+func stack2392() (uintptr, uintptr) { var buf [2392]byte; use(buf[:]); return Stackguard() }
+func stack2396() (uintptr, uintptr) { var buf [2396]byte; use(buf[:]); return Stackguard() }
+func stack2400() (uintptr, uintptr) { var buf [2400]byte; use(buf[:]); return Stackguard() }
+func stack2404() (uintptr, uintptr) { var buf [2404]byte; use(buf[:]); return Stackguard() }
+func stack2408() (uintptr, uintptr) { var buf [2408]byte; use(buf[:]); return Stackguard() }
+func stack2412() (uintptr, uintptr) { var buf [2412]byte; use(buf[:]); return Stackguard() }
+func stack2416() (uintptr, uintptr) { var buf [2416]byte; use(buf[:]); return Stackguard() }
+func stack2420() (uintptr, uintptr) { var buf [2420]byte; use(buf[:]); return Stackguard() }
+func stack2424() (uintptr, uintptr) { var buf [2424]byte; use(buf[:]); return Stackguard() }
+func stack2428() (uintptr, uintptr) { var buf [2428]byte; use(buf[:]); return Stackguard() }
+func stack2432() (uintptr, uintptr) { var buf [2432]byte; use(buf[:]); return Stackguard() }
+func stack2436() (uintptr, uintptr) { var buf [2436]byte; use(buf[:]); return Stackguard() }
+func stack2440() (uintptr, uintptr) { var buf [2440]byte; use(buf[:]); return Stackguard() }
+func stack2444() (uintptr, uintptr) { var buf [2444]byte; use(buf[:]); return Stackguard() }
+func stack2448() (uintptr, uintptr) { var buf [2448]byte; use(buf[:]); return Stackguard() }
+func stack2452() (uintptr, uintptr) { var buf [2452]byte; use(buf[:]); return Stackguard() }
+func stack2456() (uintptr, uintptr) { var buf [2456]byte; use(buf[:]); return Stackguard() }
+func stack2460() (uintptr, uintptr) { var buf [2460]byte; use(buf[:]); return Stackguard() }
+func stack2464() (uintptr, uintptr) { var buf [2464]byte; use(buf[:]); return Stackguard() }
+func stack2468() (uintptr, uintptr) { var buf [2468]byte; use(buf[:]); return Stackguard() }
+func stack2472() (uintptr, uintptr) { var buf [2472]byte; use(buf[:]); return Stackguard() }
+func stack2476() (uintptr, uintptr) { var buf [2476]byte; use(buf[:]); return Stackguard() }
+func stack2480() (uintptr, uintptr) { var buf [2480]byte; use(buf[:]); return Stackguard() }
+func stack2484() (uintptr, uintptr) { var buf [2484]byte; use(buf[:]); return Stackguard() }
+func stack2488() (uintptr, uintptr) { var buf [2488]byte; use(buf[:]); return Stackguard() }
+func stack2492() (uintptr, uintptr) { var buf [2492]byte; use(buf[:]); return Stackguard() }
+func stack2496() (uintptr, uintptr) { var buf [2496]byte; use(buf[:]); return Stackguard() }
+func stack2500() (uintptr, uintptr) { var buf [2500]byte; use(buf[:]); return Stackguard() }
+func stack2504() (uintptr, uintptr) { var buf [2504]byte; use(buf[:]); return Stackguard() }
+func stack2508() (uintptr, uintptr) { var buf [2508]byte; use(buf[:]); return Stackguard() }
+func stack2512() (uintptr, uintptr) { var buf [2512]byte; use(buf[:]); return Stackguard() }
+func stack2516() (uintptr, uintptr) { var buf [2516]byte; use(buf[:]); return Stackguard() }
+func stack2520() (uintptr, uintptr) { var buf [2520]byte; use(buf[:]); return Stackguard() }
+func stack2524() (uintptr, uintptr) { var buf [2524]byte; use(buf[:]); return Stackguard() }
+func stack2528() (uintptr, uintptr) { var buf [2528]byte; use(buf[:]); return Stackguard() }
+func stack2532() (uintptr, uintptr) { var buf [2532]byte; use(buf[:]); return Stackguard() }
+func stack2536() (uintptr, uintptr) { var buf [2536]byte; use(buf[:]); return Stackguard() }
+func stack2540() (uintptr, uintptr) { var buf [2540]byte; use(buf[:]); return Stackguard() }
+func stack2544() (uintptr, uintptr) { var buf [2544]byte; use(buf[:]); return Stackguard() }
+func stack2548() (uintptr, uintptr) { var buf [2548]byte; use(buf[:]); return Stackguard() }
+func stack2552() (uintptr, uintptr) { var buf [2552]byte; use(buf[:]); return Stackguard() }
+func stack2556() (uintptr, uintptr) { var buf [2556]byte; use(buf[:]); return Stackguard() }
+func stack2560() (uintptr, uintptr) { var buf [2560]byte; use(buf[:]); return Stackguard() }
+func stack2564() (uintptr, uintptr) { var buf [2564]byte; use(buf[:]); return Stackguard() }
+func stack2568() (uintptr, uintptr) { var buf [2568]byte; use(buf[:]); return Stackguard() }
+func stack2572() (uintptr, uintptr) { var buf [2572]byte; use(buf[:]); return Stackguard() }
+func stack2576() (uintptr, uintptr) { var buf [2576]byte; use(buf[:]); return Stackguard() }
+func stack2580() (uintptr, uintptr) { var buf [2580]byte; use(buf[:]); return Stackguard() }
+func stack2584() (uintptr, uintptr) { var buf [2584]byte; use(buf[:]); return Stackguard() }
+func stack2588() (uintptr, uintptr) { var buf [2588]byte; use(buf[:]); return Stackguard() }
+func stack2592() (uintptr, uintptr) { var buf [2592]byte; use(buf[:]); return Stackguard() }
+func stack2596() (uintptr, uintptr) { var buf [2596]byte; use(buf[:]); return Stackguard() }
+func stack2600() (uintptr, uintptr) { var buf [2600]byte; use(buf[:]); return Stackguard() }
+func stack2604() (uintptr, uintptr) { var buf [2604]byte; use(buf[:]); return Stackguard() }
+func stack2608() (uintptr, uintptr) { var buf [2608]byte; use(buf[:]); return Stackguard() }
+func stack2612() (uintptr, uintptr) { var buf [2612]byte; use(buf[:]); return Stackguard() }
+func stack2616() (uintptr, uintptr) { var buf [2616]byte; use(buf[:]); return Stackguard() }
+func stack2620() (uintptr, uintptr) { var buf [2620]byte; use(buf[:]); return Stackguard() }
+func stack2624() (uintptr, uintptr) { var buf [2624]byte; use(buf[:]); return Stackguard() }
+func stack2628() (uintptr, uintptr) { var buf [2628]byte; use(buf[:]); return Stackguard() }
+func stack2632() (uintptr, uintptr) { var buf [2632]byte; use(buf[:]); return Stackguard() }
+func stack2636() (uintptr, uintptr) { var buf [2636]byte; use(buf[:]); return Stackguard() }
+func stack2640() (uintptr, uintptr) { var buf [2640]byte; use(buf[:]); return Stackguard() }
+func stack2644() (uintptr, uintptr) { var buf [2644]byte; use(buf[:]); return Stackguard() }
+func stack2648() (uintptr, uintptr) { var buf [2648]byte; use(buf[:]); return Stackguard() }
+func stack2652() (uintptr, uintptr) { var buf [2652]byte; use(buf[:]); return Stackguard() }
+func stack2656() (uintptr, uintptr) { var buf [2656]byte; use(buf[:]); return Stackguard() }
+func stack2660() (uintptr, uintptr) { var buf [2660]byte; use(buf[:]); return Stackguard() }
+func stack2664() (uintptr, uintptr) { var buf [2664]byte; use(buf[:]); return Stackguard() }
+func stack2668() (uintptr, uintptr) { var buf [2668]byte; use(buf[:]); return Stackguard() }
+func stack2672() (uintptr, uintptr) { var buf [2672]byte; use(buf[:]); return Stackguard() }
+func stack2676() (uintptr, uintptr) { var buf [2676]byte; use(buf[:]); return Stackguard() }
+func stack2680() (uintptr, uintptr) { var buf [2680]byte; use(buf[:]); return Stackguard() }
+func stack2684() (uintptr, uintptr) { var buf [2684]byte; use(buf[:]); return Stackguard() }
+func stack2688() (uintptr, uintptr) { var buf [2688]byte; use(buf[:]); return Stackguard() }
+func stack2692() (uintptr, uintptr) { var buf [2692]byte; use(buf[:]); return Stackguard() }
+func stack2696() (uintptr, uintptr) { var buf [2696]byte; use(buf[:]); return Stackguard() }
+func stack2700() (uintptr, uintptr) { var buf [2700]byte; use(buf[:]); return Stackguard() }
+func stack2704() (uintptr, uintptr) { var buf [2704]byte; use(buf[:]); return Stackguard() }
+func stack2708() (uintptr, uintptr) { var buf [2708]byte; use(buf[:]); return Stackguard() }
+func stack2712() (uintptr, uintptr) { var buf [2712]byte; use(buf[:]); return Stackguard() }
+func stack2716() (uintptr, uintptr) { var buf [2716]byte; use(buf[:]); return Stackguard() }
+func stack2720() (uintptr, uintptr) { var buf [2720]byte; use(buf[:]); return Stackguard() }
+func stack2724() (uintptr, uintptr) { var buf [2724]byte; use(buf[:]); return Stackguard() }
+func stack2728() (uintptr, uintptr) { var buf [2728]byte; use(buf[:]); return Stackguard() }
+func stack2732() (uintptr, uintptr) { var buf [2732]byte; use(buf[:]); return Stackguard() }
+func stack2736() (uintptr, uintptr) { var buf [2736]byte; use(buf[:]); return Stackguard() }
+func stack2740() (uintptr, uintptr) { var buf [2740]byte; use(buf[:]); return Stackguard() }
+func stack2744() (uintptr, uintptr) { var buf [2744]byte; use(buf[:]); return Stackguard() }
+func stack2748() (uintptr, uintptr) { var buf [2748]byte; use(buf[:]); return Stackguard() }
+func stack2752() (uintptr, uintptr) { var buf [2752]byte; use(buf[:]); return Stackguard() }
+func stack2756() (uintptr, uintptr) { var buf [2756]byte; use(buf[:]); return Stackguard() }
+func stack2760() (uintptr, uintptr) { var buf [2760]byte; use(buf[:]); return Stackguard() }
+func stack2764() (uintptr, uintptr) { var buf [2764]byte; use(buf[:]); return Stackguard() }
+func stack2768() (uintptr, uintptr) { var buf [2768]byte; use(buf[:]); return Stackguard() }
+func stack2772() (uintptr, uintptr) { var buf [2772]byte; use(buf[:]); return Stackguard() }
+func stack2776() (uintptr, uintptr) { var buf [2776]byte; use(buf[:]); return Stackguard() }
+func stack2780() (uintptr, uintptr) { var buf [2780]byte; use(buf[:]); return Stackguard() }
+func stack2784() (uintptr, uintptr) { var buf [2784]byte; use(buf[:]); return Stackguard() }
+func stack2788() (uintptr, uintptr) { var buf [2788]byte; use(buf[:]); return Stackguard() }
+func stack2792() (uintptr, uintptr) { var buf [2792]byte; use(buf[:]); return Stackguard() }
+func stack2796() (uintptr, uintptr) { var buf [2796]byte; use(buf[:]); return Stackguard() }
+func stack2800() (uintptr, uintptr) { var buf [2800]byte; use(buf[:]); return Stackguard() }
+func stack2804() (uintptr, uintptr) { var buf [2804]byte; use(buf[:]); return Stackguard() }
+func stack2808() (uintptr, uintptr) { var buf [2808]byte; use(buf[:]); return Stackguard() }
+func stack2812() (uintptr, uintptr) { var buf [2812]byte; use(buf[:]); return Stackguard() }
+func stack2816() (uintptr, uintptr) { var buf [2816]byte; use(buf[:]); return Stackguard() }
+func stack2820() (uintptr, uintptr) { var buf [2820]byte; use(buf[:]); return Stackguard() }
+func stack2824() (uintptr, uintptr) { var buf [2824]byte; use(buf[:]); return Stackguard() }
+func stack2828() (uintptr, uintptr) { var buf [2828]byte; use(buf[:]); return Stackguard() }
+func stack2832() (uintptr, uintptr) { var buf [2832]byte; use(buf[:]); return Stackguard() }
+func stack2836() (uintptr, uintptr) { var buf [2836]byte; use(buf[:]); return Stackguard() }
+func stack2840() (uintptr, uintptr) { var buf [2840]byte; use(buf[:]); return Stackguard() }
+func stack2844() (uintptr, uintptr) { var buf [2844]byte; use(buf[:]); return Stackguard() }
+func stack2848() (uintptr, uintptr) { var buf [2848]byte; use(buf[:]); return Stackguard() }
+func stack2852() (uintptr, uintptr) { var buf [2852]byte; use(buf[:]); return Stackguard() }
+func stack2856() (uintptr, uintptr) { var buf [2856]byte; use(buf[:]); return Stackguard() }
+func stack2860() (uintptr, uintptr) { var buf [2860]byte; use(buf[:]); return Stackguard() }
+func stack2864() (uintptr, uintptr) { var buf [2864]byte; use(buf[:]); return Stackguard() }
+func stack2868() (uintptr, uintptr) { var buf [2868]byte; use(buf[:]); return Stackguard() }
+func stack2872() (uintptr, uintptr) { var buf [2872]byte; use(buf[:]); return Stackguard() }
+func stack2876() (uintptr, uintptr) { var buf [2876]byte; use(buf[:]); return Stackguard() }
+func stack2880() (uintptr, uintptr) { var buf [2880]byte; use(buf[:]); return Stackguard() }
+func stack2884() (uintptr, uintptr) { var buf [2884]byte; use(buf[:]); return Stackguard() }
+func stack2888() (uintptr, uintptr) { var buf [2888]byte; use(buf[:]); return Stackguard() }
+func stack2892() (uintptr, uintptr) { var buf [2892]byte; use(buf[:]); return Stackguard() }
+func stack2896() (uintptr, uintptr) { var buf [2896]byte; use(buf[:]); return Stackguard() }
+func stack2900() (uintptr, uintptr) { var buf [2900]byte; use(buf[:]); return Stackguard() }
+func stack2904() (uintptr, uintptr) { var buf [2904]byte; use(buf[:]); return Stackguard() }
+func stack2908() (uintptr, uintptr) { var buf [2908]byte; use(buf[:]); return Stackguard() }
+func stack2912() (uintptr, uintptr) { var buf [2912]byte; use(buf[:]); return Stackguard() }
+func stack2916() (uintptr, uintptr) { var buf [2916]byte; use(buf[:]); return Stackguard() }
+func stack2920() (uintptr, uintptr) { var buf [2920]byte; use(buf[:]); return Stackguard() }
+func stack2924() (uintptr, uintptr) { var buf [2924]byte; use(buf[:]); return Stackguard() }
+func stack2928() (uintptr, uintptr) { var buf [2928]byte; use(buf[:]); return Stackguard() }
+func stack2932() (uintptr, uintptr) { var buf [2932]byte; use(buf[:]); return Stackguard() }
+func stack2936() (uintptr, uintptr) { var buf [2936]byte; use(buf[:]); return Stackguard() }
+func stack2940() (uintptr, uintptr) { var buf [2940]byte; use(buf[:]); return Stackguard() }
+func stack2944() (uintptr, uintptr) { var buf [2944]byte; use(buf[:]); return Stackguard() }
+func stack2948() (uintptr, uintptr) { var buf [2948]byte; use(buf[:]); return Stackguard() }
+func stack2952() (uintptr, uintptr) { var buf [2952]byte; use(buf[:]); return Stackguard() }
+func stack2956() (uintptr, uintptr) { var buf [2956]byte; use(buf[:]); return Stackguard() }
+func stack2960() (uintptr, uintptr) { var buf [2960]byte; use(buf[:]); return Stackguard() }
+func stack2964() (uintptr, uintptr) { var buf [2964]byte; use(buf[:]); return Stackguard() }
+func stack2968() (uintptr, uintptr) { var buf [2968]byte; use(buf[:]); return Stackguard() }
+func stack2972() (uintptr, uintptr) { var buf [2972]byte; use(buf[:]); return Stackguard() }
+func stack2976() (uintptr, uintptr) { var buf [2976]byte; use(buf[:]); return Stackguard() }
+func stack2980() (uintptr, uintptr) { var buf [2980]byte; use(buf[:]); return Stackguard() }
+func stack2984() (uintptr, uintptr) { var buf [2984]byte; use(buf[:]); return Stackguard() }
+func stack2988() (uintptr, uintptr) { var buf [2988]byte; use(buf[:]); return Stackguard() }
+func stack2992() (uintptr, uintptr) { var buf [2992]byte; use(buf[:]); return Stackguard() }
+func stack2996() (uintptr, uintptr) { var buf [2996]byte; use(buf[:]); return Stackguard() }
+func stack3000() (uintptr, uintptr) { var buf [3000]byte; use(buf[:]); return Stackguard() }
+func stack3004() (uintptr, uintptr) { var buf [3004]byte; use(buf[:]); return Stackguard() }
+func stack3008() (uintptr, uintptr) { var buf [3008]byte; use(buf[:]); return Stackguard() }
+func stack3012() (uintptr, uintptr) { var buf [3012]byte; use(buf[:]); return Stackguard() }
+func stack3016() (uintptr, uintptr) { var buf [3016]byte; use(buf[:]); return Stackguard() }
+func stack3020() (uintptr, uintptr) { var buf [3020]byte; use(buf[:]); return Stackguard() }
+func stack3024() (uintptr, uintptr) { var buf [3024]byte; use(buf[:]); return Stackguard() }
+func stack3028() (uintptr, uintptr) { var buf [3028]byte; use(buf[:]); return Stackguard() }
+func stack3032() (uintptr, uintptr) { var buf [3032]byte; use(buf[:]); return Stackguard() }
+func stack3036() (uintptr, uintptr) { var buf [3036]byte; use(buf[:]); return Stackguard() }
+func stack3040() (uintptr, uintptr) { var buf [3040]byte; use(buf[:]); return Stackguard() }
+func stack3044() (uintptr, uintptr) { var buf [3044]byte; use(buf[:]); return Stackguard() }
+func stack3048() (uintptr, uintptr) { var buf [3048]byte; use(buf[:]); return Stackguard() }
+func stack3052() (uintptr, uintptr) { var buf [3052]byte; use(buf[:]); return Stackguard() }
+func stack3056() (uintptr, uintptr) { var buf [3056]byte; use(buf[:]); return Stackguard() }
+func stack3060() (uintptr, uintptr) { var buf [3060]byte; use(buf[:]); return Stackguard() }
+func stack3064() (uintptr, uintptr) { var buf [3064]byte; use(buf[:]); return Stackguard() }
+func stack3068() (uintptr, uintptr) { var buf [3068]byte; use(buf[:]); return Stackguard() }
+func stack3072() (uintptr, uintptr) { var buf [3072]byte; use(buf[:]); return Stackguard() }
+func stack3076() (uintptr, uintptr) { var buf [3076]byte; use(buf[:]); return Stackguard() }
+func stack3080() (uintptr, uintptr) { var buf [3080]byte; use(buf[:]); return Stackguard() }
+func stack3084() (uintptr, uintptr) { var buf [3084]byte; use(buf[:]); return Stackguard() }
+func stack3088() (uintptr, uintptr) { var buf [3088]byte; use(buf[:]); return Stackguard() }
+func stack3092() (uintptr, uintptr) { var buf [3092]byte; use(buf[:]); return Stackguard() }
+func stack3096() (uintptr, uintptr) { var buf [3096]byte; use(buf[:]); return Stackguard() }
+func stack3100() (uintptr, uintptr) { var buf [3100]byte; use(buf[:]); return Stackguard() }
+func stack3104() (uintptr, uintptr) { var buf [3104]byte; use(buf[:]); return Stackguard() }
+func stack3108() (uintptr, uintptr) { var buf [3108]byte; use(buf[:]); return Stackguard() }
+func stack3112() (uintptr, uintptr) { var buf [3112]byte; use(buf[:]); return Stackguard() }
+func stack3116() (uintptr, uintptr) { var buf [3116]byte; use(buf[:]); return Stackguard() }
+func stack3120() (uintptr, uintptr) { var buf [3120]byte; use(buf[:]); return Stackguard() }
+func stack3124() (uintptr, uintptr) { var buf [3124]byte; use(buf[:]); return Stackguard() }
+func stack3128() (uintptr, uintptr) { var buf [3128]byte; use(buf[:]); return Stackguard() }
+func stack3132() (uintptr, uintptr) { var buf [3132]byte; use(buf[:]); return Stackguard() }
+func stack3136() (uintptr, uintptr) { var buf [3136]byte; use(buf[:]); return Stackguard() }
+func stack3140() (uintptr, uintptr) { var buf [3140]byte; use(buf[:]); return Stackguard() }
+func stack3144() (uintptr, uintptr) { var buf [3144]byte; use(buf[:]); return Stackguard() }
+func stack3148() (uintptr, uintptr) { var buf [3148]byte; use(buf[:]); return Stackguard() }
+func stack3152() (uintptr, uintptr) { var buf [3152]byte; use(buf[:]); return Stackguard() }
+func stack3156() (uintptr, uintptr) { var buf [3156]byte; use(buf[:]); return Stackguard() }
+func stack3160() (uintptr, uintptr) { var buf [3160]byte; use(buf[:]); return Stackguard() }
+func stack3164() (uintptr, uintptr) { var buf [3164]byte; use(buf[:]); return Stackguard() }
+func stack3168() (uintptr, uintptr) { var buf [3168]byte; use(buf[:]); return Stackguard() }
+func stack3172() (uintptr, uintptr) { var buf [3172]byte; use(buf[:]); return Stackguard() }
+func stack3176() (uintptr, uintptr) { var buf [3176]byte; use(buf[:]); return Stackguard() }
+func stack3180() (uintptr, uintptr) { var buf [3180]byte; use(buf[:]); return Stackguard() }
+func stack3184() (uintptr, uintptr) { var buf [3184]byte; use(buf[:]); return Stackguard() }
+func stack3188() (uintptr, uintptr) { var buf [3188]byte; use(buf[:]); return Stackguard() }
+func stack3192() (uintptr, uintptr) { var buf [3192]byte; use(buf[:]); return Stackguard() }
+func stack3196() (uintptr, uintptr) { var buf [3196]byte; use(buf[:]); return Stackguard() }
+func stack3200() (uintptr, uintptr) { var buf [3200]byte; use(buf[:]); return Stackguard() }
+func stack3204() (uintptr, uintptr) { var buf [3204]byte; use(buf[:]); return Stackguard() }
+func stack3208() (uintptr, uintptr) { var buf [3208]byte; use(buf[:]); return Stackguard() }
+func stack3212() (uintptr, uintptr) { var buf [3212]byte; use(buf[:]); return Stackguard() }
+func stack3216() (uintptr, uintptr) { var buf [3216]byte; use(buf[:]); return Stackguard() }
+func stack3220() (uintptr, uintptr) { var buf [3220]byte; use(buf[:]); return Stackguard() }
+func stack3224() (uintptr, uintptr) { var buf [3224]byte; use(buf[:]); return Stackguard() }
+func stack3228() (uintptr, uintptr) { var buf [3228]byte; use(buf[:]); return Stackguard() }
+func stack3232() (uintptr, uintptr) { var buf [3232]byte; use(buf[:]); return Stackguard() }
+func stack3236() (uintptr, uintptr) { var buf [3236]byte; use(buf[:]); return Stackguard() }
+func stack3240() (uintptr, uintptr) { var buf [3240]byte; use(buf[:]); return Stackguard() }
+func stack3244() (uintptr, uintptr) { var buf [3244]byte; use(buf[:]); return Stackguard() }
+func stack3248() (uintptr, uintptr) { var buf [3248]byte; use(buf[:]); return Stackguard() }
+func stack3252() (uintptr, uintptr) { var buf [3252]byte; use(buf[:]); return Stackguard() }
+func stack3256() (uintptr, uintptr) { var buf [3256]byte; use(buf[:]); return Stackguard() }
+func stack3260() (uintptr, uintptr) { var buf [3260]byte; use(buf[:]); return Stackguard() }
+func stack3264() (uintptr, uintptr) { var buf [3264]byte; use(buf[:]); return Stackguard() }
+func stack3268() (uintptr, uintptr) { var buf [3268]byte; use(buf[:]); return Stackguard() }
+func stack3272() (uintptr, uintptr) { var buf [3272]byte; use(buf[:]); return Stackguard() }
+func stack3276() (uintptr, uintptr) { var buf [3276]byte; use(buf[:]); return Stackguard() }
+func stack3280() (uintptr, uintptr) { var buf [3280]byte; use(buf[:]); return Stackguard() }
+func stack3284() (uintptr, uintptr) { var buf [3284]byte; use(buf[:]); return Stackguard() }
+func stack3288() (uintptr, uintptr) { var buf [3288]byte; use(buf[:]); return Stackguard() }
+func stack3292() (uintptr, uintptr) { var buf [3292]byte; use(buf[:]); return Stackguard() }
+func stack3296() (uintptr, uintptr) { var buf [3296]byte; use(buf[:]); return Stackguard() }
+func stack3300() (uintptr, uintptr) { var buf [3300]byte; use(buf[:]); return Stackguard() }
+func stack3304() (uintptr, uintptr) { var buf [3304]byte; use(buf[:]); return Stackguard() }
+func stack3308() (uintptr, uintptr) { var buf [3308]byte; use(buf[:]); return Stackguard() }
+func stack3312() (uintptr, uintptr) { var buf [3312]byte; use(buf[:]); return Stackguard() }
+func stack3316() (uintptr, uintptr) { var buf [3316]byte; use(buf[:]); return Stackguard() }
+func stack3320() (uintptr, uintptr) { var buf [3320]byte; use(buf[:]); return Stackguard() }
+func stack3324() (uintptr, uintptr) { var buf [3324]byte; use(buf[:]); return Stackguard() }
+func stack3328() (uintptr, uintptr) { var buf [3328]byte; use(buf[:]); return Stackguard() }
+func stack3332() (uintptr, uintptr) { var buf [3332]byte; use(buf[:]); return Stackguard() }
+func stack3336() (uintptr, uintptr) { var buf [3336]byte; use(buf[:]); return Stackguard() }
+func stack3340() (uintptr, uintptr) { var buf [3340]byte; use(buf[:]); return Stackguard() }
+func stack3344() (uintptr, uintptr) { var buf [3344]byte; use(buf[:]); return Stackguard() }
+func stack3348() (uintptr, uintptr) { var buf [3348]byte; use(buf[:]); return Stackguard() }
+func stack3352() (uintptr, uintptr) { var buf [3352]byte; use(buf[:]); return Stackguard() }
+func stack3356() (uintptr, uintptr) { var buf [3356]byte; use(buf[:]); return Stackguard() }
+func stack3360() (uintptr, uintptr) { var buf [3360]byte; use(buf[:]); return Stackguard() }
+func stack3364() (uintptr, uintptr) { var buf [3364]byte; use(buf[:]); return Stackguard() }
+func stack3368() (uintptr, uintptr) { var buf [3368]byte; use(buf[:]); return Stackguard() }
+func stack3372() (uintptr, uintptr) { var buf [3372]byte; use(buf[:]); return Stackguard() }
+func stack3376() (uintptr, uintptr) { var buf [3376]byte; use(buf[:]); return Stackguard() }
+func stack3380() (uintptr, uintptr) { var buf [3380]byte; use(buf[:]); return Stackguard() }
+func stack3384() (uintptr, uintptr) { var buf [3384]byte; use(buf[:]); return Stackguard() }
+func stack3388() (uintptr, uintptr) { var buf [3388]byte; use(buf[:]); return Stackguard() }
+func stack3392() (uintptr, uintptr) { var buf [3392]byte; use(buf[:]); return Stackguard() }
+func stack3396() (uintptr, uintptr) { var buf [3396]byte; use(buf[:]); return Stackguard() }
+func stack3400() (uintptr, uintptr) { var buf [3400]byte; use(buf[:]); return Stackguard() }
+func stack3404() (uintptr, uintptr) { var buf [3404]byte; use(buf[:]); return Stackguard() }
+func stack3408() (uintptr, uintptr) { var buf [3408]byte; use(buf[:]); return Stackguard() }
+func stack3412() (uintptr, uintptr) { var buf [3412]byte; use(buf[:]); return Stackguard() }
+func stack3416() (uintptr, uintptr) { var buf [3416]byte; use(buf[:]); return Stackguard() }
+func stack3420() (uintptr, uintptr) { var buf [3420]byte; use(buf[:]); return Stackguard() }
+func stack3424() (uintptr, uintptr) { var buf [3424]byte; use(buf[:]); return Stackguard() }
+func stack3428() (uintptr, uintptr) { var buf [3428]byte; use(buf[:]); return Stackguard() }
+func stack3432() (uintptr, uintptr) { var buf [3432]byte; use(buf[:]); return Stackguard() }
+func stack3436() (uintptr, uintptr) { var buf [3436]byte; use(buf[:]); return Stackguard() }
+func stack3440() (uintptr, uintptr) { var buf [3440]byte; use(buf[:]); return Stackguard() }
+func stack3444() (uintptr, uintptr) { var buf [3444]byte; use(buf[:]); return Stackguard() }
+func stack3448() (uintptr, uintptr) { var buf [3448]byte; use(buf[:]); return Stackguard() }
+func stack3452() (uintptr, uintptr) { var buf [3452]byte; use(buf[:]); return Stackguard() }
+func stack3456() (uintptr, uintptr) { var buf [3456]byte; use(buf[:]); return Stackguard() }
+func stack3460() (uintptr, uintptr) { var buf [3460]byte; use(buf[:]); return Stackguard() }
+func stack3464() (uintptr, uintptr) { var buf [3464]byte; use(buf[:]); return Stackguard() }
+func stack3468() (uintptr, uintptr) { var buf [3468]byte; use(buf[:]); return Stackguard() }
+func stack3472() (uintptr, uintptr) { var buf [3472]byte; use(buf[:]); return Stackguard() }
+func stack3476() (uintptr, uintptr) { var buf [3476]byte; use(buf[:]); return Stackguard() }
+func stack3480() (uintptr, uintptr) { var buf [3480]byte; use(buf[:]); return Stackguard() }
+func stack3484() (uintptr, uintptr) { var buf [3484]byte; use(buf[:]); return Stackguard() }
+func stack3488() (uintptr, uintptr) { var buf [3488]byte; use(buf[:]); return Stackguard() }
+func stack3492() (uintptr, uintptr) { var buf [3492]byte; use(buf[:]); return Stackguard() }
+func stack3496() (uintptr, uintptr) { var buf [3496]byte; use(buf[:]); return Stackguard() }
+func stack3500() (uintptr, uintptr) { var buf [3500]byte; use(buf[:]); return Stackguard() }
+func stack3504() (uintptr, uintptr) { var buf [3504]byte; use(buf[:]); return Stackguard() }
+func stack3508() (uintptr, uintptr) { var buf [3508]byte; use(buf[:]); return Stackguard() }
+func stack3512() (uintptr, uintptr) { var buf [3512]byte; use(buf[:]); return Stackguard() }
+func stack3516() (uintptr, uintptr) { var buf [3516]byte; use(buf[:]); return Stackguard() }
+func stack3520() (uintptr, uintptr) { var buf [3520]byte; use(buf[:]); return Stackguard() }
+func stack3524() (uintptr, uintptr) { var buf [3524]byte; use(buf[:]); return Stackguard() }
+func stack3528() (uintptr, uintptr) { var buf [3528]byte; use(buf[:]); return Stackguard() }
+func stack3532() (uintptr, uintptr) { var buf [3532]byte; use(buf[:]); return Stackguard() }
+func stack3536() (uintptr, uintptr) { var buf [3536]byte; use(buf[:]); return Stackguard() }
+func stack3540() (uintptr, uintptr) { var buf [3540]byte; use(buf[:]); return Stackguard() }
+func stack3544() (uintptr, uintptr) { var buf [3544]byte; use(buf[:]); return Stackguard() }
+func stack3548() (uintptr, uintptr) { var buf [3548]byte; use(buf[:]); return Stackguard() }
+func stack3552() (uintptr, uintptr) { var buf [3552]byte; use(buf[:]); return Stackguard() }
+func stack3556() (uintptr, uintptr) { var buf [3556]byte; use(buf[:]); return Stackguard() }
+func stack3560() (uintptr, uintptr) { var buf [3560]byte; use(buf[:]); return Stackguard() }
+func stack3564() (uintptr, uintptr) { var buf [3564]byte; use(buf[:]); return Stackguard() }
+func stack3568() (uintptr, uintptr) { var buf [3568]byte; use(buf[:]); return Stackguard() }
+func stack3572() (uintptr, uintptr) { var buf [3572]byte; use(buf[:]); return Stackguard() }
+func stack3576() (uintptr, uintptr) { var buf [3576]byte; use(buf[:]); return Stackguard() }
+func stack3580() (uintptr, uintptr) { var buf [3580]byte; use(buf[:]); return Stackguard() }
+func stack3584() (uintptr, uintptr) { var buf [3584]byte; use(buf[:]); return Stackguard() }
+func stack3588() (uintptr, uintptr) { var buf [3588]byte; use(buf[:]); return Stackguard() }
+func stack3592() (uintptr, uintptr) { var buf [3592]byte; use(buf[:]); return Stackguard() }
+func stack3596() (uintptr, uintptr) { var buf [3596]byte; use(buf[:]); return Stackguard() }
+func stack3600() (uintptr, uintptr) { var buf [3600]byte; use(buf[:]); return Stackguard() }
+func stack3604() (uintptr, uintptr) { var buf [3604]byte; use(buf[:]); return Stackguard() }
+func stack3608() (uintptr, uintptr) { var buf [3608]byte; use(buf[:]); return Stackguard() }
+func stack3612() (uintptr, uintptr) { var buf [3612]byte; use(buf[:]); return Stackguard() }
+func stack3616() (uintptr, uintptr) { var buf [3616]byte; use(buf[:]); return Stackguard() }
+func stack3620() (uintptr, uintptr) { var buf [3620]byte; use(buf[:]); return Stackguard() }
+func stack3624() (uintptr, uintptr) { var buf [3624]byte; use(buf[:]); return Stackguard() }
+func stack3628() (uintptr, uintptr) { var buf [3628]byte; use(buf[:]); return Stackguard() }
+func stack3632() (uintptr, uintptr) { var buf [3632]byte; use(buf[:]); return Stackguard() }
+func stack3636() (uintptr, uintptr) { var buf [3636]byte; use(buf[:]); return Stackguard() }
+func stack3640() (uintptr, uintptr) { var buf [3640]byte; use(buf[:]); return Stackguard() }
+func stack3644() (uintptr, uintptr) { var buf [3644]byte; use(buf[:]); return Stackguard() }
+func stack3648() (uintptr, uintptr) { var buf [3648]byte; use(buf[:]); return Stackguard() }
+func stack3652() (uintptr, uintptr) { var buf [3652]byte; use(buf[:]); return Stackguard() }
+func stack3656() (uintptr, uintptr) { var buf [3656]byte; use(buf[:]); return Stackguard() }
+func stack3660() (uintptr, uintptr) { var buf [3660]byte; use(buf[:]); return Stackguard() }
+func stack3664() (uintptr, uintptr) { var buf [3664]byte; use(buf[:]); return Stackguard() }
+func stack3668() (uintptr, uintptr) { var buf [3668]byte; use(buf[:]); return Stackguard() }
+func stack3672() (uintptr, uintptr) { var buf [3672]byte; use(buf[:]); return Stackguard() }
+func stack3676() (uintptr, uintptr) { var buf [3676]byte; use(buf[:]); return Stackguard() }
+func stack3680() (uintptr, uintptr) { var buf [3680]byte; use(buf[:]); return Stackguard() }
+func stack3684() (uintptr, uintptr) { var buf [3684]byte; use(buf[:]); return Stackguard() }
+func stack3688() (uintptr, uintptr) { var buf [3688]byte; use(buf[:]); return Stackguard() }
+func stack3692() (uintptr, uintptr) { var buf [3692]byte; use(buf[:]); return Stackguard() }
+func stack3696() (uintptr, uintptr) { var buf [3696]byte; use(buf[:]); return Stackguard() }
+func stack3700() (uintptr, uintptr) { var buf [3700]byte; use(buf[:]); return Stackguard() }
+func stack3704() (uintptr, uintptr) { var buf [3704]byte; use(buf[:]); return Stackguard() }
+func stack3708() (uintptr, uintptr) { var buf [3708]byte; use(buf[:]); return Stackguard() }
+func stack3712() (uintptr, uintptr) { var buf [3712]byte; use(buf[:]); return Stackguard() }
+func stack3716() (uintptr, uintptr) { var buf [3716]byte; use(buf[:]); return Stackguard() }
+func stack3720() (uintptr, uintptr) { var buf [3720]byte; use(buf[:]); return Stackguard() }
+func stack3724() (uintptr, uintptr) { var buf [3724]byte; use(buf[:]); return Stackguard() }
+func stack3728() (uintptr, uintptr) { var buf [3728]byte; use(buf[:]); return Stackguard() }
+func stack3732() (uintptr, uintptr) { var buf [3732]byte; use(buf[:]); return Stackguard() }
+func stack3736() (uintptr, uintptr) { var buf [3736]byte; use(buf[:]); return Stackguard() }
+func stack3740() (uintptr, uintptr) { var buf [3740]byte; use(buf[:]); return Stackguard() }
+func stack3744() (uintptr, uintptr) { var buf [3744]byte; use(buf[:]); return Stackguard() }
+func stack3748() (uintptr, uintptr) { var buf [3748]byte; use(buf[:]); return Stackguard() }
+func stack3752() (uintptr, uintptr) { var buf [3752]byte; use(buf[:]); return Stackguard() }
+func stack3756() (uintptr, uintptr) { var buf [3756]byte; use(buf[:]); return Stackguard() }
+func stack3760() (uintptr, uintptr) { var buf [3760]byte; use(buf[:]); return Stackguard() }
+func stack3764() (uintptr, uintptr) { var buf [3764]byte; use(buf[:]); return Stackguard() }
+func stack3768() (uintptr, uintptr) { var buf [3768]byte; use(buf[:]); return Stackguard() }
+func stack3772() (uintptr, uintptr) { var buf [3772]byte; use(buf[:]); return Stackguard() }
+func stack3776() (uintptr, uintptr) { var buf [3776]byte; use(buf[:]); return Stackguard() }
+func stack3780() (uintptr, uintptr) { var buf [3780]byte; use(buf[:]); return Stackguard() }
+func stack3784() (uintptr, uintptr) { var buf [3784]byte; use(buf[:]); return Stackguard() }
+func stack3788() (uintptr, uintptr) { var buf [3788]byte; use(buf[:]); return Stackguard() }
+func stack3792() (uintptr, uintptr) { var buf [3792]byte; use(buf[:]); return Stackguard() }
+func stack3796() (uintptr, uintptr) { var buf [3796]byte; use(buf[:]); return Stackguard() }
+func stack3800() (uintptr, uintptr) { var buf [3800]byte; use(buf[:]); return Stackguard() }
+func stack3804() (uintptr, uintptr) { var buf [3804]byte; use(buf[:]); return Stackguard() }
+func stack3808() (uintptr, uintptr) { var buf [3808]byte; use(buf[:]); return Stackguard() }
+func stack3812() (uintptr, uintptr) { var buf [3812]byte; use(buf[:]); return Stackguard() }
+func stack3816() (uintptr, uintptr) { var buf [3816]byte; use(buf[:]); return Stackguard() }
+func stack3820() (uintptr, uintptr) { var buf [3820]byte; use(buf[:]); return Stackguard() }
+func stack3824() (uintptr, uintptr) { var buf [3824]byte; use(buf[:]); return Stackguard() }
+func stack3828() (uintptr, uintptr) { var buf [3828]byte; use(buf[:]); return Stackguard() }
+func stack3832() (uintptr, uintptr) { var buf [3832]byte; use(buf[:]); return Stackguard() }
+func stack3836() (uintptr, uintptr) { var buf [3836]byte; use(buf[:]); return Stackguard() }
+func stack3840() (uintptr, uintptr) { var buf [3840]byte; use(buf[:]); return Stackguard() }
+func stack3844() (uintptr, uintptr) { var buf [3844]byte; use(buf[:]); return Stackguard() }
+func stack3848() (uintptr, uintptr) { var buf [3848]byte; use(buf[:]); return Stackguard() }
+func stack3852() (uintptr, uintptr) { var buf [3852]byte; use(buf[:]); return Stackguard() }
+func stack3856() (uintptr, uintptr) { var buf [3856]byte; use(buf[:]); return Stackguard() }
+func stack3860() (uintptr, uintptr) { var buf [3860]byte; use(buf[:]); return Stackguard() }
+func stack3864() (uintptr, uintptr) { var buf [3864]byte; use(buf[:]); return Stackguard() }
+func stack3868() (uintptr, uintptr) { var buf [3868]byte; use(buf[:]); return Stackguard() }
+func stack3872() (uintptr, uintptr) { var buf [3872]byte; use(buf[:]); return Stackguard() }
+func stack3876() (uintptr, uintptr) { var buf [3876]byte; use(buf[:]); return Stackguard() }
+func stack3880() (uintptr, uintptr) { var buf [3880]byte; use(buf[:]); return Stackguard() }
+func stack3884() (uintptr, uintptr) { var buf [3884]byte; use(buf[:]); return Stackguard() }
+func stack3888() (uintptr, uintptr) { var buf [3888]byte; use(buf[:]); return Stackguard() }
+func stack3892() (uintptr, uintptr) { var buf [3892]byte; use(buf[:]); return Stackguard() }
+func stack3896() (uintptr, uintptr) { var buf [3896]byte; use(buf[:]); return Stackguard() }
+func stack3900() (uintptr, uintptr) { var buf [3900]byte; use(buf[:]); return Stackguard() }
+func stack3904() (uintptr, uintptr) { var buf [3904]byte; use(buf[:]); return Stackguard() }
+func stack3908() (uintptr, uintptr) { var buf [3908]byte; use(buf[:]); return Stackguard() }
+func stack3912() (uintptr, uintptr) { var buf [3912]byte; use(buf[:]); return Stackguard() }
+func stack3916() (uintptr, uintptr) { var buf [3916]byte; use(buf[:]); return Stackguard() }
+func stack3920() (uintptr, uintptr) { var buf [3920]byte; use(buf[:]); return Stackguard() }
+func stack3924() (uintptr, uintptr) { var buf [3924]byte; use(buf[:]); return Stackguard() }
+func stack3928() (uintptr, uintptr) { var buf [3928]byte; use(buf[:]); return Stackguard() }
+func stack3932() (uintptr, uintptr) { var buf [3932]byte; use(buf[:]); return Stackguard() }
+func stack3936() (uintptr, uintptr) { var buf [3936]byte; use(buf[:]); return Stackguard() }
+func stack3940() (uintptr, uintptr) { var buf [3940]byte; use(buf[:]); return Stackguard() }
+func stack3944() (uintptr, uintptr) { var buf [3944]byte; use(buf[:]); return Stackguard() }
+func stack3948() (uintptr, uintptr) { var buf [3948]byte; use(buf[:]); return Stackguard() }
+func stack3952() (uintptr, uintptr) { var buf [3952]byte; use(buf[:]); return Stackguard() }
+func stack3956() (uintptr, uintptr) { var buf [3956]byte; use(buf[:]); return Stackguard() }
+func stack3960() (uintptr, uintptr) { var buf [3960]byte; use(buf[:]); return Stackguard() }
+func stack3964() (uintptr, uintptr) { var buf [3964]byte; use(buf[:]); return Stackguard() }
+func stack3968() (uintptr, uintptr) { var buf [3968]byte; use(buf[:]); return Stackguard() }
+func stack3972() (uintptr, uintptr) { var buf [3972]byte; use(buf[:]); return Stackguard() }
+func stack3976() (uintptr, uintptr) { var buf [3976]byte; use(buf[:]); return Stackguard() }
+func stack3980() (uintptr, uintptr) { var buf [3980]byte; use(buf[:]); return Stackguard() }
+func stack3984() (uintptr, uintptr) { var buf [3984]byte; use(buf[:]); return Stackguard() }
+func stack3988() (uintptr, uintptr) { var buf [3988]byte; use(buf[:]); return Stackguard() }
+func stack3992() (uintptr, uintptr) { var buf [3992]byte; use(buf[:]); return Stackguard() }
+func stack3996() (uintptr, uintptr) { var buf [3996]byte; use(buf[:]); return Stackguard() }
+func stack4000() (uintptr, uintptr) { var buf [4000]byte; use(buf[:]); return Stackguard() }
+func stack4004() (uintptr, uintptr) { var buf [4004]byte; use(buf[:]); return Stackguard() }
+func stack4008() (uintptr, uintptr) { var buf [4008]byte; use(buf[:]); return Stackguard() }
+func stack4012() (uintptr, uintptr) { var buf [4012]byte; use(buf[:]); return Stackguard() }
+func stack4016() (uintptr, uintptr) { var buf [4016]byte; use(buf[:]); return Stackguard() }
+func stack4020() (uintptr, uintptr) { var buf [4020]byte; use(buf[:]); return Stackguard() }
+func stack4024() (uintptr, uintptr) { var buf [4024]byte; use(buf[:]); return Stackguard() }
+func stack4028() (uintptr, uintptr) { var buf [4028]byte; use(buf[:]); return Stackguard() }
+func stack4032() (uintptr, uintptr) { var buf [4032]byte; use(buf[:]); return Stackguard() }
+func stack4036() (uintptr, uintptr) { var buf [4036]byte; use(buf[:]); return Stackguard() }
+func stack4040() (uintptr, uintptr) { var buf [4040]byte; use(buf[:]); return Stackguard() }
+func stack4044() (uintptr, uintptr) { var buf [4044]byte; use(buf[:]); return Stackguard() }
+func stack4048() (uintptr, uintptr) { var buf [4048]byte; use(buf[:]); return Stackguard() }
+func stack4052() (uintptr, uintptr) { var buf [4052]byte; use(buf[:]); return Stackguard() }
+func stack4056() (uintptr, uintptr) { var buf [4056]byte; use(buf[:]); return Stackguard() }
+func stack4060() (uintptr, uintptr) { var buf [4060]byte; use(buf[:]); return Stackguard() }
+func stack4064() (uintptr, uintptr) { var buf [4064]byte; use(buf[:]); return Stackguard() }
+func stack4068() (uintptr, uintptr) { var buf [4068]byte; use(buf[:]); return Stackguard() }
+func stack4072() (uintptr, uintptr) { var buf [4072]byte; use(buf[:]); return Stackguard() }
+func stack4076() (uintptr, uintptr) { var buf [4076]byte; use(buf[:]); return Stackguard() }
+func stack4080() (uintptr, uintptr) { var buf [4080]byte; use(buf[:]); return Stackguard() }
+func stack4084() (uintptr, uintptr) { var buf [4084]byte; use(buf[:]); return Stackguard() }
+func stack4088() (uintptr, uintptr) { var buf [4088]byte; use(buf[:]); return Stackguard() }
+func stack4092() (uintptr, uintptr) { var buf [4092]byte; use(buf[:]); return Stackguard() }
+func stack4096() (uintptr, uintptr) { var buf [4096]byte; use(buf[:]); return Stackguard() }
+func stack4100() (uintptr, uintptr) { var buf [4100]byte; use(buf[:]); return Stackguard() }
+func stack4104() (uintptr, uintptr) { var buf [4104]byte; use(buf[:]); return Stackguard() }
+func stack4108() (uintptr, uintptr) { var buf [4108]byte; use(buf[:]); return Stackguard() }
+func stack4112() (uintptr, uintptr) { var buf [4112]byte; use(buf[:]); return Stackguard() }
+func stack4116() (uintptr, uintptr) { var buf [4116]byte; use(buf[:]); return Stackguard() }
+func stack4120() (uintptr, uintptr) { var buf [4120]byte; use(buf[:]); return Stackguard() }
+func stack4124() (uintptr, uintptr) { var buf [4124]byte; use(buf[:]); return Stackguard() }
+func stack4128() (uintptr, uintptr) { var buf [4128]byte; use(buf[:]); return Stackguard() }
+func stack4132() (uintptr, uintptr) { var buf [4132]byte; use(buf[:]); return Stackguard() }
+func stack4136() (uintptr, uintptr) { var buf [4136]byte; use(buf[:]); return Stackguard() }
+func stack4140() (uintptr, uintptr) { var buf [4140]byte; use(buf[:]); return Stackguard() }
+func stack4144() (uintptr, uintptr) { var buf [4144]byte; use(buf[:]); return Stackguard() }
+func stack4148() (uintptr, uintptr) { var buf [4148]byte; use(buf[:]); return Stackguard() }
+func stack4152() (uintptr, uintptr) { var buf [4152]byte; use(buf[:]); return Stackguard() }
+func stack4156() (uintptr, uintptr) { var buf [4156]byte; use(buf[:]); return Stackguard() }
+func stack4160() (uintptr, uintptr) { var buf [4160]byte; use(buf[:]); return Stackguard() }
+func stack4164() (uintptr, uintptr) { var buf [4164]byte; use(buf[:]); return Stackguard() }
+func stack4168() (uintptr, uintptr) { var buf [4168]byte; use(buf[:]); return Stackguard() }
+func stack4172() (uintptr, uintptr) { var buf [4172]byte; use(buf[:]); return Stackguard() }
+func stack4176() (uintptr, uintptr) { var buf [4176]byte; use(buf[:]); return Stackguard() }
+func stack4180() (uintptr, uintptr) { var buf [4180]byte; use(buf[:]); return Stackguard() }
+func stack4184() (uintptr, uintptr) { var buf [4184]byte; use(buf[:]); return Stackguard() }
+func stack4188() (uintptr, uintptr) { var buf [4188]byte; use(buf[:]); return Stackguard() }
+func stack4192() (uintptr, uintptr) { var buf [4192]byte; use(buf[:]); return Stackguard() }
+func stack4196() (uintptr, uintptr) { var buf [4196]byte; use(buf[:]); return Stackguard() }
+func stack4200() (uintptr, uintptr) { var buf [4200]byte; use(buf[:]); return Stackguard() }
+func stack4204() (uintptr, uintptr) { var buf [4204]byte; use(buf[:]); return Stackguard() }
+func stack4208() (uintptr, uintptr) { var buf [4208]byte; use(buf[:]); return Stackguard() }
+func stack4212() (uintptr, uintptr) { var buf [4212]byte; use(buf[:]); return Stackguard() }
+func stack4216() (uintptr, uintptr) { var buf [4216]byte; use(buf[:]); return Stackguard() }
+func stack4220() (uintptr, uintptr) { var buf [4220]byte; use(buf[:]); return Stackguard() }
+func stack4224() (uintptr, uintptr) { var buf [4224]byte; use(buf[:]); return Stackguard() }
+func stack4228() (uintptr, uintptr) { var buf [4228]byte; use(buf[:]); return Stackguard() }
+func stack4232() (uintptr, uintptr) { var buf [4232]byte; use(buf[:]); return Stackguard() }
+func stack4236() (uintptr, uintptr) { var buf [4236]byte; use(buf[:]); return Stackguard() }
+func stack4240() (uintptr, uintptr) { var buf [4240]byte; use(buf[:]); return Stackguard() }
+func stack4244() (uintptr, uintptr) { var buf [4244]byte; use(buf[:]); return Stackguard() }
+func stack4248() (uintptr, uintptr) { var buf [4248]byte; use(buf[:]); return Stackguard() }
+func stack4252() (uintptr, uintptr) { var buf [4252]byte; use(buf[:]); return Stackguard() }
+func stack4256() (uintptr, uintptr) { var buf [4256]byte; use(buf[:]); return Stackguard() }
+func stack4260() (uintptr, uintptr) { var buf [4260]byte; use(buf[:]); return Stackguard() }
+func stack4264() (uintptr, uintptr) { var buf [4264]byte; use(buf[:]); return Stackguard() }
+func stack4268() (uintptr, uintptr) { var buf [4268]byte; use(buf[:]); return Stackguard() }
+func stack4272() (uintptr, uintptr) { var buf [4272]byte; use(buf[:]); return Stackguard() }
+func stack4276() (uintptr, uintptr) { var buf [4276]byte; use(buf[:]); return Stackguard() }
+func stack4280() (uintptr, uintptr) { var buf [4280]byte; use(buf[:]); return Stackguard() }
+func stack4284() (uintptr, uintptr) { var buf [4284]byte; use(buf[:]); return Stackguard() }
+func stack4288() (uintptr, uintptr) { var buf [4288]byte; use(buf[:]); return Stackguard() }
+func stack4292() (uintptr, uintptr) { var buf [4292]byte; use(buf[:]); return Stackguard() }
+func stack4296() (uintptr, uintptr) { var buf [4296]byte; use(buf[:]); return Stackguard() }
+func stack4300() (uintptr, uintptr) { var buf [4300]byte; use(buf[:]); return Stackguard() }
+func stack4304() (uintptr, uintptr) { var buf [4304]byte; use(buf[:]); return Stackguard() }
+func stack4308() (uintptr, uintptr) { var buf [4308]byte; use(buf[:]); return Stackguard() }
+func stack4312() (uintptr, uintptr) { var buf [4312]byte; use(buf[:]); return Stackguard() }
+func stack4316() (uintptr, uintptr) { var buf [4316]byte; use(buf[:]); return Stackguard() }
+func stack4320() (uintptr, uintptr) { var buf [4320]byte; use(buf[:]); return Stackguard() }
+func stack4324() (uintptr, uintptr) { var buf [4324]byte; use(buf[:]); return Stackguard() }
+func stack4328() (uintptr, uintptr) { var buf [4328]byte; use(buf[:]); return Stackguard() }
+func stack4332() (uintptr, uintptr) { var buf [4332]byte; use(buf[:]); return Stackguard() }
+func stack4336() (uintptr, uintptr) { var buf [4336]byte; use(buf[:]); return Stackguard() }
+func stack4340() (uintptr, uintptr) { var buf [4340]byte; use(buf[:]); return Stackguard() }
+func stack4344() (uintptr, uintptr) { var buf [4344]byte; use(buf[:]); return Stackguard() }
+func stack4348() (uintptr, uintptr) { var buf [4348]byte; use(buf[:]); return Stackguard() }
+func stack4352() (uintptr, uintptr) { var buf [4352]byte; use(buf[:]); return Stackguard() }
+func stack4356() (uintptr, uintptr) { var buf [4356]byte; use(buf[:]); return Stackguard() }
+func stack4360() (uintptr, uintptr) { var buf [4360]byte; use(buf[:]); return Stackguard() }
+func stack4364() (uintptr, uintptr) { var buf [4364]byte; use(buf[:]); return Stackguard() }
+func stack4368() (uintptr, uintptr) { var buf [4368]byte; use(buf[:]); return Stackguard() }
+func stack4372() (uintptr, uintptr) { var buf [4372]byte; use(buf[:]); return Stackguard() }
+func stack4376() (uintptr, uintptr) { var buf [4376]byte; use(buf[:]); return Stackguard() }
+func stack4380() (uintptr, uintptr) { var buf [4380]byte; use(buf[:]); return Stackguard() }
+func stack4384() (uintptr, uintptr) { var buf [4384]byte; use(buf[:]); return Stackguard() }
+func stack4388() (uintptr, uintptr) { var buf [4388]byte; use(buf[:]); return Stackguard() }
+func stack4392() (uintptr, uintptr) { var buf [4392]byte; use(buf[:]); return Stackguard() }
+func stack4396() (uintptr, uintptr) { var buf [4396]byte; use(buf[:]); return Stackguard() }
+func stack4400() (uintptr, uintptr) { var buf [4400]byte; use(buf[:]); return Stackguard() }
+func stack4404() (uintptr, uintptr) { var buf [4404]byte; use(buf[:]); return Stackguard() }
+func stack4408() (uintptr, uintptr) { var buf [4408]byte; use(buf[:]); return Stackguard() }
+func stack4412() (uintptr, uintptr) { var buf [4412]byte; use(buf[:]); return Stackguard() }
+func stack4416() (uintptr, uintptr) { var buf [4416]byte; use(buf[:]); return Stackguard() }
+func stack4420() (uintptr, uintptr) { var buf [4420]byte; use(buf[:]); return Stackguard() }
+func stack4424() (uintptr, uintptr) { var buf [4424]byte; use(buf[:]); return Stackguard() }
+func stack4428() (uintptr, uintptr) { var buf [4428]byte; use(buf[:]); return Stackguard() }
+func stack4432() (uintptr, uintptr) { var buf [4432]byte; use(buf[:]); return Stackguard() }
+func stack4436() (uintptr, uintptr) { var buf [4436]byte; use(buf[:]); return Stackguard() }
+func stack4440() (uintptr, uintptr) { var buf [4440]byte; use(buf[:]); return Stackguard() }
+func stack4444() (uintptr, uintptr) { var buf [4444]byte; use(buf[:]); return Stackguard() }
+func stack4448() (uintptr, uintptr) { var buf [4448]byte; use(buf[:]); return Stackguard() }
+func stack4452() (uintptr, uintptr) { var buf [4452]byte; use(buf[:]); return Stackguard() }
+func stack4456() (uintptr, uintptr) { var buf [4456]byte; use(buf[:]); return Stackguard() }
+func stack4460() (uintptr, uintptr) { var buf [4460]byte; use(buf[:]); return Stackguard() }
+func stack4464() (uintptr, uintptr) { var buf [4464]byte; use(buf[:]); return Stackguard() }
+func stack4468() (uintptr, uintptr) { var buf [4468]byte; use(buf[:]); return Stackguard() }
+func stack4472() (uintptr, uintptr) { var buf [4472]byte; use(buf[:]); return Stackguard() }
+func stack4476() (uintptr, uintptr) { var buf [4476]byte; use(buf[:]); return Stackguard() }
+func stack4480() (uintptr, uintptr) { var buf [4480]byte; use(buf[:]); return Stackguard() }
+func stack4484() (uintptr, uintptr) { var buf [4484]byte; use(buf[:]); return Stackguard() }
+func stack4488() (uintptr, uintptr) { var buf [4488]byte; use(buf[:]); return Stackguard() }
+func stack4492() (uintptr, uintptr) { var buf [4492]byte; use(buf[:]); return Stackguard() }
+func stack4496() (uintptr, uintptr) { var buf [4496]byte; use(buf[:]); return Stackguard() }
+func stack4500() (uintptr, uintptr) { var buf [4500]byte; use(buf[:]); return Stackguard() }
+func stack4504() (uintptr, uintptr) { var buf [4504]byte; use(buf[:]); return Stackguard() }
+func stack4508() (uintptr, uintptr) { var buf [4508]byte; use(buf[:]); return Stackguard() }
+func stack4512() (uintptr, uintptr) { var buf [4512]byte; use(buf[:]); return Stackguard() }
+func stack4516() (uintptr, uintptr) { var buf [4516]byte; use(buf[:]); return Stackguard() }
+func stack4520() (uintptr, uintptr) { var buf [4520]byte; use(buf[:]); return Stackguard() }
+func stack4524() (uintptr, uintptr) { var buf [4524]byte; use(buf[:]); return Stackguard() }
+func stack4528() (uintptr, uintptr) { var buf [4528]byte; use(buf[:]); return Stackguard() }
+func stack4532() (uintptr, uintptr) { var buf [4532]byte; use(buf[:]); return Stackguard() }
+func stack4536() (uintptr, uintptr) { var buf [4536]byte; use(buf[:]); return Stackguard() }
+func stack4540() (uintptr, uintptr) { var buf [4540]byte; use(buf[:]); return Stackguard() }
+func stack4544() (uintptr, uintptr) { var buf [4544]byte; use(buf[:]); return Stackguard() }
+func stack4548() (uintptr, uintptr) { var buf [4548]byte; use(buf[:]); return Stackguard() }
+func stack4552() (uintptr, uintptr) { var buf [4552]byte; use(buf[:]); return Stackguard() }
+func stack4556() (uintptr, uintptr) { var buf [4556]byte; use(buf[:]); return Stackguard() }
+func stack4560() (uintptr, uintptr) { var buf [4560]byte; use(buf[:]); return Stackguard() }
+func stack4564() (uintptr, uintptr) { var buf [4564]byte; use(buf[:]); return Stackguard() }
+func stack4568() (uintptr, uintptr) { var buf [4568]byte; use(buf[:]); return Stackguard() }
+func stack4572() (uintptr, uintptr) { var buf [4572]byte; use(buf[:]); return Stackguard() }
+func stack4576() (uintptr, uintptr) { var buf [4576]byte; use(buf[:]); return Stackguard() }
+func stack4580() (uintptr, uintptr) { var buf [4580]byte; use(buf[:]); return Stackguard() }
+func stack4584() (uintptr, uintptr) { var buf [4584]byte; use(buf[:]); return Stackguard() }
+func stack4588() (uintptr, uintptr) { var buf [4588]byte; use(buf[:]); return Stackguard() }
+func stack4592() (uintptr, uintptr) { var buf [4592]byte; use(buf[:]); return Stackguard() }
+func stack4596() (uintptr, uintptr) { var buf [4596]byte; use(buf[:]); return Stackguard() }
+func stack4600() (uintptr, uintptr) { var buf [4600]byte; use(buf[:]); return Stackguard() }
+func stack4604() (uintptr, uintptr) { var buf [4604]byte; use(buf[:]); return Stackguard() }
+func stack4608() (uintptr, uintptr) { var buf [4608]byte; use(buf[:]); return Stackguard() }
+func stack4612() (uintptr, uintptr) { var buf [4612]byte; use(buf[:]); return Stackguard() }
+func stack4616() (uintptr, uintptr) { var buf [4616]byte; use(buf[:]); return Stackguard() }
+func stack4620() (uintptr, uintptr) { var buf [4620]byte; use(buf[:]); return Stackguard() }
+func stack4624() (uintptr, uintptr) { var buf [4624]byte; use(buf[:]); return Stackguard() }
+func stack4628() (uintptr, uintptr) { var buf [4628]byte; use(buf[:]); return Stackguard() }
+func stack4632() (uintptr, uintptr) { var buf [4632]byte; use(buf[:]); return Stackguard() }
+func stack4636() (uintptr, uintptr) { var buf [4636]byte; use(buf[:]); return Stackguard() }
+func stack4640() (uintptr, uintptr) { var buf [4640]byte; use(buf[:]); return Stackguard() }
+func stack4644() (uintptr, uintptr) { var buf [4644]byte; use(buf[:]); return Stackguard() }
+func stack4648() (uintptr, uintptr) { var buf [4648]byte; use(buf[:]); return Stackguard() }
+func stack4652() (uintptr, uintptr) { var buf [4652]byte; use(buf[:]); return Stackguard() }
+func stack4656() (uintptr, uintptr) { var buf [4656]byte; use(buf[:]); return Stackguard() }
+func stack4660() (uintptr, uintptr) { var buf [4660]byte; use(buf[:]); return Stackguard() }
+func stack4664() (uintptr, uintptr) { var buf [4664]byte; use(buf[:]); return Stackguard() }
+func stack4668() (uintptr, uintptr) { var buf [4668]byte; use(buf[:]); return Stackguard() }
+func stack4672() (uintptr, uintptr) { var buf [4672]byte; use(buf[:]); return Stackguard() }
+func stack4676() (uintptr, uintptr) { var buf [4676]byte; use(buf[:]); return Stackguard() }
+func stack4680() (uintptr, uintptr) { var buf [4680]byte; use(buf[:]); return Stackguard() }
+func stack4684() (uintptr, uintptr) { var buf [4684]byte; use(buf[:]); return Stackguard() }
+func stack4688() (uintptr, uintptr) { var buf [4688]byte; use(buf[:]); return Stackguard() }
+func stack4692() (uintptr, uintptr) { var buf [4692]byte; use(buf[:]); return Stackguard() }
+func stack4696() (uintptr, uintptr) { var buf [4696]byte; use(buf[:]); return Stackguard() }
+func stack4700() (uintptr, uintptr) { var buf [4700]byte; use(buf[:]); return Stackguard() }
+func stack4704() (uintptr, uintptr) { var buf [4704]byte; use(buf[:]); return Stackguard() }
+func stack4708() (uintptr, uintptr) { var buf [4708]byte; use(buf[:]); return Stackguard() }
+func stack4712() (uintptr, uintptr) { var buf [4712]byte; use(buf[:]); return Stackguard() }
+func stack4716() (uintptr, uintptr) { var buf [4716]byte; use(buf[:]); return Stackguard() }
+func stack4720() (uintptr, uintptr) { var buf [4720]byte; use(buf[:]); return Stackguard() }
+func stack4724() (uintptr, uintptr) { var buf [4724]byte; use(buf[:]); return Stackguard() }
+func stack4728() (uintptr, uintptr) { var buf [4728]byte; use(buf[:]); return Stackguard() }
+func stack4732() (uintptr, uintptr) { var buf [4732]byte; use(buf[:]); return Stackguard() }
+func stack4736() (uintptr, uintptr) { var buf [4736]byte; use(buf[:]); return Stackguard() }
+func stack4740() (uintptr, uintptr) { var buf [4740]byte; use(buf[:]); return Stackguard() }
+func stack4744() (uintptr, uintptr) { var buf [4744]byte; use(buf[:]); return Stackguard() }
+func stack4748() (uintptr, uintptr) { var buf [4748]byte; use(buf[:]); return Stackguard() }
+func stack4752() (uintptr, uintptr) { var buf [4752]byte; use(buf[:]); return Stackguard() }
+func stack4756() (uintptr, uintptr) { var buf [4756]byte; use(buf[:]); return Stackguard() }
+func stack4760() (uintptr, uintptr) { var buf [4760]byte; use(buf[:]); return Stackguard() }
+func stack4764() (uintptr, uintptr) { var buf [4764]byte; use(buf[:]); return Stackguard() }
+func stack4768() (uintptr, uintptr) { var buf [4768]byte; use(buf[:]); return Stackguard() }
+func stack4772() (uintptr, uintptr) { var buf [4772]byte; use(buf[:]); return Stackguard() }
+func stack4776() (uintptr, uintptr) { var buf [4776]byte; use(buf[:]); return Stackguard() }
+func stack4780() (uintptr, uintptr) { var buf [4780]byte; use(buf[:]); return Stackguard() }
+func stack4784() (uintptr, uintptr) { var buf [4784]byte; use(buf[:]); return Stackguard() }
+func stack4788() (uintptr, uintptr) { var buf [4788]byte; use(buf[:]); return Stackguard() }
+func stack4792() (uintptr, uintptr) { var buf [4792]byte; use(buf[:]); return Stackguard() }
+func stack4796() (uintptr, uintptr) { var buf [4796]byte; use(buf[:]); return Stackguard() }
+func stack4800() (uintptr, uintptr) { var buf [4800]byte; use(buf[:]); return Stackguard() }
+func stack4804() (uintptr, uintptr) { var buf [4804]byte; use(buf[:]); return Stackguard() }
+func stack4808() (uintptr, uintptr) { var buf [4808]byte; use(buf[:]); return Stackguard() }
+func stack4812() (uintptr, uintptr) { var buf [4812]byte; use(buf[:]); return Stackguard() }
+func stack4816() (uintptr, uintptr) { var buf [4816]byte; use(buf[:]); return Stackguard() }
+func stack4820() (uintptr, uintptr) { var buf [4820]byte; use(buf[:]); return Stackguard() }
+func stack4824() (uintptr, uintptr) { var buf [4824]byte; use(buf[:]); return Stackguard() }
+func stack4828() (uintptr, uintptr) { var buf [4828]byte; use(buf[:]); return Stackguard() }
+func stack4832() (uintptr, uintptr) { var buf [4832]byte; use(buf[:]); return Stackguard() }
+func stack4836() (uintptr, uintptr) { var buf [4836]byte; use(buf[:]); return Stackguard() }
+func stack4840() (uintptr, uintptr) { var buf [4840]byte; use(buf[:]); return Stackguard() }
+func stack4844() (uintptr, uintptr) { var buf [4844]byte; use(buf[:]); return Stackguard() }
+func stack4848() (uintptr, uintptr) { var buf [4848]byte; use(buf[:]); return Stackguard() }
+func stack4852() (uintptr, uintptr) { var buf [4852]byte; use(buf[:]); return Stackguard() }
+func stack4856() (uintptr, uintptr) { var buf [4856]byte; use(buf[:]); return Stackguard() }
+func stack4860() (uintptr, uintptr) { var buf [4860]byte; use(buf[:]); return Stackguard() }
+func stack4864() (uintptr, uintptr) { var buf [4864]byte; use(buf[:]); return Stackguard() }
+func stack4868() (uintptr, uintptr) { var buf [4868]byte; use(buf[:]); return Stackguard() }
+func stack4872() (uintptr, uintptr) { var buf [4872]byte; use(buf[:]); return Stackguard() }
+func stack4876() (uintptr, uintptr) { var buf [4876]byte; use(buf[:]); return Stackguard() }
+func stack4880() (uintptr, uintptr) { var buf [4880]byte; use(buf[:]); return Stackguard() }
+func stack4884() (uintptr, uintptr) { var buf [4884]byte; use(buf[:]); return Stackguard() }
+func stack4888() (uintptr, uintptr) { var buf [4888]byte; use(buf[:]); return Stackguard() }
+func stack4892() (uintptr, uintptr) { var buf [4892]byte; use(buf[:]); return Stackguard() }
+func stack4896() (uintptr, uintptr) { var buf [4896]byte; use(buf[:]); return Stackguard() }
+func stack4900() (uintptr, uintptr) { var buf [4900]byte; use(buf[:]); return Stackguard() }
+func stack4904() (uintptr, uintptr) { var buf [4904]byte; use(buf[:]); return Stackguard() }
+func stack4908() (uintptr, uintptr) { var buf [4908]byte; use(buf[:]); return Stackguard() }
+func stack4912() (uintptr, uintptr) { var buf [4912]byte; use(buf[:]); return Stackguard() }
+func stack4916() (uintptr, uintptr) { var buf [4916]byte; use(buf[:]); return Stackguard() }
+func stack4920() (uintptr, uintptr) { var buf [4920]byte; use(buf[:]); return Stackguard() }
+func stack4924() (uintptr, uintptr) { var buf [4924]byte; use(buf[:]); return Stackguard() }
+func stack4928() (uintptr, uintptr) { var buf [4928]byte; use(buf[:]); return Stackguard() }
+func stack4932() (uintptr, uintptr) { var buf [4932]byte; use(buf[:]); return Stackguard() }
+func stack4936() (uintptr, uintptr) { var buf [4936]byte; use(buf[:]); return Stackguard() }
+func stack4940() (uintptr, uintptr) { var buf [4940]byte; use(buf[:]); return Stackguard() }
+func stack4944() (uintptr, uintptr) { var buf [4944]byte; use(buf[:]); return Stackguard() }
+func stack4948() (uintptr, uintptr) { var buf [4948]byte; use(buf[:]); return Stackguard() }
+func stack4952() (uintptr, uintptr) { var buf [4952]byte; use(buf[:]); return Stackguard() }
+func stack4956() (uintptr, uintptr) { var buf [4956]byte; use(buf[:]); return Stackguard() }
+func stack4960() (uintptr, uintptr) { var buf [4960]byte; use(buf[:]); return Stackguard() }
+func stack4964() (uintptr, uintptr) { var buf [4964]byte; use(buf[:]); return Stackguard() }
+func stack4968() (uintptr, uintptr) { var buf [4968]byte; use(buf[:]); return Stackguard() }
+func stack4972() (uintptr, uintptr) { var buf [4972]byte; use(buf[:]); return Stackguard() }
+func stack4976() (uintptr, uintptr) { var buf [4976]byte; use(buf[:]); return Stackguard() }
+func stack4980() (uintptr, uintptr) { var buf [4980]byte; use(buf[:]); return Stackguard() }
+func stack4984() (uintptr, uintptr) { var buf [4984]byte; use(buf[:]); return Stackguard() }
+func stack4988() (uintptr, uintptr) { var buf [4988]byte; use(buf[:]); return Stackguard() }
+func stack4992() (uintptr, uintptr) { var buf [4992]byte; use(buf[:]); return Stackguard() }
+func stack4996() (uintptr, uintptr) { var buf [4996]byte; use(buf[:]); return Stackguard() }
+func stack5000() (uintptr, uintptr) { var buf [5000]byte; use(buf[:]); return Stackguard() }
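
Every generated function above follows one pattern: reserve a frame of the given size, keep the buffer live so it cannot be optimized away, and report the stack pointer and stack guard through the runtime's exported test hook. Expanded with comments, the first function in this block reads (an annotated restatement of the generated shape, not an addition to the patch):

	// stack4472 reserves a 4472-byte stack frame, keeps it live so the
	// compiler cannot elide the allocation, and reports SP and the
	// stack guard via the Stackguard hook the runtime exports for tests.
	func stack4472() (uintptr, uintptr) {
		var buf [4472]byte // frame size under test
		use(buf[:])        // defeat dead-store elimination
		return Stackguard()
	}
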
diff --git a/src/pkg/runtime/stack_test.go b/src/pkg/runtime/stack_test.go
index 00c2d0e06..f0c599ac5 100644
--- a/src/pkg/runtime/stack_test.go
+++ b/src/pkg/runtime/stack_test.go
@@ -2,6 +2,22 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package runtime_test
+
+import (
+ . "runtime"
+ "sync"
+ "testing"
+ "time"
+ "unsafe"
+)
+
+// See stack.h.
+const (
+ StackGuard = 256
+ StackLimit = 128
+)
+
// Test stack split logic by calling functions of every frame size
// from near 0 up to and beyond the default segment size (4k).
// Each of those functions reports its SP + stack limit, and then
@@ -28,22 +44,6 @@
// stack_test.go:22: after runtime_test.stack3860: sp=0x7f7818d5d048 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3864: sp=0x7f7818d5d048 < limit=0x7f7818d5d080
// FAIL
-
-package runtime_test
-
-import (
- . "runtime"
- "testing"
- "time"
- "unsafe"
-)
-
-// See stack.h.
-const (
- StackGuard = 256
- StackLimit = 128
-)
-
func TestStackSplit(t *testing.T) {
for _, f := range splitTests {
sp, guard := f()
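
The loop body elided between these hunks is the assertion the comment above describes. Consistent with the quoted FAIL lines, it has roughly this shape; this is only a sketch: the derivation of the limit from the guard is assumed here, and funcName stands in for however the file resolves f's name:

	if limit := guard + StackLimit; sp < limit { // assumed relation; exact expression is in the elided body
		t.Errorf("after %s: sp=%#x < limit=%#x", funcName(f), sp, limit)
	}
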
@@ -56,218 +56,6 @@ func TestStackSplit(t *testing.T) {
}
}
-var splitTests = []func() (uintptr, uintptr){
- // Edit .+1,/^}/-1|seq 4 4 5000 | sed 's/.*/ stack&,/' | fmt
- stack4, stack8, stack12, stack16, stack20, stack24, stack28,
- stack32, stack36, stack40, stack44, stack48, stack52, stack56,
- stack60, stack64, stack68, stack72, stack76, stack80, stack84,
- stack88, stack92, stack96, stack100, stack104, stack108, stack112,
- stack116, stack120, stack124, stack128, stack132, stack136,
- stack140, stack144, stack148, stack152, stack156, stack160,
- stack164, stack168, stack172, stack176, stack180, stack184,
- stack188, stack192, stack196, stack200, stack204, stack208,
- stack212, stack216, stack220, stack224, stack228, stack232,
- stack236, stack240, stack244, stack248, stack252, stack256,
- stack260, stack264, stack268, stack272, stack276, stack280,
- stack284, stack288, stack292, stack296, stack300, stack304,
- stack308, stack312, stack316, stack320, stack324, stack328,
- stack332, stack336, stack340, stack344, stack348, stack352,
- stack356, stack360, stack364, stack368, stack372, stack376,
- stack380, stack384, stack388, stack392, stack396, stack400,
- stack404, stack408, stack412, stack416, stack420, stack424,
- stack428, stack432, stack436, stack440, stack444, stack448,
- stack452, stack456, stack460, stack464, stack468, stack472,
- stack476, stack480, stack484, stack488, stack492, stack496,
- stack500, stack504, stack508, stack512, stack516, stack520,
- stack524, stack528, stack532, stack536, stack540, stack544,
- stack548, stack552, stack556, stack560, stack564, stack568,
- stack572, stack576, stack580, stack584, stack588, stack592,
- stack596, stack600, stack604, stack608, stack612, stack616,
- stack620, stack624, stack628, stack632, stack636, stack640,
- stack644, stack648, stack652, stack656, stack660, stack664,
- stack668, stack672, stack676, stack680, stack684, stack688,
- stack692, stack696, stack700, stack704, stack708, stack712,
- stack716, stack720, stack724, stack728, stack732, stack736,
- stack740, stack744, stack748, stack752, stack756, stack760,
- stack764, stack768, stack772, stack776, stack780, stack784,
- stack788, stack792, stack796, stack800, stack804, stack808,
- stack812, stack816, stack820, stack824, stack828, stack832,
- stack836, stack840, stack844, stack848, stack852, stack856,
- stack860, stack864, stack868, stack872, stack876, stack880,
- stack884, stack888, stack892, stack896, stack900, stack904,
- stack908, stack912, stack916, stack920, stack924, stack928,
- stack932, stack936, stack940, stack944, stack948, stack952,
- stack956, stack960, stack964, stack968, stack972, stack976,
- stack980, stack984, stack988, stack992, stack996, stack1000,
- stack1004, stack1008, stack1012, stack1016, stack1020, stack1024,
- stack1028, stack1032, stack1036, stack1040, stack1044, stack1048,
- stack1052, stack1056, stack1060, stack1064, stack1068, stack1072,
- stack1076, stack1080, stack1084, stack1088, stack1092, stack1096,
- stack1100, stack1104, stack1108, stack1112, stack1116, stack1120,
- stack1124, stack1128, stack1132, stack1136, stack1140, stack1144,
- stack1148, stack1152, stack1156, stack1160, stack1164, stack1168,
- stack1172, stack1176, stack1180, stack1184, stack1188, stack1192,
- stack1196, stack1200, stack1204, stack1208, stack1212, stack1216,
- stack1220, stack1224, stack1228, stack1232, stack1236, stack1240,
- stack1244, stack1248, stack1252, stack1256, stack1260, stack1264,
- stack1268, stack1272, stack1276, stack1280, stack1284, stack1288,
- stack1292, stack1296, stack1300, stack1304, stack1308, stack1312,
- stack1316, stack1320, stack1324, stack1328, stack1332, stack1336,
- stack1340, stack1344, stack1348, stack1352, stack1356, stack1360,
- stack1364, stack1368, stack1372, stack1376, stack1380, stack1384,
- stack1388, stack1392, stack1396, stack1400, stack1404, stack1408,
- stack1412, stack1416, stack1420, stack1424, stack1428, stack1432,
- stack1436, stack1440, stack1444, stack1448, stack1452, stack1456,
- stack1460, stack1464, stack1468, stack1472, stack1476, stack1480,
- stack1484, stack1488, stack1492, stack1496, stack1500, stack1504,
- stack1508, stack1512, stack1516, stack1520, stack1524, stack1528,
- stack1532, stack1536, stack1540, stack1544, stack1548, stack1552,
- stack1556, stack1560, stack1564, stack1568, stack1572, stack1576,
- stack1580, stack1584, stack1588, stack1592, stack1596, stack1600,
- stack1604, stack1608, stack1612, stack1616, stack1620, stack1624,
- stack1628, stack1632, stack1636, stack1640, stack1644, stack1648,
- stack1652, stack1656, stack1660, stack1664, stack1668, stack1672,
- stack1676, stack1680, stack1684, stack1688, stack1692, stack1696,
- stack1700, stack1704, stack1708, stack1712, stack1716, stack1720,
- stack1724, stack1728, stack1732, stack1736, stack1740, stack1744,
- stack1748, stack1752, stack1756, stack1760, stack1764, stack1768,
- stack1772, stack1776, stack1780, stack1784, stack1788, stack1792,
- stack1796, stack1800, stack1804, stack1808, stack1812, stack1816,
- stack1820, stack1824, stack1828, stack1832, stack1836, stack1840,
- stack1844, stack1848, stack1852, stack1856, stack1860, stack1864,
- stack1868, stack1872, stack1876, stack1880, stack1884, stack1888,
- stack1892, stack1896, stack1900, stack1904, stack1908, stack1912,
- stack1916, stack1920, stack1924, stack1928, stack1932, stack1936,
- stack1940, stack1944, stack1948, stack1952, stack1956, stack1960,
- stack1964, stack1968, stack1972, stack1976, stack1980, stack1984,
- stack1988, stack1992, stack1996, stack2000, stack2004, stack2008,
- stack2012, stack2016, stack2020, stack2024, stack2028, stack2032,
- stack2036, stack2040, stack2044, stack2048, stack2052, stack2056,
- stack2060, stack2064, stack2068, stack2072, stack2076, stack2080,
- stack2084, stack2088, stack2092, stack2096, stack2100, stack2104,
- stack2108, stack2112, stack2116, stack2120, stack2124, stack2128,
- stack2132, stack2136, stack2140, stack2144, stack2148, stack2152,
- stack2156, stack2160, stack2164, stack2168, stack2172, stack2176,
- stack2180, stack2184, stack2188, stack2192, stack2196, stack2200,
- stack2204, stack2208, stack2212, stack2216, stack2220, stack2224,
- stack2228, stack2232, stack2236, stack2240, stack2244, stack2248,
- stack2252, stack2256, stack2260, stack2264, stack2268, stack2272,
- stack2276, stack2280, stack2284, stack2288, stack2292, stack2296,
- stack2300, stack2304, stack2308, stack2312, stack2316, stack2320,
- stack2324, stack2328, stack2332, stack2336, stack2340, stack2344,
- stack2348, stack2352, stack2356, stack2360, stack2364, stack2368,
- stack2372, stack2376, stack2380, stack2384, stack2388, stack2392,
- stack2396, stack2400, stack2404, stack2408, stack2412, stack2416,
- stack2420, stack2424, stack2428, stack2432, stack2436, stack2440,
- stack2444, stack2448, stack2452, stack2456, stack2460, stack2464,
- stack2468, stack2472, stack2476, stack2480, stack2484, stack2488,
- stack2492, stack2496, stack2500, stack2504, stack2508, stack2512,
- stack2516, stack2520, stack2524, stack2528, stack2532, stack2536,
- stack2540, stack2544, stack2548, stack2552, stack2556, stack2560,
- stack2564, stack2568, stack2572, stack2576, stack2580, stack2584,
- stack2588, stack2592, stack2596, stack2600, stack2604, stack2608,
- stack2612, stack2616, stack2620, stack2624, stack2628, stack2632,
- stack2636, stack2640, stack2644, stack2648, stack2652, stack2656,
- stack2660, stack2664, stack2668, stack2672, stack2676, stack2680,
- stack2684, stack2688, stack2692, stack2696, stack2700, stack2704,
- stack2708, stack2712, stack2716, stack2720, stack2724, stack2728,
- stack2732, stack2736, stack2740, stack2744, stack2748, stack2752,
- stack2756, stack2760, stack2764, stack2768, stack2772, stack2776,
- stack2780, stack2784, stack2788, stack2792, stack2796, stack2800,
- stack2804, stack2808, stack2812, stack2816, stack2820, stack2824,
- stack2828, stack2832, stack2836, stack2840, stack2844, stack2848,
- stack2852, stack2856, stack2860, stack2864, stack2868, stack2872,
- stack2876, stack2880, stack2884, stack2888, stack2892, stack2896,
- stack2900, stack2904, stack2908, stack2912, stack2916, stack2920,
- stack2924, stack2928, stack2932, stack2936, stack2940, stack2944,
- stack2948, stack2952, stack2956, stack2960, stack2964, stack2968,
- stack2972, stack2976, stack2980, stack2984, stack2988, stack2992,
- stack2996, stack3000, stack3004, stack3008, stack3012, stack3016,
- stack3020, stack3024, stack3028, stack3032, stack3036, stack3040,
- stack3044, stack3048, stack3052, stack3056, stack3060, stack3064,
- stack3068, stack3072, stack3076, stack3080, stack3084, stack3088,
- stack3092, stack3096, stack3100, stack3104, stack3108, stack3112,
- stack3116, stack3120, stack3124, stack3128, stack3132, stack3136,
- stack3140, stack3144, stack3148, stack3152, stack3156, stack3160,
- stack3164, stack3168, stack3172, stack3176, stack3180, stack3184,
- stack3188, stack3192, stack3196, stack3200, stack3204, stack3208,
- stack3212, stack3216, stack3220, stack3224, stack3228, stack3232,
- stack3236, stack3240, stack3244, stack3248, stack3252, stack3256,
- stack3260, stack3264, stack3268, stack3272, stack3276, stack3280,
- stack3284, stack3288, stack3292, stack3296, stack3300, stack3304,
- stack3308, stack3312, stack3316, stack3320, stack3324, stack3328,
- stack3332, stack3336, stack3340, stack3344, stack3348, stack3352,
- stack3356, stack3360, stack3364, stack3368, stack3372, stack3376,
- stack3380, stack3384, stack3388, stack3392, stack3396, stack3400,
- stack3404, stack3408, stack3412, stack3416, stack3420, stack3424,
- stack3428, stack3432, stack3436, stack3440, stack3444, stack3448,
- stack3452, stack3456, stack3460, stack3464, stack3468, stack3472,
- stack3476, stack3480, stack3484, stack3488, stack3492, stack3496,
- stack3500, stack3504, stack3508, stack3512, stack3516, stack3520,
- stack3524, stack3528, stack3532, stack3536, stack3540, stack3544,
- stack3548, stack3552, stack3556, stack3560, stack3564, stack3568,
- stack3572, stack3576, stack3580, stack3584, stack3588, stack3592,
- stack3596, stack3600, stack3604, stack3608, stack3612, stack3616,
- stack3620, stack3624, stack3628, stack3632, stack3636, stack3640,
- stack3644, stack3648, stack3652, stack3656, stack3660, stack3664,
- stack3668, stack3672, stack3676, stack3680, stack3684, stack3688,
- stack3692, stack3696, stack3700, stack3704, stack3708, stack3712,
- stack3716, stack3720, stack3724, stack3728, stack3732, stack3736,
- stack3740, stack3744, stack3748, stack3752, stack3756, stack3760,
- stack3764, stack3768, stack3772, stack3776, stack3780, stack3784,
- stack3788, stack3792, stack3796, stack3800, stack3804, stack3808,
- stack3812, stack3816, stack3820, stack3824, stack3828, stack3832,
- stack3836, stack3840, stack3844, stack3848, stack3852, stack3856,
- stack3860, stack3864, stack3868, stack3872, stack3876, stack3880,
- stack3884, stack3888, stack3892, stack3896, stack3900, stack3904,
- stack3908, stack3912, stack3916, stack3920, stack3924, stack3928,
- stack3932, stack3936, stack3940, stack3944, stack3948, stack3952,
- stack3956, stack3960, stack3964, stack3968, stack3972, stack3976,
- stack3980, stack3984, stack3988, stack3992, stack3996, stack4000,
- stack4004, stack4008, stack4012, stack4016, stack4020, stack4024,
- stack4028, stack4032, stack4036, stack4040, stack4044, stack4048,
- stack4052, stack4056, stack4060, stack4064, stack4068, stack4072,
- stack4076, stack4080, stack4084, stack4088, stack4092, stack4096,
- stack4100, stack4104, stack4108, stack4112, stack4116, stack4120,
- stack4124, stack4128, stack4132, stack4136, stack4140, stack4144,
- stack4148, stack4152, stack4156, stack4160, stack4164, stack4168,
- stack4172, stack4176, stack4180, stack4184, stack4188, stack4192,
- stack4196, stack4200, stack4204, stack4208, stack4212, stack4216,
- stack4220, stack4224, stack4228, stack4232, stack4236, stack4240,
- stack4244, stack4248, stack4252, stack4256, stack4260, stack4264,
- stack4268, stack4272, stack4276, stack4280, stack4284, stack4288,
- stack4292, stack4296, stack4300, stack4304, stack4308, stack4312,
- stack4316, stack4320, stack4324, stack4328, stack4332, stack4336,
- stack4340, stack4344, stack4348, stack4352, stack4356, stack4360,
- stack4364, stack4368, stack4372, stack4376, stack4380, stack4384,
- stack4388, stack4392, stack4396, stack4400, stack4404, stack4408,
- stack4412, stack4416, stack4420, stack4424, stack4428, stack4432,
- stack4436, stack4440, stack4444, stack4448, stack4452, stack4456,
- stack4460, stack4464, stack4468, stack4472, stack4476, stack4480,
- stack4484, stack4488, stack4492, stack4496, stack4500, stack4504,
- stack4508, stack4512, stack4516, stack4520, stack4524, stack4528,
- stack4532, stack4536, stack4540, stack4544, stack4548, stack4552,
- stack4556, stack4560, stack4564, stack4568, stack4572, stack4576,
- stack4580, stack4584, stack4588, stack4592, stack4596, stack4600,
- stack4604, stack4608, stack4612, stack4616, stack4620, stack4624,
- stack4628, stack4632, stack4636, stack4640, stack4644, stack4648,
- stack4652, stack4656, stack4660, stack4664, stack4668, stack4672,
- stack4676, stack4680, stack4684, stack4688, stack4692, stack4696,
- stack4700, stack4704, stack4708, stack4712, stack4716, stack4720,
- stack4724, stack4728, stack4732, stack4736, stack4740, stack4744,
- stack4748, stack4752, stack4756, stack4760, stack4764, stack4768,
- stack4772, stack4776, stack4780, stack4784, stack4788, stack4792,
- stack4796, stack4800, stack4804, stack4808, stack4812, stack4816,
- stack4820, stack4824, stack4828, stack4832, stack4836, stack4840,
- stack4844, stack4848, stack4852, stack4856, stack4860, stack4864,
- stack4868, stack4872, stack4876, stack4880, stack4884, stack4888,
- stack4892, stack4896, stack4900, stack4904, stack4908, stack4912,
- stack4916, stack4920, stack4924, stack4928, stack4932, stack4936,
- stack4940, stack4944, stack4948, stack4952, stack4956, stack4960,
- stack4964, stack4968, stack4972, stack4976, stack4980, stack4984,
- stack4988, stack4992, stack4996, stack5000,
-}
-
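
Both deleted blocks are machine-generated; the comments above and below record the editor Edit commands (seq 4 4 5000 piped through sed, the first also through fmt) that produce them. A standalone Go sketch of the same generators, for readers without those tools; it mirrors the recorded pipelines rather than reproducing any program from the tree:

	// gen emits the splitTests entries and the stackN function bodies
	// for every frame size N = 4, 8, ..., 5000.
	package main

	import "fmt"

	func main() {
		for n := 4; n <= 5000; n += 4 {
			fmt.Printf("\tstack%d,\n", n) // table entry; fmt(1) then reflows these
		}
		for n := 4; n <= 5000; n += 4 {
			fmt.Printf("func stack%d() (uintptr, uintptr) { var buf [%d]byte; use(buf[:]); return Stackguard() }\n", n, n)
		}
	}
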
var Used byte
func use(buf []byte) {
@@ -276,1258 +64,6 @@ func use(buf []byte) {
}
}
-// Edit .+1,$ | seq 4 4 5000 | sed 's/.*/func stack&()(uintptr, uintptr) { var buf [&]byte; use(buf[:]); return Stackguard() }/'
-func stack4() (uintptr, uintptr) { var buf [4]byte; use(buf[:]); return Stackguard() }
-func stack8() (uintptr, uintptr) { var buf [8]byte; use(buf[:]); return Stackguard() }
-func stack12() (uintptr, uintptr) { var buf [12]byte; use(buf[:]); return Stackguard() }
-func stack16() (uintptr, uintptr) { var buf [16]byte; use(buf[:]); return Stackguard() }
-func stack20() (uintptr, uintptr) { var buf [20]byte; use(buf[:]); return Stackguard() }
-func stack24() (uintptr, uintptr) { var buf [24]byte; use(buf[:]); return Stackguard() }
-func stack28() (uintptr, uintptr) { var buf [28]byte; use(buf[:]); return Stackguard() }
-func stack32() (uintptr, uintptr) { var buf [32]byte; use(buf[:]); return Stackguard() }
-func stack36() (uintptr, uintptr) { var buf [36]byte; use(buf[:]); return Stackguard() }
-func stack40() (uintptr, uintptr) { var buf [40]byte; use(buf[:]); return Stackguard() }
-func stack44() (uintptr, uintptr) { var buf [44]byte; use(buf[:]); return Stackguard() }
-func stack48() (uintptr, uintptr) { var buf [48]byte; use(buf[:]); return Stackguard() }
-func stack52() (uintptr, uintptr) { var buf [52]byte; use(buf[:]); return Stackguard() }
-func stack56() (uintptr, uintptr) { var buf [56]byte; use(buf[:]); return Stackguard() }
-func stack60() (uintptr, uintptr) { var buf [60]byte; use(buf[:]); return Stackguard() }
-func stack64() (uintptr, uintptr) { var buf [64]byte; use(buf[:]); return Stackguard() }
-func stack68() (uintptr, uintptr) { var buf [68]byte; use(buf[:]); return Stackguard() }
-func stack72() (uintptr, uintptr) { var buf [72]byte; use(buf[:]); return Stackguard() }
-func stack76() (uintptr, uintptr) { var buf [76]byte; use(buf[:]); return Stackguard() }
-func stack80() (uintptr, uintptr) { var buf [80]byte; use(buf[:]); return Stackguard() }
-func stack84() (uintptr, uintptr) { var buf [84]byte; use(buf[:]); return Stackguard() }
-func stack88() (uintptr, uintptr) { var buf [88]byte; use(buf[:]); return Stackguard() }
-func stack92() (uintptr, uintptr) { var buf [92]byte; use(buf[:]); return Stackguard() }
-func stack96() (uintptr, uintptr) { var buf [96]byte; use(buf[:]); return Stackguard() }
-func stack100() (uintptr, uintptr) { var buf [100]byte; use(buf[:]); return Stackguard() }
-func stack104() (uintptr, uintptr) { var buf [104]byte; use(buf[:]); return Stackguard() }
-func stack108() (uintptr, uintptr) { var buf [108]byte; use(buf[:]); return Stackguard() }
-func stack112() (uintptr, uintptr) { var buf [112]byte; use(buf[:]); return Stackguard() }
-func stack116() (uintptr, uintptr) { var buf [116]byte; use(buf[:]); return Stackguard() }
-func stack120() (uintptr, uintptr) { var buf [120]byte; use(buf[:]); return Stackguard() }
-func stack124() (uintptr, uintptr) { var buf [124]byte; use(buf[:]); return Stackguard() }
-func stack128() (uintptr, uintptr) { var buf [128]byte; use(buf[:]); return Stackguard() }
-func stack132() (uintptr, uintptr) { var buf [132]byte; use(buf[:]); return Stackguard() }
-func stack136() (uintptr, uintptr) { var buf [136]byte; use(buf[:]); return Stackguard() }
-func stack140() (uintptr, uintptr) { var buf [140]byte; use(buf[:]); return Stackguard() }
-func stack144() (uintptr, uintptr) { var buf [144]byte; use(buf[:]); return Stackguard() }
-func stack148() (uintptr, uintptr) { var buf [148]byte; use(buf[:]); return Stackguard() }
-func stack152() (uintptr, uintptr) { var buf [152]byte; use(buf[:]); return Stackguard() }
-func stack156() (uintptr, uintptr) { var buf [156]byte; use(buf[:]); return Stackguard() }
-func stack160() (uintptr, uintptr) { var buf [160]byte; use(buf[:]); return Stackguard() }
-func stack164() (uintptr, uintptr) { var buf [164]byte; use(buf[:]); return Stackguard() }
-func stack168() (uintptr, uintptr) { var buf [168]byte; use(buf[:]); return Stackguard() }
-func stack172() (uintptr, uintptr) { var buf [172]byte; use(buf[:]); return Stackguard() }
-func stack176() (uintptr, uintptr) { var buf [176]byte; use(buf[:]); return Stackguard() }
-func stack180() (uintptr, uintptr) { var buf [180]byte; use(buf[:]); return Stackguard() }
-func stack184() (uintptr, uintptr) { var buf [184]byte; use(buf[:]); return Stackguard() }
-func stack188() (uintptr, uintptr) { var buf [188]byte; use(buf[:]); return Stackguard() }
-func stack192() (uintptr, uintptr) { var buf [192]byte; use(buf[:]); return Stackguard() }
-func stack196() (uintptr, uintptr) { var buf [196]byte; use(buf[:]); return Stackguard() }
-func stack200() (uintptr, uintptr) { var buf [200]byte; use(buf[:]); return Stackguard() }
-func stack204() (uintptr, uintptr) { var buf [204]byte; use(buf[:]); return Stackguard() }
-func stack208() (uintptr, uintptr) { var buf [208]byte; use(buf[:]); return Stackguard() }
-func stack212() (uintptr, uintptr) { var buf [212]byte; use(buf[:]); return Stackguard() }
-func stack216() (uintptr, uintptr) { var buf [216]byte; use(buf[:]); return Stackguard() }
-func stack220() (uintptr, uintptr) { var buf [220]byte; use(buf[:]); return Stackguard() }
-func stack224() (uintptr, uintptr) { var buf [224]byte; use(buf[:]); return Stackguard() }
-func stack228() (uintptr, uintptr) { var buf [228]byte; use(buf[:]); return Stackguard() }
-func stack232() (uintptr, uintptr) { var buf [232]byte; use(buf[:]); return Stackguard() }
-func stack236() (uintptr, uintptr) { var buf [236]byte; use(buf[:]); return Stackguard() }
-func stack240() (uintptr, uintptr) { var buf [240]byte; use(buf[:]); return Stackguard() }
-func stack244() (uintptr, uintptr) { var buf [244]byte; use(buf[:]); return Stackguard() }
-func stack248() (uintptr, uintptr) { var buf [248]byte; use(buf[:]); return Stackguard() }
-func stack252() (uintptr, uintptr) { var buf [252]byte; use(buf[:]); return Stackguard() }
-func stack256() (uintptr, uintptr) { var buf [256]byte; use(buf[:]); return Stackguard() }
-func stack260() (uintptr, uintptr) { var buf [260]byte; use(buf[:]); return Stackguard() }
-func stack264() (uintptr, uintptr) { var buf [264]byte; use(buf[:]); return Stackguard() }
-func stack268() (uintptr, uintptr) { var buf [268]byte; use(buf[:]); return Stackguard() }
-func stack272() (uintptr, uintptr) { var buf [272]byte; use(buf[:]); return Stackguard() }
-func stack276() (uintptr, uintptr) { var buf [276]byte; use(buf[:]); return Stackguard() }
-func stack280() (uintptr, uintptr) { var buf [280]byte; use(buf[:]); return Stackguard() }
-func stack284() (uintptr, uintptr) { var buf [284]byte; use(buf[:]); return Stackguard() }
-func stack288() (uintptr, uintptr) { var buf [288]byte; use(buf[:]); return Stackguard() }
-func stack292() (uintptr, uintptr) { var buf [292]byte; use(buf[:]); return Stackguard() }
-func stack296() (uintptr, uintptr) { var buf [296]byte; use(buf[:]); return Stackguard() }
-func stack300() (uintptr, uintptr) { var buf [300]byte; use(buf[:]); return Stackguard() }
-func stack304() (uintptr, uintptr) { var buf [304]byte; use(buf[:]); return Stackguard() }
-func stack308() (uintptr, uintptr) { var buf [308]byte; use(buf[:]); return Stackguard() }
-func stack312() (uintptr, uintptr) { var buf [312]byte; use(buf[:]); return Stackguard() }
-func stack316() (uintptr, uintptr) { var buf [316]byte; use(buf[:]); return Stackguard() }
-func stack320() (uintptr, uintptr) { var buf [320]byte; use(buf[:]); return Stackguard() }
-func stack324() (uintptr, uintptr) { var buf [324]byte; use(buf[:]); return Stackguard() }
-func stack328() (uintptr, uintptr) { var buf [328]byte; use(buf[:]); return Stackguard() }
-func stack332() (uintptr, uintptr) { var buf [332]byte; use(buf[:]); return Stackguard() }
-func stack336() (uintptr, uintptr) { var buf [336]byte; use(buf[:]); return Stackguard() }
-func stack340() (uintptr, uintptr) { var buf [340]byte; use(buf[:]); return Stackguard() }
-func stack344() (uintptr, uintptr) { var buf [344]byte; use(buf[:]); return Stackguard() }
-func stack348() (uintptr, uintptr) { var buf [348]byte; use(buf[:]); return Stackguard() }
-func stack352() (uintptr, uintptr) { var buf [352]byte; use(buf[:]); return Stackguard() }
-func stack356() (uintptr, uintptr) { var buf [356]byte; use(buf[:]); return Stackguard() }
-func stack360() (uintptr, uintptr) { var buf [360]byte; use(buf[:]); return Stackguard() }
-func stack364() (uintptr, uintptr) { var buf [364]byte; use(buf[:]); return Stackguard() }
-func stack368() (uintptr, uintptr) { var buf [368]byte; use(buf[:]); return Stackguard() }
-func stack372() (uintptr, uintptr) { var buf [372]byte; use(buf[:]); return Stackguard() }
-func stack376() (uintptr, uintptr) { var buf [376]byte; use(buf[:]); return Stackguard() }
-func stack380() (uintptr, uintptr) { var buf [380]byte; use(buf[:]); return Stackguard() }
-func stack384() (uintptr, uintptr) { var buf [384]byte; use(buf[:]); return Stackguard() }
-func stack388() (uintptr, uintptr) { var buf [388]byte; use(buf[:]); return Stackguard() }
-func stack392() (uintptr, uintptr) { var buf [392]byte; use(buf[:]); return Stackguard() }
-func stack396() (uintptr, uintptr) { var buf [396]byte; use(buf[:]); return Stackguard() }
-func stack400() (uintptr, uintptr) { var buf [400]byte; use(buf[:]); return Stackguard() }
-func stack404() (uintptr, uintptr) { var buf [404]byte; use(buf[:]); return Stackguard() }
-func stack408() (uintptr, uintptr) { var buf [408]byte; use(buf[:]); return Stackguard() }
-func stack412() (uintptr, uintptr) { var buf [412]byte; use(buf[:]); return Stackguard() }
-func stack416() (uintptr, uintptr) { var buf [416]byte; use(buf[:]); return Stackguard() }
-func stack420() (uintptr, uintptr) { var buf [420]byte; use(buf[:]); return Stackguard() }
-func stack424() (uintptr, uintptr) { var buf [424]byte; use(buf[:]); return Stackguard() }
-func stack428() (uintptr, uintptr) { var buf [428]byte; use(buf[:]); return Stackguard() }
-func stack432() (uintptr, uintptr) { var buf [432]byte; use(buf[:]); return Stackguard() }
-func stack436() (uintptr, uintptr) { var buf [436]byte; use(buf[:]); return Stackguard() }
-func stack440() (uintptr, uintptr) { var buf [440]byte; use(buf[:]); return Stackguard() }
-func stack444() (uintptr, uintptr) { var buf [444]byte; use(buf[:]); return Stackguard() }
-func stack448() (uintptr, uintptr) { var buf [448]byte; use(buf[:]); return Stackguard() }
-func stack452() (uintptr, uintptr) { var buf [452]byte; use(buf[:]); return Stackguard() }
-func stack456() (uintptr, uintptr) { var buf [456]byte; use(buf[:]); return Stackguard() }
-func stack460() (uintptr, uintptr) { var buf [460]byte; use(buf[:]); return Stackguard() }
-func stack464() (uintptr, uintptr) { var buf [464]byte; use(buf[:]); return Stackguard() }
-func stack468() (uintptr, uintptr) { var buf [468]byte; use(buf[:]); return Stackguard() }
-func stack472() (uintptr, uintptr) { var buf [472]byte; use(buf[:]); return Stackguard() }
-func stack476() (uintptr, uintptr) { var buf [476]byte; use(buf[:]); return Stackguard() }
-func stack480() (uintptr, uintptr) { var buf [480]byte; use(buf[:]); return Stackguard() }
-func stack484() (uintptr, uintptr) { var buf [484]byte; use(buf[:]); return Stackguard() }
-func stack488() (uintptr, uintptr) { var buf [488]byte; use(buf[:]); return Stackguard() }
-func stack492() (uintptr, uintptr) { var buf [492]byte; use(buf[:]); return Stackguard() }
-func stack496() (uintptr, uintptr) { var buf [496]byte; use(buf[:]); return Stackguard() }
-func stack500() (uintptr, uintptr) { var buf [500]byte; use(buf[:]); return Stackguard() }
-func stack504() (uintptr, uintptr) { var buf [504]byte; use(buf[:]); return Stackguard() }
-func stack508() (uintptr, uintptr) { var buf [508]byte; use(buf[:]); return Stackguard() }
-func stack512() (uintptr, uintptr) { var buf [512]byte; use(buf[:]); return Stackguard() }
-func stack516() (uintptr, uintptr) { var buf [516]byte; use(buf[:]); return Stackguard() }
-func stack520() (uintptr, uintptr) { var buf [520]byte; use(buf[:]); return Stackguard() }
-func stack524() (uintptr, uintptr) { var buf [524]byte; use(buf[:]); return Stackguard() }
-func stack528() (uintptr, uintptr) { var buf [528]byte; use(buf[:]); return Stackguard() }
-func stack532() (uintptr, uintptr) { var buf [532]byte; use(buf[:]); return Stackguard() }
-func stack536() (uintptr, uintptr) { var buf [536]byte; use(buf[:]); return Stackguard() }
-func stack540() (uintptr, uintptr) { var buf [540]byte; use(buf[:]); return Stackguard() }
-func stack544() (uintptr, uintptr) { var buf [544]byte; use(buf[:]); return Stackguard() }
-func stack548() (uintptr, uintptr) { var buf [548]byte; use(buf[:]); return Stackguard() }
-func stack552() (uintptr, uintptr) { var buf [552]byte; use(buf[:]); return Stackguard() }
-func stack556() (uintptr, uintptr) { var buf [556]byte; use(buf[:]); return Stackguard() }
-func stack560() (uintptr, uintptr) { var buf [560]byte; use(buf[:]); return Stackguard() }
-func stack564() (uintptr, uintptr) { var buf [564]byte; use(buf[:]); return Stackguard() }
-func stack568() (uintptr, uintptr) { var buf [568]byte; use(buf[:]); return Stackguard() }
-func stack572() (uintptr, uintptr) { var buf [572]byte; use(buf[:]); return Stackguard() }
-func stack576() (uintptr, uintptr) { var buf [576]byte; use(buf[:]); return Stackguard() }
-func stack580() (uintptr, uintptr) { var buf [580]byte; use(buf[:]); return Stackguard() }
-func stack584() (uintptr, uintptr) { var buf [584]byte; use(buf[:]); return Stackguard() }
-func stack588() (uintptr, uintptr) { var buf [588]byte; use(buf[:]); return Stackguard() }
-func stack592() (uintptr, uintptr) { var buf [592]byte; use(buf[:]); return Stackguard() }
-func stack596() (uintptr, uintptr) { var buf [596]byte; use(buf[:]); return Stackguard() }
-func stack600() (uintptr, uintptr) { var buf [600]byte; use(buf[:]); return Stackguard() }
-func stack604() (uintptr, uintptr) { var buf [604]byte; use(buf[:]); return Stackguard() }
-func stack608() (uintptr, uintptr) { var buf [608]byte; use(buf[:]); return Stackguard() }
-func stack612() (uintptr, uintptr) { var buf [612]byte; use(buf[:]); return Stackguard() }
-func stack616() (uintptr, uintptr) { var buf [616]byte; use(buf[:]); return Stackguard() }
-func stack620() (uintptr, uintptr) { var buf [620]byte; use(buf[:]); return Stackguard() }
-func stack624() (uintptr, uintptr) { var buf [624]byte; use(buf[:]); return Stackguard() }
-func stack628() (uintptr, uintptr) { var buf [628]byte; use(buf[:]); return Stackguard() }
-func stack632() (uintptr, uintptr) { var buf [632]byte; use(buf[:]); return Stackguard() }
-func stack636() (uintptr, uintptr) { var buf [636]byte; use(buf[:]); return Stackguard() }
-func stack640() (uintptr, uintptr) { var buf [640]byte; use(buf[:]); return Stackguard() }
-func stack644() (uintptr, uintptr) { var buf [644]byte; use(buf[:]); return Stackguard() }
-func stack648() (uintptr, uintptr) { var buf [648]byte; use(buf[:]); return Stackguard() }
-func stack652() (uintptr, uintptr) { var buf [652]byte; use(buf[:]); return Stackguard() }
-func stack656() (uintptr, uintptr) { var buf [656]byte; use(buf[:]); return Stackguard() }
-func stack660() (uintptr, uintptr) { var buf [660]byte; use(buf[:]); return Stackguard() }
-func stack664() (uintptr, uintptr) { var buf [664]byte; use(buf[:]); return Stackguard() }
-func stack668() (uintptr, uintptr) { var buf [668]byte; use(buf[:]); return Stackguard() }
-func stack672() (uintptr, uintptr) { var buf [672]byte; use(buf[:]); return Stackguard() }
-func stack676() (uintptr, uintptr) { var buf [676]byte; use(buf[:]); return Stackguard() }
-func stack680() (uintptr, uintptr) { var buf [680]byte; use(buf[:]); return Stackguard() }
-func stack684() (uintptr, uintptr) { var buf [684]byte; use(buf[:]); return Stackguard() }
-func stack688() (uintptr, uintptr) { var buf [688]byte; use(buf[:]); return Stackguard() }
-func stack692() (uintptr, uintptr) { var buf [692]byte; use(buf[:]); return Stackguard() }
-func stack696() (uintptr, uintptr) { var buf [696]byte; use(buf[:]); return Stackguard() }
-func stack700() (uintptr, uintptr) { var buf [700]byte; use(buf[:]); return Stackguard() }
-func stack704() (uintptr, uintptr) { var buf [704]byte; use(buf[:]); return Stackguard() }
-func stack708() (uintptr, uintptr) { var buf [708]byte; use(buf[:]); return Stackguard() }
-func stack712() (uintptr, uintptr) { var buf [712]byte; use(buf[:]); return Stackguard() }
-func stack716() (uintptr, uintptr) { var buf [716]byte; use(buf[:]); return Stackguard() }
-func stack720() (uintptr, uintptr) { var buf [720]byte; use(buf[:]); return Stackguard() }
-func stack724() (uintptr, uintptr) { var buf [724]byte; use(buf[:]); return Stackguard() }
-func stack728() (uintptr, uintptr) { var buf [728]byte; use(buf[:]); return Stackguard() }
-func stack732() (uintptr, uintptr) { var buf [732]byte; use(buf[:]); return Stackguard() }
-func stack736() (uintptr, uintptr) { var buf [736]byte; use(buf[:]); return Stackguard() }
-func stack740() (uintptr, uintptr) { var buf [740]byte; use(buf[:]); return Stackguard() }
-func stack744() (uintptr, uintptr) { var buf [744]byte; use(buf[:]); return Stackguard() }
-func stack748() (uintptr, uintptr) { var buf [748]byte; use(buf[:]); return Stackguard() }
-func stack752() (uintptr, uintptr) { var buf [752]byte; use(buf[:]); return Stackguard() }
-func stack756() (uintptr, uintptr) { var buf [756]byte; use(buf[:]); return Stackguard() }
-func stack760() (uintptr, uintptr) { var buf [760]byte; use(buf[:]); return Stackguard() }
-func stack764() (uintptr, uintptr) { var buf [764]byte; use(buf[:]); return Stackguard() }
-func stack768() (uintptr, uintptr) { var buf [768]byte; use(buf[:]); return Stackguard() }
-func stack772() (uintptr, uintptr) { var buf [772]byte; use(buf[:]); return Stackguard() }
-func stack776() (uintptr, uintptr) { var buf [776]byte; use(buf[:]); return Stackguard() }
-func stack780() (uintptr, uintptr) { var buf [780]byte; use(buf[:]); return Stackguard() }
-func stack784() (uintptr, uintptr) { var buf [784]byte; use(buf[:]); return Stackguard() }
-func stack788() (uintptr, uintptr) { var buf [788]byte; use(buf[:]); return Stackguard() }
-func stack792() (uintptr, uintptr) { var buf [792]byte; use(buf[:]); return Stackguard() }
-func stack796() (uintptr, uintptr) { var buf [796]byte; use(buf[:]); return Stackguard() }
-func stack800() (uintptr, uintptr) { var buf [800]byte; use(buf[:]); return Stackguard() }
-func stack804() (uintptr, uintptr) { var buf [804]byte; use(buf[:]); return Stackguard() }
-func stack808() (uintptr, uintptr) { var buf [808]byte; use(buf[:]); return Stackguard() }
-func stack812() (uintptr, uintptr) { var buf [812]byte; use(buf[:]); return Stackguard() }
-func stack816() (uintptr, uintptr) { var buf [816]byte; use(buf[:]); return Stackguard() }
-func stack820() (uintptr, uintptr) { var buf [820]byte; use(buf[:]); return Stackguard() }
-func stack824() (uintptr, uintptr) { var buf [824]byte; use(buf[:]); return Stackguard() }
-func stack828() (uintptr, uintptr) { var buf [828]byte; use(buf[:]); return Stackguard() }
-func stack832() (uintptr, uintptr) { var buf [832]byte; use(buf[:]); return Stackguard() }
-func stack836() (uintptr, uintptr) { var buf [836]byte; use(buf[:]); return Stackguard() }
-func stack840() (uintptr, uintptr) { var buf [840]byte; use(buf[:]); return Stackguard() }
-func stack844() (uintptr, uintptr) { var buf [844]byte; use(buf[:]); return Stackguard() }
-func stack848() (uintptr, uintptr) { var buf [848]byte; use(buf[:]); return Stackguard() }
-func stack852() (uintptr, uintptr) { var buf [852]byte; use(buf[:]); return Stackguard() }
-func stack856() (uintptr, uintptr) { var buf [856]byte; use(buf[:]); return Stackguard() }
-func stack860() (uintptr, uintptr) { var buf [860]byte; use(buf[:]); return Stackguard() }
-func stack864() (uintptr, uintptr) { var buf [864]byte; use(buf[:]); return Stackguard() }
-func stack868() (uintptr, uintptr) { var buf [868]byte; use(buf[:]); return Stackguard() }
-func stack872() (uintptr, uintptr) { var buf [872]byte; use(buf[:]); return Stackguard() }
-func stack876() (uintptr, uintptr) { var buf [876]byte; use(buf[:]); return Stackguard() }
-func stack880() (uintptr, uintptr) { var buf [880]byte; use(buf[:]); return Stackguard() }
-func stack884() (uintptr, uintptr) { var buf [884]byte; use(buf[:]); return Stackguard() }
-func stack888() (uintptr, uintptr) { var buf [888]byte; use(buf[:]); return Stackguard() }
-func stack892() (uintptr, uintptr) { var buf [892]byte; use(buf[:]); return Stackguard() }
-func stack896() (uintptr, uintptr) { var buf [896]byte; use(buf[:]); return Stackguard() }
-func stack900() (uintptr, uintptr) { var buf [900]byte; use(buf[:]); return Stackguard() }
-func stack904() (uintptr, uintptr) { var buf [904]byte; use(buf[:]); return Stackguard() }
-func stack908() (uintptr, uintptr) { var buf [908]byte; use(buf[:]); return Stackguard() }
-func stack912() (uintptr, uintptr) { var buf [912]byte; use(buf[:]); return Stackguard() }
-func stack916() (uintptr, uintptr) { var buf [916]byte; use(buf[:]); return Stackguard() }
-func stack920() (uintptr, uintptr) { var buf [920]byte; use(buf[:]); return Stackguard() }
-func stack924() (uintptr, uintptr) { var buf [924]byte; use(buf[:]); return Stackguard() }
-func stack928() (uintptr, uintptr) { var buf [928]byte; use(buf[:]); return Stackguard() }
-func stack932() (uintptr, uintptr) { var buf [932]byte; use(buf[:]); return Stackguard() }
-func stack936() (uintptr, uintptr) { var buf [936]byte; use(buf[:]); return Stackguard() }
-func stack940() (uintptr, uintptr) { var buf [940]byte; use(buf[:]); return Stackguard() }
-func stack944() (uintptr, uintptr) { var buf [944]byte; use(buf[:]); return Stackguard() }
-func stack948() (uintptr, uintptr) { var buf [948]byte; use(buf[:]); return Stackguard() }
-func stack952() (uintptr, uintptr) { var buf [952]byte; use(buf[:]); return Stackguard() }
-func stack956() (uintptr, uintptr) { var buf [956]byte; use(buf[:]); return Stackguard() }
-func stack960() (uintptr, uintptr) { var buf [960]byte; use(buf[:]); return Stackguard() }
-func stack964() (uintptr, uintptr) { var buf [964]byte; use(buf[:]); return Stackguard() }
-func stack968() (uintptr, uintptr) { var buf [968]byte; use(buf[:]); return Stackguard() }
-func stack972() (uintptr, uintptr) { var buf [972]byte; use(buf[:]); return Stackguard() }
-func stack976() (uintptr, uintptr) { var buf [976]byte; use(buf[:]); return Stackguard() }
-func stack980() (uintptr, uintptr) { var buf [980]byte; use(buf[:]); return Stackguard() }
-func stack984() (uintptr, uintptr) { var buf [984]byte; use(buf[:]); return Stackguard() }
-func stack988() (uintptr, uintptr) { var buf [988]byte; use(buf[:]); return Stackguard() }
-func stack992() (uintptr, uintptr) { var buf [992]byte; use(buf[:]); return Stackguard() }
-func stack996() (uintptr, uintptr) { var buf [996]byte; use(buf[:]); return Stackguard() }
-func stack1000() (uintptr, uintptr) { var buf [1000]byte; use(buf[:]); return Stackguard() }
-func stack1004() (uintptr, uintptr) { var buf [1004]byte; use(buf[:]); return Stackguard() }
-func stack1008() (uintptr, uintptr) { var buf [1008]byte; use(buf[:]); return Stackguard() }
-func stack1012() (uintptr, uintptr) { var buf [1012]byte; use(buf[:]); return Stackguard() }
-func stack1016() (uintptr, uintptr) { var buf [1016]byte; use(buf[:]); return Stackguard() }
-func stack1020() (uintptr, uintptr) { var buf [1020]byte; use(buf[:]); return Stackguard() }
-func stack1024() (uintptr, uintptr) { var buf [1024]byte; use(buf[:]); return Stackguard() }
-func stack1028() (uintptr, uintptr) { var buf [1028]byte; use(buf[:]); return Stackguard() }
-func stack1032() (uintptr, uintptr) { var buf [1032]byte; use(buf[:]); return Stackguard() }
-func stack1036() (uintptr, uintptr) { var buf [1036]byte; use(buf[:]); return Stackguard() }
-func stack1040() (uintptr, uintptr) { var buf [1040]byte; use(buf[:]); return Stackguard() }
-func stack1044() (uintptr, uintptr) { var buf [1044]byte; use(buf[:]); return Stackguard() }
-func stack1048() (uintptr, uintptr) { var buf [1048]byte; use(buf[:]); return Stackguard() }
-func stack1052() (uintptr, uintptr) { var buf [1052]byte; use(buf[:]); return Stackguard() }
-func stack1056() (uintptr, uintptr) { var buf [1056]byte; use(buf[:]); return Stackguard() }
-func stack1060() (uintptr, uintptr) { var buf [1060]byte; use(buf[:]); return Stackguard() }
-func stack1064() (uintptr, uintptr) { var buf [1064]byte; use(buf[:]); return Stackguard() }
-func stack1068() (uintptr, uintptr) { var buf [1068]byte; use(buf[:]); return Stackguard() }
-func stack1072() (uintptr, uintptr) { var buf [1072]byte; use(buf[:]); return Stackguard() }
-func stack1076() (uintptr, uintptr) { var buf [1076]byte; use(buf[:]); return Stackguard() }
-func stack1080() (uintptr, uintptr) { var buf [1080]byte; use(buf[:]); return Stackguard() }
-func stack1084() (uintptr, uintptr) { var buf [1084]byte; use(buf[:]); return Stackguard() }
-func stack1088() (uintptr, uintptr) { var buf [1088]byte; use(buf[:]); return Stackguard() }
-func stack1092() (uintptr, uintptr) { var buf [1092]byte; use(buf[:]); return Stackguard() }
-func stack1096() (uintptr, uintptr) { var buf [1096]byte; use(buf[:]); return Stackguard() }
-func stack1100() (uintptr, uintptr) { var buf [1100]byte; use(buf[:]); return Stackguard() }
-func stack1104() (uintptr, uintptr) { var buf [1104]byte; use(buf[:]); return Stackguard() }
-func stack1108() (uintptr, uintptr) { var buf [1108]byte; use(buf[:]); return Stackguard() }
-func stack1112() (uintptr, uintptr) { var buf [1112]byte; use(buf[:]); return Stackguard() }
-func stack1116() (uintptr, uintptr) { var buf [1116]byte; use(buf[:]); return Stackguard() }
-func stack1120() (uintptr, uintptr) { var buf [1120]byte; use(buf[:]); return Stackguard() }
-func stack1124() (uintptr, uintptr) { var buf [1124]byte; use(buf[:]); return Stackguard() }
-func stack1128() (uintptr, uintptr) { var buf [1128]byte; use(buf[:]); return Stackguard() }
-func stack1132() (uintptr, uintptr) { var buf [1132]byte; use(buf[:]); return Stackguard() }
-func stack1136() (uintptr, uintptr) { var buf [1136]byte; use(buf[:]); return Stackguard() }
-func stack1140() (uintptr, uintptr) { var buf [1140]byte; use(buf[:]); return Stackguard() }
-func stack1144() (uintptr, uintptr) { var buf [1144]byte; use(buf[:]); return Stackguard() }
-func stack1148() (uintptr, uintptr) { var buf [1148]byte; use(buf[:]); return Stackguard() }
-func stack1152() (uintptr, uintptr) { var buf [1152]byte; use(buf[:]); return Stackguard() }
-func stack1156() (uintptr, uintptr) { var buf [1156]byte; use(buf[:]); return Stackguard() }
-func stack1160() (uintptr, uintptr) { var buf [1160]byte; use(buf[:]); return Stackguard() }
-func stack1164() (uintptr, uintptr) { var buf [1164]byte; use(buf[:]); return Stackguard() }
-func stack1168() (uintptr, uintptr) { var buf [1168]byte; use(buf[:]); return Stackguard() }
-func stack1172() (uintptr, uintptr) { var buf [1172]byte; use(buf[:]); return Stackguard() }
-func stack1176() (uintptr, uintptr) { var buf [1176]byte; use(buf[:]); return Stackguard() }
-func stack1180() (uintptr, uintptr) { var buf [1180]byte; use(buf[:]); return Stackguard() }
-func stack1184() (uintptr, uintptr) { var buf [1184]byte; use(buf[:]); return Stackguard() }
-func stack1188() (uintptr, uintptr) { var buf [1188]byte; use(buf[:]); return Stackguard() }
-func stack1192() (uintptr, uintptr) { var buf [1192]byte; use(buf[:]); return Stackguard() }
-func stack1196() (uintptr, uintptr) { var buf [1196]byte; use(buf[:]); return Stackguard() }
-func stack1200() (uintptr, uintptr) { var buf [1200]byte; use(buf[:]); return Stackguard() }
-func stack1204() (uintptr, uintptr) { var buf [1204]byte; use(buf[:]); return Stackguard() }
-func stack1208() (uintptr, uintptr) { var buf [1208]byte; use(buf[:]); return Stackguard() }
-func stack1212() (uintptr, uintptr) { var buf [1212]byte; use(buf[:]); return Stackguard() }
-func stack1216() (uintptr, uintptr) { var buf [1216]byte; use(buf[:]); return Stackguard() }
-func stack1220() (uintptr, uintptr) { var buf [1220]byte; use(buf[:]); return Stackguard() }
-func stack1224() (uintptr, uintptr) { var buf [1224]byte; use(buf[:]); return Stackguard() }
-func stack1228() (uintptr, uintptr) { var buf [1228]byte; use(buf[:]); return Stackguard() }
-func stack1232() (uintptr, uintptr) { var buf [1232]byte; use(buf[:]); return Stackguard() }
-func stack1236() (uintptr, uintptr) { var buf [1236]byte; use(buf[:]); return Stackguard() }
-func stack1240() (uintptr, uintptr) { var buf [1240]byte; use(buf[:]); return Stackguard() }
-func stack1244() (uintptr, uintptr) { var buf [1244]byte; use(buf[:]); return Stackguard() }
-func stack1248() (uintptr, uintptr) { var buf [1248]byte; use(buf[:]); return Stackguard() }
-func stack1252() (uintptr, uintptr) { var buf [1252]byte; use(buf[:]); return Stackguard() }
-func stack1256() (uintptr, uintptr) { var buf [1256]byte; use(buf[:]); return Stackguard() }
-func stack1260() (uintptr, uintptr) { var buf [1260]byte; use(buf[:]); return Stackguard() }
-func stack1264() (uintptr, uintptr) { var buf [1264]byte; use(buf[:]); return Stackguard() }
-func stack1268() (uintptr, uintptr) { var buf [1268]byte; use(buf[:]); return Stackguard() }
-func stack1272() (uintptr, uintptr) { var buf [1272]byte; use(buf[:]); return Stackguard() }
-func stack1276() (uintptr, uintptr) { var buf [1276]byte; use(buf[:]); return Stackguard() }
-func stack1280() (uintptr, uintptr) { var buf [1280]byte; use(buf[:]); return Stackguard() }
-func stack1284() (uintptr, uintptr) { var buf [1284]byte; use(buf[:]); return Stackguard() }
-func stack1288() (uintptr, uintptr) { var buf [1288]byte; use(buf[:]); return Stackguard() }
-func stack1292() (uintptr, uintptr) { var buf [1292]byte; use(buf[:]); return Stackguard() }
-func stack1296() (uintptr, uintptr) { var buf [1296]byte; use(buf[:]); return Stackguard() }
-func stack1300() (uintptr, uintptr) { var buf [1300]byte; use(buf[:]); return Stackguard() }
-func stack1304() (uintptr, uintptr) { var buf [1304]byte; use(buf[:]); return Stackguard() }
-func stack1308() (uintptr, uintptr) { var buf [1308]byte; use(buf[:]); return Stackguard() }
-func stack1312() (uintptr, uintptr) { var buf [1312]byte; use(buf[:]); return Stackguard() }
-func stack1316() (uintptr, uintptr) { var buf [1316]byte; use(buf[:]); return Stackguard() }
-func stack1320() (uintptr, uintptr) { var buf [1320]byte; use(buf[:]); return Stackguard() }
-func stack1324() (uintptr, uintptr) { var buf [1324]byte; use(buf[:]); return Stackguard() }
-func stack1328() (uintptr, uintptr) { var buf [1328]byte; use(buf[:]); return Stackguard() }
-func stack1332() (uintptr, uintptr) { var buf [1332]byte; use(buf[:]); return Stackguard() }
-func stack1336() (uintptr, uintptr) { var buf [1336]byte; use(buf[:]); return Stackguard() }
-func stack1340() (uintptr, uintptr) { var buf [1340]byte; use(buf[:]); return Stackguard() }
-func stack1344() (uintptr, uintptr) { var buf [1344]byte; use(buf[:]); return Stackguard() }
-func stack1348() (uintptr, uintptr) { var buf [1348]byte; use(buf[:]); return Stackguard() }
-func stack1352() (uintptr, uintptr) { var buf [1352]byte; use(buf[:]); return Stackguard() }
-func stack1356() (uintptr, uintptr) { var buf [1356]byte; use(buf[:]); return Stackguard() }
-func stack1360() (uintptr, uintptr) { var buf [1360]byte; use(buf[:]); return Stackguard() }
-func stack1364() (uintptr, uintptr) { var buf [1364]byte; use(buf[:]); return Stackguard() }
-func stack1368() (uintptr, uintptr) { var buf [1368]byte; use(buf[:]); return Stackguard() }
-func stack1372() (uintptr, uintptr) { var buf [1372]byte; use(buf[:]); return Stackguard() }
-func stack1376() (uintptr, uintptr) { var buf [1376]byte; use(buf[:]); return Stackguard() }
-func stack1380() (uintptr, uintptr) { var buf [1380]byte; use(buf[:]); return Stackguard() }
-func stack1384() (uintptr, uintptr) { var buf [1384]byte; use(buf[:]); return Stackguard() }
-func stack1388() (uintptr, uintptr) { var buf [1388]byte; use(buf[:]); return Stackguard() }
-func stack1392() (uintptr, uintptr) { var buf [1392]byte; use(buf[:]); return Stackguard() }
-func stack1396() (uintptr, uintptr) { var buf [1396]byte; use(buf[:]); return Stackguard() }
-func stack1400() (uintptr, uintptr) { var buf [1400]byte; use(buf[:]); return Stackguard() }
-func stack1404() (uintptr, uintptr) { var buf [1404]byte; use(buf[:]); return Stackguard() }
-func stack1408() (uintptr, uintptr) { var buf [1408]byte; use(buf[:]); return Stackguard() }
-func stack1412() (uintptr, uintptr) { var buf [1412]byte; use(buf[:]); return Stackguard() }
-func stack1416() (uintptr, uintptr) { var buf [1416]byte; use(buf[:]); return Stackguard() }
-func stack1420() (uintptr, uintptr) { var buf [1420]byte; use(buf[:]); return Stackguard() }
-func stack1424() (uintptr, uintptr) { var buf [1424]byte; use(buf[:]); return Stackguard() }
-func stack1428() (uintptr, uintptr) { var buf [1428]byte; use(buf[:]); return Stackguard() }
-func stack1432() (uintptr, uintptr) { var buf [1432]byte; use(buf[:]); return Stackguard() }
-func stack1436() (uintptr, uintptr) { var buf [1436]byte; use(buf[:]); return Stackguard() }
-func stack1440() (uintptr, uintptr) { var buf [1440]byte; use(buf[:]); return Stackguard() }
-func stack1444() (uintptr, uintptr) { var buf [1444]byte; use(buf[:]); return Stackguard() }
-func stack1448() (uintptr, uintptr) { var buf [1448]byte; use(buf[:]); return Stackguard() }
-func stack1452() (uintptr, uintptr) { var buf [1452]byte; use(buf[:]); return Stackguard() }
-func stack1456() (uintptr, uintptr) { var buf [1456]byte; use(buf[:]); return Stackguard() }
-func stack1460() (uintptr, uintptr) { var buf [1460]byte; use(buf[:]); return Stackguard() }
-func stack1464() (uintptr, uintptr) { var buf [1464]byte; use(buf[:]); return Stackguard() }
-func stack1468() (uintptr, uintptr) { var buf [1468]byte; use(buf[:]); return Stackguard() }
-func stack1472() (uintptr, uintptr) { var buf [1472]byte; use(buf[:]); return Stackguard() }
-func stack1476() (uintptr, uintptr) { var buf [1476]byte; use(buf[:]); return Stackguard() }
-func stack1480() (uintptr, uintptr) { var buf [1480]byte; use(buf[:]); return Stackguard() }
-func stack1484() (uintptr, uintptr) { var buf [1484]byte; use(buf[:]); return Stackguard() }
-func stack1488() (uintptr, uintptr) { var buf [1488]byte; use(buf[:]); return Stackguard() }
-func stack1492() (uintptr, uintptr) { var buf [1492]byte; use(buf[:]); return Stackguard() }
-func stack1496() (uintptr, uintptr) { var buf [1496]byte; use(buf[:]); return Stackguard() }
-func stack1500() (uintptr, uintptr) { var buf [1500]byte; use(buf[:]); return Stackguard() }
-func stack1504() (uintptr, uintptr) { var buf [1504]byte; use(buf[:]); return Stackguard() }
-func stack1508() (uintptr, uintptr) { var buf [1508]byte; use(buf[:]); return Stackguard() }
-func stack1512() (uintptr, uintptr) { var buf [1512]byte; use(buf[:]); return Stackguard() }
-func stack1516() (uintptr, uintptr) { var buf [1516]byte; use(buf[:]); return Stackguard() }
-func stack1520() (uintptr, uintptr) { var buf [1520]byte; use(buf[:]); return Stackguard() }
-func stack1524() (uintptr, uintptr) { var buf [1524]byte; use(buf[:]); return Stackguard() }
-func stack1528() (uintptr, uintptr) { var buf [1528]byte; use(buf[:]); return Stackguard() }
-func stack1532() (uintptr, uintptr) { var buf [1532]byte; use(buf[:]); return Stackguard() }
-func stack1536() (uintptr, uintptr) { var buf [1536]byte; use(buf[:]); return Stackguard() }
-func stack1540() (uintptr, uintptr) { var buf [1540]byte; use(buf[:]); return Stackguard() }
-func stack1544() (uintptr, uintptr) { var buf [1544]byte; use(buf[:]); return Stackguard() }
-func stack1548() (uintptr, uintptr) { var buf [1548]byte; use(buf[:]); return Stackguard() }
-func stack1552() (uintptr, uintptr) { var buf [1552]byte; use(buf[:]); return Stackguard() }
-func stack1556() (uintptr, uintptr) { var buf [1556]byte; use(buf[:]); return Stackguard() }
-func stack1560() (uintptr, uintptr) { var buf [1560]byte; use(buf[:]); return Stackguard() }
-func stack1564() (uintptr, uintptr) { var buf [1564]byte; use(buf[:]); return Stackguard() }
-func stack1568() (uintptr, uintptr) { var buf [1568]byte; use(buf[:]); return Stackguard() }
-func stack1572() (uintptr, uintptr) { var buf [1572]byte; use(buf[:]); return Stackguard() }
-func stack1576() (uintptr, uintptr) { var buf [1576]byte; use(buf[:]); return Stackguard() }
-func stack1580() (uintptr, uintptr) { var buf [1580]byte; use(buf[:]); return Stackguard() }
-func stack1584() (uintptr, uintptr) { var buf [1584]byte; use(buf[:]); return Stackguard() }
-func stack1588() (uintptr, uintptr) { var buf [1588]byte; use(buf[:]); return Stackguard() }
-func stack1592() (uintptr, uintptr) { var buf [1592]byte; use(buf[:]); return Stackguard() }
-func stack1596() (uintptr, uintptr) { var buf [1596]byte; use(buf[:]); return Stackguard() }
-func stack1600() (uintptr, uintptr) { var buf [1600]byte; use(buf[:]); return Stackguard() }
-func stack1604() (uintptr, uintptr) { var buf [1604]byte; use(buf[:]); return Stackguard() }
-func stack1608() (uintptr, uintptr) { var buf [1608]byte; use(buf[:]); return Stackguard() }
-func stack1612() (uintptr, uintptr) { var buf [1612]byte; use(buf[:]); return Stackguard() }
-func stack1616() (uintptr, uintptr) { var buf [1616]byte; use(buf[:]); return Stackguard() }
-func stack1620() (uintptr, uintptr) { var buf [1620]byte; use(buf[:]); return Stackguard() }
-func stack1624() (uintptr, uintptr) { var buf [1624]byte; use(buf[:]); return Stackguard() }
-func stack1628() (uintptr, uintptr) { var buf [1628]byte; use(buf[:]); return Stackguard() }
-func stack1632() (uintptr, uintptr) { var buf [1632]byte; use(buf[:]); return Stackguard() }
-func stack1636() (uintptr, uintptr) { var buf [1636]byte; use(buf[:]); return Stackguard() }
-func stack1640() (uintptr, uintptr) { var buf [1640]byte; use(buf[:]); return Stackguard() }
-func stack1644() (uintptr, uintptr) { var buf [1644]byte; use(buf[:]); return Stackguard() }
-func stack1648() (uintptr, uintptr) { var buf [1648]byte; use(buf[:]); return Stackguard() }
-func stack1652() (uintptr, uintptr) { var buf [1652]byte; use(buf[:]); return Stackguard() }
-func stack1656() (uintptr, uintptr) { var buf [1656]byte; use(buf[:]); return Stackguard() }
-func stack1660() (uintptr, uintptr) { var buf [1660]byte; use(buf[:]); return Stackguard() }
-func stack1664() (uintptr, uintptr) { var buf [1664]byte; use(buf[:]); return Stackguard() }
-func stack1668() (uintptr, uintptr) { var buf [1668]byte; use(buf[:]); return Stackguard() }
-func stack1672() (uintptr, uintptr) { var buf [1672]byte; use(buf[:]); return Stackguard() }
-func stack1676() (uintptr, uintptr) { var buf [1676]byte; use(buf[:]); return Stackguard() }
-func stack1680() (uintptr, uintptr) { var buf [1680]byte; use(buf[:]); return Stackguard() }
-func stack1684() (uintptr, uintptr) { var buf [1684]byte; use(buf[:]); return Stackguard() }
-func stack1688() (uintptr, uintptr) { var buf [1688]byte; use(buf[:]); return Stackguard() }
-func stack1692() (uintptr, uintptr) { var buf [1692]byte; use(buf[:]); return Stackguard() }
-func stack1696() (uintptr, uintptr) { var buf [1696]byte; use(buf[:]); return Stackguard() }
-func stack1700() (uintptr, uintptr) { var buf [1700]byte; use(buf[:]); return Stackguard() }
-func stack1704() (uintptr, uintptr) { var buf [1704]byte; use(buf[:]); return Stackguard() }
-func stack1708() (uintptr, uintptr) { var buf [1708]byte; use(buf[:]); return Stackguard() }
-func stack1712() (uintptr, uintptr) { var buf [1712]byte; use(buf[:]); return Stackguard() }
-func stack1716() (uintptr, uintptr) { var buf [1716]byte; use(buf[:]); return Stackguard() }
-func stack1720() (uintptr, uintptr) { var buf [1720]byte; use(buf[:]); return Stackguard() }
-func stack1724() (uintptr, uintptr) { var buf [1724]byte; use(buf[:]); return Stackguard() }
-func stack1728() (uintptr, uintptr) { var buf [1728]byte; use(buf[:]); return Stackguard() }
-func stack1732() (uintptr, uintptr) { var buf [1732]byte; use(buf[:]); return Stackguard() }
-func stack1736() (uintptr, uintptr) { var buf [1736]byte; use(buf[:]); return Stackguard() }
-func stack1740() (uintptr, uintptr) { var buf [1740]byte; use(buf[:]); return Stackguard() }
-func stack1744() (uintptr, uintptr) { var buf [1744]byte; use(buf[:]); return Stackguard() }
-func stack1748() (uintptr, uintptr) { var buf [1748]byte; use(buf[:]); return Stackguard() }
-func stack1752() (uintptr, uintptr) { var buf [1752]byte; use(buf[:]); return Stackguard() }
-func stack1756() (uintptr, uintptr) { var buf [1756]byte; use(buf[:]); return Stackguard() }
-func stack1760() (uintptr, uintptr) { var buf [1760]byte; use(buf[:]); return Stackguard() }
-func stack1764() (uintptr, uintptr) { var buf [1764]byte; use(buf[:]); return Stackguard() }
-func stack1768() (uintptr, uintptr) { var buf [1768]byte; use(buf[:]); return Stackguard() }
-func stack1772() (uintptr, uintptr) { var buf [1772]byte; use(buf[:]); return Stackguard() }
-func stack1776() (uintptr, uintptr) { var buf [1776]byte; use(buf[:]); return Stackguard() }
-func stack1780() (uintptr, uintptr) { var buf [1780]byte; use(buf[:]); return Stackguard() }
-func stack1784() (uintptr, uintptr) { var buf [1784]byte; use(buf[:]); return Stackguard() }
-func stack1788() (uintptr, uintptr) { var buf [1788]byte; use(buf[:]); return Stackguard() }
-func stack1792() (uintptr, uintptr) { var buf [1792]byte; use(buf[:]); return Stackguard() }
-func stack1796() (uintptr, uintptr) { var buf [1796]byte; use(buf[:]); return Stackguard() }
-func stack1800() (uintptr, uintptr) { var buf [1800]byte; use(buf[:]); return Stackguard() }
-func stack1804() (uintptr, uintptr) { var buf [1804]byte; use(buf[:]); return Stackguard() }
-func stack1808() (uintptr, uintptr) { var buf [1808]byte; use(buf[:]); return Stackguard() }
-func stack1812() (uintptr, uintptr) { var buf [1812]byte; use(buf[:]); return Stackguard() }
-func stack1816() (uintptr, uintptr) { var buf [1816]byte; use(buf[:]); return Stackguard() }
-func stack1820() (uintptr, uintptr) { var buf [1820]byte; use(buf[:]); return Stackguard() }
-func stack1824() (uintptr, uintptr) { var buf [1824]byte; use(buf[:]); return Stackguard() }
-func stack1828() (uintptr, uintptr) { var buf [1828]byte; use(buf[:]); return Stackguard() }
-func stack1832() (uintptr, uintptr) { var buf [1832]byte; use(buf[:]); return Stackguard() }
-func stack1836() (uintptr, uintptr) { var buf [1836]byte; use(buf[:]); return Stackguard() }
-func stack1840() (uintptr, uintptr) { var buf [1840]byte; use(buf[:]); return Stackguard() }
-func stack1844() (uintptr, uintptr) { var buf [1844]byte; use(buf[:]); return Stackguard() }
-func stack1848() (uintptr, uintptr) { var buf [1848]byte; use(buf[:]); return Stackguard() }
-func stack1852() (uintptr, uintptr) { var buf [1852]byte; use(buf[:]); return Stackguard() }
-func stack1856() (uintptr, uintptr) { var buf [1856]byte; use(buf[:]); return Stackguard() }
-func stack1860() (uintptr, uintptr) { var buf [1860]byte; use(buf[:]); return Stackguard() }
-func stack1864() (uintptr, uintptr) { var buf [1864]byte; use(buf[:]); return Stackguard() }
-func stack1868() (uintptr, uintptr) { var buf [1868]byte; use(buf[:]); return Stackguard() }
-func stack1872() (uintptr, uintptr) { var buf [1872]byte; use(buf[:]); return Stackguard() }
-func stack1876() (uintptr, uintptr) { var buf [1876]byte; use(buf[:]); return Stackguard() }
-func stack1880() (uintptr, uintptr) { var buf [1880]byte; use(buf[:]); return Stackguard() }
-func stack1884() (uintptr, uintptr) { var buf [1884]byte; use(buf[:]); return Stackguard() }
-func stack1888() (uintptr, uintptr) { var buf [1888]byte; use(buf[:]); return Stackguard() }
-func stack1892() (uintptr, uintptr) { var buf [1892]byte; use(buf[:]); return Stackguard() }
-func stack1896() (uintptr, uintptr) { var buf [1896]byte; use(buf[:]); return Stackguard() }
-func stack1900() (uintptr, uintptr) { var buf [1900]byte; use(buf[:]); return Stackguard() }
-func stack1904() (uintptr, uintptr) { var buf [1904]byte; use(buf[:]); return Stackguard() }
-func stack1908() (uintptr, uintptr) { var buf [1908]byte; use(buf[:]); return Stackguard() }
-func stack1912() (uintptr, uintptr) { var buf [1912]byte; use(buf[:]); return Stackguard() }
-func stack1916() (uintptr, uintptr) { var buf [1916]byte; use(buf[:]); return Stackguard() }
-func stack1920() (uintptr, uintptr) { var buf [1920]byte; use(buf[:]); return Stackguard() }
-func stack1924() (uintptr, uintptr) { var buf [1924]byte; use(buf[:]); return Stackguard() }
-func stack1928() (uintptr, uintptr) { var buf [1928]byte; use(buf[:]); return Stackguard() }
-func stack1932() (uintptr, uintptr) { var buf [1932]byte; use(buf[:]); return Stackguard() }
-func stack1936() (uintptr, uintptr) { var buf [1936]byte; use(buf[:]); return Stackguard() }
-func stack1940() (uintptr, uintptr) { var buf [1940]byte; use(buf[:]); return Stackguard() }
-func stack1944() (uintptr, uintptr) { var buf [1944]byte; use(buf[:]); return Stackguard() }
-func stack1948() (uintptr, uintptr) { var buf [1948]byte; use(buf[:]); return Stackguard() }
-func stack1952() (uintptr, uintptr) { var buf [1952]byte; use(buf[:]); return Stackguard() }
-func stack1956() (uintptr, uintptr) { var buf [1956]byte; use(buf[:]); return Stackguard() }
-func stack1960() (uintptr, uintptr) { var buf [1960]byte; use(buf[:]); return Stackguard() }
-func stack1964() (uintptr, uintptr) { var buf [1964]byte; use(buf[:]); return Stackguard() }
-func stack1968() (uintptr, uintptr) { var buf [1968]byte; use(buf[:]); return Stackguard() }
-func stack1972() (uintptr, uintptr) { var buf [1972]byte; use(buf[:]); return Stackguard() }
-func stack1976() (uintptr, uintptr) { var buf [1976]byte; use(buf[:]); return Stackguard() }
-func stack1980() (uintptr, uintptr) { var buf [1980]byte; use(buf[:]); return Stackguard() }
-func stack1984() (uintptr, uintptr) { var buf [1984]byte; use(buf[:]); return Stackguard() }
-func stack1988() (uintptr, uintptr) { var buf [1988]byte; use(buf[:]); return Stackguard() }
-func stack1992() (uintptr, uintptr) { var buf [1992]byte; use(buf[:]); return Stackguard() }
-func stack1996() (uintptr, uintptr) { var buf [1996]byte; use(buf[:]); return Stackguard() }
-func stack2000() (uintptr, uintptr) { var buf [2000]byte; use(buf[:]); return Stackguard() }
-func stack2004() (uintptr, uintptr) { var buf [2004]byte; use(buf[:]); return Stackguard() }
-func stack2008() (uintptr, uintptr) { var buf [2008]byte; use(buf[:]); return Stackguard() }
-func stack2012() (uintptr, uintptr) { var buf [2012]byte; use(buf[:]); return Stackguard() }
-func stack2016() (uintptr, uintptr) { var buf [2016]byte; use(buf[:]); return Stackguard() }
-func stack2020() (uintptr, uintptr) { var buf [2020]byte; use(buf[:]); return Stackguard() }
-func stack2024() (uintptr, uintptr) { var buf [2024]byte; use(buf[:]); return Stackguard() }
-func stack2028() (uintptr, uintptr) { var buf [2028]byte; use(buf[:]); return Stackguard() }
-func stack2032() (uintptr, uintptr) { var buf [2032]byte; use(buf[:]); return Stackguard() }
-func stack2036() (uintptr, uintptr) { var buf [2036]byte; use(buf[:]); return Stackguard() }
-func stack2040() (uintptr, uintptr) { var buf [2040]byte; use(buf[:]); return Stackguard() }
-func stack2044() (uintptr, uintptr) { var buf [2044]byte; use(buf[:]); return Stackguard() }
-func stack2048() (uintptr, uintptr) { var buf [2048]byte; use(buf[:]); return Stackguard() }
-func stack2052() (uintptr, uintptr) { var buf [2052]byte; use(buf[:]); return Stackguard() }
-func stack2056() (uintptr, uintptr) { var buf [2056]byte; use(buf[:]); return Stackguard() }
-func stack2060() (uintptr, uintptr) { var buf [2060]byte; use(buf[:]); return Stackguard() }
-func stack2064() (uintptr, uintptr) { var buf [2064]byte; use(buf[:]); return Stackguard() }
-func stack2068() (uintptr, uintptr) { var buf [2068]byte; use(buf[:]); return Stackguard() }
-func stack2072() (uintptr, uintptr) { var buf [2072]byte; use(buf[:]); return Stackguard() }
-func stack2076() (uintptr, uintptr) { var buf [2076]byte; use(buf[:]); return Stackguard() }
-func stack2080() (uintptr, uintptr) { var buf [2080]byte; use(buf[:]); return Stackguard() }
-func stack2084() (uintptr, uintptr) { var buf [2084]byte; use(buf[:]); return Stackguard() }
-func stack2088() (uintptr, uintptr) { var buf [2088]byte; use(buf[:]); return Stackguard() }
-func stack2092() (uintptr, uintptr) { var buf [2092]byte; use(buf[:]); return Stackguard() }
-func stack2096() (uintptr, uintptr) { var buf [2096]byte; use(buf[:]); return Stackguard() }
-func stack2100() (uintptr, uintptr) { var buf [2100]byte; use(buf[:]); return Stackguard() }
-func stack2104() (uintptr, uintptr) { var buf [2104]byte; use(buf[:]); return Stackguard() }
-func stack2108() (uintptr, uintptr) { var buf [2108]byte; use(buf[:]); return Stackguard() }
-func stack2112() (uintptr, uintptr) { var buf [2112]byte; use(buf[:]); return Stackguard() }
-func stack2116() (uintptr, uintptr) { var buf [2116]byte; use(buf[:]); return Stackguard() }
-func stack2120() (uintptr, uintptr) { var buf [2120]byte; use(buf[:]); return Stackguard() }
-func stack2124() (uintptr, uintptr) { var buf [2124]byte; use(buf[:]); return Stackguard() }
-func stack2128() (uintptr, uintptr) { var buf [2128]byte; use(buf[:]); return Stackguard() }
-func stack2132() (uintptr, uintptr) { var buf [2132]byte; use(buf[:]); return Stackguard() }
-func stack2136() (uintptr, uintptr) { var buf [2136]byte; use(buf[:]); return Stackguard() }
-func stack2140() (uintptr, uintptr) { var buf [2140]byte; use(buf[:]); return Stackguard() }
-func stack2144() (uintptr, uintptr) { var buf [2144]byte; use(buf[:]); return Stackguard() }
-func stack2148() (uintptr, uintptr) { var buf [2148]byte; use(buf[:]); return Stackguard() }
-func stack2152() (uintptr, uintptr) { var buf [2152]byte; use(buf[:]); return Stackguard() }
-func stack2156() (uintptr, uintptr) { var buf [2156]byte; use(buf[:]); return Stackguard() }
-func stack2160() (uintptr, uintptr) { var buf [2160]byte; use(buf[:]); return Stackguard() }
-func stack2164() (uintptr, uintptr) { var buf [2164]byte; use(buf[:]); return Stackguard() }
-func stack2168() (uintptr, uintptr) { var buf [2168]byte; use(buf[:]); return Stackguard() }
-func stack2172() (uintptr, uintptr) { var buf [2172]byte; use(buf[:]); return Stackguard() }
-func stack2176() (uintptr, uintptr) { var buf [2176]byte; use(buf[:]); return Stackguard() }
-func stack2180() (uintptr, uintptr) { var buf [2180]byte; use(buf[:]); return Stackguard() }
-func stack2184() (uintptr, uintptr) { var buf [2184]byte; use(buf[:]); return Stackguard() }
-func stack2188() (uintptr, uintptr) { var buf [2188]byte; use(buf[:]); return Stackguard() }
-func stack2192() (uintptr, uintptr) { var buf [2192]byte; use(buf[:]); return Stackguard() }
-func stack2196() (uintptr, uintptr) { var buf [2196]byte; use(buf[:]); return Stackguard() }
-func stack2200() (uintptr, uintptr) { var buf [2200]byte; use(buf[:]); return Stackguard() }
-func stack2204() (uintptr, uintptr) { var buf [2204]byte; use(buf[:]); return Stackguard() }
-func stack2208() (uintptr, uintptr) { var buf [2208]byte; use(buf[:]); return Stackguard() }
-func stack2212() (uintptr, uintptr) { var buf [2212]byte; use(buf[:]); return Stackguard() }
-func stack2216() (uintptr, uintptr) { var buf [2216]byte; use(buf[:]); return Stackguard() }
-func stack2220() (uintptr, uintptr) { var buf [2220]byte; use(buf[:]); return Stackguard() }
-func stack2224() (uintptr, uintptr) { var buf [2224]byte; use(buf[:]); return Stackguard() }
-func stack2228() (uintptr, uintptr) { var buf [2228]byte; use(buf[:]); return Stackguard() }
-func stack2232() (uintptr, uintptr) { var buf [2232]byte; use(buf[:]); return Stackguard() }
-func stack2236() (uintptr, uintptr) { var buf [2236]byte; use(buf[:]); return Stackguard() }
-func stack2240() (uintptr, uintptr) { var buf [2240]byte; use(buf[:]); return Stackguard() }
-func stack2244() (uintptr, uintptr) { var buf [2244]byte; use(buf[:]); return Stackguard() }
-func stack2248() (uintptr, uintptr) { var buf [2248]byte; use(buf[:]); return Stackguard() }
-func stack2252() (uintptr, uintptr) { var buf [2252]byte; use(buf[:]); return Stackguard() }
-func stack2256() (uintptr, uintptr) { var buf [2256]byte; use(buf[:]); return Stackguard() }
-func stack2260() (uintptr, uintptr) { var buf [2260]byte; use(buf[:]); return Stackguard() }
-func stack2264() (uintptr, uintptr) { var buf [2264]byte; use(buf[:]); return Stackguard() }
-func stack2268() (uintptr, uintptr) { var buf [2268]byte; use(buf[:]); return Stackguard() }
-func stack2272() (uintptr, uintptr) { var buf [2272]byte; use(buf[:]); return Stackguard() }
-func stack2276() (uintptr, uintptr) { var buf [2276]byte; use(buf[:]); return Stackguard() }
-func stack2280() (uintptr, uintptr) { var buf [2280]byte; use(buf[:]); return Stackguard() }
-func stack2284() (uintptr, uintptr) { var buf [2284]byte; use(buf[:]); return Stackguard() }
-func stack2288() (uintptr, uintptr) { var buf [2288]byte; use(buf[:]); return Stackguard() }
-func stack2292() (uintptr, uintptr) { var buf [2292]byte; use(buf[:]); return Stackguard() }
-func stack2296() (uintptr, uintptr) { var buf [2296]byte; use(buf[:]); return Stackguard() }
-func stack2300() (uintptr, uintptr) { var buf [2300]byte; use(buf[:]); return Stackguard() }
-func stack2304() (uintptr, uintptr) { var buf [2304]byte; use(buf[:]); return Stackguard() }
-func stack2308() (uintptr, uintptr) { var buf [2308]byte; use(buf[:]); return Stackguard() }
-func stack2312() (uintptr, uintptr) { var buf [2312]byte; use(buf[:]); return Stackguard() }
-func stack2316() (uintptr, uintptr) { var buf [2316]byte; use(buf[:]); return Stackguard() }
-func stack2320() (uintptr, uintptr) { var buf [2320]byte; use(buf[:]); return Stackguard() }
-func stack2324() (uintptr, uintptr) { var buf [2324]byte; use(buf[:]); return Stackguard() }
-func stack2328() (uintptr, uintptr) { var buf [2328]byte; use(buf[:]); return Stackguard() }
-func stack2332() (uintptr, uintptr) { var buf [2332]byte; use(buf[:]); return Stackguard() }
-func stack2336() (uintptr, uintptr) { var buf [2336]byte; use(buf[:]); return Stackguard() }
-func stack2340() (uintptr, uintptr) { var buf [2340]byte; use(buf[:]); return Stackguard() }
-func stack2344() (uintptr, uintptr) { var buf [2344]byte; use(buf[:]); return Stackguard() }
-func stack2348() (uintptr, uintptr) { var buf [2348]byte; use(buf[:]); return Stackguard() }
-func stack2352() (uintptr, uintptr) { var buf [2352]byte; use(buf[:]); return Stackguard() }
-func stack2356() (uintptr, uintptr) { var buf [2356]byte; use(buf[:]); return Stackguard() }
-func stack2360() (uintptr, uintptr) { var buf [2360]byte; use(buf[:]); return Stackguard() }
-func stack2364() (uintptr, uintptr) { var buf [2364]byte; use(buf[:]); return Stackguard() }
-func stack2368() (uintptr, uintptr) { var buf [2368]byte; use(buf[:]); return Stackguard() }
-func stack2372() (uintptr, uintptr) { var buf [2372]byte; use(buf[:]); return Stackguard() }
-func stack2376() (uintptr, uintptr) { var buf [2376]byte; use(buf[:]); return Stackguard() }
-func stack2380() (uintptr, uintptr) { var buf [2380]byte; use(buf[:]); return Stackguard() }
-func stack2384() (uintptr, uintptr) { var buf [2384]byte; use(buf[:]); return Stackguard() }
-func stack2388() (uintptr, uintptr) { var buf [2388]byte; use(buf[:]); return Stackguard() }
-func stack2392() (uintptr, uintptr) { var buf [2392]byte; use(buf[:]); return Stackguard() }
-func stack2396() (uintptr, uintptr) { var buf [2396]byte; use(buf[:]); return Stackguard() }
-func stack2400() (uintptr, uintptr) { var buf [2400]byte; use(buf[:]); return Stackguard() }
-func stack2404() (uintptr, uintptr) { var buf [2404]byte; use(buf[:]); return Stackguard() }
-func stack2408() (uintptr, uintptr) { var buf [2408]byte; use(buf[:]); return Stackguard() }
-func stack2412() (uintptr, uintptr) { var buf [2412]byte; use(buf[:]); return Stackguard() }
-func stack2416() (uintptr, uintptr) { var buf [2416]byte; use(buf[:]); return Stackguard() }
-func stack2420() (uintptr, uintptr) { var buf [2420]byte; use(buf[:]); return Stackguard() }
-func stack2424() (uintptr, uintptr) { var buf [2424]byte; use(buf[:]); return Stackguard() }
-func stack2428() (uintptr, uintptr) { var buf [2428]byte; use(buf[:]); return Stackguard() }
-func stack2432() (uintptr, uintptr) { var buf [2432]byte; use(buf[:]); return Stackguard() }
-func stack2436() (uintptr, uintptr) { var buf [2436]byte; use(buf[:]); return Stackguard() }
-func stack2440() (uintptr, uintptr) { var buf [2440]byte; use(buf[:]); return Stackguard() }
-func stack2444() (uintptr, uintptr) { var buf [2444]byte; use(buf[:]); return Stackguard() }
-func stack2448() (uintptr, uintptr) { var buf [2448]byte; use(buf[:]); return Stackguard() }
-func stack2452() (uintptr, uintptr) { var buf [2452]byte; use(buf[:]); return Stackguard() }
-func stack2456() (uintptr, uintptr) { var buf [2456]byte; use(buf[:]); return Stackguard() }
-func stack2460() (uintptr, uintptr) { var buf [2460]byte; use(buf[:]); return Stackguard() }
-func stack2464() (uintptr, uintptr) { var buf [2464]byte; use(buf[:]); return Stackguard() }
-func stack2468() (uintptr, uintptr) { var buf [2468]byte; use(buf[:]); return Stackguard() }
-func stack2472() (uintptr, uintptr) { var buf [2472]byte; use(buf[:]); return Stackguard() }
-func stack2476() (uintptr, uintptr) { var buf [2476]byte; use(buf[:]); return Stackguard() }
-func stack2480() (uintptr, uintptr) { var buf [2480]byte; use(buf[:]); return Stackguard() }
-func stack2484() (uintptr, uintptr) { var buf [2484]byte; use(buf[:]); return Stackguard() }
-func stack2488() (uintptr, uintptr) { var buf [2488]byte; use(buf[:]); return Stackguard() }
-func stack2492() (uintptr, uintptr) { var buf [2492]byte; use(buf[:]); return Stackguard() }
-func stack2496() (uintptr, uintptr) { var buf [2496]byte; use(buf[:]); return Stackguard() }
-func stack2500() (uintptr, uintptr) { var buf [2500]byte; use(buf[:]); return Stackguard() }
-func stack2504() (uintptr, uintptr) { var buf [2504]byte; use(buf[:]); return Stackguard() }
-func stack2508() (uintptr, uintptr) { var buf [2508]byte; use(buf[:]); return Stackguard() }
-func stack2512() (uintptr, uintptr) { var buf [2512]byte; use(buf[:]); return Stackguard() }
-func stack2516() (uintptr, uintptr) { var buf [2516]byte; use(buf[:]); return Stackguard() }
-func stack2520() (uintptr, uintptr) { var buf [2520]byte; use(buf[:]); return Stackguard() }
-func stack2524() (uintptr, uintptr) { var buf [2524]byte; use(buf[:]); return Stackguard() }
-func stack2528() (uintptr, uintptr) { var buf [2528]byte; use(buf[:]); return Stackguard() }
-func stack2532() (uintptr, uintptr) { var buf [2532]byte; use(buf[:]); return Stackguard() }
-func stack2536() (uintptr, uintptr) { var buf [2536]byte; use(buf[:]); return Stackguard() }
-func stack2540() (uintptr, uintptr) { var buf [2540]byte; use(buf[:]); return Stackguard() }
-func stack2544() (uintptr, uintptr) { var buf [2544]byte; use(buf[:]); return Stackguard() }
-func stack2548() (uintptr, uintptr) { var buf [2548]byte; use(buf[:]); return Stackguard() }
-func stack2552() (uintptr, uintptr) { var buf [2552]byte; use(buf[:]); return Stackguard() }
-func stack2556() (uintptr, uintptr) { var buf [2556]byte; use(buf[:]); return Stackguard() }
-func stack2560() (uintptr, uintptr) { var buf [2560]byte; use(buf[:]); return Stackguard() }
-func stack2564() (uintptr, uintptr) { var buf [2564]byte; use(buf[:]); return Stackguard() }
-func stack2568() (uintptr, uintptr) { var buf [2568]byte; use(buf[:]); return Stackguard() }
-func stack2572() (uintptr, uintptr) { var buf [2572]byte; use(buf[:]); return Stackguard() }
-func stack2576() (uintptr, uintptr) { var buf [2576]byte; use(buf[:]); return Stackguard() }
-func stack2580() (uintptr, uintptr) { var buf [2580]byte; use(buf[:]); return Stackguard() }
-func stack2584() (uintptr, uintptr) { var buf [2584]byte; use(buf[:]); return Stackguard() }
-func stack2588() (uintptr, uintptr) { var buf [2588]byte; use(buf[:]); return Stackguard() }
-func stack2592() (uintptr, uintptr) { var buf [2592]byte; use(buf[:]); return Stackguard() }
-func stack2596() (uintptr, uintptr) { var buf [2596]byte; use(buf[:]); return Stackguard() }
-func stack2600() (uintptr, uintptr) { var buf [2600]byte; use(buf[:]); return Stackguard() }
-func stack2604() (uintptr, uintptr) { var buf [2604]byte; use(buf[:]); return Stackguard() }
-func stack2608() (uintptr, uintptr) { var buf [2608]byte; use(buf[:]); return Stackguard() }
-func stack2612() (uintptr, uintptr) { var buf [2612]byte; use(buf[:]); return Stackguard() }
-func stack2616() (uintptr, uintptr) { var buf [2616]byte; use(buf[:]); return Stackguard() }
-func stack2620() (uintptr, uintptr) { var buf [2620]byte; use(buf[:]); return Stackguard() }
-func stack2624() (uintptr, uintptr) { var buf [2624]byte; use(buf[:]); return Stackguard() }
-func stack2628() (uintptr, uintptr) { var buf [2628]byte; use(buf[:]); return Stackguard() }
-func stack2632() (uintptr, uintptr) { var buf [2632]byte; use(buf[:]); return Stackguard() }
-func stack2636() (uintptr, uintptr) { var buf [2636]byte; use(buf[:]); return Stackguard() }
-func stack2640() (uintptr, uintptr) { var buf [2640]byte; use(buf[:]); return Stackguard() }
-func stack2644() (uintptr, uintptr) { var buf [2644]byte; use(buf[:]); return Stackguard() }
-func stack2648() (uintptr, uintptr) { var buf [2648]byte; use(buf[:]); return Stackguard() }
-func stack2652() (uintptr, uintptr) { var buf [2652]byte; use(buf[:]); return Stackguard() }
-func stack2656() (uintptr, uintptr) { var buf [2656]byte; use(buf[:]); return Stackguard() }
-func stack2660() (uintptr, uintptr) { var buf [2660]byte; use(buf[:]); return Stackguard() }
-func stack2664() (uintptr, uintptr) { var buf [2664]byte; use(buf[:]); return Stackguard() }
-func stack2668() (uintptr, uintptr) { var buf [2668]byte; use(buf[:]); return Stackguard() }
-func stack2672() (uintptr, uintptr) { var buf [2672]byte; use(buf[:]); return Stackguard() }
-func stack2676() (uintptr, uintptr) { var buf [2676]byte; use(buf[:]); return Stackguard() }
-func stack2680() (uintptr, uintptr) { var buf [2680]byte; use(buf[:]); return Stackguard() }
-func stack2684() (uintptr, uintptr) { var buf [2684]byte; use(buf[:]); return Stackguard() }
-func stack2688() (uintptr, uintptr) { var buf [2688]byte; use(buf[:]); return Stackguard() }
-func stack2692() (uintptr, uintptr) { var buf [2692]byte; use(buf[:]); return Stackguard() }
-func stack2696() (uintptr, uintptr) { var buf [2696]byte; use(buf[:]); return Stackguard() }
-func stack2700() (uintptr, uintptr) { var buf [2700]byte; use(buf[:]); return Stackguard() }
-func stack2704() (uintptr, uintptr) { var buf [2704]byte; use(buf[:]); return Stackguard() }
-func stack2708() (uintptr, uintptr) { var buf [2708]byte; use(buf[:]); return Stackguard() }
-func stack2712() (uintptr, uintptr) { var buf [2712]byte; use(buf[:]); return Stackguard() }
-func stack2716() (uintptr, uintptr) { var buf [2716]byte; use(buf[:]); return Stackguard() }
-func stack2720() (uintptr, uintptr) { var buf [2720]byte; use(buf[:]); return Stackguard() }
-func stack2724() (uintptr, uintptr) { var buf [2724]byte; use(buf[:]); return Stackguard() }
-func stack2728() (uintptr, uintptr) { var buf [2728]byte; use(buf[:]); return Stackguard() }
-func stack2732() (uintptr, uintptr) { var buf [2732]byte; use(buf[:]); return Stackguard() }
-func stack2736() (uintptr, uintptr) { var buf [2736]byte; use(buf[:]); return Stackguard() }
-func stack2740() (uintptr, uintptr) { var buf [2740]byte; use(buf[:]); return Stackguard() }
-func stack2744() (uintptr, uintptr) { var buf [2744]byte; use(buf[:]); return Stackguard() }
-func stack2748() (uintptr, uintptr) { var buf [2748]byte; use(buf[:]); return Stackguard() }
-func stack2752() (uintptr, uintptr) { var buf [2752]byte; use(buf[:]); return Stackguard() }
-func stack2756() (uintptr, uintptr) { var buf [2756]byte; use(buf[:]); return Stackguard() }
-func stack2760() (uintptr, uintptr) { var buf [2760]byte; use(buf[:]); return Stackguard() }
-func stack2764() (uintptr, uintptr) { var buf [2764]byte; use(buf[:]); return Stackguard() }
-func stack2768() (uintptr, uintptr) { var buf [2768]byte; use(buf[:]); return Stackguard() }
-func stack2772() (uintptr, uintptr) { var buf [2772]byte; use(buf[:]); return Stackguard() }
-func stack2776() (uintptr, uintptr) { var buf [2776]byte; use(buf[:]); return Stackguard() }
-func stack2780() (uintptr, uintptr) { var buf [2780]byte; use(buf[:]); return Stackguard() }
-func stack2784() (uintptr, uintptr) { var buf [2784]byte; use(buf[:]); return Stackguard() }
-func stack2788() (uintptr, uintptr) { var buf [2788]byte; use(buf[:]); return Stackguard() }
-func stack2792() (uintptr, uintptr) { var buf [2792]byte; use(buf[:]); return Stackguard() }
-func stack2796() (uintptr, uintptr) { var buf [2796]byte; use(buf[:]); return Stackguard() }
-func stack2800() (uintptr, uintptr) { var buf [2800]byte; use(buf[:]); return Stackguard() }
-func stack2804() (uintptr, uintptr) { var buf [2804]byte; use(buf[:]); return Stackguard() }
-func stack2808() (uintptr, uintptr) { var buf [2808]byte; use(buf[:]); return Stackguard() }
-func stack2812() (uintptr, uintptr) { var buf [2812]byte; use(buf[:]); return Stackguard() }
-func stack2816() (uintptr, uintptr) { var buf [2816]byte; use(buf[:]); return Stackguard() }
-func stack2820() (uintptr, uintptr) { var buf [2820]byte; use(buf[:]); return Stackguard() }
-func stack2824() (uintptr, uintptr) { var buf [2824]byte; use(buf[:]); return Stackguard() }
-func stack2828() (uintptr, uintptr) { var buf [2828]byte; use(buf[:]); return Stackguard() }
-func stack2832() (uintptr, uintptr) { var buf [2832]byte; use(buf[:]); return Stackguard() }
-func stack2836() (uintptr, uintptr) { var buf [2836]byte; use(buf[:]); return Stackguard() }
-func stack2840() (uintptr, uintptr) { var buf [2840]byte; use(buf[:]); return Stackguard() }
-func stack2844() (uintptr, uintptr) { var buf [2844]byte; use(buf[:]); return Stackguard() }
-func stack2848() (uintptr, uintptr) { var buf [2848]byte; use(buf[:]); return Stackguard() }
-func stack2852() (uintptr, uintptr) { var buf [2852]byte; use(buf[:]); return Stackguard() }
-func stack2856() (uintptr, uintptr) { var buf [2856]byte; use(buf[:]); return Stackguard() }
-func stack2860() (uintptr, uintptr) { var buf [2860]byte; use(buf[:]); return Stackguard() }
-func stack2864() (uintptr, uintptr) { var buf [2864]byte; use(buf[:]); return Stackguard() }
-func stack2868() (uintptr, uintptr) { var buf [2868]byte; use(buf[:]); return Stackguard() }
-func stack2872() (uintptr, uintptr) { var buf [2872]byte; use(buf[:]); return Stackguard() }
-func stack2876() (uintptr, uintptr) { var buf [2876]byte; use(buf[:]); return Stackguard() }
-func stack2880() (uintptr, uintptr) { var buf [2880]byte; use(buf[:]); return Stackguard() }
-func stack2884() (uintptr, uintptr) { var buf [2884]byte; use(buf[:]); return Stackguard() }
-func stack2888() (uintptr, uintptr) { var buf [2888]byte; use(buf[:]); return Stackguard() }
-func stack2892() (uintptr, uintptr) { var buf [2892]byte; use(buf[:]); return Stackguard() }
-func stack2896() (uintptr, uintptr) { var buf [2896]byte; use(buf[:]); return Stackguard() }
-func stack2900() (uintptr, uintptr) { var buf [2900]byte; use(buf[:]); return Stackguard() }
-func stack2904() (uintptr, uintptr) { var buf [2904]byte; use(buf[:]); return Stackguard() }
-func stack2908() (uintptr, uintptr) { var buf [2908]byte; use(buf[:]); return Stackguard() }
-func stack2912() (uintptr, uintptr) { var buf [2912]byte; use(buf[:]); return Stackguard() }
-func stack2916() (uintptr, uintptr) { var buf [2916]byte; use(buf[:]); return Stackguard() }
-func stack2920() (uintptr, uintptr) { var buf [2920]byte; use(buf[:]); return Stackguard() }
-func stack2924() (uintptr, uintptr) { var buf [2924]byte; use(buf[:]); return Stackguard() }
-func stack2928() (uintptr, uintptr) { var buf [2928]byte; use(buf[:]); return Stackguard() }
-func stack2932() (uintptr, uintptr) { var buf [2932]byte; use(buf[:]); return Stackguard() }
-func stack2936() (uintptr, uintptr) { var buf [2936]byte; use(buf[:]); return Stackguard() }
-func stack2940() (uintptr, uintptr) { var buf [2940]byte; use(buf[:]); return Stackguard() }
-func stack2944() (uintptr, uintptr) { var buf [2944]byte; use(buf[:]); return Stackguard() }
-func stack2948() (uintptr, uintptr) { var buf [2948]byte; use(buf[:]); return Stackguard() }
-func stack2952() (uintptr, uintptr) { var buf [2952]byte; use(buf[:]); return Stackguard() }
-func stack2956() (uintptr, uintptr) { var buf [2956]byte; use(buf[:]); return Stackguard() }
-func stack2960() (uintptr, uintptr) { var buf [2960]byte; use(buf[:]); return Stackguard() }
-func stack2964() (uintptr, uintptr) { var buf [2964]byte; use(buf[:]); return Stackguard() }
-func stack2968() (uintptr, uintptr) { var buf [2968]byte; use(buf[:]); return Stackguard() }
-func stack2972() (uintptr, uintptr) { var buf [2972]byte; use(buf[:]); return Stackguard() }
-func stack2976() (uintptr, uintptr) { var buf [2976]byte; use(buf[:]); return Stackguard() }
-func stack2980() (uintptr, uintptr) { var buf [2980]byte; use(buf[:]); return Stackguard() }
-func stack2984() (uintptr, uintptr) { var buf [2984]byte; use(buf[:]); return Stackguard() }
-func stack2988() (uintptr, uintptr) { var buf [2988]byte; use(buf[:]); return Stackguard() }
-func stack2992() (uintptr, uintptr) { var buf [2992]byte; use(buf[:]); return Stackguard() }
-func stack2996() (uintptr, uintptr) { var buf [2996]byte; use(buf[:]); return Stackguard() }
-func stack3000() (uintptr, uintptr) { var buf [3000]byte; use(buf[:]); return Stackguard() }
-func stack3004() (uintptr, uintptr) { var buf [3004]byte; use(buf[:]); return Stackguard() }
-func stack3008() (uintptr, uintptr) { var buf [3008]byte; use(buf[:]); return Stackguard() }
-func stack3012() (uintptr, uintptr) { var buf [3012]byte; use(buf[:]); return Stackguard() }
-func stack3016() (uintptr, uintptr) { var buf [3016]byte; use(buf[:]); return Stackguard() }
-func stack3020() (uintptr, uintptr) { var buf [3020]byte; use(buf[:]); return Stackguard() }
-func stack3024() (uintptr, uintptr) { var buf [3024]byte; use(buf[:]); return Stackguard() }
-func stack3028() (uintptr, uintptr) { var buf [3028]byte; use(buf[:]); return Stackguard() }
-func stack3032() (uintptr, uintptr) { var buf [3032]byte; use(buf[:]); return Stackguard() }
-func stack3036() (uintptr, uintptr) { var buf [3036]byte; use(buf[:]); return Stackguard() }
-func stack3040() (uintptr, uintptr) { var buf [3040]byte; use(buf[:]); return Stackguard() }
-func stack3044() (uintptr, uintptr) { var buf [3044]byte; use(buf[:]); return Stackguard() }
-func stack3048() (uintptr, uintptr) { var buf [3048]byte; use(buf[:]); return Stackguard() }
-func stack3052() (uintptr, uintptr) { var buf [3052]byte; use(buf[:]); return Stackguard() }
-func stack3056() (uintptr, uintptr) { var buf [3056]byte; use(buf[:]); return Stackguard() }
-func stack3060() (uintptr, uintptr) { var buf [3060]byte; use(buf[:]); return Stackguard() }
-func stack3064() (uintptr, uintptr) { var buf [3064]byte; use(buf[:]); return Stackguard() }
-func stack3068() (uintptr, uintptr) { var buf [3068]byte; use(buf[:]); return Stackguard() }
-func stack3072() (uintptr, uintptr) { var buf [3072]byte; use(buf[:]); return Stackguard() }
-func stack3076() (uintptr, uintptr) { var buf [3076]byte; use(buf[:]); return Stackguard() }
-func stack3080() (uintptr, uintptr) { var buf [3080]byte; use(buf[:]); return Stackguard() }
-func stack3084() (uintptr, uintptr) { var buf [3084]byte; use(buf[:]); return Stackguard() }
-func stack3088() (uintptr, uintptr) { var buf [3088]byte; use(buf[:]); return Stackguard() }
-func stack3092() (uintptr, uintptr) { var buf [3092]byte; use(buf[:]); return Stackguard() }
-func stack3096() (uintptr, uintptr) { var buf [3096]byte; use(buf[:]); return Stackguard() }
-func stack3100() (uintptr, uintptr) { var buf [3100]byte; use(buf[:]); return Stackguard() }
-func stack3104() (uintptr, uintptr) { var buf [3104]byte; use(buf[:]); return Stackguard() }
-func stack3108() (uintptr, uintptr) { var buf [3108]byte; use(buf[:]); return Stackguard() }
-func stack3112() (uintptr, uintptr) { var buf [3112]byte; use(buf[:]); return Stackguard() }
-func stack3116() (uintptr, uintptr) { var buf [3116]byte; use(buf[:]); return Stackguard() }
-func stack3120() (uintptr, uintptr) { var buf [3120]byte; use(buf[:]); return Stackguard() }
-func stack3124() (uintptr, uintptr) { var buf [3124]byte; use(buf[:]); return Stackguard() }
-func stack3128() (uintptr, uintptr) { var buf [3128]byte; use(buf[:]); return Stackguard() }
-func stack3132() (uintptr, uintptr) { var buf [3132]byte; use(buf[:]); return Stackguard() }
-func stack3136() (uintptr, uintptr) { var buf [3136]byte; use(buf[:]); return Stackguard() }
-func stack3140() (uintptr, uintptr) { var buf [3140]byte; use(buf[:]); return Stackguard() }
-func stack3144() (uintptr, uintptr) { var buf [3144]byte; use(buf[:]); return Stackguard() }
-func stack3148() (uintptr, uintptr) { var buf [3148]byte; use(buf[:]); return Stackguard() }
-func stack3152() (uintptr, uintptr) { var buf [3152]byte; use(buf[:]); return Stackguard() }
-func stack3156() (uintptr, uintptr) { var buf [3156]byte; use(buf[:]); return Stackguard() }
-func stack3160() (uintptr, uintptr) { var buf [3160]byte; use(buf[:]); return Stackguard() }
-func stack3164() (uintptr, uintptr) { var buf [3164]byte; use(buf[:]); return Stackguard() }
-func stack3168() (uintptr, uintptr) { var buf [3168]byte; use(buf[:]); return Stackguard() }
-func stack3172() (uintptr, uintptr) { var buf [3172]byte; use(buf[:]); return Stackguard() }
-func stack3176() (uintptr, uintptr) { var buf [3176]byte; use(buf[:]); return Stackguard() }
-func stack3180() (uintptr, uintptr) { var buf [3180]byte; use(buf[:]); return Stackguard() }
-func stack3184() (uintptr, uintptr) { var buf [3184]byte; use(buf[:]); return Stackguard() }
-func stack3188() (uintptr, uintptr) { var buf [3188]byte; use(buf[:]); return Stackguard() }
-func stack3192() (uintptr, uintptr) { var buf [3192]byte; use(buf[:]); return Stackguard() }
-func stack3196() (uintptr, uintptr) { var buf [3196]byte; use(buf[:]); return Stackguard() }
-func stack3200() (uintptr, uintptr) { var buf [3200]byte; use(buf[:]); return Stackguard() }
-func stack3204() (uintptr, uintptr) { var buf [3204]byte; use(buf[:]); return Stackguard() }
-func stack3208() (uintptr, uintptr) { var buf [3208]byte; use(buf[:]); return Stackguard() }
-func stack3212() (uintptr, uintptr) { var buf [3212]byte; use(buf[:]); return Stackguard() }
-func stack3216() (uintptr, uintptr) { var buf [3216]byte; use(buf[:]); return Stackguard() }
-func stack3220() (uintptr, uintptr) { var buf [3220]byte; use(buf[:]); return Stackguard() }
-func stack3224() (uintptr, uintptr) { var buf [3224]byte; use(buf[:]); return Stackguard() }
-func stack3228() (uintptr, uintptr) { var buf [3228]byte; use(buf[:]); return Stackguard() }
-func stack3232() (uintptr, uintptr) { var buf [3232]byte; use(buf[:]); return Stackguard() }
-func stack3236() (uintptr, uintptr) { var buf [3236]byte; use(buf[:]); return Stackguard() }
-func stack3240() (uintptr, uintptr) { var buf [3240]byte; use(buf[:]); return Stackguard() }
-func stack3244() (uintptr, uintptr) { var buf [3244]byte; use(buf[:]); return Stackguard() }
-func stack3248() (uintptr, uintptr) { var buf [3248]byte; use(buf[:]); return Stackguard() }
-func stack3252() (uintptr, uintptr) { var buf [3252]byte; use(buf[:]); return Stackguard() }
-func stack3256() (uintptr, uintptr) { var buf [3256]byte; use(buf[:]); return Stackguard() }
-func stack3260() (uintptr, uintptr) { var buf [3260]byte; use(buf[:]); return Stackguard() }
-func stack3264() (uintptr, uintptr) { var buf [3264]byte; use(buf[:]); return Stackguard() }
-func stack3268() (uintptr, uintptr) { var buf [3268]byte; use(buf[:]); return Stackguard() }
-func stack3272() (uintptr, uintptr) { var buf [3272]byte; use(buf[:]); return Stackguard() }
-func stack3276() (uintptr, uintptr) { var buf [3276]byte; use(buf[:]); return Stackguard() }
-func stack3280() (uintptr, uintptr) { var buf [3280]byte; use(buf[:]); return Stackguard() }
-func stack3284() (uintptr, uintptr) { var buf [3284]byte; use(buf[:]); return Stackguard() }
-func stack3288() (uintptr, uintptr) { var buf [3288]byte; use(buf[:]); return Stackguard() }
-func stack3292() (uintptr, uintptr) { var buf [3292]byte; use(buf[:]); return Stackguard() }
-func stack3296() (uintptr, uintptr) { var buf [3296]byte; use(buf[:]); return Stackguard() }
-func stack3300() (uintptr, uintptr) { var buf [3300]byte; use(buf[:]); return Stackguard() }
-func stack3304() (uintptr, uintptr) { var buf [3304]byte; use(buf[:]); return Stackguard() }
-func stack3308() (uintptr, uintptr) { var buf [3308]byte; use(buf[:]); return Stackguard() }
-func stack3312() (uintptr, uintptr) { var buf [3312]byte; use(buf[:]); return Stackguard() }
-func stack3316() (uintptr, uintptr) { var buf [3316]byte; use(buf[:]); return Stackguard() }
-func stack3320() (uintptr, uintptr) { var buf [3320]byte; use(buf[:]); return Stackguard() }
-func stack3324() (uintptr, uintptr) { var buf [3324]byte; use(buf[:]); return Stackguard() }
-func stack3328() (uintptr, uintptr) { var buf [3328]byte; use(buf[:]); return Stackguard() }
-func stack3332() (uintptr, uintptr) { var buf [3332]byte; use(buf[:]); return Stackguard() }
-func stack3336() (uintptr, uintptr) { var buf [3336]byte; use(buf[:]); return Stackguard() }
-func stack3340() (uintptr, uintptr) { var buf [3340]byte; use(buf[:]); return Stackguard() }
-func stack3344() (uintptr, uintptr) { var buf [3344]byte; use(buf[:]); return Stackguard() }
-func stack3348() (uintptr, uintptr) { var buf [3348]byte; use(buf[:]); return Stackguard() }
-func stack3352() (uintptr, uintptr) { var buf [3352]byte; use(buf[:]); return Stackguard() }
-func stack3356() (uintptr, uintptr) { var buf [3356]byte; use(buf[:]); return Stackguard() }
-func stack3360() (uintptr, uintptr) { var buf [3360]byte; use(buf[:]); return Stackguard() }
-func stack3364() (uintptr, uintptr) { var buf [3364]byte; use(buf[:]); return Stackguard() }
-func stack3368() (uintptr, uintptr) { var buf [3368]byte; use(buf[:]); return Stackguard() }
-func stack3372() (uintptr, uintptr) { var buf [3372]byte; use(buf[:]); return Stackguard() }
-func stack3376() (uintptr, uintptr) { var buf [3376]byte; use(buf[:]); return Stackguard() }
-func stack3380() (uintptr, uintptr) { var buf [3380]byte; use(buf[:]); return Stackguard() }
-func stack3384() (uintptr, uintptr) { var buf [3384]byte; use(buf[:]); return Stackguard() }
-func stack3388() (uintptr, uintptr) { var buf [3388]byte; use(buf[:]); return Stackguard() }
-func stack3392() (uintptr, uintptr) { var buf [3392]byte; use(buf[:]); return Stackguard() }
-func stack3396() (uintptr, uintptr) { var buf [3396]byte; use(buf[:]); return Stackguard() }
-func stack3400() (uintptr, uintptr) { var buf [3400]byte; use(buf[:]); return Stackguard() }
-func stack3404() (uintptr, uintptr) { var buf [3404]byte; use(buf[:]); return Stackguard() }
-func stack3408() (uintptr, uintptr) { var buf [3408]byte; use(buf[:]); return Stackguard() }
-func stack3412() (uintptr, uintptr) { var buf [3412]byte; use(buf[:]); return Stackguard() }
-func stack3416() (uintptr, uintptr) { var buf [3416]byte; use(buf[:]); return Stackguard() }
-func stack3420() (uintptr, uintptr) { var buf [3420]byte; use(buf[:]); return Stackguard() }
-func stack3424() (uintptr, uintptr) { var buf [3424]byte; use(buf[:]); return Stackguard() }
-func stack3428() (uintptr, uintptr) { var buf [3428]byte; use(buf[:]); return Stackguard() }
-func stack3432() (uintptr, uintptr) { var buf [3432]byte; use(buf[:]); return Stackguard() }
-func stack3436() (uintptr, uintptr) { var buf [3436]byte; use(buf[:]); return Stackguard() }
-func stack3440() (uintptr, uintptr) { var buf [3440]byte; use(buf[:]); return Stackguard() }
-func stack3444() (uintptr, uintptr) { var buf [3444]byte; use(buf[:]); return Stackguard() }
-func stack3448() (uintptr, uintptr) { var buf [3448]byte; use(buf[:]); return Stackguard() }
-func stack3452() (uintptr, uintptr) { var buf [3452]byte; use(buf[:]); return Stackguard() }
-func stack3456() (uintptr, uintptr) { var buf [3456]byte; use(buf[:]); return Stackguard() }
-func stack3460() (uintptr, uintptr) { var buf [3460]byte; use(buf[:]); return Stackguard() }
-func stack3464() (uintptr, uintptr) { var buf [3464]byte; use(buf[:]); return Stackguard() }
-func stack3468() (uintptr, uintptr) { var buf [3468]byte; use(buf[:]); return Stackguard() }
-func stack3472() (uintptr, uintptr) { var buf [3472]byte; use(buf[:]); return Stackguard() }
-func stack3476() (uintptr, uintptr) { var buf [3476]byte; use(buf[:]); return Stackguard() }
-func stack3480() (uintptr, uintptr) { var buf [3480]byte; use(buf[:]); return Stackguard() }
-func stack3484() (uintptr, uintptr) { var buf [3484]byte; use(buf[:]); return Stackguard() }
-func stack3488() (uintptr, uintptr) { var buf [3488]byte; use(buf[:]); return Stackguard() }
-func stack3492() (uintptr, uintptr) { var buf [3492]byte; use(buf[:]); return Stackguard() }
-func stack3496() (uintptr, uintptr) { var buf [3496]byte; use(buf[:]); return Stackguard() }
-func stack3500() (uintptr, uintptr) { var buf [3500]byte; use(buf[:]); return Stackguard() }
-func stack3504() (uintptr, uintptr) { var buf [3504]byte; use(buf[:]); return Stackguard() }
-func stack3508() (uintptr, uintptr) { var buf [3508]byte; use(buf[:]); return Stackguard() }
-func stack3512() (uintptr, uintptr) { var buf [3512]byte; use(buf[:]); return Stackguard() }
-func stack3516() (uintptr, uintptr) { var buf [3516]byte; use(buf[:]); return Stackguard() }
-func stack3520() (uintptr, uintptr) { var buf [3520]byte; use(buf[:]); return Stackguard() }
-func stack3524() (uintptr, uintptr) { var buf [3524]byte; use(buf[:]); return Stackguard() }
-func stack3528() (uintptr, uintptr) { var buf [3528]byte; use(buf[:]); return Stackguard() }
-func stack3532() (uintptr, uintptr) { var buf [3532]byte; use(buf[:]); return Stackguard() }
-func stack3536() (uintptr, uintptr) { var buf [3536]byte; use(buf[:]); return Stackguard() }
-func stack3540() (uintptr, uintptr) { var buf [3540]byte; use(buf[:]); return Stackguard() }
-func stack3544() (uintptr, uintptr) { var buf [3544]byte; use(buf[:]); return Stackguard() }
-func stack3548() (uintptr, uintptr) { var buf [3548]byte; use(buf[:]); return Stackguard() }
-func stack3552() (uintptr, uintptr) { var buf [3552]byte; use(buf[:]); return Stackguard() }
-func stack3556() (uintptr, uintptr) { var buf [3556]byte; use(buf[:]); return Stackguard() }
-func stack3560() (uintptr, uintptr) { var buf [3560]byte; use(buf[:]); return Stackguard() }
-func stack3564() (uintptr, uintptr) { var buf [3564]byte; use(buf[:]); return Stackguard() }
-func stack3568() (uintptr, uintptr) { var buf [3568]byte; use(buf[:]); return Stackguard() }
-func stack3572() (uintptr, uintptr) { var buf [3572]byte; use(buf[:]); return Stackguard() }
-func stack3576() (uintptr, uintptr) { var buf [3576]byte; use(buf[:]); return Stackguard() }
-func stack3580() (uintptr, uintptr) { var buf [3580]byte; use(buf[:]); return Stackguard() }
-func stack3584() (uintptr, uintptr) { var buf [3584]byte; use(buf[:]); return Stackguard() }
-func stack3588() (uintptr, uintptr) { var buf [3588]byte; use(buf[:]); return Stackguard() }
-func stack3592() (uintptr, uintptr) { var buf [3592]byte; use(buf[:]); return Stackguard() }
-func stack3596() (uintptr, uintptr) { var buf [3596]byte; use(buf[:]); return Stackguard() }
-func stack3600() (uintptr, uintptr) { var buf [3600]byte; use(buf[:]); return Stackguard() }
-func stack3604() (uintptr, uintptr) { var buf [3604]byte; use(buf[:]); return Stackguard() }
-func stack3608() (uintptr, uintptr) { var buf [3608]byte; use(buf[:]); return Stackguard() }
-func stack3612() (uintptr, uintptr) { var buf [3612]byte; use(buf[:]); return Stackguard() }
-func stack3616() (uintptr, uintptr) { var buf [3616]byte; use(buf[:]); return Stackguard() }
-func stack3620() (uintptr, uintptr) { var buf [3620]byte; use(buf[:]); return Stackguard() }
-func stack3624() (uintptr, uintptr) { var buf [3624]byte; use(buf[:]); return Stackguard() }
-func stack3628() (uintptr, uintptr) { var buf [3628]byte; use(buf[:]); return Stackguard() }
-func stack3632() (uintptr, uintptr) { var buf [3632]byte; use(buf[:]); return Stackguard() }
-func stack3636() (uintptr, uintptr) { var buf [3636]byte; use(buf[:]); return Stackguard() }
-func stack3640() (uintptr, uintptr) { var buf [3640]byte; use(buf[:]); return Stackguard() }
-func stack3644() (uintptr, uintptr) { var buf [3644]byte; use(buf[:]); return Stackguard() }
-func stack3648() (uintptr, uintptr) { var buf [3648]byte; use(buf[:]); return Stackguard() }
-func stack3652() (uintptr, uintptr) { var buf [3652]byte; use(buf[:]); return Stackguard() }
-func stack3656() (uintptr, uintptr) { var buf [3656]byte; use(buf[:]); return Stackguard() }
-func stack3660() (uintptr, uintptr) { var buf [3660]byte; use(buf[:]); return Stackguard() }
-func stack3664() (uintptr, uintptr) { var buf [3664]byte; use(buf[:]); return Stackguard() }
-func stack3668() (uintptr, uintptr) { var buf [3668]byte; use(buf[:]); return Stackguard() }
-func stack3672() (uintptr, uintptr) { var buf [3672]byte; use(buf[:]); return Stackguard() }
-func stack3676() (uintptr, uintptr) { var buf [3676]byte; use(buf[:]); return Stackguard() }
-func stack3680() (uintptr, uintptr) { var buf [3680]byte; use(buf[:]); return Stackguard() }
-func stack3684() (uintptr, uintptr) { var buf [3684]byte; use(buf[:]); return Stackguard() }
-func stack3688() (uintptr, uintptr) { var buf [3688]byte; use(buf[:]); return Stackguard() }
-func stack3692() (uintptr, uintptr) { var buf [3692]byte; use(buf[:]); return Stackguard() }
-func stack3696() (uintptr, uintptr) { var buf [3696]byte; use(buf[:]); return Stackguard() }
-func stack3700() (uintptr, uintptr) { var buf [3700]byte; use(buf[:]); return Stackguard() }
-func stack3704() (uintptr, uintptr) { var buf [3704]byte; use(buf[:]); return Stackguard() }
-func stack3708() (uintptr, uintptr) { var buf [3708]byte; use(buf[:]); return Stackguard() }
-func stack3712() (uintptr, uintptr) { var buf [3712]byte; use(buf[:]); return Stackguard() }
-func stack3716() (uintptr, uintptr) { var buf [3716]byte; use(buf[:]); return Stackguard() }
-func stack3720() (uintptr, uintptr) { var buf [3720]byte; use(buf[:]); return Stackguard() }
-func stack3724() (uintptr, uintptr) { var buf [3724]byte; use(buf[:]); return Stackguard() }
-func stack3728() (uintptr, uintptr) { var buf [3728]byte; use(buf[:]); return Stackguard() }
-func stack3732() (uintptr, uintptr) { var buf [3732]byte; use(buf[:]); return Stackguard() }
-func stack3736() (uintptr, uintptr) { var buf [3736]byte; use(buf[:]); return Stackguard() }
-func stack3740() (uintptr, uintptr) { var buf [3740]byte; use(buf[:]); return Stackguard() }
-func stack3744() (uintptr, uintptr) { var buf [3744]byte; use(buf[:]); return Stackguard() }
-func stack3748() (uintptr, uintptr) { var buf [3748]byte; use(buf[:]); return Stackguard() }
-func stack3752() (uintptr, uintptr) { var buf [3752]byte; use(buf[:]); return Stackguard() }
-func stack3756() (uintptr, uintptr) { var buf [3756]byte; use(buf[:]); return Stackguard() }
-func stack3760() (uintptr, uintptr) { var buf [3760]byte; use(buf[:]); return Stackguard() }
-func stack3764() (uintptr, uintptr) { var buf [3764]byte; use(buf[:]); return Stackguard() }
-func stack3768() (uintptr, uintptr) { var buf [3768]byte; use(buf[:]); return Stackguard() }
-func stack3772() (uintptr, uintptr) { var buf [3772]byte; use(buf[:]); return Stackguard() }
-func stack3776() (uintptr, uintptr) { var buf [3776]byte; use(buf[:]); return Stackguard() }
-func stack3780() (uintptr, uintptr) { var buf [3780]byte; use(buf[:]); return Stackguard() }
-func stack3784() (uintptr, uintptr) { var buf [3784]byte; use(buf[:]); return Stackguard() }
-func stack3788() (uintptr, uintptr) { var buf [3788]byte; use(buf[:]); return Stackguard() }
-func stack3792() (uintptr, uintptr) { var buf [3792]byte; use(buf[:]); return Stackguard() }
-func stack3796() (uintptr, uintptr) { var buf [3796]byte; use(buf[:]); return Stackguard() }
-func stack3800() (uintptr, uintptr) { var buf [3800]byte; use(buf[:]); return Stackguard() }
-func stack3804() (uintptr, uintptr) { var buf [3804]byte; use(buf[:]); return Stackguard() }
-func stack3808() (uintptr, uintptr) { var buf [3808]byte; use(buf[:]); return Stackguard() }
-func stack3812() (uintptr, uintptr) { var buf [3812]byte; use(buf[:]); return Stackguard() }
-func stack3816() (uintptr, uintptr) { var buf [3816]byte; use(buf[:]); return Stackguard() }
-func stack3820() (uintptr, uintptr) { var buf [3820]byte; use(buf[:]); return Stackguard() }
-func stack3824() (uintptr, uintptr) { var buf [3824]byte; use(buf[:]); return Stackguard() }
-func stack3828() (uintptr, uintptr) { var buf [3828]byte; use(buf[:]); return Stackguard() }
-func stack3832() (uintptr, uintptr) { var buf [3832]byte; use(buf[:]); return Stackguard() }
-func stack3836() (uintptr, uintptr) { var buf [3836]byte; use(buf[:]); return Stackguard() }
-func stack3840() (uintptr, uintptr) { var buf [3840]byte; use(buf[:]); return Stackguard() }
-func stack3844() (uintptr, uintptr) { var buf [3844]byte; use(buf[:]); return Stackguard() }
-func stack3848() (uintptr, uintptr) { var buf [3848]byte; use(buf[:]); return Stackguard() }
-func stack3852() (uintptr, uintptr) { var buf [3852]byte; use(buf[:]); return Stackguard() }
-func stack3856() (uintptr, uintptr) { var buf [3856]byte; use(buf[:]); return Stackguard() }
-func stack3860() (uintptr, uintptr) { var buf [3860]byte; use(buf[:]); return Stackguard() }
-func stack3864() (uintptr, uintptr) { var buf [3864]byte; use(buf[:]); return Stackguard() }
-func stack3868() (uintptr, uintptr) { var buf [3868]byte; use(buf[:]); return Stackguard() }
-func stack3872() (uintptr, uintptr) { var buf [3872]byte; use(buf[:]); return Stackguard() }
-func stack3876() (uintptr, uintptr) { var buf [3876]byte; use(buf[:]); return Stackguard() }
-func stack3880() (uintptr, uintptr) { var buf [3880]byte; use(buf[:]); return Stackguard() }
-func stack3884() (uintptr, uintptr) { var buf [3884]byte; use(buf[:]); return Stackguard() }
-func stack3888() (uintptr, uintptr) { var buf [3888]byte; use(buf[:]); return Stackguard() }
-func stack3892() (uintptr, uintptr) { var buf [3892]byte; use(buf[:]); return Stackguard() }
-func stack3896() (uintptr, uintptr) { var buf [3896]byte; use(buf[:]); return Stackguard() }
-func stack3900() (uintptr, uintptr) { var buf [3900]byte; use(buf[:]); return Stackguard() }
-func stack3904() (uintptr, uintptr) { var buf [3904]byte; use(buf[:]); return Stackguard() }
-func stack3908() (uintptr, uintptr) { var buf [3908]byte; use(buf[:]); return Stackguard() }
-func stack3912() (uintptr, uintptr) { var buf [3912]byte; use(buf[:]); return Stackguard() }
-func stack3916() (uintptr, uintptr) { var buf [3916]byte; use(buf[:]); return Stackguard() }
-func stack3920() (uintptr, uintptr) { var buf [3920]byte; use(buf[:]); return Stackguard() }
-func stack3924() (uintptr, uintptr) { var buf [3924]byte; use(buf[:]); return Stackguard() }
-func stack3928() (uintptr, uintptr) { var buf [3928]byte; use(buf[:]); return Stackguard() }
-func stack3932() (uintptr, uintptr) { var buf [3932]byte; use(buf[:]); return Stackguard() }
-func stack3936() (uintptr, uintptr) { var buf [3936]byte; use(buf[:]); return Stackguard() }
-func stack3940() (uintptr, uintptr) { var buf [3940]byte; use(buf[:]); return Stackguard() }
-func stack3944() (uintptr, uintptr) { var buf [3944]byte; use(buf[:]); return Stackguard() }
-func stack3948() (uintptr, uintptr) { var buf [3948]byte; use(buf[:]); return Stackguard() }
-func stack3952() (uintptr, uintptr) { var buf [3952]byte; use(buf[:]); return Stackguard() }
-func stack3956() (uintptr, uintptr) { var buf [3956]byte; use(buf[:]); return Stackguard() }
-func stack3960() (uintptr, uintptr) { var buf [3960]byte; use(buf[:]); return Stackguard() }
-func stack3964() (uintptr, uintptr) { var buf [3964]byte; use(buf[:]); return Stackguard() }
-func stack3968() (uintptr, uintptr) { var buf [3968]byte; use(buf[:]); return Stackguard() }
-func stack3972() (uintptr, uintptr) { var buf [3972]byte; use(buf[:]); return Stackguard() }
-func stack3976() (uintptr, uintptr) { var buf [3976]byte; use(buf[:]); return Stackguard() }
-func stack3980() (uintptr, uintptr) { var buf [3980]byte; use(buf[:]); return Stackguard() }
-func stack3984() (uintptr, uintptr) { var buf [3984]byte; use(buf[:]); return Stackguard() }
-func stack3988() (uintptr, uintptr) { var buf [3988]byte; use(buf[:]); return Stackguard() }
-func stack3992() (uintptr, uintptr) { var buf [3992]byte; use(buf[:]); return Stackguard() }
-func stack3996() (uintptr, uintptr) { var buf [3996]byte; use(buf[:]); return Stackguard() }
-func stack4000() (uintptr, uintptr) { var buf [4000]byte; use(buf[:]); return Stackguard() }
-func stack4004() (uintptr, uintptr) { var buf [4004]byte; use(buf[:]); return Stackguard() }
-func stack4008() (uintptr, uintptr) { var buf [4008]byte; use(buf[:]); return Stackguard() }
-func stack4012() (uintptr, uintptr) { var buf [4012]byte; use(buf[:]); return Stackguard() }
-func stack4016() (uintptr, uintptr) { var buf [4016]byte; use(buf[:]); return Stackguard() }
-func stack4020() (uintptr, uintptr) { var buf [4020]byte; use(buf[:]); return Stackguard() }
-func stack4024() (uintptr, uintptr) { var buf [4024]byte; use(buf[:]); return Stackguard() }
-func stack4028() (uintptr, uintptr) { var buf [4028]byte; use(buf[:]); return Stackguard() }
-func stack4032() (uintptr, uintptr) { var buf [4032]byte; use(buf[:]); return Stackguard() }
-func stack4036() (uintptr, uintptr) { var buf [4036]byte; use(buf[:]); return Stackguard() }
-func stack4040() (uintptr, uintptr) { var buf [4040]byte; use(buf[:]); return Stackguard() }
-func stack4044() (uintptr, uintptr) { var buf [4044]byte; use(buf[:]); return Stackguard() }
-func stack4048() (uintptr, uintptr) { var buf [4048]byte; use(buf[:]); return Stackguard() }
-func stack4052() (uintptr, uintptr) { var buf [4052]byte; use(buf[:]); return Stackguard() }
-func stack4056() (uintptr, uintptr) { var buf [4056]byte; use(buf[:]); return Stackguard() }
-func stack4060() (uintptr, uintptr) { var buf [4060]byte; use(buf[:]); return Stackguard() }
-func stack4064() (uintptr, uintptr) { var buf [4064]byte; use(buf[:]); return Stackguard() }
-func stack4068() (uintptr, uintptr) { var buf [4068]byte; use(buf[:]); return Stackguard() }
-func stack4072() (uintptr, uintptr) { var buf [4072]byte; use(buf[:]); return Stackguard() }
-func stack4076() (uintptr, uintptr) { var buf [4076]byte; use(buf[:]); return Stackguard() }
-func stack4080() (uintptr, uintptr) { var buf [4080]byte; use(buf[:]); return Stackguard() }
-func stack4084() (uintptr, uintptr) { var buf [4084]byte; use(buf[:]); return Stackguard() }
-func stack4088() (uintptr, uintptr) { var buf [4088]byte; use(buf[:]); return Stackguard() }
-func stack4092() (uintptr, uintptr) { var buf [4092]byte; use(buf[:]); return Stackguard() }
-func stack4096() (uintptr, uintptr) { var buf [4096]byte; use(buf[:]); return Stackguard() }
-func stack4100() (uintptr, uintptr) { var buf [4100]byte; use(buf[:]); return Stackguard() }
-func stack4104() (uintptr, uintptr) { var buf [4104]byte; use(buf[:]); return Stackguard() }
-func stack4108() (uintptr, uintptr) { var buf [4108]byte; use(buf[:]); return Stackguard() }
-func stack4112() (uintptr, uintptr) { var buf [4112]byte; use(buf[:]); return Stackguard() }
-func stack4116() (uintptr, uintptr) { var buf [4116]byte; use(buf[:]); return Stackguard() }
-func stack4120() (uintptr, uintptr) { var buf [4120]byte; use(buf[:]); return Stackguard() }
-func stack4124() (uintptr, uintptr) { var buf [4124]byte; use(buf[:]); return Stackguard() }
-func stack4128() (uintptr, uintptr) { var buf [4128]byte; use(buf[:]); return Stackguard() }
-func stack4132() (uintptr, uintptr) { var buf [4132]byte; use(buf[:]); return Stackguard() }
-func stack4136() (uintptr, uintptr) { var buf [4136]byte; use(buf[:]); return Stackguard() }
-func stack4140() (uintptr, uintptr) { var buf [4140]byte; use(buf[:]); return Stackguard() }
-func stack4144() (uintptr, uintptr) { var buf [4144]byte; use(buf[:]); return Stackguard() }
-func stack4148() (uintptr, uintptr) { var buf [4148]byte; use(buf[:]); return Stackguard() }
-func stack4152() (uintptr, uintptr) { var buf [4152]byte; use(buf[:]); return Stackguard() }
-func stack4156() (uintptr, uintptr) { var buf [4156]byte; use(buf[:]); return Stackguard() }
-func stack4160() (uintptr, uintptr) { var buf [4160]byte; use(buf[:]); return Stackguard() }
-func stack4164() (uintptr, uintptr) { var buf [4164]byte; use(buf[:]); return Stackguard() }
-func stack4168() (uintptr, uintptr) { var buf [4168]byte; use(buf[:]); return Stackguard() }
-func stack4172() (uintptr, uintptr) { var buf [4172]byte; use(buf[:]); return Stackguard() }
-func stack4176() (uintptr, uintptr) { var buf [4176]byte; use(buf[:]); return Stackguard() }
-func stack4180() (uintptr, uintptr) { var buf [4180]byte; use(buf[:]); return Stackguard() }
-func stack4184() (uintptr, uintptr) { var buf [4184]byte; use(buf[:]); return Stackguard() }
-func stack4188() (uintptr, uintptr) { var buf [4188]byte; use(buf[:]); return Stackguard() }
-func stack4192() (uintptr, uintptr) { var buf [4192]byte; use(buf[:]); return Stackguard() }
-func stack4196() (uintptr, uintptr) { var buf [4196]byte; use(buf[:]); return Stackguard() }
-func stack4200() (uintptr, uintptr) { var buf [4200]byte; use(buf[:]); return Stackguard() }
-func stack4204() (uintptr, uintptr) { var buf [4204]byte; use(buf[:]); return Stackguard() }
-func stack4208() (uintptr, uintptr) { var buf [4208]byte; use(buf[:]); return Stackguard() }
-func stack4212() (uintptr, uintptr) { var buf [4212]byte; use(buf[:]); return Stackguard() }
-func stack4216() (uintptr, uintptr) { var buf [4216]byte; use(buf[:]); return Stackguard() }
-func stack4220() (uintptr, uintptr) { var buf [4220]byte; use(buf[:]); return Stackguard() }
-func stack4224() (uintptr, uintptr) { var buf [4224]byte; use(buf[:]); return Stackguard() }
-func stack4228() (uintptr, uintptr) { var buf [4228]byte; use(buf[:]); return Stackguard() }
-func stack4232() (uintptr, uintptr) { var buf [4232]byte; use(buf[:]); return Stackguard() }
-func stack4236() (uintptr, uintptr) { var buf [4236]byte; use(buf[:]); return Stackguard() }
-func stack4240() (uintptr, uintptr) { var buf [4240]byte; use(buf[:]); return Stackguard() }
-func stack4244() (uintptr, uintptr) { var buf [4244]byte; use(buf[:]); return Stackguard() }
-func stack4248() (uintptr, uintptr) { var buf [4248]byte; use(buf[:]); return Stackguard() }
-func stack4252() (uintptr, uintptr) { var buf [4252]byte; use(buf[:]); return Stackguard() }
-func stack4256() (uintptr, uintptr) { var buf [4256]byte; use(buf[:]); return Stackguard() }
-func stack4260() (uintptr, uintptr) { var buf [4260]byte; use(buf[:]); return Stackguard() }
-func stack4264() (uintptr, uintptr) { var buf [4264]byte; use(buf[:]); return Stackguard() }
-func stack4268() (uintptr, uintptr) { var buf [4268]byte; use(buf[:]); return Stackguard() }
-func stack4272() (uintptr, uintptr) { var buf [4272]byte; use(buf[:]); return Stackguard() }
-func stack4276() (uintptr, uintptr) { var buf [4276]byte; use(buf[:]); return Stackguard() }
-func stack4280() (uintptr, uintptr) { var buf [4280]byte; use(buf[:]); return Stackguard() }
-func stack4284() (uintptr, uintptr) { var buf [4284]byte; use(buf[:]); return Stackguard() }
-func stack4288() (uintptr, uintptr) { var buf [4288]byte; use(buf[:]); return Stackguard() }
-func stack4292() (uintptr, uintptr) { var buf [4292]byte; use(buf[:]); return Stackguard() }
-func stack4296() (uintptr, uintptr) { var buf [4296]byte; use(buf[:]); return Stackguard() }
-func stack4300() (uintptr, uintptr) { var buf [4300]byte; use(buf[:]); return Stackguard() }
-func stack4304() (uintptr, uintptr) { var buf [4304]byte; use(buf[:]); return Stackguard() }
-func stack4308() (uintptr, uintptr) { var buf [4308]byte; use(buf[:]); return Stackguard() }
-func stack4312() (uintptr, uintptr) { var buf [4312]byte; use(buf[:]); return Stackguard() }
-func stack4316() (uintptr, uintptr) { var buf [4316]byte; use(buf[:]); return Stackguard() }
-func stack4320() (uintptr, uintptr) { var buf [4320]byte; use(buf[:]); return Stackguard() }
-func stack4324() (uintptr, uintptr) { var buf [4324]byte; use(buf[:]); return Stackguard() }
-func stack4328() (uintptr, uintptr) { var buf [4328]byte; use(buf[:]); return Stackguard() }
-func stack4332() (uintptr, uintptr) { var buf [4332]byte; use(buf[:]); return Stackguard() }
-func stack4336() (uintptr, uintptr) { var buf [4336]byte; use(buf[:]); return Stackguard() }
-func stack4340() (uintptr, uintptr) { var buf [4340]byte; use(buf[:]); return Stackguard() }
-func stack4344() (uintptr, uintptr) { var buf [4344]byte; use(buf[:]); return Stackguard() }
-func stack4348() (uintptr, uintptr) { var buf [4348]byte; use(buf[:]); return Stackguard() }
-func stack4352() (uintptr, uintptr) { var buf [4352]byte; use(buf[:]); return Stackguard() }
-func stack4356() (uintptr, uintptr) { var buf [4356]byte; use(buf[:]); return Stackguard() }
-func stack4360() (uintptr, uintptr) { var buf [4360]byte; use(buf[:]); return Stackguard() }
-func stack4364() (uintptr, uintptr) { var buf [4364]byte; use(buf[:]); return Stackguard() }
-func stack4368() (uintptr, uintptr) { var buf [4368]byte; use(buf[:]); return Stackguard() }
-func stack4372() (uintptr, uintptr) { var buf [4372]byte; use(buf[:]); return Stackguard() }
-func stack4376() (uintptr, uintptr) { var buf [4376]byte; use(buf[:]); return Stackguard() }
-func stack4380() (uintptr, uintptr) { var buf [4380]byte; use(buf[:]); return Stackguard() }
-func stack4384() (uintptr, uintptr) { var buf [4384]byte; use(buf[:]); return Stackguard() }
-func stack4388() (uintptr, uintptr) { var buf [4388]byte; use(buf[:]); return Stackguard() }
-func stack4392() (uintptr, uintptr) { var buf [4392]byte; use(buf[:]); return Stackguard() }
-func stack4396() (uintptr, uintptr) { var buf [4396]byte; use(buf[:]); return Stackguard() }
-func stack4400() (uintptr, uintptr) { var buf [4400]byte; use(buf[:]); return Stackguard() }
-func stack4404() (uintptr, uintptr) { var buf [4404]byte; use(buf[:]); return Stackguard() }
-func stack4408() (uintptr, uintptr) { var buf [4408]byte; use(buf[:]); return Stackguard() }
-func stack4412() (uintptr, uintptr) { var buf [4412]byte; use(buf[:]); return Stackguard() }
-func stack4416() (uintptr, uintptr) { var buf [4416]byte; use(buf[:]); return Stackguard() }
-func stack4420() (uintptr, uintptr) { var buf [4420]byte; use(buf[:]); return Stackguard() }
-func stack4424() (uintptr, uintptr) { var buf [4424]byte; use(buf[:]); return Stackguard() }
-func stack4428() (uintptr, uintptr) { var buf [4428]byte; use(buf[:]); return Stackguard() }
-func stack4432() (uintptr, uintptr) { var buf [4432]byte; use(buf[:]); return Stackguard() }
-func stack4436() (uintptr, uintptr) { var buf [4436]byte; use(buf[:]); return Stackguard() }
-func stack4440() (uintptr, uintptr) { var buf [4440]byte; use(buf[:]); return Stackguard() }
-func stack4444() (uintptr, uintptr) { var buf [4444]byte; use(buf[:]); return Stackguard() }
-func stack4448() (uintptr, uintptr) { var buf [4448]byte; use(buf[:]); return Stackguard() }
-func stack4452() (uintptr, uintptr) { var buf [4452]byte; use(buf[:]); return Stackguard() }
-func stack4456() (uintptr, uintptr) { var buf [4456]byte; use(buf[:]); return Stackguard() }
-func stack4460() (uintptr, uintptr) { var buf [4460]byte; use(buf[:]); return Stackguard() }
-func stack4464() (uintptr, uintptr) { var buf [4464]byte; use(buf[:]); return Stackguard() }
-func stack4468() (uintptr, uintptr) { var buf [4468]byte; use(buf[:]); return Stackguard() }
-func stack4472() (uintptr, uintptr) { var buf [4472]byte; use(buf[:]); return Stackguard() }
-func stack4476() (uintptr, uintptr) { var buf [4476]byte; use(buf[:]); return Stackguard() }
-func stack4480() (uintptr, uintptr) { var buf [4480]byte; use(buf[:]); return Stackguard() }
-func stack4484() (uintptr, uintptr) { var buf [4484]byte; use(buf[:]); return Stackguard() }
-func stack4488() (uintptr, uintptr) { var buf [4488]byte; use(buf[:]); return Stackguard() }
-func stack4492() (uintptr, uintptr) { var buf [4492]byte; use(buf[:]); return Stackguard() }
-func stack4496() (uintptr, uintptr) { var buf [4496]byte; use(buf[:]); return Stackguard() }
-func stack4500() (uintptr, uintptr) { var buf [4500]byte; use(buf[:]); return Stackguard() }
-func stack4504() (uintptr, uintptr) { var buf [4504]byte; use(buf[:]); return Stackguard() }
-func stack4508() (uintptr, uintptr) { var buf [4508]byte; use(buf[:]); return Stackguard() }
-func stack4512() (uintptr, uintptr) { var buf [4512]byte; use(buf[:]); return Stackguard() }
-func stack4516() (uintptr, uintptr) { var buf [4516]byte; use(buf[:]); return Stackguard() }
-func stack4520() (uintptr, uintptr) { var buf [4520]byte; use(buf[:]); return Stackguard() }
-func stack4524() (uintptr, uintptr) { var buf [4524]byte; use(buf[:]); return Stackguard() }
-func stack4528() (uintptr, uintptr) { var buf [4528]byte; use(buf[:]); return Stackguard() }
-func stack4532() (uintptr, uintptr) { var buf [4532]byte; use(buf[:]); return Stackguard() }
-func stack4536() (uintptr, uintptr) { var buf [4536]byte; use(buf[:]); return Stackguard() }
-func stack4540() (uintptr, uintptr) { var buf [4540]byte; use(buf[:]); return Stackguard() }
-func stack4544() (uintptr, uintptr) { var buf [4544]byte; use(buf[:]); return Stackguard() }
-func stack4548() (uintptr, uintptr) { var buf [4548]byte; use(buf[:]); return Stackguard() }
-func stack4552() (uintptr, uintptr) { var buf [4552]byte; use(buf[:]); return Stackguard() }
-func stack4556() (uintptr, uintptr) { var buf [4556]byte; use(buf[:]); return Stackguard() }
-func stack4560() (uintptr, uintptr) { var buf [4560]byte; use(buf[:]); return Stackguard() }
-func stack4564() (uintptr, uintptr) { var buf [4564]byte; use(buf[:]); return Stackguard() }
-func stack4568() (uintptr, uintptr) { var buf [4568]byte; use(buf[:]); return Stackguard() }
-func stack4572() (uintptr, uintptr) { var buf [4572]byte; use(buf[:]); return Stackguard() }
-func stack4576() (uintptr, uintptr) { var buf [4576]byte; use(buf[:]); return Stackguard() }
-func stack4580() (uintptr, uintptr) { var buf [4580]byte; use(buf[:]); return Stackguard() }
-func stack4584() (uintptr, uintptr) { var buf [4584]byte; use(buf[:]); return Stackguard() }
-func stack4588() (uintptr, uintptr) { var buf [4588]byte; use(buf[:]); return Stackguard() }
-func stack4592() (uintptr, uintptr) { var buf [4592]byte; use(buf[:]); return Stackguard() }
-func stack4596() (uintptr, uintptr) { var buf [4596]byte; use(buf[:]); return Stackguard() }
-func stack4600() (uintptr, uintptr) { var buf [4600]byte; use(buf[:]); return Stackguard() }
-func stack4604() (uintptr, uintptr) { var buf [4604]byte; use(buf[:]); return Stackguard() }
-func stack4608() (uintptr, uintptr) { var buf [4608]byte; use(buf[:]); return Stackguard() }
-func stack4612() (uintptr, uintptr) { var buf [4612]byte; use(buf[:]); return Stackguard() }
-func stack4616() (uintptr, uintptr) { var buf [4616]byte; use(buf[:]); return Stackguard() }
-func stack4620() (uintptr, uintptr) { var buf [4620]byte; use(buf[:]); return Stackguard() }
-func stack4624() (uintptr, uintptr) { var buf [4624]byte; use(buf[:]); return Stackguard() }
-func stack4628() (uintptr, uintptr) { var buf [4628]byte; use(buf[:]); return Stackguard() }
-func stack4632() (uintptr, uintptr) { var buf [4632]byte; use(buf[:]); return Stackguard() }
-func stack4636() (uintptr, uintptr) { var buf [4636]byte; use(buf[:]); return Stackguard() }
-func stack4640() (uintptr, uintptr) { var buf [4640]byte; use(buf[:]); return Stackguard() }
-func stack4644() (uintptr, uintptr) { var buf [4644]byte; use(buf[:]); return Stackguard() }
-func stack4648() (uintptr, uintptr) { var buf [4648]byte; use(buf[:]); return Stackguard() }
-func stack4652() (uintptr, uintptr) { var buf [4652]byte; use(buf[:]); return Stackguard() }
-func stack4656() (uintptr, uintptr) { var buf [4656]byte; use(buf[:]); return Stackguard() }
-func stack4660() (uintptr, uintptr) { var buf [4660]byte; use(buf[:]); return Stackguard() }
-func stack4664() (uintptr, uintptr) { var buf [4664]byte; use(buf[:]); return Stackguard() }
-func stack4668() (uintptr, uintptr) { var buf [4668]byte; use(buf[:]); return Stackguard() }
-func stack4672() (uintptr, uintptr) { var buf [4672]byte; use(buf[:]); return Stackguard() }
-func stack4676() (uintptr, uintptr) { var buf [4676]byte; use(buf[:]); return Stackguard() }
-func stack4680() (uintptr, uintptr) { var buf [4680]byte; use(buf[:]); return Stackguard() }
-func stack4684() (uintptr, uintptr) { var buf [4684]byte; use(buf[:]); return Stackguard() }
-func stack4688() (uintptr, uintptr) { var buf [4688]byte; use(buf[:]); return Stackguard() }
-func stack4692() (uintptr, uintptr) { var buf [4692]byte; use(buf[:]); return Stackguard() }
-func stack4696() (uintptr, uintptr) { var buf [4696]byte; use(buf[:]); return Stackguard() }
-func stack4700() (uintptr, uintptr) { var buf [4700]byte; use(buf[:]); return Stackguard() }
-func stack4704() (uintptr, uintptr) { var buf [4704]byte; use(buf[:]); return Stackguard() }
-func stack4708() (uintptr, uintptr) { var buf [4708]byte; use(buf[:]); return Stackguard() }
-func stack4712() (uintptr, uintptr) { var buf [4712]byte; use(buf[:]); return Stackguard() }
-func stack4716() (uintptr, uintptr) { var buf [4716]byte; use(buf[:]); return Stackguard() }
-func stack4720() (uintptr, uintptr) { var buf [4720]byte; use(buf[:]); return Stackguard() }
-func stack4724() (uintptr, uintptr) { var buf [4724]byte; use(buf[:]); return Stackguard() }
-func stack4728() (uintptr, uintptr) { var buf [4728]byte; use(buf[:]); return Stackguard() }
-func stack4732() (uintptr, uintptr) { var buf [4732]byte; use(buf[:]); return Stackguard() }
-func stack4736() (uintptr, uintptr) { var buf [4736]byte; use(buf[:]); return Stackguard() }
-func stack4740() (uintptr, uintptr) { var buf [4740]byte; use(buf[:]); return Stackguard() }
-func stack4744() (uintptr, uintptr) { var buf [4744]byte; use(buf[:]); return Stackguard() }
-func stack4748() (uintptr, uintptr) { var buf [4748]byte; use(buf[:]); return Stackguard() }
-func stack4752() (uintptr, uintptr) { var buf [4752]byte; use(buf[:]); return Stackguard() }
-func stack4756() (uintptr, uintptr) { var buf [4756]byte; use(buf[:]); return Stackguard() }
-func stack4760() (uintptr, uintptr) { var buf [4760]byte; use(buf[:]); return Stackguard() }
-func stack4764() (uintptr, uintptr) { var buf [4764]byte; use(buf[:]); return Stackguard() }
-func stack4768() (uintptr, uintptr) { var buf [4768]byte; use(buf[:]); return Stackguard() }
-func stack4772() (uintptr, uintptr) { var buf [4772]byte; use(buf[:]); return Stackguard() }
-func stack4776() (uintptr, uintptr) { var buf [4776]byte; use(buf[:]); return Stackguard() }
-func stack4780() (uintptr, uintptr) { var buf [4780]byte; use(buf[:]); return Stackguard() }
-func stack4784() (uintptr, uintptr) { var buf [4784]byte; use(buf[:]); return Stackguard() }
-func stack4788() (uintptr, uintptr) { var buf [4788]byte; use(buf[:]); return Stackguard() }
-func stack4792() (uintptr, uintptr) { var buf [4792]byte; use(buf[:]); return Stackguard() }
-func stack4796() (uintptr, uintptr) { var buf [4796]byte; use(buf[:]); return Stackguard() }
-func stack4800() (uintptr, uintptr) { var buf [4800]byte; use(buf[:]); return Stackguard() }
-func stack4804() (uintptr, uintptr) { var buf [4804]byte; use(buf[:]); return Stackguard() }
-func stack4808() (uintptr, uintptr) { var buf [4808]byte; use(buf[:]); return Stackguard() }
-func stack4812() (uintptr, uintptr) { var buf [4812]byte; use(buf[:]); return Stackguard() }
-func stack4816() (uintptr, uintptr) { var buf [4816]byte; use(buf[:]); return Stackguard() }
-func stack4820() (uintptr, uintptr) { var buf [4820]byte; use(buf[:]); return Stackguard() }
-func stack4824() (uintptr, uintptr) { var buf [4824]byte; use(buf[:]); return Stackguard() }
-func stack4828() (uintptr, uintptr) { var buf [4828]byte; use(buf[:]); return Stackguard() }
-func stack4832() (uintptr, uintptr) { var buf [4832]byte; use(buf[:]); return Stackguard() }
-func stack4836() (uintptr, uintptr) { var buf [4836]byte; use(buf[:]); return Stackguard() }
-func stack4840() (uintptr, uintptr) { var buf [4840]byte; use(buf[:]); return Stackguard() }
-func stack4844() (uintptr, uintptr) { var buf [4844]byte; use(buf[:]); return Stackguard() }
-func stack4848() (uintptr, uintptr) { var buf [4848]byte; use(buf[:]); return Stackguard() }
-func stack4852() (uintptr, uintptr) { var buf [4852]byte; use(buf[:]); return Stackguard() }
-func stack4856() (uintptr, uintptr) { var buf [4856]byte; use(buf[:]); return Stackguard() }
-func stack4860() (uintptr, uintptr) { var buf [4860]byte; use(buf[:]); return Stackguard() }
-func stack4864() (uintptr, uintptr) { var buf [4864]byte; use(buf[:]); return Stackguard() }
-func stack4868() (uintptr, uintptr) { var buf [4868]byte; use(buf[:]); return Stackguard() }
-func stack4872() (uintptr, uintptr) { var buf [4872]byte; use(buf[:]); return Stackguard() }
-func stack4876() (uintptr, uintptr) { var buf [4876]byte; use(buf[:]); return Stackguard() }
-func stack4880() (uintptr, uintptr) { var buf [4880]byte; use(buf[:]); return Stackguard() }
-func stack4884() (uintptr, uintptr) { var buf [4884]byte; use(buf[:]); return Stackguard() }
-func stack4888() (uintptr, uintptr) { var buf [4888]byte; use(buf[:]); return Stackguard() }
-func stack4892() (uintptr, uintptr) { var buf [4892]byte; use(buf[:]); return Stackguard() }
-func stack4896() (uintptr, uintptr) { var buf [4896]byte; use(buf[:]); return Stackguard() }
-func stack4900() (uintptr, uintptr) { var buf [4900]byte; use(buf[:]); return Stackguard() }
-func stack4904() (uintptr, uintptr) { var buf [4904]byte; use(buf[:]); return Stackguard() }
-func stack4908() (uintptr, uintptr) { var buf [4908]byte; use(buf[:]); return Stackguard() }
-func stack4912() (uintptr, uintptr) { var buf [4912]byte; use(buf[:]); return Stackguard() }
-func stack4916() (uintptr, uintptr) { var buf [4916]byte; use(buf[:]); return Stackguard() }
-func stack4920() (uintptr, uintptr) { var buf [4920]byte; use(buf[:]); return Stackguard() }
-func stack4924() (uintptr, uintptr) { var buf [4924]byte; use(buf[:]); return Stackguard() }
-func stack4928() (uintptr, uintptr) { var buf [4928]byte; use(buf[:]); return Stackguard() }
-func stack4932() (uintptr, uintptr) { var buf [4932]byte; use(buf[:]); return Stackguard() }
-func stack4936() (uintptr, uintptr) { var buf [4936]byte; use(buf[:]); return Stackguard() }
-func stack4940() (uintptr, uintptr) { var buf [4940]byte; use(buf[:]); return Stackguard() }
-func stack4944() (uintptr, uintptr) { var buf [4944]byte; use(buf[:]); return Stackguard() }
-func stack4948() (uintptr, uintptr) { var buf [4948]byte; use(buf[:]); return Stackguard() }
-func stack4952() (uintptr, uintptr) { var buf [4952]byte; use(buf[:]); return Stackguard() }
-func stack4956() (uintptr, uintptr) { var buf [4956]byte; use(buf[:]); return Stackguard() }
-func stack4960() (uintptr, uintptr) { var buf [4960]byte; use(buf[:]); return Stackguard() }
-func stack4964() (uintptr, uintptr) { var buf [4964]byte; use(buf[:]); return Stackguard() }
-func stack4968() (uintptr, uintptr) { var buf [4968]byte; use(buf[:]); return Stackguard() }
-func stack4972() (uintptr, uintptr) { var buf [4972]byte; use(buf[:]); return Stackguard() }
-func stack4976() (uintptr, uintptr) { var buf [4976]byte; use(buf[:]); return Stackguard() }
-func stack4980() (uintptr, uintptr) { var buf [4980]byte; use(buf[:]); return Stackguard() }
-func stack4984() (uintptr, uintptr) { var buf [4984]byte; use(buf[:]); return Stackguard() }
-func stack4988() (uintptr, uintptr) { var buf [4988]byte; use(buf[:]); return Stackguard() }
-func stack4992() (uintptr, uintptr) { var buf [4992]byte; use(buf[:]); return Stackguard() }
-func stack4996() (uintptr, uintptr) { var buf [4996]byte; use(buf[:]); return Stackguard() }
-func stack5000() (uintptr, uintptr) { var buf [5000]byte; use(buf[:]); return Stackguard() }
-
// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
@@ -1576,9 +112,172 @@ func TestStackMem(t *testing.T) {
if consumed > estimate {
t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
}
- inuse := s1.StackInuse - s0.StackInuse
+	// Due to broken stack memory accounting (http://golang.org/issue/7468),
+	// StackInuse can decrease during function execution, so convert the
+	// values to int64 before subtracting.
+ inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
t.Logf("Inuse %vMB for stack mem", inuse>>20)
if inuse > 4<<20 {
t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
}
}
+
+// Test stack growing in different contexts.
+func TestStackGrowth(t *testing.T) {
+ switch GOARCH {
+ case "386", "arm":
+ t.Skipf("skipping test on %q; see issue 8083", GOARCH)
+ }
+ t.Parallel()
+ var wg sync.WaitGroup
+
+ // in a normal goroutine
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ growStack()
+ }()
+ wg.Wait()
+
+ // in locked goroutine
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ LockOSThread()
+ growStack()
+ UnlockOSThread()
+ }()
+ wg.Wait()
+
+ // in finalizer
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ done := make(chan bool)
+ go func() {
+ s := new(string)
+ SetFinalizer(s, func(ss *string) {
+ growStack()
+ done <- true
+ })
+ s = nil
+ done <- true
+ }()
+ <-done
+ GC()
+ select {
+ case <-done:
+ case <-time.After(20 * time.Second):
+ t.Fatal("finalizer did not run")
+ }
+ }()
+ wg.Wait()
+}
+
+// ... and in init
+//func init() {
+// growStack()
+//}
+
+func growStack() {
+ n := 1 << 10
+ if testing.Short() {
+ n = 1 << 8
+ }
+ for i := 0; i < n; i++ {
+ x := 0
+ growStackIter(&x, i)
+ if x != i+1 {
+ panic("stack is corrupted")
+ }
+ }
+ GC()
+}
+
+// This function is deliberately not an anonymous func, so that the compiler can do escape
+// analysis and place x on the stack (and a subsequent stack growth then updates the pointer).
+func growStackIter(p *int, n int) {
+ if n == 0 {
+ *p = n + 1
+ GC()
+ return
+ }
+ *p = n + 1
+ x := 0
+ growStackIter(&x, n-1)
+ if x != n {
+ panic("stack is corrupted")
+ }
+}
+
+func TestStackGrowthCallback(t *testing.T) {
+ t.Parallel()
+ var wg sync.WaitGroup
+
+ // test stack growth at chan op
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ c := make(chan int, 1)
+ growStackWithCallback(func() {
+ c <- 1
+ <-c
+ })
+ }()
+
+ // test stack growth at map op
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ m := make(map[int]int)
+ growStackWithCallback(func() {
+ _, _ = m[1]
+ m[1] = 1
+ })
+ }()
+
+ // test stack growth at goroutine creation
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ growStackWithCallback(func() {
+ done := make(chan bool)
+ go func() {
+ done <- true
+ }()
+ <-done
+ })
+ }()
+
+ wg.Wait()
+}
+
+func growStackWithCallback(cb func()) {
+ var f func(n int)
+ f = func(n int) {
+ if n == 0 {
+ cb()
+ return
+ }
+ f(n - 1)
+ }
+ for i := 0; i < 1<<10; i++ {
+ f(i)
+ }
+}
+
+// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
+// during a stack copy.
+func set(p *int, x int) {
+ *p = x
+}
+func TestDeferPtrs(t *testing.T) {
+ var y int
+
+ defer func() {
+ if y != 42 {
+ t.Errorf("defer's stack references were not adjusted appropriately")
+ }
+ }()
+ defer set(&y, 42)
+ growStack()
+}
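
The int64 conversion in TestStackMem above matters because MemStats fields are uint64: if StackInuse shrinks while the test runs, subtracting the raw unsigned values wraps around to an enormous positive number instead of a small negative one. A minimal standalone sketch of the pitfall (illustrative, not part of the patch):

	package main

	import "fmt"

	func main() {
		var s0, s1 uint64 = 1 << 20, 512 << 10 // inuse before/after; after is smaller
		fmt.Println(s1 - s0)               // wraps to 18446744073709027328
		fmt.Println(int64(s1) - int64(s0)) // -524288, the delta the test wants
	}
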
diff --git a/src/pkg/runtime/string.goc b/src/pkg/runtime/string.goc
index b79acbe1c..97a69d07b 100644
--- a/src/pkg/runtime/string.goc
+++ b/src/pkg/runtime/string.goc
@@ -46,10 +46,8 @@ gostringsize(intgo l)
if(l == 0)
return runtime·emptystring;
- // leave room for NUL for C runtime (e.g., callers of getenv)
- s.str = runtime·mallocgc(l+1, 0, FlagNoScan|FlagNoZero);
+ s.str = runtime·mallocgc(l, 0, FlagNoScan|FlagNoZero);
s.len = l;
- s.str[l] = 0;
for(;;) {
ms = runtime·maxstring;
if((uintptr)l <= ms || runtime·casp((void**)&runtime·maxstring, (void*)ms, (void*)l))
@@ -80,6 +78,7 @@ runtime·gostringn(byte *str, intgo l)
return s;
}
+// used by cmd/cgo
Slice
runtime·gobytes(byte *p, intgo n)
{
@@ -102,11 +101,8 @@ runtime·gostringnocopy(byte *str)
return s;
}
-void
-runtime·cstringToGo(byte *str, String s)
-{
+func cstringToGo(str *byte) (s String) {
s = runtime·gostringnocopy(str);
- FLUSH(&s);
}
String
@@ -179,14 +175,35 @@ concatstring(intgo n, String *s)
return out;
}
-// NOTE: Cannot use func syntax, because we need the ...,
-// to signal to the garbage collector that this function does
-// not have a fixed size argument count.
#pragma textflag NOSPLIT
-void
-runtime·concatstring(intgo n, String s1, ...)
-{
- (&s1)[n] = concatstring(n, &s1);
+func concatstring2(s1 String, s2 String) (res String) {
+ USED(&s2);
+ res = concatstring(2, &s1);
+}
+#pragma textflag NOSPLIT
+func concatstring3(s1 String, s2 String, s3 String) (res String) {
+ USED(&s2);
+ USED(&s3);
+ res = concatstring(3, &s1);
+}
+#pragma textflag NOSPLIT
+func concatstring4(s1 String, s2 String, s3 String, s4 String) (res String) {
+ USED(&s2);
+ USED(&s3);
+ USED(&s4);
+ res = concatstring(4, &s1);
+}
+#pragma textflag NOSPLIT
+func concatstring5(s1 String, s2 String, s3 String, s4 String, s5 String) (res String) {
+ USED(&s2);
+ USED(&s3);
+ USED(&s4);
+ USED(&s5);
+ res = concatstring(5, &s1);
+}
+#pragma textflag NOSPLIT
+func concatstrings(s Slice) (res String) {
+ res = concatstring(s.len, (String*)s.array);
}
func eqstring(s1 String, s2 String) (v bool) {
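
The fixed-arity concatstring2 through concatstring5 helpers above replace the old variadic runtime·concatstring; the compiler now picks the helper matching the operand count, falling back to concatstrings with a slice for longer chains. Nothing changes at the source level; a sketch of the kind of expression involved, with the lowering noted as a compiler detail:

	package main

	import "fmt"

	func main() {
		a, b, c := "x", "y", "z"
		s := a + b + c // lowered by the compiler to a concatstring3 call
		fmt.Println(s)
	}
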
@@ -219,6 +236,25 @@ runtime·strcmp(byte *s1, byte *s2)
}
}
+int32
+runtime·strncmp(byte *s1, byte *s2, uintptr n)
+{
+ uintptr i;
+ byte c1, c2;
+
+ for(i=0; i<n; i++) {
+ c1 = s1[i];
+ c2 = s2[i];
+ if(c1 < c2)
+ return -1;
+ if(c1 > c2)
+ return +1;
+ if(c1 == 0)
+ break;
+ }
+ return 0;
+}
+
byte*
runtime·strstr(byte *s1, byte *s2)
{
@@ -258,11 +294,35 @@ func slicebytetostring(b Slice) (s String) {
runtime·memmove(s.str, b.array, s.len);
}
+func slicebytetostringtmp(b Slice) (s String) {
+ void *pc;
+
+ if(raceenabled) {
+ pc = runtime·getcallerpc(&b);
+ runtime·racereadrangepc(b.array, b.len, pc, runtime·slicebytetostringtmp);
+ }
+
+ // Return a "string" referring to the actual []byte bytes.
+ // This is only for use by internal compiler optimizations
+ // that know that the string form will be discarded before
+ // the calling goroutine could possibly modify the original
+ // slice or synchronize with another goroutine.
+ // Today, the only such case is a m[string(k)] lookup where
+ // m is a string-keyed map and k is a []byte.
+ s.str = b.array;
+ s.len = b.len;
+}
+
func stringtoslicebyte(s String) (b Slice) {
- b.array = runtime·mallocgc(s.len, 0, FlagNoScan|FlagNoZero);
+ uintptr cap;
+
+ cap = runtime·roundupsize(s.len);
+ b.array = runtime·mallocgc(cap, 0, FlagNoScan|FlagNoZero);
b.len = s.len;
- b.cap = s.len;
+ b.cap = cap;
runtime·memmove(b.array, s.str, s.len);
+ if(cap != b.len)
+ runtime·memclr(b.array+b.len, cap-b.len);
}
func slicerunetostring(b Slice) (s String) {
@@ -297,6 +357,7 @@ func stringtoslicerune(s String) (b Slice) {
intgo n;
int32 dum, *r;
uint8 *p, *ep;
+ uintptr mem;
// two passes.
// unlike slicerunetostring, no race because strings are immutable.
@@ -308,13 +369,18 @@ func stringtoslicerune(s String) (b Slice) {
n++;
}
- b.array = runtime·mallocgc(n*sizeof(r[0]), 0, FlagNoScan|FlagNoZero);
+ if(n > MaxMem/sizeof(r[0]))
+ runtime·throw("out of memory");
+ mem = runtime·roundupsize(n*sizeof(r[0]));
+ b.array = runtime·mallocgc(mem, 0, FlagNoScan|FlagNoZero);
b.len = n;
- b.cap = n;
+ b.cap = mem/sizeof(r[0]);
p = s.str;
r = (int32*)b.array;
while(p < ep)
p += runtime·charntorune(r++, p, ep-p);
+ if(b.cap > b.len)
+ runtime·memclr(b.array+b.len*sizeof(r[0]), (b.cap-b.len)*sizeof(r[0]));
}
enum
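
The new slicebytetostringtmp above backs a compiler optimization rather than a user-visible API: in a lookup of the form m[string(k)], the temporary string may alias the []byte memory directly, because it is dead before the caller could modify k. A hedged sketch of the pattern as it appears in user code (the optimization itself is invisible at the source level):

	package main

	import "fmt"

	func main() {
		m := map[string]int{"key": 1}
		k := []byte("key")
		// The compiler may lower string(k) here to slicebytetostringtmp,
		// skipping the copy, because the temporary string cannot outlive
		// the map lookup.
		if v, ok := m[string(k)]; ok {
			fmt.Println(v)
		}
	}

The stringtoslicebyte and stringtoslicerune changes in the same file share a theme: they round the allocation up to the allocator's size class (roundupsize), report the rounded capacity, and clear the tail so the extra bytes are zeroed.
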
diff --git a/src/pkg/runtime/symtab.c b/src/pkg/runtime/symtab.goc
index dd0015aee..15e1d28fa 100644
--- a/src/pkg/runtime/symtab.c
+++ b/src/pkg/runtime/symtab.goc
@@ -5,6 +5,7 @@
// Runtime symbol table parsing.
// See http://golang.org/s/go12symtab for an overview.
+package runtime
#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
#include "os_GOOS.h"
@@ -38,7 +39,7 @@ runtime·symtabinit(void)
// two zero bytes, a byte giving the PC quantum,
// and a byte giving the pointer width in bytes.
if(*(uint32*)pclntab != 0xfffffffb || pclntab[4] != 0 || pclntab[5] != 0 || pclntab[6] != PCQuantum || pclntab[7] != sizeof(void*)) {
- runtime·printf("runtime: function symbol table header: 0x%x 0x%x\n", *(uint32*)pclntab, *(uint32*)(pclntab+4));
+ runtime·printf("runtime: function symbol table header: %x %x\n", *(uint32*)pclntab, *(uint32*)(pclntab+4));
runtime·throw("invalid function symbol table\n");
}
@@ -86,8 +87,11 @@ runtime·funcdata(Func *f, int32 i)
if(i < 0 || i >= f->nfuncdata)
return nil;
p = (byte*)&f->nfuncdata + 4 + f->npcdata*4;
- if(sizeof(void*) == 8 && ((uintptr)p & 4))
+ if(sizeof(void*) == 8 && ((uintptr)p & 4)) {
+ if(((uintptr)f & 4))
+ runtime·printf("misaligned func %p\n", f);
p += 4;
+ }
return ((void**)p)[i];
}
@@ -224,27 +228,18 @@ runtime·funcarglen(Func *f, uintptr targetpc)
return runtime·pcdatavalue(f, PCDATA_ArgSize, targetpc-PCQuantum);
}
-void
-runtime·funcline_go(Func *f, uintptr targetpc, String retfile, intgo retline)
-{
+func funcline_go(f *Func, targetpc uintptr) (retfile String, retline int) {
// Pass strict=false here, because anyone can call this function,
// and they might just be wrong about targetpc belonging to f.
retline = funcline(f, targetpc, &retfile, false);
- FLUSH(&retline);
}
-void
-runtime·funcname_go(Func *f, String ret)
-{
+func funcname_go(f *Func) (ret String) {
ret = runtime·gostringnocopy((uint8*)runtime·funcname(f));
- FLUSH(&ret);
}
-void
-runtime·funcentry_go(Func *f, uintptr ret)
-{
+func funcentry_go(f *Func) (ret uintptr) {
ret = f->entry;
- FLUSH(&ret);
}
Func*
@@ -281,6 +276,10 @@ runtime·findfunc(uintptr addr)
return nil;
}
+func FuncForPC(pc uintptr) (ret *Func) {
+ ret = runtime·findfunc(pc);
+}
+
static bool
hasprefix(String s, int8 *p)
{
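
FuncForPC above is the exported entry point layered over runtime·findfunc, and the funcline_go, funcname_go, and funcentry_go stubs back the methods on *Func. Typical use from ordinary Go code, as a small self-contained sketch:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		pc, _, _, ok := runtime.Caller(0) // PC of this call site
		if !ok {
			return
		}
		f := runtime.FuncForPC(pc)
		file, line := f.FileLine(pc)
		fmt.Printf("%s at %s:%d, entry %#x\n", f.Name(), file, line, f.Entry())
	}
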
diff --git a/src/pkg/runtime/sys_darwin_386.s b/src/pkg/runtime/sys_darwin_386.s
index c2a259e5b..bfaaa00a7 100644
--- a/src/pkg/runtime/sys_darwin_386.s
+++ b/src/pkg/runtime/sys_darwin_386.s
@@ -457,8 +457,7 @@ TEXT runtime·setldt(SB),NOSPLIT,$32
* we use its pthread_create and let it set up %gs
* for us. When we do that, the private storage
* we get is not at 0(GS) but at 0x468(GS).
- * To insulate the rest of the tool chain from this ugliness,
- * 8l rewrites 0(GS) into 0x468(GS) for us.
+ * 8l rewrites 0(TLS) into 0x468(GS) for us.
* To accommodate that rewrite, we translate the
* address and limit here so that 0x468(GS) maps to 0(address).
*
diff --git a/src/pkg/runtime/sys_dragonfly_386.s b/src/pkg/runtime/sys_dragonfly_386.s
index 9085ded6f..20e699966 100644
--- a/src/pkg/runtime/sys_dragonfly_386.s
+++ b/src/pkg/runtime/sys_dragonfly_386.s
@@ -155,7 +155,7 @@ TEXT runtime·setitimer(SB), NOSPLIT, $-4
TEXT time·now(SB), NOSPLIT, $32
MOVL $232, AX
LEAL 12(SP), BX
- MOVL $0, 4(SP)
+ MOVL $0, 4(SP) // CLOCK_REALTIME
MOVL BX, 8(SP)
INT $0x80
MOVL 12(SP), AX // sec
@@ -172,7 +172,7 @@ TEXT time·now(SB), NOSPLIT, $32
TEXT runtime·nanotime(SB), NOSPLIT, $32
MOVL $232, AX
LEAL 12(SP), BX
- MOVL $0, 4(SP)
+ MOVL $4, 4(SP) // CLOCK_MONOTONIC
MOVL BX, 8(SP)
INT $0x80
MOVL 12(SP), AX // sec
diff --git a/src/pkg/runtime/sys_dragonfly_amd64.s b/src/pkg/runtime/sys_dragonfly_amd64.s
index 2fa97f207..d70d2e80c 100644
--- a/src/pkg/runtime/sys_dragonfly_amd64.s
+++ b/src/pkg/runtime/sys_dragonfly_amd64.s
@@ -125,7 +125,7 @@ TEXT runtime·setitimer(SB), NOSPLIT, $-8
// func now() (sec int64, nsec int32)
TEXT time·now(SB), NOSPLIT, $32
MOVL $232, AX
- MOVQ $0, DI
+ MOVQ $0, DI // CLOCK_REALTIME
LEAQ 8(SP), SI
SYSCALL
MOVQ 8(SP), AX // sec
@@ -138,7 +138,7 @@ TEXT time·now(SB), NOSPLIT, $32
TEXT runtime·nanotime(SB), NOSPLIT, $32
MOVL $232, AX
- MOVQ $0, DI
+ MOVQ $4, DI // CLOCK_MONOTONIC
LEAQ 8(SP), SI
SYSCALL
MOVQ 8(SP), AX // sec
diff --git a/src/pkg/runtime/sys_freebsd_386.s b/src/pkg/runtime/sys_freebsd_386.s
index 8b4d2317d..4c97eec7b 100644
--- a/src/pkg/runtime/sys_freebsd_386.s
+++ b/src/pkg/runtime/sys_freebsd_386.s
@@ -135,7 +135,7 @@ TEXT runtime·setitimer(SB), NOSPLIT, $-4
TEXT time·now(SB), NOSPLIT, $32
MOVL $232, AX
LEAL 12(SP), BX
- MOVL $0, 4(SP)
+ MOVL $0, 4(SP) // CLOCK_REALTIME
MOVL BX, 8(SP)
INT $0x80
MOVL 12(SP), AX // sec
@@ -152,7 +152,9 @@ TEXT time·now(SB), NOSPLIT, $32
TEXT runtime·nanotime(SB), NOSPLIT, $32
MOVL $232, AX
LEAL 12(SP), BX
- MOVL $0, 4(SP)
+ // We can use CLOCK_MONOTONIC_FAST here when we drop
+ // support for FreeBSD 8-STABLE.
+ MOVL $4, 4(SP) // CLOCK_MONOTONIC
MOVL BX, 8(SP)
INT $0x80
MOVL 12(SP), AX // sec
@@ -307,8 +309,7 @@ TEXT runtime·i386_set_ldt(SB),NOSPLIT,$16
MOVL AX, 8(SP)
MOVL $165, AX
INT $0x80
- CMPL AX, $0xfffff001
- JLS 2(PC)
+ JAE 2(PC)
INT $3
RET
@@ -324,7 +325,7 @@ TEXT runtime·sysctl(SB),NOSPLIT,$28
MOVSL // arg 6 - newlen
MOVL $202, AX // sys___sysctl
INT $0x80
- JCC 3(PC)
+ JAE 3(PC)
NEGL AX
RET
MOVL $0, AX
diff --git a/src/pkg/runtime/sys_freebsd_amd64.s b/src/pkg/runtime/sys_freebsd_amd64.s
index 63cd3ac07..4c5b32504 100644
--- a/src/pkg/runtime/sys_freebsd_amd64.s
+++ b/src/pkg/runtime/sys_freebsd_amd64.s
@@ -144,7 +144,7 @@ TEXT runtime·setitimer(SB), NOSPLIT, $-8
// func now() (sec int64, nsec int32)
TEXT time·now(SB), NOSPLIT, $32
MOVL $232, AX
- MOVQ $0, DI
+ MOVQ $0, DI // CLOCK_REALTIME
LEAQ 8(SP), SI
SYSCALL
MOVQ 8(SP), AX // sec
@@ -157,7 +157,9 @@ TEXT time·now(SB), NOSPLIT, $32
TEXT runtime·nanotime(SB), NOSPLIT, $32
MOVL $232, AX
- MOVQ $0, DI
+ // We can use CLOCK_MONOTONIC_FAST here when we drop
+ // support for FreeBSD 8-STABLE.
+ MOVQ $4, DI // CLOCK_MONOTONIC
LEAQ 8(SP), SI
SYSCALL
MOVQ 8(SP), AX // sec
diff --git a/src/pkg/runtime/sys_freebsd_arm.s b/src/pkg/runtime/sys_freebsd_arm.s
index 106d72799..3ec95a651 100644
--- a/src/pkg/runtime/sys_freebsd_arm.s
+++ b/src/pkg/runtime/sys_freebsd_arm.s
@@ -162,7 +162,9 @@ TEXT time·now(SB), NOSPLIT, $32
// int64 nanotime(void) so really
// void nanotime(int64 *nsec)
TEXT runtime·nanotime(SB), NOSPLIT, $32
- MOVW $0, R0 // CLOCK_REALTIME
+ // We can use CLOCK_MONOTONIC_FAST here when we drop
+ // support for FreeBSD 8-STABLE.
+ MOVW $4, R0 // CLOCK_MONOTONIC
MOVW $8(R13), R1
MOVW $SYS_clock_gettime, R7
SWI $0
@@ -365,6 +367,7 @@ TEXT runtime·casp(SB),NOSPLIT,$0
TEXT runtime·cas(SB),NOSPLIT,$0
B runtime·armcas(SB)
+// TODO(minux): this only supports ARMv6K+.
TEXT runtime·read_tls_fallback(SB),NOSPLIT,$-4
- MOVW $0xffff1000, R0
- MOVW (R0), R0
+ WORD $0xee1d0f70 // mrc p15, 0, r0, c13, c0, 3
+ RET
diff --git a/src/pkg/runtime/sys_linux_386.s b/src/pkg/runtime/sys_linux_386.s
index fcda739db..b7896f178 100644
--- a/src/pkg/runtime/sys_linux_386.s
+++ b/src/pkg/runtime/sys_linux_386.s
@@ -106,7 +106,7 @@ TEXT runtime·mincore(SB),NOSPLIT,$0-24
// func now() (sec int64, nsec int32)
TEXT time·now(SB), NOSPLIT, $32
MOVL $265, AX // syscall - clock_gettime
- MOVL $0, BX
+ MOVL $0, BX // CLOCK_REALTIME
LEAL 8(SP), CX
MOVL $0, DX
CALL *runtime·_vdso(SB)
@@ -123,7 +123,7 @@ TEXT time·now(SB), NOSPLIT, $32
// void nanotime(int64 *nsec)
TEXT runtime·nanotime(SB), NOSPLIT, $32
MOVL $265, AX // syscall - clock_gettime
- MOVL $0, BX
+ MOVL $1, BX // CLOCK_MONOTONIC
LEAL 8(SP), CX
MOVL $0, DX
CALL *runtime·_vdso(SB)
@@ -383,7 +383,7 @@ TEXT runtime·setldt(SB),NOSPLIT,$32
* for us. When we do that, the private storage
* we get is not at 0(GS), 4(GS), but -8(GS), -4(GS).
* To insulate the rest of the tool chain from this
- * ugliness, 8l rewrites 0(GS) into -8(GS) for us.
+ * ugliness, 8l rewrites 0(TLS) into -8(GS) for us.
* To accommodate that rewrite, we translate
* the address here and bump the limit to 0xffffffff (no limit)
* so that -8(GS) maps to 0(address).
diff --git a/src/pkg/runtime/sys_linux_amd64.s b/src/pkg/runtime/sys_linux_amd64.s
index 481841a67..b340c4f2c 100644
--- a/src/pkg/runtime/sys_linux_amd64.s
+++ b/src/pkg/runtime/sys_linux_amd64.s
@@ -76,7 +76,7 @@ TEXT runtime·usleep(SB),NOSPLIT,$16
SYSCALL
RET
-TEXT runtime·raise(SB),NOSPLIT,$12
+TEXT runtime·raise(SB),NOSPLIT,$0
MOVL $186, AX // syscall - gettid
SYSCALL
MOVL AX, DI // arg 1 tid
@@ -136,7 +136,7 @@ TEXT runtime·nanotime(SB),NOSPLIT,$16
MOVQ runtime·__vdso_clock_gettime_sym(SB), AX
CMPQ AX, $0
JEQ fallback_gtod_nt
- MOVL $0, DI // CLOCK_REALTIME
+ MOVL $1, DI // CLOCK_MONOTONIC
LEAQ 0(SP), SI
CALL AX
MOVQ 0(SP), AX // sec
diff --git a/src/pkg/runtime/sys_linux_arm.s b/src/pkg/runtime/sys_linux_arm.s
index 42aef56a7..c537a8722 100644
--- a/src/pkg/runtime/sys_linux_arm.s
+++ b/src/pkg/runtime/sys_linux_arm.s
@@ -175,7 +175,7 @@ TEXT time·now(SB), NOSPLIT, $32
// int64 nanotime(void) so really
// void nanotime(int64 *nsec)
TEXT runtime·nanotime(SB),NOSPLIT,$32
- MOVW $0, R0 // CLOCK_REALTIME
+ MOVW $1, R0 // CLOCK_MONOTONIC
MOVW $8(R13), R1 // timespec
MOVW $SYS_clock_gettime, R7
SWI $0
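
The nanotime changes above all make the same substitution: clock id 0 (CLOCK_REALTIME) becomes the platform's monotonic clock (1 on Linux, 4 on FreeBSD and DragonFly), so runtime·nanotime no longer jumps when the wall clock is stepped. The same call from userspace, sketched with the golang.org/x/sys/unix wrappers (an assumption; any clock_gettime binding works):

	package main

	import (
		"fmt"

		"golang.org/x/sys/unix"
	)

	func main() {
		var ts unix.Timespec
		// CLOCK_MONOTONIC advances steadily and never moves backward,
		// even if NTP or an administrator resets the wall clock.
		if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
			panic(err)
		}
		fmt.Println(unix.TimespecToNsec(ts)) // nanoseconds, like runtime·nanotime
	}
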
diff --git a/src/pkg/runtime/sys_nacl_386.s b/src/pkg/runtime/sys_nacl_386.s
new file mode 100644
index 000000000..42ba0e0ed
--- /dev/null
+++ b/src/pkg/runtime/sys_nacl_386.s
@@ -0,0 +1,243 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "zasm_GOOS_GOARCH.h"
+#include "../../cmd/ld/textflag.h"
+#include "syscall_nacl.h"
+
+#define NACL_SYSCALL(code) \
+ MOVL $(0x10000 + ((code)<<5)), AX; CALL AX
+
+#define NACL_SYSJMP(code) \
+ MOVL $(0x10000 + ((code)<<5)), AX; JMP AX
+
+TEXT runtime·exit(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_exit)
+
+TEXT runtime·exit1(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_thread_exit)
+
+TEXT runtime·open(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_open)
+
+TEXT runtime·close(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_close)
+
+TEXT runtime·read(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_read)
+
+TEXT syscall·naclWrite(SB), NOSPLIT, $12-16
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ MOVL DI, 0(SP)
+ MOVL SI, 4(SP)
+ MOVL DX, 8(SP)
+ CALL runtime·write(SB)
+ MOVL AX, ret+16(FP)
+ RET
+
+TEXT runtime·write(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_write)
+
+TEXT runtime·nacl_exception_stack(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_exception_stack)
+
+TEXT runtime·nacl_exception_handler(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_exception_handler)
+
+TEXT runtime·nacl_sem_create(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_sem_create)
+
+TEXT runtime·nacl_sem_wait(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_sem_wait)
+
+TEXT runtime·nacl_sem_post(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_sem_post)
+
+TEXT runtime·nacl_mutex_create(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_mutex_create)
+
+TEXT runtime·nacl_mutex_lock(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_mutex_lock)
+
+TEXT runtime·nacl_mutex_trylock(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_mutex_trylock)
+
+TEXT runtime·nacl_mutex_unlock(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_mutex_unlock)
+
+TEXT runtime·nacl_cond_create(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_cond_create)
+
+TEXT runtime·nacl_cond_wait(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_cond_wait)
+
+TEXT runtime·nacl_cond_signal(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_cond_signal)
+
+TEXT runtime·nacl_cond_broadcast(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_cond_broadcast)
+
+TEXT runtime·nacl_cond_timed_wait_abs(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_cond_timed_wait_abs)
+
+TEXT runtime·nacl_thread_create(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_thread_create)
+
+TEXT runtime·mstart_nacl(SB),NOSPLIT,$0
+ JMP runtime·mstart(SB)
+
+TEXT runtime·nacl_nanosleep(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_nanosleep)
+
+TEXT runtime·osyield(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_sched_yield)
+
+TEXT runtime·mmap(SB),NOSPLIT,$32
+ MOVL arg1+0(FP), AX
+ MOVL AX, 0(SP)
+ MOVL arg2+4(FP), AX
+ MOVL AX, 4(SP)
+ MOVL arg3+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL arg4+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVL arg5+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL arg6+20(FP), AX
+ MOVL AX, 24(SP)
+ MOVL $0, 28(SP)
+ LEAL 24(SP), AX
+ MOVL AX, 20(SP)
+ NACL_SYSCALL(SYS_mmap)
+ RET
+
+TEXT time·now(SB),NOSPLIT,$20
+ MOVL $0, 0(SP) // real time clock
+ LEAL 8(SP), AX
+ MOVL AX, 4(SP) // timespec
+ NACL_SYSCALL(SYS_clock_gettime)
+ MOVL 8(SP), AX // low 32 sec
+ MOVL 12(SP), CX // high 32 sec
+ MOVL 16(SP), BX // nsec
+
+ // sec is in AX, nsec in BX
+ MOVL AX, sec+0(FP)
+ MOVL CX, sec+4(FP)
+ MOVL BX, nsec+8(FP)
+ RET
+
+TEXT syscall·now(SB),NOSPLIT,$0
+ JMP time·now(SB)
+
+TEXT runtime·nacl_clock_gettime(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_clock_gettime)
+
+TEXT runtime·nanotime(SB),NOSPLIT,$20
+ MOVL $0, 0(SP) // real time clock
+ LEAL 8(SP), AX
+ MOVL AX, 4(SP) // timespec
+ NACL_SYSCALL(SYS_clock_gettime)
+ MOVL 8(SP), AX // low 32 sec
+ MOVL 16(SP), BX // nsec
+
+ // sec is in AX, nsec in BX
+ // convert to DX:AX nsec
+ MOVL $1000000000, CX
+ MULL CX
+ ADDL BX, AX
+ ADCL $0, DX
+
+ MOVL ret+0(FP), DI
+ MOVL AX, 0(DI)
+ MOVL DX, 4(DI)
+ RET
+
+TEXT runtime·setldt(SB),NOSPLIT,$8
+ MOVL addr+4(FP), BX // aka base
+ ADDL $0x8, BX
+ MOVL BX, 0(SP)
+ NACL_SYSCALL(SYS_tls_init)
+ RET
+
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
+ get_tls(CX)
+
+ // check that m exists
+ MOVL m(CX), BX
+ CMPL BX, $0
+ JNE 6(PC)
+ MOVL $11, BX
+ MOVL BX, 0(SP)
+ MOVL $runtime·badsignal(SB), AX
+ CALL AX
+ JMP sigtramp_ret
+
+ // save g
+ MOVL g(CX), DI
+ MOVL DI, 20(SP)
+
+ // g = m->gsignal
+ MOVL m_gsignal(BX), BX
+ MOVL BX, g(CX)
+
+ // copy arguments for sighandler
+ MOVL $11, 0(SP) // signal
+ MOVL $0, 4(SP) // siginfo
+ LEAL ctxt+4(FP), AX
+ MOVL AX, 8(SP) // context
+ MOVL DI, 12(SP) // g
+
+ CALL runtime·sighandler(SB)
+
+ // restore g
+ get_tls(CX)
+ MOVL 20(SP), BX
+ MOVL BX, g(CX)
+
+sigtramp_ret:
+ // Enable exceptions again.
+ NACL_SYSCALL(SYS_exception_clear_flag)
+
+	// NaCl has abdicated its traditional operating system responsibility
+ // and declined to implement 'sigreturn'. Instead the only way to return
+ // to the execution of our program is to restore the registers ourselves.
+ // Unfortunately, that is impossible to do with strict fidelity, because
+ // there is no way to do the final update of PC that ends the sequence
+ // without either (1) jumping to a register, in which case the register ends
+ // holding the PC value instead of its intended value or (2) storing the PC
+ // on the stack and using RET, which imposes the requirement that SP is
+	// valid and that it is okay to smash the word below it. The second would
+ // normally be the lesser of the two evils, except that on NaCl, the linker
+ // must rewrite RET into "POP reg; AND $~31, reg; JMP reg", so either way
+ // we are going to lose a register as a result of the incoming signal.
+ // Similarly, there is no way to restore EFLAGS; the usual way is to use
+ // POPFL, but NaCl rejects that instruction. We could inspect the bits and
+ // execute a sequence of instructions designed to recreate those flag
+ // settings, but that's a lot of work.
+ //
+ // Thankfully, Go's signal handlers never try to return directly to the
+ // executing code, so all the registers and EFLAGS are dead and can be
+	// smashed. The only registers that matter are the ones being set
+	// up for the simulated call that the signal handler has created.
+ // Today those registers are just PC and SP, but in case additional registers
+ // are relevant in the future (for example DX is the Go func context register)
+ // we restore as many registers as possible.
+ //
+ // We smash BP, because that's what the linker smashes during RET.
+ //
+ LEAL ctxt+4(FP), BP
+ ADDL $64, BP
+ MOVL 0(BP), AX
+ MOVL 4(BP), CX
+ MOVL 8(BP), DX
+ MOVL 12(BP), BX
+ MOVL 16(BP), SP
+ // 20(BP) is saved BP, never to be seen again
+ MOVL 24(BP), SI
+ MOVL 28(BP), DI
+ // 36(BP) is saved EFLAGS, never to be seen again
+ MOVL 32(BP), BP // saved PC
+ JMP BP
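
The NACL_SYSCALL and NACL_SYSJMP macros above encode NaCl's fixed trampoline layout: the syscall table starts at address 0x10000 and each entry occupies 32 bytes, so syscall number code dispatches through 0x10000 + code*32. A sketch of just that address arithmetic (illustrative; the value 30 for SYS_exit is an assumption):

	package main

	import "fmt"

	// naclTrampoline mirrors the macro $(0x10000 + ((code)<<5)).
	func naclTrampoline(code uint32) uint32 {
		return 0x10000 + code<<5 // code<<5 == code*32
	}

	func main() {
		fmt.Printf("%#x\n", naclTrampoline(30)) // prints 0x103c0
	}
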
diff --git a/src/pkg/runtime/sys_nacl_amd64p32.s b/src/pkg/runtime/sys_nacl_amd64p32.s
new file mode 100644
index 000000000..43c172372
--- /dev/null
+++ b/src/pkg/runtime/sys_nacl_amd64p32.s
@@ -0,0 +1,413 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "zasm_GOOS_GOARCH.h"
+#include "../../cmd/ld/textflag.h"
+#include "syscall_nacl.h"
+
+#define NACL_SYSCALL(code) \
+ MOVL $(0x10000 + ((code)<<5)), AX; CALL AX
+
+#define NACL_SYSJMP(code) \
+ MOVL $(0x10000 + ((code)<<5)), AX; JMP AX
+
+TEXT runtime·settls(SB),NOSPLIT,$0
+ MOVL DI, TLS // really BP
+ RET
+
+TEXT runtime·exit(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_exit)
+
+TEXT runtime·exit1(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_thread_exit)
+
+TEXT runtime·open(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ NACL_SYSJMP(SYS_open)
+
+TEXT runtime·close(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_close)
+
+TEXT runtime·read(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ NACL_SYSJMP(SYS_read)
+
+TEXT syscall·naclWrite(SB), NOSPLIT, $16-20
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ MOVL DI, 0(SP)
+ MOVL SI, 4(SP)
+ MOVL DX, 8(SP)
+ CALL runtime·write(SB)
+ MOVL AX, ret+16(FP)
+ RET
+
+TEXT runtime·write(SB),NOSPLIT,$16-12
+ // If using fake time and writing to stdout or stderr,
+ // emit playback header before actual data.
+ MOVQ runtime·timens(SB), AX
+ CMPQ AX, $0
+ JEQ write
+ MOVL arg1+0(FP), DI
+ CMPL DI, $1
+ JEQ playback
+ CMPL DI, $2
+ JEQ playback
+
+write:
+ // Ordinary write.
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ NACL_SYSCALL(SYS_write)
+ RET
+
+ // Write with playback header.
+ // First, lock to avoid interleaving writes.
+playback:
+ MOVL $1, BX
+ XCHGL runtime·writelock(SB), BX
+ CMPL BX, $0
+ JNE playback
+
+ // Playback header: 0 0 P B <8-byte time> <4-byte data length>
+ MOVL $(('B'<<24) | ('P'<<16)), 0(SP)
+ BSWAPQ AX
+ MOVQ AX, 4(SP)
+ MOVL arg3+8(FP), DX
+ BSWAPL DX
+ MOVL DX, 12(SP)
+ MOVL $1, DI // standard output
+ MOVL SP, SI
+ MOVL $16, DX
+ NACL_SYSCALL(SYS_write)
+
+ // Write actual data.
+ MOVL $1, DI // standard output
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ NACL_SYSCALL(SYS_write)
+
+ // Unlock.
+ MOVL $0, runtime·writelock(SB)
+
+ RET
+
+TEXT runtime·nacl_exception_stack(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ NACL_SYSJMP(SYS_exception_stack)
+
+TEXT runtime·nacl_exception_handler(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ NACL_SYSJMP(SYS_exception_handler)
+
+TEXT runtime·nacl_sem_create(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_sem_create)
+
+TEXT runtime·nacl_sem_wait(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_sem_wait)
+
+TEXT runtime·nacl_sem_post(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_sem_post)
+
+TEXT runtime·nacl_mutex_create(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_mutex_create)
+
+TEXT runtime·nacl_mutex_lock(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_mutex_lock)
+
+TEXT runtime·nacl_mutex_trylock(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_mutex_trylock)
+
+TEXT runtime·nacl_mutex_unlock(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_mutex_unlock)
+
+TEXT runtime·nacl_cond_create(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_cond_create)
+
+TEXT runtime·nacl_cond_wait(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ NACL_SYSJMP(SYS_cond_wait)
+
+TEXT runtime·nacl_cond_signal(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_cond_signal)
+
+TEXT runtime·nacl_cond_broadcast(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ NACL_SYSJMP(SYS_cond_broadcast)
+
+TEXT runtime·nacl_cond_timed_wait_abs(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ NACL_SYSJMP(SYS_cond_timed_wait_abs)
+
+TEXT runtime·nacl_thread_create(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ MOVL arg4+12(FP), CX
+ NACL_SYSJMP(SYS_thread_create)
+
+TEXT runtime·mstart_nacl(SB),NOSPLIT,$0
+ NACL_SYSCALL(SYS_tls_get)
+ SUBL $8, AX
+ MOVL AX, TLS
+ JMP runtime·mstart(SB)
+
+TEXT runtime·nacl_nanosleep(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ NACL_SYSJMP(SYS_nanosleep)
+
+TEXT runtime·osyield(SB),NOSPLIT,$0
+ NACL_SYSJMP(SYS_sched_yield)
+
+TEXT runtime·mmap(SB),NOSPLIT,$8
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ MOVL arg3+8(FP), DX
+ MOVL arg4+12(FP), CX
+ MOVL arg5+16(FP), R8
+ MOVL arg6+20(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL SP, R9
+ NACL_SYSCALL(SYS_mmap)
+ CMPL AX, $-4095
+ JNA 2(PC)
+ NEGL AX
+ RET
+
+TEXT time·now(SB),NOSPLIT,$16
+ MOVQ runtime·timens(SB), AX
+ CMPQ AX, $0
+ JEQ realtime
+ MOVQ $0, DX
+ MOVQ $1000000000, CX
+ DIVQ CX
+ MOVQ AX, sec+0(FP)
+ MOVL DX, nsec+8(FP)
+ RET
+realtime:
+ MOVL $0, DI // real time clock
+ LEAL 0(SP), AX
+ MOVL AX, SI // timespec
+ NACL_SYSCALL(SYS_clock_gettime)
+ MOVL 0(SP), AX // low 32 sec
+ MOVL 4(SP), CX // high 32 sec
+ MOVL 8(SP), BX // nsec
+
+ // sec is in AX, nsec in BX
+ MOVL AX, sec+0(FP)
+ MOVL CX, sec+4(FP)
+ MOVL BX, nsec+8(FP)
+ RET
+
+TEXT syscall·now(SB),NOSPLIT,$0
+ JMP time·now(SB)
+
+TEXT runtime·nacl_clock_gettime(SB),NOSPLIT,$0
+ MOVL arg1+0(FP), DI
+ MOVL arg2+4(FP), SI
+ NACL_SYSJMP(SYS_clock_gettime)
+
+TEXT runtime·nanotime(SB),NOSPLIT,$16
+ MOVQ runtime·timens(SB), AX
+ CMPQ AX, $0
+ JEQ 2(PC)
+ RET
+ MOVL $0, DI // real time clock
+ LEAL 0(SP), AX
+ MOVL AX, SI // timespec
+ NACL_SYSCALL(SYS_clock_gettime)
+ MOVQ 0(SP), AX // sec
+ MOVL 8(SP), DX // nsec
+
+ // sec is in AX, nsec in DX
+ // return nsec in AX
+ IMULQ $1000000000, AX
+ ADDQ DX, AX
+ RET
+
+TEXT runtime·sigtramp(SB),NOSPLIT,$80
+ // restore TLS register at time of execution,
+ // in case it's been smashed.
+ // the TLS register is really BP, but for consistency
+ // with non-NaCl systems it is referred to here as TLS.
+ // NOTE: Cannot use SYS_tls_get here (like we do in mstart_nacl),
+ // because the main thread never calls tls_set.
+ LEAL ctxt+0(FP), AX
+ MOVL (16*4+5*8)(AX), AX
+ MOVL AX, TLS
+
+ // check that m exists
+ get_tls(CX)
+ MOVL m(CX), BX
+
+ CMPL BX, $0
+ JEQ nom
+
+ // save g
+ MOVL g(CX), DI
+ MOVL DI, 20(SP)
+
+ // g = m->gsignal
+ MOVL m_gsignal(BX), BX
+ MOVL BX, g(CX)
+
+//JMP debughandler
+
+ // copy arguments for sighandler
+ MOVL $11, 0(SP) // signal
+ MOVL $0, 4(SP) // siginfo
+ LEAL ctxt+0(FP), AX
+ MOVL AX, 8(SP) // context
+ MOVL DI, 12(SP) // g
+
+ CALL runtime·sighandler(SB)
+
+ // restore g
+ get_tls(CX)
+ MOVL 20(SP), BX
+ MOVL BX, g(CX)
+
+sigtramp_ret:
+ // Enable exceptions again.
+ NACL_SYSCALL(SYS_exception_clear_flag)
+
+ // Restore registers as best we can. Impossible to do perfectly.
+ // See comment in sys_nacl_386.s for extended rationale.
+ LEAL ctxt+0(FP), SI
+ ADDL $64, SI
+ MOVQ 0(SI), AX
+ MOVQ 8(SI), CX
+ MOVQ 16(SI), DX
+ MOVQ 24(SI), BX
+ MOVL 32(SI), SP // MOVL for SP sandboxing
+ // 40(SI) is saved BP aka TLS, already restored above
+ // 48(SI) is saved SI, never to be seen again
+ MOVQ 56(SI), DI
+ MOVQ 64(SI), R8
+ MOVQ 72(SI), R9
+ MOVQ 80(SI), R10
+ MOVQ 88(SI), R11
+ MOVQ 96(SI), R12
+ MOVQ 104(SI), R13
+ MOVQ 112(SI), R14
+ // 120(SI) is R15, which is owned by Native Client and must not be modified
+ MOVQ 128(SI), SI // saved PC
+ // 136(SI) is saved EFLAGS, never to be seen again
+ JMP SI
+
+debughandler:
+ // print basic information
+ LEAL ctxt+0(FP), DI
+ MOVL $runtime·sigtrampf(SB), AX
+ MOVL AX, 0(SP)
+ MOVQ (16*4+16*8)(DI), BX // rip
+ MOVQ BX, 8(SP)
+ MOVQ (16*4+0*8)(DI), BX // rax
+ MOVQ BX, 16(SP)
+ MOVQ (16*4+1*8)(DI), BX // rcx
+ MOVQ BX, 24(SP)
+ MOVQ (16*4+2*8)(DI), BX // rdx
+ MOVQ BX, 32(SP)
+ MOVQ (16*4+3*8)(DI), BX // rbx
+ MOVQ BX, 40(SP)
+ MOVQ (16*4+7*8)(DI), BX // rdi
+ MOVQ BX, 48(SP)
+ MOVQ (16*4+15*8)(DI), BX // r15
+ MOVQ BX, 56(SP)
+ MOVQ (16*4+4*8)(DI), BX // rsp
+ MOVQ 0(BX), BX
+ MOVQ BX, 64(SP)
+ CALL runtime·printf(SB)
+
+ LEAL ctxt+0(FP), DI
+ MOVQ (16*4+16*8)(DI), BX // rip
+ MOVL BX, 0(SP)
+ MOVQ (16*4+4*8)(DI), BX // rsp
+ MOVL BX, 4(SP)
+ MOVL $0, 8(SP) // lr
+ get_tls(CX)
+ MOVL g(CX), BX
+ MOVL BX, 12(SP) // gp
+ CALL runtime·traceback(SB)
+
+notls:
+ MOVL 0, AX
+ RET
+
+nom:
+ MOVL 0, AX
+ RET
+
+// cannot do real signal handling yet, because gsignal has not been allocated.
+MOVL $1, DI; NACL_SYSCALL(SYS_exit)
+
+TEXT runtime·nacl_sysinfo(SB),NOSPLIT,$16
+/*
+ MOVL di+0(FP), DI
+ LEAL 12(DI), BX
+ MOVL 8(DI), AX
+ ADDL 4(DI), AX
+ ADDL $2, AX
+ LEAL (BX)(AX*4), BX
+ MOVL BX, runtime·nacl_irt_query(SB)
+auxloop:
+ MOVL 0(BX), DX
+ CMPL DX, $0
+ JNE 2(PC)
+ RET
+ CMPL DX, $32
+ JEQ auxfound
+ ADDL $8, BX
+ JMP auxloop
+auxfound:
+ MOVL 4(BX), BX
+ MOVL BX, runtime·nacl_irt_query(SB)
+
+ LEAL runtime·nacl_irt_basic_v0_1_str(SB), DI
+ LEAL runtime·nacl_irt_basic_v0_1(SB), SI
+ MOVL runtime·nacl_irt_basic_v0_1_size(SB), DX
+ MOVL runtime·nacl_irt_query(SB), BX
+ CALL BX
+
+ LEAL runtime·nacl_irt_memory_v0_3_str(SB), DI
+ LEAL runtime·nacl_irt_memory_v0_3(SB), SI
+ MOVL runtime·nacl_irt_memory_v0_3_size(SB), DX
+ MOVL runtime·nacl_irt_query(SB), BX
+ CALL BX
+
+ LEAL runtime·nacl_irt_thread_v0_1_str(SB), DI
+ LEAL runtime·nacl_irt_thread_v0_1(SB), SI
+ MOVL runtime·nacl_irt_thread_v0_1_size(SB), DX
+ MOVL runtime·nacl_irt_query(SB), BX
+ CALL BX
+
+ // TODO: Once we have a NaCl SDK with futex syscall support,
+ // try switching to futex syscalls and here load the
+ // nacl-irt-futex-0.1 table.
+*/
+ RET
diff --git a/src/pkg/runtime/sys_openbsd_386.s b/src/pkg/runtime/sys_openbsd_386.s
index e1ec5337a..8f0da5c0e 100644
--- a/src/pkg/runtime/sys_openbsd_386.s
+++ b/src/pkg/runtime/sys_openbsd_386.s
@@ -9,6 +9,8 @@
#include "zasm_GOOS_GOARCH.h"
#include "../../cmd/ld/textflag.h"
+#define CLOCK_MONOTONIC $3
+
// Exit the entire program (like C exit)
TEXT runtime·exit(SB),NOSPLIT,$-4
MOVL $1, AX
@@ -45,21 +47,22 @@ TEXT runtime·write(SB),NOSPLIT,$-4
INT $0x80
RET
-TEXT runtime·usleep(SB),NOSPLIT,$20
+TEXT runtime·usleep(SB),NOSPLIT,$24
MOVL $0, DX
MOVL usec+0(FP), AX
MOVL $1000000, CX
DIVL CX
- MOVL AX, 12(SP) // tv_sec
+ MOVL AX, 12(SP) // tv_sec - l32
+ MOVL $0, 16(SP) // tv_sec - h32
MOVL $1000, AX
MULL DX
- MOVL AX, 16(SP) // tv_nsec
+ MOVL AX, 20(SP) // tv_nsec
MOVL $0, 0(SP)
LEAL 12(SP), AX
MOVL AX, 4(SP) // arg 1 - rqtp
MOVL $0, 8(SP) // arg 2 - rmtp
- MOVL $240, AX // sys_nanosleep
+ MOVL $91, AX // sys_nanosleep
INT $0x80
RET
@@ -107,43 +110,46 @@ TEXT runtime·madvise(SB),NOSPLIT,$-4
RET
TEXT runtime·setitimer(SB),NOSPLIT,$-4
- MOVL $83, AX
+ MOVL $69, AX
INT $0x80
RET
// func now() (sec int64, nsec int32)
TEXT time·now(SB), NOSPLIT, $32
- MOVL $232, AX
LEAL 12(SP), BX
- MOVL $0, 4(SP)
- MOVL BX, 8(SP)
+ MOVL $0, 4(SP) // arg 1 - clock_id
+ MOVL BX, 8(SP) // arg 2 - tp
+ MOVL $87, AX // sys_clock_gettime
INT $0x80
- MOVL 12(SP), AX // sec
- MOVL 16(SP), BX // nsec
- // sec is in AX, nsec in BX
+ MOVL 12(SP), AX // sec - l32
MOVL AX, sec+0(FP)
- MOVL $0, sec+4(FP)
+ MOVL 16(SP), AX // sec - h32
+ MOVL AX, sec+4(FP)
+
+ MOVL 20(SP), BX // nsec
MOVL BX, nsec+8(FP)
RET
// int64 nanotime(void) so really
// void nanotime(int64 *nsec)
TEXT runtime·nanotime(SB),NOSPLIT,$32
- MOVL $232, AX
LEAL 12(SP), BX
- MOVL $0, 4(SP)
- MOVL BX, 8(SP)
+ MOVL CLOCK_MONOTONIC, 4(SP) // arg 1 - clock_id
+ MOVL BX, 8(SP) // arg 2 - tp
+ MOVL $87, AX // sys_clock_gettime
INT $0x80
- MOVL 12(SP), AX // sec
- MOVL 16(SP), BX // nsec
- // sec is in AX, nsec in BX
- // convert to DX:AX nsec
- MOVL $1000000000, CX
- MULL CX
+ MOVL 16(SP), CX // sec - h32
+ IMULL $1000000000, CX
+
+ MOVL 12(SP), AX // sec - l32
+ MOVL $1000000000, BX
+ MULL BX // result in dx:ax
+
+ MOVL 20(SP), BX // nsec
ADDL BX, AX
- ADCL $0, DX
+ ADCL CX, DX // add high bits with carry
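+ // The 64-bit product sec*1e9 is built from 32-bit pieces: with
+ // sec = h*2^32 + l, sec*1e9 = l*1e9 (MULL, in DX:AX) plus
+ // (h*1e9 mod 2^32)*2^32 (IMULL, in CX, folded into DX by the ADCL
+ // together with the nsec carry). Bits above 2^64 are dropped;
+ // int64 nanoseconds do not overflow until the year 2262 anyway.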
MOVL ret+0(FP), DI
MOVL AX, 0(DI)
@@ -325,7 +331,7 @@ TEXT runtime·osyield(SB),NOSPLIT,$-4
RET
TEXT runtime·thrsleep(SB),NOSPLIT,$-4
- MOVL $300, AX // sys___thrsleep
+ MOVL $94, AX // sys___thrsleep
INT $0x80
RET
@@ -362,7 +368,7 @@ TEXT runtime·kqueue(SB),NOSPLIT,$0
// int32 runtime·kevent(int kq, Kevent *changelist, int nchanges, Kevent *eventlist, int nevents, Timespec *timeout);
TEXT runtime·kevent(SB),NOSPLIT,$0
- MOVL $270, AX
+ MOVL $72, AX // sys_kevent
INT $0x80
JAE 2(PC)
NEGL AX
@@ -370,7 +376,7 @@ TEXT runtime·kevent(SB),NOSPLIT,$0
// int32 runtime·closeonexec(int32 fd);
TEXT runtime·closeonexec(SB),NOSPLIT,$32
- MOVL $92, AX // fcntl
+ MOVL $92, AX // sys_fcntl
// 0(SP) is where the caller PC would be; kernel skips it
MOVL fd+0(FP), BX
MOVL BX, 4(SP) // fd
diff --git a/src/pkg/runtime/sys_openbsd_amd64.s b/src/pkg/runtime/sys_openbsd_amd64.s
index 26dc61f71..b2a61820a 100644
--- a/src/pkg/runtime/sys_openbsd_amd64.s
+++ b/src/pkg/runtime/sys_openbsd_amd64.s
@@ -9,6 +9,8 @@
#include "zasm_GOOS_GOARCH.h"
#include "../../cmd/ld/textflag.h"
+#define CLOCK_MONOTONIC $3
+
// int64 tfork(void *param, uintptr psize, M *mp, G *gp, void (*fn)(void));
TEXT runtime·tfork(SB),NOSPLIT,$32
@@ -62,7 +64,7 @@ TEXT runtime·thrsleep(SB),NOSPLIT,$0
MOVQ 24(SP), DX // arg 3 - tp
MOVQ 32(SP), R10 // arg 4 - lock
MOVQ 40(SP), R8 // arg 5 - abort
- MOVL $300, AX // sys___thrsleep
+ MOVL $94, AX // sys___thrsleep
SYSCALL
RET
@@ -130,7 +132,7 @@ TEXT runtime·usleep(SB),NOSPLIT,$16
MOVQ SP, DI // arg 1 - rqtp
MOVQ $0, SI // arg 2 - rmtp
- MOVL $240, AX // sys_nanosleep
+ MOVL $91, AX // sys_nanosleep
SYSCALL
RET
@@ -138,7 +140,7 @@ TEXT runtime·raise(SB),NOSPLIT,$16
MOVL $299, AX // sys_getthrid
SYSCALL
MOVQ AX, DI // arg 1 - pid
- MOVL sig+0(FP), SI // arg 2 - signum
+ MOVL sig+0(FP), SI // arg 2 - signum
MOVL $37, AX // sys_kill
SYSCALL
RET
@@ -147,7 +149,7 @@ TEXT runtime·setitimer(SB),NOSPLIT,$-8
MOVL 8(SP), DI // arg 1 - which
MOVQ 16(SP), SI // arg 2 - itv
MOVQ 24(SP), DX // arg 3 - oitv
- MOVL $83, AX // sys_setitimer
+ MOVL $69, AX // sys_setitimer
SYSCALL
RET
@@ -155,9 +157,9 @@ TEXT runtime·setitimer(SB),NOSPLIT,$-8
TEXT time·now(SB), NOSPLIT, $32
MOVQ $0, DI // arg 1 - clock_id
LEAQ 8(SP), SI // arg 2 - tp
- MOVL $232, AX // sys_clock_gettime
+ MOVL $87, AX // sys_clock_gettime
SYSCALL
- MOVL 8(SP), AX // sec
+ MOVQ 8(SP), AX // sec
MOVQ 16(SP), DX // nsec
// sec is in AX, nsec in DX
@@ -166,11 +168,11 @@ TEXT time·now(SB), NOSPLIT, $32
RET
TEXT runtime·nanotime(SB),NOSPLIT,$24
- MOVQ $0, DI // arg 1 - clock_id
+ MOVQ CLOCK_MONOTONIC, DI // arg 1 - clock_id
LEAQ 8(SP), SI // arg 2 - tp
- MOVL $232, AX // sys_clock_gettime
+ MOVL $87, AX // sys_clock_gettime
SYSCALL
- MOVL 8(SP), AX // sec
+ MOVQ 8(SP), AX // sec
MOVQ 16(SP), DX // nsec
// sec is in AX, nsec in DX
@@ -318,7 +320,7 @@ TEXT runtime·kevent(SB),NOSPLIT,$0
MOVQ 32(SP), R10
MOVL 40(SP), R8
MOVQ 48(SP), R9
- MOVL $270, AX
+ MOVL $72, AX
SYSCALL
JCC 2(PC)
NEGQ AX
diff --git a/src/pkg/runtime/sys_plan9_386.s b/src/pkg/runtime/sys_plan9_386.s
index bed0f7ebe..143cd2e49 100644
--- a/src/pkg/runtime/sys_plan9_386.s
+++ b/src/pkg/runtime/sys_plan9_386.s
@@ -81,6 +81,10 @@ TEXT runtime·plan9_semrelease(SB),NOSPLIT,$0
TEXT runtime·rfork(SB),NOSPLIT,$0
MOVL $19, AX // rfork
+ MOVL stack+8(SP), CX
+ MOVL mm+12(SP), BX // m
+ MOVL gg+16(SP), DX // g
+ MOVL fn+20(SP), SI // fn
INT $64
// In parent, return.
@@ -88,13 +92,7 @@ TEXT runtime·rfork(SB),NOSPLIT,$0
JEQ 2(PC)
RET
- // In child on old stack.
- MOVL mm+12(SP), BX // m
- MOVL gg+16(SP), DX // g
- MOVL fn+20(SP), SI // fn
-
// set SP to be on the new child stack
- MOVL stack+8(SP), CX
MOVL CX, SP
// Initialize m, g.
@@ -102,8 +100,9 @@ TEXT runtime·rfork(SB),NOSPLIT,$0
MOVL DX, g(AX)
MOVL BX, m(AX)
- // Initialize AX from TOS struct.
- MOVL procid(AX), AX
+ // Initialize procid from TOS struct.
+ // TODO: Be explicit and insert a new MOVL _tos(SB), AX here.
+ MOVL 48(AX), AX // procid
MOVL AX, m_procid(BX) // save pid as m->procid
CALL runtime·stackcheck(SB) // smashes AX, CX
diff --git a/src/pkg/runtime/sys_plan9_amd64.s b/src/pkg/runtime/sys_plan9_amd64.s
index d6702e865..e60459cb8 100644
--- a/src/pkg/runtime/sys_plan9_amd64.s
+++ b/src/pkg/runtime/sys_plan9_amd64.s
@@ -136,7 +136,7 @@ TEXT runtime·rfork(SB),NOSPLIT,$0
MOVQ BX, m(AX)
// Initialize AX from pid in TLS.
- MOVQ procid(AX), AX
+ MOVQ 0(FS), AX
MOVQ AX, m_procid(BX) // save pid as m->procid
CALL runtime·stackcheck(SB) // smashes AX, CX
diff --git a/src/pkg/runtime/sys_solaris_amd64.s b/src/pkg/runtime/sys_solaris_amd64.s
new file mode 100644
index 000000000..21517693b
--- /dev/null
+++ b/src/pkg/runtime/sys_solaris_amd64.s
@@ -0,0 +1,267 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// System calls and other sys.stuff for AMD64, SunOS
+// /usr/include/sys/syscall.h for syscall numbers.
+//
+
+#include "zasm_GOOS_GOARCH.h"
+#include "../../cmd/ld/textflag.h"
+
+// This is needed by asm_amd64.s
+TEXT runtime·settls(SB),NOSPLIT,$8
+ RET
+
+// void libc·miniterrno(void *(*___errno)(void));
+//
+// Set the TLS errno pointer in M.
+//
+// Called using runtime·asmcgocall from os_solaris.c:/minit.
+TEXT runtime·miniterrno(SB),NOSPLIT,$0
+ // asmcgocall will put first argument into DI.
+ CALL DI // SysV ABI so returns in AX
+ get_tls(CX)
+ MOVQ m(CX), BX
+ MOVQ AX, m_perrno(BX)
+ RET
+
+// int64 runtime·nanotime1(void);
+//
+// clock_gettime(3c) wrapper because Timespec is too large for
+// runtime·nanotime stack.
+//
+// Called using runtime·sysvicall6 from os_solaris.c:/nanotime.
+TEXT runtime·nanotime1(SB),NOSPLIT,$0
+ // need space for the timespec argument.
+ SUBQ $64, SP // 16 bytes will do, but who knows in the future?
+ MOVQ $3, DI // CLOCK_REALTIME from <sys/time_impl.h>
+ MOVQ SP, SI
+ MOVQ libc·clock_gettime(SB), AX
+ CALL AX
+ MOVQ (SP), AX // tv_sec from struct timespec
+ IMULQ $1000000000, AX // multiply into nanoseconds
+ ADDQ 8(SP), AX // tv_nsec, offset should be stable.
+ ADDQ $64, SP
+ RET
+
+// pipe(3c) wrapper that returns fds in AX, DX.
+TEXT runtime·pipe1(SB),NOSPLIT,$0
+ SUBQ $16, SP // 8 bytes will do, but stack has to be 16-byte aligned
+ MOVQ SP, DI
+ MOVQ libc·pipe(SB), AX
+ CALL AX
+ MOVL 0(SP), AX
+ MOVL 4(SP), DX
+ ADDQ $16, SP
+ RET
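+
+// pipe1 is not called directly from Go; syscall_solaris.goc passes it
+// as the fn of a LibCall, so it runs under runtime·asmsysvicall6 and
+// its AX/DX results surface as r1 (read fd) and r2 (write fd).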
+
+// Call a library function with SysV calling conventions.
+// The called function can take a maximum of 6 INTEGER class arguments,
+// see
+// Michael Matz, Jan Hubicka, Andreas Jaeger, and Mark Mitchell
+// System V Application Binary Interface
+// AMD64 Architecture Processor Supplement
+// section 3.2.3.
+//
+// Called by runtime·asmcgocall or runtime·cgocall.
+TEXT runtime·asmsysvicall6(SB),NOSPLIT,$0
+ // asmcgocall will put first argument into DI.
+ PUSHQ DI // save for later
+ MOVQ libcall_fn(DI), AX
+ MOVQ libcall_args(DI), R11
+ MOVQ libcall_n(DI), R10
+
+ get_tls(CX)
+ MOVQ m(CX), BX
+ MOVQ m_perrno(BX), DX
+ CMPQ DX, $0
+ JEQ skiperrno1
+ MOVL $0, 0(DX)
+
+skiperrno1:
+ CMPQ R11, $0
+ JEQ skipargs
+ // Load 6 args into corresponding registers.
+ MOVQ 0(R11), DI
+ MOVQ 8(R11), SI
+ MOVQ 16(R11), DX
+ MOVQ 24(R11), CX
+ MOVQ 32(R11), R8
+ MOVQ 40(R11), R9
+skipargs:
+
+ // Call SysV function
+ CALL AX
+
+ // Return result
+ POPQ DI
+ MOVQ AX, libcall_r1(DI)
+ MOVQ DX, libcall_r2(DI)
+
+ get_tls(CX)
+ MOVQ m(CX), BX
+ MOVQ m_perrno(BX), AX
+ CMPQ AX, $0
+ JEQ skiperrno2
+ MOVL 0(AX), AX
+ MOVQ AX, libcall_err(DI)
+
+skiperrno2:
+ RET
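+
+// The LibCall contract, as used above: the caller fills in fn, n and
+// args (a pointer to n contiguous uintptr arguments) and reads r1, r2
+// and err back out. A minimal caller sketch in the runtime's C dialect,
+// mirroring syscall_solaris.goc:
+//	LibCall c;
+//	c.fn = (void*)libc·chdir;
+//	c.n = 1;
+//	c.args = (void*)&path;
+//	runtime·asmcgocall(runtime·asmsysvicall6, &c);
+//	// c.err now holds errno if the call failed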
+
+// uint32 tstart_sysvicall(M *newm);
+TEXT runtime·tstart_sysvicall(SB),NOSPLIT,$0
+ // DI contains first arg newm
+ MOVQ m_g0(DI), DX // g
+
+ // Make TLS entries point at g and m.
+ get_tls(BX)
+ MOVQ DX, g(BX)
+ MOVQ DI, m(BX)
+
+ // Layout new m scheduler stack on os stack.
+ MOVQ SP, AX
+ MOVQ AX, g_stackbase(DX)
+ SUBQ $(0x100000), AX // stack size
+ MOVQ AX, g_stackguard(DX)
+ MOVQ AX, g_stackguard0(DX)
+
+ // Someday the convention will be D is always cleared.
+ CLD
+
+ CALL runtime·stackcheck(SB) // clobbers AX,CX
+ CALL runtime·mstart(SB)
+
+ XORL AX, AX // return 0 == success
+ RET
+
+// Careful, this is called by __sighndlr, a libc function. We must preserve
+// registers as per the AMD64 ABI.
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
+ // Note that we are executing on altsigstack here, so we have
+ // more stack available than NOSPLIT would have us believe.
+ // To defeat the linker, we make our own stack frame with
+ // more space:
+ SUBQ $184, SP
+
+ // save registers
+ MOVQ BX, 32(SP)
+ MOVQ BP, 40(SP)
+ MOVQ R12, 48(SP)
+ MOVQ R13, 56(SP)
+ MOVQ R14, 64(SP)
+ MOVQ R15, 72(SP)
+
+ get_tls(BX)
+ // check that m exists
+ MOVQ m(BX), BP
+ CMPQ BP, $0
+ JNE allgood
+ MOVQ DI, 0(SP)
+ MOVQ $runtime·badsignal(SB), AX
+ CALL AX
+ RET
+
+allgood:
+ // save g
+ MOVQ g(BX), R10
+ MOVQ R10, 80(SP)
+
+ // Save m->libcall and m->scratch. We need to do this because we
+ // might get interrupted by a signal in runtime·asmcgocall.
+
+ // save m->libcall
+ LEAQ m_libcall(BP), R11
+ MOVQ libcall_fn(R11), R10
+ MOVQ R10, 88(SP)
+ MOVQ libcall_args(R11), R10
+ MOVQ R10, 96(SP)
+ MOVQ libcall_n(R11), R10
+ MOVQ R10, 104(SP)
+ MOVQ libcall_r1(R11), R10
+ MOVQ R10, 168(SP)
+ MOVQ libcall_r2(R11), R10
+ MOVQ R10, 176(SP)
+
+ // save m->scratch
+ LEAQ m_scratch(BP), R11
+ MOVQ 0(R11), R10
+ MOVQ R10, 112(SP)
+ MOVQ 8(R11), R10
+ MOVQ R10, 120(SP)
+ MOVQ 16(R11), R10
+ MOVQ R10, 128(SP)
+ MOVQ 24(R11), R10
+ MOVQ R10, 136(SP)
+ MOVQ 32(R11), R10
+ MOVQ R10, 144(SP)
+ MOVQ 40(R11), R10
+ MOVQ R10, 152(SP)
+
+ // save errno, it might be EINTR; stuff we do here might reset it.
+ MOVQ m_perrno(BP), R10
+ MOVL 0(R10), R10
+ MOVQ R10, 160(SP)
+
+ MOVQ g(BX), R10
+ // g = m->gsignal
+ MOVQ m_gsignal(BP), BP
+ MOVQ BP, g(BX)
+
+ // prepare call
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ DX, 16(SP)
+ MOVQ R10, 24(SP)
+ CALL runtime·sighandler(SB)
+
+ get_tls(BX)
+ MOVQ m(BX), BP
+ // restore libcall
+ LEAQ m_libcall(BP), R11
+ MOVQ 88(SP), R10
+ MOVQ R10, libcall_fn(R11)
+ MOVQ 96(SP), R10
+ MOVQ R10, libcall_args(R11)
+ MOVQ 104(SP), R10
+ MOVQ R10, libcall_n(R11)
+ MOVQ 168(SP), R10
+ MOVQ R10, libcall_r1(R11)
+ MOVQ 176(SP), R10
+ MOVQ R10, libcall_r2(R11)
+
+ // restore scratch
+ LEAQ m_scratch(BP), R11
+ MOVQ 112(SP), R10
+ MOVQ R10, 0(R11)
+ MOVQ 120(SP), R10
+ MOVQ R10, 8(R11)
+ MOVQ 128(SP), R10
+ MOVQ R10, 16(R11)
+ MOVQ 136(SP), R10
+ MOVQ R10, 24(R11)
+ MOVQ 144(SP), R10
+ MOVQ R10, 32(R11)
+ MOVQ 152(SP), R10
+ MOVQ R10, 40(R11)
+
+ // restore errno
+ MOVQ m_perrno(BP), R11
+ MOVQ 160(SP), R10
+ MOVL R10, 0(R11)
+
+ // restore g
+ MOVQ 80(SP), R10
+ MOVQ R10, g(BX)
+
+ // restore registers
+ MOVQ 32(SP), BX
+ MOVQ 40(SP), BP
+ MOVQ 48(SP), R12
+ MOVQ 56(SP), R13
+ MOVQ 64(SP), R14
+ MOVQ 72(SP), R15
+
+ ADDQ $184, SP
+ RET
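+
+// For reference, the 184-byte frame built above is laid out as:
+//	0-24		arguments to sighandler (sig, info, context, g)
+//	32-72		callee-saved BX, BP, R12-R15
+//	80		saved g
+//	88-104, 168-176	saved m->libcall (fn, args, n, r1, r2)
+//	112-152		saved m->scratch
+//	160		saved errno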
diff --git a/src/pkg/runtime/sys_windows_386.s b/src/pkg/runtime/sys_windows_386.s
index 20753638e..e0c0631cf 100644
--- a/src/pkg/runtime/sys_windows_386.s
+++ b/src/pkg/runtime/sys_windows_386.s
@@ -14,28 +14,28 @@ TEXT runtime·asmstdcall(SB),NOSPLIT,$0
// Copy args to the stack.
MOVL SP, BP
- MOVL wincall_n(BX), CX // words
+ MOVL libcall_n(BX), CX // words
MOVL CX, AX
SALL $2, AX
SUBL AX, SP // room for args
MOVL SP, DI
- MOVL wincall_args(BX), SI
+ MOVL libcall_args(BX), SI
CLD
REP; MOVSL
// Call stdcall or cdecl function.
// DI SI BP BX are preserved, SP is not
- CALL wincall_fn(BX)
+ CALL libcall_fn(BX)
MOVL BP, SP
// Return result.
MOVL c+0(FP), BX
- MOVL AX, wincall_r1(BX)
- MOVL DX, wincall_r2(BX)
+ MOVL AX, libcall_r1(BX)
+ MOVL DX, libcall_r2(BX)
// GetLastError().
MOVL 0x34(FS), AX
- MOVL AX, wincall_err(BX)
+ MOVL AX, libcall_err(BX)
RET
@@ -69,43 +69,47 @@ TEXT runtime·setlasterror(SB),NOSPLIT,$0
MOVL AX, 0x34(FS)
RET
-TEXT runtime·sigtramp(SB),NOSPLIT,$28
- // unwinding?
- MOVL info+0(FP), CX
- TESTL $6, 4(CX) // exception flags
- MOVL $1, AX
- JNZ sigdone
-
- // copy arguments for call to sighandler
- MOVL CX, 0(SP)
- MOVL context+8(FP), CX
- MOVL CX, 4(SP)
-
- get_tls(CX)
-
- // check that m exists
- MOVL m(CX), AX
- CMPL AX, $0
- JNE 2(PC)
- CALL runtime·badsignal2(SB)
-
- MOVL g(CX), CX
- MOVL CX, 8(SP)
+// Called by Windows as a Vectored Exception Handler (VEH).
+// First argument is pointer to struct containing
+// exception record and context pointers.
+// Return 0 for 'not handled', -1 for handled.
+TEXT runtime·sigtramp(SB),NOSPLIT,$0-0
+ MOVL ptrs+0(FP), CX
+ SUBL $28, SP
+ // save callee-saved registers
MOVL BX, 12(SP)
MOVL BP, 16(SP)
MOVL SI, 20(SP)
MOVL DI, 24(SP)
+ MOVL 0(CX), BX // ExceptionRecord*
+ MOVL 4(CX), CX // Context*
+
+ // fetch g
+ get_tls(DX)
+ MOVL m(DX), AX
+ CMPL AX, $0
+ JNE 2(PC)
+ CALL runtime·badsignal2(SB)
+ MOVL g(DX), DX
+ // call sighandler(ExceptionRecord*, Context*, G*)
+ MOVL BX, 0(SP)
+ MOVL CX, 4(SP)
+ MOVL DX, 8(SP)
CALL runtime·sighandler(SB)
// AX is set to report result back to Windows
+ // restore callee-saved registers
MOVL 24(SP), DI
MOVL 20(SP), SI
MOVL 16(SP), BP
MOVL 12(SP), BX
-sigdone:
- RET
+
+ ADDL $28, SP
+ // RET $4 (return and pop the 4-byte parameter)
+ BYTE $0xC2; WORD $4
+ RET // unreached; make assembler happy
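+// The raw bytes above assemble to RET $4 (opcode C2 imm16 = C2 04 00):
+// the VEH handler is stdcall with one 4-byte parameter, so it must pop
+// the EXCEPTION_POINTERS pointer on return, which a plain RET would
+// not do; hence the hand-encoded BYTE/WORD form.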
TEXT runtime·ctrlhandler(SB),NOSPLIT,$0
PUSHL $runtime·ctrlhandler1(SB)
@@ -182,11 +186,6 @@ TEXT runtime·callbackasm1+0(SB),NOSPLIT,$0
PUSHL BP
PUSHL BX
- // set up SEH frame again
- PUSHL $runtime·sigtramp(SB)
- PUSHL 0(FS)
- MOVL SP, 0(FS)
-
// determine index into runtime·cbctxts table
SUBL $runtime·callbackasm(SB), AX
MOVL $0, DX
@@ -232,10 +231,6 @@ TEXT runtime·callbackasm1+0(SB),NOSPLIT,$0
MOVL BX, CX // cannot use BX anymore
- // pop SEH frame
- POPL 0(FS)
- POPL BX
-
// restore registers as required for windows callback
POPL BX
POPL BP
@@ -299,35 +294,6 @@ TEXT runtime·setldt(SB),NOSPLIT,$0
MOVL CX, 0x14(FS)
RET
-// void install_exception_handler()
-TEXT runtime·install_exception_handler(SB),NOSPLIT,$0
- get_tls(CX)
- MOVL m(CX), CX // m
-
- // Set up SEH frame
- MOVL m_seh(CX), DX
- MOVL $runtime·sigtramp(SB), AX
- MOVL AX, seh_handler(DX)
- MOVL 0(FS), AX
- MOVL AX, seh_prev(DX)
-
- // Install it
- MOVL DX, 0(FS)
-
- RET
-
-// void remove_exception_handler()
-TEXT runtime·remove_exception_handler(SB),NOSPLIT,$0
- get_tls(CX)
- MOVL m(CX), CX // m
-
- // Remove SEH frame
- MOVL m_seh(CX), DX
- MOVL seh_prev(DX), AX
- MOVL AX, 0(FS)
-
- RET
-
// Sleep duration is in 100ns units.
TEXT runtime·usleep1(SB),NOSPLIT,$0
MOVL duration+0(FP), BX
@@ -343,19 +309,36 @@ TEXT runtime·usleep1(SB),NOSPLIT,$0
RET
MOVL m(CX), BP
+
+ // leave pc/sp for cpu profiler
+ MOVL (SP), SI
+ MOVL SI, m_libcallpc(BP)
+ MOVL g(CX), SI
+ MOVL SI, m_libcallg(BP)
+ // sp must be the last, because once async cpu profiler finds
+ // all three values to be non-zero, it will use them
+ LEAL 4(SP), SI
+ MOVL SI, m_libcallsp(BP)
+
MOVL m_g0(BP), SI
CMPL g(CX), SI
- JNE 3(PC)
+ JNE usleep1_switch
// executing on m->g0 already
CALL AX
- RET
+ JMP usleep1_ret
+usleep1_switch:
// Switch to m->g0 stack and back.
MOVL (g_sched+gobuf_sp)(SI), SI
MOVL SP, -4(SI)
LEAL -4(SI), SP
CALL AX
MOVL 0(SP), SP
+
+usleep1_ret:
+ get_tls(CX)
+ MOVL m(CX), BP
+ MOVL $0, m_libcallsp(BP)
RET
// Runs on OS stack. duration (in 100ns units) is in BX.
diff --git a/src/pkg/runtime/sys_windows_amd64.s b/src/pkg/runtime/sys_windows_amd64.s
index 8c4caf867..94845903e 100644
--- a/src/pkg/runtime/sys_windows_amd64.s
+++ b/src/pkg/runtime/sys_windows_amd64.s
@@ -13,9 +13,9 @@
TEXT runtime·asmstdcall(SB),NOSPLIT,$0
// asmcgocall will put first argument into CX.
PUSHQ CX // save for later
- MOVQ wincall_fn(CX), AX
- MOVQ wincall_args(CX), SI
- MOVQ wincall_n(CX), CX
+ MOVQ libcall_fn(CX), AX
+ MOVQ libcall_args(CX), SI
+ MOVQ libcall_n(CX), CX
// SetLastError(0).
MOVQ 0x30(GS), DI
@@ -52,12 +52,12 @@ loadregs:
// Return result.
POPQ CX
- MOVQ AX, wincall_r1(CX)
+ MOVQ AX, libcall_r1(CX)
// GetLastError().
MOVQ 0x30(GS), DI
MOVL 0x68(DI), AX
- MOVQ AX, wincall_err(CX)
+ MOVQ AX, libcall_err(CX)
RET
@@ -95,49 +95,55 @@ TEXT runtime·setlasterror(SB),NOSPLIT,$0
MOVL AX, 0x68(CX)
RET
-TEXT runtime·sigtramp(SB),NOSPLIT,$0
- // CX: exception record
- // R8: context
+// Called by Windows as a Vectored Exception Handler (VEH).
+// First argument is pointer to struct containing
+// exception record and context pointers.
+// Return 0 for 'not handled', -1 for handled.
+TEXT runtime·sigtramp(SB),NOSPLIT,$0-0
+ // CX: PEXCEPTION_POINTERS ExceptionInfo
- // unwinding?
- TESTL $6, 4(CX) // exception flags
- MOVL $1, AX
- JNZ sigdone
-
- // copy arguments for call to sighandler.
-
- // Stack adjustment is here to hide from 6l,
- // which doesn't understand that sigtramp
- // runs on essentially unlimited stack.
- SUBQ $56, SP
- MOVQ CX, 0(SP)
- MOVQ R8, 8(SP)
-
- get_tls(CX)
-
- // check that m exists
- MOVQ m(CX), AX
+ // DI SI BP BX R12 R13 R14 R15 registers and DF flag are preserved
+ // as required by windows callback convention.
+ PUSHFQ
+ SUBQ $88, SP
+ MOVQ DI, 80(SP)
+ MOVQ SI, 72(SP)
+ MOVQ BP, 64(SP)
+ MOVQ BX, 56(SP)
+ MOVQ R12, 48(SP)
+ MOVQ R13, 40(SP)
+ MOVQ R14, 32(SP)
+ MOVQ R15, 24(SP)
+
+ MOVQ 0(CX), BX // ExceptionRecord*
+ MOVQ 8(CX), CX // Context*
+
+ // fetch g
+ get_tls(DX)
+ MOVQ m(DX), AX
CMPQ AX, $0
JNE 2(PC)
CALL runtime·badsignal2(SB)
-
- MOVQ g(CX), CX
- MOVQ CX, 16(SP)
-
- MOVQ BX, 24(SP)
- MOVQ BP, 32(SP)
- MOVQ SI, 40(SP)
- MOVQ DI, 48(SP)
-
+ MOVQ g(DX), DX
+ // call sighandler(ExceptionRecord*, Context*, G*)
+ MOVQ BX, 0(SP)
+ MOVQ CX, 8(SP)
+ MOVQ DX, 16(SP)
CALL runtime·sighandler(SB)
+ // AX is set to report result back to Windows
- MOVQ 24(SP), BX
- MOVQ 32(SP), BP
- MOVQ 40(SP), SI
- MOVQ 48(SP), DI
- ADDQ $56, SP
+ // restore registers as required for windows callback
+ MOVQ 24(SP), R15
+ MOVQ 32(SP), R14
+ MOVQ 40(SP), R13
+ MOVQ 48(SP), R12
+ MOVQ 56(SP), BX
+ MOVQ 64(SP), BP
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ ADDQ $88, SP
+ POPFQ
-sigdone:
RET
TEXT runtime·ctrlhandler(SB),NOSPLIT,$8
@@ -277,13 +283,6 @@ TEXT runtime·callbackasm1(SB),NOSPLIT,$0
POPQ -8(CX)(DX*1) // restore bytes just after the args
RET
-TEXT runtime·setstacklimits(SB),NOSPLIT,$0
- MOVQ 0x30(GS), CX
- MOVQ $0, 0x10(CX)
- MOVQ $0xffffffffffff, AX
- MOVQ AX, 0x08(CX)
- RET
-
// uint32 tstart_stdcall(M *newm);
TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
// CX contains first arg newm
@@ -315,14 +314,6 @@ TEXT runtime·settls(SB),NOSPLIT,$0
MOVQ DI, 0x28(GS)
RET
-// void install_exception_handler()
-TEXT runtime·install_exception_handler(SB),NOSPLIT,$0
- CALL runtime·setstacklimits(SB)
- RET
-
-TEXT runtime·remove_exception_handler(SB),NOSPLIT,$0
- RET
-
// Sleep duration is in 100ns units.
TEXT runtime·usleep1(SB),NOSPLIT,$0
MOVL duration+0(FP), BX
@@ -337,20 +328,35 @@ TEXT runtime·usleep1(SB),NOSPLIT,$0
CALL AX
RET
- MOVQ m(R15), R14
- MOVQ m_g0(R14), R14
+ MOVQ m(R15), R13
+
+ // leave pc/sp for cpu profiler
+ MOVQ (SP), R12
+ MOVQ R12, m_libcallpc(R13)
+ MOVQ g(R15), R12
+ MOVQ R12, m_libcallg(R13)
+ // sp must be the last, because once async cpu profiler finds
+ // all three values to be non-zero, it will use them
+ LEAQ 8(SP), R12
+ MOVQ R12, m_libcallsp(R13)
+
+ MOVQ m_g0(R13), R14
CMPQ g(R15), R14
- JNE 3(PC)
+ JNE usleep1_switch
// executing on m->g0 already
CALL AX
- RET
+ JMP usleep1_ret
+usleep1_switch:
// Switch to m->g0 stack and back.
MOVQ (g_sched+gobuf_sp)(R14), R14
MOVQ SP, -8(R14)
LEAQ -8(R14), SP
CALL AX
MOVQ 0(SP), SP
+
+usleep1_ret:
+ MOVQ $0, m_libcallsp(R13)
RET
// Runs on OS stack. duration (in 100ns units) is in BX.
diff --git a/src/pkg/runtime/sys_x86.c b/src/pkg/runtime/sys_x86.c
index 0df6dfbbd..a450b3e58 100644
--- a/src/pkg/runtime/sys_x86.c
+++ b/src/pkg/runtime/sys_x86.c
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 386
+// +build amd64 amd64p32 386
#include "runtime.h"
@@ -14,6 +14,8 @@ runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
uintptr *sp;
sp = (uintptr*)gobuf->sp;
+ if(sizeof(uintreg) > sizeof(uintptr))
+ *--sp = 0;
*--sp = (uintptr)gobuf->pc;
gobuf->sp = (uintptr)sp;
gobuf->pc = (uintptr)fn;
@@ -27,7 +29,7 @@ void
runtime·rewindmorestack(Gobuf *gobuf)
{
byte *pc;
-
+
pc = (byte*)gobuf->pc;
if(pc[0] == 0xe9) { // jmp 4-byte offset
gobuf->pc = gobuf->pc + 5 + *(int32*)(pc+1);
@@ -37,18 +39,18 @@ runtime·rewindmorestack(Gobuf *gobuf)
gobuf->pc = gobuf->pc + 2 + *(int8*)(pc+1);
return;
}
- if(pc[0] == 0xcc) {
- // This is a breakpoint inserted by gdb. We could use
- // runtime·findfunc to find the function. But if we
- // do that, then we will continue execution at the
- // function entry point, and we will not hit the gdb
- // breakpoint. So for this case we don't change
- // gobuf->pc, so that when we return we will execute
- // the jump instruction and carry on. This means that
- // stack unwinding may not work entirely correctly
- // (http://golang.org/issue/5723) but the user is
- // running under gdb anyhow.
- return;
+ if(pc[0] == 0xcc) {
+ // This is a breakpoint inserted by gdb. We could use
+ // runtime·findfunc to find the function. But if we
+ // do that, then we will continue execution at the
+ // function entry point, and we will not hit the gdb
+ // breakpoint. So for this case we don't change
+ // gobuf->pc, so that when we return we will execute
+ // the jump instruction and carry on. This means that
+ // stack unwinding may not work entirely correctly
+ // (http://golang.org/issue/5723) but the user is
+ // running under gdb anyhow.
+ return;
}
runtime·printf("runtime: pc=%p %x %x %x %x %x\n", pc, pc[0], pc[1], pc[2], pc[3], pc[4]);
runtime·throw("runtime: misuse of rewindmorestack");
diff --git a/src/pkg/runtime/syscall_nacl.h b/src/pkg/runtime/syscall_nacl.h
new file mode 100644
index 000000000..b33852ec8
--- /dev/null
+++ b/src/pkg/runtime/syscall_nacl.h
@@ -0,0 +1,71 @@
+// generated by mknacl.sh - do not edit
+#define SYS_null 1
+#define SYS_nameservice 2
+#define SYS_dup 8
+#define SYS_dup2 9
+#define SYS_open 10
+#define SYS_close 11
+#define SYS_read 12
+#define SYS_write 13
+#define SYS_lseek 14
+#define SYS_ioctl 15
+#define SYS_stat 16
+#define SYS_fstat 17
+#define SYS_chmod 18
+#define SYS_brk 20
+#define SYS_mmap 21
+#define SYS_munmap 22
+#define SYS_getdents 23
+#define SYS_mprotect 24
+#define SYS_list_mappings 25
+#define SYS_exit 30
+#define SYS_getpid 31
+#define SYS_sched_yield 32
+#define SYS_sysconf 33
+#define SYS_gettimeofday 40
+#define SYS_clock 41
+#define SYS_nanosleep 42
+#define SYS_clock_getres 43
+#define SYS_clock_gettime 44
+#define SYS_mkdir 45
+#define SYS_rmdir 46
+#define SYS_chdir 47
+#define SYS_getcwd 48
+#define SYS_unlink 49
+#define SYS_imc_makeboundsock 60
+#define SYS_imc_accept 61
+#define SYS_imc_connect 62
+#define SYS_imc_sendmsg 63
+#define SYS_imc_recvmsg 64
+#define SYS_imc_mem_obj_create 65
+#define SYS_imc_socketpair 66
+#define SYS_mutex_create 70
+#define SYS_mutex_lock 71
+#define SYS_mutex_trylock 72
+#define SYS_mutex_unlock 73
+#define SYS_cond_create 74
+#define SYS_cond_wait 75
+#define SYS_cond_signal 76
+#define SYS_cond_broadcast 77
+#define SYS_cond_timed_wait_abs 79
+#define SYS_thread_create 80
+#define SYS_thread_exit 81
+#define SYS_tls_init 82
+#define SYS_thread_nice 83
+#define SYS_tls_get 84
+#define SYS_second_tls_set 85
+#define SYS_second_tls_get 86
+#define SYS_exception_handler 87
+#define SYS_exception_stack 88
+#define SYS_exception_clear_flag 89
+#define SYS_sem_create 100
+#define SYS_sem_wait 101
+#define SYS_sem_post 102
+#define SYS_sem_get_value 103
+#define SYS_dyncode_create 104
+#define SYS_dyncode_modify 105
+#define SYS_dyncode_delete 106
+#define SYS_test_infoleak 109
+#define SYS_test_crash 110
+#define SYS_test_syscall_1 111
+#define SYS_test_syscall_2 112
diff --git a/src/pkg/runtime/syscall_solaris.goc b/src/pkg/runtime/syscall_solaris.goc
new file mode 100644
index 000000000..21bcce4d1
--- /dev/null
+++ b/src/pkg/runtime/syscall_solaris.goc
@@ -0,0 +1,374 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+#include "runtime.h"
+#include "defs_GOOS_GOARCH.h"
+#include "os_GOOS.h"
+#include "cgocall.h"
+#include "../../cmd/ld/textflag.h"
+
+#pragma dynimport libc·chdir chdir "libc.so"
+#pragma dynimport libc·chroot chroot "libc.so"
+#pragma dynimport libc·close close "libc.so"
+#pragma dynimport libc·dlclose dlclose "libc.so"
+#pragma dynimport libc·dlopen dlopen "libc.so"
+#pragma dynimport libc·dlsym dlsym "libc.so"
+#pragma dynimport libc·execve execve "libc.so"
+#pragma dynimport libc·fcntl fcntl "libc.so"
+#pragma dynimport libc·gethostname gethostname "libc.so"
+#pragma dynimport libc·ioctl ioctl "libc.so"
+#pragma dynimport libc·pipe pipe "libc.so"
+#pragma dynimport libc·setgid setgid "libc.so"
+#pragma dynimport libc·setgroups setgroups "libc.so"
+#pragma dynimport libc·setsid setsid "libc.so"
+#pragma dynimport libc·setuid setuid "libc.so"
+#pragma dynimport libc·setpgid setpgid "libc.so"
+#pragma dynimport libc·syscall syscall "libc.so"
+#pragma dynimport libc·forkx forkx "libc.so"
+#pragma dynimport libc·wait4 wait4 "libc.so"
+extern uintptr libc·chdir;
+extern uintptr libc·chroot;
+extern uintptr libc·close;
+extern uintptr libc·dlclose;
+extern uintptr libc·dlopen;
+extern uintptr libc·dlsym;
+extern uintptr libc·execve;
+extern uintptr libc·exit;
+extern uintptr libc·fcntl;
+extern uintptr libc·gethostname;
+extern uintptr libc·ioctl;
+extern uintptr libc·pipe;
+extern uintptr libc·setgid;
+extern uintptr libc·setgroups;
+extern uintptr libc·setsid;
+extern uintptr libc·setuid;
+extern uintptr libc·setpgid;
+extern uintptr libc·syscall;
+extern uintptr libc·forkx;
+extern uintptr libc·wait4;
+extern uintptr libc·write;
+
+func sysvicall6(func uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
+{
+ LibCall c;
+
+ USED(a2);
+ USED(a3);
+ USED(a4);
+ USED(a5);
+ USED(a6);
+ c.fn = (void*)func;
+ c.n = nargs;
+ c.args = (void*)&a1;
+ runtime·cgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ r1 = c.r1;
+ r2 = c.r2;
+}
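+
+// Two idioms above are worth noting: .goc lays parameters out
+// contiguously on the stack, so &a1 doubles as a pointer to all six
+// arguments, and the USED() calls exist only to silence the C
+// compiler's unused-parameter errors for a2-a6, which are never
+// referenced by name.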
+
+#pragma textflag NOSPLIT
+func rawSysvicall6(func uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
+{
+ LibCall c;
+
+ USED(a2);
+ USED(a3);
+ USED(a4);
+ USED(a5);
+ USED(a6);
+ c.fn = (void*)func;
+ c.n = nargs;
+ c.args = (void*)&a1;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ r1 = c.r1;
+ r2 = c.r2;
+}
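+
+// rawSysvicall6 differs from sysvicall6 only in the entry path:
+// runtime·cgocall goes through entersyscall so the scheduler can run
+// other goroutines while the call blocks, whereas runtime·asmcgocall
+// is the bare g0 stack switch, which makes the raw variant (and the
+// NOSPLIT wrappers below) usable in the child of forkx before exec.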
+
+#pragma textflag NOSPLIT
+func chdir(path uintptr) (err uintptr) {
+ LibCall c;
+
+ c.fn = (void*)libc·chdir;
+ c.n = 1;
+ c.args = (void*)&path;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+#pragma textflag NOSPLIT
+func chroot1(path uintptr) (err uintptr) {
+ LibCall c;
+
+ c.fn = (void*)libc·chroot;
+ c.n = 1;
+ c.args = (void*)&path;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+#pragma textflag NOSPLIT
+func close(fd uintptr) (err uintptr) {
+ LibCall c;
+
+ c.fn = (void*)libc·close;
+ c.n = 1;
+ c.args = (void*)&fd;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+func dlclose(handle uintptr) (err uintptr) {
+ LibCall c;
+
+ USED(handle);
+ c.fn = (void*)libc·dlclose;
+ c.n = 1;
+ c.args = (void*)&handle;
+ runtime·cgocall(runtime·asmsysvicall6, &c);
+ err = c.r1;
+}
+
+func dlopen(name *uint8, mode uintptr) (handle uintptr, err uintptr) {
+ LibCall c;
+
+ USED(mode);
+ c.fn = (void*)libc·dlopen;
+ c.n = 2;
+ c.args = (void*)&name;
+ runtime·cgocall(runtime·asmsysvicall6, &c);
+ handle = c.r1;
+ if(handle == 0)
+ err = c.err;
+ else
+ err = 0;
+}
+
+func dlsym(handle uintptr, name *uint8) (proc uintptr, err uintptr) {
+ LibCall c;
+
+ USED(name);
+ c.fn = (void*)libc·dlsym;
+ c.n = 2;
+ c.args = &handle;
+ runtime·cgocall(runtime·asmsysvicall6, &c);
+ proc = c.r1;
+ if(proc == 0)
+ err = c.err;
+ else
+ err = 0;
+}
+
+#pragma textflag NOSPLIT
+func execve(path uintptr, argv uintptr, envp uintptr) (err uintptr) {
+ LibCall c;
+
+ USED(argv);
+ USED(envp);
+ c.fn = (void*)libc·execve;
+ c.n = 3;
+ c.args = (void*)&path;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+#pragma textflag NOSPLIT
+func exit(code uintptr) {
+ LibCall c;
+
+ c.fn = (void*)libc·exit;
+ c.n = 1;
+ c.args = (void*)&code;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+}
+
+#pragma textflag NOSPLIT
+func fcntl1(fd uintptr, cmd uintptr, arg uintptr) (val uintptr, err uintptr) {
+ LibCall c;
+
+ USED(cmd);
+ USED(arg);
+ c.fn = (void*)libc·fcntl;
+ c.n = 3;
+ c.args = (void*)&fd;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ val = c.r1;
+}
+
+func gethostname() (name String, err uintptr) {
+ struct { uintptr v[2]; } args;
+ uint8 cname[MAXHOSTNAMELEN];
+ LibCall c;
+
+ c.fn = (void*)libc·gethostname;
+ c.n = 2;
+ args.v[0] = (uintptr)&cname[0];
+ args.v[1] = MAXHOSTNAMELEN;
+ c.args = (void*)&args;
+ runtime·cgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ if(c.r1) {
+ name = runtime·emptystring;
+ return;
+ }
+ cname[MAXHOSTNAMELEN - 1] = 0;
+ name = runtime·gostring(cname);
+}
+
+#pragma textflag NOSPLIT
+func ioctl(fd uintptr, req uintptr, arg uintptr) (err uintptr) {
+ LibCall c;
+
+ USED(req);
+ USED(arg);
+ c.fn = (void*)libc·ioctl;
+ c.n = 3;
+ c.args = (void*)&fd;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+func wait4(pid uintptr, wstatus *uint32, options uintptr, rusage *void) (wpid int, err uintptr) {
+ LibCall c;
+
+ USED(wstatus);
+ USED(options);
+ USED(rusage);
+ c.fn = (void*)libc·wait4;
+ c.n = 4;
+ c.args = (void*)&pid;
+ runtime·cgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ wpid = c.r1;
+}
+
+#pragma textflag NOSPLIT
+func setgid(gid uintptr) (err uintptr) {
+ LibCall c;
+
+ c.fn = (void*)libc·setgid;
+ c.n = 1;
+ c.args = (void*)&gid;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+#pragma textflag NOSPLIT
+func setgroups1(ngid uintptr, gid uintptr) (err uintptr) {
+ LibCall c;
+
+ USED(gid);
+ c.fn = (void*)libc·setgroups;
+ c.n = 2;
+ c.args = (void*)&ngid;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+#pragma textflag NOSPLIT
+func setsid() (pid uintptr, err uintptr) {
+ LibCall c;
+
+ c.fn = (void*)libc·setsid;
+ c.n = 0;
+ c.args = (void*)0;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ pid = c.r1;
+}
+
+#pragma textflag NOSPLIT
+func setuid(uid uintptr) (err uintptr) {
+ LibCall c;
+
+ c.fn = (void*)libc·setuid;
+ c.n = 1;
+ c.args = (void*)&uid;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+#pragma textflag NOSPLIT
+func setpgid(pid uintptr, pgid uintptr) (err uintptr) {
+ LibCall c;
+
+ USED(pgid);
+ c.fn = (void*)libc·setpgid;
+ c.n = 2;
+ c.args = (void*)&pid;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+}
+
+#pragma textflag NOSPLIT
+func forkx(flags uintptr) (pid uintptr, err uintptr) {
+ LibCall c;
+
+ c.fn = (void*)libc·forkx;
+ c.n = 1;
+ c.args = (void*)&flags;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ pid = c.r1;
+}
+
+void runtime·pipe1(void);
+
+func pipe() (r uintptr, w uintptr, err uintptr) {
+ LibCall c;
+
+ c.fn = (void*)runtime·pipe1;
+ c.n = 0;
+ c.args = (void*)0;
+ runtime·cgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ r = c.r1;
+ w = c.r2;
+}
+
+#pragma textflag NOSPLIT
+func write1(fd uintptr, buf uintptr, nbyte uintptr) (n uintptr, err uintptr) {
+ LibCall c;
+
+ USED(buf);
+ USED(nbyte);
+ c.fn = (void*)libc·write;
+ c.n = 3;
+ c.args = (void*)&fd;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ n = c.r1;
+}
+
+func Syscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
+ LibCall c;
+
+ USED(a1);
+ USED(a2);
+ USED(a3);
+ c.fn = (void*)libc·syscall;
+ c.n = 4;
+ c.args = &trap;
+ runtime·cgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ r1 = c.r1;
+ r2 = c.r2;
+}
+
+func RawSyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
+ LibCall c;
+
+ USED(a1);
+ USED(a2);
+ USED(a3);
+ c.fn = (void*)libc·syscall;
+ c.n = 4;
+ c.args = &trap;
+ runtime·asmcgocall(runtime·asmsysvicall6, &c);
+ err = c.err;
+ r1 = c.r1;
+ r2 = c.r2;
+}
diff --git a/src/pkg/runtime/syscall_windows.goc b/src/pkg/runtime/syscall_windows.goc
index 173d3ed6a..528245363 100644
--- a/src/pkg/runtime/syscall_windows.goc
+++ b/src/pkg/runtime/syscall_windows.goc
@@ -8,7 +8,7 @@ package syscall
#include "cgocall.h"
func loadlibrary(filename *uint16) (handle uintptr, err uintptr) {
- WinCall c;
+ LibCall c;
c.fn = runtime·LoadLibrary;
c.n = 1;
@@ -22,7 +22,7 @@ func loadlibrary(filename *uint16) (handle uintptr, err uintptr) {
}
func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err uintptr) {
- WinCall c;
+ LibCall c;
USED(procname);
c.fn = runtime·GetProcAddress;
@@ -40,17 +40,12 @@ func NewCallback(fn Eface) (code uintptr) {
code = (uintptr)runtime·compilecallback(fn, true);
}
-/*
- * If this is needed, uncomment here and add a declaration in package syscall
- * next to the NewCallback declaration.
- *
func NewCallbackCDecl(fn Eface) (code uintptr) {
code = (uintptr)runtime·compilecallback(fn, false);
}
- */
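+// A usage sketch, with hypothetical names (cdecl2 is one of the
+// functions exported by the test DLL in syscall_windows_test.go):
+//	cb := syscall.NewCallbackCDecl(func(a1, a2 uintptr) uintptr { return 0 })
+//	dll.MustFindProc("cdecl2").Call(cb, 1)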
func Syscall(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
- WinCall c;
+ LibCall c;
USED(a2);
USED(a3);
@@ -64,7 +59,7 @@ func Syscall(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1
}
func Syscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
- WinCall c;
+ LibCall c;
USED(a2);
USED(a3);
@@ -81,7 +76,7 @@ func Syscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4
}
func Syscall9(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
- WinCall c;
+ LibCall c;
USED(a2);
USED(a3);
@@ -101,7 +96,7 @@ func Syscall9(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4
}
func Syscall12(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
- WinCall c;
+ LibCall c;
USED(a2);
USED(a3);
@@ -124,7 +119,7 @@ func Syscall12(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4
}
func Syscall15(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr, a13 uintptr, a14 uintptr, a15 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
- WinCall c;
+ LibCall c;
USED(a2);
USED(a3);
diff --git a/src/pkg/runtime/syscall_windows_test.go b/src/pkg/runtime/syscall_windows_test.go
index f04d2cd54..fabf935d8 100644
--- a/src/pkg/runtime/syscall_windows_test.go
+++ b/src/pkg/runtime/syscall_windows_test.go
@@ -5,7 +5,13 @@
package runtime_test
import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
"runtime"
+ "strings"
"syscall"
"testing"
"unsafe"
@@ -171,10 +177,12 @@ func TestCallbackGC(t *testing.T) {
nestedCall(t, runtime.GC)
}
-func TestCallbackPanic(t *testing.T) {
- // Make sure panic during callback unwinds properly.
- if runtime.LockedOSThread() {
- t.Fatal("locked OS thread on entry to TestCallbackPanic")
+func TestCallbackPanicLocked(t *testing.T) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ if !runtime.LockedOSThread() {
+ t.Fatal("runtime.LockOSThread didn't")
}
defer func() {
s := recover()
@@ -184,27 +192,18 @@ func TestCallbackPanic(t *testing.T) {
if s.(string) != "callback panic" {
t.Fatal("wrong panic:", s)
}
- if runtime.LockedOSThread() {
- t.Fatal("locked OS thread on exit from TestCallbackPanic")
+ if !runtime.LockedOSThread() {
+ t.Fatal("lost lock on OS thread after panic")
}
}()
nestedCall(t, func() { panic("callback panic") })
panic("nestedCall returned")
}
-func TestCallbackPanicLoop(t *testing.T) {
- // Make sure we don't blow out m->g0 stack.
- for i := 0; i < 100000; i++ {
- TestCallbackPanic(t)
- }
-}
-
-func TestCallbackPanicLocked(t *testing.T) {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
-
- if !runtime.LockedOSThread() {
- t.Fatal("runtime.LockOSThread didn't")
+func TestCallbackPanic(t *testing.T) {
+ // Make sure panic during callback unwinds properly.
+ if runtime.LockedOSThread() {
+ t.Fatal("locked OS thread on entry to TestCallbackPanic")
}
defer func() {
s := recover()
@@ -214,14 +213,21 @@ func TestCallbackPanicLocked(t *testing.T) {
if s.(string) != "callback panic" {
t.Fatal("wrong panic:", s)
}
- if !runtime.LockedOSThread() {
- t.Fatal("lost lock on OS thread after panic")
+ if runtime.LockedOSThread() {
+ t.Fatal("locked OS thread on exit from TestCallbackPanic")
}
}()
nestedCall(t, func() { panic("callback panic") })
panic("nestedCall returned")
}
+func TestCallbackPanicLoop(t *testing.T) {
+ // Make sure we don't blow out m->g0 stack.
+ for i := 0; i < 100000; i++ {
+ TestCallbackPanic(t)
+ }
+}
+
func TestBlockingCallback(t *testing.T) {
c := make(chan int)
go func() {
@@ -242,3 +248,204 @@ func TestBlockingCallback(t *testing.T) {
func TestCallbackInAnotherThread(t *testing.T) {
// TODO: test a function which calls back in another thread: QueueUserAPC() or CreateThread()
}
+
+type cbDLLFunc int // int determines number of callback parameters
+
+func (f cbDLLFunc) stdcallName() string {
+ return fmt.Sprintf("stdcall%d", f)
+}
+
+func (f cbDLLFunc) cdeclName() string {
+ return fmt.Sprintf("cdecl%d", f)
+}
+
+func (f cbDLLFunc) buildOne(stdcall bool) string {
+ var funcname, attr string
+ if stdcall {
+ funcname = f.stdcallName()
+ attr = "__stdcall"
+ } else {
+ funcname = f.cdeclName()
+ attr = "__cdecl"
+ }
+ typename := "t" + funcname
+ p := make([]string, f)
+ for i := range p {
+ p[i] = "void*"
+ }
+ params := strings.Join(p, ",")
+ for i := range p {
+ p[i] = fmt.Sprintf("%d", i+1)
+ }
+ args := strings.Join(p, ",")
+ return fmt.Sprintf(`
+typedef void %s (*%s)(%s);
+void %s(%s f, void *n) {
+ int i;
+ for(i=0;i<(int)n;i++){
+ f(%s);
+ }
+}
+ `, attr, typename, params, funcname, typename, args)
+}
+
+func (f cbDLLFunc) build() string {
+ return f.buildOne(false) + f.buildOne(true)
+}
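+
+// For example, cbDLLFunc(2).buildOne(true) expands the template to
+// (modulo whitespace):
+//
+//	typedef void __stdcall (*tstdcall2)(void*,void*);
+//	void stdcall2(tstdcall2 f, void *n) {
+//		int i;
+//		for(i=0;i<(int)n;i++){
+//			f(1,2);
+//		}
+//	}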
+
+var cbFuncs = [...]interface{}{
+ 2: func(i1, i2 uintptr) uintptr {
+ if i1+i2 != 3 {
+ panic("bad input")
+ }
+ return 0
+ },
+ 3: func(i1, i2, i3 uintptr) uintptr {
+ if i1+i2+i3 != 6 {
+ panic("bad input")
+ }
+ return 0
+ },
+ 4: func(i1, i2, i3, i4 uintptr) uintptr {
+ if i1+i2+i3+i4 != 10 {
+ panic("bad input")
+ }
+ return 0
+ },
+ 5: func(i1, i2, i3, i4, i5 uintptr) uintptr {
+ if i1+i2+i3+i4+i5 != 15 {
+ panic("bad input")
+ }
+ return 0
+ },
+ 6: func(i1, i2, i3, i4, i5, i6 uintptr) uintptr {
+ if i1+i2+i3+i4+i5+i6 != 21 {
+ panic("bad input")
+ }
+ return 0
+ },
+ 7: func(i1, i2, i3, i4, i5, i6, i7 uintptr) uintptr {
+ if i1+i2+i3+i4+i5+i6+i7 != 28 {
+ panic("bad input")
+ }
+ return 0
+ },
+ 8: func(i1, i2, i3, i4, i5, i6, i7, i8 uintptr) uintptr {
+ if i1+i2+i3+i4+i5+i6+i7+i8 != 36 {
+ panic("bad input")
+ }
+ return 0
+ },
+ 9: func(i1, i2, i3, i4, i5, i6, i7, i8, i9 uintptr) uintptr {
+ if i1+i2+i3+i4+i5+i6+i7+i8+i9 != 45 {
+ panic("bad input")
+ }
+ return 0
+ },
+}
+
+type cbDLL struct {
+ name string
+ buildArgs func(out, src string) []string
+}
+
+func (d *cbDLL) buildSrc(t *testing.T, path string) {
+ f, err := os.Create(path)
+ if err != nil {
+ t.Fatalf("failed to create source file: %v", err)
+ }
+ defer f.Close()
+
+ for i := 2; i < 10; i++ {
+ fmt.Fprint(f, cbDLLFunc(i).build())
+ }
+}
+
+func (d *cbDLL) build(t *testing.T, dir string) string {
+ srcname := d.name + ".c"
+ d.buildSrc(t, filepath.Join(dir, srcname))
+ outname := d.name + ".dll"
+ args := d.buildArgs(outname, srcname)
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Dir = dir
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to build dll: %v - %v", err, string(out))
+ }
+ return filepath.Join(dir, outname)
+}
+
+var cbDLLs = []cbDLL{
+ {
+ "test",
+ func(out, src string) []string {
+ return []string{"gcc", "-shared", "-s", "-o", out, src}
+ },
+ },
+ {
+ "testO2",
+ func(out, src string) []string {
+ return []string{"gcc", "-shared", "-s", "-o", out, "-O2", src}
+ },
+ },
+}
+
+type cbTest struct {
+ n int // number of callback parameters
+ param uintptr // dll function parameter
+}
+
+func (test *cbTest) run(t *testing.T, dllpath string) {
+ dll := syscall.MustLoadDLL(dllpath)
+ defer dll.Release()
+ cb := cbFuncs[test.n]
+ stdcall := syscall.NewCallback(cb)
+ f := cbDLLFunc(test.n)
+ test.runOne(t, dll, f.stdcallName(), stdcall)
+ cdecl := syscall.NewCallbackCDecl(cb)
+ test.runOne(t, dll, f.cdeclName(), cdecl)
+}
+
+func (test *cbTest) runOne(t *testing.T, dll *syscall.DLL, proc string, cb uintptr) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("dll call %v(..., %d) failed: %v", proc, test.param, r)
+ }
+ }()
+ dll.MustFindProc(proc).Call(cb, test.param)
+}
+
+var cbTests = []cbTest{
+ {2, 1},
+ {2, 10000},
+ {3, 3},
+ {4, 5},
+ {4, 6},
+ {5, 2},
+ {6, 7},
+ {6, 8},
+ {7, 6},
+ {8, 1},
+ {9, 8},
+ {9, 10000},
+ {3, 4},
+ {5, 3},
+ {7, 7},
+ {8, 2},
+ {9, 9},
+}
+
+func TestStdcallAndCDeclCallbacks(t *testing.T) {
+ tmp, err := ioutil.TempDir("", "TestCDeclCallback")
+ if err != nil {
+ t.Fatal("TempDir failed: ", err)
+ }
+ defer os.RemoveAll(tmp)
+
+ for _, dll := range cbDLLs {
+ dllPath := dll.build(t, tmp)
+ for _, test := range cbTests {
+ test.run(t, dllPath)
+ }
+ }
+}
diff --git a/src/pkg/runtime/time.goc b/src/pkg/runtime/time.goc
index b575696f7..712e03e83 100644
--- a/src/pkg/runtime/time.goc
+++ b/src/pkg/runtime/time.goc
@@ -21,11 +21,19 @@ static Timers timers;
static void addtimer(Timer*);
static void dumptimers(int8*);
+// nacl fake time support.
+int64 runtime·timens;
+
// Package time APIs.
// Godoc uses the comments in package time, not these.
// time.now is implemented in assembly.
+// runtimeNano returns the current value of the runtime clock in nanoseconds.
+func runtimeNano() (ns int64) {
+ ns = runtime·nanotime();
+}
+
// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
func Sleep(ns int64) {
runtime·tsleep(ns, "sleep");
@@ -46,6 +54,16 @@ func stopTimer(t *Timer) (stopped bool) {
// C runtime.
+void runtime·gc_unixnanotime(int64 *now);
+
+int64 runtime·unixnanotime(void)
+{
+ int64 now;
+
+ runtime·gc_unixnanotime(&now);
+ return now;
+}
+
static void timerproc(void);
static void siftup(int32);
static void siftdown(int32);
@@ -76,7 +94,7 @@ runtime·tsleep(int64 ns, int8 *reason)
t.arg.data = g;
runtime·lock(&timers);
addtimer(&t);
- runtime·park(runtime·unlock, &timers, reason);
+ runtime·parkunlock(&timers, reason);
}
static FuncVal timerprocv = {timerproc};
@@ -217,12 +235,22 @@ timerproc(void)
if(raceenabled)
runtime·raceacquire(t);
f(now, arg);
+
+ // clear f and arg to avoid leak while sleeping for next timer
+ f = nil;
+ USED(f);
+ arg.type = nil;
+ arg.data = nil;
+ USED(&arg);
+
runtime·lock(&timers);
}
if(delta < 0) {
// No timers left - put goroutine to sleep.
timers.rescheduling = true;
- runtime·park(runtime·unlock, &timers, "timer goroutine (idle)");
+ g->isbackground = true;
+ runtime·parkunlock(&timers, "timer goroutine (idle)");
+ g->isbackground = false;
continue;
}
// At least one timer pending. Sleep until then.
diff --git a/src/pkg/runtime/traceback_arm.c b/src/pkg/runtime/traceback_arm.c
index 8a3685e76..d15244c2a 100644
--- a/src/pkg/runtime/traceback_arm.c
+++ b/src/pkg/runtime/traceback_arm.c
@@ -8,17 +8,23 @@
#include "funcdata.h"
void runtime·sigpanic(void);
+void runtime·newproc(void);
+void runtime·deferproc(void);
int32
-runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip, uintptr *pcbuf, int32 max, void (*callback)(Stkframe*, void*), void *v, bool printall)
+runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip, uintptr *pcbuf, int32 max, bool (*callback)(Stkframe*, void*), void *v, bool printall)
{
- int32 i, n, nprint, line;
- uintptr x, tracepc;
- bool waspanic, printing;
+ int32 i, n, nprint, line, gotraceback;
+ uintptr x, tracepc, sparg;
+ bool waspanic, wasnewproc, printing;
Func *f, *flr;
Stkframe frame;
Stktop *stk;
String file;
+ Panic *panic;
+ Defer *defer;
+
+ gotraceback = runtime·gotraceback(nil);
if(pc0 == ~(uintptr)0 && sp0 == ~(uintptr)0) { // Signal to fetch saved values from gp.
if(gp->syscallstack != (uintptr)nil) {
@@ -38,8 +44,17 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
frame.lr = lr0;
frame.sp = sp0;
waspanic = false;
+ wasnewproc = false;
printing = pcbuf==nil && callback==nil;
+ panic = gp->panic;
+ defer = gp->defer;
+
+ while(defer != nil && defer->argp == NoArgs)
+ defer = defer->link;
+ while(panic != nil && panic->defer == nil)
+ panic = panic->link;
+
// If the PC is zero, it's likely a nil function call.
// Start in the caller's frame.
if(frame.pc == 0) {
@@ -95,6 +110,19 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
if(runtime·topofstack(f)) {
frame.lr = 0;
flr = nil;
+ } else if(f->entry == (uintptr)runtime·jmpdefer) {
+ // jmpdefer modifies SP/LR/PC non-atomically.
+ // If a profiling interrupt arrives during jmpdefer,
+ // the stack unwind may see a mismatched register set
+ // and get confused. Stop if we see PC within jmpdefer
+ // to avoid that confusion.
+ // See golang.org/issue/8153.
+ // This check can be deleted if jmpdefer is changed
+ // to restore all three atomically using pop.
+ if(callback != nil)
+ runtime·throw("traceback_arm: found jmpdefer when tracing with callback");
+ frame.lr = 0;
+ flr = nil;
} else {
if((n == 0 && frame.sp < frame.fp) || frame.lr == 0)
frame.lr = *(uintptr*)frame.sp;
@@ -133,6 +161,47 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
}
}
+ // Determine function SP where deferproc would find its arguments.
+ // On ARM that's just the standard bottom-of-stack plus 1 word for
+ // the saved LR. If the previous frame was a direct call to newproc/deferproc,
+ // however, the SP is three words lower than normal.
+ // If the function has no frame at all - perhaps it just started, or perhaps
+ // it is a leaf with no local variables - then we cannot possibly find its
+ // SP in a defer, and we might confuse its SP for its caller's SP, so
+ // set sparg=0 in that case.
+ sparg = 0;
+ if(frame.fp != frame.sp) {
+ sparg = frame.sp + sizeof(uintreg);
+ if(wasnewproc)
+ sparg += 3*sizeof(uintreg);
+ }
+
+ // Determine frame's 'continuation PC', where it can continue.
+ // Normally this is the return address on the stack, but if sigpanic
+ // is immediately below this function on the stack, then the frame
+ // stopped executing due to a trap, and frame.pc is probably not
+ // a safe point for looking up liveness information. In this panicking case,
+ // the function either doesn't return at all (if it has no defers or if the
+ // defers do not recover) or it returns from one of the calls to
+ // deferproc a second time (if the corresponding deferred func recovers).
+ // It suffices to assume that the most recent deferproc is the one that
+ // returns; everything live at earlier deferprocs is still live at that one.
+ frame.continpc = frame.pc;
+ if(waspanic) {
+ if(panic != nil && panic->defer->argp == (byte*)sparg)
+ frame.continpc = (uintptr)panic->defer->pc;
+ else if(defer != nil && defer->argp == (byte*)sparg)
+ frame.continpc = (uintptr)defer->pc;
+ else
+ frame.continpc = 0;
+ }
+
+ // Unwind our local panic & defer stacks past this frame.
+ while(panic != nil && (panic->defer == nil || panic->defer->argp == (byte*)sparg || panic->defer->argp == NoArgs))
+ panic = panic->link;
+ while(defer != nil && (defer->argp == (byte*)sparg || defer->argp == NoArgs))
+ defer = defer->link;
+
if(skip > 0) {
skip--;
goto skipped;
@@ -140,8 +209,10 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
if(pcbuf != nil)
pcbuf[n] = frame.pc;
- if(callback != nil)
- callback(&frame, v);
+ if(callback != nil) {
+ if(!callback(&frame, v))
+ return n;
+ }
if(printing) {
if(printall || runtime·showframe(f, gp)) {
// Print during crash.
@@ -152,7 +223,7 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
tracepc -= sizeof(uintptr);
runtime·printf("%s(", runtime·funcname(f));
for(i = 0; i < frame.arglen/sizeof(uintptr); i++) {
- if(i >= 5) {
+ if(i >= 10) {
runtime·prints(", ...");
break;
}
@@ -165,8 +236,8 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
runtime·printf("\t%S:%d", file, line);
if(frame.pc > f->entry)
runtime·printf(" +%p", (uintptr)(frame.pc - f->entry));
- if(m->throwing > 0 && gp == m->curg)
- runtime·printf(" fp=%p", frame.fp);
+ if(m->throwing > 0 && gp == m->curg || gotraceback >= 2)
+ runtime·printf(" fp=%p sp=%p", frame.fp, frame.sp);
runtime·printf("\n");
nprint++;
}
@@ -175,6 +246,7 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
skipped:
waspanic = f->entry == (uintptr)runtime·sigpanic;
+ wasnewproc = f->entry == (uintptr)runtime·newproc || f->entry == (uintptr)runtime·deferproc;
// Do not unwind past the bottom of the stack.
if(flr == nil)
@@ -202,6 +274,23 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
if(pcbuf == nil && callback == nil)
n = nprint;
+ // For rationale, see long comment in traceback_x86.c.
+ if(callback != nil && n < max && defer != nil) {
+ if(defer != nil)
+ runtime·printf("runtime: g%D: leftover defer argp=%p pc=%p\n", gp->goid, defer->argp, defer->pc);
+ if(panic != nil)
+ runtime·printf("runtime: g%D: leftover panic argp=%p pc=%p\n", gp->goid, panic->defer->argp, panic->defer->pc);
+ for(defer = gp->defer; defer != nil; defer = defer->link)
+ runtime·printf("\tdefer %p argp=%p pc=%p\n", defer, defer->argp, defer->pc);
+ for(panic = gp->panic; panic != nil; panic = panic->link) {
+ runtime·printf("\tpanic %p defer %p", panic, panic->defer);
+ if(panic->defer != nil)
+ runtime·printf(" argp=%p pc=%p", panic->defer->argp, panic->defer->pc);
+ runtime·printf("\n");
+ }
+ runtime·throw("traceback has leftover defers or panics");
+ }
+
return n;
}
@@ -231,6 +320,8 @@ runtime·printcreatedby(G *gp)
void
runtime·traceback(uintptr pc, uintptr sp, uintptr lr, G *gp)
{
+ int32 n;
+
if(gp->status == Gsyscall) {
// Override signal registers if blocked in system call.
pc = gp->syscallpc;
@@ -240,8 +331,11 @@ runtime·traceback(uintptr pc, uintptr sp, uintptr lr, G *gp)
// Print traceback. By default, omits runtime frames.
// If that means we print nothing at all, repeat forcing all frames printed.
- if(runtime·gentraceback(pc, sp, lr, gp, 0, nil, 100, nil, nil, false) == 0)
- runtime·gentraceback(pc, sp, lr, gp, 0, nil, 100, nil, nil, true);
+ n = runtime·gentraceback(pc, sp, lr, gp, 0, nil, TracebackMaxFrames, nil, nil, false);
+ if(n == 0)
+ n = runtime·gentraceback(pc, sp, lr, gp, 0, nil, TracebackMaxFrames, nil, nil, true);
+ if(n == TracebackMaxFrames)
+ runtime·printf("...additional frames elided...\n");
runtime·printcreatedby(gp);
}
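
Note: the TracebackMaxFrames cap and the "...additional frames elided..." marker above are easy to observe from user code. A hypothetical reproducer, not part of this CL; it only assumes TracebackMaxFrames is 100, as defined elsewhere in this tree:

	// deep.go: panic more than TracebackMaxFrames frames deep, so the
	// printed traceback ends with "...additional frames elided...".
	package main

	func recurse(n int) {
		if n == 0 {
			panic("deep stack")
		}
		recurse(n - 1)
	}

	func main() {
		recurse(200)
	}
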
diff --git a/src/pkg/runtime/traceback_x86.c b/src/pkg/runtime/traceback_x86.c
index 8e3063f43..851504f52 100644
--- a/src/pkg/runtime/traceback_x86.c
+++ b/src/pkg/runtime/traceback_x86.c
@@ -2,14 +2,23 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 386
+// +build amd64 amd64p32 386
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "funcdata.h"
+#ifdef GOOS_windows
+#include "defs_GOOS_GOARCH.h"
+#endif
void runtime·sigpanic(void);
+void runtime·newproc(void);
+void runtime·deferproc(void);
+
+#ifdef GOOS_windows
+void runtime·sigtramp(void);
+#endif
// This code is also used for the 386 tracebacks.
// Use uintptr for an appropriate word-sized integer.
@@ -19,18 +28,22 @@ void runtime·sigpanic(void);
// collector (callback != nil). A little clunky to merge these, but avoids
// duplicating the code and all its subtlety.
int32
-runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip, uintptr *pcbuf, int32 max, void (*callback)(Stkframe*, void*), void *v, bool printall)
+runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip, uintptr *pcbuf, int32 max, bool (*callback)(Stkframe*, void*), void *v, bool printall)
{
- int32 i, n, nprint, line;
- uintptr tracepc;
- bool waspanic, printing;
+ int32 i, n, nprint, line, gotraceback;
+ uintptr tracepc, sparg;
+ bool waspanic, wasnewproc, printing;
Func *f, *flr;
Stkframe frame;
Stktop *stk;
String file;
+ Panic *panic;
+ Defer *defer;
USED(lr0);
+ gotraceback = runtime·gotraceback(nil);
+
if(pc0 == ~(uintptr)0 && sp0 == ~(uintptr)0) { // Signal to fetch saved values from gp.
if(gp->syscallstack != (uintptr)nil) {
pc0 = gp->syscallpc;
@@ -46,13 +59,21 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
frame.pc = pc0;
frame.sp = sp0;
waspanic = false;
+ wasnewproc = false;
printing = pcbuf==nil && callback==nil;
-
+ panic = gp->panic;
+ defer = gp->defer;
+
+ while(defer != nil && defer->argp == NoArgs)
+ defer = defer->link;
+ while(panic != nil && panic->defer == nil)
+ panic = panic->link;
+
// If the PC is zero, it's likely a nil function call.
// Start in the caller's frame.
if(frame.pc == 0) {
frame.pc = *(uintptr*)frame.sp;
- frame.sp += sizeof(uintptr);
+ frame.sp += sizeof(uintreg);
}
f = runtime·findfunc(frame.pc);
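
Note: the zero-PC special case just above (frame.pc == 0, start in the caller's frame) corresponds to calling a nil function value, which jumps to address 0. A minimal illustration, hypothetical and not from this patch:

	package main

	func main() {
		var f func()
		f() // calling a nil func value jumps to PC 0; the runtime
		    // panics, and the traceback starts in the caller's frame
	}
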
@@ -95,20 +116,63 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
frame.fn = f;
continue;
}
+
f = frame.fn;
+#ifdef GOOS_windows
+ // Windows exception handlers run on the actual g stack (there is room
+ // dedicated to this below the usual "bottom of stack"), not on a separate
+ // stack. As a result, we have to be able to unwind past the exception
+ // handler when called to unwind during stack growth inside the handler.
+ // Recognize the frame at the call to sighandler in sigtramp and unwind
+ // using the context argument passed to the call. This is awful.
+ if(f != nil && f->entry == (uintptr)runtime·sigtramp && frame.pc > f->entry) {
+ Context *r;
+
+ // Invoke callback so that stack copier sees an uncopyable frame.
+ if(callback != nil) {
+ frame.continpc = frame.pc;
+ frame.argp = nil;
+ frame.arglen = 0;
+ if(!callback(&frame, v))
+ return n;
+ }
+ r = (Context*)((uintptr*)frame.sp)[1];
+#ifdef GOARCH_amd64
+ frame.pc = r->Rip;
+ frame.sp = r->Rsp;
+#else
+ frame.pc = r->Eip;
+ frame.sp = r->Esp;
+#endif
+ frame.lr = 0;
+ frame.fp = 0;
+ frame.fn = nil;
+ if(printing && runtime·showframe(nil, gp))
+ runtime·printf("----- exception handler -----\n");
+ f = runtime·findfunc(frame.pc);
+ if(f == nil) {
+ runtime·printf("runtime: unknown pc %p after exception handler\n", frame.pc);
+ if(callback != nil)
+ runtime·throw("unknown pc");
+ }
+ frame.fn = f;
+ continue;
+ }
+#endif
+
// Found an actual function.
// Derive frame pointer and link register.
if(frame.fp == 0) {
frame.fp = frame.sp + runtime·funcspdelta(f, frame.pc);
- frame.fp += sizeof(uintptr); // caller PC
+ frame.fp += sizeof(uintreg); // caller PC
}
if(runtime·topofstack(f)) {
frame.lr = 0;
flr = nil;
} else {
if(frame.lr == 0)
- frame.lr = ((uintptr*)frame.fp)[-1];
+ frame.lr = ((uintreg*)frame.fp)[-1];
flr = runtime·findfunc(frame.lr);
if(flr == nil) {
runtime·printf("runtime: unexpected return pc for %s called from %p\n", runtime·funcname(f), frame.lr);
@@ -117,7 +181,7 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
}
}
- frame.varp = (byte*)frame.fp - sizeof(uintptr);
+ frame.varp = (byte*)frame.fp - sizeof(uintreg);
// Derive size of arguments.
// Most functions have a fixed-size argument block,
@@ -143,6 +207,40 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
frame.arglen = 0;
}
}
+
+ // Determine function SP where deferproc would find its arguments.
+ // On x86 that's just the standard bottom-of-stack, so SP exactly.
+ // If the previous frame was a direct call to newproc/deferproc, however,
+ // the SP is two words lower than normal.
+ sparg = frame.sp;
+ if(wasnewproc)
+ sparg += 2*sizeof(uintreg);
+
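
Note: the argp bookkeeping here works because deferproc copies the deferred call's arguments at the point of the defer statement, so a Defer record can be matched to its frame by the SP at which those arguments were found. The language-visible consequence, shown as a hypothetical sketch:

	package main

	func main() {
		x := 1
		defer println(x) // deferproc copies x's current value (1)
		                 // into the defer's argument block right here
		x = 2
	} // prints 1, not 2
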
+ // Determine frame's 'continuation PC', where it can continue.
+ // Normally this is the return address on the stack, but if sigpanic
+ // is immediately below this function on the stack, then the frame
+ // stopped executing due to a trap, and frame.pc is probably not
+ // a safe point for looking up liveness information. In this panicking case,
+ // the function either doesn't return at all (if it has no defers or if the
+ // defers do not recover) or it returns from one of the calls to
+ // deferproc a second time (if the corresponding deferred func recovers).
+ // It suffices to assume that the most recent deferproc is the one that
+ // returns; everything live at earlier deferprocs is still live at that one.
+ frame.continpc = frame.pc;
+ if(waspanic) {
+ if(panic != nil && panic->defer->argp == (byte*)sparg)
+ frame.continpc = (uintptr)panic->defer->pc;
+ else if(defer != nil && defer->argp == (byte*)sparg)
+ frame.continpc = (uintptr)defer->pc;
+ else
+ frame.continpc = 0;
+ }
+
+ // Unwind our local panic & defer stacks past this frame.
+ while(panic != nil && (panic->defer == nil || panic->defer->argp == (byte*)sparg || panic->defer->argp == NoArgs))
+ panic = panic->link;
+ while(defer != nil && (defer->argp == (byte*)sparg || defer->argp == NoArgs))
+ defer = defer->link;
if(skip > 0) {
skip--;
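
Note: the continuation-PC logic above is what the "returns from one of the calls to deferproc a second time" case looks like from Go. In this sketch (hypothetical, not part of the CL), f stops at a trap, so its frame.pc is not a safe point; if the frame resumes at all, it resumes at the recovering defer's deferproc return:

	package main

	func f(p *int) (v int) {
		defer func() {
			if recover() != nil {
				v = -1 // if we recover, f resumes here and returns
			}
		}()
		return *p // nil dereference: sigpanic sits below f on the stack
	}

	func main() {
		println(f(nil)) // prints -1
	}
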
@@ -151,8 +249,10 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
if(pcbuf != nil)
pcbuf[n] = frame.pc;
- if(callback != nil)
- callback(&frame, v);
+ if(callback != nil) {
+ if(!callback(&frame, v))
+ return n;
+ }
if(printing) {
if(printall || runtime·showframe(f, gp)) {
// Print during crash.
@@ -164,7 +264,7 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
tracepc--;
runtime·printf("%s(", runtime·funcname(f));
for(i = 0; i < frame.arglen/sizeof(uintptr); i++) {
- if(i >= 5) {
+ if(i >= 10) {
runtime·prints(", ...");
break;
}
@@ -177,8 +277,8 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
runtime·printf("\t%S:%d", file, line);
if(frame.pc > f->entry)
runtime·printf(" +%p", (uintptr)(frame.pc - f->entry));
- if(m->throwing > 0 && gp == m->curg)
- runtime·printf(" fp=%p", frame.fp);
+ if(m->throwing > 0 && gp == m->curg || gotraceback >= 2)
+ runtime·printf(" fp=%p sp=%p", frame.fp, frame.sp);
runtime·printf("\n");
nprint++;
}
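
Note: with the change above, the per-frame fp= and sp= annotations are printed not only while throwing but whenever gotraceback >= 2, i.e. when the process runs with GOTRACEBACK=2 or higher. An illustrative program; the exact addresses vary by build:

	// Run as: GOTRACEBACK=2 ./crash
	// Each traceback location line then ends with " fp=0x... sp=0x...".
	package main

	func main() {
		panic("show frame and stack pointers")
	}
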
@@ -187,6 +287,7 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
skipped:
waspanic = f->entry == (uintptr)runtime·sigpanic;
+ wasnewproc = f->entry == (uintptr)runtime·newproc || f->entry == (uintptr)runtime·deferproc;
// Do not unwind past the bottom of the stack.
if(flr == nil)
@@ -202,7 +303,72 @@ runtime·gentraceback(uintptr pc0, uintptr sp0, uintptr lr0, G *gp, int32 skip,
if(pcbuf == nil && callback == nil)
n = nprint;
-
+
+ // If callback != nil, we're being called to gather stack information during
+ // garbage collection or stack growth. In that context, require that we used
+ // up the entire defer stack. If not, then there is a bug somewhere and the
+ // garbage collection or stack growth may not have seen the correct picture
+ // of the stack. Crash now instead of silently executing the garbage collection
+ // or stack copy incorrectly and setting up for a mysterious crash later.
+ //
+ // Note that panic != nil is okay here: there can be leftover panics,
+ // because the defers on the panic stack do not nest in frame order as
+ // they do on the defer stack. If you have:
+ //
+ // frame 1 defers d1
+ // frame 2 defers d2
+ // frame 3 defers d3
+ // frame 4 panics
+ // frame 4's panic starts running defers
+ // frame 5, running d3, defers d4
+ // frame 5 panics
+ // frame 5's panic starts running defers
+ // frame 6, running d4, garbage collects
+ // frame 6, running d2, garbage collects
+ //
+ // During the execution of d4, the panic stack is d4 -> d3, which
+ // is nested properly, and we'll treat frame 3 as resumable, because we
+ // can find d3. (And in fact frame 3 is resumable. If d4 recovers
+ // and frame 5 continues running d3, d3 can recover and we'll
+ // resume execution in (returning from) frame 3.)
+ //
+ // During the execution of d2, however, the panic stack is d2 -> d3,
+ // which is inverted. The scan will match d2 to frame 2 but having
+ // d2 on the stack until then means it will not match d3 to frame 3.
+ // This is okay: if we're running d2, then all the defers after d2 have
+ // completed and their corresponding frames are dead. Not finding d3
+ // for frame 3 means we'll set frame 3's continpc == 0, which is correct
+ // (frame 3 is dead). At the end of the walk the panic stack can thus
+ // contain defers (d3 in this case) for dead frames. The inversion here
+ // always indicates a dead frame, and the effect of the inversion on the
+ // scan is to hide those dead frames, so the scan is still okay:
+ // what's left on the panic stack are exactly (and only) the dead frames.
+ //
+ // We require callback != nil here because only when callback != nil
+ // do we know that gentraceback is being called in a "must be correct"
+ // context as opposed to a "best effort" context. The tracebacks with
+ // callbacks only happen when everything is stopped nicely.
+ // At other times, such as when gathering a stack for a profiling signal
+ // or when printing a traceback during a crash, everything may not be
+ // stopped nicely, and the stack walk may not be able to complete.
+ // It's okay in those situations not to use up the entire defer stack:
+ // incomplete information then is still better than nothing.
+ if(callback != nil && n < max && defer != nil) {
+ if(defer != nil)
+ runtime·printf("runtime: g%D: leftover defer argp=%p pc=%p\n", gp->goid, defer->argp, defer->pc);
+ if(panic != nil)
+ runtime·printf("runtime: g%D: leftover panic argp=%p pc=%p\n", gp->goid, panic->defer->argp, panic->defer->pc);
+ for(defer = gp->defer; defer != nil; defer = defer->link)
+ runtime·printf("\tdefer %p argp=%p pc=%p\n", defer, defer->argp, defer->pc);
+ for(panic = gp->panic; panic != nil; panic = panic->link) {
+ runtime·printf("\tpanic %p defer %p", panic, panic->defer);
+ if(panic->defer != nil)
+ runtime·printf(" argp=%p pc=%p", panic->defer->argp, panic->defer->pc);
+ runtime·printf("\n");
+ }
+ runtime·throw("traceback has leftover defers or panics");
+ }
+
return n;
}
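
Note: the frame numbering in the long comment above can be realized directly in Go. In this hypothetical program the runtime.GC() calls stand in for "garbage collects": while d4 runs, the panic stack is d4 -> d3 (properly nested); while d2 runs, it is d2 -> d3 (inverted, with frame 3 already dead):

	package main

	import "runtime"

	func d1() {}
	func d2() { runtime.GC() } // "frame 6, running d2, garbage collects"
	func d4() { runtime.GC() } // "frame 6, running d4, garbage collects"
	func d3() {
		defer d4() // "frame 5, running d3, defers d4"
		panic("frame 5 panics")
	}

	func f1() { defer d1(); f2() } // frame 1 defers d1
	func f2() { defer d2(); f3() } // frame 2 defers d2
	func f3() { defer d3(); f4() } // frame 3 defers d3
	func f4() { panic("frame 4 panics") }

	func main() { f1() }
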
@@ -232,6 +398,8 @@ runtime·printcreatedby(G *gp)
void
runtime·traceback(uintptr pc, uintptr sp, uintptr lr, G *gp)
{
+ int32 n;
+
USED(lr);
if(gp->status == Gsyscall) {
@@ -242,8 +410,11 @@ runtime·traceback(uintptr pc, uintptr sp, uintptr lr, G *gp)
// Print traceback. By default, omits runtime frames.
// If that means we print nothing at all, repeat forcing all frames printed.
- if(runtime·gentraceback(pc, sp, 0, gp, 0, nil, 100, nil, nil, false) == 0)
- runtime·gentraceback(pc, sp, 0, gp, 0, nil, 100, nil, nil, true);
+ n = runtime·gentraceback(pc, sp, 0, gp, 0, nil, TracebackMaxFrames, nil, nil, false);
+ if(n == 0)
+ n = runtime·gentraceback(pc, sp, 0, gp, 0, nil, TracebackMaxFrames, nil, nil, true);
+ if(n == TracebackMaxFrames)
+ runtime·printf("...additional frames elided...\n");
runtime·printcreatedby(gp);
}
diff --git a/src/pkg/runtime/type.go b/src/pkg/runtime/type.go
index 374754afa..276dbc0c9 100644
--- a/src/pkg/runtime/type.go
+++ b/src/pkg/runtime/type.go
@@ -26,6 +26,7 @@ type rtype struct {
string *string
*uncommonType
ptrToThis *rtype
+ zero unsafe.Pointer
}
type _method struct {
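
Note: the new zero field gives the runtime a pointer to a canonical zero value for each type. One reading (not stated in the diff) is that this lets paths such as map lookups of missing keys hand back a pointer into shared zeroed memory instead of zeroing a temporary on each miss. The language-level behavior it backs is just Go's usual zero-value rule:

	package main

	import "fmt"

	type big struct{ a, b, c, d float64 }

	func main() {
		m := map[string]big{}
		v := m["missing"] // a miss yields big's zero value; the runtime
		// can source it from the type's shared zero buffer
		fmt.Println(v) // {0 0 0 0}
	}
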
diff --git a/src/pkg/runtime/type.h b/src/pkg/runtime/type.h
index 30936046c..1598acc18 100644
--- a/src/pkg/runtime/type.h
+++ b/src/pkg/runtime/type.h
@@ -15,9 +15,8 @@ typedef struct Method Method;
typedef struct IMethod IMethod;
typedef struct SliceType SliceType;
typedef struct FuncType FuncType;
-typedef struct PtrType PtrType;
-// Needs to be in sync with typekind.h/CommonSize
+// Needs to be in sync with ../../cmd/ld/decodesym.c:/^commonsize
struct Type
{
uintptr size;
@@ -31,6 +30,7 @@ struct Type
String *string;
UncommonType *x;
Type *ptrto;
+ byte *zero; // ptr to the zero value for this type
};
struct Method
@@ -100,7 +100,3 @@ struct PtrType
Type;
Type *elem;
};
-
-// Here instead of in runtime.h because it uses the type names.
-bool runtime·addfinalizer(void*, FuncVal *fn, uintptr, Type*, PtrType*);
-bool runtime·getfinalizer(void *p, bool del, FuncVal **fn, uintptr *nret, Type**, PtrType**);
diff --git a/src/pkg/runtime/typekind.h b/src/pkg/runtime/typekind.h
index 9bae2a871..3f0ba9acb 100644
--- a/src/pkg/runtime/typekind.h
+++ b/src/pkg/runtime/typekind.h
@@ -34,8 +34,5 @@ enum {
KindUnsafePointer,
KindNoPointers = 1<<7,
-
- // size of Type structure.
- CommonSize = 6*PtrSize + 8,
};
diff --git a/src/pkg/runtime/vlop_arm.s b/src/pkg/runtime/vlop_arm.s
index d7c566afb..80f516ec4 100644
--- a/src/pkg/runtime/vlop_arm.s
+++ b/src/pkg/runtime/vlop_arm.s
@@ -75,10 +75,14 @@ TEXT _sfloat(SB), NOSPLIT, $64-0 // 4 arg + 14*4 saved regs + cpsr
MOVW m_locks(m), R1
ADD $1, R1
MOVW R1, m_locks(m)
+ MOVW $1, R1
+ MOVW R1, m_softfloat(m)
BL runtime·_sfloat2(SB)
MOVW m_locks(m), R1
SUB $1, R1
MOVW R1, m_locks(m)
+ MOVW $0, R1
+ MOVW R1, m_softfloat(m)
MOVW R0, 0(R13)
MOVW 64(R13), R1
WORD $0xe128f001 // msr cpsr_f, r1
@@ -255,7 +259,7 @@ TEXT _div(SB),NOSPLIT,$16
d0:
BL udiv<>(SB) /* none/both neg */
MOVW R(q), R(TMP)
- B out
+ B out1
d1:
CMP $0, R(q)
BGE d0
@@ -263,7 +267,12 @@ d1:
d2:
BL udiv<>(SB) /* one neg */
RSB $0, R(q), R(TMP)
- B out
+out1:
+ MOVW 4(R13), R(q)
+ MOVW 8(R13), R(r)
+ MOVW 12(R13), R(s)
+ MOVW 16(R13), R(M)
+ RET
TEXT _mod(SB),NOSPLIT,$16
MOVW R(q), 4(R13)
diff --git a/src/pkg/runtime/vlrt_386.c b/src/pkg/runtime/vlrt_386.c
index 8d965c086..ace1beb4c 100644
--- a/src/pkg/runtime/vlrt_386.c
+++ b/src/pkg/runtime/vlrt_386.c
@@ -32,6 +32,8 @@
* to generate the code directly now. Find and remove.
*/
+extern void runtime·panicdivide(void);
+
typedef unsigned long ulong;
typedef unsigned int uint;
typedef unsigned short ushort;
@@ -240,6 +242,8 @@ dodiv(Vlong num, Vlong den, Vlong *qp, Vlong *rp)
}
} else {
if(num.hi >= den.lo){
+ if(den.lo == 0)
+ runtime·panicdivide();
q.hi = n = num.hi/den.lo;
num.hi -= den.lo*n;
} else {
@@ -263,6 +267,8 @@ _divvu(Vlong *q, Vlong n, Vlong d)
{
if(n.hi == 0 && d.hi == 0) {
+ if(d.lo == 0)
+ runtime·panicdivide();
q->hi = 0;
q->lo = n.lo / d.lo;
return;
@@ -281,6 +287,8 @@ _modvu(Vlong *r, Vlong n, Vlong d)
{
if(n.hi == 0 && d.hi == 0) {
+ if(d.lo == 0)
+ runtime·panicdivide();
r->hi = 0;
r->lo = n.lo % d.lo;
return;
@@ -319,6 +327,8 @@ _divv(Vlong *q, Vlong n, Vlong d)
q->hi = 0;
return;
}
+ if(d.lo == 0)
+ runtime·panicdivide();
q->lo = (long)n.lo / (long)d.lo;
q->hi = ((long)q->lo) >> 31;
return;
@@ -353,6 +363,8 @@ _modv(Vlong *r, Vlong n, Vlong d)
r->hi = 0;
return;
}
+ if(d.lo == 0)
+ runtime·panicdivide();
r->lo = (long)n.lo % (long)d.lo;
r->hi = ((long)r->lo) >> 31;
return;
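
Note: these den.lo == 0 and d.lo == 0 checks make 64-bit division by zero on 32-bit targets raise the usual Go panic from the software division helpers (_divv, _modv, and friends) rather than faulting inside the C runtime. From Go the effect is indistinguishable from a hardware divide fault; an illustrative sketch:

	package main

	func main() {
		defer func() {
			// prints "runtime error: integer divide by zero"
			println(recover().(error).Error())
		}()
		var n int64 = 1
		var d int64 // zero
		_ = n / d // on 386/ARM in this tree, 64-bit division goes through
		          // _divv, which now calls runtime·panicdivide on a
		          // zero divisor
	}
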
diff --git a/src/pkg/runtime/vlrt_arm.c b/src/pkg/runtime/vlrt_arm.c
index 219163c60..7dd71b40e 100644
--- a/src/pkg/runtime/vlrt_arm.c
+++ b/src/pkg/runtime/vlrt_arm.c
@@ -27,6 +27,7 @@
// declared here to avoid include of runtime.h
void runtime·panicstring(char*);
+void runtime·panicdivide(void);
typedef unsigned long ulong;
typedef unsigned int uint;
@@ -36,12 +37,6 @@ typedef signed char schar;
#define SIGN(n) (1UL<<(n-1))
-void
-runtime·panicdivide(void)
-{
- runtime·panicstring("integer divide by zero");
-}
-
typedef struct Vlong Vlong;
struct Vlong
{