path: root/src/pkg/runtime
Diffstat (limited to 'src/pkg/runtime')
-rw-r--r--  src/pkg/runtime/386/memmove.s     | 12
-rw-r--r--  src/pkg/runtime/amd64/memmove.s   | 12
-rw-r--r--  src/pkg/runtime/append_test.go    | 51
-rw-r--r--  src/pkg/runtime/arm/closure.c     |  2
-rw-r--r--  src/pkg/runtime/arm/softfloat.c   | 10
-rw-r--r--  src/pkg/runtime/arm/vlop.s        |  2
-rw-r--r--  src/pkg/runtime/cgo/Makefile      | 14
-rw-r--r--  src/pkg/runtime/cgo/setenv.c      | 16
-rw-r--r--  src/pkg/runtime/closure_test.go   | 53
-rw-r--r--  src/pkg/runtime/linux/386/sys.s   |  8
-rw-r--r--  src/pkg/runtime/linux/amd64/sys.s |  8
-rw-r--r--  src/pkg/runtime/linux/arm/sys.s   |  9
-rw-r--r--  src/pkg/runtime/linux/mem.c       | 46
-rw-r--r--  src/pkg/runtime/linux/thread.c    | 10
-rw-r--r--  src/pkg/runtime/malloc.goc        |  2
-rw-r--r--  src/pkg/runtime/mprof.goc         |  2
-rw-r--r--  src/pkg/runtime/proc.c            | 35
-rw-r--r--  src/pkg/runtime/proc_test.go      |  9
-rw-r--r--  src/pkg/runtime/runtime-gdb.py    |  4
-rw-r--r--  src/pkg/runtime/runtime.h         | 17
-rw-r--r--  src/pkg/runtime/slice.c           | 91
-rw-r--r--  src/pkg/runtime/softfloat64.go    |  4
-rw-r--r--  src/pkg/runtime/stack.h           | 14
-rw-r--r--  src/pkg/runtime/windows/386/sys.s | 16
-rw-r--r--  src/pkg/runtime/windows/thread.c  | 40
25 files changed, 387 insertions, 100 deletions
diff --git a/src/pkg/runtime/386/memmove.s b/src/pkg/runtime/386/memmove.s
index 38a0652b5..471553ba2 100644
--- a/src/pkg/runtime/386/memmove.s
+++ b/src/pkg/runtime/386/memmove.s
@@ -32,7 +32,6 @@ TEXT runtime·memmove(SB), 7, $0
/*
* check and set for backwards
- * should we look closer for overlap?
*/
CMPL SI, DI
JLS back
@@ -40,6 +39,7 @@ TEXT runtime·memmove(SB), 7, $0
/*
* forward copy loop
*/
+forward:
MOVL BX, CX
SHRL $2, CX
ANDL $3, BX
@@ -51,10 +51,18 @@ TEXT runtime·memmove(SB), 7, $0
MOVL to+0(FP),AX
RET
/*
+ * check overlap
+ */
+back:
+ MOVL SI, CX
+ ADDL BX, CX
+ CMPL CX, DI
+ JLS forward
+/*
* whole thing backwards has
* adjusted addresses
*/
-back:
+
ADDL BX, DI
ADDL BX, SI
STD
diff --git a/src/pkg/runtime/amd64/memmove.s b/src/pkg/runtime/amd64/memmove.s
index 9966b0ba7..fc9573f72 100644
--- a/src/pkg/runtime/amd64/memmove.s
+++ b/src/pkg/runtime/amd64/memmove.s
@@ -33,7 +33,6 @@ TEXT runtime·memmove(SB), 7, $0
/*
* check and set for backwards
- * should we look closer for overlap?
*/
CMPQ SI, DI
JLS back
@@ -41,6 +40,7 @@ TEXT runtime·memmove(SB), 7, $0
/*
* forward copy loop
*/
+forward:
MOVQ BX, CX
SHRQ $3, CX
ANDQ $7, BX
@@ -51,11 +51,19 @@ TEXT runtime·memmove(SB), 7, $0
MOVQ to+0(FP),AX
RET
+back:
+/*
+ * check overlap
+ */
+ MOVQ SI, CX
+ ADDQ BX, CX
+ CMPQ CX, DI
+ JLS forward
+
/*
* whole thing backwards has
* adjusted addresses
*/
-back:
ADDQ BX, DI
ADDQ BX, SI
STD
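
Note on the memmove changes above (386 and amd64): the old code copied backwards whenever the destination was at or above the source; the new back: label first checks whether src+len lies at or below dst, and if the ranges do not actually overlap it jumps back to the forward loop, which is the faster path. A minimal sketch of the same condition in Go (hypothetical helper, names are not from the runtime):

package main

import "fmt"

// needsBackwardCopy mirrors the check at the new back: label: a backward copy
// is only required when dst lies inside the source range [src, src+n).
func needsBackwardCopy(dst, src, n uintptr) bool {
	return dst > src && dst < src+n
}

func main() {
	fmt.Println(needsBackwardCopy(100, 96, 8)) // true: ranges overlap, copy backwards
	fmt.Println(needsBackwardCopy(200, 96, 8)) // false: forward copy is safe
}
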
diff --git a/src/pkg/runtime/append_test.go b/src/pkg/runtime/append_test.go
new file mode 100644
index 000000000..75a635306
--- /dev/null
+++ b/src/pkg/runtime/append_test.go
@@ -0,0 +1,51 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package runtime_test
+
+import "testing"
+
+const N = 20
+
+func BenchmarkAppend(b *testing.B) {
+ b.StopTimer()
+ x := make([]int, 0, N)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ for j := 0; j < N; j++ {
+ x = append(x, j)
+ }
+ }
+}
+
+func BenchmarkAppendSpecialCase(b *testing.B) {
+ b.StopTimer()
+ x := make([]int, 0, N)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ for j := 0; j < N; j++ {
+ if len(x) < cap(x) {
+ x = x[:len(x)+1]
+ x[len(x)-1] = j
+ } else {
+ x = append(x, j)
+ }
+ }
+ }
+}
+
+var x = make([]int, 0, 10)
+
+func f() int {
+ x[:1][0] = 3
+ return 2
+}
+
+func TestSideEffectOrder(t *testing.T) {
+ x = append(x, 1, f())
+ if x[0] != 1 || x[1] != 2 {
+ t.Error("append failed: ", x[0], x[1])
+ }
+}
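
The new TestSideEffectOrder pins down evaluation order: append evaluates every argument before it stores anything into the destination, so the write that f makes through x[:1][0] is overwritten by the appended values. A standalone illustration of the same ordering (my example, not part of the change):

package main

import "fmt"

var s = make([]int, 0, 4)

func side() int {
	s[:1][0] = 99 // scribbles on the spare capacity before append stores its arguments
	return 2
}

func main() {
	s = append(s, 1, side())
	fmt.Println(s) // prints [1 2]: the arguments are evaluated first, then copied in
}
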
diff --git a/src/pkg/runtime/arm/closure.c b/src/pkg/runtime/arm/closure.c
index 3aca3a42d..36a93bc53 100644
--- a/src/pkg/runtime/arm/closure.c
+++ b/src/pkg/runtime/arm/closure.c
@@ -43,7 +43,7 @@ vars: WORD arg0
WORD arg2
*/
-extern void cacheflush(byte* start, byte* end);
+extern void runtime·cacheflush(byte* start, byte* end);
#pragma textflag 7
void
diff --git a/src/pkg/runtime/arm/softfloat.c b/src/pkg/runtime/arm/softfloat.c
index f91a6fc09..0a071dada 100644
--- a/src/pkg/runtime/arm/softfloat.c
+++ b/src/pkg/runtime/arm/softfloat.c
@@ -15,6 +15,7 @@
#define FLAGS_V (1 << 28)
void runtime·abort(void);
+void math·sqrtGoC(uint64, uint64*);
static uint32 trace = 0;
@@ -357,6 +358,15 @@ stage3: // regd, regm are 4bit variables
regd, regm, m->freghi[regd], m->freglo[regd]);
break;
+ case 0xeeb10bc0: // D[regd] = sqrt D[regm]
+ math·sqrtGoC(getd(regm), &uval);
+ putd(regd, uval);
+
+ if(trace)
+ runtime·printf("*** D[%d] = sqrt D[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
case 0xeeb40bc0: // D[regd] :: D[regm] (CMPD)
runtime·fcmp64c(getd(regd), getd(regm), &cmp, &nan);
m->fflag = fstatus(nan, cmp);
diff --git a/src/pkg/runtime/arm/vlop.s b/src/pkg/runtime/arm/vlop.s
index 2c5d7ebe1..fc679f0ee 100644
--- a/src/pkg/runtime/arm/vlop.s
+++ b/src/pkg/runtime/arm/vlop.s
@@ -105,7 +105,7 @@ loop:
/*
* compare numerator to denominator
- * if less, subtract and set quotent bit
+ * if less, subtract and set quotient bit
*/
CMP R(D), R(N)
ORR.HS $1, R(Q)
diff --git a/src/pkg/runtime/cgo/Makefile b/src/pkg/runtime/cgo/Makefile
index 768fe80ac..f26da2c51 100644
--- a/src/pkg/runtime/cgo/Makefile
+++ b/src/pkg/runtime/cgo/Makefile
@@ -28,18 +28,20 @@ CGO_OFILES=\
$(GOOS)_$(GOARCH).o\
util.o\
-OFILES=\
- iscgo.$O\
- callbacks.$O\
- _cgo_import.$O\
- $(CGO_OFILES)\
-
ifeq ($(GOOS),windows)
CGO_LDFLAGS=-lm -mthreads
else
CGO_LDFLAGS=-lpthread
+CGO_OFILES+=setenv.o\
+
endif
+OFILES=\
+ iscgo.$O\
+ callbacks.$O\
+ _cgo_import.$O\
+ $(CGO_OFILES)\
+
ifeq ($(GOOS),freebsd)
OFILES+=\
freebsd.$O\
diff --git a/src/pkg/runtime/cgo/setenv.c b/src/pkg/runtime/cgo/setenv.c
new file mode 100644
index 000000000..c911b8392
--- /dev/null
+++ b/src/pkg/runtime/cgo/setenv.c
@@ -0,0 +1,16 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "libcgo.h"
+
+#include <stdlib.h>
+
+/* Stub for calling setenv */
+static void
+xlibcgo_setenv(char **arg)
+{
+ setenv(arg[0], arg[1], 1);
+}
+
+void (*libcgo_setenv)(char**) = xlibcgo_setenv;
diff --git a/src/pkg/runtime/closure_test.go b/src/pkg/runtime/closure_test.go
new file mode 100644
index 000000000..ea65fbd5f
--- /dev/null
+++ b/src/pkg/runtime/closure_test.go
@@ -0,0 +1,53 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package runtime_test
+
+import "testing"
+
+var s int
+
+func BenchmarkCallClosure(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s += func(ii int) int { return 2 * ii }(i)
+ }
+}
+
+func BenchmarkCallClosure1(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ j := i
+ s += func(ii int) int { return 2*ii + j }(i)
+ }
+}
+
+var ss *int
+
+func BenchmarkCallClosure2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ j := i
+ s += func() int {
+ ss = &j
+ return 2
+ }()
+ }
+}
+
+func addr1(x int) *int {
+ return func() *int { return &x }()
+}
+
+func BenchmarkCallClosure3(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ss = addr1(i)
+ }
+}
+
+func addr2() (x int, p *int) {
+ return 0, func() *int { return &x }()
+}
+
+func BenchmarkCallClosure4(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _, ss = addr2()
+ }
+}
diff --git a/src/pkg/runtime/linux/386/sys.s b/src/pkg/runtime/linux/386/sys.s
index 868a0d901..e8b423324 100644
--- a/src/pkg/runtime/linux/386/sys.s
+++ b/src/pkg/runtime/linux/386/sys.s
@@ -47,6 +47,14 @@ TEXT runtime·setitimer(SB),7,$0-24
INT $0x80
RET
+TEXT runtime·mincore(SB),7,$0-24
+ MOVL $218, AX // syscall - mincore
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ INT $0x80
+ RET
+
TEXT runtime·gettime(SB), 7, $32
MOVL $78, AX // syscall - gettimeofday
LEAL 8(SP), BX
diff --git a/src/pkg/runtime/linux/amd64/sys.s b/src/pkg/runtime/linux/amd64/sys.s
index eadd30005..66fdab208 100644
--- a/src/pkg/runtime/linux/amd64/sys.s
+++ b/src/pkg/runtime/linux/amd64/sys.s
@@ -53,6 +53,14 @@ TEXT runtime·setitimer(SB),7,$0-24
SYSCALL
RET
+TEXT runtime·mincore(SB),7,$0-24
+ MOVQ 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVQ 24(SP), DX
+ MOVL $27, AX // syscall entry
+ SYSCALL
+ RET
+
TEXT runtime·gettime(SB), 7, $32
LEAQ 8(SP), DI
MOVQ $0, SI
diff --git a/src/pkg/runtime/linux/arm/sys.s b/src/pkg/runtime/linux/arm/sys.s
index 2b5365bd8..ab5349822 100644
--- a/src/pkg/runtime/linux/arm/sys.s
+++ b/src/pkg/runtime/linux/arm/sys.s
@@ -26,6 +26,7 @@
#define SYS_exit_group (SYS_BASE + 248)
#define SYS_munmap (SYS_BASE + 91)
#define SYS_setitimer (SYS_BASE + 104)
+#define SYS_mincore (SYS_BASE + 219)
#define SYS_gettid (SYS_BASE + 224)
#define SYS_tkill (SYS_BASE + 238)
@@ -91,6 +92,14 @@ TEXT runtime·setitimer(SB),7,$0
SWI $0
RET
+TEXT runtime·mincore(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW 8(FP), R2
+ MOVW $SYS_mincore, R7
+ SWI $0
+ RET
+
TEXT runtime·gettime(SB),7,$32
/* dummy version - return 0,0 */
MOVW $0, R1
diff --git a/src/pkg/runtime/linux/mem.c b/src/pkg/runtime/linux/mem.c
index ce1a8aa70..38ca7e4a0 100644
--- a/src/pkg/runtime/linux/mem.c
+++ b/src/pkg/runtime/linux/mem.c
@@ -3,6 +3,30 @@
#include "os.h"
#include "malloc.h"
+enum
+{
+ ENOMEM = 12,
+};
+
+static int32
+addrspace_free(void *v, uintptr n)
+{
+ uintptr page_size = 4096;
+ uintptr off;
+ int8 one_byte;
+
+ for(off = 0; off < n; off += page_size) {
+ int32 errval = runtime·mincore((int8 *)v + off, page_size, (void *)&one_byte);
+ // errval is 0 if success, or -(error_code) if error.
+ if (errval == 0 || errval != -ENOMEM)
+ return 0;
+ }
+ USED(v);
+ USED(n);
+ return 1;
+}
+
+
void*
runtime·SysAlloc(uintptr n)
{
@@ -39,20 +63,21 @@ runtime·SysFree(void *v, uintptr n)
void*
runtime·SysReserve(void *v, uintptr n)
{
+ void *p;
+
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// and check the assumption in SysMap.
if(sizeof(void*) == 8)
return v;
- return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p < (void*)4096) {
+ return nil;
+ }
+ return p;
}
-enum
-{
- ENOMEM = 12,
-};
-
void
runtime·SysMap(void *v, uintptr n)
{
@@ -63,6 +88,13 @@ runtime·SysMap(void *v, uintptr n)
// On 64-bit, we don't actually have v reserved, so tread carefully.
if(sizeof(void*) == 8) {
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p != v && addrspace_free(v, n)) {
+ // On some systems, mmap ignores v without
+ // MAP_FIXED, so retry if the address space is free.
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+ }
+ if(p == (void*)ENOMEM)
+ runtime·throw("runtime: out of memory");
if(p != v) {
runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
runtime·throw("runtime: address space conflict");
@@ -71,7 +103,7 @@ runtime·SysMap(void *v, uintptr n)
}
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)-ENOMEM)
+ if(p == (void*)ENOMEM)
runtime·throw("runtime: out of memory");
if(p != v)
runtime·throw("runtime: cannot map pages in arena address space");
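
addrspace_free above probes the range one page at a time through the new runtime·mincore wrapper: mincore fails with ENOMEM exactly when a page is not mapped, so a range that returns ENOMEM for every page is free and the MAP_FIXED retry in SysMap cannot clobber an existing mapping. A rough user-space analogue on Linux using the raw syscall (illustrative sketch only; page handling and error treatment are my assumptions):

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// pageMapped reports whether the page at addr is backed by any mapping,
// using the same ENOMEM signal that addrspace_free relies on.
func pageMapped(addr, pageSize uintptr) bool {
	var vec byte
	_, _, errno := syscall.Syscall(syscall.SYS_MINCORE, addr, pageSize, uintptr(unsafe.Pointer(&vec)))
	return errno != syscall.ENOMEM // ENOMEM means the range is not mapped at all
}

func main() {
	pageSize := uintptr(syscall.Getpagesize())
	buf := make([]byte, pageSize)
	addr := uintptr(unsafe.Pointer(&buf[0])) &^ (pageSize - 1) // round down to a page boundary
	fmt.Println("heap page mapped:", pageMapped(addr, pageSize))
}
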
diff --git a/src/pkg/runtime/linux/thread.c b/src/pkg/runtime/linux/thread.c
index 7166b0ef2..7c7ca7b4e 100644
--- a/src/pkg/runtime/linux/thread.c
+++ b/src/pkg/runtime/linux/thread.c
@@ -116,7 +116,7 @@ again:
//
// We only really care that (v&1) == 1 (the lock is held),
// and in fact there is a futex variant that could
- // accomodate that check, but let's not get carried away.)
+ // accommodate that check, but let's not get carried away.)
futexsleep(&l->key, v+2);
// We're awake: remove ourselves from the count.
@@ -252,10 +252,10 @@ runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
stk, m, g, fn, runtime·clone, m->id, m->tls[0], &m);
}
- ret = runtime·clone(flags, stk, m, g, fn);
-
- if(ret < 0)
- *(int32*)123 = 123;
+ if((ret = runtime·clone(flags, stk, m, g, fn)) < 0) {
+ runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), -ret);
+ runtime·throw("runtime.newosproc");
+ }
}
void
diff --git a/src/pkg/runtime/malloc.goc b/src/pkg/runtime/malloc.goc
index 1f2d6da40..c55be9772 100644
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
@@ -487,7 +487,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
if(runtime·getfinalizer(obj.data, 0)) {
- runtime·printf("runtime.SetFinalizer: finalizer already set");
+ runtime·printf("runtime.SetFinalizer: finalizer already set\n");
goto throw;
}
}
diff --git a/src/pkg/runtime/mprof.goc b/src/pkg/runtime/mprof.goc
index aae3d183f..517f96a31 100644
--- a/src/pkg/runtime/mprof.goc
+++ b/src/pkg/runtime/mprof.goc
@@ -113,7 +113,7 @@ static uintptr addrmem;
// hashMultiplier is the bottom 32 bits of int((sqrt(5)-1)/2 * (1<<32)).
// This is a good multiplier as suggested in CLR, Knuth. The hash
// value is taken to be the top AddrHashBits bits of the bottom 32 bits
-// of the muliplied value.
+// of the multiplied value.
enum {
HashMultiplier = 2654435769U
};
diff --git a/src/pkg/runtime/proc.c b/src/pkg/runtime/proc.c
index 52784854f..c5af8b754 100644
--- a/src/pkg/runtime/proc.c
+++ b/src/pkg/runtime/proc.c
@@ -714,7 +714,7 @@ runtime·oldstack(void)
goid = old.gobuf.g->goid; // fault if g is bad, before gogo
if(old.free != 0)
- runtime·stackfree(g1->stackguard - StackGuard - StackSystem, old.free);
+ runtime·stackfree(g1->stackguard - StackGuard, old.free);
g1->stackbase = old.stackbase;
g1->stackguard = old.stackguard;
@@ -756,7 +756,7 @@ runtime·newstack(void)
// the new Stktop* is necessary to unwind, but
// we don't need to create a new segment.
top = (Stktop*)(m->morebuf.sp - sizeof(*top));
- stk = g1->stackguard - StackGuard - StackSystem;
+ stk = g1->stackguard - StackGuard;
free = 0;
} else {
// allocate new segment.
@@ -785,7 +785,7 @@ runtime·newstack(void)
g1->ispanic = false;
g1->stackbase = (byte*)top;
- g1->stackguard = stk + StackGuard + StackSystem;
+ g1->stackguard = stk + StackGuard;
sp = (byte*)top;
if(argsize > 0) {
@@ -834,7 +834,7 @@ runtime·malg(int32 stacksize)
g->param = nil;
}
newg->stack0 = stk;
- newg->stackguard = stk + StackSystem + StackGuard;
+ newg->stackguard = stk + StackGuard;
newg->stackbase = stk + StackSystem + stacksize - sizeof(Stktop);
runtime·memclr(newg->stackbase, sizeof(Stktop));
}
@@ -880,7 +880,7 @@ runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
if((newg = gfget()) != nil){
newg->status = Gwaiting;
- if(newg->stackguard - StackGuard - StackSystem != newg->stack0)
+ if(newg->stackguard - StackGuard != newg->stack0)
runtime·throw("invalid stack in newg");
} else {
newg = runtime·malg(StackMin);
@@ -1165,7 +1165,7 @@ nomatch:
static void
gfput(G *g)
{
- if(g->stackguard - StackGuard - StackSystem != g->stack0)
+ if(g->stackguard - StackGuard != g->stack0)
runtime·throw("invalid stack in gfput");
g->schedlink = runtime·sched.gfree;
runtime·sched.gfree = g;
@@ -1343,3 +1343,26 @@ runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
if(hz != 0)
runtime·resetcpuprofiler(hz);
}
+
+void (*libcgo_setenv)(byte**);
+
+void
+os·setenv_c(String k, String v)
+{
+ byte *arg[2];
+
+ if(libcgo_setenv == nil)
+ return;
+
+ arg[0] = runtime·malloc(k.len + 1);
+ runtime·mcpy(arg[0], k.str, k.len);
+ arg[0][k.len] = 0;
+
+ arg[1] = runtime·malloc(v.len + 1);
+ runtime·mcpy(arg[1], v.str, v.len);
+ arg[1][v.len] = 0;
+
+ runtime·asmcgocall(libcgo_setenv, arg);
+ runtime·free(arg[0]);
+ runtime·free(arg[1]);
+}
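
Combined with cgo/setenv.c above, os·setenv_c makes os.Setenv visible to C code: when the program links against cgo, the runtime copies the key and value into NUL-terminated buffers and forwards them to C setenv through libcgo_setenv. A minimal usage sketch (assumes a cgo-enabled build on a non-Windows system):

package main

/*
#include <stdlib.h>
*/
import "C"

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("CGO_DEMO", "hello")                    // now forwarded to the C environment
	v := C.GoString(C.getenv(C.CString("CGO_DEMO"))) // CString leaks here; fine for a demo
	fmt.Println("C sees:", v)
}
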
diff --git a/src/pkg/runtime/proc_test.go b/src/pkg/runtime/proc_test.go
index a15b2d80a..cac4f9eea 100644
--- a/src/pkg/runtime/proc_test.go
+++ b/src/pkg/runtime/proc_test.go
@@ -24,20 +24,23 @@ func TestStopTheWorldDeadlock(t *testing.T) {
t.Logf("skipping during short test")
return
}
- runtime.GOMAXPROCS(3)
- compl := make(chan int, 1)
+ maxprocs := runtime.GOMAXPROCS(3)
+ compl := make(chan bool, 2)
go func() {
for i := 0; i != 1000; i += 1 {
runtime.GC()
}
- compl <- 0
+ compl <- true
}()
go func() {
for i := 0; i != 1000; i += 1 {
runtime.GOMAXPROCS(3)
}
+ compl <- true
}()
go perpetuumMobile()
<-compl
+ <-compl
stop <- true
+ runtime.GOMAXPROCS(maxprocs)
}
diff --git a/src/pkg/runtime/runtime-gdb.py b/src/pkg/runtime/runtime-gdb.py
index 3f767fbdd..a96f3f382 100644
--- a/src/pkg/runtime/runtime-gdb.py
+++ b/src/pkg/runtime/runtime-gdb.py
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
-"""GDB Pretty printers and convencience functions for Go's runtime structures.
+"""GDB Pretty printers and convenience functions for Go's runtime structures.
This script is loaded by GDB when it finds a .debug_gdb_scripts
section in the compiled binary. The [68]l linkers emit this with a
@@ -393,7 +393,7 @@ class GoIfaceCmd(gdb.Command):
# so Itype will start with a commontype which has kind = interface
#
-# Register all convience functions and CLI commands
+# Register all convenience functions and CLI commands
#
for k in vars().values():
if hasattr(k, 'invoke'):
diff --git a/src/pkg/runtime/runtime.h b/src/pkg/runtime/runtime.h
index f9b404e15..f3ccff1bc 100644
--- a/src/pkg/runtime/runtime.h
+++ b/src/pkg/runtime/runtime.h
@@ -306,7 +306,7 @@ enum {
/*
* defined macros
- * you need super-goru privilege
+ * you need super-gopher-guru privilege
* to add this list.
*/
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
@@ -413,6 +413,7 @@ int32 runtime·gotraceback(void);
void runtime·traceback(uint8 *pc, uint8 *sp, uint8 *lr, G* gp);
void runtime·tracebackothers(G*);
int32 runtime·write(int32, void*, int32);
+int32 runtime·mincore(void*, uintptr, byte*);
bool runtime·cas(uint32*, uint32, uint32);
bool runtime·casp(void**, void*, void*);
uint32 runtime·xadd(uint32 volatile*, int32);
@@ -597,17 +598,3 @@ int32 runtime·chancap(Hchan*);
void runtime·ifaceE2I(struct InterfaceType*, Eface, Iface*);
-enum
-{
- // StackSystem is a number of additional bytes to add
- // to each stack below the usual guard area for OS-specific
- // purposes like signal handling.
- // TODO(rsc): This is only for Windows. Can't Windows use
- // a separate exception stack like every other operating system?
-#ifdef __WINDOWS__
- StackSystem = 2048,
-#else
- StackSystem = 0,
-#endif
-};
-
diff --git a/src/pkg/runtime/slice.c b/src/pkg/runtime/slice.c
index 1fee923e4..9146c177f 100644
--- a/src/pkg/runtime/slice.c
+++ b/src/pkg/runtime/slice.c
@@ -9,6 +9,8 @@
static int32 debug = 0;
static void makeslice1(SliceType*, int32, int32, Slice*);
+static void growslice1(SliceType*, Slice, int32, Slice *);
+static void appendslice1(SliceType*, Slice, Slice, Slice*);
void runtime·slicecopy(Slice to, Slice fm, uintptr width, int32 ret);
// see also unsafe·NewArray
@@ -46,22 +48,6 @@ makeslice1(SliceType *t, int32 len, int32 cap, Slice *ret)
ret->array = runtime·mal(size);
}
-static void appendslice1(SliceType*, Slice, Slice, Slice*);
-
-// append(type *Type, n int, old []T, ...,) []T
-#pragma textflag 7
-void
-runtime·append(SliceType *t, int32 n, Slice old, ...)
-{
- Slice sl;
- Slice *ret;
-
- sl.len = n;
- sl.array = (byte*)(&old+1);
- ret = (Slice*)(sl.array + ((t->elem->size*n+sizeof(uintptr)-1) & ~(sizeof(uintptr)-1)));
- appendslice1(t, old, sl, ret);
-}
-
// appendslice(type *Type, x, y, []T) []T
void
runtime·appendslice(SliceType *t, Slice x, Slice y, Slice ret)
@@ -72,36 +58,69 @@ runtime·appendslice(SliceType *t, Slice x, Slice y, Slice ret)
static void
appendslice1(SliceType *t, Slice x, Slice y, Slice *ret)
{
- Slice newx;
int32 m;
uintptr w;
- if(x.len+y.len < x.len)
+ m = x.len+y.len;
+
+ if(m < x.len)
runtime·throw("append: slice overflow");
+ if(m > x.cap)
+ growslice1(t, x, m, ret);
+ else
+ *ret = x;
+
w = t->elem->size;
- if(x.len+y.len > x.cap) {
- m = x.cap;
- if(m == 0)
- m = y.len;
- else {
- do {
- if(x.len < 1024)
- m += m;
- else
- m += m/4;
- } while(m < x.len+y.len);
- }
- makeslice1(t, x.len, m, &newx);
- runtime·memmove(newx.array, x.array, x.len*w);
- x = newx;
+ runtime·memmove(ret->array + ret->len*w, y.array, y.len*w);
+ ret->len += y.len;
+}
+
+// growslice(type *Type, x, []T, n int64) []T
+void
+runtime·growslice(SliceType *t, Slice old, int64 n, Slice ret)
+{
+ int64 cap;
+
+ if(n < 1)
+ runtime·panicstring("growslice: invalid n");
+
+ cap = old.cap + n;
+
+ if((int32)cap != cap || cap > ((uintptr)-1) / t->elem->size)
+ runtime·panicstring("growslice: cap out of range");
+
+ growslice1(t, old, cap, &ret);
+
+ FLUSH(&ret);
+
+ if(debug) {
+ runtime·printf("growslice(%S,", *t->string);
+ runtime·printslice(old);
+ runtime·printf(", new cap=%D) =", cap);
+ runtime·printslice(ret);
}
- runtime·memmove(x.array+x.len*w, y.array, y.len*w);
- x.len += y.len;
- *ret = x;
}
+static void
+growslice1(SliceType *t, Slice x, int32 newcap, Slice *ret)
+{
+ int32 m;
+ m = x.cap;
+ if(m == 0)
+ m = newcap;
+ else {
+ do {
+ if(x.len < 1024)
+ m += m;
+ else
+ m += m/4;
+ } while(m < newcap);
+ }
+ makeslice1(t, x.len, m, ret);
+ runtime·memmove(ret->array, x.array, ret->len * t->elem->size);
+}
// sliceslice(old []any, lb uint64, hb uint64, width uint64) (ary []any);
void
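
The growth policy in growslice1 is the same one the old append path used inline: double the capacity while the slice is shorter than 1024 elements, then grow by about 25% per step until the requested capacity fits. Restated as a small Go function (names are mine, not the runtime's):

package main

import "fmt"

// nextCap mirrors the loop in growslice1: given the current length and
// capacity, it returns the capacity chosen when at least needed elements
// must fit.
func nextCap(length, capacity, needed int) int {
	m := capacity
	if m == 0 {
		return needed
	}
	for m < needed {
		if length < 1024 {
			m += m // double while the slice is small
		} else {
			m += m / 4 // grow by ~25% once it is large
		}
	}
	return m
}

func main() {
	fmt.Println(nextCap(5, 8, 9))          // 16: small slices double
	fmt.Println(nextCap(2000, 2000, 2001)) // 2500: large slices grow by a quarter
}
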
diff --git a/src/pkg/runtime/softfloat64.go b/src/pkg/runtime/softfloat64.go
index d9bbe5def..e0c3b7b73 100644
--- a/src/pkg/runtime/softfloat64.go
+++ b/src/pkg/runtime/softfloat64.go
@@ -11,7 +11,7 @@ package runtime
const (
mantbits64 uint = 52
expbits64 uint = 11
- bias64 = -1<<(expbits64-1) + 1
+ bias64 = -1<<(expbits64-1) + 1
nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1
inf64 uint64 = (1<<expbits64 - 1) << mantbits64
@@ -19,7 +19,7 @@ const (
mantbits32 uint = 23
expbits32 uint = 8
- bias32 = -1<<(expbits32-1) + 1
+ bias32 = -1<<(expbits32-1) + 1
nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1
inf32 uint32 = (1<<expbits32 - 1) << mantbits32
diff --git a/src/pkg/runtime/stack.h b/src/pkg/runtime/stack.h
index ebf0462b5..2b6b0e387 100644
--- a/src/pkg/runtime/stack.h
+++ b/src/pkg/runtime/stack.h
@@ -53,6 +53,16 @@ functions to make sure that this limit cannot be violated.
*/
enum {
+ // StackSystem is a number of additional bytes to add
+ // to each stack below the usual guard area for OS-specific
+ // purposes like signal handling. Used on Windows because
+ // it does not use a separate stack.
+#ifdef __WINDOWS__
+ StackSystem = 2048,
+#else
+ StackSystem = 0,
+#endif
+
// The amount of extra stack to allocate beyond the size
// needed for the single frame that triggered the split.
StackExtra = 1024,
@@ -73,7 +83,7 @@ enum {
// The stack guard is a pointer this many bytes above the
// bottom of the stack.
- StackGuard = 256,
+ StackGuard = 256 + StackSystem,
// After a stack split check the SP is allowed to be this
// many bytes below the stack guard. This saves an instruction
@@ -82,5 +92,5 @@ enum {
// The maximum number of bytes that a chain of NOSPLIT
// functions can use.
- StackLimit = StackGuard - StackSmall,
+ StackLimit = StackGuard - StackSystem - StackSmall,
};
diff --git a/src/pkg/runtime/windows/386/sys.s b/src/pkg/runtime/windows/386/sys.s
index 15f7f95b8..d38405075 100644
--- a/src/pkg/runtime/windows/386/sys.s
+++ b/src/pkg/runtime/windows/386/sys.s
@@ -59,15 +59,21 @@ TEXT runtime·setlasterror(SB),7,$0
TEXT runtime·sigtramp(SB),7,$0
PUSHL BP // cdecl
+ PUSHL BX
+ PUSHL SI
+ PUSHL DI
PUSHL 0(FS)
CALL runtime·sigtramp1(SB)
POPL 0(FS)
+ POPL DI
+ POPL SI
+ POPL BX
POPL BP
RET
-TEXT runtime·sigtramp1(SB),0,$16-28
+TEXT runtime·sigtramp1(SB),0,$16-40
// unwinding?
- MOVL info+12(FP), BX
+ MOVL info+24(FP), BX
MOVL 4(BX), CX // exception flags
ANDL $6, CX
MOVL $1, AX
@@ -75,15 +81,15 @@ TEXT runtime·sigtramp1(SB),0,$16-28
// place ourselves at the top of the SEH chain to
// ensure SEH frames lie within thread stack bounds
- MOVL frame+16(FP), CX // our SEH frame
+ MOVL frame+28(FP), CX // our SEH frame
MOVL CX, 0(FS)
// copy arguments for call to sighandler
MOVL BX, 0(SP)
MOVL CX, 4(SP)
- MOVL context+20(FP), BX
+ MOVL context+32(FP), BX
MOVL BX, 8(SP)
- MOVL dispatcher+24(FP), BX
+ MOVL dispatcher+36(FP), BX
MOVL BX, 12(SP)
CALL runtime·sighandler(SB)
diff --git a/src/pkg/runtime/windows/thread.c b/src/pkg/runtime/windows/thread.c
index 2ce92dcfb..81ad68033 100644
--- a/src/pkg/runtime/windows/thread.c
+++ b/src/pkg/runtime/windows/thread.c
@@ -324,13 +324,31 @@ runtime·ctrlhandler1(uint32 type)
return 0;
}
+// Will keep all callbacks in a linked list, so they don't get garbage collected.
+typedef struct Callback Callback;
+struct Callback {
+ Callback* link;
+ void* gobody;
+ byte asmbody;
+};
+
+typedef struct Callbacks Callbacks;
+struct Callbacks {
+ Lock;
+ Callback* link;
+ int32 n;
+};
+
+static Callbacks cbs;
+
// Call back from windows dll into go.
byte *
runtime·compilecallback(Eface fn, bool cleanstack)
{
Func *f;
int32 argsize, n;
- byte *ret, *p;
+ byte *p;
+ Callback *c;
if(fn.type->kind != KindFunc)
runtime·panicstring("not a function");
@@ -348,7 +366,23 @@ runtime·compilecallback(Eface fn, bool cleanstack)
if(cleanstack)
n += 2; // ... argsize
- ret = p = runtime·mal(n);
+ runtime·lock(&cbs);
+ for(c = cbs.link; c != nil; c = c->link) {
+ if(c->gobody == fn.data) {
+ runtime·unlock(&cbs);
+ return &c->asmbody;
+ }
+ }
+ if(cbs.n >= 2000)
+ runtime·throw("too many callback functions");
+ c = runtime·mal(sizeof *c + n);
+ c->gobody = fn.data;
+ c->link = cbs.link;
+ cbs.link = c;
+ cbs.n++;
+ runtime·unlock(&cbs);
+
+ p = &c->asmbody;
// MOVL fn, AX
*p++ = 0xb8;
@@ -376,7 +410,7 @@ runtime·compilecallback(Eface fn, bool cleanstack)
} else
*p = 0xc3;
- return ret;
+ return &c->asmbody;
}
void
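
compilecallback now caches the generated thunks: each distinct Go function gets one Callback node, the nodes sit on a lock-protected linked list so the collector never frees live thunk code, and the total is capped at 2000. The lookup-or-create pattern, sketched in Go (a hypothetical analogue of the C code, not the runtime's implementation):

package main

import (
	"fmt"
	"sync"
)

// thunkCache plays the role of the Callbacks list in thread.c: one cached
// stub per distinct function, guarded by a lock and capped in size.
type thunkCache struct {
	mu    sync.Mutex
	stubs map[uintptr][]byte // function identity -> generated machine code
	limit int
}

func (c *thunkCache) get(fn uintptr, generate func() []byte) []byte {
	c.mu.Lock()
	defer c.mu.Unlock()
	if stub, ok := c.stubs[fn]; ok {
		return stub // reuse the stub compiled for this function earlier
	}
	if len(c.stubs) >= c.limit {
		panic("too many callback functions")
	}
	stub := generate()
	c.stubs[fn] = stub
	return stub
}

func main() {
	cache := &thunkCache{stubs: map[uintptr][]byte{}, limit: 2000}
	stub := cache.get(0x1234, func() []byte { return []byte{0xb8} }) // hypothetical fn id, MOVL prefix
	fmt.Printf("stub: % x\n", stub)
}
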