summaryrefslogtreecommitdiff
path: root/src/pkg/runtime
diff options
context:
space:
mode:
authorOndřej Surý <ondrej@sury.org>2011-09-13 13:13:40 +0200
committerOndřej Surý <ondrej@sury.org>2011-09-13 13:13:40 +0200
commit5ff4c17907d5b19510a62e08fd8d3b11e62b431d (patch)
treec0650497e988f47be9c6f2324fa692a52dea82e1 /src/pkg/runtime
parent80f18fc933cf3f3e829c5455a1023d69f7b86e52 (diff)
downloadgolang-5ff4c17907d5b19510a62e08fd8d3b11e62b431d.tar.gz
Imported Upstream version 60upstream/60
Diffstat (limited to 'src/pkg/runtime')
-rw-r--r--src/pkg/runtime/386/arch.h3
-rw-r--r--src/pkg/runtime/386/asm.s549
-rw-r--r--src/pkg/runtime/386/atomic.c19
-rw-r--r--src/pkg/runtime/386/closure.c105
-rw-r--r--src/pkg/runtime/386/memmove.s86
-rw-r--r--src/pkg/runtime/386/vlop.s48
-rw-r--r--src/pkg/runtime/386/vlrt.c815
-rw-r--r--src/pkg/runtime/Makefile168
-rw-r--r--src/pkg/runtime/amd64/arch.h3
-rw-r--r--src/pkg/runtime/amd64/asm.s577
-rw-r--r--src/pkg/runtime/amd64/atomic.c19
-rw-r--r--src/pkg/runtime/amd64/closure.c123
-rw-r--r--src/pkg/runtime/amd64/memmove.s88
-rw-r--r--src/pkg/runtime/amd64/traceback.c295
-rw-r--r--src/pkg/runtime/append_test.go52
-rw-r--r--src/pkg/runtime/arm/arch.h3
-rw-r--r--src/pkg/runtime/arm/asm.s316
-rw-r--r--src/pkg/runtime/arm/atomic.c83
-rw-r--r--src/pkg/runtime/arm/closure.c129
-rw-r--r--src/pkg/runtime/arm/memmove.s255
-rw-r--r--src/pkg/runtime/arm/memset.s94
-rw-r--r--src/pkg/runtime/arm/softfloat.c525
-rw-r--r--src/pkg/runtime/arm/traceback.c213
-rw-r--r--src/pkg/runtime/arm/vlop.s190
-rw-r--r--src/pkg/runtime/arm/vlrt.c816
-rwxr-xr-xsrc/pkg/runtime/cgo/386.S67
-rw-r--r--src/pkg/runtime/cgo/Makefile60
-rw-r--r--src/pkg/runtime/cgo/amd64.S73
-rw-r--r--src/pkg/runtime/cgo/arm.S1
-rw-r--r--src/pkg/runtime/cgo/callbacks.c73
-rw-r--r--src/pkg/runtime/cgo/cgo.go17
-rw-r--r--src/pkg/runtime/cgo/darwin_386.c149
-rw-r--r--src/pkg/runtime/cgo/darwin_amd64.c119
-rw-r--r--src/pkg/runtime/cgo/freebsd.c13
-rw-r--r--src/pkg/runtime/cgo/freebsd_386.c64
-rw-r--r--src/pkg/runtime/cgo/freebsd_amd64.c63
-rw-r--r--src/pkg/runtime/cgo/iscgo.c14
-rw-r--r--src/pkg/runtime/cgo/libcgo.h60
-rw-r--r--src/pkg/runtime/cgo/linux_386.c73
-rw-r--r--src/pkg/runtime/cgo/linux_amd64.c63
-rw-r--r--src/pkg/runtime/cgo/linux_arm.c19
-rw-r--r--src/pkg/runtime/cgo/setenv.c16
-rw-r--r--src/pkg/runtime/cgo/util.c51
-rwxr-xr-xsrc/pkg/runtime/cgo/windows_386.c62
-rwxr-xr-xsrc/pkg/runtime/cgo/windows_amd64.c60
-rw-r--r--src/pkg/runtime/cgocall.c249
-rw-r--r--src/pkg/runtime/cgocall.h12
-rw-r--r--src/pkg/runtime/chan.c1161
-rw-r--r--src/pkg/runtime/chan_test.go322
-rw-r--r--src/pkg/runtime/closure_test.go53
-rw-r--r--src/pkg/runtime/complex.c60
-rw-r--r--src/pkg/runtime/cpuprof.c425
-rw-r--r--src/pkg/runtime/darwin/386/defs.h289
-rw-r--r--src/pkg/runtime/darwin/386/rt0.s8
-rw-r--r--src/pkg/runtime/darwin/386/signal.c194
-rw-r--r--src/pkg/runtime/darwin/386/sys.s311
-rw-r--r--src/pkg/runtime/darwin/amd64/defs.h305
-rw-r--r--src/pkg/runtime/darwin/amd64/rt0.s10
-rw-r--r--src/pkg/runtime/darwin/amd64/signal.c204
-rw-r--r--src/pkg/runtime/darwin/amd64/sys.s295
-rw-r--r--src/pkg/runtime/darwin/defs.c159
-rw-r--r--src/pkg/runtime/darwin/mem.c55
-rw-r--r--src/pkg/runtime/darwin/os.h31
-rw-r--r--src/pkg/runtime/darwin/signals.h51
-rw-r--r--src/pkg/runtime/darwin/thread.c484
-rw-r--r--src/pkg/runtime/debug.go115
-rw-r--r--src/pkg/runtime/debug/Makefile11
-rw-r--r--src/pkg/runtime/debug/stack.go90
-rw-r--r--src/pkg/runtime/debug/stack_test.go55
-rw-r--r--src/pkg/runtime/error.go138
-rw-r--r--src/pkg/runtime/export_test.go23
-rw-r--r--src/pkg/runtime/extern.go192
-rw-r--r--src/pkg/runtime/float.c173
-rw-r--r--src/pkg/runtime/freebsd/386/defs.h187
-rw-r--r--src/pkg/runtime/freebsd/386/rt0.s9
-rw-r--r--src/pkg/runtime/freebsd/386/signal.c193
-rw-r--r--src/pkg/runtime/freebsd/386/sys.s239
-rw-r--r--src/pkg/runtime/freebsd/amd64/defs.h198
-rw-r--r--src/pkg/runtime/freebsd/amd64/rt0.s9
-rw-r--r--src/pkg/runtime/freebsd/amd64/signal.c201
-rw-r--r--src/pkg/runtime/freebsd/amd64/sys.s182
-rw-r--r--src/pkg/runtime/freebsd/defs.c108
-rw-r--r--src/pkg/runtime/freebsd/mem.c74
-rw-r--r--src/pkg/runtime/freebsd/os.h12
-rw-r--r--src/pkg/runtime/freebsd/signals.h52
-rw-r--r--src/pkg/runtime/freebsd/thread.c201
-rw-r--r--src/pkg/runtime/goc2c.c727
-rw-r--r--src/pkg/runtime/hashmap.c1180
-rw-r--r--src/pkg/runtime/hashmap.h159
-rw-r--r--src/pkg/runtime/iface.c788
-rw-r--r--src/pkg/runtime/linux/386/defs.h191
-rw-r--r--src/pkg/runtime/linux/386/rt0.s9
-rw-r--r--src/pkg/runtime/linux/386/signal.c184
-rw-r--r--src/pkg/runtime/linux/386/sys.s344
-rw-r--r--src/pkg/runtime/linux/amd64/defs.h236
-rw-r--r--src/pkg/runtime/linux/amd64/rt0.s10
-rw-r--r--src/pkg/runtime/linux/amd64/signal.c194
-rw-r--r--src/pkg/runtime/linux/amd64/sys.s252
-rw-r--r--src/pkg/runtime/linux/arm/defs.h149
-rw-r--r--src/pkg/runtime/linux/arm/rt0.s6
-rw-r--r--src/pkg/runtime/linux/arm/signal.c189
-rw-r--r--src/pkg/runtime/linux/arm/sys.s319
-rw-r--r--src/pkg/runtime/linux/defs.c95
-rw-r--r--src/pkg/runtime/linux/defs1.c24
-rw-r--r--src/pkg/runtime/linux/defs2.c120
-rw-r--r--src/pkg/runtime/linux/defs_arm.c122
-rw-r--r--src/pkg/runtime/linux/mem.c113
-rw-r--r--src/pkg/runtime/linux/os.h19
-rw-r--r--src/pkg/runtime/linux/signals.h51
-rw-r--r--src/pkg/runtime/linux/thread.c320
-rw-r--r--src/pkg/runtime/malloc.goc482
-rw-r--r--src/pkg/runtime/malloc.h422
-rw-r--r--src/pkg/runtime/mcache.c133
-rw-r--r--src/pkg/runtime/mcentral.c200
-rw-r--r--src/pkg/runtime/mem.go74
-rw-r--r--src/pkg/runtime/mfinal.c181
-rw-r--r--src/pkg/runtime/mfixalloc.c62
-rw-r--r--src/pkg/runtime/mgc0.c910
-rw-r--r--src/pkg/runtime/mheap.c374
-rwxr-xr-xsrc/pkg/runtime/mkasmh.sh112
-rwxr-xr-xsrc/pkg/runtime/mkgodefs.sh39
-rw-r--r--src/pkg/runtime/mkversion.c15
-rw-r--r--src/pkg/runtime/mprof.goc274
-rw-r--r--src/pkg/runtime/msize.c168
-rw-r--r--src/pkg/runtime/openbsd/amd64/defs.h149
-rw-r--r--src/pkg/runtime/openbsd/amd64/rt0.s8
-rw-r--r--src/pkg/runtime/openbsd/amd64/signal.c199
-rw-r--r--src/pkg/runtime/openbsd/amd64/sys.s221
-rw-r--r--src/pkg/runtime/openbsd/defs.c103
-rw-r--r--src/pkg/runtime/openbsd/mem.c74
-rw-r--r--src/pkg/runtime/openbsd/os.h12
-rw-r--r--src/pkg/runtime/openbsd/signals.h52
-rw-r--r--src/pkg/runtime/openbsd/thread.c156
-rw-r--r--src/pkg/runtime/plan9/386/defs.h2
-rw-r--r--src/pkg/runtime/plan9/386/rt0.s32
-rw-r--r--src/pkg/runtime/plan9/386/signal.c24
-rw-r--r--src/pkg/runtime/plan9/386/sys.s82
-rw-r--r--src/pkg/runtime/plan9/mem.c67
-rw-r--r--src/pkg/runtime/plan9/os.h57
-rw-r--r--src/pkg/runtime/plan9/signals.h1
-rw-r--r--src/pkg/runtime/plan9/thread.c174
-rw-r--r--src/pkg/runtime/pprof/Makefile11
-rw-r--r--src/pkg/runtime/pprof/pprof.go176
-rw-r--r--src/pkg/runtime/pprof/pprof_test.go77
-rw-r--r--src/pkg/runtime/print.c351
-rw-r--r--src/pkg/runtime/proc.c1568
-rw-r--r--src/pkg/runtime/proc.p526
-rw-r--r--src/pkg/runtime/proc_test.go125
-rw-r--r--src/pkg/runtime/rune.c224
-rw-r--r--src/pkg/runtime/runtime-gdb.py400
-rw-r--r--src/pkg/runtime/runtime.c728
-rw-r--r--src/pkg/runtime/runtime.h635
-rw-r--r--src/pkg/runtime/runtime1.goc10
-rw-r--r--src/pkg/runtime/sema.goc180
-rw-r--r--src/pkg/runtime/sema_test.go100
-rw-r--r--src/pkg/runtime/sig.go16
-rw-r--r--src/pkg/runtime/sigqueue.goc99
-rw-r--r--src/pkg/runtime/slice.c330
-rw-r--r--src/pkg/runtime/softfloat64.go498
-rw-r--r--src/pkg/runtime/softfloat64_test.go198
-rw-r--r--src/pkg/runtime/stack.h97
-rw-r--r--src/pkg/runtime/string.goc360
-rw-r--r--src/pkg/runtime/symtab.c466
-rw-r--r--src/pkg/runtime/symtab_test.go47
-rw-r--r--src/pkg/runtime/type.go208
-rw-r--r--src/pkg/runtime/type.h131
-rw-r--r--src/pkg/runtime/windows/386/defs.h81
-rw-r--r--src/pkg/runtime/windows/386/rt0.s14
-rw-r--r--src/pkg/runtime/windows/386/signal.c98
-rw-r--r--src/pkg/runtime/windows/386/sys.s256
-rw-r--r--src/pkg/runtime/windows/amd64/defs.h40
-rw-r--r--src/pkg/runtime/windows/amd64/rt0.s13
-rw-r--r--src/pkg/runtime/windows/amd64/signal.c20
-rw-r--r--src/pkg/runtime/windows/amd64/sys.s130
-rw-r--r--src/pkg/runtime/windows/defs.c37
-rw-r--r--src/pkg/runtime/windows/mem.c70
-rw-r--r--src/pkg/runtime/windows/os.h30
-rw-r--r--src/pkg/runtime/windows/signals.h3
-rw-r--r--src/pkg/runtime/windows/syscall.goc67
-rw-r--r--src/pkg/runtime/windows/thread.c432
180 files changed, 33343 insertions, 0 deletions
diff --git a/src/pkg/runtime/386/arch.h b/src/pkg/runtime/386/arch.h
new file mode 100644
index 000000000..d95c7aa81
--- /dev/null
+++ b/src/pkg/runtime/386/arch.h
@@ -0,0 +1,3 @@
+enum {
+ thechar = '8'
+};
diff --git a/src/pkg/runtime/386/asm.s b/src/pkg/runtime/386/asm.s
new file mode 100644
index 000000000..a14518839
--- /dev/null
+++ b/src/pkg/runtime/386/asm.s
@@ -0,0 +1,549 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "386/asm.h"
+
+TEXT _rt0_386(SB),7,$0
+ // Linux, Windows start the FPU in extended double precision.
+ // Other operating systems use double precision.
+ // Change to double precision to match them,
+ // and to match other hardware that only has double.
+ PUSHL $0x27F
+ FLDCW 0(SP)
+ POPL AX
+
+ // copy arguments forward on an even stack
+ MOVL 0(SP), AX // argc
+ LEAL 4(SP), BX // argv
+ SUBL $128, SP // plenty of scratch
+ ANDL $~15, SP
+ MOVL AX, 120(SP) // save argc, argv away
+ MOVL BX, 124(SP)
+
+ // if there is an initcgo, call it to let it
+ // initialize and to set up GS. if not,
+ // we set up GS ourselves.
+ MOVL initcgo(SB), AX
+ TESTL AX, AX
+ JZ 4(PC)
+ CALL AX
+ // skip runtime·ldt0setup(SB) and tls test after initcgo for non-windows
+ CMPL runtime·iswindows(SB), $0
+ JEQ ok
+
+ // skip runtime·ldt0setup(SB) and tls test on Plan 9 in all cases
+ CMPL runtime·isplan9(SB), $1
+ JEQ ok
+
+ // set up %gs
+ CALL runtime·ldt0setup(SB)
+
+ // store through it, to make sure it works
+ get_tls(BX)
+ MOVL $0x123, g(BX)
+ MOVL runtime·tls0(SB), AX
+ CMPL AX, $0x123
+ JEQ ok
+ MOVL AX, 0 // abort
+ok:
+ // set up m and g "registers"
+ get_tls(BX)
+ LEAL runtime·g0(SB), CX
+ MOVL CX, g(BX)
+ LEAL runtime·m0(SB), AX
+ MOVL AX, m(BX)
+
+ // save m->g0 = g0
+ MOVL CX, m_g0(AX)
+
+ // create istack out of the OS stack
+ LEAL (-64*1024+104)(SP), AX // TODO: 104?
+ MOVL AX, g_stackguard(CX)
+ MOVL SP, g_stackbase(CX)
+ CALL runtime·emptyfunc(SB) // fault if stack check is wrong
+
+ // convention is D is always cleared
+ CLD
+
+ CALL runtime·check(SB)
+
+ // saved argc, argv
+ MOVL 120(SP), AX
+ MOVL AX, 0(SP)
+ MOVL 124(SP), AX
+ MOVL AX, 4(SP)
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ PUSHL $runtime·mainstart(SB) // entry
+ PUSHL $0 // arg size
+ CALL runtime·newproc(SB)
+ POPL AX
+ POPL AX
+
+ // start this M
+ CALL runtime·mstart(SB)
+
+ INT $3
+ RET
+
+TEXT runtime·mainstart(SB),7,$0
+ CALL main·init(SB)
+ CALL runtime·initdone(SB)
+ CALL main·main(SB)
+ PUSHL $0
+ CALL runtime·exit(SB)
+ POPL AX
+ INT $3
+ RET
+
+TEXT runtime·breakpoint(SB),7,$0
+ INT $3
+ RET
+
+/*
+ * go-routine
+ */
+
+// void gosave(Gobuf*)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), 7, $0
+ MOVL 4(SP), AX // gobuf
+ LEAL 4(SP), BX // caller's SP
+ MOVL BX, gobuf_sp(AX)
+ MOVL 0(SP), BX // caller's PC
+ MOVL BX, gobuf_pc(AX)
+ get_tls(CX)
+ MOVL g(CX), BX
+ MOVL BX, gobuf_g(AX)
+ RET
+
+// void gogo(Gobuf*, uintptr)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), 7, $0
+ MOVL 8(SP), AX // return 2nd arg
+ MOVL 4(SP), BX // gobuf
+ MOVL gobuf_g(BX), DX
+ MOVL 0(DX), CX // make sure g != nil
+ get_tls(CX)
+ MOVL DX, g(CX)
+ MOVL gobuf_sp(BX), SP // restore SP
+ MOVL gobuf_pc(BX), BX
+ JMP BX
+
+// void gogocall(Gobuf*, void (*fn)(void))
+// restore state from Gobuf but then call fn.
+// (call fn, returning to state in Gobuf)
+TEXT runtime·gogocall(SB), 7, $0
+ MOVL 8(SP), AX // fn
+ MOVL 4(SP), BX // gobuf
+ MOVL gobuf_g(BX), DX
+ get_tls(CX)
+ MOVL DX, g(CX)
+ MOVL 0(DX), CX // make sure g != nil
+ MOVL gobuf_sp(BX), SP // restore SP
+ MOVL gobuf_pc(BX), BX
+ PUSHL BX
+ JMP AX
+ POPL BX // not reached
+
+// void mcall(void (*fn)(G*))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall(SB), 7, $0
+ MOVL fn+0(FP), DI
+
+ get_tls(CX)
+ MOVL g(CX), AX // save state in g->gobuf
+ MOVL 0(SP), BX // caller's PC
+ MOVL BX, (g_sched+gobuf_pc)(AX)
+ LEAL 4(SP), BX // caller's SP
+ MOVL BX, (g_sched+gobuf_sp)(AX)
+ MOVL AX, (g_sched+gobuf_g)(AX)
+
+ // switch to m->g0 & its stack, call fn
+ MOVL m(CX), BX
+ MOVL m_g0(BX), SI
+ CMPL SI, AX // if g == m->g0 call badmcall
+ JNE 2(PC)
+ CALL runtime·badmcall(SB)
+ MOVL SI, g(CX) // g = m->g0
+ MOVL (g_sched+gobuf_sp)(SI), SP // sp = m->g0->gobuf.sp
+ PUSHL AX
+ CALL DI
+ POPL AX
+ CALL runtime·badmcall2(SB)
+ RET
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+TEXT runtime·morestack(SB),7,$0
+ // Cannot grow scheduler stack (m->g0).
+ get_tls(CX)
+ MOVL m(CX), BX
+ MOVL m_g0(BX), SI
+ CMPL g(CX), SI
+ JNE 2(PC)
+ INT $3
+
+ // frame size in DX
+ // arg size in AX
+ // Save in m.
+ MOVL DX, m_moreframesize(BX)
+ MOVL AX, m_moreargsize(BX)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVL 4(SP), DI // f's caller's PC
+ MOVL DI, (m_morebuf+gobuf_pc)(BX)
+ LEAL 8(SP), CX // f's caller's SP
+ MOVL CX, (m_morebuf+gobuf_sp)(BX)
+ MOVL CX, m_moreargp(BX)
+ get_tls(CX)
+ MOVL g(CX), SI
+ MOVL SI, (m_morebuf+gobuf_g)(BX)
+
+ // Set m->morepc to f's PC.
+ MOVL 0(SP), AX
+ MOVL AX, m_morepc(BX)
+
+ // Call newstack on m->g0's stack.
+ MOVL m_g0(BX), BP
+ MOVL BP, g(CX)
+ MOVL (g_sched+gobuf_sp)(BP), AX
+ MOVL -4(AX), BX // fault if CALL would, before smashing SP
+ MOVL AX, SP
+ CALL runtime·newstack(SB)
+ MOVL $0, 0x1003 // crash if newstack returns
+ RET
+
+// Called from reflection library. Mimics morestack,
+// reuses stack growth code to create a frame
+// with the desired args running the desired function.
+//
+// func call(fn *byte, arg *byte, argsize uint32).
+TEXT reflect·call(SB), 7, $0
+ get_tls(CX)
+ MOVL m(CX), BX
+
+ // Save our caller's state as the PC and SP to
+ // restore when returning from f.
+ MOVL 0(SP), AX // our caller's PC
+ MOVL AX, (m_morebuf+gobuf_pc)(BX)
+ LEAL 4(SP), AX // our caller's SP
+ MOVL AX, (m_morebuf+gobuf_sp)(BX)
+ MOVL g(CX), AX
+ MOVL AX, (m_morebuf+gobuf_g)(BX)
+
+ // Set up morestack arguments to call f on a new stack.
+ // We set f's frame size to 1, as a hint to newstack
+ // that this is a call from reflect·call.
+ // If it turns out that f needs a larger frame than
+ // the default stack, f's usual stack growth prolog will
+ // allocate a new segment (and recopy the arguments).
+ MOVL 4(SP), AX // fn
+ MOVL 8(SP), DX // arg frame
+ MOVL 12(SP), CX // arg size
+
+ MOVL AX, m_morepc(BX) // f's PC
+ MOVL DX, m_moreargp(BX) // f's argument pointer
+ MOVL CX, m_moreargsize(BX) // f's argument size
+ MOVL $1, m_moreframesize(BX) // f's frame size
+
+ // Call newstack on m->g0's stack.
+ MOVL m_g0(BX), BP
+ get_tls(CX)
+ MOVL BP, g(CX)
+ MOVL (g_sched+gobuf_sp)(BP), SP
+ CALL runtime·newstack(SB)
+ MOVL $0, 0x1103 // crash if newstack returns
+ RET
+
+
+// Return point when leaving stack.
+TEXT runtime·lessstack(SB), 7, $0
+ // Save return value in m->cret
+ get_tls(CX)
+ MOVL m(CX), BX
+ MOVL AX, m_cret(BX)
+
+ // Call oldstack on m->g0's stack.
+ MOVL m_g0(BX), BP
+ MOVL BP, g(CX)
+ MOVL (g_sched+gobuf_sp)(BP), SP
+ CALL runtime·oldstack(SB)
+ MOVL $0, 0x1004 // crash if oldstack returns
+ RET
+
+
+// bool cas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// }else
+// return 0;
+TEXT runtime·cas(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ MOVL 12(SP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ JZ 3(PC)
+ MOVL $0, AX
+ RET
+ MOVL $1, AX
+ RET
+
+// bool casp(void **p, void *old, void *new)
+// Atomically:
+// if(*p == old){
+// *p = new;
+// return 1;
+// }else
+// return 0;
+TEXT runtime·casp(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ MOVL 12(SP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ JZ 3(PC)
+ MOVL $0, AX
+ RET
+ MOVL $1, AX
+ RET
+
+// uint32 xadd(uint32 volatile *val, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime·xadd(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ MOVL AX, CX
+ LOCK
+ XADDL AX, 0(BX)
+ ADDL CX, AX
+ RET
+
+TEXT runtime·xchg(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime·procyield(SB),7,$0
+ MOVL 4(SP), AX
+again:
+ PAUSE
+ SUBL $1, AX
+ JNZ again
+ RET
+
+TEXT runtime·atomicstorep(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime·atomicstore(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+// void jmpdefer(fn, sp);
+// called from deferreturn.
+// 1. pop the caller
+// 2. sub 5 bytes from the callers return
+// 3. jmp to the argument
+TEXT runtime·jmpdefer(SB), 7, $0
+ MOVL 4(SP), AX // fn
+ MOVL 8(SP), BX // caller sp
+ LEAL -4(BX), SP // caller sp after CALL
+ SUBL $5, (SP) // return to CALL again
+ JMP AX // but first run the deferred function
+
+// Dummy function to use in saved gobuf.PC,
+// to match SP pointing at a return address.
+// The gobuf.PC is unused by the contortions here
+// but setting it to return will make the traceback code work.
+TEXT return<>(SB),7,$0
+ RET
+
+// asmcgocall(void(*fn)(void*), void *arg)
+// Call fn(arg) on the scheduler stack,
+// aligned appropriately for the gcc ABI.
+// See cgocall.c for more details.
+TEXT runtime·asmcgocall(SB),7,$0
+ MOVL fn+0(FP), AX
+ MOVL arg+4(FP), BX
+ MOVL SP, DX
+
+ // Figure out if we need to switch to m->g0 stack.
+ // We get called to create new OS threads too, and those
+ // come in on the m->g0 stack already.
+ get_tls(CX)
+ MOVL m(CX), BP
+ MOVL m_g0(BP), SI
+ MOVL g(CX), DI
+ CMPL SI, DI
+ JEQ 6(PC)
+ MOVL SP, (g_sched+gobuf_sp)(DI)
+ MOVL $return<>(SB), (g_sched+gobuf_pc)(DI)
+ MOVL DI, (g_sched+gobuf_g)(DI)
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), SP
+
+ // Now on a scheduling stack (a pthread-created stack).
+ SUBL $32, SP
+ ANDL $~15, SP // alignment, perhaps unnecessary
+ MOVL DI, 8(SP) // save g
+ MOVL DX, 4(SP) // save SP
+ MOVL BX, 0(SP) // first argument in x86-32 ABI
+ CALL AX
+
+ // Restore registers, g, stack pointer.
+ get_tls(CX)
+ MOVL 8(SP), DI
+ MOVL DI, g(CX)
+ MOVL 4(SP), SP
+ RET
+
+// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
+// See cgocall.c for more details.
+TEXT runtime·cgocallback(SB),7,$12
+ MOVL fn+0(FP), AX
+ MOVL frame+4(FP), BX
+ MOVL framesize+8(FP), DX
+
+ // Save current m->g0->sched.sp on stack and then set it to SP.
+ get_tls(CX)
+ MOVL m(CX), BP
+ MOVL m_g0(BP), SI
+ PUSHL (g_sched+gobuf_sp)(SI)
+ MOVL SP, (g_sched+gobuf_sp)(SI)
+
+ // Switch to m->curg stack and call runtime.cgocallback
+ // with the three arguments. Because we are taking over
+ // the execution of m->curg but *not* resuming what had
+ // been running, we need to save that information (m->curg->gobuf)
+ // so that we can restore it when we're done.
+ // We can restore m->curg->gobuf.sp easily, because calling
+ // runtime.cgocallback leaves SP unchanged upon return.
+ // To save m->curg->gobuf.pc, we push it onto the stack.
+ // This has the added benefit that it looks to the traceback
+ // routine like cgocallback is going to return to that
+ // PC (because we defined cgocallback to have
+ // a frame size of 12, the same amount that we use below),
+ // so that the traceback will seamlessly trace back into
+ // the earlier calls.
+ MOVL m_curg(BP), SI
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), DI // prepare stack as DI
+
+ // Push gobuf.pc
+ MOVL (g_sched+gobuf_pc)(SI), BP
+ SUBL $4, DI
+ MOVL BP, 0(DI)
+
+ // Push arguments to cgocallbackg.
+ // Frame size here must match the frame size above
+ // to trick traceback routines into doing the right thing.
+ SUBL $12, DI
+ MOVL AX, 0(DI)
+ MOVL BX, 4(DI)
+ MOVL DX, 8(DI)
+
+ // Switch stack and make the call.
+ MOVL DI, SP
+ CALL runtime·cgocallbackg(SB)
+
+ // Restore g->gobuf (== m->curg->gobuf) from saved values.
+ get_tls(CX)
+ MOVL g(CX), SI
+ MOVL 12(SP), BP
+ MOVL BP, (g_sched+gobuf_pc)(SI)
+ LEAL (12+4)(SP), DI
+ MOVL DI, (g_sched+gobuf_sp)(SI)
+
+ // Switch back to m->g0's stack and restore m->g0->sched.sp.
+ // (Unlike m->curg, the g0 goroutine never uses sched.pc,
+ // so we do not have to restore it.)
+ MOVL m(CX), BP
+ MOVL m_g0(BP), SI
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), SP
+ POPL (g_sched+gobuf_sp)(SI)
+
+ // Done!
+ RET
+
+// check that SP is in range [g->stackbase, g->stackguard)
+TEXT runtime·stackcheck(SB), 7, $0
+ get_tls(CX)
+ MOVL g(CX), AX
+ CMPL g_stackbase(AX), SP
+ JHI 2(PC)
+ INT $3
+ CMPL SP, g_stackguard(AX)
+ JHI 2(PC)
+ INT $3
+ RET
+
+TEXT runtime·memclr(SB),7,$0
+ MOVL 4(SP), DI // arg 1 addr
+ MOVL 8(SP), CX // arg 2 count
+ MOVL CX, BX
+ ANDL $3, BX
+ SHRL $2, CX
+ MOVL $0, AX
+ CLD
+ REP
+ STOSL
+ MOVL BX, CX
+ REP
+ STOSB
+ RET
+
+TEXT runtime·getcallerpc(SB),7,$0
+ MOVL x+0(FP),AX // addr of first arg
+ MOVL -4(AX),AX // get calling pc
+ RET
+
+TEXT runtime·setcallerpc(SB),7,$0
+ MOVL x+0(FP),AX // addr of first arg
+ MOVL x+4(FP), BX
+ MOVL BX, -4(AX) // set calling pc
+ RET
+
+TEXT runtime·getcallersp(SB), 7, $0
+ MOVL sp+0(FP), AX
+ RET
+
+TEXT runtime·ldt0setup(SB),7,$16
+ // set up ldt 7 to point at tls0
+ // ldt 1 would be fine on Linux, but on OS X, 7 is as low as we can go.
+ // the entry number is just a hint. setldt will set up GS with what it used.
+ MOVL $7, 0(SP)
+ LEAL runtime·tls0(SB), AX
+ MOVL AX, 4(SP)
+ MOVL $32, 8(SP) // sizeof(tls array)
+ CALL runtime·setldt(SB)
+ RET
+
+TEXT runtime·emptyfunc(SB),0,$0
+ RET
+
+TEXT runtime·abort(SB),7,$0
+ INT $0x3
+
+GLOBL runtime·tls0(SB), $32
diff --git a/src/pkg/runtime/386/atomic.c b/src/pkg/runtime/386/atomic.c
new file mode 100644
index 000000000..a4f2a114f
--- /dev/null
+++ b/src/pkg/runtime/386/atomic.c
@@ -0,0 +1,19 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+#pragma textflag 7
+uint32
+runtime·atomicload(uint32 volatile* addr)
+{
+ return *addr;
+}
+
+#pragma textflag 7
+void*
+runtime·atomicloadp(void* volatile* addr)
+{
+ return *addr;
+}
diff --git a/src/pkg/runtime/386/closure.c b/src/pkg/runtime/386/closure.c
new file mode 100644
index 000000000..b4d867711
--- /dev/null
+++ b/src/pkg/runtime/386/closure.c
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+#pragma textflag 7
+// func closure(siz int32,
+// fn func(arg0, arg1, arg2 *ptr, callerpc uintptr, xxx) yyy,
+// arg0, arg1, arg2 *ptr) (func(xxx) yyy)
+void
+runtime·closure(int32 siz, byte *fn, byte *arg0)
+{
+ byte *p, *q, **ret;
+ int32 i, n;
+ int32 pcrel;
+
+ if(siz < 0 || siz%4 != 0)
+ runtime·throw("bad closure size");
+
+ ret = (byte**)((byte*)&arg0 + siz);
+
+ if(siz > 100) {
+ // TODO(rsc): implement stack growth preamble?
+ runtime·throw("closure too big");
+ }
+
+ // compute size of new fn.
+ // must match code laid out below.
+ n = 6+5+2+1; // SUBL MOVL MOVL CLD
+ if(siz <= 4*4)
+ n += 1*siz/4; // MOVSL MOVSL...
+ else
+ n += 6+2; // MOVL REP MOVSL
+ n += 5; // CALL
+ n += 6+1; // ADDL RET
+
+ // store args aligned after code, so gc can find them.
+ n += siz;
+ if(n%4)
+ n += 4 - n%4;
+
+ p = runtime·mal(n);
+ *ret = p;
+ q = p + n - siz;
+
+ if(siz > 0) {
+ runtime·memmove(q, (byte*)&arg0, siz);
+
+ // SUBL $siz, SP
+ *p++ = 0x81;
+ *p++ = 0xec;
+ *(uint32*)p = siz;
+ p += 4;
+
+ // MOVL $q, SI
+ *p++ = 0xbe;
+ *(byte**)p = q;
+ p += 4;
+
+ // MOVL SP, DI
+ *p++ = 0x89;
+ *p++ = 0xe7;
+
+ // CLD
+ *p++ = 0xfc;
+
+ if(siz <= 4*4) {
+ for(i=0; i<siz; i+=4) {
+ // MOVSL
+ *p++ = 0xa5;
+ }
+ } else {
+ // MOVL $(siz/4), CX [32-bit immediate siz/4]
+ *p++ = 0xc7;
+ *p++ = 0xc1;
+ *(uint32*)p = siz/4;
+ p += 4;
+
+ // REP; MOVSL
+ *p++ = 0xf3;
+ *p++ = 0xa5;
+ }
+ }
+
+ // call fn
+ pcrel = fn - (p+5);
+ // direct call with pc-relative offset
+ // CALL fn
+ *p++ = 0xe8;
+ *(int32*)p = pcrel;
+ p += 4;
+
+ // ADDL $siz, SP
+ *p++ = 0x81;
+ *p++ = 0xc4;
+ *(uint32*)p = siz;
+ p += 4;
+
+ // RET
+ *p++ = 0xc3;
+
+ if(p > q)
+ runtime·throw("bad math in sys.closure");
+}
diff --git a/src/pkg/runtime/386/memmove.s b/src/pkg/runtime/386/memmove.s
new file mode 100644
index 000000000..203a8187c
--- /dev/null
+++ b/src/pkg/runtime/386/memmove.s
@@ -0,0 +1,86 @@
+// Inferno's libkern/memmove-386.s
+// http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+TEXT runtime·memmove(SB), 7, $0
+ MOVL to+0(FP), DI
+ MOVL fr+4(FP), SI
+ MOVL n+8(FP), BX
+/*
+ * check and set for backwards
+ */
+ CMPL SI, DI
+ JLS back
+
+/*
+ * forward copy loop
+ */
+forward:
+ MOVL BX, CX
+ SHRL $2, CX
+ ANDL $3, BX
+
+ REP; MOVSL
+ MOVL BX, CX
+ REP; MOVSB
+
+ MOVL to+0(FP),AX
+ RET
+/*
+ * check overlap
+ */
+back:
+ MOVL SI, CX
+ ADDL BX, CX
+ CMPL CX, DI
+ JLS forward
+/*
+ * whole thing backwards has
+ * adjusted addresses
+ */
+
+ ADDL BX, DI
+ ADDL BX, SI
+ STD
+
+/*
+ * copy
+ */
+ MOVL BX, CX
+ SHRL $2, CX
+ ANDL $3, BX
+
+ SUBL $4, DI
+ SUBL $4, SI
+ REP; MOVSL
+
+ ADDL $3, DI
+ ADDL $3, SI
+ MOVL BX, CX
+ REP; MOVSB
+
+ CLD
+ MOVL to+0(FP),AX
+ RET
+
diff --git a/src/pkg/runtime/386/vlop.s b/src/pkg/runtime/386/vlop.s
new file mode 100644
index 000000000..28f6da82d
--- /dev/null
+++ b/src/pkg/runtime/386/vlop.s
@@ -0,0 +1,48 @@
+// Inferno's libkern/vlop-386.s
+// http://code.google.com/p/inferno-os/source/browse/libkern/vlop-386.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+/*
+ * C runtime for 64-bit divide.
+ */
+
+TEXT _mul64by32(SB), 7, $0
+ MOVL r+0(FP), CX
+ MOVL a+4(FP), AX
+ MULL b+12(FP)
+ MOVL AX, 0(CX)
+ MOVL DX, BX
+ MOVL a+8(FP), AX
+ MULL b+12(FP)
+ ADDL AX, BX
+ MOVL BX, 4(CX)
+ RET
+
+TEXT _div64by32(SB), 7, $0
+ MOVL r+12(FP), CX
+ MOVL a+0(FP), AX
+ MOVL a+4(FP), DX
+ DIVL b+8(FP)
+ MOVL DX, 0(CX)
+ RET
diff --git a/src/pkg/runtime/386/vlrt.c b/src/pkg/runtime/386/vlrt.c
new file mode 100644
index 000000000..1631dbe10
--- /dev/null
+++ b/src/pkg/runtime/386/vlrt.c
@@ -0,0 +1,815 @@
+// Inferno's libkern/vlrt-386.c
+// http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-386.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+/*
+ * C runtime for 64-bit divide, others.
+ *
+ * TODO(rsc): The simple functions are dregs--8c knows how
+ * to generate the code directly now. Find and remove.
+ */
+
+typedef unsigned long ulong;
+typedef unsigned int uint;
+typedef unsigned short ushort;
+typedef unsigned char uchar;
+typedef signed char schar;
+
+#define SIGN(n) (1UL<<(n-1))
+
+typedef struct Vlong Vlong;
+struct Vlong
+{
+ union
+ {
+ long long v;
+ struct
+ {
+ ulong lo;
+ ulong hi;
+ };
+ struct
+ {
+ ushort lols;
+ ushort loms;
+ ushort hils;
+ ushort hims;
+ };
+ };
+};
+
+void runtime·abort(void);
+
+void
+_d2v(Vlong *y, double d)
+{
+ union { double d; struct Vlong; } x;
+ ulong xhi, xlo, ylo, yhi;
+ int sh;
+
+ x.d = d;
+
+ xhi = (x.hi & 0xfffff) | 0x100000;
+ xlo = x.lo;
+ sh = 1075 - ((x.hi >> 20) & 0x7ff);
+
+ ylo = 0;
+ yhi = 0;
+ if(sh >= 0) {
+ /* v = (hi||lo) >> sh */
+ if(sh < 32) {
+ if(sh == 0) {
+ ylo = xlo;
+ yhi = xhi;
+ } else {
+ ylo = (xlo >> sh) | (xhi << (32-sh));
+ yhi = xhi >> sh;
+ }
+ } else {
+ if(sh == 32) {
+ ylo = xhi;
+ } else
+ if(sh < 64) {
+ ylo = xhi >> (sh-32);
+ }
+ }
+ } else {
+ /* v = (hi||lo) << -sh */
+ sh = -sh;
+ if(sh <= 10) {
+ ylo = xlo << sh;
+ yhi = (xhi << sh) | (xlo >> (32-sh));
+ } else {
+ /* overflow */
+ yhi = d; /* causes something awful */
+ }
+ }
+ if(x.hi & SIGN(32)) {
+ if(ylo != 0) {
+ ylo = -ylo;
+ yhi = ~yhi;
+ } else
+ yhi = -yhi;
+ }
+
+ y->hi = yhi;
+ y->lo = ylo;
+}
+
+void
+_f2v(Vlong *y, float f)
+{
+
+ _d2v(y, f);
+}
+
+double
+_v2d(Vlong x)
+{
+ if(x.hi & SIGN(32)) {
+ if(x.lo) {
+ x.lo = -x.lo;
+ x.hi = ~x.hi;
+ } else
+ x.hi = -x.hi;
+ return -((long)x.hi*4294967296. + x.lo);
+ }
+ return (long)x.hi*4294967296. + x.lo;
+}
+
+float
+_v2f(Vlong x)
+{
+ return _v2d(x);
+}
+
+ulong _div64by32(Vlong, ulong, ulong*);
+void _mul64by32(Vlong*, Vlong, ulong);
+
+static void
+slowdodiv(Vlong num, Vlong den, Vlong *q, Vlong *r)
+{
+ ulong numlo, numhi, denhi, denlo, quohi, quolo, t;
+ int i;
+
+ numhi = num.hi;
+ numlo = num.lo;
+ denhi = den.hi;
+ denlo = den.lo;
+
+ /*
+ * get a divide by zero
+ */
+ if(denlo==0 && denhi==0) {
+ numlo = numlo / denlo;
+ }
+
+ /*
+ * set up the divisor and find the number of iterations needed
+ */
+ if(numhi >= SIGN(32)) {
+ quohi = SIGN(32);
+ quolo = 0;
+ } else {
+ quohi = numhi;
+ quolo = numlo;
+ }
+ i = 0;
+ while(denhi < quohi || (denhi == quohi && denlo < quolo)) {
+ denhi = (denhi<<1) | (denlo>>31);
+ denlo <<= 1;
+ i++;
+ }
+
+ quohi = 0;
+ quolo = 0;
+ for(; i >= 0; i--) {
+ quohi = (quohi<<1) | (quolo>>31);
+ quolo <<= 1;
+ if(numhi > denhi || (numhi == denhi && numlo >= denlo)) {
+ t = numlo;
+ numlo -= denlo;
+ if(numlo > t)
+ numhi--;
+ numhi -= denhi;
+ quolo |= 1;
+ }
+ denlo = (denlo>>1) | (denhi<<31);
+ denhi >>= 1;
+ }
+
+ if(q) {
+ q->lo = quolo;
+ q->hi = quohi;
+ }
+ if(r) {
+ r->lo = numlo;
+ r->hi = numhi;
+ }
+}
+
+static void
+dodiv(Vlong num, Vlong den, Vlong *qp, Vlong *rp)
+{
+ ulong n;
+ Vlong x, q, r;
+
+ if(den.hi > num.hi || (den.hi == num.hi && den.lo > num.lo)){
+ if(qp) {
+ qp->hi = 0;
+ qp->lo = 0;
+ }
+ if(rp) {
+ rp->hi = num.hi;
+ rp->lo = num.lo;
+ }
+ return;
+ }
+
+ if(den.hi != 0){
+ q.hi = 0;
+ n = num.hi/den.hi;
+ _mul64by32(&x, den, n);
+ if(x.hi > num.hi || (x.hi == num.hi && x.lo > num.lo))
+ slowdodiv(num, den, &q, &r);
+ else {
+ q.lo = n;
+ r.v = num.v - x.v;
+ }
+ } else {
+ if(num.hi >= den.lo){
+ q.hi = n = num.hi/den.lo;
+ num.hi -= den.lo*n;
+ } else {
+ q.hi = 0;
+ }
+ q.lo = _div64by32(num, den.lo, &r.lo);
+ r.hi = 0;
+ }
+ if(qp) {
+ qp->lo = q.lo;
+ qp->hi = q.hi;
+ }
+ if(rp) {
+ rp->lo = r.lo;
+ rp->hi = r.hi;
+ }
+}
+
+void
+_divvu(Vlong *q, Vlong n, Vlong d)
+{
+
+ if(n.hi == 0 && d.hi == 0) {
+ q->hi = 0;
+ q->lo = n.lo / d.lo;
+ return;
+ }
+ dodiv(n, d, q, 0);
+}
+
+void
+runtime·uint64div(Vlong n, Vlong d, Vlong q)
+{
+ _divvu(&q, n, d);
+}
+
+void
+_modvu(Vlong *r, Vlong n, Vlong d)
+{
+
+ if(n.hi == 0 && d.hi == 0) {
+ r->hi = 0;
+ r->lo = n.lo % d.lo;
+ return;
+ }
+ dodiv(n, d, 0, r);
+}
+
+void
+runtime·uint64mod(Vlong n, Vlong d, Vlong q)
+{
+ _modvu(&q, n, d);
+}
+
+static void
+vneg(Vlong *v)
+{
+
+ if(v->lo == 0) {
+ v->hi = -v->hi;
+ return;
+ }
+ v->lo = -v->lo;
+ v->hi = ~v->hi;
+}
+
+void
+_divv(Vlong *q, Vlong n, Vlong d)
+{
+ long nneg, dneg;
+
+ if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
+ if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
+ // special case: 32-bit -0x80000000 / -1 causes divide error,
+ // but it's okay in this 64-bit context.
+ q->lo = 0x80000000;
+ q->hi = 0;
+ return;
+ }
+ q->lo = (long)n.lo / (long)d.lo;
+ q->hi = ((long)q->lo) >> 31;
+ return;
+ }
+ nneg = n.hi >> 31;
+ if(nneg)
+ vneg(&n);
+ dneg = d.hi >> 31;
+ if(dneg)
+ vneg(&d);
+ dodiv(n, d, q, 0);
+ if(nneg != dneg)
+ vneg(q);
+}
+
+void
+runtime·int64div(Vlong n, Vlong d, Vlong q)
+{
+ _divv(&q, n, d);
+}
+
+void
+_modv(Vlong *r, Vlong n, Vlong d)
+{
+ long nneg, dneg;
+
+ if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
+ if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
+ // special case: 32-bit -0x80000000 % -1 causes divide error,
+ // but it's okay in this 64-bit context.
+ r->lo = 0;
+ r->hi = 0;
+ return;
+ }
+ r->lo = (long)n.lo % (long)d.lo;
+ r->hi = ((long)r->lo) >> 31;
+ return;
+ }
+ nneg = n.hi >> 31;
+ if(nneg)
+ vneg(&n);
+ dneg = d.hi >> 31;
+ if(dneg)
+ vneg(&d);
+ dodiv(n, d, 0, r);
+ if(nneg)
+ vneg(r);
+}
+
+void
+runtime·int64mod(Vlong n, Vlong d, Vlong q)
+{
+ _modv(&q, n, d);
+}
+
+void
+_rshav(Vlong *r, Vlong a, int b)
+{
+ long t;
+
+ t = a.hi;
+ if(b >= 32) {
+ r->hi = t>>31;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->lo = t>>31;
+ return;
+ }
+ r->lo = t >> (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->hi = t;
+ r->lo = a.lo;
+ return;
+ }
+ r->hi = t >> b;
+ r->lo = (t << (32-b)) | (a.lo >> b);
+}
+
+void
+_rshlv(Vlong *r, Vlong a, int b)
+{
+ ulong t;
+
+ t = a.hi;
+ if(b >= 32) {
+ r->hi = 0;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->lo = 0;
+ return;
+ }
+ r->lo = t >> (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->hi = t;
+ r->lo = a.lo;
+ return;
+ }
+ r->hi = t >> b;
+ r->lo = (t << (32-b)) | (a.lo >> b);
+}
+
+void
+_lshv(Vlong *r, Vlong a, int b)
+{
+ ulong t;
+
+ t = a.lo;
+ if(b >= 32) {
+ r->lo = 0;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->hi = 0;
+ return;
+ }
+ r->hi = t << (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->lo = t;
+ r->hi = a.hi;
+ return;
+ }
+ r->lo = t << b;
+ r->hi = (t >> (32-b)) | (a.hi << b);
+}
+
+void
+_andv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi & b.hi;
+ r->lo = a.lo & b.lo;
+}
+
+void
+_orv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi | b.hi;
+ r->lo = a.lo | b.lo;
+}
+
+void
+_xorv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi ^ b.hi;
+ r->lo = a.lo ^ b.lo;
+}
+
+void
+_vpp(Vlong *l, Vlong *r)
+{
+
+ l->hi = r->hi;
+ l->lo = r->lo;
+ r->lo++;
+ if(r->lo == 0)
+ r->hi++;
+}
+
+void
+_vmm(Vlong *l, Vlong *r)
+{
+
+ l->hi = r->hi;
+ l->lo = r->lo;
+ if(r->lo == 0)
+ r->hi--;
+ r->lo--;
+}
+
+void
+_ppv(Vlong *l, Vlong *r)
+{
+
+ r->lo++;
+ if(r->lo == 0)
+ r->hi++;
+ l->hi = r->hi;
+ l->lo = r->lo;
+}
+
+void
+_mmv(Vlong *l, Vlong *r)
+{
+
+ if(r->lo == 0)
+ r->hi--;
+ r->lo--;
+ l->hi = r->hi;
+ l->lo = r->lo;
+}
+
+void
+_vasop(Vlong *ret, void *lv, void fn(Vlong*, Vlong, Vlong), int type, Vlong rv)
+{
+ Vlong t, u;
+
+ u.lo = 0;
+ u.hi = 0;
+ switch(type) {
+ default:
+ runtime·abort();
+ break;
+
+ case 1: /* schar */
+ t.lo = *(schar*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(schar*)lv = u.lo;
+ break;
+
+ case 2: /* uchar */
+ t.lo = *(uchar*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(uchar*)lv = u.lo;
+ break;
+
+ case 3: /* short */
+ t.lo = *(short*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(short*)lv = u.lo;
+ break;
+
+ case 4: /* ushort */
+ t.lo = *(ushort*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(ushort*)lv = u.lo;
+ break;
+
+ case 9: /* int */
+ t.lo = *(int*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(int*)lv = u.lo;
+ break;
+
+ case 10: /* uint */
+ t.lo = *(uint*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(uint*)lv = u.lo;
+ break;
+
+ case 5: /* long */
+ t.lo = *(long*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(long*)lv = u.lo;
+ break;
+
+ case 6: /* ulong */
+ t.lo = *(ulong*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(ulong*)lv = u.lo;
+ break;
+
+ case 7: /* vlong */
+ case 8: /* uvlong */
+ fn(&u, *(Vlong*)lv, rv);
+ *(Vlong*)lv = u;
+ break;
+ }
+ *ret = u;
+}
+
+void
+_p2v(Vlong *ret, void *p)
+{
+ long t;
+
+ t = (ulong)p;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sl2v(Vlong *ret, long sl)
+{
+ long t;
+
+ t = sl;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_ul2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_si2v(Vlong *ret, int si)
+{
+ long t;
+
+ t = si;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_ui2v(Vlong *ret, uint ui)
+{
+ long t;
+
+ t = ui;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sh2v(Vlong *ret, long sh)
+{
+ long t;
+
+ t = (sh << 16) >> 16;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_uh2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul & 0xffff;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sc2v(Vlong *ret, long uc)
+{
+ long t;
+
+ t = (uc << 24) >> 24;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_uc2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul & 0xff;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+long
+_v2sc(Vlong rv)
+{
+ long t;
+
+ t = rv.lo & 0xff;
+ return (t << 24) >> 24;
+}
+
+long
+_v2uc(Vlong rv)
+{
+
+ return rv.lo & 0xff;
+}
+
+long
+_v2sh(Vlong rv)
+{
+ long t;
+
+ t = rv.lo & 0xffff;
+ return (t << 16) >> 16;
+}
+
+long
+_v2uh(Vlong rv)
+{
+
+ return rv.lo & 0xffff;
+}
+
+long
+_v2sl(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2ul(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2si(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2ui(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+int
+_testv(Vlong rv)
+{
+ return rv.lo || rv.hi;
+}
+
+int
+_eqv(Vlong lv, Vlong rv)
+{
+ return lv.lo == rv.lo && lv.hi == rv.hi;
+}
+
+int
+_nev(Vlong lv, Vlong rv)
+{
+ return lv.lo != rv.lo || lv.hi != rv.hi;
+}
+
+int
+_ltv(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi < (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo < rv.lo);
+}
+
+int
+_lev(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi < (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo <= rv.lo);
+}
+
+int
+_gtv(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi > (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo > rv.lo);
+}
+
+int
+_gev(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi > (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo >= rv.lo);
+}
+
+int
+_lov(Vlong lv, Vlong rv)
+{
+ return lv.hi < rv.hi ||
+ (lv.hi == rv.hi && lv.lo < rv.lo);
+}
+
+int
+_lsv(Vlong lv, Vlong rv)
+{
+ return lv.hi < rv.hi ||
+ (lv.hi == rv.hi && lv.lo <= rv.lo);
+}
+
+int
+_hiv(Vlong lv, Vlong rv)
+{
+ return lv.hi > rv.hi ||
+ (lv.hi == rv.hi && lv.lo > rv.lo);
+}
+
+int
+_hsv(Vlong lv, Vlong rv)
+{
+ return lv.hi > rv.hi ||
+ (lv.hi == rv.hi && lv.lo >= rv.lo);
+}
diff --git a/src/pkg/runtime/Makefile b/src/pkg/runtime/Makefile
new file mode 100644
index 000000000..64bd2b771
--- /dev/null
+++ b/src/pkg/runtime/Makefile
@@ -0,0 +1,168 @@
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../Make.inc
+
+TARG=runtime
+
+# Set SIZE to 32 or 64.
+SIZE_386=32
+SIZE_amd64=64
+SIZE_arm=32
+SIZE=$(SIZE_$(GOARCH))
+
+CFLAGS_windows=-D__WINDOWS__
+CFLAGS=-I$(GOOS) -I$(GOARCH) -I$(GOOS)/$(GOARCH) -wF $(CFLAGS_$(GOARCH)) $(CFLAGS_$(GOOS))
+
+GOFILES=\
+ debug.go\
+ error.go\
+ extern.go\
+ mem.go\
+ sig.go\
+ softfloat64.go\
+ type.go\
+ version.go\
+ version_$(GOOS).go\
+ version_$(GOARCH).go\
+ runtime_defs.go\
+
+CLEANFILES+=version.go version_*.go
+
+OFILES_windows=\
+ syscall.$O\
+
+# 386-specific object files
+OFILES_386=\
+ vlop.$O\
+ vlrt.$O\
+
+# arm-specific object files
+OFILES_arm=\
+ memset.$O\
+ softfloat.$O\
+ vlop.$O\
+ vlrt.$O\
+
+OFILES=\
+ asm.$O\
+ atomic.$O\
+ cgocall.$O\
+ chan.$O\
+ closure.$O\
+ cpuprof.$O\
+ float.$O\
+ complex.$O\
+ hashmap.$O\
+ iface.$O\
+ malloc.$O\
+ mcache.$O\
+ mcentral.$O\
+ mem.$O\
+ memmove.$O\
+ mfinal.$O\
+ mfixalloc.$O\
+ mgc0.$O\
+ mheap.$O\
+ mprof.$O\
+ msize.$O\
+ print.$O\
+ proc.$O\
+ rune.$O\
+ runtime.$O\
+ runtime1.$O\
+ rt0.$O\
+ sema.$O\
+ signal.$O\
+ sigqueue.$O\
+ slice.$O\
+ string.$O\
+ symtab.$O\
+ sys.$O\
+ thread.$O\
+ traceback.$O\
+ $(OFILES_$(GOARCH))\
+ $(OFILES_$(GOOS))\
+
+HFILES=\
+ cgocall.h\
+ runtime.h\
+ hashmap.h\
+ malloc.h\
+ stack.h\
+ $(GOARCH)/asm.h\
+ $(GOOS)/os.h\
+ $(GOOS)/signals.h\
+ $(GOOS)/$(GOARCH)/defs.h\
+
+GOFILES+=$(GOFILES_$(GOOS))
+
+# For use by cgo.
+INSTALLFILES=$(pkgdir)/runtime.h $(pkgdir)/cgocall.h
+
+# special, out of the way compiler flag that means "add runtime metadata to output"
+GC+= -+
+
+include ../../Make.pkg
+
+$(pkgdir)/%.h: %.h
+ @test -d $(QUOTED_GOROOT)/pkg && mkdir -p $(pkgdir)
+ cp $< "$@"
+
+clean: clean-local
+
+clean-local:
+ rm -f goc2c mkversion version.go */asm.h runtime.acid.* runtime_defs.go $$(ls *.goc | sed 's/goc$$/c/')
+
+$(GOARCH)/asm.h: mkasmh.sh runtime.acid.$(GOARCH)
+ ./mkasmh.sh >$@.x
+ mv -f $@.x $@
+
+goc2c: goc2c.c
+ quietgcc -o $@ -I "$(GOROOT)/include" $< "$(GOROOT)/lib/lib9.a"
+
+mkversion: mkversion.c
+ quietgcc -o $@ -I "$(GOROOT)/include" $< "$(GOROOT)/lib/lib9.a"
+
+version.go: mkversion
+ GOROOT="$(GOROOT_FINAL)" ./mkversion >version.go
+
+version_$(GOARCH).go:
+ (echo 'package runtime'; echo 'const theGoarch = "$(GOARCH)"') >$@
+
+version_$(GOOS).go:
+ (echo 'package runtime'; echo 'const theGoos = "$(GOOS)"') >$@
+
+%.c: %.goc goc2c
+ ./goc2c "`pwd`/$<" > $@.tmp
+ mv -f $@.tmp $@
+
+%.$O: $(GOARCH)/%.c $(HFILES)
+ $(CC) $(CFLAGS) $<
+
+%.$O: $(GOOS)/%.c $(HFILES)
+ $(CC) $(CFLAGS) $<
+
+%.$O: $(GOOS)/$(GOARCH)/%.c $(HFILES)
+ $(CC) $(CFLAGS) $<
+
+%.$O: $(GOARCH)/%.s $(GOARCH)/asm.h
+ $(AS) $<
+
+%.$O: $(GOOS)/$(GOARCH)/%.s $(GOARCH)/asm.h
+ $(AS) $<
+
+# for discovering offsets inside structs when debugging
+runtime.acid.$(GOARCH): runtime.h proc.c
+ $(CC) $(CFLAGS) -a proc.c >$@
+
+# 386 traceback is really amd64 traceback
+ifeq ($(GOARCH),386)
+traceback.$O: amd64/traceback.c
+ $(CC) $(CFLAGS) $<
+endif
+
+runtime_defs.go: proc.c iface.c hashmap.c chan.c
+ CC="$(CC)" CFLAGS="$(CFLAGS)" ./mkgodefs.sh $^ > $@.x
+ mv -f $@.x $@
diff --git a/src/pkg/runtime/amd64/arch.h b/src/pkg/runtime/amd64/arch.h
new file mode 100644
index 000000000..fe10fd89f
--- /dev/null
+++ b/src/pkg/runtime/amd64/arch.h
@@ -0,0 +1,3 @@
+enum {
+ thechar = '6'
+};
diff --git a/src/pkg/runtime/amd64/asm.s b/src/pkg/runtime/amd64/asm.s
new file mode 100644
index 000000000..3e3818c10
--- /dev/null
+++ b/src/pkg/runtime/amd64/asm.s
@@ -0,0 +1,577 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "amd64/asm.h"
+
+TEXT _rt0_amd64(SB),7,$-8
+ // copy arguments forward on an even stack
+ MOVQ 0(DI), AX // argc
+ LEAQ 8(DI), BX // argv
+ SUBQ $(4*8+7), SP // 2args 2auto
+ ANDQ $~15, SP
+ MOVQ AX, 16(SP)
+ MOVQ BX, 24(SP)
+
+ // if there is an initcgo, call it.
+ MOVQ initcgo(SB), AX
+ TESTQ AX, AX
+ JZ needtls
+ CALL AX
+ CMPL runtime·iswindows(SB), $0
+ JEQ ok
+
+needtls:
+ LEAQ runtime·tls0(SB), DI
+ CALL runtime·settls(SB)
+
+ // store through it, to make sure it works
+ get_tls(BX)
+ MOVQ $0x123, g(BX)
+ MOVQ runtime·tls0(SB), AX
+ CMPQ AX, $0x123
+ JEQ 2(PC)
+ MOVL AX, 0 // abort
+ok:
+ // set the per-goroutine and per-mach "registers"
+ get_tls(BX)
+ LEAQ runtime·g0(SB), CX
+ MOVQ CX, g(BX)
+ LEAQ runtime·m0(SB), AX
+ MOVQ AX, m(BX)
+
+ // save m->g0 = g0
+ MOVQ CX, m_g0(AX)
+
+ // create istack out of the given (operating system) stack
+ LEAQ (-8192+104)(SP), AX
+ MOVQ AX, g_stackguard(CX)
+ MOVQ SP, g_stackbase(CX)
+
+ CLD // convention is D is always left cleared
+ CALL runtime·check(SB)
+
+ MOVL 16(SP), AX // copy argc
+ MOVL AX, 0(SP)
+ MOVQ 24(SP), AX // copy argv
+ MOVQ AX, 8(SP)
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ PUSHQ $runtime·mainstart(SB) // entry
+ PUSHQ $0 // arg size
+ CALL runtime·newproc(SB)
+ POPQ AX
+ POPQ AX
+
+ // start this M
+ CALL runtime·mstart(SB)
+
+ CALL runtime·notok(SB) // never returns
+ RET
+
+TEXT runtime·mainstart(SB),7,$0
+ CALL main·init(SB)
+ CALL runtime·initdone(SB)
+ CALL main·main(SB)
+ PUSHQ $0
+ CALL runtime·exit(SB)
+ POPQ AX
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·breakpoint(SB),7,$0
+ BYTE $0xcc
+ RET
+
+/*
+ * go-routine
+ */
+
+// void gosave(Gobuf*)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), 7, $0
+ MOVQ 8(SP), AX // gobuf
+ LEAQ 8(SP), BX // caller's SP
+ MOVQ BX, gobuf_sp(AX)
+ MOVQ 0(SP), BX // caller's PC
+ MOVQ BX, gobuf_pc(AX)
+ get_tls(CX)
+ MOVQ g(CX), BX
+ MOVQ BX, gobuf_g(AX)
+ RET
+
+// void gogo(Gobuf*, uintptr)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), 7, $0
+ MOVQ 16(SP), AX // return 2nd arg
+ MOVQ 8(SP), BX // gobuf
+ MOVQ gobuf_g(BX), DX
+ MOVQ 0(DX), CX // make sure g != nil
+ get_tls(CX)
+ MOVQ DX, g(CX)
+ MOVQ gobuf_sp(BX), SP // restore SP
+ MOVQ gobuf_pc(BX), BX
+ JMP BX
+
+// void gogocall(Gobuf*, void (*fn)(void))
+// restore state from Gobuf but then call fn.
+// (call fn, returning to state in Gobuf)
+TEXT runtime·gogocall(SB), 7, $0
+ MOVQ 16(SP), AX // fn
+ MOVQ 8(SP), BX // gobuf
+ MOVQ gobuf_g(BX), DX
+ get_tls(CX)
+ MOVQ DX, g(CX)
+ MOVQ 0(DX), CX // make sure g != nil
+ MOVQ gobuf_sp(BX), SP // restore SP
+ MOVQ gobuf_pc(BX), BX
+ PUSHQ BX
+ JMP AX
+ POPQ BX // not reached
+
+// void mcall(void (*fn)(G*))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall(SB), 7, $0
+ MOVQ fn+0(FP), DI
+
+ get_tls(CX)
+ MOVQ g(CX), AX // save state in g->gobuf
+ MOVQ 0(SP), BX // caller's PC
+ MOVQ BX, (g_sched+gobuf_pc)(AX)
+ LEAQ 8(SP), BX // caller's SP
+ MOVQ BX, (g_sched+gobuf_sp)(AX)
+ MOVQ AX, (g_sched+gobuf_g)(AX)
+
+ // switch to m->g0 & its stack, call fn
+ MOVQ m(CX), BX
+ MOVQ m_g0(BX), SI
+ CMPQ SI, AX // if g == m->g0 call badmcall
+ JNE 2(PC)
+ CALL runtime·badmcall(SB)
+ MOVQ SI, g(CX) // g = m->g0
+ MOVQ (g_sched+gobuf_sp)(SI), SP // sp = m->g0->gobuf.sp
+ PUSHQ AX
+ CALL DI
+ POPQ AX
+ CALL runtime·badmcall2(SB)
+ RET
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+// Caller has already done get_tls(CX); MOVQ m(CX), BX.
+TEXT runtime·morestack(SB),7,$0
+ // Cannot grow scheduler stack (m->g0).
+ MOVQ m_g0(BX), SI
+ CMPQ g(CX), SI
+ JNE 2(PC)
+ INT $3
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVQ 8(SP), AX // f's caller's PC
+ MOVQ AX, (m_morebuf+gobuf_pc)(BX)
+ LEAQ 16(SP), AX // f's caller's SP
+ MOVQ AX, (m_morebuf+gobuf_sp)(BX)
+ MOVQ AX, m_moreargp(BX)
+ get_tls(CX)
+ MOVQ g(CX), SI
+ MOVQ SI, (m_morebuf+gobuf_g)(BX)
+
+ // Set m->morepc to f's PC.
+ MOVQ 0(SP), AX
+ MOVQ AX, m_morepc(BX)
+
+ // Call newstack on m->g0's stack.
+ MOVQ m_g0(BX), BP
+ MOVQ BP, g(CX)
+ MOVQ (g_sched+gobuf_sp)(BP), SP
+ CALL runtime·newstack(SB)
+ MOVQ $0, 0x1003 // crash if newstack returns
+ RET
+
+// Called from reflection library. Mimics morestack,
+// reuses stack growth code to create a frame
+// with the desired args running the desired function.
+//
+// func call(fn *byte, arg *byte, argsize uint32).
+TEXT reflect·call(SB), 7, $0
+ get_tls(CX)
+ MOVQ m(CX), BX
+
+ // Save our caller's state as the PC and SP to
+ // restore when returning from f.
+ MOVQ 0(SP), AX // our caller's PC
+ MOVQ AX, (m_morebuf+gobuf_pc)(BX)
+ LEAQ 8(SP), AX // our caller's SP
+ MOVQ AX, (m_morebuf+gobuf_sp)(BX)
+ MOVQ g(CX), AX
+ MOVQ AX, (m_morebuf+gobuf_g)(BX)
+
+ // Set up morestack arguments to call f on a new stack.
+ // We set f's frame size to 1, as a hint to newstack
+ // that this is a call from reflect·call.
+ // If it turns out that f needs a larger frame than
+ // the default stack, f's usual stack growth prolog will
+ // allocate a new segment (and recopy the arguments).
+ MOVQ 8(SP), AX // fn
+ MOVQ 16(SP), DX // arg frame
+ MOVL 24(SP), CX // arg size
+
+ MOVQ AX, m_morepc(BX) // f's PC
+ MOVQ DX, m_moreargp(BX) // argument frame pointer
+ MOVL CX, m_moreargsize(BX) // f's argument size
+ MOVL $1, m_moreframesize(BX) // f's frame size
+
+ // Call newstack on m->g0's stack.
+ MOVQ m_g0(BX), BP
+ get_tls(CX)
+ MOVQ BP, g(CX)
+ MOVQ (g_sched+gobuf_sp)(BP), SP
+ CALL runtime·newstack(SB)
+ MOVQ $0, 0x1103 // crash if newstack returns
+ RET
+
+// Return point when leaving stack.
+TEXT runtime·lessstack(SB), 7, $0
+ // Save return value in m->cret
+ get_tls(CX)
+ MOVQ m(CX), BX
+ MOVQ AX, m_cret(BX)
+
+ // Call oldstack on m->g0's stack.
+ MOVQ m_g0(BX), BP
+ MOVQ BP, g(CX)
+ MOVQ (g_sched+gobuf_sp)(BP), SP
+ CALL runtime·oldstack(SB)
+ MOVQ $0, 0x1004 // crash if oldstack returns
+ RET
+
+// morestack trampolines
+TEXT runtime·morestack00(SB),7,$0
+ get_tls(CX)
+ MOVQ m(CX), BX
+ MOVQ $0, AX
+ MOVQ AX, m_moreframesize(BX)
+ MOVQ $runtime·morestack(SB), AX
+ JMP AX
+
+TEXT runtime·morestack01(SB),7,$0
+ get_tls(CX)
+ MOVQ m(CX), BX
+ SHLQ $32, AX
+ MOVQ AX, m_moreframesize(BX)
+ MOVQ $runtime·morestack(SB), AX
+ JMP AX
+
+TEXT runtime·morestack10(SB),7,$0
+ get_tls(CX)
+ MOVQ m(CX), BX
+ MOVLQZX AX, AX
+ MOVQ AX, m_moreframesize(BX)
+ MOVQ $runtime·morestack(SB), AX
+ JMP AX
+
+TEXT runtime·morestack11(SB),7,$0
+ get_tls(CX)
+ MOVQ m(CX), BX
+ MOVQ AX, m_moreframesize(BX)
+ MOVQ $runtime·morestack(SB), AX
+ JMP AX
+
+// subcases of morestack01
+// with const of 8,16,...48
+TEXT runtime·morestack8(SB),7,$0
+ PUSHQ $1
+ MOVQ $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack16(SB),7,$0
+ PUSHQ $2
+ MOVQ $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack24(SB),7,$0
+ PUSHQ $3
+ MOVQ $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack32(SB),7,$0
+ PUSHQ $4
+ MOVQ $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack40(SB),7,$0
+ PUSHQ $5
+ MOVQ $morestack<>(SB), AX
+ JMP AX
+
+TEXT runtime·morestack48(SB),7,$0
+ PUSHQ $6
+ MOVQ $morestack<>(SB), AX
+ JMP AX
+
+TEXT morestack<>(SB),7,$0
+ get_tls(CX)
+ MOVQ m(CX), BX
+ POPQ AX
+ SHLQ $35, AX
+ MOVQ AX, m_moreframesize(BX)
+ MOVQ $runtime·morestack(SB), AX
+ JMP AX
+
+// bool cas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime·cas(SB), 7, $0
+ MOVQ 8(SP), BX
+ MOVL 16(SP), AX
+ MOVL 20(SP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ JZ 3(PC)
+ MOVL $0, AX
+ RET
+ MOVL $1, AX
+ RET
+
+// bool casp(void **val, void *old, void *new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime·casp(SB), 7, $0
+ MOVQ 8(SP), BX
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LOCK
+ CMPXCHGQ CX, 0(BX)
+ JZ 3(PC)
+ MOVL $0, AX
+ RET
+ MOVL $1, AX
+ RET
+
+// uint32 xadd(uint32 volatile *val, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime·xadd(SB), 7, $0
+ MOVQ 8(SP), BX
+ MOVL 16(SP), AX
+ MOVL AX, CX
+ LOCK
+ XADDL AX, 0(BX)
+ ADDL CX, AX
+ RET
+
+TEXT runtime·xchg(SB), 7, $0
+ MOVQ 8(SP), BX
+ MOVL 16(SP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime·procyield(SB),7,$0
+ MOVL 8(SP), AX
+again:
+ PAUSE
+ SUBL $1, AX
+ JNZ again
+ RET
+
+TEXT runtime·atomicstorep(SB), 7, $0
+ MOVQ 8(SP), BX
+ MOVQ 16(SP), AX
+ XCHGQ AX, 0(BX)
+ RET
+
+TEXT runtime·atomicstore(SB), 7, $0
+ MOVQ 8(SP), BX
+ MOVL 16(SP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+// void jmpdefer(fn, sp);
+// called from deferreturn.
+// 1. pop the caller
+// 2. sub 5 bytes from the callers return
+// 3. jmp to the argument
+TEXT runtime·jmpdefer(SB), 7, $0
+ MOVQ 8(SP), AX // fn
+ MOVQ 16(SP), BX // caller sp
+ LEAQ -8(BX), SP // caller sp after CALL
+ SUBQ $5, (SP) // return to CALL again
+ JMP AX // but first run the deferred function
+
+// Dummy function to use in saved gobuf.PC,
+// to match SP pointing at a return address.
+// The gobuf.PC is unused by the contortions here
+// but setting it to return will make the traceback code work.
+TEXT return<>(SB),7,$0
+ RET
+
+// asmcgocall(void(*fn)(void*), void *arg)
+// Call fn(arg) on the scheduler stack,
+// aligned appropriately for the gcc ABI.
+// See cgocall.c for more details.
+TEXT runtime·asmcgocall(SB),7,$0
+ MOVQ fn+0(FP), AX
+ MOVQ arg+8(FP), BX
+ MOVQ SP, DX
+
+ // Figure out if we need to switch to m->g0 stack.
+ // We get called to create new OS threads too, and those
+ // come in on the m->g0 stack already.
+ get_tls(CX)
+ MOVQ m(CX), BP
+ MOVQ m_g0(BP), SI
+ MOVQ g(CX), DI
+ CMPQ SI, DI
+ JEQ 6(PC)
+ MOVQ SP, (g_sched+gobuf_sp)(DI)
+ MOVQ $return<>(SB), (g_sched+gobuf_pc)(DI)
+ MOVQ DI, (g_sched+gobuf_g)(DI)
+ MOVQ SI, g(CX)
+ MOVQ (g_sched+gobuf_sp)(SI), SP
+
+ // Now on a scheduling stack (a pthread-created stack).
+ SUBQ $32, SP
+ ANDQ $~15, SP // alignment for gcc ABI
+ MOVQ DI, 16(SP) // save g
+ MOVQ DX, 8(SP) // save SP
+ MOVQ BX, DI // DI = first argument in AMD64 ABI
+ MOVQ BX, CX // CX = first argument in Win64
+ CALL AX
+
+ // Restore registers, g, stack pointer.
+ get_tls(CX)
+ MOVQ 16(SP), DI
+ MOVQ DI, g(CX)
+ MOVQ 8(SP), SP
+ RET
+
+// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
+// See cgocall.c for more details.
+TEXT runtime·cgocallback(SB),7,$24
+ MOVQ fn+0(FP), AX
+ MOVQ frame+8(FP), BX
+ MOVQ framesize+16(FP), DX
+
+ // Save current m->g0->sched.sp on stack and then set it to SP.
+ get_tls(CX)
+ MOVQ m(CX), BP
+ MOVQ m_g0(BP), SI
+ PUSHQ (g_sched+gobuf_sp)(SI)
+ MOVQ SP, (g_sched+gobuf_sp)(SI)
+
+ // Switch to m->curg stack and call runtime.cgocallback
+ // with the three arguments. Because we are taking over
+ // the execution of m->curg but *not* resuming what had
+ // been running, we need to save that information (m->curg->gobuf)
+ // so that we can restore it when we're done.
+ // We can restore m->curg->gobuf.sp easily, because calling
+ // runtime.cgocallback leaves SP unchanged upon return.
+ // To save m->curg->gobuf.pc, we push it onto the stack.
+ // This has the added benefit that it looks to the traceback
+ // routine like cgocallback is going to return to that
+ // PC (because we defined cgocallback to have
+ // a frame size of 24, the same amount that we use below),
+ // so that the traceback will seamlessly trace back into
+ // the earlier calls.
+ MOVQ m_curg(BP), SI
+ MOVQ SI, g(CX)
+ MOVQ (g_sched+gobuf_sp)(SI), DI // prepare stack as DI
+
+ // Push gobuf.pc
+ MOVQ (g_sched+gobuf_pc)(SI), BP
+ SUBQ $8, DI
+ MOVQ BP, 0(DI)
+
+ // Push arguments to cgocallbackg.
+ // Frame size here must match the frame size above
+ // to trick traceback routines into doing the right thing.
+ SUBQ $24, DI
+ MOVQ AX, 0(DI)
+ MOVQ BX, 8(DI)
+ MOVQ DX, 16(DI)
+
+ // Switch stack and make the call.
+ MOVQ DI, SP
+ CALL runtime·cgocallbackg(SB)
+
+ // Restore g->gobuf (== m->curg->gobuf) from saved values.
+ get_tls(CX)
+ MOVQ g(CX), SI
+ MOVQ 24(SP), BP
+ MOVQ BP, (g_sched+gobuf_pc)(SI)
+ LEAQ (24+8)(SP), DI
+ MOVQ DI, (g_sched+gobuf_sp)(SI)
+
+ // Switch back to m->g0's stack and restore m->g0->sched.sp.
+ // (Unlike m->curg, the g0 goroutine never uses sched.pc,
+ // so we do not have to restore it.)
+ MOVQ m(CX), BP
+ MOVQ m_g0(BP), SI
+ MOVQ SI, g(CX)
+ MOVQ (g_sched+gobuf_sp)(SI), SP
+ POPQ (g_sched+gobuf_sp)(SI)
+
+ // Done!
+ RET
+
+// check that SP is in range [g->stackbase, g->stackguard)
+TEXT runtime·stackcheck(SB), 7, $0
+ get_tls(CX)
+ MOVQ g(CX), AX
+ CMPQ g_stackbase(AX), SP
+ JHI 2(PC)
+ INT $3
+ CMPQ SP, g_stackguard(AX)
+ JHI 2(PC)
+ INT $3
+ RET
+
+TEXT runtime·memclr(SB),7,$0
+ MOVQ 8(SP), DI // arg 1 addr
+ MOVQ 16(SP), CX // arg 2 count
+ MOVQ CX, BX
+ ANDQ $7, BX
+ SHRQ $3, CX
+ MOVQ $0, AX
+ CLD
+ REP
+ STOSQ
+ MOVQ BX, CX
+ REP
+ STOSB
+ RET
+
+TEXT runtime·getcallerpc(SB),7,$0
+ MOVQ x+0(FP),AX // addr of first arg
+ MOVQ -8(AX),AX // get calling pc
+ RET
+
+TEXT runtime·setcallerpc(SB),7,$0
+ MOVQ x+0(FP),AX // addr of first arg
+ MOVQ x+8(FP), BX
+ MOVQ BX, -8(AX) // set calling pc
+ RET
+
+TEXT runtime·getcallersp(SB),7,$0
+ MOVQ sp+0(FP), AX
+ RET
+
+GLOBL runtime·tls0(SB), $64
diff --git a/src/pkg/runtime/amd64/atomic.c b/src/pkg/runtime/amd64/atomic.c
new file mode 100644
index 000000000..a4f2a114f
--- /dev/null
+++ b/src/pkg/runtime/amd64/atomic.c
@@ -0,0 +1,19 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+#pragma textflag 7
+uint32
+runtime·atomicload(uint32 volatile* addr)
+{
+ return *addr;
+}
+
+#pragma textflag 7
+void*
+runtime·atomicloadp(void* volatile* addr)
+{
+ return *addr;
+}
diff --git a/src/pkg/runtime/amd64/closure.c b/src/pkg/runtime/amd64/closure.c
new file mode 100644
index 000000000..481b4a888
--- /dev/null
+++ b/src/pkg/runtime/amd64/closure.c
@@ -0,0 +1,123 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+#pragma textflag 7
+// func closure(siz int32,
+// fn func(arg0, arg1, arg2 *ptr, callerpc uintptr, xxx) yyy,
+// arg0, arg1, arg2 *ptr) (func(xxx) yyy)
+void
+runtime·closure(int32 siz, byte *fn, byte *arg0)
+{
+ byte *p, *q, **ret;
+ int32 i, n;
+ int64 pcrel;
+
+ if(siz < 0 || siz%8 != 0)
+ runtime·throw("bad closure size");
+
+ ret = (byte**)((byte*)&arg0 + siz);
+
+ if(siz > 100) {
+ // TODO(rsc): implement stack growth preamble?
+ runtime·throw("closure too big");
+ }
+
+ // compute size of new fn.
+ // must match code laid out below.
+ n = 7+10+3; // SUBQ MOVQ MOVQ
+ if(siz <= 4*8)
+ n += 2*siz/8; // MOVSQ MOVSQ...
+ else
+ n += 7+3; // MOVQ REP MOVSQ
+ n += 12; // CALL worst case; sometimes only 5
+ n += 7+1; // ADDQ RET
+
+ // store args aligned after code, so gc can find them.
+ n += siz;
+ if(n%8)
+ n += 8 - n%8;
+
+ p = runtime·mal(n);
+ *ret = p;
+ q = p + n - siz;
+
+ if(siz > 0) {
+ runtime·memmove(q, (byte*)&arg0, siz);
+
+ // SUBQ $siz, SP
+ *p++ = 0x48;
+ *p++ = 0x81;
+ *p++ = 0xec;
+ *(uint32*)p = siz;
+ p += 4;
+
+ // MOVQ $q, SI
+ *p++ = 0x48;
+ *p++ = 0xbe;
+ *(byte**)p = q;
+ p += 8;
+
+ // MOVQ SP, DI
+ *p++ = 0x48;
+ *p++ = 0x89;
+ *p++ = 0xe7;
+
+ if(siz <= 4*8) {
+ for(i=0; i<siz; i+=8) {
+ // MOVSQ
+ *p++ = 0x48;
+ *p++ = 0xa5;
+ }
+ } else {
+ // MOVQ $(siz/8), CX [32-bit immediate siz/8]
+ *p++ = 0x48;
+ *p++ = 0xc7;
+ *p++ = 0xc1;
+ *(uint32*)p = siz/8;
+ p += 4;
+
+ // REP; MOVSQ
+ *p++ = 0xf3;
+ *p++ = 0x48;
+ *p++ = 0xa5;
+ }
+ }
+
+ // call fn
+ pcrel = fn - (p+5);
+ if((int32)pcrel == pcrel) {
+ // can use direct call with pc-relative offset
+ // CALL fn
+ *p++ = 0xe8;
+ *(int32*)p = pcrel;
+ p += 4;
+ } else {
+ // MOVQ $fn, CX [64-bit immediate fn]
+ *p++ = 0x48;
+ *p++ = 0xb9;
+ *(byte**)p = fn;
+ p += 8;
+
+ // CALL *CX
+ *p++ = 0xff;
+ *p++ = 0xd1;
+ }
+
+ // ADDQ $siz, SP
+ *p++ = 0x48;
+ *p++ = 0x81;
+ *p++ = 0xc4;
+ *(uint32*)p = siz;
+ p += 4;
+
+ // RET
+ *p++ = 0xc3;
+
+ if(p > q)
+ runtime·throw("bad math in sys.closure");
+}
+
+
diff --git a/src/pkg/runtime/amd64/memmove.s b/src/pkg/runtime/amd64/memmove.s
new file mode 100644
index 000000000..e78be8145
--- /dev/null
+++ b/src/pkg/runtime/amd64/memmove.s
@@ -0,0 +1,88 @@
+// Derived from Inferno's libkern/memmove-386.s (adapted for amd64)
+// http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+TEXT runtime·memmove(SB), 7, $0
+
+ MOVQ to+0(FP), DI
+ MOVQ fr+8(FP), SI
+ MOVLQSX n+16(FP), BX
+
+/*
+ * check and set for backwards
+ */
+ CMPQ SI, DI
+ JLS back
+
+/*
+ * forward copy loop
+ */
+forward:
+ MOVQ BX, CX
+ SHRQ $3, CX
+ ANDQ $7, BX
+
+ REP; MOVSQ
+ MOVQ BX, CX
+ REP; MOVSB
+
+ MOVQ to+0(FP),AX
+ RET
+back:
+/*
+ * check overlap
+ */
+ MOVQ SI, CX
+ ADDQ BX, CX
+ CMPQ CX, DI
+ JLS forward
+
+/*
+ * whole thing backwards has
+ * adjusted addresses
+ */
+ ADDQ BX, DI
+ ADDQ BX, SI
+ STD
+
+/*
+ * copy
+ */
+ MOVQ BX, CX
+ SHRQ $3, CX
+ ANDQ $7, BX
+
+ SUBQ $8, DI
+ SUBQ $8, SI
+ REP; MOVSQ
+
+ ADDQ $7, DI
+ ADDQ $7, SI
+ MOVQ BX, CX
+ REP; MOVSB
+
+ CLD
+ MOVQ to+0(FP),AX
+ RET
+
diff --git a/src/pkg/runtime/amd64/traceback.c b/src/pkg/runtime/amd64/traceback.c
new file mode 100644
index 000000000..3e85d36bd
--- /dev/null
+++ b/src/pkg/runtime/amd64/traceback.c
@@ -0,0 +1,295 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "malloc.h"
+
+static uintptr isclosureentry(uintptr);
+void runtime·deferproc(void);
+void runtime·newproc(void);
+void runtime·newstack(void);
+void runtime·morestack(void);
+void runtime·sigpanic(void);
+
+// This code is also used for the 386 tracebacks.
+// Use uintptr for an appropriate word-sized integer.
+
+// Generic traceback. Handles runtime stack prints (pcbuf == nil)
+// as well as the runtime.Callers function (pcbuf != nil).
+// A little clunky to merge the two but avoids duplicating
+// the code and all its subtlety.
+int32
+runtime·gentraceback(byte *pc0, byte *sp, byte *lr0, G *g, int32 skip, uintptr *pcbuf, int32 max)
+{
+ byte *p;
+ int32 i, n, iter, sawnewstack;
+ uintptr pc, lr, tracepc;
+ byte *fp;
+ Stktop *stk;
+ Func *f;
+ bool waspanic;
+
+ USED(lr0);
+ pc = (uintptr)pc0;
+ lr = 0;
+ fp = nil;
+ waspanic = false;
+
+ // If the PC is goexit, the goroutine hasn't started yet.
+ if(pc0 == g->sched.pc && sp == g->sched.sp && pc0 == (byte*)runtime·goexit) {
+ fp = sp;
+ lr = pc;
+ pc = (uintptr)g->entry;
+ }
+
+ // If the PC is zero, it's likely a nil function call.
+ // Start in the caller's frame.
+ if(pc == 0) {
+ pc = lr;
+ lr = 0;
+ }
+
+ // If the PC is zero, it's likely a nil function call.
+ // Start in the caller's frame.
+ if(pc == 0) {
+ pc = *(uintptr*)sp;
+ sp += sizeof(uintptr);
+ }
+
+ n = 0;
+ sawnewstack = 0;
+ stk = (Stktop*)g->stackbase;
+ for(iter = 0; iter < 100 && n < max; iter++) { // iter avoids looping forever
+ // Typically:
+ // pc is the PC of the running function.
+ // sp is the stack pointer at that program counter.
+ // fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
+ // stk is the stack containing sp.
+ // The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
+
+ if(pc == (uintptr)runtime·lessstack) {
+ // Hit top of stack segment. Unwind to next segment.
+ pc = (uintptr)stk->gobuf.pc;
+ sp = stk->gobuf.sp;
+ lr = 0;
+ fp = nil;
+ if(pcbuf == nil)
+ runtime·printf("----- stack segment boundary -----\n");
+ stk = (Stktop*)stk->stackbase;
+ continue;
+ }
+ if(pc <= 0x1000 || (f = runtime·findfunc(pc)) == nil) {
+ // Dangerous, but worthwhile: see if this is a closure:
+ // ADDQ $wwxxyyzz, SP; RET
+ // [48] 81 c4 zz yy xx ww c3
+ // The 0x48 byte is only on amd64.
+ p = (byte*)pc;
+ // We check p < p+8 to avoid wrapping and faulting if we lose track.
+ if(runtime·mheap.arena_start < p && p < p+8 && p+8 < runtime·mheap.arena_used && // pointer in allocated memory
+ (sizeof(uintptr) != 8 || *p++ == 0x48) && // skip 0x48 byte on amd64
+ p[0] == 0x81 && p[1] == 0xc4 && p[6] == 0xc3) {
+ sp += *(uint32*)(p+2);
+ pc = *(uintptr*)sp;
+ sp += sizeof(uintptr);
+ lr = 0;
+ fp = nil;
+ continue;
+ }
+
+ // Closure at top of stack, not yet started.
+ if(lr == (uintptr)runtime·goexit && (pc = isclosureentry(pc)) != 0) {
+ fp = sp;
+ continue;
+ }
+
+ // Unknown pc: stop.
+ break;
+ }
+
+ // Found an actual function.
+ if(fp == nil) {
+ fp = sp;
+ if(pc > f->entry && f->frame >= sizeof(uintptr))
+ fp += f->frame - sizeof(uintptr);
+ if(lr == 0)
+ lr = *(uintptr*)fp;
+ fp += sizeof(uintptr);
+ } else if(lr == 0)
+ lr = *(uintptr*)fp;
+
+ if(skip > 0)
+ skip--;
+ else if(pcbuf != nil)
+ pcbuf[n++] = pc;
+ else {
+ // Print during crash.
+ // main+0xf /home/rsc/go/src/runtime/x.go:23
+ // main(0x1, 0x2, 0x3)
+ runtime·printf("%S", f->name);
+ if(pc > f->entry)
+ runtime·printf("+%p", (uintptr)(pc - f->entry));
+ tracepc = pc; // back up to CALL instruction for funcline.
+ if(n > 0 && pc > f->entry && !waspanic)
+ tracepc--;
+ runtime·printf(" %S:%d\n", f->src, runtime·funcline(f, tracepc));
+ runtime·printf("\t%S(", f->name);
+ for(i = 0; i < f->args; i++) {
+ if(i != 0)
+ runtime·prints(", ");
+ runtime·printhex(((uintptr*)fp)[i]);
+ if(i >= 4) {
+ runtime·prints(", ...");
+ break;
+ }
+ }
+ runtime·prints(")\n");
+ n++;
+ }
+
+ waspanic = f->entry == (uintptr)runtime·sigpanic;
+
+ if(f->entry == (uintptr)runtime·deferproc || f->entry == (uintptr)runtime·newproc)
+ fp += 2*sizeof(uintptr);
+
+ if(f->entry == (uintptr)runtime·newstack)
+ sawnewstack = 1;
+
+ if(pcbuf == nil && f->entry == (uintptr)runtime·morestack && g == m->g0 && sawnewstack) {
+ // The fact that we saw newstack means that morestack
+ // has managed to record its information in m, so we can
+ // use it to keep unwinding the stack.
+ runtime·printf("----- morestack called from goroutine %d -----\n", m->curg->goid);
+ pc = (uintptr)m->morepc;
+ sp = m->morebuf.sp - sizeof(void*);
+ lr = (uintptr)m->morebuf.pc;
+ fp = m->morebuf.sp;
+ sawnewstack = 0;
+ g = m->curg;
+ stk = (Stktop*)g->stackbase;
+ continue;
+ }
+
+ if(pcbuf == nil && f->entry == (uintptr)runtime·lessstack && g == m->g0) {
+ // Lessstack is running on scheduler stack. Switch to original goroutine.
+ runtime·printf("----- lessstack called from goroutine %d -----\n", m->curg->goid);
+ g = m->curg;
+ stk = (Stktop*)g->stackbase;
+ sp = stk->gobuf.sp;
+ pc = (uintptr)stk->gobuf.pc;
+ fp = nil;
+ lr = 0;
+ continue;
+ }
+
+ // Unwind to next frame.
+ pc = lr;
+ lr = 0;
+ sp = fp;
+ fp = nil;
+ }
+
+ if(pcbuf == nil && (pc = g->gopc) != 0 && (f = runtime·findfunc(pc)) != nil) {
+ runtime·printf("----- goroutine created by -----\n%S", f->name);
+ if(pc > f->entry)
+ runtime·printf("+%p", (uintptr)(pc - f->entry));
+ tracepc = pc; // back up to CALL instruction for funcline.
+ if(n > 0 && pc > f->entry)
+ tracepc--;
+ runtime·printf(" %S:%d\n", f->src, runtime·funcline(f, tracepc));
+ }
+
+ return n;
+}
+
+void
+runtime·traceback(byte *pc0, byte *sp, byte*, G *g)
+{
+ runtime·gentraceback(pc0, sp, nil, g, 0, nil, 100);
+}
+
+int32
+runtime·callers(int32 skip, uintptr *pcbuf, int32 m)
+{
+ byte *pc, *sp;
+
+ // our caller's pc, sp.
+ sp = (byte*)&skip;
+ pc = runtime·getcallerpc(&skip);
+
+ return runtime·gentraceback(pc, sp, nil, g, skip, pcbuf, m);
+}
+
+static uintptr
+isclosureentry(uintptr pc)
+{
+ byte *p;
+ int32 i, siz;
+
+ p = (byte*)pc;
+ if(p < runtime·mheap.arena_start || p+32 > runtime·mheap.arena_used)
+ return 0;
+
+ if(*p == 0xe8) {
+ // CALL fn
+ return pc+5+*(int32*)(p+1);
+ }
+
+ if(sizeof(uintptr) == 8 && p[0] == 0x48 && p[1] == 0xb9 && p[10] == 0xff && p[11] == 0xd1) {
+ // MOVQ $fn, CX; CALL *CX
+ return *(uintptr*)(p+2);
+ }
+
+ // SUBQ $siz, SP
+ if((sizeof(uintptr) == 8 && *p++ != 0x48) || *p++ != 0x81 || *p++ != 0xec)
+ return 0;
+ siz = *(uint32*)p;
+ p += 4;
+
+ // MOVQ $q, SI
+ if((sizeof(uintptr) == 8 && *p++ != 0x48) || *p++ != 0xbe)
+ return 0;
+ p += sizeof(uintptr);
+
+ // MOVQ SP, DI
+ if((sizeof(uintptr) == 8 && *p++ != 0x48) || *p++ != 0x89 || *p++ != 0xe7)
+ return 0;
+
+ // CLD on 32-bit
+ if(sizeof(uintptr) == 4 && *p++ != 0xfc)
+ return 0;
+
+ if(siz <= 4*sizeof(uintptr)) {
+ // MOVSQ...
+ for(i=0; i<siz; i+=sizeof(uintptr))
+ if((sizeof(uintptr) == 8 && *p++ != 0x48) || *p++ != 0xa5)
+ return 0;
+ } else {
+ // MOVQ $(siz/8), CX [32-bit immediate siz/8]
+ if((sizeof(uintptr) == 8 && *p++ != 0x48) || *p++ != 0xc7 || *p++ != 0xc1)
+ return 0;
+ p += 4;
+
+ // REP MOVSQ
+ if(*p++ != 0xf3 || (sizeof(uintptr) == 8 && *p++ != 0x48) || *p++ != 0xa5)
+ return 0;
+ }
+
+ // CALL fn
+ if(*p == 0xe8) {
+ p++;
+ return (uintptr)p+4 + *(int32*)p;
+ }
+
+ // MOVQ $fn, CX; CALL *CX
+ if(sizeof(uintptr) != 8 || *p++ != 0x48 || *p++ != 0xb9)
+ return 0;
+
+ pc = *(uintptr*)p;
+ p += 8;
+
+ if(*p++ != 0xff || *p != 0xd1)
+ return 0;
+
+ return pc;
+}
diff --git a/src/pkg/runtime/append_test.go b/src/pkg/runtime/append_test.go
new file mode 100644
index 000000000..b8552224e
--- /dev/null
+++ b/src/pkg/runtime/append_test.go
@@ -0,0 +1,52 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package runtime_test
+
+import "testing"
+
+const N = 20
+
+func BenchmarkAppend(b *testing.B) {
+ b.StopTimer()
+ x := make([]int, 0, N)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ for j := 0; j < N; j++ {
+ x = append(x, j)
+ }
+ }
+}
+
+func BenchmarkAppendSpecialCase(b *testing.B) {
+ b.StopTimer()
+ x := make([]int, 0, N)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ for j := 0; j < N; j++ {
+ if len(x) < cap(x) {
+ x = x[:len(x)+1]
+ x[len(x)-1] = j
+ } else {
+ x = append(x, j)
+ }
+ }
+ }
+}
+
+var x []int
+
+func f() int {
+ x[:1][0] = 3
+ return 2
+}
+
+func TestSideEffectOrder(t *testing.T) {
+ x = make([]int, 0, 10)
+ x = append(x, 1, f())
+ if x[0] != 1 || x[1] != 2 {
+ t.Error("append failed: ", x[0], x[1])
+ }
+}
diff --git a/src/pkg/runtime/arm/arch.h b/src/pkg/runtime/arm/arch.h
new file mode 100644
index 000000000..3ddb626dd
--- /dev/null
+++ b/src/pkg/runtime/arm/arch.h
@@ -0,0 +1,3 @@
+enum {
+ thechar = '5'
+};
diff --git a/src/pkg/runtime/arm/asm.s b/src/pkg/runtime/arm/asm.s
new file mode 100644
index 000000000..63153658f
--- /dev/null
+++ b/src/pkg/runtime/arm/asm.s
@@ -0,0 +1,316 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "arm/asm.h"
+
+// using frame size $-4 means do not save LR on stack.
+TEXT _rt0_arm(SB),7,$-4
+ MOVW $0xcafebabe, R12
+
+ // copy arguments forward on an even stack
+ // use R13 instead of SP to avoid linker rewriting the offsets
+ MOVW 0(R13), R0 // argc
+ MOVW $4(R13), R1 // argv
+ SUB $64, R13 // plenty of scratch
+ AND $~7, R13
+ MOVW R0, 60(R13) // save argc, argv away
+ MOVW R1, 64(R13)
+
+ // set up m and g registers
+ // g is R10, m is R9
+ MOVW $runtime·g0(SB), g
+ MOVW $runtime·m0(SB), m
+
+ // save m->g0 = g0
+ MOVW g, m_g0(m)
+
+ // create istack out of the OS stack
+ MOVW $(-8192+104)(R13), R0
+ MOVW R0, g_stackguard(g) // (w 104b guard)
+ MOVW R13, g_stackbase(g)
+ BL runtime·emptyfunc(SB) // fault if stack check is wrong
+
+ BL runtime·check(SB)
+
+ // saved argc, argv
+ MOVW 60(R13), R0
+ MOVW R0, 4(R13)
+ MOVW 64(R13), R1
+ MOVW R1, 8(R13)
+ BL runtime·args(SB)
+ BL runtime·osinit(SB)
+ BL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ MOVW $runtime·mainstart(SB), R0
+ MOVW.W R0, -4(R13)
+ MOVW $8, R0
+ MOVW.W R0, -4(R13)
+ MOVW $0, R0
+ MOVW.W R0, -4(R13) // push $0 as guard
+ BL runtime·newproc(SB)
+ MOVW $12(R13), R13 // pop args and LR
+
+ // start this M
+ BL runtime·mstart(SB)
+
+ MOVW $1234, R0
+ MOVW $1000, R1
+ MOVW R0, (R1) // fail hard
+ B runtime·_dep_dummy(SB) // Never reached
+
+
+TEXT runtime·mainstart(SB),7,$4
+ BL main·init(SB)
+ BL runtime·initdone(SB)
+ EOR R0, R0
+ MOVW R0, 0(R13)
+ BL main·main(SB)
+ MOVW $0, R0
+ MOVW R0, 4(SP)
+ BL runtime·exit(SB)
+ MOVW $1234, R0
+ MOVW $1001, R1
+ MOVW R0, (R1) // fail hard
+ RET
+
+// TODO(kaib): remove these once i actually understand how the linker removes symbols
+// pull in dummy dependencies
+TEXT runtime·_dep_dummy(SB),7,$0
+ BL _div(SB)
+ BL _divu(SB)
+ BL _mod(SB)
+ BL _modu(SB)
+ BL _modu(SB)
+ BL _sfloat(SB)
+
+TEXT runtime·breakpoint(SB),7,$0
+ // no breakpoint yet; let program exit
+ RET
+
+/*
+ * go-routine
+ */
+
+// void gosave(Gobuf*)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), 7, $-4
+ MOVW 0(FP), R0 // gobuf
+ MOVW SP, gobuf_sp(R0)
+ MOVW LR, gobuf_pc(R0)
+ MOVW g, gobuf_g(R0)
+ RET
+
+// void gogo(Gobuf*, uintptr)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), 7, $-4
+ MOVW 0(FP), R1 // gobuf
+ MOVW 4(FP), R0 // return 2nd arg
+ MOVW gobuf_g(R1), g
+ MOVW 0(g), R2 // make sure g != nil
+ MOVW gobuf_sp(R1), SP // restore SP
+ MOVW gobuf_pc(R1), PC
+
+// void gogocall(Gobuf*, void (*fn)(void))
+// restore state from Gobuf but then call fn.
+// (call fn, returning to state in Gobuf)
+// using frame size $-4 means do not save LR on stack.
+TEXT runtime·gogocall(SB), 7, $-4
+ MOVW 0(FP), R0 // gobuf
+ MOVW 4(FP), R1 // fn
+ MOVW 8(FP), R2 // fp offset
+ MOVW gobuf_g(R0), g
+ MOVW 0(g), R3 // make sure g != nil
+ MOVW gobuf_sp(R0), SP // restore SP
+ MOVW gobuf_pc(R0), LR
+ MOVW R1, PC
+
+// void mcall(void (*fn)(G*))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall(SB), 7, $-4
+ MOVW fn+0(FP), R0
+
+ // Save caller state in g->gobuf.
+ MOVW SP, (g_sched+gobuf_sp)(g)
+ MOVW LR, (g_sched+gobuf_pc)(g)
+ MOVW g, (g_sched+gobuf_g)(g)
+
+ // Switch to m->g0 & its stack, call fn.
+ MOVW g, R1
+ MOVW m_g0(m), g
+ CMP g, R1
+ BL.EQ runtime·badmcall(SB)
+ MOVW (g_sched+gobuf_sp)(g), SP
+ SUB $8, SP
+ MOVW R1, 4(SP)
+ BL (R0)
+ BL runtime·badmcall2(SB)
+ RET
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+// R1 frame size
+// R2 arg size
+// R3 prolog's LR
+// NB. we do not save R0 because we've forced 5c to pass all arguments
+// on the stack.
+// using frame size $-4 means do not save LR on stack.
+TEXT runtime·morestack(SB),7,$-4
+ // Cannot grow scheduler stack (m->g0).
+ MOVW m_g0(m), R4
+ CMP g, R4
+ BL.EQ runtime·abort(SB)
+
+ // Save in m.
+ MOVW R1, m_moreframesize(m)
+ MOVW R2, m_moreargsize(m)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVW R3, (m_morebuf+gobuf_pc)(m) // f's caller's PC
+ MOVW SP, (m_morebuf+gobuf_sp)(m) // f's caller's SP
+ MOVW $4(SP), R3 // f's argument pointer
+ MOVW R3, m_moreargp(m)
+ MOVW g, (m_morebuf+gobuf_g)(m)
+
+ // Set m->morepc to f's PC.
+ MOVW LR, m_morepc(m)
+
+ // Call newstack on m->g0's stack.
+ MOVW m_g0(m), g
+ MOVW (g_sched+gobuf_sp)(g), SP
+ B runtime·newstack(SB)
+
+// Called from reflection library. Mimics morestack,
+// reuses stack growth code to create a frame
+// with the desired args running the desired function.
+//
+// func call(fn *byte, arg *byte, argsize uint32).
+TEXT reflect·call(SB), 7, $-4
+ // Save our caller's state as the PC and SP to
+ // restore when returning from f.
+ MOVW LR, (m_morebuf+gobuf_pc)(m) // our caller's PC
+ MOVW SP, (m_morebuf+gobuf_sp)(m) // our caller's SP
+ MOVW g, (m_morebuf+gobuf_g)(m)
+
+ // Set up morestack arguments to call f on a new stack.
+ // We set f's frame size to 1, as a hint to newstack
+ // that this is a call from reflect·call.
+ // If it turns out that f needs a larger frame than
+ // the default stack, f's usual stack growth prolog will
+ // allocate a new segment (and recopy the arguments).
+ MOVW 4(SP), R0 // fn
+ MOVW 8(SP), R1 // arg frame
+ MOVW 12(SP), R2 // arg size
+
+ MOVW R0, m_morepc(m) // f's PC
+ MOVW R1, m_moreargp(m) // f's argument pointer
+ MOVW R2, m_moreargsize(m) // f's argument size
+ MOVW $1, R3
+ MOVW R3, m_moreframesize(m) // f's frame size
+
+ // Call newstack on m->g0's stack.
+ MOVW m_g0(m), g
+ MOVW (g_sched+gobuf_sp)(g), SP
+ B runtime·newstack(SB)
+
+// Return point when leaving stack.
+// using frame size $-4 means do not save LR on stack.
+TEXT runtime·lessstack(SB), 7, $-4
+ // Save return value in m->cret
+ MOVW R0, m_cret(m)
+
+ // Call oldstack on m->g0's stack.
+ MOVW m_g0(m), g
+ MOVW (g_sched+gobuf_sp)(g), SP
+ B runtime·oldstack(SB)
+
+// void jmpdefer(fn, sp);
+// called from deferreturn.
+// 1. grab stored LR for caller
+// 2. sub 4 bytes to get back to BL deferreturn
+// 3. B to fn
+TEXT runtime·jmpdefer(SB), 7, $0
+ MOVW 0(SP), LR
+ MOVW $-4(LR), LR // BL deferreturn
+ MOVW fn+0(FP), R0
+ MOVW argp+4(FP), SP
+ MOVW $-4(SP), SP // SP is 4 below argp, due to saved LR
+ B (R0)
+
+TEXT runtime·asmcgocall(SB),7,$0
+ B runtime·cgounimpl(SB)
+
+TEXT runtime·cgocallback(SB),7,$0
+ B runtime·cgounimpl(SB)
+
+TEXT runtime·memclr(SB),7,$20
+ MOVW 0(FP), R0
+ MOVW $0, R1 // c = 0
+ MOVW R1, -16(SP)
+ MOVW 4(FP), R1 // n
+ MOVW R1, -12(SP)
+ MOVW m, -8(SP) // Save m and g
+ MOVW g, -4(SP)
+ BL runtime·memset(SB)
+ MOVW -8(SP), m // Restore m and g, memset clobbers them
+ MOVW -4(SP), g
+ RET
+
+TEXT runtime·getcallerpc(SB),7,$-4
+ MOVW 0(SP), R0
+ RET
+
+TEXT runtime·setcallerpc(SB),7,$-4
+ MOVW x+4(FP), R0
+ MOVW R0, 0(SP)
+ RET
+
+TEXT runtime·getcallersp(SB),7,$-4
+ MOVW 0(FP), R0
+ MOVW $-4(R0), R0
+ RET
+
+TEXT runtime·emptyfunc(SB),0,$0
+ RET
+
+TEXT runtime·abort(SB),7,$-4
+ MOVW $0, R0
+ MOVW (R0), R1
+
+// bool armcas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// }else
+// return 0;
+//
+// To implement runtime·cas in ../$GOOS/arm/sys.s
+// using the native instructions, use:
+//
+// TEXT runtime·cas(SB),7,$0
+// B runtime·armcas(SB)
+//
+TEXT runtime·armcas(SB),7,$0
+ MOVW valptr+0(FP), R1
+ MOVW old+4(FP), R2
+ MOVW new+8(FP), R3
+casl:
+ LDREX (R1), R0
+ CMP R0, R2
+ BNE casfail
+ STREX R3, (R1), R0
+ CMP $0, R0
+ BNE casl
+ MOVW $1, R0
+ RET
+casfail:
+ MOVW $0, R0
+ RET
diff --git a/src/pkg/runtime/arm/atomic.c b/src/pkg/runtime/arm/atomic.c
new file mode 100644
index 000000000..52e4059ae
--- /dev/null
+++ b/src/pkg/runtime/arm/atomic.c
@@ -0,0 +1,83 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+// Atomic add and return new value.
+#pragma textflag 7
+uint32
+runtime·xadd(uint32 volatile *val, int32 delta)
+{
+ uint32 oval, nval;
+
+ for(;;){
+ oval = *val;
+ nval = oval + delta;
+ if(runtime·cas(val, oval, nval))
+ return nval;
+ }
+}
+
+#pragma textflag 7
+uint32
+runtime·xchg(uint32 volatile* addr, uint32 v)
+{
+ uint32 old;
+
+ for(;;) {
+ old = *addr;
+ if(runtime·cas(addr, old, v))
+ return old;
+ }
+}
+
+#pragma textflag 7
+void
+runtime·procyield(uint32 cnt)
+{
+ uint32 volatile i;
+
+ for(i = 0; i < cnt; i++) {
+ }
+}
+
+#pragma textflag 7
+uint32
+runtime·atomicload(uint32 volatile* addr)
+{
+ return runtime·xadd(addr, 0);
+}
+
+#pragma textflag 7
+void*
+runtime·atomicloadp(void* volatile* addr)
+{
+ return (void*)runtime·xadd((uint32 volatile*)addr, 0);
+}
+
+#pragma textflag 7
+void
+runtime·atomicstorep(void* volatile* addr, void* v)
+{
+ void *old;
+
+ for(;;) {
+ old = *addr;
+ if(runtime·casp(addr, old, v))
+ return;
+ }
+}
+
+#pragma textflag 7
+void
+runtime·atomicstore(uint32 volatile* addr, uint32 v)
+{
+ uint32 old;
+
+ for(;;) {
+ old = *addr;
+ if(runtime·cas(addr, old, v))
+ return;
+ }
+} \ No newline at end of file
diff --git a/src/pkg/runtime/arm/closure.c b/src/pkg/runtime/arm/closure.c
new file mode 100644
index 000000000..119e91b61
--- /dev/null
+++ b/src/pkg/runtime/arm/closure.c
@@ -0,0 +1,129 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+/*
+ There are two bits of magic:
+ - The signature of the compiler generated function uses two stack frames
+ as arguments (callerpc separates these frames)
+ - size determines how many arguments runtime.closure actually has
+ starting at arg0.
+
+ Example closure with 3 captured variables:
+ func closure(siz int32,
+ fn func(arg0, arg1, arg2 *ptr, callerpc uintptr, xxx) yyy,
+ arg0, arg1, arg2 *ptr) (func(xxx) yyy)
+
+ Code generated:
+ src R0
+ dst R1
+ end R3
+ tmp R4
+ frame = siz+4
+
+//skip loop for 0 size closures
+ MOVW.W R14,-frame(R13)
+
+ MOVW $vars(PC), R0
+ MOVW $4(SP), R1
+ MOVW $siz(R0), R3
+loop: MOVW.P 4(R0), R4
+ MOVW.P R4, 4(R1)
+ CMP R0, R3
+ BNE loop
+
+ MOVW 8(PC), R0
+ BL (R0) // 2 words
+ MOVW.P frame(R13),R15
+fptr: WORD *fn
+vars: WORD arg0
+ WORD arg1
+ WORD arg2
+*/
+
+extern void runtime·cacheflush(byte* start, byte* end);
+
+#pragma textflag 7
+void
+runtime·closure(int32 siz, byte *fn, byte *arg0)
+{
+ byte *p, *q, **ret;
+ uint32 *pc;
+ int32 n;
+
+ if(siz < 0 || siz%4 != 0)
+ runtime·throw("bad closure size");
+
+ ret = (byte**)((byte*)&arg0 + siz);
+
+ if(siz > 100) {
+ // TODO(kaib): implement stack growth preamble?
+ runtime·throw("closure too big");
+ }
+
+ // size of new fn.
+ // must match code laid out below.
+ if (siz > 0)
+ n = 6 * 4 + 7 * 4;
+ else
+ n = 6 * 4;
+
+ // store args aligned after code, so gc can find them.
+ n += siz;
+
+ p = runtime·mal(n);
+ *ret = p;
+ q = p + n - siz;
+
+ pc = (uint32*)p;
+
+ // MOVW.W R14,-frame(R13)
+ *pc++ = 0xe52de000 | (siz + 4);
+
+ if(siz > 0) {
+ runtime·memmove(q, (byte*)&arg0, siz);
+
+ // MOVW $vars(PC), R0
+ *pc = 0xe28f0000 | (int32)(q - (byte*)pc - 8);
+ pc++;
+
+ // MOVW $4(SP), R1
+ *pc++ = 0xe28d1004;
+
+ // MOVW $siz(R0), R3
+ *pc++ = 0xe2803000 | siz;
+
+ // MOVW.P 4(R0), R4
+ *pc++ = 0xe4904004;
+ // MOVW.P R4, 4(R1)
+ *pc++ = 0xe4814004;
+ // CMP R0, R3
+ *pc++ = 0xe1530000;
+ // BNE loop
+ *pc++ = 0x1afffffb;
+ }
+
+ // MOVW fptr(PC), R0
+ *pc = 0xe59f0008 | (int32)((q - 4) -(byte*) pc - 8);
+ pc++;
+
+ // BL (R0)
+ *pc++ = 0xe28fe000;
+ *pc++ = 0xe280f000;
+
+ // MOVW.P frame(R13),R15
+ *pc++ = 0xe49df000 | (siz + 4);
+
+ // WORD *fn
+ *pc++ = (uint32)fn;
+
+ p = (byte*)pc;
+
+ if(p > q)
+ runtime·throw("bad math in sys.closure");
+
+ runtime·cacheflush(*ret, q+siz);
+}
+
diff --git a/src/pkg/runtime/arm/memmove.s b/src/pkg/runtime/arm/memmove.s
new file mode 100644
index 000000000..5c0e57404
--- /dev/null
+++ b/src/pkg/runtime/arm/memmove.s
@@ -0,0 +1,255 @@
+// Inferno's libkern/memmove-arm.s
+// http://code.google.com/p/inferno-os/source/browse/libkern/memmove-arm.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+TS = 0
+TE = 1
+FROM = 2
+N = 3
+TMP = 3 /* N and TMP don't overlap */
+TMP1 = 4
+
+// TODO(kaib): This can be done with the existing registers of LR is re-used. Same for memset.
+TEXT runtime·memmove(SB), 7, $8
+ // save g and m
+ MOVW R9, 4(R13)
+ MOVW R10, 8(R13)
+
+_memmove:
+ MOVW to+0(FP), R(TS)
+ MOVW from+4(FP), R(FROM)
+ MOVW n+8(FP), R(N)
+
+ ADD R(N), R(TS), R(TE) /* to end pointer */
+
+ CMP R(FROM), R(TS)
+ BLS _forward
+
+_back:
+ ADD R(N), R(FROM) /* from end pointer */
+ CMP $4, R(N) /* need at least 4 bytes to copy */
+ BLT _b1tail
+
+_b4align: /* align destination on 4 */
+ AND.S $3, R(TE), R(TMP)
+ BEQ _b4aligned
+
+ MOVBU.W -1(R(FROM)), R(TMP) /* pre-indexed */
+ MOVBU.W R(TMP), -1(R(TE)) /* pre-indexed */
+ B _b4align
+
+_b4aligned: /* is source now aligned? */
+ AND.S $3, R(FROM), R(TMP)
+ BNE _bunaligned
+
+ ADD $31, R(TS), R(TMP) /* do 32-byte chunks if possible */
+_b32loop:
+ CMP R(TMP), R(TE)
+ BLS _b4tail
+
+ MOVM.DB.W (R(FROM)), [R4-R11]
+ MOVM.DB.W [R4-R11], (R(TE))
+ B _b32loop
+
+_b4tail: /* do remaining words if possible */
+ ADD $3, R(TS), R(TMP)
+_b4loop:
+ CMP R(TMP), R(TE)
+ BLS _b1tail
+
+ MOVW.W -4(R(FROM)), R(TMP1) /* pre-indexed */
+ MOVW.W R(TMP1), -4(R(TE)) /* pre-indexed */
+ B _b4loop
+
+_b1tail: /* remaining bytes */
+ CMP R(TE), R(TS)
+ BEQ _return
+
+ MOVBU.W -1(R(FROM)), R(TMP) /* pre-indexed */
+ MOVBU.W R(TMP), -1(R(TE)) /* pre-indexed */
+ B _b1tail
+
+_forward:
+ CMP $4, R(N) /* need at least 4 bytes to copy */
+ BLT _f1tail
+
+_f4align: /* align destination on 4 */
+ AND.S $3, R(TS), R(TMP)
+ BEQ _f4aligned
+
+ MOVBU.P 1(R(FROM)), R(TMP) /* implicit write back */
+ MOVBU.P R(TMP), 1(R(TS)) /* implicit write back */
+ B _f4align
+
+_f4aligned: /* is source now aligned? */
+ AND.S $3, R(FROM), R(TMP)
+ BNE _funaligned
+
+ SUB $31, R(TE), R(TMP) /* do 32-byte chunks if possible */
+_f32loop:
+ CMP R(TMP), R(TS)
+ BHS _f4tail
+
+ MOVM.IA.W (R(FROM)), [R4-R11]
+ MOVM.IA.W [R4-R11], (R(TS))
+ B _f32loop
+
+_f4tail:
+ SUB $3, R(TE), R(TMP) /* do remaining words if possible */
+_f4loop:
+ CMP R(TMP), R(TS)
+ BHS _f1tail
+
+ MOVW.P 4(R(FROM)), R(TMP1) /* implicit write back */
+ MOVW.P R4, 4(R(TS)) /* implicit write back */
+ B _f4loop
+
+_f1tail:
+ CMP R(TS), R(TE)
+ BEQ _return
+
+ MOVBU.P 1(R(FROM)), R(TMP) /* implicit write back */
+ MOVBU.P R(TMP), 1(R(TS)) /* implicit write back */
+ B _f1tail
+
+_return:
+ // restore g and m
+ MOVW 4(R13), R9
+ MOVW 8(R13), R10
+ MOVW to+0(FP), R0
+ RET
+
+RSHIFT = 4
+LSHIFT = 5
+OFFSET = 6
+
+BR0 = 7
+BW0 = 8
+BR1 = 8
+BW1 = 9
+BR2 = 9
+BW2 = 10
+BR3 = 10
+BW3 = 11
+
+_bunaligned:
+ CMP $2, R(TMP) /* is R(TMP) < 2 ? */
+
+ MOVW.LT $8, R(RSHIFT) /* (R(n)<<24)|(R(n-1)>>8) */
+ MOVW.LT $24, R(LSHIFT)
+ MOVW.LT $1, R(OFFSET)
+
+ MOVW.EQ $16, R(RSHIFT) /* (R(n)<<16)|(R(n-1)>>16) */
+ MOVW.EQ $16, R(LSHIFT)
+ MOVW.EQ $2, R(OFFSET)
+
+ MOVW.GT $24, R(RSHIFT) /* (R(n)<<8)|(R(n-1)>>24) */
+ MOVW.GT $8, R(LSHIFT)
+ MOVW.GT $3, R(OFFSET)
+
+ ADD $16, R(TS), R(TMP) /* do 16-byte chunks if possible */
+ CMP R(TMP), R(TE)
+ BLS _b1tail
+
+ AND $~0x03, R(FROM) /* align source */
+ MOVW (R(FROM)), R(BR0) /* prime first block register */
+
+_bu16loop:
+ CMP R(TMP), R(TE)
+ BLS _bu1tail
+
+ MOVW R(BR0)<<R(LSHIFT), R(BW3)
+ MOVM.DB.W (R(FROM)), [R(BR0)-R(BR3)]
+ ORR R(BR3)>>R(RSHIFT), R(BW3)
+
+ MOVW R(BR3)<<R(LSHIFT), R(BW2)
+ ORR R(BR2)>>R(RSHIFT), R(BW2)
+
+ MOVW R(BR2)<<R(LSHIFT), R(BW1)
+ ORR R(BR1)>>R(RSHIFT), R(BW1)
+
+ MOVW R(BR1)<<R(LSHIFT), R(BW0)
+ ORR R(BR0)>>R(RSHIFT), R(BW0)
+
+ MOVM.DB.W [R(BW0)-R(BW3)], (R(TE))
+ B _bu16loop
+
+_bu1tail:
+ ADD R(OFFSET), R(FROM)
+ B _b1tail
+
+FW0 = 7
+FR0 = 8
+FW1 = 8
+FR1 = 9
+FW2 = 9
+FR2 = 10
+FW3 = 10
+FR3 = 11
+
+_funaligned:
+ CMP $2, R(TMP)
+
+ MOVW.LT $8, R(RSHIFT) /* (R(n+1)<<24)|(R(n)>>8) */
+ MOVW.LT $24, R(LSHIFT)
+ MOVW.LT $3, R(OFFSET)
+
+ MOVW.EQ $16, R(RSHIFT) /* (R(n+1)<<16)|(R(n)>>16) */
+ MOVW.EQ $16, R(LSHIFT)
+ MOVW.EQ $2, R(OFFSET)
+
+ MOVW.GT $24, R(RSHIFT) /* (R(n+1)<<8)|(R(n)>>24) */
+ MOVW.GT $8, R(LSHIFT)
+ MOVW.GT $1, R(OFFSET)
+
+ SUB $16, R(TE), R(TMP) /* do 16-byte chunks if possible */
+ CMP R(TMP), R(TS)
+ BHS _f1tail
+
+ AND $~0x03, R(FROM) /* align source */
+ MOVW.P 4(R(FROM)), R(FR3) /* prime last block register, implicit write back */
+
+_fu16loop:
+ CMP R(TMP), R(TS)
+ BHS _fu1tail
+
+ MOVW R(FR3)>>R(RSHIFT), R(FW0)
+ MOVM.IA.W (R(FROM)), [R(FR0)-R(FR3)]
+ ORR R(FR0)<<R(LSHIFT), R(FW0)
+
+ MOVW R(FR0)>>R(RSHIFT), R(FW1)
+ ORR R(FR1)<<R(LSHIFT), R(FW1)
+
+ MOVW R(FR1)>>R(RSHIFT), R(FW2)
+ ORR R(FR2)<<R(LSHIFT), R(FW2)
+
+ MOVW R(FR2)>>R(RSHIFT), R(FW3)
+ ORR R(FR3)<<R(LSHIFT), R(FW3)
+
+ MOVM.IA.W [R(FW0)-R(FW3)], (R(TS))
+ B _fu16loop
+
+_fu1tail:
+ SUB R(OFFSET), R(FROM)
+ B _f1tail
diff --git a/src/pkg/runtime/arm/memset.s b/src/pkg/runtime/arm/memset.s
new file mode 100644
index 000000000..974b8da7a
--- /dev/null
+++ b/src/pkg/runtime/arm/memset.s
@@ -0,0 +1,94 @@
+// Inferno's libkern/memset-arm.s
+// http://code.google.com/p/inferno-os/source/browse/libkern/memset-arm.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+TO = 1
+TOE = 2
+N = 3
+TMP = 3 /* N and TMP don't overlap */
+
+// TODO(kaib): memset clobbers R9 and R10 (m and g). This makes the
+// registers unpredictable if (when) memset SIGSEGV's. Fix it by
+// moving the R4-R11 register bank.
+TEXT runtime·memset(SB), $0
+ MOVW R0, R(TO)
+ MOVW data+4(FP), R(4)
+ MOVW n+8(FP), R(N)
+
+ ADD R(N), R(TO), R(TOE) /* to end pointer */
+
+ CMP $4, R(N) /* need at least 4 bytes to copy */
+ BLT _1tail
+
+ AND $0xFF, R(4) /* it's a byte */
+ SLL $8, R(4), R(TMP) /* replicate to a word */
+ ORR R(TMP), R(4)
+ SLL $16, R(4), R(TMP)
+ ORR R(TMP), R(4)
+
+_4align: /* align on 4 */
+ AND.S $3, R(TO), R(TMP)
+ BEQ _4aligned
+
+ MOVBU.P R(4), 1(R(TO)) /* implicit write back */
+ B _4align
+
+_4aligned:
+ SUB $31, R(TOE), R(TMP) /* do 32-byte chunks if possible */
+ CMP R(TMP), R(TO)
+ BHS _4tail
+
+ MOVW R4, R5 /* replicate */
+ MOVW R4, R6
+ MOVW R4, R7
+ MOVW R4, R8
+ MOVW R4, R9
+ MOVW R4, R10
+ MOVW R4, R11
+
+_f32loop:
+ CMP R(TMP), R(TO)
+ BHS _4tail
+
+ MOVM.IA.W [R4-R11], (R(TO))
+ B _f32loop
+
+_4tail:
+ SUB $3, R(TOE), R(TMP) /* do remaining words if possible */
+_4loop:
+ CMP R(TMP), R(TO)
+ BHS _1tail
+
+ MOVW.P R(4), 4(R(TO)) /* implicit write back */
+ B _4loop
+
+_1tail:
+ CMP R(TO), R(TOE)
+ BEQ _return
+
+ MOVBU.P R(4), 1(R(TO)) /* implicit write back */
+ B _1tail
+
+_return:
+ RET
diff --git a/src/pkg/runtime/arm/softfloat.c b/src/pkg/runtime/arm/softfloat.c
new file mode 100644
index 000000000..0a071dada
--- /dev/null
+++ b/src/pkg/runtime/arm/softfloat.c
@@ -0,0 +1,525 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Software floating point interpretaton of ARM 7500 FP instructions.
+// The interpretation is not bit compatible with the 7500.
+// It uses true little-endian doubles, while the 7500 used mixed-endian.
+
+#include "runtime.h"
+
+#define CPSR 14
+#define FLAGS_N (1 << 31)
+#define FLAGS_Z (1 << 30)
+#define FLAGS_C (1 << 29)
+#define FLAGS_V (1 << 28)
+
+void runtime·abort(void);
+void math·sqrtGoC(uint64, uint64*);
+
+static uint32 trace = 0;
+
+static void
+fabort(void)
+{
+ if (1) {
+ runtime·printf("Unsupported floating point instruction\n");
+ runtime·abort();
+ }
+}
+
+static void
+putf(uint32 reg, uint32 val)
+{
+ m->freglo[reg] = val;
+}
+
+static void
+putd(uint32 reg, uint64 val)
+{
+ m->freglo[reg] = (uint32)val;
+ m->freghi[reg] = (uint32)(val>>32);
+}
+
+static uint64
+getd(uint32 reg)
+{
+ return (uint64)m->freglo[reg] | ((uint64)m->freghi[reg]<<32);
+}
+
+static void
+fprint(void)
+{
+ uint32 i;
+ for (i = 0; i < 16; i++) {
+ runtime·printf("\tf%d:\t%X %X\n", i, m->freghi[i], m->freglo[i]);
+ }
+}
+
+static uint32
+d2f(uint64 d)
+{
+ uint32 x;
+
+ runtime·f64to32c(d, &x);
+ return x;
+}
+
+static uint64
+f2d(uint32 f)
+{
+ uint64 x;
+
+ runtime·f32to64c(f, &x);
+ return x;
+}
+
+static uint32
+fstatus(bool nan, int32 cmp)
+{
+ if(nan)
+ return FLAGS_C | FLAGS_V;
+ if(cmp == 0)
+ return FLAGS_Z | FLAGS_C;
+ if(cmp < 0)
+ return FLAGS_N;
+ return FLAGS_C;
+}
+
+// returns number of words that the fp instruction
+// is occupying, 0 if next instruction isn't float.
+static uint32
+stepflt(uint32 *pc, uint32 *regs)
+{
+ uint32 i, regd, regm, regn;
+ int32 delta;
+ uint32 *addr;
+ uint64 uval;
+ int64 sval;
+ bool nan, ok;
+ int32 cmp;
+
+ i = *pc;
+
+ if(trace)
+ runtime·printf("stepflt %p %x\n", pc, i);
+
+ // special cases
+ if((i&0xfffff000) == 0xe59fb000) {
+ // load r11 from pc-relative address.
+ // might be part of a floating point move
+ // (or might not, but no harm in simulating
+ // one instruction too many).
+ addr = (uint32*)((uint8*)pc + (i&0xfff) + 8);
+ regs[11] = addr[0];
+
+ if(trace)
+ runtime·printf("*** cpu R[%d] = *(%p) %x\n",
+ 11, addr, regs[11]);
+ return 1;
+ }
+ if(i == 0xe08bb00d) {
+ // add sp to r11.
+ // might be part of a large stack offset address
+ // (or might not, but again no harm done).
+ regs[11] += regs[13];
+
+ if(trace)
+ runtime·printf("*** cpu R[%d] += R[%d] %x\n",
+ 11, 13, regs[11]);
+ return 1;
+ }
+ if(i == 0xeef1fa10) {
+ regs[CPSR] = (regs[CPSR]&0x0fffffff) | m->fflag;
+
+ if(trace)
+ runtime·printf("*** fpsr R[CPSR] = F[CPSR] %x\n", regs[CPSR]);
+ return 1;
+ }
+ if((i&0xff000000) == 0xea000000) {
+ // unconditional branch
+ // can happen in the middle of floating point
+ // if the linker decides it is time to lay down
+ // a sequence of instruction stream constants.
+ delta = i&0xffffff;
+ delta = (delta<<8) >> 8; // sign extend
+
+ if(trace)
+ runtime·printf("*** cpu PC += %x\n", (delta+2)*4);
+ return delta+2;
+ }
+
+ goto stage1;
+
+stage1: // load/store regn is cpureg, regm is 8bit offset
+ regd = i>>12 & 0xf;
+ regn = i>>16 & 0xf;
+ regm = (i & 0xff) << 2; // PLUS or MINUS ??
+
+ switch(i & 0xfff00f00) {
+ default:
+ goto stage2;
+
+ case 0xed900a00: // single load
+ addr = (uint32*)(regs[regn] + regm);
+ m->freglo[regd] = addr[0];
+
+ if(trace)
+ runtime·printf("*** load F[%d] = %x\n",
+ regd, m->freglo[regd]);
+ break;
+
+ case 0xed900b00: // double load
+ addr = (uint32*)(regs[regn] + regm);
+ m->freglo[regd] = addr[0];
+ m->freghi[regd] = addr[1];
+
+ if(trace)
+ runtime·printf("*** load D[%d] = %x-%x\n",
+ regd, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xed800a00: // single store
+ addr = (uint32*)(regs[regn] + regm);
+ addr[0] = m->freglo[regd];
+
+ if(trace)
+ runtime·printf("*** *(%p) = %x\n",
+ addr, addr[0]);
+ break;
+
+ case 0xed800b00: // double store
+ addr = (uint32*)(regs[regn] + regm);
+ addr[0] = m->freglo[regd];
+ addr[1] = m->freghi[regd];
+
+ if(trace)
+ runtime·printf("*** *(%p) = %x-%x\n",
+ addr, addr[1], addr[0]);
+ break;
+ }
+ return 1;
+
+stage2: // regd, regm, regn are 4bit variables
+ regm = i>>0 & 0xf;
+ switch(i & 0xfff00ff0) {
+ default:
+ goto stage3;
+
+ case 0xf3000110: // veor
+ m->freglo[regd] = m->freglo[regm]^m->freglo[regn];
+ m->freghi[regd] = m->freghi[regm]^m->freghi[regn];
+
+ if(trace)
+ runtime·printf("*** veor D[%d] = %x-%x\n",
+ regd, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeeb00b00: // D[regd] = const(regn,regm)
+ regn = (regn<<4) | regm;
+ regm = 0x40000000UL;
+ if(regn & 0x80)
+ regm |= 0x80000000UL;
+ if(regn & 0x40)
+ regm ^= 0x7fc00000UL;
+ regm |= (regn & 0x3f) << 16;
+ m->freglo[regd] = 0;
+ m->freghi[regd] = regm;
+
+ if(trace)
+ runtime·printf("*** immed D[%d] = %x-%x\n",
+ regd, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeeb00a00: // F[regd] = const(regn,regm)
+ regn = (regn<<4) | regm;
+ regm = 0x40000000UL;
+ if(regn & 0x80)
+ regm |= 0x80000000UL;
+ if(regn & 0x40)
+ regm ^= 0x7e000000UL;
+ regm |= (regn & 0x3f) << 19;
+ m->freglo[regd] = regm;
+
+ if(trace)
+ runtime·printf("*** immed D[%d] = %x\n",
+ regd, m->freglo[regd]);
+ break;
+
+ case 0xee300b00: // D[regd] = D[regn]+D[regm]
+ runtime·fadd64c(getd(regn), getd(regm), &uval);
+ putd(regd, uval);
+
+ if(trace)
+ runtime·printf("*** add D[%d] = D[%d]+D[%d] %x-%x\n",
+ regd, regn, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xee300a00: // F[regd] = F[regn]+F[regm]
+ runtime·fadd64c(f2d(m->freglo[regn]), f2d(m->freglo[regm]), &uval);
+ m->freglo[regd] = d2f(uval);
+
+ if(trace)
+ runtime·printf("*** add F[%d] = F[%d]+F[%d] %x\n",
+ regd, regn, regm, m->freglo[regd]);
+ break;
+
+ case 0xee300b40: // D[regd] = D[regn]-D[regm]
+ runtime·fsub64c(getd(regn), getd(regm), &uval);
+ putd(regd, uval);
+
+ if(trace)
+ runtime·printf("*** sub D[%d] = D[%d]-D[%d] %x-%x\n",
+ regd, regn, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xee300a40: // F[regd] = F[regn]-F[regm]
+ runtime·fsub64c(f2d(m->freglo[regn]), f2d(m->freglo[regm]), &uval);
+ m->freglo[regd] = d2f(uval);
+
+ if(trace)
+ runtime·printf("*** sub F[%d] = F[%d]-F[%d] %x\n",
+ regd, regn, regm, m->freglo[regd]);
+ break;
+
+ case 0xee200b00: // D[regd] = D[regn]*D[regm]
+ runtime·fmul64c(getd(regn), getd(regm), &uval);
+ putd(regd, uval);
+
+ if(trace)
+ runtime·printf("*** mul D[%d] = D[%d]*D[%d] %x-%x\n",
+ regd, regn, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xee200a00: // F[regd] = F[regn]*F[regm]
+ runtime·fmul64c(f2d(m->freglo[regn]), f2d(m->freglo[regm]), &uval);
+ m->freglo[regd] = d2f(uval);
+
+ if(trace)
+ runtime·printf("*** mul F[%d] = F[%d]*F[%d] %x\n",
+ regd, regn, regm, m->freglo[regd]);
+ break;
+
+ case 0xee800b00: // D[regd] = D[regn]/D[regm]
+ runtime·fdiv64c(getd(regn), getd(regm), &uval);
+ putd(regd, uval);
+
+ if(trace)
+ runtime·printf("*** div D[%d] = D[%d]/D[%d] %x-%x\n",
+ regd, regn, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xee800a00: // F[regd] = F[regn]/F[regm]
+ runtime·fdiv64c(f2d(m->freglo[regn]), f2d(m->freglo[regm]), &uval);
+ m->freglo[regd] = d2f(uval);
+
+ if(trace)
+ runtime·printf("*** div F[%d] = F[%d]/F[%d] %x\n",
+ regd, regn, regm, m->freglo[regd]);
+ break;
+
+ case 0xee000b10: // S[regn] = R[regd] (MOVW) (regm ignored)
+ m->freglo[regn] = regs[regd];
+
+ if(trace)
+ runtime·printf("*** cpy S[%d] = R[%d] %x\n",
+ regn, regd, m->freglo[regn]);
+ break;
+
+ case 0xee100b10: // R[regd] = S[regn] (MOVW) (regm ignored)
+ regs[regd] = m->freglo[regn];
+
+ if(trace)
+ runtime·printf("*** cpy R[%d] = S[%d] %x\n",
+ regd, regn, regs[regd]);
+ break;
+ }
+ return 1;
+
+stage3: // regd, regm are 4bit variables
+ switch(i & 0xffff0ff0) {
+ default:
+ goto done;
+
+ case 0xeeb00a40: // F[regd] = F[regm] (MOVF)
+ m->freglo[regd] = m->freglo[regm];
+
+ if(trace)
+ runtime·printf("*** F[%d] = F[%d] %x\n",
+ regd, regm, m->freglo[regd]);
+ break;
+
+ case 0xeeb00b40: // D[regd] = D[regm] (MOVD)
+ m->freglo[regd] = m->freglo[regm];
+ m->freghi[regd] = m->freghi[regm];
+
+ if(trace)
+ runtime·printf("*** D[%d] = D[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeeb10bc0: // D[regd] = sqrt D[regm]
+ math·sqrtGoC(getd(regm), &uval);
+ putd(regd, uval);
+
+ if(trace)
+ runtime·printf("*** D[%d] = sqrt D[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeeb40bc0: // D[regd] :: D[regm] (CMPD)
+ runtime·fcmp64c(getd(regd), getd(regm), &cmp, &nan);
+ m->fflag = fstatus(nan, cmp);
+
+ if(trace)
+ runtime·printf("*** cmp D[%d]::D[%d] %x\n",
+ regd, regm, m->fflag);
+ break;
+
+ case 0xeeb40ac0: // F[regd] :: F[regm] (CMPF)
+ runtime·fcmp64c(f2d(m->freglo[regd]), f2d(m->freglo[regm]), &cmp, &nan);
+ m->fflag = fstatus(nan, cmp);
+
+ if(trace)
+ runtime·printf("*** cmp F[%d]::F[%d] %x\n",
+ regd, regm, m->fflag);
+ break;
+
+ case 0xeeb70ac0: // D[regd] = F[regm] (MOVFD)
+ putd(regd, f2d(m->freglo[regm]));
+
+ if(trace)
+ runtime·printf("*** f2d D[%d]=F[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeeb70bc0: // F[regd] = D[regm] (MOVDF)
+ m->freglo[regd] = d2f(getd(regm));
+
+ if(trace)
+ runtime·printf("*** d2f F[%d]=D[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeebd0ac0: // S[regd] = F[regm] (MOVFW)
+ runtime·f64tointc(f2d(m->freglo[regm]), &sval, &ok);
+ if(!ok || (int32)sval != sval)
+ sval = 0;
+ m->freglo[regd] = sval;
+
+ if(trace)
+ runtime·printf("*** fix S[%d]=F[%d] %x\n",
+ regd, regm, m->freglo[regd]);
+ break;
+
+ case 0xeebc0ac0: // S[regd] = F[regm] (MOVFW.U)
+ runtime·f64tointc(f2d(m->freglo[regm]), &sval, &ok);
+ if(!ok || (uint32)sval != sval)
+ sval = 0;
+ m->freglo[regd] = sval;
+
+ if(trace)
+ runtime·printf("*** fix unsigned S[%d]=F[%d] %x\n",
+ regd, regm, m->freglo[regd]);
+ break;
+
+ case 0xeebd0bc0: // S[regd] = D[regm] (MOVDW)
+ runtime·f64tointc(getd(regm), &sval, &ok);
+ if(!ok || (int32)sval != sval)
+ sval = 0;
+ m->freglo[regd] = sval;
+
+ if(trace)
+ runtime·printf("*** fix S[%d]=D[%d] %x\n",
+ regd, regm, m->freglo[regd]);
+ break;
+
+ case 0xeebc0bc0: // S[regd] = D[regm] (MOVDW.U)
+ runtime·f64tointc(getd(regm), &sval, &ok);
+ if(!ok || (uint32)sval != sval)
+ sval = 0;
+ m->freglo[regd] = sval;
+
+ if(trace)
+ runtime·printf("*** fix unsigned S[%d]=D[%d] %x\n",
+ regd, regm, m->freglo[regd]);
+ break;
+
+ case 0xeeb80ac0: // D[regd] = S[regm] (MOVWF)
+ cmp = m->freglo[regm];
+ if(cmp < 0) {
+ runtime·fintto64c(-cmp, &uval);
+ putf(regd, d2f(uval));
+ m->freglo[regd] ^= 0x80000000;
+ } else {
+ runtime·fintto64c(cmp, &uval);
+ putf(regd, d2f(uval));
+ }
+
+ if(trace)
+ runtime·printf("*** float D[%d]=S[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeeb80a40: // D[regd] = S[regm] (MOVWF.U)
+ runtime·fintto64c(m->freglo[regm], &uval);
+ putf(regd, d2f(uval));
+
+ if(trace)
+ runtime·printf("*** float unsigned D[%d]=S[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeeb80bc0: // D[regd] = S[regm] (MOVWD)
+ cmp = m->freglo[regm];
+ if(cmp < 0) {
+ runtime·fintto64c(-cmp, &uval);
+ putd(regd, uval);
+ m->freghi[regd] ^= 0x80000000;
+ } else {
+ runtime·fintto64c(cmp, &uval);
+ putd(regd, uval);
+ }
+
+ if(trace)
+ runtime·printf("*** float D[%d]=S[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+
+ case 0xeeb80b40: // D[regd] = S[regm] (MOVWD.U)
+ runtime·fintto64c(m->freglo[regm], &uval);
+ putd(regd, uval);
+
+ if(trace)
+ runtime·printf("*** float unsigned D[%d]=S[%d] %x-%x\n",
+ regd, regm, m->freghi[regd], m->freglo[regd]);
+ break;
+ }
+ return 1;
+
+done:
+ if((i&0xff000000) == 0xee000000 ||
+ (i&0xff000000) == 0xed000000) {
+ runtime·printf("stepflt %p %x\n", pc, i);
+ fabort();
+ }
+ return 0;
+}
+
+#pragma textflag 7
+uint32*
+runtime·_sfloat2(uint32 *lr, uint32 r0)
+{
+ uint32 skip;
+
+ skip = stepflt(lr, &r0);
+ if(skip == 0) {
+ runtime·printf("sfloat2 %p %x\n", lr, *lr);
+ fabort(); // not ok to fail first instruction
+ }
+
+ lr += skip;
+ while(skip = stepflt(lr, &r0))
+ lr += skip;
+ return lr;
+}
diff --git a/src/pkg/runtime/arm/traceback.c b/src/pkg/runtime/arm/traceback.c
new file mode 100644
index 000000000..5628b8349
--- /dev/null
+++ b/src/pkg/runtime/arm/traceback.c
@@ -0,0 +1,213 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "malloc.h"
+
+void runtime·deferproc(void);
+void runtime·newproc(void);
+void runtime·newstack(void);
+void runtime·morestack(void);
+void runtime·sigpanic(void);
+void _div(void);
+void _mod(void);
+void _divu(void);
+void _modu(void);
+
+int32
+runtime·gentraceback(byte *pc0, byte *sp, byte *lr0, G *g, int32 skip, uintptr *pcbuf, int32 max)
+{
+ int32 i, n, iter;
+ uintptr pc, lr, tracepc, x;
+ byte *fp, *p;
+ bool waspanic;
+ Stktop *stk;
+ Func *f;
+
+ pc = (uintptr)pc0;
+ lr = (uintptr)lr0;
+ fp = nil;
+ waspanic = false;
+
+ // If the PC is goexit, the goroutine hasn't started yet.
+ if(pc == (uintptr)runtime·goexit) {
+ pc = (uintptr)g->entry;
+ lr = (uintptr)runtime·goexit;
+ }
+
+ // If the PC is zero, it's likely a nil function call.
+ // Start in the caller's frame.
+ if(pc == 0) {
+ pc = lr;
+ lr = 0;
+ }
+
+ n = 0;
+ stk = (Stktop*)g->stackbase;
+ for(iter = 0; iter < 100 && n < max; iter++) { // iter avoids looping forever
+ // Typically:
+ // pc is the PC of the running function.
+ // sp is the stack pointer at that program counter.
+ // fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
+ // stk is the stack containing sp.
+ // The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
+
+ if(pc == (uintptr)runtime·lessstack) {
+ // Hit top of stack segment. Unwind to next segment.
+ pc = (uintptr)stk->gobuf.pc;
+ sp = stk->gobuf.sp;
+ lr = 0;
+ fp = nil;
+ if(pcbuf == nil)
+ runtime·printf("----- stack segment boundary -----\n");
+ stk = (Stktop*)stk->stackbase;
+ continue;
+ }
+
+ if(pc <= 0x1000 || (f = runtime·findfunc(pc)) == nil) {
+ // Dangerous, but worthwhile: see if this is a closure by
+ // decoding the instruction stream.
+ //
+ // We check p < p+4 to avoid wrapping and faulting if
+ // we have lost track of where we are.
+ p = (byte*)pc;
+ if((pc&3) == 0 && p < p+4 &&
+ runtime·mheap.arena_start < p &&
+ p+4 < runtime·mheap.arena_used) {
+ x = *(uintptr*)p;
+ if((x&0xfffff000) == 0xe49df000) {
+ // End of closure:
+ // MOVW.P frame(R13), R15
+ pc = *(uintptr*)sp;
+ lr = 0;
+ sp += x & 0xfff;
+ fp = nil;
+ continue;
+ }
+ if((x&0xfffff000) == 0xe52de000 && lr == (uintptr)runtime·goexit) {
+ // Beginning of closure.
+ // Closure at top of stack, not yet started.
+ p += 5*4;
+ if((x&0xfff) != 4) {
+ // argument copying
+ p += 7*4;
+ }
+ if((byte*)pc < p && p < p+4 && p+4 < runtime·mheap.arena_used) {
+ pc = *(uintptr*)p;
+ fp = nil;
+ continue;
+ }
+ }
+ }
+ break;
+ }
+
+ // Found an actual function.
+ if(lr == 0)
+ lr = *(uintptr*)sp;
+ if(fp == nil) {
+ fp = sp;
+ if(pc > f->entry && f->frame >= 0)
+ fp += f->frame;
+ }
+
+ if(skip > 0)
+ skip--;
+ else if(pcbuf != nil)
+ pcbuf[n++] = pc;
+ else {
+ // Print during crash.
+ // main+0xf /home/rsc/go/src/runtime/x.go:23
+ // main(0x1, 0x2, 0x3)
+ runtime·printf("[%p] %S", fp, f->name);
+ if(pc > f->entry)
+ runtime·printf("+%p", (uintptr)(pc - f->entry));
+ tracepc = pc; // back up to CALL instruction for funcline.
+ if(n > 0 && pc > f->entry && !waspanic)
+ tracepc -= sizeof(uintptr);
+ runtime·printf(" %S:%d\n", f->src, runtime·funcline(f, tracepc));
+ runtime·printf("\t%S(", f->name);
+ for(i = 0; i < f->args; i++) {
+ if(i != 0)
+ runtime·prints(", ");
+ runtime·printhex(((uintptr*)fp)[1+i]);
+ if(i >= 4) {
+ runtime·prints(", ...");
+ break;
+ }
+ }
+ runtime·prints(")\n");
+ n++;
+ }
+
+ waspanic = f->entry == (uintptr)runtime·sigpanic;
+
+ if(pcbuf == nil && f->entry == (uintptr)runtime·newstack && g == m->g0) {
+ runtime·printf("----- newstack called from goroutine %d -----\n", m->curg->goid);
+ pc = (uintptr)m->morepc;
+ sp = (byte*)m->moreargp - sizeof(void*);
+ lr = (uintptr)m->morebuf.pc;
+ fp = m->morebuf.sp;
+ g = m->curg;
+ stk = (Stktop*)g->stackbase;
+ continue;
+ }
+
+ if(pcbuf == nil && f->entry == (uintptr)runtime·lessstack && g == m->g0) {
+ runtime·printf("----- lessstack called from goroutine %d -----\n", m->curg->goid);
+ g = m->curg;
+ stk = (Stktop*)g->stackbase;
+ sp = stk->gobuf.sp;
+ pc = (uintptr)stk->gobuf.pc;
+ fp = nil;
+ lr = 0;
+ continue;
+ }
+
+ // Unwind to next frame.
+ pc = lr;
+ lr = 0;
+ sp = fp;
+ fp = nil;
+
+ // If this was div or divu or mod or modu, the caller had
+ // an extra 8 bytes on its stack. Adjust sp.
+ if(f->entry == (uintptr)_div || f->entry == (uintptr)_divu || f->entry == (uintptr)_mod || f->entry == (uintptr)_modu)
+ sp += 8;
+
+ // If this was deferproc or newproc, the caller had an extra 12.
+ if(f->entry == (uintptr)runtime·deferproc || f->entry == (uintptr)runtime·newproc)
+ sp += 12;
+ }
+
+ if(pcbuf == nil && (pc = g->gopc) != 0 && (f = runtime·findfunc(pc)) != nil) {
+ runtime·printf("----- goroutine created by -----\n%S", f->name);
+ if(pc > f->entry)
+ runtime·printf("+%p", (uintptr)(pc - f->entry));
+ tracepc = pc; // back up to CALL instruction for funcline.
+ if(n > 0 && pc > f->entry)
+ tracepc -= sizeof(uintptr);
+ runtime·printf(" %S:%d\n", f->src, runtime·funcline(f, tracepc));
+ }
+
+ return n;
+}
+
+void
+runtime·traceback(byte *pc0, byte *sp, byte *lr, G *g)
+{
+ runtime·gentraceback(pc0, sp, lr, g, 0, nil, 100);
+}
+
+// func caller(n int) (pc uintptr, file string, line int, ok bool)
+int32
+runtime·callers(int32 skip, uintptr *pcbuf, int32 m)
+{
+ byte *pc, *sp;
+
+ sp = runtime·getcallersp(&skip);
+ pc = runtime·getcallerpc(&skip);
+
+ return runtime·gentraceback(pc, sp, 0, g, skip, pcbuf, m);
+}
diff --git a/src/pkg/runtime/arm/vlop.s b/src/pkg/runtime/arm/vlop.s
new file mode 100644
index 000000000..fc679f0ee
--- /dev/null
+++ b/src/pkg/runtime/arm/vlop.s
@@ -0,0 +1,190 @@
+// Inferno's libkern/vlop-arm.s
+// http://code.google.com/p/inferno-os/source/browse/libkern/vlop-arm.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+#define UMULL(Rs,Rm,Rhi,Rlo,S) WORD $((14<<28)|(4<<21)|(S<<20)|(Rhi<<16)|(Rlo<<12)|(Rs<<8)|(9<<4)|Rm)
+#define UMLAL(Rs,Rm,Rhi,Rlo,S) WORD $((14<<28)|(5<<21)|(S<<20)|(Rhi<<16)|(Rlo<<12)|(Rs<<8)|(9<<4)|Rm)
+#define MUL(Rs,Rm,Rd,S) WORD $((14<<28)|(0<<21)|(S<<20)|(Rd<<16)|(Rs<<8)|(9<<4)|Rm)
+arg=0
+
+/* replaced use of R10 by R11 because the former can be the data segment base register */
+
+TEXT _mulv(SB), $0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R2 /* l0 */
+ MOVW 8(FP), R11 /* h0 */
+ MOVW 12(FP), R4 /* l1 */
+ MOVW 16(FP), R5 /* h1 */
+ UMULL(4, 2, 7, 6, 0)
+ MUL(11, 4, 8, 0)
+ ADD R8, R7
+ MUL(2, 5, 8, 0)
+ ADD R8, R7
+ MOVW R6, 0(R(arg))
+ MOVW R7, 4(R(arg))
+ RET
+
+
+Q = 0
+N = 1
+D = 2
+CC = 3
+TMP = 11
+
+TEXT save<>(SB), 7, $0
+ MOVW R(Q), 0(FP)
+ MOVW R(N), 4(FP)
+ MOVW R(D), 8(FP)
+ MOVW R(CC), 12(FP)
+
+ MOVW R(TMP), R(Q) /* numerator */
+ MOVW 20(FP), R(D) /* denominator */
+ CMP $0, R(D)
+ BNE s1
+ BL runtime·panicdivide(SB)
+/* MOVW -1(R(D)), R(TMP) /* divide by zero fault */
+s1: RET
+
+TEXT rest<>(SB), 7, $0
+ MOVW 0(FP), R(Q)
+ MOVW 4(FP), R(N)
+ MOVW 8(FP), R(D)
+ MOVW 12(FP), R(CC)
+/*
+ * return to caller
+ * of rest<>
+ */
+ MOVW 0(R13), R14
+ ADD $20, R13
+ B (R14)
+
+TEXT div<>(SB), 7, $0
+ MOVW $32, R(CC)
+/*
+ * skip zeros 8-at-a-time
+ */
+e1:
+ AND.S $(0xff<<24),R(Q), R(N)
+ BNE e2
+ SLL $8, R(Q)
+ SUB.S $8, R(CC)
+ BNE e1
+ RET
+e2:
+ MOVW $0, R(N)
+
+loop:
+/*
+ * shift R(N||Q) left one
+ */
+ SLL $1, R(N)
+ CMP $0, R(Q)
+ ORR.LT $1, R(N)
+ SLL $1, R(Q)
+
+/*
+ * compare numerator to denominator
+ * if less, subtract and set quotient bit
+ */
+ CMP R(D), R(N)
+ ORR.HS $1, R(Q)
+ SUB.HS R(D), R(N)
+ SUB.S $1, R(CC)
+ BNE loop
+ RET
+
+TEXT _div(SB), 7, $16
+ BL save<>(SB)
+ CMP $0, R(Q)
+ BGE d1
+ RSB $0, R(Q), R(Q)
+ CMP $0, R(D)
+ BGE d2
+ RSB $0, R(D), R(D)
+d0:
+ BL div<>(SB) /* none/both neg */
+ MOVW R(Q), R(TMP)
+ B out
+d1:
+ CMP $0, R(D)
+ BGE d0
+ RSB $0, R(D), R(D)
+d2:
+ BL div<>(SB) /* one neg */
+ RSB $0, R(Q), R(TMP)
+ B out
+
+TEXT _mod(SB), 7, $16
+ BL save<>(SB)
+ CMP $0, R(D)
+ RSB.LT $0, R(D), R(D)
+ CMP $0, R(Q)
+ BGE m1
+ RSB $0, R(Q), R(Q)
+ BL div<>(SB) /* neg numerator */
+ RSB $0, R(N), R(TMP)
+ B out
+m1:
+ BL div<>(SB) /* pos numerator */
+ MOVW R(N), R(TMP)
+ B out
+
+TEXT _divu(SB), 7, $16
+ BL save<>(SB)
+ BL div<>(SB)
+ MOVW R(Q), R(TMP)
+ B out
+
+TEXT _modu(SB), 7, $16
+ BL save<>(SB)
+ BL div<>(SB)
+ MOVW R(N), R(TMP)
+ B out
+
+out:
+ BL rest<>(SB)
+ B out
+
+// trampoline for _sfloat2. passes LR as arg0 and
+// saves registers R0-R13 and CPSR on the stack. R0-R12 and CPSR flags can
+// be changed by _sfloat2.
+TEXT _sfloat(SB), 7, $64 // 4 arg + 14*4 saved regs + cpsr
+ MOVW R14, 4(R13)
+ MOVW R0, 8(R13)
+ MOVW $12(R13), R0
+ MOVM.IA.W [R1-R12], (R0)
+ MOVW $68(R13), R1 // correct for frame size
+ MOVW R1, 60(R13)
+ WORD $0xe10f1000 // mrs r1, cpsr
+ MOVW R1, 64(R13)
+ BL runtime·_sfloat2(SB)
+ MOVW R0, 0(R13)
+ MOVW 64(R13), R1
+ WORD $0xe128f001 // msr cpsr_f, r1
+ MOVW $12(R13), R0
+ MOVM.IA.W (R0), [R1-R12]
+ MOVW 8(R13), R0
+ RET
+
+
diff --git a/src/pkg/runtime/arm/vlrt.c b/src/pkg/runtime/arm/vlrt.c
new file mode 100644
index 000000000..50f33710b
--- /dev/null
+++ b/src/pkg/runtime/arm/vlrt.c
@@ -0,0 +1,816 @@
+// Inferno's libkern/vlrt-arm.c
+// http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-arm.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// declared here to avoid include of runtime.h
+void runtime·panicstring(char*);
+
+typedef unsigned long ulong;
+typedef unsigned int uint;
+typedef unsigned short ushort;
+typedef unsigned char uchar;
+typedef signed char schar;
+
+#define SIGN(n) (1UL<<(n-1))
+
+void
+runtime·panicdivide(void)
+{
+ runtime·panicstring("integer divide by zero");
+}
+
+typedef struct Vlong Vlong;
+struct Vlong
+{
+ union
+ {
+ struct
+ {
+ ulong lo;
+ ulong hi;
+ };
+ struct
+ {
+ ushort lols;
+ ushort loms;
+ ushort hils;
+ ushort hims;
+ };
+ };
+};
+
+void runtime·abort(void);
+
+void
+_addv(Vlong *r, Vlong a, Vlong b)
+{
+ ulong lo, hi;
+
+ lo = a.lo + b.lo;
+ hi = a.hi + b.hi;
+ if(lo < a.lo)
+ hi++;
+ r->lo = lo;
+ r->hi = hi;
+}
+
+void
+_subv(Vlong *r, Vlong a, Vlong b)
+{
+ ulong lo, hi;
+
+ lo = a.lo - b.lo;
+ hi = a.hi - b.hi;
+ if(lo > a.lo)
+ hi--;
+ r->lo = lo;
+ r->hi = hi;
+}
+
+void
+_d2v(Vlong *y, double d)
+{
+ union { double d; struct Vlong; } x;
+ ulong xhi, xlo, ylo, yhi;
+ int sh;
+
+ x.d = d;
+
+ xhi = (x.hi & 0xfffff) | 0x100000;
+ xlo = x.lo;
+ sh = 1075 - ((x.hi >> 20) & 0x7ff);
+
+ ylo = 0;
+ yhi = 0;
+ if(sh >= 0) {
+ /* v = (hi||lo) >> sh */
+ if(sh < 32) {
+ if(sh == 0) {
+ ylo = xlo;
+ yhi = xhi;
+ } else {
+ ylo = (xlo >> sh) | (xhi << (32-sh));
+ yhi = xhi >> sh;
+ }
+ } else {
+ if(sh == 32) {
+ ylo = xhi;
+ } else
+ if(sh < 64) {
+ ylo = xhi >> (sh-32);
+ }
+ }
+ } else {
+ /* v = (hi||lo) << -sh */
+ sh = -sh;
+ if(sh <= 11) {
+ ylo = xlo << sh;
+ yhi = (xhi << sh) | (xlo >> (32-sh));
+ } else {
+ /* overflow */
+ yhi = d; /* causes something awful */
+ }
+ }
+ if(x.hi & SIGN(32)) {
+ if(ylo != 0) {
+ ylo = -ylo;
+ yhi = ~yhi;
+ } else
+ yhi = -yhi;
+ }
+
+ y->hi = yhi;
+ y->lo = ylo;
+}
+
+void
+_f2v(Vlong *y, float f)
+{
+ _d2v(y, f);
+}
+
+void
+runtime·float64toint64(double d, Vlong y)
+{
+ _d2v(&y, d);
+}
+
+void
+runtime·float64touint64(double d, Vlong y)
+{
+ _d2v(&y, d);
+}
+
+double
+_ul2d(ulong u)
+{
+ // compensate for bug in c
+ if(u & SIGN(32)) {
+ u ^= SIGN(32);
+ return 2147483648. + u;
+ }
+ return u;
+}
+
+double
+_v2d(Vlong x)
+{
+ if(x.hi & SIGN(32)) {
+ if(x.lo) {
+ x.lo = -x.lo;
+ x.hi = ~x.hi;
+ } else
+ x.hi = -x.hi;
+ return -(_ul2d(x.hi)*4294967296. + _ul2d(x.lo));
+ }
+ return x.hi*4294967296. + _ul2d(x.lo);
+}
+
+float
+_v2f(Vlong x)
+{
+ return _v2d(x);
+}
+
+void
+runtime·int64tofloat64(Vlong y, double d)
+{
+ d = _v2d(y);
+}
+
+void
+runtime·uint64tofloat64(Vlong y, double d)
+{
+ d = _ul2d(y.hi)*4294967296. + _ul2d(y.lo);
+}
+
+static void
+dodiv(Vlong num, Vlong den, Vlong *q, Vlong *r)
+{
+ ulong numlo, numhi, denhi, denlo, quohi, quolo, t;
+ int i;
+
+ numhi = num.hi;
+ numlo = num.lo;
+ denhi = den.hi;
+ denlo = den.lo;
+
+ /*
+ * get a divide by zero
+ */
+ if(denlo==0 && denhi==0) {
+ runtime·panicdivide();
+ }
+
+ /*
+ * set up the divisor and find the number of iterations needed
+ */
+ if(numhi >= SIGN(32)) {
+ quohi = SIGN(32);
+ quolo = 0;
+ } else {
+ quohi = numhi;
+ quolo = numlo;
+ }
+ i = 0;
+ while(denhi < quohi || (denhi == quohi && denlo < quolo)) {
+ denhi = (denhi<<1) | (denlo>>31);
+ denlo <<= 1;
+ i++;
+ }
+
+ quohi = 0;
+ quolo = 0;
+ for(; i >= 0; i--) {
+ quohi = (quohi<<1) | (quolo>>31);
+ quolo <<= 1;
+ if(numhi > denhi || (numhi == denhi && numlo >= denlo)) {
+ t = numlo;
+ numlo -= denlo;
+ if(numlo > t)
+ numhi--;
+ numhi -= denhi;
+ quolo |= 1;
+ }
+ denlo = (denlo>>1) | (denhi<<31);
+ denhi >>= 1;
+ }
+
+ if(q) {
+ q->lo = quolo;
+ q->hi = quohi;
+ }
+ if(r) {
+ r->lo = numlo;
+ r->hi = numhi;
+ }
+}
+
+void
+_divvu(Vlong *q, Vlong n, Vlong d)
+{
+
+ if(n.hi == 0 && d.hi == 0) {
+ q->hi = 0;
+ q->lo = n.lo / d.lo;
+ return;
+ }
+ dodiv(n, d, q, 0);
+}
+
+void
+runtime·uint64div(Vlong n, Vlong d, Vlong q)
+{
+ _divvu(&q, n, d);
+}
+
+void
+_modvu(Vlong *r, Vlong n, Vlong d)
+{
+
+ if(n.hi == 0 && d.hi == 0) {
+ r->hi = 0;
+ r->lo = n.lo % d.lo;
+ return;
+ }
+ dodiv(n, d, 0, r);
+}
+
+void
+runtime·uint64mod(Vlong n, Vlong d, Vlong q)
+{
+ _modvu(&q, n, d);
+}
+
+static void
+vneg(Vlong *v)
+{
+
+ if(v->lo == 0) {
+ v->hi = -v->hi;
+ return;
+ }
+ v->lo = -v->lo;
+ v->hi = ~v->hi;
+}
+
+void
+_divv(Vlong *q, Vlong n, Vlong d)
+{
+ long nneg, dneg;
+
+ if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
+ if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
+ // special case: 32-bit -0x80000000 / -1 causes wrong sign
+ q->lo = 0x80000000;
+ q->hi = 0;
+ return;
+ }
+ q->lo = (long)n.lo / (long)d.lo;
+ q->hi = ((long)q->lo) >> 31;
+ return;
+ }
+ nneg = n.hi >> 31;
+ if(nneg)
+ vneg(&n);
+ dneg = d.hi >> 31;
+ if(dneg)
+ vneg(&d);
+ dodiv(n, d, q, 0);
+ if(nneg != dneg)
+ vneg(q);
+}
+
+void
+runtime·int64div(Vlong n, Vlong d, Vlong q)
+{
+ _divv(&q, n, d);
+}
+
+void
+_modv(Vlong *r, Vlong n, Vlong d)
+{
+ long nneg, dneg;
+
+ if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
+ r->lo = (long)n.lo % (long)d.lo;
+ r->hi = ((long)r->lo) >> 31;
+ return;
+ }
+ nneg = n.hi >> 31;
+ if(nneg)
+ vneg(&n);
+ dneg = d.hi >> 31;
+ if(dneg)
+ vneg(&d);
+ dodiv(n, d, 0, r);
+ if(nneg)
+ vneg(r);
+}
+
+void
+runtime·int64mod(Vlong n, Vlong d, Vlong q)
+{
+ _modv(&q, n, d);
+}
+
+void
+_rshav(Vlong *r, Vlong a, int b)
+{
+ long t;
+
+ t = a.hi;
+ if(b >= 32) {
+ r->hi = t>>31;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->lo = t>>31;
+ return;
+ }
+ r->lo = t >> (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->hi = t;
+ r->lo = a.lo;
+ return;
+ }
+ r->hi = t >> b;
+ r->lo = (t << (32-b)) | (a.lo >> b);
+}
+
+void
+_rshlv(Vlong *r, Vlong a, int b)
+{
+ ulong t;
+
+ t = a.hi;
+ if(b >= 32) {
+ r->hi = 0;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->lo = 0;
+ return;
+ }
+ r->lo = t >> (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->hi = t;
+ r->lo = a.lo;
+ return;
+ }
+ r->hi = t >> b;
+ r->lo = (t << (32-b)) | (a.lo >> b);
+}
+
+void
+_lshv(Vlong *r, Vlong a, int b)
+{
+ ulong t;
+
+ t = a.lo;
+ if(b >= 32) {
+ r->lo = 0;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->hi = 0;
+ return;
+ }
+ r->hi = t << (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->lo = t;
+ r->hi = a.hi;
+ return;
+ }
+ r->lo = t << b;
+ r->hi = (t >> (32-b)) | (a.hi << b);
+}
+
+void
+_andv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi & b.hi;
+ r->lo = a.lo & b.lo;
+}
+
+void
+_orv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi | b.hi;
+ r->lo = a.lo | b.lo;
+}
+
+void
+_xorv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi ^ b.hi;
+ r->lo = a.lo ^ b.lo;
+}
+
+void
+_vpp(Vlong *l, Vlong *r)
+{
+
+ l->hi = r->hi;
+ l->lo = r->lo;
+ r->lo++;
+ if(r->lo == 0)
+ r->hi++;
+}
+
+void
+_vmm(Vlong *l, Vlong *r)
+{
+
+ l->hi = r->hi;
+ l->lo = r->lo;
+ if(r->lo == 0)
+ r->hi--;
+ r->lo--;
+}
+
+void
+_ppv(Vlong *l, Vlong *r)
+{
+
+ r->lo++;
+ if(r->lo == 0)
+ r->hi++;
+ l->hi = r->hi;
+ l->lo = r->lo;
+}
+
+void
+_mmv(Vlong *l, Vlong *r)
+{
+
+ if(r->lo == 0)
+ r->hi--;
+ r->lo--;
+ l->hi = r->hi;
+ l->lo = r->lo;
+}
+
+void
+_vasop(Vlong *ret, void *lv, void fn(Vlong*, Vlong, Vlong), int type, Vlong rv)
+{
+ Vlong t, u;
+
+ u = *ret;
+ switch(type) {
+ default:
+ runtime·abort();
+ break;
+
+ case 1: /* schar */
+ t.lo = *(schar*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(schar*)lv = u.lo;
+ break;
+
+ case 2: /* uchar */
+ t.lo = *(uchar*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(uchar*)lv = u.lo;
+ break;
+
+ case 3: /* short */
+ t.lo = *(short*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(short*)lv = u.lo;
+ break;
+
+ case 4: /* ushort */
+ t.lo = *(ushort*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(ushort*)lv = u.lo;
+ break;
+
+ case 9: /* int */
+ t.lo = *(int*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(int*)lv = u.lo;
+ break;
+
+ case 10: /* uint */
+ t.lo = *(uint*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(uint*)lv = u.lo;
+ break;
+
+ case 5: /* long */
+ t.lo = *(long*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(long*)lv = u.lo;
+ break;
+
+ case 6: /* ulong */
+ t.lo = *(ulong*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(ulong*)lv = u.lo;
+ break;
+
+ case 7: /* vlong */
+ case 8: /* uvlong */
+ fn(&u, *(Vlong*)lv, rv);
+ *(Vlong*)lv = u;
+ break;
+ }
+ *ret = u;
+}
+
+void
+_p2v(Vlong *ret, void *p)
+{
+ long t;
+
+ t = (ulong)p;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sl2v(Vlong *ret, long sl)
+{
+ long t;
+
+ t = sl;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_ul2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_si2v(Vlong *ret, int si)
+{
+ long t;
+
+ t = si;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_ui2v(Vlong *ret, uint ui)
+{
+ long t;
+
+ t = ui;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sh2v(Vlong *ret, long sh)
+{
+ long t;
+
+ t = (sh << 16) >> 16;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_uh2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul & 0xffff;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sc2v(Vlong *ret, long uc)
+{
+ long t;
+
+ t = (uc << 24) >> 24;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_uc2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul & 0xff;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+long
+_v2sc(Vlong rv)
+{
+ long t;
+
+ t = rv.lo & 0xff;
+ return (t << 24) >> 24;
+}
+
+long
+_v2uc(Vlong rv)
+{
+
+ return rv.lo & 0xff;
+}
+
+long
+_v2sh(Vlong rv)
+{
+ long t;
+
+ t = rv.lo & 0xffff;
+ return (t << 16) >> 16;
+}
+
+long
+_v2uh(Vlong rv)
+{
+
+ return rv.lo & 0xffff;
+}
+
+long
+_v2sl(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2ul(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2si(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2ui(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+int
+_testv(Vlong rv)
+{
+ return rv.lo || rv.hi;
+}
+
+int
+_eqv(Vlong lv, Vlong rv)
+{
+ return lv.lo == rv.lo && lv.hi == rv.hi;
+}
+
+int
+_nev(Vlong lv, Vlong rv)
+{
+ return lv.lo != rv.lo || lv.hi != rv.hi;
+}
+
+int
+_ltv(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi < (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo < rv.lo);
+}
+
+int
+_lev(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi < (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo <= rv.lo);
+}
+
+int
+_gtv(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi > (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo > rv.lo);
+}
+
+int
+_gev(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi > (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo >= rv.lo);
+}
+
+int
+_lov(Vlong lv, Vlong rv)
+{
+ return lv.hi < rv.hi ||
+ (lv.hi == rv.hi && lv.lo < rv.lo);
+}
+
+int
+_lsv(Vlong lv, Vlong rv)
+{
+ return lv.hi < rv.hi ||
+ (lv.hi == rv.hi && lv.lo <= rv.lo);
+}
+
+int
+_hiv(Vlong lv, Vlong rv)
+{
+ return lv.hi > rv.hi ||
+ (lv.hi == rv.hi && lv.lo > rv.lo);
+}
+
+int
+_hsv(Vlong lv, Vlong rv)
+{
+ return lv.hi > rv.hi ||
+ (lv.hi == rv.hi && lv.lo >= rv.lo);
+}
diff --git a/src/pkg/runtime/cgo/386.S b/src/pkg/runtime/cgo/386.S
new file mode 100755
index 000000000..9abab7ebd
--- /dev/null
+++ b/src/pkg/runtime/cgo/386.S
@@ -0,0 +1,67 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Apple still insists on underscore prefixes for C function names.
+ */
+#if defined(__APPLE__) || defined(_WIN32)
+#define EXT(s) _##s
+#else
+#define EXT(s) s
+#endif
+
+/*
+ * void crosscall_386(void (*fn)(void))
+ *
+ * Calling into the 8c tool chain, where all registers are caller save.
+ * Called from standard x86 ABI, where %ebp, %ebx, %esi,
+ * and %edi are callee-save, so they must be saved explicitly.
+ */
+.globl EXT(crosscall_386)
+EXT(crosscall_386):
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+
+ movl 8(%ebp), %eax /* fn */
+ call *%eax
+
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+
+/*
+ * void crosscall2(void (*fn)(void*, int32), void*, int32)
+ *
+ * Save registers and call fn with two arguments.
+ */
+.globl EXT(crosscall2)
+EXT(crosscall2):
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+
+ pushl 16(%ebp)
+ pushl 12(%ebp)
+ mov 8(%ebp), %eax
+ call *%eax
+ addl $8,%esp
+
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+
+.globl EXT(__stack_chk_fail_local)
+EXT(__stack_chk_fail_local):
+1:
+ jmp 1b
+
diff --git a/src/pkg/runtime/cgo/Makefile b/src/pkg/runtime/cgo/Makefile
new file mode 100644
index 000000000..766794797
--- /dev/null
+++ b/src/pkg/runtime/cgo/Makefile
@@ -0,0 +1,60 @@
+# Copyright 2010 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=runtime/cgo
+
+GOFILES=\
+ cgo.go\
+
+ifeq ($(CGO_ENABLED),1)
+
+# Unwarranted chumminess with Make.pkg's cgo rules.
+# Do not try this at home.
+CGO_OFILES=\
+ $(GOARCH).o\
+ $(GOOS)_$(GOARCH).o\
+ util.o\
+
+ifeq ($(GOOS),windows)
+CGO_LDFLAGS=-lm -mthreads
+else
+CGO_LDFLAGS=-lpthread
+CGO_OFILES+=setenv.o\
+
+endif
+
+OFILES=\
+ iscgo.$O\
+ callbacks.$O\
+ _cgo_import.$O\
+ $(CGO_OFILES)\
+
+ifeq ($(GOOS),freebsd)
+OFILES+=\
+ freebsd.$O\
+
+endif
+
+endif
+
+include ../../../Make.pkg
+
+ifeq ($(CGO_ENABLED),1)
+_cgo_defun.c:
+ echo >$@
+
+_cgo_main.c:
+ echo 'int main() { return 0; }' >$@
+endif
+
+$(GOARCH).o: $(GOARCH).S
+ $(HOST_CC) $(_CGO_CFLAGS_$(GOARCH)) -g -O2 -fPIC -o $@ -c $^
+
+$(GOOS)_$(GOARCH).o: $(GOOS)_$(GOARCH).c
+ $(HOST_CC) $(_CGO_CFLAGS_$(GOARCH)) -g -O2 -fPIC -o $@ -c $^
+
+%.o: %.c
+ $(HOST_CC) $(_CGO_CFLAGS_$(GOARCH)) -g -O2 -fPIC -o $@ -c $^
diff --git a/src/pkg/runtime/cgo/amd64.S b/src/pkg/runtime/cgo/amd64.S
new file mode 100644
index 000000000..083c2bc94
--- /dev/null
+++ b/src/pkg/runtime/cgo/amd64.S
@@ -0,0 +1,73 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Apple still insists on underscore prefixes for C function names.
+ */
+#if defined(__APPLE__) || defined(_WIN32)
+#define EXT(s) _##s
+#else
+#define EXT(s) s
+#endif
+
+/*
+ * void crosscall_amd64(void (*fn)(void))
+ *
+ * Calling into the 6c tool chain, where all registers are caller save.
+ * Called from standard x86-64 ABI, where %rbx, %rbp, %r12-%r15
+ * are callee-save so they must be saved explicitly.
+ * The standard x86-64 ABI passes the three arguments m, g, fn
+ * in %rdi, %rsi, %rdx.
+ *
+ * Also need to set %r15 to g and %r14 to m (see ../pkg/runtime/mkasmh.sh)
+ * during the call.
+ */
+.globl EXT(crosscall_amd64)
+EXT(crosscall_amd64):
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ call *%rdi /* fn */
+
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbp
+ popq %rbx
+ ret
+
+/*
+ * void crosscall2(void (*fn)(void*, int32), void *arg, int32 argsize)
+ *
+ * Save registers and call fn with two arguments. fn is a Go function
+ * which takes parameters on the stack rather than in registers.
+ */
+.globl EXT(crosscall2)
+EXT(crosscall2):
+ subq $0x58, %rsp /* keeps stack pointer 32-byte aligned */
+ movq %rbx, 0x10(%rsp)
+ movq %rbp, 0x18(%rsp)
+ movq %r12, 0x20(%rsp)
+ movq %r13, 0x28(%rsp)
+ movq %r14, 0x30(%rsp)
+ movq %r15, 0x38(%rsp)
+
+ movq %rsi, 0(%rsp) /* arg */
+ movq %rdx, 8(%rsp) /* argsize (includes padding) */
+
+ call *%rdi /* fn */
+
+ movq 0x10(%rsp), %rbx
+ movq 0x18(%rsp), %rbp
+ movq 0x20(%rsp), %r12
+ movq 0x28(%rsp), %r13
+ movq 0x30(%rsp), %r14
+ movq 0x38(%rsp), %r15
+ addq $0x58, %rsp
+ ret
diff --git a/src/pkg/runtime/cgo/arm.S b/src/pkg/runtime/cgo/arm.S
new file mode 100644
index 000000000..32d862984
--- /dev/null
+++ b/src/pkg/runtime/cgo/arm.S
@@ -0,0 +1 @@
+/* unimplemented */
diff --git a/src/pkg/runtime/cgo/callbacks.c b/src/pkg/runtime/cgo/callbacks.c
new file mode 100644
index 000000000..f36fb3fd7
--- /dev/null
+++ b/src/pkg/runtime/cgo/callbacks.c
@@ -0,0 +1,73 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../runtime.h"
+#include "../cgocall.h"
+
+// These utility functions are available to be called from code
+// compiled with gcc via crosscall2.
+
+// The declaration of crosscall2 is:
+// void crosscall2(void (*fn)(void *, int), void *, int);
+//
+// We need to export the symbol crosscall2 in order to support
+// callbacks from shared libraries.
+#pragma dynexport crosscall2 crosscall2
+
+// Allocate memory. This allocates the requested number of bytes in
+// memory controlled by the Go runtime. The allocated memory will be
+// zeroed. You are responsible for ensuring that the Go garbage
+// collector can see a pointer to the allocated memory for as long as
+// it is valid, e.g., by storing a pointer in a local variable in your
+// C function, or in memory allocated by the Go runtime. If the only
+// pointers are in a C global variable or in memory allocated via
+// malloc, then the Go garbage collector may collect the memory.
+
+// Call like this in code compiled with gcc:
+// struct { size_t len; void *ret; } a;
+// a.len = /* number of bytes to allocate */;
+// crosscall2(_cgo_allocate, &a, sizeof a);
+// /* Here a.ret is a pointer to the allocated memory. */
+
+static void
+_cgo_allocate_internal(uintptr len, byte *ret)
+{
+ ret = runtime·mal(len);
+ FLUSH(&ret);
+}
+
+#pragma dynexport _cgo_allocate _cgo_allocate
+void
+_cgo_allocate(void *a, int32 n)
+{
+ runtime·cgocallback((void(*)(void))_cgo_allocate_internal, a, n);
+}
+
+// Panic. The argument is converted into a Go string.
+
+// Call like this in code compiled with gcc:
+// struct { const char *p; } a;
+// a.p = /* string to pass to panic */;
+// crosscall2(_cgo_panic, &a, sizeof a);
+// /* The function call will not return. */
+
+extern void ·cgoStringToEface(String, Eface*);
+
+static void
+_cgo_panic_internal(byte *p)
+{
+ String s;
+ Eface err;
+
+ s = runtime·gostring(p);
+ ·cgoStringToEface(s, &err);
+ runtime·panic(err);
+}
+
+#pragma dynexport _cgo_panic _cgo_panic
+void
+_cgo_panic(void *a, int32 n)
+{
+ runtime·cgocallback((void(*)(void))_cgo_panic_internal, a, n);
+}
diff --git a/src/pkg/runtime/cgo/cgo.go b/src/pkg/runtime/cgo/cgo.go
new file mode 100644
index 000000000..5dcced1e4
--- /dev/null
+++ b/src/pkg/runtime/cgo/cgo.go
@@ -0,0 +1,17 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package cgo contains runtime support for code generated
+by the cgo tool. See the documentation for the cgo command
+for details on using cgo.
+*/
+package cgo
+
+// Supports _cgo_panic by converting a string constant to an empty
+// interface.
+
+func cgoStringToEface(s string, ret *interface{}) {
+ *ret = s
+}
diff --git a/src/pkg/runtime/cgo/darwin_386.c b/src/pkg/runtime/cgo/darwin_386.c
new file mode 100644
index 000000000..6d4e259be
--- /dev/null
+++ b/src/pkg/runtime/cgo/darwin_386.c
@@ -0,0 +1,149 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <pthread.h>
+#include "libcgo.h"
+
+static void* threadentry(void*);
+static pthread_key_t k1, k2;
+
+#define magic1 (0x23581321U)
+
+static void
+inittls(void)
+{
+ uint32 x, y;
+ pthread_key_t tofree[128], k;
+ int i, ntofree;
+ int havek1, havek2;
+
+ /*
+ * Allocate thread-local storage slots for m, g.
+ * The key numbers start at 0x100, and we expect to be
+ * one of the early calls to pthread_key_create, so we
+ * should be able to get pretty low numbers.
+ *
+ * In Darwin/386 pthreads, %gs points at the thread
+ * structure, and each key is an index into the thread-local
+ * storage array that begins at offset 0x48 within in that structure.
+ * It may happen that we are not quite the first function to try
+ * to allocate thread-local storage keys, so instead of depending
+ * on getting 0x100 and 0x101, we try for 0x108 and 0x109,
+ * allocating keys until we get the ones we want and then freeing
+ * the ones we didn't want.
+ *
+ * Thus the final offsets to use in %gs references are
+ * 0x48+4*0x108 = 0x468 and 0x48+4*0x109 = 0x46c.
+ *
+ * The linker and runtime hard-code these constant offsets
+ * from %gs where we expect to find m and g.
+ * Known to ../cmd/8l/obj.c:/468
+ * and to ../pkg/runtime/darwin/386/sys.s:/468
+ *
+ * This is truly disgusting and a bit fragile, but taking care
+ * of it here protects the rest of the system from damage.
+ * The alternative would be to use a global variable that
+ * held the offset and refer to that variable each time we
+ * need a %gs variable (m or g). That approach would
+ * require an extra instruction and memory reference in
+ * every stack growth prolog and would also require
+ * rewriting the code that 8c generates for extern registers.
+ *
+ * Things get more disgusting on OS X 10.7 Lion.
+ * The 0x48 base mentioned above is the offset of the tsd
+ * array within the per-thread structure on Leopard and Snow Leopard.
+ * On Lion, the base moved a little, so while the math above
+ * still applies, the base is different. Thus, we cannot
+ * look for specific key values if we want to build binaries
+ * that run on both systems. Instead, forget about the
+ * specific key values and just allocate and initialize per-thread
+ * storage until we find a key that writes to the memory location
+ * we want. Then keep that key.
+ */
+ havek1 = 0;
+ havek2 = 0;
+ ntofree = 0;
+ while(!havek1 || !havek2) {
+ if(pthread_key_create(&k, nil) < 0) {
+ fprintf(stderr, "runtime/cgo: pthread_key_create failed\n");
+ abort();
+ }
+ pthread_setspecific(k, (void*)magic1);
+ asm volatile("movl %%gs:0x468, %0" : "=r"(x));
+ asm volatile("movl %%gs:0x46c, %0" : "=r"(y));
+ if(x == magic1) {
+ havek1 = 1;
+ k1 = k;
+ } else if(y == magic1) {
+ havek2 = 1;
+ k2 = k;
+ } else {
+ if(ntofree >= nelem(tofree)) {
+ fprintf(stderr, "runtime/cgo: could not obtain pthread_keys\n");
+ fprintf(stderr, "\ttried");
+ for(i=0; i<ntofree; i++)
+ fprintf(stderr, " %#x", (unsigned)tofree[i]);
+ fprintf(stderr, "\n");
+ abort();
+ }
+ tofree[ntofree++] = k;
+ }
+ pthread_setspecific(k, 0);
+ }
+
+ /*
+ * We got the keys we wanted. Free the others.
+ */
+ for(i=0; i<ntofree; i++)
+ pthread_key_delete(tofree[i]);
+}
+
+static void
+xinitcgo(void)
+{
+ inittls();
+}
+
+void (*initcgo)(void) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ ts->g->stackguard = size;
+ err = pthread_create(&p, &attr, threadentry, ts);
+ if (err != 0) {
+ fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
+ abort();
+ }
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ ts.g->stackbase = (uintptr)&ts;
+
+ /*
+ * libcgo_sys_thread_start set stackguard to stack size;
+ * change to actual guard pointer.
+ */
+ ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
+
+ pthread_setspecific(k1, (void*)ts.g);
+ pthread_setspecific(k2, (void*)ts.m);
+
+ crosscall_386(ts.fn);
+ return nil;
+}
diff --git a/src/pkg/runtime/cgo/darwin_amd64.c b/src/pkg/runtime/cgo/darwin_amd64.c
new file mode 100644
index 000000000..3471044c0
--- /dev/null
+++ b/src/pkg/runtime/cgo/darwin_amd64.c
@@ -0,0 +1,119 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <pthread.h>
+#include "libcgo.h"
+
+static void* threadentry(void*);
+static pthread_key_t k1, k2;
+
+#define magic1 (0x23581321345589ULL)
+
+static void
+inittls(void)
+{
+ uint64 x, y;
+ pthread_key_t tofree[128], k;
+ int i, ntofree;
+ int havek1, havek2;
+
+ /*
+ * Same logic, code as darwin_386.c:/inittls, except that words
+ * are 8 bytes long now, and the thread-local storage starts
+ * at 0x60 on Leopard / Snow Leopard. So the offsets are
+ * 0x60+8*0x108 = 0x8a0 and 0x60+8*0x109 = 0x8a8.
+ *
+ * The linker and runtime hard-code these constant offsets
+ * from %gs where we expect to find m and g.
+ * Known to ../cmd/6l/obj.c:/8a0
+ * and to ../pkg/runtime/darwin/amd64/sys.s:/8a0
+ *
+ * As disgusting as on the 386; same justification.
+ */
+ havek1 = 0;
+ havek2 = 0;
+ ntofree = 0;
+ while(!havek1 || !havek2) {
+ if(pthread_key_create(&k, nil) < 0) {
+ fprintf(stderr, "runtime/cgo: pthread_key_create failed\n");
+ abort();
+ }
+ pthread_setspecific(k, (void*)magic1);
+ asm volatile("movq %%gs:0x8a0, %0" : "=r"(x));
+ asm volatile("movq %%gs:0x8a8, %0" : "=r"(y));
+ if(x == magic1) {
+ havek1 = 1;
+ k1 = k;
+ } else if(y == magic1) {
+ havek2 = 1;
+ k2 = k;
+ } else {
+ if(ntofree >= nelem(tofree)) {
+ fprintf(stderr, "runtime/cgo: could not obtain pthread_keys\n");
+ fprintf(stderr, "\ttried");
+ for(i=0; i<ntofree; i++)
+ fprintf(stderr, " %#x", (unsigned)tofree[i]);
+ fprintf(stderr, "\n");
+ abort();
+ }
+ tofree[ntofree++] = k;
+ }
+ pthread_setspecific(k, 0);
+ }
+
+ /*
+ * We got the keys we wanted. Free the others.
+ */
+ for(i=0; i<ntofree; i++)
+ pthread_key_delete(tofree[i]);
+}
+
+void
+xinitcgo(void)
+{
+ inittls();
+}
+
+void (*initcgo) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ ts->g->stackguard = size;
+ err = pthread_create(&p, &attr, threadentry, ts);
+ if (err != 0) {
+ fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
+ abort();
+ }
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ ts.g->stackbase = (uintptr)&ts;
+
+ /*
+ * libcgo_sys_thread_start set stackguard to stack size;
+ * change to actual guard pointer.
+ */
+ ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
+
+ pthread_setspecific(k1, (void*)ts.g);
+ pthread_setspecific(k2, (void*)ts.m);
+
+ crosscall_amd64(ts.fn);
+ return nil;
+}
diff --git a/src/pkg/runtime/cgo/freebsd.c b/src/pkg/runtime/cgo/freebsd.c
new file mode 100644
index 000000000..dfcfa3a21
--- /dev/null
+++ b/src/pkg/runtime/cgo/freebsd.c
@@ -0,0 +1,13 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Supply environ and __progname, because we don't
+// link against the standard FreeBSD crt0.o and the
+// libc dynamic library needs them.
+
+char *environ[1];
+char *__progname;
+
+#pragma dynexport environ environ
+#pragma dynexport __progname __progname
diff --git a/src/pkg/runtime/cgo/freebsd_386.c b/src/pkg/runtime/cgo/freebsd_386.c
new file mode 100644
index 000000000..ae53201b4
--- /dev/null
+++ b/src/pkg/runtime/cgo/freebsd_386.c
@@ -0,0 +1,64 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <pthread.h>
+#include "libcgo.h"
+
+static void* threadentry(void*);
+
+static void
+xinitcgo(void)
+{
+}
+
+void (*initcgo)(void) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ ts->g->stackguard = size;
+ err = pthread_create(&p, &attr, threadentry, ts);
+ if (err != 0) {
+ fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
+ abort();
+ }
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ ts.g->stackbase = (uintptr)&ts;
+
+ /*
+ * libcgo_sys_thread_start set stackguard to stack size;
+ * change to actual guard pointer.
+ */
+ ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
+
+ /*
+ * Set specific keys. On FreeBSD/ELF, the thread local storage
+ * is just before %gs:0. Our dynamic 8.out's reserve 8 bytes
+ * for the two words g and m at %gs:-8 and %gs:-4.
+ */
+ asm volatile (
+ "movl %0, %%gs:-8\n" // MOVL g, -8(GS)
+ "movl %1, %%gs:-4\n" // MOVL m, -4(GS)
+ :: "r"(ts.g), "r"(ts.m)
+ );
+
+ crosscall_386(ts.fn);
+ return nil;
+}
diff --git a/src/pkg/runtime/cgo/freebsd_amd64.c b/src/pkg/runtime/cgo/freebsd_amd64.c
new file mode 100644
index 000000000..5afc1dfea
--- /dev/null
+++ b/src/pkg/runtime/cgo/freebsd_amd64.c
@@ -0,0 +1,63 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <pthread.h>
+#include "libcgo.h"
+
+static void* threadentry(void*);
+
+static void
+xinitcgo(void)
+{
+}
+
+void (*initcgo)(void) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ ts->g->stackguard = size;
+ err = pthread_create(&p, &attr, threadentry, ts);
+ if (err != 0) {
+ fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
+ abort();
+ }
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ ts.g->stackbase = (uintptr)&ts;
+
+ /*
+ * libcgo_sys_thread_start set stackguard to stack size;
+ * change to actual guard pointer.
+ */
+ ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
+
+ /*
+ * Set specific keys. On FreeBSD/ELF, the thread local storage
+ * is just before %fs:0. Our dynamic 6.out's reserve 16 bytes
+ * for the two words g and m at %fs:-16 and %fs:-8.
+ */
+ asm volatile (
+ "movq %0, %%fs:-16\n" // MOVL g, -16(FS)
+ "movq %1, %%fs:-8\n" // MOVL m, -8(FS)
+ :: "r"(ts.g), "r"(ts.m)
+ );
+ crosscall_amd64(ts.fn);
+ return nil;
+}
diff --git a/src/pkg/runtime/cgo/iscgo.c b/src/pkg/runtime/cgo/iscgo.c
new file mode 100644
index 000000000..eb6f5c09d
--- /dev/null
+++ b/src/pkg/runtime/cgo/iscgo.c
@@ -0,0 +1,14 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The runtime package contains an uninitialized definition
+// for runtime·iscgo. Override it to tell the runtime we're here.
+// There are various function pointers that should be set too,
+// but those depend on dynamic linker magic to get initialized
+// correctly, and sometimes they break. This variable is a
+// backup: it depends only on old C style static linking rules.
+
+#include "../runtime.h"
+
+bool runtime·iscgo = 1;
diff --git a/src/pkg/runtime/cgo/libcgo.h b/src/pkg/runtime/cgo/libcgo.h
new file mode 100644
index 000000000..91032959c
--- /dev/null
+++ b/src/pkg/runtime/cgo/libcgo.h
@@ -0,0 +1,60 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#define nil ((void*)0)
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
+
+typedef uint32_t uint32;
+typedef uint64_t uint64;
+typedef uintptr_t uintptr;
+
+/*
+ * The beginning of the per-goroutine structure,
+ * as defined in ../pkg/runtime/runtime.h.
+ * Just enough to edit these two fields.
+ */
+typedef struct G G;
+struct G
+{
+ uintptr stackguard;
+ uintptr stackbase;
+};
+
+/*
+ * Arguments to the libcgo_thread_start call.
+ * Also known to ../pkg/runtime/runtime.h.
+ */
+typedef struct ThreadStart ThreadStart;
+struct ThreadStart
+{
+ uintptr m;
+ G *g;
+ void (*fn)(void);
+};
+
+/*
+ * Called by 5c/6c/8c world.
+ * Makes a local copy of the ThreadStart and
+ * calls libcgo_sys_thread_start(ts).
+ */
+void (*libcgo_thread_start)(ThreadStart *ts);
+
+/*
+ * Creates the new operating system thread (OS, arch dependent).
+ */
+void libcgo_sys_thread_start(ThreadStart *ts);
+
+/*
+ * Call fn in the 6c world.
+ */
+void crosscall_amd64(void (*fn)(void));
+
+/*
+ * Call fn in the 8c world.
+ */
+void crosscall_386(void (*fn)(void));
diff --git a/src/pkg/runtime/cgo/linux_386.c b/src/pkg/runtime/cgo/linux_386.c
new file mode 100644
index 000000000..e9df5ffdc
--- /dev/null
+++ b/src/pkg/runtime/cgo/linux_386.c
@@ -0,0 +1,73 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <pthread.h>
+#include <string.h>
+#include "libcgo.h"
+
+static void *threadentry(void*);
+
+static void
+xinitcgo(void)
+{
+}
+
+void (*initcgo) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ // Not sure why the memset is necessary here,
+ // but without it, we get a bogus stack size
+ // out of pthread_attr_getstacksize. C'est la Linux.
+ memset(&attr, 0, sizeof attr);
+ pthread_attr_init(&attr);
+ size = 0;
+ pthread_attr_getstacksize(&attr, &size);
+ ts->g->stackguard = size;
+ err = pthread_create(&p, &attr, threadentry, ts);
+ if (err != 0) {
+ fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
+ abort();
+ }
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ ts.g->stackbase = (uintptr)&ts;
+
+ /*
+ * libcgo_sys_thread_start set stackguard to stack size;
+ * change to actual guard pointer.
+ */
+ ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
+
+ /*
+ * Set specific keys. On Linux/ELF, the thread local storage
+ * is just before %gs:0. Our dynamic 8.out's reserve 8 bytes
+ * for the two words g and m at %gs:-8 and %gs:-4.
+ * Xen requires us to access those words indirect from %gs:0
+ * which points at itself.
+ */
+ asm volatile (
+ "movl %%gs:0, %%eax\n" // MOVL 0(GS), tmp
+ "movl %0, -8(%%eax)\n" // MOVL g, -8(GS)
+ "movl %1, -4(%%eax)\n" // MOVL m, -4(GS)
+ :: "r"(ts.g), "r"(ts.m) : "%eax"
+ );
+
+ crosscall_386(ts.fn);
+ return nil;
+}
diff --git a/src/pkg/runtime/cgo/linux_amd64.c b/src/pkg/runtime/cgo/linux_amd64.c
new file mode 100644
index 000000000..d9b8b3706
--- /dev/null
+++ b/src/pkg/runtime/cgo/linux_amd64.c
@@ -0,0 +1,63 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <pthread.h>
+#include "libcgo.h"
+
+static void* threadentry(void*);
+
+void
+xinitcgo(void)
+{
+}
+
+void (*initcgo)(void) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ ts->g->stackguard = size;
+ err = pthread_create(&p, &attr, threadentry, ts);
+ if (err != 0) {
+ fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
+ abort();
+ }
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ ts.g->stackbase = (uintptr)&ts;
+
+ /*
+ * libcgo_sys_thread_start set stackguard to stack size;
+ * change to actual guard pointer.
+ */
+ ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
+
+ /*
+ * Set specific keys. On Linux/ELF, the thread local storage
+ * is just before %fs:0. Our dynamic 6.out's reserve 16 bytes
+ * for the two words g and m at %fs:-16 and %fs:-8.
+ */
+ asm volatile (
+ "movq %0, %%fs:-16\n" // MOVL g, -16(FS)
+ "movq %1, %%fs:-8\n" // MOVL m, -8(FS)
+ :: "r"(ts.g), "r"(ts.m)
+ );
+ crosscall_amd64(ts.fn);
+ return nil;
+}
diff --git a/src/pkg/runtime/cgo/linux_arm.c b/src/pkg/runtime/cgo/linux_arm.c
new file mode 100644
index 000000000..e556c433c
--- /dev/null
+++ b/src/pkg/runtime/cgo/linux_arm.c
@@ -0,0 +1,19 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "libcgo.h"
+
+static void
+xinitcgo(void)
+{
+}
+
+void (*initcgo)(void) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ // unimplemented
+ *(int*)0 = 0;
+}
diff --git a/src/pkg/runtime/cgo/setenv.c b/src/pkg/runtime/cgo/setenv.c
new file mode 100644
index 000000000..c911b8392
--- /dev/null
+++ b/src/pkg/runtime/cgo/setenv.c
@@ -0,0 +1,16 @@
+// Copyright 20111 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "libcgo.h"
+
+#include <stdlib.h>
+
+/* Stub for calling setenv */
+static void
+xlibcgo_setenv(char **arg)
+{
+ setenv(arg[0], arg[1], 1);
+}
+
+void (*libcgo_setenv)(char**) = xlibcgo_setenv;
diff --git a/src/pkg/runtime/cgo/util.c b/src/pkg/runtime/cgo/util.c
new file mode 100644
index 000000000..9d96521f5
--- /dev/null
+++ b/src/pkg/runtime/cgo/util.c
@@ -0,0 +1,51 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "libcgo.h"
+
+/* Stub for calling malloc from Go */
+static void
+x_cgo_malloc(void *p)
+{
+ struct a {
+ long long n;
+ void *ret;
+ } *a = p;
+
+ a->ret = malloc(a->n);
+}
+
+void (*_cgo_malloc)(void*) = x_cgo_malloc;
+
+/* Stub for calling from Go */
+static void
+x_cgo_free(void *p)
+{
+ struct a {
+ void *arg;
+ } *a = p;
+
+ free(a->arg);
+}
+
+void (*_cgo_free)(void*) = x_cgo_free;
+
+/* Stub for creating a new thread */
+static void
+xlibcgo_thread_start(ThreadStart *arg)
+{
+ ThreadStart *ts;
+
+ /* Make our own copy that can persist after we return. */
+ ts = malloc(sizeof *ts);
+ if(ts == nil) {
+ fprintf(stderr, "runtime/cgo: out of memory in thread_start\n");
+ abort();
+ }
+ *ts = *arg;
+
+ libcgo_sys_thread_start(ts); /* OS-dependent half */
+}
+
+void (*libcgo_thread_start)(ThreadStart*) = xlibcgo_thread_start;
diff --git a/src/pkg/runtime/cgo/windows_386.c b/src/pkg/runtime/cgo/windows_386.c
new file mode 100755
index 000000000..f39309cb1
--- /dev/null
+++ b/src/pkg/runtime/cgo/windows_386.c
@@ -0,0 +1,62 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include "libcgo.h"
+
+static void *threadentry(void*);
+
+/* From what I've read 1MB is default for 32-bit Linux.
+ Allocation granularity on Windows is typically 64 KB. */
+#define STACKSIZE (1*1024*1024)
+
+static void
+xinitcgo(void)
+{
+}
+
+void (*initcgo)(void) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ ts->g->stackguard = STACKSIZE;
+ _beginthread(threadentry, STACKSIZE, ts);
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+ void *tls0;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ ts.g->stackbase = (uintptr)&ts;
+
+ /*
+ * libcgo_sys_thread_start set stackguard to stack size;
+ * change to actual guard pointer.
+ */
+ ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
+
+ /*
+ * Set specific keys in thread local storage.
+ */
+ tls0 = (void*)LocalAlloc(LPTR, 32);
+ asm volatile (
+ "movl %0, %%fs:0x2c\n" // MOVL tls0, 0x2c(FS)
+ "movl %%fs:0x2c, %%eax\n" // MOVL 0x2c(FS), tmp
+ "movl %1, 0(%%eax)\n" // MOVL g, 0(FS)
+ "movl %2, 4(%%eax)\n" // MOVL m, 4(FS)
+ :: "r"(tls0), "r"(ts.g), "r"(ts.m) : "%eax"
+ );
+
+ crosscall_386(ts.fn);
+
+ LocalFree(tls0);
+ return nil;
+}
diff --git a/src/pkg/runtime/cgo/windows_amd64.c b/src/pkg/runtime/cgo/windows_amd64.c
new file mode 100755
index 000000000..e8313e250
--- /dev/null
+++ b/src/pkg/runtime/cgo/windows_amd64.c
@@ -0,0 +1,60 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define WIN64_LEAN_AND_MEAN
+#include <windows.h>
+#include "libcgo.h"
+
+static void *threadentry(void*);
+
+/* From what I've read 2MB is default for 64-bit Linux.
+ Allocation granularity on Windows is typically 64 KB. */
+#define STACKSIZE (2*1024*1024)
+
+static void
+xinitcgo(void)
+{
+}
+
+void (*initcgo)(void) = xinitcgo;
+
+void
+libcgo_sys_thread_start(ThreadStart *ts)
+{
+ ts->g->stackguard = STACKSIZE;
+ _beginthread(threadentry, STACKSIZE, ts);
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+ void *tls0;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ ts.g->stackbase = (uintptr)&ts;
+
+ /*
+ * libcgo_sys_thread_start set stackguard to stack size;
+ * change to actual guard pointer.
+ */
+ ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
+
+ /*
+ * Set specific keys in thread local storage.
+ */
+ tls0 = (void*)LocalAlloc(LPTR, 64);
+ asm volatile (
+ "movq %0, %%gs:0x58\n" // MOVL tls0, 0x58(GS)
+ "movq %%gs:0x58, %%rax\n" // MOVQ 0x58(GS), tmp
+ "movq %1, 0(%%rax)\n" // MOVQ g, 0(GS)
+ "movq %2, 8(%%rax)\n" // MOVQ m, 8(GS)
+ :: "r"(tls0), "r"(ts.g), "r"(ts.m) : "%rax"
+ );
+
+ crosscall_amd64(ts.fn);
+ return nil;
+}
diff --git a/src/pkg/runtime/cgocall.c b/src/pkg/runtime/cgocall.c
new file mode 100644
index 000000000..829448b02
--- /dev/null
+++ b/src/pkg/runtime/cgocall.c
@@ -0,0 +1,249 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "arch.h"
+#include "stack.h"
+#include "cgocall.h"
+
+// Cgo call and callback support.
+//
+// To call into the C function f from Go, the cgo-generated code calls
+// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
+// gcc-compiled function written by cgo.
+//
+// runtime.cgocall (below) locks g to m, calls entersyscall
+// so as not to block other goroutines or the garbage collector,
+// and then calls runtime.asmcgocall(_cgo_Cfunc_f, frame).
+//
+// runtime.asmcgocall (in $GOARCH/asm.s) switches to the m->g0 stack
+// (assumed to be an operating system-allocated stack, so safe to run
+// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
+//
+// _cgo_Cfunc_f invokes the actual C function f with arguments
+// taken from the frame structure, records the results in the frame,
+// and returns to runtime.asmcgocall.
+//
+// After it regains control, runtime.asmcgocall switches back to the
+// original g (m->curg)'s stack and returns to runtime.cgocall.
+//
+// After it regains control, runtime.cgocall calls exitsyscall, which blocks
+// until this m can run Go code without violating the $GOMAXPROCS limit,
+// and then unlocks g from m.
+//
+// The above description skipped over the possibility of the gcc-compiled
+// function f calling back into Go. If that happens, we continue down
+// the rabbit hole during the execution of f.
+//
+// To make it possible for gcc-compiled C code to call a Go function p.GoF,
+// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
+// know about packages). The gcc-compiled C function f calls GoF.
+//
+// GoF calls crosscall2(_cgoexp_GoF, frame, framesize). Crosscall2
+// (in cgo/$GOOS.S, a gcc-compiled assembly file) is a two-argument
+// adapter from the gcc function call ABI to the 6c function call ABI.
+// It is called from gcc to call 6c functions. In this case it calls
+// _cgoexp_GoF(frame, framesize), still running on m->g0's stack
+// and outside the $GOMAXPROCS limit. Thus, this code cannot yet
+// call arbitrary Go code directly and must be careful not to allocate
+// memory or use up m->g0's stack.
+//
+// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize).
+// (The reason for having _cgoexp_GoF instead of writing a crosscall3
+// to make this call directly is that _cgoexp_GoF, because it is compiled
+// with 6c instead of gcc, can refer to dotted names like
+// runtime.cgocallback and p.GoF.)
+//
+// runtime.cgocallback (in $GOOS/asm.s) switches from m->g0's
+// stack to the original g (m->curg)'s stack, on which it calls
+// runtime.cgocallbackg(p.GoF, frame, framesize).
+// As part of the stack switch, runtime.cgocallback saves the current
+// SP as m->g0->sched.sp, so that any use of m->g0's stack during the
+// execution of the callback will be done below the existing stack frames.
+// Before overwriting m->g0->sched.sp, it pushes the old value on the
+// m->g0 stack, so that it can be restored later.
+//
+// runtime.cgocallbackg (below) is now running on a real goroutine
+// stack (not an m->g0 stack). First it calls runtime.exitsyscall, which will
+// block until the $GOMAXPROCS limit allows running this goroutine.
+// Once exitsyscall has returned, it is safe to do things like call the memory
+// allocator or invoke the Go callback function p.GoF. runtime.cgocallback
+// first defers a function to unwind m->g0.sched.sp, so that if p.GoF
+// panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack
+// and the m->curg stack will be unwound in lock step.
+// Then it calls p.GoF. Finally it pops but does not execute the deferred
+// function, calls runtime.entersyscall, and returns to runtime.cgocallback.
+//
+// After it regains control, runtime.cgocallback switches back to
+// m->g0's stack (the pointer is still in m->g0.sched.sp), restores the old
+// m->g0.sched.sp value from the stack, and returns to _cgoexp_GoF.
+//
+// _cgoexp_GoF immediately returns to crosscall2, which restores the
+// callee-save registers for gcc and returns to GoF, which returns to f.
+
+void *initcgo; /* filled in by dynamic linker when Cgo is available */
+
+static void unlockm(void);
+static void unwindm(void);
+
+// Call from Go to C.
+
+void
+runtime·cgocall(void (*fn)(void*), void *arg)
+{
+ Defer *d;
+
+ if(!runtime·iscgo)
+ runtime·throw("cgocall unavailable");
+
+ if(fn == 0)
+ runtime·throw("cgocall nil");
+
+ m->ncgocall++;
+
+ /*
+ * Lock g to m to ensure we stay on the same stack if we do a
+ * cgo callback.
+ */
+ d = nil;
+ if(m->lockedg == nil) {
+ m->lockedg = g;
+ g->lockedm = m;
+
+ // Add entry to defer stack in case of panic.
+ d = runtime·malloc(sizeof(*d));
+ d->fn = (byte*)unlockm;
+ d->siz = 0;
+ d->link = g->defer;
+ d->argp = (void*)-1; // unused because unwindm never recovers
+ g->defer = d;
+ }
+
+ /*
+ * Announce we are entering a system call
+ * so that the scheduler knows to create another
+ * M to run goroutines while we are in the
+ * foreign code.
+ *
+ * The call to asmcgocall is guaranteed not to
+ * split the stack and does not allocate memory,
+ * so it is safe to call while "in a system call", outside
+ * the $GOMAXPROCS accounting.
+ */
+ runtime·entersyscall();
+ runtime·asmcgocall(fn, arg);
+ runtime·exitsyscall();
+
+ if(d != nil) {
+ if(g->defer != d || d->fn != (byte*)unlockm)
+ runtime·throw("runtime: bad defer entry in cgocallback");
+ g->defer = d->link;
+ runtime·free(d);
+ unlockm();
+ }
+}
+
+static void
+unlockm(void)
+{
+ m->lockedg = nil;
+ g->lockedm = nil;
+}
+
+void
+runtime·Cgocalls(int64 ret)
+{
+ M *m;
+
+ ret = 0;
+ for(m=runtime·atomicloadp(&runtime·allm); m; m=m->alllink)
+ ret += m->ncgocall;
+ FLUSH(&ret);
+}
+
+// Helper functions for cgo code.
+
+void (*_cgo_malloc)(void*);
+void (*_cgo_free)(void*);
+
+void*
+runtime·cmalloc(uintptr n)
+{
+ struct {
+ uint64 n;
+ void *ret;
+ } a;
+
+ a.n = n;
+ a.ret = nil;
+ runtime·cgocall(_cgo_malloc, &a);
+ return a.ret;
+}
+
+void
+runtime·cfree(void *p)
+{
+ runtime·cgocall(_cgo_free, p);
+}
+
+// Call from C back to Go.
+
+void
+runtime·cgocallbackg(void (*fn)(void), void *arg, uintptr argsize)
+{
+ Defer *d;
+
+ if(g != m->curg)
+ runtime·throw("runtime: bad g in cgocallback");
+
+ runtime·exitsyscall(); // coming out of cgo call
+
+ // Add entry to defer stack in case of panic.
+ d = runtime·malloc(sizeof(*d));
+ d->fn = (byte*)unwindm;
+ d->siz = 0;
+ d->link = g->defer;
+ d->argp = (void*)-1; // unused because unwindm never recovers
+ g->defer = d;
+
+ // Invoke callback.
+ reflect·call((byte*)fn, arg, argsize);
+
+ // Pop defer.
+ // Do not unwind m->g0->sched.sp.
+ // Our caller, cgocallback, will do that.
+ if(g->defer != d || d->fn != (byte*)unwindm)
+ runtime·throw("runtime: bad defer entry in cgocallback");
+ g->defer = d->link;
+ runtime·free(d);
+
+ runtime·entersyscall(); // going back to cgo call
+}
+
+static void
+unwindm(void)
+{
+ // Restore sp saved by cgocallback during
+ // unwind of g's stack (see comment at top of file).
+ switch(thechar){
+ default:
+ runtime·throw("runtime: unwindm not implemented");
+ case '8':
+ case '6':
+ m->g0->sched.sp = *(void**)m->g0->sched.sp;
+ break;
+ }
+}
+
+void
+runtime·badcgocallback(void) // called from assembly
+{
+ runtime·throw("runtime: misaligned stack in cgocallback");
+}
+
+void
+runtime·cgounimpl(void) // called from (incomplete) assembly
+{
+ runtime·throw("runtime: cgo not implemented");
+}
diff --git a/src/pkg/runtime/cgocall.h b/src/pkg/runtime/cgocall.h
new file mode 100644
index 000000000..253661a7e
--- /dev/null
+++ b/src/pkg/runtime/cgocall.h
@@ -0,0 +1,12 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Cgo interface.
+ */
+
+void runtime·cgocall(void (*fn)(void*), void*);
+void runtime·cgocallback(void (*fn)(void), void*, uintptr);
+void *runtime·cmalloc(uintptr);
+void runtime·cfree(void*);
diff --git a/src/pkg/runtime/chan.c b/src/pkg/runtime/chan.c
new file mode 100644
index 000000000..ef5342353
--- /dev/null
+++ b/src/pkg/runtime/chan.c
@@ -0,0 +1,1161 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "type.h"
+
+#define MAXALIGN 7
+#define NOSELGEN 1
+
+static int32 debug = 0;
+
+typedef struct WaitQ WaitQ;
+typedef struct SudoG SudoG;
+typedef struct Select Select;
+typedef struct Scase Scase;
+
+struct SudoG
+{
+ G* g; // g and selgen constitute
+ uint32 selgen; // a weak pointer to g
+ SudoG* link;
+ byte* elem; // data element
+};
+
+struct WaitQ
+{
+ SudoG* first;
+ SudoG* last;
+};
+
+struct Hchan
+{
+ uint32 qcount; // total data in the q
+ uint32 dataqsiz; // size of the circular q
+ uint16 elemsize;
+ bool closed;
+ uint8 elemalign;
+ Alg* elemalg; // interface for element type
+ uint32 sendx; // send index
+ uint32 recvx; // receive index
+ WaitQ recvq; // list of recv waiters
+ WaitQ sendq; // list of send waiters
+ Lock;
+};
+
+// Buffer follows Hchan immediately in memory.
+// chanbuf(c, i) is pointer to the i'th slot in the buffer.
+#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i))
+
+enum
+{
+ // Scase.kind
+ CaseRecv,
+ CaseSend,
+ CaseDefault,
+};
+
+struct Scase
+{
+ SudoG sg; // must be first member (cast to Scase)
+ Hchan* chan; // chan
+ byte* pc; // return pc
+ uint16 kind;
+ uint16 so; // vararg of selected bool
+ bool* receivedp; // pointer to received bool (recv2)
+};
+
+struct Select
+{
+ uint16 tcase; // total count of scase[]
+ uint16 ncase; // currently filled scase[]
+ uint16* pollorder; // case poll order
+ Hchan** lockorder; // channel lock order
+ Scase scase[1]; // one per case (in order of appearance)
+};
+
+static void dequeueg(WaitQ*);
+static SudoG* dequeue(WaitQ*);
+static void enqueue(WaitQ*, SudoG*);
+static void destroychan(Hchan*);
+
+Hchan*
+runtime·makechan_c(ChanType *t, int64 hint)
+{
+ Hchan *c;
+ int32 n;
+ Type *elem;
+
+ elem = t->elem;
+
+ if(hint < 0 || (int32)hint != hint || (elem->size > 0 && hint > ((uintptr)-1) / elem->size))
+ runtime·panicstring("makechan: size out of range");
+
+ if(elem->alg >= nelem(runtime·algarray)) {
+ runtime·printf("chan(alg=%d)\n", elem->alg);
+ runtime·throw("runtime.makechan: unsupported elem type");
+ }
+
+ // calculate rounded size of Hchan
+ n = sizeof(*c);
+ while(n & MAXALIGN)
+ n++;
+
+ // allocate memory in one call
+ c = (Hchan*)runtime·mal(n + hint*elem->size);
+ if(runtime·destroylock)
+ runtime·addfinalizer(c, destroychan, 0);
+
+ c->elemsize = elem->size;
+ c->elemalg = &runtime·algarray[elem->alg];
+ c->elemalign = elem->align;
+ c->dataqsiz = hint;
+
+ if(debug)
+ runtime·printf("makechan: chan=%p; elemsize=%D; elemalg=%d; elemalign=%d; dataqsiz=%d\n",
+ c, (int64)elem->size, elem->alg, elem->align, c->dataqsiz);
+
+ return c;
+}
+
+// For reflect
+// func makechan(typ *ChanType, size uint32) (chan)
+void
+reflect·makechan(ChanType *t, uint32 size, Hchan *c)
+{
+ c = runtime·makechan_c(t, size);
+ FLUSH(&c);
+}
+
+static void
+destroychan(Hchan *c)
+{
+ runtime·destroylock(&c->Lock);
+}
+
+
+// makechan(t *ChanType, hint int64) (hchan *chan any);
+void
+runtime·makechan(ChanType *t, int64 hint, Hchan *ret)
+{
+ ret = runtime·makechan_c(t, hint);
+ FLUSH(&ret);
+}
+
+/*
+ * generic single channel send/recv
+ * if the bool pointer is nil,
+ * then the full exchange will
+ * occur. if pres is not nil,
+ * then the protocol will not
+ * sleep but return if it could
+ * not complete.
+ *
+ * sleep can wake up with g->param == nil
+ * when a channel involved in the sleep has
+ * been closed. it is easiest to loop and re-run
+ * the operation; we'll see that it's now closed.
+ */
+void
+runtime·chansend(ChanType *t, Hchan *c, byte *ep, bool *pres)
+{
+ SudoG *sg;
+ SudoG mysg;
+ G* gp;
+
+ if(c == nil) {
+ USED(t);
+ if(pres != nil) {
+ *pres = false;
+ return;
+ }
+ g->status = Gwaiting;
+ runtime·gosched();
+ return; // not reached
+ }
+
+ if(runtime·gcwaiting)
+ runtime·gosched();
+
+ if(debug) {
+ runtime·printf("chansend: chan=%p; elem=", c);
+ c->elemalg->print(c->elemsize, ep);
+ runtime·prints("\n");
+ }
+
+ runtime·lock(c);
+ if(c->closed)
+ goto closed;
+
+ if(c->dataqsiz > 0)
+ goto asynch;
+
+ sg = dequeue(&c->recvq);
+ if(sg != nil) {
+ runtime·unlock(c);
+
+ gp = sg->g;
+ gp->param = sg;
+ if(sg->elem != nil)
+ c->elemalg->copy(c->elemsize, sg->elem, ep);
+ runtime·ready(gp);
+
+ if(pres != nil)
+ *pres = true;
+ return;
+ }
+
+ if(pres != nil) {
+ runtime·unlock(c);
+ *pres = false;
+ return;
+ }
+
+ mysg.elem = ep;
+ mysg.g = g;
+ mysg.selgen = NOSELGEN;
+ g->param = nil;
+ g->status = Gwaiting;
+ enqueue(&c->sendq, &mysg);
+ runtime·unlock(c);
+ runtime·gosched();
+
+ if(g->param == nil) {
+ runtime·lock(c);
+ if(!c->closed)
+ runtime·throw("chansend: spurious wakeup");
+ goto closed;
+ }
+
+ return;
+
+asynch:
+ if(c->closed)
+ goto closed;
+
+ if(c->qcount >= c->dataqsiz) {
+ if(pres != nil) {
+ runtime·unlock(c);
+ *pres = false;
+ return;
+ }
+ mysg.g = g;
+ mysg.elem = nil;
+ mysg.selgen = NOSELGEN;
+ g->status = Gwaiting;
+ enqueue(&c->sendq, &mysg);
+ runtime·unlock(c);
+ runtime·gosched();
+
+ runtime·lock(c);
+ goto asynch;
+ }
+ c->elemalg->copy(c->elemsize, chanbuf(c, c->sendx), ep);
+ if(++c->sendx == c->dataqsiz)
+ c->sendx = 0;
+ c->qcount++;
+
+ sg = dequeue(&c->recvq);
+ if(sg != nil) {
+ gp = sg->g;
+ runtime·unlock(c);
+ runtime·ready(gp);
+ } else
+ runtime·unlock(c);
+ if(pres != nil)
+ *pres = true;
+ return;
+
+closed:
+ runtime·unlock(c);
+ runtime·panicstring("send on closed channel");
+}
+
+
+void
+runtime·chanrecv(ChanType *t, Hchan* c, byte *ep, bool *selected, bool *received)
+{
+ SudoG *sg;
+ SudoG mysg;
+ G *gp;
+
+ if(runtime·gcwaiting)
+ runtime·gosched();
+
+ if(debug)
+ runtime·printf("chanrecv: chan=%p\n", c);
+
+ if(c == nil) {
+ USED(t);
+ if(selected != nil) {
+ *selected = false;
+ return;
+ }
+ g->status = Gwaiting;
+ runtime·gosched();
+ return; // not reached
+ }
+
+ runtime·lock(c);
+ if(c->dataqsiz > 0)
+ goto asynch;
+
+ if(c->closed)
+ goto closed;
+
+ sg = dequeue(&c->sendq);
+ if(sg != nil) {
+ runtime·unlock(c);
+
+ if(ep != nil)
+ c->elemalg->copy(c->elemsize, ep, sg->elem);
+ gp = sg->g;
+ gp->param = sg;
+ runtime·ready(gp);
+
+ if(selected != nil)
+ *selected = true;
+ if(received != nil)
+ *received = true;
+ return;
+ }
+
+ if(selected != nil) {
+ runtime·unlock(c);
+ *selected = false;
+ return;
+ }
+
+ mysg.elem = ep;
+ mysg.g = g;
+ mysg.selgen = NOSELGEN;
+ g->param = nil;
+ g->status = Gwaiting;
+ enqueue(&c->recvq, &mysg);
+ runtime·unlock(c);
+ runtime·gosched();
+
+ if(g->param == nil) {
+ runtime·lock(c);
+ if(!c->closed)
+ runtime·throw("chanrecv: spurious wakeup");
+ goto closed;
+ }
+
+ if(received != nil)
+ *received = true;
+ return;
+
+asynch:
+ if(c->qcount <= 0) {
+ if(c->closed)
+ goto closed;
+
+ if(selected != nil) {
+ runtime·unlock(c);
+ *selected = false;
+ if(received != nil)
+ *received = false;
+ return;
+ }
+ mysg.g = g;
+ mysg.elem = nil;
+ mysg.selgen = NOSELGEN;
+ g->status = Gwaiting;
+ enqueue(&c->recvq, &mysg);
+ runtime·unlock(c);
+ runtime·gosched();
+
+ runtime·lock(c);
+ goto asynch;
+ }
+ if(ep != nil)
+ c->elemalg->copy(c->elemsize, ep, chanbuf(c, c->recvx));
+ c->elemalg->copy(c->elemsize, chanbuf(c, c->recvx), nil);
+ if(++c->recvx == c->dataqsiz)
+ c->recvx = 0;
+ c->qcount--;
+
+ sg = dequeue(&c->sendq);
+ if(sg != nil) {
+ gp = sg->g;
+ runtime·unlock(c);
+ runtime·ready(gp);
+ } else
+ runtime·unlock(c);
+
+ if(selected != nil)
+ *selected = true;
+ if(received != nil)
+ *received = true;
+ return;
+
+closed:
+ if(ep != nil)
+ c->elemalg->copy(c->elemsize, ep, nil);
+ if(selected != nil)
+ *selected = true;
+ if(received != nil)
+ *received = false;
+ runtime·unlock(c);
+}
+
+// chansend1(hchan *chan any, elem any);
+#pragma textflag 7
+void
+runtime·chansend1(ChanType *t, Hchan* c, ...)
+{
+ runtime·chansend(t, c, (byte*)(&c+1), nil);
+}
+
+// chanrecv1(hchan *chan any) (elem any);
+#pragma textflag 7
+void
+runtime·chanrecv1(ChanType *t, Hchan* c, ...)
+{
+ runtime·chanrecv(t, c, (byte*)(&c+1), nil, nil);
+}
+
+// chanrecv2(hchan *chan any) (elem any, received bool);
+#pragma textflag 7
+void
+runtime·chanrecv2(ChanType *t, Hchan* c, ...)
+{
+ byte *ae, *ap;
+
+ ae = (byte*)(&c+1);
+ ap = ae + t->elem->size;
+ runtime·chanrecv(t, c, ae, nil, ap);
+}
+
+// func selectnbsend(c chan any, elem any) bool
+//
+// compiler implements
+//
+// select {
+// case c <- v:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if selectnbsend(c, v) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+#pragma textflag 7
+void
+runtime·selectnbsend(ChanType *t, Hchan *c, ...)
+{
+ byte *ae, *ap;
+
+ ae = (byte*)(&c + 1);
+ ap = ae + runtime·rnd(t->elem->size, Structrnd);
+ runtime·chansend(t, c, ae, ap);
+}
+
+// func selectnbrecv(elem *any, c chan any) bool
+//
+// compiler implements
+//
+// select {
+// case v = <-c:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if selectnbrecv(&v, c) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+#pragma textflag 7
+void
+runtime·selectnbrecv(ChanType *t, byte *v, Hchan *c, bool selected)
+{
+ runtime·chanrecv(t, c, v, &selected, nil);
+}
+
+// func selectnbrecv2(elem *any, ok *bool, c chan any) bool
+//
+// compiler implements
+//
+// select {
+// case v, ok = <-c:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if c != nil && selectnbrecv2(&v, &ok, c) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+#pragma textflag 7
+void
+runtime·selectnbrecv2(ChanType *t, byte *v, bool *received, Hchan *c, bool selected)
+{
+ runtime·chanrecv(t, c, v, &selected, received);
+}
+
+// For reflect:
+// func chansend(c chan, val iword, nb bool) (selected bool)
+// where an iword is the same word an interface value would use:
+// the actual data if it fits, or else a pointer to the data.
+//
+// The "uintptr selected" is really "bool selected" but saying
+// uintptr gets us the right alignment for the output parameter block.
+void
+reflect·chansend(ChanType *t, Hchan *c, uintptr val, bool nb, uintptr selected)
+{
+ bool *sp;
+ byte *vp;
+
+ if(nb) {
+ selected = false;
+ sp = (bool*)&selected;
+ } else {
+ *(bool*)&selected = true;
+ FLUSH(&selected);
+ sp = nil;
+ }
+ if(t->elem->size <= sizeof(val))
+ vp = (byte*)&val;
+ else
+ vp = (byte*)val;
+ runtime·chansend(t, c, vp, sp);
+}
+
+// For reflect:
+// func chanrecv(c chan, nb bool) (val iword, selected, received bool)
+// where an iword is the same word an interface value would use:
+// the actual data if it fits, or else a pointer to the data.
+void
+reflect·chanrecv(ChanType *t, Hchan *c, bool nb, uintptr val, bool selected, bool received)
+{
+ byte *vp;
+ bool *sp;
+
+ if(nb) {
+ selected = false;
+ sp = &selected;
+ } else {
+ selected = true;
+ FLUSH(&selected);
+ sp = nil;
+ }
+ received = false;
+ FLUSH(&received);
+ if(t->elem->size <= sizeof(val)) {
+ val = 0;
+ vp = (byte*)&val;
+ } else {
+ vp = runtime·mal(t->elem->size);
+ val = (uintptr)vp;
+ FLUSH(&val);
+ }
+ runtime·chanrecv(t, c, vp, sp, &received);
+}
+
+static void newselect(int32, Select**);
+
+// newselect(size uint32) (sel *byte);
+#pragma textflag 7
+void
+runtime·newselect(int32 size, ...)
+{
+ int32 o;
+ Select **selp;
+
+ o = runtime·rnd(sizeof(size), Structrnd);
+ selp = (Select**)((byte*)&size + o);
+ newselect(size, selp);
+}
+
+static void
+newselect(int32 size, Select **selp)
+{
+ int32 n;
+ Select *sel;
+
+ n = 0;
+ if(size > 1)
+ n = size-1;
+
+ sel = runtime·mal(sizeof(*sel) +
+ n*sizeof(sel->scase[0]) +
+ size*sizeof(sel->lockorder[0]) +
+ size*sizeof(sel->pollorder[0]));
+
+ sel->tcase = size;
+ sel->ncase = 0;
+ sel->pollorder = (void*)(sel->scase + size);
+ sel->lockorder = (void*)(sel->pollorder + size);
+ *selp = sel;
+
+ if(debug)
+ runtime·printf("newselect s=%p size=%d\n", sel, size);
+}
+
+// cut in half to give stack a chance to split
+static void selectsend(Select *sel, Hchan *c, void *pc, void *elem, int32 so);
+
+// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
+#pragma textflag 7
+void
+runtime·selectsend(Select *sel, Hchan *c, void *elem, bool selected)
+{
+ selected = false;
+ FLUSH(&selected);
+
+ // nil cases do not compete
+ if(c == nil)
+ return;
+
+ selectsend(sel, c, runtime·getcallerpc(&sel), elem, (byte*)&selected - (byte*)&sel);
+}
+
+static void
+selectsend(Select *sel, Hchan *c, void *pc, void *elem, int32 so)
+{
+ int32 i;
+ Scase *cas;
+
+ i = sel->ncase;
+ if(i >= sel->tcase)
+ runtime·throw("selectsend: too many cases");
+ sel->ncase = i+1;
+ cas = &sel->scase[i];
+
+ cas->pc = pc;
+ cas->chan = c;
+ cas->so = so;
+ cas->kind = CaseSend;
+ cas->sg.elem = elem;
+
+ if(debug)
+ runtime·printf("selectsend s=%p pc=%p chan=%p so=%d\n",
+ sel, cas->pc, cas->chan, cas->so);
+}
+
+// cut in half to give stack a chance to split
+static void selectrecv(Select *sel, Hchan *c, void *pc, void *elem, bool*, int32 so);
+
+// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
+#pragma textflag 7
+void
+runtime·selectrecv(Select *sel, Hchan *c, void *elem, bool selected)
+{
+ selected = false;
+ FLUSH(&selected);
+
+ // nil cases do not compete
+ if(c == nil)
+ return;
+
+ selectrecv(sel, c, runtime·getcallerpc(&sel), elem, nil, (byte*)&selected - (byte*)&sel);
+}
+
+// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
+#pragma textflag 7
+void
+runtime·selectrecv2(Select *sel, Hchan *c, void *elem, bool *received, bool selected)
+{
+ selected = false;
+ FLUSH(&selected);
+
+ // nil cases do not compete
+ if(c == nil)
+ return;
+
+ selectrecv(sel, c, runtime·getcallerpc(&sel), elem, received, (byte*)&selected - (byte*)&sel);
+}
+
+static void
+selectrecv(Select *sel, Hchan *c, void *pc, void *elem, bool *received, int32 so)
+{
+ int32 i;
+ Scase *cas;
+
+ i = sel->ncase;
+ if(i >= sel->tcase)
+ runtime·throw("selectrecv: too many cases");
+ sel->ncase = i+1;
+ cas = &sel->scase[i];
+ cas->pc = pc;
+ cas->chan = c;
+
+ cas->so = so;
+ cas->kind = CaseRecv;
+ cas->sg.elem = elem;
+ cas->receivedp = received;
+
+ if(debug)
+ runtime·printf("selectrecv s=%p pc=%p chan=%p so=%d\n",
+ sel, cas->pc, cas->chan, cas->so);
+}
+
+// cut in half to give stack a chance to split
+static void selectdefault(Select*, void*, int32);
+
+// selectdefault(sel *byte) (selected bool);
+#pragma textflag 7
+void
+runtime·selectdefault(Select *sel, bool selected)
+{
+ selected = false;
+ FLUSH(&selected);
+
+ selectdefault(sel, runtime·getcallerpc(&sel), (byte*)&selected - (byte*)&sel);
+}
+
+static void
+selectdefault(Select *sel, void *callerpc, int32 so)
+{
+ int32 i;
+ Scase *cas;
+
+ i = sel->ncase;
+ if(i >= sel->tcase)
+ runtime·throw("selectdefault: too many cases");
+ sel->ncase = i+1;
+ cas = &sel->scase[i];
+ cas->pc = callerpc;
+ cas->chan = nil;
+
+ cas->so = so;
+ cas->kind = CaseDefault;
+
+ if(debug)
+ runtime·printf("selectdefault s=%p pc=%p so=%d\n",
+ sel, cas->pc, cas->so);
+}
+
+static void
+sellock(Select *sel)
+{
+ uint32 i;
+ Hchan *c, *c0;
+
+ c = nil;
+ for(i=0; i<sel->ncase; i++) {
+ c0 = sel->lockorder[i];
+ if(c0 && c0 != c) {
+ c = sel->lockorder[i];
+ runtime·lock(c);
+ }
+ }
+}
+
+static void
+selunlock(Select *sel)
+{
+ uint32 i;
+ Hchan *c, *c0;
+
+ c = nil;
+ for(i=sel->ncase; i-->0;) {
+ c0 = sel->lockorder[i];
+ if(c0 && c0 != c) {
+ c = c0;
+ runtime·unlock(c);
+ }
+ }
+}
+
+void
+runtime·block(void)
+{
+ g->status = Gwaiting; // forever
+ runtime·gosched();
+}
+
+static void* selectgo(Select**);
+
+// selectgo(sel *byte);
+//
+// overwrites return pc on stack to signal which case of the select
+// to run, so cannot appear at the top of a split stack.
+#pragma textflag 7
+void
+runtime·selectgo(Select *sel)
+{
+ runtime·setcallerpc(&sel, selectgo(&sel));
+}
+
+static void*
+selectgo(Select **selp)
+{
+ Select *sel;
+ uint32 o, i, j;
+ Scase *cas, *dfl;
+ Hchan *c;
+ SudoG *sg;
+ G *gp;
+ byte *as;
+ void *pc;
+
+ sel = *selp;
+ if(runtime·gcwaiting)
+ runtime·gosched();
+
+ if(debug)
+ runtime·printf("select: sel=%p\n", sel);
+
+ // The compiler rewrites selects that statically have
+ // only 0 or 1 cases plus default into simpler constructs.
+ // The only way we can end up with such small sel->ncase
+ // values here is for a larger select in which most channels
+ // have been nilled out. The general code handles those
+ // cases correctly, and they are rare enough not to bother
+ // optimizing (and needing to test).
+
+ // generate permuted order
+ for(i=0; i<sel->ncase; i++)
+ sel->pollorder[i] = i;
+ for(i=1; i<sel->ncase; i++) {
+ o = sel->pollorder[i];
+ j = runtime·fastrand1()%(i+1);
+ sel->pollorder[i] = sel->pollorder[j];
+ sel->pollorder[j] = o;
+ }
+
+ // sort the cases by Hchan address to get the locking order.
+ for(i=0; i<sel->ncase; i++) {
+ c = sel->scase[i].chan;
+ for(j=i; j>0 && sel->lockorder[j-1] >= c; j--)
+ sel->lockorder[j] = sel->lockorder[j-1];
+ sel->lockorder[j] = c;
+ }
+ sellock(sel);
+
+loop:
+ // pass 1 - look for something already waiting
+ dfl = nil;
+ for(i=0; i<sel->ncase; i++) {
+ o = sel->pollorder[i];
+ cas = &sel->scase[o];
+ c = cas->chan;
+
+ switch(cas->kind) {
+ case CaseRecv:
+ if(c->dataqsiz > 0) {
+ if(c->qcount > 0)
+ goto asyncrecv;
+ } else {
+ sg = dequeue(&c->sendq);
+ if(sg != nil)
+ goto syncrecv;
+ }
+ if(c->closed)
+ goto rclose;
+ break;
+
+ case CaseSend:
+ if(c->closed)
+ goto sclose;
+ if(c->dataqsiz > 0) {
+ if(c->qcount < c->dataqsiz)
+ goto asyncsend;
+ } else {
+ sg = dequeue(&c->recvq);
+ if(sg != nil)
+ goto syncsend;
+ }
+ break;
+
+ case CaseDefault:
+ dfl = cas;
+ break;
+ }
+ }
+
+ if(dfl != nil) {
+ selunlock(sel);
+ cas = dfl;
+ goto retc;
+ }
+
+
+ // pass 2 - enqueue on all chans
+ for(i=0; i<sel->ncase; i++) {
+ o = sel->pollorder[i];
+ cas = &sel->scase[o];
+ c = cas->chan;
+ sg = &cas->sg;
+ sg->g = g;
+ sg->selgen = g->selgen;
+
+ switch(cas->kind) {
+ case CaseRecv:
+ enqueue(&c->recvq, sg);
+ break;
+
+ case CaseSend:
+ enqueue(&c->sendq, sg);
+ break;
+ }
+ }
+
+ g->param = nil;
+ g->status = Gwaiting;
+ selunlock(sel);
+ runtime·gosched();
+
+ sellock(sel);
+ sg = g->param;
+
+ // pass 3 - dequeue from unsuccessful chans
+ // otherwise they stack up on quiet channels
+ for(i=0; i<sel->ncase; i++) {
+ cas = &sel->scase[i];
+ if(cas != (Scase*)sg) {
+ c = cas->chan;
+ if(cas->kind == CaseSend)
+ dequeueg(&c->sendq);
+ else
+ dequeueg(&c->recvq);
+ }
+ }
+
+ if(sg == nil)
+ goto loop;
+
+ cas = (Scase*)sg;
+ c = cas->chan;
+
+ if(c->dataqsiz > 0)
+ runtime·throw("selectgo: shouldnt happen");
+
+ if(debug)
+ runtime·printf("wait-return: sel=%p c=%p cas=%p kind=%d\n",
+ sel, c, cas, cas->kind);
+
+ if(cas->kind == CaseRecv) {
+ if(cas->receivedp != nil)
+ *cas->receivedp = true;
+ }
+
+ selunlock(sel);
+ goto retc;
+
+asyncrecv:
+ // can receive from buffer
+ if(cas->receivedp != nil)
+ *cas->receivedp = true;
+ if(cas->sg.elem != nil)
+ c->elemalg->copy(c->elemsize, cas->sg.elem, chanbuf(c, c->recvx));
+ c->elemalg->copy(c->elemsize, chanbuf(c, c->recvx), nil);
+ if(++c->recvx == c->dataqsiz)
+ c->recvx = 0;
+ c->qcount--;
+ sg = dequeue(&c->sendq);
+ if(sg != nil) {
+ gp = sg->g;
+ selunlock(sel);
+ runtime·ready(gp);
+ } else {
+ selunlock(sel);
+ }
+ goto retc;
+
+asyncsend:
+ // can send to buffer
+ c->elemalg->copy(c->elemsize, chanbuf(c, c->sendx), cas->sg.elem);
+ if(++c->sendx == c->dataqsiz)
+ c->sendx = 0;
+ c->qcount++;
+ sg = dequeue(&c->recvq);
+ if(sg != nil) {
+ gp = sg->g;
+ selunlock(sel);
+ runtime·ready(gp);
+ } else {
+ selunlock(sel);
+ }
+ goto retc;
+
+syncrecv:
+ // can receive from sleeping sender (sg)
+ selunlock(sel);
+ if(debug)
+ runtime·printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o);
+ if(cas->receivedp != nil)
+ *cas->receivedp = true;
+ if(cas->sg.elem != nil)
+ c->elemalg->copy(c->elemsize, cas->sg.elem, sg->elem);
+ gp = sg->g;
+ gp->param = sg;
+ runtime·ready(gp);
+ goto retc;
+
+rclose:
+ // read at end of closed channel
+ selunlock(sel);
+ if(cas->receivedp != nil)
+ *cas->receivedp = false;
+ if(cas->sg.elem != nil)
+ c->elemalg->copy(c->elemsize, cas->sg.elem, nil);
+ goto retc;
+
+syncsend:
+ // can send to sleeping receiver (sg)
+ selunlock(sel);
+ if(debug)
+ runtime·printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o);
+ c->elemalg->copy(c->elemsize, sg->elem, cas->sg.elem);
+ gp = sg->g;
+ gp->param = sg;
+ runtime·ready(gp);
+
+retc:
+ // return to pc corresponding to chosen case
+ pc = cas->pc;
+ as = (byte*)selp + cas->so;
+ runtime·free(sel);
+ *as = true;
+ return pc;
+
+sclose:
+ // send on closed channel
+ selunlock(sel);
+ runtime·panicstring("send on closed channel");
+ return nil; // not reached
+}
+
+// closechan(sel *byte);
+void
+runtime·closechan(Hchan *c)
+{
+ SudoG *sg;
+ G* gp;
+
+ if(runtime·gcwaiting)
+ runtime·gosched();
+
+ runtime·lock(c);
+ if(c->closed) {
+ runtime·unlock(c);
+ runtime·panicstring("close of closed channel");
+ }
+
+ c->closed = true;
+
+ // release all readers
+ for(;;) {
+ sg = dequeue(&c->recvq);
+ if(sg == nil)
+ break;
+ gp = sg->g;
+ gp->param = nil;
+ runtime·ready(gp);
+ }
+
+ // release all writers
+ for(;;) {
+ sg = dequeue(&c->sendq);
+ if(sg == nil)
+ break;
+ gp = sg->g;
+ gp->param = nil;
+ runtime·ready(gp);
+ }
+
+ runtime·unlock(c);
+}
+
+// For reflect
+// func chanclose(c chan)
+void
+reflect·chanclose(Hchan *c)
+{
+ runtime·closechan(c);
+}
+
+// For reflect
+// func chanlen(c chan) (len int32)
+void
+reflect·chanlen(Hchan *c, int32 len)
+{
+ if(c == nil)
+ len = 0;
+ else
+ len = c->qcount;
+ FLUSH(&len);
+}
+
+// For reflect
+// func chancap(c chan) (cap int32)
+void
+reflect·chancap(Hchan *c, int32 cap)
+{
+ if(c == nil)
+ cap = 0;
+ else
+ cap = c->dataqsiz;
+ FLUSH(&cap);
+}
+
+static SudoG*
+dequeue(WaitQ *q)
+{
+ SudoG *sgp;
+
+loop:
+ sgp = q->first;
+ if(sgp == nil)
+ return nil;
+ q->first = sgp->link;
+
+ // if sgp is stale, ignore it
+ if(sgp->selgen != NOSELGEN &&
+ (sgp->selgen != sgp->g->selgen ||
+ !runtime·cas(&sgp->g->selgen, sgp->selgen, sgp->selgen + 2))) {
+ //prints("INVALID PSEUDOG POINTER\n");
+ goto loop;
+ }
+
+ return sgp;
+}
+
+static void
+dequeueg(WaitQ *q)
+{
+ SudoG **l, *sgp, *prevsgp;
+
+ prevsgp = nil;
+ for(l=&q->first; (sgp=*l) != nil; l=&sgp->link, prevsgp=sgp) {
+ if(sgp->g == g) {
+ *l = sgp->link;
+ if(q->last == sgp)
+ q->last = prevsgp;
+ break;
+ }
+ }
+}
+
+static void
+enqueue(WaitQ *q, SudoG *sgp)
+{
+ sgp->link = nil;
+ if(q->first == nil) {
+ q->first = sgp;
+ q->last = sgp;
+ return;
+ }
+ q->last->link = sgp;
+ q->last = sgp;
+}
diff --git a/src/pkg/runtime/chan_test.go b/src/pkg/runtime/chan_test.go
new file mode 100644
index 000000000..46ddfd7e8
--- /dev/null
+++ b/src/pkg/runtime/chan_test.go
@@ -0,0 +1,322 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+func TestChanSendInterface(t *testing.T) {
+ type mt struct{}
+ m := &mt{}
+ c := make(chan interface{}, 1)
+ c <- m
+ select {
+ case c <- m:
+ default:
+ }
+ select {
+ case c <- m:
+ case c <- &mt{}:
+ default:
+ }
+}
+
+func TestPseudoRandomSend(t *testing.T) {
+ n := 100
+ c := make(chan int)
+ l := make([]int, n)
+ var m sync.Mutex
+ m.Lock()
+ go func() {
+ for i := 0; i < n; i++ {
+ runtime.Gosched()
+ l[i] = <-c
+ }
+ m.Unlock()
+ }()
+ for i := 0; i < n; i++ {
+ select {
+ case c <- 0:
+ case c <- 1:
+ }
+ }
+ m.Lock() // wait
+ n0 := 0
+ n1 := 0
+ for _, i := range l {
+ n0 += (i + 1) % 2
+ n1 += i
+ if n0 > n/10 && n1 > n/10 {
+ return
+ }
+ }
+ t.Errorf("Want pseudo random, got %d zeros and %d ones", n0, n1)
+}
+
+func BenchmarkSelectUncontended(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ myc1 := make(chan int, 1)
+ myc2 := make(chan int, 1)
+ myc1 <- 0
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ select {
+ case <-myc1:
+ myc2 <- 0
+ case <-myc2:
+ myc1 <- 0
+ }
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSelectContended(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ myc1 := make(chan int, procs)
+ myc2 := make(chan int, procs)
+ for p := 0; p < procs; p++ {
+ myc1 <- 0
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ select {
+ case <-myc1:
+ myc2 <- 0
+ case <-myc2:
+ myc1 <- 0
+ }
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSelectNonblock(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ myc1 := make(chan int)
+ myc2 := make(chan int)
+ myc3 := make(chan int, 1)
+ myc4 := make(chan int, 1)
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ select {
+ case <-myc1:
+ default:
+ }
+ select {
+ case myc2 <- 0:
+ default:
+ }
+ select {
+ case <-myc3:
+ default:
+ }
+ select {
+ case myc4 <- 0:
+ default:
+ }
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkChanUncontended(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ myc := make(chan int, CallsPerSched)
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ myc <- 0
+ }
+ for g := 0; g < CallsPerSched; g++ {
+ <-myc
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkChanContended(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ myc := make(chan int, procs*CallsPerSched)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ myc <- 0
+ }
+ for g := 0; g < CallsPerSched; g++ {
+ <-myc
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkChanSync(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := 2
+ N := int32(b.N / CallsPerSched / procs * procs)
+ c := make(chan bool, procs)
+ myc := make(chan int)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for {
+ i := atomic.AddInt32(&N, -1)
+ if i < 0 {
+ break
+ }
+ for g := 0; g < CallsPerSched; g++ {
+ if i%2 == 0 {
+ <-myc
+ myc <- 0
+ } else {
+ myc <- 0
+ <-myc
+ }
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, 2*procs)
+ myc := make(chan int, chanSize)
+ for p := 0; p < procs; p++ {
+ go func() {
+ foo := 0
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ myc <- 1
+ }
+ }
+ myc <- 0
+ c <- foo == 42
+ }()
+ go func() {
+ foo := 0
+ for {
+ v := <-myc
+ if v == 0 {
+ break
+ }
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ c <- foo == 42
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ <-c
+ }
+}
+
+func BenchmarkChanProdCons0(b *testing.B) {
+ benchmarkChanProdCons(b, 0, 0)
+}
+
+func BenchmarkChanProdCons10(b *testing.B) {
+ benchmarkChanProdCons(b, 10, 0)
+}
+
+func BenchmarkChanProdCons100(b *testing.B) {
+ benchmarkChanProdCons(b, 100, 0)
+}
+
+func BenchmarkChanProdConsWork0(b *testing.B) {
+ benchmarkChanProdCons(b, 0, 100)
+}
+
+func BenchmarkChanProdConsWork10(b *testing.B) {
+ benchmarkChanProdCons(b, 10, 100)
+}
+
+func BenchmarkChanProdConsWork100(b *testing.B) {
+ benchmarkChanProdCons(b, 100, 100)
+}
+
+func BenchmarkChanCreation(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ myc := make(chan int, 1)
+ myc <- 0
+ <-myc
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
diff --git a/src/pkg/runtime/closure_test.go b/src/pkg/runtime/closure_test.go
new file mode 100644
index 000000000..ea65fbd5f
--- /dev/null
+++ b/src/pkg/runtime/closure_test.go
@@ -0,0 +1,53 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package runtime_test
+
+import "testing"
+
+var s int
+
+func BenchmarkCallClosure(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s += func(ii int) int { return 2 * ii }(i)
+ }
+}
+
+func BenchmarkCallClosure1(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ j := i
+ s += func(ii int) int { return 2*ii + j }(i)
+ }
+}
+
+var ss *int
+
+func BenchmarkCallClosure2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ j := i
+ s += func() int {
+ ss = &j
+ return 2
+ }()
+ }
+}
+
+func addr1(x int) *int {
+ return func() *int { return &x }()
+}
+
+func BenchmarkCallClosure3(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ss = addr1(i)
+ }
+}
+
+func addr2() (x int, p *int) {
+ return 0, func() *int { return &x }()
+}
+
+func BenchmarkCallClosure4(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _, ss = addr2()
+ }
+}
diff --git a/src/pkg/runtime/complex.c b/src/pkg/runtime/complex.c
new file mode 100644
index 000000000..eeb943940
--- /dev/null
+++ b/src/pkg/runtime/complex.c
@@ -0,0 +1,60 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+typedef struct Complex128 Complex128;
+
+void
+runtime·complex128div(Complex128 n, Complex128 d, Complex128 q)
+{
+ int32 ninf, dinf, nnan, dnan;
+ float64 a, b, ratio, denom;
+
+ // Special cases as in C99.
+ ninf = runtime·isInf(n.real, 0) || runtime·isInf(n.imag, 0);
+ dinf = runtime·isInf(d.real, 0) || runtime·isInf(d.imag, 0);
+
+ nnan = !ninf && (runtime·isNaN(n.real) || runtime·isNaN(n.imag));
+ dnan = !dinf && (runtime·isNaN(d.real) || runtime·isNaN(d.imag));
+
+ if(nnan || dnan) {
+ q.real = runtime·NaN();
+ q.imag = runtime·NaN();
+ } else if(ninf && !dinf && !dnan) {
+ q.real = runtime·Inf(0);
+ q.imag = runtime·Inf(0);
+ } else if(!ninf && !nnan && dinf) {
+ q.real = 0;
+ q.imag = 0;
+ } else if(d.real == 0 && d.imag == 0) {
+ if(n.real == 0 && n.imag == 0) {
+ q.real = runtime·NaN();
+ q.imag = runtime·NaN();
+ } else {
+ q.real = runtime·Inf(0);
+ q.imag = runtime·Inf(0);
+ }
+ } else {
+ // Standard complex arithmetic, factored to avoid unnecessary overflow.
+ a = d.real;
+ if(a < 0)
+ a = -a;
+ b = d.imag;
+ if(b < 0)
+ b = -b;
+ if(a <= b) {
+ ratio = d.real/d.imag;
+ denom = d.real*ratio + d.imag;
+ q.real = (n.real*ratio + n.imag) / denom;
+ q.imag = (n.imag*ratio - n.real) / denom;
+ } else {
+ ratio = d.imag/d.real;
+ denom = d.imag*ratio + d.real;
+ q.real = (n.imag*ratio + n.real) / denom;
+ q.imag = (n.imag - n.real*ratio) / denom;
+ }
+ }
+ FLUSH(&q);
+}
diff --git a/src/pkg/runtime/cpuprof.c b/src/pkg/runtime/cpuprof.c
new file mode 100644
index 000000000..74b795b7e
--- /dev/null
+++ b/src/pkg/runtime/cpuprof.c
@@ -0,0 +1,425 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU profiling.
+// Based on algorithms and data structures used in
+// http://code.google.com/p/google-perftools/.
+//
+// The main difference between this code and the google-perftools
+// code is that this code is written to allow copying the profile data
+// to an arbitrary io.Writer, while the google-perftools code always
+// writes to an operating system file.
+//
+// The signal handler for the profiling clock tick adds a new stack trace
+// to a hash table tracking counts for recent traces. Most clock ticks
+// hit in the cache. In the event of a cache miss, an entry must be
+// evicted from the hash table, copied to a log that will eventually be
+// written as profile data. The google-perftools code flushed the
+// log itself during the signal handler. This code cannot do that, because
+// the io.Writer might block or need system calls or locks that are not
+// safe to use from within the signal handler. Instead, we split the log
+// into two halves and let the signal handler fill one half while a goroutine
+// is writing out the other half. When the signal handler fills its half, it
+// offers to swap with the goroutine. If the writer is not done with its half,
+// we lose the stack trace for this clock tick (and record that loss).
+// The goroutine interacts with the signal handler by calling getprofile() to
+// get the next log piece to write, implicitly handing back the last log
+// piece it obtained.
+//
+// The state of this dance between the signal handler and the goroutine
+// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine
+// is not using either log half and is waiting (or will soon be waiting) for
+// a new piece by calling notesleep(&p->wait). If the signal handler
+// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait)
+// to wake the goroutine. The value indicates the number of entries in the
+// log half being handed off. The goroutine leaves the non-zero value in
+// place until it has finished processing the log half and then flips the number
+// back to zero. Setting the high bit in handoff means that the profiling is over,
+// and the goroutine is now in charge of flushing the data left in the hash table
+// to the log and returning that data.
+//
+// The handoff field is manipulated using atomic operations.
+// For the most part, the manipulation of handoff is orderly: if handoff == 0
+// then the signal handler owns it and can change it to non-zero.
+// If handoff != 0 then the goroutine owns it and can change it to zero.
+// If that were the end of the story then we would not need to manipulate
+// handoff using atomic operations. The operations are needed, however,
+// in order to let the log closer set the high bit to indicate "EOF" safely
+// in the situation when normally the goroutine "owns" handoff.
+
+#include "runtime.h"
+#include "malloc.h"
+
+enum
+{
+ HashSize = 1<<10,
+ LogSize = 1<<17,
+ Assoc = 4,
+ MaxStack = 64,
+};
+
+typedef struct Profile Profile;
+typedef struct Bucket Bucket;
+typedef struct Entry Entry;
+
+struct Entry {
+ uintptr count;
+ uintptr depth;
+ uintptr stack[MaxStack];
+};
+
+struct Bucket {
+ Entry entry[Assoc];
+};
+
+struct Profile {
+ bool on; // profiling is on
+ Note wait; // goroutine waits here
+ uintptr count; // tick count
+ uintptr evicts; // eviction count
+ uintptr lost; // lost ticks that need to be logged
+ uintptr totallost; // total lost ticks
+
+ // Active recent stack traces.
+ Bucket hash[HashSize];
+
+ // Log of traces evicted from hash.
+ // Signal handler has filled log[toggle][:nlog].
+ // Goroutine is writing log[1-toggle][:handoff].
+ uintptr log[2][LogSize/2];
+ uintptr nlog;
+ int32 toggle;
+ uint32 handoff;
+
+ // Writer state.
+ // Writer maintains its own toggle to avoid races
+ // looking at signal handler's toggle.
+ uint32 wtoggle;
+ bool wholding; // holding & need to release a log half
+ bool flushing; // flushing hash table - profile is over
+};
+
+static Lock lk;
+static Profile *prof;
+
+static void tick(uintptr*, int32);
+static void add(Profile*, uintptr*, int32);
+static bool evict(Profile*, Entry*);
+static bool flushlog(Profile*);
+
+// LostProfileData is a no-op function used in profiles
+// to mark the number of profiling stack traces that were
+// discarded due to slow data writers.
+static void LostProfileData(void) {
+}
+
+// SetCPUProfileRate sets the CPU profiling rate.
+// The user documentation is in debug.go.
+void
+runtime·SetCPUProfileRate(int32 hz)
+{
+ uintptr *p;
+ uintptr n;
+
+ // Call findfunc now so that it won't have to
+ // build tables during the signal handler.
+ runtime·findfunc(0);
+
+ // Clamp hz to something reasonable.
+ if(hz < 0)
+ hz = 0;
+ if(hz > 1000000)
+ hz = 1000000;
+
+ runtime·lock(&lk);
+ if(hz > 0) {
+ if(prof == nil) {
+ prof = runtime·SysAlloc(sizeof *prof);
+ if(prof == nil) {
+ runtime·printf("runtime: cpu profiling cannot allocate memory\n");
+ runtime·unlock(&lk);
+ return;
+ }
+ }
+ if(prof->on || prof->handoff != 0) {
+ runtime·printf("runtime: cannot set cpu profile rate until previous profile has finished.\n");
+ runtime·unlock(&lk);
+ return;
+ }
+
+ prof->on = true;
+ p = prof->log[0];
+ // pprof binary header format.
+ // http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117
+ *p++ = 0; // count for header
+ *p++ = 3; // depth for header
+ *p++ = 0; // version number
+ *p++ = 1000000 / hz; // period (microseconds)
+ *p++ = 0;
+ prof->nlog = p - prof->log[0];
+ prof->toggle = 0;
+ prof->wholding = false;
+ prof->wtoggle = 0;
+ prof->flushing = false;
+ runtime·noteclear(&prof->wait);
+
+ runtime·setcpuprofilerate(tick, hz);
+ } else if(prof->on) {
+ runtime·setcpuprofilerate(nil, 0);
+ prof->on = false;
+
+ // Now add is not running anymore, and getprofile owns the entire log.
+ // Set the high bit in prof->handoff to tell getprofile.
+ for(;;) {
+ n = prof->handoff;
+ if(n&0x80000000)
+ runtime·printf("runtime: setcpuprofile(off) twice");
+ if(runtime·cas(&prof->handoff, n, n|0x80000000))
+ break;
+ }
+ if(n == 0) {
+ // we did the transition from 0 -> nonzero so we wake getprofile
+ runtime·notewakeup(&prof->wait);
+ }
+ }
+ runtime·unlock(&lk);
+}
+
+static void
+tick(uintptr *pc, int32 n)
+{
+ add(prof, pc, n);
+}
+
+// add adds the stack trace to the profile.
+// It is called from signal handlers and other limited environments
+// and cannot allocate memory or acquire locks that might be
+// held at the time of the signal, nor can it use substantial amounts
+// of stack. It is allowed to call evict.
+static void
+add(Profile *p, uintptr *pc, int32 n)
+{
+ int32 i, j;
+ uintptr h, x;
+ Bucket *b;
+ Entry *e;
+
+ if(n > MaxStack)
+ n = MaxStack;
+
+ // Compute hash.
+ h = 0;
+ for(i=0; i<n; i++) {
+ h = h<<8 | (h>>(8*(sizeof(h)-1)));
+ x = pc[i];
+ h += x*31 + x*7 + x*3;
+ }
+ p->count++;
+
+ // Add to entry count if already present in table.
+ b = &p->hash[h%HashSize];
+ for(i=0; i<Assoc; i++) {
+ e = &b->entry[i];
+ if(e->depth != n)
+ continue;
+ for(j=0; j<n; j++)
+ if(e->stack[j] != pc[j])
+ goto ContinueAssoc;
+ e->count++;
+ return;
+ ContinueAssoc:;
+ }
+
+ // Evict entry with smallest count.
+ e = &b->entry[0];
+ for(i=1; i<Assoc; i++)
+ if(b->entry[i].count < e->count)
+ e = &b->entry[i];
+ if(e->count > 0) {
+ if(!evict(p, e)) {
+ // Could not evict entry. Record lost stack.
+ p->lost++;
+ p->totallost++;
+ return;
+ }
+ p->evicts++;
+ }
+
+ // Reuse the newly evicted entry.
+ e->depth = n;
+ e->count = 1;
+ for(i=0; i<n; i++)
+ e->stack[i] = pc[i];
+}
+
+// evict copies the given entry's data into the log, so that
+// the entry can be reused. evict is called from add, which
+// is called from the profiling signal handler, so it must not
+// allocate memory or block. It is safe to call flushLog.
+// evict returns true if the entry was copied to the log,
+// false if there was no room available.
+static bool
+evict(Profile *p, Entry *e)
+{
+ int32 i, d, nslot;
+ uintptr *log, *q;
+
+ d = e->depth;
+ nslot = d+2;
+ log = p->log[p->toggle];
+ if(p->nlog+nslot > nelem(p->log[0])) {
+ if(!flushlog(p))
+ return false;
+ log = p->log[p->toggle];
+ }
+
+ q = log+p->nlog;
+ *q++ = e->count;
+ *q++ = d;
+ for(i=0; i<d; i++)
+ *q++ = e->stack[i];
+ p->nlog = q - log;
+ e->count = 0;
+ return true;
+}
+
+// flushlog tries to flush the current log and switch to the other one.
+// flushlog is called from evict, called from add, called from the signal handler,
+// so it cannot allocate memory or block. It can try to swap logs with
+// the writing goroutine, as explained in the comment at the top of this file.
+static bool
+flushlog(Profile *p)
+{
+ uintptr *log, *q;
+
+ if(!runtime·cas(&p->handoff, 0, p->nlog))
+ return false;
+ runtime·notewakeup(&p->wait);
+
+ p->toggle = 1 - p->toggle;
+ log = p->log[p->toggle];
+ q = log;
+ if(p->lost > 0) {
+ *q++ = p->lost;
+ *q++ = 1;
+ *q++ = (uintptr)LostProfileData;
+ }
+ p->nlog = q - log;
+ return true;
+}
+
+// getprofile blocks until the next block of profiling data is available
+// and returns it as a []byte. It is called from the writing goroutine.
+Slice
+getprofile(Profile *p)
+{
+ uint32 i, j, n;
+ Slice ret;
+ Bucket *b;
+ Entry *e;
+
+ ret.array = nil;
+ ret.len = 0;
+ ret.cap = 0;
+
+ if(p == nil)
+ return ret;
+
+ if(p->wholding) {
+ // Release previous log to signal handling side.
+ // Loop because we are racing against setprofile(off).
+ for(;;) {
+ n = p->handoff;
+ if(n == 0) {
+ runtime·printf("runtime: phase error during cpu profile handoff\n");
+ return ret;
+ }
+ if(n & 0x80000000) {
+ p->wtoggle = 1 - p->wtoggle;
+ p->wholding = false;
+ p->flushing = true;
+ goto flush;
+ }
+ if(runtime·cas(&p->handoff, n, 0))
+ break;
+ }
+ p->wtoggle = 1 - p->wtoggle;
+ p->wholding = false;
+ }
+
+ if(p->flushing)
+ goto flush;
+
+ if(!p->on && p->handoff == 0)
+ return ret;
+
+ // Wait for new log.
+ runtime·entersyscall();
+ runtime·notesleep(&p->wait);
+ runtime·exitsyscall();
+ runtime·noteclear(&p->wait);
+
+ n = p->handoff;
+ if(n == 0) {
+ runtime·printf("runtime: phase error during cpu profile wait\n");
+ return ret;
+ }
+ if(n == 0x80000000) {
+ p->flushing = true;
+ goto flush;
+ }
+ n &= ~0x80000000;
+
+ // Return new log to caller.
+ p->wholding = true;
+
+ ret.array = (byte*)p->log[p->wtoggle];
+ ret.len = n*sizeof(uintptr);
+ ret.cap = ret.len;
+ return ret;
+
+flush:
+ // In flush mode.
+ // Add is no longer being called. We own the log.
+ // Also, p->handoff is non-zero, so flushlog will return false.
+ // Evict the hash table into the log and return it.
+ for(i=0; i<HashSize; i++) {
+ b = &p->hash[i];
+ for(j=0; j<Assoc; j++) {
+ e = &b->entry[j];
+ if(e->count > 0 && !evict(p, e)) {
+ // Filled the log. Stop the loop and return what we've got.
+ goto breakflush;
+ }
+ }
+ }
+breakflush:
+
+ // Return pending log data.
+ if(p->nlog > 0) {
+ // Note that we're using toggle now, not wtoggle,
+ // because we're working on the log directly.
+ ret.array = (byte*)p->log[p->toggle];
+ ret.len = p->nlog*sizeof(uintptr);
+ ret.cap = ret.len;
+ p->nlog = 0;
+ return ret;
+ }
+
+ // Made it through the table without finding anything to log.
+ // Finally done. Clean up and return nil.
+ p->flushing = false;
+ if(!runtime·cas(&p->handoff, p->handoff, 0))
+ runtime·printf("runtime: profile flush racing with something\n");
+ return ret; // set to nil at top of function
+}
+
+// CPUProfile returns the next cpu profile block as a []byte.
+// The user documentation is in debug.go.
+void
+runtime·CPUProfile(Slice ret)
+{
+ ret = getprofile(prof);
+ FLUSH(&ret);
+}
diff --git a/src/pkg/runtime/darwin/386/defs.h b/src/pkg/runtime/darwin/386/defs.h
new file mode 100644
index 000000000..bb70207fd
--- /dev/null
+++ b/src/pkg/runtime/darwin/386/defs.h
@@ -0,0 +1,289 @@
+// godefs -f -m32 defs.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x1000,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ MACH_MSG_TYPE_MOVE_RECEIVE = 0x10,
+ MACH_MSG_TYPE_MOVE_SEND = 0x11,
+ MACH_MSG_TYPE_MOVE_SEND_ONCE = 0x12,
+ MACH_MSG_TYPE_COPY_SEND = 0x13,
+ MACH_MSG_TYPE_MAKE_SEND = 0x14,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE = 0x15,
+ MACH_MSG_TYPE_COPY_RECEIVE = 0x16,
+ MACH_MSG_PORT_DESCRIPTOR = 0,
+ MACH_MSG_OOL_DESCRIPTOR = 0x1,
+ MACH_MSG_OOL_PORTS_DESCRIPTOR = 0x2,
+ MACH_MSG_OOL_VOLATILE_DESCRIPTOR = 0x3,
+ MACH_MSGH_BITS_COMPLEX = 0x80000000,
+ MACH_SEND_MSG = 0x1,
+ MACH_RCV_MSG = 0x2,
+ MACH_RCV_LARGE = 0x4,
+ MACH_SEND_TIMEOUT = 0x10,
+ MACH_SEND_INTERRUPT = 0x40,
+ MACH_SEND_CANCEL = 0x80,
+ MACH_SEND_ALWAYS = 0x10000,
+ MACH_SEND_TRAILER = 0x20000,
+ MACH_RCV_TIMEOUT = 0x100,
+ MACH_RCV_NOTIFY = 0x200,
+ MACH_RCV_INTERRUPT = 0x400,
+ MACH_RCV_OVERWRITE = 0x1000,
+ NDR_PROTOCOL_2_0 = 0,
+ NDR_INT_BIG_ENDIAN = 0,
+ NDR_INT_LITTLE_ENDIAN = 0x1,
+ NDR_FLOAT_IEEE = 0,
+ NDR_CHAR_ASCII = 0,
+ SA_SIGINFO = 0x40,
+ SA_RESTART = 0x2,
+ SA_ONSTACK = 0x1,
+ SA_USERTRAMP = 0x100,
+ SA_64REGSET = 0x200,
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGEMT = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGBUS = 0xa,
+ SIGSEGV = 0xb,
+ SIGSYS = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGTERM = 0xf,
+ SIGURG = 0x10,
+ SIGSTOP = 0x11,
+ SIGTSTP = 0x12,
+ SIGCONT = 0x13,
+ SIGCHLD = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGIO = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGINFO = 0x1d,
+ SIGUSR1 = 0x1e,
+ SIGUSR2 = 0x1f,
+ FPE_INTDIV = 0x7,
+ FPE_INTOVF = 0x8,
+ FPE_FLTDIV = 0x1,
+ FPE_FLTOVF = 0x2,
+ FPE_FLTUND = 0x3,
+ FPE_FLTRES = 0x4,
+ FPE_FLTINV = 0x5,
+ FPE_FLTSUB = 0x6,
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+ ITIMER_REAL = 0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+};
+
+// Types
+#pragma pack on
+
+typedef struct MachBody MachBody;
+struct MachBody {
+ uint32 msgh_descriptor_count;
+};
+
+typedef struct MachHeader MachHeader;
+struct MachHeader {
+ uint32 msgh_bits;
+ uint32 msgh_size;
+ uint32 msgh_remote_port;
+ uint32 msgh_local_port;
+ uint32 msgh_reserved;
+ int32 msgh_id;
+};
+
+typedef struct MachNDR MachNDR;
+struct MachNDR {
+ uint8 mig_vers;
+ uint8 if_vers;
+ uint8 reserved1;
+ uint8 mig_encoding;
+ uint8 int_rep;
+ uint8 char_rep;
+ uint8 float_rep;
+ uint8 reserved2;
+};
+
+typedef struct MachPort MachPort;
+struct MachPort {
+ uint32 name;
+ uint32 pad1;
+ uint16 pad2;
+ uint8 disposition;
+ uint8 type;
+};
+
+typedef struct StackT StackT;
+struct StackT {
+ void *ss_sp;
+ uint32 ss_size;
+ int32 ss_flags;
+};
+
+typedef union Sighandler Sighandler;
+union Sighandler {
+ uint32 __sa_handler;
+ uint32 __sa_sigaction;
+};
+
+typedef struct Sigaction Sigaction;
+struct Sigaction {
+ Sighandler __sigaction_u;
+ uint32 sa_tramp;
+ uint32 sa_mask;
+ int32 sa_flags;
+};
+
+typedef union Sigval Sigval;
+union Sigval {
+ int32 sival_int;
+ void *sival_ptr;
+};
+
+typedef struct Siginfo Siginfo;
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ int32 si_pid;
+ uint32 si_uid;
+ int32 si_status;
+ void *si_addr;
+ Sigval si_value;
+ int32 si_band;
+ uint32 __pad[7];
+};
+
+typedef struct Timeval Timeval;
+struct Timeval {
+ int32 tv_sec;
+ int32 tv_usec;
+};
+
+typedef struct Itimerval Itimerval;
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+
+typedef struct FPControl FPControl;
+struct FPControl {
+ byte pad_godefs_0[2];
+};
+
+typedef struct FPStatus FPStatus;
+struct FPStatus {
+ byte pad_godefs_0[2];
+};
+
+typedef struct RegMMST RegMMST;
+struct RegMMST {
+ int8 mmst_reg[10];
+ int8 mmst_rsrv[6];
+};
+
+typedef struct RegXMM RegXMM;
+struct RegXMM {
+ int8 xmm_reg[16];
+};
+
+typedef struct Regs Regs;
+struct Regs {
+ uint32 eax;
+ uint32 ebx;
+ uint32 ecx;
+ uint32 edx;
+ uint32 edi;
+ uint32 esi;
+ uint32 ebp;
+ uint32 esp;
+ uint32 ss;
+ uint32 eflags;
+ uint32 eip;
+ uint32 cs;
+ uint32 ds;
+ uint32 es;
+ uint32 fs;
+ uint32 gs;
+};
+
+typedef struct FloatState FloatState;
+struct FloatState {
+ uint64 fpu_reserved;
+ FPControl fpu_fcw;
+ FPStatus fpu_fsw;
+ uint8 fpu_ftw;
+ uint8 fpu_rsrv1;
+ uint16 fpu_fop;
+ uint32 fpu_ip;
+ uint16 fpu_cs;
+ uint16 fpu_rsrv2;
+ uint32 fpu_dp;
+ uint16 fpu_ds;
+ uint16 fpu_rsrv3;
+ uint32 fpu_mxcsr;
+ uint32 fpu_mxcsrmask;
+ RegMMST fpu_stmm0;
+ RegMMST fpu_stmm1;
+ RegMMST fpu_stmm2;
+ RegMMST fpu_stmm3;
+ RegMMST fpu_stmm4;
+ RegMMST fpu_stmm5;
+ RegMMST fpu_stmm6;
+ RegMMST fpu_stmm7;
+ RegXMM fpu_xmm0;
+ RegXMM fpu_xmm1;
+ RegXMM fpu_xmm2;
+ RegXMM fpu_xmm3;
+ RegXMM fpu_xmm4;
+ RegXMM fpu_xmm5;
+ RegXMM fpu_xmm6;
+ RegXMM fpu_xmm7;
+ int8 fpu_rsrv4[224];
+ int32 fpu_reserved1;
+};
+
+typedef struct ExceptionState ExceptionState;
+struct ExceptionState {
+ uint32 trapno;
+ uint32 err;
+ uint32 faultvaddr;
+};
+
+typedef struct Mcontext Mcontext;
+struct Mcontext {
+ ExceptionState es;
+ Regs ss;
+ FloatState fs;
+};
+
+typedef struct Ucontext Ucontext;
+struct Ucontext {
+ int32 uc_onstack;
+ uint32 uc_sigmask;
+ StackT uc_stack;
+ uint32 uc_link;
+ uint32 uc_mcsize;
+ Mcontext *uc_mcontext;
+};
+#pragma pack off
diff --git a/src/pkg/runtime/darwin/386/rt0.s b/src/pkg/runtime/darwin/386/rt0.s
new file mode 100644
index 000000000..30b497f5e
--- /dev/null
+++ b/src/pkg/runtime/darwin/386/rt0.s
@@ -0,0 +1,8 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin and Linux use the same linkage to main
+
+TEXT _rt0_386_darwin(SB),7,$0
+ JMP _rt0_386(SB)
diff --git a/src/pkg/runtime/darwin/386/signal.c b/src/pkg/runtime/darwin/386/signal.c
new file mode 100644
index 000000000..29170b669
--- /dev/null
+++ b/src/pkg/runtime/darwin/386/signal.c
@@ -0,0 +1,194 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "signals.h"
+
+void
+runtime·dumpregs(Regs *r)
+{
+ runtime·printf("eax %x\n", r->eax);
+ runtime·printf("ebx %x\n", r->ebx);
+ runtime·printf("ecx %x\n", r->ecx);
+ runtime·printf("edx %x\n", r->edx);
+ runtime·printf("edi %x\n", r->edi);
+ runtime·printf("esi %x\n", r->esi);
+ runtime·printf("ebp %x\n", r->ebp);
+ runtime·printf("esp %x\n", r->esp);
+ runtime·printf("eip %x\n", r->eip);
+ runtime·printf("eflags %x\n", r->eflags);
+ runtime·printf("cs %x\n", r->cs);
+ runtime·printf("fs %x\n", r->fs);
+ runtime·printf("gs %x\n", r->gs);
+}
+
+String
+runtime·signame(int32 sig)
+{
+ if(sig < 0 || sig >= NSIG)
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp)
+{
+ Ucontext *uc;
+ Mcontext *mc;
+ Regs *r;
+ uintptr *sp;
+ byte *pc;
+
+ uc = context;
+ mc = uc->uc_mcontext;
+ r = &mc->ss;
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)r->eip, (uint8*)r->esp, nil, gp);
+ return;
+ }
+
+ if(gp != nil && (runtime·sigtab[sig].flags & SigPanic)) {
+ // Work around Leopard bug that doesn't set FPE_INTDIV.
+ // Look at instruction to see if it is a divide.
+ // Not necessary in Snow Leopard (si_code will be != 0).
+ if(sig == SIGFPE && info->si_code == 0) {
+ pc = (byte*)r->eip;
+ if(pc[0] == 0x66) // 16-bit instruction prefix
+ pc++;
+ if(pc[0] == 0xF6 || pc[0] == 0xF7)
+ info->si_code = FPE_INTDIV;
+ }
+
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = info->si_code;
+ gp->sigcode1 = (uintptr)info->si_addr;
+ gp->sigpc = r->eip;
+
+ // Only push runtime·sigpanic if r->eip != 0.
+ // If r->eip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
+ // won't get to see who faulted.)
+ if(r->eip != 0) {
+ sp = (uintptr*)r->esp;
+ *--sp = r->eip;
+ r->esp = (uintptr)sp;
+ }
+ r->eip = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
+ return;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG){
+ runtime·printf("Signal %d\n", sig);
+ }else{
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+ }
+
+ runtime·printf("pc: %x\n", r->eip);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->eip, (void*)r->esp, 0, gp);
+ runtime·tracebackothers(gp);
+ runtime·dumpregs(r);
+ }
+
+ runtime·exit(2);
+}
+
+void
+runtime·sigignore(int32, Siginfo*, void*)
+{
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ StackT st;
+
+ st.ss_sp = p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ runtime·sigaltstack(&st, nil);
+}
+
+static void
+sigaction(int32 i, void (*fn)(int32, Siginfo*, void*, G*), bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask = ~0U;
+ sa.sa_tramp = (uintptr)runtime·sigtramp; // runtime·sigtramp's job is to call into real handler
+ sa.__sigaction_u.__sa_sigaction = (uintptr)fn;
+ runtime·sigaction(i, &sa, nil);
+}
+
+void
+runtime·initsig(int32 queue)
+{
+ int32 i;
+ void *fn;
+
+ runtime·siginit();
+
+ for(i = 0; i<NSIG; i++) {
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
+ continue;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ fn = runtime·sighandler;
+ else
+ fn = runtime·sigignore;
+ sigaction(i, fn, (runtime·sigtab[i].flags & SigRestart) != 0);
+ }
+ }
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ Itimerval it;
+
+ runtime·memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ sigaction(SIGPROF, SIG_IGN, true);
+ } else {
+ sigaction(SIGPROF, runtime·sighandler, true);
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ }
+ m->profilehz = hz;
+}
+
+void
+os·sigpipe(void)
+{
+ sigaction(SIGPIPE, SIG_DFL, false);
+ runtime·raisesigpipe();
+}
diff --git a/src/pkg/runtime/darwin/386/sys.s b/src/pkg/runtime/darwin/386/sys.s
new file mode 100644
index 000000000..87fbdbb79
--- /dev/null
+++ b/src/pkg/runtime/darwin/386/sys.s
@@ -0,0 +1,311 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// System calls and other sys.stuff for 386, Darwin
+// See http://fxr.watson.org/fxr/source/bsd/kern/syscalls.c?v=xnu-1228
+// or /usr/include/sys/syscall.h (on a Mac) for system call numbers.
+
+#include "386/asm.h"
+
+TEXT runtime·notok(SB),7,$0
+ MOVL $0xf1, 0xf1
+ RET
+
+// Exit the entire program (like C exit)
+TEXT runtime·exit(SB),7,$0
+ MOVL $1, AX
+ INT $0x80
+ CALL runtime·notok(SB)
+ RET
+
+// Exit this OS thread (like pthread_exit, which eventually
+// calls __bsdthread_terminate).
+TEXT runtime·exit1(SB),7,$0
+ MOVL $361, AX
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·write(SB),7,$0
+ MOVL $4, AX
+ INT $0x80
+ RET
+
+TEXT runtime·raisesigpipe(SB),7,$8
+ get_tls(CX)
+ MOVL m(CX), DX
+ MOVL m_procid(DX), DX
+ MOVL DX, 0(SP) // thread_port
+ MOVL $13, 4(SP) // signal: SIGPIPE
+ MOVL $328, AX // __pthread_kill
+ INT $0x80
+ RET
+
+TEXT runtime·mmap(SB),7,$0
+ MOVL $197, AX
+ INT $0x80
+ RET
+
+TEXT runtime·munmap(SB),7,$0
+ MOVL $73, AX
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·setitimer(SB),7,$0
+ MOVL $83, AX
+ INT $0x80
+ RET
+
+// void gettime(int64 *sec, int32 *usec)
+TEXT runtime·gettime(SB), 7, $32
+ LEAL 12(SP), AX // must be non-nil, unused
+ MOVL AX, 4(SP)
+ MOVL $0, 8(SP) // time zone pointer
+ MOVL $116, AX
+ INT $0x80
+
+ MOVL sec+0(FP), DI
+ MOVL AX, (DI)
+ MOVL $0, 4(DI) // zero extend 32 -> 64
+
+ MOVL usec+4(FP), DI
+ MOVL DX, (DI)
+ RET
+
+TEXT runtime·sigaction(SB),7,$0
+ MOVL $46, AX
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+// Sigtramp's job is to call the actual signal handler.
+// It is called with the following arguments on the stack:
+// 0(FP) "return address" - ignored
+// 4(FP) actual handler
+// 8(FP) siginfo style - ignored
+// 12(FP) signal number
+// 16(FP) siginfo
+// 20(FP) context
+TEXT runtime·sigtramp(SB),7,$40
+ get_tls(CX)
+
+ // save g
+ MOVL g(CX), DI
+ MOVL DI, 20(SP)
+
+ // g = m->gsignal
+ MOVL m(CX), BP
+ MOVL m_gsignal(BP), BP
+ MOVL BP, g(CX)
+
+ // copy arguments to sighandler
+ MOVL sig+8(FP), BX
+ MOVL BX, 0(SP)
+ MOVL info+12(FP), BX
+ MOVL BX, 4(SP)
+ MOVL context+16(FP), BX
+ MOVL BX, 8(SP)
+ MOVL DI, 12(SP)
+
+ MOVL handler+0(FP), BX
+ CALL BX
+
+ // restore g
+ get_tls(CX)
+ MOVL 20(SP), DI
+ MOVL DI, g(CX)
+
+ // call sigreturn
+ MOVL context+16(FP), CX
+ MOVL style+4(FP), BX
+ MOVL $0, 0(SP) // "caller PC" - ignored
+ MOVL CX, 4(SP)
+ MOVL BX, 8(SP)
+ MOVL $184, AX // sigreturn(ucontext, infostyle)
+ INT $0x80
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·sigaltstack(SB),7,$0
+ MOVL $53, AX
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+// void bsdthread_create(void *stk, M *m, G *g, void (*fn)(void))
+// System call args are: func arg stack pthread flags.
+TEXT runtime·bsdthread_create(SB),7,$32
+ MOVL $360, AX
+ // 0(SP) is where the caller PC would be; kernel skips it
+ MOVL func+12(FP), BX
+ MOVL BX, 4(SP) // func
+ MOVL mm+4(FP), BX
+ MOVL BX, 8(SP) // arg
+ MOVL stk+0(FP), BX
+ MOVL BX, 12(SP) // stack
+ MOVL gg+8(FP), BX
+ MOVL BX, 16(SP) // pthread
+ MOVL $0x1000000, 20(SP) // flags = PTHREAD_START_CUSTOM
+ INT $0x80
+ JAE 3(PC)
+ NEGL AX
+ RET
+ MOVL $0, AX
+ RET
+
+// The thread that bsdthread_create creates starts executing here,
+// because we registered this function using bsdthread_register
+// at startup.
+// AX = "pthread" (= g)
+// BX = mach thread port
+// CX = "func" (= fn)
+// DX = "arg" (= m)
+// DI = stack top
+// SI = flags (= 0x1000000)
+// SP = stack - C_32_STK_ALIGN
+TEXT runtime·bsdthread_start(SB),7,$0
+ // set up ldt 7+id to point at m->tls.
+ // m->tls is at m+40. newosproc left
+ // the m->id in tls[0].
+ LEAL m_tls(DX), BP
+ MOVL 0(BP), DI
+ ADDL $7, DI // m0 is LDT#7. count up.
+ // setldt(tls#, &tls, sizeof tls)
+ PUSHAL // save registers
+ PUSHL $32 // sizeof tls
+ PUSHL BP // &tls
+ PUSHL DI // tls #
+ CALL runtime·setldt(SB)
+ POPL AX
+ POPL AX
+ POPL AX
+ POPAL
+
+ // Now segment is established. Initialize m, g.
+ get_tls(BP)
+ MOVL AX, g(BP)
+ MOVL DX, m(BP)
+ MOVL BX, m_procid(DX) // m->procid = thread port (for debuggers)
+ CALL runtime·stackcheck(SB) // smashes AX
+ CALL CX // fn()
+ CALL runtime·exit1(SB)
+ RET
+
+// void bsdthread_register(void)
+// registers callbacks for threadstart (see bsdthread_create above
+// and wqthread and pthsize (not used). returns 0 on success.
+TEXT runtime·bsdthread_register(SB),7,$40
+ MOVL $366, AX
+ // 0(SP) is where kernel expects caller PC; ignored
+ MOVL $runtime·bsdthread_start(SB), 4(SP) // threadstart
+ MOVL $0, 8(SP) // wqthread, not used by us
+ MOVL $0, 12(SP) // pthsize, not used by us
+ MOVL $0, 16(SP) // dummy_value [sic]
+ MOVL $0, 20(SP) // targetconc_ptr
+ MOVL $0, 24(SP) // dispatchqueue_offset
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+// Invoke Mach system call.
+// Assumes system call number in AX,
+// caller PC on stack, caller's caller PC next,
+// and then the system call arguments.
+//
+// Can be used for BSD too, but we don't,
+// because if you use this interface the BSD
+// system call numbers need an extra field
+// in the high 16 bits that seems to be the
+// argument count in bytes but is not always.
+// INT $0x80 works fine for those.
+TEXT runtime·sysenter(SB),7,$0
+ POPL DX
+ MOVL SP, CX
+ BYTE $0x0F; BYTE $0x34; // SYSENTER
+ // returns to DX with SP set to CX
+
+TEXT runtime·mach_msg_trap(SB),7,$0
+ MOVL $-31, AX
+ CALL runtime·sysenter(SB)
+ RET
+
+TEXT runtime·mach_reply_port(SB),7,$0
+ MOVL $-26, AX
+ CALL runtime·sysenter(SB)
+ RET
+
+TEXT runtime·mach_task_self(SB),7,$0
+ MOVL $-28, AX
+ CALL runtime·sysenter(SB)
+ RET
+
+// Mach provides trap versions of the semaphore ops,
+// instead of requiring the use of RPC.
+
+// uint32 mach_semaphore_wait(uint32)
+TEXT runtime·mach_semaphore_wait(SB),7,$0
+ MOVL $-36, AX
+ CALL runtime·sysenter(SB)
+ RET
+
+// uint32 mach_semaphore_timedwait(uint32, uint32, uint32)
+TEXT runtime·mach_semaphore_timedwait(SB),7,$0
+ MOVL $-38, AX
+ CALL runtime·sysenter(SB)
+ RET
+
+// uint32 mach_semaphore_signal(uint32)
+TEXT runtime·mach_semaphore_signal(SB),7,$0
+ MOVL $-33, AX
+ CALL runtime·sysenter(SB)
+ RET
+
+// uint32 mach_semaphore_signal_all(uint32)
+TEXT runtime·mach_semaphore_signal_all(SB),7,$0
+ MOVL $-34, AX
+ CALL runtime·sysenter(SB)
+ RET
+
+// setldt(int entry, int address, int limit)
+// entry and limit are ignored.
+TEXT runtime·setldt(SB),7,$32
+ MOVL address+4(FP), BX // aka base
+
+ /*
+ * When linking against the system libraries,
+ * we use its pthread_create and let it set up %gs
+ * for us. When we do that, the private storage
+ * we get is not at 0(GS) but at 0x468(GS).
+ * To insulate the rest of the tool chain from this ugliness,
+ * 8l rewrites 0(GS) into 0x468(GS) for us.
+ * To accommodate that rewrite, we translate the
+ * address and limit here so that 0x468(GS) maps to 0(address).
+ *
+ * See ../../../../libcgo/darwin_386.c for the derivation
+ * of the constant.
+ */
+ SUBL $0x468, BX
+
+ /*
+ * Must set up as USER_CTHREAD segment because
+ * Darwin forces that value into %gs for signal handlers,
+ * and if we don't set one up, we'll get a recursive
+ * fault trying to get into the signal handler.
+ * Since we have to set one up anyway, it might as
+ * well be the value we want. So don't bother with
+ * i386_set_ldt.
+ */
+ MOVL BX, 4(SP)
+ MOVL $3, AX // thread_fast_set_cthread_self - machdep call #3
+ INT $0x82 // sic: 0x82, not 0x80, for machdep call
+
+ XORL AX, AX
+ MOVW GS, AX
+ RET
diff --git a/src/pkg/runtime/darwin/amd64/defs.h b/src/pkg/runtime/darwin/amd64/defs.h
new file mode 100644
index 000000000..90f798e8a
--- /dev/null
+++ b/src/pkg/runtime/darwin/amd64/defs.h
@@ -0,0 +1,305 @@
+// godefs -f -m64 defs.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x1000,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ MACH_MSG_TYPE_MOVE_RECEIVE = 0x10,
+ MACH_MSG_TYPE_MOVE_SEND = 0x11,
+ MACH_MSG_TYPE_MOVE_SEND_ONCE = 0x12,
+ MACH_MSG_TYPE_COPY_SEND = 0x13,
+ MACH_MSG_TYPE_MAKE_SEND = 0x14,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE = 0x15,
+ MACH_MSG_TYPE_COPY_RECEIVE = 0x16,
+ MACH_MSG_PORT_DESCRIPTOR = 0,
+ MACH_MSG_OOL_DESCRIPTOR = 0x1,
+ MACH_MSG_OOL_PORTS_DESCRIPTOR = 0x2,
+ MACH_MSG_OOL_VOLATILE_DESCRIPTOR = 0x3,
+ MACH_MSGH_BITS_COMPLEX = 0x80000000,
+ MACH_SEND_MSG = 0x1,
+ MACH_RCV_MSG = 0x2,
+ MACH_RCV_LARGE = 0x4,
+ MACH_SEND_TIMEOUT = 0x10,
+ MACH_SEND_INTERRUPT = 0x40,
+ MACH_SEND_CANCEL = 0x80,
+ MACH_SEND_ALWAYS = 0x10000,
+ MACH_SEND_TRAILER = 0x20000,
+ MACH_RCV_TIMEOUT = 0x100,
+ MACH_RCV_NOTIFY = 0x200,
+ MACH_RCV_INTERRUPT = 0x400,
+ MACH_RCV_OVERWRITE = 0x1000,
+ NDR_PROTOCOL_2_0 = 0,
+ NDR_INT_BIG_ENDIAN = 0,
+ NDR_INT_LITTLE_ENDIAN = 0x1,
+ NDR_FLOAT_IEEE = 0,
+ NDR_CHAR_ASCII = 0,
+ SA_SIGINFO = 0x40,
+ SA_RESTART = 0x2,
+ SA_ONSTACK = 0x1,
+ SA_USERTRAMP = 0x100,
+ SA_64REGSET = 0x200,
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGEMT = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGBUS = 0xa,
+ SIGSEGV = 0xb,
+ SIGSYS = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGTERM = 0xf,
+ SIGURG = 0x10,
+ SIGSTOP = 0x11,
+ SIGTSTP = 0x12,
+ SIGCONT = 0x13,
+ SIGCHLD = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGIO = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGINFO = 0x1d,
+ SIGUSR1 = 0x1e,
+ SIGUSR2 = 0x1f,
+ FPE_INTDIV = 0x7,
+ FPE_INTOVF = 0x8,
+ FPE_FLTDIV = 0x1,
+ FPE_FLTOVF = 0x2,
+ FPE_FLTUND = 0x3,
+ FPE_FLTRES = 0x4,
+ FPE_FLTINV = 0x5,
+ FPE_FLTSUB = 0x6,
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+ ITIMER_REAL = 0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+};
+
+// Types
+#pragma pack on
+
+typedef struct MachBody MachBody;
+struct MachBody {
+ uint32 msgh_descriptor_count;
+};
+
+typedef struct MachHeader MachHeader;
+struct MachHeader {
+ uint32 msgh_bits;
+ uint32 msgh_size;
+ uint32 msgh_remote_port;
+ uint32 msgh_local_port;
+ uint32 msgh_reserved;
+ int32 msgh_id;
+};
+
+typedef struct MachNDR MachNDR;
+struct MachNDR {
+ uint8 mig_vers;
+ uint8 if_vers;
+ uint8 reserved1;
+ uint8 mig_encoding;
+ uint8 int_rep;
+ uint8 char_rep;
+ uint8 float_rep;
+ uint8 reserved2;
+};
+
+typedef struct MachPort MachPort;
+struct MachPort {
+ uint32 name;
+ uint32 pad1;
+ uint16 pad2;
+ uint8 disposition;
+ uint8 type;
+};
+
+typedef struct StackT StackT;
+struct StackT {
+ void *ss_sp;
+ uint64 ss_size;
+ int32 ss_flags;
+ byte pad_godefs_0[4];
+};
+
+typedef union Sighandler Sighandler;
+union Sighandler {
+ uint64 __sa_handler;
+ uint64 __sa_sigaction;
+};
+
+typedef struct Sigaction Sigaction;
+struct Sigaction {
+ Sighandler __sigaction_u;
+ uint64 sa_tramp;
+ uint32 sa_mask;
+ int32 sa_flags;
+};
+
+typedef union Sigval Sigval;
+union Sigval {
+ int32 sival_int;
+ void *sival_ptr;
+};
+
+typedef struct Siginfo Siginfo;
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ int32 si_pid;
+ uint32 si_uid;
+ int32 si_status;
+ void *si_addr;
+ Sigval si_value;
+ int64 si_band;
+ uint64 __pad[7];
+};
+
+typedef struct Timeval Timeval;
+struct Timeval {
+ int64 tv_sec;
+ int32 tv_usec;
+ byte pad_godefs_0[4];
+};
+
+typedef struct Itimerval Itimerval;
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+
+typedef struct FPControl FPControl;
+struct FPControl {
+ byte pad_godefs_0[2];
+};
+
+typedef struct FPStatus FPStatus;
+struct FPStatus {
+ byte pad_godefs_0[2];
+};
+
+typedef struct RegMMST RegMMST;
+struct RegMMST {
+ int8 mmst_reg[10];
+ int8 mmst_rsrv[6];
+};
+
+typedef struct RegXMM RegXMM;
+struct RegXMM {
+ int8 xmm_reg[16];
+};
+
+typedef struct Regs Regs;
+struct Regs {
+ uint64 rax;
+ uint64 rbx;
+ uint64 rcx;
+ uint64 rdx;
+ uint64 rdi;
+ uint64 rsi;
+ uint64 rbp;
+ uint64 rsp;
+ uint64 r8;
+ uint64 r9;
+ uint64 r10;
+ uint64 r11;
+ uint64 r12;
+ uint64 r13;
+ uint64 r14;
+ uint64 r15;
+ uint64 rip;
+ uint64 rflags;
+ uint64 cs;
+ uint64 fs;
+ uint64 gs;
+};
+
+typedef struct FloatState FloatState;
+struct FloatState {
+ uint64 fpu_reserved;
+ FPControl fpu_fcw;
+ FPStatus fpu_fsw;
+ uint8 fpu_ftw;
+ uint8 fpu_rsrv1;
+ uint16 fpu_fop;
+ uint32 fpu_ip;
+ uint16 fpu_cs;
+ uint16 fpu_rsrv2;
+ uint32 fpu_dp;
+ uint16 fpu_ds;
+ uint16 fpu_rsrv3;
+ uint32 fpu_mxcsr;
+ uint32 fpu_mxcsrmask;
+ RegMMST fpu_stmm0;
+ RegMMST fpu_stmm1;
+ RegMMST fpu_stmm2;
+ RegMMST fpu_stmm3;
+ RegMMST fpu_stmm4;
+ RegMMST fpu_stmm5;
+ RegMMST fpu_stmm6;
+ RegMMST fpu_stmm7;
+ RegXMM fpu_xmm0;
+ RegXMM fpu_xmm1;
+ RegXMM fpu_xmm2;
+ RegXMM fpu_xmm3;
+ RegXMM fpu_xmm4;
+ RegXMM fpu_xmm5;
+ RegXMM fpu_xmm6;
+ RegXMM fpu_xmm7;
+ RegXMM fpu_xmm8;
+ RegXMM fpu_xmm9;
+ RegXMM fpu_xmm10;
+ RegXMM fpu_xmm11;
+ RegXMM fpu_xmm12;
+ RegXMM fpu_xmm13;
+ RegXMM fpu_xmm14;
+ RegXMM fpu_xmm15;
+ int8 fpu_rsrv4[96];
+ int32 fpu_reserved1;
+};
+
+typedef struct ExceptionState ExceptionState;
+struct ExceptionState {
+ uint32 trapno;
+ uint32 err;
+ uint64 faultvaddr;
+};
+
+typedef struct Mcontext Mcontext;
+struct Mcontext {
+ ExceptionState es;
+ Regs ss;
+ FloatState fs;
+ byte pad_godefs_0[4];
+};
+
+typedef struct Ucontext Ucontext;
+struct Ucontext {
+ int32 uc_onstack;
+ uint32 uc_sigmask;
+ StackT uc_stack;
+ uint64 uc_link;
+ uint64 uc_mcsize;
+ Mcontext *uc_mcontext;
+};
+#pragma pack off
diff --git a/src/pkg/runtime/darwin/amd64/rt0.s b/src/pkg/runtime/darwin/amd64/rt0.s
new file mode 100644
index 000000000..4cfab5876
--- /dev/null
+++ b/src/pkg/runtime/darwin/amd64/rt0.s
@@ -0,0 +1,10 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin and Linux use the same linkage to main
+
+TEXT _rt0_amd64_darwin(SB),7,$-8
+ MOVQ $_rt0_amd64(SB), AX
+ MOVQ SP, DI
+ JMP AX
diff --git a/src/pkg/runtime/darwin/amd64/signal.c b/src/pkg/runtime/darwin/amd64/signal.c
new file mode 100644
index 000000000..036a3aca7
--- /dev/null
+++ b/src/pkg/runtime/darwin/amd64/signal.c
@@ -0,0 +1,204 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "signals.h"
+
+void
+runtime·dumpregs(Regs *r)
+{
+ runtime·printf("rax %X\n", r->rax);
+ runtime·printf("rbx %X\n", r->rbx);
+ runtime·printf("rcx %X\n", r->rcx);
+ runtime·printf("rdx %X\n", r->rdx);
+ runtime·printf("rdi %X\n", r->rdi);
+ runtime·printf("rsi %X\n", r->rsi);
+ runtime·printf("rbp %X\n", r->rbp);
+ runtime·printf("rsp %X\n", r->rsp);
+ runtime·printf("r8 %X\n", r->r8 );
+ runtime·printf("r9 %X\n", r->r9 );
+ runtime·printf("r10 %X\n", r->r10);
+ runtime·printf("r11 %X\n", r->r11);
+ runtime·printf("r12 %X\n", r->r12);
+ runtime·printf("r13 %X\n", r->r13);
+ runtime·printf("r14 %X\n", r->r14);
+ runtime·printf("r15 %X\n", r->r15);
+ runtime·printf("rip %X\n", r->rip);
+ runtime·printf("rflags %X\n", r->rflags);
+ runtime·printf("cs %X\n", r->cs);
+ runtime·printf("fs %X\n", r->fs);
+ runtime·printf("gs %X\n", r->gs);
+}
+
+String
+runtime·signame(int32 sig)
+{
+ if(sig < 0 || sig >= NSIG)
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp)
+{
+ Ucontext *uc;
+ Mcontext *mc;
+ Regs *r;
+ uintptr *sp;
+ byte *pc;
+
+ uc = context;
+ mc = uc->uc_mcontext;
+ r = &mc->ss;
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)r->rip, (uint8*)r->rsp, nil, gp);
+ return;
+ }
+
+ if(gp != nil && (runtime·sigtab[sig].flags & SigPanic)) {
+ // Work around Leopard bug that doesn't set FPE_INTDIV.
+ // Look at instruction to see if it is a divide.
+ // Not necessary in Snow Leopard (si_code will be != 0).
+ if(sig == SIGFPE && info->si_code == 0) {
+ pc = (byte*)r->rip;
+ if((pc[0]&0xF0) == 0x40) // 64-bit REX prefix
+ pc++;
+ else if(pc[0] == 0x66) // 16-bit instruction prefix
+ pc++;
+ if(pc[0] == 0xF6 || pc[0] == 0xF7)
+ info->si_code = FPE_INTDIV;
+ }
+
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = info->si_code;
+ gp->sigcode1 = (uintptr)info->si_addr;
+ gp->sigpc = r->rip;
+
+ // Only push runtime·sigpanic if r->rip != 0.
+ // If r->rip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
+ // won't get to see who faulted.)
+ if(r->rip != 0) {
+ sp = (uintptr*)r->rsp;
+ *--sp = r->rip;
+ r->rsp = (uintptr)sp;
+ }
+ r->rip = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
+ return;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG){
+ runtime·printf("Signal %d\n", sig);
+ }else{
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+ }
+
+ runtime·printf("pc: %X\n", r->rip);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->rip, (void*)r->rsp, 0, gp);
+ runtime·tracebackothers(gp);
+ runtime·dumpregs(r);
+ }
+
+ runtime·exit(2);
+}
+
+void
+runtime·sigignore(int32, Siginfo*, void*)
+{
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ StackT st;
+
+ st.ss_sp = p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ runtime·sigaltstack(&st, nil);
+}
+
+static void
+sigaction(int32 i, void (*fn)(int32, Siginfo*, void*, G*), bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask = ~0ULL;
+ sa.sa_tramp = (uintptr)runtime·sigtramp; // runtime·sigtramp's job is to call into real handler
+ sa.__sigaction_u.__sa_sigaction = (uintptr)fn;
+ runtime·sigaction(i, &sa, nil);
+}
+
+void
+runtime·initsig(int32 queue)
+{
+ int32 i;
+ void *fn;
+
+ runtime·siginit();
+
+ for(i = 0; i<NSIG; i++) {
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
+ continue;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ fn = runtime·sighandler;
+ else
+ fn = runtime·sigignore;
+ sigaction(i, fn, (runtime·sigtab[i].flags & SigRestart) != 0);
+ }
+ }
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ Itimerval it;
+
+ runtime·memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ sigaction(SIGPROF, SIG_IGN, true);
+ } else {
+ sigaction(SIGPROF, runtime·sighandler, true);
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ }
+ m->profilehz = hz;
+}
+
+void
+os·sigpipe(void)
+{
+ sigaction(SIGPIPE, SIG_DFL, false);
+ runtime·raisesigpipe();
+}
diff --git a/src/pkg/runtime/darwin/amd64/sys.s b/src/pkg/runtime/darwin/amd64/sys.s
new file mode 100644
index 000000000..8d1b20f11
--- /dev/null
+++ b/src/pkg/runtime/darwin/amd64/sys.s
@@ -0,0 +1,295 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls and other sys.stuff for AMD64, Darwin
+// See http://fxr.watson.org/fxr/source/bsd/kern/syscalls.c?v=xnu-1228
+// or /usr/include/sys/syscall.h (on a Mac) for system call numbers.
+//
+// The low 24 bits are the system call number.
+// The high 8 bits specify the kind of system call: 1=Mach, 2=BSD, 3=Machine-Dependent.
+//
+
+#include "amd64/asm.h"
+
+// Exit the entire program (like C exit)
+TEXT runtime·exit(SB),7,$0
+ MOVL 8(SP), DI // arg 1 exit status
+ MOVL $(0x2000000+1), AX // syscall entry
+ SYSCALL
+ CALL runtime·notok(SB)
+ RET
+
+// Exit this OS thread (like pthread_exit, which eventually
+// calls __bsdthread_terminate).
+TEXT runtime·exit1(SB),7,$0
+ MOVL 8(SP), DI // arg 1 exit status
+ MOVL $(0x2000000+361), AX // syscall entry
+ SYSCALL
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·write(SB),7,$0
+ MOVL 8(SP), DI // arg 1 fd
+ MOVQ 16(SP), SI // arg 2 buf
+ MOVL 24(SP), DX // arg 3 count
+ MOVL $(0x2000000+4), AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·raisesigpipe(SB),7,$24
+ get_tls(CX)
+ MOVQ m(CX), DX
+ MOVL $13, DI // arg 1 SIGPIPE
+ MOVQ m_procid(DX), SI // arg 2 thread_port
+ MOVL $(0x2000000+328), AX // syscall entry __pthread_kill
+ SYSCALL
+ RET
+
+TEXT runtime·setitimer(SB), 7, $0
+ MOVL 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVQ 24(SP), DX
+ MOVL $(0x2000000+83), AX // syscall entry
+ SYSCALL
+ RET
+
+// void gettime(int64 *sec, int32 *usec)
+TEXT runtime·gettime(SB), 7, $32
+ MOVQ SP, DI // must be non-nil, unused
+ MOVQ $0, SI
+ MOVL $(0x2000000+116), AX
+ SYSCALL
+ MOVQ sec+0(FP), DI
+ MOVQ AX, (DI)
+ MOVQ usec+8(FP), DI
+ MOVL DX, (DI)
+ RET
+
+TEXT runtime·sigaction(SB),7,$0
+ MOVL 8(SP), DI // arg 1 sig
+ MOVQ 16(SP), SI // arg 2 act
+ MOVQ 24(SP), DX // arg 3 oact
+ MOVQ 24(SP), CX // arg 3 oact
+ MOVQ 24(SP), R10 // arg 3 oact
+ MOVL $(0x2000000+46), AX // syscall entry
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·sigtramp(SB),7,$64
+ get_tls(BX)
+
+ // save g
+ MOVQ g(BX), R10
+ MOVQ R10, 48(SP)
+
+ // g = m->gsignal
+ MOVQ m(BX), BP
+ MOVQ m_gsignal(BP), BP
+ MOVQ BP, g(BX)
+
+ MOVL DX, 0(SP)
+ MOVQ CX, 8(SP)
+ MOVQ R8, 16(SP)
+ MOVQ R10, 24(SP)
+
+ MOVQ R8, 32(SP) // save ucontext
+ MOVQ SI, 40(SP) // save infostyle
+ CALL DI
+
+ // restore g
+ get_tls(BX)
+ MOVQ 48(SP), R10
+ MOVQ R10, g(BX)
+
+ // call sigreturn
+ MOVL $(0x2000000+184), AX // sigreturn(ucontext, infostyle)
+ MOVQ 32(SP), DI // saved ucontext
+ MOVQ 40(SP), SI // saved infostyle
+ SYSCALL
+ INT $3 // not reached
+
+TEXT runtime·mmap(SB),7,$0
+ MOVQ 8(SP), DI // arg 1 addr
+ MOVQ 16(SP), SI // arg 2 len
+ MOVL 24(SP), DX // arg 3 prot
+ MOVL 28(SP), R10 // arg 4 flags
+ MOVL 32(SP), R8 // arg 5 fid
+ MOVL 36(SP), R9 // arg 6 offset
+ MOVL $(0x2000000+197), AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·munmap(SB),7,$0
+ MOVQ 8(SP), DI // arg 1 addr
+ MOVQ 16(SP), SI // arg 2 len
+ MOVL $(0x2000000+73), AX // syscall entry
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·notok(SB),7,$0
+ MOVL $0xf1, BP
+ MOVQ BP, (BP)
+ RET
+
+TEXT runtime·sigaltstack(SB),7,$0
+ MOVQ new+8(SP), DI
+ MOVQ old+16(SP), SI
+ MOVQ $(0x2000000+53), AX
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+// void bsdthread_create(void *stk, M *m, G *g, void (*fn)(void))
+TEXT runtime·bsdthread_create(SB),7,$0
+ // Set up arguments to bsdthread_create system call.
+ // The ones in quotes pass through to the thread callback
+ // uninterpreted, so we can put whatever we want there.
+ MOVQ fn+32(SP), DI // "func"
+ MOVQ mm+16(SP), SI // "arg"
+ MOVQ stk+8(SP), DX // stack
+ MOVQ gg+24(SP), R10 // "pthread"
+ MOVQ $0x01000000, R8 // flags = PTHREAD_START_CUSTOM
+ MOVQ $0, R9 // paranoia
+ MOVQ $(0x2000000+360), AX // bsdthread_create
+ SYSCALL
+ JCC 3(PC)
+ NEGL AX
+ RET
+ MOVL $0, AX
+ RET
+
+// The thread that bsdthread_create creates starts executing here,
+// because we registered this function using bsdthread_register
+// at startup.
+// DI = "pthread"
+// SI = mach thread port
+// DX = "func" (= fn)
+// CX = "arg" (= m)
+// R8 = stack
+// R9 = flags (= 0)
+// SP = stack - C_64_REDZONE_LEN (= stack - 128)
+TEXT runtime·bsdthread_start(SB),7,$0
+ MOVQ R8, SP // empirically, SP is very wrong but R8 is right
+
+ PUSHQ DX
+ PUSHQ CX
+ PUSHQ SI
+
+ // set up thread local storage pointing at m->tls.
+ LEAQ m_tls(CX), DI
+ CALL runtime·settls(SB)
+
+ POPQ SI
+ POPQ CX
+ POPQ DX
+
+ get_tls(BX)
+ MOVQ CX, m(BX)
+ MOVQ SI, m_procid(CX) // thread port is m->procid
+ MOVQ m_g0(CX), AX
+ MOVQ AX, g(BX)
+ CALL runtime·stackcheck(SB) // smashes AX, CX
+ CALL DX // fn
+ CALL runtime·exit1(SB)
+ RET
+
+// void bsdthread_register(void)
+// registers callbacks for threadstart (see bsdthread_create above
+// and wqthread and pthsize (not used). returns 0 on success.
+TEXT runtime·bsdthread_register(SB),7,$0
+ MOVQ $runtime·bsdthread_start(SB), DI // threadstart
+ MOVQ $0, SI // wqthread, not used by us
+ MOVQ $0, DX // pthsize, not used by us
+ MOVQ $0, R10 // dummy_value [sic]
+ MOVQ $0, R8 // targetconc_ptr
+ MOVQ $0, R9 // dispatchqueue_offset
+ MOVQ $(0x2000000+366), AX // bsdthread_register
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+// Mach system calls use 0x1000000 instead of the BSD's 0x2000000.
+
+// uint32 mach_msg_trap(void*, uint32, uint32, uint32, uint32, uint32, uint32)
+TEXT runtime·mach_msg_trap(SB),7,$0
+ MOVQ 8(SP), DI
+ MOVL 16(SP), SI
+ MOVL 20(SP), DX
+ MOVL 24(SP), R10
+ MOVL 28(SP), R8
+ MOVL 32(SP), R9
+ MOVL 36(SP), R11
+ PUSHQ R11 // seventh arg, on stack
+ MOVL $(0x1000000+31), AX // mach_msg_trap
+ SYSCALL
+ POPQ R11
+ RET
+
+TEXT runtime·mach_task_self(SB),7,$0
+ MOVL $(0x1000000+28), AX // task_self_trap
+ SYSCALL
+ RET
+
+TEXT runtime·mach_thread_self(SB),7,$0
+ MOVL $(0x1000000+27), AX // thread_self_trap
+ SYSCALL
+ RET
+
+TEXT runtime·mach_reply_port(SB),7,$0
+ MOVL $(0x1000000+26), AX // mach_reply_port
+ SYSCALL
+ RET
+
+// Mach provides trap versions of the semaphore ops,
+// instead of requiring the use of RPC.
+
+// uint32 mach_semaphore_wait(uint32)
+TEXT runtime·mach_semaphore_wait(SB),7,$0
+ MOVL 8(SP), DI
+ MOVL $(0x1000000+36), AX // semaphore_wait_trap
+ SYSCALL
+ RET
+
+// uint32 mach_semaphore_timedwait(uint32, uint32, uint32)
+TEXT runtime·mach_semaphore_timedwait(SB),7,$0
+ MOVL 8(SP), DI
+ MOVL 12(SP), SI
+ MOVL 16(SP), DX
+ MOVL $(0x1000000+38), AX // semaphore_timedwait_trap
+ SYSCALL
+ RET
+
+// uint32 mach_semaphore_signal(uint32)
+TEXT runtime·mach_semaphore_signal(SB),7,$0
+ MOVL 8(SP), DI
+ MOVL $(0x1000000+33), AX // semaphore_signal_trap
+ SYSCALL
+ RET
+
+// uint32 mach_semaphore_signal_all(uint32)
+TEXT runtime·mach_semaphore_signal_all(SB),7,$0
+ MOVL 8(SP), DI
+ MOVL $(0x1000000+34), AX // semaphore_signal_all_trap
+ SYSCALL
+ RET
+
+// set tls base to DI
+TEXT runtime·settls(SB),7,$32
+ /*
+ * Same as in ../386/sys.s:/ugliness, different constant.
+ * See ../../../../libcgo/darwin_amd64.c for the derivation
+ * of the constant.
+ */
+ SUBQ $0x8a0, DI
+
+ MOVL $(0x3000000+3), AX // thread_fast_set_cthread_self - machdep call #3
+ SYSCALL
+ RET
diff --git a/src/pkg/runtime/darwin/defs.c b/src/pkg/runtime/darwin/defs.c
new file mode 100644
index 000000000..032a6bcbb
--- /dev/null
+++ b/src/pkg/runtime/darwin/defs.c
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Input to godefs.
+ *
+ godefs -f -m64 defs.c >amd64/defs.h
+ godefs -f -m32 defs.c >386/defs.h
+ */
+
+#define __DARWIN_UNIX03 0
+
+#include <mach/mach.h>
+#include <mach/message.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <signal.h>
+#include <sys/mman.h>
+
+enum {
+ $PROT_NONE = PROT_NONE,
+ $PROT_READ = PROT_READ,
+ $PROT_WRITE = PROT_WRITE,
+ $PROT_EXEC = PROT_EXEC,
+
+ $MAP_ANON = MAP_ANON,
+ $MAP_PRIVATE = MAP_PRIVATE,
+ $MAP_FIXED = MAP_FIXED,
+
+ $MACH_MSG_TYPE_MOVE_RECEIVE = MACH_MSG_TYPE_MOVE_RECEIVE,
+ $MACH_MSG_TYPE_MOVE_SEND = MACH_MSG_TYPE_MOVE_SEND,
+ $MACH_MSG_TYPE_MOVE_SEND_ONCE = MACH_MSG_TYPE_MOVE_SEND_ONCE,
+ $MACH_MSG_TYPE_COPY_SEND = MACH_MSG_TYPE_COPY_SEND,
+ $MACH_MSG_TYPE_MAKE_SEND = MACH_MSG_TYPE_MAKE_SEND,
+ $MACH_MSG_TYPE_MAKE_SEND_ONCE = MACH_MSG_TYPE_MAKE_SEND_ONCE,
+ $MACH_MSG_TYPE_COPY_RECEIVE = MACH_MSG_TYPE_COPY_RECEIVE,
+
+ $MACH_MSG_PORT_DESCRIPTOR = MACH_MSG_PORT_DESCRIPTOR,
+ $MACH_MSG_OOL_DESCRIPTOR = MACH_MSG_OOL_DESCRIPTOR,
+ $MACH_MSG_OOL_PORTS_DESCRIPTOR = MACH_MSG_OOL_PORTS_DESCRIPTOR,
+ $MACH_MSG_OOL_VOLATILE_DESCRIPTOR = MACH_MSG_OOL_VOLATILE_DESCRIPTOR,
+
+ $MACH_MSGH_BITS_COMPLEX = MACH_MSGH_BITS_COMPLEX,
+
+ $MACH_SEND_MSG = MACH_SEND_MSG,
+ $MACH_RCV_MSG = MACH_RCV_MSG,
+ $MACH_RCV_LARGE = MACH_RCV_LARGE,
+
+ $MACH_SEND_TIMEOUT = MACH_SEND_TIMEOUT,
+ $MACH_SEND_INTERRUPT = MACH_SEND_INTERRUPT,
+ $MACH_SEND_CANCEL = MACH_SEND_CANCEL,
+ $MACH_SEND_ALWAYS = MACH_SEND_ALWAYS,
+ $MACH_SEND_TRAILER = MACH_SEND_TRAILER,
+ $MACH_RCV_TIMEOUT = MACH_RCV_TIMEOUT,
+ $MACH_RCV_NOTIFY = MACH_RCV_NOTIFY,
+ $MACH_RCV_INTERRUPT = MACH_RCV_INTERRUPT,
+ $MACH_RCV_OVERWRITE = MACH_RCV_OVERWRITE,
+
+ $NDR_PROTOCOL_2_0 = NDR_PROTOCOL_2_0,
+ $NDR_INT_BIG_ENDIAN = NDR_INT_BIG_ENDIAN,
+ $NDR_INT_LITTLE_ENDIAN = NDR_INT_LITTLE_ENDIAN,
+ $NDR_FLOAT_IEEE = NDR_FLOAT_IEEE,
+ $NDR_CHAR_ASCII = NDR_CHAR_ASCII,
+
+ $SA_SIGINFO = SA_SIGINFO,
+ $SA_RESTART = SA_RESTART,
+ $SA_ONSTACK = SA_ONSTACK,
+ $SA_USERTRAMP = SA_USERTRAMP,
+ $SA_64REGSET = SA_64REGSET,
+
+ $SIGHUP = SIGHUP,
+ $SIGINT = SIGINT,
+ $SIGQUIT = SIGQUIT,
+ $SIGILL = SIGILL,
+ $SIGTRAP = SIGTRAP,
+ $SIGABRT = SIGABRT,
+ $SIGEMT = SIGEMT,
+ $SIGFPE = SIGFPE,
+ $SIGKILL = SIGKILL,
+ $SIGBUS = SIGBUS,
+ $SIGSEGV = SIGSEGV,
+ $SIGSYS = SIGSYS,
+ $SIGPIPE = SIGPIPE,
+ $SIGALRM = SIGALRM,
+ $SIGTERM = SIGTERM,
+ $SIGURG = SIGURG,
+ $SIGSTOP = SIGSTOP,
+ $SIGTSTP = SIGTSTP,
+ $SIGCONT = SIGCONT,
+ $SIGCHLD = SIGCHLD,
+ $SIGTTIN = SIGTTIN,
+ $SIGTTOU = SIGTTOU,
+ $SIGIO = SIGIO,
+ $SIGXCPU = SIGXCPU,
+ $SIGXFSZ = SIGXFSZ,
+ $SIGVTALRM = SIGVTALRM,
+ $SIGPROF = SIGPROF,
+ $SIGWINCH = SIGWINCH,
+ $SIGINFO = SIGINFO,
+ $SIGUSR1 = SIGUSR1,
+ $SIGUSR2 = SIGUSR2,
+
+ $FPE_INTDIV = FPE_INTDIV,
+ $FPE_INTOVF = FPE_INTOVF,
+ $FPE_FLTDIV = FPE_FLTDIV,
+ $FPE_FLTOVF = FPE_FLTOVF,
+ $FPE_FLTUND = FPE_FLTUND,
+ $FPE_FLTRES = FPE_FLTRES,
+ $FPE_FLTINV = FPE_FLTINV,
+ $FPE_FLTSUB = FPE_FLTSUB,
+
+ $BUS_ADRALN = BUS_ADRALN,
+ $BUS_ADRERR = BUS_ADRERR,
+ $BUS_OBJERR = BUS_OBJERR,
+
+ $SEGV_MAPERR = SEGV_MAPERR,
+ $SEGV_ACCERR = SEGV_ACCERR,
+
+ $ITIMER_REAL = ITIMER_REAL,
+ $ITIMER_VIRTUAL = ITIMER_VIRTUAL,
+ $ITIMER_PROF = ITIMER_PROF,
+};
+
+typedef mach_msg_body_t $MachBody;
+typedef mach_msg_header_t $MachHeader;
+typedef NDR_record_t $MachNDR;
+typedef mach_msg_port_descriptor_t $MachPort;
+
+typedef stack_t $StackT;
+typedef union __sigaction_u $Sighandler;
+
+typedef struct __sigaction $Sigaction; // used in syscalls
+// typedef struct sigaction $Sigaction; // used by the C library
+typedef union sigval $Sigval;
+typedef siginfo_t $Siginfo;
+typedef struct timeval $Timeval;
+typedef struct itimerval $Itimerval;
+
+typedef struct fp_control $FPControl;
+typedef struct fp_status $FPStatus;
+typedef struct mmst_reg $RegMMST;
+typedef struct xmm_reg $RegXMM;
+
+#ifdef __LP64__
+// amd64
+typedef x86_thread_state64_t $Regs;
+typedef x86_float_state64_t $FloatState;
+typedef x86_exception_state64_t $ExceptionState;
+typedef struct mcontext64 $Mcontext;
+#else
+// 386
+typedef x86_thread_state32_t $Regs;
+typedef x86_float_state32_t $FloatState;
+typedef x86_exception_state32_t $ExceptionState;
+typedef struct mcontext32 $Mcontext;
+#endif
+
+typedef ucontext_t $Ucontext;
diff --git a/src/pkg/runtime/darwin/mem.c b/src/pkg/runtime/darwin/mem.c
new file mode 100644
index 000000000..935c032bc
--- /dev/null
+++ b/src/pkg/runtime/darwin/mem.c
@@ -0,0 +1,55 @@
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "malloc.h"
+
+void*
+runtime·SysAlloc(uintptr n)
+{
+ void *v;
+
+ mstats.sys += n;
+ v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(v < (void*)4096)
+ return nil;
+ return v;
+}
+
+void
+runtime·SysUnused(void *v, uintptr n)
+{
+ USED(v);
+ USED(n);
+ // TODO(rsc): call madvise MADV_DONTNEED
+}
+
+void
+runtime·SysFree(void *v, uintptr n)
+{
+ mstats.sys -= n;
+ runtime·munmap(v, n);
+}
+
+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+ return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
+enum
+{
+ ENOMEM = 12,
+};
+
+void
+runtime·SysMap(void *v, uintptr n)
+{
+ void *p;
+
+ mstats.sys += n;
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+ if(p == (void*)-ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v)
+ runtime·throw("runtime: cannot map pages in arena address space");
+}
diff --git a/src/pkg/runtime/darwin/os.h b/src/pkg/runtime/darwin/os.h
new file mode 100644
index 000000000..db3c2e8a7
--- /dev/null
+++ b/src/pkg/runtime/darwin/os.h
@@ -0,0 +1,31 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define SIG_DFL ((void*)0)
+#define SIG_IGN ((void*)1)
+
+int32 runtime·bsdthread_create(void*, M*, G*, void(*)(void));
+void runtime·bsdthread_register(void);
+int32 runtime·mach_msg_trap(MachHeader*, int32, uint32, uint32, uint32, uint32, uint32);
+uint32 runtime·mach_reply_port(void);
+void runtime·mach_semacquire(uint32);
+uint32 runtime·mach_semcreate(void);
+void runtime·mach_semdestroy(uint32);
+void runtime·mach_semrelease(uint32);
+void runtime·mach_semreset(uint32);
+uint32 runtime·mach_task_self(void);
+uint32 runtime·mach_task_self(void);
+uint32 runtime·mach_thread_self(void);
+uint32 runtime·mach_thread_self(void);
+
+struct Sigaction;
+void runtime·sigaction(uintptr, struct Sigaction*, struct Sigaction*);
+
+struct StackT;
+void runtime·sigaltstack(struct StackT*, struct StackT*);
+void runtime·sigtramp(void);
+void runtime·sigpanic(void);
+void runtime·setitimer(int32, Itimerval*, Itimerval*);
+
+void runtime·raisesigpipe(void);
diff --git a/src/pkg/runtime/darwin/signals.h b/src/pkg/runtime/darwin/signals.h
new file mode 100644
index 000000000..035027fad
--- /dev/null
+++ b/src/pkg/runtime/darwin/signals.h
@@ -0,0 +1,51 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define C SigCatch
+#define I SigIgnore
+#define R SigRestart
+#define Q SigQueue
+#define P SigPanic
+
+SigTab runtime·sigtab[] = {
+ /* 0 */ 0, "SIGNONE: no trap",
+ /* 1 */ Q+R, "SIGHUP: terminal line hangup",
+ /* 2 */ Q+R, "SIGINT: interrupt",
+ /* 3 */ C, "SIGQUIT: quit",
+ /* 4 */ C, "SIGILL: illegal instruction",
+ /* 5 */ C, "SIGTRAP: trace trap", /* used by panic and array out of bounds, etc. */
+ /* 6 */ C, "SIGABRT: abort",
+ /* 7 */ C, "SIGEMT: emulate instruction executed",
+ /* 8 */ C+P, "SIGFPE: floating-point exception",
+ /* 9 */ 0, "SIGKILL: kill",
+ /* 10 */ C+P, "SIGBUS: bus error",
+ /* 11 */ C+P, "SIGSEGV: segmentation violation",
+ /* 12 */ C, "SIGSYS: bad system call",
+ /* 13 */ I, "SIGPIPE: write to broken pipe",
+ /* 14 */ Q+I+R, "SIGALRM: alarm clock",
+ /* 15 */ Q+R, "SIGTERM: termination",
+ /* 16 */ Q+I+R, "SIGURG: urgent condition on socket",
+ /* 17 */ 0, "SIGSTOP: stop",
+ /* 18 */ Q+I+R, "SIGTSTP: keyboard stop",
+ /* 19 */ 0, "SIGCONT: continue after stop",
+ /* 20 */ Q+I+R, "SIGCHLD: child status has changed",
+ /* 21 */ Q+I+R, "SIGTTIN: background read from tty",
+ /* 22 */ Q+I+R, "SIGTTOU: background write to tty",
+ /* 23 */ Q+I+R, "SIGIO: i/o now possible",
+ /* 24 */ Q+I+R, "SIGXCPU: cpu limit exceeded",
+ /* 25 */ Q+I+R, "SIGXFSZ: file size limit exceeded",
+ /* 26 */ Q+I+R, "SIGVTALRM: virtual alarm clock",
+ /* 27 */ Q+I+R, "SIGPROF: profiling alarm clock",
+ /* 28 */ Q+I+R, "SIGWINCH: window size change",
+ /* 29 */ Q+I+R, "SIGINFO: status request from keyboard",
+ /* 30 */ Q+I+R, "SIGUSR1: user-defined signal 1",
+ /* 31 */ Q+I+R, "SIGUSR2: user-defined signal 2",
+};
+#undef C
+#undef I
+#undef R
+#undef Q
+#undef P
+
+#define NSIG 32
diff --git a/src/pkg/runtime/darwin/thread.c b/src/pkg/runtime/darwin/thread.c
new file mode 100644
index 000000000..6733e815e
--- /dev/null
+++ b/src/pkg/runtime/darwin/thread.c
@@ -0,0 +1,484 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "stack.h"
+
+extern SigTab runtime·sigtab[];
+
+static void
+unimplemented(int8 *name)
+{
+ runtime·prints(name);
+ runtime·prints(" not implemented\n");
+ *(int32*)1231 = 1231;
+}
+
+// Thread-safe allocation of a semaphore.
+// Psema points at a kernel semaphore key.
+// It starts out zero, meaning no semaphore.
+// Fill it in, being careful of others calling initsema
+// simultaneously.
+static void
+initsema(uint32 *psema)
+{
+ uint32 sema;
+
+ if(*psema != 0) // already have one
+ return;
+
+ sema = runtime·mach_semcreate();
+ if(!runtime·cas(psema, 0, sema)){
+ // Someone else filled it in. Use theirs.
+ runtime·mach_semdestroy(sema);
+ return;
+ }
+}
+
+
+// Blocking locks.
+
+// Implement Locks, using semaphores.
+// l->key is the number of threads who want the lock.
+// In a race, one thread increments l->key from 0 to 1
+// and the others increment it from >0 to >1. The thread
+// who does the 0->1 increment gets the lock, and the
+// others wait on the semaphore. When the 0->1 thread
+// releases the lock by decrementing l->key, l->key will
+// be >0, so it will increment the semaphore to wake up
+// one of the others. This is the same algorithm used
+// in Plan 9's user-level locks.
+
+void
+runtime·lock(Lock *l)
+{
+ if(m->locks < 0)
+ runtime·throw("lock count");
+ m->locks++;
+
+ if(runtime·xadd(&l->key, 1) > 1) { // someone else has it; wait
+ // Allocate semaphore if needed.
+ if(l->sema == 0)
+ initsema(&l->sema);
+ runtime·mach_semacquire(l->sema);
+ }
+}
+
+void
+runtime·unlock(Lock *l)
+{
+ m->locks--;
+ if(m->locks < 0)
+ runtime·throw("lock count");
+
+ if(runtime·xadd(&l->key, -1) > 0) { // someone else is waiting
+ // Allocate semaphore if needed.
+ if(l->sema == 0)
+ initsema(&l->sema);
+ runtime·mach_semrelease(l->sema);
+ }
+}
+
+static void
+destroylock(Lock *l)
+{
+ if(l->sema != 0) {
+ runtime·mach_semdestroy(l->sema);
+ l->sema = 0;
+ }
+}
+
+// User-level semaphore implementation:
+// try to do the operations in user space on u,
+// but when it's time to block, fall back on the kernel semaphore k.
+// This is the same algorithm used in Plan 9.
+void
+runtime·usemacquire(Usema *s)
+{
+ if((int32)runtime·xadd(&s->u, -1) < 0) {
+ if(s->k == 0)
+ initsema(&s->k);
+ runtime·mach_semacquire(s->k);
+ }
+}
+
+void
+runtime·usemrelease(Usema *s)
+{
+ if((int32)runtime·xadd(&s->u, 1) <= 0) {
+ if(s->k == 0)
+ initsema(&s->k);
+ runtime·mach_semrelease(s->k);
+ }
+}
+
+
+// Event notifications.
+void
+runtime·noteclear(Note *n)
+{
+ n->wakeup = 0;
+}
+
+void
+runtime·notesleep(Note *n)
+{
+ while(!n->wakeup)
+ runtime·usemacquire(&n->sema);
+}
+
+void
+runtime·notewakeup(Note *n)
+{
+ n->wakeup = 1;
+ runtime·usemrelease(&n->sema);
+}
+
+
+// BSD interface for threading.
+void
+runtime·osinit(void)
+{
+ // Register our thread-creation callback (see {amd64,386}/sys.s)
+ // but only if we're not using cgo. If we are using cgo we need
+ // to let the C pthread libary install its own thread-creation callback.
+ if(!runtime·iscgo)
+ runtime·bsdthread_register();
+ runtime·destroylock = destroylock;
+}
+
+void
+runtime·goenvs(void)
+{
+ runtime·goenvs_unix();
+}
+
+void
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
+{
+ int32 errno;
+
+ m->tls[0] = m->id; // so 386 asm can find it
+ if(0){
+ runtime·printf("newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
+ stk, m, g, fn, m->id, m->tls[0], &m);
+ }
+ if((errno = runtime·bsdthread_create(stk, m, g, fn)) < 0) {
+ runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), -errno);
+ runtime·throw("runtime.newosproc");
+ }
+}
+
+// Called to initialize a new m (including the bootstrap m).
+void
+runtime·minit(void)
+{
+ // Initialize signal handling.
+ m->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
+ runtime·signalstack(m->gsignal->stackguard - StackGuard, 32*1024);
+}
+
+// Mach IPC, to get at semaphores
+// Definitions are in /usr/include/mach on a Mac.
+
+static void
+macherror(int32 r, int8 *fn)
+{
+ runtime·printf("mach error %s: %d\n", fn, r);
+ runtime·throw("mach error");
+}
+
+enum
+{
+ DebugMach = 0
+};
+
+static MachNDR zerondr;
+
+#define MACH_MSGH_BITS(a, b) ((a) | ((b)<<8))
+
+static int32
+mach_msg(MachHeader *h,
+ int32 op,
+ uint32 send_size,
+ uint32 rcv_size,
+ uint32 rcv_name,
+ uint32 timeout,
+ uint32 notify)
+{
+ // TODO: Loop on interrupt.
+ return runtime·mach_msg_trap(h, op, send_size, rcv_size, rcv_name, timeout, notify);
+}
+
+// Mach RPC (MIG)
+
+enum
+{
+ MinMachMsg = 48,
+ Reply = 100,
+};
+
+#pragma pack on
+typedef struct CodeMsg CodeMsg;
+struct CodeMsg
+{
+ MachHeader h;
+ MachNDR NDR;
+ int32 code;
+};
+#pragma pack off
+
+static int32
+machcall(MachHeader *h, int32 maxsize, int32 rxsize)
+{
+ uint32 *p;
+ int32 i, ret, id;
+ uint32 port;
+ CodeMsg *c;
+
+ if((port = m->machport) == 0){
+ port = runtime·mach_reply_port();
+ m->machport = port;
+ }
+
+ h->msgh_bits |= MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ h->msgh_local_port = port;
+ h->msgh_reserved = 0;
+ id = h->msgh_id;
+
+ if(DebugMach){
+ p = (uint32*)h;
+ runtime·prints("send:\t");
+ for(i=0; i<h->msgh_size/sizeof(p[0]); i++){
+ runtime·prints(" ");
+ runtime·printpointer((void*)p[i]);
+ if(i%8 == 7)
+ runtime·prints("\n\t");
+ }
+ if(i%8)
+ runtime·prints("\n");
+ }
+
+ ret = mach_msg(h, MACH_SEND_MSG|MACH_RCV_MSG,
+ h->msgh_size, maxsize, port, 0, 0);
+ if(ret != 0){
+ if(DebugMach){
+ runtime·prints("mach_msg error ");
+ runtime·printint(ret);
+ runtime·prints("\n");
+ }
+ return ret;
+ }
+
+ if(DebugMach){
+ p = (uint32*)h;
+ runtime·prints("recv:\t");
+ for(i=0; i<h->msgh_size/sizeof(p[0]); i++){
+ runtime·prints(" ");
+ runtime·printpointer((void*)p[i]);
+ if(i%8 == 7)
+ runtime·prints("\n\t");
+ }
+ if(i%8)
+ runtime·prints("\n");
+ }
+
+ if(h->msgh_id != id+Reply){
+ if(DebugMach){
+ runtime·prints("mach_msg reply id mismatch ");
+ runtime·printint(h->msgh_id);
+ runtime·prints(" != ");
+ runtime·printint(id+Reply);
+ runtime·prints("\n");
+ }
+ return -303; // MIG_REPLY_MISMATCH
+ }
+
+ // Look for a response giving the return value.
+ // Any call can send this back with an error,
+ // and some calls only have return values so they
+ // send it back on success too. I don't quite see how
+ // you know it's one of these and not the full response
+ // format, so just look if the message is right.
+ c = (CodeMsg*)h;
+ if(h->msgh_size == sizeof(CodeMsg)
+ && !(h->msgh_bits & MACH_MSGH_BITS_COMPLEX)){
+ if(DebugMach){
+ runtime·prints("mig result ");
+ runtime·printint(c->code);
+ runtime·prints("\n");
+ }
+ return c->code;
+ }
+
+ if(h->msgh_size != rxsize){
+ if(DebugMach){
+ runtime·prints("mach_msg reply size mismatch ");
+ runtime·printint(h->msgh_size);
+ runtime·prints(" != ");
+ runtime·printint(rxsize);
+ runtime·prints("\n");
+ }
+ return -307; // MIG_ARRAY_TOO_LARGE
+ }
+
+ return 0;
+}
+
+
+// Semaphores!
+
+enum
+{
+ Tmach_semcreate = 3418,
+ Rmach_semcreate = Tmach_semcreate + Reply,
+
+ Tmach_semdestroy = 3419,
+ Rmach_semdestroy = Tmach_semdestroy + Reply,
+
+ // Mach calls that get interrupted by Unix signals
+ // return this error code. We retry them.
+ KERN_ABORTED = 14,
+};
+
+typedef struct Tmach_semcreateMsg Tmach_semcreateMsg;
+typedef struct Rmach_semcreateMsg Rmach_semcreateMsg;
+typedef struct Tmach_semdestroyMsg Tmach_semdestroyMsg;
+// Rmach_semdestroyMsg = CodeMsg
+
+#pragma pack on
+struct Tmach_semcreateMsg
+{
+ MachHeader h;
+ MachNDR ndr;
+ int32 policy;
+ int32 value;
+};
+
+struct Rmach_semcreateMsg
+{
+ MachHeader h;
+ MachBody body;
+ MachPort semaphore;
+};
+
+struct Tmach_semdestroyMsg
+{
+ MachHeader h;
+ MachBody body;
+ MachPort semaphore;
+};
+#pragma pack off
+
+uint32
+runtime·mach_semcreate(void)
+{
+ union {
+ Tmach_semcreateMsg tx;
+ Rmach_semcreateMsg rx;
+ uint8 pad[MinMachMsg];
+ } m;
+ int32 r;
+
+ m.tx.h.msgh_bits = 0;
+ m.tx.h.msgh_size = sizeof(m.tx);
+ m.tx.h.msgh_remote_port = runtime·mach_task_self();
+ m.tx.h.msgh_id = Tmach_semcreate;
+ m.tx.ndr = zerondr;
+
+ m.tx.policy = 0; // 0 = SYNC_POLICY_FIFO
+ m.tx.value = 0;
+
+ while((r = machcall(&m.tx.h, sizeof m, sizeof(m.rx))) != 0){
+ if(r == KERN_ABORTED) // interrupted
+ continue;
+ macherror(r, "semaphore_create");
+ }
+ if(m.rx.body.msgh_descriptor_count != 1)
+ unimplemented("mach_semcreate desc count");
+ return m.rx.semaphore.name;
+}
+
+void
+runtime·mach_semdestroy(uint32 sem)
+{
+ union {
+ Tmach_semdestroyMsg tx;
+ uint8 pad[MinMachMsg];
+ } m;
+ int32 r;
+
+ m.tx.h.msgh_bits = MACH_MSGH_BITS_COMPLEX;
+ m.tx.h.msgh_size = sizeof(m.tx);
+ m.tx.h.msgh_remote_port = runtime·mach_task_self();
+ m.tx.h.msgh_id = Tmach_semdestroy;
+ m.tx.body.msgh_descriptor_count = 1;
+ m.tx.semaphore.name = sem;
+ m.tx.semaphore.disposition = MACH_MSG_TYPE_MOVE_SEND;
+ m.tx.semaphore.type = 0;
+
+ while((r = machcall(&m.tx.h, sizeof m, 0)) != 0){
+ if(r == KERN_ABORTED) // interrupted
+ continue;
+ macherror(r, "semaphore_destroy");
+ }
+}
+
+// The other calls have simple system call traps in sys.s
+int32 runtime·mach_semaphore_wait(uint32 sema);
+int32 runtime·mach_semaphore_timedwait(uint32 sema, uint32 sec, uint32 nsec);
+int32 runtime·mach_semaphore_signal(uint32 sema);
+int32 runtime·mach_semaphore_signal_all(uint32 sema);
+
+void
+runtime·mach_semacquire(uint32 sem)
+{
+ int32 r;
+
+ while((r = runtime·mach_semaphore_wait(sem)) != 0) {
+ if(r == KERN_ABORTED) // interrupted
+ continue;
+ macherror(r, "semaphore_wait");
+ }
+}
+
+void
+runtime·mach_semrelease(uint32 sem)
+{
+ int32 r;
+
+ while((r = runtime·mach_semaphore_signal(sem)) != 0) {
+ if(r == KERN_ABORTED) // interrupted
+ continue;
+ macherror(r, "semaphore_signal");
+ }
+}
+
+void
+runtime·sigpanic(void)
+{
+ switch(g->sig) {
+ case SIGBUS:
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGSEGV:
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGFPE:
+ switch(g->sigcode0) {
+ case FPE_INTDIV:
+ runtime·panicstring("integer divide by zero");
+ case FPE_INTOVF:
+ runtime·panicstring("integer overflow");
+ }
+ runtime·panicstring("floating point error");
+ }
+ runtime·panicstring(runtime·sigtab[g->sig].name);
+}
diff --git a/src/pkg/runtime/debug.go b/src/pkg/runtime/debug.go
new file mode 100644
index 000000000..6370a57d8
--- /dev/null
+++ b/src/pkg/runtime/debug.go
@@ -0,0 +1,115 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Breakpoint() executes a breakpoint trap.
+func Breakpoint()
+
+// LockOSThread wires the calling goroutine to its current operating system thread.
+// Until the calling goroutine exits or calls UnlockOSThread, it will always
+// execute in that thread, and no other goroutine can.
+// LockOSThread cannot be used during init functions.
+func LockOSThread()
+
+// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
+// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
+func UnlockOSThread()
+
+// GOMAXPROCS sets the maximum number of CPUs that can be executing
+// simultaneously and returns the previous setting. If n < 1, it does not
+// change the current setting.
+// This call will go away when the scheduler improves.
+func GOMAXPROCS(n int) int
+
+// Cgocalls returns the number of cgo calls made by the current process.
+func Cgocalls() int64
+
+// Goroutines returns the number of goroutines that currently exist.
+func Goroutines() int32
+
+// Alloc allocates a block of the given size.
+// FOR TESTING AND DEBUGGING ONLY.
+func Alloc(uintptr) *byte
+
+// Free frees the block starting at the given pointer.
+// FOR TESTING AND DEBUGGING ONLY.
+func Free(*byte)
+
+// Lookup returns the base and size of the block containing the given pointer.
+// FOR TESTING AND DEBUGGING ONLY.
+func Lookup(*byte) (*byte, uintptr)
+
+// MemProfileRate controls the fraction of memory allocations
+// that are recorded and reported in the memory profile.
+// The profiler aims to sample an average of
+// one allocation per MemProfileRate bytes allocated.
+//
+// To include every allocated block in the profile, set MemProfileRate to 1.
+// To turn off profiling entirely, set MemProfileRate to 0.
+//
+// The tools that process the memory profiles assume that the
+// profile rate is constant across the lifetime of the program
+// and equal to the current value. Programs that change the
+// memory profiling rate should do so just once, as early as
+// possible in the execution of the program (for example,
+// at the beginning of main).
+var MemProfileRate int = 512 * 1024
+
+// A MemProfileRecord describes the live objects allocated
+// by a particular call sequence (stack trace).
+type MemProfileRecord struct {
+ AllocBytes, FreeBytes int64 // number of bytes allocated, freed
+ AllocObjects, FreeObjects int64 // number of objects allocated, freed
+ Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
+}
+
+// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
+func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
+
+// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
+func (r *MemProfileRecord) InUseObjects() int64 {
+ return r.AllocObjects - r.FreeObjects
+}
+
+// Stack returns the stack trace associated with the record,
+// a prefix of r.Stack0.
+func (r *MemProfileRecord) Stack() []uintptr {
+ for i, v := range r.Stack0 {
+ if v == 0 {
+ return r.Stack0[0:i]
+ }
+ }
+ return r.Stack0[0:]
+}
+
+// MemProfile returns n, the number of records in the current memory profile.
+// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
+// If len(p) < n, MemProfile does not change p and returns n, false.
+//
+// If inuseZero is true, the profile includes allocation records
+// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
+// These are sites where memory was allocated, but it has all
+// been released back to the runtime.
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.memprofile flag instead
+// of calling MemProfile directly.
+func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)
+
+// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
+// blocking until data is available. If profiling is turned off and all the profile
+// data accumulated while it was on has been returned, CPUProfile returns nil.
+// The caller must save the returned data before calling CPUProfile again.
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.cpuprofile flag instead of calling
+// CPUProfile directly.
+func CPUProfile() []byte
+
+// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
+// If hz <= 0, SetCPUProfileRate turns off profiling.
+// If the profiler is on, the rate cannot be changed without first turning it off.
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.cpuprofile flag instead of calling
+// SetCPUProfileRate directly.
+func SetCPUProfileRate(hz int)
diff --git a/src/pkg/runtime/debug/Makefile b/src/pkg/runtime/debug/Makefile
new file mode 100644
index 000000000..885f66aca
--- /dev/null
+++ b/src/pkg/runtime/debug/Makefile
@@ -0,0 +1,11 @@
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=runtime/debug
+GOFILES=\
+ stack.go\
+
+include ../../../Make.pkg
diff --git a/src/pkg/runtime/debug/stack.go b/src/pkg/runtime/debug/stack.go
new file mode 100644
index 000000000..a533a5c3b
--- /dev/null
+++ b/src/pkg/runtime/debug/stack.go
@@ -0,0 +1,90 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package debug contains facilities for programs to debug themselves while
+// they are running.
+package debug
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+)
+
+var (
+ dunno = []byte("???")
+ centerDot = []byte("·")
+ dot = []byte(".")
+)
+
+// PrintStack prints to standard error the stack trace returned by Stack.
+func PrintStack() {
+ os.Stderr.Write(stack())
+}
+
+// Stack returns a formatted stack trace of the goroutine that calls it.
+// For each routine, it includes the source line information and PC value,
+// then attempts to discover, for Go functions, the calling function or
+// method and the text of the line containing the invocation.
+func Stack() []byte {
+ return stack()
+}
+
+// stack implements Stack, skipping 2 frames
+func stack() []byte {
+ buf := new(bytes.Buffer) // the returned data
+ // As we loop, we open files and read them. These variables record the currently
+ // loaded file.
+ var lines [][]byte
+ var lastFile string
+ for i := 2; ; i++ { // Caller we care about is the user, 2 frames up
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ // Print this much at least. If we can't find the source, it won't show.
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+ if file != lastFile {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ continue
+ }
+ lines = bytes.Split(data, []byte{'\n'})
+ lastFile = file
+ }
+ line-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+ fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+ }
+ return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+ if n < 0 || n >= len(lines) {
+ return dunno
+ }
+ return bytes.Trim(lines[n], " \t")
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return dunno
+ }
+ name := []byte(fn.Name())
+ // The name includes the path name to the package, which is unnecessary
+ // since the file name is already included. Plus, it has center dots.
+ // That is, we see
+ // runtime/debug.*T·ptrmethod
+ // and want
+ // *T.ptrmethod
+ if period := bytes.Index(name, dot); period >= 0 {
+ name = name[period+1:]
+ }
+ name = bytes.Replace(name, centerDot, dot, -1)
+ return name
+}
diff --git a/src/pkg/runtime/debug/stack_test.go b/src/pkg/runtime/debug/stack_test.go
new file mode 100644
index 000000000..94293bb93
--- /dev/null
+++ b/src/pkg/runtime/debug/stack_test.go
@@ -0,0 +1,55 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "strings"
+ "testing"
+)
+
+type T int
+
+func (t *T) ptrmethod() []byte {
+ return Stack()
+}
+func (t T) method() []byte {
+ return t.ptrmethod()
+}
+
+/*
+ The traceback should look something like this, modulo line numbers and hex constants.
+ Don't worry much about the base levels, but check the ones in our own package.
+
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:15 (0x13878)
+ (*T).ptrmethod: return Stack()
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:18 (0x138dd)
+ T.method: return t.ptrmethod()
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:23 (0x13920)
+ TestStack: b := T(0).method()
+ /Users/r/go/src/pkg/testing/testing.go:132 (0x14a7a)
+ tRunner: test.F(t)
+ /Users/r/go/src/pkg/runtime/proc.c:145 (0xc970)
+ ???: runtime·unlock(&runtime·sched);
+*/
+func TestStack(t *testing.T) {
+ b := T(0).method()
+ lines := strings.Split(string(b), "\n")
+ if len(lines) <= 6 {
+ t.Fatal("too few lines")
+ }
+ check(t, lines[0], "src/pkg/runtime/debug/stack_test.go")
+ check(t, lines[1], "\t(*T).ptrmethod: return Stack()")
+ check(t, lines[2], "src/pkg/runtime/debug/stack_test.go")
+ check(t, lines[3], "\tT.method: return t.ptrmethod()")
+ check(t, lines[4], "src/pkg/runtime/debug/stack_test.go")
+ check(t, lines[5], "\tTestStack: b := T(0).method()")
+ check(t, lines[6], "src/pkg/testing/testing.go")
+}
+
+func check(t *testing.T, line, has string) {
+ if strings.Index(line, has) < 0 {
+ t.Errorf("expected %q in %q", has, line)
+ }
+}
diff --git a/src/pkg/runtime/error.go b/src/pkg/runtime/error.go
new file mode 100644
index 000000000..6c37f888f
--- /dev/null
+++ b/src/pkg/runtime/error.go
@@ -0,0 +1,138 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// The Error interface identifies a run time error.
+type Error interface {
+ String() string
+
+ // RuntimeError is a no-op function but
+ // serves to distinguish types that are runtime
+ // errors from ordinary os.Errors: a type is a
+ // runtime error if it has a RuntimeError method.
+ RuntimeError()
+}
+
+// A TypeAssertionError explains a failed type assertion.
+type TypeAssertionError struct {
+ interfaceType Type // interface had this type
+ concreteType Type // concrete value had this type
+ assertedType Type // asserted type
+ interfaceString string
+ concreteString string
+ assertedString string
+ missingMethod string // one method needed by Interface, missing from Concrete
+}
+
+func (*TypeAssertionError) RuntimeError() {}
+
+func (e *TypeAssertionError) String() string {
+ inter := e.interfaceString
+ if inter == "" {
+ inter = "interface"
+ }
+ if e.concreteType == nil {
+ return "interface conversion: " + inter + " is nil, not " + e.assertedString
+ }
+ if e.missingMethod == "" {
+ return "interface conversion: " + inter + " is " + e.concreteString +
+ ", not " + e.assertedString
+ }
+ return "interface conversion: " + e.concreteString + " is not " + e.assertedString +
+ ": missing method " + e.missingMethod
+}
+
+// Concrete returns the type of the concrete value in the failed type assertion.
+// If the interface value was nil, Concrete returns nil.
+func (e *TypeAssertionError) Concrete() Type {
+ return e.concreteType
+}
+
+// Asserted returns the type incorrectly asserted by the type assertion.
+func (e *TypeAssertionError) Asserted() Type {
+ return e.assertedType
+}
+
+// If the type assertion is to an interface type, MissingMethod returns the
+// name of a method needed to satisfy that interface type but not implemented
+// by Concrete. If there are multiple such methods,
+// MissingMethod returns one; which one is unspecified.
+// If the type assertion is not to an interface type, MissingMethod returns an empty string.
+func (e *TypeAssertionError) MissingMethod() string {
+ return e.missingMethod
+}
+
+// For calling from C.
+func newTypeAssertionError(pt1, pt2, pt3 *Type, ps1, ps2, ps3 *string, pmeth *string, ret *interface{}) {
+ var t1, t2, t3 Type
+ var s1, s2, s3, meth string
+
+ if pt1 != nil {
+ t1 = *pt1
+ }
+ if pt2 != nil {
+ t2 = *pt2
+ }
+ if pt3 != nil {
+ t3 = *pt3
+ }
+ if ps1 != nil {
+ s1 = *ps1
+ }
+ if ps2 != nil {
+ s2 = *ps2
+ }
+ if ps3 != nil {
+ s3 = *ps3
+ }
+ if pmeth != nil {
+ meth = *pmeth
+ }
+ *ret = &TypeAssertionError{t1, t2, t3, s1, s2, s3, meth}
+}
+
+// An errorString represents a runtime error described by a single string.
+type errorString string
+
+func (e errorString) RuntimeError() {}
+
+func (e errorString) String() string {
+ return "runtime error: " + string(e)
+}
+
+// For calling from C.
+func newErrorString(s string, ret *interface{}) {
+ *ret = errorString(s)
+}
+
+type stringer interface {
+ String() string
+}
+
+func typestring(interface{}) string
+
+// For calling from C.
+// Prints an argument passed to panic.
+// There's room for arbitrary complexity here, but we keep it
+// simple and handle just a few important cases: int, string, and Stringer.
+func printany(i interface{}) {
+ switch v := i.(type) {
+ case nil:
+ print("nil")
+ case stringer:
+ print(v.String())
+ case int:
+ print(v)
+ case string:
+ print(v)
+ default:
+ print("(", typestring(i), ") ", i)
+ }
+}
+
+// called from generated code
+func panicwrap(pkg, typ, meth string) {
+ panic("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer")
+}
diff --git a/src/pkg/runtime/export_test.go b/src/pkg/runtime/export_test.go
new file mode 100644
index 000000000..53c5fcba4
--- /dev/null
+++ b/src/pkg/runtime/export_test.go
@@ -0,0 +1,23 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Export guts for testing.
+
+package runtime
+
+var Fadd64 = fadd64
+var Fsub64 = fsub64
+var Fmul64 = fmul64
+var Fdiv64 = fdiv64
+var F64to32 = f64to32
+var F32to64 = f32to64
+var Fcmp64 = fcmp64
+var Fintto64 = fintto64
+var F64toint = f64toint
+
+func entersyscall()
+func exitsyscall()
+
+var Entersyscall = entersyscall
+var Exitsyscall = exitsyscall
diff --git a/src/pkg/runtime/extern.go b/src/pkg/runtime/extern.go
new file mode 100644
index 000000000..9da3423c6
--- /dev/null
+++ b/src/pkg/runtime/extern.go
@@ -0,0 +1,192 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ Package runtime contains operations that interact with Go's runtime system,
+ such as functions to control goroutines. It also includes the low-level type information
+ used by the reflect package; see reflect's documentation for the programmable
+ interface to the run-time type system.
+*/
+package runtime
+
+// Gosched yields the processor, allowing other goroutines to run. It does not
+// suspend the current goroutine, so execution resumes automatically.
+func Gosched()
+
+// Goexit terminates the goroutine that calls it. No other goroutine is affected.
+// Goexit runs all deferred calls before terminating the goroutine.
+func Goexit()
+
+// Caller reports file and line number information about function invocations on
+// the calling goroutine's stack. The argument skip is the number of stack frames to
+// ascend, with 0 identifying the the caller of Caller. The return values report the
+// program counter, file name, and line number within the file of the corresponding
+// call. The boolean ok is false if it was not possible to recover the information.
+func Caller(skip int) (pc uintptr, file string, line int, ok bool)
+
+// Callers fills the slice pc with the program counters of function invocations
+// on the calling goroutine's stack. The argument skip is the number of stack frames
+// to skip before recording in pc, with 0 starting at the caller of Caller.
+// It returns the number of entries written to pc.
+func Callers(skip int, pc []uintptr) int
+
+type Func struct { // Keep in sync with runtime.h:struct Func
+ name string
+ typ string // go type string
+ src string // src file name
+ pcln []byte // pc/ln tab for this func
+ entry uintptr // entry pc
+ pc0 uintptr // starting pc, ln for table
+ ln0 int32
+ frame int32 // stack frame size
+ args int32 // number of 32-bit in/out args
+ locals int32 // number of 32-bit locals
+}
+
+// FuncForPC returns a *Func describing the function that contains the
+// given program counter address, or else nil.
+func FuncForPC(pc uintptr) *Func
+
+// Name returns the name of the function.
+func (f *Func) Name() string { return f.name }
+
+// Entry returns the entry address of the function.
+func (f *Func) Entry() uintptr { return f.entry }
+
+// FileLine returns the file name and line number of the
+// source code corresponding to the program counter pc.
+// The result will not be accurate if pc is not a program
+// counter within f.
+func (f *Func) FileLine(pc uintptr) (file string, line int) {
+ // NOTE(rsc): If you edit this function, also edit
+ // symtab.c:/^funcline. That function also has the
+ // comments explaining the logic.
+ targetpc := pc
+
+ var pcQuant uintptr = 1
+ if GOARCH == "arm" {
+ pcQuant = 4
+ }
+
+ p := f.pcln
+ pc = f.pc0
+ line = int(f.ln0)
+ i := 0
+ //print("FileLine start pc=", pc, " targetpc=", targetpc, " line=", line,
+ // " tab=", p, " ", p[0], " quant=", pcQuant, " GOARCH=", GOARCH, "\n")
+ for {
+ for i < len(p) && p[i] > 128 {
+ pc += pcQuant * uintptr(p[i]-128)
+ i++
+ }
+ //print("pc<", pc, " targetpc=", targetpc, " line=", line, "\n")
+ if pc > targetpc || i >= len(p) {
+ break
+ }
+ if p[i] == 0 {
+ if i+5 > len(p) {
+ break
+ }
+ line += int(p[i+1]<<24) | int(p[i+2]<<16) | int(p[i+3]<<8) | int(p[i+4])
+ i += 5
+ } else if p[i] <= 64 {
+ line += int(p[i])
+ i++
+ } else {
+ line -= int(p[i] - 64)
+ i++
+ }
+ //print("pc=", pc, " targetpc=", targetpc, " line=", line, "\n")
+ pc += pcQuant
+ }
+ file = f.src
+ return
+}
+
+// mid returns the current os thread (m) id.
+func mid() uint32
+
+// Semacquire waits until *s > 0 and then atomically decrements it.
+// It is intended as a simple sleep primitive for use by the synchronization
+// library and should not be used directly.
+func Semacquire(s *uint32)
+
+// Semrelease atomically increments *s and notifies a waiting goroutine
+// if one is blocked in Semacquire.
+// It is intended as a simple wakeup primitive for use by the synchronization
+// library and should not be used directly.
+func Semrelease(s *uint32)
+
+// SetFinalizer sets the finalizer associated with x to f.
+// When the garbage collector finds an unreachable block
+// with an associated finalizer, it clears the association and runs
+// f(x) in a separate goroutine. This makes x reachable again, but
+// now without an associated finalizer. Assuming that SetFinalizer
+// is not called again, the next time the garbage collector sees
+// that x is unreachable, it will free x.
+//
+// SetFinalizer(x, nil) clears any finalizer associated with x.
+//
+// The argument x must be a pointer to an object allocated by
+// calling new or by taking the address of a composite literal.
+// The argument f must be a function that takes a single argument
+// of x's type and returns no arguments. If either of these is not
+// true, SetFinalizer aborts the program.
+//
+// Finalizers are run in dependency order: if A points at B, both have
+// finalizers, and they are otherwise unreachable, only the finalizer
+// for A runs; once A is freed, the finalizer for B can run.
+// If a cyclic structure includes a block with a finalizer, that
+// cycle is not guaranteed to be garbage collected and the finalizer
+// is not guaranteed to run, because there is no ordering that
+// respects the dependencies.
+//
+// The finalizer for x is scheduled to run at some arbitrary time after
+// x becomes unreachable.
+// There is no guarantee that finalizers will run before a program exits,
+// so typically they are useful only for releasing non-memory resources
+// associated with an object during a long-running program.
+// For example, an os.File object could use a finalizer to close the
+// associated operating system file descriptor when a program discards
+// an os.File without calling Close, but it would be a mistake
+// to depend on a finalizer to flush an in-memory I/O buffer such as a
+// bufio.Writer, because the buffer would not be flushed at program exit.
+//
+// A single goroutine runs all finalizers for a program, sequentially.
+// If a finalizer must run for a long time, it should do so by starting
+// a new goroutine.
+//
+// TODO(rsc): allow f to have (ignored) return values
+//
+func SetFinalizer(x, f interface{})
+
+func getgoroot() string
+
+// GOROOT returns the root of the Go tree.
+// It uses the GOROOT environment variable, if set,
+// or else the root used during the Go build.
+func GOROOT() string {
+ s := getgoroot()
+ if s != "" {
+ return s
+ }
+ return defaultGoroot
+}
+
+// Version returns the Go tree's version string.
+// It is either a sequence number or, when possible,
+// a release tag like "release.2010-03-04".
+// A trailing + indicates that the tree had local modifications
+// at the time of the build.
+func Version() string {
+ return theVersion
+}
+
+// GOOS is the Go tree's operating system target:
+// one of darwin, freebsd, linux, and so on.
+const GOOS string = theGoos
+
+// GOARCH is the Go tree's architecture target:
+// 386, amd64, or arm.
+const GOARCH string = theGoarch
diff --git a/src/pkg/runtime/float.c b/src/pkg/runtime/float.c
new file mode 100644
index 000000000..f481519f6
--- /dev/null
+++ b/src/pkg/runtime/float.c
@@ -0,0 +1,173 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+static uint64 uvnan = 0x7FF0000000000001ULL;
+static uint64 uvinf = 0x7FF0000000000000ULL;
+static uint64 uvneginf = 0xFFF0000000000000ULL;
+
+uint32
+runtime·float32tobits(float32 f)
+{
+ // The obvious cast-and-pointer code is technically
+ // not valid, and gcc miscompiles it. Use a union instead.
+ union {
+ float32 f;
+ uint32 i;
+ } u;
+ u.f = f;
+ return u.i;
+}
+
+uint64
+runtime·float64tobits(float64 f)
+{
+ // The obvious cast-and-pointer code is technically
+ // not valid, and gcc miscompiles it. Use a union instead.
+ union {
+ float64 f;
+ uint64 i;
+ } u;
+ u.f = f;
+ return u.i;
+}
+
+float64
+runtime·float64frombits(uint64 i)
+{
+ // The obvious cast-and-pointer code is technically
+ // not valid, and gcc miscompiles it. Use a union instead.
+ union {
+ float64 f;
+ uint64 i;
+ } u;
+ u.i = i;
+ return u.f;
+}
+
+float32
+runtime·float32frombits(uint32 i)
+{
+ // The obvious cast-and-pointer code is technically
+ // not valid, and gcc miscompiles it. Use a union instead.
+ union {
+ float32 f;
+ uint32 i;
+ } u;
+ u.i = i;
+ return u.f;
+}
+
+bool
+runtime·isInf(float64 f, int32 sign)
+{
+ uint64 x;
+
+ x = runtime·float64tobits(f);
+ if(sign == 0)
+ return x == uvinf || x == uvneginf;
+ if(sign > 0)
+ return x == uvinf;
+ return x == uvneginf;
+}
+
+float64
+runtime·NaN(void)
+{
+ return runtime·float64frombits(uvnan);
+}
+
+bool
+runtime·isNaN(float64 f)
+{
+ uint64 x;
+
+ x = runtime·float64tobits(f);
+ return ((uint32)(x>>52) & 0x7FF) == 0x7FF && !runtime·isInf(f, 0);
+}
+
+float64
+runtime·Inf(int32 sign)
+{
+ if(sign >= 0)
+ return runtime·float64frombits(uvinf);
+ else
+ return runtime·float64frombits(uvneginf);
+}
+
+enum
+{
+ MASK = 0x7ffL,
+ SHIFT = 64-11-1,
+ BIAS = 1022L,
+};
+
+float64
+runtime·frexp(float64 d, int32 *ep)
+{
+ uint64 x;
+
+ if(d == 0) {
+ *ep = 0;
+ return 0;
+ }
+ x = runtime·float64tobits(d);
+ *ep = (int32)((x >> SHIFT) & MASK) - BIAS;
+ x &= ~((uint64)MASK << SHIFT);
+ x |= (uint64)BIAS << SHIFT;
+ return runtime·float64frombits(x);
+}
+
+float64
+runtime·ldexp(float64 d, int32 e)
+{
+ uint64 x;
+
+ if(d == 0)
+ return 0;
+ x = runtime·float64tobits(d);
+ e += (int32)(x >> SHIFT) & MASK;
+ if(e <= 0)
+ return 0; /* underflow */
+ if(e >= MASK){ /* overflow */
+ if(d < 0)
+ return runtime·Inf(-1);
+ return runtime·Inf(1);
+ }
+ x &= ~((uint64)MASK << SHIFT);
+ x |= (uint64)e << SHIFT;
+ return runtime·float64frombits(x);
+}
+
+float64
+runtime·modf(float64 d, float64 *ip)
+{
+ float64 dd;
+ uint64 x;
+ int32 e;
+
+ if(d < 1) {
+ if(d < 0) {
+ d = runtime·modf(-d, ip);
+ *ip = -*ip;
+ return -d;
+ }
+ *ip = 0;
+ return d;
+ }
+
+ x = runtime·float64tobits(d);
+ e = (int32)((x >> SHIFT) & MASK) - BIAS;
+
+ /*
+ * Keep the top 11+e bits; clear the rest.
+ */
+ if(e <= 64-11)
+ x &= ~(((uint64)1 << (64LL-11LL-e))-1);
+ dd = runtime·float64frombits(x);
+ *ip = dd;
+ return d - dd;
+}
+
diff --git a/src/pkg/runtime/freebsd/386/defs.h b/src/pkg/runtime/freebsd/386/defs.h
new file mode 100644
index 000000000..ae12b2019
--- /dev/null
+++ b/src/pkg/runtime/freebsd/386/defs.h
@@ -0,0 +1,187 @@
+// godefs -f -m32 defs.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x1000,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ SA_SIGINFO = 0x40,
+ SA_RESTART = 0x2,
+ SA_ONSTACK = 0x1,
+ UMTX_OP_WAIT = 0x2,
+ UMTX_OP_WAKE = 0x3,
+ EINTR = 0x4,
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGEMT = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGBUS = 0xa,
+ SIGSEGV = 0xb,
+ SIGSYS = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGTERM = 0xf,
+ SIGURG = 0x10,
+ SIGSTOP = 0x11,
+ SIGTSTP = 0x12,
+ SIGCONT = 0x13,
+ SIGCHLD = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGIO = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGINFO = 0x1d,
+ SIGUSR1 = 0x1e,
+ SIGUSR2 = 0x1f,
+ FPE_INTDIV = 0x2,
+ FPE_INTOVF = 0x1,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+ ITIMER_REAL = 0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+};
+
+// Types
+#pragma pack on
+
+typedef struct Rtprio Rtprio;
+struct Rtprio {
+ uint16 type;
+ uint16 prio;
+};
+
+typedef struct ThrParam ThrParam;
+struct ThrParam {
+ void *start_func;
+ void *arg;
+ int8 *stack_base;
+ uint32 stack_size;
+ int8 *tls_base;
+ uint32 tls_size;
+ int32 *child_tid;
+ int32 *parent_tid;
+ int32 flags;
+ Rtprio *rtp;
+ void* spare[3];
+};
+
+typedef struct Sigaltstack Sigaltstack;
+struct Sigaltstack {
+ int8 *ss_sp;
+ uint32 ss_size;
+ int32 ss_flags;
+};
+
+typedef struct Sigset Sigset;
+struct Sigset {
+ uint32 __bits[4];
+};
+
+typedef union Sigval Sigval;
+union Sigval {
+ int32 sival_int;
+ void *sival_ptr;
+ int32 sigval_int;
+ void *sigval_ptr;
+};
+
+typedef struct StackT StackT;
+struct StackT {
+ int8 *ss_sp;
+ uint32 ss_size;
+ int32 ss_flags;
+};
+
+typedef struct Siginfo Siginfo;
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ int32 si_pid;
+ uint32 si_uid;
+ int32 si_status;
+ void *si_addr;
+ Sigval si_value;
+ byte _reason[32];
+};
+
+typedef struct Mcontext Mcontext;
+struct Mcontext {
+ int32 mc_onstack;
+ int32 mc_gs;
+ int32 mc_fs;
+ int32 mc_es;
+ int32 mc_ds;
+ int32 mc_edi;
+ int32 mc_esi;
+ int32 mc_ebp;
+ int32 mc_isp;
+ int32 mc_ebx;
+ int32 mc_edx;
+ int32 mc_ecx;
+ int32 mc_eax;
+ int32 mc_trapno;
+ int32 mc_err;
+ int32 mc_eip;
+ int32 mc_cs;
+ int32 mc_eflags;
+ int32 mc_esp;
+ int32 mc_ss;
+ int32 mc_len;
+ int32 mc_fpformat;
+ int32 mc_ownedfp;
+ int32 mc_spare1[1];
+ int32 mc_fpstate[128];
+ int32 mc_fsbase;
+ int32 mc_gsbase;
+ int32 mc_spare2[6];
+};
+
+typedef struct Ucontext Ucontext;
+struct Ucontext {
+ Sigset uc_sigmask;
+ Mcontext uc_mcontext;
+ Ucontext *uc_link;
+ StackT uc_stack;
+ int32 uc_flags;
+ int32 __spare__[4];
+ byte pad_godefs_0[12];
+};
+
+typedef struct Timeval Timeval;
+struct Timeval {
+ int32 tv_sec;
+ int32 tv_usec;
+};
+
+typedef struct Itimerval Itimerval;
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+#pragma pack off
diff --git a/src/pkg/runtime/freebsd/386/rt0.s b/src/pkg/runtime/freebsd/386/rt0.s
new file mode 100644
index 000000000..3ca981b3a
--- /dev/null
+++ b/src/pkg/runtime/freebsd/386/rt0.s
@@ -0,0 +1,9 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin and Linux use the same linkage to main
+
+TEXT _rt0_386_freebsd(SB),7,$0
+ JMP _rt0_386(SB)
+
diff --git a/src/pkg/runtime/freebsd/386/signal.c b/src/pkg/runtime/freebsd/386/signal.c
new file mode 100644
index 000000000..2fe7ecd70
--- /dev/null
+++ b/src/pkg/runtime/freebsd/386/signal.c
@@ -0,0 +1,193 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "signals.h"
+#include "os.h"
+
+extern void runtime·sigtramp(void);
+
+typedef struct sigaction {
+ union {
+ void (*__sa_handler)(int32);
+ void (*__sa_sigaction)(int32, Siginfo*, void *);
+ } __sigaction_u; /* signal handler */
+ int32 sa_flags; /* see signal options below */
+ int64 sa_mask; /* signal mask to apply */
+} Sigaction;
+
+void
+runtime·dumpregs(Mcontext *r)
+{
+ runtime·printf("eax %x\n", r->mc_eax);
+ runtime·printf("ebx %x\n", r->mc_ebx);
+ runtime·printf("ecx %x\n", r->mc_ecx);
+ runtime·printf("edx %x\n", r->mc_edx);
+ runtime·printf("edi %x\n", r->mc_edi);
+ runtime·printf("esi %x\n", r->mc_esi);
+ runtime·printf("ebp %x\n", r->mc_ebp);
+ runtime·printf("esp %x\n", r->mc_esp);
+ runtime·printf("eip %x\n", r->mc_eip);
+ runtime·printf("eflags %x\n", r->mc_eflags);
+ runtime·printf("cs %x\n", r->mc_cs);
+ runtime·printf("fs %x\n", r->mc_fs);
+ runtime·printf("gs %x\n", r->mc_gs);
+}
+
+String
+runtime·signame(int32 sig)
+{
+ if(sig < 0 || sig >= NSIG)
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp)
+{
+ Ucontext *uc;
+ Mcontext *r;
+ uintptr *sp;
+
+ uc = context;
+ r = &uc->uc_mcontext;
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)r->mc_eip, (uint8*)r->mc_esp, nil, gp);
+ return;
+ }
+
+ if(gp != nil && (runtime·sigtab[sig].flags & SigPanic)) {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = info->si_code;
+ gp->sigcode1 = (uintptr)info->si_addr;
+ gp->sigpc = r->mc_eip;
+
+ // Only push runtime·sigpanic if r->mc_eip != 0.
+ // If r->mc_eip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
+ // won't get to see who faulted.)
+ if(r->mc_eip != 0) {
+ sp = (uintptr*)r->mc_esp;
+ *--sp = r->mc_eip;
+ r->mc_esp = (uintptr)sp;
+ }
+ r->mc_eip = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
+ return;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG)
+ runtime·printf("Signal %d\n", sig);
+ else
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+
+ runtime·printf("PC=%X\n", r->mc_eip);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->mc_eip, (void*)r->mc_esp, 0, gp);
+ runtime·tracebackothers(gp);
+ runtime·dumpregs(r);
+ }
+
+ runtime·exit(2);
+}
+
+// Called from kernel on signal stack, so no stack split.
+#pragma textflag 7
+void
+runtime·sigignore(void)
+{
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ Sigaltstack st;
+
+ st.ss_sp = (int8*)p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ runtime·sigaltstack(&st, nil);
+}
+
+static void
+sigaction(int32 i, void (*fn)(int32, Siginfo*, void*, G*), bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask = ~0ULL;
+ if (fn == runtime·sighandler)
+ fn = (void*)runtime·sigtramp;
+ sa.__sigaction_u.__sa_sigaction = (void*)fn;
+ runtime·sigaction(i, &sa, nil);
+}
+
+void
+runtime·initsig(int32 queue)
+{
+ int32 i;
+ void *fn;
+
+ runtime·siginit();
+
+ for(i = 0; i<NSIG; i++) {
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
+ continue;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ fn = runtime·sighandler;
+ else
+ fn = runtime·sigignore;
+ sigaction(i, fn, (runtime·sigtab[i].flags & SigRestart) != 0);
+ }
+ }
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ Itimerval it;
+
+ runtime·memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ sigaction(SIGPROF, SIG_IGN, true);
+ } else {
+ sigaction(SIGPROF, runtime·sighandler, true);
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ }
+ m->profilehz = hz;
+}
+
+void
+os·sigpipe(void)
+{
+ sigaction(SIGPIPE, SIG_DFL, false);
+ runtime·raisesigpipe();
+}
diff --git a/src/pkg/runtime/freebsd/386/sys.s b/src/pkg/runtime/freebsd/386/sys.s
new file mode 100644
index 000000000..765e2fcc4
--- /dev/null
+++ b/src/pkg/runtime/freebsd/386/sys.s
@@ -0,0 +1,239 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// System calls and other sys.stuff for 386, FreeBSD
+// /usr/src/sys/kern/syscalls.master for syscall numbers.
+//
+
+#include "386/asm.h"
+
+TEXT runtime·sys_umtx_op(SB),7,$-4
+ MOVL $454, AX
+ INT $0x80
+ RET
+
+TEXT runtime·thr_new(SB),7,$-4
+ MOVL $455, AX
+ INT $0x80
+ RET
+
+TEXT runtime·thr_start(SB),7,$0
+ MOVL mm+0(FP), AX
+ MOVL m_g0(AX), BX
+ LEAL m_tls(AX), BP
+ MOVL 0(BP), DI
+ ADDL $7, DI
+ PUSHAL
+ PUSHL $32
+ PUSHL BP
+ PUSHL DI
+ CALL runtime·setldt(SB)
+ POPL AX
+ POPL AX
+ POPL AX
+ POPAL
+ get_tls(CX)
+ MOVL BX, g(CX)
+
+ MOVL AX, m(CX)
+ CALL runtime·stackcheck(SB) // smashes AX
+ CALL runtime·mstart(SB)
+ MOVL 0, AX // crash (not reached)
+
+// Exit the entire program (like C exit)
+TEXT runtime·exit(SB),7,$-4
+ MOVL $1, AX
+ INT $0x80
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·exit1(SB),7,$-4
+ MOVL $431, AX
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·write(SB),7,$-4
+ MOVL $4, AX
+ INT $0x80
+ RET
+
+TEXT runtime·raisesigpipe(SB),7,$12
+ // thr_self(&8(SP))
+ LEAL 8(SP), AX
+ MOVL AX, 0(SP)
+ MOVL $432, AX
+ INT $0x80
+ // thr_kill(self, SIGPIPE)
+ MOVL 8(SP), AX
+ MOVL AX, 0(SP)
+ MOVL $13, 4(SP)
+ MOVL $433, AX
+ INT $0x80
+ RET
+
+TEXT runtime·notok(SB),7,$0
+ MOVL $0xf1, 0xf1
+ RET
+
+TEXT runtime·mmap(SB),7,$32
+ LEAL arg0+0(FP), SI
+ LEAL 4(SP), DI
+ CLD
+ MOVSL
+ MOVSL
+ MOVSL
+ MOVSL
+ MOVSL
+ MOVSL
+ MOVL $0, AX // top 64 bits of file offset
+ STOSL
+ MOVL $477, AX
+ INT $0x80
+ RET
+
+TEXT runtime·munmap(SB),7,$-4
+ MOVL $73, AX
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·setitimer(SB), 7, $-4
+ MOVL $83, AX
+ INT $0x80
+ RET
+
+TEXT runtime·gettime(SB), 7, $32
+ MOVL $116, AX
+ LEAL 12(SP), BX
+ MOVL BX, 4(SP)
+ MOVL $0, 8(SP)
+ INT $0x80
+
+ MOVL 12(SP), BX // sec
+ MOVL sec+0(FP), DI
+ MOVL BX, (DI)
+ MOVL $0, 4(DI) // zero extend 32 -> 64 bits
+
+ MOVL 16(SP), BX // usec
+ MOVL usec+4(FP), DI
+ MOVL BX, (DI)
+ RET
+
+TEXT runtime·sigaction(SB),7,$-4
+ MOVL $416, AX
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·sigtramp(SB),7,$44
+ get_tls(CX)
+
+ // save g
+ MOVL g(CX), DI
+ MOVL DI, 20(SP)
+
+ // g = m->gsignal
+ MOVL m(CX), BX
+ MOVL m_gsignal(BX), BX
+ MOVL BX, g(CX)
+
+ // copy arguments for call to sighandler
+ MOVL signo+0(FP), BX
+ MOVL BX, 0(SP)
+ MOVL info+4(FP), BX
+ MOVL BX, 4(SP)
+ MOVL context+8(FP), BX
+ MOVL BX, 8(SP)
+ MOVL DI, 12(SP)
+
+ CALL runtime·sighandler(SB)
+
+ // restore g
+ get_tls(CX)
+ MOVL 20(SP), BX
+ MOVL BX, g(CX)
+
+ // call sigreturn
+ MOVL context+8(FP), AX
+ MOVL $0, 0(SP) // syscall gap
+ MOVL AX, 4(SP)
+ MOVL $417, AX // sigreturn(ucontext)
+ INT $0x80
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·sigaltstack(SB),7,$0
+ MOVL $53, AX
+ INT $0x80
+ JAE 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+/*
+descriptor entry format for system call
+is the native machine format, ugly as it is:
+
+ 2-byte limit
+ 3-byte base
+ 1-byte: 0x80=present, 0x60=dpl<<5, 0x1F=type
+ 1-byte: 0x80=limit is *4k, 0x40=32-bit operand size,
+ 0x0F=4 more bits of limit
+ 1 byte: 8 more bits of base
+
+int i386_get_ldt(int, union ldt_entry *, int);
+int i386_set_ldt(int, const union ldt_entry *, int);
+
+*/
+
+// setldt(int entry, int address, int limit)
+TEXT runtime·setldt(SB),7,$32
+ MOVL address+4(FP), BX // aka base
+ // see comment in linux/386/sys.s; freebsd is similar
+ ADDL $0x8, BX
+
+ // set up data_desc
+ LEAL 16(SP), AX // struct data_desc
+ MOVL $0, 0(AX)
+ MOVL $0, 4(AX)
+
+ MOVW BX, 2(AX)
+ SHRL $16, BX
+ MOVB BX, 4(AX)
+ SHRL $8, BX
+ MOVB BX, 7(AX)
+
+ MOVW $0xffff, 0(AX)
+ MOVB $0xCF, 6(AX) // 32-bit operand, 4k limit unit, 4 more bits of limit
+
+ MOVB $0xF2, 5(AX) // r/w data descriptor, dpl=3, present
+
+ // call i386_set_ldt(entry, desc, 1)
+ MOVL $0xffffffff, 0(SP) // auto-allocate entry and return in AX
+ MOVL AX, 4(SP)
+ MOVL $1, 8(SP)
+ CALL runtime·i386_set_ldt(SB)
+
+ // compute segment selector - (entry*8+7)
+ SHLL $3, AX
+ ADDL $7, AX
+ MOVW AX, GS
+ RET
+
+TEXT runtime·i386_set_ldt(SB),7,$16
+ LEAL args+0(FP), AX // 0(FP) == 4(SP) before SP got moved
+ MOVL $0, 0(SP) // syscall gap
+ MOVL $1, 4(SP)
+ MOVL AX, 8(SP)
+ MOVL $165, AX
+ INT $0x80
+ CMPL AX, $0xfffff001
+ JLS 2(PC)
+ INT $3
+ RET
+
+GLOBL runtime·tlsoffset(SB),$4
diff --git a/src/pkg/runtime/freebsd/amd64/defs.h b/src/pkg/runtime/freebsd/amd64/defs.h
new file mode 100644
index 000000000..b101b1932
--- /dev/null
+++ b/src/pkg/runtime/freebsd/amd64/defs.h
@@ -0,0 +1,198 @@
+// godefs -f -m64 defs.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x1000,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ SA_SIGINFO = 0x40,
+ SA_RESTART = 0x2,
+ SA_ONSTACK = 0x1,
+ UMTX_OP_WAIT = 0x2,
+ UMTX_OP_WAKE = 0x3,
+ EINTR = 0x4,
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGEMT = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGBUS = 0xa,
+ SIGSEGV = 0xb,
+ SIGSYS = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGTERM = 0xf,
+ SIGURG = 0x10,
+ SIGSTOP = 0x11,
+ SIGTSTP = 0x12,
+ SIGCONT = 0x13,
+ SIGCHLD = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGIO = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGINFO = 0x1d,
+ SIGUSR1 = 0x1e,
+ SIGUSR2 = 0x1f,
+ FPE_INTDIV = 0x2,
+ FPE_INTOVF = 0x1,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+ ITIMER_REAL = 0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+};
+
+// Types
+#pragma pack on
+
+typedef struct Rtprio Rtprio;
+struct Rtprio {
+ uint16 type;
+ uint16 prio;
+};
+
+typedef struct ThrParam ThrParam;
+struct ThrParam {
+ void *start_func;
+ void *arg;
+ int8 *stack_base;
+ uint64 stack_size;
+ int8 *tls_base;
+ uint64 tls_size;
+ int64 *child_tid;
+ int64 *parent_tid;
+ int32 flags;
+ byte pad_godefs_0[4];
+ Rtprio *rtp;
+ void* spare[3];
+};
+
+typedef struct Sigaltstack Sigaltstack;
+struct Sigaltstack {
+ int8 *ss_sp;
+ uint64 ss_size;
+ int32 ss_flags;
+ byte pad_godefs_0[4];
+};
+
+typedef struct Sigset Sigset;
+struct Sigset {
+ uint32 __bits[4];
+};
+
+typedef union Sigval Sigval;
+union Sigval {
+ int32 sival_int;
+ void *sival_ptr;
+ int32 sigval_int;
+ void *sigval_ptr;
+};
+
+typedef struct StackT StackT;
+struct StackT {
+ int8 *ss_sp;
+ uint64 ss_size;
+ int32 ss_flags;
+ byte pad_godefs_0[4];
+};
+
+typedef struct Siginfo Siginfo;
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ int32 si_pid;
+ uint32 si_uid;
+ int32 si_status;
+ void *si_addr;
+ Sigval si_value;
+ byte _reason[40];
+};
+
+typedef struct Mcontext Mcontext;
+struct Mcontext {
+ int64 mc_onstack;
+ int64 mc_rdi;
+ int64 mc_rsi;
+ int64 mc_rdx;
+ int64 mc_rcx;
+ int64 mc_r8;
+ int64 mc_r9;
+ int64 mc_rax;
+ int64 mc_rbx;
+ int64 mc_rbp;
+ int64 mc_r10;
+ int64 mc_r11;
+ int64 mc_r12;
+ int64 mc_r13;
+ int64 mc_r14;
+ int64 mc_r15;
+ uint32 mc_trapno;
+ uint16 mc_fs;
+ uint16 mc_gs;
+ int64 mc_addr;
+ uint32 mc_flags;
+ uint16 mc_es;
+ uint16 mc_ds;
+ int64 mc_err;
+ int64 mc_rip;
+ int64 mc_cs;
+ int64 mc_rflags;
+ int64 mc_rsp;
+ int64 mc_ss;
+ int64 mc_len;
+ int64 mc_fpformat;
+ int64 mc_ownedfp;
+ int64 mc_fpstate[64];
+ int64 mc_fsbase;
+ int64 mc_gsbase;
+ int64 mc_spare[6];
+};
+
+typedef struct Ucontext Ucontext;
+struct Ucontext {
+ Sigset uc_sigmask;
+ Mcontext uc_mcontext;
+ Ucontext *uc_link;
+ StackT uc_stack;
+ int32 uc_flags;
+ int32 __spare__[4];
+ byte pad_godefs_0[12];
+};
+
+typedef struct Timeval Timeval;
+struct Timeval {
+ int64 tv_sec;
+ int64 tv_usec;
+};
+
+typedef struct Itimerval Itimerval;
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+#pragma pack off
diff --git a/src/pkg/runtime/freebsd/amd64/rt0.s b/src/pkg/runtime/freebsd/amd64/rt0.s
new file mode 100644
index 000000000..5d2eeeeff
--- /dev/null
+++ b/src/pkg/runtime/freebsd/amd64/rt0.s
@@ -0,0 +1,9 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin and Linux use the same linkage to main
+
+TEXT _rt0_amd64_freebsd(SB),7,$-8
+ MOVQ $_rt0_amd64(SB), DX
+ JMP DX
diff --git a/src/pkg/runtime/freebsd/amd64/signal.c b/src/pkg/runtime/freebsd/amd64/signal.c
new file mode 100644
index 000000000..8015e366e
--- /dev/null
+++ b/src/pkg/runtime/freebsd/amd64/signal.c
@@ -0,0 +1,201 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "signals.h"
+#include "os.h"
+
+extern void runtime·sigtramp(void);
+
+typedef struct sigaction {
+ union {
+ void (*__sa_handler)(int32);
+ void (*__sa_sigaction)(int32, Siginfo*, void *);
+ } __sigaction_u; /* signal handler */
+ int32 sa_flags; /* see signal options below */
+ int64 sa_mask; /* signal mask to apply */
+} Sigaction;
+
+void
+runtime·dumpregs(Mcontext *r)
+{
+ runtime·printf("rax %X\n", r->mc_rax);
+ runtime·printf("rbx %X\n", r->mc_rbx);
+ runtime·printf("rcx %X\n", r->mc_rcx);
+ runtime·printf("rdx %X\n", r->mc_rdx);
+ runtime·printf("rdi %X\n", r->mc_rdi);
+ runtime·printf("rsi %X\n", r->mc_rsi);
+ runtime·printf("rbp %X\n", r->mc_rbp);
+ runtime·printf("rsp %X\n", r->mc_rsp);
+ runtime·printf("r8 %X\n", r->mc_r8 );
+ runtime·printf("r9 %X\n", r->mc_r9 );
+ runtime·printf("r10 %X\n", r->mc_r10);
+ runtime·printf("r11 %X\n", r->mc_r11);
+ runtime·printf("r12 %X\n", r->mc_r12);
+ runtime·printf("r13 %X\n", r->mc_r13);
+ runtime·printf("r14 %X\n", r->mc_r14);
+ runtime·printf("r15 %X\n", r->mc_r15);
+ runtime·printf("rip %X\n", r->mc_rip);
+ runtime·printf("rflags %X\n", r->mc_flags);
+ runtime·printf("cs %X\n", r->mc_cs);
+ runtime·printf("fs %X\n", r->mc_fs);
+ runtime·printf("gs %X\n", r->mc_gs);
+}
+
+String
+runtime·signame(int32 sig)
+{
+ if(sig < 0 || sig >= NSIG)
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp)
+{
+ Ucontext *uc;
+ Mcontext *r;
+ uintptr *sp;
+
+ uc = context;
+ r = &uc->uc_mcontext;
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)r->mc_rip, (uint8*)r->mc_rsp, nil, gp);
+ return;
+ }
+
+ if(gp != nil && (runtime·sigtab[sig].flags & SigPanic)) {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = info->si_code;
+ gp->sigcode1 = (uintptr)info->si_addr;
+ gp->sigpc = r->mc_rip;
+
+ // Only push runtime·sigpanic if r->mc_rip != 0.
+ // If r->mc_rip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
+ // won't get to see who faulted.)
+ if(r->mc_rip != 0) {
+ sp = (uintptr*)r->mc_rsp;
+ *--sp = r->mc_rip;
+ r->mc_rsp = (uintptr)sp;
+ }
+ r->mc_rip = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
+ return;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG)
+ runtime·printf("Signal %d\n", sig);
+ else
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+
+ runtime·printf("PC=%X\n", r->mc_rip);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->mc_rip, (void*)r->mc_rsp, 0, gp);
+ runtime·tracebackothers(gp);
+ runtime·dumpregs(r);
+ }
+
+ runtime·exit(2);
+}
+
+// Called from kernel on signal stack, so no stack split.
+#pragma textflag 7
+void
+runtime·sigignore(void)
+{
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ Sigaltstack st;
+
+ st.ss_sp = (int8*)p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ runtime·sigaltstack(&st, nil);
+}
+
+static void
+sigaction(int32 i, void (*fn)(int32, Siginfo*, void*, G*), bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask = ~0ULL;
+ if (fn == runtime·sighandler)
+ fn = (void*)runtime·sigtramp;
+ sa.__sigaction_u.__sa_sigaction = (void*)fn;
+ runtime·sigaction(i, &sa, nil);
+}
+
+void
+runtime·initsig(int32 queue)
+{
+ int32 i;
+ void *fn;
+
+ runtime·siginit();
+
+ for(i = 0; i<NSIG; i++) {
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
+ continue;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ fn = runtime·sighandler;
+ else
+ fn = runtime·sigignore;
+ sigaction(i, fn, (runtime·sigtab[i].flags & SigRestart) != 0);
+ }
+ }
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ Itimerval it;
+
+ runtime·memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ sigaction(SIGPROF, SIG_IGN, true);
+ } else {
+ sigaction(SIGPROF, runtime·sighandler, true);
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ }
+ m->profilehz = hz;
+}
+
+void
+os·sigpipe(void)
+{
+ sigaction(SIGPIPE, SIG_DFL, false);
+ runtime·raisesigpipe();
+}
diff --git a/src/pkg/runtime/freebsd/amd64/sys.s b/src/pkg/runtime/freebsd/amd64/sys.s
new file mode 100644
index 000000000..c5cc082e4
--- /dev/null
+++ b/src/pkg/runtime/freebsd/amd64/sys.s
@@ -0,0 +1,182 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// System calls and other sys.stuff for AMD64, FreeBSD
+// /usr/src/sys/kern/syscalls.master for syscall numbers.
+//
+
+#include "amd64/asm.h"
+
+TEXT runtime·sys_umtx_op(SB),7,$0
+ MOVQ 8(SP), DI
+ MOVL 16(SP), SI
+ MOVL 20(SP), DX
+ MOVQ 24(SP), R10
+ MOVQ 32(SP), R8
+ MOVL $454, AX
+ SYSCALL
+ RET
+
+TEXT runtime·thr_new(SB),7,$0
+ MOVQ 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVL $455, AX
+ SYSCALL
+ RET
+
+TEXT runtime·thr_start(SB),7,$0
+ MOVQ DI, R13 // m
+
+ // set up FS to point at m->tls
+ LEAQ m_tls(R13), DI
+ CALL runtime·settls(SB) // smashes DI
+
+ // set up m, g
+ get_tls(CX)
+ MOVQ R13, m(CX)
+ MOVQ m_g0(R13), DI
+ MOVQ DI, g(CX)
+
+ CALL runtime·stackcheck(SB)
+ CALL runtime·mstart(SB)
+ MOVQ 0, AX // crash (not reached)
+
+// Exit the entire program (like C exit)
+TEXT runtime·exit(SB),7,$-8
+ MOVL 8(SP), DI // arg 1 exit status
+ MOVL $1, AX
+ SYSCALL
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·exit1(SB),7,$-8
+ MOVQ 8(SP), DI // arg 1 exit status
+ MOVL $431, AX
+ SYSCALL
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·write(SB),7,$-8
+ MOVL 8(SP), DI // arg 1 fd
+ MOVQ 16(SP), SI // arg 2 buf
+ MOVL 24(SP), DX // arg 3 count
+ MOVL $4, AX
+ SYSCALL
+ RET
+
+TEXT runtime·raisesigpipe(SB),7,$16
+ // thr_self(&8(SP))
+ LEAQ 8(SP), DI // arg 1 &8(SP)
+ MOVL $432, AX
+ SYSCALL
+ // thr_kill(self, SIGPIPE)
+ MOVQ 8(SP), DI // arg 1 id
+ MOVQ $13, SI // arg 2 SIGPIPE
+ MOVL $433, AX
+ SYSCALL
+ RET
+
+TEXT runtime·setitimer(SB), 7, $-8
+ MOVL 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVQ 24(SP), DX
+ MOVL $83, AX
+ SYSCALL
+ RET
+
+TEXT runtime·gettime(SB), 7, $32
+ MOVL $116, AX
+ LEAQ 8(SP), DI
+ MOVQ $0, SI
+ SYSCALL
+
+ MOVQ 8(SP), BX // sec
+ MOVQ sec+0(FP), DI
+ MOVQ BX, (DI)
+
+ MOVL 16(SP), BX // usec
+ MOVQ usec+8(FP), DI
+ MOVL BX, (DI)
+ RET
+
+TEXT runtime·sigaction(SB),7,$-8
+ MOVL 8(SP), DI // arg 1 sig
+ MOVQ 16(SP), SI // arg 2 act
+ MOVQ 24(SP), DX // arg 3 oact
+ MOVL $416, AX
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·sigtramp(SB),7,$64
+ get_tls(BX)
+
+ // save g
+ MOVQ g(BX), R10
+ MOVQ R10, 40(SP)
+
+ // g = m->signal
+ MOVQ m(BX), BP
+ MOVQ m_gsignal(BP), BP
+ MOVQ BP, g(BX)
+
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ DX, 16(SP)
+ MOVQ R10, 24(SP)
+
+ CALL runtime·sighandler(SB)
+
+ // restore g
+ get_tls(BX)
+ MOVQ 40(SP), R10
+ MOVQ R10, g(BX)
+ RET
+
+TEXT runtime·mmap(SB),7,$0
+ MOVQ 8(SP), DI // arg 1 addr
+ MOVQ 16(SP), SI // arg 2 len
+ MOVL 24(SP), DX // arg 3 prot
+ MOVL 28(SP), R10 // arg 4 flags
+ MOVL 32(SP), R8 // arg 5 fid
+ MOVL 36(SP), R9 // arg 6 offset
+ MOVL $477, AX
+ SYSCALL
+ RET
+
+TEXT runtime·munmap(SB),7,$0
+ MOVQ 8(SP), DI // arg 1 addr
+ MOVQ 16(SP), SI // arg 2 len
+ MOVL $73, AX
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·notok(SB),7,$-8
+ MOVL $0xf1, BP
+ MOVQ BP, (BP)
+ RET
+
+TEXT runtime·sigaltstack(SB),7,$-8
+ MOVQ new+8(SP), DI
+ MOVQ old+16(SP), SI
+ MOVQ $53, AX
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+// set tls base to DI
+TEXT runtime·settls(SB),7,$8
+ ADDQ $16, DI // adjust for ELF: wants to use -16(FS) and -8(FS) for g and m
+ MOVQ DI, 0(SP)
+ MOVQ SP, SI
+ MOVQ $129, DI // AMD64_SET_FSBASE
+ MOVQ $165, AX // sysarch
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
diff --git a/src/pkg/runtime/freebsd/defs.c b/src/pkg/runtime/freebsd/defs.c
new file mode 100644
index 000000000..2ce4fdc51
--- /dev/null
+++ b/src/pkg/runtime/freebsd/defs.c
@@ -0,0 +1,108 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Input to godefs.
+ *
+ godefs -f -m64 defs.c >amd64/defs.h
+ godefs -f -m32 defs.c >386/defs.h
+ */
+
+#include <sys/types.h>
+#include <sys/time.h>
+#include <signal.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/ucontext.h>
+#include <sys/umtx.h>
+#include <sys/rtprio.h>
+#include <sys/thr.h>
+#include <sys/_sigset.h>
+#include <sys/unistd.h>
+
+enum {
+ $PROT_NONE = PROT_NONE,
+ $PROT_READ = PROT_READ,
+ $PROT_WRITE = PROT_WRITE,
+ $PROT_EXEC = PROT_EXEC,
+
+ $MAP_ANON = MAP_ANON,
+ $MAP_PRIVATE = MAP_PRIVATE,
+ $MAP_FIXED = MAP_FIXED,
+
+ $SA_SIGINFO = SA_SIGINFO,
+ $SA_RESTART = SA_RESTART,
+ $SA_ONSTACK = SA_ONSTACK,
+
+ $UMTX_OP_WAIT = UMTX_OP_WAIT,
+ $UMTX_OP_WAKE = UMTX_OP_WAKE,
+
+ $EINTR = EINTR,
+
+ $SIGHUP = SIGHUP,
+ $SIGINT = SIGINT,
+ $SIGQUIT = SIGQUIT,
+ $SIGILL = SIGILL,
+ $SIGTRAP = SIGTRAP,
+ $SIGABRT = SIGABRT,
+ $SIGEMT = SIGEMT,
+ $SIGFPE = SIGFPE,
+ $SIGKILL = SIGKILL,
+ $SIGBUS = SIGBUS,
+ $SIGSEGV = SIGSEGV,
+ $SIGSYS = SIGSYS,
+ $SIGPIPE = SIGPIPE,
+ $SIGALRM = SIGALRM,
+ $SIGTERM = SIGTERM,
+ $SIGURG = SIGURG,
+ $SIGSTOP = SIGSTOP,
+ $SIGTSTP = SIGTSTP,
+ $SIGCONT = SIGCONT,
+ $SIGCHLD = SIGCHLD,
+ $SIGTTIN = SIGTTIN,
+ $SIGTTOU = SIGTTOU,
+ $SIGIO = SIGIO,
+ $SIGXCPU = SIGXCPU,
+ $SIGXFSZ = SIGXFSZ,
+ $SIGVTALRM = SIGVTALRM,
+ $SIGPROF = SIGPROF,
+ $SIGWINCH = SIGWINCH,
+ $SIGINFO = SIGINFO,
+ $SIGUSR1 = SIGUSR1,
+ $SIGUSR2 = SIGUSR2,
+
+ $FPE_INTDIV = FPE_INTDIV,
+ $FPE_INTOVF = FPE_INTOVF,
+ $FPE_FLTDIV = FPE_FLTDIV,
+ $FPE_FLTOVF = FPE_FLTOVF,
+ $FPE_FLTUND = FPE_FLTUND,
+ $FPE_FLTRES = FPE_FLTRES,
+ $FPE_FLTINV = FPE_FLTINV,
+ $FPE_FLTSUB = FPE_FLTSUB,
+
+ $BUS_ADRALN = BUS_ADRALN,
+ $BUS_ADRERR = BUS_ADRERR,
+ $BUS_OBJERR = BUS_OBJERR,
+
+ $SEGV_MAPERR = SEGV_MAPERR,
+ $SEGV_ACCERR = SEGV_ACCERR,
+
+ $ITIMER_REAL = ITIMER_REAL,
+ $ITIMER_VIRTUAL = ITIMER_VIRTUAL,
+ $ITIMER_PROF = ITIMER_PROF,
+};
+
+typedef struct rtprio $Rtprio;
+typedef struct thr_param $ThrParam;
+typedef struct sigaltstack $Sigaltstack;
+typedef struct __sigset $Sigset;
+typedef union sigval $Sigval;
+typedef stack_t $StackT;
+
+typedef siginfo_t $Siginfo;
+
+typedef mcontext_t $Mcontext;
+typedef ucontext_t $Ucontext;
+typedef struct timeval $Timeval;
+typedef struct itimerval $Itimerval;
diff --git a/src/pkg/runtime/freebsd/mem.c b/src/pkg/runtime/freebsd/mem.c
new file mode 100644
index 000000000..07abf2cfe
--- /dev/null
+++ b/src/pkg/runtime/freebsd/mem.c
@@ -0,0 +1,74 @@
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "malloc.h"
+
+void*
+runtime·SysAlloc(uintptr n)
+{
+ void *v;
+
+ mstats.sys += n;
+ v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(v < (void*)4096)
+ return nil;
+ return v;
+}
+
+void
+runtime·SysUnused(void *v, uintptr n)
+{
+ USED(v);
+ USED(n);
+ // TODO(rsc): call madvise MADV_DONTNEED
+}
+
+void
+runtime·SysFree(void *v, uintptr n)
+{
+ mstats.sys -= n;
+ runtime·munmap(v, n);
+}
+
+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+ // On 64-bit, people with ulimit -v set complain if we reserve too
+ // much address space. Instead, assume that the reservation is okay
+ // and check the assumption in SysMap.
+ if(sizeof(void*) == 8)
+ return v;
+
+ return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
+enum
+{
+ ENOMEM = 12,
+};
+
+void
+runtime·SysMap(void *v, uintptr n)
+{
+ void *p;
+
+ mstats.sys += n;
+
+ // On 64-bit, we don't actually have v reserved, so tread carefully.
+ if(sizeof(void*) == 8) {
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p == (void*)-ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v) {
+ runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
+ runtime·throw("runtime: address space conflict");
+ }
+ return;
+ }
+
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+ if(p == (void*)-ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v)
+ runtime·throw("runtime: cannot map pages in arena address space");
+}
diff --git a/src/pkg/runtime/freebsd/os.h b/src/pkg/runtime/freebsd/os.h
new file mode 100644
index 000000000..007856c6b
--- /dev/null
+++ b/src/pkg/runtime/freebsd/os.h
@@ -0,0 +1,12 @@
+#define SIG_DFL ((void*)0)
+#define SIG_IGN ((void*)1)
+
+int32 runtime·thr_new(ThrParam*, int32);
+void runtime·sigpanic(void);
+void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
+struct sigaction;
+void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
+void runtiem·setitimerval(int32, Itimerval*, Itimerval*);
+void runtime·setitimer(int32, Itimerval*, Itimerval*);
+
+void runtime·raisesigpipe(void);
diff --git a/src/pkg/runtime/freebsd/signals.h b/src/pkg/runtime/freebsd/signals.h
new file mode 100644
index 000000000..63a84671d
--- /dev/null
+++ b/src/pkg/runtime/freebsd/signals.h
@@ -0,0 +1,52 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define C SigCatch
+#define I SigIgnore
+#define R SigRestart
+#define Q SigQueue
+#define P SigPanic
+
+SigTab runtime·sigtab[] = {
+ /* 0 */ 0, "SIGNONE: no trap",
+ /* 1 */ Q+R, "SIGHUP: terminal line hangup",
+ /* 2 */ Q+R, "SIGINT: interrupt",
+ /* 3 */ C, "SIGQUIT: quit",
+ /* 4 */ C, "SIGILL: illegal instruction",
+ /* 5 */ C, "SIGTRAP: trace trap",
+ /* 6 */ C, "SIGABRT: abort",
+ /* 7 */ C, "SIGEMT: EMT instruction",
+ /* 8 */ C+P, "SIGFPE: floating-point exception",
+ /* 9 */ 0, "SIGKILL: kill",
+ /* 10 */ C+P, "SIGBUS: bus error",
+ /* 11 */ C+P, "SIGSEGV: segmentation violation",
+ /* 12 */ C, "SIGSYS: bad system call",
+ /* 13 */ I, "SIGPIPE: write to broken pipe",
+ /* 14 */ Q+I+R, "SIGALRM: alarm clock",
+ /* 15 */ Q+R, "SIGTERM: termination",
+ /* 16 */ Q+I+R, "SIGURG: urgent condition on socket",
+ /* 17 */ 0, "SIGSTOP: stop, unblockable",
+ /* 18 */ Q+I+R, "SIGTSTP: stop from tty",
+ /* 19 */ 0, "SIGCONT: continue",
+ /* 20 */ Q+I+R, "SIGCHLD: child status has changed",
+ /* 21 */ Q+I+R, "SIGTTIN: background read from tty",
+ /* 22 */ Q+I+R, "SIGTTOU: background write to tty",
+ /* 23 */ Q+I+R, "SIGIO: i/o now possible",
+ /* 24 */ Q+I+R, "SIGXCPU: cpu limit exceeded",
+ /* 25 */ Q+I+R, "SIGXFSZ: file size limit exceeded",
+ /* 26 */ Q+I+R, "SIGVTALRM: virtual alarm clock",
+ /* 27 */ Q+I+R, "SIGPROF: profiling alarm clock",
+ /* 28 */ Q+I+R, "SIGWINCH: window size change",
+ /* 29 */ Q+I+R, "SIGINFO: information request",
+ /* 30 */ Q+I+R, "SIGUSR1: user-defined signal 1",
+ /* 31 */ Q+I+R, "SIGUSR2: user-defined signal 2",
+ /* 32 */ Q+I+R, "SIGTHR: reserved",
+};
+#undef C
+#undef I
+#undef R
+#undef Q
+#undef P
+
+#define NSIG 33
diff --git a/src/pkg/runtime/freebsd/thread.c b/src/pkg/runtime/freebsd/thread.c
new file mode 100644
index 000000000..f8c550f57
--- /dev/null
+++ b/src/pkg/runtime/freebsd/thread.c
@@ -0,0 +1,201 @@
+// Use of this source file is governed by a BSD-style
+// license that can be found in the LICENSE file.`
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "stack.h"
+
+extern SigTab runtime·sigtab[];
+extern int32 runtime·sys_umtx_op(uint32*, int32, uint32, void*, void*);
+
+// FreeBSD's umtx_op syscall is effectively the same as Linux's futex, and
+// thus the code is largely similar. See linux/thread.c for comments.
+
+static void
+umtx_wait(uint32 *addr, uint32 val)
+{
+ int32 ret;
+
+ ret = runtime·sys_umtx_op(addr, UMTX_OP_WAIT, val, nil, nil);
+ if(ret >= 0 || ret == -EINTR)
+ return;
+
+ runtime·printf("umtx_wait addr=%p val=%d ret=%d\n", addr, val, ret);
+ *(int32*)0x1005 = 0x1005;
+}
+
+static void
+umtx_wake(uint32 *addr)
+{
+ int32 ret;
+
+ ret = runtime·sys_umtx_op(addr, UMTX_OP_WAKE, 1, nil, nil);
+ if(ret >= 0)
+ return;
+
+ runtime·printf("umtx_wake addr=%p ret=%d\n", addr, ret);
+ *(int32*)0x1006 = 0x1006;
+}
+
+// See linux/thread.c for comments about the algorithm.
+static void
+umtx_lock(Lock *l)
+{
+ uint32 v;
+
+again:
+ v = l->key;
+ if((v&1) == 0){
+ if(runtime·cas(&l->key, v, v|1))
+ return;
+ goto again;
+ }
+
+ if(!runtime·cas(&l->key, v, v+2))
+ goto again;
+
+ umtx_wait(&l->key, v+2);
+
+ for(;;){
+ v = l->key;
+ if(v < 2)
+ runtime·throw("bad lock key");
+ if(runtime·cas(&l->key, v, v-2))
+ break;
+ }
+
+ goto again;
+}
+
+static void
+umtx_unlock(Lock *l)
+{
+ uint32 v;
+
+again:
+ v = l->key;
+ if((v&1) == 0)
+ runtime·throw("unlock of unlocked lock");
+ if(!runtime·cas(&l->key, v, v&~1))
+ goto again;
+
+ if(v&~1)
+ umtx_wake(&l->key);
+}
+
+void
+runtime·lock(Lock *l)
+{
+ if(m->locks < 0)
+ runtime·throw("lock count");
+ m->locks++;
+ umtx_lock(l);
+}
+
+void
+runtime·unlock(Lock *l)
+{
+ m->locks--;
+ if(m->locks < 0)
+ runtime·throw("lock count");
+ umtx_unlock(l);
+}
+
+// Event notifications.
+void
+runtime·noteclear(Note *n)
+{
+ n->lock.key = 0;
+ umtx_lock(&n->lock);
+}
+
+void
+runtime·notesleep(Note *n)
+{
+ umtx_lock(&n->lock);
+ umtx_unlock(&n->lock);
+}
+
+void
+runtime·notewakeup(Note *n)
+{
+ umtx_unlock(&n->lock);
+}
+
+void runtime·thr_start(void*);
+
+void
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
+{
+ ThrParam param;
+
+ USED(fn); // thr_start assumes fn == mstart
+ USED(g); // thr_start assumes g == m->g0
+
+ if(0){
+ runtime·printf("newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
+ stk, m, g, fn, m->id, m->tls[0], &m);
+ }
+
+ runtime·memclr((byte*)&param, sizeof param);
+
+ param.start_func = runtime·thr_start;
+ param.arg = m;
+ param.stack_base = (int8*)g->stackbase;
+ param.stack_size = (byte*)stk - (byte*)g->stackbase;
+ param.child_tid = (intptr*)&m->procid;
+ param.parent_tid = nil;
+ param.tls_base = (int8*)&m->tls[0];
+ param.tls_size = sizeof m->tls;
+
+ m->tls[0] = m->id; // so 386 asm can find it
+
+ runtime·thr_new(&param, sizeof param);
+}
+
+void
+runtime·osinit(void)
+{
+}
+
+void
+runtime·goenvs(void)
+{
+ runtime·goenvs_unix();
+}
+
+// Called to initialize a new m (including the bootstrap m).
+void
+runtime·minit(void)
+{
+ // Initialize signal handling
+ m->gsignal = runtime·malg(32*1024);
+ runtime·signalstack(m->gsignal->stackguard - StackGuard, 32*1024);
+}
+
+void
+runtime·sigpanic(void)
+{
+ switch(g->sig) {
+ case SIGBUS:
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGSEGV:
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGFPE:
+ switch(g->sigcode0) {
+ case FPE_INTDIV:
+ runtime·panicstring("integer divide by zero");
+ case FPE_INTOVF:
+ runtime·panicstring("integer overflow");
+ }
+ runtime·panicstring("floating point error");
+ }
+ runtime·panicstring(runtime·sigtab[g->sig].name);
+}
diff --git a/src/pkg/runtime/goc2c.c b/src/pkg/runtime/goc2c.c
new file mode 100644
index 000000000..61236e226
--- /dev/null
+++ b/src/pkg/runtime/goc2c.c
@@ -0,0 +1,727 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Translate a .goc file into a .c file. A .goc file is a combination
+ * of a limited form of Go with C.
+ */
+
+/*
+ package PACKAGENAME
+ {# line}
+ func NAME([NAME TYPE { , NAME TYPE }]) [(NAME TYPE { , NAME TYPE })] \{
+ C code with proper brace nesting
+ \}
+*/
+
+/*
+ * We generate C code which implements the function such that it can
+ * be called from Go and executes the C code.
+ */
+
+#include <u.h>
+#include <stdio.h>
+#include <libc.h>
+
+/* Whether we're emitting for gcc */
+static int gcc;
+
+/* File and line number */
+static const char *file;
+static unsigned int lineno = 1;
+
+/* List of names and types. */
+struct params {
+ struct params *next;
+ char *name;
+ char *type;
+};
+
+/* index into type_table */
+enum {
+ Bool,
+ Float,
+ Int,
+ Uint,
+ Uintptr,
+ String,
+ Slice,
+ Eface,
+};
+
+static struct {
+ char *name;
+ int size;
+} type_table[] = {
+ /* variable sized first, for easy replacement */
+ /* order matches enum above */
+ /* default is 32-bit architecture sizes */
+ "bool", 1,
+ "float", 4,
+ "int", 4,
+ "uint", 4,
+ "uintptr", 4,
+ "String", 8,
+ "Slice", 12,
+ "Eface", 8,
+
+ /* fixed size */
+ "float32", 4,
+ "float64", 8,
+ "byte", 1,
+ "int8", 1,
+ "uint8", 1,
+ "int16", 2,
+ "uint16", 2,
+ "int32", 4,
+ "uint32", 4,
+ "int64", 8,
+ "uint64", 8,
+
+ NULL,
+};
+
+/* Fixed structure alignment (non-gcc only) */
+int structround = 4;
+
+/* Unexpected EOF. */
+static void
+bad_eof(void)
+{
+ sysfatal("%s:%ud: unexpected EOF\n", file, lineno);
+}
+
+/* Out of memory. */
+static void
+bad_mem(void)
+{
+ sysfatal("%s:%ud: out of memory\n", file, lineno);
+}
+
+/* Allocate memory without fail. */
+static void *
+xmalloc(unsigned int size)
+{
+ void *ret = malloc(size);
+ if (ret == NULL)
+ bad_mem();
+ return ret;
+}
+
+/* Reallocate memory without fail. */
+static void*
+xrealloc(void *buf, unsigned int size)
+{
+ void *ret = realloc(buf, size);
+ if (ret == NULL)
+ bad_mem();
+ return ret;
+}
+
+/* Free a list of parameters. */
+static void
+free_params(struct params *p)
+{
+ while (p != NULL) {
+ struct params *next;
+
+ next = p->next;
+ free(p->name);
+ free(p->type);
+ free(p);
+ p = next;
+ }
+}
+
+/* Read a character, tracking lineno. */
+static int
+getchar_update_lineno(void)
+{
+ int c;
+
+ c = getchar();
+ if (c == '\n')
+ ++lineno;
+ return c;
+}
+
+/* Read a character, giving an error on EOF, tracking lineno. */
+static int
+getchar_no_eof(void)
+{
+ int c;
+
+ c = getchar_update_lineno();
+ if (c == EOF)
+ bad_eof();
+ return c;
+}
+
+/* Read a character, skipping comments. */
+static int
+getchar_skipping_comments(void)
+{
+ int c;
+
+ while (1) {
+ c = getchar_update_lineno();
+ if (c != '/')
+ return c;
+
+ c = getchar();
+ if (c == '/') {
+ do {
+ c = getchar_update_lineno();
+ } while (c != EOF && c != '\n');
+ return c;
+ } else if (c == '*') {
+ while (1) {
+ c = getchar_update_lineno();
+ if (c == EOF)
+ return EOF;
+ if (c == '*') {
+ do {
+ c = getchar_update_lineno();
+ } while (c == '*');
+ if (c == '/')
+ break;
+ }
+ }
+ } else {
+ ungetc(c, stdin);
+ return '/';
+ }
+ }
+}
+
+/*
+ * Read and return a token. Tokens are delimited by whitespace or by
+ * [(),{}]. The latter are all returned as single characters.
+ */
+static char *
+read_token(void)
+{
+ int c;
+ char *buf;
+ unsigned int alc, off;
+ const char* delims = "(),{}";
+
+ while (1) {
+ c = getchar_skipping_comments();
+ if (c == EOF)
+ return NULL;
+ if (!isspace(c))
+ break;
+ }
+ alc = 16;
+ buf = xmalloc(alc + 1);
+ off = 0;
+ if (strchr(delims, c) != NULL) {
+ buf[off] = c;
+ ++off;
+ } else {
+ while (1) {
+ if (off >= alc) {
+ alc *= 2;
+ buf = xrealloc(buf, alc + 1);
+ }
+ buf[off] = c;
+ ++off;
+ c = getchar_skipping_comments();
+ if (c == EOF)
+ break;
+ if (isspace(c) || strchr(delims, c) != NULL) {
+ if (c == '\n')
+ lineno--;
+ ungetc(c, stdin);
+ break;
+ }
+ }
+ }
+ buf[off] = '\0';
+ return buf;
+}
+
+/* Read a token, giving an error on EOF. */
+static char *
+read_token_no_eof(void)
+{
+ char *token = read_token();
+ if (token == NULL)
+ bad_eof();
+ return token;
+}
+
+/* Read the package clause, and return the package name. */
+static char *
+read_package(void)
+{
+ char *token;
+
+ token = read_token_no_eof();
+ if (token == nil)
+ sysfatal("%s:%ud: no token\n", file, lineno);
+ if (strcmp(token, "package") != 0) {
+ sysfatal("%s:%ud: expected \"package\", got \"%s\"\n",
+ file, lineno, token);
+ }
+ return read_token_no_eof();
+}
+
+/* Read and copy preprocessor lines. */
+static void
+read_preprocessor_lines(void)
+{
+ while (1) {
+ int c;
+
+ do {
+ c = getchar_skipping_comments();
+ } while (isspace(c));
+ if (c != '#') {
+ ungetc(c, stdin);
+ break;
+ }
+ putchar(c);
+ do {
+ c = getchar_update_lineno();
+ putchar(c);
+ } while (c != '\n');
+ }
+}
+
+/*
+ * Read a type in Go syntax and return a type in C syntax. We only
+ * permit basic types and pointers.
+ */
+static char *
+read_type(void)
+{
+ char *p, *op, *q;
+ int pointer_count;
+ unsigned int len;
+
+ p = read_token_no_eof();
+ if (*p != '*')
+ return p;
+ op = p;
+ pointer_count = 0;
+ while (*p == '*') {
+ ++pointer_count;
+ ++p;
+ }
+ len = strlen(p);
+ q = xmalloc(len + pointer_count + 1);
+ memcpy(q, p, len);
+ while (pointer_count > 0) {
+ q[len] = '*';
+ ++len;
+ --pointer_count;
+ }
+ q[len] = '\0';
+ free(op);
+ return q;
+}
+
+/* Return the size of the given type. */
+static int
+type_size(char *p)
+{
+ int i;
+
+ if(p[strlen(p)-1] == '*')
+ return type_table[Uintptr].size;
+
+ for(i=0; type_table[i].name; i++)
+ if(strcmp(type_table[i].name, p) == 0)
+ return type_table[i].size;
+ sysfatal("%s:%ud: unknown type %s\n", file, lineno, p);
+ return 0;
+}
+
+/*
+ * Read a list of parameters. Each parameter is a name and a type.
+ * The list ends with a ')'. We have already read the '('.
+ */
+static struct params *
+read_params(int *poffset)
+{
+ char *token;
+ struct params *ret, **pp, *p;
+ int offset, size, rnd;
+
+ ret = NULL;
+ pp = &ret;
+ token = read_token_no_eof();
+ offset = 0;
+ if (strcmp(token, ")") != 0) {
+ while (1) {
+ p = xmalloc(sizeof(struct params));
+ p->name = token;
+ p->type = read_type();
+ p->next = NULL;
+ *pp = p;
+ pp = &p->next;
+
+ size = type_size(p->type);
+ rnd = size;
+ if(rnd > structround)
+ rnd = structround;
+ if(offset%rnd)
+ offset += rnd - offset%rnd;
+ offset += size;
+
+ token = read_token_no_eof();
+ if (strcmp(token, ",") != 0)
+ break;
+ token = read_token_no_eof();
+ }
+ }
+ if (strcmp(token, ")") != 0) {
+ sysfatal("%s:%ud: expected '('\n",
+ file, lineno);
+ }
+ if (poffset != NULL)
+ *poffset = offset;
+ return ret;
+}
+
+/*
+ * Read a function header. This reads up to and including the initial
+ * '{' character. Returns 1 if it read a header, 0 at EOF.
+ */
+static int
+read_func_header(char **name, struct params **params, int *paramwid, struct params **rets)
+{
+ int lastline;
+ char *token;
+
+ lastline = -1;
+ while (1) {
+ token = read_token();
+ if (token == NULL)
+ return 0;
+ if (strcmp(token, "func") == 0) {
+ if(lastline != -1)
+ printf("\n");
+ break;
+ }
+ if (lastline != lineno) {
+ if (lastline == lineno-1)
+ printf("\n");
+ else
+ printf("\n#line %d \"%s\"\n", lineno, file);
+ lastline = lineno;
+ }
+ printf("%s ", token);
+ }
+
+ *name = read_token_no_eof();
+
+ token = read_token();
+ if (token == NULL || strcmp(token, "(") != 0) {
+ sysfatal("%s:%ud: expected \"(\"\n",
+ file, lineno);
+ }
+ *params = read_params(paramwid);
+
+ token = read_token();
+ if (token == NULL || strcmp(token, "(") != 0)
+ *rets = NULL;
+ else {
+ *rets = read_params(NULL);
+ token = read_token();
+ }
+ if (token == NULL || strcmp(token, "{") != 0) {
+ sysfatal("%s:%ud: expected \"{\"\n",
+ file, lineno);
+ }
+ return 1;
+}
+
+/* Write out parameters. */
+static void
+write_params(struct params *params, int *first)
+{
+ struct params *p;
+
+ for (p = params; p != NULL; p = p->next) {
+ if (*first)
+ *first = 0;
+ else
+ printf(", ");
+ printf("%s %s", p->type, p->name);
+ }
+}
+
+/* Write a 6g function header. */
+static void
+write_6g_func_header(char *package, char *name, struct params *params,
+ int paramwid, struct params *rets)
+{
+ int first, n;
+
+ printf("void\n%s·%s(", package, name);
+ first = 1;
+ write_params(params, &first);
+
+ /* insert padding to align output struct */
+ if(rets != NULL && paramwid%structround != 0) {
+ n = structround - paramwid%structround;
+ if(n & 1)
+ printf(", uint8");
+ if(n & 2)
+ printf(", uint16");
+ if(n & 4)
+ printf(", uint32");
+ }
+
+ write_params(rets, &first);
+ printf(")\n{\n");
+}
+
+/* Write a 6g function trailer. */
+static void
+write_6g_func_trailer(struct params *rets)
+{
+ struct params *p;
+
+ for (p = rets; p != NULL; p = p->next)
+ printf("\tFLUSH(&%s);\n", p->name);
+ printf("}\n");
+}
+
+/* Define the gcc function return type if necessary. */
+static void
+define_gcc_return_type(char *package, char *name, struct params *rets)
+{
+ struct params *p;
+
+ if (rets == NULL || rets->next == NULL)
+ return;
+ printf("struct %s_%s_ret {\n", package, name);
+ for (p = rets; p != NULL; p = p->next)
+ printf(" %s %s;\n", p->type, p->name);
+ printf("};\n");
+}
+
+/* Write out the gcc function return type. */
+static void
+write_gcc_return_type(char *package, char *name, struct params *rets)
+{
+ if (rets == NULL)
+ printf("void");
+ else if (rets->next == NULL)
+ printf("%s", rets->type);
+ else
+ printf("struct %s_%s_ret", package, name);
+}
+
+/* Write out a gcc function header. */
+static void
+write_gcc_func_header(char *package, char *name, struct params *params,
+ struct params *rets)
+{
+ int first;
+ struct params *p;
+
+ define_gcc_return_type(package, name, rets);
+ write_gcc_return_type(package, name, rets);
+ printf(" %s_%s(", package, name);
+ first = 1;
+ write_params(params, &first);
+ printf(") asm (\"%s.%s\");\n", package, name);
+ write_gcc_return_type(package, name, rets);
+ printf(" %s_%s(", package, name);
+ first = 1;
+ write_params(params, &first);
+ printf(")\n{\n");
+ for (p = rets; p != NULL; p = p->next)
+ printf(" %s %s;\n", p->type, p->name);
+}
+
+/* Write out a gcc function trailer. */
+static void
+write_gcc_func_trailer(char *package, char *name, struct params *rets)
+{
+ if (rets == NULL)
+ ;
+ else if (rets->next == NULL)
+ printf("return %s;\n", rets->name);
+ else {
+ struct params *p;
+
+ printf(" {\n struct %s_%s_ret __ret;\n", package, name);
+ for (p = rets; p != NULL; p = p->next)
+ printf(" __ret.%s = %s;\n", p->name, p->name);
+ printf(" return __ret;\n }\n");
+ }
+ printf("}\n");
+}
+
+/* Write out a function header. */
+static void
+write_func_header(char *package, char *name,
+ struct params *params, int paramwid,
+ struct params *rets)
+{
+ if (gcc)
+ write_gcc_func_header(package, name, params, rets);
+ else
+ write_6g_func_header(package, name, params, paramwid, rets);
+ printf("#line %d \"%s\"\n", lineno, file);
+}
+
+/* Write out a function trailer. */
+static void
+write_func_trailer(char *package, char *name,
+ struct params *rets)
+{
+ if (gcc)
+ write_gcc_func_trailer(package, name, rets);
+ else
+ write_6g_func_trailer(rets);
+}
+
+/*
+ * Read and write the body of the function, ending in an unnested }
+ * (which is read but not written).
+ */
+static void
+copy_body(void)
+{
+ int nesting = 0;
+ while (1) {
+ int c;
+
+ c = getchar_no_eof();
+ if (c == '}' && nesting == 0)
+ return;
+ putchar(c);
+ switch (c) {
+ default:
+ break;
+ case '{':
+ ++nesting;
+ break;
+ case '}':
+ --nesting;
+ break;
+ case '/':
+ c = getchar_update_lineno();
+ putchar(c);
+ if (c == '/') {
+ do {
+ c = getchar_no_eof();
+ putchar(c);
+ } while (c != '\n');
+ } else if (c == '*') {
+ while (1) {
+ c = getchar_no_eof();
+ putchar(c);
+ if (c == '*') {
+ do {
+ c = getchar_no_eof();
+ putchar(c);
+ } while (c == '*');
+ if (c == '/')
+ break;
+ }
+ }
+ }
+ break;
+ case '"':
+ case '\'':
+ {
+ int delim = c;
+ do {
+ c = getchar_no_eof();
+ putchar(c);
+ if (c == '\\') {
+ c = getchar_no_eof();
+ putchar(c);
+ c = '\0';
+ }
+ } while (c != delim);
+ }
+ break;
+ }
+ }
+}
+
+/* Process the entire file. */
+static void
+process_file(void)
+{
+ char *package, *name;
+ struct params *params, *rets;
+ int paramwid;
+
+ package = read_package();
+ read_preprocessor_lines();
+ while (read_func_header(&name, &params, &paramwid, &rets)) {
+ write_func_header(package, name, params, paramwid, rets);
+ copy_body();
+ write_func_trailer(package, name, rets);
+ free(name);
+ free_params(params);
+ free_params(rets);
+ }
+ free(package);
+}
+
+static void
+usage(void)
+{
+ sysfatal("Usage: goc2c [--6g | --gc] [file]\n");
+}
+
+void
+main(int argc, char **argv)
+{
+ char *goarch;
+
+ argv0 = argv[0];
+ while(argc > 1 && argv[1][0] == '-') {
+ if(strcmp(argv[1], "-") == 0)
+ break;
+ if(strcmp(argv[1], "--6g") == 0)
+ gcc = 0;
+ else if(strcmp(argv[1], "--gcc") == 0)
+ gcc = 1;
+ else
+ usage();
+ argc--;
+ argv++;
+ }
+
+ if(argc <= 1 || strcmp(argv[1], "-") == 0) {
+ file = "<stdin>";
+ process_file();
+ exits(0);
+ }
+
+ if(argc > 2)
+ usage();
+
+ file = argv[1];
+ if(freopen(file, "r", stdin) == 0) {
+ sysfatal("open %s: %r\n", file);
+ }
+
+ if(!gcc) {
+ // 6g etc; update size table
+ goarch = getenv("GOARCH");
+ if(goarch != NULL && strcmp(goarch, "amd64") == 0) {
+ type_table[Uintptr].size = 8;
+ type_table[String].size = 16;
+ type_table[Slice].size = 8+4+4;
+ type_table[Eface].size = 8+8;
+ structround = 8;
+ }
+ }
+
+ process_file();
+ exits(0);
+}
diff --git a/src/pkg/runtime/hashmap.c b/src/pkg/runtime/hashmap.c
new file mode 100644
index 000000000..0c0e3e4a2
--- /dev/null
+++ b/src/pkg/runtime/hashmap.c
@@ -0,0 +1,1180 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "hashmap.h"
+#include "type.h"
+
+/* Return a pointer to the struct/union of type "type"
+ whose "field" field is addressed by pointer "p". */
+
+struct Hmap { /* a hash table; initialize with hash_init() */
+ uint32 count; /* elements in table - must be first */
+
+ uint8 datasize; /* amount of data to store in entry */
+ uint8 max_power; /* max power of 2 to create sub-tables */
+ uint8 max_probes; /* max entries to probe before rehashing */
+ uint8 indirectval; /* storing pointers to values */
+ int32 changes; /* inc'ed whenever a subtable is created/grown */
+ hash_hash_t (*data_hash) (uint32, void *a); /* return hash of *a */
+ uint32 (*data_eq) (uint32, void *a, void *b); /* return whether *a == *b */
+ void (*data_del) (uint32, void *arg, void *data); /* invoked on deletion */
+ struct hash_subtable *st; /* first-level table */
+
+ uint32 keysize;
+ uint32 valsize;
+ uint32 datavo;
+
+ // three sets of offsets: the digit counts how many
+ // of key, value are passed as inputs:
+ // 0 = func() (key, value)
+ // 1 = func(key) (value)
+ // 2 = func(key, value)
+ uint32 ko0;
+ uint32 vo0;
+ uint32 ko1;
+ uint32 vo1;
+ uint32 po1;
+ uint32 ko2;
+ uint32 vo2;
+ uint32 po2;
+ Alg* keyalg;
+ Alg* valalg;
+};
+
+struct hash_entry {
+ hash_hash_t hash; /* hash value of data */
+ byte data[1]; /* user data has "datasize" bytes */
+};
+
+struct hash_subtable {
+ uint8 power; /* bits used to index this table */
+ uint8 used; /* bits in hash used before reaching this table */
+ uint8 datasize; /* bytes of client data in an entry */
+ uint8 max_probes; /* max number of probes when searching */
+ int16 limit_bytes; /* max_probes * (datasize+sizeof (hash_hash_t)) */
+ struct hash_entry *end; /* points just past end of entry[] */
+ struct hash_entry entry[1]; /* 2**power+max_probes-1 elements of elemsize bytes */
+};
+
+#define HASH_DATA_EQ(h,x,y) ((*h->data_eq) (h->keysize, (x), (y)))
+
+#define HASH_REHASH 0x2 /* an internal flag */
+/* the number of bits used is stored in the flags word too */
+#define HASH_USED(x) ((x) >> 2)
+#define HASH_MAKE_USED(x) ((x) << 2)
+
+#define HASH_LOW 6
+#define HASH_ONE (((hash_hash_t)1) << HASH_LOW)
+#define HASH_MASK (HASH_ONE - 1)
+#define HASH_ADJUST(x) (((x) < HASH_ONE) << HASH_LOW)
+
+#define HASH_BITS (sizeof (hash_hash_t) * 8)
+
+#define HASH_SUBHASH HASH_MASK
+#define HASH_NIL 0
+#define HASH_NIL_MEMSET 0
+
+#define HASH_OFFSET(base, byte_offset) \
+ ((struct hash_entry *) (((byte *) (base)) + (byte_offset)))
+
+
+/* return a hash layer with 2**power empty entries */
+static struct hash_subtable *
+hash_subtable_new (Hmap *h, int32 power, int32 used)
+{
+ int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
+ int32 bytes = elemsize << power;
+ struct hash_subtable *st;
+ int32 limit_bytes = h->max_probes * elemsize;
+ int32 max_probes = h->max_probes;
+
+ if (bytes < limit_bytes) {
+ limit_bytes = bytes;
+ max_probes = 1 << power;
+ }
+ bytes += limit_bytes - elemsize;
+ st = malloc (offsetof (struct hash_subtable, entry[0]) + bytes);
+ st->power = power;
+ st->used = used;
+ st->datasize = h->datasize;
+ st->max_probes = max_probes;
+ st->limit_bytes = limit_bytes;
+ st->end = HASH_OFFSET (st->entry, bytes);
+ memset (st->entry, HASH_NIL_MEMSET, bytes);
+ return (st);
+}
+
+static void
+init_sizes (int64 hint, int32 *init_power, int32 *max_power)
+{
+ int32 log = 0;
+ int32 i;
+
+ for (i = 32; i != 0; i >>= 1) {
+ if ((hint >> (log + i)) != 0) {
+ log += i;
+ }
+ }
+ log += 1 + (((hint << 3) >> log) >= 11); /* round up for utilization */
+ if (log <= 14) {
+ *init_power = log;
+ } else {
+ *init_power = 12;
+ }
+ *max_power = 12;
+}
+
+static void
+hash_init (Hmap *h,
+ int32 datasize,
+ hash_hash_t (*data_hash) (uint32, void *),
+ uint32 (*data_eq) (uint32, void *, void *),
+ void (*data_del) (uint32, void *, void *),
+ int64 hint)
+{
+ int32 init_power;
+ int32 max_power;
+
+ if(datasize < sizeof (void *))
+ datasize = sizeof (void *);
+ datasize = runtime·rnd(datasize, sizeof (void *));
+ init_sizes (hint, &init_power, &max_power);
+ h->datasize = datasize;
+ h->max_power = max_power;
+ h->max_probes = 15;
+ assert (h->datasize == datasize);
+ assert (h->max_power == max_power);
+ assert (sizeof (void *) <= h->datasize || h->max_power == 255);
+ h->count = 0;
+ h->changes = 0;
+ h->data_hash = data_hash;
+ h->data_eq = data_eq;
+ h->data_del = data_del;
+ h->st = hash_subtable_new (h, init_power, 0);
+}
+
+static void
+hash_remove_n (struct hash_subtable *st, struct hash_entry *dst_e, int32 n)
+{
+ int32 elemsize = st->datasize + offsetof (struct hash_entry, data[0]);
+ struct hash_entry *src_e = HASH_OFFSET (dst_e, n * elemsize);
+ struct hash_entry *end_e = st->end;
+ int32 shift = HASH_BITS - (st->power + st->used);
+ int32 index_mask = (((hash_hash_t)1) << st->power) - 1;
+ int32 dst_i = (((byte *) dst_e) - ((byte *) st->entry)) / elemsize;
+ int32 src_i = dst_i + n;
+ hash_hash_t hash;
+ int32 skip;
+ int32 bytes;
+
+ while (dst_e != src_e) {
+ if (src_e != end_e) {
+ struct hash_entry *cp_e = src_e;
+ int32 save_dst_i = dst_i;
+ while (cp_e != end_e && (hash = cp_e->hash) != HASH_NIL &&
+ ((hash >> shift) & index_mask) <= dst_i) {
+ cp_e = HASH_OFFSET (cp_e, elemsize);
+ dst_i++;
+ }
+ bytes = ((byte *) cp_e) - (byte *) src_e;
+ memmove (dst_e, src_e, bytes);
+ dst_e = HASH_OFFSET (dst_e, bytes);
+ src_e = cp_e;
+ src_i += dst_i - save_dst_i;
+ if (src_e != end_e && (hash = src_e->hash) != HASH_NIL) {
+ skip = ((hash >> shift) & index_mask) - dst_i;
+ } else {
+ skip = src_i - dst_i;
+ }
+ } else {
+ skip = src_i - dst_i;
+ }
+ bytes = skip * elemsize;
+ memset (dst_e, HASH_NIL_MEMSET, bytes);
+ dst_e = HASH_OFFSET (dst_e, bytes);
+ dst_i += skip;
+ }
+}
+
+static int32
+hash_insert_internal (struct hash_subtable **pst, int32 flags, hash_hash_t hash,
+ Hmap *h, void *data, void **pres);
+
+static void
+hash_conv (Hmap *h,
+ struct hash_subtable *st, int32 flags,
+ hash_hash_t hash,
+ struct hash_entry *e)
+{
+ int32 new_flags = (flags + HASH_MAKE_USED (st->power)) | HASH_REHASH;
+ int32 shift = HASH_BITS - HASH_USED (new_flags);
+ hash_hash_t prefix_mask = (-(hash_hash_t)1) << shift;
+ int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
+ void *dummy_result;
+ struct hash_entry *de;
+ int32 index_mask = (1 << st->power) - 1;
+ hash_hash_t e_hash;
+ struct hash_entry *pe = HASH_OFFSET (e, -elemsize);
+
+ while (e != st->entry && (e_hash = pe->hash) != HASH_NIL && (e_hash & HASH_MASK) != HASH_SUBHASH) {
+ e = pe;
+ pe = HASH_OFFSET (pe, -elemsize);
+ }
+
+ de = e;
+ while (e != st->end &&
+ (e_hash = e->hash) != HASH_NIL &&
+ (e_hash & HASH_MASK) != HASH_SUBHASH) {
+ struct hash_entry *target_e = HASH_OFFSET (st->entry, ((e_hash >> shift) & index_mask) * elemsize);
+ struct hash_entry *ne = HASH_OFFSET (e, elemsize);
+ hash_hash_t current = e_hash & prefix_mask;
+ if (de < target_e) {
+ memset (de, HASH_NIL_MEMSET, ((byte *) target_e) - (byte *) de);
+ de = target_e;
+ }
+ if ((hash & prefix_mask) == current ||
+ (ne != st->end && (e_hash = ne->hash) != HASH_NIL &&
+ (e_hash & prefix_mask) == current)) {
+ struct hash_subtable *new_st = hash_subtable_new (h, 1, HASH_USED (new_flags));
+ int32 rc = hash_insert_internal (&new_st, new_flags, e->hash, h, e->data, &dummy_result);
+ assert (rc == 0);
+ memcpy(dummy_result, e->data, h->datasize);
+ e = ne;
+ while (e != st->end && (e_hash = e->hash) != HASH_NIL && (e_hash & prefix_mask) == current) {
+ assert ((e_hash & HASH_MASK) != HASH_SUBHASH);
+ rc = hash_insert_internal (&new_st, new_flags, e_hash, h, e->data, &dummy_result);
+ assert (rc == 0);
+ memcpy(dummy_result, e->data, h->datasize);
+ e = HASH_OFFSET (e, elemsize);
+ }
+ memset (de->data, HASH_NIL_MEMSET, h->datasize);
+ *(struct hash_subtable **)de->data = new_st;
+ de->hash = current | HASH_SUBHASH;
+ } else {
+ if (e != de) {
+ memcpy (de, e, elemsize);
+ }
+ e = HASH_OFFSET (e, elemsize);
+ }
+ de = HASH_OFFSET (de, elemsize);
+ }
+ if (e != de) {
+ hash_remove_n (st, de, (((byte *) e) - (byte *) de) / elemsize);
+ }
+}
+
+static void
+hash_grow (Hmap *h, struct hash_subtable **pst, int32 flags)
+{
+ struct hash_subtable *old_st = *pst;
+ int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
+ *pst = hash_subtable_new (h, old_st->power + 1, HASH_USED (flags));
+ struct hash_entry *end_e = old_st->end;
+ struct hash_entry *e;
+ void *dummy_result;
+ int32 used = 0;
+
+ flags |= HASH_REHASH;
+ for (e = old_st->entry; e != end_e; e = HASH_OFFSET (e, elemsize)) {
+ hash_hash_t hash = e->hash;
+ if (hash != HASH_NIL) {
+ int32 rc = hash_insert_internal (pst, flags, e->hash, h, e->data, &dummy_result);
+ assert (rc == 0);
+ memcpy(dummy_result, e->data, h->datasize);
+ used++;
+ }
+ }
+ free (old_st);
+}
+
+static int32
+hash_lookup (Hmap *h, void *data, void **pres)
+{
+ int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
+ hash_hash_t hash = (*h->data_hash) (h->keysize, data) & ~HASH_MASK;
+ struct hash_subtable *st = h->st;
+ int32 used = 0;
+ hash_hash_t e_hash;
+ struct hash_entry *e;
+ struct hash_entry *end_e;
+
+ hash += HASH_ADJUST (hash);
+ for (;;) {
+ int32 shift = HASH_BITS - (st->power + used);
+ int32 index_mask = (1 << st->power) - 1;
+ int32 i = (hash >> shift) & index_mask; /* i is the natural position of hash */
+
+ e = HASH_OFFSET (st->entry, i * elemsize); /* e points to element i */
+ e_hash = e->hash;
+ if ((e_hash & HASH_MASK) != HASH_SUBHASH) { /* a subtable */
+ break;
+ }
+ used += st->power;
+ st = *(struct hash_subtable **)e->data;
+ }
+ end_e = HASH_OFFSET (e, st->limit_bytes);
+ while (e != end_e && (e_hash = e->hash) != HASH_NIL && e_hash < hash) {
+ e = HASH_OFFSET (e, elemsize);
+ }
+ while (e != end_e && ((e_hash = e->hash) ^ hash) < HASH_SUBHASH) {
+ if (HASH_DATA_EQ (h, data, e->data)) { /* a match */
+ *pres = e->data;
+ return (1);
+ }
+ e = HASH_OFFSET (e, elemsize);
+ }
+ USED(e_hash);
+ *pres = 0;
+ return (0);
+}
+
+static int32
+hash_remove (Hmap *h, void *data, void *arg)
+{
+ int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
+ hash_hash_t hash = (*h->data_hash) (h->keysize, data) & ~HASH_MASK;
+ struct hash_subtable *st = h->st;
+ int32 used = 0;
+ hash_hash_t e_hash;
+ struct hash_entry *e;
+ struct hash_entry *end_e;
+
+ hash += HASH_ADJUST (hash);
+ for (;;) {
+ int32 shift = HASH_BITS - (st->power + used);
+ int32 index_mask = (1 << st->power) - 1;
+ int32 i = (hash >> shift) & index_mask; /* i is the natural position of hash */
+
+ e = HASH_OFFSET (st->entry, i * elemsize); /* e points to element i */
+ e_hash = e->hash;
+ if ((e_hash & HASH_MASK) != HASH_SUBHASH) { /* a subtable */
+ break;
+ }
+ used += st->power;
+ st = *(struct hash_subtable **)e->data;
+ }
+ end_e = HASH_OFFSET (e, st->limit_bytes);
+ while (e != end_e && (e_hash = e->hash) != HASH_NIL && e_hash < hash) {
+ e = HASH_OFFSET (e, elemsize);
+ }
+ while (e != end_e && ((e_hash = e->hash) ^ hash) < HASH_SUBHASH) {
+ if (HASH_DATA_EQ (h, data, e->data)) { /* a match */
+ (*h->data_del) (h->datavo, arg, e->data);
+ hash_remove_n (st, e, 1);
+ h->count--;
+ return (1);
+ }
+ e = HASH_OFFSET (e, elemsize);
+ }
+ USED(e_hash);
+ return (0);
+}
+
+static int32
+hash_insert_internal (struct hash_subtable **pst, int32 flags, hash_hash_t hash,
+ Hmap *h, void *data, void **pres)
+{
+ int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
+
+ if ((flags & HASH_REHASH) == 0) {
+ hash += HASH_ADJUST (hash);
+ hash &= ~HASH_MASK;
+ }
+ for (;;) {
+ struct hash_subtable *st = *pst;
+ int32 shift = HASH_BITS - (st->power + HASH_USED (flags));
+ int32 index_mask = (1 << st->power) - 1;
+ int32 i = (hash >> shift) & index_mask; /* i is the natural position of hash */
+ struct hash_entry *start_e =
+ HASH_OFFSET (st->entry, i * elemsize); /* start_e is the pointer to element i */
+ struct hash_entry *e = start_e; /* e is going to range over [start_e, end_e) */
+ struct hash_entry *end_e;
+ hash_hash_t e_hash = e->hash;
+
+ if ((e_hash & HASH_MASK) == HASH_SUBHASH) { /* a subtable */
+ pst = (struct hash_subtable **) e->data;
+ flags += HASH_MAKE_USED (st->power);
+ continue;
+ }
+ end_e = HASH_OFFSET (start_e, st->limit_bytes);
+ while (e != end_e && (e_hash = e->hash) != HASH_NIL && e_hash < hash) {
+ e = HASH_OFFSET (e, elemsize);
+ i++;
+ }
+ if (e != end_e && e_hash != HASH_NIL) {
+ /* ins_e ranges over the elements that may match */
+ struct hash_entry *ins_e = e;
+ int32 ins_i = i;
+ hash_hash_t ins_e_hash;
+ while (ins_e != end_e && ((e_hash = ins_e->hash) ^ hash) < HASH_SUBHASH) {
+ if (HASH_DATA_EQ (h, data, ins_e->data)) { /* a match */
+ *pres = ins_e->data;
+ return (1);
+ }
+ assert (e_hash != hash || (flags & HASH_REHASH) == 0);
+ hash += (e_hash == hash); /* adjust hash if it collides */
+ ins_e = HASH_OFFSET (ins_e, elemsize);
+ ins_i++;
+ if (e_hash <= hash) { /* set e to insertion point */
+ e = ins_e;
+ i = ins_i;
+ }
+ }
+ /* set ins_e to the insertion point for the new element */
+ ins_e = e;
+ ins_i = i;
+ ins_e_hash = 0;
+ /* move ins_e to point at the end of the contiguous block, but
+ stop if any element can't be moved by one up */
+ while (ins_e != st->end && (ins_e_hash = ins_e->hash) != HASH_NIL &&
+ ins_i + 1 - ((ins_e_hash >> shift) & index_mask) < st->max_probes &&
+ (ins_e_hash & HASH_MASK) != HASH_SUBHASH) {
+ ins_e = HASH_OFFSET (ins_e, elemsize);
+ ins_i++;
+ }
+ if (e == end_e || ins_e == st->end || ins_e_hash != HASH_NIL) {
+ e = end_e; /* can't insert; must grow or convert to subtable */
+ } else { /* make space for element */
+ memmove (HASH_OFFSET (e, elemsize), e, ((byte *) ins_e) - (byte *) e);
+ }
+ }
+ if (e != end_e) {
+ e->hash = hash;
+ *pres = e->data;
+ return (0);
+ }
+ h->changes++;
+ if (st->power < h->max_power) {
+ hash_grow (h, pst, flags);
+ } else {
+ hash_conv (h, st, flags, hash, start_e);
+ }
+ }
+}
+
+static int32
+hash_insert (Hmap *h, void *data, void **pres)
+{
+ int32 rc = hash_insert_internal (&h->st, 0, (*h->data_hash) (h->keysize, data), h, data, pres);
+
+ h->count += (rc == 0); /* increment count if element didn't previously exist */
+ return (rc);
+}
+
+static uint32
+hash_count (Hmap *h)
+{
+ return (h->count);
+}
+
+static void
+iter_restart (struct hash_iter *it, struct hash_subtable *st, int32 used)
+{
+ int32 elemsize = it->elemsize;
+ hash_hash_t last_hash = it->last_hash;
+ struct hash_entry *e;
+ hash_hash_t e_hash;
+ struct hash_iter_sub *sub = &it->subtable_state[it->i];
+ struct hash_entry *end;
+
+ for (;;) {
+ int32 shift = HASH_BITS - (st->power + used);
+ int32 index_mask = (1 << st->power) - 1;
+ int32 i = (last_hash >> shift) & index_mask;
+
+ end = st->end;
+ e = HASH_OFFSET (st->entry, i * elemsize);
+ sub->start = st->entry;
+ sub->end = end;
+
+ if ((e->hash & HASH_MASK) != HASH_SUBHASH) {
+ break;
+ }
+ sub->e = HASH_OFFSET (e, elemsize);
+ sub = &it->subtable_state[++(it->i)];
+ used += st->power;
+ st = *(struct hash_subtable **)e->data;
+ }
+ while (e != end && ((e_hash = e->hash) == HASH_NIL || e_hash <= last_hash)) {
+ e = HASH_OFFSET (e, elemsize);
+ }
+ sub->e = e;
+}
+
+static void *
+hash_next (struct hash_iter *it)
+{
+ int32 elemsize = it->elemsize;
+ struct hash_iter_sub *sub = &it->subtable_state[it->i];
+ struct hash_entry *e = sub->e;
+ struct hash_entry *end = sub->end;
+ hash_hash_t e_hash = 0;
+
+ if (it->changes != it->h->changes) { /* hash table's structure changed; recompute */
+ it->changes = it->h->changes;
+ it->i = 0;
+ iter_restart (it, it->h->st, 0);
+ sub = &it->subtable_state[it->i];
+ e = sub->e;
+ end = sub->end;
+ }
+ if (e != sub->start && it->last_hash != HASH_OFFSET (e, -elemsize)->hash) {
+ struct hash_entry *start = HASH_OFFSET (e, -(elemsize * it->h->max_probes));
+ struct hash_entry *pe = HASH_OFFSET (e, -elemsize);
+ hash_hash_t last_hash = it->last_hash;
+ if (start < sub->start) {
+ start = sub->start;
+ }
+ while (e != start && ((e_hash = pe->hash) == HASH_NIL || last_hash < e_hash)) {
+ e = pe;
+ pe = HASH_OFFSET (pe, -elemsize);
+ }
+ while (e != end && ((e_hash = e->hash) == HASH_NIL || e_hash <= last_hash)) {
+ e = HASH_OFFSET (e, elemsize);
+ }
+ }
+
+ for (;;) {
+ while (e != end && (e_hash = e->hash) == HASH_NIL) {
+ e = HASH_OFFSET (e, elemsize);
+ }
+ if (e == end) {
+ if (it->i == 0) {
+ it->last_hash = HASH_OFFSET (e, -elemsize)->hash;
+ sub->e = e;
+ return (0);
+ } else {
+ it->i--;
+ sub = &it->subtable_state[it->i];
+ e = sub->e;
+ end = sub->end;
+ }
+ } else if ((e_hash & HASH_MASK) != HASH_SUBHASH) {
+ it->last_hash = e->hash;
+ sub->e = HASH_OFFSET (e, elemsize);
+ return (e->data);
+ } else {
+ struct hash_subtable *st =
+ *(struct hash_subtable **)e->data;
+ sub->e = HASH_OFFSET (e, elemsize);
+ it->i++;
+ assert (it->i < sizeof (it->subtable_state) /
+ sizeof (it->subtable_state[0]));
+ sub = &it->subtable_state[it->i];
+ sub->e = e = st->entry;
+ sub->start = st->entry;
+ sub->end = end = st->end;
+ }
+ }
+}
+
+static void
+hash_iter_init (Hmap *h, struct hash_iter *it)
+{
+ it->elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
+ it->changes = h->changes;
+ it->i = 0;
+ it->h = h;
+ it->last_hash = 0;
+ it->subtable_state[0].e = h->st->entry;
+ it->subtable_state[0].start = h->st->entry;
+ it->subtable_state[0].end = h->st->end;
+}
+
+static void
+clean_st (struct hash_subtable *st, int32 *slots, int32 *used)
+{
+ int32 elemsize = st->datasize + offsetof (struct hash_entry, data[0]);
+ struct hash_entry *e = st->entry;
+ struct hash_entry *end = st->end;
+ int32 lslots = (((byte *) end) - (byte *) e) / elemsize;
+ int32 lused = 0;
+
+ while (e != end) {
+ hash_hash_t hash = e->hash;
+ if ((hash & HASH_MASK) == HASH_SUBHASH) {
+ clean_st (*(struct hash_subtable **)e->data, slots, used);
+ } else {
+ lused += (hash != HASH_NIL);
+ }
+ e = HASH_OFFSET (e, elemsize);
+ }
+ free (st);
+ *slots += lslots;
+ *used += lused;
+}
+
+static void
+hash_destroy (Hmap *h)
+{
+ int32 slots = 0;
+ int32 used = 0;
+
+ clean_st (h->st, &slots, &used);
+ free (h);
+}
+
+static void
+hash_visit_internal (struct hash_subtable *st,
+ int32 used, int32 level,
+ void (*data_visit) (void *arg, int32 level, void *data),
+ void *arg)
+{
+ int32 elemsize = st->datasize + offsetof (struct hash_entry, data[0]);
+ struct hash_entry *e = st->entry;
+ int32 shift = HASH_BITS - (used + st->power);
+ int32 i = 0;
+
+ while (e != st->end) {
+ int32 index = ((e->hash >> (shift - 1)) >> 1) & ((1 << st->power) - 1);
+ if ((e->hash & HASH_MASK) == HASH_SUBHASH) {
+ (*data_visit) (arg, level, e->data);
+ hash_visit_internal (*(struct hash_subtable **)e->data,
+ used + st->power, level + 1, data_visit, arg);
+ } else {
+ (*data_visit) (arg, level, e->data);
+ }
+ if (e->hash != HASH_NIL) {
+ assert (i < index + st->max_probes);
+ assert (index <= i);
+ }
+ e = HASH_OFFSET (e, elemsize);
+ i++;
+ }
+}
+
+static void
+hash_visit (Hmap *h, void (*data_visit) (void *arg, int32 level, void *data), void *arg)
+{
+ hash_visit_internal (h->st, 0, 0, data_visit, arg);
+}
+
+//
+/// interfaces to go runtime
+//
+
+// hash requires < 256 bytes of data (key+value) stored inline.
+// Only basic types can be key - biggest is complex128 (16 bytes).
+// Leave some room to grow, just in case.
+enum {
+ MaxValsize = 256 - 64
+};
+
+static void
+donothing(uint32 s, void *a, void *b)
+{
+ USED(s);
+ USED(a);
+ USED(b);
+}
+
+static void
+freedata(uint32 datavo, void *a, void *b)
+{
+ void *p;
+
+ USED(a);
+ p = *(void**)((byte*)b + datavo);
+ free(p);
+}
+
+static void**
+hash_indirect(Hmap *h, void *p)
+{
+ if(h->indirectval)
+ p = *(void**)p;
+ return p;
+}
+
+static int32 debug = 0;
+
+// makemap(typ *Type, hint uint32) (hmap *map[any]any);
+Hmap*
+runtime·makemap_c(MapType *typ, int64 hint)
+{
+ Hmap *h;
+ int32 keyalg, valalg, keysize, valsize, valsize_in_hash;
+ void (*data_del)(uint32, void*, void*);
+ Type *key, *val;
+
+ key = typ->key;
+ val = typ->elem;
+
+ if(hint < 0 || (int32)hint != hint)
+ runtime·panicstring("makemap: size out of range");
+
+ keyalg = key->alg;
+ valalg = val->alg;
+ keysize = key->size;
+ valsize = val->size;
+
+ if(keyalg >= nelem(runtime·algarray) || runtime·algarray[keyalg].hash == runtime·nohash) {
+ runtime·printf("map(keyalg=%d)\n", keyalg);
+ runtime·throw("runtime.makemap: unsupported map key type");
+ }
+
+ if(valalg >= nelem(runtime·algarray)) {
+ runtime·printf("map(valalg=%d)\n", valalg);
+ runtime·throw("runtime.makemap: unsupported map value type");
+ }
+
+ h = runtime·mal(sizeof(*h));
+
+ valsize_in_hash = valsize;
+ data_del = donothing;
+ if (valsize > MaxValsize) {
+ h->indirectval = 1;
+ data_del = freedata;
+ valsize_in_hash = sizeof(void*);
+ }
+
+ // align value inside data so that mark-sweep gc can find it.
+ // might remove in the future and just assume datavo == keysize.
+ h->datavo = keysize;
+ if(valsize_in_hash >= sizeof(void*))
+ h->datavo = runtime·rnd(keysize, sizeof(void*));
+
+ hash_init(h, h->datavo+valsize_in_hash,
+ runtime·algarray[keyalg].hash,
+ runtime·algarray[keyalg].equal,
+ data_del,
+ hint);
+
+ h->keysize = keysize;
+ h->valsize = valsize;
+ h->keyalg = &runtime·algarray[keyalg];
+ h->valalg = &runtime·algarray[valalg];
+
+ // these calculations are compiler dependent.
+ // figure out offsets of map call arguments.
+
+ // func() (key, val)
+ h->ko0 = runtime·rnd(sizeof(h), Structrnd);
+ h->vo0 = runtime·rnd(h->ko0+keysize, val->align);
+
+ // func(key) (val[, pres])
+ h->ko1 = runtime·rnd(sizeof(h), key->align);
+ h->vo1 = runtime·rnd(h->ko1+keysize, Structrnd);
+ h->po1 = h->vo1 + valsize;
+
+ // func(key, val[, pres])
+ h->ko2 = runtime·rnd(sizeof(h), key->align);
+ h->vo2 = runtime·rnd(h->ko2+keysize, val->align);
+ h->po2 = h->vo2 + valsize;
+
+ if(debug) {
+ runtime·printf("makemap: map=%p; keysize=%d; valsize=%d; keyalg=%d; valalg=%d; offsets=%d,%d; %d,%d,%d; %d,%d,%d\n",
+ h, keysize, valsize, keyalg, valalg, h->ko0, h->vo0, h->ko1, h->vo1, h->po1, h->ko2, h->vo2, h->po2);
+ }
+
+ return h;
+}
+
+// makemap(key, val *Type, hint int64) (hmap *map[any]any);
+void
+runtime·makemap(MapType *typ, int64 hint, Hmap *ret)
+{
+ ret = runtime·makemap_c(typ, hint);
+ FLUSH(&ret);
+}
+
+// For reflect:
+// func makemap(Type *mapType) (hmap *map)
+void
+reflect·makemap(MapType *t, Hmap *ret)
+{
+ ret = runtime·makemap_c(t, 0);
+ FLUSH(&ret);
+}
+
+void
+runtime·mapaccess(MapType *t, Hmap *h, byte *ak, byte *av, bool *pres)
+{
+ byte *res;
+ Type *elem;
+
+ if(h == nil) {
+ elem = t->elem;
+ runtime·algarray[elem->alg].copy(elem->size, av, nil);
+ *pres = false;
+ return;
+ }
+
+ if(runtime·gcwaiting)
+ runtime·gosched();
+
+ res = nil;
+ if(hash_lookup(h, ak, (void**)&res)) {
+ *pres = true;
+ h->valalg->copy(h->valsize, av, hash_indirect(h, res+h->datavo));
+ } else {
+ *pres = false;
+ h->valalg->copy(h->valsize, av, nil);
+ }
+}
+
+// mapaccess1(hmap *map[any]any, key any) (val any);
+#pragma textflag 7
+void
+runtime·mapaccess1(MapType *t, Hmap *h, ...)
+{
+ byte *ak, *av;
+ bool pres;
+
+ if(h == nil) {
+ ak = (byte*)(&h + 1);
+ av = ak + runtime·rnd(t->key->size, Structrnd);
+ } else {
+ ak = (byte*)&h + h->ko1;
+ av = (byte*)&h + h->vo1;
+ }
+
+ runtime·mapaccess(t, h, ak, av, &pres);
+
+ if(debug) {
+ runtime·prints("runtime.mapaccess1: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
+ h->keyalg->print(h->keysize, ak);
+ runtime·prints("; val=");
+ h->valalg->print(h->valsize, av);
+ runtime·prints("; pres=");
+ runtime·printbool(pres);
+ runtime·prints("\n");
+ }
+}
+
+// mapaccess2(hmap *map[any]any, key any) (val any, pres bool);
+#pragma textflag 7
+void
+runtime·mapaccess2(MapType *t, Hmap *h, ...)
+{
+ byte *ak, *av, *ap;
+
+ if(h == nil) {
+ ak = (byte*)(&h + 1);
+ av = ak + runtime·rnd(t->key->size, Structrnd);
+ ap = av + t->elem->size;
+ } else {
+ ak = (byte*)&h + h->ko1;
+ av = (byte*)&h + h->vo1;
+ ap = (byte*)&h + h->po1;
+ }
+
+ runtime·mapaccess(t, h, ak, av, ap);
+
+ if(debug) {
+ runtime·prints("runtime.mapaccess2: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
+ h->keyalg->print(h->keysize, ak);
+ runtime·prints("; val=");
+ h->valalg->print(h->valsize, av);
+ runtime·prints("; pres=");
+ runtime·printbool(*ap);
+ runtime·prints("\n");
+ }
+}
+
+// For reflect:
+// func mapaccess(t type, h map, key iword) (val iword, pres bool)
+// where an iword is the same word an interface value would use:
+// the actual data if it fits, or else a pointer to the data.
+void
+reflect·mapaccess(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
+{
+ byte *ak, *av;
+
+ if(t->key->size <= sizeof(key))
+ ak = (byte*)&key;
+ else
+ ak = (byte*)key;
+ val = 0;
+ pres = false;
+ if(t->elem->size <= sizeof(val))
+ av = (byte*)&val;
+ else {
+ av = runtime·mal(t->elem->size);
+ val = (uintptr)av;
+ }
+ runtime·mapaccess(t, h, ak, av, &pres);
+ FLUSH(&val);
+ FLUSH(&pres);
+}
+
+void
+runtime·mapassign(MapType *t, Hmap *h, byte *ak, byte *av)
+{
+ byte *res;
+ int32 hit;
+
+ USED(t);
+
+ if(h == nil)
+ runtime·panicstring("assignment to entry in nil map");
+
+ if(runtime·gcwaiting)
+ runtime·gosched();
+
+ res = nil;
+ if(av == nil) {
+ hash_remove(h, ak, (void**)&res);
+ return;
+ }
+
+ hit = hash_insert(h, ak, (void**)&res);
+ if(!hit && h->indirectval)
+ *(void**)(res+h->datavo) = runtime·mal(h->valsize);
+ h->keyalg->copy(h->keysize, res, ak);
+ h->valalg->copy(h->valsize, hash_indirect(h, res+h->datavo), av);
+
+ if(debug) {
+ runtime·prints("mapassign: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
+ h->keyalg->print(h->keysize, ak);
+ runtime·prints("; val=");
+ h->valalg->print(h->valsize, av);
+ runtime·prints("; hit=");
+ runtime·printint(hit);
+ runtime·prints("; res=");
+ runtime·printpointer(res);
+ runtime·prints("\n");
+ }
+}
+
+// mapassign1(mapType *type, hmap *map[any]any, key any, val any);
+#pragma textflag 7
+void
+runtime·mapassign1(MapType *t, Hmap *h, ...)
+{
+ byte *ak, *av;
+
+ if(h == nil)
+ runtime·panicstring("assignment to entry in nil map");
+
+ ak = (byte*)&h + h->ko2;
+ av = (byte*)&h + h->vo2;
+
+ runtime·mapassign(t, h, ak, av);
+}
+
+// mapassign2(mapType *type, hmap *map[any]any, key any, val any, pres bool);
+#pragma textflag 7
+void
+runtime·mapassign2(MapType *t, Hmap *h, ...)
+{
+ byte *ak, *av, *ap;
+
+ if(h == nil)
+ runtime·panicstring("assignment to entry in nil map");
+
+ ak = (byte*)&h + h->ko2;
+ av = (byte*)&h + h->vo2;
+ ap = (byte*)&h + h->po2;
+
+ if(*ap == false)
+ av = nil; // delete
+
+ runtime·mapassign(t, h, ak, av);
+
+ if(debug) {
+ runtime·prints("mapassign2: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
+ h->keyalg->print(h->keysize, ak);
+ runtime·prints("\n");
+ }
+}
+
+// For reflect:
+// func mapassign(t type h map, key, val iword, pres bool)
+// where an iword is the same word an interface value would use:
+// the actual data if it fits, or else a pointer to the data.
+void
+reflect·mapassign(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
+{
+ byte *ak, *av;
+
+ if(h == nil)
+ runtime·panicstring("assignment to entry in nil map");
+ if(h->keysize <= sizeof(key))
+ ak = (byte*)&key;
+ else
+ ak = (byte*)key;
+ if(h->valsize <= sizeof(val))
+ av = (byte*)&val;
+ else
+ av = (byte*)val;
+ if(!pres)
+ av = nil;
+ runtime·mapassign(t, h, ak, av);
+}
+
+// mapiterinit(mapType *type, hmap *map[any]any, hiter *any);
+void
+runtime·mapiterinit(MapType*, Hmap *h, struct hash_iter *it)
+{
+ if(h == nil) {
+ it->data = nil;
+ return;
+ }
+ hash_iter_init(h, it);
+ it->data = hash_next(it);
+ if(debug) {
+ runtime·prints("runtime.mapiterinit: map=");
+ runtime·printpointer(h);
+ runtime·prints("; iter=");
+ runtime·printpointer(it);
+ runtime·prints("; data=");
+ runtime·printpointer(it->data);
+ runtime·prints("\n");
+ }
+}
+
+// For reflect:
+// func mapiterinit(h map) (it iter)
+void
+reflect·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
+{
+ it = runtime·mal(sizeof *it);
+ FLUSH(&it);
+ runtime·mapiterinit(t, h, it);
+}
+
+// mapiternext(hiter *any);
+void
+runtime·mapiternext(struct hash_iter *it)
+{
+ if(runtime·gcwaiting)
+ runtime·gosched();
+
+ it->data = hash_next(it);
+ if(debug) {
+ runtime·prints("runtime.mapiternext: iter=");
+ runtime·printpointer(it);
+ runtime·prints("; data=");
+ runtime·printpointer(it->data);
+ runtime·prints("\n");
+ }
+}
+
+// For reflect:
+// func mapiternext(it iter)
+void
+reflect·mapiternext(struct hash_iter *it)
+{
+ runtime·mapiternext(it);
+}
+
+// mapiter1(hiter *any) (key any);
+#pragma textflag 7
+void
+runtime·mapiter1(struct hash_iter *it, ...)
+{
+ Hmap *h;
+ byte *ak, *res;
+
+ h = it->h;
+ ak = (byte*)&it + h->ko0;
+
+ res = it->data;
+ if(res == nil)
+ runtime·throw("runtime.mapiter1: key:val nil pointer");
+
+ h->keyalg->copy(h->keysize, ak, res);
+
+ if(debug) {
+ runtime·prints("mapiter2: iter=");
+ runtime·printpointer(it);
+ runtime·prints("; map=");
+ runtime·printpointer(h);
+ runtime·prints("\n");
+ }
+}
+
+bool
+runtime·mapiterkey(struct hash_iter *it, void *ak)
+{
+ Hmap *h;
+ byte *res;
+
+ h = it->h;
+ res = it->data;
+ if(res == nil)
+ return false;
+ h->keyalg->copy(h->keysize, ak, res);
+ return true;
+}
+
+// For reflect:
+// func mapiterkey(h map) (key iword, ok bool)
+// where an iword is the same word an interface value would use:
+// the actual data if it fits, or else a pointer to the data.
+void
+reflect·mapiterkey(struct hash_iter *it, uintptr key, bool ok)
+{
+ Hmap *h;
+ byte *res;
+
+ key = 0;
+ ok = false;
+ h = it->h;
+ res = it->data;
+ if(res == nil) {
+ key = 0;
+ ok = false;
+ } else {
+ key = 0;
+ if(h->keysize <= sizeof(key))
+ h->keyalg->copy(h->keysize, (byte*)&key, res);
+ else
+ key = (uintptr)res;
+ ok = true;
+ }
+ FLUSH(&key);
+ FLUSH(&ok);
+}
+
+// For reflect:
+// func maplen(h map) (len int32)
+// Like len(m) in the actual language, we treat the nil map as length 0.
+void
+reflect·maplen(Hmap *h, int32 len)
+{
+ if(h == nil)
+ len = 0;
+ else
+ len = h->count;
+ FLUSH(&len);
+}
+
+// mapiter2(hiter *any) (key any, val any);
+#pragma textflag 7
+void
+runtime·mapiter2(struct hash_iter *it, ...)
+{
+ Hmap *h;
+ byte *ak, *av, *res;
+
+ h = it->h;
+ ak = (byte*)&it + h->ko0;
+ av = (byte*)&it + h->vo0;
+
+ res = it->data;
+ if(res == nil)
+ runtime·throw("runtime.mapiter2: key:val nil pointer");
+
+ h->keyalg->copy(h->keysize, ak, res);
+ h->valalg->copy(h->valsize, av, hash_indirect(h, res+h->datavo));
+
+ if(debug) {
+ runtime·prints("mapiter2: iter=");
+ runtime·printpointer(it);
+ runtime·prints("; map=");
+ runtime·printpointer(h);
+ runtime·prints("\n");
+ }
+}
diff --git a/src/pkg/runtime/hashmap.h b/src/pkg/runtime/hashmap.h
new file mode 100644
index 000000000..19ff41697
--- /dev/null
+++ b/src/pkg/runtime/hashmap.h
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/* A hash table.
+ Example, hashing nul-terminated char*s:
+ hash_hash_t str_hash (void *v) {
+ char *s;
+ hash_hash_t hash = 0;
+ for (s = *(char **)v; *s != 0; s++) {
+ hash = (hash ^ *s) * 2654435769U;
+ }
+ return (hash);
+ }
+ int str_eq (void *a, void *b) {
+ return (strcmp (*(char **)a, *(char **)b) == 0);
+ }
+ void str_del (void *arg, void *data) {
+ *(char **)arg = *(char **)data;
+ }
+
+ struct hash *h = hash_new (sizeof (char *), &str_hash, &str_eq, &str_del, 3, 12, 15);
+ ... 3=> 2**3 entries initial size
+ ... 12=> 2**12 entries before sprouting sub-tables
+ ... 15=> number of adjacent probes to attempt before growing
+
+ Example lookup:
+ char *key = "foobar";
+ char **result_ptr;
+ if (hash_lookup (h, &key, (void **) &result_ptr)) {
+ printf ("found in table: %s\n", *result_ptr);
+ } else {
+ printf ("not found in table\n");
+ }
+
+ Example insertion:
+ char *key = strdup ("foobar");
+ char **result_ptr;
+ if (hash_lookup (h, &key, (void **) &result_ptr)) {
+ printf ("found in table: %s\n", *result_ptr);
+ printf ("to overwrite, do *result_ptr = key\n");
+ } else {
+ printf ("not found in table; inserted as %s\n", *result_ptr);
+ assert (*result_ptr == key);
+ }
+
+ Example deletion:
+ char *key = "foobar";
+ char *result;
+ if (hash_remove (h, &key, &result)) {
+ printf ("key found and deleted from table\n");
+ printf ("called str_del (&result, data) to copy data to result: %s\n", result);
+ } else {
+ printf ("not found in table\n");
+ }
+
+ Example iteration over the elements of *h:
+ char **data;
+ struct hash_iter it;
+ hash_iter_init (h, &it);
+ for (data = hash_next (&it); data != 0; data = hash_next (&it)) {
+ printf ("%s\n", *data);
+ }
+ */
+
+#define malloc runtime·mal
+#define memset(a,b,c) runtime·memclr((byte*)(a), (uint32)(c))
+#define memcpy(a,b,c) runtime·memmove((byte*)(a),(byte*)(b),(uint32)(c))
+#define assert(a) if(!(a)) runtime·throw("assert")
+#define free(x) runtime·free(x)
+#define memmove(a,b,c) runtime·memmove(a, b, c)
+
+struct Hmap; /* opaque */
+struct hash_subtable; /* opaque */
+struct hash_entry; /* opaque */
+
+typedef uintptr uintptr_t;
+typedef uintptr_t hash_hash_t;
+
+struct hash_iter {
+ uint8* data; /* returned from next */
+ int32 elemsize; /* size of elements in table */
+ int32 changes; /* number of changes observed last time */
+ int32 i; /* stack pointer in subtable_state */
+ hash_hash_t last_hash; /* last hash value returned */
+ struct Hmap *h; /* the hash table */
+ struct hash_iter_sub {
+ struct hash_entry *e; /* pointer into subtable */
+ struct hash_entry *start; /* start of subtable */
+ struct hash_entry *end; /* end of subtable */
+ } subtable_state[4]; /* Should be large enough unless the hashing is
+ so bad that many distinct data values hash
+ to the same hash value. */
+};
+
+/* Return a hashtable h 2**init_power empty entries, each with
+ "datasize" data bytes.
+ (*data_hash)(a) should return the hash value of data element *a.
+ (*data_eq)(a,b) should return whether the data at "a" and the data at "b"
+ are equal.
+ (*data_del)(arg, a) will be invoked when data element *a is about to be removed
+ from the table. "arg" is the argument passed to "hash_remove()".
+
+ Growing is accomplished by resizing if the current tables size is less than
+ a threshold, and by adding subtables otherwise. hint should be set
+ the expected maximum size of the table.
+ "datasize" should be in [sizeof (void*), ..., 255]. If you need a
+ bigger "datasize", store a pointer to another piece of memory. */
+
+//struct hash *hash_new (int32 datasize,
+// hash_hash_t (*data_hash) (void *),
+// int32 (*data_eq) (void *, void *),
+// void (*data_del) (void *, void *),
+// int64 hint);
+
+/* Lookup *data in *h. If the data is found, return 1 and place a pointer to
+ the found element in *pres. Otherwise return 0 and place 0 in *pres. */
+// int32 hash_lookup (struct hash *h, void *data, void **pres);
+
+/* Lookup *data in *h. If the data is found, execute (*data_del) (arg, p)
+ where p points to the data in the table, then remove it from *h and return
+ 1. Otherwise return 0. */
+// int32 hash_remove (struct hash *h, void *data, void *arg);
+
+/* Lookup *data in *h. If the data is found, return 1, and place a pointer
+ to the found element in *pres. Otherwise, return 0, allocate a region
+ for the data to be inserted, and place a pointer to the inserted element
+ in *pres; it is the caller's responsibility to copy the data to be
+ inserted to the pointer returned in *pres in this case.
+
+ If using garbage collection, it is the caller's responsibility to
+ add references for **pres if HASH_ADDED is returned. */
+// int32 hash_insert (struct hash *h, void *data, void **pres);
+
+/* Return the number of elements in the table. */
+// uint32 hash_count (struct hash *h);
+
+/* The following call is useful only if not using garbage collection on the
+ table.
+ Remove all sub-tables associated with *h.
+ This undoes the effects of hash_init().
+ If other memory pointed to by user data must be freed, the caller is
+ responsible for doiing do by iterating over *h first; see
+ hash_iter_init()/hash_next(). */
+// void hash_destroy (struct hash *h);
+
+/*----- iteration -----*/
+
+/* Initialize *it from *h. */
+// void hash_iter_init (struct hash *h, struct hash_iter *it);
+
+/* Return the next used entry in the table which which *it was initialized. */
+// void *hash_next (struct hash_iter *it);
+
+/*---- test interface ----*/
+/* Call (*data_visit) (arg, level, data) for every data entry in the table,
+ whether used or not. "level" is the subtable level, 0 means first level. */
+/* TESTING ONLY: DO NOT USE THIS ROUTINE IN NORMAL CODE */
+// void hash_visit (struct hash *h, void (*data_visit) (void *arg, int32 level, void *data), void *arg);
diff --git a/src/pkg/runtime/iface.c b/src/pkg/runtime/iface.c
new file mode 100644
index 000000000..000f834cf
--- /dev/null
+++ b/src/pkg/runtime/iface.c
@@ -0,0 +1,788 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "type.h"
+#include "malloc.h"
+
+enum
+{
+ // If an empty interface has these bits set in its type
+ // pointer, it was copied from a reflect.Value and is
+ // not a valid empty interface.
+ reflectFlags = 3,
+};
+
+void
+runtime·printiface(Iface i)
+{
+ runtime·printf("(%p,%p)", i.tab, i.data);
+}
+
+void
+runtime·printeface(Eface e)
+{
+ runtime·printf("(%p,%p)", e.type, e.data);
+}
+
+/*
+ * layout of Itab known to compilers
+ */
+struct Itab
+{
+ InterfaceType* inter;
+ Type* type;
+ Itab* link;
+ int32 bad;
+ int32 unused;
+ void (*fun[])(void);
+};
+
+static Itab* hash[1009];
+static Lock ifacelock;
+
+static Itab*
+itab(InterfaceType *inter, Type *type, int32 canfail)
+{
+ int32 locked;
+ int32 ni;
+ Method *t, *et;
+ IMethod *i, *ei;
+ uint32 h;
+ String *iname, *ipkgPath;
+ Itab *m;
+ UncommonType *x;
+ Type *itype;
+ Eface err;
+
+ if(inter->mhdr.len == 0)
+ runtime·throw("internal error - misuse of itab");
+
+ locked = 0;
+
+ // easy case
+ x = type->x;
+ if(x == nil) {
+ if(canfail)
+ return nil;
+ iname = inter->m[0].name;
+ goto throw;
+ }
+
+ // compiler has provided some good hash codes for us.
+ h = inter->hash;
+ h += 17 * type->hash;
+ // TODO(rsc): h += 23 * x->mhash ?
+ h %= nelem(hash);
+
+ // look twice - once without lock, once with.
+ // common case will be no lock contention.
+ for(locked=0; locked<2; locked++) {
+ if(locked)
+ runtime·lock(&ifacelock);
+ for(m=runtime·atomicloadp(&hash[h]); m!=nil; m=m->link) {
+ if(m->inter == inter && m->type == type) {
+ if(m->bad) {
+ m = nil;
+ if(!canfail) {
+ // this can only happen if the conversion
+ // was already done once using the , ok form
+ // and we have a cached negative result.
+ // the cached result doesn't record which
+ // interface function was missing, so jump
+ // down to the interface check, which will
+ // do more work but give a better error.
+ goto search;
+ }
+ }
+ if(locked)
+ runtime·unlock(&ifacelock);
+ return m;
+ }
+ }
+ }
+
+ ni = inter->mhdr.len;
+ m = runtime·malloc(sizeof(*m) + ni*sizeof m->fun[0]);
+ m->inter = inter;
+ m->type = type;
+
+search:
+ // both inter and type have method sorted by name,
+ // and interface names are unique,
+ // so can iterate over both in lock step;
+ // the loop is O(ni+nt) not O(ni*nt).
+ i = inter->m;
+ ei = i + inter->mhdr.len;
+ t = x->m;
+ et = t + x->mhdr.len;
+ for(; i < ei; i++) {
+ itype = i->type;
+ iname = i->name;
+ ipkgPath = i->pkgPath;
+ for(;; t++) {
+ if(t >= et) {
+ if(!canfail) {
+ throw:
+ // didn't find method
+ runtime·newTypeAssertionError(nil, type, inter,
+ nil, type->string, inter->string,
+ iname, &err);
+ if(locked)
+ runtime·unlock(&ifacelock);
+ runtime·panic(err);
+ return nil; // not reached
+ }
+ m->bad = 1;
+ goto out;
+ }
+ if(t->mtyp == itype && t->name == iname && t->pkgPath == ipkgPath)
+ break;
+ }
+ if(m)
+ m->fun[i - inter->m] = t->ifn;
+ }
+
+out:
+ if(!locked)
+ runtime·panicstring("invalid itab locking");
+ m->link = hash[h];
+ runtime·atomicstorep(&hash[h], m);
+ runtime·unlock(&ifacelock);
+ if(m->bad)
+ return nil;
+ return m;
+}
+
+static void
+copyin(Type *t, void *src, void **dst)
+{
+ int32 wid, alg;
+ void *p;
+
+ wid = t->size;
+ alg = t->alg;
+
+ if(wid <= sizeof(*dst))
+ runtime·algarray[alg].copy(wid, dst, src);
+ else {
+ p = runtime·mal(wid);
+ runtime·algarray[alg].copy(wid, p, src);
+ *dst = p;
+ }
+}
+
+static void
+copyout(Type *t, void **src, void *dst)
+{
+ int32 wid, alg;
+
+ wid = t->size;
+ alg = t->alg;
+
+ if(wid <= sizeof(*src))
+ runtime·algarray[alg].copy(wid, dst, src);
+ else
+ runtime·algarray[alg].copy(wid, dst, *src);
+}
+
+// func convT2I(typ *byte, typ2 *byte, elem any) (ret any)
+#pragma textflag 7
+void
+runtime·convT2I(Type *t, InterfaceType *inter, ...)
+{
+ byte *elem;
+ Iface *ret;
+ int32 wid;
+
+ elem = (byte*)(&inter+1);
+ wid = t->size;
+ ret = (Iface*)(elem + runtime·rnd(wid, Structrnd));
+ ret->tab = itab(inter, t, 0);
+ copyin(t, elem, &ret->data);
+}
+
+// func convT2E(typ *byte, elem any) (ret any)
+#pragma textflag 7
+void
+runtime·convT2E(Type *t, ...)
+{
+ byte *elem;
+ Eface *ret;
+ int32 wid;
+
+ elem = (byte*)(&t+1);
+ wid = t->size;
+ ret = (Eface*)(elem + runtime·rnd(wid, Structrnd));
+ ret->type = t;
+ copyin(t, elem, &ret->data);
+}
+
+static void assertI2Tret(Type *t, Iface i, byte *ret);
+
+// func ifaceI2T(typ *byte, iface any) (ret any)
+#pragma textflag 7
+void
+runtime·assertI2T(Type *t, Iface i, ...)
+{
+ byte *ret;
+
+ ret = (byte*)(&i+1);
+ assertI2Tret(t, i, ret);
+}
+
+static void
+assertI2Tret(Type *t, Iface i, byte *ret)
+{
+ Itab *tab;
+ Eface err;
+
+ tab = i.tab;
+ if(tab == nil) {
+ runtime·newTypeAssertionError(nil, nil, t,
+ nil, nil, t->string,
+ nil, &err);
+ runtime·panic(err);
+ }
+ if(tab->type != t) {
+ runtime·newTypeAssertionError(tab->inter, tab->type, t,
+ tab->inter->string, tab->type->string, t->string,
+ nil, &err);
+ runtime·panic(err);
+ }
+ copyout(t, &i.data, ret);
+}
+
+// func ifaceI2T2(typ *byte, iface any) (ret any, ok bool)
+#pragma textflag 7
+void
+runtime·assertI2T2(Type *t, Iface i, ...)
+{
+ byte *ret;
+ bool *ok;
+ int32 wid;
+
+ ret = (byte*)(&i+1);
+ wid = t->size;
+ ok = (bool*)(ret + wid);
+
+ if(i.tab == nil || i.tab->type != t) {
+ *ok = false;
+ runtime·memclr(ret, wid);
+ return;
+ }
+
+ *ok = true;
+ copyout(t, &i.data, ret);
+}
+
+static void assertE2Tret(Type *t, Eface e, byte *ret);
+
+// func ifaceE2T(typ *byte, iface any) (ret any)
+#pragma textflag 7
+void
+runtime·assertE2T(Type *t, Eface e, ...)
+{
+ byte *ret;
+
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ ret = (byte*)(&e+1);
+ assertE2Tret(t, e, ret);
+}
+
+static void
+assertE2Tret(Type *t, Eface e, byte *ret)
+{
+ Eface err;
+
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ if(e.type == nil) {
+ runtime·newTypeAssertionError(nil, nil, t,
+ nil, nil, t->string,
+ nil, &err);
+ runtime·panic(err);
+ }
+ if(e.type != t) {
+ runtime·newTypeAssertionError(nil, e.type, t,
+ nil, e.type->string, t->string,
+ nil, &err);
+ runtime·panic(err);
+ }
+ copyout(t, &e.data, ret);
+}
+
+// func ifaceE2T2(sigt *byte, iface any) (ret any, ok bool);
+#pragma textflag 7
+void
+runtime·assertE2T2(Type *t, Eface e, ...)
+{
+ byte *ret;
+ bool *ok;
+ int32 wid;
+
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ ret = (byte*)(&e+1);
+ wid = t->size;
+ ok = (bool*)(ret + wid);
+
+ if(t != e.type) {
+ *ok = false;
+ runtime·memclr(ret, wid);
+ return;
+ }
+
+ *ok = true;
+ copyout(t, &e.data, ret);
+}
+
+// func convI2E(elem any) (ret any)
+void
+runtime·convI2E(Iface i, Eface ret)
+{
+ Itab *tab;
+
+ ret.data = i.data;
+ if((tab = i.tab) == nil)
+ ret.type = nil;
+ else
+ ret.type = tab->type;
+ FLUSH(&ret);
+}
+
+// func ifaceI2E(typ *byte, iface any) (ret any)
+void
+runtime·assertI2E(InterfaceType* inter, Iface i, Eface ret)
+{
+ Itab *tab;
+ Eface err;
+
+ tab = i.tab;
+ if(tab == nil) {
+ // explicit conversions require non-nil interface value.
+ runtime·newTypeAssertionError(nil, nil, inter,
+ nil, nil, inter->string,
+ nil, &err);
+ runtime·panic(err);
+ }
+ ret.data = i.data;
+ ret.type = tab->type;
+ FLUSH(&ret);
+}
+
+// func ifaceI2E2(typ *byte, iface any) (ret any, ok bool)
+void
+runtime·assertI2E2(InterfaceType* inter, Iface i, Eface ret, bool ok)
+{
+ Itab *tab;
+
+ USED(inter);
+ tab = i.tab;
+ if(tab == nil) {
+ ret.type = nil;
+ ok = 0;
+ } else {
+ ret.type = tab->type;
+ ok = 1;
+ }
+ ret.data = i.data;
+ FLUSH(&ret);
+ FLUSH(&ok);
+}
+
+// func convI2I(typ *byte, elem any) (ret any)
+void
+runtime·convI2I(InterfaceType* inter, Iface i, Iface ret)
+{
+ Itab *tab;
+
+ ret.data = i.data;
+ if((tab = i.tab) == nil)
+ ret.tab = nil;
+ else if(tab->inter == inter)
+ ret.tab = tab;
+ else
+ ret.tab = itab(inter, tab->type, 0);
+ FLUSH(&ret);
+}
+
+void
+runtime·ifaceI2I(InterfaceType *inter, Iface i, Iface *ret)
+{
+ Itab *tab;
+ Eface err;
+
+ tab = i.tab;
+ if(tab == nil) {
+ // explicit conversions require non-nil interface value.
+ runtime·newTypeAssertionError(nil, nil, inter,
+ nil, nil, inter->string,
+ nil, &err);
+ runtime·panic(err);
+ }
+ ret->data = i.data;
+ ret->tab = itab(inter, tab->type, 0);
+}
+
+// func ifaceI2I(sigi *byte, iface any) (ret any)
+void
+runtime·assertI2I(InterfaceType* inter, Iface i, Iface ret)
+{
+ runtime·ifaceI2I(inter, i, &ret);
+}
+
+// func ifaceI2I2(sigi *byte, iface any) (ret any, ok bool)
+void
+runtime·assertI2I2(InterfaceType *inter, Iface i, Iface ret, bool ok)
+{
+ Itab *tab;
+
+ tab = i.tab;
+ if(tab != nil && (tab->inter == inter || (tab = itab(inter, tab->type, 1)) != nil)) {
+ ret.data = i.data;
+ ret.tab = tab;
+ ok = 1;
+ } else {
+ ret.data = 0;
+ ret.tab = 0;
+ ok = 0;
+ }
+ FLUSH(&ret);
+ FLUSH(&ok);
+}
+
+void
+runtime·ifaceE2I(InterfaceType *inter, Eface e, Iface *ret)
+{
+ Type *t;
+ Eface err;
+
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ t = e.type;
+ if(t == nil) {
+ // explicit conversions require non-nil interface value.
+ runtime·newTypeAssertionError(nil, nil, inter,
+ nil, nil, inter->string,
+ nil, &err);
+ runtime·panic(err);
+ }
+ ret->data = e.data;
+ ret->tab = itab(inter, t, 0);
+}
+
+// For reflect
+// func ifaceE2I(t *InterfaceType, e interface{}, dst *Iface)
+void
+reflect·ifaceE2I(InterfaceType *inter, Eface e, Iface *dst)
+{
+ runtime·ifaceE2I(inter, e, dst);
+}
+
+// func ifaceE2I(sigi *byte, iface any) (ret any)
+void
+runtime·assertE2I(InterfaceType* inter, Eface e, Iface ret)
+{
+ runtime·ifaceE2I(inter, e, &ret);
+}
+
+// ifaceE2I2(sigi *byte, iface any) (ret any, ok bool)
+void
+runtime·assertE2I2(InterfaceType *inter, Eface e, Iface ret, bool ok)
+{
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ if(e.type == nil) {
+ ok = 0;
+ ret.data = nil;
+ ret.tab = nil;
+ } else if((ret.tab = itab(inter, e.type, 1)) == nil) {
+ ok = 0;
+ ret.data = nil;
+ } else {
+ ok = 1;
+ ret.data = e.data;
+ }
+ FLUSH(&ret);
+ FLUSH(&ok);
+}
+
+// func ifaceE2E(typ *byte, iface any) (ret any)
+void
+runtime·assertE2E(InterfaceType* inter, Eface e, Eface ret)
+{
+ Type *t;
+ Eface err;
+
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ t = e.type;
+ if(t == nil) {
+ // explicit conversions require non-nil interface value.
+ runtime·newTypeAssertionError(nil, nil, inter,
+ nil, nil, inter->string,
+ nil, &err);
+ runtime·panic(err);
+ }
+ ret = e;
+ FLUSH(&ret);
+}
+
+// func ifaceE2E2(iface any) (ret any, ok bool)
+void
+runtime·assertE2E2(InterfaceType* inter, Eface e, Eface ret, bool ok)
+{
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ USED(inter);
+ ret = e;
+ ok = e.type != nil;
+ FLUSH(&ret);
+ FLUSH(&ok);
+}
+
+static uintptr
+ifacehash1(void *data, Type *t)
+{
+ int32 alg, wid;
+ Eface err;
+
+ if(t == nil)
+ return 0;
+
+ alg = t->alg;
+ wid = t->size;
+ if(runtime·algarray[alg].hash == runtime·nohash) {
+ // calling nohash will panic too,
+ // but we can print a better error.
+ runtime·newErrorString(runtime·catstring(runtime·gostringnocopy((byte*)"hash of unhashable type "), *t->string), &err);
+ runtime·panic(err);
+ }
+ if(wid <= sizeof(data))
+ return runtime·algarray[alg].hash(wid, &data);
+ return runtime·algarray[alg].hash(wid, data);
+}
+
+uintptr
+runtime·ifacehash(Iface a)
+{
+ if(a.tab == nil)
+ return 0;
+ return ifacehash1(a.data, a.tab->type);
+}
+
+uintptr
+runtime·efacehash(Eface a)
+{
+ return ifacehash1(a.data, a.type);
+}
+
+static bool
+ifaceeq1(void *data1, void *data2, Type *t)
+{
+ int32 alg, wid;
+ Eface err;
+
+ alg = t->alg;
+ wid = t->size;
+
+ if(runtime·algarray[alg].equal == runtime·noequal) {
+ // calling noequal will panic too,
+ // but we can print a better error.
+ runtime·newErrorString(runtime·catstring(runtime·gostringnocopy((byte*)"comparing uncomparable type "), *t->string), &err);
+ runtime·panic(err);
+ }
+
+ if(wid <= sizeof(data1))
+ return runtime·algarray[alg].equal(wid, &data1, &data2);
+ return runtime·algarray[alg].equal(wid, data1, data2);
+}
+
+bool
+runtime·ifaceeq_c(Iface i1, Iface i2)
+{
+ if(i1.tab != i2.tab)
+ return false;
+ if(i1.tab == nil)
+ return true;
+ return ifaceeq1(i1.data, i2.data, i1.tab->type);
+}
+
+bool
+runtime·efaceeq_c(Eface e1, Eface e2)
+{
+ if(((uintptr)e1.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ if(((uintptr)e2.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ if(e1.type != e2.type)
+ return false;
+ if(e1.type == nil)
+ return true;
+ return ifaceeq1(e1.data, e2.data, e1.type);
+}
+
+// ifaceeq(i1 any, i2 any) (ret bool);
+void
+runtime·ifaceeq(Iface i1, Iface i2, bool ret)
+{
+ ret = runtime·ifaceeq_c(i1, i2);
+ FLUSH(&ret);
+}
+
+// efaceeq(i1 any, i2 any) (ret bool)
+void
+runtime·efaceeq(Eface e1, Eface e2, bool ret)
+{
+ ret = runtime·efaceeq_c(e1, e2);
+ FLUSH(&ret);
+}
+
+// ifacethash(i1 any) (ret uint32);
+void
+runtime·ifacethash(Iface i1, uint32 ret)
+{
+ Itab *tab;
+
+ ret = 0;
+ tab = i1.tab;
+ if(tab != nil)
+ ret = tab->type->hash;
+ FLUSH(&ret);
+}
+
+// efacethash(e1 any) (ret uint32)
+void
+runtime·efacethash(Eface e1, uint32 ret)
+{
+ Type *t;
+
+ if(((uintptr)e1.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ ret = 0;
+ t = e1.type;
+ if(t != nil)
+ ret = t->hash;
+ FLUSH(&ret);
+}
+
+void
+unsafe·Typeof(Eface e, Eface ret)
+{
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ if(e.type == nil) {
+ ret.type = nil;
+ ret.data = nil;
+ } else {
+ ret = *(Eface*)(e.type);
+ }
+ FLUSH(&ret);
+}
+
+void
+unsafe·Reflect(Eface e, Eface rettype, void *retaddr)
+{
+ uintptr *p;
+ uintptr x;
+
+ if(((uintptr)e.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+ if(e.type == nil) {
+ rettype.type = nil;
+ rettype.data = nil;
+ retaddr = 0;
+ } else {
+ rettype = *(Eface*)e.type;
+ if(e.type->size <= sizeof(uintptr)) {
+ // Copy data into x ...
+ x = 0;
+ runtime·algarray[e.type->alg].copy(e.type->size, &x, &e.data);
+
+ // but then build pointer to x so that Reflect
+ // always returns pointer to data.
+ p = runtime·mal(sizeof(uintptr));
+ *p = x;
+ } else {
+ // Already a pointer, but still make a copy,
+ // to preserve value semantics for interface data.
+ p = runtime·mal(e.type->size);
+ runtime·algarray[e.type->alg].copy(e.type->size, p, e.data);
+ }
+ retaddr = p;
+ }
+ FLUSH(&rettype);
+ FLUSH(&retaddr);
+}
+
+void
+unsafe·Unreflect(Eface typ, void *addr, Eface e)
+{
+ if(((uintptr)typ.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+
+ // Reflect library has reinterpreted typ
+ // as its own kind of type structure.
+ // We know that the pointer to the original
+ // type structure sits before the data pointer.
+ e.type = (Type*)((Eface*)typ.data-1);
+
+ // Interface holds either pointer to data
+ // or copy of original data.
+ if(e.type->size <= sizeof(uintptr))
+ runtime·algarray[e.type->alg].copy(e.type->size, &e.data, addr);
+ else {
+ // Easier: already a pointer to data.
+ // TODO(rsc): Should this make a copy?
+ e.data = addr;
+ }
+
+ FLUSH(&e);
+}
+
+void
+unsafe·New(Eface typ, void *ret)
+{
+ Type *t;
+
+ if(((uintptr)typ.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+
+ // Reflect library has reinterpreted typ
+ // as its own kind of type structure.
+ // We know that the pointer to the original
+ // type structure sits before the data pointer.
+ t = (Type*)((Eface*)typ.data-1);
+
+ if(t->kind&KindNoPointers)
+ ret = runtime·mallocgc(t->size, FlagNoPointers, 1, 1);
+ else
+ ret = runtime·mal(t->size);
+ FLUSH(&ret);
+}
+
+void
+unsafe·NewArray(Eface typ, uint32 n, void *ret)
+{
+ uint64 size;
+ Type *t;
+
+ if(((uintptr)typ.type&reflectFlags) != 0)
+ runtime·throw("invalid interface value");
+
+ // Reflect library has reinterpreted typ
+ // as its own kind of type structure.
+ // We know that the pointer to the original
+ // type structure sits before the data pointer.
+ t = (Type*)((Eface*)typ.data-1);
+
+ size = n*t->size;
+ if(t->kind&KindNoPointers)
+ ret = runtime·mallocgc(size, FlagNoPointers, 1, 1);
+ else
+ ret = runtime·mal(size);
+ FLUSH(&ret);
+}
diff --git a/src/pkg/runtime/linux/386/defs.h b/src/pkg/runtime/linux/386/defs.h
new file mode 100644
index 000000000..73fe23ef9
--- /dev/null
+++ b/src/pkg/runtime/linux/386/defs.h
@@ -0,0 +1,191 @@
+// godefs -f -m32 -f -I/home/rsc/pub/linux-2.6/arch/x86/include -f -I/home/rsc/pub/linux-2.6/include -f -D_LOOSE_KERNEL_NAMES -f -D__ARCH_SI_UID_T=__kernel_uid32_t defs2.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x20,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ SA_RESTART = 0x10000000,
+ SA_ONSTACK = 0x8000000,
+ SA_RESTORER = 0x4000000,
+ SA_SIGINFO = 0x4,
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGBUS = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGUSR1 = 0xa,
+ SIGSEGV = 0xb,
+ SIGUSR2 = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGSTKFLT = 0x10,
+ SIGCHLD = 0x11,
+ SIGCONT = 0x12,
+ SIGSTOP = 0x13,
+ SIGTSTP = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGURG = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGIO = 0x1d,
+ SIGPWR = 0x1e,
+ SIGSYS = 0x1f,
+ FPE_INTDIV = 0x1,
+ FPE_INTOVF = 0x2,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+ ITIMER_REAL = 0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+ O_RDONLY = 0,
+ O_CLOEXEC = 02000000,
+};
+
+// Types
+#pragma pack on
+
+typedef struct Fpreg Fpreg;
+struct Fpreg {
+ uint16 significand[4];
+ uint16 exponent;
+};
+
+typedef struct Fpxreg Fpxreg;
+struct Fpxreg {
+ uint16 significand[4];
+ uint16 exponent;
+ uint16 padding[3];
+};
+
+typedef struct Xmmreg Xmmreg;
+struct Xmmreg {
+ uint32 element[4];
+};
+
+typedef struct Fpstate Fpstate;
+struct Fpstate {
+ uint32 cw;
+ uint32 sw;
+ uint32 tag;
+ uint32 ipoff;
+ uint32 cssel;
+ uint32 dataoff;
+ uint32 datasel;
+ Fpreg _st[8];
+ uint16 status;
+ uint16 magic;
+ uint32 _fxsr_env[6];
+ uint32 mxcsr;
+ uint32 reserved;
+ Fpxreg _fxsr_st[8];
+ Xmmreg _xmm[8];
+ uint32 padding1[44];
+ byte Pad_godefs_0[48];
+};
+
+typedef struct Timespec Timespec;
+struct Timespec {
+ int32 tv_sec;
+ int32 tv_nsec;
+};
+
+typedef struct Timeval Timeval;
+struct Timeval {
+ int32 tv_sec;
+ int32 tv_usec;
+};
+
+typedef struct Sigaction Sigaction;
+struct Sigaction {
+ void *k_sa_handler;
+ uint32 sa_flags;
+ void *sa_restorer;
+ uint32 sa_mask;
+};
+
+typedef struct Siginfo Siginfo;
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ byte _sifields[116];
+};
+
+typedef struct Sigaltstack Sigaltstack;
+struct Sigaltstack {
+ void *ss_sp;
+ int32 ss_flags;
+ uint32 ss_size;
+};
+
+typedef struct Sigcontext Sigcontext;
+struct Sigcontext {
+ uint16 gs;
+ uint16 __gsh;
+ uint16 fs;
+ uint16 __fsh;
+ uint16 es;
+ uint16 __esh;
+ uint16 ds;
+ uint16 __dsh;
+ uint32 edi;
+ uint32 esi;
+ uint32 ebp;
+ uint32 esp;
+ uint32 ebx;
+ uint32 edx;
+ uint32 ecx;
+ uint32 eax;
+ uint32 trapno;
+ uint32 err;
+ uint32 eip;
+ uint16 cs;
+ uint16 __csh;
+ uint32 eflags;
+ uint32 esp_at_signal;
+ uint16 ss;
+ uint16 __ssh;
+ Fpstate *fpstate;
+ uint32 oldmask;
+ uint32 cr2;
+};
+
+typedef struct Ucontext Ucontext;
+struct Ucontext {
+ uint32 uc_flags;
+ Ucontext *uc_link;
+ Sigaltstack uc_stack;
+ Sigcontext uc_mcontext;
+ uint32 uc_sigmask;
+};
+
+typedef struct Itimerval Itimerval;
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+#pragma pack off
diff --git a/src/pkg/runtime/linux/386/rt0.s b/src/pkg/runtime/linux/386/rt0.s
new file mode 100644
index 000000000..223e6d2ea
--- /dev/null
+++ b/src/pkg/runtime/linux/386/rt0.s
@@ -0,0 +1,9 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin and Linux use the same linkage to main
+
+TEXT _rt0_386_linux(SB),7,$0
+ JMP _rt0_386(SB)
+
diff --git a/src/pkg/runtime/linux/386/signal.c b/src/pkg/runtime/linux/386/signal.c
new file mode 100644
index 000000000..8916e10bd
--- /dev/null
+++ b/src/pkg/runtime/linux/386/signal.c
@@ -0,0 +1,184 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "signals.h"
+#include "os.h"
+
+void
+runtime·dumpregs(Sigcontext *r)
+{
+ runtime·printf("eax %x\n", r->eax);
+ runtime·printf("ebx %x\n", r->ebx);
+ runtime·printf("ecx %x\n", r->ecx);
+ runtime·printf("edx %x\n", r->edx);
+ runtime·printf("edi %x\n", r->edi);
+ runtime·printf("esi %x\n", r->esi);
+ runtime·printf("ebp %x\n", r->ebp);
+ runtime·printf("esp %x\n", r->esp);
+ runtime·printf("eip %x\n", r->eip);
+ runtime·printf("eflags %x\n", r->eflags);
+ runtime·printf("cs %x\n", r->cs);
+ runtime·printf("fs %x\n", r->fs);
+ runtime·printf("gs %x\n", r->gs);
+}
+
+/*
+ * This assembler routine takes the args from registers, puts them on the stack,
+ * and calls sighandler().
+ */
+extern void runtime·sigtramp(void);
+extern void runtime·sigignore(void); // just returns
+extern void runtime·sigreturn(void); // calls runtime·sigreturn
+
+String
+runtime·signame(int32 sig)
+{
+ if(sig < 0 || sig >= NSIG)
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp)
+{
+ Ucontext *uc;
+ Sigcontext *r;
+ uintptr *sp;
+
+ uc = context;
+ r = &uc->uc_mcontext;
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)r->eip, (uint8*)r->esp, nil, gp);
+ return;
+ }
+
+ if(gp != nil && (runtime·sigtab[sig].flags & SigPanic)) {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = info->si_code;
+ gp->sigcode1 = ((uintptr*)info)[3];
+ gp->sigpc = r->eip;
+
+ // Only push runtime·sigpanic if r->eip != 0.
+ // If r->eip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
+ // won't get to see who faulted.)
+ if(r->eip != 0) {
+ sp = (uintptr*)r->esp;
+ *--sp = r->eip;
+ r->esp = (uintptr)sp;
+ }
+ r->eip = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
+ return;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG)
+ runtime·printf("Signal %d\n", sig);
+ else
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+
+ runtime·printf("PC=%X\n", r->eip);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->eip, (void*)r->esp, 0, gp);
+ runtime·tracebackothers(gp);
+ runtime·dumpregs(r);
+ }
+
+ runtime·exit(2);
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ Sigaltstack st;
+
+ st.ss_sp = p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ runtime·sigaltstack(&st, nil);
+}
+
+static void
+sigaction(int32 i, void (*fn)(int32, Siginfo*, void*, G*), bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTORER;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask = ~0ULL;
+ sa.sa_restorer = (void*)runtime·sigreturn;
+ if(fn == runtime·sighandler)
+ fn = (void*)runtime·sigtramp;
+ sa.k_sa_handler = fn;
+ runtime·rt_sigaction(i, &sa, nil, 8);
+}
+
+void
+runtime·initsig(int32 queue)
+{
+ int32 i;
+ void *fn;
+
+ runtime·siginit();
+
+ for(i = 0; i<NSIG; i++) {
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
+ continue;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ fn = runtime·sighandler;
+ else
+ fn = runtime·sigignore;
+ sigaction(i, fn, (runtime·sigtab[i].flags & SigRestart) != 0);
+ }
+ }
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ Itimerval it;
+
+ runtime·memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ sigaction(SIGPROF, SIG_IGN, true);
+ } else {
+ sigaction(SIGPROF, runtime·sighandler, true);
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ }
+ m->profilehz = hz;
+}
+
+void
+os·sigpipe(void)
+{
+ sigaction(SIGPIPE, SIG_DFL, false);
+ runtime·raisesigpipe();
+}
diff --git a/src/pkg/runtime/linux/386/sys.s b/src/pkg/runtime/linux/386/sys.s
new file mode 100644
index 000000000..0b4a34986
--- /dev/null
+++ b/src/pkg/runtime/linux/386/sys.s
@@ -0,0 +1,344 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls and other sys.stuff for 386, Linux
+//
+
+#include "386/asm.h"
+
+TEXT runtime·exit(SB),7,$0
+ MOVL $252, AX // syscall number
+ MOVL 4(SP), BX
+ INT $0x80
+ INT $3 // not reached
+ RET
+
+TEXT runtime·exit1(SB),7,$0
+ MOVL $1, AX // exit - exit the current os thread
+ MOVL 4(SP), BX
+ INT $0x80
+ INT $3 // not reached
+ RET
+
+TEXT runtime·open(SB),7,$0
+ MOVL $5, AX // syscall - open
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ INT $0x80
+ RET
+
+TEXT runtime·close(SB),7,$0
+ MOVL $6, AX // syscall - close
+ MOVL 4(SP), BX
+ INT $0x80
+ RET
+
+TEXT runtime·write(SB),7,$0
+ MOVL $4, AX // syscall - write
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ INT $0x80
+ RET
+
+TEXT runtime·read(SB),7,$0
+ MOVL $3, AX // syscall - read
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ INT $0x80
+ RET
+
+TEXT runtime·raisesigpipe(SB),7,$12
+ MOVL $224, AX // syscall - gettid
+ INT $0x80
+ MOVL AX, 0(SP) // arg 1 tid
+ MOVL $13, 4(SP) // arg 2 SIGPIPE
+ MOVL $238, AX // syscall - tkill
+ INT $0x80
+ RET
+
+TEXT runtime·setitimer(SB),7,$0-24
+ MOVL $104, AX // syscall - setitimer
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ INT $0x80
+ RET
+
+TEXT runtime·mincore(SB),7,$0-24
+ MOVL $218, AX // syscall - mincore
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ INT $0x80
+ RET
+
+TEXT runtime·gettime(SB), 7, $32
+ MOVL $78, AX // syscall - gettimeofday
+ LEAL 8(SP), BX
+ MOVL $0, CX
+ MOVL $0, DX
+ INT $0x80
+
+ MOVL 8(SP), BX // sec
+ MOVL sec+0(FP), DI
+ MOVL BX, (DI)
+ MOVL $0, 4(DI) // zero extend 32 -> 64 bits
+
+ MOVL 12(SP), BX // usec
+ MOVL usec+4(FP), DI
+ MOVL BX, (DI)
+ RET
+
+TEXT runtime·rt_sigaction(SB),7,$0
+ MOVL $174, AX // syscall - rt_sigaction
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ MOVL 16(SP), SI
+ INT $0x80
+ RET
+
+TEXT runtime·sigtramp(SB),7,$44
+ get_tls(CX)
+
+ // save g
+ MOVL g(CX), DI
+ MOVL DI, 20(SP)
+
+ // g = m->gsignal
+ MOVL m(CX), BX
+ MOVL m_gsignal(BX), BX
+ MOVL BX, g(CX)
+
+ // copy arguments for call to sighandler
+ MOVL sig+0(FP), BX
+ MOVL BX, 0(SP)
+ MOVL info+4(FP), BX
+ MOVL BX, 4(SP)
+ MOVL context+8(FP), BX
+ MOVL BX, 8(SP)
+ MOVL DI, 12(SP)
+
+ CALL runtime·sighandler(SB)
+
+ // restore g
+ get_tls(CX)
+ MOVL 20(SP), BX
+ MOVL BX, g(CX)
+
+ RET
+
+TEXT runtime·sigignore(SB),7,$0
+ RET
+
+TEXT runtime·sigreturn(SB),7,$0
+ MOVL $173, AX // rt_sigreturn
+ INT $0x80
+ INT $3 // not reached
+ RET
+
+TEXT runtime·mmap(SB),7,$0
+ MOVL $192, AX // mmap2
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ MOVL 16(SP), SI
+ MOVL 20(SP), DI
+ MOVL 24(SP), BP
+ SHRL $12, BP
+ INT $0x80
+ CMPL AX, $0xfffff001
+ JLS 3(PC)
+ NOTL AX
+ INCL AX
+ RET
+
+TEXT runtime·munmap(SB),7,$0
+ MOVL $91, AX // munmap
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ INT $0x80
+ CMPL AX, $0xfffff001
+ JLS 2(PC)
+ INT $3
+ RET
+
+// int32 futex(int32 *uaddr, int32 op, int32 val,
+// struct timespec *timeout, int32 *uaddr2, int32 val2);
+TEXT runtime·futex(SB),7,$0
+ MOVL $240, AX // futex
+ MOVL 4(SP), BX
+ MOVL 8(SP), CX
+ MOVL 12(SP), DX
+ MOVL 16(SP), SI
+ MOVL 20(SP), DI
+ MOVL 24(SP), BP
+ INT $0x80
+ RET
+
+// int32 clone(int32 flags, void *stack, M *m, G *g, void (*fn)(void));
+TEXT runtime·clone(SB),7,$0
+ MOVL $120, AX // clone
+ MOVL flags+4(SP), BX
+ MOVL stack+8(SP), CX
+ MOVL $0, DX // parent tid ptr
+ MOVL $0, DI // child tid ptr
+
+ // Copy m, g, fn off parent stack for use by child.
+ SUBL $16, CX
+ MOVL mm+12(SP), SI
+ MOVL SI, 0(CX)
+ MOVL gg+16(SP), SI
+ MOVL SI, 4(CX)
+ MOVL fn+20(SP), SI
+ MOVL SI, 8(CX)
+ MOVL $1234, 12(CX)
+
+ INT $0x80
+
+ // In parent, return.
+ CMPL AX, $0
+ JEQ 2(PC)
+ RET
+
+ // Paranoia: check that SP is as we expect.
+ MOVL 12(SP), BP
+ CMPL BP, $1234
+ JEQ 2(PC)
+ INT $3
+
+ // Initialize AX to Linux tid
+ MOVL $224, AX
+ INT $0x80
+
+ // In child on new stack. Reload registers (paranoia).
+ MOVL 0(SP), BX // m
+ MOVL 4(SP), DX // g
+ MOVL 8(SP), SI // fn
+
+ MOVL AX, m_procid(BX) // save tid as m->procid
+
+ // set up ldt 7+id to point at m->tls.
+ // newosproc left the id in tls[0].
+ LEAL m_tls(BX), BP
+ MOVL 0(BP), DI
+ ADDL $7, DI // m0 is LDT#7. count up.
+ // setldt(tls#, &tls, sizeof tls)
+ PUSHAL // save registers
+ PUSHL $32 // sizeof tls
+ PUSHL BP // &tls
+ PUSHL DI // tls #
+ CALL runtime·setldt(SB)
+ POPL AX
+ POPL AX
+ POPL AX
+ POPAL
+
+ // Now segment is established. Initialize m, g.
+ get_tls(AX)
+ MOVL DX, g(AX)
+ MOVL BX, m(AX)
+
+ CALL runtime·stackcheck(SB) // smashes AX, CX
+ MOVL 0(DX), DX // paranoia; check they are not nil
+ MOVL 0(BX), BX
+
+ // more paranoia; check that stack splitting code works
+ PUSHAL
+ CALL runtime·emptyfunc(SB)
+ POPAL
+
+ CALL SI // fn()
+ CALL runtime·exit1(SB)
+ MOVL $0x1234, 0x1005
+ RET
+
+TEXT runtime·sigaltstack(SB),7,$-8
+ MOVL $186, AX // sigaltstack
+ MOVL new+4(SP), BX
+ MOVL old+8(SP), CX
+ INT $0x80
+ CMPL AX, $0xfffff001
+ JLS 2(PC)
+ INT $3
+ RET
+
+// <asm-i386/ldt.h>
+// struct user_desc {
+// unsigned int entry_number;
+// unsigned long base_addr;
+// unsigned int limit;
+// unsigned int seg_32bit:1;
+// unsigned int contents:2;
+// unsigned int read_exec_only:1;
+// unsigned int limit_in_pages:1;
+// unsigned int seg_not_present:1;
+// unsigned int useable:1;
+// };
+#define SEG_32BIT 0x01
+// contents are the 2 bits 0x02 and 0x04.
+#define CONTENTS_DATA 0x00
+#define CONTENTS_STACK 0x02
+#define CONTENTS_CODE 0x04
+#define READ_EXEC_ONLY 0x08
+#define LIMIT_IN_PAGES 0x10
+#define SEG_NOT_PRESENT 0x20
+#define USEABLE 0x40
+
+// setldt(int entry, int address, int limit)
+TEXT runtime·setldt(SB),7,$32
+ MOVL entry+0(FP), BX // entry
+ MOVL address+4(FP), CX // base address
+
+ /*
+ * When linking against the system libraries,
+ * we use its pthread_create and let it set up %gs
+ * for us. When we do that, the private storage
+ * we get is not at 0(GS), 4(GS), but -8(GS), -4(GS).
+ * To insulate the rest of the tool chain from this
+ * ugliness, 8l rewrites 0(GS) into -8(GS) for us.
+ * To accommodate that rewrite, we translate
+ * the address here and bump the limit to 0xffffffff (no limit)
+ * so that -8(GS) maps to 0(address).
+ * Also, the final 0(GS) (current 8(CX)) has to point
+ * to itself, to mimic ELF.
+ */
+ ADDL $0x8, CX // address
+ MOVL CX, 0(CX)
+
+ // set up user_desc
+ LEAL 16(SP), AX // struct user_desc
+ MOVL BX, 0(AX)
+ MOVL CX, 4(AX)
+ MOVL $0xfffff, 8(AX)
+ MOVL $(SEG_32BIT|LIMIT_IN_PAGES|USEABLE|CONTENTS_DATA), 12(AX) // flag bits
+
+ // call modify_ldt
+ MOVL $1, BX // func = 1 (write)
+ MOVL AX, CX // user_desc
+ MOVL $16, DX // sizeof(user_desc)
+ MOVL $123, AX // syscall - modify_ldt
+ INT $0x80
+
+ // breakpoint on error
+ CMPL AX, $0xfffff001
+ JLS 2(PC)
+ INT $3
+
+ // compute segment selector - (entry*8+7)
+ MOVL entry+0(FP), AX
+ SHLL $3, AX
+ ADDL $7, AX
+ MOVW AX, GS
+
+ RET
+
+TEXT runtime·osyield(SB),7,$0
+ MOVL $158, AX
+ INT $0x80
+ RET
diff --git a/src/pkg/runtime/linux/amd64/defs.h b/src/pkg/runtime/linux/amd64/defs.h
new file mode 100644
index 000000000..8053dd16f
--- /dev/null
+++ b/src/pkg/runtime/linux/amd64/defs.h
@@ -0,0 +1,236 @@
+// godefs -f -m64 defs.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x20,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ SA_RESTART = 0x10000000,
+ SA_ONSTACK = 0x8000000,
+ SA_RESTORER = 0x4000000,
+ SA_SIGINFO = 0x4,
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGBUS = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGUSR1 = 0xa,
+ SIGSEGV = 0xb,
+ SIGUSR2 = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGSTKFLT = 0x10,
+ SIGCHLD = 0x11,
+ SIGCONT = 0x12,
+ SIGSTOP = 0x13,
+ SIGTSTP = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGURG = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGIO = 0x1d,
+ SIGPWR = 0x1e,
+ SIGSYS = 0x1f,
+ FPE_INTDIV = 0x1,
+ FPE_INTOVF = 0x2,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+ ITIMER_REAL = 0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+ O_RDONLY = 0,
+ O_CLOEXEC = 02000000,
+};
+
+// Types
+#pragma pack on
+
+typedef struct Timespec Timespec;
+struct Timespec {
+ int64 tv_sec;
+ int64 tv_nsec;
+};
+
+typedef struct Timeval Timeval;
+struct Timeval {
+ int64 tv_sec;
+ int64 tv_usec;
+};
+
+typedef struct Sigaction Sigaction;
+struct Sigaction {
+ void *sa_handler;
+ uint64 sa_flags;
+ void *sa_restorer;
+ uint64 sa_mask;
+};
+
+typedef struct Siginfo Siginfo;
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ byte pad_godefs_0[4];
+ byte _sifields[112];
+};
+
+typedef struct Itimerval Itimerval;
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+#pragma pack off
+// godefs -f -m64 defs1.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+
+// Types
+#pragma pack on
+
+typedef struct Usigset Usigset;
+struct Usigset {
+ uint64 __val[16];
+};
+
+typedef struct Fpxreg Fpxreg;
+struct Fpxreg {
+ uint16 significand[4];
+ uint16 exponent;
+ uint16 padding[3];
+};
+
+typedef struct Xmmreg Xmmreg;
+struct Xmmreg {
+ uint32 element[4];
+};
+
+typedef struct Fpstate Fpstate;
+struct Fpstate {
+ uint16 cwd;
+ uint16 swd;
+ uint16 ftw;
+ uint16 fop;
+ uint64 rip;
+ uint64 rdp;
+ uint32 mxcsr;
+ uint32 mxcr_mask;
+ Fpxreg _st[8];
+ Xmmreg _xmm[16];
+ uint32 padding[24];
+};
+
+typedef struct Fpxreg1 Fpxreg1;
+struct Fpxreg1 {
+ uint16 significand[4];
+ uint16 exponent;
+ uint16 padding[3];
+};
+
+typedef struct Xmmreg1 Xmmreg1;
+struct Xmmreg1 {
+ uint32 element[4];
+};
+
+typedef struct Fpstate1 Fpstate1;
+struct Fpstate1 {
+ uint16 cwd;
+ uint16 swd;
+ uint16 ftw;
+ uint16 fop;
+ uint64 rip;
+ uint64 rdp;
+ uint32 mxcsr;
+ uint32 mxcr_mask;
+ Fpxreg1 _st[8];
+ Xmmreg1 _xmm[16];
+ uint32 padding[24];
+};
+
+typedef struct Fpreg1 Fpreg1;
+struct Fpreg1 {
+ uint16 significand[4];
+ uint16 exponent;
+};
+
+typedef struct Sigaltstack Sigaltstack;
+struct Sigaltstack {
+ void *ss_sp;
+ int32 ss_flags;
+ byte pad_godefs_0[4];
+ uint64 ss_size;
+};
+
+typedef struct Mcontext Mcontext;
+struct Mcontext {
+ int64 gregs[23];
+ Fpstate *fpregs;
+ uint64 __reserved1[8];
+};
+
+typedef struct Ucontext Ucontext;
+struct Ucontext {
+ uint64 uc_flags;
+ Ucontext *uc_link;
+ Sigaltstack uc_stack;
+ Mcontext uc_mcontext;
+ Usigset uc_sigmask;
+ Fpstate __fpregs_mem;
+};
+
+typedef struct Sigcontext Sigcontext;
+struct Sigcontext {
+ uint64 r8;
+ uint64 r9;
+ uint64 r10;
+ uint64 r11;
+ uint64 r12;
+ uint64 r13;
+ uint64 r14;
+ uint64 r15;
+ uint64 rdi;
+ uint64 rsi;
+ uint64 rbp;
+ uint64 rbx;
+ uint64 rdx;
+ uint64 rax;
+ uint64 rcx;
+ uint64 rsp;
+ uint64 rip;
+ uint64 eflags;
+ uint16 cs;
+ uint16 gs;
+ uint16 fs;
+ uint16 __pad0;
+ uint64 err;
+ uint64 trapno;
+ uint64 oldmask;
+ uint64 cr2;
+ Fpstate1 *fpstate;
+ uint64 __reserved1[8];
+};
+#pragma pack off
diff --git a/src/pkg/runtime/linux/amd64/rt0.s b/src/pkg/runtime/linux/amd64/rt0.s
new file mode 100644
index 000000000..dac9ae181
--- /dev/null
+++ b/src/pkg/runtime/linux/amd64/rt0.s
@@ -0,0 +1,10 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin and Linux use the same linkage to main
+
+TEXT _rt0_amd64_linux(SB),7,$-8
+ MOVQ $_rt0_amd64(SB), AX
+ MOVQ SP, DI
+ JMP AX
diff --git a/src/pkg/runtime/linux/amd64/signal.c b/src/pkg/runtime/linux/amd64/signal.c
new file mode 100644
index 000000000..ee90271ed
--- /dev/null
+++ b/src/pkg/runtime/linux/amd64/signal.c
@@ -0,0 +1,194 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "signals.h"
+#include "os.h"
+
+void
+runtime·dumpregs(Sigcontext *r)
+{
+ runtime·printf("rax %X\n", r->rax);
+ runtime·printf("rbx %X\n", r->rbx);
+ runtime·printf("rcx %X\n", r->rcx);
+ runtime·printf("rdx %X\n", r->rdx);
+ runtime·printf("rdi %X\n", r->rdi);
+ runtime·printf("rsi %X\n", r->rsi);
+ runtime·printf("rbp %X\n", r->rbp);
+ runtime·printf("rsp %X\n", r->rsp);
+ runtime·printf("r8 %X\n", r->r8 );
+ runtime·printf("r9 %X\n", r->r9 );
+ runtime·printf("r10 %X\n", r->r10);
+ runtime·printf("r11 %X\n", r->r11);
+ runtime·printf("r12 %X\n", r->r12);
+ runtime·printf("r13 %X\n", r->r13);
+ runtime·printf("r14 %X\n", r->r14);
+ runtime·printf("r15 %X\n", r->r15);
+ runtime·printf("rip %X\n", r->rip);
+ runtime·printf("rflags %X\n", r->eflags);
+ runtime·printf("cs %X\n", (uint64)r->cs);
+ runtime·printf("fs %X\n", (uint64)r->fs);
+ runtime·printf("gs %X\n", (uint64)r->gs);
+}
+
+/*
+ * This assembler routine takes the args from registers, puts them on the stack,
+ * and calls sighandler().
+ */
+extern void runtime·sigtramp(void);
+extern void runtime·sigignore(void); // just returns
+extern void runtime·sigreturn(void); // calls runtime·sigreturn
+
+String
+runtime·signame(int32 sig)
+{
+ if(sig < 0 || sig >= NSIG)
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp)
+{
+ Ucontext *uc;
+ Mcontext *mc;
+ Sigcontext *r;
+ uintptr *sp;
+
+ uc = context;
+ mc = &uc->uc_mcontext;
+ r = (Sigcontext*)mc; // same layout, more conveient names
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)r->rip, (uint8*)r->rsp, nil, gp);
+ return;
+ }
+
+ if(gp != nil && (runtime·sigtab[sig].flags & SigPanic)) {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = info->si_code;
+ gp->sigcode1 = ((uintptr*)info)[2];
+ gp->sigpc = r->rip;
+
+ // Only push runtime·sigpanic if r->rip != 0.
+ // If r->rip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
+ // won't get to see who faulted.)
+ if(r->rip != 0) {
+ sp = (uintptr*)r->rsp;
+ *--sp = r->rip;
+ r->rsp = (uintptr)sp;
+ }
+ r->rip = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
+ return;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG)
+ runtime·printf("Signal %d\n", sig);
+ else
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+
+ runtime·printf("PC=%X\n", r->rip);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->rip, (void*)r->rsp, 0, gp);
+ runtime·tracebackothers(gp);
+ runtime·dumpregs(r);
+ }
+
+ runtime·exit(2);
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ Sigaltstack st;
+
+ st.ss_sp = p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ runtime·sigaltstack(&st, nil);
+}
+
+static void
+sigaction(int32 i, void (*fn)(int32, Siginfo*, void*, G*), bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTORER;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask = ~0ULL;
+ sa.sa_restorer = (void*)runtime·sigreturn;
+ if(fn == runtime·sighandler)
+ fn = (void*)runtime·sigtramp;
+ sa.sa_handler = fn;
+ runtime·rt_sigaction(i, &sa, nil, 8);
+}
+
+void
+runtime·initsig(int32 queue)
+{
+ int32 i;
+ void *fn;
+
+ runtime·siginit();
+
+ for(i = 0; i<NSIG; i++) {
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
+ continue;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ fn = runtime·sighandler;
+ else
+ fn = runtime·sigignore;
+ sigaction(i, fn, (runtime·sigtab[i].flags & SigRestart) != 0);
+ }
+ }
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ Itimerval it;
+
+ runtime·memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ sigaction(SIGPROF, SIG_IGN, true);
+ } else {
+ sigaction(SIGPROF, runtime·sighandler, true);
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ }
+ m->profilehz = hz;
+}
+
+void
+os·sigpipe(void)
+{
+ sigaction(SIGPIPE, SIG_DFL, false);
+ runtime·raisesigpipe();
+}
diff --git a/src/pkg/runtime/linux/amd64/sys.s b/src/pkg/runtime/linux/amd64/sys.s
new file mode 100644
index 000000000..8b4dcd921
--- /dev/null
+++ b/src/pkg/runtime/linux/amd64/sys.s
@@ -0,0 +1,252 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls and other sys.stuff for AMD64, Linux
+//
+
+#include "amd64/asm.h"
+
+TEXT runtime·exit(SB),7,$0-8
+ MOVL 8(SP), DI
+ MOVL $231, AX // exitgroup - force all os threads to exit
+ SYSCALL
+ RET
+
+TEXT runtime·exit1(SB),7,$0-8
+ MOVL 8(SP), DI
+ MOVL $60, AX // exit - exit the current os thread
+ SYSCALL
+ RET
+
+TEXT runtime·open(SB),7,$0-16
+ MOVQ 8(SP), DI
+ MOVL 16(SP), SI
+ MOVL 20(SP), DX
+ MOVL $2, AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·close(SB),7,$0-16
+ MOVL 8(SP), DI
+ MOVL $3, AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·write(SB),7,$0-24
+ MOVL 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVL 24(SP), DX
+ MOVL $1, AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·read(SB),7,$0-24
+ MOVL 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVL 24(SP), DX
+ MOVL $0, AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·raisesigpipe(SB),7,$12
+ MOVL $186, AX // syscall - gettid
+ SYSCALL
+ MOVL AX, DI // arg 1 tid
+ MOVL $13, SI // arg 2 SIGPIPE
+ MOVL $200, AX // syscall - tkill
+ SYSCALL
+ RET
+
+TEXT runtime·setitimer(SB),7,$0-24
+ MOVL 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVQ 24(SP), DX
+ MOVL $38, AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·mincore(SB),7,$0-24
+ MOVQ 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVQ 24(SP), DX
+ MOVL $27, AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·gettime(SB), 7, $32
+ LEAQ 8(SP), DI
+ MOVQ $0, SI
+ MOVQ $0xffffffffff600000, AX
+ CALL AX
+
+ MOVQ 8(SP), BX // sec
+ MOVQ sec+0(FP), DI
+ MOVQ BX, (DI)
+
+ MOVL 16(SP), BX // usec
+ MOVQ usec+8(FP), DI
+ MOVL BX, (DI)
+ RET
+
+TEXT runtime·rt_sigaction(SB),7,$0-32
+ MOVL 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVQ 24(SP), DX
+ MOVQ 32(SP), R10
+ MOVL $13, AX // syscall entry
+ SYSCALL
+ RET
+
+TEXT runtime·sigtramp(SB),7,$64
+ get_tls(BX)
+
+ // save g
+ MOVQ g(BX), R10
+ MOVQ R10, 40(SP)
+
+ // g = m->gsignal
+ MOVQ m(BX), BP
+ MOVQ m_gsignal(BP), BP
+ MOVQ BP, g(BX)
+
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ DX, 16(SP)
+ MOVQ R10, 24(SP)
+
+ CALL runtime·sighandler(SB)
+
+ // restore g
+ get_tls(BX)
+ MOVQ 40(SP), R10
+ MOVQ R10, g(BX)
+ RET
+
+TEXT runtime·sigignore(SB),7,$0
+ RET
+
+TEXT runtime·sigreturn(SB),7,$0
+ MOVL $15, AX // rt_sigreturn
+ SYSCALL
+ INT $3 // not reached
+
+TEXT runtime·mmap(SB),7,$0
+ MOVQ 8(SP), DI
+ MOVQ $0, SI
+ MOVQ 16(SP), SI
+ MOVL 24(SP), DX
+ MOVL 28(SP), R10
+ MOVL 32(SP), R8
+ MOVL 36(SP), R9
+
+ MOVL $9, AX // mmap
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 3(PC)
+ NOTQ AX
+ INCQ AX
+ RET
+
+TEXT runtime·munmap(SB),7,$0
+ MOVQ 8(SP), DI
+ MOVQ 16(SP), SI
+ MOVQ $11, AX // munmap
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·notok(SB),7,$0
+ MOVQ $0xf1, BP
+ MOVQ BP, (BP)
+ RET
+
+// int64 futex(int32 *uaddr, int32 op, int32 val,
+// struct timespec *timeout, int32 *uaddr2, int32 val2);
+TEXT runtime·futex(SB),7,$0
+ MOVQ 8(SP), DI
+ MOVL 16(SP), SI
+ MOVL 20(SP), DX
+ MOVQ 24(SP), R10
+ MOVQ 32(SP), R8
+ MOVL 40(SP), R9
+ MOVL $202, AX
+ SYSCALL
+ RET
+
+// int64 clone(int32 flags, void *stack, M *m, G *g, void (*fn)(void));
+TEXT runtime·clone(SB),7,$0
+ MOVL flags+8(SP), DI
+ MOVQ stack+16(SP), SI
+
+ // Copy m, g, fn off parent stack for use by child.
+ // Careful: Linux system call clobbers CX and R11.
+ MOVQ mm+24(SP), R8
+ MOVQ gg+32(SP), R9
+ MOVQ fn+40(SP), R12
+
+ MOVL $56, AX
+ SYSCALL
+
+ // In parent, return.
+ CMPQ AX, $0
+ JEQ 2(PC)
+ RET
+
+ // In child, on new stack.
+ MOVQ SI, SP
+
+ // Initialize m->procid to Linux tid
+ MOVL $186, AX // gettid
+ SYSCALL
+ MOVQ AX, m_procid(R8)
+
+ // Set FS to point at m->tls.
+ LEAQ m_tls(R8), DI
+ CALL runtime·settls(SB)
+
+ // In child, set up new stack
+ get_tls(CX)
+ MOVQ R8, m(CX)
+ MOVQ R9, g(CX)
+ CALL runtime·stackcheck(SB)
+
+ // Call fn
+ CALL R12
+
+ // It shouldn't return. If it does, exit
+ MOVL $111, DI
+ MOVL $60, AX
+ SYSCALL
+ JMP -3(PC) // keep exiting
+
+TEXT runtime·sigaltstack(SB),7,$-8
+ MOVQ new+8(SP), DI
+ MOVQ old+16(SP), SI
+ MOVQ $131, AX
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+// set tls base to DI
+TEXT runtime·settls(SB),7,$32
+ ADDQ $16, DI // ELF wants to use -16(FS), -8(FS)
+
+ MOVQ DI, SI
+ MOVQ $0x1002, DI // ARCH_SET_FS
+ MOVQ $158, AX // arch_prctl
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·osyield(SB),7,$0
+ MOVL $24, AX
+ SYSCALL
+ RET
diff --git a/src/pkg/runtime/linux/arm/defs.h b/src/pkg/runtime/linux/arm/defs.h
new file mode 100644
index 000000000..09b558ed0
--- /dev/null
+++ b/src/pkg/runtime/linux/arm/defs.h
@@ -0,0 +1,149 @@
+// godefs -f-I/usr/src/linux-headers-2.6.26-2-versatile/include defs_arm.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x20,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ SA_RESTART = 0x10000000,
+ SA_ONSTACK = 0x8000000,
+ SA_RESTORER = 0x4000000,
+ SA_SIGINFO = 0x4,
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGBUS = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGUSR1 = 0xa,
+ SIGSEGV = 0xb,
+ SIGUSR2 = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGSTKFLT = 0x10,
+ SIGCHLD = 0x11,
+ SIGCONT = 0x12,
+ SIGSTOP = 0x13,
+ SIGTSTP = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGURG = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGIO = 0x1d,
+ SIGPWR = 0x1e,
+ SIGSYS = 0x1f,
+ FPE_INTDIV = 0x1,
+ FPE_INTOVF = 0x2,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+ ITIMER_REAL = 0,
+ ITIMER_PROF = 0x2,
+ ITIMER_VIRTUAL = 0x1,
+ O_RDONLY = 0,
+ O_CLOEXEC = 02000000,
+};
+
+// Types
+#pragma pack on
+
+typedef uint32 Sigset;
+
+typedef struct Timespec Timespec;
+struct Timespec {
+ int32 tv_sec;
+ int32 tv_nsec;
+};
+
+typedef struct Sigaltstack Sigaltstack;
+struct Sigaltstack {
+ void *ss_sp;
+ int32 ss_flags;
+ uint32 ss_size;
+};
+
+typedef struct Sigcontext Sigcontext;
+struct Sigcontext {
+ uint32 trap_no;
+ uint32 error_code;
+ uint32 oldmask;
+ uint32 arm_r0;
+ uint32 arm_r1;
+ uint32 arm_r2;
+ uint32 arm_r3;
+ uint32 arm_r4;
+ uint32 arm_r5;
+ uint32 arm_r6;
+ uint32 arm_r7;
+ uint32 arm_r8;
+ uint32 arm_r9;
+ uint32 arm_r10;
+ uint32 arm_fp;
+ uint32 arm_ip;
+ uint32 arm_sp;
+ uint32 arm_lr;
+ uint32 arm_pc;
+ uint32 arm_cpsr;
+ uint32 fault_address;
+};
+
+typedef struct Ucontext Ucontext;
+struct Ucontext {
+ uint32 uc_flags;
+ Ucontext *uc_link;
+ Sigaltstack uc_stack;
+ Sigcontext uc_mcontext;
+ uint32 uc_sigmask;
+ int32 __unused[31];
+ uint32 uc_regspace[128];
+};
+
+typedef struct Timeval Timeval;
+struct Timeval {
+ int32 tv_sec;
+ int32 tv_usec;
+};
+
+typedef struct Itimerval Itimerval;
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+
+typedef struct Siginfo Siginfo;
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ uint8 _sifields[4];
+};
+
+typedef struct Sigaction Sigaction;
+struct Sigaction {
+ void *sa_handler;
+ uint32 sa_flags;
+ void *sa_restorer;
+ uint32 sa_mask;
+};
+#pragma pack off
diff --git a/src/pkg/runtime/linux/arm/rt0.s b/src/pkg/runtime/linux/arm/rt0.s
new file mode 100644
index 000000000..8838b4891
--- /dev/null
+++ b/src/pkg/runtime/linux/arm/rt0.s
@@ -0,0 +1,6 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+TEXT _rt0_arm_linux(SB),7,$0
+ B _rt0_arm(SB)
diff --git a/src/pkg/runtime/linux/arm/signal.c b/src/pkg/runtime/linux/arm/signal.c
new file mode 100644
index 000000000..88a84d112
--- /dev/null
+++ b/src/pkg/runtime/linux/arm/signal.c
@@ -0,0 +1,189 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "signals.h"
+#include "os.h"
+
+void
+runtime·dumpregs(Sigcontext *r)
+{
+ runtime·printf("trap %x\n", r->trap_no);
+ runtime·printf("error %x\n", r->error_code);
+ runtime·printf("oldmask %x\n", r->oldmask);
+ runtime·printf("r0 %x\n", r->arm_r0);
+ runtime·printf("r1 %x\n", r->arm_r1);
+ runtime·printf("r2 %x\n", r->arm_r2);
+ runtime·printf("r3 %x\n", r->arm_r3);
+ runtime·printf("r4 %x\n", r->arm_r4);
+ runtime·printf("r5 %x\n", r->arm_r5);
+ runtime·printf("r6 %x\n", r->arm_r6);
+ runtime·printf("r7 %x\n", r->arm_r7);
+ runtime·printf("r8 %x\n", r->arm_r8);
+ runtime·printf("r9 %x\n", r->arm_r9);
+ runtime·printf("r10 %x\n", r->arm_r10);
+ runtime·printf("fp %x\n", r->arm_fp);
+ runtime·printf("ip %x\n", r->arm_ip);
+ runtime·printf("sp %x\n", r->arm_sp);
+ runtime·printf("lr %x\n", r->arm_lr);
+ runtime·printf("pc %x\n", r->arm_pc);
+ runtime·printf("cpsr %x\n", r->arm_cpsr);
+ runtime·printf("fault %x\n", r->fault_address);
+}
+
+/*
+ * This assembler routine takes the args from registers, puts them on the stack,
+ * and calls sighandler().
+ */
+extern void runtime·sigtramp(void);
+extern void runtime·sigignore(void); // just returns
+extern void runtime·sigreturn(void); // calls runtime·sigreturn
+
+String
+runtime·signame(int32 sig)
+{
+ if(sig < 0 || sig >= NSIG)
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp)
+{
+ Ucontext *uc;
+ Sigcontext *r;
+
+ uc = context;
+ r = &uc->uc_mcontext;
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)r->arm_pc, (uint8*)r->arm_sp, (uint8*)r->arm_lr, gp);
+ return;
+ }
+
+ if(gp != nil && (runtime·sigtab[sig].flags & SigPanic)) {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = info->si_code;
+ gp->sigcode1 = r->fault_address;
+ gp->sigpc = r->arm_pc;
+
+ // If this is a leaf function, we do smash LR,
+ // but we're not going back there anyway.
+ // Don't bother smashing if r->arm_pc is 0,
+ // which is probably a call to a nil func: the
+ // old link register is more useful in the stack trace.
+ if(r->arm_pc != 0)
+ r->arm_lr = r->arm_pc;
+ r->arm_pc = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
+ return;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG)
+ runtime·printf("Signal %d\n", sig);
+ else
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+
+ runtime·printf("PC=%x\n", r->arm_pc);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->arm_pc, (void*)r->arm_sp, (void*)r->arm_lr, gp);
+ runtime·tracebackothers(gp);
+ runtime·printf("\n");
+ runtime·dumpregs(r);
+ }
+
+// breakpoint();
+ runtime·exit(2);
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ Sigaltstack st;
+
+ st.ss_sp = p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ runtime·sigaltstack(&st, nil);
+}
+
+static void
+sigaction(int32 i, void (*fn)(int32, Siginfo*, void*, G*), bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTORER;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask = ~0ULL;
+ sa.sa_restorer = (void*)runtime·sigreturn;
+ if(fn == runtime·sighandler)
+ fn = (void*)runtime·sigtramp;
+ sa.sa_handler = fn;
+ runtime·rt_sigaction(i, &sa, nil, 8);
+}
+
+void
+runtime·initsig(int32 queue)
+{
+ int32 i;
+ void *fn;
+
+ runtime·siginit();
+
+ for(i = 0; i<NSIG; i++) {
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
+ continue;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ fn = runtime·sighandler;
+ else
+ fn = runtime·sigignore;
+ sigaction(i, fn, (runtime·sigtab[i].flags & SigRestart) != 0);
+ }
+ }
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ Itimerval it;
+
+ runtime·memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ sigaction(SIGPROF, SIG_IGN, true);
+ } else {
+ sigaction(SIGPROF, runtime·sighandler, true);
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ }
+ m->profilehz = hz;
+}
+
+void
+os·sigpipe(void)
+{
+ sigaction(SIGPIPE, SIG_DFL, false);
+ runtime·raisesigpipe();
+}
diff --git a/src/pkg/runtime/linux/arm/sys.s b/src/pkg/runtime/linux/arm/sys.s
new file mode 100644
index 000000000..8619f0945
--- /dev/null
+++ b/src/pkg/runtime/linux/arm/sys.s
@@ -0,0 +1,319 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls and other sys.stuff for arm, Linux
+//
+
+#include "arm/asm.h"
+
+// OABI
+//#define SYS_BASE 0x00900000
+
+// EABI
+#define SYS_BASE 0x0
+
+#define SYS_exit (SYS_BASE + 1)
+#define SYS_read (SYS_BASE + 3)
+#define SYS_write (SYS_BASE + 4)
+#define SYS_open (SYS_BASE + 5)
+#define SYS_close (SYS_BASE + 6)
+#define SYS_gettimeofday (SYS_BASE + 78)
+#define SYS_clone (SYS_BASE + 120)
+#define SYS_rt_sigreturn (SYS_BASE + 173)
+#define SYS_rt_sigaction (SYS_BASE + 174)
+#define SYS_sigaltstack (SYS_BASE + 186)
+#define SYS_mmap2 (SYS_BASE + 192)
+#define SYS_futex (SYS_BASE + 240)
+#define SYS_exit_group (SYS_BASE + 248)
+#define SYS_munmap (SYS_BASE + 91)
+#define SYS_setitimer (SYS_BASE + 104)
+#define SYS_mincore (SYS_BASE + 219)
+#define SYS_gettid (SYS_BASE + 224)
+#define SYS_tkill (SYS_BASE + 238)
+#define SYS_sched_yield (SYS_BASE + 158)
+
+#define ARM_BASE (SYS_BASE + 0x0f0000)
+#define SYS_ARM_cacheflush (ARM_BASE + 2)
+
+TEXT runtime·open(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW 8(FP), R2
+ MOVW $SYS_open, R7
+ SWI $0
+ RET
+
+TEXT runtime·close(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW $SYS_close, R7
+ SWI $0
+ RET
+
+TEXT runtime·write(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW 8(FP), R2
+ MOVW $SYS_write, R7
+ SWI $0
+ RET
+
+TEXT runtime·read(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW 8(FP), R2
+ MOVW $SYS_read, R7
+ SWI $0
+ RET
+
+TEXT runtime·exit(SB),7,$-4
+ MOVW 0(FP), R0
+ MOVW $SYS_exit_group, R7
+ SWI $0
+ MOVW $1234, R0
+ MOVW $1002, R1
+ MOVW R0, (R1) // fail hard
+
+TEXT runtime·exit1(SB),7,$-4
+ MOVW 0(FP), R0
+ MOVW $SYS_exit, R7
+ SWI $0
+ MOVW $1234, R0
+ MOVW $1003, R1
+ MOVW R0, (R1) // fail hard
+
+TEXT runtime·raisesigpipe(SB),7,$-4
+ MOVW $SYS_gettid, R7
+ SWI $0
+ // arg 1 tid already in R0 from gettid
+ MOVW $13, R1 // arg 2 SIGPIPE
+ MOVW $SYS_tkill, R7
+ SWI $0
+ RET
+
+TEXT runtime·mmap(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW 8(FP), R2
+ MOVW 12(FP), R3
+ MOVW 16(FP), R4
+ MOVW 20(FP), R5
+ MOVW $SYS_mmap2, R7
+ SWI $0
+ RET
+
+TEXT runtime·munmap(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW $SYS_munmap, R7
+ SWI $0
+ RET
+
+TEXT runtime·setitimer(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW 8(FP), R2
+ MOVW $SYS_setitimer, R7
+ SWI $0
+ RET
+
+TEXT runtime·mincore(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW 8(FP), R2
+ MOVW $SYS_mincore, R7
+ SWI $0
+ RET
+
+TEXT runtime·gettime(SB),7,$32
+ /* dummy version - return 0,0 */
+ MOVW $0, R1
+ MOVW 0(FP), R0
+ MOVW R1, 0(R0)
+ MOVW R1, 4(R0)
+ MOVW 4(FP), R0
+ MOVW R1, 0(R0)
+
+/*
+ attempt at real version - seg faults
+
+ MOVW $8(SP), R0
+ MOVW $0, R1
+ MOVW $SYS_gettimeofday, R7
+ SWI $0
+
+ MOVW 0(FP), R0 // sec
+ MOVW 8(SP), R1
+ MOVW R1, 0(R0)
+
+ MOVW 4(FP), R0 // usec
+ MOVW 12(SP), R1
+ MOVW R1, 0(R0)
+*/
+ RET
+
+// int32 futex(int32 *uaddr, int32 op, int32 val,
+// struct timespec *timeout, int32 *uaddr2, int32 val2);
+TEXT runtime·futex(SB),7,$0
+ MOVW 4(SP), R0
+ MOVW 8(SP), R1
+ MOVW 12(SP), R2
+ MOVW 16(SP), R3
+ MOVW 20(SP), R4
+ MOVW 24(SP), R5
+ MOVW $SYS_futex, R7
+ SWI $0
+ RET
+
+
+// int32 clone(int32 flags, void *stack, M *m, G *g, void (*fn)(void));
+TEXT runtime·clone(SB),7,$0
+ MOVW flags+0(FP), R0
+ MOVW stack+4(FP), R1
+ MOVW $0, R2 // parent tid ptr
+ MOVW $0, R3 // tls_val
+ MOVW $0, R4 // child tid ptr
+ MOVW $0, R5
+
+ // Copy m, g, fn off parent stack for use by child.
+ // TODO(kaib): figure out which registers are clobbered by clone and avoid stack copying
+ MOVW $-16(R1), R1
+ MOVW mm+8(FP), R6
+ MOVW R6, 0(R1)
+ MOVW gg+12(FP), R6
+ MOVW R6, 4(R1)
+ MOVW fn+16(FP), R6
+ MOVW R6, 8(R1)
+ MOVW $1234, R6
+ MOVW R6, 12(R1)
+
+ MOVW $SYS_clone, R7
+ SWI $0
+
+ // In parent, return.
+ CMP $0, R0
+ BEQ 2(PC)
+ RET
+
+ // Paranoia: check that SP is as we expect. Use R13 to avoid linker 'fixup'
+ MOVW 12(R13), R0
+ MOVW $1234, R1
+ CMP R0, R1
+ BEQ 2(PC)
+ BL runtime·abort(SB)
+
+ MOVW 0(R13), m
+ MOVW 4(R13), g
+
+ // paranoia; check they are not nil
+ MOVW 0(m), R0
+ MOVW 0(g), R0
+
+ BL runtime·emptyfunc(SB) // fault if stack check is wrong
+
+ // Initialize m->procid to Linux tid
+ MOVW $SYS_gettid, R7
+ SWI $0
+ MOVW R0, m_procid(m)
+
+ // Call fn
+ MOVW 8(R13), R0
+ MOVW $16(R13), R13
+ BL (R0)
+
+ MOVW $0, R0
+ MOVW R0, 4(R13)
+ BL runtime·exit1(SB)
+
+ // It shouldn't return
+ MOVW $1234, R0
+ MOVW $1005, R1
+ MOVW R0, (R1)
+
+
+TEXT runtime·cacheflush(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW $0, R2
+ MOVW $SYS_ARM_cacheflush, R7
+ SWI $0
+ RET
+
+TEXT runtime·sigaltstack(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW $SYS_sigaltstack, R7
+ SWI $0
+ RET
+
+TEXT runtime·sigignore(SB),7,$0
+ RET
+
+TEXT runtime·sigtramp(SB),7,$24
+ // save g
+ MOVW g, R3
+ MOVW g, 20(R13)
+
+ // g = m->gsignal
+ MOVW m_gsignal(m), g
+
+ // copy arguments for call to sighandler
+ MOVW R0, 4(R13)
+ MOVW R1, 8(R13)
+ MOVW R2, 12(R13)
+ MOVW R3, 16(R13)
+
+ BL runtime·sighandler(SB)
+
+ // restore g
+ MOVW 20(R13), g
+
+ RET
+
+TEXT runtime·rt_sigaction(SB),7,$0
+ MOVW 0(FP), R0
+ MOVW 4(FP), R1
+ MOVW 8(FP), R2
+ MOVW 12(FP), R3
+ MOVW $SYS_rt_sigaction, R7
+ SWI $0
+ RET
+
+TEXT runtime·sigreturn(SB),7,$0
+ MOVW $SYS_rt_sigreturn, R7
+ SWI $0
+ RET
+
+// Use kernel version instead of native armcas in ../../arm.s.
+// See ../../../sync/atomic/asm_linux_arm.s for details.
+TEXT cas<>(SB),7,$0
+ MOVW $0xffff0fc0, PC
+
+TEXT runtime·cas(SB),7,$0
+ MOVW valptr+0(FP), R2
+ MOVW old+4(FP), R0
+casagain:
+ MOVW new+8(FP), R1
+ BL cas<>(SB)
+ BCC cascheck
+ MOVW $1, R0
+ RET
+cascheck:
+ // Kernel lies; double-check.
+ MOVW valptr+0(FP), R2
+ MOVW old+4(FP), R0
+ MOVW 0(R2), R3
+ CMP R0, R3
+ BEQ casagain
+ MOVW $0, R0
+ RET
+
+
+TEXT runtime·casp(SB),7,$0
+ B runtime·cas(SB)
+
+TEXT runtime·osyield(SB),7,$0
+ MOVW $SYS_sched_yield, R7
+ SWI $0
+ RET
diff --git a/src/pkg/runtime/linux/defs.c b/src/pkg/runtime/linux/defs.c
new file mode 100644
index 000000000..5dda78789
--- /dev/null
+++ b/src/pkg/runtime/linux/defs.c
@@ -0,0 +1,95 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Input to godefs
+ godefs -f -m64 defs.c >amd64/defs.h
+ godefs -f -m64 defs1.c >>amd64/defs.h
+ */
+
+// Linux glibc and Linux kernel define different and conflicting
+// definitions for struct sigaction, struct timespec, etc.
+// We want the kernel ones, which are in the asm/* headers.
+// But then we'd get conflicts when we include the system
+// headers for things like ucontext_t, so that happens in
+// a separate file, defs1.c.
+
+#include <asm/posix_types.h>
+#define size_t __kernel_size_t
+#include <asm/signal.h>
+#include <asm/siginfo.h>
+#include <asm/mman.h>
+
+enum {
+ $PROT_NONE = PROT_NONE,
+ $PROT_READ = PROT_READ,
+ $PROT_WRITE = PROT_WRITE,
+ $PROT_EXEC = PROT_EXEC,
+
+ $MAP_ANON = MAP_ANONYMOUS,
+ $MAP_PRIVATE = MAP_PRIVATE,
+ $MAP_FIXED = MAP_FIXED,
+
+ $SA_RESTART = SA_RESTART,
+ $SA_ONSTACK = SA_ONSTACK,
+ $SA_RESTORER = SA_RESTORER,
+ $SA_SIGINFO = SA_SIGINFO,
+
+ $SIGHUP = SIGHUP,
+ $SIGINT = SIGINT,
+ $SIGQUIT = SIGQUIT,
+ $SIGILL = SIGILL,
+ $SIGTRAP = SIGTRAP,
+ $SIGABRT = SIGABRT,
+ $SIGBUS = SIGBUS,
+ $SIGFPE = SIGFPE,
+ $SIGKILL = SIGKILL,
+ $SIGUSR1 = SIGUSR1,
+ $SIGSEGV = SIGSEGV,
+ $SIGUSR2 = SIGUSR2,
+ $SIGPIPE = SIGPIPE,
+ $SIGALRM = SIGALRM,
+ $SIGSTKFLT = SIGSTKFLT,
+ $SIGCHLD = SIGCHLD,
+ $SIGCONT = SIGCONT,
+ $SIGSTOP = SIGSTOP,
+ $SIGTSTP = SIGTSTP,
+ $SIGTTIN = SIGTTIN,
+ $SIGTTOU = SIGTTOU,
+ $SIGURG = SIGURG,
+ $SIGXCPU = SIGXCPU,
+ $SIGXFSZ = SIGXFSZ,
+ $SIGVTALRM = SIGVTALRM,
+ $SIGPROF = SIGPROF,
+ $SIGWINCH = SIGWINCH,
+ $SIGIO = SIGIO,
+ $SIGPWR = SIGPWR,
+ $SIGSYS = SIGSYS,
+
+ $FPE_INTDIV = FPE_INTDIV,
+ $FPE_INTOVF = FPE_INTOVF,
+ $FPE_FLTDIV = FPE_FLTDIV,
+ $FPE_FLTOVF = FPE_FLTOVF,
+ $FPE_FLTUND = FPE_FLTUND,
+ $FPE_FLTRES = FPE_FLTRES,
+ $FPE_FLTINV = FPE_FLTINV,
+ $FPE_FLTSUB = FPE_FLTSUB,
+
+ $BUS_ADRALN = BUS_ADRALN,
+ $BUS_ADRERR = BUS_ADRERR,
+ $BUS_OBJERR = BUS_OBJERR,
+
+ $SEGV_MAPERR = SEGV_MAPERR,
+ $SEGV_ACCERR = SEGV_ACCERR,
+
+ $ITIMER_REAL = ITIMER_REAL,
+ $ITIMER_VIRTUAL = ITIMER_VIRTUAL,
+ $ITIMER_PROF = ITIMER_PROF,
+};
+
+typedef struct timespec $Timespec;
+typedef struct timeval $Timeval;
+typedef struct sigaction $Sigaction;
+typedef siginfo_t $Siginfo;
+typedef struct itimerval $Itimerval;
diff --git a/src/pkg/runtime/linux/defs1.c b/src/pkg/runtime/linux/defs1.c
new file mode 100644
index 000000000..e737f8e9e
--- /dev/null
+++ b/src/pkg/runtime/linux/defs1.c
@@ -0,0 +1,24 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Input to godefs
+ godefs -f -m64 defs.c >amd64/defs.h
+ godefs -f -m64 defs1.c >>amd64/defs.h
+ */
+
+#include <ucontext.h>
+
+typedef __sigset_t $Usigset;
+typedef struct _libc_fpxreg $Fpxreg;
+typedef struct _libc_xmmreg $Xmmreg;
+typedef struct _libc_fpstate $Fpstate;
+typedef struct _fpxreg $Fpxreg1;
+typedef struct _xmmreg $Xmmreg1;
+typedef struct _fpstate $Fpstate1;
+typedef struct _fpreg $Fpreg1;
+typedef struct sigaltstack $Sigaltstack;
+typedef mcontext_t $Mcontext;
+typedef ucontext_t $Ucontext;
+typedef struct sigcontext $Sigcontext;
diff --git a/src/pkg/runtime/linux/defs2.c b/src/pkg/runtime/linux/defs2.c
new file mode 100644
index 000000000..ff641fff2
--- /dev/null
+++ b/src/pkg/runtime/linux/defs2.c
@@ -0,0 +1,120 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Input to godefs
+ godefs -f -m32 \
+ -f -I/home/rsc/pub/linux-2.6/arch/x86/include \
+ -f -I/home/rsc/pub/linux-2.6/include \
+ -f -D_LOOSE_KERNEL_NAMES \
+ -f -D__ARCH_SI_UID_T'='__kernel_uid32_t \
+ defs2.c >386/defs.h
+
+ * The asm header tricks we have to use for Linux on amd64
+ * (see defs.c and defs1.c) don't work here, so this is yet another
+ * file. Sigh.
+ */
+
+#include <asm/signal.h>
+#include <asm/mman.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+#include <asm/siginfo.h>
+
+/*
+#include <sys/signal.h>
+#include <sys/mman.h>
+#include <ucontext.h>
+*/
+
+/* This is the sigaction structure from the Linux 2.1.68 kernel which
+ is used with the rt_sigaction system call. For 386 this is not
+ defined in any public header file. */
+
+struct kernel_sigaction {
+ __sighandler_t k_sa_handler;
+ unsigned long sa_flags;
+ void (*sa_restorer) (void);
+ sigset_t sa_mask;
+};
+
+enum {
+ $PROT_NONE = PROT_NONE,
+ $PROT_READ = PROT_READ,
+ $PROT_WRITE = PROT_WRITE,
+ $PROT_EXEC = PROT_EXEC,
+
+ $MAP_ANON = MAP_ANONYMOUS,
+ $MAP_PRIVATE = MAP_PRIVATE,
+ $MAP_FIXED = MAP_FIXED,
+
+ $SA_RESTART = SA_RESTART,
+ $SA_ONSTACK = SA_ONSTACK,
+ $SA_RESTORER = SA_RESTORER,
+ $SA_SIGINFO = SA_SIGINFO,
+
+ $SIGHUP = SIGHUP,
+ $SIGINT = SIGINT,
+ $SIGQUIT = SIGQUIT,
+ $SIGILL = SIGILL,
+ $SIGTRAP = SIGTRAP,
+ $SIGABRT = SIGABRT,
+ $SIGBUS = SIGBUS,
+ $SIGFPE = SIGFPE,
+ $SIGKILL = SIGKILL,
+ $SIGUSR1 = SIGUSR1,
+ $SIGSEGV = SIGSEGV,
+ $SIGUSR2 = SIGUSR2,
+ $SIGPIPE = SIGPIPE,
+ $SIGALRM = SIGALRM,
+ $SIGSTKFLT = SIGSTKFLT,
+ $SIGCHLD = SIGCHLD,
+ $SIGCONT = SIGCONT,
+ $SIGSTOP = SIGSTOP,
+ $SIGTSTP = SIGTSTP,
+ $SIGTTIN = SIGTTIN,
+ $SIGTTOU = SIGTTOU,
+ $SIGURG = SIGURG,
+ $SIGXCPU = SIGXCPU,
+ $SIGXFSZ = SIGXFSZ,
+ $SIGVTALRM = SIGVTALRM,
+ $SIGPROF = SIGPROF,
+ $SIGWINCH = SIGWINCH,
+ $SIGIO = SIGIO,
+ $SIGPWR = SIGPWR,
+ $SIGSYS = SIGSYS,
+
+ $FPE_INTDIV = FPE_INTDIV,
+ $FPE_INTOVF = FPE_INTOVF,
+ $FPE_FLTDIV = FPE_FLTDIV,
+ $FPE_FLTOVF = FPE_FLTOVF,
+ $FPE_FLTUND = FPE_FLTUND,
+ $FPE_FLTRES = FPE_FLTRES,
+ $FPE_FLTINV = FPE_FLTINV,
+ $FPE_FLTSUB = FPE_FLTSUB,
+
+ $BUS_ADRALN = BUS_ADRALN,
+ $BUS_ADRERR = BUS_ADRERR,
+ $BUS_OBJERR = BUS_OBJERR,
+
+ $SEGV_MAPERR = SEGV_MAPERR,
+ $SEGV_ACCERR = SEGV_ACCERR,
+
+ $ITIMER_REAL = ITIMER_REAL,
+ $ITIMER_VIRTUAL = ITIMER_VIRTUAL,
+ $ITIMER_PROF = ITIMER_PROF,
+};
+
+typedef struct _fpreg $Fpreg;
+typedef struct _fpxreg $Fpxreg;
+typedef struct _xmmreg $Xmmreg;
+typedef struct _fpstate $Fpstate;
+typedef struct timespec $Timespec;
+typedef struct timeval $Timeval;
+typedef struct kernel_sigaction $Sigaction;
+typedef siginfo_t $Siginfo;
+typedef struct sigaltstack $Sigaltstack;
+typedef struct sigcontext $Sigcontext;
+typedef struct ucontext $Ucontext;
+typedef struct itimerval $Itimerval;
diff --git a/src/pkg/runtime/linux/defs_arm.c b/src/pkg/runtime/linux/defs_arm.c
new file mode 100644
index 000000000..1f935046e
--- /dev/null
+++ b/src/pkg/runtime/linux/defs_arm.c
@@ -0,0 +1,122 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Input to godefs
+ * On a Debian Lenny arm linux distribution:
+ godefs -f-I/usr/src/linux-headers-2.6.26-2-versatile/include defs_arm.c
+ */
+
+#define __ARCH_SI_UID_T int
+
+#include <asm/signal.h>
+#include <asm/mman.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+#include <asm/siginfo.h>
+#include <linux/time.h>
+
+/*
+#include <sys/signal.h>
+#include <sys/mman.h>
+#include <ucontext.h>
+*/
+
+enum {
+ $PROT_NONE = PROT_NONE,
+ $PROT_READ = PROT_READ,
+ $PROT_WRITE = PROT_WRITE,
+ $PROT_EXEC = PROT_EXEC,
+
+ $MAP_ANON = MAP_ANONYMOUS,
+ $MAP_PRIVATE = MAP_PRIVATE,
+ $MAP_FIXED = MAP_FIXED,
+
+ $SA_RESTART = SA_RESTART,
+ $SA_ONSTACK = SA_ONSTACK,
+ $SA_RESTORER = SA_RESTORER,
+ $SA_SIGINFO = SA_SIGINFO,
+
+ $SIGHUP = SIGHUP,
+ $SIGINT = SIGINT,
+ $SIGQUIT = SIGQUIT,
+ $SIGILL = SIGILL,
+ $SIGTRAP = SIGTRAP,
+ $SIGABRT = SIGABRT,
+ $SIGBUS = SIGBUS,
+ $SIGFPE = SIGFPE,
+ $SIGKILL = SIGKILL,
+ $SIGUSR1 = SIGUSR1,
+ $SIGSEGV = SIGSEGV,
+ $SIGUSR2 = SIGUSR2,
+ $SIGPIPE = SIGPIPE,
+ $SIGALRM = SIGALRM,
+ $SIGSTKFLT = SIGSTKFLT,
+ $SIGCHLD = SIGCHLD,
+ $SIGCONT = SIGCONT,
+ $SIGSTOP = SIGSTOP,
+ $SIGTSTP = SIGTSTP,
+ $SIGTTIN = SIGTTIN,
+ $SIGTTOU = SIGTTOU,
+ $SIGURG = SIGURG,
+ $SIGXCPU = SIGXCPU,
+ $SIGXFSZ = SIGXFSZ,
+ $SIGVTALRM = SIGVTALRM,
+ $SIGPROF = SIGPROF,
+ $SIGWINCH = SIGWINCH,
+ $SIGIO = SIGIO,
+ $SIGPWR = SIGPWR,
+ $SIGSYS = SIGSYS,
+
+ $FPE_INTDIV = FPE_INTDIV & 0xFFFF,
+ $FPE_INTOVF = FPE_INTOVF & 0xFFFF,
+ $FPE_FLTDIV = FPE_FLTDIV & 0xFFFF,
+ $FPE_FLTOVF = FPE_FLTOVF & 0xFFFF,
+ $FPE_FLTUND = FPE_FLTUND & 0xFFFF,
+ $FPE_FLTRES = FPE_FLTRES & 0xFFFF,
+ $FPE_FLTINV = FPE_FLTINV & 0xFFFF,
+ $FPE_FLTSUB = FPE_FLTSUB & 0xFFFF,
+
+ $BUS_ADRALN = BUS_ADRALN & 0xFFFF,
+ $BUS_ADRERR = BUS_ADRERR & 0xFFFF,
+ $BUS_OBJERR = BUS_OBJERR & 0xFFFF,
+
+ $SEGV_MAPERR = SEGV_MAPERR & 0xFFFF,
+ $SEGV_ACCERR = SEGV_ACCERR & 0xFFFF,
+
+ $ITIMER_REAL = ITIMER_REAL,
+ $ITIMER_PROF = ITIMER_PROF,
+ $ITIMER_VIRTUAL = ITIMER_VIRTUAL,
+};
+
+typedef sigset_t $Sigset;
+typedef struct timespec $Timespec;
+typedef struct sigaltstack $Sigaltstack;
+typedef struct sigcontext $Sigcontext;
+typedef struct ucontext $Ucontext;
+typedef struct timeval $Timeval;
+typedef struct itimerval $Itimerval;
+
+struct xsiginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+ char _sifields[4];
+};
+
+typedef struct xsiginfo $Siginfo;
+
+#undef sa_handler
+#undef sa_flags
+#undef sa_restorer
+#undef sa_mask
+
+struct xsigaction {
+ void (*sa_handler)(void);
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ unsigned int sa_mask; /* mask last for extensibility */
+};
+
+typedef struct xsigaction $Sigaction;
diff --git a/src/pkg/runtime/linux/mem.c b/src/pkg/runtime/linux/mem.c
new file mode 100644
index 000000000..ad0fac6d3
--- /dev/null
+++ b/src/pkg/runtime/linux/mem.c
@@ -0,0 +1,113 @@
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "malloc.h"
+
+enum
+{
+ ENOMEM = 12,
+};
+
+static int32
+addrspace_free(void *v, uintptr n)
+{
+ uintptr page_size = 4096;
+ uintptr off;
+ int8 one_byte;
+
+ for(off = 0; off < n; off += page_size) {
+ int32 errval = runtime·mincore((int8 *)v + off, page_size, (void *)&one_byte);
+ // errval is 0 if success, or -(error_code) if error.
+ if (errval == 0 || errval != -ENOMEM)
+ return 0;
+ }
+ USED(v);
+ USED(n);
+ return 1;
+}
+
+
+void*
+runtime·SysAlloc(uintptr n)
+{
+ void *p;
+
+ mstats.sys += n;
+ p = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p < (void*)4096) {
+ if(p == (void*)EACCES) {
+ runtime·printf("runtime: mmap: access denied\n");
+ runtime·printf("if you're running SELinux, enable execmem for this process.\n");
+ runtime·exit(2);
+ }
+ return nil;
+ }
+ return p;
+}
+
+void
+runtime·SysUnused(void *v, uintptr n)
+{
+ USED(v);
+ USED(n);
+ // TODO(rsc): call madvise MADV_DONTNEED
+}
+
+void
+runtime·SysFree(void *v, uintptr n)
+{
+ mstats.sys -= n;
+ runtime·munmap(v, n);
+}
+
+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+ void *p;
+
+ // On 64-bit, people with ulimit -v set complain if we reserve too
+ // much address space. Instead, assume that the reservation is okay
+ // and check the assumption in SysMap.
+ if(sizeof(void*) == 8)
+ return v;
+
+ p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p < (void*)4096) {
+ return nil;
+ }
+ return p;
+}
+
+void
+runtime·SysMap(void *v, uintptr n)
+{
+ void *p;
+
+ mstats.sys += n;
+
+ // On 64-bit, we don't actually have v reserved, so tread carefully.
+ if(sizeof(void*) == 8) {
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p != v && addrspace_free(v, n)) {
+ // On some systems, mmap ignores v without
+ // MAP_FIXED, so retry if the address space is free.
+ if(p > (void*)4096) {
+ runtime·munmap(p, n);
+ }
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+ }
+ if(p == (void*)ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v) {
+ runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
+ runtime·throw("runtime: address space conflict");
+ }
+ return;
+ }
+
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+ if(p == (void*)ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v)
+ runtime·throw("runtime: cannot map pages in arena address space");
+}
diff --git a/src/pkg/runtime/linux/os.h b/src/pkg/runtime/linux/os.h
new file mode 100644
index 000000000..0bb8d0339
--- /dev/null
+++ b/src/pkg/runtime/linux/os.h
@@ -0,0 +1,19 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define SIG_DFL ((void*)0)
+#define SIG_IGN ((void*)1)
+
+// Linux-specific system calls
+int32 runtime·futex(uint32*, int32, uint32, Timespec*, uint32*, uint32);
+int32 runtime·clone(int32, void*, M*, G*, void(*)(void));
+
+struct Sigaction;
+void runtime·rt_sigaction(uintptr, struct Sigaction*, void*, uintptr);
+
+void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
+void runtime·sigpanic(void);
+void runtime·setitimer(int32, Itimerval*, Itimerval*);
+
+void runtime·raisesigpipe(void);
diff --git a/src/pkg/runtime/linux/signals.h b/src/pkg/runtime/linux/signals.h
new file mode 100644
index 000000000..919b80ea2
--- /dev/null
+++ b/src/pkg/runtime/linux/signals.h
@@ -0,0 +1,51 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define C SigCatch
+#define I SigIgnore
+#define R SigRestart
+#define Q SigQueue
+#define P SigPanic
+
+SigTab runtime·sigtab[] = {
+ /* 0 */ 0, "SIGNONE: no trap",
+ /* 1 */ Q+R, "SIGHUP: terminal line hangup",
+ /* 2 */ Q+R, "SIGINT: interrupt",
+ /* 3 */ C, "SIGQUIT: quit",
+ /* 4 */ C+P, "SIGILL: illegal instruction",
+ /* 5 */ C, "SIGTRAP: trace trap",
+ /* 6 */ C, "SIGABRT: abort",
+ /* 7 */ C+P, "SIGBUS: bus error",
+ /* 8 */ C+P, "SIGFPE: floating-point exception",
+ /* 9 */ 0, "SIGKILL: kill",
+ /* 10 */ Q+I+R, "SIGUSR1: user-defined signal 1",
+ /* 11 */ C+P, "SIGSEGV: segmentation violation",
+ /* 12 */ Q+I+R, "SIGUSR2: user-defined signal 2",
+ /* 13 */ I, "SIGPIPE: write to broken pipe",
+ /* 14 */ Q+I+R, "SIGALRM: alarm clock",
+ /* 15 */ Q+R, "SIGTERM: termination",
+ /* 16 */ C, "SIGSTKFLT: stack fault",
+ /* 17 */ Q+I+R, "SIGCHLD: child status has changed",
+ /* 18 */ 0, "SIGCONT: continue",
+ /* 19 */ 0, "SIGSTOP: stop, unblockable",
+ /* 20 */ Q+I+R, "SIGTSTP: keyboard stop",
+ /* 21 */ Q+I+R, "SIGTTIN: background read from tty",
+ /* 22 */ Q+I+R, "SIGTTOU: background write to tty",
+ /* 23 */ Q+I+R, "SIGURG: urgent condition on socket",
+ /* 24 */ Q+I+R, "SIGXCPU: cpu limit exceeded",
+ /* 25 */ Q+I+R, "SIGXFSZ: file size limit exceeded",
+ /* 26 */ Q+I+R, "SIGVTALRM: virtual alarm clock",
+ /* 27 */ Q+I+R, "SIGPROF: profiling alarm clock",
+ /* 28 */ Q+I+R, "SIGWINCH: window size change",
+ /* 29 */ Q+I+R, "SIGIO: i/o now possible",
+ /* 30 */ Q+I+R, "SIGPWR: power failure restart",
+ /* 31 */ C, "SIGSYS: bad system call",
+};
+#undef C
+#undef I
+#undef R
+#undef Q
+#undef P
+
+#define NSIG 32
diff --git a/src/pkg/runtime/linux/thread.c b/src/pkg/runtime/linux/thread.c
new file mode 100644
index 000000000..4878a00f2
--- /dev/null
+++ b/src/pkg/runtime/linux/thread.c
@@ -0,0 +1,320 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "stack.h"
+
+extern SigTab runtime·sigtab[];
+static int32 proccount;
+
+int32 runtime·open(uint8*, int32, int32);
+int32 runtime·close(int32);
+int32 runtime·read(int32, void*, int32);
+
+// Linux futex.
+//
+// futexsleep(uint32 *addr, uint32 val)
+// futexwakeup(uint32 *addr)
+//
+// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
+// Futexwakeup wakes up threads sleeping on addr.
+// Futexsleep is allowed to wake up spuriously.
+
+enum
+{
+ MUTEX_UNLOCKED = 0,
+ MUTEX_LOCKED = 1,
+ MUTEX_SLEEPING = 2,
+
+ ACTIVE_SPIN = 4,
+ ACTIVE_SPIN_CNT = 30,
+ PASSIVE_SPIN = 1,
+
+ FUTEX_WAIT = 0,
+ FUTEX_WAKE = 1,
+
+ EINTR = 4,
+ EAGAIN = 11,
+};
+
+// TODO(rsc): I tried using 1<<40 here but futex woke up (-ETIMEDOUT).
+// I wonder if the timespec that gets to the kernel
+// actually has two 32-bit numbers in it, so that
+// a 64-bit 1<<40 ends up being 0 seconds,
+// 1<<8 nanoseconds.
+static Timespec longtime =
+{
+ 1<<30, // 34 years
+ 0
+};
+
+// Atomically,
+// if(*addr == val) sleep
+// Might be woken up spuriously; that's allowed.
+static void
+futexsleep(uint32 *addr, uint32 val)
+{
+ // Some Linux kernels have a bug where futex of
+ // FUTEX_WAIT returns an internal error code
+ // as an errno. Libpthread ignores the return value
+ // here, and so can we: as it says a few lines up,
+ // spurious wakeups are allowed.
+ runtime·futex(addr, FUTEX_WAIT, val, &longtime, nil, 0);
+}
+
+// If any procs are sleeping on addr, wake up at most cnt.
+static void
+futexwakeup(uint32 *addr, uint32 cnt)
+{
+ int64 ret;
+
+ ret = runtime·futex(addr, FUTEX_WAKE, cnt, nil, nil, 0);
+
+ if(ret >= 0)
+ return;
+
+ // I don't know that futex wakeup can return
+ // EAGAIN or EINTR, but if it does, it would be
+ // safe to loop and call futex again.
+ runtime·printf("futexwakeup addr=%p returned %D\n", addr, ret);
+ *(int32*)0x1006 = 0x1006;
+}
+
+static int32
+getproccount(void)
+{
+ int32 fd, rd, cnt, cpustrlen;
+ byte *cpustr, *pos, *bufpos;
+ byte buf[256];
+
+ fd = runtime·open((byte*)"/proc/stat", O_RDONLY|O_CLOEXEC, 0);
+ if(fd == -1)
+ return 1;
+ cnt = 0;
+ bufpos = buf;
+ cpustr = (byte*)"\ncpu";
+ cpustrlen = runtime·findnull(cpustr);
+ for(;;) {
+ rd = runtime·read(fd, bufpos, sizeof(buf)-cpustrlen);
+ if(rd == -1)
+ break;
+ bufpos[rd] = 0;
+ for(pos=buf; pos=runtime·strstr(pos, cpustr); cnt++, pos++) {
+ }
+ if(rd < cpustrlen)
+ break;
+ runtime·memmove(buf, bufpos+rd-cpustrlen+1, cpustrlen-1);
+ bufpos = buf+cpustrlen-1;
+ }
+ runtime·close(fd);
+ return cnt ? cnt : 1;
+}
+
+// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING.
+// MUTEX_SLEEPING means that there is presumably at least one sleeping thread.
+// Note that there can be spinning threads during all states - they do not
+// affect mutex's state.
+static void
+futexlock(Lock *l)
+{
+ uint32 i, v, wait, spin;
+
+ // Speculative grab for lock.
+ v = runtime·xchg(&l->key, MUTEX_LOCKED);
+ if(v == MUTEX_UNLOCKED)
+ return;
+
+ // wait is either MUTEX_LOCKED or MUTEX_SLEEPING
+ // depending on whether there is a thread sleeping
+ // on this mutex. If we ever change l->key from
+ // MUTEX_SLEEPING to some other value, we must be
+ // careful to change it back to MUTEX_SLEEPING before
+ // returning, to ensure that the sleeping thread gets
+ // its wakeup call.
+ wait = v;
+
+ if(proccount == 0)
+ proccount = getproccount();
+
+ // On uniprocessor's, no point spinning.
+ // On multiprocessors, spin for ACTIVE_SPIN attempts.
+ spin = 0;
+ if(proccount > 1)
+ spin = ACTIVE_SPIN;
+
+ for(;;) {
+ // Try for lock, spinning.
+ for(i = 0; i < spin; i++) {
+ while(l->key == MUTEX_UNLOCKED)
+ if(runtime·cas(&l->key, MUTEX_UNLOCKED, wait))
+ return;
+ runtime·procyield(ACTIVE_SPIN_CNT);
+ }
+
+ // Try for lock, rescheduling.
+ for(i=0; i < PASSIVE_SPIN; i++) {
+ while(l->key == MUTEX_UNLOCKED)
+ if(runtime·cas(&l->key, MUTEX_UNLOCKED, wait))
+ return;
+ runtime·osyield();
+ }
+
+ // Sleep.
+ v = runtime·xchg(&l->key, MUTEX_SLEEPING);
+ if(v == MUTEX_UNLOCKED)
+ return;
+ wait = MUTEX_SLEEPING;
+ futexsleep(&l->key, MUTEX_SLEEPING);
+ }
+}
+
+static void
+futexunlock(Lock *l)
+{
+ uint32 v;
+
+ v = runtime·xchg(&l->key, MUTEX_UNLOCKED);
+ if(v == MUTEX_UNLOCKED)
+ runtime·throw("unlock of unlocked lock");
+ if(v == MUTEX_SLEEPING)
+ futexwakeup(&l->key, 1);
+}
+
+void
+runtime·lock(Lock *l)
+{
+ if(m->locks++ < 0)
+ runtime·throw("runtime·lock: lock count");
+ futexlock(l);
+}
+
+void
+runtime·unlock(Lock *l)
+{
+ if(--m->locks < 0)
+ runtime·throw("runtime·unlock: lock count");
+ futexunlock(l);
+}
+
+
+// One-time notifications.
+void
+runtime·noteclear(Note *n)
+{
+ n->state = 0;
+}
+
+void
+runtime·notewakeup(Note *n)
+{
+ runtime·xchg(&n->state, 1);
+ futexwakeup(&n->state, 1<<30);
+}
+
+void
+runtime·notesleep(Note *n)
+{
+ while(runtime·atomicload(&n->state) == 0)
+ futexsleep(&n->state, 0);
+}
+
+
+// Clone, the Linux rfork.
+enum
+{
+ CLONE_VM = 0x100,
+ CLONE_FS = 0x200,
+ CLONE_FILES = 0x400,
+ CLONE_SIGHAND = 0x800,
+ CLONE_PTRACE = 0x2000,
+ CLONE_VFORK = 0x4000,
+ CLONE_PARENT = 0x8000,
+ CLONE_THREAD = 0x10000,
+ CLONE_NEWNS = 0x20000,
+ CLONE_SYSVSEM = 0x40000,
+ CLONE_SETTLS = 0x80000,
+ CLONE_PARENT_SETTID = 0x100000,
+ CLONE_CHILD_CLEARTID = 0x200000,
+ CLONE_UNTRACED = 0x800000,
+ CLONE_CHILD_SETTID = 0x1000000,
+ CLONE_STOPPED = 0x2000000,
+ CLONE_NEWUTS = 0x4000000,
+ CLONE_NEWIPC = 0x8000000,
+};
+
+void
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
+{
+ int32 ret;
+ int32 flags;
+
+ /*
+ * note: strace gets confused if we use CLONE_PTRACE here.
+ */
+ flags = CLONE_VM /* share memory */
+ | CLONE_FS /* share cwd, etc */
+ | CLONE_FILES /* share fd table */
+ | CLONE_SIGHAND /* share sig handler table */
+ | CLONE_THREAD /* revisit - okay for now */
+ ;
+
+ m->tls[0] = m->id; // so 386 asm can find it
+ if(0){
+ runtime·printf("newosproc stk=%p m=%p g=%p fn=%p clone=%p id=%d/%d ostk=%p\n",
+ stk, m, g, fn, runtime·clone, m->id, m->tls[0], &m);
+ }
+
+ if((ret = runtime·clone(flags, stk, m, g, fn)) < 0) {
+ runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), -ret);
+ runtime·throw("runtime.newosproc");
+ }
+}
+
+void
+runtime·osinit(void)
+{
+}
+
+void
+runtime·goenvs(void)
+{
+ runtime·goenvs_unix();
+}
+
+// Called to initialize a new m (including the bootstrap m).
+void
+runtime·minit(void)
+{
+ // Initialize signal handling.
+ m->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
+ runtime·signalstack(m->gsignal->stackguard - StackGuard, 32*1024);
+}
+
+void
+runtime·sigpanic(void)
+{
+ switch(g->sig) {
+ case SIGBUS:
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGSEGV:
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGFPE:
+ switch(g->sigcode0) {
+ case FPE_INTDIV:
+ runtime·panicstring("integer divide by zero");
+ case FPE_INTOVF:
+ runtime·panicstring("integer overflow");
+ }
+ runtime·panicstring("floating point error");
+ }
+ runtime·panicstring(runtime·sigtab[g->sig].name);
+}
diff --git a/src/pkg/runtime/malloc.goc b/src/pkg/runtime/malloc.goc
new file mode 100644
index 000000000..b9fe36db6
--- /dev/null
+++ b/src/pkg/runtime/malloc.goc
@@ -0,0 +1,482 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See malloc.h for overview.
+//
+// TODO(rsc): double-check stats.
+
+package runtime
+#include "runtime.h"
+#include "stack.h"
+#include "malloc.h"
+#include "defs.h"
+#include "type.h"
+
+MHeap runtime·mheap;
+extern MStats mstats; // defined in extern.go
+
+extern volatile int32 runtime·MemProfileRate;
+
+// Allocate an object of at least size bytes.
+// Small objects are allocated from the per-thread cache's free lists.
+// Large objects (> 32 kB) are allocated straight from the heap.
+void*
+runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
+{
+ int32 sizeclass, rate;
+ MCache *c;
+ uintptr npages;
+ MSpan *s;
+ void *v;
+
+ if(runtime·gcwaiting && g != m->g0 && m->locks == 0)
+ runtime·gosched();
+ if(m->mallocing)
+ runtime·throw("malloc/free - deadlock");
+ m->mallocing = 1;
+ if(size == 0)
+ size = 1;
+
+ c = m->mcache;
+ c->local_nmalloc++;
+ if(size <= MaxSmallSize) {
+ // Allocate from mcache free lists.
+ sizeclass = runtime·SizeToClass(size);
+ size = runtime·class_to_size[sizeclass];
+ v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
+ if(v == nil)
+ runtime·throw("out of memory");
+ c->local_alloc += size;
+ c->local_total_alloc += size;
+ c->local_by_size[sizeclass].nmalloc++;
+ } else {
+ // TODO(rsc): Report tracebacks for very large allocations.
+
+ // Allocate directly from heap.
+ npages = size >> PageShift;
+ if((size & PageMask) != 0)
+ npages++;
+ s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1);
+ if(s == nil)
+ runtime·throw("out of memory");
+ size = npages<<PageShift;
+ c->local_alloc += size;
+ c->local_total_alloc += size;
+ v = (void*)(s->start << PageShift);
+
+ // setup for mark sweep
+ runtime·markspan(v, 0, 0, true);
+ }
+ if(!(flag & FlagNoGC))
+ runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);
+
+ m->mallocing = 0;
+
+ if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
+ if(size >= rate)
+ goto profile;
+ if(m->mcache->next_sample > size)
+ m->mcache->next_sample -= size;
+ else {
+ // pick next profile time
+ if(rate > 0x3fffffff) // make 2*rate not overflow
+ rate = 0x3fffffff;
+ m->mcache->next_sample = runtime·fastrand1() % (2*rate);
+ profile:
+ runtime·setblockspecial(v);
+ runtime·MProf_Malloc(v, size);
+ }
+ }
+
+ if(dogc && mstats.heap_alloc >= mstats.next_gc)
+ runtime·gc(0);
+ return v;
+}
+
+void*
+runtime·malloc(uintptr size)
+{
+ return runtime·mallocgc(size, 0, 0, 1);
+}
+
+// Free the object whose base pointer is v.
+void
+runtime·free(void *v)
+{
+ int32 sizeclass;
+ MSpan *s;
+ MCache *c;
+ uint32 prof;
+ uintptr size;
+
+ if(v == nil)
+ return;
+
+ // If you change this also change mgc0.c:/^sweepspan,
+ // which has a copy of the guts of free.
+
+ if(m->mallocing)
+ runtime·throw("malloc/free - deadlock");
+ m->mallocing = 1;
+
+ if(!runtime·mlookup(v, nil, nil, &s)) {
+ runtime·printf("free %p: not an allocated block\n", v);
+ runtime·throw("free runtime·mlookup");
+ }
+ prof = runtime·blockspecial(v);
+
+ // Find size class for v.
+ sizeclass = s->sizeclass;
+ c = m->mcache;
+ if(sizeclass == 0) {
+ // Large object.
+ size = s->npages<<PageShift;
+ *(uintptr*)(s->start<<PageShift) = 1; // mark as "needs to be zeroed"
+ // Must mark v freed before calling unmarkspan and MHeap_Free:
+ // they might coalesce v into other spans and change the bitmap further.
+ runtime·markfreed(v, size);
+ runtime·unmarkspan(v, 1<<PageShift);
+ runtime·MHeap_Free(&runtime·mheap, s, 1);
+ } else {
+ // Small object.
+ size = runtime·class_to_size[sizeclass];
+ if(size > sizeof(uintptr))
+ ((uintptr*)v)[1] = 1; // mark as "needs to be zeroed"
+ // Must mark v freed before calling MCache_Free:
+ // it might coalesce v and other blocks into a bigger span
+ // and change the bitmap further.
+ runtime·markfreed(v, size);
+ c->local_by_size[sizeclass].nfree++;
+ runtime·MCache_Free(c, v, sizeclass, size);
+ }
+ c->local_alloc -= size;
+ if(prof)
+ runtime·MProf_Free(v, size);
+ m->mallocing = 0;
+}
+
+int32
+runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
+{
+ uintptr n, i;
+ byte *p;
+ MSpan *s;
+
+ m->mcache->local_nlookup++;
+ s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
+ if(sp)
+ *sp = s;
+ if(s == nil) {
+ runtime·checkfreed(v, 1);
+ if(base)
+ *base = nil;
+ if(size)
+ *size = 0;
+ return 0;
+ }
+
+ p = (byte*)((uintptr)s->start<<PageShift);
+ if(s->sizeclass == 0) {
+ // Large object.
+ if(base)
+ *base = p;
+ if(size)
+ *size = s->npages<<PageShift;
+ return 1;
+ }
+
+ if((byte*)v >= (byte*)s->limit) {
+ // pointers past the last block do not count as pointers.
+ return 0;
+ }
+
+ n = runtime·class_to_size[s->sizeclass];
+ if(base) {
+ i = ((byte*)v - p)/n;
+ *base = p + i*n;
+ }
+ if(size)
+ *size = n;
+
+ return 1;
+}
+
+MCache*
+runtime·allocmcache(void)
+{
+ MCache *c;
+
+ runtime·lock(&runtime·mheap);
+ c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
+ mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
+ mstats.mcache_sys = runtime·mheap.cachealloc.sys;
+ runtime·unlock(&runtime·mheap);
+ return c;
+}
+
+void
+runtime·purgecachedstats(M* m)
+{
+ MCache *c;
+
+ // Protected by either heap or GC lock.
+ c = m->mcache;
+ mstats.heap_alloc += c->local_cachealloc;
+ c->local_cachealloc = 0;
+ mstats.heap_objects += c->local_objects;
+ c->local_objects = 0;
+ mstats.nmalloc += c->local_nmalloc;
+ c->local_nmalloc = 0;
+ mstats.nfree += c->local_nfree;
+ c->local_nfree = 0;
+ mstats.nlookup += c->local_nlookup;
+ c->local_nlookup = 0;
+ mstats.alloc += c->local_alloc;
+ c->local_alloc= 0;
+ mstats.total_alloc += c->local_total_alloc;
+ c->local_total_alloc= 0;
+}
+
+uintptr runtime·sizeof_C_MStats = sizeof(MStats);
+
+#define MaxArena32 (2U<<30)
+
+void
+runtime·mallocinit(void)
+{
+ byte *p;
+ uintptr arena_size, bitmap_size;
+ extern byte end[];
+
+ runtime·InitSizes();
+
+ // Set up the allocation arena, a contiguous area of memory where
+ // allocated data will be found. The arena begins with a bitmap large
+ // enough to hold 4 bits per allocated word.
+ if(sizeof(void*) == 8) {
+ // On a 64-bit machine, allocate from a single contiguous reservation.
+ // 16 GB should be big enough for now.
+ //
+ // The code will work with the reservation at any address, but ask
+ // SysReserve to use 0x000000f800000000 if possible.
+ // Allocating a 16 GB region takes away 36 bits, and the amd64
+ // doesn't let us choose the top 17 bits, so that leaves the 11 bits
+ // in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means
+ // that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
+ // None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
+ // they are otherwise as far from ff (likely a common byte) as possible.
+ // Choosing 0x00 for the leading 6 bits was more arbitrary, but it
+ // is not a common ASCII code point either. Using 0x11f8 instead
+ // caused out of memory errors on OS X during thread allocations.
+ // These choices are both for debuggability and to reduce the
+ // odds of the conservative garbage collector not collecting memory
+ // because some non-pointer block of memory had a bit pattern
+ // that matched a memory address.
+ //
+ // Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
+ // but it hardly matters: fc is not valid UTF-8 either, and we have to
+ // allocate 15 GB before we get that far.
+ arena_size = 16LL<<30;
+ bitmap_size = arena_size / (sizeof(void*)*8/4);
+ p = runtime·SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
+ if(p == nil)
+ runtime·throw("runtime: cannot reserve arena virtual address space");
+ } else {
+ // On a 32-bit machine, we can't typically get away
+ // with a giant virtual address space reservation.
+ // Instead we map the memory information bitmap
+ // immediately after the data segment, large enough
+ // to handle another 2GB of mappings (256 MB),
+ // along with a reservation for another 512 MB of memory.
+ // When that gets used up, we'll start asking the kernel
+ // for any memory anywhere and hope it's in the 2GB
+ // following the bitmap (presumably the executable begins
+ // near the bottom of memory, so we'll have to use up
+ // most of memory before the kernel resorts to giving out
+ // memory before the beginning of the text segment).
+ //
+ // Alternatively we could reserve 512 MB bitmap, enough
+ // for 4GB of mappings, and then accept any memory the
+ // kernel threw at us, but normally that's a waste of 512 MB
+ // of address space, which is probably too much in a 32-bit world.
+ bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
+ arena_size = 512<<20;
+
+ // SysReserve treats the address we ask for, end, as a hint,
+ // not as an absolute requirement. If we ask for the end
+ // of the data segment but the operating system requires
+ // a little more space before we can start allocating, it will
+ // give out a slightly higher pointer. That's fine.
+ // Run with what we get back.
+ p = runtime·SysReserve(end, bitmap_size + arena_size);
+ if(p == nil)
+ runtime·throw("runtime: cannot reserve arena virtual address space");
+ }
+ if((uintptr)p & (((uintptr)1<<PageShift)-1))
+ runtime·throw("runtime: SysReserve returned unaligned address");
+
+ runtime·mheap.bitmap = p;
+ runtime·mheap.arena_start = p + bitmap_size;
+ runtime·mheap.arena_used = runtime·mheap.arena_start;
+ runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size;
+
+ // Initialize the rest of the allocator.
+ runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
+ m->mcache = runtime·allocmcache();
+
+ // See if it works.
+ runtime·free(runtime·malloc(1));
+}
+
+void*
+runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
+{
+ byte *p;
+
+ if(n <= h->arena_end - h->arena_used) {
+ // Keep taking from our reservation.
+ p = h->arena_used;
+ runtime·SysMap(p, n);
+ h->arena_used += n;
+ runtime·MHeap_MapBits(h);
+ return p;
+ }
+
+ // On 64-bit, our reservation is all we have.
+ if(sizeof(void*) == 8)
+ return nil;
+
+ // On 32-bit, once the reservation is gone we can
+ // try to get memory at a location chosen by the OS
+ // and hope that it is in the range we allocated bitmap for.
+ p = runtime·SysAlloc(n);
+ if(p == nil)
+ return nil;
+
+ if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
+ runtime·printf("runtime: memory allocated by OS not in usable range\n");
+ runtime·SysFree(p, n);
+ return nil;
+ }
+
+ if(p+n > h->arena_used) {
+ h->arena_used = p+n;
+ if(h->arena_used > h->arena_end)
+ h->arena_end = h->arena_used;
+ runtime·MHeap_MapBits(h);
+ }
+
+ return p;
+}
+
+// Runtime stubs.
+
+void*
+runtime·mal(uintptr n)
+{
+ return runtime·mallocgc(n, 0, 1, 1);
+}
+
+func new(n uint32) (ret *uint8) {
+ ret = runtime·mal(n);
+}
+
+void*
+runtime·stackalloc(uint32 n)
+{
+ // Stackalloc must be called on scheduler stack, so that we
+ // never try to grow the stack during the code that stackalloc runs.
+ // Doing so would cause a deadlock (issue 1547).
+ if(g != m->g0)
+ runtime·throw("stackalloc not on scheduler stack");
+
+ // Stack allocator uses malloc/free most of the time,
+ // but if we're in the middle of malloc and need stack,
+ // we have to do something else to avoid deadlock.
+ // In that case, we fall back on a fixed-size free-list
+ // allocator, assuming that inside malloc all the stack
+ // frames are small, so that all the stack allocations
+ // will be a single size, the minimum (right now, 5k).
+ if(m->mallocing || m->gcing || n == FixedStack) {
+ if(n != FixedStack) {
+ runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n);
+ runtime·throw("stackalloc");
+ }
+ return runtime·FixAlloc_Alloc(m->stackalloc);
+ }
+ return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
+}
+
+void
+runtime·stackfree(void *v, uintptr n)
+{
+ if(m->mallocing || m->gcing || n == FixedStack) {
+ runtime·FixAlloc_Free(m->stackalloc, v);
+ return;
+ }
+ runtime·free(v);
+}
+
+func Alloc(n uintptr) (p *byte) {
+ p = runtime·malloc(n);
+}
+
+func Free(p *byte) {
+ runtime·free(p);
+}
+
+func Lookup(p *byte) (base *byte, size uintptr) {
+ runtime·mlookup(p, &base, &size, nil);
+}
+
+func GC() {
+ runtime·gc(1);
+}
+
+func SetFinalizer(obj Eface, finalizer Eface) {
+ byte *base;
+ uintptr size;
+ FuncType *ft;
+ int32 i, nret;
+ Type *t;
+
+ if(obj.type == nil) {
+ runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
+ throw:
+ runtime·throw("runtime.SetFinalizer");
+ }
+ if(obj.type->kind != KindPtr) {
+ runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
+ goto throw;
+ }
+ if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
+ runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
+ goto throw;
+ }
+ nret = 0;
+ if(finalizer.type != nil) {
+ if(finalizer.type->kind != KindFunc) {
+ badfunc:
+ runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
+ goto throw;
+ }
+ ft = (FuncType*)finalizer.type;
+ if(ft->dotdotdot || ft->in.len != 1 || *(Type**)ft->in.array != obj.type)
+ goto badfunc;
+
+ // compute size needed for return parameters
+ for(i=0; i<ft->out.len; i++) {
+ t = ((Type**)ft->out.array)[i];
+ nret = (nret + t->align - 1) & ~(t->align - 1);
+ nret += t->size;
+ }
+ nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
+
+ if(runtime·getfinalizer(obj.data, 0)) {
+ runtime·printf("runtime.SetFinalizer: finalizer already set\n");
+ goto throw;
+ }
+ }
+ runtime·addfinalizer(obj.data, finalizer.data, nret);
+}
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
new file mode 100644
index 000000000..5bc80f4df
--- /dev/null
+++ b/src/pkg/runtime/malloc.h
@@ -0,0 +1,422 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Memory allocator, based on tcmalloc.
+// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
+
+// The main allocator works in runs of pages.
+// Small allocation sizes (up to and including 32 kB) are
+// rounded to one of about 100 size classes, each of which
+// has its own free list of objects of exactly that size.
+// Any free page of memory can be split into a set of objects
+// of one size class, which are then managed using free list
+// allocators.
+//
+// The allocator's data structures are:
+//
+// FixAlloc: a free-list allocator for fixed-size objects,
+// used to manage storage used by the allocator.
+// MHeap: the malloc heap, managed at page (4096-byte) granularity.
+// MSpan: a run of pages managed by the MHeap.
+// MCentral: a shared free list for a given size class.
+// MCache: a per-thread (in Go, per-M) cache for small objects.
+// MStats: allocation statistics.
+//
+// Allocating a small object proceeds up a hierarchy of caches:
+//
+// 1. Round the size up to one of the small size classes
+// and look in the corresponding MCache free list.
+// If the list is not empty, allocate an object from it.
+// This can all be done without acquiring a lock.
+//
+// 2. If the MCache free list is empty, replenish it by
+// taking a bunch of objects from the MCentral free list.
+// Moving a bunch amortizes the cost of acquiring the MCentral lock.
+//
+// 3. If the MCentral free list is empty, replenish it by
+// allocating a run of pages from the MHeap and then
+// chopping that memory into a objects of the given size.
+// Allocating many objects amortizes the cost of locking
+// the heap.
+//
+// 4. If the MHeap is empty or has no page runs large enough,
+// allocate a new group of pages (at least 1MB) from the
+// operating system. Allocating a large run of pages
+// amortizes the cost of talking to the operating system.
+//
+// Freeing a small object proceeds up the same hierarchy:
+//
+// 1. Look up the size class for the object and add it to
+// the MCache free list.
+//
+// 2. If the MCache free list is too long or the MCache has
+// too much memory, return some to the MCentral free lists.
+//
+// 3. If all the objects in a given span have returned to
+// the MCentral list, return that span to the page heap.
+//
+// 4. If the heap has too much memory, return some to the
+// operating system.
+//
+// TODO(rsc): Step 4 is not implemented.
+//
+// Allocating and freeing a large object uses the page heap
+// directly, bypassing the MCache and MCentral free lists.
+//
+// The small objects on the MCache and MCentral free lists
+// may or may not be zeroed. They are zeroed if and only if
+// the second word of the object is zero. The spans in the
+// page heap are always zeroed. When a span full of objects
+// is returned to the page heap, the objects that need to be
+// are zeroed first. There are two main benefits to delaying the
+// zeroing this way:
+//
+// 1. stack frames allocated from the small object lists
+// can avoid zeroing altogether.
+// 2. the cost of zeroing when reusing a small object is
+// charged to the mutator, not the garbage collector.
+//
+// This C code was written with an eye toward translating to Go
+// in the future. Methods have the form Type_Method(Type *t, ...).
+
+typedef struct MCentral MCentral;
+typedef struct MHeap MHeap;
+typedef struct MSpan MSpan;
+typedef struct MStats MStats;
+typedef struct MLink MLink;
+
+enum
+{
+ PageShift = 12,
+ PageSize = 1<<PageShift,
+ PageMask = PageSize - 1,
+};
+typedef uintptr PageID; // address >> PageShift
+
+enum
+{
+ // Computed constant. The definition of MaxSmallSize and the
+ // algorithm in msize.c produce some number of different allocation
+ // size classes. NumSizeClasses is that number. It's needed here
+ // because there are static arrays of this length; when msize runs its
+ // size choosing algorithm it double-checks that NumSizeClasses agrees.
+ NumSizeClasses = 61,
+
+ // Tunable constants.
+ MaxSmallSize = 32<<10,
+
+ FixAllocChunk = 128<<10, // Chunk size for FixAlloc
+ MaxMCacheListLen = 256, // Maximum objects on MCacheList
+ MaxMCacheSize = 2<<20, // Maximum bytes in one MCache
+ MaxMHeapList = 1<<(20 - PageShift), // Maximum page length for fixed-size list in MHeap.
+ HeapAllocChunk = 1<<20, // Chunk size for heap growth
+
+ // Number of bits in page to span calculations (4k pages).
+ // On 64-bit, we limit the arena to 16G, so 22 bits suffices.
+ // On 32-bit, we don't bother limiting anything: 20 bits for 4G.
+#ifdef _64BIT
+ MHeapMap_Bits = 22,
+#else
+ MHeapMap_Bits = 20,
+#endif
+};
+
+// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
+struct MLink
+{
+ MLink *next;
+};
+
+// SysAlloc obtains a large chunk of zeroed memory from the
+// operating system, typically on the order of a hundred kilobytes
+// or a megabyte. If the pointer argument is non-nil, the caller
+// wants a mapping there or nowhere.
+//
+// SysUnused notifies the operating system that the contents
+// of the memory region are no longer needed and can be reused
+// for other purposes. The program reserves the right to start
+// accessing those pages in the future.
+//
+// SysFree returns it unconditionally; this is only used if
+// an out-of-memory error has been detected midway through
+// an allocation. It is okay if SysFree is a no-op.
+//
+// SysReserve reserves address space without allocating memory.
+// If the pointer passed to it is non-nil, the caller wants the
+// reservation there, but SysReserve can still choose another
+// location if that one is unavailable.
+//
+// SysMap maps previously reserved address space for use.
+
+void* runtime·SysAlloc(uintptr nbytes);
+void runtime·SysFree(void *v, uintptr nbytes);
+void runtime·SysUnused(void *v, uintptr nbytes);
+void runtime·SysMap(void *v, uintptr nbytes);
+void* runtime·SysReserve(void *v, uintptr nbytes);
+
+// FixAlloc is a simple free-list allocator for fixed size objects.
+// Malloc uses a FixAlloc wrapped around SysAlloc to manages its
+// MCache and MSpan objects.
+//
+// Memory returned by FixAlloc_Alloc is not zeroed.
+// The caller is responsible for locking around FixAlloc calls.
+// Callers can keep state in the object but the first word is
+// smashed by freeing and reallocating.
+struct FixAlloc
+{
+ uintptr size;
+ void *(*alloc)(uintptr);
+ void (*first)(void *arg, byte *p); // called first time p is returned
+ void *arg;
+ MLink *list;
+ byte *chunk;
+ uint32 nchunk;
+ uintptr inuse; // in-use bytes now
+ uintptr sys; // bytes obtained from system
+};
+
+void runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
+void* runtime·FixAlloc_Alloc(FixAlloc *f);
+void runtime·FixAlloc_Free(FixAlloc *f, void *p);
+
+
+// Statistics.
+// Shared with Go: if you edit this structure, also edit extern.go.
+struct MStats
+{
+ // General statistics.
+ uint64 alloc; // bytes allocated and still in use
+ uint64 total_alloc; // bytes allocated (even if freed)
+ uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
+ uint64 nlookup; // number of pointer lookups
+ uint64 nmalloc; // number of mallocs
+ uint64 nfree; // number of frees
+
+ // Statistics about malloc heap.
+ // protected by mheap.Lock
+ uint64 heap_alloc; // bytes allocated and still in use
+ uint64 heap_sys; // bytes obtained from system
+ uint64 heap_idle; // bytes in idle spans
+ uint64 heap_inuse; // bytes in non-idle spans
+ uint64 heap_objects; // total number of allocated objects
+
+ // Statistics about allocation of low-level fixed-size structures.
+ // Protected by FixAlloc locks.
+ uint64 stacks_inuse; // bootstrap stacks
+ uint64 stacks_sys;
+ uint64 mspan_inuse; // MSpan structures
+ uint64 mspan_sys;
+ uint64 mcache_inuse; // MCache structures
+ uint64 mcache_sys;
+ uint64 buckhash_sys; // profiling bucket hash table
+
+ // Statistics about garbage collector.
+ // Protected by stopping the world during GC.
+ uint64 next_gc; // next GC (in heap_alloc time)
+ uint64 pause_total_ns;
+ uint64 pause_ns[256];
+ uint32 numgc;
+ bool enablegc;
+ bool debuggc;
+
+ // Statistics about allocation size classes.
+ struct {
+ uint32 size;
+ uint64 nmalloc;
+ uint64 nfree;
+ } by_size[NumSizeClasses];
+};
+
+#define mstats runtime·MemStats /* name shared with Go */
+extern MStats mstats;
+
+
+// Size classes. Computed and initialized by InitSizes.
+//
+// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
+// 1 <= sizeclass < NumSizeClasses, for n.
+// Size class 0 is reserved to mean "not small".
+//
+// class_to_size[i] = largest size in class i
+// class_to_allocnpages[i] = number of pages to allocate when
+// making new objects in class i
+// class_to_transfercount[i] = number of objects to move when
+// taking a bunch of objects out of the central lists
+// and putting them in the thread free list.
+
+int32 runtime·SizeToClass(int32);
+extern int32 runtime·class_to_size[NumSizeClasses];
+extern int32 runtime·class_to_allocnpages[NumSizeClasses];
+extern int32 runtime·class_to_transfercount[NumSizeClasses];
+extern void runtime·InitSizes(void);
+
+
+// Per-thread (in Go, per-M) cache for small objects.
+// No locking needed because it is per-thread (per-M).
+typedef struct MCacheList MCacheList;
+struct MCacheList
+{
+ MLink *list;
+ uint32 nlist;
+ uint32 nlistmin;
+};
+
+struct MCache
+{
+ MCacheList list[NumSizeClasses];
+ uint64 size;
+ int64 local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
+ int64 local_objects; // objects allocated (or freed) from cache since last lock of heap
+ int64 local_alloc; // bytes allocated (or freed) since last lock of heap
+ int64 local_total_alloc; // bytes allocated (even if freed) since last lock of heap
+ int64 local_nmalloc; // number of mallocs since last lock of heap
+ int64 local_nfree; // number of frees since last lock of heap
+ int64 local_nlookup; // number of pointer lookups since last lock of heap
+ int32 next_sample; // trigger heap sample after allocating this many bytes
+ // Statistics about allocation size classes since last lock of heap
+ struct {
+ int64 nmalloc;
+ int64 nfree;
+ } local_by_size[NumSizeClasses];
+
+};
+
+void* runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
+void runtime·MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
+void runtime·MCache_ReleaseAll(MCache *c);
+
+// An MSpan is a run of pages.
+enum
+{
+ MSpanInUse = 0,
+ MSpanFree,
+ MSpanListHead,
+ MSpanDead,
+};
+struct MSpan
+{
+ MSpan *next; // in a span linked list
+ MSpan *prev; // in a span linked list
+ MSpan *allnext; // in the list of all spans
+ PageID start; // starting page number
+ uintptr npages; // number of pages in span
+ MLink *freelist; // list of free objects
+ uint32 ref; // number of allocated objects in this span
+ uint32 sizeclass; // size class
+ uint32 state; // MSpanInUse etc
+ byte *limit; // end of data in span
+};
+
+void runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages);
+
+// Every MSpan is in one doubly-linked list,
+// either one of the MHeap's free lists or one of the
+// MCentral's span lists. We use empty MSpan structures as list heads.
+void runtime·MSpanList_Init(MSpan *list);
+bool runtime·MSpanList_IsEmpty(MSpan *list);
+void runtime·MSpanList_Insert(MSpan *list, MSpan *span);
+void runtime·MSpanList_Remove(MSpan *span); // from whatever list it is in
+
+
+// Central list of free objects of a given size.
+struct MCentral
+{
+ Lock;
+ int32 sizeclass;
+ MSpan nonempty;
+ MSpan empty;
+ int32 nfree;
+};
+
+void runtime·MCentral_Init(MCentral *c, int32 sizeclass);
+int32 runtime·MCentral_AllocList(MCentral *c, int32 n, MLink **first);
+void runtime·MCentral_FreeList(MCentral *c, int32 n, MLink *first);
+
+// Main malloc heap.
+// The heap itself is the "free[]" and "large" arrays,
+// but all the other global data is here too.
+struct MHeap
+{
+ Lock;
+ MSpan free[MaxMHeapList]; // free lists of given length
+ MSpan large; // free lists length >= MaxMHeapList
+ MSpan *allspans;
+
+ // span lookup
+ MSpan *map[1<<MHeapMap_Bits];
+
+ // range of addresses we might see in the heap
+ byte *bitmap;
+ uintptr bitmap_mapped;
+ byte *arena_start;
+ byte *arena_used;
+ byte *arena_end;
+
+ // central free lists for small size classes.
+ // the union makes sure that the MCentrals are
+ // spaced 64 bytes apart, so that each MCentral.Lock
+ // gets its own cache line.
+ union {
+ MCentral;
+ byte pad[64];
+ } central[NumSizeClasses];
+
+ FixAlloc spanalloc; // allocator for Span*
+ FixAlloc cachealloc; // allocator for MCache*
+};
+extern MHeap runtime·mheap;
+
+void runtime·MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
+MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
+void runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
+MSpan* runtime·MHeap_Lookup(MHeap *h, void *v);
+MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v);
+void runtime·MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, int32 *nobj);
+void* runtime·MHeap_SysAlloc(MHeap *h, uintptr n);
+void runtime·MHeap_MapBits(MHeap *h);
+
+void* runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
+int32 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s);
+void runtime·gc(int32 force);
+void runtime·markallocated(void *v, uintptr n, bool noptr);
+void runtime·checkallocated(void *v, uintptr n);
+void runtime·markfreed(void *v, uintptr n);
+void runtime·checkfreed(void *v, uintptr n);
+int32 runtime·checking;
+void runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
+void runtime·unmarkspan(void *v, uintptr size);
+bool runtime·blockspecial(void*);
+void runtime·setblockspecial(void*);
+void runtime·purgecachedstats(M*);
+
+enum
+{
+ // flags to malloc
+ FlagNoPointers = 1<<0, // no pointers here
+ FlagNoProfiling = 1<<1, // must not profile
+ FlagNoGC = 1<<2, // must not free or scan for pointers
+};
+
+void runtime·MProf_Malloc(void*, uintptr);
+void runtime·MProf_Free(void*, uintptr);
+
+// Malloc profiling settings.
+// Must match definition in extern.go.
+enum {
+ MProf_None = 0,
+ MProf_Sample = 1,
+ MProf_All = 2,
+};
+extern int32 runtime·malloc_profile;
+
+typedef struct Finalizer Finalizer;
+struct Finalizer
+{
+ Finalizer *next; // for use by caller of getfinalizer
+ void (*fn)(void*);
+ void *arg;
+ int32 nret;
+};
+
+Finalizer* runtime·getfinalizer(void*, bool);
diff --git a/src/pkg/runtime/mcache.c b/src/pkg/runtime/mcache.c
new file mode 100644
index 000000000..711e938fc
--- /dev/null
+++ b/src/pkg/runtime/mcache.c
@@ -0,0 +1,133 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Per-thread (in Go, per-M) malloc cache for small objects.
+//
+// See malloc.h for an overview.
+
+#include "runtime.h"
+#include "malloc.h"
+
+void*
+runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
+{
+ MCacheList *l;
+ MLink *first, *v;
+ int32 n;
+
+ // Allocate from list.
+ l = &c->list[sizeclass];
+ if(l->list == nil) {
+ // Replenish using central lists.
+ n = runtime·MCentral_AllocList(&runtime·mheap.central[sizeclass],
+ runtime·class_to_transfercount[sizeclass], &first);
+ if(n == 0)
+ runtime·throw("out of memory");
+ l->list = first;
+ l->nlist = n;
+ c->size += n*size;
+ }
+ v = l->list;
+ l->list = v->next;
+ l->nlist--;
+ if(l->nlist < l->nlistmin)
+ l->nlistmin = l->nlist;
+ c->size -= size;
+
+ // v is zeroed except for the link pointer
+ // that we used above; zero that.
+ v->next = nil;
+ if(zeroed) {
+ // block is zeroed iff second word is zero ...
+ if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
+ runtime·memclr((byte*)v, size);
+ else {
+ // ... except for the link pointer
+ // that we used above; zero that.
+ v->next = nil;
+ }
+ }
+ c->local_cachealloc += size;
+ c->local_objects++;
+ return v;
+}
+
+// Take n elements off l and return them to the central free list.
+static void
+ReleaseN(MCache *c, MCacheList *l, int32 n, int32 sizeclass)
+{
+ MLink *first, **lp;
+ int32 i;
+
+ // Cut off first n elements.
+ first = l->list;
+ lp = &l->list;
+ for(i=0; i<n; i++)
+ lp = &(*lp)->next;
+ l->list = *lp;
+ *lp = nil;
+ l->nlist -= n;
+ if(l->nlist < l->nlistmin)
+ l->nlistmin = l->nlist;
+ c->size -= n*runtime·class_to_size[sizeclass];
+
+ // Return them to central free list.
+ runtime·MCentral_FreeList(&runtime·mheap.central[sizeclass], n, first);
+}
+
+void
+runtime·MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
+{
+ int32 i, n;
+ MCacheList *l;
+ MLink *p;
+
+ // Put back on list.
+ l = &c->list[sizeclass];
+ p = v;
+ p->next = l->list;
+ l->list = p;
+ l->nlist++;
+ c->size += size;
+ c->local_cachealloc -= size;
+ c->local_objects--;
+
+ if(l->nlist >= MaxMCacheListLen) {
+ // Release a chunk back.
+ ReleaseN(c, l, runtime·class_to_transfercount[sizeclass], sizeclass);
+ }
+
+ if(c->size >= MaxMCacheSize) {
+ // Scavenge.
+ for(i=0; i<NumSizeClasses; i++) {
+ l = &c->list[i];
+ n = l->nlistmin;
+
+ // n is the minimum number of elements we've seen on
+ // the list since the last scavenge. If n > 0, it means that
+ // we could have gotten by with n fewer elements
+ // without needing to consult the central free list.
+ // Move toward that situation by releasing n/2 of them.
+ if(n > 0) {
+ if(n > 1)
+ n /= 2;
+ ReleaseN(c, l, n, i);
+ }
+ l->nlistmin = l->nlist;
+ }
+ }
+}
+
+void
+runtime·MCache_ReleaseAll(MCache *c)
+{
+ int32 i;
+ MCacheList *l;
+
+ for(i=0; i<NumSizeClasses; i++) {
+ l = &c->list[i];
+ ReleaseN(c, l, l->nlist, i);
+ l->nlistmin = 0;
+ }
+}
diff --git a/src/pkg/runtime/mcentral.c b/src/pkg/runtime/mcentral.c
new file mode 100644
index 000000000..29b03b58f
--- /dev/null
+++ b/src/pkg/runtime/mcentral.c
@@ -0,0 +1,200 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Central free lists.
+//
+// See malloc.h for an overview.
+//
+// The MCentral doesn't actually contain the list of free objects; the MSpan does.
+// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
+// and those that are completely allocated (c->empty).
+//
+// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
+// into sections of class_to_transfercount[sizeclass] objects
+// so that it is faster to move those lists between MCaches and MCentrals.
+
+#include "runtime.h"
+#include "malloc.h"
+
+static bool MCentral_Grow(MCentral *c);
+static void* MCentral_Alloc(MCentral *c);
+static void MCentral_Free(MCentral *c, void *v);
+
+// Initialize a single central free list.
+void
+runtime·MCentral_Init(MCentral *c, int32 sizeclass)
+{
+ c->sizeclass = sizeclass;
+ runtime·MSpanList_Init(&c->nonempty);
+ runtime·MSpanList_Init(&c->empty);
+}
+
+// Allocate up to n objects from the central free list.
+// Return the number of objects allocated.
+// The objects are linked together by their first words.
+// On return, *pstart points at the first object and *pend at the last.
+int32
+runtime·MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
+{
+ MLink *first, *last, *v;
+ int32 i;
+
+ runtime·lock(c);
+ // Replenish central list if empty.
+ if(runtime·MSpanList_IsEmpty(&c->nonempty)) {
+ if(!MCentral_Grow(c)) {
+ runtime·unlock(c);
+ *pfirst = nil;
+ return 0;
+ }
+ }
+
+ // Copy from list, up to n.
+ // First one is guaranteed to work, because we just grew the list.
+ first = MCentral_Alloc(c);
+ last = first;
+ for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) {
+ last->next = v;
+ last = v;
+ }
+ last->next = nil;
+ c->nfree -= i;
+
+ runtime·unlock(c);
+ *pfirst = first;
+ return i;
+}
+
+// Helper: allocate one object from the central free list.
+static void*
+MCentral_Alloc(MCentral *c)
+{
+ MSpan *s;
+ MLink *v;
+
+ if(runtime·MSpanList_IsEmpty(&c->nonempty))
+ return nil;
+ s = c->nonempty.next;
+ s->ref++;
+ v = s->freelist;
+ s->freelist = v->next;
+ if(s->freelist == nil) {
+ runtime·MSpanList_Remove(s);
+ runtime·MSpanList_Insert(&c->empty, s);
+ }
+ return v;
+}
+
+// Free n objects back into the central free list.
+// Return the number of objects allocated.
+// The objects are linked together by their first words.
+// On return, *pstart points at the first object and *pend at the last.
+void
+runtime·MCentral_FreeList(MCentral *c, int32 n, MLink *start)
+{
+ MLink *v, *next;
+
+ // Assume next == nil marks end of list.
+ // n and end would be useful if we implemented
+ // the transfer cache optimization in the TODO above.
+ USED(n);
+
+ runtime·lock(c);
+ for(v=start; v; v=next) {
+ next = v->next;
+ MCentral_Free(c, v);
+ }
+ runtime·unlock(c);
+}
+
+// Helper: free one object back into the central free list.
+static void
+MCentral_Free(MCentral *c, void *v)
+{
+ MSpan *s;
+ MLink *p;
+ int32 size;
+
+ // Find span for v.
+ s = runtime·MHeap_Lookup(&runtime·mheap, v);
+ if(s == nil || s->ref == 0)
+ runtime·throw("invalid free");
+
+ // Move to nonempty if necessary.
+ if(s->freelist == nil) {
+ runtime·MSpanList_Remove(s);
+ runtime·MSpanList_Insert(&c->nonempty, s);
+ }
+
+ // Add v back to s's free list.
+ p = v;
+ p->next = s->freelist;
+ s->freelist = p;
+ c->nfree++;
+
+ // If s is completely freed, return it to the heap.
+ if(--s->ref == 0) {
+ size = runtime·class_to_size[c->sizeclass];
+ runtime·MSpanList_Remove(s);
+ runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ *(uintptr*)(s->start<<PageShift) = 1; // needs zeroing
+ s->freelist = nil;
+ c->nfree -= (s->npages << PageShift) / size;
+ runtime·unlock(c);
+ runtime·MHeap_Free(&runtime·mheap, s, 0);
+ runtime·lock(c);
+ }
+}
+
+void
+runtime·MGetSizeClassInfo(int32 sizeclass, uintptr *sizep, int32 *npagesp, int32 *nobj)
+{
+ int32 size;
+ int32 npages;
+
+ npages = runtime·class_to_allocnpages[sizeclass];
+ size = runtime·class_to_size[sizeclass];
+ *npagesp = npages;
+ *sizep = size;
+ *nobj = (npages << PageShift) / size;
+}
+
+// Fetch a new span from the heap and
+// carve into objects for the free list.
+static bool
+MCentral_Grow(MCentral *c)
+{
+ int32 i, n, npages;
+ uintptr size;
+ MLink **tailp, *v;
+ byte *p;
+ MSpan *s;
+
+ runtime·unlock(c);
+ runtime·MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
+ s = runtime·MHeap_Alloc(&runtime·mheap, npages, c->sizeclass, 0);
+ if(s == nil) {
+ // TODO(rsc): Log out of memory
+ runtime·lock(c);
+ return false;
+ }
+
+ // Carve span into sequence of blocks.
+ tailp = &s->freelist;
+ p = (byte*)(s->start << PageShift);
+ s->limit = p + size*n;
+ for(i=0; i<n; i++) {
+ v = (MLink*)p;
+ *tailp = v;
+ tailp = &v->next;
+ p += size;
+ }
+ *tailp = nil;
+ runtime·markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
+
+ runtime·lock(c);
+ c->nfree += n;
+ runtime·MSpanList_Insert(&c->nonempty, s);
+ return true;
+}
diff --git a/src/pkg/runtime/mem.go b/src/pkg/runtime/mem.go
new file mode 100644
index 000000000..93d155a7f
--- /dev/null
+++ b/src/pkg/runtime/mem.go
@@ -0,0 +1,74 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type MemStatsType struct {
+ // General statistics.
+ // Not locked during update; approximate.
+ Alloc uint64 // bytes allocated and still in use
+ TotalAlloc uint64 // bytes allocated (even if freed)
+ Sys uint64 // bytes obtained from system (should be sum of XxxSys below)
+ Lookups uint64 // number of pointer lookups
+ Mallocs uint64 // number of mallocs
+ Frees uint64 // number of frees
+
+ // Main allocation heap statistics.
+ HeapAlloc uint64 // bytes allocated and still in use
+ HeapSys uint64 // bytes obtained from system
+ HeapIdle uint64 // bytes in idle spans
+ HeapInuse uint64 // bytes in non-idle span
+ HeapObjects uint64 // total number of allocated objects
+
+ // Low-level fixed-size structure allocator statistics.
+ // Inuse is bytes used now.
+ // Sys is bytes obtained from system.
+ StackInuse uint64 // bootstrap stacks
+ StackSys uint64
+ MSpanInuse uint64 // mspan structures
+ MSpanSys uint64
+ MCacheInuse uint64 // mcache structures
+ MCacheSys uint64
+ BuckHashSys uint64 // profiling bucket hash table
+
+ // Garbage collector statistics.
+ NextGC uint64
+ PauseTotalNs uint64
+ PauseNs [256]uint64 // most recent GC pause times
+ NumGC uint32
+ EnableGC bool
+ DebugGC bool
+
+ // Per-size allocation statistics.
+ // Not locked during update; approximate.
+ // 61 is NumSizeClasses in the C code.
+ BySize [61]struct {
+ Size uint32
+ Mallocs uint64
+ Frees uint64
+ }
+}
+
+var sizeof_C_MStats uintptr // filled in by malloc.goc
+
+func init() {
+ if sizeof_C_MStats != unsafe.Sizeof(MemStats) {
+ println(sizeof_C_MStats, unsafe.Sizeof(MemStats))
+ panic("MStats vs MemStatsType size mismatch")
+ }
+}
+
+// MemStats holds statistics about the memory system.
+// The statistics may be out of date, as the information is
+// updated lazily from per-thread caches.
+// Use UpdateMemStats to bring the statistics up to date.
+var MemStats MemStatsType
+
+// UpdateMemStats brings MemStats up to date.
+func UpdateMemStats()
+
+// GC runs a garbage collection.
+func GC()
diff --git a/src/pkg/runtime/mfinal.c b/src/pkg/runtime/mfinal.c
new file mode 100644
index 000000000..f3138145b
--- /dev/null
+++ b/src/pkg/runtime/mfinal.c
@@ -0,0 +1,181 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "malloc.h"
+
+// Lock to protect finalizer data structures.
+// Cannot reuse mheap.Lock because the finalizer
+// maintenance requires allocation.
+static Lock finlock;
+
+// Finalizer hash table. Direct hash, linear scan, at most 3/4 full.
+// Table size is power of 3 so that hash can be key % max.
+// Key[i] == (void*)-1 denotes free but formerly occupied entry
+// (doesn't stop the linear scan).
+// Key and val are separate tables because the garbage collector
+// must be instructed to ignore the pointers in key but follow the
+// pointers in val.
+typedef struct Fintab Fintab;
+struct Fintab
+{
+ void **key;
+ Finalizer **val;
+ int32 nkey; // number of non-nil entries in key
+ int32 ndead; // number of dead (-1) entries in key
+ int32 max; // size of key, val allocations
+};
+
+static void
+addfintab(Fintab *t, void *k, Finalizer *v)
+{
+ int32 i, j;
+
+ i = (uintptr)k % (uintptr)t->max;
+ for(j=0; j<t->max; j++) {
+ if(t->key[i] == nil) {
+ t->nkey++;
+ goto ret;
+ }
+ if(t->key[i] == (void*)-1) {
+ t->ndead--;
+ goto ret;
+ }
+ if(++i == t->max)
+ i = 0;
+ }
+
+ // cannot happen - table is known to be non-full
+ runtime·throw("finalizer table inconsistent");
+
+ret:
+ t->key[i] = k;
+ t->val[i] = v;
+}
+
+static Finalizer*
+lookfintab(Fintab *t, void *k, bool del)
+{
+ int32 i, j;
+ Finalizer *v;
+
+ if(t->max == 0)
+ return nil;
+ i = (uintptr)k % (uintptr)t->max;
+ for(j=0; j<t->max; j++) {
+ if(t->key[i] == nil)
+ return nil;
+ if(t->key[i] == k) {
+ v = t->val[i];
+ if(del) {
+ t->key[i] = (void*)-1;
+ t->val[i] = nil;
+ t->ndead++;
+ }
+ return v;
+ }
+ if(++i == t->max)
+ i = 0;
+ }
+
+ // cannot happen - table is known to be non-full
+ runtime·throw("finalizer table inconsistent");
+ return nil;
+}
+
+static Fintab fintab;
+
+// add finalizer; caller is responsible for making sure not already in table
+void
+runtime·addfinalizer(void *p, void (*f)(void*), int32 nret)
+{
+ Fintab newtab;
+ int32 i;
+ byte *base;
+ Finalizer *e;
+
+ e = nil;
+ if(f != nil) {
+ e = runtime·mal(sizeof *e);
+ e->fn = f;
+ e->nret = nret;
+ }
+
+ runtime·lock(&finlock);
+ if(!runtime·mlookup(p, &base, nil, nil) || p != base) {
+ runtime·unlock(&finlock);
+ runtime·throw("addfinalizer on invalid pointer");
+ }
+ if(f == nil) {
+ lookfintab(&fintab, p, 1);
+ runtime·unlock(&finlock);
+ return;
+ }
+
+ if(lookfintab(&fintab, p, 0)) {
+ runtime·unlock(&finlock);
+ runtime·throw("double finalizer");
+ }
+ runtime·setblockspecial(p);
+
+ if(fintab.nkey >= fintab.max/2+fintab.max/4) {
+ // keep table at most 3/4 full:
+ // allocate new table and rehash.
+
+ runtime·memclr((byte*)&newtab, sizeof newtab);
+ newtab.max = fintab.max;
+ if(newtab.max == 0)
+ newtab.max = 3*3*3;
+ else if(fintab.ndead < fintab.nkey/2) {
+ // grow table if not many dead values.
+ // otherwise just rehash into table of same size.
+ newtab.max *= 3;
+ }
+
+ newtab.key = runtime·mallocgc(newtab.max*sizeof newtab.key[0], FlagNoPointers, 0, 1);
+ newtab.val = runtime·mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);
+
+ for(i=0; i<fintab.max; i++) {
+ void *k;
+
+ k = fintab.key[i];
+ if(k != nil && k != (void*)-1)
+ addfintab(&newtab, k, fintab.val[i]);
+ }
+ runtime·free(fintab.key);
+ runtime·free(fintab.val);
+ fintab = newtab;
+ }
+
+ addfintab(&fintab, p, e);
+ runtime·unlock(&finlock);
+}
+
+// get finalizer; if del, delete finalizer.
+// caller is responsible for updating RefHasFinalizer bit.
+Finalizer*
+runtime·getfinalizer(void *p, bool del)
+{
+ Finalizer *f;
+
+ runtime·lock(&finlock);
+ f = lookfintab(&fintab, p, del);
+ runtime·unlock(&finlock);
+ return f;
+}
+
+void
+runtime·walkfintab(void (*fn)(void*))
+{
+ void **key;
+ void **ekey;
+
+ runtime·lock(&finlock);
+ key = fintab.key;
+ ekey = key + fintab.max;
+ for(; key < ekey; key++)
+ if(*key != nil && *key != ((void*)-1))
+ fn(*key);
+ runtime·unlock(&finlock);
+}
diff --git a/src/pkg/runtime/mfixalloc.c b/src/pkg/runtime/mfixalloc.c
new file mode 100644
index 000000000..ab9df3196
--- /dev/null
+++ b/src/pkg/runtime/mfixalloc.c
@@ -0,0 +1,62 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fixed-size object allocator. Returned memory is not zeroed.
+//
+// See malloc.h for overview.
+
+#include "runtime.h"
+#include "malloc.h"
+
+// Initialize f to allocate objects of the given size,
+// using the allocator to obtain chunks of memory.
+void
+runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg)
+{
+ f->size = size;
+ f->alloc = alloc;
+ f->first = first;
+ f->arg = arg;
+ f->list = nil;
+ f->chunk = nil;
+ f->nchunk = 0;
+ f->inuse = 0;
+ f->sys = 0;
+}
+
+void*
+runtime·FixAlloc_Alloc(FixAlloc *f)
+{
+ void *v;
+
+ if(f->list) {
+ v = f->list;
+ f->list = *(void**)f->list;
+ f->inuse += f->size;
+ return v;
+ }
+ if(f->nchunk < f->size) {
+ f->sys += FixAllocChunk;
+ f->chunk = f->alloc(FixAllocChunk);
+ if(f->chunk == nil)
+ runtime·throw("out of memory (FixAlloc)");
+ f->nchunk = FixAllocChunk;
+ }
+ v = f->chunk;
+ if(f->first)
+ f->first(f->arg, v);
+ f->chunk += f->size;
+ f->nchunk -= f->size;
+ f->inuse += f->size;
+ return v;
+}
+
+void
+runtime·FixAlloc_Free(FixAlloc *f, void *p)
+{
+ f->inuse -= f->size;
+ *(void**)p = f->list;
+ f->list = p;
+}
+
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
new file mode 100644
index 000000000..78ea2aa2b
--- /dev/null
+++ b/src/pkg/runtime/mgc0.c
@@ -0,0 +1,910 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector.
+
+#include "runtime.h"
+#include "malloc.h"
+#include "stack.h"
+
+enum {
+ Debug = 0,
+ UseCas = 1,
+ PtrSize = sizeof(void*),
+
+ // Four bits per word (see #defines below).
+ wordsPerBitmapWord = sizeof(void*)*8/4,
+ bitShift = sizeof(void*)*8/4,
+};
+
+// Bits in per-word bitmap.
+// #defines because enum might not be able to hold the values.
+//
+// Each word in the bitmap describes wordsPerBitmapWord words
+// of heap memory. There are 4 bitmap bits dedicated to each heap word,
+// so on a 64-bit system there is one bitmap word per 16 heap words.
+// The bits in the word are packed together by type first, then by
+// heap location, so each 64-bit bitmap word consists of, from top to bottom,
+// the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits,
+// then the 16 bitNoPointers/bitBlockBoundary bits, then the 16 bitAllocated bits.
+// This layout makes it easier to iterate over the bits of a given type.
+//
+// The bitmap starts at mheap.arena_start and extends *backward* from
+// there. On a 64-bit system the off'th word in the arena is tracked by
+// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
+// the only difference is that the divisor is 8.)
+//
+// To pull out the bits corresponding to a given pointer p, we use:
+//
+// off = p - (uintptr*)mheap.arena_start; // word offset
+// b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
+// shift = off % wordsPerBitmapWord
+// bits = *b >> shift;
+// /* then test bits & bitAllocated, bits & bitMarked, etc. */
+//
+#define bitAllocated ((uintptr)1<<(bitShift*0))
+#define bitNoPointers ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */
+#define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */
+#define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */
+#define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set */
+
+#define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
+
+static uint64 nlookup;
+static uint64 nsizelookup;
+static uint64 naddrlookup;
+static int32 gctrace;
+
+typedef struct Workbuf Workbuf;
+struct Workbuf
+{
+ Workbuf *next;
+ uintptr nw;
+ byte *w[2048-2];
+};
+
+extern byte data[];
+extern byte etext[];
+extern byte end[];
+
+static G *fing;
+static Finalizer *finq;
+static int32 fingwait;
+
+static void runfinq(void);
+static Workbuf* getempty(Workbuf*);
+static Workbuf* getfull(Workbuf*);
+
+// scanblock scans a block of n bytes starting at pointer b for references
+// to other objects, scanning any it finds recursively until there are no
+// unscanned objects left. Instead of using an explicit recursion, it keeps
+// a work list in the Workbuf* structures and loops in the main function
+// body. Keeping an explicit work list is easier on the stack allocator and
+// more efficient.
+static void
+scanblock(byte *b, int64 n)
+{
+ byte *obj, *arena_start, *p;
+ void **vp;
+ uintptr size, *bitp, bits, shift, i, j, x, xbits, off;
+ MSpan *s;
+ PageID k;
+ void **bw, **w, **ew;
+ Workbuf *wbuf;
+
+ if((int64)(uintptr)n != n || n < 0) {
+ runtime·printf("scanblock %p %D\n", b, n);
+ runtime·throw("scanblock");
+ }
+
+ // Memory arena parameters.
+ arena_start = runtime·mheap.arena_start;
+
+ wbuf = nil; // current work buffer
+ ew = nil; // end of work buffer
+ bw = nil; // beginning of work buffer
+ w = nil; // current pointer into work buffer
+
+ // Align b to a word boundary.
+ off = (uintptr)b & (PtrSize-1);
+ if(off != 0) {
+ b += PtrSize - off;
+ n -= PtrSize - off;
+ }
+
+ for(;;) {
+ // Each iteration scans the block b of length n, queueing pointers in
+ // the work buffer.
+ if(Debug > 1)
+ runtime·printf("scanblock %p %D\n", b, n);
+
+ vp = (void**)b;
+ n /= PtrSize;
+ for(i=0; i<n; i++) {
+ obj = (byte*)vp[i];
+
+ // Words outside the arena cannot be pointers.
+ if((byte*)obj < arena_start || (byte*)obj >= runtime·mheap.arena_used)
+ continue;
+
+ // obj may be a pointer to a live object.
+ // Try to find the beginning of the object.
+
+ // Round down to word boundary.
+ obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
+
+ // Find bits for this word.
+ off = (uintptr*)obj - (uintptr*)arena_start;
+ bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ xbits = *bitp;
+ bits = xbits >> shift;
+
+ // Pointing at the beginning of a block?
+ if((bits & (bitAllocated|bitBlockBoundary)) != 0)
+ goto found;
+
+ // Pointing just past the beginning?
+ // Scan backward a little to find a block boundary.
+ for(j=shift; j-->0; ) {
+ if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
+ obj = (byte*)obj - (shift-j)*PtrSize;
+ shift = j;
+ bits = xbits>>shift;
+ goto found;
+ }
+ }
+
+ // Otherwise consult span table to find beginning.
+ // (Manually inlined copy of MHeap_LookupMaybe.)
+ nlookup++;
+ naddrlookup++;
+ k = (uintptr)obj>>PageShift;
+ x = k;
+ if(sizeof(void*) == 8)
+ x -= (uintptr)arena_start>>PageShift;
+ s = runtime·mheap.map[x];
+ if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse)
+ continue;
+ p = (byte*)((uintptr)s->start<<PageShift);
+ if(s->sizeclass == 0) {
+ obj = p;
+ } else {
+ if((byte*)obj >= (byte*)s->limit)
+ continue;
+ size = runtime·class_to_size[s->sizeclass];
+ int32 i = ((byte*)obj - p)/size;
+ obj = p+i*size;
+ }
+
+ // Now that we know the object header, reload bits.
+ off = (uintptr*)obj - (uintptr*)arena_start;
+ bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ xbits = *bitp;
+ bits = xbits >> shift;
+
+ found:
+ // Now we have bits, bitp, and shift correct for
+ // obj pointing at the base of the object.
+ // If not allocated or already marked, done.
+ if((bits & bitAllocated) == 0 || (bits & bitMarked) != 0)
+ continue;
+ *bitp |= bitMarked<<shift;
+
+ // If object has no pointers, don't need to scan further.
+ if((bits & bitNoPointers) != 0)
+ continue;
+
+ // If buffer is full, get a new one.
+ if(w >= ew) {
+ wbuf = getempty(wbuf);
+ bw = wbuf->w;
+ w = bw;
+ ew = bw + nelem(wbuf->w);
+ }
+ *w++ = obj;
+ }
+
+ // Done scanning [b, b+n). Prepare for the next iteration of
+ // the loop by setting b and n to the parameters for the next block.
+
+ // Fetch b from the work buffers.
+ if(w <= bw) {
+ // Emptied our buffer: refill.
+ wbuf = getfull(wbuf);
+ if(wbuf == nil)
+ break;
+ bw = wbuf->w;
+ ew = wbuf->w + nelem(wbuf->w);
+ w = bw+wbuf->nw;
+ }
+ b = *--w;
+
+ // Figure out n = size of b. Start by loading bits for b.
+ off = (uintptr*)b - (uintptr*)arena_start;
+ bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ xbits = *bitp;
+ bits = xbits >> shift;
+
+ // Might be small; look for nearby block boundary.
+ // A block boundary is marked by either bitBlockBoundary
+ // or bitAllocated being set (see notes near their definition).
+ enum {
+ boundary = bitBlockBoundary|bitAllocated
+ };
+ // Look for a block boundary both after and before b
+ // in the same bitmap word.
+ //
+ // A block boundary j words after b is indicated by
+ // bits>>j & boundary
+ // assuming shift+j < bitShift. (If shift+j >= bitShift then
+ // we'll be bleeding other bit types like bitMarked into our test.)
+ // Instead of inserting the conditional shift+j < bitShift into the loop,
+ // we can let j range from 1 to bitShift as long as we first
+ // apply a mask to keep only the bits corresponding
+ // to shift+j < bitShift aka j < bitShift-shift.
+ bits &= (boundary<<(bitShift-shift)) - boundary;
+
+ // A block boundary j words before b is indicated by
+ // xbits>>(shift-j) & boundary
+ // (assuming shift >= j). There is no cleverness here
+ // avoid the test, because when j gets too large the shift
+ // turns negative, which is undefined in C.
+
+ for(j=1; j<bitShift; j++) {
+ if(((bits>>j)&boundary) != 0 || shift>=j && ((xbits>>(shift-j))&boundary) != 0) {
+ n = j*PtrSize;
+ goto scan;
+ }
+ }
+
+ // Fall back to asking span about size class.
+ // (Manually inlined copy of MHeap_Lookup.)
+ nlookup++;
+ nsizelookup++;
+ x = (uintptr)b>>PageShift;
+ if(sizeof(void*) == 8)
+ x -= (uintptr)arena_start>>PageShift;
+ s = runtime·mheap.map[x];
+ if(s->sizeclass == 0)
+ n = s->npages<<PageShift;
+ else
+ n = runtime·class_to_size[s->sizeclass];
+ scan:;
+ }
+}
+
+static struct {
+ Workbuf *full;
+ Workbuf *empty;
+ byte *chunk;
+ uintptr nchunk;
+} work;
+
+// Get an empty work buffer off the work.empty list,
+// allocating new buffers as needed.
+static Workbuf*
+getempty(Workbuf *b)
+{
+ if(b != nil) {
+ b->nw = nelem(b->w);
+ b->next = work.full;
+ work.full = b;
+ }
+ b = work.empty;
+ if(b != nil) {
+ work.empty = b->next;
+ return b;
+ }
+
+ if(work.nchunk < sizeof *b) {
+ work.nchunk = 1<<20;
+ work.chunk = runtime·SysAlloc(work.nchunk);
+ }
+ b = (Workbuf*)work.chunk;
+ work.chunk += sizeof *b;
+ work.nchunk -= sizeof *b;
+ return b;
+}
+
+// Get a full work buffer off the work.full list, or return nil.
+static Workbuf*
+getfull(Workbuf *b)
+{
+ if(b != nil) {
+ b->nw = 0;
+ b->next = work.empty;
+ work.empty = b;
+ }
+ b = work.full;
+ if(b != nil)
+ work.full = b->next;
+ return b;
+}
+
+// Scanstack calls scanblock on each of gp's stack segments.
+static void
+scanstack(G *gp)
+{
+ int32 n;
+ Stktop *stk;
+ byte *sp, *guard;
+
+ stk = (Stktop*)gp->stackbase;
+ guard = gp->stackguard;
+
+ if(gp == g) {
+ // Scanning our own stack: start at &gp.
+ sp = (byte*)&gp;
+ } else {
+ // Scanning another goroutine's stack.
+ // The goroutine is usually asleep (the world is stopped).
+ sp = gp->sched.sp;
+
+ // The exception is that if the goroutine is about to enter or might
+ // have just exited a system call, it may be executing code such
+ // as schedlock and may have needed to start a new stack segment.
+ // Use the stack segment and stack pointer at the time of
+ // the system call instead, since that won't change underfoot.
+ if(gp->gcstack != nil) {
+ stk = (Stktop*)gp->gcstack;
+ sp = gp->gcsp;
+ guard = gp->gcguard;
+ }
+ }
+
+ if(Debug > 1)
+ runtime·printf("scanstack %d %p\n", gp->goid, sp);
+ n = 0;
+ while(stk) {
+ if(sp < guard-StackGuard || (byte*)stk < sp) {
+ runtime·printf("scanstack inconsistent: g%d#%d sp=%p not in [%p,%p]\n", gp->goid, n, sp, guard-StackGuard, stk);
+ runtime·throw("scanstack");
+ }
+ scanblock(sp, (byte*)stk - sp);
+ sp = stk->gobuf.sp;
+ guard = stk->stackguard;
+ stk = (Stktop*)stk->stackbase;
+ n++;
+ }
+}
+
+// Markfin calls scanblock on the blocks that have finalizers:
+// the things pointed at cannot be freed until the finalizers have run.
+static void
+markfin(void *v)
+{
+ uintptr size;
+
+ size = 0;
+ if(!runtime·mlookup(v, &v, &size, nil) || !runtime·blockspecial(v))
+ runtime·throw("mark - finalizer inconsistency");
+
+ // do not mark the finalizer block itself. just mark the things it points at.
+ scanblock(v, size);
+}
+
+// Mark
+static void
+mark(void)
+{
+ G *gp;
+
+ // mark data+bss.
+ // skip runtime·mheap itself, which has no interesting pointers
+ // and is mostly zeroed and would not otherwise be paged in.
+ scanblock(data, (byte*)&runtime·mheap - data);
+ scanblock((byte*)(&runtime·mheap+1), end - (byte*)(&runtime·mheap+1));
+
+ // mark stacks
+ for(gp=runtime·allg; gp!=nil; gp=gp->alllink) {
+ switch(gp->status){
+ default:
+ runtime·printf("unexpected G.status %d\n", gp->status);
+ runtime·throw("mark - bad status");
+ case Gdead:
+ break;
+ case Grunning:
+ if(gp != g)
+ runtime·throw("mark - world not stopped");
+ scanstack(gp);
+ break;
+ case Grunnable:
+ case Gsyscall:
+ case Gwaiting:
+ scanstack(gp);
+ break;
+ }
+ }
+
+ // mark things pointed at by objects with finalizers
+ runtime·walkfintab(markfin);
+}
+
+// Sweep frees or calls finalizers for blocks not marked in the mark phase.
+// It clears the mark bits in preparation for the next GC round.
+static void
+sweep(void)
+{
+ MSpan *s;
+ int32 cl, n, npages;
+ uintptr size;
+ byte *p;
+ MCache *c;
+ Finalizer *f;
+
+ for(s = runtime·mheap.allspans; s != nil; s = s->allnext) {
+ if(s->state != MSpanInUse)
+ continue;
+
+ p = (byte*)(s->start << PageShift);
+ cl = s->sizeclass;
+ if(cl == 0) {
+ size = s->npages<<PageShift;
+ n = 1;
+ } else {
+ // Chunk full of small blocks.
+ size = runtime·class_to_size[cl];
+ npages = runtime·class_to_allocnpages[cl];
+ n = (npages << PageShift) / size;
+ }
+
+ // sweep through n objects of given size starting at p.
+ for(; n > 0; n--, p += size) {
+ uintptr off, *bitp, shift, bits;
+
+ off = (uintptr*)p - (uintptr*)runtime·mheap.arena_start;
+ bitp = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ bits = *bitp>>shift;
+
+ if((bits & bitAllocated) == 0)
+ continue;
+
+ if((bits & bitMarked) != 0) {
+ *bitp &= ~(bitMarked<<shift);
+ continue;
+ }
+
+ if((bits & bitSpecial) != 0) {
+ // Special means it has a finalizer or is being profiled.
+ f = runtime·getfinalizer(p, 1);
+ if(f != nil) {
+ f->arg = p;
+ f->next = finq;
+ finq = f;
+ continue;
+ }
+ runtime·MProf_Free(p, size);
+ }
+
+ // Mark freed; restore block boundary bit.
+ *bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
+
+ c = m->mcache;
+ if(s->sizeclass == 0) {
+ // Free large span.
+ runtime·unmarkspan(p, 1<<PageShift);
+ *(uintptr*)p = 1; // needs zeroing
+ runtime·MHeap_Free(&runtime·mheap, s, 1);
+ } else {
+ // Free small object.
+ if(size > sizeof(uintptr))
+ ((uintptr*)p)[1] = 1; // mark as "needs to be zeroed"
+ c->local_by_size[s->sizeclass].nfree++;
+ runtime·MCache_Free(c, p, s->sizeclass, size);
+ }
+ c->local_alloc -= size;
+ c->local_nfree++;
+ }
+ }
+}
+
+// Semaphore, not Lock, so that the goroutine
+// reschedules when there is contention rather
+// than spinning.
+static uint32 gcsema = 1;
+
+// Initialized from $GOGC. GOGC=off means no gc.
+//
+// Next gc is after we've allocated an extra amount of
+// memory proportional to the amount already in use.
+// If gcpercent=100 and we're using 4M, we'll gc again
+// when we get to 8M. This keeps the gc cost in linear
+// proportion to the allocation cost. Adjusting gcpercent
+// just changes the linear constant (and also the amount of
+// extra memory used).
+static int32 gcpercent = -2;
+
+static void
+stealcache(void)
+{
+ M *m;
+
+ for(m=runtime·allm; m; m=m->alllink)
+ runtime·MCache_ReleaseAll(m->mcache);
+}
+
+static void
+cachestats(void)
+{
+ M *m;
+ MCache *c;
+ int32 i;
+ uint64 stacks_inuse;
+ uint64 stacks_sys;
+
+ stacks_inuse = 0;
+ stacks_sys = 0;
+ for(m=runtime·allm; m; m=m->alllink) {
+ runtime·purgecachedstats(m);
+ stacks_inuse += m->stackalloc->inuse;
+ stacks_sys += m->stackalloc->sys;
+ c = m->mcache;
+ for(i=0; i<nelem(c->local_by_size); i++) {
+ mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
+ c->local_by_size[i].nmalloc = 0;
+ mstats.by_size[i].nfree += c->local_by_size[i].nfree;
+ c->local_by_size[i].nfree = 0;
+ }
+ }
+ mstats.stacks_inuse = stacks_inuse;
+ mstats.stacks_sys = stacks_sys;
+}
+
+void
+runtime·gc(int32 force)
+{
+ int64 t0, t1, t2, t3;
+ uint64 heap0, heap1, obj0, obj1;
+ byte *p;
+ Finalizer *fp;
+
+ // The gc is turned off (via enablegc) until
+ // the bootstrap has completed.
+ // Also, malloc gets called in the guts
+ // of a number of libraries that might be
+ // holding locks. To avoid priority inversion
+ // problems, don't bother trying to run gc
+ // while holding a lock. The next mallocgc
+ // without a lock will do the gc instead.
+ if(!mstats.enablegc || m->locks > 0 || runtime·panicking)
+ return;
+
+ if(gcpercent == -2) { // first time through
+ p = runtime·getenv("GOGC");
+ if(p == nil || p[0] == '\0')
+ gcpercent = 100;
+ else if(runtime·strcmp(p, (byte*)"off") == 0)
+ gcpercent = -1;
+ else
+ gcpercent = runtime·atoi(p);
+
+ p = runtime·getenv("GOGCTRACE");
+ if(p != nil)
+ gctrace = runtime·atoi(p);
+ }
+ if(gcpercent < 0)
+ return;
+
+ runtime·semacquire(&gcsema);
+ if(!force && mstats.heap_alloc < mstats.next_gc) {
+ runtime·semrelease(&gcsema);
+ return;
+ }
+
+ t0 = runtime·nanotime();
+ nlookup = 0;
+ nsizelookup = 0;
+ naddrlookup = 0;
+
+ m->gcing = 1;
+ runtime·stoptheworld();
+ if(runtime·mheap.Lock.key != 0)
+ runtime·throw("runtime·mheap locked during gc");
+
+ cachestats();
+ heap0 = mstats.heap_alloc;
+ obj0 = mstats.nmalloc - mstats.nfree;
+
+ mark();
+ t1 = runtime·nanotime();
+ sweep();
+ t2 = runtime·nanotime();
+ stealcache();
+ cachestats();
+
+ mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
+ m->gcing = 0;
+
+ m->locks++; // disable gc during the mallocs in newproc
+ fp = finq;
+ if(fp != nil) {
+ // kick off or wake up goroutine to run queued finalizers
+ if(fing == nil)
+ fing = runtime·newproc1((byte*)runfinq, nil, 0, 0, runtime·gc);
+ else if(fingwait) {
+ fingwait = 0;
+ runtime·ready(fing);
+ }
+ }
+ m->locks--;
+
+ cachestats();
+ heap1 = mstats.heap_alloc;
+ obj1 = mstats.nmalloc - mstats.nfree;
+
+ t3 = runtime·nanotime();
+ mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
+ mstats.pause_total_ns += t3 - t0;
+ mstats.numgc++;
+ if(mstats.debuggc)
+ runtime·printf("pause %D\n", t3-t0);
+
+ if(gctrace) {
+ runtime·printf("gc%d: %D+%D+%D ms %D -> %D MB %D -> %D (%D-%D) objects %D pointer lookups (%D size, %D addr)\n",
+ mstats.numgc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000,
+ heap0>>20, heap1>>20, obj0, obj1,
+ mstats.nmalloc, mstats.nfree,
+ nlookup, nsizelookup, naddrlookup);
+ }
+
+ runtime·semrelease(&gcsema);
+ runtime·starttheworld();
+
+ // give the queued finalizers, if any, a chance to run
+ if(fp != nil)
+ runtime·gosched();
+
+ if(gctrace > 1 && !force)
+ runtime·gc(1);
+}
+
+void
+runtime·UpdateMemStats(void)
+{
+ // Have to acquire gcsema to stop the world,
+ // because stoptheworld can only be used by
+ // one goroutine at a time, and there might be
+ // a pending garbage collection already calling it.
+ runtime·semacquire(&gcsema);
+ m->gcing = 1;
+ runtime·stoptheworld();
+ cachestats();
+ m->gcing = 0;
+ runtime·semrelease(&gcsema);
+ runtime·starttheworld();
+}
+
+static void
+runfinq(void)
+{
+ Finalizer *f, *next;
+ byte *frame;
+
+ for(;;) {
+ // There's no need for a lock in this section
+ // because it only conflicts with the garbage
+ // collector, and the garbage collector only
+ // runs when everyone else is stopped, and
+ // runfinq only stops at the gosched() or
+ // during the calls in the for loop.
+ f = finq;
+ finq = nil;
+ if(f == nil) {
+ fingwait = 1;
+ g->status = Gwaiting;
+ runtime·gosched();
+ continue;
+ }
+ for(; f; f=next) {
+ next = f->next;
+ frame = runtime·mal(sizeof(uintptr) + f->nret);
+ *(void**)frame = f->arg;
+ reflect·call((byte*)f->fn, frame, sizeof(uintptr) + f->nret);
+ runtime·free(frame);
+ f->fn = nil;
+ f->arg = nil;
+ f->next = nil;
+ runtime·free(f);
+ }
+ runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible
+ }
+}
+
+// mark the block at v of size n as allocated.
+// If noptr is true, mark it as having no pointers.
+void
+runtime·markallocated(void *v, uintptr n, bool noptr)
+{
+ uintptr *b, obits, bits, off, shift;
+
+ if(0)
+ runtime·printf("markallocated %p+%p\n", v, n);
+
+ if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
+ runtime·throw("markallocated: bad pointer");
+
+ off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+
+ for(;;) {
+ obits = *b;
+ bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
+ if(noptr)
+ bits |= bitNoPointers<<shift;
+ if(runtime·singleproc) {
+ *b = bits;
+ break;
+ } else {
+ // more than one goroutine is potentially running: use atomic op
+ if(runtime·casp((void**)b, (void*)obits, (void*)bits))
+ break;
+ }
+ }
+}
+
+// mark the block at v of size n as freed.
+void
+runtime·markfreed(void *v, uintptr n)
+{
+ uintptr *b, obits, bits, off, shift;
+
+ if(0)
+ runtime·printf("markallocated %p+%p\n", v, n);
+
+ if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
+ runtime·throw("markallocated: bad pointer");
+
+ off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+
+ for(;;) {
+ obits = *b;
+ bits = (obits & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
+ if(runtime·singleproc) {
+ *b = bits;
+ break;
+ } else {
+ // more than one goroutine is potentially running: use atomic op
+ if(runtime·casp((void**)b, (void*)obits, (void*)bits))
+ break;
+ }
+ }
+}
+
+// check that the block at v of size n is marked freed.
+void
+runtime·checkfreed(void *v, uintptr n)
+{
+ uintptr *b, bits, off, shift;
+
+ if(!runtime·checking)
+ return;
+
+ if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
+ return; // not allocated, so okay
+
+ off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+
+ bits = *b>>shift;
+ if((bits & bitAllocated) != 0) {
+ runtime·printf("checkfreed %p+%p: off=%p have=%p\n",
+ v, n, off, bits & bitMask);
+ runtime·throw("checkfreed: not freed");
+ }
+}
+
+// mark the span of memory at v as having n blocks of the given size.
+// if leftover is true, there is left over space at the end of the span.
+void
+runtime·markspan(void *v, uintptr size, uintptr n, bool leftover)
+{
+ uintptr *b, off, shift;
+ byte *p;
+
+ if((byte*)v+size*n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
+ runtime·throw("markspan: bad pointer");
+
+ p = v;
+ if(leftover) // mark a boundary just past end of last block too
+ n++;
+ for(; n-- > 0; p += size) {
+ // Okay to use non-atomic ops here, because we control
+ // the entire span, and each bitmap word has bits for only
+ // one span, so no other goroutines are changing these
+ // bitmap words.
+ off = (uintptr*)p - (uintptr*)runtime·mheap.arena_start; // word offset
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ *b = (*b & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
+ }
+}
+
+// unmark the span of memory at v of length n bytes.
+void
+runtime·unmarkspan(void *v, uintptr n)
+{
+ uintptr *p, *b, off;
+
+ if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
+ runtime·throw("markspan: bad pointer");
+
+ p = v;
+ off = p - (uintptr*)runtime·mheap.arena_start; // word offset
+ if(off % wordsPerBitmapWord != 0)
+ runtime·throw("markspan: unaligned pointer");
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ n /= PtrSize;
+ if(n%wordsPerBitmapWord != 0)
+ runtime·throw("unmarkspan: unaligned length");
+ // Okay to use non-atomic ops here, because we control
+ // the entire span, and each bitmap word has bits for only
+ // one span, so no other goroutines are changing these
+ // bitmap words.
+ n /= wordsPerBitmapWord;
+ while(n-- > 0)
+ *b-- = 0;
+}
+
+bool
+runtime·blockspecial(void *v)
+{
+ uintptr *b, off, shift;
+
+ off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start;
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+
+ return (*b & (bitSpecial<<shift)) != 0;
+}
+
+void
+runtime·setblockspecial(void *v)
+{
+ uintptr *b, off, shift, bits, obits;
+
+ off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start;
+ b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+
+ for(;;) {
+ obits = *b;
+ bits = obits | (bitSpecial<<shift);
+ if(runtime·singleproc) {
+ *b = bits;
+ break;
+ } else {
+ // more than one goroutine is potentially running: use atomic op
+ if(runtime·casp((void**)b, (void*)obits, (void*)bits))
+ break;
+ }
+ }
+}
+
+void
+runtime·MHeap_MapBits(MHeap *h)
+{
+ // Caller has added extra mappings to the arena.
+ // Add extra mappings of bitmap words as needed.
+ // We allocate extra bitmap pieces in chunks of bitmapChunk.
+ enum {
+ bitmapChunk = 8192
+ };
+ uintptr n;
+
+ n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
+ n = (n+bitmapChunk-1) & ~(bitmapChunk-1);
+ if(h->bitmap_mapped >= n)
+ return;
+
+ runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped);
+ h->bitmap_mapped = n;
+}
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
new file mode 100644
index 000000000..37d505681
--- /dev/null
+++ b/src/pkg/runtime/mheap.c
@@ -0,0 +1,374 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Page heap.
+//
+// See malloc.h for overview.
+//
+// When a MSpan is in the heap free list, state == MSpanFree
+// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
+//
+// When a MSpan is allocated, state == MSpanInUse
+// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
+
+#include "runtime.h"
+#include "malloc.h"
+
+static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
+static bool MHeap_Grow(MHeap*, uintptr);
+static void MHeap_FreeLocked(MHeap*, MSpan*);
+static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
+static MSpan *BestFit(MSpan*, uintptr, MSpan*);
+
+static void
+RecordSpan(void *vh, byte *p)
+{
+ MHeap *h;
+ MSpan *s;
+
+ h = vh;
+ s = (MSpan*)p;
+ s->allnext = h->allspans;
+ h->allspans = s;
+}
+
+// Initialize the heap; fetch memory using alloc.
+void
+runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
+{
+ uint32 i;
+
+ runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
+ runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
+ // h->mapcache needs no init
+ for(i=0; i<nelem(h->free); i++)
+ runtime·MSpanList_Init(&h->free[i]);
+ runtime·MSpanList_Init(&h->large);
+ for(i=0; i<nelem(h->central); i++)
+ runtime·MCentral_Init(&h->central[i], i);
+}
+
+// Allocate a new span of npage pages from the heap
+// and record its size class in the HeapMap and HeapMapCache.
+MSpan*
+runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
+{
+ MSpan *s;
+
+ runtime·lock(h);
+ runtime·purgecachedstats(m);
+ s = MHeap_AllocLocked(h, npage, sizeclass);
+ if(s != nil) {
+ mstats.heap_inuse += npage<<PageShift;
+ if(acct) {
+ mstats.heap_objects++;
+ mstats.heap_alloc += npage<<PageShift;
+ }
+ }
+ runtime·unlock(h);
+ return s;
+}
+
+static MSpan*
+MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
+{
+ uintptr n;
+ MSpan *s, *t;
+ PageID p;
+
+ // Try in fixed-size lists up to max.
+ for(n=npage; n < nelem(h->free); n++) {
+ if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
+ s = h->free[n].next;
+ goto HaveSpan;
+ }
+ }
+
+ // Best fit in list of large spans.
+ if((s = MHeap_AllocLarge(h, npage)) == nil) {
+ if(!MHeap_Grow(h, npage))
+ return nil;
+ if((s = MHeap_AllocLarge(h, npage)) == nil)
+ return nil;
+ }
+
+HaveSpan:
+ // Mark span in use.
+ if(s->state != MSpanFree)
+ runtime·throw("MHeap_AllocLocked - MSpan not free");
+ if(s->npages < npage)
+ runtime·throw("MHeap_AllocLocked - bad npages");
+ runtime·MSpanList_Remove(s);
+ s->state = MSpanInUse;
+
+ if(s->npages > npage) {
+ // Trim extra and put it back in the heap.
+ t = runtime·FixAlloc_Alloc(&h->spanalloc);
+ mstats.mspan_inuse = h->spanalloc.inuse;
+ mstats.mspan_sys = h->spanalloc.sys;
+ runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
+ s->npages = npage;
+ p = t->start;
+ if(sizeof(void*) == 8)
+ p -= ((uintptr)h->arena_start>>PageShift);
+ if(p > 0)
+ h->map[p-1] = s;
+ h->map[p] = t;
+ h->map[p+t->npages-1] = t;
+ *(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift); // copy "needs zeroing" mark
+ t->state = MSpanInUse;
+ MHeap_FreeLocked(h, t);
+ }
+
+ if(*(uintptr*)(s->start<<PageShift) != 0)
+ runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+
+ // Record span info, because gc needs to be
+ // able to map interior pointer to containing span.
+ s->sizeclass = sizeclass;
+ p = s->start;
+ if(sizeof(void*) == 8)
+ p -= ((uintptr)h->arena_start>>PageShift);
+ for(n=0; n<npage; n++)
+ h->map[p+n] = s;
+ return s;
+}
+
+// Allocate a span of exactly npage pages from the list of large spans.
+static MSpan*
+MHeap_AllocLarge(MHeap *h, uintptr npage)
+{
+ return BestFit(&h->large, npage, nil);
+}
+
+// Search list for smallest span with >= npage pages.
+// If there are multiple smallest spans, take the one
+// with the earliest starting address.
+static MSpan*
+BestFit(MSpan *list, uintptr npage, MSpan *best)
+{
+ MSpan *s;
+
+ for(s=list->next; s != list; s=s->next) {
+ if(s->npages < npage)
+ continue;
+ if(best == nil
+ || s->npages < best->npages
+ || (s->npages == best->npages && s->start < best->start))
+ best = s;
+ }
+ return best;
+}
+
+// Try to add at least npage pages of memory to the heap,
+// returning whether it worked.
+static bool
+MHeap_Grow(MHeap *h, uintptr npage)
+{
+ uintptr ask;
+ void *v;
+ MSpan *s;
+ PageID p;
+
+ // Ask for a big chunk, to reduce the number of mappings
+ // the operating system needs to track; also amortizes
+ // the overhead of an operating system mapping.
+ // Allocate a multiple of 64kB (16 pages).
+ npage = (npage+15)&~15;
+ ask = npage<<PageShift;
+ if(ask < HeapAllocChunk)
+ ask = HeapAllocChunk;
+
+ v = runtime·MHeap_SysAlloc(h, ask);
+ if(v == nil) {
+ if(ask > (npage<<PageShift)) {
+ ask = npage<<PageShift;
+ v = runtime·MHeap_SysAlloc(h, ask);
+ }
+ if(v == nil) {
+ runtime·printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
+ return false;
+ }
+ }
+ mstats.heap_sys += ask;
+
+ // Create a fake "in use" span and free it, so that the
+ // right coalescing happens.
+ s = runtime·FixAlloc_Alloc(&h->spanalloc);
+ mstats.mspan_inuse = h->spanalloc.inuse;
+ mstats.mspan_sys = h->spanalloc.sys;
+ runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
+ p = s->start;
+ if(sizeof(void*) == 8)
+ p -= ((uintptr)h->arena_start>>PageShift);
+ h->map[p] = s;
+ h->map[p + s->npages - 1] = s;
+ s->state = MSpanInUse;
+ MHeap_FreeLocked(h, s);
+ return true;
+}
+
+// Look up the span at the given address.
+// Address is guaranteed to be in map
+// and is guaranteed to be start or end of span.
+MSpan*
+runtime·MHeap_Lookup(MHeap *h, void *v)
+{
+ uintptr p;
+
+ p = (uintptr)v;
+ if(sizeof(void*) == 8)
+ p -= (uintptr)h->arena_start;
+ return h->map[p >> PageShift];
+}
+
+// Look up the span at the given address.
+// Address is *not* guaranteed to be in map
+// and may be anywhere in the span.
+// Map entries for the middle of a span are only
+// valid for allocated spans. Free spans may have
+// other garbage in their middles, so we have to
+// check for that.
+MSpan*
+runtime·MHeap_LookupMaybe(MHeap *h, void *v)
+{
+ MSpan *s;
+ PageID p, q;
+
+ if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
+ return nil;
+ p = (uintptr)v>>PageShift;
+ q = p;
+ if(sizeof(void*) == 8)
+ q -= (uintptr)h->arena_start >> PageShift;
+ s = h->map[q];
+ if(s == nil || p < s->start || p - s->start >= s->npages)
+ return nil;
+ if(s->state != MSpanInUse)
+ return nil;
+ return s;
+}
+
+// Free the span back into the heap.
+void
+runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
+{
+ runtime·lock(h);
+ runtime·purgecachedstats(m);
+ mstats.heap_inuse -= s->npages<<PageShift;
+ if(acct) {
+ mstats.heap_alloc -= s->npages<<PageShift;
+ mstats.heap_objects--;
+ }
+ MHeap_FreeLocked(h, s);
+ runtime·unlock(h);
+}
+
+static void
+MHeap_FreeLocked(MHeap *h, MSpan *s)
+{
+ uintptr *sp, *tp;
+ MSpan *t;
+ PageID p;
+
+ if(s->state != MSpanInUse || s->ref != 0) {
+ runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
+ runtime·throw("MHeap_FreeLocked - invalid free");
+ }
+ s->state = MSpanFree;
+ runtime·MSpanList_Remove(s);
+ sp = (uintptr*)(s->start<<PageShift);
+
+ // Coalesce with earlier, later spans.
+ p = s->start;
+ if(sizeof(void*) == 8)
+ p -= (uintptr)h->arena_start >> PageShift;
+ if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
+ tp = (uintptr*)(t->start<<PageShift);
+ *tp |= *sp; // propagate "needs zeroing" mark
+ s->start = t->start;
+ s->npages += t->npages;
+ p -= t->npages;
+ h->map[p] = s;
+ runtime·MSpanList_Remove(t);
+ t->state = MSpanDead;
+ runtime·FixAlloc_Free(&h->spanalloc, t);
+ mstats.mspan_inuse = h->spanalloc.inuse;
+ mstats.mspan_sys = h->spanalloc.sys;
+ }
+ if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
+ tp = (uintptr*)(t->start<<PageShift);
+ *sp |= *tp; // propagate "needs zeroing" mark
+ s->npages += t->npages;
+ h->map[p + s->npages - 1] = s;
+ runtime·MSpanList_Remove(t);
+ t->state = MSpanDead;
+ runtime·FixAlloc_Free(&h->spanalloc, t);
+ mstats.mspan_inuse = h->spanalloc.inuse;
+ mstats.mspan_sys = h->spanalloc.sys;
+ }
+
+ // Insert s into appropriate list.
+ if(s->npages < nelem(h->free))
+ runtime·MSpanList_Insert(&h->free[s->npages], s);
+ else
+ runtime·MSpanList_Insert(&h->large, s);
+
+ // TODO(rsc): IncrementalScavenge() to return memory to OS.
+}
+
+// Initialize a new span with the given start and npages.
+void
+runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
+{
+ span->next = nil;
+ span->prev = nil;
+ span->start = start;
+ span->npages = npages;
+ span->freelist = nil;
+ span->ref = 0;
+ span->sizeclass = 0;
+ span->state = 0;
+}
+
+// Initialize an empty doubly-linked list.
+void
+runtime·MSpanList_Init(MSpan *list)
+{
+ list->state = MSpanListHead;
+ list->next = list;
+ list->prev = list;
+}
+
+void
+runtime·MSpanList_Remove(MSpan *span)
+{
+ if(span->prev == nil && span->next == nil)
+ return;
+ span->prev->next = span->next;
+ span->next->prev = span->prev;
+ span->prev = nil;
+ span->next = nil;
+}
+
+bool
+runtime·MSpanList_IsEmpty(MSpan *list)
+{
+ return list->next == list;
+}
+
+void
+runtime·MSpanList_Insert(MSpan *list, MSpan *span)
+{
+ if(span->next != nil || span->prev != nil) {
+ runtime·printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
+ runtime·throw("MSpanList_Insert");
+ }
+ span->next = list->next;
+ span->prev = list;
+ span->next->prev = span;
+ span->prev->next = span;
+}
+
+
diff --git a/src/pkg/runtime/mkasmh.sh b/src/pkg/runtime/mkasmh.sh
new file mode 100755
index 000000000..328e2d5ba
--- /dev/null
+++ b/src/pkg/runtime/mkasmh.sh
@@ -0,0 +1,112 @@
+#!/bin/sh
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+set -e
+
+cat <<'EOF'
+// Assembly constants.
+// AUTOMATICALLY GENERATED BY mkasmh.sh DURING BUILD
+
+EOF
+
+case "$GOARCH" in
+386)
+ # The offsets 0 and 4 are also known to:
+ # ../../cmd/8l/pass.c:/D_GS
+ # ../../libcgo/linux_386.c:/^threadentry
+ # ../../libcgo/darwin_386.c:/^threadentry
+ case "$GOOS" in
+ windows)
+ echo '#define get_tls(r) MOVL 0x2c(FS), r'
+ echo '#define g(r) 0(r)'
+ echo '#define m(r) 4(r)'
+ ;;
+ plan9)
+ echo '#define get_tls(r) MOVL _tos(SB), r '
+ echo '#define g(r) -8(r)'
+ echo '#define m(r) -4(r)'
+ ;;
+ linux)
+ # On Linux systems, what we call 0(GS) and 4(GS) for g and m
+ # turn into %gs:-8 and %gs:-4 (using gcc syntax to denote
+ # what the machine sees as opposed to 8l input).
+ # 8l rewrites 0(GS) and 4(GS) into these.
+ #
+ # On Linux Xen, it is not allowed to use %gs:-8 and %gs:-4
+ # directly. Instead, we have to store %gs:0 into a temporary
+ # register and then use -8(%reg) and -4(%reg). This kind
+ # of addressing is correct even when not running Xen.
+ #
+ # 8l can rewrite MOVL 0(GS), CX into the appropriate pair
+ # of mov instructions, using CX as the intermediate register
+ # (safe because CX is about to be written to anyway).
+ # But 8l cannot handle other instructions, like storing into 0(GS),
+ # which is where these macros come into play.
+ # get_tls sets up the temporary and then g and r use it.
+ #
+ # The final wrinkle is that get_tls needs to read from %gs:0,
+ # but in 8l input it's called 8(GS), because 8l is going to
+ # subtract 8 from all the offsets, as described above.
+ echo '#define get_tls(r) MOVL 8(GS), r'
+ echo '#define g(r) -8(r)'
+ echo '#define m(r) -4(r)'
+ ;;
+ *)
+ echo '#define get_tls(r)'
+ echo '#define g(r) 0(GS)'
+ echo '#define m(r) 4(GS)'
+ ;;
+ esac
+ ;;
+amd64)
+ case "$GOOS" in
+ windows)
+ echo '#define get_tls(r) MOVQ 0x58(GS), r'
+ echo '#define g(r) 0(r)'
+ echo '#define m(r) 8(r)'
+ ;;
+ *)
+ # The offsets 0 and 8 are known to:
+ # ../../cmd/6l/pass.c:/D_GS
+ # ../../libcgo/linux_amd64.c:/^threadentry
+ # ../../libcgo/darwin_amd64.c:/^threadentry
+ #
+ echo '#define get_tls(r)'
+ echo '#define g(r) 0(GS)'
+ echo '#define m(r) 8(GS)'
+ ;;
+ esac
+ ;;
+arm)
+ echo '#define g R10'
+ echo '#define m R9'
+ echo '#define LR R14'
+ ;;
+*)
+ echo 'unknown $GOARCH: '$GOARCH 1>&2
+ exit 1
+ ;;
+esac
+echo
+
+awk '
+{ gsub(/\r/, ""); }
+/^aggr G$/ { aggr="g" }
+/^aggr M$/ { aggr = "m" }
+/^aggr Gobuf$/ { aggr = "gobuf" }
+/^}/ { aggr = "" }
+
+# Gobuf 24 sched;
+# 'Y' 48 stack0;
+# 'Y' 56 entry;
+# 'A' G 64 alllink;
+aggr != "" && /^ / {
+ name=$NF;
+ sub(/;/, "", name);
+ offset=$(NF-1);
+ printf("#define %s_%s %s\n", aggr, name, offset);
+}
+' runtime.acid.$GOARCH
+
diff --git a/src/pkg/runtime/mkgodefs.sh b/src/pkg/runtime/mkgodefs.sh
new file mode 100755
index 000000000..b6e97213e
--- /dev/null
+++ b/src/pkg/runtime/mkgodefs.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+set -e
+
+cat <<EOF
+// Go definitions for C variables and types.
+// AUTOMATICALLY GENERATED BY THE FOLLOWING COMMAND. DO NOT EDIT.
+// CC="$CC" CFLAGS="$CFLAGS" ./mkgodefs.sh $@
+
+package runtime
+import "unsafe"
+var _ unsafe.Pointer
+
+EOF
+
+for i in "$@"; do
+ $CC $CFLAGS -q $i
+done | awk '
+/^func/ { next }
+/^const/ { next }
+/^\/\/.*type/ { next }
+
+/^(const|func|type|var) / {
+ if(seen[$2]++) {
+ skip = /{[^}]*$/;
+ next;
+ }
+}
+
+skip {
+ skip = !/^}/
+ next;
+}
+
+{print}
+'
diff --git a/src/pkg/runtime/mkversion.c b/src/pkg/runtime/mkversion.c
new file mode 100644
index 000000000..0d96aa356
--- /dev/null
+++ b/src/pkg/runtime/mkversion.c
@@ -0,0 +1,15 @@
+#include <u.h>
+#include <libc.h>
+
+char *template =
+ "// generated by mkversion.c; do not edit.\n"
+ "package runtime\n"
+ "const defaultGoroot = `%s`\n"
+ "const theVersion = \"%s\"\n";
+
+void
+main(void)
+{
+ print(template, getgoroot(), getgoversion());
+ exits(0);
+}
diff --git a/src/pkg/runtime/mprof.goc b/src/pkg/runtime/mprof.goc
new file mode 100644
index 000000000..517f96a31
--- /dev/null
+++ b/src/pkg/runtime/mprof.goc
@@ -0,0 +1,274 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Malloc profiling.
+// Patterned after tcmalloc's algorithms; shorter code.
+
+package runtime
+#include "runtime.h"
+#include "malloc.h"
+#include "defs.h"
+#include "type.h"
+
+// NOTE(rsc): Everything here could use cas if contention became an issue.
+static Lock proflock;
+
+// Per-call-stack allocation information.
+// Lookup by hashing call stack into a linked-list hash table.
+typedef struct Bucket Bucket;
+struct Bucket
+{
+ Bucket *next; // next in hash list
+ Bucket *allnext; // next in list of all buckets
+ uintptr allocs;
+ uintptr frees;
+ uintptr alloc_bytes;
+ uintptr free_bytes;
+ uintptr hash;
+ uintptr nstk;
+ uintptr stk[1];
+};
+enum {
+ BuckHashSize = 179999,
+};
+static Bucket **buckhash;
+static Bucket *buckets;
+static uintptr bucketmem;
+
+// Return the bucket for stk[0:nstk], allocating new bucket if needed.
+static Bucket*
+stkbucket(uintptr *stk, int32 nstk)
+{
+ int32 i;
+ uintptr h;
+ Bucket *b;
+
+ if(buckhash == nil) {
+ buckhash = runtime·SysAlloc(BuckHashSize*sizeof buckhash[0]);
+ mstats.buckhash_sys += BuckHashSize*sizeof buckhash[0];
+ }
+
+ // Hash stack.
+ h = 0;
+ for(i=0; i<nstk; i++) {
+ h += stk[i];
+ h += h<<10;
+ h ^= h>>6;
+ }
+ h += h<<3;
+ h ^= h>>11;
+
+ i = h%BuckHashSize;
+ for(b = buckhash[i]; b; b=b->next)
+ if(b->hash == h && b->nstk == nstk &&
+ runtime·mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
+ return b;
+
+ b = runtime·mallocgc(sizeof *b + nstk*sizeof stk[0], FlagNoProfiling, 0, 1);
+ bucketmem += sizeof *b + nstk*sizeof stk[0];
+ runtime·memmove(b->stk, stk, nstk*sizeof stk[0]);
+ b->hash = h;
+ b->nstk = nstk;
+ b->next = buckhash[i];
+ buckhash[i] = b;
+ b->allnext = buckets;
+ buckets = b;
+ return b;
+}
+
+// Map from pointer to Bucket* that allocated it.
+// Three levels:
+// Linked-list hash table for top N-20 bits.
+// Array index for next 13 bits.
+// Linked list for next 7 bits.
+// This is more efficient than using a general map,
+// because of the typical clustering of the pointer keys.
+
+typedef struct AddrHash AddrHash;
+typedef struct AddrEntry AddrEntry;
+
+struct AddrHash
+{
+ AddrHash *next; // next in top-level hash table linked list
+ uintptr addr; // addr>>20
+ AddrEntry *dense[1<<13];
+};
+
+struct AddrEntry
+{
+ AddrEntry *next; // next in bottom-level linked list
+ uint32 addr;
+ Bucket *b;
+};
+
+enum {
+ AddrHashBits = 12 // 1MB per entry, so good for 4GB of used address space
+};
+static AddrHash *addrhash[1<<AddrHashBits];
+static AddrEntry *addrfree;
+static uintptr addrmem;
+
+// Multiplicative hash function:
+// hashMultiplier is the bottom 32 bits of int((sqrt(5)-1)/2 * (1<<32)).
+// This is a good multiplier as suggested in CLR, Knuth. The hash
+// value is taken to be the top AddrHashBits bits of the bottom 32 bits
+// of the multiplied value.
+enum {
+ HashMultiplier = 2654435769U
+};
+
+// Set the bucket associated with addr to b.
+static void
+setaddrbucket(uintptr addr, Bucket *b)
+{
+ int32 i;
+ uint32 h;
+ AddrHash *ah;
+ AddrEntry *e;
+
+ h = (uint32)((addr>>20)*HashMultiplier) >> (32-AddrHashBits);
+ for(ah=addrhash[h]; ah; ah=ah->next)
+ if(ah->addr == (addr>>20))
+ goto found;
+
+ ah = runtime·mallocgc(sizeof *ah, FlagNoProfiling, 0, 1);
+ addrmem += sizeof *ah;
+ ah->next = addrhash[h];
+ ah->addr = addr>>20;
+ addrhash[h] = ah;
+
+found:
+ if((e = addrfree) == nil) {
+ e = runtime·mallocgc(64*sizeof *e, FlagNoProfiling, 0, 0);
+ addrmem += 64*sizeof *e;
+ for(i=0; i+1<64; i++)
+ e[i].next = &e[i+1];
+ e[63].next = nil;
+ }
+ addrfree = e->next;
+ e->addr = (uint32)~(addr & ((1<<20)-1));
+ e->b = b;
+ h = (addr>>7)&(nelem(ah->dense)-1); // entry in dense is top 13 bits of low 20.
+ e->next = ah->dense[h];
+ ah->dense[h] = e;
+}
+
+// Get the bucket associated with addr and clear the association.
+static Bucket*
+getaddrbucket(uintptr addr)
+{
+ uint32 h;
+ AddrHash *ah;
+ AddrEntry *e, **l;
+ Bucket *b;
+
+ h = (uint32)((addr>>20)*HashMultiplier) >> (32-AddrHashBits);
+ for(ah=addrhash[h]; ah; ah=ah->next)
+ if(ah->addr == (addr>>20))
+ goto found;
+ return nil;
+
+found:
+ h = (addr>>7)&(nelem(ah->dense)-1); // entry in dense is top 13 bits of low 20.
+ for(l=&ah->dense[h]; (e=*l) != nil; l=&e->next) {
+ if(e->addr == (uint32)~(addr & ((1<<20)-1))) {
+ *l = e->next;
+ b = e->b;
+ e->next = addrfree;
+ addrfree = e;
+ return b;
+ }
+ }
+ return nil;
+}
+
+// Called by malloc to record a profiled block.
+void
+runtime·MProf_Malloc(void *p, uintptr size)
+{
+ int32 nstk;
+ uintptr stk[32];
+ Bucket *b;
+
+ if(m->nomemprof > 0)
+ return;
+
+ m->nomemprof++;
+ nstk = runtime·callers(1, stk, 32);
+ runtime·lock(&proflock);
+ b = stkbucket(stk, nstk);
+ b->allocs++;
+ b->alloc_bytes += size;
+ setaddrbucket((uintptr)p, b);
+ runtime·unlock(&proflock);
+ m->nomemprof--;
+}
+
+// Called when freeing a profiled block.
+void
+runtime·MProf_Free(void *p, uintptr size)
+{
+ Bucket *b;
+
+ if(m->nomemprof > 0)
+ return;
+
+ m->nomemprof++;
+ runtime·lock(&proflock);
+ b = getaddrbucket((uintptr)p);
+ if(b != nil) {
+ b->frees++;
+ b->free_bytes += size;
+ }
+ runtime·unlock(&proflock);
+ m->nomemprof--;
+}
+
+
+// Go interface to profile data. (Declared in extern.go)
+// Assumes Go sizeof(int) == sizeof(int32)
+
+// Must match MemProfileRecord in extern.go.
+typedef struct Record Record;
+struct Record {
+ int64 alloc_bytes, free_bytes;
+ int64 alloc_objects, free_objects;
+ uintptr stk[32];
+};
+
+// Write b's data to r.
+static void
+record(Record *r, Bucket *b)
+{
+ int32 i;
+
+ r->alloc_bytes = b->alloc_bytes;
+ r->free_bytes = b->free_bytes;
+ r->alloc_objects = b->allocs;
+ r->free_objects = b->frees;
+ for(i=0; i<b->nstk && i<nelem(r->stk); i++)
+ r->stk[i] = b->stk[i];
+ for(; i<nelem(r->stk); i++)
+ r->stk[i] = 0;
+}
+
+func MemProfile(p Slice, include_inuse_zero bool) (n int32, ok bool) {
+ Bucket *b;
+ Record *r;
+
+ runtime·lock(&proflock);
+ n = 0;
+ for(b=buckets; b; b=b->allnext)
+ if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
+ n++;
+ ok = false;
+ if(n <= p.len) {
+ ok = true;
+ r = (Record*)p.array;
+ for(b=buckets; b; b=b->allnext)
+ if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
+ record(r++, b);
+ }
+ runtime·unlock(&proflock);
+}
diff --git a/src/pkg/runtime/msize.c b/src/pkg/runtime/msize.c
new file mode 100644
index 000000000..770ef38ce
--- /dev/null
+++ b/src/pkg/runtime/msize.c
@@ -0,0 +1,168 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Malloc small size classes.
+//
+// See malloc.h for overview.
+//
+// The size classes are chosen so that rounding an allocation
+// request up to the next size class wastes at most 12.5% (1.125x).
+//
+// Each size class has its own page count that gets allocated
+// and chopped up when new objects of the size class are needed.
+// That page count is chosen so that chopping up the run of
+// pages into objects of the given size wastes at most 12.5% (1.125x)
+// of the memory. It is not necessary that the cutoff here be
+// the same as above.
+//
+// The two sources of waste multiply, so the worst possible case
+// for the above constraints would be that allocations of some
+// size might have a 26.6% (1.266x) overhead.
+// In practice, only one of the wastes comes into play for a
+// given size (sizes < 512 waste mainly on the round-up,
+// sizes > 512 waste mainly on the page chopping).
+//
+// TODO(rsc): Compute max waste for any given size.
+
+#include "runtime.h"
+#include "malloc.h"
+
+int32 runtime·class_to_size[NumSizeClasses];
+int32 runtime·class_to_allocnpages[NumSizeClasses];
+int32 runtime·class_to_transfercount[NumSizeClasses];
+
+// The SizeToClass lookup is implemented using two arrays,
+// one mapping sizes <= 1024 to their class and one mapping
+// sizes >= 1024 and <= MaxSmallSize to their class.
+// All objects are 8-aligned, so the first array is indexed by
+// the size divided by 8 (rounded up). Objects >= 1024 bytes
+// are 128-aligned, so the second array is indexed by the
+// size divided by 128 (rounded up). The arrays are filled in
+// by InitSizes.
+
+static int32 size_to_class8[1024/8 + 1];
+static int32 size_to_class128[(MaxSmallSize-1024)/128 + 1];
+
+int32
+runtime·SizeToClass(int32 size)
+{
+ if(size > MaxSmallSize)
+ runtime·throw("SizeToClass - invalid size");
+ if(size > 1024-8)
+ return size_to_class128[(size-1024+127) >> 7];
+ return size_to_class8[(size+7)>>3];
+}
+
+void
+runtime·InitSizes(void)
+{
+ int32 align, sizeclass, size, nextsize, n;
+ uint32 i;
+ uintptr allocsize, npages;
+
+ // Initialize the runtime·class_to_size table (and choose class sizes in the process).
+ runtime·class_to_size[0] = 0;
+ sizeclass = 1; // 0 means no class
+ align = 8;
+ for(size = align; size <= MaxSmallSize; size += align) {
+ if((size&(size-1)) == 0) { // bump alignment once in a while
+ if(size >= 2048)
+ align = 256;
+ else if(size >= 128)
+ align = size / 8;
+ else if(size >= 16)
+ align = 16; // required for x86 SSE instructions, if we want to use them
+ }
+ if((align&(align-1)) != 0)
+ runtime·throw("InitSizes - bug");
+
+ // Make the allocnpages big enough that
+ // the leftover is less than 1/8 of the total,
+ // so wasted space is at most 12.5%.
+ allocsize = PageSize;
+ while(allocsize%size > allocsize/8)
+ allocsize += PageSize;
+ npages = allocsize >> PageShift;
+
+ // If the previous sizeclass chose the same
+ // allocation size and fit the same number of
+ // objects into the page, we might as well
+ // use just this size instead of having two
+ // different sizes.
+ if(sizeclass > 1
+ && npages == runtime·class_to_allocnpages[sizeclass-1]
+ && allocsize/size == allocsize/runtime·class_to_size[sizeclass-1]) {
+ runtime·class_to_size[sizeclass-1] = size;
+ continue;
+ }
+
+ runtime·class_to_allocnpages[sizeclass] = npages;
+ runtime·class_to_size[sizeclass] = size;
+ sizeclass++;
+ }
+ if(sizeclass != NumSizeClasses) {
+ runtime·printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
+ runtime·throw("InitSizes - bad NumSizeClasses");
+ }
+
+ // Initialize the size_to_class tables.
+ nextsize = 0;
+ for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
+ for(; nextsize < 1024 && nextsize <= runtime·class_to_size[sizeclass]; nextsize+=8)
+ size_to_class8[nextsize/8] = sizeclass;
+ if(nextsize >= 1024)
+ for(; nextsize <= runtime·class_to_size[sizeclass]; nextsize += 128)
+ size_to_class128[(nextsize-1024)/128] = sizeclass;
+ }
+
+ // Double-check SizeToClass.
+ if(0) {
+ for(n=0; n < MaxSmallSize; n++) {
+ sizeclass = runtime·SizeToClass(n);
+ if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime·class_to_size[sizeclass] < n) {
+ runtime·printf("size=%d sizeclass=%d runtime·class_to_size=%d\n", n, sizeclass, runtime·class_to_size[sizeclass]);
+ runtime·printf("incorrect SizeToClass");
+ goto dump;
+ }
+ if(sizeclass > 1 && runtime·class_to_size[sizeclass-1] >= n) {
+ runtime·printf("size=%d sizeclass=%d runtime·class_to_size=%d\n", n, sizeclass, runtime·class_to_size[sizeclass]);
+ runtime·printf("SizeToClass too big");
+ goto dump;
+ }
+ }
+ }
+
+ // Copy out for statistics table.
+ for(i=0; i<nelem(runtime·class_to_size); i++)
+ mstats.by_size[i].size = runtime·class_to_size[i];
+
+ // Initialize the runtime·class_to_transfercount table.
+ for(sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
+ n = 64*1024 / runtime·class_to_size[sizeclass];
+ if(n < 2)
+ n = 2;
+ if(n > 32)
+ n = 32;
+ runtime·class_to_transfercount[sizeclass] = n;
+ }
+ return;
+
+dump:
+ if(1){
+ runtime·printf("NumSizeClasses=%d\n", NumSizeClasses);
+ runtime·printf("runtime·class_to_size:");
+ for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++)
+ runtime·printf(" %d", runtime·class_to_size[sizeclass]);
+ runtime·printf("\n\n");
+ runtime·printf("size_to_class8:");
+ for(i=0; i<nelem(size_to_class8); i++)
+ runtime·printf(" %d=>%d(%d)\n", i*8, size_to_class8[i], runtime·class_to_size[size_to_class8[i]]);
+ runtime·printf("\n");
+ runtime·printf("size_to_class128:");
+ for(i=0; i<nelem(size_to_class128); i++)
+ runtime·printf(" %d=>%d(%d)\n", i*128, size_to_class128[i], runtime·class_to_size[size_to_class128[i]]);
+ runtime·printf("\n");
+ }
+ runtime·throw("InitSizes failed");
+}
diff --git a/src/pkg/runtime/openbsd/amd64/defs.h b/src/pkg/runtime/openbsd/amd64/defs.h
new file mode 100644
index 000000000..4eb5cd205
--- /dev/null
+++ b/src/pkg/runtime/openbsd/amd64/defs.h
@@ -0,0 +1,149 @@
+// godefs -f -m64 defs.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x1000,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+ SA_SIGINFO = 0x40,
+ SA_RESTART = 0x2,
+ SA_ONSTACK = 0x1,
+ EINTR = 0x4,
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGEMT = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGBUS = 0xa,
+ SIGSEGV = 0xb,
+ SIGSYS = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGTERM = 0xf,
+ SIGURG = 0x10,
+ SIGSTOP = 0x11,
+ SIGTSTP = 0x12,
+ SIGCONT = 0x13,
+ SIGCHLD = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGIO = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGINFO = 0x1d,
+ SIGUSR1 = 0x1e,
+ SIGUSR2 = 0x1f,
+ FPE_INTDIV = 0x1,
+ FPE_INTOVF = 0x2,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+ ITIMER_REAL = 0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+};
+
+// Types
+#pragma pack on
+
+typedef struct Sigaltstack Sigaltstack;
+struct Sigaltstack {
+ void *ss_sp;
+ uint64 ss_size;
+ int32 ss_flags;
+ byte pad_godefs_0[4];
+};
+
+typedef uint32 Sigset;
+
+typedef struct Siginfo Siginfo;
+struct Siginfo {
+ int32 si_signo;
+ int32 si_code;
+ int32 si_errno;
+ byte pad_godefs_0[4];
+ byte _data[120];
+};
+
+typedef union Sigval Sigval;
+union Sigval {
+ int32 sival_int;
+ void *sival_ptr;
+};
+
+typedef struct StackT StackT;
+struct StackT {
+ void *ss_sp;
+ uint64 ss_size;
+ int32 ss_flags;
+ byte pad_godefs_0[4];
+};
+
+typedef struct Timeval Timeval;
+struct Timeval {
+ int64 tv_sec;
+ int64 tv_usec;
+};
+
+typedef struct Itimerval Itimerval;
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+
+typedef void sfxsave64;
+
+typedef struct Sigcontext Sigcontext;
+struct Sigcontext {
+ int64 sc_rdi;
+ int64 sc_rsi;
+ int64 sc_rdx;
+ int64 sc_rcx;
+ int64 sc_r8;
+ int64 sc_r9;
+ int64 sc_r10;
+ int64 sc_r11;
+ int64 sc_r12;
+ int64 sc_r13;
+ int64 sc_r14;
+ int64 sc_r15;
+ int64 sc_rbp;
+ int64 sc_rbx;
+ int64 sc_rax;
+ int64 sc_gs;
+ int64 sc_fs;
+ int64 sc_es;
+ int64 sc_ds;
+ int64 sc_trapno;
+ int64 sc_err;
+ int64 sc_rip;
+ int64 sc_cs;
+ int64 sc_rflags;
+ int64 sc_rsp;
+ int64 sc_ss;
+ sfxsave64 *sc_fpstate;
+ int32 sc_onstack;
+ int32 sc_mask;
+};
+#pragma pack off
diff --git a/src/pkg/runtime/openbsd/amd64/rt0.s b/src/pkg/runtime/openbsd/amd64/rt0.s
new file mode 100644
index 000000000..e7fce5969
--- /dev/null
+++ b/src/pkg/runtime/openbsd/amd64/rt0.s
@@ -0,0 +1,8 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+TEXT _rt0_amd64_openbsd(SB),7,$-8
+ MOVQ $_rt0_amd64(SB), DX
+ MOVQ SP, DI
+ JMP DX
diff --git a/src/pkg/runtime/openbsd/amd64/signal.c b/src/pkg/runtime/openbsd/amd64/signal.c
new file mode 100644
index 000000000..01bc76d20
--- /dev/null
+++ b/src/pkg/runtime/openbsd/amd64/signal.c
@@ -0,0 +1,199 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "signals.h"
+#include "os.h"
+
+extern void runtime·sigtramp(void);
+
+typedef struct sigaction {
+ union {
+ void (*__sa_handler)(int32);
+ void (*__sa_sigaction)(int32, Siginfo*, void *);
+ } __sigaction_u; /* signal handler */
+ uint32 sa_mask; /* signal mask to apply */
+ int32 sa_flags; /* see signal options below */
+} Sigaction;
+
+void
+runtime·dumpregs(Sigcontext *r)
+{
+ runtime·printf("rax %X\n", r->sc_rax);
+ runtime·printf("rbx %X\n", r->sc_rbx);
+ runtime·printf("rcx %X\n", r->sc_rcx);
+ runtime·printf("rdx %X\n", r->sc_rdx);
+ runtime·printf("rdi %X\n", r->sc_rdi);
+ runtime·printf("rsi %X\n", r->sc_rsi);
+ runtime·printf("rbp %X\n", r->sc_rbp);
+ runtime·printf("rsp %X\n", r->sc_rsp);
+ runtime·printf("r8 %X\n", r->sc_r8);
+ runtime·printf("r9 %X\n", r->sc_r9);
+ runtime·printf("r10 %X\n", r->sc_r10);
+ runtime·printf("r11 %X\n", r->sc_r11);
+ runtime·printf("r12 %X\n", r->sc_r12);
+ runtime·printf("r13 %X\n", r->sc_r13);
+ runtime·printf("r14 %X\n", r->sc_r14);
+ runtime·printf("r15 %X\n", r->sc_r15);
+ runtime·printf("rip %X\n", r->sc_rip);
+ runtime·printf("rflags %X\n", r->sc_rflags);
+ runtime·printf("cs %X\n", r->sc_cs);
+ runtime·printf("fs %X\n", r->sc_fs);
+ runtime·printf("gs %X\n", r->sc_gs);
+}
+
+String
+runtime·signame(int32 sig)
+{
+ if(sig < 0 || sig >= NSIG)
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp)
+{
+ Sigcontext *r = context;
+ uintptr *sp;
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)r->sc_rip,
+ (uint8*)r->sc_rsp, nil, gp);
+ return;
+ }
+
+ if(gp != nil && (runtime·sigtab[sig].flags & SigPanic)) {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = info->si_code;
+ gp->sigcode1 = *(uintptr*)((byte*)info + 16); /* si_addr */
+ gp->sigpc = r->sc_rip;
+
+ // Only push runtime·sigpanic if r->mc_rip != 0.
+ // If r->mc_rip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
+ // won't get to see who faulted.)
+ if(r->sc_rip != 0) {
+ sp = (uintptr*)r->sc_rsp;
+ *--sp = r->sc_rip;
+ r->sc_rsp = (uintptr)sp;
+ }
+ r->sc_rip = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig)
+ || (runtime·sigtab[sig].flags & SigIgnore))
+ return;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG)
+ runtime·printf("Signal %d\n", sig);
+ else
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+
+ runtime·printf("PC=%X\n", r->sc_rip);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->sc_rip, (void*)r->sc_rsp, 0, gp);
+ runtime·tracebackothers(gp);
+ runtime·dumpregs(r);
+ }
+
+ runtime·exit(2);
+}
+
+// Called from kernel on signal stack, so no stack split.
+#pragma textflag 7
+void
+runtime·sigignore(void)
+{
+}
+
+void
+runtime·signalstack(byte *p, int32 n)
+{
+ Sigaltstack st;
+
+ st.ss_sp = (int8*)p;
+ st.ss_size = n;
+ st.ss_flags = 0;
+ runtime·sigaltstack(&st, nil);
+}
+
+static void
+sigaction(int32 i, void (*fn)(int32, Siginfo*, void*, G*), bool restart)
+{
+ Sigaction sa;
+
+ runtime·memclr((byte*)&sa, sizeof sa);
+ sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
+ if(restart)
+ sa.sa_flags |= SA_RESTART;
+ sa.sa_mask = ~0ULL;
+ if (fn == runtime·sighandler)
+ fn = (void*)runtime·sigtramp;
+ sa.__sigaction_u.__sa_sigaction = (void*)fn;
+ runtime·sigaction(i, &sa, nil);
+}
+
+void
+runtime·initsig(int32 queue)
+{
+ int32 i;
+ void *fn;
+
+ runtime·siginit();
+
+ for(i = 0; i<NSIG; i++) {
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
+ continue;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ fn = runtime·sighandler;
+ else
+ fn = runtime·sigignore;
+ sigaction(i, fn, (runtime·sigtab[i].flags & SigRestart) != 0);
+ }
+ }
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ Itimerval it;
+
+ runtime·memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ sigaction(SIGPROF, SIG_IGN, true);
+ } else {
+ sigaction(SIGPROF, runtime·sighandler, true);
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime·setitimer(ITIMER_PROF, &it, nil);
+ }
+ m->profilehz = hz;
+}
+
+void
+os·sigpipe(void)
+{
+ sigaction(SIGPIPE, SIG_DFL, false);
+ runtime·raisesigpipe();
+}
diff --git a/src/pkg/runtime/openbsd/amd64/sys.s b/src/pkg/runtime/openbsd/amd64/sys.s
new file mode 100644
index 000000000..2a238dffb
--- /dev/null
+++ b/src/pkg/runtime/openbsd/amd64/sys.s
@@ -0,0 +1,221 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// System calls and other sys.stuff for AMD64, OpenBSD
+// /usr/src/sys/kern/syscalls.master for syscall numbers.
+//
+
+#include "amd64/asm.h"
+
+// int64 rfork_thread(int32 flags, void *stack, M *m, G *g, void (*fn)(void));
+TEXT runtime·rfork_thread(SB),7,$0
+ MOVL flags+8(SP), DI
+ MOVQ stack+16(SP), SI
+
+ // Copy m, g, fn off parent stack for use by child.
+ MOVQ mm+24(SP), R8
+ MOVQ gg+32(SP), R9
+ MOVQ fn+40(SP), R12
+
+ MOVL $251, AX // sys_rfork
+ SYSCALL
+
+ // Return if rfork syscall failed
+ JCC 3(PC)
+ NEGL AX
+ RET
+
+ // In parent, return.
+ CMPL AX, $0
+ JEQ 2(PC)
+ RET
+
+ // In child, on new stack.
+ MOVQ SI, SP
+
+ // Initialize m->procid to thread ID
+ MOVL $299, AX // sys_getthrid
+ SYSCALL
+ MOVQ AX, m_procid(R8)
+
+ // Set FS to point at m->tls.
+ LEAQ m_tls(R8), DI
+ CALL runtime·settls(SB)
+
+ // In child, set up new stack
+ get_tls(CX)
+ MOVQ R8, m(CX)
+ MOVQ R9, g(CX)
+ CALL runtime·stackcheck(SB)
+
+ // Call fn
+ CALL R12
+
+ // It shouldn't return. If it does, exit
+ MOVL $302, AX // sys_threxit
+ SYSCALL
+ JMP -3(PC) // keep exiting
+
+TEXT runtime·sys_sched_yield(SB),7,$0
+ MOVL $298, AX
+ SYSCALL
+ RET
+
+TEXT runtime·sys_thrsleep(SB),7,$0
+ MOVQ 8(SP), DI
+ MOVL 16(SP), SI
+ MOVQ 24(SP), DX
+ MOVQ 32(SP), R10
+ MOVL $300, AX
+ SYSCALL
+ RET
+
+TEXT runtime·sys_thrwakeup(SB),7,$0
+ MOVQ 8(SP), DI
+ MOVL 16(SP), SI
+ MOVL $301, AX
+ SYSCALL
+ RET
+
+// Exit the entire program (like C exit)
+TEXT runtime·exit(SB),7,$-8
+ MOVL 8(SP), DI // arg 1 - exit status
+ MOVL $1, AX // sys_exit
+ SYSCALL
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·exit1(SB),7,$-8
+ MOVL $302, AX // sys_threxit
+ SYSCALL
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·write(SB),7,$-8
+ MOVL 8(SP), DI // arg 1 - fd
+ MOVQ 16(SP), SI // arg 2 - buf
+ MOVL 24(SP), DX // arg 3 - nbyte
+ MOVL $4, AX // sys_write
+ SYSCALL
+ RET
+
+TEXT runtime·raisesigpipe(SB),7,$16
+ MOVL $299, AX // sys_getthrid
+ SYSCALL
+ MOVQ AX, DI // arg 1 - pid
+ MOVQ $13, SI // arg 2 - signum == SIGPIPE
+ MOVL $37, AX // sys_kill
+ SYSCALL
+ RET
+
+TEXT runtime·setitimer(SB),7,$-8
+ MOVL 8(SP), DI // arg 1 - which
+ MOVQ 16(SP), SI // arg 2 - itv
+ MOVQ 24(SP), DX // arg 3 - oitv
+ MOVL $83, AX // sys_setitimer
+ SYSCALL
+ RET
+
+TEXT runtime·gettime(SB),7,$32
+ LEAQ 8(SP), DI // arg 1 - tp
+ MOVQ $0, SI // arg 2 - tzp
+ MOVL $116, AX // sys_gettimeofday
+ SYSCALL
+
+ MOVQ 8(SP), BX // sec
+ MOVQ sec+0(FP), DI
+ MOVQ BX, (DI)
+
+ MOVL 16(SP), BX // usec
+ MOVQ usec+8(FP), DI
+ MOVL BX, (DI)
+ RET
+
+TEXT runtime·sigaction(SB),7,$-8
+ MOVL 8(SP), DI // arg 1 - signum
+ MOVQ 16(SP), SI // arg 2 - nsa
+ MOVQ 24(SP), DX // arg 3 - osa
+ MOVL $46, AX
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·sigtramp(SB),7,$64
+ get_tls(BX)
+
+ // save g
+ MOVQ g(BX), R10
+ MOVQ R10, 40(SP)
+
+ // g = m->signal
+ MOVQ m(BX), BP
+ MOVQ m_gsignal(BP), BP
+ MOVQ BP, g(BX)
+
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ DX, 16(SP)
+ MOVQ R10, 24(SP)
+
+ CALL runtime·sighandler(SB)
+
+ // restore g
+ get_tls(BX)
+ MOVQ 40(SP), R10
+ MOVQ R10, g(BX)
+ RET
+
+TEXT runtime·mmap(SB),7,$0
+ MOVQ 8(SP), DI // arg 1 - addr
+ MOVQ 16(SP), SI // arg 2 - len
+ MOVL 24(SP), DX // arg 3 - prot
+ MOVL 28(SP), R10 // arg 4 - flags
+ MOVL 32(SP), R8 // arg 5 - fd
+ MOVQ 36(SP), R9
+ SUBQ $16, SP
+ MOVQ R9, 8(SP) // arg 7 - offset (passed on stack)
+ MOVQ $0, R9 // arg 6 - pad
+ MOVL $197, AX
+ SYSCALL
+ JCC 2(PC)
+ NEGL AX
+ ADDQ $16, SP
+ RET
+
+TEXT runtime·munmap(SB),7,$0
+ MOVQ 8(SP), DI // arg 1 - addr
+ MOVQ 16(SP), SI // arg 2 - len
+ MOVL $73, AX // sys_munmap
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+TEXT runtime·notok(SB),7,$-8
+ MOVL $0xf1, BP
+ MOVQ BP, (BP)
+ RET
+
+TEXT runtime·sigaltstack(SB),7,$-8
+ MOVQ new+8(SP), DI // arg 1 - nss
+ MOVQ old+16(SP), SI // arg 2 - oss
+ MOVQ $288, AX // sys_sigaltstack
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
+
+// set tls base to DI
+TEXT runtime·settls(SB),7,$8
+ // adjust for ELF: wants to use -16(FS) and -8(FS) for g and m
+ ADDQ $16, DI
+ MOVQ DI, 0(SP)
+ MOVQ SP, SI
+ MOVQ $12, DI // AMD64_SET_FSBASE (machine/sysarch.h)
+ MOVQ $165, AX // sys_sysarch
+ SYSCALL
+ JCC 2(PC)
+ CALL runtime·notok(SB)
+ RET
diff --git a/src/pkg/runtime/openbsd/defs.c b/src/pkg/runtime/openbsd/defs.c
new file mode 100644
index 000000000..d8adec981
--- /dev/null
+++ b/src/pkg/runtime/openbsd/defs.c
@@ -0,0 +1,103 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Input to godefs.
+ *
+ godefs -f -m64 defs.c >amd64/defs.h
+ godefs -f -m32 defs.c >386/defs.h
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/unistd.h>
+#include <sys/signal.h>
+#include <machine/mcontext.h>
+#include <errno.h>
+#include <signal.h>
+
+enum {
+ $PROT_NONE = PROT_NONE,
+ $PROT_READ = PROT_READ,
+ $PROT_WRITE = PROT_WRITE,
+ $PROT_EXEC = PROT_EXEC,
+
+ $MAP_ANON = MAP_ANON,
+ $MAP_PRIVATE = MAP_PRIVATE,
+ $MAP_FIXED = MAP_FIXED,
+
+ $SA_SIGINFO = SA_SIGINFO,
+ $SA_RESTART = SA_RESTART,
+ $SA_ONSTACK = SA_ONSTACK,
+
+ $EINTR = EINTR,
+
+ $SIGHUP = SIGHUP,
+ $SIGINT = SIGINT,
+ $SIGQUIT = SIGQUIT,
+ $SIGILL = SIGILL,
+ $SIGTRAP = SIGTRAP,
+ $SIGABRT = SIGABRT,
+ $SIGEMT = SIGEMT,
+ $SIGFPE = SIGFPE,
+ $SIGKILL = SIGKILL,
+ $SIGBUS = SIGBUS,
+ $SIGSEGV = SIGSEGV,
+ $SIGSYS = SIGSYS,
+ $SIGPIPE = SIGPIPE,
+ $SIGALRM = SIGALRM,
+ $SIGTERM = SIGTERM,
+ $SIGURG = SIGURG,
+ $SIGSTOP = SIGSTOP,
+ $SIGTSTP = SIGTSTP,
+ $SIGCONT = SIGCONT,
+ $SIGCHLD = SIGCHLD,
+ $SIGTTIN = SIGTTIN,
+ $SIGTTOU = SIGTTOU,
+ $SIGIO = SIGIO,
+ $SIGXCPU = SIGXCPU,
+ $SIGXFSZ = SIGXFSZ,
+ $SIGVTALRM = SIGVTALRM,
+ $SIGPROF = SIGPROF,
+ $SIGWINCH = SIGWINCH,
+ $SIGINFO = SIGINFO,
+ $SIGUSR1 = SIGUSR1,
+ $SIGUSR2 = SIGUSR2,
+
+ $FPE_INTDIV = FPE_INTDIV,
+ $FPE_INTOVF = FPE_INTOVF,
+ $FPE_FLTDIV = FPE_FLTDIV,
+ $FPE_FLTOVF = FPE_FLTOVF,
+ $FPE_FLTUND = FPE_FLTUND,
+ $FPE_FLTRES = FPE_FLTRES,
+ $FPE_FLTINV = FPE_FLTINV,
+ $FPE_FLTSUB = FPE_FLTSUB,
+
+ $BUS_ADRALN = BUS_ADRALN,
+ $BUS_ADRERR = BUS_ADRERR,
+ $BUS_OBJERR = BUS_OBJERR,
+
+ $SEGV_MAPERR = SEGV_MAPERR,
+ $SEGV_ACCERR = SEGV_ACCERR,
+
+ $ITIMER_REAL = ITIMER_REAL,
+ $ITIMER_VIRTUAL = ITIMER_VIRTUAL,
+ $ITIMER_PROF = ITIMER_PROF,
+};
+
+typedef struct sigaltstack $Sigaltstack;
+typedef sigset_t $Sigset;
+typedef siginfo_t $Siginfo;
+typedef union sigval $Sigval;
+
+typedef stack_t $StackT;
+
+typedef struct timeval $Timeval;
+typedef struct itimerval $Itimerval;
+
+// This is a hack to avoid pulling in machine/fpu.h and struct fxsave64.
+typedef void $sfxsave64;
+
+typedef struct sigcontext $Sigcontext;
diff --git a/src/pkg/runtime/openbsd/mem.c b/src/pkg/runtime/openbsd/mem.c
new file mode 100644
index 000000000..07abf2cfe
--- /dev/null
+++ b/src/pkg/runtime/openbsd/mem.c
@@ -0,0 +1,74 @@
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "malloc.h"
+
+void*
+runtime·SysAlloc(uintptr n)
+{
+ void *v;
+
+ mstats.sys += n;
+ v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(v < (void*)4096)
+ return nil;
+ return v;
+}
+
+void
+runtime·SysUnused(void *v, uintptr n)
+{
+ USED(v);
+ USED(n);
+ // TODO(rsc): call madvise MADV_DONTNEED
+}
+
+void
+runtime·SysFree(void *v, uintptr n)
+{
+ mstats.sys -= n;
+ runtime·munmap(v, n);
+}
+
+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+ // On 64-bit, people with ulimit -v set complain if we reserve too
+ // much address space. Instead, assume that the reservation is okay
+ // and check the assumption in SysMap.
+ if(sizeof(void*) == 8)
+ return v;
+
+ return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
+enum
+{
+ ENOMEM = 12,
+};
+
+void
+runtime·SysMap(void *v, uintptr n)
+{
+ void *p;
+
+ mstats.sys += n;
+
+ // On 64-bit, we don't actually have v reserved, so tread carefully.
+ if(sizeof(void*) == 8) {
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if(p == (void*)-ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v) {
+ runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
+ runtime·throw("runtime: address space conflict");
+ }
+ return;
+ }
+
+ p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+ if(p == (void*)-ENOMEM)
+ runtime·throw("runtime: out of memory");
+ if(p != v)
+ runtime·throw("runtime: cannot map pages in arena address space");
+}
diff --git a/src/pkg/runtime/openbsd/os.h b/src/pkg/runtime/openbsd/os.h
new file mode 100644
index 000000000..eba53b7cc
--- /dev/null
+++ b/src/pkg/runtime/openbsd/os.h
@@ -0,0 +1,12 @@
+#define SIG_DFL ((void*)0)
+#define SIG_IGN ((void*)1)
+
+struct sigaction;
+
+void runtime·sigpanic(void);
+void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
+void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
+void runtime·setitimerval(int32, Itimerval*, Itimerval*);
+void runtime·setitimer(int32, Itimerval*, Itimerval*);
+
+void runtime·raisesigpipe(void);
diff --git a/src/pkg/runtime/openbsd/signals.h b/src/pkg/runtime/openbsd/signals.h
new file mode 100644
index 000000000..63a84671d
--- /dev/null
+++ b/src/pkg/runtime/openbsd/signals.h
@@ -0,0 +1,52 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define C SigCatch
+#define I SigIgnore
+#define R SigRestart
+#define Q SigQueue
+#define P SigPanic
+
+SigTab runtime·sigtab[] = {
+ /* 0 */ 0, "SIGNONE: no trap",
+ /* 1 */ Q+R, "SIGHUP: terminal line hangup",
+ /* 2 */ Q+R, "SIGINT: interrupt",
+ /* 3 */ C, "SIGQUIT: quit",
+ /* 4 */ C, "SIGILL: illegal instruction",
+ /* 5 */ C, "SIGTRAP: trace trap",
+ /* 6 */ C, "SIGABRT: abort",
+ /* 7 */ C, "SIGEMT: EMT instruction",
+ /* 8 */ C+P, "SIGFPE: floating-point exception",
+ /* 9 */ 0, "SIGKILL: kill",
+ /* 10 */ C+P, "SIGBUS: bus error",
+ /* 11 */ C+P, "SIGSEGV: segmentation violation",
+ /* 12 */ C, "SIGSYS: bad system call",
+ /* 13 */ I, "SIGPIPE: write to broken pipe",
+ /* 14 */ Q+I+R, "SIGALRM: alarm clock",
+ /* 15 */ Q+R, "SIGTERM: termination",
+ /* 16 */ Q+I+R, "SIGURG: urgent condition on socket",
+ /* 17 */ 0, "SIGSTOP: stop, unblockable",
+ /* 18 */ Q+I+R, "SIGTSTP: stop from tty",
+ /* 19 */ 0, "SIGCONT: continue",
+ /* 20 */ Q+I+R, "SIGCHLD: child status has changed",
+ /* 21 */ Q+I+R, "SIGTTIN: background read from tty",
+ /* 22 */ Q+I+R, "SIGTTOU: background write to tty",
+ /* 23 */ Q+I+R, "SIGIO: i/o now possible",
+ /* 24 */ Q+I+R, "SIGXCPU: cpu limit exceeded",
+ /* 25 */ Q+I+R, "SIGXFSZ: file size limit exceeded",
+ /* 26 */ Q+I+R, "SIGVTALRM: virtual alarm clock",
+ /* 27 */ Q+I+R, "SIGPROF: profiling alarm clock",
+ /* 28 */ Q+I+R, "SIGWINCH: window size change",
+ /* 29 */ Q+I+R, "SIGINFO: information request",
+ /* 30 */ Q+I+R, "SIGUSR1: user-defined signal 1",
+ /* 31 */ Q+I+R, "SIGUSR2: user-defined signal 2",
+ /* 32 */ Q+I+R, "SIGTHR: reserved",
+};
+#undef C
+#undef I
+#undef R
+#undef Q
+#undef P
+
+#define NSIG 33
diff --git a/src/pkg/runtime/openbsd/thread.c b/src/pkg/runtime/openbsd/thread.c
new file mode 100644
index 000000000..7e9ba5d67
--- /dev/null
+++ b/src/pkg/runtime/openbsd/thread.c
@@ -0,0 +1,156 @@
+// Use of this source file is governed by a BSD-style
+// license that can be found in the LICENSE file.`
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "stack.h"
+
+extern SigTab runtime·sigtab[];
+
+extern int64 runtime·rfork_thread(int32 flags, void *stack, M *m, G *g, void (*fn)(void));
+extern void runtime·sys_sched_yield(void);
+
+// Basic spinlocks using CAS. We can improve on these later.
+static void
+lock(Lock *l)
+{
+ uint32 v;
+ int32 ret;
+
+ for(;;) {
+ if(runtime·cas(&l->key, 0, 1))
+ return;
+ runtime·sys_sched_yield();
+ }
+}
+
+static void
+unlock(Lock *l)
+{
+ uint32 v;
+ int32 ret;
+
+ for (;;) {
+ v = l->key;
+ if((v&1) == 0)
+ runtime·throw("unlock of unlocked lock");
+ if(runtime·cas(&l->key, v, 0))
+ break;
+ }
+}
+
+void
+runtime·lock(Lock *l)
+{
+ if(m->locks < 0)
+ runtime·throw("lock count");
+ m->locks++;
+ lock(l);
+}
+
+void
+runtime·unlock(Lock *l)
+{
+ m->locks--;
+ if(m->locks < 0)
+ runtime·throw("lock count");
+ unlock(l);
+}
+
+// Event notifications.
+void
+runtime·noteclear(Note *n)
+{
+ n->lock.key = 0;
+ lock(&n->lock);
+}
+
+void
+runtime·notesleep(Note *n)
+{
+ lock(&n->lock);
+ unlock(&n->lock);
+}
+
+void
+runtime·notewakeup(Note *n)
+{
+ unlock(&n->lock);
+}
+
+// From OpenBSD's sys/param.h
+#define RFPROC (1<<4) /* change child (else changes curproc) */
+#define RFMEM (1<<5) /* share `address space' */
+#define RFNOWAIT (1<<6) /* parent need not wait() on child */
+#define RFTHREAD (1<<13) /* create a thread, not a process */
+
+void
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
+{
+ int32 flags;
+ int32 ret;
+
+ flags = RFPROC | RFTHREAD | RFMEM | RFNOWAIT;
+
+ if (0) {
+ runtime·printf(
+ "newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
+ stk, m, g, fn, m->id, m->tls[0], &m);
+ }
+
+ m->tls[0] = m->id; // so 386 asm can find it
+
+ if((ret = runtime·rfork_thread(flags, stk, m, g, fn)) < 0) {
+ runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount() - 1, -ret);
+ runtime·printf("runtime: is kern.rthreads disabled?\n");
+
+ runtime·throw("runtime.newosproc");
+ }
+}
+
+void
+runtime·osinit(void)
+{
+}
+
+void
+runtime·goenvs(void)
+{
+ runtime·goenvs_unix();
+}
+
+// Called to initialize a new m (including the bootstrap m).
+void
+runtime·minit(void)
+{
+ // Initialize signal handling
+ m->gsignal = runtime·malg(32*1024);
+ runtime·signalstack(m->gsignal->stackguard - StackGuard, 32*1024);
+}
+
+void
+runtime·sigpanic(void)
+{
+ switch(g->sig) {
+ case SIGBUS:
+ if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGSEGV:
+ if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case SIGFPE:
+ switch(g->sigcode0) {
+ case FPE_INTDIV:
+ runtime·panicstring("integer divide by zero");
+ case FPE_INTOVF:
+ runtime·panicstring("integer overflow");
+ }
+ runtime·panicstring("floating point error");
+ }
+ runtime·panicstring(runtime·sigtab[g->sig].name);
+}
diff --git a/src/pkg/runtime/plan9/386/defs.h b/src/pkg/runtime/plan9/386/defs.h
new file mode 100644
index 000000000..58fd9d94d
--- /dev/null
+++ b/src/pkg/runtime/plan9/386/defs.h
@@ -0,0 +1,2 @@
+// nothing to see here
+#define tos_pid 48
diff --git a/src/pkg/runtime/plan9/386/rt0.s b/src/pkg/runtime/plan9/386/rt0.s
new file mode 100644
index 000000000..b56c8b325
--- /dev/null
+++ b/src/pkg/runtime/plan9/386/rt0.s
@@ -0,0 +1,32 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+TEXT _rt0_386_plan9(SB),7, $0
+ MOVL AX, _tos(SB)
+
+ // move arguments down to make room for
+ // m and g at top of stack, right before Tos.
+ MOVL SP, SI
+ SUBL $8, SP
+ MOVL SP, DI
+
+ MOVL AX, CX
+ SUBL SI, CX
+ CLD
+ REP; MOVSB
+
+ // adjust argv
+ SUBL SI, DI
+ MOVL newargc+0(SP), CX
+ LEAL newargv+4(SP), BP
+argv_fix:
+ ADDL DI, 0(BP)
+ ADDL $4, BP
+ LOOP argv_fix
+
+ JMP _rt0_386(SB)
+
+DATA runtime·isplan9(SB)/4, $1
+GLOBL runtime·isplan9(SB), $4
+GLOBL _tos(SB), $4
diff --git a/src/pkg/runtime/plan9/386/signal.c b/src/pkg/runtime/plan9/386/signal.c
new file mode 100644
index 000000000..364fd1c41
--- /dev/null
+++ b/src/pkg/runtime/plan9/386/signal.c
@@ -0,0 +1,24 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+void
+runtime·gettime(int64*, int32*)
+{
+}
+
+String
+runtime·signame(int32)
+{
+ return runtime·emptystring;
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ // TODO: Enable profiling interrupts.
+
+ m->profilehz = hz;
+}
diff --git a/src/pkg/runtime/plan9/386/sys.s b/src/pkg/runtime/plan9/386/sys.s
new file mode 100644
index 000000000..1cb570b68
--- /dev/null
+++ b/src/pkg/runtime/plan9/386/sys.s
@@ -0,0 +1,82 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "defs.h"
+#include "386/asm.h"
+
+// setldt(int entry, int address, int limit)
+TEXT runtime·setldt(SB),7,$0
+ RET
+
+TEXT runtime·open(SB),7,$0
+ MOVL $14, AX
+ INT $64
+ RET
+
+TEXT runtime·write(SB),7,$0
+ MOVL $20, AX
+ INT $64
+ RET
+
+TEXT runtime·exits(SB),7,$0
+ MOVL $8, AX
+ INT $64
+ RET
+
+TEXT runtime·brk_(SB),7,$0
+ MOVL $24, AX
+ INT $64
+ RET
+
+TEXT runtime·plan9_semacquire(SB),7,$0
+ MOVL $37, AX
+ INT $64
+ RET
+
+TEXT runtime·plan9_semrelease(SB),7,$0
+ MOVL $38, AX
+ INT $64
+ RET
+
+TEXT runtime·rfork(SB),7,$0
+ MOVL $19, AX // rfork
+ INT $64
+
+ // In parent, return.
+ CMPL AX, $0
+ JEQ 2(PC)
+ RET
+
+ // In child on old stack.
+ MOVL mm+12(SP), BX // m
+ MOVL gg+16(SP), DX // g
+ MOVL fn+20(SP), SI // fn
+
+ // set SP to be on the new child stack
+ MOVL stack+8(SP), CX
+ MOVL CX, SP
+
+ // Initialize m, g.
+ get_tls(AX)
+ MOVL DX, g(AX)
+ MOVL BX, m(AX)
+
+ // Initialize AX from _tos->pid
+ MOVL _tos(SB), AX
+ MOVL tos_pid(AX), AX
+ MOVL AX, m_procid(BX) // save pid as m->procid
+
+ CALL runtime·stackcheck(SB) // smashes AX, CX
+
+ MOVL 0(DX), DX // paranoia; check they are not nil
+ MOVL 0(BX), BX
+
+ // more paranoia; check that stack splitting code works
+ PUSHAL
+ CALL runtime·emptyfunc(SB)
+ POPAL
+
+ CALL SI // fn()
+ CALL runtime·exit(SB)
+ RET
diff --git a/src/pkg/runtime/plan9/mem.c b/src/pkg/runtime/plan9/mem.c
new file mode 100644
index 000000000..f795b2c01
--- /dev/null
+++ b/src/pkg/runtime/plan9/mem.c
@@ -0,0 +1,67 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "malloc.h"
+#include "os.h"
+
+extern byte end[];
+static byte *bloc = { end };
+static Lock memlock;
+
+enum
+{
+ Round = 4095
+};
+
+void*
+runtime·SysAlloc(uintptr nbytes)
+{
+ uintptr bl;
+
+ runtime·lock(&memlock);
+ mstats.sys += nbytes;
+ // Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c
+ bl = ((uintptr)bloc + Round) & ~Round;
+ if(runtime·brk_((void*)(bl + nbytes)) < 0) {
+ runtime·unlock(&memlock);
+ return (void*)-1;
+ }
+ bloc = (byte*)bl + nbytes;
+ runtime·unlock(&memlock);
+ return (void*)bl;
+}
+
+void
+runtime·SysFree(void *v, uintptr nbytes)
+{
+ runtime·lock(&memlock);
+ mstats.sys -= nbytes;
+ // from tiny/mem.c
+ // Push pointer back if this is a free
+ // of the most recent SysAlloc.
+ nbytes += (nbytes + Round) & ~Round;
+ if(bloc == (byte*)v+nbytes)
+ bloc -= nbytes;
+ runtime·unlock(&memlock);
+}
+
+void
+runtime·SysUnused(void *v, uintptr nbytes)
+{
+ USED(v, nbytes);
+}
+
+void
+runtime·SysMap(void *v, uintptr nbytes)
+{
+ USED(v, nbytes);
+}
+
+void*
+runtime·SysReserve(void *v, uintptr nbytes)
+{
+ USED(v);
+ return runtime·SysAlloc(nbytes);
+}
diff --git a/src/pkg/runtime/plan9/os.h b/src/pkg/runtime/plan9/os.h
new file mode 100644
index 000000000..b2f7357ec
--- /dev/null
+++ b/src/pkg/runtime/plan9/os.h
@@ -0,0 +1,57 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+extern int32 runtime·write(int32 fd, void* buffer, int32 nbytes);
+extern void runtime·exits(int8* msg);
+extern int32 runtime·brk_(void*);
+
+/* open */
+enum
+{
+ OREAD = 0,
+ OWRITE = 1,
+ ORDWR = 2
+};
+
+/* rfork */
+enum
+{
+ RFNAMEG = (1<<0),
+ RFENVG = (1<<1),
+ RFFDG = (1<<2),
+ RFNOTEG = (1<<3),
+ RFPROC = (1<<4),
+ RFMEM = (1<<5),
+ RFNOWAIT = (1<<6),
+ RFCNAMEG = (1<<10),
+ RFCENVG = (1<<11),
+ RFCFDG = (1<<12),
+ RFREND = (1<<13),
+ RFNOMNT = (1<<14)
+};
+
+typedef struct Tos Tos;
+typedef intptr Plink;
+
+struct Tos {
+ struct /* Per process profiling */
+ {
+ Plink *pp; /* known to be 0(ptr) */
+ Plink *next; /* known to be 4(ptr) */
+ Plink *last;
+ Plink *first;
+ uint32 pid;
+ uint32 what;
+ } prof;
+ uint64 cyclefreq; /* cycle clock frequency if there is one, 0 otherwise */
+ int64 kcycles; /* cycles spent in kernel */
+ int64 pcycles; /* cycles spent in process (kernel + user) */
+ uint32 pid; /* might as well put the pid here */
+ uint32 clock;
+ /* top of stack is here */
+};
+
+extern int32 runtime·rfork(int32 flags, void *stk, M *m, G *g, void (*fn)(void));
+extern int32 runtime·plan9_semacquire(uint32 *addr, int32 block);
+extern int32 runtime·plan9_semrelease(uint32 *addr, int32 count);
diff --git a/src/pkg/runtime/plan9/signals.h b/src/pkg/runtime/plan9/signals.h
new file mode 100644
index 000000000..5df757613
--- /dev/null
+++ b/src/pkg/runtime/plan9/signals.h
@@ -0,0 +1 @@
+// nothing to see here
diff --git a/src/pkg/runtime/plan9/thread.c b/src/pkg/runtime/plan9/thread.c
new file mode 100644
index 000000000..776989242
--- /dev/null
+++ b/src/pkg/runtime/plan9/thread.c
@@ -0,0 +1,174 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "os.h"
+
+int8 *goos = "plan9";
+
+void
+runtime·minit(void)
+{
+}
+
+void
+runtime·osinit(void)
+{
+}
+
+void
+runtime·goenvs(void)
+{
+}
+
+void
+runtime·initsig(int32 queue)
+{
+}
+
+extern Tos *_tos;
+void
+runtime·exit(int32)
+{
+ int32 fd;
+ uint8 buf[128];
+ uint8 tmp[16];
+ uint8 *p, *q;
+ int32 pid;
+
+ runtime·memclr(buf, sizeof buf);
+ runtime·memclr(tmp, sizeof tmp);
+ pid = _tos->pid;
+
+ /* build path string /proc/pid/notepg */
+ for(q=tmp; pid > 0;) {
+ *q++ = '0' + (pid%10);
+ pid = pid/10;
+ }
+ p = buf;
+ runtime·memmove((void*)p, (void*)"/proc/", 6);
+ p += 6;
+ for(q--; q >= tmp;)
+ *p++ = *q--;
+ runtime·memmove((void*)p, (void*)"/notepg", 7);
+
+ /* post interrupt note */
+ fd = runtime·open(buf, OWRITE);
+ runtime·write(fd, "interrupt", 9);
+ runtime·exits(nil);
+}
+
+void
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
+{
+ m->tls[0] = m->id; // so 386 asm can find it
+ if(0){
+ runtime·printf("newosproc stk=%p m=%p g=%p fn=%p rfork=%p id=%d/%d ostk=%p\n",
+ stk, m, g, fn, runtime·rfork, m->id, m->tls[0], &m);
+ }
+
+ if (runtime·rfork(RFPROC|RFMEM|RFNOWAIT, stk, m, g, fn) < 0 )
+ runtime·throw("newosproc: rfork failed");
+}
+
+// Blocking locks.
+
+// Implement Locks, using semaphores.
+// l->key is the number of threads who want the lock.
+// In a race, one thread increments l->key from 0 to 1
+// and the others increment it from >0 to >1. The thread
+// who does the 0->1 increment gets the lock, and the
+// others wait on the semaphore. When the 0->1 thread
+// releases the lock by decrementing l->key, l->key will
+// be >0, so it will increment the semaphore to wake up
+// one of the others. This is the same algorithm used
+// in Plan 9's user-level locks.
+
+void
+runtime·lock(Lock *l)
+{
+ if(m->locks < 0)
+ runtime·throw("lock count");
+ m->locks++;
+
+ if(runtime·xadd(&l->key, 1) == 1)
+ return; // changed from 0 -> 1; we hold lock
+ // otherwise wait in kernel
+ while(runtime·plan9_semacquire(&l->sema, 1) < 0) {
+ /* interrupted; try again */
+ }
+}
+
+void
+runtime·unlock(Lock *l)
+{
+ m->locks--;
+ if(m->locks < 0)
+ runtime·throw("lock count");
+
+ if(runtime·xadd(&l->key, -1) == 0)
+ return; // changed from 1 -> 0: no contention
+
+ runtime·plan9_semrelease(&l->sema, 1);
+}
+
+
+// User-level semaphore implementation:
+// try to do the operations in user space on u,
+// but when it's time to block, fall back on the kernel semaphore k.
+// This is the same algorithm used in Plan 9.
+void
+runtime·usemacquire(Usema *s)
+{
+ if((int32)runtime·xadd(&s->u, -1) < 0)
+ while(runtime·plan9_semacquire(&s->k, 1) < 0) {
+ /* interrupted; try again */
+ }
+}
+
+void
+runtime·usemrelease(Usema *s)
+{
+ if((int32)runtime·xadd(&s->u, 1) <= 0)
+ runtime·plan9_semrelease(&s->k, 1);
+}
+
+
+// Event notifications.
+void
+runtime·noteclear(Note *n)
+{
+ n->wakeup = 0;
+}
+
+void
+runtime·notesleep(Note *n)
+{
+ while(!n->wakeup)
+ runtime·usemacquire(&n->sema);
+}
+
+void
+runtime·notewakeup(Note *n)
+{
+ n->wakeup = 1;
+ runtime·usemrelease(&n->sema);
+}
+
+void
+os·sigpipe(void)
+{
+ runtime·throw("too many writes on closed pipe");
+}
+
+/*
+ * placeholder - once notes are implemented,
+ * a signal generating a panic must appear as
+ * a call to this function for correct handling by
+ * traceback.
+ */
+void
+runtime·sigpanic(void)
+{
+}
diff --git a/src/pkg/runtime/pprof/Makefile b/src/pkg/runtime/pprof/Makefile
new file mode 100644
index 000000000..8bccc0cc0
--- /dev/null
+++ b/src/pkg/runtime/pprof/Makefile
@@ -0,0 +1,11 @@
+# Copyright 2010 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=runtime/pprof
+GOFILES=\
+ pprof.go\
+
+include ../../../Make.pkg
diff --git a/src/pkg/runtime/pprof/pprof.go b/src/pkg/runtime/pprof/pprof.go
new file mode 100644
index 000000000..fdeceb4e8
--- /dev/null
+++ b/src/pkg/runtime/pprof/pprof.go
@@ -0,0 +1,176 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pprof writes runtime profiling data in the format expected
+// by the pprof visualization tool.
+// For more information about pprof, see
+// http://code.google.com/p/google-perftools/.
+package pprof
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+)
+
+// WriteHeapProfile writes a pprof-formatted heap profile to w.
+// If a write to w returns an error, WriteHeapProfile returns that error.
+// Otherwise, WriteHeapProfile returns nil.
+func WriteHeapProfile(w io.Writer) os.Error {
+ // Find out how many records there are (MemProfile(nil, false)),
+ // allocate that many records, and get the data.
+ // There's a race—more records might be added between
+ // the two calls—so allocate a few extra records for safety
+ // and also try again if we're very unlucky.
+ // The loop should only execute one iteration in the common case.
+ var p []runtime.MemProfileRecord
+ n, ok := runtime.MemProfile(nil, false)
+ for {
+ // Allocate room for a slightly bigger profile,
+ // in case a few more entries have been added
+ // since the call to MemProfile.
+ p = make([]runtime.MemProfileRecord, n+50)
+ n, ok = runtime.MemProfile(p, false)
+ if ok {
+ p = p[0:n]
+ break
+ }
+ // Profile grew; try again.
+ }
+
+ var total runtime.MemProfileRecord
+ for i := range p {
+ r := &p[i]
+ total.AllocBytes += r.AllocBytes
+ total.AllocObjects += r.AllocObjects
+ total.FreeBytes += r.FreeBytes
+ total.FreeObjects += r.FreeObjects
+ }
+
+ // Technically the rate is MemProfileRate not 2*MemProfileRate,
+ // but early versions of the C++ heap profiler reported 2*MemProfileRate,
+ // so that's what pprof has come to expect.
+ b := bufio.NewWriter(w)
+ fmt.Fprintf(b, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
+ total.InUseObjects(), total.InUseBytes(),
+ total.AllocObjects, total.AllocBytes,
+ 2*runtime.MemProfileRate)
+
+ for i := range p {
+ r := &p[i]
+ fmt.Fprintf(b, "%d: %d [%d: %d] @",
+ r.InUseObjects(), r.InUseBytes(),
+ r.AllocObjects, r.AllocBytes)
+ for _, pc := range r.Stack() {
+ fmt.Fprintf(b, " %#x", pc)
+ }
+ fmt.Fprintf(b, "\n")
+ }
+
+ // Print memstats information too.
+ // Pprof will ignore, but useful for people.
+ s := &runtime.MemStats
+ fmt.Fprintf(b, "\n# runtime.MemStats\n")
+ fmt.Fprintf(b, "# Alloc = %d\n", s.Alloc)
+ fmt.Fprintf(b, "# TotalAlloc = %d\n", s.TotalAlloc)
+ fmt.Fprintf(b, "# Sys = %d\n", s.Sys)
+ fmt.Fprintf(b, "# Lookups = %d\n", s.Lookups)
+ fmt.Fprintf(b, "# Mallocs = %d\n", s.Mallocs)
+
+ fmt.Fprintf(b, "# HeapAlloc = %d\n", s.HeapAlloc)
+ fmt.Fprintf(b, "# HeapSys = %d\n", s.HeapSys)
+ fmt.Fprintf(b, "# HeapIdle = %d\n", s.HeapIdle)
+ fmt.Fprintf(b, "# HeapInuse = %d\n", s.HeapInuse)
+
+ fmt.Fprintf(b, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
+ fmt.Fprintf(b, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
+ fmt.Fprintf(b, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
+ fmt.Fprintf(b, "# BuckHashSys = %d\n", s.BuckHashSys)
+
+ fmt.Fprintf(b, "# NextGC = %d\n", s.NextGC)
+ fmt.Fprintf(b, "# PauseNs = %d\n", s.PauseNs)
+ fmt.Fprintf(b, "# NumGC = %d\n", s.NumGC)
+ fmt.Fprintf(b, "# EnableGC = %v\n", s.EnableGC)
+ fmt.Fprintf(b, "# DebugGC = %v\n", s.DebugGC)
+
+ fmt.Fprintf(b, "# BySize = Size * (Active = Mallocs - Frees)\n")
+ fmt.Fprintf(b, "# (Excluding large blocks.)\n")
+ for _, t := range s.BySize {
+ if t.Mallocs > 0 {
+ fmt.Fprintf(b, "# %d * (%d = %d - %d)\n", t.Size, t.Mallocs-t.Frees, t.Mallocs, t.Frees)
+ }
+ }
+ return b.Flush()
+}
+
+var cpu struct {
+ sync.Mutex
+ profiling bool
+ done chan bool
+}
+
+// StartCPUProfile enables CPU profiling for the current process.
+// While profiling, the profile will be buffered and written to w.
+// StartCPUProfile returns an error if profiling is already enabled.
+func StartCPUProfile(w io.Writer) os.Error {
+ // The runtime routines allow a variable profiling rate,
+ // but in practice operating systems cannot trigger signals
+ // at more than about 500 Hz, and our processing of the
+ // signal is not cheap (mostly getting the stack trace).
+ // 100 Hz is a reasonable choice: it is frequent enough to
+ // produce useful data, rare enough not to bog down the
+ // system, and a nice round number to make it easy to
+ // convert sample counts to seconds. Instead of requiring
+ // each client to specify the frequency, we hard code it.
+ const hz = 100
+
+ // Avoid queueing behind StopCPUProfile.
+ // Could use TryLock instead if we had it.
+ if cpu.profiling {
+ return fmt.Errorf("cpu profiling already in use")
+ }
+
+ cpu.Lock()
+ defer cpu.Unlock()
+ if cpu.done == nil {
+ cpu.done = make(chan bool)
+ }
+ // Double-check.
+ if cpu.profiling {
+ return fmt.Errorf("cpu profiling already in use")
+ }
+ cpu.profiling = true
+ runtime.SetCPUProfileRate(hz)
+ go profileWriter(w)
+ return nil
+}
+
+func profileWriter(w io.Writer) {
+ for {
+ data := runtime.CPUProfile()
+ if data == nil {
+ break
+ }
+ w.Write(data)
+ }
+ cpu.done <- true
+}
+
+// StopCPUProfile stops the current CPU profile, if any.
+// StopCPUProfile only returns after all the writes for the
+// profile have completed.
+func StopCPUProfile() {
+ cpu.Lock()
+ defer cpu.Unlock()
+
+ if !cpu.profiling {
+ return
+ }
+ cpu.profiling = false
+ runtime.SetCPUProfileRate(0)
+ <-cpu.done
+}
diff --git a/src/pkg/runtime/pprof/pprof_test.go b/src/pkg/runtime/pprof/pprof_test.go
new file mode 100644
index 000000000..4486d5525
--- /dev/null
+++ b/src/pkg/runtime/pprof/pprof_test.go
@@ -0,0 +1,77 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof_test
+
+import (
+ "bytes"
+ "hash/crc32"
+ "runtime"
+ . "runtime/pprof"
+ "strings"
+ "testing"
+ "unsafe"
+)
+
+func TestCPUProfile(t *testing.T) {
+ switch runtime.GOOS {
+ case "darwin":
+ // see Apple Bug Report #9177434 (copied into change description)
+ return
+ case "plan9":
+ // unimplemented
+ return
+ case "windows":
+ // unimplemented
+ return
+ }
+
+ buf := make([]byte, 100000)
+ var prof bytes.Buffer
+ if err := StartCPUProfile(&prof); err != nil {
+ t.Fatal(err)
+ }
+ // This loop takes about a quarter second on a 2 GHz laptop.
+ // We only need to get one 100 Hz clock tick, so we've got
+ // a 25x safety buffer.
+ for i := 0; i < 1000; i++ {
+ crc32.ChecksumIEEE(buf)
+ }
+ StopCPUProfile()
+
+ // Convert []byte to []uintptr.
+ bytes := prof.Bytes()
+ val := *(*[]uintptr)(unsafe.Pointer(&bytes))
+ val = val[:len(bytes)/int(unsafe.Sizeof(uintptr(0)))]
+
+ if len(val) < 10 {
+ t.Fatalf("profile too short: %#x", val)
+ }
+ if val[0] != 0 || val[1] != 3 || val[2] != 0 || val[3] != 1e6/100 || val[4] != 0 {
+ t.Fatalf("unexpected header %#x", val[:5])
+ }
+
+ // Check that profile is well formed and contains ChecksumIEEE.
+ found := false
+ val = val[5:]
+ for len(val) > 0 {
+ if len(val) < 2 || val[0] < 1 || val[1] < 1 || uintptr(len(val)) < 2+val[1] {
+ t.Fatalf("malformed profile. leftover: %#x", val)
+ }
+ for _, pc := range val[2 : 2+val[1]] {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ continue
+ }
+ if strings.Contains(f.Name(), "ChecksumIEEE") {
+ found = true
+ }
+ }
+ val = val[2+val[1]:]
+ }
+
+ if !found {
+ t.Fatal("did not find ChecksumIEEE in the profile")
+ }
+}
diff --git a/src/pkg/runtime/print.c b/src/pkg/runtime/print.c
new file mode 100644
index 000000000..3ce779495
--- /dev/null
+++ b/src/pkg/runtime/print.c
@@ -0,0 +1,351 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "type.h"
+
+//static Lock debuglock;
+
+static void vprintf(int8*, byte*);
+
+void
+runtime·dump(byte *p, int32 n)
+{
+ int32 i;
+
+ for(i=0; i<n; i++) {
+ runtime·printpointer((byte*)(p[i]>>4));
+ runtime·printpointer((byte*)(p[i]&0xf));
+ if((i&15) == 15)
+ runtime·prints("\n");
+ else
+ runtime·prints(" ");
+ }
+ if(n & 15)
+ runtime·prints("\n");
+}
+
+void
+runtime·prints(int8 *s)
+{
+ runtime·write(2, s, runtime·findnull((byte*)s));
+}
+
+#pragma textflag 7
+void
+runtime·printf(int8 *s, ...)
+{
+ byte *arg;
+
+ arg = (byte*)(&s+1);
+ vprintf(s, arg);
+}
+
+// Very simple printf. Only for debugging prints.
+// Do not add to this without checking with Rob.
+static void
+vprintf(int8 *s, byte *base)
+{
+ int8 *p, *lp;
+ uintptr arg, narg;
+ byte *v;
+
+// lock(&debuglock);
+
+ lp = p = s;
+ arg = 0;
+ for(; *p; p++) {
+ if(*p != '%')
+ continue;
+ if(p > lp)
+ runtime·write(2, lp, p-lp);
+ p++;
+ narg = 0;
+ switch(*p) {
+ case 't':
+ narg = arg + 1;
+ break;
+ case 'd': // 32-bit
+ case 'x':
+ arg = runtime·rnd(arg, 4);
+ narg = arg + 4;
+ break;
+ case 'D': // 64-bit
+ case 'U':
+ case 'X':
+ case 'f':
+ arg = runtime·rnd(arg, sizeof(uintptr));
+ narg = arg + 8;
+ break;
+ case 'C':
+ arg = runtime·rnd(arg, sizeof(uintptr));
+ narg = arg + 16;
+ break;
+ case 'p': // pointer-sized
+ case 's':
+ arg = runtime·rnd(arg, sizeof(uintptr));
+ narg = arg + sizeof(uintptr);
+ break;
+ case 'S': // pointer-aligned but bigger
+ arg = runtime·rnd(arg, sizeof(uintptr));
+ narg = arg + sizeof(String);
+ break;
+ case 'a': // pointer-aligned but bigger
+ arg = runtime·rnd(arg, sizeof(uintptr));
+ narg = arg + sizeof(Slice);
+ break;
+ case 'i': // pointer-aligned but bigger
+ case 'e':
+ arg = runtime·rnd(arg, sizeof(uintptr));
+ narg = arg + sizeof(Eface);
+ break;
+ }
+ v = base+arg;
+ switch(*p) {
+ case 'a':
+ runtime·printslice(*(Slice*)v);
+ break;
+ case 'd':
+ runtime·printint(*(int32*)v);
+ break;
+ case 'D':
+ runtime·printint(*(int64*)v);
+ break;
+ case 'e':
+ runtime·printeface(*(Eface*)v);
+ break;
+ case 'f':
+ runtime·printfloat(*(float64*)v);
+ break;
+ case 'C':
+ runtime·printcomplex(*(Complex128*)v);
+ break;
+ case 'i':
+ runtime·printiface(*(Iface*)v);
+ break;
+ case 'p':
+ runtime·printpointer(*(void**)v);
+ break;
+ case 's':
+ runtime·prints(*(int8**)v);
+ break;
+ case 'S':
+ runtime·printstring(*(String*)v);
+ break;
+ case 't':
+ runtime·printbool(*(bool*)v);
+ break;
+ case 'U':
+ runtime·printuint(*(uint64*)v);
+ break;
+ case 'x':
+ runtime·printhex(*(uint32*)v);
+ break;
+ case 'X':
+ runtime·printhex(*(uint64*)v);
+ break;
+ }
+ arg = narg;
+ lp = p+1;
+ }
+ if(p > lp)
+ runtime·write(2, lp, p-lp);
+
+// unlock(&debuglock);
+}
+
+#pragma textflag 7
+void
+runtime·goprintf(String s, ...)
+{
+ // Can assume s has terminating NUL because only
+ // the Go compiler generates calls to runtime·goprintf, using
+ // string constants, and all the string constants have NULs.
+ vprintf((int8*)s.str, (byte*)(&s+1));
+}
+
+void
+runtime·printpc(void *p)
+{
+ runtime·prints("PC=");
+ runtime·printhex((uint64)runtime·getcallerpc(p));
+}
+
+void
+runtime·printbool(bool v)
+{
+ if(v) {
+ runtime·write(2, (byte*)"true", 4);
+ return;
+ }
+ runtime·write(2, (byte*)"false", 5);
+}
+
+void
+runtime·printfloat(float64 v)
+{
+ byte buf[20];
+ int32 e, s, i, n;
+ float64 h;
+
+ if(runtime·isNaN(v)) {
+ runtime·write(2, "NaN", 3);
+ return;
+ }
+ if(runtime·isInf(v, 1)) {
+ runtime·write(2, "+Inf", 4);
+ return;
+ }
+ if(runtime·isInf(v, -1)) {
+ runtime·write(2, "-Inf", 4);
+ return;
+ }
+
+ n = 7; // digits printed
+ e = 0; // exp
+ s = 0; // sign
+ if(v != 0) {
+ // sign
+ if(v < 0) {
+ v = -v;
+ s = 1;
+ }
+
+ // normalize
+ while(v >= 10) {
+ e++;
+ v /= 10;
+ }
+ while(v < 1) {
+ e--;
+ v *= 10;
+ }
+
+ // round
+ h = 5;
+ for(i=0; i<n; i++)
+ h /= 10;
+
+ v += h;
+ if(v >= 10) {
+ e++;
+ v /= 10;
+ }
+ }
+
+ // format +d.dddd+edd
+ buf[0] = '+';
+ if(s)
+ buf[0] = '-';
+ for(i=0; i<n; i++) {
+ s = v;
+ buf[i+2] = s+'0';
+ v -= s;
+ v *= 10.;
+ }
+ buf[1] = buf[2];
+ buf[2] = '.';
+
+ buf[n+2] = 'e';
+ buf[n+3] = '+';
+ if(e < 0) {
+ e = -e;
+ buf[n+3] = '-';
+ }
+
+ buf[n+4] = (e/100) + '0';
+ buf[n+5] = (e/10)%10 + '0';
+ buf[n+6] = (e%10) + '0';
+ runtime·write(2, buf, n+7);
+}
+
+void
+runtime·printcomplex(Complex128 v)
+{
+ runtime·write(2, "(", 1);
+ runtime·printfloat(v.real);
+ runtime·printfloat(v.imag);
+ runtime·write(2, "i)", 2);
+}
+
+void
+runtime·printuint(uint64 v)
+{
+ byte buf[100];
+ int32 i;
+
+ for(i=nelem(buf)-1; i>0; i--) {
+ buf[i] = v%10 + '0';
+ if(v < 10)
+ break;
+ v = v/10;
+ }
+ runtime·write(2, buf+i, nelem(buf)-i);
+}
+
+void
+runtime·printint(int64 v)
+{
+ if(v < 0) {
+ runtime·write(2, "-", 1);
+ v = -v;
+ }
+ runtime·printuint(v);
+}
+
+void
+runtime·printhex(uint64 v)
+{
+ static int8 *dig = "0123456789abcdef";
+ byte buf[100];
+ int32 i;
+
+ i=nelem(buf);
+ for(; v>0; v/=16)
+ buf[--i] = dig[v%16];
+ if(i == nelem(buf))
+ buf[--i] = '0';
+ buf[--i] = 'x';
+ buf[--i] = '0';
+ runtime·write(2, buf+i, nelem(buf)-i);
+}
+
+void
+runtime·printpointer(void *p)
+{
+ runtime·printhex((uint64)p);
+}
+
+void
+runtime·printstring(String v)
+{
+ extern uint32 runtime·maxstring;
+
+ if(v.len > runtime·maxstring) {
+ runtime·write(2, "[invalid string]", 16);
+ return;
+ }
+ if(v.len > 0)
+ runtime·write(2, v.str, v.len);
+}
+
+void
+runtime·printsp(void)
+{
+ runtime·write(2, " ", 1);
+}
+
+void
+runtime·printnl(void)
+{
+ runtime·write(2, "\n", 1);
+}
+
+void
+runtime·typestring(Eface e, String s)
+{
+ s = *e.type->string;
+ FLUSH(&s);
+}
+
diff --git a/src/pkg/runtime/proc.c b/src/pkg/runtime/proc.c
new file mode 100644
index 000000000..5f396b49f
--- /dev/null
+++ b/src/pkg/runtime/proc.c
@@ -0,0 +1,1568 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "arch.h"
+#include "defs.h"
+#include "malloc.h"
+#include "os.h"
+#include "stack.h"
+
+bool runtime·iscgo;
+
+static void unwindstack(G*, byte*);
+static void schedule(G*);
+static void acquireproc(void);
+static void releaseproc(void);
+
+typedef struct Sched Sched;
+
+M runtime·m0;
+G runtime·g0; // idle goroutine for m0
+
+static int32 debug = 0;
+
+int32 runtime·gcwaiting;
+
+// Go scheduler
+//
+// The go scheduler's job is to match ready-to-run goroutines (`g's)
+// with waiting-for-work schedulers (`m's). If there are ready g's
+// and no waiting m's, ready() will start a new m running in a new
+// OS thread, so that all ready g's can run simultaneously, up to a limit.
+// For now, m's never go away.
+//
+// By default, Go keeps only one kernel thread (m) running user code
+// at a single time; other threads may be blocked in the operating system.
+// Setting the environment variable $GOMAXPROCS or calling
+// runtime.GOMAXPROCS() will change the number of user threads
+// allowed to execute simultaneously. $GOMAXPROCS is thus an
+// approximation of the maximum number of cores to use.
+//
+// Even a program that can run without deadlock in a single process
+// might use more m's if given the chance. For example, the prime
+// sieve will use as many m's as there are primes (up to runtime·sched.mmax),
+// allowing different stages of the pipeline to execute in parallel.
+// We could revisit this choice, only kicking off new m's for blocking
+// system calls, but that would limit the amount of parallel computation
+// that go would try to do.
+//
+// In general, one could imagine all sorts of refinements to the
+// scheduler, but the goal now is just to get something working on
+// Linux and OS X.
+
+struct Sched {
+ Lock;
+
+ G *gfree; // available g's (status == Gdead)
+ int32 goidgen;
+
+ G *ghead; // g's waiting to run
+ G *gtail;
+ int32 gwait; // number of g's waiting to run
+ int32 gcount; // number of g's that are alive
+ int32 grunning; // number of g's running on cpu or in syscall
+
+ M *mhead; // m's waiting for work
+ int32 mwait; // number of m's waiting for work
+ int32 mcount; // number of m's that have been created
+
+ volatile uint32 atomic; // atomic scheduling word (see below)
+
+ int32 predawn; // running initialization, don't run new g's.
+ int32 profilehz; // cpu profiling rate
+
+ Note stopped; // one g can set waitstop and wait here for m's to stop
+};
+
+// The atomic word in sched is an atomic uint32 that
+// holds these fields.
+//
+// [15 bits] mcpu number of m's executing on cpu
+// [15 bits] mcpumax max number of m's allowed on cpu
+// [1 bit] waitstop some g is waiting on stopped
+// [1 bit] gwaiting gwait != 0
+//
+// These fields are the information needed by entersyscall
+// and exitsyscall to decide whether to coordinate with the
+// scheduler. Packing them into a single machine word lets
+// them use a fast path with a single atomic read/write and
+// no lock/unlock. This greatly reduces contention in
+// syscall- or cgo-heavy multithreaded programs.
+//
+// Except for entersyscall and exitsyscall, the manipulations
+// to these fields only happen while holding the schedlock,
+// so the routines holding schedlock only need to worry about
+// what entersyscall and exitsyscall do, not the other routines
+// (which also use the schedlock).
+//
+// In particular, entersyscall and exitsyscall only read mcpumax,
+// waitstop, and gwaiting. They never write them. Thus, writes to those
+// fields can be done (holding schedlock) without fear of write conflicts.
+// There may still be logic conflicts: for example, the set of waitstop must
+// be conditioned on mcpu >= mcpumax or else the wait may be a
+// spurious sleep. The Promela model in proc.p verifies these accesses.
+enum {
+ mcpuWidth = 15,
+ mcpuMask = (1<<mcpuWidth) - 1,
+ mcpuShift = 0,
+ mcpumaxShift = mcpuShift + mcpuWidth,
+ waitstopShift = mcpumaxShift + mcpuWidth,
+ gwaitingShift = waitstopShift+1,
+
+ // The max value of GOMAXPROCS is constrained
+ // by the max value we can store in the bit fields
+ // of the atomic word. Reserve a few high values
+ // so that we can detect accidental decrement
+ // beyond zero.
+ maxgomaxprocs = mcpuMask - 10,
+};
+
+#define atomic_mcpu(v) (((v)>>mcpuShift)&mcpuMask)
+#define atomic_mcpumax(v) (((v)>>mcpumaxShift)&mcpuMask)
+#define atomic_waitstop(v) (((v)>>waitstopShift)&1)
+#define atomic_gwaiting(v) (((v)>>gwaitingShift)&1)
+
+Sched runtime·sched;
+int32 runtime·gomaxprocs;
+bool runtime·singleproc;
+
+// An m that is waiting for notewakeup(&m->havenextg). This may be
+// only be accessed while the scheduler lock is held. This is used to
+// minimize the number of times we call notewakeup while the scheduler
+// lock is held, since the m will normally move quickly to lock the
+// scheduler itself, producing lock contention.
+static M* mwakeup;
+
+// Scheduling helpers. Sched must be locked.
+static void gput(G*); // put/get on ghead/gtail
+static G* gget(void);
+static void mput(M*); // put/get on mhead
+static M* mget(G*);
+static void gfput(G*); // put/get on gfree
+static G* gfget(void);
+static void matchmg(void); // match m's to g's
+static void readylocked(G*); // ready, but sched is locked
+static void mnextg(M*, G*);
+static void mcommoninit(M*);
+
+void
+setmcpumax(uint32 n)
+{
+ uint32 v, w;
+
+ for(;;) {
+ v = runtime·sched.atomic;
+ w = v;
+ w &= ~(mcpuMask<<mcpumaxShift);
+ w |= n<<mcpumaxShift;
+ if(runtime·cas(&runtime·sched.atomic, v, w))
+ break;
+ }
+}
+
+// The bootstrap sequence is:
+//
+// call osinit
+// call schedinit
+// make & queue new G
+// call runtime·mstart
+//
+// The new G does:
+//
+// call main·init_function
+// call initdone
+// call main·main
+void
+runtime·schedinit(void)
+{
+ int32 n;
+ byte *p;
+
+ m->nomemprof++;
+ runtime·mallocinit();
+ mcommoninit(m);
+
+ runtime·goargs();
+ runtime·goenvs();
+
+ // For debugging:
+ // Allocate internal symbol table representation now,
+ // so that we don't need to call malloc when we crash.
+ // runtime·findfunc(0);
+
+ runtime·gomaxprocs = 1;
+ p = runtime·getenv("GOMAXPROCS");
+ if(p != nil && (n = runtime·atoi(p)) != 0) {
+ if(n > maxgomaxprocs)
+ n = maxgomaxprocs;
+ runtime·gomaxprocs = n;
+ }
+ setmcpumax(runtime·gomaxprocs);
+ runtime·singleproc = runtime·gomaxprocs == 1;
+ runtime·sched.predawn = 1;
+
+ m->nomemprof--;
+}
+
+// Lock the scheduler.
+static void
+schedlock(void)
+{
+ runtime·lock(&runtime·sched);
+}
+
+// Unlock the scheduler.
+static void
+schedunlock(void)
+{
+ M *m;
+
+ m = mwakeup;
+ mwakeup = nil;
+ runtime·unlock(&runtime·sched);
+ if(m != nil)
+ runtime·notewakeup(&m->havenextg);
+}
+
+// Called after main·init_function; main·main will be called on return.
+void
+runtime·initdone(void)
+{
+ // Let's go.
+ runtime·sched.predawn = 0;
+ mstats.enablegc = 1;
+
+ // If main·init_function started other goroutines,
+ // kick off new m's to handle them, like ready
+ // would have, had it not been pre-dawn.
+ schedlock();
+ matchmg();
+ schedunlock();
+}
+
+void
+runtime·goexit(void)
+{
+ g->status = Gmoribund;
+ runtime·gosched();
+}
+
+void
+runtime·tracebackothers(G *me)
+{
+ G *g;
+
+ for(g = runtime·allg; g != nil; g = g->alllink) {
+ if(g == me || g->status == Gdead)
+ continue;
+ runtime·printf("\ngoroutine %d [%d]:\n", g->goid, g->status);
+ runtime·traceback(g->sched.pc, g->sched.sp, 0, g);
+ }
+}
+
+// Mark this g as m's idle goroutine.
+// This functionality might be used in environments where programs
+// are limited to a single thread, to simulate a select-driven
+// network server. It is not exposed via the standard runtime API.
+void
+runtime·idlegoroutine(void)
+{
+ if(g->idlem != nil)
+ runtime·throw("g is already an idle goroutine");
+ g->idlem = m;
+}
+
+static void
+mcommoninit(M *m)
+{
+ // Add to runtime·allm so garbage collector doesn't free m
+ // when it is just in a register or thread-local storage.
+ m->alllink = runtime·allm;
+ // runtime·Cgocalls() iterates over allm w/o schedlock,
+ // so we need to publish it safely.
+ runtime·atomicstorep(&runtime·allm, m);
+
+ m->id = runtime·sched.mcount++;
+ m->fastrand = 0x49f6428aUL + m->id;
+ m->stackalloc = runtime·malloc(sizeof(*m->stackalloc));
+ runtime·FixAlloc_Init(m->stackalloc, FixedStack, runtime·SysAlloc, nil, nil);
+}
+
+// Try to increment mcpu. Report whether succeeded.
+static bool
+canaddmcpu(void)
+{
+ uint32 v;
+
+ for(;;) {
+ v = runtime·sched.atomic;
+ if(atomic_mcpu(v) >= atomic_mcpumax(v))
+ return 0;
+ if(runtime·cas(&runtime·sched.atomic, v, v+(1<<mcpuShift)))
+ return 1;
+ }
+}
+
+// Put on `g' queue. Sched must be locked.
+static void
+gput(G *g)
+{
+ M *m;
+
+ // If g is wired, hand it off directly.
+ if((m = g->lockedm) != nil && canaddmcpu()) {
+ mnextg(m, g);
+ return;
+ }
+
+ // If g is the idle goroutine for an m, hand it off.
+ if(g->idlem != nil) {
+ if(g->idlem->idleg != nil) {
+ runtime·printf("m%d idle out of sync: g%d g%d\n",
+ g->idlem->id,
+ g->idlem->idleg->goid, g->goid);
+ runtime·throw("runtime: double idle");
+ }
+ g->idlem->idleg = g;
+ return;
+ }
+
+ g->schedlink = nil;
+ if(runtime·sched.ghead == nil)
+ runtime·sched.ghead = g;
+ else
+ runtime·sched.gtail->schedlink = g;
+ runtime·sched.gtail = g;
+
+ // increment gwait.
+ // if it transitions to nonzero, set atomic gwaiting bit.
+ if(runtime·sched.gwait++ == 0)
+ runtime·xadd(&runtime·sched.atomic, 1<<gwaitingShift);
+}
+
+// Report whether gget would return something.
+static bool
+haveg(void)
+{
+ return runtime·sched.ghead != nil || m->idleg != nil;
+}
+
+// Get from `g' queue. Sched must be locked.
+static G*
+gget(void)
+{
+ G *g;
+
+ g = runtime·sched.ghead;
+ if(g){
+ runtime·sched.ghead = g->schedlink;
+ if(runtime·sched.ghead == nil)
+ runtime·sched.gtail = nil;
+ // decrement gwait.
+ // if it transitions to zero, clear atomic gwaiting bit.
+ if(--runtime·sched.gwait == 0)
+ runtime·xadd(&runtime·sched.atomic, -1<<gwaitingShift);
+ } else if(m->idleg != nil) {
+ g = m->idleg;
+ m->idleg = nil;
+ }
+ return g;
+}
+
+// Put on `m' list. Sched must be locked.
+static void
+mput(M *m)
+{
+ m->schedlink = runtime·sched.mhead;
+ runtime·sched.mhead = m;
+ runtime·sched.mwait++;
+}
+
+// Get an `m' to run `g'. Sched must be locked.
+static M*
+mget(G *g)
+{
+ M *m;
+
+ // if g has its own m, use it.
+ if((m = g->lockedm) != nil)
+ return m;
+
+ // otherwise use general m pool.
+ if((m = runtime·sched.mhead) != nil){
+ runtime·sched.mhead = m->schedlink;
+ runtime·sched.mwait--;
+ }
+ return m;
+}
+
+// Mark g ready to run.
+void
+runtime·ready(G *g)
+{
+ schedlock();
+ readylocked(g);
+ schedunlock();
+}
+
+// Mark g ready to run. Sched is already locked.
+// G might be running already and about to stop.
+// The sched lock protects g->status from changing underfoot.
+static void
+readylocked(G *g)
+{
+ if(g->m){
+ // Running on another machine.
+ // Ready it when it stops.
+ g->readyonstop = 1;
+ return;
+ }
+
+ // Mark runnable.
+ if(g->status == Grunnable || g->status == Grunning) {
+ runtime·printf("goroutine %d has status %d\n", g->goid, g->status);
+ runtime·throw("bad g->status in ready");
+ }
+ g->status = Grunnable;
+
+ gput(g);
+ if(!runtime·sched.predawn)
+ matchmg();
+}
+
+static void
+nop(void)
+{
+}
+
+// Same as readylocked but a different symbol so that
+// debuggers can set a breakpoint here and catch all
+// new goroutines.
+static void
+newprocreadylocked(G *g)
+{
+ nop(); // avoid inlining in 6l
+ readylocked(g);
+}
+
+// Pass g to m for running.
+// Caller has already incremented mcpu.
+static void
+mnextg(M *m, G *g)
+{
+ runtime·sched.grunning++;
+ m->nextg = g;
+ if(m->waitnextg) {
+ m->waitnextg = 0;
+ if(mwakeup != nil)
+ runtime·notewakeup(&mwakeup->havenextg);
+ mwakeup = m;
+ }
+}
+
+// Get the next goroutine that m should run.
+// Sched must be locked on entry, is unlocked on exit.
+// Makes sure that at most $GOMAXPROCS g's are
+// running on cpus (not in system calls) at any given time.
+static G*
+nextgandunlock(void)
+{
+ G *gp;
+ uint32 v;
+
+ if(atomic_mcpu(runtime·sched.atomic) >= maxgomaxprocs)
+ runtime·throw("negative mcpu");
+
+ // If there is a g waiting as m->nextg, the mcpu++
+ // happened before it was passed to mnextg.
+ if(m->nextg != nil) {
+ gp = m->nextg;
+ m->nextg = nil;
+ schedunlock();
+ return gp;
+ }
+
+ if(m->lockedg != nil) {
+ // We can only run one g, and it's not available.
+ // Make sure some other cpu is running to handle
+ // the ordinary run queue.
+ if(runtime·sched.gwait != 0) {
+ matchmg();
+ // m->lockedg might have been on the queue.
+ if(m->nextg != nil) {
+ gp = m->nextg;
+ m->nextg = nil;
+ schedunlock();
+ return gp;
+ }
+ }
+ } else {
+ // Look for work on global queue.
+ while(haveg() && canaddmcpu()) {
+ gp = gget();
+ if(gp == nil)
+ runtime·throw("gget inconsistency");
+
+ if(gp->lockedm) {
+ mnextg(gp->lockedm, gp);
+ continue;
+ }
+ runtime·sched.grunning++;
+ schedunlock();
+ return gp;
+ }
+
+ // The while loop ended either because the g queue is empty
+ // or because we have maxed out our m procs running go
+ // code (mcpu >= mcpumax). We need to check that
+ // concurrent actions by entersyscall/exitsyscall cannot
+ // invalidate the decision to end the loop.
+ //
+ // We hold the sched lock, so no one else is manipulating the
+ // g queue or changing mcpumax. Entersyscall can decrement
+ // mcpu, but if does so when there is something on the g queue,
+ // the gwait bit will be set, so entersyscall will take the slow path
+ // and use the sched lock. So it cannot invalidate our decision.
+ //
+ // Wait on global m queue.
+ mput(m);
+ }
+
+ v = runtime·atomicload(&runtime·sched.atomic);
+ if(runtime·sched.grunning == 0)
+ runtime·throw("all goroutines are asleep - deadlock!");
+ m->nextg = nil;
+ m->waitnextg = 1;
+ runtime·noteclear(&m->havenextg);
+
+ // Stoptheworld is waiting for all but its cpu to go to stop.
+ // Entersyscall might have decremented mcpu too, but if so
+ // it will see the waitstop and take the slow path.
+ // Exitsyscall never increments mcpu beyond mcpumax.
+ if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
+ // set waitstop = 0 (known to be 1)
+ runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
+ runtime·notewakeup(&runtime·sched.stopped);
+ }
+ schedunlock();
+
+ runtime·notesleep(&m->havenextg);
+ if((gp = m->nextg) == nil)
+ runtime·throw("bad m->nextg in nextgoroutine");
+ m->nextg = nil;
+ return gp;
+}
+
+void
+runtime·stoptheworld(void)
+{
+ uint32 v;
+
+ schedlock();
+ runtime·gcwaiting = 1;
+
+ setmcpumax(1);
+
+ // while mcpu > 1
+ for(;;) {
+ v = runtime·sched.atomic;
+ if(atomic_mcpu(v) <= 1)
+ break;
+
+ // It would be unsafe for multiple threads to be using
+ // the stopped note at once, but there is only
+ // ever one thread doing garbage collection.
+ runtime·noteclear(&runtime·sched.stopped);
+ if(atomic_waitstop(v))
+ runtime·throw("invalid waitstop");
+
+ // atomic { waitstop = 1 }, predicated on mcpu <= 1 check above
+ // still being true.
+ if(!runtime·cas(&runtime·sched.atomic, v, v+(1<<waitstopShift)))
+ continue;
+
+ schedunlock();
+ runtime·notesleep(&runtime·sched.stopped);
+ schedlock();
+ }
+ runtime·singleproc = runtime·gomaxprocs == 1;
+ schedunlock();
+}
+
+// TODO(rsc): Remove. This is only temporary,
+// for the mark and sweep collector.
+void
+runtime·starttheworld(void)
+{
+ schedlock();
+ runtime·gcwaiting = 0;
+ setmcpumax(runtime·gomaxprocs);
+ matchmg();
+ schedunlock();
+}
+
+// Called to start an M.
+void
+runtime·mstart(void)
+{
+ if(g != m->g0)
+ runtime·throw("bad runtime·mstart");
+ if(m->mcache == nil)
+ m->mcache = runtime·allocmcache();
+
+ // Record top of stack for use by mcall.
+ // Once we call schedule we're never coming back,
+ // so other calls can reuse this stack space.
+ runtime·gosave(&m->g0->sched);
+ m->g0->sched.pc = (void*)-1; // make sure it is never used
+
+ runtime·minit();
+ schedule(nil);
+}
+
+// When running with cgo, we call libcgo_thread_start
+// to start threads for us so that we can play nicely with
+// foreign code.
+void (*libcgo_thread_start)(void*);
+
+typedef struct CgoThreadStart CgoThreadStart;
+struct CgoThreadStart
+{
+ M *m;
+ G *g;
+ void (*fn)(void);
+};
+
+// Kick off new m's as needed (up to mcpumax).
+// There are already `other' other cpus that will
+// start looking for goroutines shortly.
+// Sched is locked.
+static void
+matchmg(void)
+{
+ G *g;
+
+ if(m->mallocing || m->gcing)
+ return;
+
+ while(haveg() && canaddmcpu()) {
+ g = gget();
+ if(g == nil)
+ runtime·throw("gget inconsistency");
+
+ // Find the m that will run g.
+ M *m;
+ if((m = mget(g)) == nil){
+ m = runtime·malloc(sizeof(M));
+ mcommoninit(m);
+
+ if(runtime·iscgo) {
+ CgoThreadStart ts;
+
+ if(libcgo_thread_start == nil)
+ runtime·throw("libcgo_thread_start missing");
+ // pthread_create will make us a stack.
+ m->g0 = runtime·malg(-1);
+ ts.m = m;
+ ts.g = m->g0;
+ ts.fn = runtime·mstart;
+ runtime·asmcgocall(libcgo_thread_start, &ts);
+ } else {
+ if(Windows)
+ // windows will layout sched stack on os stack
+ m->g0 = runtime·malg(-1);
+ else
+ m->g0 = runtime·malg(8192);
+ runtime·newosproc(m, m->g0, m->g0->stackbase, runtime·mstart);
+ }
+ }
+ mnextg(m, g);
+ }
+}
+
+// One round of scheduler: find a goroutine and run it.
+// The argument is the goroutine that was running before
+// schedule was called, or nil if this is the first call.
+// Never returns.
+static void
+schedule(G *gp)
+{
+ int32 hz;
+ uint32 v;
+
+ schedlock();
+ if(gp != nil) {
+ if(runtime·sched.predawn)
+ runtime·throw("init rescheduling");
+
+ // Just finished running gp.
+ gp->m = nil;
+ runtime·sched.grunning--;
+
+ // atomic { mcpu-- }
+ v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
+ if(atomic_mcpu(v) > maxgomaxprocs)
+ runtime·throw("negative mcpu in scheduler");
+
+ switch(gp->status){
+ case Grunnable:
+ case Gdead:
+ // Shouldn't have been running!
+ runtime·throw("bad gp->status in sched");
+ case Grunning:
+ gp->status = Grunnable;
+ gput(gp);
+ break;
+ case Gmoribund:
+ gp->status = Gdead;
+ if(gp->lockedm) {
+ gp->lockedm = nil;
+ m->lockedg = nil;
+ }
+ gp->idlem = nil;
+ unwindstack(gp, nil);
+ gfput(gp);
+ if(--runtime·sched.gcount == 0)
+ runtime·exit(0);
+ break;
+ }
+ if(gp->readyonstop){
+ gp->readyonstop = 0;
+ readylocked(gp);
+ }
+ }
+
+ // Find (or wait for) g to run. Unlocks runtime·sched.
+ gp = nextgandunlock();
+ gp->readyonstop = 0;
+ gp->status = Grunning;
+ m->curg = gp;
+ gp->m = m;
+
+ // Check whether the profiler needs to be turned on or off.
+ hz = runtime·sched.profilehz;
+ if(m->profilehz != hz)
+ runtime·resetcpuprofiler(hz);
+
+ if(gp->sched.pc == (byte*)runtime·goexit) { // kickoff
+ runtime·gogocall(&gp->sched, (void(*)(void))gp->entry);
+ }
+ runtime·gogo(&gp->sched, 0);
+}
+
+// Enter scheduler. If g->status is Grunning,
+// re-queues g and runs everyone else who is waiting
+// before running g again. If g->status is Gmoribund,
+// kills off g.
+// Cannot split stack because it is called from exitsyscall.
+// See comment below.
+#pragma textflag 7
+void
+runtime·gosched(void)
+{
+ if(m->locks != 0)
+ runtime·throw("gosched holding locks");
+ if(g == m->g0)
+ runtime·throw("gosched of g0");
+ runtime·mcall(schedule);
+}
+
+// The goroutine g is about to enter a system call.
+// Record that it's not using the cpu anymore.
+// This is called only from the go syscall library and cgocall,
+// not from the low-level system calls used by the runtime.
+//
+// Entersyscall cannot split the stack: the runtime·gosave must
+// make g->sched refer to the caller's stack segment, because
+// entersyscall is going to return immediately after.
+// It's okay to call matchmg and notewakeup even after
+// decrementing mcpu, because we haven't released the
+// sched lock yet, so the garbage collector cannot be running.
+#pragma textflag 7
+void
+runtime·entersyscall(void)
+{
+ uint32 v;
+
+ if(runtime·sched.predawn)
+ return;
+
+ // Leave SP around for gc and traceback.
+ runtime·gosave(&g->sched);
+ g->gcsp = g->sched.sp;
+ g->gcstack = g->stackbase;
+ g->gcguard = g->stackguard;
+ g->status = Gsyscall;
+ if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
+ // runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
+ // g->gcsp, g->gcguard-StackGuard, g->gcstack);
+ runtime·throw("entersyscall");
+ }
+
+ // Fast path.
+ // The slow path inside the schedlock/schedunlock will get
+ // through without stopping if it does:
+ // mcpu--
+ // gwait not true
+ // waitstop && mcpu <= mcpumax not true
+ // If we can do the same with a single atomic add,
+ // then we can skip the locks.
+ v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
+ if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
+ return;
+
+ schedlock();
+ v = runtime·atomicload(&runtime·sched.atomic);
+ if(atomic_gwaiting(v)) {
+ matchmg();
+ v = runtime·atomicload(&runtime·sched.atomic);
+ }
+ if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
+ runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
+ runtime·notewakeup(&runtime·sched.stopped);
+ }
+
+ // Re-save sched in case one of the calls
+ // (notewakeup, matchmg) triggered something using it.
+ runtime·gosave(&g->sched);
+
+ schedunlock();
+}
+
+// The goroutine g exited its system call.
+// Arrange for it to run on a cpu again.
+// This is called only from the go syscall library, not
+// from the low-level system calls used by the runtime.
+void
+runtime·exitsyscall(void)
+{
+ uint32 v;
+
+ if(runtime·sched.predawn)
+ return;
+
+ // Fast path.
+ // If we can do the mcpu++ bookkeeping and
+ // find that we still have mcpu <= mcpumax, then we can
+ // start executing Go code immediately, without having to
+ // schedlock/schedunlock.
+ v = runtime·xadd(&runtime·sched.atomic, (1<<mcpuShift));
+ if(m->profilehz == runtime·sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) {
+ // There's a cpu for us, so we can run.
+ g->status = Grunning;
+ // Garbage collector isn't running (since we are),
+ // so okay to clear gcstack.
+ g->gcstack = nil;
+ return;
+ }
+
+ // Tell scheduler to put g back on the run queue:
+ // mostly equivalent to g->status = Grunning,
+ // but keeps the garbage collector from thinking
+ // that g is running right now, which it's not.
+ g->readyonstop = 1;
+
+ // All the cpus are taken.
+ // The scheduler will ready g and put this m to sleep.
+ // When the scheduler takes g away from m,
+ // it will undo the runtime·sched.mcpu++ above.
+ runtime·gosched();
+
+ // Gosched returned, so we're allowed to run now.
+ // Delete the gcstack information that we left for
+ // the garbage collector during the system call.
+ // Must wait until now because until gosched returns
+ // we don't know for sure that the garbage collector
+ // is not running.
+ g->gcstack = nil;
+}
+
+void
+runtime·oldstack(void)
+{
+ Stktop *top, old;
+ uint32 argsize;
+ byte *sp;
+ G *g1;
+ int32 goid;
+
+//printf("oldstack m->cret=%p\n", m->cret);
+
+ g1 = m->curg;
+ top = (Stktop*)g1->stackbase;
+ sp = (byte*)top;
+ old = *top;
+ argsize = old.argsize;
+ if(argsize > 0) {
+ sp -= argsize;
+ runtime·memmove(top->argp, sp, argsize);
+ }
+ goid = old.gobuf.g->goid; // fault if g is bad, before gogo
+ USED(goid);
+
+ if(old.free != 0)
+ runtime·stackfree(g1->stackguard - StackGuard, old.free);
+ g1->stackbase = old.stackbase;
+ g1->stackguard = old.stackguard;
+
+ runtime·gogo(&old.gobuf, m->cret);
+}
+
+void
+runtime·newstack(void)
+{
+ int32 framesize, argsize;
+ Stktop *top;
+ byte *stk, *sp;
+ G *g1;
+ Gobuf label;
+ bool reflectcall;
+ uintptr free;
+
+ framesize = m->moreframesize;
+ argsize = m->moreargsize;
+ g1 = m->curg;
+
+ if(m->morebuf.sp < g1->stackguard - StackGuard) {
+ runtime·printf("runtime: split stack overflow: %p < %p\n", m->morebuf.sp, g1->stackguard - StackGuard);
+ runtime·throw("runtime: split stack overflow");
+ }
+ if(argsize % sizeof(uintptr) != 0) {
+ runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
+ runtime·throw("runtime: stack split argsize");
+ }
+
+ reflectcall = framesize==1;
+ if(reflectcall)
+ framesize = 0;
+
+ if(reflectcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > g1->stackguard) {
+ // special case: called from reflect.call (framesize==1)
+ // to call code with an arbitrary argument size,
+ // and we have enough space on the current stack.
+ // the new Stktop* is necessary to unwind, but
+ // we don't need to create a new segment.
+ top = (Stktop*)(m->morebuf.sp - sizeof(*top));
+ stk = g1->stackguard - StackGuard;
+ free = 0;
+ } else {
+ // allocate new segment.
+ framesize += argsize;
+ framesize += StackExtra; // room for more functions, Stktop.
+ if(framesize < StackMin)
+ framesize = StackMin;
+ framesize += StackSystem;
+ stk = runtime·stackalloc(framesize);
+ top = (Stktop*)(stk+framesize-sizeof(*top));
+ free = framesize;
+ }
+
+//runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
+//framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase);
+
+ top->stackbase = g1->stackbase;
+ top->stackguard = g1->stackguard;
+ top->gobuf = m->morebuf;
+ top->argp = m->moreargp;
+ top->argsize = argsize;
+ top->free = free;
+
+ // copy flag from panic
+ top->panic = g1->ispanic;
+ g1->ispanic = false;
+
+ g1->stackbase = (byte*)top;
+ g1->stackguard = stk + StackGuard;
+
+ sp = (byte*)top;
+ if(argsize > 0) {
+ sp -= argsize;
+ runtime·memmove(sp, m->moreargp, argsize);
+ }
+ if(thechar == '5') {
+ // caller would have saved its LR below args.
+ sp -= sizeof(void*);
+ *(void**)sp = nil;
+ }
+
+ // Continue as if lessstack had just called m->morepc
+ // (the PC that decided to grow the stack).
+ label.sp = sp;
+ label.pc = (byte*)runtime·lessstack;
+ label.g = m->curg;
+ runtime·gogocall(&label, m->morepc);
+
+ *(int32*)345 = 123; // never return
+}
+
+static void
+mstackalloc(G *gp)
+{
+ gp->param = runtime·stackalloc((uintptr)gp->param);
+ runtime·gogo(&gp->sched, 0);
+}
+
+G*
+runtime·malg(int32 stacksize)
+{
+ G *newg;
+ byte *stk;
+
+ newg = runtime·malloc(sizeof(G));
+ if(stacksize >= 0) {
+ if(g == m->g0) {
+ // running on scheduler stack already.
+ stk = runtime·stackalloc(StackSystem + stacksize);
+ } else {
+ // have to call stackalloc on scheduler stack.
+ g->param = (void*)(StackSystem + stacksize);
+ runtime·mcall(mstackalloc);
+ stk = g->param;
+ g->param = nil;
+ }
+ newg->stack0 = stk;
+ newg->stackguard = stk + StackGuard;
+ newg->stackbase = stk + StackSystem + stacksize - sizeof(Stktop);
+ runtime·memclr(newg->stackbase, sizeof(Stktop));
+ }
+ return newg;
+}
+
+/*
+ * Newproc and deferproc need to be textflag 7
+ * (no possible stack split when nearing overflow)
+ * because they assume that the arguments to fn
+ * are available sequentially beginning at &arg0.
+ * If a stack split happened, only the one word
+ * arg0 would be copied. It's okay if any functions
+ * they call split the stack below the newproc frame.
+ */
+#pragma textflag 7
+void
+runtime·newproc(int32 siz, byte* fn, ...)
+{
+ byte *argp;
+
+ if(thechar == '5')
+ argp = (byte*)(&fn+2); // skip caller's saved LR
+ else
+ argp = (byte*)(&fn+1);
+ runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
+}
+
+G*
+runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
+{
+ byte *sp;
+ G *newg;
+ int32 siz;
+
+//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
+ siz = narg + nret;
+ siz = (siz+7) & ~7;
+
+ // We could instead create a secondary stack frame
+ // and make it look like goexit was on the original but
+ // the call to the actual goroutine function was split.
+ // Not worth it: this is almost always an error.
+ if(siz > StackMin - 1024)
+ runtime·throw("runtime.newproc: function arguments too large for new goroutine");
+
+ schedlock();
+
+ if((newg = gfget()) != nil){
+ newg->status = Gwaiting;
+ if(newg->stackguard - StackGuard != newg->stack0)
+ runtime·throw("invalid stack in newg");
+ } else {
+ newg = runtime·malg(StackMin);
+ newg->status = Gwaiting;
+ newg->alllink = runtime·allg;
+ runtime·allg = newg;
+ }
+
+ sp = newg->stackbase;
+ sp -= siz;
+ runtime·memmove(sp, argp, narg);
+ if(thechar == '5') {
+ // caller's LR
+ sp -= sizeof(void*);
+ *(void**)sp = nil;
+ }
+
+ newg->sched.sp = sp;
+ newg->sched.pc = (byte*)runtime·goexit;
+ newg->sched.g = newg;
+ newg->entry = fn;
+ newg->gopc = (uintptr)callerpc;
+
+ runtime·sched.gcount++;
+ runtime·sched.goidgen++;
+ newg->goid = runtime·sched.goidgen;
+
+ newprocreadylocked(newg);
+ schedunlock();
+
+ return newg;
+//printf(" goid=%d\n", newg->goid);
+}
+
+#pragma textflag 7
+uintptr
+runtime·deferproc(int32 siz, byte* fn, ...)
+{
+ Defer *d;
+
+ d = runtime·malloc(sizeof(*d) + siz - sizeof(d->args));
+ d->fn = fn;
+ d->siz = siz;
+ d->pc = runtime·getcallerpc(&siz);
+ if(thechar == '5')
+ d->argp = (byte*)(&fn+2); // skip caller's saved link register
+ else
+ d->argp = (byte*)(&fn+1);
+ runtime·memmove(d->args, d->argp, d->siz);
+
+ d->link = g->defer;
+ g->defer = d;
+
+ // deferproc returns 0 normally.
+ // a deferred func that stops a panic
+ // makes the deferproc return 1.
+ // the code the compiler generates always
+ // checks the return value and jumps to the
+ // end of the function if deferproc returns != 0.
+ return 0;
+}
+
+#pragma textflag 7
+void
+runtime·deferreturn(uintptr arg0)
+{
+ Defer *d;
+ byte *argp, *fn;
+
+ d = g->defer;
+ if(d == nil)
+ return;
+ argp = (byte*)&arg0;
+ if(d->argp != argp)
+ return;
+ runtime·memmove(argp, d->args, d->siz);
+ g->defer = d->link;
+ fn = d->fn;
+ runtime·free(d);
+ runtime·jmpdefer(fn, argp);
+}
+
+static void
+rundefer(void)
+{
+ Defer *d;
+
+ while((d = g->defer) != nil) {
+ g->defer = d->link;
+ reflect·call(d->fn, d->args, d->siz);
+ runtime·free(d);
+ }
+}
+
+// Free stack frames until we hit the last one
+// or until we find the one that contains the argp.
+static void
+unwindstack(G *gp, byte *sp)
+{
+ Stktop *top;
+ byte *stk;
+
+ // Must be called from a different goroutine, usually m->g0.
+ if(g == gp)
+ runtime·throw("unwindstack on self");
+
+ while((top = (Stktop*)gp->stackbase) != nil && top->stackbase != nil) {
+ stk = gp->stackguard - StackGuard;
+ if(stk <= sp && sp < gp->stackbase)
+ break;
+ gp->stackbase = top->stackbase;
+ gp->stackguard = top->stackguard;
+ if(top->free != 0)
+ runtime·stackfree(stk, top->free);
+ }
+
+ if(sp != nil && (sp < gp->stackguard - StackGuard || gp->stackbase < sp)) {
+ runtime·printf("recover: %p not in [%p, %p]\n", sp, gp->stackguard - StackGuard, gp->stackbase);
+ runtime·throw("bad unwindstack");
+ }
+}
+
+static void
+printpanics(Panic *p)
+{
+ if(p->link) {
+ printpanics(p->link);
+ runtime·printf("\t");
+ }
+ runtime·printf("panic: ");
+ runtime·printany(p->arg);
+ if(p->recovered)
+ runtime·printf(" [recovered]");
+ runtime·printf("\n");
+}
+
+static void recovery(G*);
+
+void
+runtime·panic(Eface e)
+{
+ Defer *d;
+ Panic *p;
+
+ p = runtime·mal(sizeof *p);
+ p->arg = e;
+ p->link = g->panic;
+ p->stackbase = g->stackbase;
+ g->panic = p;
+
+ for(;;) {
+ d = g->defer;
+ if(d == nil)
+ break;
+ // take defer off list in case of recursive panic
+ g->defer = d->link;
+ g->ispanic = true; // rock for newstack, where reflect.call ends up
+ reflect·call(d->fn, d->args, d->siz);
+ if(p->recovered) {
+ g->panic = p->link;
+ if(g->panic == nil) // must be done with signal
+ g->sig = 0;
+ runtime·free(p);
+ // put recovering defer back on list
+ // for scheduler to find.
+ d->link = g->defer;
+ g->defer = d;
+ runtime·mcall(recovery);
+ runtime·throw("recovery failed"); // mcall should not return
+ }
+ runtime·free(d);
+ }
+
+ // ran out of deferred calls - old-school panic now
+ runtime·startpanic();
+ printpanics(g->panic);
+ runtime·dopanic(0);
+}
+
+static void
+recovery(G *gp)
+{
+ Defer *d;
+
+ // Rewind gp's stack; we're running on m->g0's stack.
+ d = gp->defer;
+ gp->defer = d->link;
+
+ // Unwind to the stack frame with d's arguments in it.
+ unwindstack(gp, d->argp);
+
+ // Make the deferproc for this d return again,
+ // this time returning 1. The calling function will
+ // jump to the standard return epilogue.
+ // The -2*sizeof(uintptr) makes up for the
+ // two extra words that are on the stack at
+ // each call to deferproc.
+ // (The pc we're returning to does pop pop
+ // before it tests the return value.)
+ // On the arm there are 2 saved LRs mixed in too.
+ if(thechar == '5')
+ gp->sched.sp = (byte*)d->argp - 4*sizeof(uintptr);
+ else
+ gp->sched.sp = (byte*)d->argp - 2*sizeof(uintptr);
+ gp->sched.pc = d->pc;
+ runtime·free(d);
+ runtime·gogo(&gp->sched, 1);
+}
+
+#pragma textflag 7 /* no split, or else g->stackguard is not the stack for fp */
+void
+runtime·recover(byte *argp, Eface ret)
+{
+ Stktop *top, *oldtop;
+ Panic *p;
+
+ // Must be a panic going on.
+ if((p = g->panic) == nil || p->recovered)
+ goto nomatch;
+
+ // Frame must be at the top of the stack segment,
+ // because each deferred call starts a new stack
+ // segment as a side effect of using reflect.call.
+ // (There has to be some way to remember the
+ // variable argument frame size, and the segment
+ // code already takes care of that for us, so we
+ // reuse it.)
+ //
+ // As usual closures complicate things: the fp that
+ // the closure implementation function claims to have
+ // is where the explicit arguments start, after the
+ // implicit pointer arguments and PC slot.
+ // If we're on the first new segment for a closure,
+ // then fp == top - top->args is correct, but if
+ // the closure has its own big argument frame and
+ // allocated a second segment (see below),
+ // the fp is slightly above top - top->args.
+ // That condition can't happen normally though
+ // (stack pointers go down, not up), so we can accept
+ // any fp between top and top - top->args as
+ // indicating the top of the segment.
+ top = (Stktop*)g->stackbase;
+ if(argp < (byte*)top - top->argsize || (byte*)top < argp)
+ goto nomatch;
+
+ // The deferred call makes a new segment big enough
+ // for the argument frame but not necessarily big
+ // enough for the function's local frame (size unknown
+ // at the time of the call), so the function might have
+ // made its own segment immediately. If that's the
+ // case, back top up to the older one, the one that
+ // reflect.call would have made for the panic.
+ //
+ // The fp comparison here checks that the argument
+ // frame that was copied during the split (the top->args
+ // bytes above top->fp) abuts the old top of stack.
+ // This is a correct test for both closure and non-closure code.
+ oldtop = (Stktop*)top->stackbase;
+ if(oldtop != nil && top->argp == (byte*)oldtop - top->argsize)
+ top = oldtop;
+
+ // Now we have the segment that was created to
+ // run this call. It must have been marked as a panic segment.
+ if(!top->panic)
+ goto nomatch;
+
+ // Okay, this is the top frame of a deferred call
+ // in response to a panic. It can see the panic argument.
+ p->recovered = 1;
+ ret = p->arg;
+ FLUSH(&ret);
+ return;
+
+nomatch:
+ ret.type = nil;
+ ret.data = nil;
+ FLUSH(&ret);
+}
+
+
+// Put on gfree list. Sched must be locked.
+static void
+gfput(G *g)
+{
+ if(g->stackguard - StackGuard != g->stack0)
+ runtime·throw("invalid stack in gfput");
+ g->schedlink = runtime·sched.gfree;
+ runtime·sched.gfree = g;
+}
+
+// Get from gfree list. Sched must be locked.
+static G*
+gfget(void)
+{
+ G *g;
+
+ g = runtime·sched.gfree;
+ if(g)
+ runtime·sched.gfree = g->schedlink;
+ return g;
+}
+
+void
+runtime·Breakpoint(void)
+{
+ runtime·breakpoint();
+}
+
+void
+runtime·Goexit(void)
+{
+ rundefer();
+ runtime·goexit();
+}
+
+void
+runtime·Gosched(void)
+{
+ runtime·gosched();
+}
+
+void
+runtime·LockOSThread(void)
+{
+ if(runtime·sched.predawn)
+ runtime·throw("cannot wire during init");
+ m->lockedg = g;
+ g->lockedm = m;
+}
+
+// delete when scheduler is stronger
+int32
+runtime·gomaxprocsfunc(int32 n)
+{
+ int32 ret;
+ uint32 v;
+
+ schedlock();
+ ret = runtime·gomaxprocs;
+ if(n <= 0)
+ n = ret;
+ if(n > maxgomaxprocs)
+ n = maxgomaxprocs;
+ runtime·gomaxprocs = n;
+ if(runtime·gomaxprocs > 1)
+ runtime·singleproc = false;
+ if(runtime·gcwaiting != 0) {
+ if(atomic_mcpumax(runtime·sched.atomic) != 1)
+ runtime·throw("invalid mcpumax during gc");
+ schedunlock();
+ return ret;
+ }
+
+ setmcpumax(n);
+
+ // If there are now fewer allowed procs
+ // than procs running, stop.
+ v = runtime·atomicload(&runtime·sched.atomic);
+ if(atomic_mcpu(v) > n) {
+ schedunlock();
+ runtime·gosched();
+ return ret;
+ }
+ // handle more procs
+ matchmg();
+ schedunlock();
+ return ret;
+}
+
+void
+runtime·UnlockOSThread(void)
+{
+ m->lockedg = nil;
+ g->lockedm = nil;
+}
+
+bool
+runtime·lockedOSThread(void)
+{
+ return g->lockedm != nil && m->lockedg != nil;
+}
+
+// for testing of wire, unwire
+void
+runtime·mid(uint32 ret)
+{
+ ret = m->id;
+ FLUSH(&ret);
+}
+
+void
+runtime·Goroutines(int32 ret)
+{
+ ret = runtime·sched.gcount;
+ FLUSH(&ret);
+}
+
+int32
+runtime·mcount(void)
+{
+ return runtime·sched.mcount;
+}
+
+void
+runtime·badmcall(void) // called from assembly
+{
+ runtime·throw("runtime: mcall called on m->g0 stack");
+}
+
+void
+runtime·badmcall2(void) // called from assembly
+{
+ runtime·throw("runtime: mcall function returned");
+}
+
+static struct {
+ Lock;
+ void (*fn)(uintptr*, int32);
+ int32 hz;
+ uintptr pcbuf[100];
+} prof;
+
+void
+runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
+{
+ int32 n;
+
+ if(prof.fn == nil || prof.hz == 0)
+ return;
+
+ runtime·lock(&prof);
+ if(prof.fn == nil) {
+ runtime·unlock(&prof);
+ return;
+ }
+ n = runtime·gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
+ if(n > 0)
+ prof.fn(prof.pcbuf, n);
+ runtime·unlock(&prof);
+}
+
+void
+runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
+{
+ // Force sane arguments.
+ if(hz < 0)
+ hz = 0;
+ if(hz == 0)
+ fn = nil;
+ if(fn == nil)
+ hz = 0;
+
+ // Stop profiler on this cpu so that it is safe to lock prof.
+ // if a profiling signal came in while we had prof locked,
+ // it would deadlock.
+ runtime·resetcpuprofiler(0);
+
+ runtime·lock(&prof);
+ prof.fn = fn;
+ prof.hz = hz;
+ runtime·unlock(&prof);
+ runtime·lock(&runtime·sched);
+ runtime·sched.profilehz = hz;
+ runtime·unlock(&runtime·sched);
+
+ if(hz != 0)
+ runtime·resetcpuprofiler(hz);
+}
+
+void (*libcgo_setenv)(byte**);
+
+void
+os·setenv_c(String k, String v)
+{
+ byte *arg[2];
+
+ if(libcgo_setenv == nil)
+ return;
+
+ arg[0] = runtime·malloc(k.len + 1);
+ runtime·memmove(arg[0], k.str, k.len);
+ arg[0][k.len] = 0;
+
+ arg[1] = runtime·malloc(v.len + 1);
+ runtime·memmove(arg[1], v.str, v.len);
+ arg[1][v.len] = 0;
+
+ runtime·asmcgocall(libcgo_setenv, arg);
+ runtime·free(arg[0]);
+ runtime·free(arg[1]);
+}
diff --git a/src/pkg/runtime/proc.p b/src/pkg/runtime/proc.p
new file mode 100644
index 000000000..f0b46de61
--- /dev/null
+++ b/src/pkg/runtime/proc.p
@@ -0,0 +1,526 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+model for proc.c as of 2011/07/22.
+
+takes 4900 seconds to explore 1189070 states
+with G=3, var_gomaxprocs=1
+on a Core i7 L640 2.13 GHz Lenovo X201s.
+
+rm -f proc.p.trail pan.* pan
+spin -a proc.p
+gcc -DSAFETY -DREACH -DMEMLIM'='4000 -o pan pan.c
+pan -w28 -n -i -m500000
+test -f proc.p.trail && pan -r proc.p.trail
+*/
+
+/*
+ * scheduling parameters
+ */
+
+/*
+ * the number of goroutines G doubles as the maximum
+ * number of OS threads; the max is reachable when all
+ * the goroutines are blocked in system calls.
+ */
+#define G 3
+
+/*
+ * whether to allow gomaxprocs to vary during execution.
+ * enabling this checks the scheduler even when code is
+ * calling GOMAXPROCS, but it also slows down the verification
+ * by about 10x.
+ */
+#define var_gomaxprocs 1 /* allow gomaxprocs to vary */
+
+/* gomaxprocs */
+#if var_gomaxprocs
+byte gomaxprocs = 3;
+#else
+#define gomaxprocs 3
+#endif
+
+/* queue of waiting M's: sched_mhead[:mwait] */
+byte mwait;
+byte sched_mhead[G];
+
+/* garbage collector state */
+bit gc_lock, gcwaiting;
+
+/* goroutines sleeping, waiting to run */
+byte gsleep, gwait;
+
+/* scheduler state */
+bit sched_lock;
+bit sched_stopped;
+bit atomic_gwaiting, atomic_waitstop;
+byte atomic_mcpu, atomic_mcpumax;
+
+/* M struct fields - state for handing off g to m. */
+bit m_waitnextg[G];
+bit m_havenextg[G];
+bit m_nextg[G];
+
+/*
+ * opt_atomic/opt_dstep mark atomic/deterministics
+ * sequences that are marked only for reasons of
+ * optimization, not for correctness of the algorithms.
+ *
+ * in general any code that runs while holding the
+ * schedlock and does not refer to or modify the atomic_*
+ * fields can be marked atomic/dstep without affecting
+ * the usefulness of the model. since we trust the lock
+ * implementation, what we really want to test is the
+ * interleaving of the atomic fast paths with entersyscall
+ * and exitsyscall.
+ */
+#define opt_atomic atomic
+#define opt_dstep d_step
+
+/* locks */
+inline lock(x) {
+ d_step { x == 0; x = 1 }
+}
+
+inline unlock(x) {
+ d_step { assert x == 1; x = 0 }
+}
+
+/* notes */
+inline noteclear(x) {
+ x = 0
+}
+
+inline notesleep(x) {
+ x == 1
+}
+
+inline notewakeup(x) {
+ opt_dstep { assert x == 0; x = 1 }
+}
+
+/*
+ * scheduler
+ */
+inline schedlock() {
+ lock(sched_lock)
+}
+
+inline schedunlock() {
+ unlock(sched_lock)
+}
+
+/*
+ * canaddmcpu is like the C function but takes
+ * an extra argument to include in the test, to model
+ * "cannget() && canaddmcpu()" as "canaddmcpu(cangget())"
+ */
+inline canaddmcpu(g) {
+ d_step {
+ g && atomic_mcpu < atomic_mcpumax;
+ atomic_mcpu++;
+ }
+}
+
+/*
+ * gput is like the C function.
+ * instead of tracking goroutines explicitly we
+ * maintain only the count of the number of
+ * waiting goroutines.
+ */
+inline gput() {
+ /* omitted: lockedm, idlem concerns */
+ opt_dstep {
+ gwait++;
+ if
+ :: gwait == 1 ->
+ atomic_gwaiting = 1
+ :: else
+ fi
+ }
+}
+
+/*
+ * cangget is a macro so it can be passed to
+ * canaddmcpu (see above).
+ */
+#define cangget() (gwait>0)
+
+/*
+ * gget is like the C function.
+ */
+inline gget() {
+ opt_dstep {
+ assert gwait > 0;
+ gwait--;
+ if
+ :: gwait == 0 ->
+ atomic_gwaiting = 0
+ :: else
+ fi
+ }
+}
+
+/*
+ * mput is like the C function.
+ * here we do keep an explicit list of waiting M's,
+ * so that we know which ones can be awakened.
+ * we use _pid-1 because the monitor is proc 0.
+ */
+inline mput() {
+ opt_dstep {
+ sched_mhead[mwait] = _pid - 1;
+ mwait++
+ }
+}
+
+/*
+ * mnextg is like the C function mnextg(m, g).
+ * it passes an unspecified goroutine to m to start running.
+ */
+inline mnextg(m) {
+ opt_dstep {
+ m_nextg[m] = 1;
+ if
+ :: m_waitnextg[m] ->
+ m_waitnextg[m] = 0;
+ notewakeup(m_havenextg[m])
+ :: else
+ fi
+ }
+}
+
+/*
+ * mgetnextg handles the main m handoff in matchmg.
+ * it is like mget() || new M followed by mnextg(m, g),
+ * but combined to avoid a local variable.
+ * unlike the C code, a new M simply assumes it is
+ * running a g instead of using the mnextg coordination
+ * to obtain one.
+ */
+inline mgetnextg() {
+ opt_atomic {
+ if
+ :: mwait > 0 ->
+ mwait--;
+ mnextg(sched_mhead[mwait]);
+ sched_mhead[mwait] = 0;
+ :: else ->
+ run mstart();
+ fi
+ }
+}
+
+/*
+ * nextgandunlock is like the C function.
+ * it pulls a g off the queue or else waits for one.
+ */
+inline nextgandunlock() {
+ assert atomic_mcpu <= G;
+
+ if
+ :: m_nextg[_pid-1] ->
+ m_nextg[_pid-1] = 0;
+ schedunlock();
+ :: canaddmcpu(!m_nextg[_pid-1] && cangget()) ->
+ gget();
+ schedunlock();
+ :: else ->
+ opt_dstep {
+ mput();
+ m_nextg[_pid-1] = 0;
+ m_waitnextg[_pid-1] = 1;
+ noteclear(m_havenextg[_pid-1]);
+ }
+ if
+ :: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
+ atomic_waitstop = 0;
+ notewakeup(sched_stopped)
+ :: else
+ fi;
+ schedunlock();
+ opt_dstep {
+ notesleep(m_havenextg[_pid-1]);
+ assert m_nextg[_pid-1];
+ m_nextg[_pid-1] = 0;
+ }
+ fi
+}
+
+/*
+ * stoptheworld is like the C function.
+ */
+inline stoptheworld() {
+ schedlock();
+ gcwaiting = 1;
+ atomic_mcpumax = 1;
+ do
+ :: d_step { atomic_mcpu > 1 ->
+ noteclear(sched_stopped);
+ assert !atomic_waitstop;
+ atomic_waitstop = 1 }
+ schedunlock();
+ notesleep(sched_stopped);
+ schedlock();
+ :: else ->
+ break
+ od;
+ schedunlock();
+}
+
+/*
+ * starttheworld is like the C function.
+ */
+inline starttheworld() {
+ schedlock();
+ gcwaiting = 0;
+ atomic_mcpumax = gomaxprocs;
+ matchmg();
+ schedunlock();
+}
+
+/*
+ * matchmg is like the C function.
+ */
+inline matchmg() {
+ do
+ :: canaddmcpu(cangget()) ->
+ gget();
+ mgetnextg();
+ :: else -> break
+ od
+}
+
+/*
+ * ready is like the C function.
+ * it puts a g on the run queue.
+ */
+inline ready() {
+ schedlock();
+ gput()
+ matchmg()
+ schedunlock()
+}
+
+/*
+ * schedule simulates the C scheduler.
+ * it assumes that there is always a goroutine
+ * running already, and the goroutine has entered
+ * the scheduler for an unspecified reason,
+ * either to yield or to block.
+ */
+inline schedule() {
+ schedlock();
+
+ mustsched = 0;
+ atomic_mcpu--;
+ assert atomic_mcpu <= G;
+ if
+ :: skip ->
+ // goroutine yields, still runnable
+ gput();
+ :: gsleep+1 < G ->
+ // goroutine goes to sleep (but there is another that can wake it)
+ gsleep++
+ fi;
+
+ // Find goroutine to run.
+ nextgandunlock()
+}
+
+/*
+ * schedpend is > 0 if a goroutine is about to committed to
+ * entering the scheduler but has not yet done so.
+ * Just as we don't test for the undesirable conditions when a
+ * goroutine is in the scheduler, we don't test for them when
+ * a goroutine will be in the scheduler shortly.
+ * Modeling this state lets us replace mcpu cas loops with
+ * simpler mcpu atomic adds.
+ */
+byte schedpend;
+
+/*
+ * entersyscall is like the C function.
+ */
+inline entersyscall() {
+ bit willsched;
+
+ /*
+ * Fast path. Check all the conditions tested during schedlock/schedunlock
+ * below, and if we can get through the whole thing without stopping, run it
+ * in one atomic cas-based step.
+ */
+ atomic {
+ atomic_mcpu--;
+ if
+ :: atomic_gwaiting ->
+ skip
+ :: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
+ skip
+ :: else ->
+ goto Lreturn_entersyscall;
+ fi;
+ willsched = 1;
+ schedpend++;
+ }
+
+ /*
+ * Normal path.
+ */
+ schedlock()
+ opt_dstep {
+ if
+ :: willsched ->
+ schedpend--;
+ willsched = 0
+ :: else
+ fi
+ }
+ if
+ :: atomic_gwaiting ->
+ matchmg()
+ :: else
+ fi;
+ if
+ :: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
+ atomic_waitstop = 0;
+ notewakeup(sched_stopped)
+ :: else
+ fi;
+ schedunlock();
+Lreturn_entersyscall:
+ skip
+}
+
+/*
+ * exitsyscall is like the C function.
+ */
+inline exitsyscall() {
+ /*
+ * Fast path. If there's a cpu available, use it.
+ */
+ atomic {
+ // omitted profilehz check
+ atomic_mcpu++;
+ if
+ :: atomic_mcpu >= atomic_mcpumax ->
+ skip
+ :: else ->
+ goto Lreturn_exitsyscall
+ fi
+ }
+
+ /*
+ * Normal path.
+ */
+ schedlock();
+ d_step {
+ if
+ :: atomic_mcpu <= atomic_mcpumax ->
+ skip
+ :: else ->
+ mustsched = 1
+ fi
+ }
+ schedunlock()
+Lreturn_exitsyscall:
+ skip
+}
+
+#if var_gomaxprocs
+inline gomaxprocsfunc() {
+ schedlock();
+ opt_atomic {
+ if
+ :: gomaxprocs != 1 -> gomaxprocs = 1
+ :: gomaxprocs != 2 -> gomaxprocs = 2
+ :: gomaxprocs != 3 -> gomaxprocs = 3
+ fi;
+ }
+ if
+ :: gcwaiting != 0 ->
+ assert atomic_mcpumax == 1
+ :: else ->
+ atomic_mcpumax = gomaxprocs;
+ if
+ :: atomic_mcpu > gomaxprocs ->
+ mustsched = 1
+ :: else ->
+ matchmg()
+ fi
+ fi;
+ schedunlock();
+}
+#endif
+
+/*
+ * mstart is the entry point for a new M.
+ * our model of an M is always running some
+ * unspecified goroutine.
+ */
+proctype mstart() {
+ /*
+ * mustsched is true if the goroutine must enter the
+ * scheduler instead of continuing to execute.
+ */
+ bit mustsched;
+
+ do
+ :: skip ->
+ // goroutine reschedules.
+ schedule()
+ :: !mustsched ->
+ // goroutine does something.
+ if
+ :: skip ->
+ // goroutine executes system call
+ entersyscall();
+ exitsyscall()
+ :: atomic { gsleep > 0; gsleep-- } ->
+ // goroutine wakes another goroutine
+ ready()
+ :: lock(gc_lock) ->
+ // goroutine runs a garbage collection
+ stoptheworld();
+ starttheworld();
+ unlock(gc_lock)
+#if var_gomaxprocs
+ :: skip ->
+ // goroutine picks a new gomaxprocs
+ gomaxprocsfunc()
+#endif
+ fi
+ od;
+
+ assert 0;
+}
+
+/*
+ * monitor initializes the scheduler state
+ * and then watches for impossible conditions.
+ */
+active proctype monitor() {
+ opt_dstep {
+ byte i = 1;
+ do
+ :: i < G ->
+ gput();
+ i++
+ :: else -> break
+ od;
+ atomic_mcpu = 1;
+ atomic_mcpumax = 1;
+ }
+ run mstart();
+
+ do
+ // Should never have goroutines waiting with procs available.
+ :: !sched_lock && schedpend==0 && gwait > 0 && atomic_mcpu < atomic_mcpumax ->
+ assert 0
+ // Should never have gc waiting for stop if things have already stopped.
+ :: !sched_lock && schedpend==0 && atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
+ assert 0
+ od
+}
diff --git a/src/pkg/runtime/proc_test.go b/src/pkg/runtime/proc_test.go
new file mode 100644
index 000000000..32111080a
--- /dev/null
+++ b/src/pkg/runtime/proc_test.go
@@ -0,0 +1,125 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync/atomic"
+ "testing"
+)
+
+var stop = make(chan bool, 1)
+
+func perpetuumMobile() {
+ select {
+ case <-stop:
+ default:
+ go perpetuumMobile()
+ }
+}
+
+func TestStopTheWorldDeadlock(t *testing.T) {
+ if testing.Short() {
+ t.Logf("skipping during short test")
+ return
+ }
+ maxprocs := runtime.GOMAXPROCS(3)
+ compl := make(chan bool, 2)
+ go func() {
+ for i := 0; i != 1000; i += 1 {
+ runtime.GC()
+ }
+ compl <- true
+ }()
+ go func() {
+ for i := 0; i != 1000; i += 1 {
+ runtime.GOMAXPROCS(3)
+ }
+ compl <- true
+ }()
+ go perpetuumMobile()
+ <-compl
+ <-compl
+ stop <- true
+ runtime.GOMAXPROCS(maxprocs)
+}
+
+func stackGrowthRecursive(i int) {
+ var pad [128]uint64
+ if i != 0 && pad[0] == 0 {
+ stackGrowthRecursive(i - 1)
+ }
+}
+
+func BenchmarkStackGrowth(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ stackGrowthRecursive(10)
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSyscall(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ runtime.Entersyscall()
+ runtime.Exitsyscall()
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSyscallWork(b *testing.B) {
+ const CallsPerSched = 1000
+ const LocalWork = 100
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ foo := 42
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ runtime.Entersyscall()
+ for i := 0; i < LocalWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ runtime.Exitsyscall()
+ }
+ }
+ c <- foo == 42
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
diff --git a/src/pkg/runtime/rune.c b/src/pkg/runtime/rune.c
new file mode 100644
index 000000000..86ee76ddd
--- /dev/null
+++ b/src/pkg/runtime/rune.c
@@ -0,0 +1,224 @@
+/*
+ * The authors of this software are Rob Pike and Ken Thompson.
+ * Copyright (c) 2002 by Lucent Technologies.
+ * Portions Copyright 2009 The Go Authors. All rights reserved.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ */
+
+/*
+ * This code is copied, with slight editing due to type differences,
+ * from a subset of ../lib9/utf/rune.c
+ */
+
+#include "runtime.h"
+
+enum
+{
+ Bit1 = 7,
+ Bitx = 6,
+ Bit2 = 5,
+ Bit3 = 4,
+ Bit4 = 3,
+ Bit5 = 2,
+
+ T1 = ((1<<(Bit1+1))-1) ^ 0xFF, /* 0000 0000 */
+ Tx = ((1<<(Bitx+1))-1) ^ 0xFF, /* 1000 0000 */
+ T2 = ((1<<(Bit2+1))-1) ^ 0xFF, /* 1100 0000 */
+ T3 = ((1<<(Bit3+1))-1) ^ 0xFF, /* 1110 0000 */
+ T4 = ((1<<(Bit4+1))-1) ^ 0xFF, /* 1111 0000 */
+ T5 = ((1<<(Bit5+1))-1) ^ 0xFF, /* 1111 1000 */
+
+ Rune1 = (1<<(Bit1+0*Bitx))-1, /* 0000 0000 0111 1111 */
+ Rune2 = (1<<(Bit2+1*Bitx))-1, /* 0000 0111 1111 1111 */
+ Rune3 = (1<<(Bit3+2*Bitx))-1, /* 1111 1111 1111 1111 */
+ Rune4 = (1<<(Bit4+3*Bitx))-1, /* 0001 1111 1111 1111 1111 1111 */
+
+ Maskx = (1<<Bitx)-1, /* 0011 1111 */
+ Testx = Maskx ^ 0xFF, /* 1100 0000 */
+
+ Runeerror = 0xFFFD,
+ Runeself = 0x80,
+
+ Bad = Runeerror,
+
+ Runemax = 0x10FFFF, /* maximum rune value */
+};
+
+/*
+ * Modified by Wei-Hwa Huang, Google Inc., on 2004-09-24
+ * This is a slower but "safe" version of the old chartorune
+ * that works on strings that are not necessarily null-terminated.
+ *
+ * If you know for sure that your string is null-terminated,
+ * chartorune will be a bit faster.
+ *
+ * It is guaranteed not to attempt to access "length"
+ * past the incoming pointer. This is to avoid
+ * possible access violations. If the string appears to be
+ * well-formed but incomplete (i.e., to get the whole Rune
+ * we'd need to read past str+length) then we'll set the Rune
+ * to Bad and return 0.
+ *
+ * Note that if we have decoding problems for other
+ * reasons, we return 1 instead of 0.
+ */
+int32
+runtime·charntorune(int32 *rune, uint8 *str, int32 length)
+{
+ int32 c, c1, c2, c3, l;
+
+ /* When we're not allowed to read anything */
+ if(length <= 0) {
+ goto badlen;
+ }
+
+ /*
+ * one character sequence (7-bit value)
+ * 00000-0007F => T1
+ */
+ c = *(uint8*)str;
+ if(c < Tx) {
+ *rune = c;
+ return 1;
+ }
+
+ // If we can't read more than one character we must stop
+ if(length <= 1) {
+ goto badlen;
+ }
+
+ /*
+ * two character sequence (11-bit value)
+ * 0080-07FF => T2 Tx
+ */
+ c1 = *(uint8*)(str+1) ^ Tx;
+ if(c1 & Testx)
+ goto bad;
+ if(c < T3) {
+ if(c < T2)
+ goto bad;
+ l = ((c << Bitx) | c1) & Rune2;
+ if(l <= Rune1)
+ goto bad;
+ *rune = l;
+ return 2;
+ }
+
+ // If we can't read more than two characters we must stop
+ if(length <= 2) {
+ goto badlen;
+ }
+
+ /*
+ * three character sequence (16-bit value)
+ * 0800-FFFF => T3 Tx Tx
+ */
+ c2 = *(uint8*)(str+2) ^ Tx;
+ if(c2 & Testx)
+ goto bad;
+ if(c < T4) {
+ l = ((((c << Bitx) | c1) << Bitx) | c2) & Rune3;
+ if(l <= Rune2)
+ goto bad;
+ *rune = l;
+ return 3;
+ }
+
+ if (length <= 3)
+ goto badlen;
+
+ /*
+ * four character sequence (21-bit value)
+ * 10000-1FFFFF => T4 Tx Tx Tx
+ */
+ c3 = *(uint8*)(str+3) ^ Tx;
+ if (c3 & Testx)
+ goto bad;
+ if (c < T5) {
+ l = ((((((c << Bitx) | c1) << Bitx) | c2) << Bitx) | c3) & Rune4;
+ if (l <= Rune3 || l > Runemax)
+ goto bad;
+ *rune = l;
+ return 4;
+ }
+
+ // Support for 5-byte or longer UTF-8 would go here, but
+ // since we don't have that, we'll just fall through to bad.
+
+ /*
+ * bad decoding
+ */
+bad:
+ *rune = Bad;
+ return 1;
+badlen:
+ // was return 0, but return 1 is more convenient for the runtime.
+ *rune = Bad;
+ return 1;
+
+}
+
+int32
+runtime·runetochar(byte *str, int32 rune) /* note: in original, arg2 was pointer */
+{
+ /* Runes are signed, so convert to unsigned for range check. */
+ uint32 c;
+
+ /*
+ * one character sequence
+ * 00000-0007F => 00-7F
+ */
+ c = rune;
+ if(c <= Rune1) {
+ str[0] = c;
+ return 1;
+ }
+
+ /*
+ * two character sequence
+ * 0080-07FF => T2 Tx
+ */
+ if(c <= Rune2) {
+ str[0] = T2 | (c >> 1*Bitx);
+ str[1] = Tx | (c & Maskx);
+ return 2;
+ }
+
+ /*
+ * If the Rune is out of range, convert it to the error rune.
+ * Do this test here because the error rune encodes to three bytes.
+ * Doing it earlier would duplicate work, since an out of range
+ * Rune wouldn't have fit in one or two bytes.
+ */
+ if (c > Runemax)
+ c = Runeerror;
+
+ /*
+ * three character sequence
+ * 0800-FFFF => T3 Tx Tx
+ */
+ if (c <= Rune3) {
+ str[0] = T3 | (c >> 2*Bitx);
+ str[1] = Tx | ((c >> 1*Bitx) & Maskx);
+ str[2] = Tx | (c & Maskx);
+ return 3;
+ }
+
+ /*
+ * four character sequence (21-bit value)
+ * 10000-1FFFFF => T4 Tx Tx Tx
+ */
+ str[0] = T4 | (c >> 3*Bitx);
+ str[1] = Tx | ((c >> 2*Bitx) & Maskx);
+ str[2] = Tx | ((c >> 1*Bitx) & Maskx);
+ str[3] = Tx | (c & Maskx);
+ return 4;
+}
diff --git a/src/pkg/runtime/runtime-gdb.py b/src/pkg/runtime/runtime-gdb.py
new file mode 100644
index 000000000..a96f3f382
--- /dev/null
+++ b/src/pkg/runtime/runtime-gdb.py
@@ -0,0 +1,400 @@
+# Copyright 2010 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+"""GDB Pretty printers and convenience functions for Go's runtime structures.
+
+This script is loaded by GDB when it finds a .debug_gdb_scripts
+section in the compiled binary. The [68]l linkers emit this with a
+path to this file based on the path to the runtime package.
+"""
+
+# Known issues:
+# - pretty printing only works for the 'native' strings. E.g. 'type
+# foo string' will make foo a plain struct in the eyes of gdb,
+# circumventing the pretty print triggering.
+
+
+import sys, re
+
+print >>sys.stderr, "Loading Go Runtime support."
+
+# allow to manually reload while developing
+goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
+goobjfile.pretty_printers = []
+
+#
+# Pretty Printers
+#
+
+class StringTypePrinter:
+ "Pretty print Go strings."
+
+ pattern = re.compile(r'^struct string$')
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'string'
+
+ def to_string(self):
+ l = int(self.val['len'])
+ return self.val['str'].string("utf-8", "ignore", l)
+
+
+class SliceTypePrinter:
+ "Pretty print slices."
+
+ pattern = re.compile(r'^struct \[\]')
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'array'
+
+ def to_string(self):
+ return str(self.val.type)[6:] # skip 'struct '
+
+ def children(self):
+ ptr = self.val["array"]
+ for idx in range(self.val["len"]):
+ yield ('[%d]' % idx, (ptr + idx).dereference())
+
+
+class MapTypePrinter:
+ """Pretty print map[K]V types.
+
+ Map-typed go variables are really pointers. dereference them in gdb
+ to inspect their contents with this pretty printer.
+ """
+
+ pattern = re.compile(r'^struct hash<.*>$')
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'map'
+
+ def to_string(self):
+ return str(self.val.type)
+
+ def children(self):
+ stab = self.val['st']
+ i = 0
+ for v in self.traverse_hash(stab):
+ yield ("[%d]" % i, v['key'])
+ yield ("[%d]" % (i + 1), v['val'])
+ i += 2
+
+ def traverse_hash(self, stab):
+ ptr = stab['entry'].address
+ end = stab['end']
+ while ptr < end:
+ v = ptr.dereference()
+ ptr = ptr + 1
+ if v['hash'] == 0: continue
+ if v['hash'] & 63 == 63: # subtable
+ for v in self.traverse_hash(v['key'].cast(self.val['st'].type)):
+ yield v
+ else:
+ yield v
+
+
+class ChanTypePrinter:
+ """Pretty print chan[T] types.
+
+ Chan-typed go variables are really pointers. dereference them in gdb
+ to inspect their contents with this pretty printer.
+ """
+
+ pattern = re.compile(r'^struct hchan<.*>$')
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'array'
+
+ def to_string(self):
+ return str(self.val.type)
+
+ def children(self):
+ # see chan.c chanbuf()
+ et = [x.type for x in self.val['free'].type.target().fields() if x.name == 'elem'][0]
+ ptr = (self.val.address + 1).cast(et.pointer())
+ for i in range(self.val["qcount"]):
+ j = (self.val["recvx"] + i) % self.val["dataqsiz"]
+ yield ('[%d]' % i, (ptr + j).dereference())
+
+
+#
+# Register all the *Printer classes above.
+#
+
+def makematcher(klass):
+ def matcher(val):
+ try:
+ if klass.pattern.match(str(val.type)):
+ return klass(val)
+ except:
+ pass
+ return matcher
+
+goobjfile.pretty_printers.extend([makematcher(k) for k in vars().values() if hasattr(k, 'pattern')])
+
+#
+# For reference, this is what we're trying to do:
+# eface: p *(*(struct 'runtime.commonType'*)'main.e'->type_->data)->string
+# iface: p *(*(struct 'runtime.commonType'*)'main.s'->tab->Type->data)->string
+#
+# interface types can't be recognized by their name, instead we check
+# if they have the expected fields. Unfortunately the mapping of
+# fields to python attributes in gdb.py isn't complete: you can't test
+# for presence other than by trapping.
+
+
+def is_iface(val):
+ try:
+ return str(val['tab'].type) == "struct runtime.itab *" \
+ and str(val['data'].type) == "void *"
+ except:
+ pass
+
+def is_eface(val):
+ try:
+ return str(val['_type'].type) == "struct runtime._type *" \
+ and str(val['data'].type) == "void *"
+ except:
+ pass
+
+def lookup_type(name):
+ try:
+ return gdb.lookup_type(name)
+ except:
+ pass
+ try:
+ return gdb.lookup_type('struct ' + name)
+ except:
+ pass
+ try:
+ return gdb.lookup_type('struct ' + name[1:]).pointer()
+ except:
+ pass
+
+
+def iface_dtype(obj):
+ "Decode type of the data field of an eface or iface struct."
+
+ if is_iface(obj):
+ go_type_ptr = obj['tab']['_type']
+ elif is_eface(obj):
+ go_type_ptr = obj['_type']
+ else:
+ return
+
+ ct = gdb.lookup_type("struct runtime.commonType").pointer()
+ dynamic_go_type = go_type_ptr['ptr'].cast(ct).dereference()
+ dtype_name = dynamic_go_type['string'].dereference()['str'].string()
+ type_size = int(dynamic_go_type['size'])
+ uintptr_size = int(dynamic_go_type['size'].type.sizeof) # size is itself an uintptr
+ dynamic_gdb_type = lookup_type(dtype_name)
+ if type_size > uintptr_size:
+ dynamic_gdb_type = dynamic_gdb_type.pointer()
+ return dynamic_gdb_type
+
+
+class IfacePrinter:
+ """Pretty print interface values
+
+ Casts the data field to the appropriate dynamic type."""
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'string'
+
+ def to_string(self):
+ if self.val['data'] == 0:
+ return 0x0
+ try:
+ dtype = iface_dtype(self.val)
+ except:
+ return "<bad dynamic type>"
+ try:
+ return self.val['data'].cast(dtype).dereference()
+ except:
+ pass
+ return self.val['data'].cast(dtype)
+
+
+def ifacematcher(val):
+ if is_iface(val) or is_eface(val):
+ return IfacePrinter(val)
+
+goobjfile.pretty_printers.append(ifacematcher)
+
+#
+# Convenience Functions
+#
+
+class GoLenFunc(gdb.Function):
+ "Length of strings, slices, maps or channels"
+
+ how = ((StringTypePrinter, 'len' ),
+ (SliceTypePrinter, 'len'),
+ (MapTypePrinter, 'count'),
+ (ChanTypePrinter, 'qcount'))
+
+ def __init__(self):
+ super(GoLenFunc, self).__init__("len")
+
+ def invoke(self, obj):
+ typename = str(obj.type)
+ for klass, fld in self.how:
+ if klass.pattern.match(typename):
+ return obj[fld]
+
+class GoCapFunc(gdb.Function):
+ "Capacity of slices or channels"
+
+ how = ((SliceTypePrinter, 'cap'),
+ (ChanTypePrinter, 'dataqsiz'))
+
+ def __init__(self):
+ super(GoCapFunc, self).__init__("cap")
+
+ def invoke(self, obj):
+ typename = str(obj.type)
+ for klass, fld in self.how:
+ if klass.pattern.match(typename):
+ return obj[fld]
+
+class DTypeFunc(gdb.Function):
+ """Cast Interface values to their dynamic type.
+
+ For non-interface types this behaves as the identity operation.
+ """
+
+ def __init__(self):
+ super(DTypeFunc, self).__init__("dtype")
+
+ def invoke(self, obj):
+ try:
+ return obj['data'].cast(iface_dtype(obj))
+ except:
+ pass
+ return obj
+
+#
+# Commands
+#
+
+sts = ( 'idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')
+
+def linked_list(ptr, linkfield):
+ while ptr:
+ yield ptr
+ ptr = ptr[linkfield]
+
+
+class GoroutinesCmd(gdb.Command):
+ "List all goroutines."
+
+ def __init__(self):
+ super(GoroutinesCmd, self).__init__("info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
+
+ def invoke(self, arg, from_tty):
+ # args = gdb.string_to_argv(arg)
+ vp = gdb.lookup_type('void').pointer()
+ for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
+ if ptr['status'] == 6: # 'gdead'
+ continue
+ s = ' '
+ if ptr['m']:
+ s = '*'
+ pc = ptr['sched']['pc'].cast(vp)
+ sp = ptr['sched']['sp'].cast(vp)
+ blk = gdb.block_for_pc(long((pc)))
+ print s, ptr['goid'], "%8s" % sts[long((ptr['status']))], blk.function
+
+def find_goroutine(goid):
+ vp = gdb.lookup_type('void').pointer()
+ for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
+ if ptr['status'] == 6: # 'gdead'
+ continue
+ if ptr['goid'] == goid:
+ return [ptr['sched'][x].cast(vp) for x in 'pc', 'sp']
+ return None, None
+
+
+class GoroutineCmd(gdb.Command):
+ """Execute gdb command in the context of goroutine <goid>.
+
+ Switch PC and SP to the ones in the goroutine's G structure,
+ execute an arbitrary gdb command, and restore PC and SP.
+
+ Usage: (gdb) goroutine <goid> <gdbcmd>
+
+ Note that it is ill-defined to modify state in the context of a goroutine.
+ Restrict yourself to inspecting values.
+ """
+
+ def __init__(self):
+ super(GoroutineCmd, self).__init__("goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
+
+ def invoke(self, arg, from_tty):
+ goid, cmd = arg.split(None, 1)
+ pc, sp = find_goroutine(int(goid))
+ if not pc:
+ print "No such goroutine: ", goid
+ return
+ save_frame = gdb.selected_frame()
+ gdb.parse_and_eval('$save_pc = $pc')
+ gdb.parse_and_eval('$save_sp = $sp')
+ gdb.parse_and_eval('$pc = 0x%x' % long(pc))
+ gdb.parse_and_eval('$sp = 0x%x' % long(sp))
+ try:
+ gdb.execute(cmd)
+ finally:
+ gdb.parse_and_eval('$pc = $save_pc')
+ gdb.parse_and_eval('$sp = $save_sp')
+ save_frame.select()
+
+
+class GoIfaceCmd(gdb.Command):
+ "Print Static and dynamic interface types"
+
+ def __init__(self):
+ super(GoIfaceCmd, self).__init__("iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
+
+ def invoke(self, arg, from_tty):
+ for obj in gdb.string_to_argv(arg):
+ try:
+ #TODO fix quoting for qualified variable names
+ obj = gdb.parse_and_eval("%s" % obj)
+ except Exception, e:
+ print "Can't parse ", obj, ": ", e
+ continue
+
+ dtype = iface_dtype(obj)
+ if not dtype:
+ print "Not an interface: ", obj.type
+ continue
+
+ print "%s: %s" % (obj.type, dtype)
+
+# TODO: print interface's methods and dynamic type's func pointers thereof.
+#rsc: "to find the number of entries in the itab's Fn field look at itab.inter->numMethods
+#i am sure i have the names wrong but look at the interface type and its method count"
+# so Itype will start with a commontype which has kind = interface
+
+#
+# Register all convenience functions and CLI commands
+#
+for k in vars().values():
+ if hasattr(k, 'invoke'):
+ k()
diff --git a/src/pkg/runtime/runtime.c b/src/pkg/runtime/runtime.c
new file mode 100644
index 000000000..49aba7da0
--- /dev/null
+++ b/src/pkg/runtime/runtime.c
@@ -0,0 +1,728 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "stack.h"
+
+enum {
+ maxround = sizeof(uintptr),
+};
+
+uint32 runtime·panicking;
+void (*runtime·destroylock)(Lock*);
+
+/*
+ * We assume that all architectures turn faults and the like
+ * into apparent calls to runtime.sigpanic. If we see a "call"
+ * to runtime.sigpanic, we do not back up the PC to find the
+ * line number of the CALL instruction, because there is no CALL.
+ */
+void runtime·sigpanic(void);
+
+int32
+runtime·gotraceback(void)
+{
+ byte *p;
+
+ p = runtime·getenv("GOTRACEBACK");
+ if(p == nil || p[0] == '\0')
+ return 1; // default is on
+ return runtime·atoi(p);
+}
+
+static Lock paniclk;
+
+void
+runtime·startpanic(void)
+{
+ if(m->dying) {
+ runtime·printf("panic during panic\n");
+ runtime·exit(3);
+ }
+ m->dying = 1;
+ runtime·xadd(&runtime·panicking, 1);
+ runtime·lock(&paniclk);
+}
+
+void
+runtime·dopanic(int32 unused)
+{
+ static bool didothers;
+
+ if(g->sig != 0)
+ runtime·printf("\n[signal %x code=%p addr=%p pc=%p]\n",
+ g->sig, g->sigcode0, g->sigcode1, g->sigpc);
+
+ runtime·printf("\n");
+ if(runtime·gotraceback()){
+ runtime·traceback(runtime·getcallerpc(&unused), runtime·getcallersp(&unused), 0, g);
+ if(!didothers) {
+ didothers = true;
+ runtime·tracebackothers(g);
+ }
+ }
+ runtime·unlock(&paniclk);
+ if(runtime·xadd(&runtime·panicking, -1) != 0) {
+ // Some other m is panicking too.
+ // Let it print what it needs to print.
+ // Wait forever without chewing up cpu.
+ // It will exit when it's done.
+ static Lock deadlock;
+ runtime·lock(&deadlock);
+ runtime·lock(&deadlock);
+ }
+
+ runtime·exit(2);
+}
+
+void
+runtime·panicindex(void)
+{
+ runtime·panicstring("index out of range");
+}
+
+void
+runtime·panicslice(void)
+{
+ runtime·panicstring("slice bounds out of range");
+}
+
+void
+runtime·throwreturn(void)
+{
+ // can only happen if compiler is broken
+ runtime·throw("no return at end of a typed function - compiler is broken");
+}
+
+void
+runtime·throwinit(void)
+{
+ // can only happen with linker skew
+ runtime·throw("recursive call during initialization - linker skew");
+}
+
+void
+runtime·throw(int8 *s)
+{
+ runtime·startpanic();
+ runtime·printf("throw: %s\n", s);
+ runtime·dopanic(0);
+ *(int32*)0 = 0; // not reached
+ runtime·exit(1); // even more not reached
+}
+
+void
+runtime·panicstring(int8 *s)
+{
+ Eface err;
+
+ if(m->gcing) {
+ runtime·printf("panic: %s\n", s);
+ runtime·throw("panic during gc");
+ }
+ runtime·newErrorString(runtime·gostringnocopy((byte*)s), &err);
+ runtime·panic(err);
+}
+
+int32
+runtime·mcmp(byte *s1, byte *s2, uint32 n)
+{
+ uint32 i;
+ byte c1, c2;
+
+ for(i=0; i<n; i++) {
+ c1 = s1[i];
+ c2 = s2[i];
+ if(c1 < c2)
+ return -1;
+ if(c1 > c2)
+ return +1;
+ }
+ return 0;
+}
+
+
+byte*
+runtime·mchr(byte *p, byte c, byte *ep)
+{
+ for(; p < ep; p++)
+ if(*p == c)
+ return p;
+ return nil;
+}
+
+uint32
+runtime·rnd(uint32 n, uint32 m)
+{
+ uint32 r;
+
+ if(m > maxround)
+ m = maxround;
+ r = n % m;
+ if(r)
+ n += m-r;
+ return n;
+}
+
+static int32 argc;
+static uint8** argv;
+
+Slice os·Args;
+Slice os·Envs;
+
+void
+runtime·args(int32 c, uint8 **v)
+{
+ argc = c;
+ argv = v;
+}
+
+int32 runtime·isplan9;
+int32 runtime·iswindows;
+
+void
+runtime·goargs(void)
+{
+ String *s;
+ int32 i;
+
+ // for windows implementation see "os" package
+ if(Windows)
+ return;
+
+ s = runtime·malloc(argc*sizeof s[0]);
+ for(i=0; i<argc; i++)
+ s[i] = runtime·gostringnocopy(argv[i]);
+ os·Args.array = (byte*)s;
+ os·Args.len = argc;
+ os·Args.cap = argc;
+}
+
+void
+runtime·goenvs_unix(void)
+{
+ String *s;
+ int32 i, n;
+
+ for(n=0; argv[argc+1+n] != 0; n++)
+ ;
+
+ s = runtime·malloc(n*sizeof s[0]);
+ for(i=0; i<n; i++)
+ s[i] = runtime·gostringnocopy(argv[argc+1+i]);
+ os·Envs.array = (byte*)s;
+ os·Envs.len = n;
+ os·Envs.cap = n;
+}
+
+byte*
+runtime·getenv(int8 *s)
+{
+ int32 i, j, len;
+ byte *v, *bs;
+ String* envv;
+ int32 envc;
+
+ bs = (byte*)s;
+ len = runtime·findnull(bs);
+ envv = (String*)os·Envs.array;
+ envc = os·Envs.len;
+ for(i=0; i<envc; i++){
+ if(envv[i].len <= len)
+ continue;
+ v = envv[i].str;
+ for(j=0; j<len; j++)
+ if(bs[j] != v[j])
+ goto nomatch;
+ if(v[len] != '=')
+ goto nomatch;
+ return v+len+1;
+ nomatch:;
+ }
+ return nil;
+}
+
+void
+runtime·getgoroot(String out)
+{
+ byte *p;
+
+ p = runtime·getenv("GOROOT");
+ out = runtime·gostringnocopy(p);
+ FLUSH(&out);
+}
+
+int32
+runtime·atoi(byte *p)
+{
+ int32 n;
+
+ n = 0;
+ while('0' <= *p && *p <= '9')
+ n = n*10 + *p++ - '0';
+ return n;
+}
+
+void
+runtime·check(void)
+{
+ int8 a;
+ uint8 b;
+ int16 c;
+ uint16 d;
+ int32 e;
+ uint32 f;
+ int64 g;
+ uint64 h;
+ float32 i;
+ float64 j;
+ void* k;
+ uint16* l;
+ struct x1 {
+ byte x;
+ };
+ struct y1 {
+ struct x1 x1;
+ byte y;
+ };
+
+ if(sizeof(a) != 1) runtime·throw("bad a");
+ if(sizeof(b) != 1) runtime·throw("bad b");
+ if(sizeof(c) != 2) runtime·throw("bad c");
+ if(sizeof(d) != 2) runtime·throw("bad d");
+ if(sizeof(e) != 4) runtime·throw("bad e");
+ if(sizeof(f) != 4) runtime·throw("bad f");
+ if(sizeof(g) != 8) runtime·throw("bad g");
+ if(sizeof(h) != 8) runtime·throw("bad h");
+ if(sizeof(i) != 4) runtime·throw("bad i");
+ if(sizeof(j) != 8) runtime·throw("bad j");
+ if(sizeof(k) != sizeof(uintptr)) runtime·throw("bad k");
+ if(sizeof(l) != sizeof(uintptr)) runtime·throw("bad l");
+ if(sizeof(struct x1) != 1) runtime·throw("bad sizeof x1");
+ if(offsetof(struct y1, y) != 1) runtime·throw("bad offsetof y1.y");
+ if(sizeof(struct y1) != 2) runtime·throw("bad sizeof y1");
+
+ uint32 z;
+ z = 1;
+ if(!runtime·cas(&z, 1, 2))
+ runtime·throw("cas1");
+ if(z != 2)
+ runtime·throw("cas2");
+
+ z = 4;
+ if(runtime·cas(&z, 5, 6))
+ runtime·throw("cas3");
+ if(z != 4)
+ runtime·throw("cas4");
+
+ runtime·initsig(0);
+}
+
+/*
+ * map and chan helpers for
+ * dealing with unknown types
+ */
+static uintptr
+memhash(uint32 s, void *a)
+{
+ byte *b;
+ uintptr hash;
+
+ b = a;
+ if(sizeof(hash) == 4)
+ hash = 2860486313U;
+ else
+ hash = 33054211828000289ULL;
+ while(s > 0) {
+ if(sizeof(hash) == 4)
+ hash = (hash ^ *b) * 3267000013UL;
+ else
+ hash = (hash ^ *b) * 23344194077549503ULL;
+ b++;
+ s--;
+ }
+ return hash;
+}
+
+static uint32
+memequal(uint32 s, void *a, void *b)
+{
+ byte *ba, *bb, *aend;
+
+ if(a == b)
+ return 1;
+ ba = a;
+ bb = b;
+ aend = ba+s;
+ while(ba != aend) {
+ if(*ba != *bb)
+ return 0;
+ ba++;
+ bb++;
+ }
+ return 1;
+}
+
+static void
+memprint(uint32 s, void *a)
+{
+ uint64 v;
+
+ v = 0xbadb00b;
+ switch(s) {
+ case 1:
+ v = *(uint8*)a;
+ break;
+ case 2:
+ v = *(uint16*)a;
+ break;
+ case 4:
+ v = *(uint32*)a;
+ break;
+ case 8:
+ v = *(uint64*)a;
+ break;
+ }
+ runtime·printint(v);
+}
+
+static void
+memcopy(uint32 s, void *a, void *b)
+{
+ if(b == nil) {
+ runtime·memclr(a,s);
+ return;
+ }
+ runtime·memmove(a,b,s);
+}
+
+static uint32
+memequal8(uint32 s, uint8 *a, uint8 *b)
+{
+ USED(s);
+ return *a == *b;
+}
+
+static void
+memcopy8(uint32 s, uint8 *a, uint8 *b)
+{
+ USED(s);
+ if(b == nil) {
+ *a = 0;
+ return;
+ }
+ *a = *b;
+}
+
+static uint32
+memequal16(uint32 s, uint16 *a, uint16 *b)
+{
+ USED(s);
+ return *a == *b;
+}
+
+static void
+memcopy16(uint32 s, uint16 *a, uint16 *b)
+{
+ USED(s);
+ if(b == nil) {
+ *a = 0;
+ return;
+ }
+ *a = *b;
+}
+
+static uint32
+memequal32(uint32 s, uint32 *a, uint32 *b)
+{
+ USED(s);
+ return *a == *b;
+}
+
+static void
+memcopy32(uint32 s, uint32 *a, uint32 *b)
+{
+ USED(s);
+ if(b == nil) {
+ *a = 0;
+ return;
+ }
+ *a = *b;
+}
+
+static uint32
+memequal64(uint32 s, uint64 *a, uint64 *b)
+{
+ USED(s);
+ return *a == *b;
+}
+
+static void
+memcopy64(uint32 s, uint64 *a, uint64 *b)
+{
+ USED(s);
+ if(b == nil) {
+ *a = 0;
+ return;
+ }
+ *a = *b;
+}
+
+static uint32
+memequal128(uint32 s, uint64 *a, uint64 *b)
+{
+ USED(s);
+ return a[0] == b[0] && a[1] == b[1];
+}
+
+static void
+memcopy128(uint32 s, uint64 *a, uint64 *b)
+{
+ USED(s);
+ if(b == nil) {
+ a[0] = 0;
+ a[1] = 0;
+ return;
+ }
+ a[0] = b[0];
+ a[1] = b[1];
+}
+
+static void
+slicecopy(uint32 s, Slice *a, Slice *b)
+{
+ USED(s);
+ if(b == nil) {
+ a->array = 0;
+ a->len = 0;
+ a->cap = 0;
+ return;
+ }
+ a->array = b->array;
+ a->len = b->len;
+ a->cap = b->cap;
+}
+
+static uintptr
+strhash(uint32 s, String *a)
+{
+ USED(s);
+ return memhash((*a).len, (*a).str);
+}
+
+static uint32
+strequal(uint32 s, String *a, String *b)
+{
+ int32 alen;
+
+ USED(s);
+ alen = a->len;
+ if(alen != b->len)
+ return false;
+ return memequal(alen, a->str, b->str);
+}
+
+static void
+strprint(uint32 s, String *a)
+{
+ USED(s);
+ runtime·printstring(*a);
+}
+
+static void
+strcopy(uint32 s, String *a, String *b)
+{
+ USED(s);
+ if(b == nil) {
+ a->str = 0;
+ a->len = 0;
+ return;
+ }
+ a->str = b->str;
+ a->len = b->len;
+}
+
+static uintptr
+interhash(uint32 s, Iface *a)
+{
+ USED(s);
+ return runtime·ifacehash(*a);
+}
+
+static void
+interprint(uint32 s, Iface *a)
+{
+ USED(s);
+ runtime·printiface(*a);
+}
+
+static uint32
+interequal(uint32 s, Iface *a, Iface *b)
+{
+ USED(s);
+ return runtime·ifaceeq_c(*a, *b);
+}
+
+static void
+intercopy(uint32 s, Iface *a, Iface *b)
+{
+ USED(s);
+ if(b == nil) {
+ a->tab = 0;
+ a->data = 0;
+ return;
+ }
+ a->tab = b->tab;
+ a->data = b->data;
+}
+
+static uintptr
+nilinterhash(uint32 s, Eface *a)
+{
+ USED(s);
+ return runtime·efacehash(*a);
+}
+
+static void
+nilinterprint(uint32 s, Eface *a)
+{
+ USED(s);
+ runtime·printeface(*a);
+}
+
+static uint32
+nilinterequal(uint32 s, Eface *a, Eface *b)
+{
+ USED(s);
+ return runtime·efaceeq_c(*a, *b);
+}
+
+static void
+nilintercopy(uint32 s, Eface *a, Eface *b)
+{
+ USED(s);
+ if(b == nil) {
+ a->type = 0;
+ a->data = 0;
+ return;
+ }
+ a->type = b->type;
+ a->data = b->data;
+}
+
+uintptr
+runtime·nohash(uint32 s, void *a)
+{
+ USED(s);
+ USED(a);
+ runtime·panicstring("hash of unhashable type");
+ return 0;
+}
+
+uint32
+runtime·noequal(uint32 s, void *a, void *b)
+{
+ USED(s);
+ USED(a);
+ USED(b);
+ runtime·panicstring("comparing uncomparable types");
+ return 0;
+}
+
+Alg
+runtime·algarray[] =
+{
+[AMEM] { memhash, memequal, memprint, memcopy },
+[ANOEQ] { runtime·nohash, runtime·noequal, memprint, memcopy },
+[ASTRING] { strhash, strequal, strprint, strcopy },
+[AINTER] { interhash, interequal, interprint, intercopy },
+[ANILINTER] { nilinterhash, nilinterequal, nilinterprint, nilintercopy },
+[ASLICE] { runtime·nohash, runtime·noequal, memprint, slicecopy },
+[AMEM8] { memhash, memequal8, memprint, memcopy8 },
+[AMEM16] { memhash, memequal16, memprint, memcopy16 },
+[AMEM32] { memhash, memequal32, memprint, memcopy32 },
+[AMEM64] { memhash, memequal64, memprint, memcopy64 },
+[AMEM128] { memhash, memequal128, memprint, memcopy128 },
+[ANOEQ8] { runtime·nohash, runtime·noequal, memprint, memcopy8 },
+[ANOEQ16] { runtime·nohash, runtime·noequal, memprint, memcopy16 },
+[ANOEQ32] { runtime·nohash, runtime·noequal, memprint, memcopy32 },
+[ANOEQ64] { runtime·nohash, runtime·noequal, memprint, memcopy64 },
+[ANOEQ128] { runtime·nohash, runtime·noequal, memprint, memcopy128 },
+};
+
+int64
+runtime·nanotime(void)
+{
+ int64 sec;
+ int32 usec;
+
+ sec = 0;
+ usec = 0;
+ runtime·gettime(&sec, &usec);
+ return sec*1000000000 + (int64)usec*1000;
+}
+
+void
+runtime·Caller(int32 skip, uintptr retpc, String retfile, int32 retline, bool retbool)
+{
+ Func *f, *g;
+ uintptr pc;
+ uintptr rpc[2];
+
+ /*
+ * Ask for two PCs: the one we were asked for
+ * and what it called, so that we can see if it
+ * "called" sigpanic.
+ */
+ retpc = 0;
+ if(runtime·callers(1+skip-1, rpc, 2) < 2) {
+ retfile = runtime·emptystring;
+ retline = 0;
+ retbool = false;
+ } else if((f = runtime·findfunc(rpc[1])) == nil) {
+ retfile = runtime·emptystring;
+ retline = 0;
+ retbool = true; // have retpc at least
+ } else {
+ retpc = rpc[1];
+ retfile = f->src;
+ pc = retpc;
+ g = runtime·findfunc(rpc[0]);
+ if(pc > f->entry && (g == nil || g->entry != (uintptr)runtime·sigpanic))
+ pc--;
+ retline = runtime·funcline(f, pc);
+ retbool = true;
+ }
+ FLUSH(&retpc);
+ FLUSH(&retfile);
+ FLUSH(&retline);
+ FLUSH(&retbool);
+}
+
+void
+runtime·Callers(int32 skip, Slice pc, int32 retn)
+{
+ retn = runtime·callers(skip, (uintptr*)pc.array, pc.len);
+ FLUSH(&retn);
+}
+
+void
+runtime·FuncForPC(uintptr pc, void *retf)
+{
+ retf = runtime·findfunc(pc);
+ FLUSH(&retf);
+}
+
+uint32
+runtime·fastrand1(void)
+{
+ uint32 x;
+
+ x = m->fastrand;
+ x += x;
+ if(x & 0x80000000L)
+ x ^= 0x88888eefUL;
+ m->fastrand = x;
+ return x;
+}
diff --git a/src/pkg/runtime/runtime.h b/src/pkg/runtime/runtime.h
new file mode 100644
index 000000000..526a320ea
--- /dev/null
+++ b/src/pkg/runtime/runtime.h
@@ -0,0 +1,635 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * basic types
+ */
+typedef signed char int8;
+typedef unsigned char uint8;
+typedef signed short int16;
+typedef unsigned short uint16;
+typedef signed int int32;
+typedef unsigned int uint32;
+typedef signed long long int int64;
+typedef unsigned long long int uint64;
+typedef float float32;
+typedef double float64;
+
+#ifdef _64BIT
+typedef uint64 uintptr;
+typedef int64 intptr;
+#else
+typedef uint32 uintptr;
+typedef int32 intptr;
+#endif
+
+/*
+ * get rid of C types
+ * the / / / forces a syntax error immediately,
+ * which will show "last name: XXunsigned".
+ */
+#define unsigned XXunsigned / / /
+#define signed XXsigned / / /
+#define char XXchar / / /
+#define short XXshort / / /
+#define int XXint / / /
+#define long XXlong / / /
+#define float XXfloat / / /
+#define double XXdouble / / /
+
+/*
+ * defined types
+ */
+typedef uint8 bool;
+typedef uint8 byte;
+typedef struct Alg Alg;
+typedef struct Func Func;
+typedef struct G G;
+typedef struct Gobuf Gobuf;
+typedef struct Lock Lock;
+typedef struct M M;
+typedef struct Mem Mem;
+typedef union Note Note;
+typedef struct Slice Slice;
+typedef struct Stktop Stktop;
+typedef struct String String;
+typedef struct Usema Usema;
+typedef struct SigTab SigTab;
+typedef struct MCache MCache;
+typedef struct FixAlloc FixAlloc;
+typedef struct Iface Iface;
+typedef struct Itab Itab;
+typedef struct Eface Eface;
+typedef struct Type Type;
+typedef struct ChanType ChanType;
+typedef struct MapType MapType;
+typedef struct Defer Defer;
+typedef struct Panic Panic;
+typedef struct Hmap Hmap;
+typedef struct Hchan Hchan;
+typedef struct Complex64 Complex64;
+typedef struct Complex128 Complex128;
+
+/*
+ * per-cpu declaration.
+ * "extern register" is a special storage class implemented by 6c, 8c, etc.
+ * on machines with lots of registers, it allocates a register that will not be
+ * used in generated code. on the x86, it allocates a slot indexed by a
+ * segment register.
+ *
+ * amd64: allocated downwards from R15
+ * x86: allocated upwards from 0(GS)
+ * arm: allocated downwards from R10
+ *
+ * every C file linked into a Go program must include runtime.h
+ * so that the C compiler knows to avoid other uses of these registers.
+ * the Go compilers know to avoid them.
+ */
+extern register G* g;
+extern register M* m;
+
+/*
+ * defined constants
+ */
+enum
+{
+ // G status
+ //
+ // If you add to this list, add to the list
+ // of "okay during garbage collection" status
+ // in mgc0.c too.
+ Gidle,
+ Grunnable,
+ Grunning,
+ Gsyscall,
+ Gwaiting,
+ Gmoribund,
+ Gdead,
+};
+enum
+{
+ true = 1,
+ false = 0,
+};
+
+/*
+ * structures
+ */
+struct Lock
+{
+ uint32 key;
+#ifdef __WINDOWS__
+ void* event;
+#else
+ uint32 sema; // for OS X
+#endif
+};
+struct Usema
+{
+ uint32 u;
+ uint32 k;
+};
+union Note
+{
+ struct { // Linux
+ uint32 state;
+ };
+ struct { // Windows
+ Lock lock;
+ };
+ struct { // OS X
+ int32 wakeup;
+ Usema sema;
+ };
+};
+struct String
+{
+ byte* str;
+ int32 len;
+};
+struct Iface
+{
+ Itab* tab;
+ void* data;
+};
+struct Eface
+{
+ Type* type;
+ void* data;
+};
+struct Complex64
+{
+ float32 real;
+ float32 imag;
+};
+struct Complex128
+{
+ float64 real;
+ float64 imag;
+};
+
+struct Slice
+{ // must not move anything
+ byte* array; // actual data
+ uint32 len; // number of elements
+ uint32 cap; // allocated number of elements
+};
+struct Gobuf
+{
+ // The offsets of these fields are known to (hard-coded in) libmach.
+ byte* sp;
+ byte* pc;
+ G* g;
+};
+struct G
+{
+ byte* stackguard; // cannot move - also known to linker, libmach, libcgo
+ byte* stackbase; // cannot move - also known to libmach, libcgo
+ Defer* defer;
+ Panic* panic;
+ Gobuf sched;
+ byte* gcstack; // if status==Gsyscall, gcstack = stackbase to use during gc
+ byte* gcsp; // if status==Gsyscall, gcsp = sched.sp to use during gc
+ byte* gcguard; // if status==Gsyscall, gcguard = stackguard to use during gc
+ byte* stack0;
+ byte* entry; // initial function
+ G* alllink; // on allg
+ void* param; // passed parameter on wakeup
+ int16 status;
+ int32 goid;
+ uint32 selgen; // valid sudog pointer
+ G* schedlink;
+ bool readyonstop;
+ bool ispanic;
+ M* m; // for debuggers, but offset not hard-coded
+ M* lockedm;
+ M* idlem;
+ int32 sig;
+ uintptr sigcode0;
+ uintptr sigcode1;
+ uintptr sigpc;
+ uintptr gopc; // pc of go statement that created this goroutine
+};
+struct M
+{
+ // The offsets of these fields are known to (hard-coded in) libmach.
+ G* g0; // goroutine with scheduling stack
+ void (*morepc)(void);
+ void* moreargp; // argument pointer for more stack
+ Gobuf morebuf; // gobuf arg to morestack
+
+ // Fields not known to debuggers.
+ uint32 moreframesize; // size arguments to morestack
+ uint32 moreargsize;
+ uintptr cret; // return value from C
+ uint64 procid; // for debuggers, but offset not hard-coded
+ G* gsignal; // signal-handling G
+ uint32 tls[8]; // thread-local storage (for 386 extern register)
+ G* curg; // current running goroutine
+ int32 id;
+ int32 mallocing;
+ int32 gcing;
+ int32 locks;
+ int32 nomemprof;
+ int32 waitnextg;
+ int32 dying;
+ int32 profilehz;
+ uint32 fastrand;
+ uint64 ncgocall;
+ Note havenextg;
+ G* nextg;
+ M* alllink; // on allm
+ M* schedlink;
+ uint32 machport; // Return address for Mach IPC (OS X)
+ MCache *mcache;
+ FixAlloc *stackalloc;
+ G* lockedg;
+ G* idleg;
+ uint32 freglo[16]; // D[i] lsb and F[i]
+ uint32 freghi[16]; // D[i] msb and F[i+16]
+ uint32 fflag; // floating point compare flags
+#ifdef __WINDOWS__
+#ifdef _64BIT
+ void* gostack;
+#endif
+#endif
+};
+
+struct Stktop
+{
+ // The offsets of these fields are known to (hard-coded in) libmach.
+ uint8* stackguard;
+ uint8* stackbase;
+ Gobuf gobuf;
+ uint32 argsize;
+
+ uint8* argp; // pointer to arguments in old frame
+ uintptr free; // if free>0, call stackfree using free as size
+ bool panic; // is this frame the top of a panic?
+};
+struct Alg
+{
+ uintptr (*hash)(uint32, void*);
+ uint32 (*equal)(uint32, void*, void*);
+ void (*print)(uint32, void*);
+ void (*copy)(uint32, void*, void*);
+};
+struct SigTab
+{
+ int32 flags;
+ int8 *name;
+};
+enum
+{
+ SigCatch = 1<<0,
+ SigIgnore = 1<<1,
+ SigRestart = 1<<2,
+ SigQueue = 1<<3,
+ SigPanic = 1<<4,
+};
+
+// NOTE(rsc): keep in sync with extern.go:/type.Func.
+// Eventually, the loaded symbol table should be closer to this form.
+struct Func
+{
+ String name;
+ String type; // go type string
+ String src; // src file name
+ Slice pcln; // pc/ln tab for this func
+ uintptr entry; // entry pc
+ uintptr pc0; // starting pc, ln for table
+ int32 ln0;
+ int32 frame; // stack frame size
+ int32 args; // number of 32-bit in/out args
+ int32 locals; // number of 32-bit locals
+};
+
+#ifdef __WINDOWS__
+enum {
+ Windows = 1
+};
+#else
+enum {
+ Windows = 0
+};
+#endif
+
+/*
+ * defined macros
+ * you need super-gopher-guru privilege
+ * to add this list.
+ */
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
+#define nil ((void*)0)
+#define offsetof(s,m) (uint32)(&(((s*)0)->m))
+
+/*
+ * known to compiler
+ */
+enum
+{
+ AMEM,
+ ANOEQ,
+ ASTRING,
+ AINTER,
+ ANILINTER,
+ ASLICE,
+ AMEM8,
+ AMEM16,
+ AMEM32,
+ AMEM64,
+ AMEM128,
+ ANOEQ8,
+ ANOEQ16,
+ ANOEQ32,
+ ANOEQ64,
+ ANOEQ128,
+ Amax
+};
+
+
+enum {
+ Structrnd = sizeof(uintptr)
+};
+
+/*
+ * deferred subroutine calls
+ */
+struct Defer
+{
+ int32 siz;
+ byte* argp; // where args were copied from
+ byte* pc;
+ byte* fn;
+ Defer* link;
+ byte args[8]; // padded to actual size
+};
+
+/*
+ * panics
+ */
+struct Panic
+{
+ Eface arg; // argument to panic
+ byte* stackbase; // g->stackbase in panic
+ Panic* link; // link to earlier panic
+ bool recovered; // whether this panic is over
+};
+
+/*
+ * external data
+ */
+extern Alg runtime·algarray[Amax];
+extern String runtime·emptystring;
+G* runtime·allg;
+M* runtime·allm;
+extern int32 runtime·gomaxprocs;
+extern bool runtime·singleproc;
+extern uint32 runtime·panicking;
+extern int32 runtime·gcwaiting; // gc is waiting to run
+int8* runtime·goos;
+extern bool runtime·iscgo;
+extern void (*runtime·destroylock)(Lock*);
+
+/*
+ * common functions and data
+ */
+int32 runtime·strcmp(byte*, byte*);
+byte* runtime·strstr(byte*, byte*);
+int32 runtime·findnull(byte*);
+int32 runtime·findnullw(uint16*);
+void runtime·dump(byte*, int32);
+int32 runtime·runetochar(byte*, int32);
+int32 runtime·charntorune(int32*, uint8*, int32);
+
+/*
+ * very low level c-called
+ */
+#define FLUSH(x) USED(x)
+
+void runtime·gogo(Gobuf*, uintptr);
+void runtime·gogocall(Gobuf*, void(*)(void));
+void runtime·gosave(Gobuf*);
+void runtime·lessstack(void);
+void runtime·goargs(void);
+void runtime·goenvs(void);
+void runtime·goenvs_unix(void);
+void* runtime·getu(void);
+void runtime·throw(int8*);
+void runtime·panicstring(int8*);
+uint32 runtime·rnd(uint32, uint32);
+void runtime·prints(int8*);
+void runtime·printf(int8*, ...);
+byte* runtime·mchr(byte*, byte, byte*);
+int32 runtime·mcmp(byte*, byte*, uint32);
+void runtime·memmove(void*, void*, uint32);
+void* runtime·mal(uintptr);
+String runtime·catstring(String, String);
+String runtime·gostring(byte*);
+String runtime·gostringn(byte*, int32);
+Slice runtime·gobytes(byte*, int32);
+String runtime·gostringnocopy(byte*);
+String runtime·gostringw(uint16*);
+void runtime·initsig(int32);
+int32 runtime·gotraceback(void);
+void runtime·traceback(uint8 *pc, uint8 *sp, uint8 *lr, G* gp);
+void runtime·tracebackothers(G*);
+int32 runtime·write(int32, void*, int32);
+int32 runtime·mincore(void*, uintptr, byte*);
+bool runtime·cas(uint32*, uint32, uint32);
+bool runtime·casp(void**, void*, void*);
+// Don't confuse with XADD x86 instruction,
+// this one is actually 'addx', that is, add-and-fetch.
+uint32 runtime·xadd(uint32 volatile*, int32);
+uint32 runtime·xchg(uint32 volatile*, uint32);
+uint32 runtime·atomicload(uint32 volatile*);
+void runtime·atomicstore(uint32 volatile*, uint32);
+void* runtime·atomicloadp(void* volatile*);
+void runtime·atomicstorep(void* volatile*, void*);
+void runtime·jmpdefer(byte*, void*);
+void runtime·exit1(int32);
+void runtime·ready(G*);
+byte* runtime·getenv(int8*);
+int32 runtime·atoi(byte*);
+void runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void));
+void runtime·signalstack(byte*, int32);
+G* runtime·malg(int32);
+void runtime·minit(void);
+Func* runtime·findfunc(uintptr);
+int32 runtime·funcline(Func*, uintptr);
+void* runtime·stackalloc(uint32);
+void runtime·stackfree(void*, uintptr);
+MCache* runtime·allocmcache(void);
+void runtime·mallocinit(void);
+bool runtime·ifaceeq_c(Iface, Iface);
+bool runtime·efaceeq_c(Eface, Eface);
+uintptr runtime·ifacehash(Iface);
+uintptr runtime·efacehash(Eface);
+uintptr runtime·nohash(uint32, void*);
+uint32 runtime·noequal(uint32, void*, void*);
+void* runtime·malloc(uintptr size);
+void runtime·free(void *v);
+void runtime·addfinalizer(void*, void(*fn)(void*), int32);
+void runtime·walkfintab(void (*fn)(void*));
+void runtime·runpanic(Panic*);
+void* runtime·getcallersp(void*);
+int32 runtime·mcount(void);
+void runtime·mcall(void(*)(G*));
+uint32 runtime·fastrand1(void);
+
+void runtime·exit(int32);
+void runtime·breakpoint(void);
+void runtime·gosched(void);
+void runtime·goexit(void);
+void runtime·asmcgocall(void (*fn)(void*), void*);
+void runtime·entersyscall(void);
+void runtime·exitsyscall(void);
+G* runtime·newproc1(byte*, byte*, int32, int32, void*);
+void runtime·siginit(void);
+bool runtime·sigsend(int32 sig);
+void runtime·gettime(int64*, int32*);
+int32 runtime·callers(int32, uintptr*, int32);
+int32 runtime·gentraceback(byte*, byte*, byte*, G*, int32, uintptr*, int32);
+int64 runtime·nanotime(void);
+void runtime·dopanic(int32);
+void runtime·startpanic(void);
+void runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp);
+void runtime·resetcpuprofiler(int32);
+void runtime·setcpuprofilerate(void(*)(uintptr*, int32), int32);
+
+#pragma varargck argpos runtime·printf 1
+#pragma varargck type "d" int32
+#pragma varargck type "d" uint32
+#pragma varargck type "D" int64
+#pragma varargck type "D" uint64
+#pragma varargck type "x" int32
+#pragma varargck type "x" uint32
+#pragma varargck type "X" int64
+#pragma varargck type "X" uint64
+#pragma varargck type "p" void*
+#pragma varargck type "p" uintptr
+#pragma varargck type "s" int8*
+#pragma varargck type "s" uint8*
+#pragma varargck type "S" String
+
+// TODO(rsc): Remove. These are only temporary,
+// for the mark and sweep collector.
+void runtime·stoptheworld(void);
+void runtime·starttheworld(void);
+
+/*
+ * mutual exclusion locks. in the uncontended case,
+ * as fast as spin locks (just a few user-level instructions),
+ * but on the contention path they sleep in the kernel.
+ * a zeroed Lock is unlocked (no need to initialize each lock).
+ */
+void runtime·lock(Lock*);
+void runtime·unlock(Lock*);
+
+/*
+ * sleep and wakeup on one-time events.
+ * before any calls to notesleep or notewakeup,
+ * must call noteclear to initialize the Note.
+ * then, exactly one thread can call notesleep
+ * and exactly one thread can call notewakeup (once).
+ * once notewakeup has been called, the notesleep
+ * will return. future notesleep will return immediately.
+ * subsequent noteclear must be called only after
+ * previous notesleep has returned, e.g. it's disallowed
+ * to call noteclear straight after notewakeup.
+ */
+void runtime·noteclear(Note*);
+void runtime·notesleep(Note*);
+void runtime·notewakeup(Note*);
+
+/*
+ * This is consistent across Linux and BSD.
+ * If a new OS is added that is different, move this to
+ * $GOOS/$GOARCH/defs.h.
+ */
+#define EACCES 13
+
+/*
+ * low level C-called
+ */
+uint8* runtime·mmap(byte*, uintptr, int32, int32, int32, uint32);
+void runtime·munmap(uint8*, uintptr);
+void runtime·memclr(byte*, uintptr);
+void runtime·setcallerpc(void*, void*);
+void* runtime·getcallerpc(void*);
+
+/*
+ * runtime go-called
+ */
+void runtime·printbool(bool);
+void runtime·printfloat(float64);
+void runtime·printint(int64);
+void runtime·printiface(Iface);
+void runtime·printeface(Eface);
+void runtime·printstring(String);
+void runtime·printpc(void*);
+void runtime·printpointer(void*);
+void runtime·printuint(uint64);
+void runtime·printhex(uint64);
+void runtime·printslice(Slice);
+void runtime·printcomplex(Complex128);
+void reflect·call(byte*, byte*, uint32);
+void runtime·panic(Eface);
+void runtime·panicindex(void);
+void runtime·panicslice(void);
+
+/*
+ * runtime c-called (but written in Go)
+ */
+void runtime·newError(String, Eface*);
+void runtime·printany(Eface);
+void runtime·newTypeAssertionError(Type*, Type*, Type*, String*, String*, String*, String*, Eface*);
+void runtime·newErrorString(String, Eface*);
+void runtime·fadd64c(uint64, uint64, uint64*);
+void runtime·fsub64c(uint64, uint64, uint64*);
+void runtime·fmul64c(uint64, uint64, uint64*);
+void runtime·fdiv64c(uint64, uint64, uint64*);
+void runtime·fneg64c(uint64, uint64*);
+void runtime·f32to64c(uint32, uint64*);
+void runtime·f64to32c(uint64, uint32*);
+void runtime·fcmp64c(uint64, uint64, int32*, bool*);
+void runtime·fintto64c(int64, uint64*);
+void runtime·f64tointc(uint64, int64*, bool*);
+
+/*
+ * wrapped for go users
+ */
+float64 runtime·Inf(int32 sign);
+float64 runtime·NaN(void);
+float32 runtime·float32frombits(uint32 i);
+uint32 runtime·float32tobits(float32 f);
+float64 runtime·float64frombits(uint64 i);
+uint64 runtime·float64tobits(float64 f);
+float64 runtime·frexp(float64 d, int32 *ep);
+bool runtime·isInf(float64 f, int32 sign);
+bool runtime·isNaN(float64 f);
+float64 runtime·ldexp(float64 d, int32 e);
+float64 runtime·modf(float64 d, float64 *ip);
+void runtime·semacquire(uint32*);
+void runtime·semrelease(uint32*);
+String runtime·signame(int32 sig);
+int32 runtime·gomaxprocsfunc(int32 n);
+void runtime·procyield(uint32);
+void runtime·osyield(void);
+
+void runtime·mapassign(MapType*, Hmap*, byte*, byte*);
+void runtime·mapaccess(MapType*, Hmap*, byte*, byte*, bool*);
+void runtime·mapiternext(struct hash_iter*);
+bool runtime·mapiterkey(struct hash_iter*, void*);
+void runtime·mapiterkeyvalue(struct hash_iter*, void*, void*);
+Hmap* runtime·makemap_c(MapType*, int64);
+
+Hchan* runtime·makechan_c(ChanType*, int64);
+void runtime·chansend(ChanType*, Hchan*, void*, bool*);
+void runtime·chanrecv(ChanType*, Hchan*, void*, bool*, bool*);
+int32 runtime·chanlen(Hchan*);
+int32 runtime·chancap(Hchan*);
+
+void runtime·ifaceE2I(struct InterfaceType*, Eface, Iface*);
+
diff --git a/src/pkg/runtime/runtime1.goc b/src/pkg/runtime/runtime1.goc
new file mode 100644
index 000000000..da2d0c572
--- /dev/null
+++ b/src/pkg/runtime/runtime1.goc
@@ -0,0 +1,10 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "runtime.h"
+
+func GOMAXPROCS(n int32) (ret int32) {
+ ret = runtime·gomaxprocsfunc(n);
+}
diff --git a/src/pkg/runtime/sema.goc b/src/pkg/runtime/sema.goc
new file mode 100644
index 000000000..ae84351ed
--- /dev/null
+++ b/src/pkg/runtime/sema.goc
@@ -0,0 +1,180 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Semaphore implementation exposed to Go.
+// Intended use is provide a sleep and wakeup
+// primitive that can be used in the contended case
+// of other synchronization primitives.
+// Thus it targets the same goal as Linux's futex,
+// but it has much simpler semantics.
+//
+// That is, don't think of these as semaphores.
+// Think of them as a way to implement sleep and wakeup
+// such that every sleep is paired with a single wakeup,
+// even if, due to races, the wakeup happens before the sleep.
+//
+// See Mullender and Cox, ``Semaphores in Plan 9,''
+// http://swtch.com/semaphore.pdf
+
+package runtime
+#include "runtime.h"
+
+typedef struct Sema Sema;
+struct Sema
+{
+ uint32 volatile *addr;
+ G *g;
+ Sema *prev;
+ Sema *next;
+};
+
+typedef struct SemaRoot SemaRoot;
+struct SemaRoot
+{
+ Lock;
+ Sema *head;
+ Sema *tail;
+ // Number of waiters. Read w/o the lock.
+ uint32 volatile nwait;
+};
+
+// Prime to not correlate with any user patterns.
+#define SEMTABLESZ 251
+
+static union
+{
+ SemaRoot;
+ // Modern processors tend to have 64-byte cache lines,
+ // potentially with 128-byte effective cache line size for reading.
+ // While there are hypothetical architectures
+ // with 16-4096 byte cache lines, 128 looks like a good compromise.
+ uint8 pad[128];
+} semtable[SEMTABLESZ];
+
+static SemaRoot*
+semroot(uint32 *addr)
+{
+ return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
+}
+
+static void
+semqueue(SemaRoot *root, uint32 volatile *addr, Sema *s)
+{
+ s->g = g;
+ s->addr = addr;
+ s->next = nil;
+ s->prev = root->tail;
+ if(root->tail)
+ root->tail->next = s;
+ else
+ root->head = s;
+ root->tail = s;
+}
+
+static void
+semdequeue(SemaRoot *root, Sema *s)
+{
+ if(s->next)
+ s->next->prev = s->prev;
+ else
+ root->tail = s->prev;
+ if(s->prev)
+ s->prev->next = s->next;
+ else
+ root->head = s->next;
+ s->prev = nil;
+ s->next = nil;
+}
+
+static int32
+cansemacquire(uint32 *addr)
+{
+ uint32 v;
+
+ while((v = runtime·atomicload(addr)) > 0)
+ if(runtime·cas(addr, v, v-1))
+ return 1;
+ return 0;
+}
+
+void
+runtime·semacquire(uint32 volatile *addr)
+{
+ Sema s;
+ SemaRoot *root;
+
+ // Easy case.
+ if(cansemacquire(addr))
+ return;
+
+ // Harder case:
+ // increment waiter count
+ // try cansemacquire one more time, return if succeeded
+ // enqueue itself as a waiter
+ // sleep
+ // (waiter descriptor is dequeued by signaler)
+ root = semroot(addr);
+ for(;;) {
+ runtime·lock(root);
+ // Add ourselves to nwait to disable "easy case" in semrelease.
+ runtime·xadd(&root->nwait, 1);
+ // Check cansemacquire to avoid missed wakeup.
+ if(cansemacquire(addr)) {
+ runtime·xadd(&root->nwait, -1);
+ runtime·unlock(root);
+ return;
+ }
+ // Any semrelease after the cansemacquire knows we're waiting
+ // (we set nwait above), so go to sleep.
+ semqueue(root, addr, &s);
+ g->status = Gwaiting;
+ runtime·unlock(root);
+ runtime·gosched();
+ if(cansemacquire(addr))
+ return;
+ }
+}
+
+void
+runtime·semrelease(uint32 volatile *addr)
+{
+ Sema *s;
+ SemaRoot *root;
+
+ root = semroot(addr);
+ runtime·xadd(addr, 1);
+
+ // Easy case: no waiters?
+ // This check must happen after the xadd, to avoid a missed wakeup
+ // (see loop in semacquire).
+ if(runtime·atomicload(&root->nwait) == 0)
+ return;
+
+ // Harder case: search for a waiter and wake it.
+ runtime·lock(root);
+ if(runtime·atomicload(&root->nwait) == 0) {
+ // The count is already consumed by another goroutine,
+ // so no need to wake up another goroutine.
+ runtime·unlock(root);
+ return;
+ }
+ for(s = root->head; s; s = s->next) {
+ if(s->addr == addr) {
+ runtime·xadd(&root->nwait, -1);
+ semdequeue(root, s);
+ break;
+ }
+ }
+ runtime·unlock(root);
+ if(s)
+ runtime·ready(s->g);
+}
+
+func Semacquire(addr *uint32) {
+ runtime·semacquire(addr);
+}
+
+func Semrelease(addr *uint32) {
+ runtime·semrelease(addr);
+}
diff --git a/src/pkg/runtime/sema_test.go b/src/pkg/runtime/sema_test.go
new file mode 100644
index 000000000..d95bb1ec5
--- /dev/null
+++ b/src/pkg/runtime/sema_test.go
@@ -0,0 +1,100 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync/atomic"
+ "testing"
+)
+
+func BenchmarkSemaUncontended(b *testing.B) {
+ type PaddedSem struct {
+ sem uint32
+ pad [32]uint32
+ }
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ sem := new(PaddedSem)
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ runtime.Semrelease(&sem.sem)
+ runtime.Semacquire(&sem.sem)
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func benchmarkSema(b *testing.B, block, work bool) {
+ const CallsPerSched = 1000
+ const LocalWork = 100
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ c2 := make(chan bool, procs/2)
+ sem := uint32(0)
+ if block {
+ for p := 0; p < procs/2; p++ {
+ go func() {
+ runtime.Semacquire(&sem)
+ c2 <- true
+ }()
+ }
+ }
+ for p := 0; p < procs; p++ {
+ go func() {
+ foo := 0
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ runtime.Semrelease(&sem)
+ if work {
+ for i := 0; i < LocalWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ runtime.Semacquire(&sem)
+ }
+ }
+ c <- foo == 42
+ runtime.Semrelease(&sem)
+ }()
+ }
+ if block {
+ for p := 0; p < procs/2; p++ {
+ <-c2
+ }
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSemaSyntNonblock(b *testing.B) {
+ benchmarkSema(b, false, false)
+}
+
+func BenchmarkSemaSyntBlock(b *testing.B) {
+ benchmarkSema(b, true, false)
+}
+
+func BenchmarkSemaWorkNonblock(b *testing.B) {
+ benchmarkSema(b, false, true)
+}
+
+func BenchmarkSemaWorkBlock(b *testing.B) {
+ benchmarkSema(b, true, true)
+}
diff --git a/src/pkg/runtime/sig.go b/src/pkg/runtime/sig.go
new file mode 100644
index 000000000..6d560b900
--- /dev/null
+++ b/src/pkg/runtime/sig.go
@@ -0,0 +1,16 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Sigrecv returns a bitmask of signals that have arrived since the last call to Sigrecv.
+// It blocks until at least one signal arrives.
+func Sigrecv() uint32
+
+// Signame returns a string describing the signal, or "" if the signal is unknown.
+func Signame(sig int32) string
+
+// Siginit enables receipt of signals via Sigrecv. It should typically
+// be called during initialization.
+func Siginit()
diff --git a/src/pkg/runtime/sigqueue.goc b/src/pkg/runtime/sigqueue.goc
new file mode 100644
index 000000000..504590a54
--- /dev/null
+++ b/src/pkg/runtime/sigqueue.goc
@@ -0,0 +1,99 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements runtime support for signal handling.
+//
+// Most synchronization primitives are not available from
+// the signal handler (it cannot block and cannot use locks)
+// so the handler communicates with a processing goroutine
+// via struct sig, below.
+//
+// Ownership for sig.Note passes back and forth between
+// the signal handler and the signal goroutine in rounds.
+// The initial state is that sig.note is cleared (setup by siginit).
+// At the beginning of each round, mask == 0.
+// The round goes through three stages:
+//
+// (In parallel)
+// 1a) One or more signals arrive and are handled
+// by sigsend using cas to set bits in sig.mask.
+// The handler that changes sig.mask from zero to non-zero
+// calls notewakeup(&sig).
+// 1b) Sigrecv calls notesleep(&sig) to wait for the wakeup.
+//
+// 2) Having received the wakeup, sigrecv knows that sigsend
+// will not send another wakeup, so it can noteclear(&sig)
+// to prepare for the next round. (Sigsend may still be adding
+// signals to sig.mask at this point, which is fine.)
+//
+// 3) Sigrecv uses cas to grab the current sig.mask and zero it,
+// triggering the next round.
+//
+// The signal handler takes ownership of the note by atomically
+// changing mask from a zero to non-zero value. It gives up
+// ownership by calling notewakeup. The signal goroutine takes
+// ownership by returning from notesleep (caused by the notewakeup)
+// and gives up ownership by clearing mask.
+
+package runtime
+#include "runtime.h"
+#include "defs.h"
+
+static struct {
+ Note;
+ uint32 mask;
+ bool inuse;
+} sig;
+
+void
+runtime·siginit(void)
+{
+ runtime·noteclear(&sig);
+}
+
+// Called from sighandler to send a signal back out of the signal handling thread.
+bool
+runtime·sigsend(int32 s)
+{
+ uint32 bit, mask;
+
+ if(!sig.inuse)
+ return false;
+ bit = 1 << s;
+ for(;;) {
+ mask = sig.mask;
+ if(mask & bit)
+ break; // signal already in queue
+ if(runtime·cas(&sig.mask, mask, mask|bit)) {
+ // Added to queue.
+ // Only send a wakeup for the first signal in each round.
+ if(mask == 0)
+ runtime·notewakeup(&sig);
+ break;
+ }
+ }
+ return true;
+}
+
+// Called to receive a bitmask of queued signals.
+func Sigrecv() (m uint32) {
+ runtime·entersyscall();
+ runtime·notesleep(&sig);
+ runtime·exitsyscall();
+ runtime·noteclear(&sig);
+ for(;;) {
+ m = sig.mask;
+ if(runtime·cas(&sig.mask, m, 0))
+ break;
+ }
+}
+
+func Signame(sig int32) (name String) {
+ name = runtime·signame(sig);
+}
+
+func Siginit() {
+ runtime·initsig(SigQueue);
+ sig.inuse = true; // enable reception of signals; cannot disable
+}
diff --git a/src/pkg/runtime/slice.c b/src/pkg/runtime/slice.c
new file mode 100644
index 000000000..70534279b
--- /dev/null
+++ b/src/pkg/runtime/slice.c
@@ -0,0 +1,330 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "type.h"
+#include "malloc.h"
+
+static int32 debug = 0;
+
+static void makeslice1(SliceType*, int32, int32, Slice*);
+static void growslice1(SliceType*, Slice, int32, Slice *);
+static void appendslice1(SliceType*, Slice, Slice, Slice*);
+ void runtime·slicecopy(Slice to, Slice fm, uintptr width, int32 ret);
+
+// see also unsafe·NewArray
+// makeslice(typ *Type, len, cap int64) (ary []any);
+void
+runtime·makeslice(SliceType *t, int64 len, int64 cap, Slice ret)
+{
+ if(len < 0 || (int32)len != len)
+ runtime·panicstring("makeslice: len out of range");
+ if(cap < len || (int32)cap != cap || t->elem->size > 0 && cap > ((uintptr)-1) / t->elem->size)
+ runtime·panicstring("makeslice: cap out of range");
+
+ makeslice1(t, len, cap, &ret);
+
+ if(debug) {
+ runtime·printf("makeslice(%S, %D, %D); ret=",
+ *t->string, len, cap);
+ runtime·printslice(ret);
+ }
+}
+
+static void
+makeslice1(SliceType *t, int32 len, int32 cap, Slice *ret)
+{
+ uintptr size;
+
+ size = cap*t->elem->size;
+
+ ret->len = len;
+ ret->cap = cap;
+
+ if((t->elem->kind&KindNoPointers))
+ ret->array = runtime·mallocgc(size, FlagNoPointers, 1, 1);
+ else
+ ret->array = runtime·mal(size);
+}
+
+// appendslice(type *Type, x, y, []T) []T
+void
+runtime·appendslice(SliceType *t, Slice x, Slice y, Slice ret)
+{
+ appendslice1(t, x, y, &ret);
+}
+
+static void
+appendslice1(SliceType *t, Slice x, Slice y, Slice *ret)
+{
+ int32 m;
+ uintptr w;
+
+ m = x.len+y.len;
+
+ if(m < x.len)
+ runtime·throw("append: slice overflow");
+
+ if(m > x.cap)
+ growslice1(t, x, m, ret);
+ else
+ *ret = x;
+
+ w = t->elem->size;
+ runtime·memmove(ret->array + ret->len*w, y.array, y.len*w);
+ ret->len += y.len;
+}
+
+// growslice(type *Type, x, []T, n int64) []T
+void
+runtime·growslice(SliceType *t, Slice old, int64 n, Slice ret)
+{
+ int64 cap;
+
+ if(n < 1)
+ runtime·panicstring("growslice: invalid n");
+
+ cap = old.cap + n;
+
+ if((int32)cap != cap || cap > ((uintptr)-1) / t->elem->size)
+ runtime·panicstring("growslice: cap out of range");
+
+ growslice1(t, old, cap, &ret);
+
+ FLUSH(&ret);
+
+ if(debug) {
+ runtime·printf("growslice(%S,", *t->string);
+ runtime·printslice(old);
+ runtime·printf(", new cap=%D) =", cap);
+ runtime·printslice(ret);
+ }
+}
+
+static void
+growslice1(SliceType *t, Slice x, int32 newcap, Slice *ret)
+{
+ int32 m;
+
+ m = x.cap;
+ if(m == 0)
+ m = newcap;
+ else {
+ do {
+ if(x.len < 1024)
+ m += m;
+ else
+ m += m/4;
+ } while(m < newcap);
+ }
+ makeslice1(t, x.len, m, ret);
+ runtime·memmove(ret->array, x.array, ret->len * t->elem->size);
+}
+
+// sliceslice(old []any, lb uint64, hb uint64, width uint64) (ary []any);
+void
+runtime·sliceslice(Slice old, uint64 lb, uint64 hb, uint64 width, Slice ret)
+{
+ if(hb > old.cap || lb > hb) {
+ if(debug) {
+ runtime·prints("runtime.sliceslice: old=");
+ runtime·printslice(old);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; hb=");
+ runtime·printint(hb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("\n");
+
+ runtime·prints("oldarray: nel=");
+ runtime·printint(old.len);
+ runtime·prints("; cap=");
+ runtime·printint(old.cap);
+ runtime·prints("\n");
+ }
+ runtime·panicslice();
+ }
+
+ // new array is inside old array
+ ret.len = hb - lb;
+ ret.cap = old.cap - lb;
+ ret.array = old.array + lb*width;
+
+ FLUSH(&ret);
+
+ if(debug) {
+ runtime·prints("runtime.sliceslice: old=");
+ runtime·printslice(old);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; hb=");
+ runtime·printint(hb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("; ret=");
+ runtime·printslice(ret);
+ runtime·prints("\n");
+ }
+}
+
+// sliceslice1(old []any, lb uint64, width uint64) (ary []any);
+void
+runtime·sliceslice1(Slice old, uint64 lb, uint64 width, Slice ret)
+{
+ if(lb > old.len) {
+ if(debug) {
+ runtime·prints("runtime.sliceslice: old=");
+ runtime·printslice(old);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("\n");
+
+ runtime·prints("oldarray: nel=");
+ runtime·printint(old.len);
+ runtime·prints("; cap=");
+ runtime·printint(old.cap);
+ runtime·prints("\n");
+ }
+ runtime·panicslice();
+ }
+
+ // new array is inside old array
+ ret.len = old.len - lb;
+ ret.cap = old.cap - lb;
+ ret.array = old.array + lb*width;
+
+ FLUSH(&ret);
+
+ if(debug) {
+ runtime·prints("runtime.sliceslice: old=");
+ runtime·printslice(old);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("; ret=");
+ runtime·printslice(ret);
+ runtime·prints("\n");
+ }
+}
+
+// slicearray(old *any, nel uint64, lb uint64, hb uint64, width uint64) (ary []any);
+void
+runtime·slicearray(byte* old, uint64 nel, uint64 lb, uint64 hb, uint64 width, Slice ret)
+{
+ if(nel > 0 && old == nil) {
+ // crash if old == nil.
+ // could give a better message
+ // but this is consistent with all the in-line checks
+ // that the compiler inserts for other uses.
+ *old = 0;
+ }
+
+ if(hb > nel || lb > hb) {
+ if(debug) {
+ runtime·prints("runtime.slicearray: old=");
+ runtime·printpointer(old);
+ runtime·prints("; nel=");
+ runtime·printint(nel);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; hb=");
+ runtime·printint(hb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("\n");
+ }
+ runtime·panicslice();
+ }
+
+ // new array is inside old array
+ ret.len = hb-lb;
+ ret.cap = nel-lb;
+ ret.array = old + lb*width;
+
+ FLUSH(&ret);
+
+ if(debug) {
+ runtime·prints("runtime.slicearray: old=");
+ runtime·printpointer(old);
+ runtime·prints("; nel=");
+ runtime·printint(nel);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; hb=");
+ runtime·printint(hb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("; ret=");
+ runtime·printslice(ret);
+ runtime·prints("\n");
+ }
+}
+
+// slicecopy(to any, fr any, wid uint32) int
+void
+runtime·slicecopy(Slice to, Slice fm, uintptr width, int32 ret)
+{
+ if(fm.len == 0 || to.len == 0 || width == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = fm.len;
+ if(to.len < ret)
+ ret = to.len;
+
+ if(ret == 1 && width == 1) { // common case worth about 2x to do here
+ *to.array = *fm.array; // known to be a byte pointer
+ } else {
+ runtime·memmove(to.array, fm.array, ret*width);
+ }
+
+out:
+ FLUSH(&ret);
+
+ if(debug) {
+ runtime·prints("main·copy: to=");
+ runtime·printslice(to);
+ runtime·prints("; fm=");
+ runtime·printslice(fm);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("; ret=");
+ runtime·printint(ret);
+ runtime·prints("\n");
+ }
+}
+
+void
+runtime·slicestringcopy(Slice to, String fm, int32 ret)
+{
+ if(fm.len == 0 || to.len == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = fm.len;
+ if(to.len < ret)
+ ret = to.len;
+
+ runtime·memmove(to.array, fm.str, ret);
+
+out:
+ FLUSH(&ret);
+}
+
+void
+runtime·printslice(Slice a)
+{
+ runtime·prints("[");
+ runtime·printint(a.len);
+ runtime·prints("/");
+ runtime·printint(a.cap);
+ runtime·prints("]");
+ runtime·printpointer(a.array);
+}
diff --git a/src/pkg/runtime/softfloat64.go b/src/pkg/runtime/softfloat64.go
new file mode 100644
index 000000000..e0c3b7b73
--- /dev/null
+++ b/src/pkg/runtime/softfloat64.go
@@ -0,0 +1,498 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Software IEEE754 64-bit floating point.
+// Only referred to (and thus linked in) by arm port
+// and by gotest in this directory.
+
+package runtime
+
+const (
+ mantbits64 uint = 52
+ expbits64 uint = 11
+ bias64 = -1<<(expbits64-1) + 1
+
+ nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1
+ inf64 uint64 = (1<<expbits64 - 1) << mantbits64
+ neg64 uint64 = 1 << (expbits64 + mantbits64)
+
+ mantbits32 uint = 23
+ expbits32 uint = 8
+ bias32 = -1<<(expbits32-1) + 1
+
+ nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1
+ inf32 uint32 = (1<<expbits32 - 1) << mantbits32
+ neg32 uint32 = 1 << (expbits32 + mantbits32)
+)
+
+func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool) {
+ sign = f & (1 << (mantbits64 + expbits64))
+ mant = f & (1<<mantbits64 - 1)
+ exp = int(f>>mantbits64) & (1<<expbits64 - 1)
+
+ switch exp {
+ case 1<<expbits64 - 1:
+ if mant != 0 {
+ nan = true
+ return
+ }
+ inf = true
+ return
+
+ case 0:
+ // denormalized
+ if mant != 0 {
+ exp += bias64 + 1
+ for mant < 1<<mantbits64 {
+ mant <<= 1
+ exp--
+ }
+ }
+
+ default:
+ // add implicit top bit
+ mant |= 1 << mantbits64
+ exp += bias64
+ }
+ return
+}
+
+func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool) {
+ sign = f & (1 << (mantbits32 + expbits32))
+ mant = f & (1<<mantbits32 - 1)
+ exp = int(f>>mantbits32) & (1<<expbits32 - 1)
+
+ switch exp {
+ case 1<<expbits32 - 1:
+ if mant != 0 {
+ nan = true
+ return
+ }
+ inf = true
+ return
+
+ case 0:
+ // denormalized
+ if mant != 0 {
+ exp += bias32 + 1
+ for mant < 1<<mantbits32 {
+ mant <<= 1
+ exp--
+ }
+ }
+
+ default:
+ // add implicit top bit
+ mant |= 1 << mantbits32
+ exp += bias32
+ }
+ return
+}
+
+func fpack64(sign, mant uint64, exp int, trunc uint64) uint64 {
+ mant0, exp0, trunc0 := mant, exp, trunc
+ if mant == 0 {
+ return sign
+ }
+ for mant < 1<<mantbits64 {
+ mant <<= 1
+ exp--
+ }
+ for mant >= 4<<mantbits64 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant >= 2<<mantbits64 {
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ if mant >= 4<<mantbits64 {
+ mant >>= 1
+ exp++
+ }
+ }
+ mant >>= 1
+ exp++
+ }
+ if exp >= 1<<expbits64-1+bias64 {
+ return sign ^ inf64
+ }
+ if exp < bias64+1 {
+ if exp < bias64-int(mantbits64) {
+ return sign | 0
+ }
+ // repeat expecting denormal
+ mant, exp, trunc = mant0, exp0, trunc0
+ for exp < bias64 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ }
+ mant >>= 1
+ exp++
+ if mant < 1<<mantbits64 {
+ return sign | mant
+ }
+ }
+ return sign | uint64(exp-bias64)<<mantbits64 | mant&(1<<mantbits64-1)
+}
+
+func fpack32(sign, mant uint32, exp int, trunc uint32) uint32 {
+ mant0, exp0, trunc0 := mant, exp, trunc
+ if mant == 0 {
+ return sign
+ }
+ for mant < 1<<mantbits32 {
+ mant <<= 1
+ exp--
+ }
+ for mant >= 4<<mantbits32 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant >= 2<<mantbits32 {
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ if mant >= 4<<mantbits32 {
+ mant >>= 1
+ exp++
+ }
+ }
+ mant >>= 1
+ exp++
+ }
+ if exp >= 1<<expbits32-1+bias32 {
+ return sign ^ inf32
+ }
+ if exp < bias32+1 {
+ if exp < bias32-int(mantbits32) {
+ return sign | 0
+ }
+ // repeat expecting denormal
+ mant, exp, trunc = mant0, exp0, trunc0
+ for exp < bias32 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ }
+ mant >>= 1
+ exp++
+ if mant < 1<<mantbits32 {
+ return sign | mant
+ }
+ }
+ return sign | uint32(exp-bias32)<<mantbits32 | mant&(1<<mantbits32-1)
+}
+
+func fadd64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN + x or x + NaN = NaN
+ return nan64
+
+ case fi && gi && fs != gs: // +Inf + -Inf or -Inf + +Inf = NaN
+ return nan64
+
+ case fi: // ±Inf + g = ±Inf
+ return f
+
+ case gi: // f + ±Inf = ±Inf
+ return g
+
+ case fm == 0 && gm == 0 && fs != 0 && gs != 0: // -0 + -0 = -0
+ return f
+
+ case fm == 0: // 0 + g = g but 0 + -0 = +0
+ if gm == 0 {
+ g ^= gs
+ }
+ return g
+
+ case gm == 0: // f + 0 = f
+ return f
+
+ }
+
+ if fe < ge || fe == ge && fm < gm {
+ f, g, fs, fm, fe, gs, gm, ge = g, f, gs, gm, ge, fs, fm, fe
+ }
+
+ shift := uint(fe - ge)
+ fm <<= 2
+ gm <<= 2
+ trunc := gm & (1<<shift - 1)
+ gm >>= shift
+ if fs == gs {
+ fm += gm
+ } else {
+ fm -= gm
+ if trunc != 0 {
+ fm--
+ }
+ }
+ if fm == 0 {
+ fs = 0
+ }
+ return fpack64(fs, fm, fe-2, trunc)
+}
+
+func fsub64(f, g uint64) uint64 {
+ return fadd64(f, fneg64(g))
+}
+
+func fneg64(f uint64) uint64 {
+ return f ^ (1 << (mantbits64 + expbits64))
+}
+
+func fmul64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN * g or f * NaN = NaN
+ return nan64
+
+ case fi && gi: // Inf * Inf = Inf (with sign adjusted)
+ return f ^ gs
+
+ case fi && gm == 0, fm == 0 && gi: // 0 * Inf = Inf * 0 = NaN
+ return nan64
+
+ case fm == 0: // 0 * x = 0 (with sign adjusted)
+ return f ^ gs
+
+ case gm == 0: // x * 0 = 0 (with sign adjusted)
+ return g ^ fs
+ }
+
+ // 53-bit * 53-bit = 107- or 108-bit
+ lo, hi := mullu(fm, gm)
+ shift := mantbits64 - 1
+ trunc := lo & (1<<shift - 1)
+ mant := hi<<(64-shift) | lo>>shift
+ return fpack64(fs^gs, mant, fe+ge-1, trunc)
+}
+
+func fdiv64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN / g = f / NaN = NaN
+ return nan64
+
+ case fi && gi: // ±Inf / ±Inf = NaN
+ return nan64
+
+ case !fi && !gi && fm == 0 && gm == 0: // 0 / 0 = NaN
+ return nan64
+
+ case fi, !gi && gm == 0: // Inf / g = f / 0 = Inf
+ return fs ^ gs ^ inf64
+
+ case gi, fm == 0: // f / Inf = 0 / g = Inf
+ return fs ^ gs ^ 0
+ }
+ _, _, _, _ = fi, fn, gi, gn
+
+ // 53-bit<<54 / 53-bit = 53- or 54-bit.
+ shift := mantbits64 + 2
+ q, r := divlu(fm>>(64-shift), fm<<shift, gm)
+ return fpack64(fs^gs, q, fe-ge-2, r)
+}
+
+func f64to32(f uint64) uint32 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ if fn {
+ return nan32
+ }
+ fs32 := uint32(fs >> 32)
+ if fi {
+ return fs32 ^ inf32
+ }
+ const d = mantbits64 - mantbits32 - 1
+ return fpack32(fs32, uint32(fm>>d), fe-1, uint32(fm&(1<<d-1)))
+}
+
+func f32to64(f uint32) uint64 {
+ const d = mantbits64 - mantbits32
+ fs, fm, fe, fi, fn := funpack32(f)
+ if fn {
+ return nan64
+ }
+ fs64 := uint64(fs) << 32
+ if fi {
+ return fs64 ^ inf64
+ }
+ return fpack64(fs64, uint64(fm)<<d, fe, 0)
+}
+
+func fcmp64(f, g uint64) (cmp int, isnan bool) {
+ fs, fm, _, fi, fn := funpack64(f)
+ gs, gm, _, gi, gn := funpack64(g)
+
+ switch {
+ case fn, gn: // flag NaN
+ return 0, true
+
+ case !fi && !gi && fm == 0 && gm == 0: // ±0 == ±0
+ return 0, false
+
+ case fs > gs: // f < 0, g > 0
+ return -1, false
+
+ case fs < gs: // f > 0, g < 0
+ return +1, false
+
+ // Same sign, not NaN.
+ // Can compare encodings directly now.
+ // Reverse for sign.
+ case fs == 0 && f < g, fs != 0 && f > g:
+ return -1, false
+
+ case fs == 0 && f > g, fs != 0 && f < g:
+ return +1, false
+ }
+
+ // f == g
+ return 0, false
+}
+
+func f64toint(f uint64) (val int64, ok bool) {
+ fs, fm, fe, fi, fn := funpack64(f)
+
+ switch {
+ case fi, fn: // NaN
+ return 0, false
+
+ case fe < -1: // f < 0.5
+ return 0, false
+
+ case fe > 63: // f >= 2^63
+ if fs != 0 && fm == 0 { // f == -2^63
+ return -1 << 63, true
+ }
+ if fs != 0 {
+ return 0, false
+ }
+ return 0, false
+ }
+
+ for fe > int(mantbits64) {
+ fe--
+ fm <<= 1
+ }
+ for fe < int(mantbits64) {
+ fe++
+ fm >>= 1
+ }
+ val = int64(fm)
+ if fs != 0 {
+ val = -val
+ }
+ return val, true
+}
+
+func fintto64(val int64) (f uint64) {
+ fs := uint64(val) & (1 << 63)
+ mant := uint64(val)
+ if fs != 0 {
+ mant = -mant
+ }
+ return fpack64(fs, mant, int(mantbits64), 0)
+}
+
+// 64x64 -> 128 multiply.
+// adapted from hacker's delight.
+func mullu(u, v uint64) (lo, hi uint64) {
+ const (
+ s = 32
+ mask = 1<<s - 1
+ )
+ u0 := u & mask
+ u1 := u >> s
+ v0 := v & mask
+ v1 := v >> s
+ w0 := u0 * v0
+ t := u1*v0 + w0>>s
+ w1 := t & mask
+ w2 := t >> s
+ w1 += u0 * v1
+ return u * v, u1*v1 + w2 + w1>>s
+}
+
+// 128/64 -> 64 quotient, 64 remainder.
+// adapted from hacker's delight
+func divlu(u1, u0, v uint64) (q, r uint64) {
+ const b = 1 << 32
+
+ if u1 >= v {
+ return 1<<64 - 1, 1<<64 - 1
+ }
+
+ // s = nlz(v); v <<= s
+ s := uint(0)
+ for v&(1<<63) == 0 {
+ s++
+ v <<= 1
+ }
+
+ vn1 := v >> 32
+ vn0 := v & (1<<32 - 1)
+ un32 := u1<<s | u0>>(64-s)
+ un10 := u0 << s
+ un1 := un10 >> 32
+ un0 := un10 & (1<<32 - 1)
+ q1 := un32 / vn1
+ rhat := un32 - q1*vn1
+
+again1:
+ if q1 >= b || q1*vn0 > b*rhat+un1 {
+ q1--
+ rhat += vn1
+ if rhat < b {
+ goto again1
+ }
+ }
+
+ un21 := un32*b + un1 - q1*v
+ q0 := un21 / vn1
+ rhat = un21 - q0*vn1
+
+again2:
+ if q0 >= b || q0*vn0 > b*rhat+un0 {
+ q0--
+ rhat += vn1
+ if rhat < b {
+ goto again2
+ }
+ }
+
+ return q1*b + q0, (un21*b + un0 - q0*v) >> s
+}
+
+// callable from C
+
+func fadd64c(f, g uint64, ret *uint64) { *ret = fadd64(f, g) }
+func fsub64c(f, g uint64, ret *uint64) { *ret = fsub64(f, g) }
+func fmul64c(f, g uint64, ret *uint64) { *ret = fmul64(f, g) }
+func fdiv64c(f, g uint64, ret *uint64) { *ret = fdiv64(f, g) }
+func fneg64c(f uint64, ret *uint64) { *ret = fneg64(f) }
+func f32to64c(f uint32, ret *uint64) { *ret = f32to64(f) }
+func f64to32c(f uint64, ret *uint32) { *ret = f64to32(f) }
+func fcmp64c(f, g uint64, ret *int, retnan *bool) { *ret, *retnan = fcmp64(f, g) }
+func fintto64c(val int64, ret *uint64) { *ret = fintto64(val) }
+func f64tointc(f uint64, ret *int64, retok *bool) { *ret, *retok = f64toint(f) }
diff --git a/src/pkg/runtime/softfloat64_test.go b/src/pkg/runtime/softfloat64_test.go
new file mode 100644
index 000000000..fb7f3d3c0
--- /dev/null
+++ b/src/pkg/runtime/softfloat64_test.go
@@ -0,0 +1,198 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "math"
+ "rand"
+ . "runtime"
+ "testing"
+)
+
+// turn uint64 op into float64 op
+func fop(f func(x, y uint64) uint64) func(x, y float64) float64 {
+ return func(x, y float64) float64 {
+ bx := math.Float64bits(x)
+ by := math.Float64bits(y)
+ return math.Float64frombits(f(bx, by))
+ }
+}
+
+func add(x, y float64) float64 { return x + y }
+func sub(x, y float64) float64 { return x - y }
+func mul(x, y float64) float64 { return x * y }
+func div(x, y float64) float64 { return x / y }
+
+func TestFloat64(t *testing.T) {
+ base := []float64{
+ 0,
+ math.Copysign(0, -1),
+ -1,
+ 1,
+ math.NaN(),
+ math.Inf(+1),
+ math.Inf(-1),
+ 0.1,
+ 1.5,
+ 1.9999999999999998, // all 1s mantissa
+ 1.3333333333333333, // 1.010101010101...
+ 1.1428571428571428, // 1.001001001001...
+ 1.112536929253601e-308, // first normal
+ 2,
+ 4,
+ 8,
+ 16,
+ 32,
+ 64,
+ 128,
+ 256,
+ 3,
+ 12,
+ 1234,
+ 123456,
+ -0.1,
+ -1.5,
+ -1.9999999999999998,
+ -1.3333333333333333,
+ -1.1428571428571428,
+ -2,
+ -3,
+ 1e-200,
+ 1e-300,
+ 1e-310,
+ 5e-324,
+ 1e-105,
+ 1e-305,
+ 1e+200,
+ 1e+306,
+ 1e+307,
+ 1e+308,
+ }
+ all := make([]float64, 200)
+ copy(all, base)
+ for i := len(base); i < len(all); i++ {
+ all[i] = rand.NormFloat64()
+ }
+
+ test(t, "+", add, fop(Fadd64), all)
+ test(t, "-", sub, fop(Fsub64), all)
+ if GOARCH != "386" { // 386 is not precise!
+ test(t, "*", mul, fop(Fmul64), all)
+ test(t, "/", div, fop(Fdiv64), all)
+ }
+}
+
+// 64 -hw-> 32 -hw-> 64
+func trunc32(f float64) float64 {
+ return float64(float32(f))
+}
+
+// 64 -sw->32 -hw-> 64
+func to32sw(f float64) float64 {
+ return float64(math.Float32frombits(F64to32(math.Float64bits(f))))
+}
+
+// 64 -hw->32 -sw-> 64
+func to64sw(f float64) float64 {
+ return math.Float64frombits(F32to64(math.Float32bits(float32(f))))
+}
+
+// float64 -hw-> int64 -hw-> float64
+func hwint64(f float64) float64 {
+ return float64(int64(f))
+}
+
+// float64 -hw-> int32 -hw-> float64
+func hwint32(f float64) float64 {
+ return float64(int32(f))
+}
+
+// float64 -sw-> int64 -hw-> float64
+func toint64sw(f float64) float64 {
+ i, ok := F64toint(math.Float64bits(f))
+ if !ok {
+ // There's no right answer for out of range.
+ // Match the hardware to pass the test.
+ i = int64(f)
+ }
+ return float64(i)
+}
+
+// float64 -hw-> int64 -sw-> float64
+func fromint64sw(f float64) float64 {
+ return math.Float64frombits(Fintto64(int64(f)))
+}
+
+var nerr int
+
+func err(t *testing.T, format string, args ...interface{}) {
+ t.Errorf(format, args...)
+
+ // cut errors off after a while.
+ // otherwise we spend all our time
+ // allocating memory to hold the
+ // formatted output.
+ if nerr++; nerr >= 10 {
+ t.Fatal("too many errors")
+ }
+}
+
+func test(t *testing.T, op string, hw, sw func(float64, float64) float64, all []float64) {
+ for _, f := range all {
+ for _, g := range all {
+ h := hw(f, g)
+ s := sw(f, g)
+ if !same(h, s) {
+ err(t, "%g %s %g = sw %g, hw %g\n", f, op, g, s, h)
+ }
+ testu(t, "to32", trunc32, to32sw, h)
+ testu(t, "to64", trunc32, to64sw, h)
+ testu(t, "toint64", hwint64, toint64sw, h)
+ testu(t, "fromint64", hwint64, fromint64sw, h)
+ testcmp(t, f, h)
+ testcmp(t, h, f)
+ testcmp(t, g, h)
+ testcmp(t, h, g)
+ }
+ }
+}
+
+func testu(t *testing.T, op string, hw, sw func(float64) float64, v float64) {
+ h := hw(v)
+ s := sw(v)
+ if !same(h, s) {
+ err(t, "%s %g = sw %g, hw %g\n", op, v, s, h)
+ }
+}
+
+func hwcmp(f, g float64) (cmp int, isnan bool) {
+ switch {
+ case f < g:
+ return -1, false
+ case f > g:
+ return +1, false
+ case f == g:
+ return 0, false
+ }
+ return 0, true // must be NaN
+}
+
+func testcmp(t *testing.T, f, g float64) {
+ hcmp, hisnan := hwcmp(f, g)
+ scmp, sisnan := Fcmp64(math.Float64bits(f), math.Float64bits(g))
+ if hcmp != scmp || hisnan != sisnan {
+ err(t, "cmp(%g, %g) = sw %v, %v, hw %v, %v\n", f, g, scmp, sisnan, hcmp, hisnan)
+ }
+}
+
+func same(f, g float64) bool {
+ if math.IsNaN(f) && math.IsNaN(g) {
+ return true
+ }
+ if math.Copysign(1, f) != math.Copysign(1, g) {
+ return false
+ }
+ return f == g
+}
diff --git a/src/pkg/runtime/stack.h b/src/pkg/runtime/stack.h
new file mode 100644
index 000000000..44d5533f4
--- /dev/null
+++ b/src/pkg/runtime/stack.h
@@ -0,0 +1,97 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Stack layout parameters.
+Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
+
+The per-goroutine g->stackguard is set to point StackGuard bytes
+above the bottom of the stack. Each function compares its stack
+pointer against g->stackguard to check for overflow. To cut one
+instruction from the check sequence for functions with tiny frames,
+the stack is allowed to protrude StackSmall bytes below the stack
+guard. Functions with large frames don't bother with the check and
+always call morestack. The sequences are (for amd64, others are
+similar):
+
+ guard = g->stackguard
+ frame = function's stack frame size
+ argsize = size of function arguments (call + return)
+
+ stack frame size <= StackSmall:
+ CMPQ guard, SP
+ JHI 3(PC)
+ MOVQ m->morearg, $(argsize << 32)
+ CALL morestack(SB)
+
+ stack frame size > StackSmall but < StackBig
+ LEAQ (frame-StackSmall)(SP), R0
+ CMPQ guard, R0
+ JHI 3(PC)
+ MOVQ m->morearg, $(argsize << 32)
+ CALL morestack(SB)
+
+ stack frame size >= StackBig:
+ MOVQ m->morearg, $((argsize << 32) | frame)
+ CALL morestack(SB)
+
+The bottom StackGuard - StackSmall bytes are important: there has
+to be enough room to execute functions that refuse to check for
+stack overflow, either because they need to be adjacent to the
+actual caller's frame (deferproc) or because they handle the imminent
+stack overflow (morestack).
+
+For example, deferproc might call malloc, which does one of the
+above checks (without allocating a full frame), which might trigger
+a call to morestack. This sequence needs to fit in the bottom
+section of the stack. On amd64, morestack's frame is 40 bytes, and
+deferproc's frame is 56 bytes. That fits well within the
+StackGuard - StackSmall = 128 bytes at the bottom.
+The linkers explore all possible call traces involving non-splitting
+functions to make sure that this limit cannot be violated.
+ */
+
+enum {
+ // StackSystem is a number of additional bytes to add
+ // to each stack below the usual guard area for OS-specific
+ // purposes like signal handling. Used on Windows because
+ // it does not use a separate stack.
+#ifdef __WINDOWS__
+ StackSystem = 2048,
+#else
+ StackSystem = 0,
+#endif
+
+ // The amount of extra stack to allocate beyond the size
+ // needed for the single frame that triggered the split.
+ StackExtra = 1024,
+
+ // The minimum stack segment size to allocate.
+ // If the amount needed for the splitting frame + StackExtra
+ // is less than this number, the stack will have this size instead.
+ StackMin = 4096,
+ FixedStack = StackMin + StackSystem,
+
+ // Functions that need frames bigger than this call morestack
+ // unconditionally. That is, on entry to a function it is assumed
+ // that the amount of space available in the current stack segment
+ // couldn't possibly be bigger than StackBig. If stack segments
+ // do run with more space than StackBig, the space may not be
+ // used efficiently. As a result, StackBig should not be significantly
+ // smaller than StackMin or StackExtra.
+ StackBig = 4096,
+
+ // The stack guard is a pointer this many bytes above the
+ // bottom of the stack.
+ StackGuard = 256 + StackSystem,
+
+ // After a stack split check the SP is allowed to be this
+ // many bytes below the stack guard. This saves an instruction
+ // in the checking sequence for tiny frames.
+ StackSmall = 128,
+
+ // The maximum number of bytes that a chain of NOSPLIT
+ // functions can use.
+ StackLimit = StackGuard - StackSystem - StackSmall,
+};
diff --git a/src/pkg/runtime/string.goc b/src/pkg/runtime/string.goc
new file mode 100644
index 000000000..48bf3183b
--- /dev/null
+++ b/src/pkg/runtime/string.goc
@@ -0,0 +1,360 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "runtime.h"
+#include "malloc.h"
+
+String runtime·emptystring;
+
+int32
+runtime·findnull(byte *s)
+{
+ int32 l;
+
+ if(s == nil)
+ return 0;
+ for(l=0; s[l]!=0; l++)
+ ;
+ return l;
+}
+
+int32
+runtime·findnullw(uint16 *s)
+{
+ int32 l;
+
+ if(s == nil)
+ return 0;
+ for(l=0; s[l]!=0; l++)
+ ;
+ return l;
+}
+
+uint32 runtime·maxstring = 256;
+
+String
+runtime·gostringsize(int32 l)
+{
+ String s;
+ uint32 ms;
+
+ if(l == 0)
+ return runtime·emptystring;
+ s.str = runtime·mal(l+1); // leave room for NUL for C runtime (e.g., callers of getenv)
+ s.len = l;
+ for(;;) {
+ ms = runtime·maxstring;
+ if((uint32)l <= ms || runtime·cas(&runtime·maxstring, ms, (uint32)l))
+ break;
+ }
+ return s;
+}
+
+String
+runtime·gostring(byte *str)
+{
+ int32 l;
+ String s;
+
+ l = runtime·findnull(str);
+ s = runtime·gostringsize(l);
+ runtime·memmove(s.str, str, l);
+ return s;
+}
+
+String
+runtime·gostringn(byte *str, int32 l)
+{
+ String s;
+
+ s = runtime·gostringsize(l);
+ runtime·memmove(s.str, str, l);
+ return s;
+}
+
+Slice
+runtime·gobytes(byte *p, int32 n)
+{
+ Slice sl;
+
+ sl.array = runtime·mallocgc(n, FlagNoPointers, 1, 0);
+ runtime·memmove(sl.array, p, n);
+ return sl;
+}
+
+String
+runtime·gostringnocopy(byte *str)
+{
+ String s;
+
+ s.str = str;
+ s.len = runtime·findnull(str);
+ return s;
+}
+
+String
+runtime·gostringw(uint16 *str)
+{
+ int32 n, i;
+ byte buf[8];
+ String s;
+
+ n = 0;
+ for(i=0; str[i]; i++)
+ n += runtime·runetochar(buf, str[i]);
+ s = runtime·gostringsize(n+4);
+ n = 0;
+ for(i=0; str[i]; i++)
+ n += runtime·runetochar(s.str+n, str[i]);
+ s.len = n;
+ return s;
+}
+
+String
+runtime·catstring(String s1, String s2)
+{
+ String s3;
+
+ if(s1.len == 0)
+ return s2;
+ if(s2.len == 0)
+ return s1;
+
+ s3 = runtime·gostringsize(s1.len + s2.len);
+ runtime·memmove(s3.str, s1.str, s1.len);
+ runtime·memmove(s3.str+s1.len, s2.str, s2.len);
+ return s3;
+}
+
+static String
+concatstring(int32 n, String *s)
+{
+ int32 i, l;
+ String out;
+
+ l = 0;
+ for(i=0; i<n; i++) {
+ if(l + s[i].len < l)
+ runtime·throw("string concatenation too long");
+ l += s[i].len;
+ }
+
+ out = runtime·gostringsize(l);
+ l = 0;
+ for(i=0; i<n; i++) {
+ runtime·memmove(out.str+l, s[i].str, s[i].len);
+ l += s[i].len;
+ }
+ return out;
+}
+
+#pragma textflag 7
+// s1 is the first of n strings.
+// the output string follows.
+func concatstring(n int32, s1 String) {
+ (&s1)[n] = concatstring(n, &s1);
+}
+
+static int32
+cmpstring(String s1, String s2)
+{
+ uint32 i, l;
+ byte c1, c2;
+
+ l = s1.len;
+ if(s2.len < l)
+ l = s2.len;
+ for(i=0; i<l; i++) {
+ c1 = s1.str[i];
+ c2 = s2.str[i];
+ if(c1 < c2)
+ return -1;
+ if(c1 > c2)
+ return +1;
+ }
+ if(s1.len < s2.len)
+ return -1;
+ if(s1.len > s2.len)
+ return +1;
+ return 0;
+}
+
+func cmpstring(s1 String, s2 String) (v int32) {
+ v = cmpstring(s1, s2);
+}
+
+int32
+runtime·strcmp(byte *s1, byte *s2)
+{
+ uint32 i;
+ byte c1, c2;
+
+ for(i=0;; i++) {
+ c1 = s1[i];
+ c2 = s2[i];
+ if(c1 < c2)
+ return -1;
+ if(c1 > c2)
+ return +1;
+ if(c1 == 0)
+ return 0;
+ }
+}
+
+byte*
+runtime·strstr(byte *s1, byte *s2)
+{
+ byte *sp1, *sp2;
+
+ if(*s2 == 0)
+ return s1;
+ for(; *s1; s1++) {
+ if(*s1 != *s2)
+ continue;
+ sp1 = s1;
+ sp2 = s2;
+ for(;;) {
+ if(*sp2 == 0)
+ return s1;
+ if(*sp1++ != *sp2++)
+ break;
+ }
+ }
+ return nil;
+}
+
+func slicestring(si String, lindex int32, hindex int32) (so String) {
+ int32 l;
+
+ if(lindex < 0 || lindex > si.len ||
+ hindex < lindex || hindex > si.len) {
+ runtime·panicslice();
+ }
+
+ l = hindex-lindex;
+ so.str = si.str + lindex;
+ so.len = l;
+}
+
+func slicestring1(si String, lindex int32) (so String) {
+ int32 l;
+
+ if(lindex < 0 || lindex > si.len) {
+ runtime·panicslice();
+ }
+
+ l = si.len-lindex;
+ so.str = si.str + lindex;
+ so.len = l;
+}
+
+func intstring(v int64) (s String) {
+ s = runtime·gostringsize(8);
+ s.len = runtime·runetochar(s.str, v);
+}
+
+func slicebytetostring(b Slice) (s String) {
+ s = runtime·gostringsize(b.len);
+ runtime·memmove(s.str, b.array, s.len);
+}
+
+func stringtoslicebyte(s String) (b Slice) {
+ b.array = runtime·mallocgc(s.len, FlagNoPointers, 1, 1);
+ b.len = s.len;
+ b.cap = s.len;
+ runtime·memmove(b.array, s.str, s.len);
+}
+
+func sliceinttostring(b Slice) (s String) {
+ int32 siz1, siz2, i;
+ int32 *a;
+ byte dum[8];
+
+ a = (int32*)b.array;
+ siz1 = 0;
+ for(i=0; i<b.len; i++) {
+ siz1 += runtime·runetochar(dum, a[i]);
+ }
+
+ s = runtime·gostringsize(siz1+4);
+ siz2 = 0;
+ for(i=0; i<b.len; i++) {
+ // check for race
+ if(siz2 >= siz1)
+ break;
+ siz2 += runtime·runetochar(s.str+siz2, a[i]);
+ }
+ s.len = siz2;
+}
+
+func stringtosliceint(s String) (b Slice) {
+ int32 n;
+ int32 dum, *r;
+ uint8 *p, *ep;
+
+ // two passes.
+ // unlike sliceinttostring, no race because strings are immutable.
+ p = s.str;
+ ep = s.str+s.len;
+ n = 0;
+ while(p < ep) {
+ p += runtime·charntorune(&dum, p, ep-p);
+ n++;
+ }
+
+ b.array = runtime·mallocgc(n*sizeof(r[0]), FlagNoPointers, 1, 1);
+ b.len = n;
+ b.cap = n;
+ p = s.str;
+ r = (int32*)b.array;
+ while(p < ep)
+ p += runtime·charntorune(r++, p, ep-p);
+}
+
+enum
+{
+ Runeself = 0x80,
+};
+
+func stringiter(s String, k int32) (retk int32) {
+ int32 l;
+
+ if(k >= s.len) {
+ // retk=0 is end of iteration
+ retk = 0;
+ goto out;
+ }
+
+ l = s.str[k];
+ if(l < Runeself) {
+ retk = k+1;
+ goto out;
+ }
+
+ // multi-char rune
+ retk = k + runtime·charntorune(&l, s.str+k, s.len-k);
+
+out:
+}
+
+func stringiter2(s String, k int32) (retk int32, retv int32) {
+ if(k >= s.len) {
+ // retk=0 is end of iteration
+ retk = 0;
+ retv = 0;
+ goto out;
+ }
+
+ retv = s.str[k];
+ if(retv < Runeself) {
+ retk = k+1;
+ goto out;
+ }
+
+ // multi-char rune
+ retk = k + runtime·charntorune(&retv, s.str+k, s.len-k);
+
+out:
+}
diff --git a/src/pkg/runtime/symtab.c b/src/pkg/runtime/symtab.c
new file mode 100644
index 000000000..d2ebf9b40
--- /dev/null
+++ b/src/pkg/runtime/symtab.c
@@ -0,0 +1,466 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Runtime symbol table access. Work in progress.
+// The Plan 9 symbol table is not in a particularly convenient form.
+// The routines here massage it into a more usable form; eventually
+// we'll change 6l to do this for us, but it is easier to experiment
+// here than to change 6l and all the other tools.
+//
+// The symbol table also needs to be better integrated with the type
+// strings table in the future. This is just a quick way to get started
+// and figure out exactly what we want.
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+#include "arch.h"
+
+extern byte pclntab[], epclntab[], symtab[], esymtab[];
+
+typedef struct Sym Sym;
+struct Sym
+{
+ uintptr value;
+ byte symtype;
+ byte *name;
+// byte *gotype;
+};
+
+// Walk over symtab, calling fn(&s) for each symbol.
+static void
+walksymtab(void (*fn)(Sym*))
+{
+ byte *p, *ep, *q;
+ Sym s;
+
+ p = symtab;
+ ep = esymtab;
+ while(p < ep) {
+ if(p + 7 > ep)
+ break;
+ s.value = ((uint32)p[0]<<24) | ((uint32)p[1]<<16) | ((uint32)p[2]<<8) | ((uint32)p[3]);
+
+ if(!(p[4]&0x80))
+ break;
+ s.symtype = p[4] & ~0x80;
+ p += 5;
+ s.name = p;
+ if(s.symtype == 'z' || s.symtype == 'Z') {
+ // path reference string - skip first byte,
+ // then 2-byte pairs ending at two zeros.
+ q = p+1;
+ for(;;) {
+ if(q+2 > ep)
+ return;
+ if(q[0] == '\0' && q[1] == '\0')
+ break;
+ q += 2;
+ }
+ p = q+2;
+ }else{
+ q = runtime·mchr(p, '\0', ep);
+ if(q == nil)
+ break;
+ p = q+1;
+ }
+ p += 4; // go type
+ fn(&s);
+ }
+}
+
+// Symtab walker; accumulates info about functions.
+
+static Func *func;
+static int32 nfunc;
+
+static byte **fname;
+static int32 nfname;
+
+static uint32 funcinit;
+static Lock funclock;
+
+static void
+dofunc(Sym *sym)
+{
+ Func *f;
+
+ switch(sym->symtype) {
+ case 't':
+ case 'T':
+ case 'l':
+ case 'L':
+ if(runtime·strcmp(sym->name, (byte*)"etext") == 0)
+ break;
+ if(func == nil) {
+ nfunc++;
+ break;
+ }
+ f = &func[nfunc++];
+ f->name = runtime·gostringnocopy(sym->name);
+ f->entry = sym->value;
+ if(sym->symtype == 'L' || sym->symtype == 'l')
+ f->frame = -sizeof(uintptr);
+ break;
+ case 'm':
+ if(nfunc > 0 && func != nil)
+ func[nfunc-1].frame += sym->value;
+ break;
+ case 'p':
+ if(nfunc > 0 && func != nil) {
+ f = &func[nfunc-1];
+ // args counts 32-bit words.
+ // sym->value is the arg's offset.
+ // don't know width of this arg, so assume it is 64 bits.
+ if(f->args < sym->value/4 + 2)
+ f->args = sym->value/4 + 2;
+ }
+ break;
+ case 'f':
+ if(fname == nil) {
+ if(sym->value >= nfname) {
+ if(sym->value >= 0x10000) {
+ runtime·printf("invalid symbol file index %p\n", sym->value);
+ runtime·throw("mangled symbol table");
+ }
+ nfname = sym->value+1;
+ }
+ break;
+ }
+ fname[sym->value] = sym->name;
+ break;
+ }
+}
+
+// put together the path name for a z entry.
+// the f entries have been accumulated into fname already.
+static void
+makepath(byte *buf, int32 nbuf, byte *path)
+{
+ int32 n, len;
+ byte *p, *ep, *q;
+
+ if(nbuf <= 0)
+ return;
+
+ p = buf;
+ ep = buf + nbuf;
+ *p = '\0';
+ for(;;) {
+ if(path[0] == 0 && path[1] == 0)
+ break;
+ n = (path[0]<<8) | path[1];
+ path += 2;
+ if(n >= nfname)
+ break;
+ q = fname[n];
+ len = runtime·findnull(q);
+ if(p+1+len >= ep)
+ break;
+ if(p > buf && p[-1] != '/')
+ *p++ = '/';
+ runtime·memmove(p, q, len+1);
+ p += len;
+ }
+}
+
+// walk symtab accumulating path names for use by pc/ln table.
+// don't need the full generality of the z entry history stack because
+// there are no includes in go (and only sensible includes in our c);
+// assume code only appear in top-level files.
+static void
+dosrcline(Sym *sym)
+{
+ static byte srcbuf[1000];
+ static struct {
+ String srcstring;
+ int32 aline;
+ int32 delta;
+ } files[200];
+ static int32 incstart;
+ static int32 nfunc, nfile, nhist;
+ Func *f;
+ int32 i;
+
+ switch(sym->symtype) {
+ case 't':
+ case 'T':
+ if(runtime·strcmp(sym->name, (byte*)"etext") == 0)
+ break;
+ f = &func[nfunc++];
+ // find source file
+ for(i = 0; i < nfile - 1; i++) {
+ if (files[i+1].aline > f->ln0)
+ break;
+ }
+ f->src = files[i].srcstring;
+ f->ln0 -= files[i].delta;
+ break;
+ case 'z':
+ if(sym->value == 1) {
+ // entry for main source file for a new object.
+ makepath(srcbuf, sizeof srcbuf, sym->name+1);
+ nhist = 0;
+ nfile = 0;
+ if(nfile == nelem(files))
+ return;
+ files[nfile].srcstring = runtime·gostring(srcbuf);
+ files[nfile].aline = 0;
+ files[nfile++].delta = 0;
+ } else {
+ // push or pop of included file.
+ makepath(srcbuf, sizeof srcbuf, sym->name+1);
+ if(srcbuf[0] != '\0') {
+ if(nhist++ == 0)
+ incstart = sym->value;
+ if(nhist == 0 && nfile < nelem(files)) {
+ // new top-level file
+ files[nfile].srcstring = runtime·gostring(srcbuf);
+ files[nfile].aline = sym->value;
+ // this is "line 0"
+ files[nfile++].delta = sym->value - 1;
+ }
+ }else{
+ if(--nhist == 0)
+ files[nfile-1].delta += sym->value - incstart;
+ }
+ }
+ }
+}
+
+// Interpret pc/ln table, saving the subpiece for each func.
+static void
+splitpcln(void)
+{
+ int32 line;
+ uintptr pc;
+ byte *p, *ep;
+ Func *f, *ef;
+ int32 pcquant;
+
+ if(pclntab == epclntab || nfunc == 0)
+ return;
+
+ switch(thechar) {
+ case '5':
+ pcquant = 4;
+ break;
+ default: // 6, 8
+ pcquant = 1;
+ break;
+ }
+
+ // pc/ln table bounds
+ p = pclntab;
+ ep = epclntab;
+
+ f = func;
+ ef = func + nfunc;
+ pc = func[0].entry; // text base
+ f->pcln.array = p;
+ f->pc0 = pc;
+ line = 0;
+ for(;;) {
+ while(p < ep && *p > 128)
+ pc += pcquant * (*p++ - 128);
+ // runtime·printf("pc<%p targetpc=%p line=%d\n", pc, targetpc, line);
+ if(*p == 0) {
+ if(p+5 > ep)
+ break;
+ // 4 byte add to line
+ line += (p[1]<<24) | (p[2]<<16) | (p[3]<<8) | p[4];
+ p += 5;
+ } else if(*p <= 64)
+ line += *p++;
+ else
+ line -= *p++ - 64;
+
+ // pc, line now match.
+ // Because the state machine begins at pc==entry and line==0,
+ // it can happen - just at the beginning! - that the update may
+ // have updated line but left pc alone, to tell us the true line
+ // number for pc==entry. In that case, update f->ln0.
+ // Having the correct initial line number is important for choosing
+ // the correct file in dosrcline above.
+ if(f == func && pc == f->pc0) {
+ f->pcln.array = p;
+ f->pc0 = pc + pcquant;
+ f->ln0 = line;
+ }
+
+ if(f < ef && pc >= (f+1)->entry) {
+ f->pcln.len = p - f->pcln.array;
+ f->pcln.cap = f->pcln.len;
+ do
+ f++;
+ while(f < ef && pc >= (f+1)->entry);
+ f->pcln.array = p;
+ // pc0 and ln0 are the starting values for
+ // the loop over f->pcln, so pc must be
+ // adjusted by the same pcquant update
+ // that we're going to do as we continue our loop.
+ f->pc0 = pc + pcquant;
+ f->ln0 = line;
+ }
+
+ pc += pcquant;
+ }
+ if(f < ef) {
+ f->pcln.len = p - f->pcln.array;
+ f->pcln.cap = f->pcln.len;
+ }
+}
+
+
+// Return actual file line number for targetpc in func f.
+// (Source file is f->src.)
+// NOTE(rsc): If you edit this function, also edit extern.go:/FileLine
+int32
+runtime·funcline(Func *f, uintptr targetpc)
+{
+ byte *p, *ep;
+ uintptr pc;
+ int32 line;
+ int32 pcquant;
+
+ enum {
+ debug = 0
+ };
+
+ switch(thechar) {
+ case '5':
+ pcquant = 4;
+ break;
+ default: // 6, 8
+ pcquant = 1;
+ break;
+ }
+
+ p = f->pcln.array;
+ ep = p + f->pcln.len;
+ pc = f->pc0;
+ line = f->ln0;
+ if(debug && !runtime·panicking)
+ runtime·printf("funcline start pc=%p targetpc=%p line=%d tab=%p+%d\n",
+ pc, targetpc, line, p, (int32)f->pcln.len);
+ for(;;) {
+ // Table is a sequence of updates.
+
+ // Each update says first how to adjust the pc,
+ // in possibly multiple instructions...
+ while(p < ep && *p > 128)
+ pc += pcquant * (*p++ - 128);
+
+ if(debug && !runtime·panicking)
+ runtime·printf("pc<%p targetpc=%p line=%d\n", pc, targetpc, line);
+
+ // If the pc has advanced too far or we're out of data,
+ // stop and the last known line number.
+ if(pc > targetpc || p >= ep)
+ break;
+
+ // ... and then how to adjust the line number,
+ // in a single instruction.
+ if(*p == 0) {
+ if(p+5 > ep)
+ break;
+ line += (p[1]<<24) | (p[2]<<16) | (p[3]<<8) | p[4];
+ p += 5;
+ } else if(*p <= 64)
+ line += *p++;
+ else
+ line -= *p++ - 64;
+ // Now pc, line pair is consistent.
+ if(debug && !runtime·panicking)
+ runtime·printf("pc=%p targetpc=%p line=%d\n", pc, targetpc, line);
+
+ // PC increments implicitly on each iteration.
+ pc += pcquant;
+ }
+ return line;
+}
+
+static void
+buildfuncs(void)
+{
+ extern byte etext[];
+
+ if(func != nil)
+ return;
+
+ // Memory profiling uses this code;
+ // can deadlock if the profiler ends
+ // up back here.
+ m->nomemprof++;
+
+ // count funcs, fnames
+ nfunc = 0;
+ nfname = 0;
+ walksymtab(dofunc);
+
+ // initialize tables
+ func = runtime·mal((nfunc+1)*sizeof func[0]);
+ func[nfunc].entry = (uint64)etext;
+ fname = runtime·mal(nfname*sizeof fname[0]);
+ nfunc = 0;
+ walksymtab(dofunc);
+
+ // split pc/ln table by func
+ splitpcln();
+
+ // record src file and line info for each func
+ walksymtab(dosrcline);
+
+ m->nomemprof--;
+}
+
+Func*
+runtime·findfunc(uintptr addr)
+{
+ Func *f;
+ int32 nf, n;
+
+ // Use atomic double-checked locking,
+ // because when called from pprof signal
+ // handler, findfunc must run without
+ // grabbing any locks.
+ // (Before enabling the signal handler,
+ // SetCPUProfileRate calls findfunc to trigger
+ // the initialization outside the handler.)
+ if(runtime·atomicload(&funcinit) == 0) {
+ runtime·lock(&funclock);
+ if(funcinit == 0) {
+ buildfuncs();
+ runtime·atomicstore(&funcinit, 1);
+ }
+ runtime·unlock(&funclock);
+ }
+
+ if(nfunc == 0)
+ return nil;
+ if(addr < func[0].entry || addr >= func[nfunc].entry)
+ return nil;
+
+ // binary search to find func with entry <= addr.
+ f = func;
+ nf = nfunc;
+ while(nf > 0) {
+ n = nf/2;
+ if(f[n].entry <= addr && addr < f[n+1].entry)
+ return &f[n];
+ else if(addr < f[n].entry)
+ nf = n;
+ else {
+ f += n+1;
+ nf -= n+1;
+ }
+ }
+
+ // can't get here -- we already checked above
+ // that the address was in the table bounds.
+ // this can only happen if the table isn't sorted
+ // by address or if the binary search above is buggy.
+ runtime·prints("findfunc unreachable\n");
+ return nil;
+}
diff --git a/src/pkg/runtime/symtab_test.go b/src/pkg/runtime/symtab_test.go
new file mode 100644
index 000000000..bd9fe18c4
--- /dev/null
+++ b/src/pkg/runtime/symtab_test.go
@@ -0,0 +1,47 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestCaller(t *testing.T) {
+ procs := runtime.GOMAXPROCS(-1)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for i := 0; i < 1000; i++ {
+ testCallerFoo(t)
+ }
+ c <- true
+ }()
+ defer func() {
+ <-c
+ }()
+ }
+}
+
+func testCallerFoo(t *testing.T) {
+ testCallerBar(t)
+}
+
+func testCallerBar(t *testing.T) {
+ for i := 0; i < 2; i++ {
+ pc, file, line, ok := runtime.Caller(i)
+ f := runtime.FuncForPC(pc)
+ if !ok ||
+ !strings.HasSuffix(file, "symtab_test.go") ||
+ (i == 0 && !strings.HasSuffix(f.Name(), "testCallerBar")) ||
+ (i == 1 && !strings.HasSuffix(f.Name(), "testCallerFoo")) ||
+ line < 5 || line > 1000 ||
+ f.Entry() >= pc {
+ t.Errorf("incorrect symbol info %d: %t %d %d %s %s %d",
+ i, ok, f.Entry(), pc, f.Name(), file, line)
+ }
+ }
+}
diff --git a/src/pkg/runtime/type.go b/src/pkg/runtime/type.go
new file mode 100644
index 000000000..30f3ec642
--- /dev/null
+++ b/src/pkg/runtime/type.go
@@ -0,0 +1,208 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Runtime type representation.
+ *
+ * The following files know the exact layout of these
+ * data structures and must be kept in sync with this file:
+ *
+ * ../../cmd/gc/reflect.c
+ * ../../cmd/ld/dwarf.c decodetype_*
+ * ../reflect/type.go
+ * type.h
+ */
+
+package runtime
+
+import "unsafe"
+
+// The compiler can only construct empty interface values at
+// compile time; non-empty interface values get created
+// during initialization. Type is an empty interface
+// so that the compiler can lay out references as data.
+type Type interface{}
+
+// All types begin with a few common fields needed for
+// the interface runtime.
+type commonType struct {
+ size uintptr // size in bytes
+ hash uint32 // hash of type; avoids computation in hash tables
+ alg uint8 // algorithm for copy+hash+cmp (../runtime/runtime.h:/AMEM)
+ align uint8 // alignment of variable with this type
+ fieldAlign uint8 // alignment of struct field with this type
+ kind uint8 // enumeration for C
+ string *string // string form; unnecessary but undeniably useful
+ *uncommonType // (relatively) uncommon fields
+ ptrToThis *Type // pointer to this type, if used in binary or has methods
+}
+
+// Values for commonType.kind.
+const (
+ kindBool = 1 + iota
+ kindInt
+ kindInt8
+ kindInt16
+ kindInt32
+ kindInt64
+ kindUint
+ kindUint8
+ kindUint16
+ kindUint32
+ kindUint64
+ kindUintptr
+ kindFloat32
+ kindFloat64
+ kindComplex64
+ kindComplex128
+ kindArray
+ kindChan
+ kindFunc
+ kindInterface
+ kindMap
+ kindPtr
+ kindSlice
+ kindString
+ kindStruct
+ kindUnsafePointer
+
+ kindNoPointers = 1 << 7 // OR'ed into kind
+)
+
+// Method on non-interface type
+type _method struct { // underscore is to avoid collision with C
+ name *string // name of method
+ pkgPath *string // nil for exported Names; otherwise import path
+ mtyp *Type // method type (without receiver)
+ typ *Type // .(*FuncType) underneath (with receiver)
+ ifn unsafe.Pointer // fn used in interface call (one-word receiver)
+ tfn unsafe.Pointer // fn used for normal method call
+}
+
+// uncommonType is present only for types with names or methods
+// (if T is a named type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe an unnamed type with no methods.
+type uncommonType struct {
+ name *string // name of type
+ pkgPath *string // import path; nil for built-in types like int, string
+ methods []_method // methods associated with type
+}
+
+// BoolType represents a boolean type.
+type BoolType commonType
+
+// FloatType represents a float type.
+type FloatType commonType
+
+// ComplexType represents a complex type.
+type ComplexType commonType
+
+// IntType represents an int type.
+type IntType commonType
+
+// UintType represents a uint type.
+type UintType commonType
+
+// StringType represents a string type.
+type StringType commonType
+
+// UintptrType represents a uintptr type.
+type UintptrType commonType
+
+// UnsafePointerType represents an unsafe.Pointer type.
+type UnsafePointerType commonType
+
+// ArrayType represents a fixed array type.
+type ArrayType struct {
+ commonType
+ elem *Type // array element type
+ slice *Type // slice type
+ len uintptr
+}
+
+// SliceType represents a slice type.
+type SliceType struct {
+ commonType
+ elem *Type // slice element type
+}
+
+// ChanDir represents a channel type's direction.
+type ChanDir int
+
+const (
+ RecvDir ChanDir = 1 << iota // <-chan
+ SendDir // chan<-
+ BothDir = RecvDir | SendDir // chan
+)
+
+// ChanType represents a channel type.
+type ChanType struct {
+ commonType
+ elem *Type // channel element type
+ dir uintptr // channel direction (ChanDir)
+}
+
+// FuncType represents a function type.
+type FuncType struct {
+ commonType
+ dotdotdot bool // last input parameter is ...
+ in []*Type // input parameter types
+ out []*Type // output parameter types
+}
+
+// Method on interface type
+type _imethod struct { // underscore is to avoid collision with C
+ name *string // name of method
+ pkgPath *string // nil for exported Names; otherwise import path
+ typ *Type // .(*FuncType) underneath
+}
+
+// InterfaceType represents an interface type.
+type InterfaceType struct {
+ commonType
+ methods []_imethod // sorted by hash
+}
+
+// MapType represents a map type.
+type MapType struct {
+ commonType
+ key *Type // map key type
+ elem *Type // map element (value) type
+}
+
+// PtrType represents a pointer type.
+type PtrType struct {
+ commonType
+ elem *Type // pointer element (pointed at) type
+}
+
+// Struct field
+type structField struct {
+ name *string // nil for embedded fields
+ pkgPath *string // nil for exported Names; otherwise import path
+ typ *Type // type of field
+ tag *string // nil if no tag
+ offset uintptr // byte offset of field within struct
+}
+
+// StructType represents a struct type.
+type StructType struct {
+ commonType
+ fields []structField // sorted by offset
+}
+
+/*
+ * Must match iface.c:/Itab and compilers.
+ * NOTE: this is the version used by the reflection code, there is another
+ * one in iface_defs.go that is closer to the original C version.
+ */
+type Itable struct {
+ Itype *Type // (*tab.inter).(*InterfaceType) is the interface type
+ Type *Type
+ link *Itable
+ bad int32
+ unused int32
+ Fn [100000]uintptr // bigger than we'll ever see
+}
diff --git a/src/pkg/runtime/type.h b/src/pkg/runtime/type.h
new file mode 100644
index 000000000..8c80c62d3
--- /dev/null
+++ b/src/pkg/runtime/type.h
@@ -0,0 +1,131 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Runtime type representation; master is type.go
+ *
+ * The *Types here correspond 1-1 to type.go's *Type's, but are
+ * prefixed with an extra header of 2 pointers, corresponding to the
+ * interface{} structure, which itself is called type Type again on
+ * the Go side.
+ */
+
+typedef struct CommonType CommonType;
+typedef struct UncommonType UncommonType;
+typedef struct InterfaceType InterfaceType;
+typedef struct Method Method;
+typedef struct IMethod IMethod;
+typedef struct SliceType SliceType;
+typedef struct FuncType FuncType;
+
+struct CommonType
+{
+ uintptr size;
+ uint32 hash;
+ uint8 alg;
+ uint8 align;
+ uint8 fieldAlign;
+ uint8 kind;
+ String *string;
+ UncommonType *x;
+ Type *ptrto;
+};
+
+enum {
+ KindBool = 1,
+ KindInt,
+ KindInt8,
+ KindInt16,
+ KindInt32,
+ KindInt64,
+ KindUint,
+ KindUint8,
+ KindUint16,
+ KindUint32,
+ KindUint64,
+ KindUintptr,
+ KindFloat32,
+ KindFloat64,
+ KindComplex64,
+ KindComplex128,
+ KindArray,
+ KindChan,
+ KindFunc,
+ KindInterface,
+ KindMap,
+ KindPtr,
+ KindSlice,
+ KindString,
+ KindStruct,
+ KindUnsafePointer,
+
+ KindNoPointers = 1<<7,
+};
+
+struct Method
+{
+ String *name;
+ String *pkgPath;
+ Type *mtyp;
+ Type *typ;
+ void (*ifn)(void);
+ void (*tfn)(void);
+};
+
+struct UncommonType
+{
+ String *name;
+ String *pkgPath;
+ Slice mhdr;
+ Method m[];
+};
+
+struct Type
+{
+ void *type; // interface{} value
+ void *ptr;
+ CommonType;
+};
+
+struct IMethod
+{
+ String *name;
+ String *pkgPath;
+ Type *type;
+};
+
+struct InterfaceType
+{
+ Type;
+ Slice mhdr;
+ IMethod m[];
+};
+
+struct MapType
+{
+ Type;
+ Type *key;
+ Type *elem;
+};
+
+struct ChanType
+{
+ Type;
+ Type *elem;
+ uintptr dir;
+};
+
+struct SliceType
+{
+ Type;
+ Type *elem;
+};
+
+struct FuncType
+{
+ Type;
+ bool dotdotdot;
+ Slice in;
+ Slice out;
+};
diff --git a/src/pkg/runtime/windows/386/defs.h b/src/pkg/runtime/windows/386/defs.h
new file mode 100644
index 000000000..49fc19504
--- /dev/null
+++ b/src/pkg/runtime/windows/386/defs.h
@@ -0,0 +1,81 @@
+// c:\Users\Hector\Code\go\bin\godefs.exe defs.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x1,
+ MAP_PRIVATE = 0x2,
+ SIGINT = 0x2,
+ CTRL_C_EVENT = 0,
+ CTRL_BREAK_EVENT = 0x1,
+ EXCEPTION_ACCESS_VIOLATION = 0xc0000005,
+ EXCEPTION_BREAKPOINT = 0x80000003,
+ EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d,
+ EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e,
+ EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f,
+ EXCEPTION_FLT_OVERFLOW = 0xc0000091,
+ EXCEPTION_FLT_UNDERFLOW = 0xc0000093,
+ EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094,
+ EXCEPTION_INT_OVERFLOW = 0xc0000095,
+};
+
+// Types
+#pragma pack on
+
+typedef struct ExceptionRecord ExceptionRecord;
+struct ExceptionRecord {
+ uint32 ExceptionCode;
+ uint32 ExceptionFlags;
+ ExceptionRecord *ExceptionRecord;
+ void *ExceptionAddress;
+ uint32 NumberParameters;
+ uint32 ExceptionInformation[15];
+};
+
+typedef struct FloatingSaveArea FloatingSaveArea;
+struct FloatingSaveArea {
+ uint32 ControlWord;
+ uint32 StatusWord;
+ uint32 TagWord;
+ uint32 ErrorOffset;
+ uint32 ErrorSelector;
+ uint32 DataOffset;
+ uint32 DataSelector;
+ uint8 RegisterArea[80];
+ uint32 Cr0NpxState;
+};
+
+typedef struct Context Context;
+struct Context {
+ uint32 ContextFlags;
+ uint32 Dr0;
+ uint32 Dr1;
+ uint32 Dr2;
+ uint32 Dr3;
+ uint32 Dr6;
+ uint32 Dr7;
+ FloatingSaveArea FloatSave;
+ uint32 SegGs;
+ uint32 SegFs;
+ uint32 SegEs;
+ uint32 SegDs;
+ uint32 Edi;
+ uint32 Esi;
+ uint32 Ebx;
+ uint32 Edx;
+ uint32 Ecx;
+ uint32 Eax;
+ uint32 Ebp;
+ uint32 Eip;
+ uint32 SegCs;
+ uint32 EFlags;
+ uint32 Esp;
+ uint32 SegSs;
+ uint8 ExtendedRegisters[512];
+};
+#pragma pack off
diff --git a/src/pkg/runtime/windows/386/rt0.s b/src/pkg/runtime/windows/386/rt0.s
new file mode 100644
index 000000000..3b023de2f
--- /dev/null
+++ b/src/pkg/runtime/windows/386/rt0.s
@@ -0,0 +1,14 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+TEXT _rt0_386_windows(SB),7,$0
+ // Set up SEH frame for bootstrap m
+ PUSHL $runtime·sigtramp(SB)
+ PUSHL 0(FS)
+ MOVL SP, 0(FS)
+
+ JMP _rt0_386(SB)
+
+DATA runtime·iswindows(SB)/4, $1
+GLOBL runtime·iswindows(SB), $4
diff --git a/src/pkg/runtime/windows/386/signal.c b/src/pkg/runtime/windows/386/signal.c
new file mode 100644
index 000000000..cc6a2302f
--- /dev/null
+++ b/src/pkg/runtime/windows/386/signal.c
@@ -0,0 +1,98 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+
+void
+runtime·dumpregs(Context *r)
+{
+ runtime·printf("eax %x\n", r->Eax);
+ runtime·printf("ebx %x\n", r->Ebx);
+ runtime·printf("ecx %x\n", r->Ecx);
+ runtime·printf("edx %x\n", r->Edx);
+ runtime·printf("edi %x\n", r->Edi);
+ runtime·printf("esi %x\n", r->Esi);
+ runtime·printf("ebp %x\n", r->Ebp);
+ runtime·printf("esp %x\n", r->Esp);
+ runtime·printf("eip %x\n", r->Eip);
+ runtime·printf("eflags %x\n", r->EFlags);
+ runtime·printf("cs %x\n", r->SegCs);
+ runtime·printf("fs %x\n", r->SegFs);
+ runtime·printf("gs %x\n", r->SegGs);
+}
+
+void
+runtime·initsig(int32)
+{
+ runtime·siginit();
+}
+
+uint32
+runtime·sighandler(ExceptionRecord *info, void *frame, Context *r)
+{
+ uintptr *sp;
+ G *gp;
+
+ USED(frame);
+
+ switch(info->ExceptionCode) {
+ case EXCEPTION_BREAKPOINT:
+ r->Eip--; // because 8l generates 2 bytes for INT3
+ return 1;
+ }
+
+ if((gp = m->curg) != nil && runtime·issigpanic(info->ExceptionCode)) {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = info->ExceptionCode;
+ gp->sigcode0 = info->ExceptionInformation[0];
+ gp->sigcode1 = info->ExceptionInformation[1];
+ gp->sigpc = r->Eip;
+
+ // Only push runtime·sigpanic if r->eip != 0.
+ // If r->eip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
+ // won't get to see who faulted.)
+ if(r->Eip != 0) {
+ sp = (uintptr*)r->Esp;
+ *--sp = r->Eip;
+ r->Esp = (uintptr)sp;
+ }
+ r->Eip = (uintptr)runtime·sigpanic;
+ return 0;
+ }
+
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ runtime·printf("Exception %x %p %p\n", info->ExceptionCode,
+ info->ExceptionInformation[0], info->ExceptionInformation[1]);
+
+ runtime·printf("PC=%x\n", r->Eip);
+ runtime·printf("\n");
+
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->Eip, (void*)r->Esp, 0, m->curg);
+ runtime·tracebackothers(m->curg);
+ runtime·dumpregs(r);
+ }
+
+ runtime·exit(2);
+ return 0;
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ // TODO: Enable profiling interrupts.
+
+ m->profilehz = hz;
+}
diff --git a/src/pkg/runtime/windows/386/sys.s b/src/pkg/runtime/windows/386/sys.s
new file mode 100644
index 000000000..703f77d55
--- /dev/null
+++ b/src/pkg/runtime/windows/386/sys.s
@@ -0,0 +1,256 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "386/asm.h"
+
+// void *stdcall_raw(void *fn, int32 count, uintptr *args)
+TEXT runtime·stdcall_raw(SB),7,$0
+ // Copy arguments from stack.
+ MOVL fn+0(FP), AX
+ MOVL count+4(FP), CX // words
+ MOVL args+8(FP), BP
+
+ // Switch to m->g0 if needed.
+ get_tls(DI)
+ MOVL m(DI), DX
+ MOVL m_g0(DX), SI
+ CMPL g(DI), SI
+ MOVL SP, BX
+ JEQ 2(PC)
+ MOVL (g_sched+gobuf_sp)(SI), SP
+ PUSHL BX
+ PUSHL g(DI)
+ MOVL SI, g(DI)
+
+ // Copy args to new stack.
+ MOVL CX, BX
+ SALL $2, BX
+ SUBL BX, SP // room for args
+ MOVL SP, DI
+ MOVL BP, SI
+ CLD
+ REP; MOVSL
+
+ // Call stdcall function.
+ CALL AX
+
+ // Restore original SP, g.
+ get_tls(DI)
+ POPL g(DI)
+ POPL SP
+
+ // Someday the convention will be D is always cleared.
+ CLD
+
+ RET
+
+// faster get/set last error
+TEXT runtime·getlasterror(SB),7,$0
+ MOVL 0x34(FS), AX
+ RET
+
+TEXT runtime·setlasterror(SB),7,$0
+ MOVL err+0(FP), AX
+ MOVL AX, 0x34(FS)
+ RET
+
+TEXT runtime·sigtramp(SB),7,$0
+ PUSHL BP // cdecl
+ PUSHL BX
+ PUSHL SI
+ PUSHL DI
+ PUSHL 0(FS)
+ CALL runtime·sigtramp1(SB)
+ POPL 0(FS)
+ POPL DI
+ POPL SI
+ POPL BX
+ POPL BP
+ RET
+
+TEXT runtime·sigtramp1(SB),0,$16-40
+ // unwinding?
+ MOVL info+24(FP), BX
+ MOVL 4(BX), CX // exception flags
+ ANDL $6, CX
+ MOVL $1, AX
+ JNZ sigdone
+
+ // place ourselves at the top of the SEH chain to
+ // ensure SEH frames lie within thread stack bounds
+ MOVL frame+28(FP), CX // our SEH frame
+ MOVL CX, 0(FS)
+
+ // copy arguments for call to sighandler
+ MOVL BX, 0(SP)
+ MOVL CX, 4(SP)
+ MOVL context+32(FP), BX
+ MOVL BX, 8(SP)
+ MOVL dispatcher+36(FP), BX
+ MOVL BX, 12(SP)
+
+ CALL runtime·sighandler(SB)
+ TESTL AX, AX
+ JZ sigdone
+
+ // call windows default handler early
+ MOVL 4(SP), BX // our SEH frame
+ MOVL 0(BX), BX // SEH frame of default handler
+ MOVL BX, 4(SP) // set establisher frame
+ CALL 4(BX)
+
+sigdone:
+ RET
+
+// Windows runs the ctrl handler in a new thread.
+TEXT runtime·ctrlhandler(SB),7,$0
+ PUSHL BP
+ MOVL SP, BP
+ PUSHL BX
+ PUSHL SI
+ PUSHL DI
+ PUSHL 0x2c(FS)
+ MOVL SP, BX
+
+ // setup dummy m, g
+ SUBL $(m_fflag+4), SP // at least space for m_fflag
+ LEAL m_tls(SP), CX
+ MOVL CX, 0x2c(FS)
+ MOVL SP, m(CX)
+ MOVL SP, DX
+ SUBL $8, SP // space for g_stack{guard,base}
+ MOVL SP, g(CX)
+ MOVL SP, m_g0(DX)
+ LEAL -4096(SP), CX
+ MOVL CX, g_stackguard(SP)
+ MOVL BX, g_stackbase(SP)
+
+ PUSHL 8(BP)
+ CALL runtime·ctrlhandler1(SB)
+ POPL CX
+
+ get_tls(CX)
+ MOVL g(CX), CX
+ MOVL g_stackbase(CX), SP
+ POPL 0x2c(FS)
+ POPL DI
+ POPL SI
+ POPL BX
+ POPL BP
+ MOVL 0(SP), CX
+ ADDL $8, SP
+ JMP CX
+
+// Called from dynamic function created by ../thread.c compilecallback,
+// running on Windows stack (not Go stack).
+// BX, BP, SI, DI registers and DF flag are preserved
+// as required by windows callback convention.
+// AX = address of go func we need to call
+// DX = total size of arguments
+//
+TEXT runtime·callbackasm+0(SB),7,$0
+ // preserve whatever's at the memory location that
+ // the callback will use to store the return value
+ LEAL 8(SP), CX
+ PUSHL 0(CX)(DX*1)
+ ADDL $4, DX // extend argsize by size of return value
+
+ // save registers as required for windows callback
+ PUSHL DI
+ PUSHL SI
+ PUSHL BP
+ PUSHL BX
+
+ // set up SEH frame again
+ PUSHL $runtime·sigtramp(SB)
+ PUSHL 0(FS)
+ MOVL SP, 0(FS)
+
+ // callback parameters
+ PUSHL DX
+ PUSHL CX
+ PUSHL AX
+
+ CLD
+
+ CALL runtime·cgocallback(SB)
+
+ POPL AX
+ POPL CX
+ POPL DX
+
+ // pop SEH frame
+ POPL 0(FS)
+ POPL BX
+
+ // restore registers as required for windows callback
+ POPL BX
+ POPL BP
+ POPL SI
+ POPL DI
+
+ CLD
+
+ MOVL -4(CX)(DX*1), AX
+ POPL -4(CX)(DX*1)
+ RET
+
+// void tstart(M *newm);
+TEXT runtime·tstart(SB),7,$0
+ MOVL newm+4(SP), CX // m
+ MOVL m_g0(CX), DX // g
+
+ // Set up SEH frame
+ PUSHL $runtime·sigtramp(SB)
+ PUSHL 0(FS)
+ MOVL SP, 0(FS)
+
+ // Layout new m scheduler stack on os stack.
+ MOVL SP, AX
+ MOVL AX, g_stackbase(DX)
+ SUBL $(64*1024), AX // stack size
+ MOVL AX, g_stackguard(DX)
+
+ // Set up tls.
+ LEAL m_tls(CX), SI
+ MOVL SI, 0x2c(FS)
+ MOVL CX, m(SI)
+ MOVL DX, g(SI)
+
+ // Someday the convention will be D is always cleared.
+ CLD
+
+ CALL runtime·stackcheck(SB) // clobbers AX,CX
+
+ CALL runtime·mstart(SB)
+
+ // Pop SEH frame
+ MOVL 0(FS), SP
+ POPL 0(FS)
+ POPL CX
+
+ RET
+
+// uint32 tstart_stdcall(M *newm);
+TEXT runtime·tstart_stdcall(SB),7,$0
+ MOVL newm+4(SP), BX
+
+ PUSHL BX
+ CALL runtime·tstart(SB)
+ POPL BX
+
+ // Adjust stack for stdcall to return properly.
+ MOVL (SP), AX // save return address
+ ADDL $4, SP // remove single parameter
+ MOVL AX, (SP) // restore return address
+
+ XORL AX, AX // return 0 == success
+
+ RET
+
+// setldt(int entry, int address, int limit)
+TEXT runtime·setldt(SB),7,$0
+ MOVL address+4(FP), CX
+ MOVL CX, 0x2c(FS)
+ RET
diff --git a/src/pkg/runtime/windows/amd64/defs.h b/src/pkg/runtime/windows/amd64/defs.h
new file mode 100644
index 000000000..830c6a855
--- /dev/null
+++ b/src/pkg/runtime/windows/amd64/defs.h
@@ -0,0 +1,40 @@
+// g:\opensource\go\bin\godefs.exe -f -m64 defs.c
+
+// MACHINE GENERATED - DO NOT EDIT.
+
+// Constants
+enum {
+ PROT_NONE = 0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+ MAP_ANON = 0x1,
+ MAP_PRIVATE = 0x2,
+ SIGINT = 0x2,
+ CTRL_C_EVENT = 0,
+ CTRL_BREAK_EVENT = 0x1,
+ EXCEPTION_ACCESS_VIOLATION = 0xc0000005,
+ EXCEPTION_BREAKPOINT = 0x80000003,
+ EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d,
+ EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e,
+ EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f,
+ EXCEPTION_FLT_OVERFLOW = 0xc0000091,
+ EXCEPTION_FLT_UNDERFLOW = 0xc0000093,
+ EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094,
+ EXCEPTION_INT_OVERFLOW = 0xc0000095,
+};
+
+// Types
+#pragma pack on
+
+typedef struct ExceptionRecord ExceptionRecord;
+struct ExceptionRecord {
+ uint32 ExceptionCode;
+ uint32 ExceptionFlags;
+ ExceptionRecord *ExceptionRecord;
+ void *ExceptionAddress;
+ uint32 NumberParameters;
+ byte pad_godefs_0[4];
+ uint64 ExceptionInformation[15];
+};
+#pragma pack off
diff --git a/src/pkg/runtime/windows/amd64/rt0.s b/src/pkg/runtime/windows/amd64/rt0.s
new file mode 100644
index 000000000..35978bc74
--- /dev/null
+++ b/src/pkg/runtime/windows/amd64/rt0.s
@@ -0,0 +1,13 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "amd64/asm.h"
+
+TEXT _rt0_amd64_windows(SB),7,$-8
+ MOVQ $_rt0_amd64(SB), AX
+ MOVQ SP, DI
+ JMP AX
+
+DATA runtime·iswindows(SB)/4, $1
+GLOBL runtime·iswindows(SB), $4
diff --git a/src/pkg/runtime/windows/amd64/signal.c b/src/pkg/runtime/windows/amd64/signal.c
new file mode 100644
index 000000000..1fc3eb060
--- /dev/null
+++ b/src/pkg/runtime/windows/amd64/signal.c
@@ -0,0 +1,20 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "defs.h"
+#include "os.h"
+
+void
+runtime·initsig(int32 queue)
+{
+}
+
+void
+runtime·resetcpuprofiler(int32 hz)
+{
+ // TODO: Enable profiling interrupts.
+
+ m->profilehz = hz;
+}
diff --git a/src/pkg/runtime/windows/amd64/sys.s b/src/pkg/runtime/windows/amd64/sys.s
new file mode 100644
index 000000000..2009d164e
--- /dev/null
+++ b/src/pkg/runtime/windows/amd64/sys.s
@@ -0,0 +1,130 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "amd64/asm.h"
+
+// void *stdcall_raw(void *fn, uintptr nargs, void *args)
+TEXT runtime·stdcall_raw(SB),7,$8
+ MOVQ fn+0(FP), AX
+ MOVQ nargs+8(FP), CX
+ MOVQ args+16(FP), R11
+
+ // Switch to m->g0 if needed.
+ get_tls(DI)
+ MOVQ m(DI), DX
+ MOVQ g(DI), SI
+ MOVQ SI, 0(SP) // save g
+ MOVQ SP, m_gostack(DX) // save SP
+ MOVQ m_g0(DX), SI
+ CMPQ g(DI), SI
+ JEQ 3(PC)
+ MOVQ (g_sched+gobuf_sp)(SI), SP
+ ANDQ $~15, SP
+ MOVQ SI, g(DI)
+
+ SUBQ $0x60, SP
+
+ // Copy args to new stack.
+ MOVQ SP, DI
+ MOVQ R11, SI
+ CLD
+ REP; MOVSQ
+ MOVQ 0(R11), CX
+ MOVQ 8(R11), DX
+ MOVQ 16(R11), R8
+ MOVQ 24(R11), R9
+
+ // Call stdcall function.
+ CALL AX
+
+ // Restore original SP, g.
+ get_tls(DI)
+ MOVQ m(DI), DX
+ MOVQ m_gostack(DX), SP // restore SP
+ MOVQ 0(SP), SI // restore g
+ MOVQ SI, g(DI)
+
+ RET
+
+// faster get/set last error
+TEXT runtime·getlasterror(SB),7,$0
+ MOVQ 0x30(GS), AX
+ MOVL 0x68(AX), AX
+ RET
+
+TEXT runtime·setlasterror(SB),7,$0
+ MOVL err+0(FP), AX
+ MOVQ 0x30(GS), CX
+ MOVL AX, 0x68(CX)
+ RET
+
+// Windows runs the ctrl handler in a new thread.
+TEXT runtime·ctrlhandler(SB),7,$0
+ // TODO
+ RET
+
+TEXT runtime·callbackasm(SB),7,$0
+ // TODO
+ RET
+
+// void tstart(M *newm);
+TEXT runtime·tstart(SB),7,$0
+ MOVQ newm+8(SP), CX // m
+ MOVQ m_g0(CX), DX // g
+
+ MOVQ SP, DI // remember stack
+
+ // Layout new m scheduler stack on os stack.
+ MOVQ SP, AX
+ MOVQ AX, g_stackbase(DX)
+ SUBQ $(64*1024), AX // stack size
+ MOVQ AX, g_stackguard(DX)
+
+ // Set up tls.
+ LEAQ m_tls(CX), SI
+ MOVQ SI, 0x58(GS)
+ MOVQ CX, m(SI)
+ MOVQ DX, g(SI)
+
+ // Someday the convention will be D is always cleared.
+ CLD
+
+ PUSHQ DI // original stack
+
+ CALL runtime·stackcheck(SB) // clobbers AX,CX
+
+ CALL runtime·mstart(SB)
+
+ POPQ DI // original stack
+ MOVQ DI, SP
+
+ RET
+
+// uint32 tstart_stdcall(M *newm);
+TEXT runtime·tstart_stdcall(SB),7,$0
+ MOVQ CX, BX // stdcall first arg in RCX
+
+ PUSHQ BX
+ CALL runtime·tstart+0(SB)
+ POPQ BX
+
+ // Adjust stack for stdcall to return properly.
+ MOVQ (SP), AX // save return address
+ ADDQ $8, SP // remove single parameter
+ MOVQ AX, (SP) // restore return address
+
+ XORL AX, AX // return 0 == success
+
+ RET
+
+TEXT runtime·notok(SB),7,$0
+ MOVQ $0xf1, BP
+ MOVQ BP, (BP)
+ RET
+
+// set tls base to DI
+TEXT runtime·settls(SB),7,$0
+ MOVQ DI, 0x58(GS)
+ RET
+
diff --git a/src/pkg/runtime/windows/defs.c b/src/pkg/runtime/windows/defs.c
new file mode 100644
index 000000000..3b2824940
--- /dev/null
+++ b/src/pkg/runtime/windows/defs.c
@@ -0,0 +1,37 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <signal.h>
+#include <stdarg.h>
+#include <windef.h>
+#include <winbase.h>
+#include <wincon.h>
+
+enum {
+ $PROT_NONE = 0,
+ $PROT_READ = 1,
+ $PROT_WRITE = 2,
+ $PROT_EXEC = 4,
+
+ $MAP_ANON = 1,
+ $MAP_PRIVATE = 2,
+
+ $SIGINT = SIGINT,
+ $CTRL_C_EVENT = CTRL_C_EVENT,
+ $CTRL_BREAK_EVENT = CTRL_BREAK_EVENT,
+
+ $EXCEPTION_ACCESS_VIOLATION = STATUS_ACCESS_VIOLATION,
+ $EXCEPTION_BREAKPOINT = STATUS_BREAKPOINT,
+ $EXCEPTION_FLT_DENORMAL_OPERAND = STATUS_FLOAT_DENORMAL_OPERAND,
+ $EXCEPTION_FLT_DIVIDE_BY_ZERO = STATUS_FLOAT_DIVIDE_BY_ZERO,
+ $EXCEPTION_FLT_INEXACT_RESULT = STATUS_FLOAT_INEXACT_RESULT,
+ $EXCEPTION_FLT_OVERFLOW = STATUS_FLOAT_OVERFLOW,
+ $EXCEPTION_FLT_UNDERFLOW = STATUS_FLOAT_UNDERFLOW,
+ $EXCEPTION_INT_DIVIDE_BY_ZERO = STATUS_INTEGER_DIVIDE_BY_ZERO,
+ $EXCEPTION_INT_OVERFLOW = STATUS_INTEGER_OVERFLOW,
+};
+
+typedef EXCEPTION_RECORD $ExceptionRecord;
+typedef FLOATING_SAVE_AREA $FloatingSaveArea;
+typedef CONTEXT $Context;
diff --git a/src/pkg/runtime/windows/mem.c b/src/pkg/runtime/windows/mem.c
new file mode 100644
index 000000000..5d2291fa3
--- /dev/null
+++ b/src/pkg/runtime/windows/mem.c
@@ -0,0 +1,70 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "os.h"
+#include "defs.h"
+#include "malloc.h"
+
+enum {
+ MEM_COMMIT = 0x1000,
+ MEM_RESERVE = 0x2000,
+ MEM_RELEASE = 0x8000,
+
+ PAGE_EXECUTE_READWRITE = 0x40,
+};
+
+#pragma dynimport runtime·VirtualAlloc VirtualAlloc "kernel32.dll"
+#pragma dynimport runtime·VirtualFree VirtualFree "kernel32.dll"
+extern void *runtime·VirtualAlloc;
+extern void *runtime·VirtualFree;
+
+void*
+runtime·SysAlloc(uintptr n)
+{
+ mstats.sys += n;
+ return runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, (uintptr)(MEM_COMMIT|MEM_RESERVE), (uintptr)PAGE_EXECUTE_READWRITE);
+}
+
+void
+runtime·SysUnused(void *v, uintptr n)
+{
+ USED(v);
+ USED(n);
+}
+
+void
+runtime·SysFree(void *v, uintptr n)
+{
+ uintptr r;
+
+ mstats.sys -= n;
+ r = (uintptr)runtime·stdcall(runtime·VirtualFree, 3, v, (uintptr)0, (uintptr)MEM_RELEASE);
+ if(r == 0)
+ runtime·throw("runtime: failed to release pages");
+}
+
+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+ // v is just a hint.
+ // First try at v.
+ v = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_RESERVE, (uintptr)PAGE_EXECUTE_READWRITE);
+ if(v != nil)
+ return v;
+
+ // Next let the kernel choose the address.
+ return runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, (uintptr)MEM_RESERVE, (uintptr)PAGE_EXECUTE_READWRITE);
+}
+
+void
+runtime·SysMap(void *v, uintptr n)
+{
+ void *p;
+
+ mstats.sys += n;
+ p = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_COMMIT, (uintptr)PAGE_EXECUTE_READWRITE);
+ if(p != v)
+ runtime·throw("runtime: cannot map pages in arena address space");
+}
diff --git a/src/pkg/runtime/windows/os.h b/src/pkg/runtime/windows/os.h
new file mode 100644
index 000000000..bc9678733
--- /dev/null
+++ b/src/pkg/runtime/windows/os.h
@@ -0,0 +1,30 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+extern void *runtime·LoadLibraryEx;
+extern void *runtime·GetProcAddress;
+
+// Call a Windows function with stdcall conventions,
+// and switch to os stack during the call.
+#pragma varargck countpos runtime·stdcall 2
+#pragma varargck type runtime·stdcall void*
+#pragma varargck type runtime·stdcall uintptr
+void *runtime·stdcall_raw(void *fn, uintptr nargs, void *args);
+void *runtime·stdcall(void *fn, int32 count, ...);
+uintptr runtime·syscall(void *fn, uintptr nargs, void *args, uintptr *err);
+
+uintptr runtime·getlasterror(void);
+void runtime·setlasterror(uintptr err);
+
+// Function to be called by windows CreateThread
+// to start new os thread.
+uint32 runtime·tstart_stdcall(M *newm);
+
+uint32 runtime·issigpanic(uint32);
+void runtime·sigpanic(void);
+uint32 runtime·ctrlhandler(uint32 type);
+
+// Windows dll function to go callback entry.
+byte *runtime·compilecallback(Eface fn, bool cleanstack);
+void *runtime·callbackasm(void);
diff --git a/src/pkg/runtime/windows/signals.h b/src/pkg/runtime/windows/signals.h
new file mode 100644
index 000000000..6943714b0
--- /dev/null
+++ b/src/pkg/runtime/windows/signals.h
@@ -0,0 +1,3 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
diff --git a/src/pkg/runtime/windows/syscall.goc b/src/pkg/runtime/windows/syscall.goc
new file mode 100644
index 000000000..85071e051
--- /dev/null
+++ b/src/pkg/runtime/windows/syscall.goc
@@ -0,0 +1,67 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+#include "runtime.h"
+#include "os.h"
+
+func loadlibraryex(filename uintptr) (handle uintptr) {
+ uintptr args[3] = { filename };
+ handle = runtime·syscall(runtime·LoadLibraryEx, 3, args, nil);
+}
+
+func getprocaddress(handle uintptr, procname uintptr) (proc uintptr) {
+ USED(procname);
+ proc = runtime·syscall(runtime·GetProcAddress, 2, &handle, nil);
+}
+
+func NewCallback(fn Eface) (code uintptr) {
+ code = (uintptr)runtime·compilecallback(fn, true);
+}
+
+func Syscall(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
+ USED(a2);
+ USED(a3);
+ r1 = runtime·syscall((void*)fn, nargs, &a1, &err);
+ r2 = 0;
+}
+
+func Syscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
+ USED(a2);
+ USED(a3);
+ USED(a4);
+ USED(a5);
+ USED(a6);
+ r1 = runtime·syscall((void*)fn, nargs, &a1, &err);
+ r2 = 0;
+}
+
+func Syscall9(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
+ USED(a2);
+ USED(a3);
+ USED(a4);
+ USED(a5);
+ USED(a6);
+ USED(a7);
+ USED(a8);
+ USED(a9);
+ r1 = runtime·syscall((void*)fn, nargs, &a1, &err);
+ r2 = 0;
+}
+
+func Syscall12(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr) (r1 uintptr, r2 uintptr, err uintptr) {
+ USED(a2);
+ USED(a3);
+ USED(a4);
+ USED(a5);
+ USED(a6);
+ USED(a7);
+ USED(a8);
+ USED(a9);
+ USED(a10);
+ USED(a11);
+ USED(a12);
+ r1 = runtime·syscall((void*)fn, nargs, &a1, &err);
+ r2 = 0;
+}
diff --git a/src/pkg/runtime/windows/thread.c b/src/pkg/runtime/windows/thread.c
new file mode 100644
index 000000000..e08d1b6f0
--- /dev/null
+++ b/src/pkg/runtime/windows/thread.c
@@ -0,0 +1,432 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "type.h"
+#include "defs.h"
+#include "os.h"
+
+#pragma dynimport runtime·CloseHandle CloseHandle "kernel32.dll"
+#pragma dynimport runtime·CreateEvent CreateEventA "kernel32.dll"
+#pragma dynimport runtime·CreateThread CreateThread "kernel32.dll"
+#pragma dynimport runtime·ExitProcess ExitProcess "kernel32.dll"
+#pragma dynimport runtime·FreeEnvironmentStringsW FreeEnvironmentStringsW "kernel32.dll"
+#pragma dynimport runtime·GetEnvironmentStringsW GetEnvironmentStringsW "kernel32.dll"
+#pragma dynimport runtime·GetProcAddress GetProcAddress "kernel32.dll"
+#pragma dynimport runtime·GetStdHandle GetStdHandle "kernel32.dll"
+#pragma dynimport runtime·LoadLibraryEx LoadLibraryExA "kernel32.dll"
+#pragma dynimport runtime·QueryPerformanceCounter QueryPerformanceCounter "kernel32.dll"
+#pragma dynimport runtime·QueryPerformanceFrequency QueryPerformanceFrequency "kernel32.dll"
+#pragma dynimport runtime·SetConsoleCtrlHandler SetConsoleCtrlHandler "kernel32.dll"
+#pragma dynimport runtime·SetEvent SetEvent "kernel32.dll"
+#pragma dynimport runtime·WaitForSingleObject WaitForSingleObject "kernel32.dll"
+#pragma dynimport runtime·WriteFile WriteFile "kernel32.dll"
+
+extern void *runtime·CloseHandle;
+extern void *runtime·CreateEvent;
+extern void *runtime·CreateThread;
+extern void *runtime·ExitProcess;
+extern void *runtime·FreeEnvironmentStringsW;
+extern void *runtime·GetEnvironmentStringsW;
+extern void *runtime·GetProcAddress;
+extern void *runtime·GetStdHandle;
+extern void *runtime·LoadLibraryEx;
+extern void *runtime·QueryPerformanceCounter;
+extern void *runtime·QueryPerformanceFrequency;
+extern void *runtime·SetConsoleCtrlHandler;
+extern void *runtime·SetEvent;
+extern void *runtime·WaitForSingleObject;
+extern void *runtime·WriteFile;
+
+static int64 timerfreq;
+static void destroylock(Lock *l);
+
+void
+runtime·osinit(void)
+{
+ runtime·stdcall(runtime·QueryPerformanceFrequency, 1, &timerfreq);
+ runtime·stdcall(runtime·SetConsoleCtrlHandler, 2, runtime·ctrlhandler, (uintptr)1);
+ runtime·destroylock = destroylock;
+}
+
+void
+runtime·goenvs(void)
+{
+ extern Slice os·Envs;
+
+ uint16 *env;
+ String *s;
+ int32 i, n;
+ uint16 *p;
+
+ env = runtime·stdcall(runtime·GetEnvironmentStringsW, 0);
+
+ n = 0;
+ for(p=env; *p; n++)
+ p += runtime·findnullw(p)+1;
+
+ s = runtime·malloc(n*sizeof s[0]);
+
+ p = env;
+ for(i=0; i<n; i++) {
+ s[i] = runtime·gostringw(p);
+ p += runtime·findnullw(p)+1;
+ }
+ os·Envs.array = (byte*)s;
+ os·Envs.len = n;
+ os·Envs.cap = n;
+
+ runtime·stdcall(runtime·FreeEnvironmentStringsW, 1, env);
+}
+
+void
+runtime·exit(int32 code)
+{
+ runtime·stdcall(runtime·ExitProcess, 1, (uintptr)code);
+}
+
+int32
+runtime·write(int32 fd, void *buf, int32 n)
+{
+ void *handle;
+ uint32 written;
+
+ written = 0;
+ switch(fd) {
+ case 1:
+ handle = runtime·stdcall(runtime·GetStdHandle, 1, (uintptr)-11);
+ break;
+ case 2:
+ handle = runtime·stdcall(runtime·GetStdHandle, 1, (uintptr)-12);
+ break;
+ default:
+ return -1;
+ }
+ runtime·stdcall(runtime·WriteFile, 5, handle, buf, (uintptr)n, &written, (uintptr)0);
+ return written;
+}
+
+// Thread-safe allocation of an event.
+static void
+initevent(void **pevent)
+{
+ void *event;
+
+ event = runtime·stdcall(runtime·CreateEvent, 4, (uintptr)0, (uintptr)0, (uintptr)0, (uintptr)0);
+ if(!runtime·casp(pevent, 0, event)) {
+ // Someone else filled it in. Use theirs.
+ runtime·stdcall(runtime·CloseHandle, 1, event);
+ }
+}
+
+static void
+eventlock(Lock *l)
+{
+ // Allocate event if needed.
+ if(l->event == 0)
+ initevent(&l->event);
+
+ if(runtime·xadd(&l->key, 1) > 1) // someone else has it; wait
+ runtime·stdcall(runtime·WaitForSingleObject, 2, l->event, (uintptr)-1);
+}
+
+static void
+eventunlock(Lock *l)
+{
+ if(runtime·xadd(&l->key, -1) > 0) // someone else is waiting
+ runtime·stdcall(runtime·SetEvent, 1, l->event);
+}
+
+void
+runtime·lock(Lock *l)
+{
+ if(m->locks < 0)
+ runtime·throw("lock count");
+ m->locks++;
+ eventlock(l);
+}
+
+void
+runtime·unlock(Lock *l)
+{
+ m->locks--;
+ if(m->locks < 0)
+ runtime·throw("lock count");
+ eventunlock(l);
+}
+
+static void
+destroylock(Lock *l)
+{
+ if(l->event != 0)
+ runtime·stdcall(runtime·CloseHandle, 1, l->event);
+}
+
+void
+runtime·noteclear(Note *n)
+{
+ n->lock.key = 0; // memset(n, 0, sizeof *n)
+ eventlock(&n->lock);
+}
+
+void
+runtime·notewakeup(Note *n)
+{
+ eventunlock(&n->lock);
+}
+
+void
+runtime·notesleep(Note *n)
+{
+ eventlock(&n->lock);
+ eventunlock(&n->lock); // Let other sleepers find out too.
+}
+
+void
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
+{
+ void *thandle;
+
+ USED(stk);
+ USED(g); // assuming g = m->g0
+ USED(fn); // assuming fn = mstart
+
+ thandle = runtime·stdcall(runtime·CreateThread, 6, (uintptr)0, (uintptr)0, runtime·tstart_stdcall, m, (uintptr)0, (uintptr)0);
+ if(thandle == 0) {
+ runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), runtime·getlasterror());
+ runtime·throw("runtime.newosproc");
+ }
+}
+
+// Called to initialize a new m (including the bootstrap m).
+void
+runtime·minit(void)
+{
+}
+
+void
+runtime·gettime(int64 *sec, int32 *usec)
+{
+ int64 count;
+
+ runtime·stdcall(runtime·QueryPerformanceCounter, 1, &count);
+ *sec = count / timerfreq;
+ count %= timerfreq;
+ *usec = count*1000000 / timerfreq;
+}
+
+// Calling stdcall on os stack.
+#pragma textflag 7
+void *
+runtime·stdcall(void *fn, int32 count, ...)
+{
+ return runtime·stdcall_raw(fn, count, (uintptr*)&count + 1);
+}
+
+uintptr
+runtime·syscall(void *fn, uintptr nargs, void *args, uintptr *err)
+{
+ G *oldlock;
+ uintptr ret;
+
+ /*
+ * Lock g to m to ensure we stay on the same stack if we do a callback.
+ */
+ oldlock = m->lockedg;
+ m->lockedg = g;
+ g->lockedm = m;
+
+ runtime·entersyscall();
+ runtime·setlasterror(0);
+ ret = (uintptr)runtime·stdcall_raw(fn, nargs, args);
+ if(err)
+ *err = runtime·getlasterror();
+ runtime·exitsyscall();
+
+ m->lockedg = oldlock;
+ if(oldlock == nil)
+ g->lockedm = nil;
+
+ return ret;
+}
+
+uint32
+runtime·issigpanic(uint32 code)
+{
+ switch(code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ case EXCEPTION_INT_OVERFLOW:
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ case EXCEPTION_FLT_OVERFLOW:
+ case EXCEPTION_FLT_UNDERFLOW:
+ return 1;
+ }
+ return 0;
+}
+
+void
+runtime·sigpanic(void)
+{
+ switch(g->sig) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ if(g->sigcode1 < 0x1000)
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ runtime·panicstring("integer divide by zero");
+ case EXCEPTION_INT_OVERFLOW:
+ runtime·panicstring("integer overflow");
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ case EXCEPTION_FLT_OVERFLOW:
+ case EXCEPTION_FLT_UNDERFLOW:
+ runtime·panicstring("floating point error");
+ }
+ runtime·throw("fault");
+}
+
+String
+runtime·signame(int32 sig)
+{
+ int8 *s;
+
+ switch(sig) {
+ case SIGINT:
+ s = "SIGINT: interrupt";
+ break;
+ default:
+ return runtime·emptystring;
+ }
+ return runtime·gostringnocopy((byte*)s);
+}
+
+uint32
+runtime·ctrlhandler1(uint32 type)
+{
+ int32 s;
+
+ switch(type) {
+ case CTRL_C_EVENT:
+ case CTRL_BREAK_EVENT:
+ s = SIGINT;
+ break;
+ default:
+ return 0;
+ }
+
+ if(runtime·sigsend(s))
+ return 1;
+ runtime·exit(2); // SIGINT, SIGTERM, etc
+ return 0;
+}
+
+// Will keep all callbacks in a linked list, so they don't get garbage collected.
+typedef struct Callback Callback;
+struct Callback {
+ Callback* link;
+ void* gobody;
+ byte asmbody;
+};
+
+typedef struct Callbacks Callbacks;
+struct Callbacks {
+ Lock;
+ Callback* link;
+ int32 n;
+};
+
+static Callbacks cbs;
+
+// Call back from windows dll into go.
+byte *
+runtime·compilecallback(Eface fn, bool cleanstack)
+{
+ FuncType *ft;
+ Type *t;
+ int32 argsize, i, n;
+ byte *p;
+ Callback *c;
+
+ if(fn.type == nil || fn.type->kind != KindFunc)
+ runtime·panicstring("compilecallback: not a function");
+ ft = (FuncType*)fn.type;
+ if(ft->out.len != 1)
+ runtime·panicstring("compilecallback: function must have one output parameter");
+ if(((Type**)ft->out.array)[0]->size != sizeof(uintptr))
+ runtime·panicstring("compilecallback: output parameter size is wrong");
+ argsize = 0;
+ for(i=0; i<ft->in.len; i++) {
+ t = ((Type**)ft->in.array)[i];
+ if(t->size != sizeof(uintptr))
+ runtime·panicstring("compilecallback: input parameter size is wrong");
+ argsize += t->size;
+ }
+
+ // compute size of new fn.
+ // must match code laid out below.
+ n = 1+4; // MOVL fn, AX
+ n += 1+4; // MOVL argsize, DX
+ n += 1+4; // MOVL callbackasm, CX
+ n += 2; // CALL CX
+ n += 1; // RET
+ if(cleanstack)
+ n += 2; // ... argsize
+
+ runtime·lock(&cbs);
+ for(c = cbs.link; c != nil; c = c->link) {
+ if(c->gobody == fn.data) {
+ runtime·unlock(&cbs);
+ return &c->asmbody;
+ }
+ }
+ if(cbs.n >= 2000)
+ runtime·throw("too many callback functions");
+ c = runtime·mal(sizeof *c + n);
+ c->gobody = fn.data;
+ c->link = cbs.link;
+ cbs.link = c;
+ cbs.n++;
+ runtime·unlock(&cbs);
+
+ p = &c->asmbody;
+
+ // MOVL fn, AX
+ *p++ = 0xb8;
+ *(uint32*)p = (uint32)fn.data;
+ p += 4;
+
+ // MOVL argsize, DX
+ *p++ = 0xba;
+ *(uint32*)p = argsize;
+ p += 4;
+
+ // MOVL callbackasm, CX
+ *p++ = 0xb9;
+ *(uint32*)p = (uint32)runtime·callbackasm;
+ p += 4;
+
+ // CALL CX
+ *p++ = 0xff;
+ *p++ = 0xd1;
+
+ // RET argsize?
+ if(cleanstack) {
+ *p++ = 0xc2;
+ *(uint16*)p = argsize;
+ } else
+ *p = 0xc3;
+
+ return &c->asmbody;
+}
+
+void
+os·sigpipe(void)
+{
+ runtime·throw("too many writes on closed pipe");
+}