diff options
Diffstat (limited to 'src/pkg/runtime/amd64')
-rw-r--r--  src/pkg/runtime/amd64/asm.s       | 290
-rw-r--r--  src/pkg/runtime/amd64/closure.c   |  12
-rw-r--r--  src/pkg/runtime/amd64/memmove.s   |   2
-rw-r--r--  src/pkg/runtime/amd64/traceback.c |  44
4 files changed, 205 insertions, 143 deletions
diff --git a/src/pkg/runtime/amd64/asm.s b/src/pkg/runtime/amd64/asm.s index 52b0a89bc..235f27206 100644 --- a/src/pkg/runtime/amd64/asm.s +++ b/src/pkg/runtime/amd64/asm.s @@ -4,7 +4,7 @@ #include "amd64/asm.h" -TEXT _rt0_amd64(SB),7,$-8 +TEXT _rt0_amd64(SB),7,$-8 // copy arguments forward on an even stack MOVQ 0(DI), AX // argc LEAQ 8(DI), BX // argv @@ -16,54 +16,72 @@ TEXT _rt0_amd64(SB),7,$-8 // if there is an initcgo, call it. MOVQ initcgo(SB), AX TESTQ AX, AX - JZ 2(PC) + JZ needtls CALL AX - - // set the per-goroutine and per-mach registers - LEAQ m0(SB), m - LEAQ g0(SB), g - MOVQ g, m_g0(m) // m has pointer to its g0 + JMP ok + +needtls: + LEAQ runtime·tls0(SB), DI + CALL runtime·settls(SB) + + // store through it, to make sure it works + get_tls(BX) + MOVQ $0x123, g(BX) + MOVQ runtime·tls0(SB), AX + CMPQ AX, $0x123 + JEQ 2(PC) + MOVL AX, 0 // abort +ok: + // set the per-goroutine and per-mach "registers" + get_tls(BX) + LEAQ runtime·g0(SB), CX + MOVQ CX, g(BX) + LEAQ runtime·m0(SB), AX + MOVQ AX, m(BX) + + // save m->g0 = g0 + MOVQ CX, m_g0(AX) // create istack out of the given (operating system) stack LEAQ (-8192+104)(SP), AX - MOVQ AX, g_stackguard(g) - MOVQ SP, g_stackbase(g) + MOVQ AX, g_stackguard(CX) + MOVQ SP, g_stackbase(CX) CLD // convention is D is always left cleared - CALL check(SB) + CALL runtime·check(SB) MOVL 16(SP), AX // copy argc MOVL AX, 0(SP) MOVQ 24(SP), AX // copy argv MOVQ AX, 8(SP) - CALL args(SB) - CALL osinit(SB) - CALL schedinit(SB) + CALL runtime·args(SB) + CALL runtime·osinit(SB) + CALL runtime·schedinit(SB) // create a new goroutine to start program - PUSHQ $mainstart(SB) // entry + PUSHQ $runtime·mainstart(SB) // entry PUSHQ $0 // arg size - CALL ·newproc(SB) + CALL runtime·newproc(SB) POPQ AX POPQ AX // start this M - CALL mstart(SB) + CALL runtime·mstart(SB) - CALL notok(SB) // never returns + CALL runtime·notok(SB) // never returns RET -TEXT mainstart(SB),7,$0 +TEXT runtime·mainstart(SB),7,$0 CALL main·init(SB) - CALL 
initdone(SB) + CALL runtime·initdone(SB) CALL main·main(SB) PUSHQ $0 - CALL exit(SB) + CALL runtime·exit(SB) POPQ AX - CALL notok(SB) + CALL runtime·notok(SB) RET -TEXT breakpoint(SB),7,$0 +TEXT runtime·breakpoint(SB),7,$0 BYTE $0xcc RET @@ -73,23 +91,27 @@ TEXT breakpoint(SB),7,$0 // uintptr gosave(Gobuf*) // save state in Gobuf; setjmp -TEXT gosave(SB), 7, $0 +TEXT runtime·gosave(SB), 7, $0 MOVQ 8(SP), AX // gobuf LEAQ 8(SP), BX // caller's SP MOVQ BX, gobuf_sp(AX) MOVQ 0(SP), BX // caller's PC MOVQ BX, gobuf_pc(AX) - MOVQ g, gobuf_g(AX) + get_tls(CX) + MOVQ g(CX), BX + MOVQ BX, gobuf_g(AX) MOVL $0, AX // return 0 RET // void gogo(Gobuf*, uintptr) // restore state from Gobuf; longjmp -TEXT gogo(SB), 7, $0 +TEXT runtime·gogo(SB), 7, $0 MOVQ 16(SP), AX // return 2nd arg MOVQ 8(SP), BX // gobuf - MOVQ gobuf_g(BX), g - MOVQ 0(g), CX // make sure g != nil + MOVQ gobuf_g(BX), DX + MOVQ 0(DX), CX // make sure g != nil + get_tls(CX) + MOVQ DX, g(CX) MOVQ gobuf_sp(BX), SP // restore SP MOVQ gobuf_pc(BX), BX JMP BX @@ -97,11 +119,13 @@ TEXT gogo(SB), 7, $0 // void gogocall(Gobuf*, void (*fn)(void)) // restore state from Gobuf but then call fn. // (call fn, returning to state in Gobuf) -TEXT gogocall(SB), 7, $0 +TEXT runtime·gogocall(SB), 7, $0 MOVQ 16(SP), AX // fn MOVQ 8(SP), BX // gobuf - MOVQ gobuf_g(BX), g - MOVQ 0(g), CX // make sure g != nil + MOVQ gobuf_g(BX), DX + get_tls(CX) + MOVQ DX, g(CX) + MOVQ 0(DX), CX // make sure g != nil MOVQ gobuf_sp(BX), SP // restore SP MOVQ gobuf_pc(BX), BX PUSHQ BX @@ -113,24 +137,34 @@ TEXT gogocall(SB), 7, $0 */ // Called during function prolog when more stack is needed. -TEXT ·morestack(SB),7,$0 +// Caller has already done get_tls(CX); MOVQ m(CX), BX. +TEXT runtime·morestack(SB),7,$0 + // Cannot grow scheduler stack (m->g0). + MOVQ m_g0(BX), SI + CMPQ g(CX), SI + JNE 2(PC) + INT $3 + // Called from f. // Set m->morebuf to f's caller. 
MOVQ 8(SP), AX // f's caller's PC - MOVQ AX, (m_morebuf+gobuf_pc)(m) + MOVQ AX, (m_morebuf+gobuf_pc)(BX) LEAQ 16(SP), AX // f's caller's SP - MOVQ AX, (m_morebuf+gobuf_sp)(m) - MOVQ AX, (m_morefp)(m) - MOVQ g, (m_morebuf+gobuf_g)(m) + MOVQ AX, (m_morebuf+gobuf_sp)(BX) + MOVQ AX, (m_morefp)(BX) + get_tls(CX) + MOVQ g(CX), SI + MOVQ SI, (m_morebuf+gobuf_g)(BX) // Set m->morepc to f's PC. MOVQ 0(SP), AX - MOVQ AX, m_morepc(m) + MOVQ AX, m_morepc(BX) // Call newstack on m's scheduling stack. - MOVQ m_g0(m), g - MOVQ (m_sched+gobuf_sp)(m), SP - CALL newstack(SB) + MOVQ m_g0(BX), BP + MOVQ BP, g(CX) + MOVQ (m_sched+gobuf_sp)(BX), SP + CALL runtime·newstack(SB) MOVQ $0, 0x1003 // crash if newstack returns RET @@ -140,13 +174,17 @@ TEXT ·morestack(SB),7,$0 // // func call(fn *byte, arg *byte, argsize uint32). TEXT reflect·call(SB), 7, $0 + get_tls(CX) + MOVQ m(CX), BX + // Save our caller's state as the PC and SP to // restore when returning from f. MOVQ 0(SP), AX // our caller's PC - MOVQ AX, (m_morebuf+gobuf_pc)(m) + MOVQ AX, (m_morebuf+gobuf_pc)(BX) LEAQ 8(SP), AX // our caller's SP - MOVQ AX, (m_morebuf+gobuf_sp)(m) - MOVQ g, (m_morebuf+gobuf_g)(m) + MOVQ AX, (m_morebuf+gobuf_sp)(BX) + MOVQ g(CX), AX + MOVQ AX, (m_morebuf+gobuf_g)(BX) // Set up morestack arguments to call f on a new stack. // We set f's frame size to 1, as a hint to newstack @@ -155,94 +193,109 @@ TEXT reflect·call(SB), 7, $0 // the default stack, f's usual stack growth prolog will // allocate a new segment (and recopy the arguments). 
MOVQ 8(SP), AX // fn - MOVQ 16(SP), BX // arg frame + MOVQ 16(SP), DX // arg frame MOVL 24(SP), CX // arg size - MOVQ AX, m_morepc(m) // f's PC - MOVQ BX, m_morefp(m) // argument frame pointer - MOVL CX, m_moreargs(m) // f's argument size - MOVL $1, m_moreframe(m) // f's frame size + MOVQ AX, m_morepc(BX) // f's PC + MOVQ DX, m_morefp(BX) // argument frame pointer + MOVL CX, m_moreargs(BX) // f's argument size + MOVL $1, m_moreframe(BX) // f's frame size // Call newstack on m's scheduling stack. - MOVQ m_g0(m), g - MOVQ (m_sched+gobuf_sp)(m), SP - CALL newstack(SB) + MOVQ m_g0(BX), BP + get_tls(CX) + MOVQ BP, g(CX) + MOVQ (m_sched+gobuf_sp)(BX), SP + CALL runtime·newstack(SB) MOVQ $0, 0x1103 // crash if newstack returns RET // Return point when leaving stack. -TEXT ·lessstack(SB), 7, $0 +TEXT runtime·lessstack(SB), 7, $0 // Save return value in m->cret - MOVQ AX, m_cret(m) + get_tls(CX) + MOVQ m(CX), BX + MOVQ AX, m_cret(BX) // Call oldstack on m's scheduling stack. - MOVQ m_g0(m), g - MOVQ (m_sched+gobuf_sp)(m), SP - CALL oldstack(SB) + MOVQ m_g0(BX), DX + MOVQ DX, g(CX) + MOVQ (m_sched+gobuf_sp)(BX), SP + CALL runtime·oldstack(SB) MOVQ $0, 0x1004 // crash if oldstack returns RET // morestack trampolines -TEXT ·morestack00+0(SB),7,$0 +TEXT runtime·morestack00(SB),7,$0 + get_tls(CX) + MOVQ m(CX), BX MOVQ $0, AX - MOVQ AX, m_moreframe(m) - MOVQ $·morestack+0(SB), AX + MOVQ AX, m_moreframe(BX) + MOVQ $runtime·morestack(SB), AX JMP AX -TEXT ·morestack01+0(SB),7,$0 +TEXT runtime·morestack01(SB),7,$0 + get_tls(CX) + MOVQ m(CX), BX SHLQ $32, AX - MOVQ AX, m_moreframe(m) - MOVQ $·morestack+0(SB), AX + MOVQ AX, m_moreframe(BX) + MOVQ $runtime·morestack(SB), AX JMP AX -TEXT ·morestack10+0(SB),7,$0 +TEXT runtime·morestack10(SB),7,$0 + get_tls(CX) + MOVQ m(CX), BX MOVLQZX AX, AX - MOVQ AX, m_moreframe(m) - MOVQ $·morestack+0(SB), AX + MOVQ AX, m_moreframe(BX) + MOVQ $runtime·morestack(SB), AX JMP AX -TEXT ·morestack11+0(SB),7,$0 - MOVQ AX, m_moreframe(m) - MOVQ 
$·morestack+0(SB), AX +TEXT runtime·morestack11(SB),7,$0 + get_tls(CX) + MOVQ m(CX), BX + MOVQ AX, m_moreframe(BX) + MOVQ $runtime·morestack(SB), AX JMP AX // subcases of morestack01 // with const of 8,16,...48 -TEXT ·morestack8(SB),7,$0 +TEXT runtime·morestack8(SB),7,$0 PUSHQ $1 - MOVQ $·morestackx(SB), AX + MOVQ $morestack<>(SB), AX JMP AX -TEXT ·morestack16(SB),7,$0 +TEXT runtime·morestack16(SB),7,$0 PUSHQ $2 - MOVQ $·morestackx(SB), AX + MOVQ $morestack<>(SB), AX JMP AX -TEXT ·morestack24(SB),7,$0 +TEXT runtime·morestack24(SB),7,$0 PUSHQ $3 - MOVQ $·morestackx(SB), AX + MOVQ $morestack<>(SB), AX JMP AX -TEXT ·morestack32(SB),7,$0 +TEXT runtime·morestack32(SB),7,$0 PUSHQ $4 - MOVQ $·morestackx(SB), AX + MOVQ $morestack<>(SB), AX JMP AX -TEXT ·morestack40(SB),7,$0 +TEXT runtime·morestack40(SB),7,$0 PUSHQ $5 - MOVQ $·morestackx(SB), AX + MOVQ $morestack<>(SB), AX JMP AX -TEXT ·morestack48(SB),7,$0 +TEXT runtime·morestack48(SB),7,$0 PUSHQ $6 - MOVQ $·morestackx(SB), AX + MOVQ $morestack<>(SB), AX JMP AX -TEXT ·morestackx(SB),7,$0 +TEXT morestack<>(SB),7,$0 + get_tls(CX) + MOVQ m(CX), BX POPQ AX SHLQ $35, AX - MOVQ AX, m_moreframe(m) - MOVQ $·morestack(SB), AX + MOVQ AX, m_moreframe(BX) + MOVQ $runtime·morestack(SB), AX JMP AX // bool cas(int32 *val, int32 old, int32 new) @@ -252,7 +305,7 @@ TEXT ·morestackx(SB),7,$0 // return 1; // } else // return 0; -TEXT cas(SB), 7, $0 +TEXT runtime·cas(SB), 7, $0 MOVQ 8(SP), BX MOVL 16(SP), AX MOVL 20(SP), CX @@ -269,7 +322,7 @@ TEXT cas(SB), 7, $0 // 1. pop the caller // 2. sub 5 bytes from the callers return // 3. jmp to the argument -TEXT jmpdefer(SB), 7, $0 +TEXT runtime·jmpdefer(SB), 7, $0 MOVQ 8(SP), AX // fn MOVQ 16(SP), BX // caller sp LEAQ -8(BX), SP // caller sp after CALL @@ -279,80 +332,80 @@ TEXT jmpdefer(SB), 7, $0 // runcgo(void(*fn)(void*), void *arg) // Call fn(arg) on the scheduler stack, // aligned appropriately for the gcc ABI. -// Save g and m across the call, -// since the foreign code might reuse them. 
-TEXT runcgo(SB),7,$32 +TEXT runtime·runcgo(SB),7,$32 MOVQ fn+0(FP), R12 MOVQ arg+8(FP), R13 MOVQ SP, CX // Figure out if we need to switch to m->g0 stack. - MOVQ m_g0(m), SI - CMPQ SI, g + get_tls(DI) + MOVQ m(DI), DX + MOVQ m_g0(DX), SI + CMPQ g(DI), SI JEQ 2(PC) - MOVQ (m_sched+gobuf_sp)(m), SP + MOVQ (m_sched+gobuf_sp)(DX), SP // Now on a scheduling stack (a pthread-created stack). SUBQ $32, SP ANDQ $~15, SP // alignment for gcc ABI - MOVQ g, 24(SP) // save old g, m, SP - MOVQ m, 16(SP) + MOVQ g(DI), BP + MOVQ BP, 16(SP) + MOVQ SI, g(DI) MOVQ CX, 8(SP) - - // Save g and m values for a potential callback. The callback - // will start running with on the g0 stack and as such should - // have g set to m->g0. - MOVQ m, DI // DI = first argument in AMD64 ABI - // SI, second argument, set above - MOVQ libcgo_set_scheduler(SB), BX - CALL BX - MOVQ R13, DI // DI = first argument in AMD64 ABI CALL R12 - // Restore registers, stack pointer. - MOVQ 16(SP), m - MOVQ 24(SP), g + // Restore registers, g, stack pointer. + get_tls(DI) + MOVQ 16(SP), SI + MOVQ SI, g(DI) MOVQ 8(SP), SP RET // runcgocallback(G *g1, void* sp, void (*fn)(void)) // Switch to g1 and sp, call fn, switch back. fn's arguments are on // the new stack. -TEXT runcgocallback(SB),7,$48 +TEXT runtime·runcgocallback(SB),7,$48 MOVQ g1+0(FP), DX MOVQ sp+8(FP), AX MOVQ fp+16(FP), BX - MOVQ DX, g - // We are running on m's scheduler stack. Save current SP // into m->sched.sp so that a recursive call to runcgo doesn't // clobber our stack, and also so that we can restore // the SP when the call finishes. Reusing m->sched.sp // for this purpose depends on the fact that there is only // one possible gosave of m->sched. 
- MOVQ SP, (m_sched+gobuf_sp)(m) + get_tls(CX) + MOVQ DX, g(CX) + MOVQ m(CX), CX + MOVQ SP, (m_sched+gobuf_sp)(CX) // Set new SP, call fn MOVQ AX, SP CALL BX - // Restore old SP, return - MOVQ (m_sched+gobuf_sp)(m), SP + // Restore old g and SP, return + get_tls(CX) + MOVQ m(CX), DX + MOVQ m_g0(DX), BX + MOVQ BX, g(CX) + MOVQ (m_sched+gobuf_sp)(DX), SP RET // check that SP is in range [g->stackbase, g->stackguard) -TEXT stackcheck(SB), 7, $0 - CMPQ g_stackbase(g), SP +TEXT runtime·stackcheck(SB), 7, $0 + get_tls(CX) + MOVQ g(CX), AX + CMPQ g_stackbase(AX), SP JHI 2(PC) INT $3 - CMPQ SP, g_stackguard(g) + CMPQ SP, g_stackguard(AX) JHI 2(PC) INT $3 RET -TEXT ·memclr(SB),7,$0 +TEXT runtime·memclr(SB),7,$0 MOVQ 8(SP), DI // arg 1 addr MOVL 16(SP), CX // arg 2 count ADDL $7, CX @@ -363,20 +416,19 @@ TEXT ·memclr(SB),7,$0 STOSQ RET -TEXT ·getcallerpc+0(SB),7,$0 +TEXT runtime·getcallerpc(SB),7,$0 MOVQ x+0(FP),AX // addr of first arg MOVQ -8(AX),AX // get calling pc RET -TEXT ·setcallerpc+0(SB),7,$0 +TEXT runtime·setcallerpc(SB),7,$0 MOVQ x+0(FP),AX // addr of first arg MOVQ x+8(FP), BX MOVQ BX, -8(AX) // set calling pc RET -TEXT getcallersp(SB),7,$0 +TEXT runtime·getcallersp(SB),7,$0 MOVQ sp+0(FP), AX RET -GLOBL initcgo(SB), $8 -GLOBL libcgo_set_scheduler(SB), $8 +GLOBL runtime·tls0(SB), $64 diff --git a/src/pkg/runtime/amd64/closure.c b/src/pkg/runtime/amd64/closure.c index de2d1695f..5033468d2 100644 --- a/src/pkg/runtime/amd64/closure.c +++ b/src/pkg/runtime/amd64/closure.c @@ -9,20 +9,20 @@ // fn func(arg0, arg1, arg2 *ptr, callerpc uintptr, xxx) yyy, // arg0, arg1, arg2 *ptr) (func(xxx) yyy) void -·closure(int32 siz, byte *fn, byte *arg0) +runtime·closure(int32 siz, byte *fn, byte *arg0) { byte *p, *q, **ret; int32 i, n; int64 pcrel; if(siz < 0 || siz%8 != 0) - throw("bad closure size"); + runtime·throw("bad closure size"); ret = (byte**)((byte*)&arg0 + siz); if(siz > 100) { // TODO(rsc): implement stack growth preamble? 
- throw("closure too big"); + runtime·throw("closure too big"); } // compute size of new fn. @@ -40,12 +40,12 @@ void if(n%8) n += 8 - n%8; - p = mal(n); + p = runtime·mal(n); *ret = p; q = p + n - siz; if(siz > 0) { - mcpy(q, (byte*)&arg0, siz); + runtime·mcpy(q, (byte*)&arg0, siz); // SUBQ $siz, SP *p++ = 0x48; @@ -117,7 +117,7 @@ void *p++ = 0xc3; if(p > q) - throw("bad math in sys.closure"); + runtime·throw("bad math in sys.closure"); } diff --git a/src/pkg/runtime/amd64/memmove.s b/src/pkg/runtime/amd64/memmove.s index d755580dc..9966b0ba7 100644 --- a/src/pkg/runtime/amd64/memmove.s +++ b/src/pkg/runtime/amd64/memmove.s @@ -23,7 +23,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. - TEXT memmove(SB), 7, $0 +TEXT runtime·memmove(SB), 7, $0 MOVQ to+0(FP), DI MOVQ fr+8(FP), SI diff --git a/src/pkg/runtime/amd64/traceback.c b/src/pkg/runtime/amd64/traceback.c index 20e9200e5..3ea80a661 100644 --- a/src/pkg/runtime/amd64/traceback.c +++ b/src/pkg/runtime/amd64/traceback.c @@ -6,6 +6,8 @@ #include "malloc.h" static uintptr isclosureentry(uintptr); +void runtime·deferproc(void); +void runtime·newproc(void); // This code is also used for the 386 tracebacks. // Use uintptr for an appropriate word-sized integer. @@ -19,7 +21,7 @@ gentraceback(byte *pc0, byte *sp, G *g, int32 skip, uintptr *pcbuf, int32 m) { byte *p; int32 i, n, iter, nascent; - uintptr pc, tracepc; + uintptr pc, tracepc, *fp; Stktop *stk; Func *f; @@ -33,7 +35,7 @@ gentraceback(byte *pc0, byte *sp, G *g, int32 skip, uintptr *pcbuf, int32 m) } nascent = 0; - if(pc0 == g->sched.pc && sp == g->sched.sp && pc0 == (byte*)goexit) { + if(pc0 == g->sched.pc && sp == g->sched.sp && pc0 == (byte*)runtime·goexit) { // Hasn't started yet. g->sched is set up for goexit // but goroutine will start at g->entry. 
nascent = 1; @@ -43,7 +45,7 @@ gentraceback(byte *pc0, byte *sp, G *g, int32 skip, uintptr *pcbuf, int32 m) n = 0; stk = (Stktop*)g->stackbase; for(iter = 0; iter < 100 && n < m; iter++) { // iter avoids looping forever - if(pc == (uintptr)·lessstack) { + if(pc == (uintptr)runtime·lessstack) { // Hit top of stack segment. Unwind to next segment. pc = (uintptr)stk->gobuf.pc; sp = stk->gobuf.sp; @@ -51,13 +53,14 @@ gentraceback(byte *pc0, byte *sp, G *g, int32 skip, uintptr *pcbuf, int32 m) continue; } - if(pc <= 0x1000 || (f = findfunc(pc)) == nil) { + if(pc <= 0x1000 || (f = runtime·findfunc(pc)) == nil) { // Dangerous, but worthwhile: see if this is a closure: // ADDQ $wwxxyyzz, SP; RET // [48] 81 c4 zz yy xx ww c3 // The 0x48 byte is only on amd64. p = (byte*)pc; - if(mheap.min < p && p+8 < mheap.max && // pointer in allocated memory + // We check p < p+8 to avoid wrapping and faulting if we lose track. + if(runtime·mheap.min < p && p < p+8 && p+8 < runtime·mheap.max && // pointer in allocated memory (sizeof(uintptr) != 8 || *p++ == 0x48) && // skip 0x48 byte on amd64 p[0] == 0x81 && p[1] == 0xc4 && p[6] == 0xc3) { sp += *(uint32*)(p+2); @@ -82,24 +85,29 @@ gentraceback(byte *pc0, byte *sp, G *g, int32 skip, uintptr *pcbuf, int32 m) // Print during crash. // main+0xf /home/rsc/go/src/runtime/x.go:23 // main(0x1, 0x2, 0x3) - printf("%S", f->name); + runtime·printf("%S", f->name); if(pc > f->entry) - printf("+%p", (uintptr)(pc - f->entry)); + runtime·printf("+%p", (uintptr)(pc - f->entry)); tracepc = pc; // back up to CALL instruction for funcline. 
if(n > 0 && pc > f->entry) tracepc--; - printf(" %S:%d\n", f->src, funcline(f, tracepc)); - printf("\t%S(", f->name); + runtime·printf(" %S:%d\n", f->src, runtime·funcline(f, tracepc)); + runtime·printf("\t%S(", f->name); + fp = (uintptr*)sp; + if(f->frame < sizeof(uintptr)) + fp++; + else + fp += f->frame/sizeof(uintptr); for(i = 0; i < f->args; i++) { if(i != 0) - prints(", "); - ·printhex(((uintptr*)sp)[i]); + runtime·prints(", "); + runtime·printhex(fp[i]); if(i >= 4) { - prints(", ..."); + runtime·prints(", ..."); break; } } - prints(")\n"); + runtime·prints(")\n"); n++; } @@ -115,24 +123,26 @@ gentraceback(byte *pc0, byte *sp, G *g, int32 skip, uintptr *pcbuf, int32 m) else sp += f->frame; pc = *((uintptr*)sp - 1); + if(f->entry == (uintptr)runtime·deferproc || f->entry == (uintptr)runtime·newproc) + sp += 2*sizeof(uintptr); } return n; } void -traceback(byte *pc0, byte *sp, byte*, G *g) +runtime·traceback(byte *pc0, byte *sp, byte*, G *g) { gentraceback(pc0, sp, g, 0, nil, 100); } int32 -callers(int32 skip, uintptr *pcbuf, int32 m) +runtime·callers(int32 skip, uintptr *pcbuf, int32 m) { byte *pc, *sp; // our caller's pc, sp. sp = (byte*)&skip; - pc = ·getcallerpc(&skip); + pc = runtime·getcallerpc(&skip); return gentraceback(pc, sp, g, skip, pcbuf, m); } @@ -144,7 +154,7 @@ isclosureentry(uintptr pc) int32 i, siz; p = (byte*)pc; - if(p < mheap.min || p+32 > mheap.max) + if(p < runtime·mheap.min || p+32 > runtime·mheap.max) return 0; // SUBQ $siz, SP |