Diffstat (limited to 'src/pkg/runtime/386')
-rw-r--r--  src/pkg/runtime/386/arch.h      3
-rw-r--r--  src/pkg/runtime/386/asm.s     549
-rw-r--r--  src/pkg/runtime/386/atomic.c   19
-rw-r--r--  src/pkg/runtime/386/closure.c 105
-rw-r--r--  src/pkg/runtime/386/memmove.s  86
-rw-r--r--  src/pkg/runtime/386/vlop.s     48
-rw-r--r--  src/pkg/runtime/386/vlrt.c    815
7 files changed, 1625 insertions, 0 deletions
diff --git a/src/pkg/runtime/386/arch.h b/src/pkg/runtime/386/arch.h
new file mode 100644
index 000000000..d95c7aa81
--- /dev/null
+++ b/src/pkg/runtime/386/arch.h
@@ -0,0 +1,3 @@
+enum {
+ thechar = '8'
+};
diff --git a/src/pkg/runtime/386/asm.s b/src/pkg/runtime/386/asm.s
new file mode 100644
index 000000000..a14518839
--- /dev/null
+++ b/src/pkg/runtime/386/asm.s
@@ -0,0 +1,549 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "386/asm.h"
+
+TEXT _rt0_386(SB),7,$0
+ // Linux, Windows start the FPU in extended double precision.
+ // Other operating systems use double precision.
+ // Change to double precision to match them,
+ // and to match other hardware that only has double.
+ PUSHL $0x27F
+ FLDCW 0(SP)
+ POPL AX
+
+ // copy arguments forward on an even stack
+ MOVL 0(SP), AX // argc
+ LEAL 4(SP), BX // argv
+ SUBL $128, SP // plenty of scratch
+ ANDL $~15, SP
+ MOVL AX, 120(SP) // save argc, argv away
+ MOVL BX, 124(SP)
+
+ // if there is an initcgo, call it to let it
+ // initialize and to set up GS. if not,
+ // we set up GS ourselves.
+ MOVL initcgo(SB), AX
+ TESTL AX, AX
+ JZ 4(PC)
+ CALL AX
+ // skip runtime·ldt0setup(SB) and tls test after initcgo for non-windows
+ CMPL runtime·iswindows(SB), $0
+ JEQ ok
+
+ // skip runtime·ldt0setup(SB) and tls test on Plan 9 in all cases
+ CMPL runtime·isplan9(SB), $1
+ JEQ ok
+
+ // set up %gs
+ CALL runtime·ldt0setup(SB)
+
+ // store through it, to make sure it works
+ get_tls(BX)
+ MOVL $0x123, g(BX)
+ MOVL runtime·tls0(SB), AX
+ CMPL AX, $0x123
+ JEQ ok
+ MOVL AX, 0 // abort
+ok:
+ // set up m and g "registers"
+ get_tls(BX)
+ LEAL runtime·g0(SB), CX
+ MOVL CX, g(BX)
+ LEAL runtime·m0(SB), AX
+ MOVL AX, m(BX)
+
+ // save m->g0 = g0
+ MOVL CX, m_g0(AX)
+
+ // create istack out of the OS stack
+ LEAL (-64*1024+104)(SP), AX // TODO: 104?
+ MOVL AX, g_stackguard(CX)
+ MOVL SP, g_stackbase(CX)
+ CALL runtime·emptyfunc(SB) // fault if stack check is wrong
+
+ // convention is D is always cleared
+ CLD
+
+ CALL runtime·check(SB)
+
+ // saved argc, argv
+ MOVL 120(SP), AX
+ MOVL AX, 0(SP)
+ MOVL 124(SP), AX
+ MOVL AX, 4(SP)
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ PUSHL $runtime·mainstart(SB) // entry
+ PUSHL $0 // arg size
+ CALL runtime·newproc(SB)
+ POPL AX
+ POPL AX
+
+ // start this M
+ CALL runtime·mstart(SB)
+
+ INT $3
+ RET
+
+TEXT runtime·mainstart(SB),7,$0
+ CALL main·init(SB)
+ CALL runtime·initdone(SB)
+ CALL main·main(SB)
+ PUSHL $0
+ CALL runtime·exit(SB)
+ POPL AX
+ INT $3
+ RET
+
+TEXT runtime·breakpoint(SB),7,$0
+ INT $3
+ RET
+
+/*
+ * go-routine
+ */
+
+// void gosave(Gobuf*)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), 7, $0
+ MOVL 4(SP), AX // gobuf
+ LEAL 4(SP), BX // caller's SP
+ MOVL BX, gobuf_sp(AX)
+ MOVL 0(SP), BX // caller's PC
+ MOVL BX, gobuf_pc(AX)
+ get_tls(CX)
+ MOVL g(CX), BX
+ MOVL BX, gobuf_g(AX)
+ RET
+
+// void gogo(Gobuf*, uintptr)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), 7, $0
+ MOVL 8(SP), AX // return 2nd arg
+ MOVL 4(SP), BX // gobuf
+ MOVL gobuf_g(BX), DX
+ MOVL 0(DX), CX // make sure g != nil
+ get_tls(CX)
+ MOVL DX, g(CX)
+ MOVL gobuf_sp(BX), SP // restore SP
+ MOVL gobuf_pc(BX), BX
+ JMP BX
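
The gosave/gogo pair is the goroutine analogue of setjmp/longjmp: gosave
records the caller's SP, PC, and current g in a Gobuf, and gogo reloads them
and jumps. The C library shows the same control pattern; a minimal sketch
using setjmp/longjmp rather than the runtime's Gobuf:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf buf;

    static void resume(void)
    {
        longjmp(buf, 1);         /* like gogo: reload saved SP/PC and jump */
    }

    int main(void)
    {
        if (setjmp(buf) == 0) {  /* like gosave: record where to resume */
            printf("state saved\n");
            resume();
        }
        printf("resumed at the saved point\n");
        return 0;
    }
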
+
+// void gogocall(Gobuf*, void (*fn)(void))
+// restore state from Gobuf but then call fn.
+// (call fn, returning to state in Gobuf)
+TEXT runtime·gogocall(SB), 7, $0
+ MOVL 8(SP), AX // fn
+ MOVL 4(SP), BX // gobuf
+ MOVL gobuf_g(BX), DX
+ get_tls(CX)
+ MOVL DX, g(CX)
+ MOVL 0(DX), CX // make sure g != nil
+ MOVL gobuf_sp(BX), SP // restore SP
+ MOVL gobuf_pc(BX), BX
+ PUSHL BX
+ JMP AX
+ POPL BX // not reached
+
+// void mcall(void (*fn)(G*))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall(SB), 7, $0
+ MOVL fn+0(FP), DI
+
+ get_tls(CX)
+ MOVL g(CX), AX // save state in g->gobuf
+ MOVL 0(SP), BX // caller's PC
+ MOVL BX, (g_sched+gobuf_pc)(AX)
+ LEAL 4(SP), BX // caller's SP
+ MOVL BX, (g_sched+gobuf_sp)(AX)
+ MOVL AX, (g_sched+gobuf_g)(AX)
+
+ // switch to m->g0 & its stack, call fn
+ MOVL m(CX), BX
+ MOVL m_g0(BX), SI
+ CMPL SI, AX // if g == m->g0 call badmcall
+ JNE 2(PC)
+ CALL runtime·badmcall(SB)
+ MOVL SI, g(CX) // g = m->g0
+ MOVL (g_sched+gobuf_sp)(SI), SP // sp = m->g0->gobuf.sp
+ PUSHL AX
+ CALL DI
+ POPL AX
+ CALL runtime·badmcall2(SB)
+ RET
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+TEXT runtime·morestack(SB),7,$0
+ // Cannot grow scheduler stack (m->g0).
+ get_tls(CX)
+ MOVL m(CX), BX
+ MOVL m_g0(BX), SI
+ CMPL g(CX), SI
+ JNE 2(PC)
+ INT $3
+
+ // frame size in DX
+ // arg size in AX
+ // Save in m.
+ MOVL DX, m_moreframesize(BX)
+ MOVL AX, m_moreargsize(BX)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVL 4(SP), DI // f's caller's PC
+ MOVL DI, (m_morebuf+gobuf_pc)(BX)
+ LEAL 8(SP), CX // f's caller's SP
+ MOVL CX, (m_morebuf+gobuf_sp)(BX)
+ MOVL CX, m_moreargp(BX)
+ get_tls(CX)
+ MOVL g(CX), SI
+ MOVL SI, (m_morebuf+gobuf_g)(BX)
+
+ // Set m->morepc to f's PC.
+ MOVL 0(SP), AX
+ MOVL AX, m_morepc(BX)
+
+ // Call newstack on m->g0's stack.
+ MOVL m_g0(BX), BP
+ MOVL BP, g(CX)
+ MOVL (g_sched+gobuf_sp)(BP), AX
+ MOVL -4(AX), BX // fault if CALL would, before smashing SP
+ MOVL AX, SP
+ CALL runtime·newstack(SB)
+ MOVL $0, 0x1003 // crash if newstack returns
+ RET
+
+// Called from reflection library. Mimics morestack,
+// reuses stack growth code to create a frame
+// with the desired args running the desired function.
+//
+// func call(fn *byte, arg *byte, argsize uint32).
+TEXT reflect·call(SB), 7, $0
+ get_tls(CX)
+ MOVL m(CX), BX
+
+ // Save our caller's state as the PC and SP to
+ // restore when returning from f.
+ MOVL 0(SP), AX // our caller's PC
+ MOVL AX, (m_morebuf+gobuf_pc)(BX)
+ LEAL 4(SP), AX // our caller's SP
+ MOVL AX, (m_morebuf+gobuf_sp)(BX)
+ MOVL g(CX), AX
+ MOVL AX, (m_morebuf+gobuf_g)(BX)
+
+ // Set up morestack arguments to call f on a new stack.
+ // We set f's frame size to 1, as a hint to newstack
+ // that this is a call from reflect·call.
+ // If it turns out that f needs a larger frame than
+ // the default stack, f's usual stack growth prolog will
+ // allocate a new segment (and recopy the arguments).
+ MOVL 4(SP), AX // fn
+ MOVL 8(SP), DX // arg frame
+ MOVL 12(SP), CX // arg size
+
+ MOVL AX, m_morepc(BX) // f's PC
+ MOVL DX, m_moreargp(BX) // f's argument pointer
+ MOVL CX, m_moreargsize(BX) // f's argument size
+ MOVL $1, m_moreframesize(BX) // f's frame size
+
+ // Call newstack on m->g0's stack.
+ MOVL m_g0(BX), BP
+ get_tls(CX)
+ MOVL BP, g(CX)
+ MOVL (g_sched+gobuf_sp)(BP), SP
+ CALL runtime·newstack(SB)
+ MOVL $0, 0x1103 // crash if newstack returns
+ RET
+
+
+// Return point when leaving stack.
+TEXT runtime·lessstack(SB), 7, $0
+ // Save return value in m->cret
+ get_tls(CX)
+ MOVL m(CX), BX
+ MOVL AX, m_cret(BX)
+
+ // Call oldstack on m->g0's stack.
+ MOVL m_g0(BX), BP
+ MOVL BP, g(CX)
+ MOVL (g_sched+gobuf_sp)(BP), SP
+ CALL runtime·oldstack(SB)
+ MOVL $0, 0x1004 // crash if oldstack returns
+ RET
+
+
+// bool cas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// }else
+// return 0;
+TEXT runtime·cas(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ MOVL 12(SP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ JZ 3(PC)
+ MOVL $0, AX
+ RET
+ MOVL $1, AX
+ RET
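
The LOCK CMPXCHGL pair above is hardware compare-and-swap: CMPXCHGL compares
AX with the memory operand and stores CX only on a match, and the JZ picks
the return value off the resulting flag. The same contract in portable C
(cas_sketch is an illustrative name, not a runtime function):

    #include <stdatomic.h>
    #include <stdint.h>

    /* if(*val == old) { *val = new; return 1; } else return 0;
     * typically compiles to LOCK CMPXCHG on x86. */
    static int cas_sketch(_Atomic uint32_t *val, uint32_t old, uint32_t newv)
    {
        return atomic_compare_exchange_strong(val, &old, newv);
    }
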
+
+// bool casp(void **p, void *old, void *new)
+// Atomically:
+// if(*p == old){
+// *p = new;
+// return 1;
+// }else
+// return 0;
+TEXT runtime·casp(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ MOVL 12(SP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ JZ 3(PC)
+ MOVL $0, AX
+ RET
+ MOVL $1, AX
+ RET
+
+// uint32 xadd(uint32 volatile *val, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime·xadd(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ MOVL AX, CX
+ LOCK
+ XADDL AX, 0(BX)
+ ADDL CX, AX
+ RET
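
Note that XADDL leaves the old value in AX, so the trailing ADDL is what
makes xadd return the new value. The equivalent in C11 atomics (sketch only):

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t xadd_sketch(_Atomic uint32_t *val, int32_t delta)
    {
        /* fetch_add returns the old value, as XADDL does */
        return atomic_fetch_add(val, delta) + delta;
    }
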
+
+TEXT runtime·xchg(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime·procyield(SB),7,$0
+ MOVL 4(SP), AX
+again:
+ PAUSE
+ SUBL $1, AX
+ JNZ again
+ RET
+
+TEXT runtime·atomicstorep(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime·atomicstore(SB), 7, $0
+ MOVL 4(SP), BX
+ MOVL 8(SP), AX
+ XCHGL AX, 0(BX)
+ RET
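
Both stores use XCHGL rather than MOVL because XCHG with a memory operand
asserts LOCK implicitly, making the store a full barrier. A hedged C11
equivalent of what that buys:

    #include <stdatomic.h>
    #include <stdint.h>

    static void atomicstore_sketch(_Atomic uint32_t *addr, uint32_t v)
    {
        /* seq_cst stores on x86 are typically an XCHG (or MOV+MFENCE) */
        atomic_store_explicit(addr, v, memory_order_seq_cst);
    }
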
+
+// void jmpdefer(fn, sp);
+// called from deferreturn.
+// 1. pop the caller
+// 2. sub 5 bytes (the size of the CALL instruction) from the caller's return address
+// 3. jmp to the argument
+TEXT runtime·jmpdefer(SB), 7, $0
+ MOVL 4(SP), AX // fn
+ MOVL 8(SP), BX // caller sp
+ LEAL -4(BX), SP // caller sp after CALL
+ SUBL $5, (SP) // return to CALL again
+ JMP AX // but first run the deferred function
+
+// Dummy function to use in saved gobuf.PC,
+// to match SP pointing at a return address.
+// The gobuf.PC is unused by the contortions here
+// but setting it to return will make the traceback code work.
+TEXT return<>(SB),7,$0
+ RET
+
+// asmcgocall(void(*fn)(void*), void *arg)
+// Call fn(arg) on the scheduler stack,
+// aligned appropriately for the gcc ABI.
+// See cgocall.c for more details.
+TEXT runtime·asmcgocall(SB),7,$0
+ MOVL fn+0(FP), AX
+ MOVL arg+4(FP), BX
+ MOVL SP, DX
+
+ // Figure out if we need to switch to m->g0 stack.
+ // We get called to create new OS threads too, and those
+ // come in on the m->g0 stack already.
+ get_tls(CX)
+ MOVL m(CX), BP
+ MOVL m_g0(BP), SI
+ MOVL g(CX), DI
+ CMPL SI, DI
+ JEQ 6(PC)
+ MOVL SP, (g_sched+gobuf_sp)(DI)
+ MOVL $return<>(SB), (g_sched+gobuf_pc)(DI)
+ MOVL DI, (g_sched+gobuf_g)(DI)
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), SP
+
+ // Now on a scheduling stack (a pthread-created stack).
+ SUBL $32, SP
+ ANDL $~15, SP // alignment, perhaps unnecessary
+ MOVL DI, 8(SP) // save g
+ MOVL DX, 4(SP) // save SP
+ MOVL BX, 0(SP) // first argument in x86-32 ABI
+ CALL AX
+
+ // Restore registers, g, stack pointer.
+ get_tls(CX)
+ MOVL 8(SP), DI
+ MOVL DI, g(CX)
+ MOVL 4(SP), SP
+ RET
+
+// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
+// See cgocall.c for more details.
+TEXT runtime·cgocallback(SB),7,$12
+ MOVL fn+0(FP), AX
+ MOVL frame+4(FP), BX
+ MOVL framesize+8(FP), DX
+
+ // Save current m->g0->sched.sp on stack and then set it to SP.
+ get_tls(CX)
+ MOVL m(CX), BP
+ MOVL m_g0(BP), SI
+ PUSHL (g_sched+gobuf_sp)(SI)
+ MOVL SP, (g_sched+gobuf_sp)(SI)
+
+ // Switch to m->curg stack and call runtime.cgocallback
+ // with the three arguments. Because we are taking over
+ // the execution of m->curg but *not* resuming what had
+ // been running, we need to save that information (m->curg->gobuf)
+ // so that we can restore it when we're done.
+ // We can restore m->curg->gobuf.sp easily, because calling
+ // runtime.cgocallback leaves SP unchanged upon return.
+ // To save m->curg->gobuf.pc, we push it onto the stack.
+ // This has the added benefit that it looks to the traceback
+ // routine like cgocallback is going to return to that
+ // PC (because we defined cgocallback to have
+ // a frame size of 12, the same amount that we use below),
+ // so that the traceback will seamlessly trace back into
+ // the earlier calls.
+ MOVL m_curg(BP), SI
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), DI // prepare stack as DI
+
+ // Push gobuf.pc
+ MOVL (g_sched+gobuf_pc)(SI), BP
+ SUBL $4, DI
+ MOVL BP, 0(DI)
+
+ // Push arguments to cgocallbackg.
+ // Frame size here must match the frame size above
+ // to trick traceback routines into doing the right thing.
+ SUBL $12, DI
+ MOVL AX, 0(DI)
+ MOVL BX, 4(DI)
+ MOVL DX, 8(DI)
+
+ // Switch stack and make the call.
+ MOVL DI, SP
+ CALL runtime·cgocallbackg(SB)
+
+ // Restore g->gobuf (== m->curg->gobuf) from saved values.
+ get_tls(CX)
+ MOVL g(CX), SI
+ MOVL 12(SP), BP
+ MOVL BP, (g_sched+gobuf_pc)(SI)
+ LEAL (12+4)(SP), DI
+ MOVL DI, (g_sched+gobuf_sp)(SI)
+
+ // Switch back to m->g0's stack and restore m->g0->sched.sp.
+ // (Unlike m->curg, the g0 goroutine never uses sched.pc,
+ // so we do not have to restore it.)
+ MOVL m(CX), BP
+ MOVL m_g0(BP), SI
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), SP
+ POPL (g_sched+gobuf_sp)(SI)
+
+ // Done!
+ RET
+
+// check that SP is in range [g->stackguard, g->stackbase)
+TEXT runtime·stackcheck(SB), 7, $0
+ get_tls(CX)
+ MOVL g(CX), AX
+ CMPL g_stackbase(AX), SP
+ JHI 2(PC)
+ INT $3
+ CMPL SP, g_stackguard(AX)
+ JHI 2(PC)
+ INT $3
+ RET
+
+TEXT runtime·memclr(SB),7,$0
+ MOVL 4(SP), DI // arg 1 addr
+ MOVL 8(SP), CX // arg 2 count
+ MOVL CX, BX
+ ANDL $3, BX
+ SHRL $2, CX
+ MOVL $0, AX
+ CLD
+ REP
+ STOSL
+ MOVL BX, CX
+ REP
+ STOSB
+ RET
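
memclr splits the count into words and leftover bytes: REP STOSL clears
count/4 words, then REP STOSB clears the remaining count%4 bytes. The same
computation in C (illustrative sketch; assumes addr is word-aligned for the
word loop):

    #include <stddef.h>
    #include <stdint.h>

    static void memclr_sketch(void *addr, size_t count)
    {
        uint32_t *w = addr;
        size_t i, words = count / 4, tail = count % 4;
        unsigned char *b;

        for (i = 0; i < words; i++)   /* REP STOSL */
            w[i] = 0;
        b = (unsigned char *)(w + words);
        for (i = 0; i < tail; i++)    /* REP STOSB */
            b[i] = 0;
    }
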
+
+TEXT runtime·getcallerpc(SB),7,$0
+ MOVL x+0(FP),AX // addr of first arg
+ MOVL -4(AX),AX // get calling pc
+ RET
+
+TEXT runtime·setcallerpc(SB),7,$0
+ MOVL x+0(FP),AX // addr of first arg
+ MOVL x+4(FP), BX
+ MOVL BX, -4(AX) // set calling pc
+ RET
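
Both routines rely on the 386 frame layout: the return address is the word
immediately below the first argument, hence the -4(AX) indexing. A C picture
of that layout (getcallerpc_sketch is illustrative and compiler-dependent,
not portable C):

    #include <stdint.h>

    /* At entry to f(x):
     *   4(SP)  first argument x   <- &x
     *   0(SP)  return address     <- ((uintptr_t*)&x)[-1]
     */
    static uintptr_t getcallerpc_sketch(void *first_arg)
    {
        return ((uintptr_t *)first_arg)[-1];
    }
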
+
+TEXT runtime·getcallersp(SB), 7, $0
+ MOVL sp+0(FP), AX
+ RET
+
+TEXT runtime·ldt0setup(SB),7,$16
+ // set up ldt 7 to point at tls0
+ // ldt 1 would be fine on Linux, but on OS X, 7 is as low as we can go.
+ // the entry number is just a hint. setldt will set up GS with what it used.
+ MOVL $7, 0(SP)
+ LEAL runtime·tls0(SB), AX
+ MOVL AX, 4(SP)
+ MOVL $32, 8(SP) // sizeof(tls array)
+ CALL runtime·setldt(SB)
+ RET
+
+TEXT runtime·emptyfunc(SB),0,$0
+ RET
+
+TEXT runtime·abort(SB),7,$0
+ INT $0x3
+
+GLOBL runtime·tls0(SB), $32
diff --git a/src/pkg/runtime/386/atomic.c b/src/pkg/runtime/386/atomic.c
new file mode 100644
index 000000000..a4f2a114f
--- /dev/null
+++ b/src/pkg/runtime/386/atomic.c
@@ -0,0 +1,19 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+#pragma textflag 7
+uint32
+runtime·atomicload(uint32 volatile* addr)
+{
+ return *addr;
+}
+
+#pragma textflag 7
+void*
+runtime·atomicloadp(void* volatile* addr)
+{
+ return *addr;
+}
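
Plain dereferences are enough here because on x86 an aligned 32-bit load is
atomic and loads are not reordered with other loads; the #pragma textflag 7
marks these leaf functions to be compiled without a stack split prolog
(among other flags). The portable spelling of the same guarantee, as a
sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t atomicload_sketch(const _Atomic uint32_t *addr)
    {
        /* on x86 this is an ordinary MOV; the ordering comes for free */
        return atomic_load_explicit(addr, memory_order_acquire);
    }
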
diff --git a/src/pkg/runtime/386/closure.c b/src/pkg/runtime/386/closure.c
new file mode 100644
index 000000000..b4d867711
--- /dev/null
+++ b/src/pkg/runtime/386/closure.c
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+#pragma textflag 7
+// func closure(siz int32,
+// fn func(arg0, arg1, arg2 *ptr, callerpc uintptr, xxx) yyy,
+// arg0, arg1, arg2 *ptr) (func(xxx) yyy)
+void
+runtime·closure(int32 siz, byte *fn, byte *arg0)
+{
+ byte *p, *q, **ret;
+ int32 i, n;
+ int32 pcrel;
+
+ if(siz < 0 || siz%4 != 0)
+ runtime·throw("bad closure size");
+
+ ret = (byte**)((byte*)&arg0 + siz);
+
+ if(siz > 100) {
+ // TODO(rsc): implement stack growth preamble?
+ runtime·throw("closure too big");
+ }
+
+ // compute size of new fn.
+ // must match code laid out below.
+ n = 6+5+2+1; // SUBL MOVL MOVL CLD
+ if(siz <= 4*4)
+ n += 1*siz/4; // MOVSL MOVSL...
+ else
+ n += 6+2; // MOVL REP MOVSL
+ n += 5; // CALL
+ n += 6+1; // ADDL RET
+
+ // store args aligned after code, so gc can find them.
+ n += siz;
+ if(n%4)
+ n += 4 - n%4;
+
+ p = runtime·mal(n);
+ *ret = p;
+ q = p + n - siz;
+
+ if(siz > 0) {
+ runtime·memmove(q, (byte*)&arg0, siz);
+
+ // SUBL $siz, SP
+ *p++ = 0x81;
+ *p++ = 0xec;
+ *(uint32*)p = siz;
+ p += 4;
+
+ // MOVL $q, SI
+ *p++ = 0xbe;
+ *(byte**)p = q;
+ p += 4;
+
+ // MOVL SP, DI
+ *p++ = 0x89;
+ *p++ = 0xe7;
+
+ // CLD
+ *p++ = 0xfc;
+
+ if(siz <= 4*4) {
+ for(i=0; i<siz; i+=4) {
+ // MOVSL
+ *p++ = 0xa5;
+ }
+ } else {
+ // MOVL $(siz/4), CX [32-bit immediate siz/4]
+ *p++ = 0xc7;
+ *p++ = 0xc1;
+ *(uint32*)p = siz/4;
+ p += 4;
+
+ // REP; MOVSL
+ *p++ = 0xf3;
+ *p++ = 0xa5;
+ }
+ }
+
+ // call fn
+ pcrel = fn - (p+5);
+ // direct call with pc-relative offset
+ // CALL fn
+ *p++ = 0xe8;
+ *(int32*)p = pcrel;
+ p += 4;
+
+ // ADDL $siz, SP
+ *p++ = 0x81;
+ *p++ = 0xc4;
+ *(uint32*)p = siz;
+ p += 4;
+
+ // RET
+ *p++ = 0xc3;
+
+ if(p > q)
+ runtime·throw("bad math in sys.closure");
+}
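
Pieced together from the byte stores above, the generated thunk is the
following instruction sequence: it re-copies the saved arguments onto the
stack, calls fn, and pops them again. (Reconstruction for reference; the
byte encodings are the ones emitted by the code.)

    /*
     * SUBL $siz, SP        81 ec imm32   reserve space for the args
     * MOVL $q, SI          be imm32      SI = saved argument block
     * MOVL SP, DI          89 e7         DI = the reserved space
     * CLD                  fc            copy upward
     * MOVSL ...            a5 (x siz/4)  siz <= 16: unrolled copy
     * or, for siz > 16:
     * MOVL $(siz/4), CX    c7 c1 imm32
     * REP; MOVSL           f3 a5
     * then:
     * CALL fn              e8 rel32      pc-relative call
     * ADDL $siz, SP        81 c4 imm32   pop the copied args
     * RET                  c3
     */
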
diff --git a/src/pkg/runtime/386/memmove.s b/src/pkg/runtime/386/memmove.s
new file mode 100644
index 000000000..203a8187c
--- /dev/null
+++ b/src/pkg/runtime/386/memmove.s
@@ -0,0 +1,86 @@
+// Inferno's libkern/memmove-386.s
+// http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+TEXT runtime·memmove(SB), 7, $0
+ MOVL to+0(FP), DI
+ MOVL fr+4(FP), SI
+ MOVL n+8(FP), BX
+/*
+ * check and set for backwards
+ */
+ CMPL SI, DI
+ JLS back
+
+/*
+ * forward copy loop
+ */
+forward:
+ MOVL BX, CX
+ SHRL $2, CX
+ ANDL $3, BX
+
+ REP; MOVSL
+ MOVL BX, CX
+ REP; MOVSB
+
+ MOVL to+0(FP),AX
+ RET
+/*
+ * check overlap
+ */
+back:
+ MOVL SI, CX
+ ADDL BX, CX
+ CMPL CX, DI
+ JLS forward
+/*
+ * whole thing backwards has
+ * adjusted addresses
+ */
+
+ ADDL BX, DI
+ ADDL BX, SI
+ STD
+
+/*
+ * copy
+ */
+ MOVL BX, CX
+ SHRL $2, CX
+ ANDL $3, BX
+
+ SUBL $4, DI
+ SUBL $4, SI
+ REP; MOVSL
+
+ ADDL $3, DI
+ ADDL $3, SI
+ MOVL BX, CX
+ REP; MOVSB
+
+ CLD
+ MOVL to+0(FP),AX
+ RET
+
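
The overlap logic: copy forward unless the destination starts inside the
source range [fr, fr+n), in which case copy backward so no byte is
overwritten before it is read. In C (memmove_sketch is an illustration, not
the runtime routine; the cross-object pointer comparisons are informal):

    #include <stddef.h>

    static void *memmove_sketch(void *to, const void *fr, size_t n)
    {
        unsigned char *d = to;
        const unsigned char *s = fr;
        size_t i;

        if (d <= s || d >= s + n) {
            for (i = 0; i < n; i++)      /* forward */
                d[i] = s[i];
        } else {
            for (i = n; i > 0; i--)      /* backward */
                d[i-1] = s[i-1];
        }
        return to;
    }
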
diff --git a/src/pkg/runtime/386/vlop.s b/src/pkg/runtime/386/vlop.s
new file mode 100644
index 000000000..28f6da82d
--- /dev/null
+++ b/src/pkg/runtime/386/vlop.s
@@ -0,0 +1,48 @@
+// Inferno's libkern/vlop-386.s
+// http://code.google.com/p/inferno-os/source/browse/libkern/vlop-386.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+/*
+ * C runtime for 64-bit divide.
+ */
+
+TEXT _mul64by32(SB), 7, $0
+ MOVL r+0(FP), CX
+ MOVL a+4(FP), AX
+ MULL b+12(FP)
+ MOVL AX, 0(CX)
+ MOVL DX, BX
+ MOVL a+8(FP), AX
+ MULL b+12(FP)
+ ADDL AX, BX
+ MOVL BX, 4(CX)
+ RET
+
+TEXT _div64by32(SB), 7, $0
+ MOVL r+12(FP), CX
+ MOVL a+0(FP), AX
+ MOVL a+4(FP), DX
+ DIVL b+8(FP)
+ MOVL DX, 0(CX)
+ RET
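
MULL and DIVL work on 64-bit quantities through the DX:AX pair, which is
what these helpers package up for vlrt.c. Their semantics in C (sketch;
names are illustrative):

    #include <stdint.h>

    /* _mul64by32: low 64 bits of a 64x32 multiply. The first MULL forms
     * a.lo*b, whose high word carries into the second product a.hi*b. */
    static void mul64by32_sketch(uint64_t *r, uint64_t a, uint32_t b)
    {
        uint32_t alo = (uint32_t)a, ahi = (uint32_t)(a >> 32);
        uint64_t lo = (uint64_t)alo * b;
        uint32_t hi = (uint32_t)(lo >> 32) + (uint32_t)((uint64_t)ahi * b);
        *r = ((uint64_t)hi << 32) | (uint32_t)lo;
    }

    /* _div64by32: one DIVL gives a 32-bit quotient and remainder. It
     * faults unless (a >> 32) < b, which dodiv arranges before calling. */
    static uint32_t div64by32_sketch(uint64_t a, uint32_t b, uint32_t *rem)
    {
        *rem = (uint32_t)(a % b);
        return (uint32_t)(a / b);
    }
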
diff --git a/src/pkg/runtime/386/vlrt.c b/src/pkg/runtime/386/vlrt.c
new file mode 100644
index 000000000..1631dbe10
--- /dev/null
+++ b/src/pkg/runtime/386/vlrt.c
@@ -0,0 +1,815 @@
+// Inferno's libkern/vlrt-386.c
+// http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-386.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+/*
+ * C runtime for 64-bit divide, others.
+ *
+ * TODO(rsc): The simple functions are dregs--8c knows how
+ * to generate the code directly now. Find and remove.
+ */
+
+typedef unsigned long ulong;
+typedef unsigned int uint;
+typedef unsigned short ushort;
+typedef unsigned char uchar;
+typedef signed char schar;
+
+#define SIGN(n) (1UL<<(n-1))
+
+typedef struct Vlong Vlong;
+struct Vlong
+{
+ union
+ {
+ long long v;
+ struct
+ {
+ ulong lo;
+ ulong hi;
+ };
+ struct
+ {
+ ushort lols;
+ ushort loms;
+ ushort hils;
+ ushort hims;
+ };
+ };
+};
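
The lo/hi overlay assumes the x86 little-endian layout: the low 32 bits of
the 64-bit value come first in memory. A quick check of that assumption
(standard C11 for the anonymous struct member):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        union { uint64_t v; struct { uint32_t lo, hi; }; } x;
        x.v = 0x0000000100000002ULL;
        assert(x.lo == 2 && x.hi == 1);   /* holds on little-endian */
        return 0;
    }
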
+
+void runtime·abort(void);
+
+void
+_d2v(Vlong *y, double d)
+{
+ union { double d; struct Vlong; } x;
+ ulong xhi, xlo, ylo, yhi;
+ int sh;
+
+ x.d = d;
+
+ xhi = (x.hi & 0xfffff) | 0x100000;
+ xlo = x.lo;
+ sh = 1075 - ((x.hi >> 20) & 0x7ff);
+
+ ylo = 0;
+ yhi = 0;
+ if(sh >= 0) {
+ /* v = (hi||lo) >> sh */
+ if(sh < 32) {
+ if(sh == 0) {
+ ylo = xlo;
+ yhi = xhi;
+ } else {
+ ylo = (xlo >> sh) | (xhi << (32-sh));
+ yhi = xhi >> sh;
+ }
+ } else {
+ if(sh == 32) {
+ ylo = xhi;
+ } else
+ if(sh < 64) {
+ ylo = xhi >> (sh-32);
+ }
+ }
+ } else {
+ /* v = (hi||lo) << -sh */
+ sh = -sh;
+ if(sh <= 10) {
+ ylo = xlo << sh;
+ yhi = (xhi << sh) | (xlo >> (32-sh));
+ } else {
+ /* overflow */
+ yhi = d; /* causes something awful */
+ }
+ }
+ if(x.hi & SIGN(32)) {
+ if(ylo != 0) {
+ ylo = -ylo;
+ yhi = ~yhi;
+ } else
+ yhi = -yhi;
+ }
+
+ y->hi = yhi;
+ y->lo = ylo;
+}
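
The magic numbers decode IEEE 754: 0xfffff/0x100000 isolate the 52-bit
mantissa and restore its implicit leading 1, 0x7ff masks the 11-bit
exponent, and 1075 is the exponent bias 1023 plus the 52 mantissa bits, so
sh is how far the mantissa must shift right to leave the integer part. The
same extraction on a flat 64-bit value (sketch; ignores NaN and infinity,
as _d2v does):

    #include <stdint.h>
    #include <string.h>

    static int64_t d2v_sketch(double d)
    {
        uint64_t bits, mant;
        int64_t v;
        int sh;

        memcpy(&bits, &d, 8);
        mant = (bits & 0xfffffffffffffULL) | (1ULL << 52);
        sh = 1075 - (int)((bits >> 52) & 0x7ff);
        if (sh >= 64)
            v = 0;                        /* integer part is 0 */
        else if (sh >= 0)
            v = (int64_t)(mant >> sh);
        else if (-sh <= 11)
            v = (int64_t)(mant << -sh);   /* still fits in 64 bits */
        else
            v = 0;                        /* overflow; _d2v calls this awful */
        if (bits >> 63)
            v = -v;
        return v;
    }
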
+
+void
+_f2v(Vlong *y, float f)
+{
+
+ _d2v(y, f);
+}
+
+double
+_v2d(Vlong x)
+{
+ if(x.hi & SIGN(32)) {
+ if(x.lo) {
+ x.lo = -x.lo;
+ x.hi = ~x.hi;
+ } else
+ x.hi = -x.hi;
+ return -((long)x.hi*4294967296. + x.lo);
+ }
+ return (long)x.hi*4294967296. + x.lo;
+}
+
+float
+_v2f(Vlong x)
+{
+ return _v2d(x);
+}
+
+ulong _div64by32(Vlong, ulong, ulong*);
+void _mul64by32(Vlong*, Vlong, ulong);
+
+static void
+slowdodiv(Vlong num, Vlong den, Vlong *q, Vlong *r)
+{
+ ulong numlo, numhi, denhi, denlo, quohi, quolo, t;
+ int i;
+
+ numhi = num.hi;
+ numlo = num.lo;
+ denhi = den.hi;
+ denlo = den.lo;
+
+ /*
+ * get a divide by zero
+ */
+ if(denlo==0 && denhi==0) {
+ numlo = numlo / denlo;
+ }
+
+ /*
+ * set up the divisor and find the number of iterations needed
+ */
+ if(numhi >= SIGN(32)) {
+ quohi = SIGN(32);
+ quolo = 0;
+ } else {
+ quohi = numhi;
+ quolo = numlo;
+ }
+ i = 0;
+ while(denhi < quohi || (denhi == quohi && denlo < quolo)) {
+ denhi = (denhi<<1) | (denlo>>31);
+ denlo <<= 1;
+ i++;
+ }
+
+ quohi = 0;
+ quolo = 0;
+ for(; i >= 0; i--) {
+ quohi = (quohi<<1) | (quolo>>31);
+ quolo <<= 1;
+ if(numhi > denhi || (numhi == denhi && numlo >= denlo)) {
+ t = numlo;
+ numlo -= denlo;
+ if(numlo > t)
+ numhi--;
+ numhi -= denhi;
+ quolo |= 1;
+ }
+ denlo = (denlo>>1) | (denhi<<31);
+ denhi >>= 1;
+ }
+
+ if(q) {
+ q->lo = quolo;
+ q->hi = quohi;
+ }
+ if(r) {
+ r->lo = numlo;
+ r->hi = numhi;
+ }
+}
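
slowdodiv is classic restoring division: shift the divisor up under the
dividend, then walk back one bit per iteration, subtracting wherever it
fits and building the quotient bit by bit. The same idea on native 64-bit
integers (sketch; assumes den != 0):

    #include <stdint.h>

    static uint64_t slowdiv_sketch(uint64_t num, uint64_t den, uint64_t *rem)
    {
        uint64_t quo = 0;
        int i = 0;

        while (den < num && !(den >> 63)) {   /* align, without overflowing */
            den <<= 1;
            i++;
        }
        for (; i >= 0; i--) {
            quo <<= 1;
            if (num >= den) {
                num -= den;
                quo |= 1;
            }
            den >>= 1;
        }
        if (rem)
            *rem = num;
        return quo;
    }
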
+
+static void
+dodiv(Vlong num, Vlong den, Vlong *qp, Vlong *rp)
+{
+ ulong n;
+ Vlong x, q, r;
+
+ if(den.hi > num.hi || (den.hi == num.hi && den.lo > num.lo)){
+ if(qp) {
+ qp->hi = 0;
+ qp->lo = 0;
+ }
+ if(rp) {
+ rp->hi = num.hi;
+ rp->lo = num.lo;
+ }
+ return;
+ }
+
+ if(den.hi != 0){
+ q.hi = 0;
+ n = num.hi/den.hi;
+ _mul64by32(&x, den, n);
+ if(x.hi > num.hi || (x.hi == num.hi && x.lo > num.lo))
+ slowdodiv(num, den, &q, &r);
+ else {
+ q.lo = n;
+ r.v = num.v - x.v;
+ }
+ } else {
+ if(num.hi >= den.lo){
+ q.hi = n = num.hi/den.lo;
+ num.hi -= den.lo*n;
+ } else {
+ q.hi = 0;
+ }
+ q.lo = _div64by32(num, den.lo, &r.lo);
+ r.hi = 0;
+ }
+ if(qp) {
+ qp->lo = q.lo;
+ qp->hi = q.hi;
+ }
+ if(rp) {
+ rp->lo = r.lo;
+ rp->hi = r.hi;
+ }
+}
+
+void
+_divvu(Vlong *q, Vlong n, Vlong d)
+{
+
+ if(n.hi == 0 && d.hi == 0) {
+ q->hi = 0;
+ q->lo = n.lo / d.lo;
+ return;
+ }
+ dodiv(n, d, q, 0);
+}
+
+void
+runtime·uint64div(Vlong n, Vlong d, Vlong q)
+{
+ _divvu(&q, n, d);
+}
+
+void
+_modvu(Vlong *r, Vlong n, Vlong d)
+{
+
+ if(n.hi == 0 && d.hi == 0) {
+ r->hi = 0;
+ r->lo = n.lo % d.lo;
+ return;
+ }
+ dodiv(n, d, 0, r);
+}
+
+void
+runtime·uint64mod(Vlong n, Vlong d, Vlong q)
+{
+ _modvu(&q, n, d);
+}
+
+static void
+vneg(Vlong *v)
+{
+
+ if(v->lo == 0) {
+ v->hi = -v->hi;
+ return;
+ }
+ v->lo = -v->lo;
+ v->hi = ~v->hi;
+}
+
+void
+_divv(Vlong *q, Vlong n, Vlong d)
+{
+ long nneg, dneg;
+
+ if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
+ if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
+ // special case: 32-bit -0x80000000 / -1 causes divide error,
+ // but it's okay in this 64-bit context.
+ q->lo = 0x80000000;
+ q->hi = 0;
+ return;
+ }
+ q->lo = (long)n.lo / (long)d.lo;
+ q->hi = ((long)q->lo) >> 31;
+ return;
+ }
+ nneg = n.hi >> 31;
+ if(nneg)
+ vneg(&n);
+ dneg = d.hi >> 31;
+ if(dneg)
+ vneg(&d);
+ dodiv(n, d, q, 0);
+ if(nneg != dneg)
+ vneg(q);
+}
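
The special case exists because IDIV raises a divide error whenever the
quotient does not fit in 32 bits, and -0x80000000 / -1 is exactly that case
(it is also undefined behavior in C at 32 bits). At 64 bits the quotient
0x80000000 is representable, so it is returned directly:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        long long q = (long long)INT_MIN / -1;   /* fine at 64 bits */
        printf("%lld\n", q);                     /* 2147483648 */
        return 0;
    }
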
+
+void
+runtime·int64div(Vlong n, Vlong d, Vlong q)
+{
+ _divv(&q, n, d);
+}
+
+void
+_modv(Vlong *r, Vlong n, Vlong d)
+{
+ long nneg, dneg;
+
+ if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
+ if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
+ // special case: 32-bit -0x80000000 % -1 causes divide error,
+ // but it's okay in this 64-bit context.
+ r->lo = 0;
+ r->hi = 0;
+ return;
+ }
+ r->lo = (long)n.lo % (long)d.lo;
+ r->hi = ((long)r->lo) >> 31;
+ return;
+ }
+ nneg = n.hi >> 31;
+ if(nneg)
+ vneg(&n);
+ dneg = d.hi >> 31;
+ if(dneg)
+ vneg(&d);
+ dodiv(n, d, 0, r);
+ if(nneg)
+ vneg(r);
+}
+
+void
+runtime·int64mod(Vlong n, Vlong d, Vlong q)
+{
+ _modv(&q, n, d);
+}
+
+void
+_rshav(Vlong *r, Vlong a, int b)
+{
+ long t;
+
+ t = a.hi;
+ if(b >= 32) {
+ r->hi = t>>31;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->lo = t>>31;
+ return;
+ }
+ r->lo = t >> (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->hi = t;
+ r->lo = a.lo;
+ return;
+ }
+ r->hi = t >> b;
+ r->lo = (t << (32-b)) | (a.lo >> b);
+}
+
+void
+_rshlv(Vlong *r, Vlong a, int b)
+{
+ ulong t;
+
+ t = a.hi;
+ if(b >= 32) {
+ r->hi = 0;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->lo = 0;
+ return;
+ }
+ r->lo = t >> (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->hi = t;
+ r->lo = a.lo;
+ return;
+ }
+ r->hi = t >> b;
+ r->lo = (t << (32-b)) | (a.lo >> b);
+}
+
+void
+_lshv(Vlong *r, Vlong a, int b)
+{
+ ulong t;
+
+ t = a.lo;
+ if(b >= 32) {
+ r->lo = 0;
+ if(b >= 64) {
+ /* this is illegal re C standard */
+ r->hi = 0;
+ return;
+ }
+ r->hi = t << (b-32);
+ return;
+ }
+ if(b <= 0) {
+ r->lo = t;
+ r->hi = a.hi;
+ return;
+ }
+ r->lo = t << b;
+ r->hi = (t >> (32-b)) | (a.hi << b);
+}
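
All three shift helpers special-case b >= 64 themselves because shifting a
value by its full width or more is undefined in C, and 386 hardware masks
32-bit shift counts to 5 bits, so the "natural" result would be wrong
anyway. They pin the answer to zero, or to the sign fill for the arithmetic
right shift. The guard, distilled:

    #include <stdint.h>

    static uint64_t shl64_sketch(uint64_t x, unsigned b)
    {
        return b >= 64 ? 0 : x << b;   /* x << 64 would be undefined */
    }
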
+
+void
+_andv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi & b.hi;
+ r->lo = a.lo & b.lo;
+}
+
+void
+_orv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi | b.hi;
+ r->lo = a.lo | b.lo;
+}
+
+void
+_xorv(Vlong *r, Vlong a, Vlong b)
+{
+ r->hi = a.hi ^ b.hi;
+ r->lo = a.lo ^ b.lo;
+}
+
+void
+_vpp(Vlong *l, Vlong *r)
+{
+
+ l->hi = r->hi;
+ l->lo = r->lo;
+ r->lo++;
+ if(r->lo == 0)
+ r->hi++;
+}
+
+void
+_vmm(Vlong *l, Vlong *r)
+{
+
+ l->hi = r->hi;
+ l->lo = r->lo;
+ if(r->lo == 0)
+ r->hi--;
+ r->lo--;
+}
+
+void
+_ppv(Vlong *l, Vlong *r)
+{
+
+ r->lo++;
+ if(r->lo == 0)
+ r->hi++;
+ l->hi = r->hi;
+ l->lo = r->lo;
+}
+
+void
+_mmv(Vlong *l, Vlong *r)
+{
+
+ if(r->lo == 0)
+ r->hi--;
+ r->lo--;
+ l->hi = r->hi;
+ l->lo = r->lo;
+}
+
+void
+_vasop(Vlong *ret, void *lv, void fn(Vlong*, Vlong, Vlong), int type, Vlong rv)
+{
+ Vlong t, u;
+
+ u.lo = 0;
+ u.hi = 0;
+ switch(type) {
+ default:
+ runtime·abort();
+ break;
+
+ case 1: /* schar */
+ t.lo = *(schar*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(schar*)lv = u.lo;
+ break;
+
+ case 2: /* uchar */
+ t.lo = *(uchar*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(uchar*)lv = u.lo;
+ break;
+
+ case 3: /* short */
+ t.lo = *(short*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(short*)lv = u.lo;
+ break;
+
+ case 4: /* ushort */
+ t.lo = *(ushort*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(ushort*)lv = u.lo;
+ break;
+
+ case 9: /* int */
+ t.lo = *(int*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(int*)lv = u.lo;
+ break;
+
+ case 10: /* uint */
+ t.lo = *(uint*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(uint*)lv = u.lo;
+ break;
+
+ case 5: /* long */
+ t.lo = *(long*)lv;
+ t.hi = t.lo >> 31;
+ fn(&u, t, rv);
+ *(long*)lv = u.lo;
+ break;
+
+ case 6: /* ulong */
+ t.lo = *(ulong*)lv;
+ t.hi = 0;
+ fn(&u, t, rv);
+ *(ulong*)lv = u.lo;
+ break;
+
+ case 7: /* vlong */
+ case 8: /* uvlong */
+ fn(&u, *(Vlong*)lv, rv);
+ *(Vlong*)lv = u;
+ break;
+ }
+ *ret = u;
+}
+
+void
+_p2v(Vlong *ret, void *p)
+{
+ long t;
+
+ t = (ulong)p;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sl2v(Vlong *ret, long sl)
+{
+ long t;
+
+ t = sl;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_ul2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_si2v(Vlong *ret, int si)
+{
+ long t;
+
+ t = si;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_ui2v(Vlong *ret, uint ui)
+{
+ long t;
+
+ t = ui;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sh2v(Vlong *ret, long sh)
+{
+ long t;
+
+ t = (sh << 16) >> 16;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_uh2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul & 0xffff;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+void
+_sc2v(Vlong *ret, long uc)
+{
+ long t;
+
+ t = (uc << 24) >> 24;
+ ret->lo = t;
+ ret->hi = t >> 31;
+}
+
+void
+_uc2v(Vlong *ret, ulong ul)
+{
+ long t;
+
+ t = ul & 0xff;
+ ret->lo = t;
+ ret->hi = 0;
+}
+
+long
+_v2sc(Vlong rv)
+{
+ long t;
+
+ t = rv.lo & 0xff;
+ return (t << 24) >> 24;
+}
+
+long
+_v2uc(Vlong rv)
+{
+
+ return rv.lo & 0xff;
+}
+
+long
+_v2sh(Vlong rv)
+{
+ long t;
+
+ t = rv.lo & 0xffff;
+ return (t << 16) >> 16;
+}
+
+long
+_v2uh(Vlong rv)
+{
+
+ return rv.lo & 0xffff;
+}
+
+long
+_v2sl(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2ul(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2si(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+long
+_v2ui(Vlong rv)
+{
+
+ return rv.lo;
+}
+
+int
+_testv(Vlong rv)
+{
+ return rv.lo || rv.hi;
+}
+
+int
+_eqv(Vlong lv, Vlong rv)
+{
+ return lv.lo == rv.lo && lv.hi == rv.hi;
+}
+
+int
+_nev(Vlong lv, Vlong rv)
+{
+ return lv.lo != rv.lo || lv.hi != rv.hi;
+}
+
+int
+_ltv(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi < (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo < rv.lo);
+}
+
+int
+_lev(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi < (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo <= rv.lo);
+}
+
+int
+_gtv(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi > (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo > rv.lo);
+}
+
+int
+_gev(Vlong lv, Vlong rv)
+{
+ return (long)lv.hi > (long)rv.hi ||
+ (lv.hi == rv.hi && lv.lo >= rv.lo);
+}
+
+int
+_lov(Vlong lv, Vlong rv)
+{
+ return lv.hi < rv.hi ||
+ (lv.hi == rv.hi && lv.lo < rv.lo);
+}
+
+int
+_lsv(Vlong lv, Vlong rv)
+{
+ return lv.hi < rv.hi ||
+ (lv.hi == rv.hi && lv.lo <= rv.lo);
+}
+
+int
+_hiv(Vlong lv, Vlong rv)
+{
+ return lv.hi > rv.hi ||
+ (lv.hi == rv.hi && lv.lo > rv.lo);
+}
+
+int
+_hsv(Vlong lv, Vlong rv)
+{
+ return lv.hi > rv.hi ||
+ (lv.hi == rv.hi && lv.lo >= rv.lo);
+}
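
The two comparison families differ only in how the high words compare: _ltv
through _gev cast hi to long for a signed compare, while _lov, _lsv, _hiv,
and _hsv (presumably named after the unsigned condition names lower,
lower-or-same, higher, higher-or-same) compare unsigned. Distilled into one
sketch:

    #include <stdint.h>

    /* 64-bit < built from 32-bit halves: the high words decide unless
     * equal; only the high-word compare is sign-sensitive. */
    static int lt64_sketch(uint32_t ahi, uint32_t alo,
                           uint32_t bhi, uint32_t blo, int issigned)
    {
        if (ahi != bhi)
            return issigned ? (int32_t)ahi < (int32_t)bhi : ahi < bhi;
        return alo < blo;   /* low words always compare unsigned */
    }
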