author | kristerw <kristerw@pkgsrc.org> | 2004-01-03 09:50:39 +0000 |
---|---|---|
committer | kristerw <kristerw@pkgsrc.org> | 2004-01-03 09:50:39 +0000 |
commit | 8e3896d83ad27ebfee6ccdb35af3e4abeb0d98e2 (patch) | |
tree | 8a8d50e46507294538762e1be447cfc819891b2b /lang | |
parent | a56602c899d5ca612a3f93af27fc5dffb2be3ee6 (diff) | |
download | pkgsrc-8e3896d83ad27ebfee6ccdb35af3e4abeb0d98e2.tar.gz |
Fix multi-line strings and an incorrect function call identified by gcc3.
Diffstat (limited to 'lang')
-rw-r--r-- | lang/nhc98/distinfo | 4 |
-rw-r--r-- | lang/nhc98/patches/patch-ab | 11 |
-rw-r--r-- | lang/nhc98/patches/patch-ac | 503 |
3 files changed, 517 insertions, 1 deletion
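The two errors named in the commit message are worth spelling out before the diff. GCC 3 dropped support for string literals that continue across physical source lines, so every multi-line `__asm__` template in asmlong.h has to become a sequence of adjacent string literals with explicit `\n\t` separators (patch-ac), and it rejects `abort(-1)` because `abort()` takes no arguments (patch-ab). The following is a minimal sketch of both shapes of fix, not code from the commit: the i386 `add_ssaaaa` is simplified (the `unsigned long int` casts are omitted), and `alloca_stub` is a made-up name standing in for the `alloca` replacement in alloca.c.

```c
#include <stdio.h>
#include <stdlib.h>

/* GCC 3 no longer accepts a string literal that spans physical lines, so an
 * asm template is written as adjacent literals with an explicit "\n\t"
 * between instructions (simplified i386 add_ssaaaa; casts omitted). */
#define add_ssaaaa(sh, sl, ah, al, bh, bl)      \
  __asm__ ("addl %5,%1\n\t"                     \
           "adcl %3,%0"                         \
           : "=r" (sh), "=&r" (sl)              \
           : "%0" (ah), "g" (bh), "%1" (al), "g" (bl))

/* abort() is declared as taking no arguments, so abort(-1) is rejected by
 * gcc3; the fixed stub prints its diagnostic and then calls abort().
 * (alloca_stub is a hypothetical name used here for illustration.) */
void *alloca_stub(unsigned size)
{
    fprintf(stderr, "alloca %u called\n", size);
    abort();
}
```

Adjacent string literals are concatenated by the compiler, so the `\n\t` form hands the assembler exactly the same text as the old multi-line string; only the source syntax changes to something GCC 3 accepts.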
```diff
diff --git a/lang/nhc98/distinfo b/lang/nhc98/distinfo
index b05f695ee1a..b76dd3a1f10 100644
--- a/lang/nhc98/distinfo
+++ b/lang/nhc98/distinfo
@@ -1,5 +1,7 @@
-$NetBSD: distinfo,v 1.8 2003/03/19 23:14:35 kristerw Exp $
+$NetBSD: distinfo,v 1.9 2004/01/03 09:50:39 kristerw Exp $
 SHA1 (nhc98src-1.16.tar.gz) = 7ca27c190d06462cab5d642726b7e5e73b61a7e4
 Size (nhc98src-1.16.tar.gz) = 5689485 bytes
 SHA1 (patch-aa) = ba484e67af2574abda0708f882e4c5f973ef125e
+SHA1 (patch-ab) = 064fe3b954dadf46155a3ddcd1e0ba439ff5167d
+SHA1 (patch-ac) = 5478a4b98583ee2ca165bc460a720d9f2a0742dc
diff --git a/lang/nhc98/patches/patch-ab b/lang/nhc98/patches/patch-ab
new file mode 100644
index 00000000000..5a5393611ad
--- /dev/null
+++ b/lang/nhc98/patches/patch-ab
@@ -0,0 +1,11 @@
+$NetBSD: patch-ab,v 1.1 2004/01/03 09:50:39 kristerw Exp $
+
+--- src/runtime/Integer/alloca.c.orig Sat Jan 3 07:32:14 2004
++++ src/runtime/Integer/alloca.c Sat Jan 3 07:31:44 2004
+@@ -4,5 +4,5 @@
+ void *alloca (unsigned size)
+ {
+ fprintf(stderr,"Falilure alloca %d called:-(\n",size);
+- abort(-1);
++ abort();
+ }
diff --git a/lang/nhc98/patches/patch-ac b/lang/nhc98/patches/patch-ac
new file mode 100644
index 00000000000..4648417bcde
--- /dev/null
+++ b/lang/nhc98/patches/patch-ac
@@ -0,0 +1,503 @@
+$NetBSD: patch-ac,v 1.1 2004/01/03 09:50:39 kristerw Exp $
+
+--- src/runtime/Integer/asmlong.h.orig Sat Jan 3 07:16:57 2004
++++ src/runtime/Integer/asmlong.h Sat Jan 3 07:28:52 2004
+@@ -1,13 +1,13 @@
+ #if defined (__a29k__) || defined (___AM29K__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("add %1,%4,%5
+- addc %0,%2,%3" \
++ __asm__ ("add %1,%4,%5\n\t" \
++ "addc %0,%2,%3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
+ : "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
+ "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("sub %1,%4,%5
+- subc %0,%2,%3" \
++ __asm__ ("sub %1,%4,%5\n\t" \
++ "subc %0,%2,%3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
+ : "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
+ "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
+@@ -31,14 +31,14 @@
+
+ #if defined (__arm__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("adds %1,%4,%5
+- adc %0,%2,%3" \
++ __asm__ ("adds %1,%4,%5\n\t" \
++ "adc %0,%2,%3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
+ : "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
+ "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("subs %1,%4,%5
+- sbc %0,%2,%3" \
++ __asm__ ("subs %1,%4,%5\n\t" \
++ "sbc %0,%2,%3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
+ : "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
+ "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
+@@ -46,14 +46,14 @@
+
+ #if defined (__gmicro__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("add.w %5,%1
+- addx %3,%0" \
++ __asm__ ("add.w %5,%1\n\t" \
++ "addx %3,%0" \
+ : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl))\
+ : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
+ "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("sub.w %5,%1
+- subx %3,%0" \
++ __asm__ ("sub.w %5,%1\n\t" \
++ "subx %3,%0" \
+ : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl))\
+ : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
+ "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
+@@ -74,14 +74,14 @@
+
+ #if defined (__hppa)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("add %4,%5,%1
+- addc %2,%3,%0" \
++ __asm__ ("add %4,%5,%1\n\t" \
++ "addc %2,%3,%0" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "%r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)),\
+ "%r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("sub %5,%4,%1
+- subb %3,%2,%0" \
++ __asm__ ("sub %5,%4,%1\n\t" \
++ "subb %3,%2,%0" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
+ "r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
+@@ -89,14 +89,14 @@
+
+ #if defined (__i386__) || defined (__i486__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("addl %5,%1
+- adcl %3,%0" \
++ __asm__ ("addl %5,%1\n\t" \
++ "adcl %3,%0" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
+ "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("subl %5,%1
+- sbbl %3,%0" \
++ __asm__ ("subl %5,%1\n\t" \
++ "sbbl %3,%0" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
+ "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
+@@ -157,14 +157,14 @@
+
+ #if defined (___IBMR2__) /* IBM RS6000 */
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("a %1,%4,%5
+- ae %0,%2,%3" \
++ __asm__ ("a %1,%4,%5\n\t" \
++ "ae %0,%2,%3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "%r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)),\
+ "%r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("sf %1,%5,%4
+- sfe %0,%3,%2" \
++ __asm__ ("sf %1,%5,%4\n\t" \
++ "sfe %0,%3,%2" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
+ "r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
+@@ -264,32 +264,32 @@
+ : "od" ((unsigned long int)(x)), "n" (0))
+ #else /* not mc68020 */
+ #define umul_ppmm(xh, xl, a, b) \
+- __asm__ ("| Inlined umul_ppmm
+- movel %2,d0
+- movel %3,d1
+- movel d0,d2
+- swap d0
+- movel d1,d3
+- swap d1
+- movew d2,d4
+- mulu d3,d4
+- mulu d1,d2
+- mulu d0,d3
+- mulu d0,d1
+- movel d4,d0
+- eorw d0,d0
+- swap d0
+- addl d0,d2
+- addl d3,d2
+- jcc 1f
+- addl #65536,d1
+-1: swap d2
+- moveq #0,d0
+- movew d2,d0
+- movew d4,d2
+- movel d2,%1
+- addl d1,d0
+- movel d0,%0" \
++ __asm__ ("| Inlined umul_ppmm\n\t" \
++ "movel %2,d0\n\t" \
++ "movel %3,d1\n\t" \
++ "movel d0,d2\n\t" \
++ "swap d0\n\t" \
++ "movel d1,d3\n\t" \
++ "swap d1\n\t" \
++ "movew d2,d4\n\t" \
++ "mulu d3,d4\n\t" \
++ "mulu d1,d2\n\t" \
++ "mulu d0,d3\n\t" \
++ "mulu d0,d1\n\t" \
++ "movel d4,d0\n\t" \
++ "eorw d0,d0\n\t" \
++ "swap d0\n\t" \
++ "addl d0,d2\n\t" \
++ "addl d3,d2\n\t" \
++ "jcc 1f\n\t" \
++ "addl #65536,d1\n"i \
++"1: swap d2\n\t" \
++ "moveq #0,d0\n\t" \
++ "movew d2,d0\n\t" \
++ "movew d4,d2\n\t" \
++ "movel d2,%1\n\t" \
++ "addl d1,d0\n\t" \
++ "movel d0,%0" \
+ : "=g" ((unsigned long int)(xh)), "=g" ((unsigned long int)(xl)) \
+ :"g" ((unsigned long int)(a)), "g" ((unsigned long int)(b)) \
+ : "d0", "d1", "d2", "d3", "d4")
+@@ -298,14 +298,14 @@
+
+ #if defined (__m88000__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("addu.co %1,%r4,%r5
+- addu.ci %0,%r2,%r3" \
++ __asm__ ("addu.co %1,%r4,%r5\n\t" \
++ "addu.ci %0,%r2,%r3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "%rJ" ((unsigned long int)(ah)), "rJ" ((unsigned long int)(bh)),\
+ "%rJ" ((unsigned long int)(al)), "rJ" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("subu.co %1,%r4,%r5
+- subu.ci %0,%r2,%r3" \
++ __asm__ ("subu.co %1,%r4,%r5\n\t" \
++ "subu.ci %0,%r2,%r3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "rJ" ((unsigned long int)(ah)), "rJ" ((unsigned long int)(bh)),\
+ "rJ" ((unsigned long int)(al)), "rJ" ((unsigned long int)(bl)))
+@@ -322,9 +322,9 @@
+
+ #if defined (__mips__)
+ #define umul_ppmm(w1, w0, u, v) \
+- __asm__ ("multu %2,%3
+- mflo %0
+- mfhi %1" \
++ __asm__ ("multu %2,%3\n\t" \
++ "mflo %0\n\t" \
++ "mfhi %1" \
+ : "=r" ((unsigned long int)(w0)), "=r" ((unsigned long int)(w1))\
+ : "r" ((unsigned long int)(u)), "r" ((unsigned long int)(v)))
+ #define UMUL_TIME 5
+@@ -338,11 +338,11 @@
+ : "%0" ((unsigned long int)(u)), "g" ((unsigned long int)(v))); \
+ __w; })
+ #define div_qrnnd(q, r, n1, n0, d) \
+- __asm__ ("movd %2,r0
+- movd %3,r1
+- deid %4,r0
+- movd r1,%0
+- movd r0,%1" \
++ __asm__ ("movd %2,r0\n\t" \
++ "movd %3,r1\n\t" \
++ "deid %4,r0\n\t" \
++ "movd r1,%0\n\t" \
++ "movd r0,%1" \
+ : "=g" ((unsigned long int)(q)), "=g" ((unsigned long int)(r)) \
+ : "g" ((unsigned long int)(n0)), "g" ((unsigned long int)(n1)), \
+ "g" ((unsigned long int)(d)) : "r0", "r1")
+@@ -350,23 +350,23 @@
+
+ #if defined (__pyr__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("addw %5,%1
+- addwc %3,%0" \
++ __asm__ ("addw %5,%1\n\t" \
++ "addwc %3,%0" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)),\
+ "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("subw %5,%1
+- subwb %3,%0" \
++ __asm__ ("subw %5,%1\n\t" \
++ "subwb %3,%0" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
+ "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
+ /* This insn doesn't work on ancient pyramids. */
+ #define umul_ppmm(w1, w0, u, v) \
+- __asm__ ("movw %2,tr11
+- uemul %3,tr10
+- movw tr10,%0
+- movw tr11,%1" \
++ __asm__ ("movw %2,tr11\n\t" \
++ "uemul %3,tr10\n\t" \
++ "movw tr10,%0\n\t" \
++ "movw tr11,%1" \
+ : "=r" ((unsigned long int)(w1)), "=r" ((unsigned long int)(w0))\
+ : "r" ((unsigned long int)(u)), "r" ((unsigned long int)(v)) \
+ : "tr10", "tr11")
+@@ -374,14 +374,14 @@
+
+ #if defined (__ibm032__) /* RT/ROMP */
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("a %1,%5
+- ae %0,%3" \
++ __asm__ ("a %1,%5\n\t" \
++ "ae %0,%3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "%0" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)),\
+ "%1" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("s %1,%5
+- se %0,%3" \
++ __asm__ ("s %1,%5\n\t" \
++ "se %0,%3" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "0" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
+ "1" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
+@@ -389,26 +389,26 @@
+ do { \
+ unsigned long int __m0 = (m0), __m1 = (m1); \
+ __asm__ ( \
+- "s r2,r2
+- mts r10,%2
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- m r2,%3
+- cas %0,r2,r0
+- mfs r10,%1" \
++ "s r2,r2\n\t" \
++ "mts r10,%2\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "m r2,%3\n\t" \
++ "cas %0,r2,r0\n\t" \
++ "mfs r10,%1" \
+ : "=r" ((unsigned long int)(ph)), "=r" ((unsigned long int)(pl)) \
+ : "%r" (__m0), "r" (__m1) \
+ : "r2"); \
+@@ -433,14 +433,14 @@
+
+ #if defined (__sparc__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("addcc %4,%5,%1
+- addx %2,%3,%0" \
++ __asm__ ("addcc %4,%5,%1\n\t" \
++ "addx %2,%3,%0" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)),\
+ "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("subcc %4,%5,%1
+- subx %2,%3,%0" \
++ __asm__ ("subcc %4,%5,%1\n\t" \
++ "subx %2,%3,%0" \
+ : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
+ : "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)),\
+ "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
+@@ -458,46 +458,46 @@
+ /* SPARC without integer multiplication and divide instructions.
+ (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
+ #define umul_ppmm(w1, w0, u, v) \
+- __asm__ ("! Inlined umul_ppmm
+- wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
+- sra %3,31,%%g2 ! Don't move this insn
+- and %2,%%g2,%%g2 ! Don't move this insn
+- andcc %%g0,0,%%g1 ! Don't move this insn
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,%3,%%g1
+- mulscc %%g1,0,%%g1
+- add %%g1,%%g2,%0
+- rd %%y,%1" \
++ __asm__ ("! Inlined umul_ppmm\n\t" \
++ "wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n\t" \
++ "sra %3,31,%%g2 ! Don't move this insn\n\t" \
++ "and %2,%%g2,%%g2 ! Don't move this insn\n\t" \
++ "andcc %%g0,0,%%g1 ! Don't move this insn\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,%3,%%g1\n\t" \
++ "mulscc %%g1,0,%%g1\n\t" \
++ "add %%g1,%%g2,%0\n\t" \
++ "rd %%y,%1" \
+ : "=r" ((unsigned long int)(w1)), "=r" ((unsigned long int)(w0))\
+ : "%rI" ((unsigned long int)(u)), "r" ((unsigned long int)(v)) \
+ : "%g1", "%g2")
+@@ -505,30 +505,30 @@
+ /* It's quite necessary to add this much assembler for the sparc.
+ The default udiv_qrnnd (in C) is more than 10 times slower! */
+ #define udiv_qrnnd(q, r, n1, n0, d) \
+- __asm__ ("! Inlined udiv_qrnnd
+- mov 32,%%g1
+- subcc %1,%2,%%g0
+-1: bcs 5f
+- addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
+- sub %1,%2,%1 ! this kills msb of n
+- addx %1,%1,%1 ! so this can't give carry
+- subcc %%g1,1,%%g1
+-2: bne 1b
+- subcc %1,%2,%%g0
+- bcs 3f
+- addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
+- b 3f
+- sub %1,%2,%1 ! this kills msb of n
+-4: sub %1,%2,%1
+-5: addxcc %1,%1,%1
+- bcc 2b
+- subcc %%g1,1,%%g1
+-! Got carry from n. Subtract next step to cancel this carry.
+- bne 4b
+- addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
+- sub %1,%2,%1
+-3: xnor %0,0,%0
+- ! End of inline udiv_qrnnd" \
++ __asm__ ("! Inlined udiv_qrnnd\n\t" \
++ "mov 32,%%g1\n\t" \
++ "subcc %1,%2,%%g0\n" \
++"1: bcs 5f\n\t" \
++ "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \
++ "sub %1,%2,%1 ! this kills msb of n\n\t" \
++ "addx %1,%1,%1 ! so this can't give carry\n\t" \
++ "subcc %%g1,1,%%g1\n" \
++"2: bne 1b\n\t" \
++ "subcc %1,%2,%%g0\n\t" \
++ "bcs 3f\n\t" \
++ "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \
++ "b 3f\n\t" \
++ "sub %1,%2,%1 ! this kills msb of n\n" \
++"4: sub %1,%2,%1\n" \
++"5: addxcc %1,%1,%1\n\t" \
++ "bcc 2b\n\t" \
++ "subcc %%g1,1,%%g1\n\t" \
++"! Got carry from n. Subtract next step to cancel this carry.\n\t" \
++ "bne 4b\n\t" \
++ "addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n\t" \
++ "sub %1,%2,%1\n" \
++"3: xnor %0,0,%0\n\t" \
++ "! End of inline udiv_qrnnd" \
+ : "=r&" ((unsigned long int)(q)), "=r&" ((unsigned long int)(r))\
+ : "r" ((unsigned long int)(d)), "1" ((unsigned long int)(n1)), \
+ "0" ((unsigned long int)(n0)) : "%g1")
+@@ -538,14 +538,14 @@
+
+ #if defined (__vax__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+- __asm__ ("addl2 %5,%1
+- adwc %3,%0" \
++ __asm__ ("addl2 %5,%1\n\t" \
++ "adwc %3,%0" \
+ : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl))\
+ : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)),\
+ "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+- __asm__ ("subl2 %5,%1
+- sbwc %3,%0" \
++ __asm__ ("subl2 %5,%1\n\t" \
++ "sbwc %3,%0" \
+ : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl))\
+ : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
+ "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
```