--- mpn/vax/add_n.s.orig	2010-02-06 13:43:14.000000000 +0100
+++ mpn/vax/add_n.s	2010-05-17 22:55:26.000000000 +0200
@@ -30,30 +30,30 @@
 .globl ___gmpn_add_n
 ___gmpn_add_n:
 	.word	0x0
-	movl	16(ap),r0
-	movl	12(ap),r1
-	movl	8(ap),r2
-	movl	4(ap),r3
-	mnegl	r0,r5
-	addl2	$3,r0
-	ashl	$-2,r0,r0	# unroll loop count
-	bicl2	$-4,r5		# mask out low 2 bits
-	movaq	(r5)[r5],r5	# 9x
-	jmp	Loop(r5)
+	movl	16(%ap),%r0
+	movl	12(%ap),%r1
+	movl	8(%ap),%r2
+	movl	4(%ap),%r3
+	mnegl	%r0,%r5
+	addl2	$3,%r0
+	ashl	$-2,%r0,%r0	# unroll loop count
+	bicl2	$-4,%r5		# mask out low 2 bits
+	movaq	(%r5)[%r5],%r5	# 9x
+	jmp	Loop(%r5)
 
-Loop:	movl	(r2)+,r4
-	adwc	(r1)+,r4
-	movl	r4,(r3)+
-	movl	(r2)+,r4
-	adwc	(r1)+,r4
-	movl	r4,(r3)+
-	movl	(r2)+,r4
-	adwc	(r1)+,r4
-	movl	r4,(r3)+
-	movl	(r2)+,r4
-	adwc	(r1)+,r4
-	movl	r4,(r3)+
-	sobgtr	r0,Loop
+Loop:	movl	(%r2)+,%r4
+	adwc	(%r1)+,%r4
+	movl	%r4,(%r3)+
+	movl	(%r2)+,%r4
+	adwc	(%r1)+,%r4
+	movl	%r4,(%r3)+
+	movl	(%r2)+,%r4
+	adwc	(%r1)+,%r4
+	movl	%r4,(%r3)+
+	movl	(%r2)+,%r4
+	adwc	(%r1)+,%r4
+	movl	%r4,(%r3)+
+	sobgtr	%r0,Loop
 
-	adwc	r0,r0
+	adwc	%r0,%r0
 	ret
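A note on the patch: it only rewrites the register names into the %-prefixed
syntax (%r0, %ap) that newer GNU as builds for VAX expect; the instruction
sequence itself is unchanged.  The entry code is the interesting part:
mnegl/addl2/ashl compute the 4-way-unrolled trip count, bicl2 $-4 keeps the
low two bits of -n (how many of the four units to skip on the first pass),
movaq (r5)[r5] scales that by 9 because each movl/adwc/movl unit assembles
to 9 bytes, and jmp Loop(r5) lands mid-loop.  The same computed-entry trick
can be sketched in C as a Duff's-device-style switch; this is an
illustration only, and limb_t and mpn_add_n_sketch are made-up names, not
GMP's API:

/*
 * C sketch of the routine above: add two n-limb (32-bit longword)
 * numbers, return the carry out.  Requires n >= 1, as GMP does.
 * The switch into the do-loop mirrors jmp Loop(r5): skip (-n) & 3
 * of the four unrolled units on the first pass.
 */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t limb_t;

limb_t mpn_add_n_sketch(limb_t *rp, const limb_t *s1, const limb_t *s2,
                        size_t n)
{
    uint64_t t = 0;               /* bits 0..31: sum limb; bit 32: carry */
    size_t iters = (n + 3) / 4;   /* addl2 $3 + ashl $-2: trip count */

    switch ((0 - n) & 3) {        /* mnegl + bicl2 $-4: units to skip */
    case 0: do { t = (uint64_t)*s1++ + *s2++ + (t >> 32); *rp++ = (limb_t)t;
    case 1:      t = (uint64_t)*s1++ + *s2++ + (t >> 32); *rp++ = (limb_t)t;
    case 2:      t = (uint64_t)*s1++ + *s2++ + (t >> 32); *rp++ = (limb_t)t;
    case 3:      t = (uint64_t)*s1++ + *s2++ + (t >> 32); *rp++ = (limb_t)t;
            } while (--iters > 0);
    }
    return (limb_t)(t >> 32);     /* adwc r0,r0 with r0 == 0: carry out */
}

Entering mid-loop instead of peeling the n % 4 leftover limbs into a
separate cleanup loop keeps the steady state at one sobgtr branch per four
limbs, and sobgtr is used precisely because it leaves the carry bit intact
for the adwc chain.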