author:    djm <>  2012-10-13 21:23:50 +0000
committer: djm <>  2012-10-13 21:23:50 +0000
commit:    e9d65189905c6e99c1062d65e26bf83eebb0a26a (patch)
tree:      10ebe51c3542099b0ab8325d8f322372375dc3b4 /src/lib/libcrypto/bn/asm
parent:    59625e84c89bf82e1c6d20c55785b618eb56ea72 (diff)
parent:    228cae30b117c2493f69ad3c195341cd6ec8d430 (diff)
download:  openbsd-e9d65189905c6e99c1062d65e26bf83eebb0a26a.tar.gz,
           openbsd-e9d65189905c6e99c1062d65e26bf83eebb0a26a.tar.bz2,
           openbsd-e9d65189905c6e99c1062d65e26bf83eebb0a26a.zip
This commit was generated by cvs2git to track changes on a CVS vendor
branch.
Diffstat (limited to 'src/lib/libcrypto/bn/asm')

 -rw-r--r--  src/lib/libcrypto/bn/asm/armv4-gf2m.pl          278
 -rw-r--r--  src/lib/libcrypto/bn/asm/armv4-mont.pl           23
 -rw-r--r--  src/lib/libcrypto/bn/asm/ia64-mont.pl           851
 -rw-r--r--  src/lib/libcrypto/bn/asm/mips-mont.pl           426
 -rw-r--r--  src/lib/libcrypto/bn/asm/mips.pl               2585
 -rw-r--r--  src/lib/libcrypto/bn/asm/modexp512-x86_64.pl   1496
 -rw-r--r--  src/lib/libcrypto/bn/asm/parisc-mont.pl         993
 -rw-r--r--  src/lib/libcrypto/bn/asm/ppc-mont.pl            107
 -rw-r--r--  src/lib/libcrypto/bn/asm/ppc.pl                  43
 -rw-r--r--  src/lib/libcrypto/bn/asm/ppc64-mont.pl          338
 -rw-r--r--  src/lib/libcrypto/bn/asm/s390x-gf2m.pl          221
 -rw-r--r--  src/lib/libcrypto/bn/asm/s390x-mont.pl          102
 -rw-r--r--  src/lib/libcrypto/bn/asm/x86-gf2m.pl            313
 -rw-r--r--  src/lib/libcrypto/bn/asm/x86_64-gf2m.pl         389
 -rwxr-xr-x  src/lib/libcrypto/bn/asm/x86_64-mont.pl        1486
 -rwxr-xr-x  src/lib/libcrypto/bn/asm/x86_64-mont5.pl       1070

16 files changed, 10473 insertions, 248 deletions
diff --git a/src/lib/libcrypto/bn/asm/armv4-gf2m.pl b/src/lib/libcrypto/bn/asm/armv4-gf2m.pl
new file mode 100644
index 0000000000..c52e0b75b5
--- /dev/null
+++ b/src/lib/libcrypto/bn/asm/armv4-gf2m.pl
@@ -0,0 +1,278 @@
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # May 2011 | ||
11 | # | ||
12 | # The module implements bn_GF2m_mul_2x2 polynomial multiplication | ||
13 | # used in bn_gf2m.c. For now it is a fairly mechanical port from | ||
14 | # C... except that it has two code paths: pure integer code | ||
15 | # suitable for any ARMv4 and later CPU, and NEON code suitable | ||
16 | # for ARMv7. The pure integer 1x1 multiplication subroutine runs | ||
17 | # in ~45 cycles on a dual-issue core such as Cortex A8, which is | ||
18 | # ~50% faster than compiler-generated code. For ECDH and ECDSA | ||
19 | # verify (but not for ECDSA sign) this means a 25%-45% improvement | ||
20 | # depending on key length, more for longer keys. Even though the | ||
21 | # NEON 1x1 multiplication runs in even fewer cycles, ~30, the | ||
22 | # improvement is measurable only on longer keys. One has to optimize code elsewhere to get the NEON glow... | ||
23 | |||
24 | while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} | ||
25 | open STDOUT,">$output"; | ||
26 | |||
27 | sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; } | ||
28 | sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; } | ||
29 | sub Q() { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; } | ||
30 | |||
31 | $code=<<___; | ||
32 | #include "arm_arch.h" | ||
33 | |||
34 | .text | ||
35 | .code 32 | ||
36 | |||
37 | #if __ARM_ARCH__>=7 | ||
38 | .fpu neon | ||
39 | |||
40 | .type mul_1x1_neon,%function | ||
41 | .align 5 | ||
42 | mul_1x1_neon: | ||
43 | vshl.u64 `&Dlo("q1")`,d16,#8 @ q1-q3 are slided $a | ||
44 | vmull.p8 `&Q("d0")`,d16,d17 @ a·bb | ||
45 | vshl.u64 `&Dlo("q2")`,d16,#16 | ||
46 | vmull.p8 q1,`&Dlo("q1")`,d17 @ a<<8·bb | ||
47 | vshl.u64 `&Dlo("q3")`,d16,#24 | ||
48 | vmull.p8 q2,`&Dlo("q2")`,d17 @ a<<16·bb | ||
49 | vshr.u64 `&Dlo("q1")`,#8 | ||
50 | vmull.p8 q3,`&Dlo("q3")`,d17 @ a<<24·bb | ||
51 | vshl.u64 `&Dhi("q1")`,#24 | ||
52 | veor d0,`&Dlo("q1")` | ||
53 | vshr.u64 `&Dlo("q2")`,#16 | ||
54 | veor d0,`&Dhi("q1")` | ||
55 | vshl.u64 `&Dhi("q2")`,#16 | ||
56 | veor d0,`&Dlo("q2")` | ||
57 | vshr.u64 `&Dlo("q3")`,#24 | ||
58 | veor d0,`&Dhi("q2")` | ||
59 | vshl.u64 `&Dhi("q3")`,#8 | ||
60 | veor d0,`&Dlo("q3")` | ||
61 | veor d0,`&Dhi("q3")` | ||
62 | bx lr | ||
63 | .size mul_1x1_neon,.-mul_1x1_neon | ||
64 | #endif | ||
65 | ___ | ||
66 | ################ | ||
67 | # private interface to mul_1x1_ialu | ||
68 | # | ||
69 | $a="r1"; | ||
70 | $b="r0"; | ||
71 | |||
72 | ($a0,$a1,$a2,$a12,$a4,$a14)= | ||
73 | ($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12); | ||
74 | |||
75 | $mask="r12"; | ||
76 | |||
77 | $code.=<<___; | ||
78 | .type mul_1x1_ialu,%function | ||
79 | .align 5 | ||
80 | mul_1x1_ialu: | ||
81 | mov $a0,#0 | ||
82 | bic $a1,$a,#3<<30 @ a1=a&0x3fffffff | ||
83 | str $a0,[sp,#0] @ tab[0]=0 | ||
84 | add $a2,$a1,$a1 @ a2=a1<<1 | ||
85 | str $a1,[sp,#4] @ tab[1]=a1 | ||
86 | eor $a12,$a1,$a2 @ a1^a2 | ||
87 | str $a2,[sp,#8] @ tab[2]=a2 | ||
88 | mov $a4,$a1,lsl#2 @ a4=a1<<2 | ||
89 | str $a12,[sp,#12] @ tab[3]=a1^a2 | ||
90 | eor $a14,$a1,$a4 @ a1^a4 | ||
91 | str $a4,[sp,#16] @ tab[4]=a4 | ||
92 | eor $a0,$a2,$a4 @ a2^a4 | ||
93 | str $a14,[sp,#20] @ tab[5]=a1^a4 | ||
94 | eor $a12,$a12,$a4 @ a1^a2^a4 | ||
95 | str $a0,[sp,#24] @ tab[6]=a2^a4 | ||
96 | and $i0,$mask,$b,lsl#2 | ||
97 | str $a12,[sp,#28] @ tab[7]=a1^a2^a4 | ||
98 | |||
99 | and $i1,$mask,$b,lsr#1 | ||
100 | ldr $lo,[sp,$i0] @ tab[b & 0x7] | ||
101 | and $i0,$mask,$b,lsr#4 | ||
102 | ldr $t1,[sp,$i1] @ tab[b >> 3 & 0x7] | ||
103 | and $i1,$mask,$b,lsr#7 | ||
104 | ldr $t0,[sp,$i0] @ tab[b >> 6 & 0x7] | ||
105 | eor $lo,$lo,$t1,lsl#3 @ stall | ||
106 | mov $hi,$t1,lsr#29 | ||
107 | ldr $t1,[sp,$i1] @ tab[b >> 9 & 0x7] | ||
108 | |||
109 | and $i0,$mask,$b,lsr#10 | ||
110 | eor $lo,$lo,$t0,lsl#6 | ||
111 | eor $hi,$hi,$t0,lsr#26 | ||
112 | ldr $t0,[sp,$i0] @ tab[b >> 12 & 0x7] | ||
113 | |||
114 | and $i1,$mask,$b,lsr#13 | ||
115 | eor $lo,$lo,$t1,lsl#9 | ||
116 | eor $hi,$hi,$t1,lsr#23 | ||
117 | ldr $t1,[sp,$i1] @ tab[b >> 15 & 0x7] | ||
118 | |||
119 | and $i0,$mask,$b,lsr#16 | ||
120 | eor $lo,$lo,$t0,lsl#12 | ||
121 | eor $hi,$hi,$t0,lsr#20 | ||
122 | ldr $t0,[sp,$i0] @ tab[b >> 18 & 0x7] | ||
123 | |||
124 | and $i1,$mask,$b,lsr#19 | ||
125 | eor $lo,$lo,$t1,lsl#15 | ||
126 | eor $hi,$hi,$t1,lsr#17 | ||
127 | ldr $t1,[sp,$i1] @ tab[b >> 21 & 0x7] | ||
128 | |||
129 | and $i0,$mask,$b,lsr#22 | ||
130 | eor $lo,$lo,$t0,lsl#18 | ||
131 | eor $hi,$hi,$t0,lsr#14 | ||
132 | ldr $t0,[sp,$i0] @ tab[b >> 24 & 0x7] | ||
133 | |||
134 | and $i1,$mask,$b,lsr#25 | ||
135 | eor $lo,$lo,$t1,lsl#21 | ||
136 | eor $hi,$hi,$t1,lsr#11 | ||
137 | ldr $t1,[sp,$i1] @ tab[b >> 27 & 0x7] | ||
138 | |||
139 | tst $a,#1<<30 | ||
140 | and $i0,$mask,$b,lsr#28 | ||
141 | eor $lo,$lo,$t0,lsl#24 | ||
142 | eor $hi,$hi,$t0,lsr#8 | ||
143 | ldr $t0,[sp,$i0] @ tab[b >> 30 ] | ||
144 | |||
145 | eorne $lo,$lo,$b,lsl#30 | ||
146 | eorne $hi,$hi,$b,lsr#2 | ||
147 | tst $a,#1<<31 | ||
148 | eor $lo,$lo,$t1,lsl#27 | ||
149 | eor $hi,$hi,$t1,lsr#5 | ||
150 | eorne $lo,$lo,$b,lsl#31 | ||
151 | eorne $hi,$hi,$b,lsr#1 | ||
152 | eor $lo,$lo,$t0,lsl#30 | ||
153 | eor $hi,$hi,$t0,lsr#2 | ||
154 | |||
155 | mov pc,lr | ||
156 | .size mul_1x1_ialu,.-mul_1x1_ialu | ||
157 | ___ | ||
158 | ################ | ||
159 | # void bn_GF2m_mul_2x2(BN_ULONG *r, | ||
160 | # BN_ULONG a1,BN_ULONG a0, | ||
161 | # BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0 | ||
162 | |||
163 | ($A1,$B1,$A0,$B0,$A1B1,$A0B0)=map("d$_",(18..23)); | ||
164 | |||
165 | $code.=<<___; | ||
166 | .global bn_GF2m_mul_2x2 | ||
167 | .type bn_GF2m_mul_2x2,%function | ||
168 | .align 5 | ||
169 | bn_GF2m_mul_2x2: | ||
170 | #if __ARM_ARCH__>=7 | ||
171 | ldr r12,.LOPENSSL_armcap | ||
172 | .Lpic: ldr r12,[pc,r12] | ||
173 | tst r12,#1 | ||
174 | beq .Lialu | ||
175 | |||
176 | veor $A1,$A1 | ||
177 | vmov.32 $B1,r3,r3 @ two copies of b1 | ||
178 | vmov.32 ${A1}[0],r1 @ a1 | ||
179 | |||
180 | veor $A0,$A0 | ||
181 | vld1.32 ${B0}[],[sp,:32] @ two copies of b0 | ||
182 | vmov.32 ${A0}[0],r2 @ a0 | ||
183 | mov r12,lr | ||
184 | |||
185 | vmov d16,$A1 | ||
186 | vmov d17,$B1 | ||
187 | bl mul_1x1_neon @ a1·b1 | ||
188 | vmov $A1B1,d0 | ||
189 | |||
190 | vmov d16,$A0 | ||
191 | vmov d17,$B0 | ||
192 | bl mul_1x1_neon @ a0·b0 | ||
193 | vmov $A0B0,d0 | ||
194 | |||
195 | veor d16,$A0,$A1 | ||
196 | veor d17,$B0,$B1 | ||
197 | veor $A0,$A0B0,$A1B1 | ||
198 | bl mul_1x1_neon @ (a0+a1)·(b0+b1) | ||
199 | |||
200 | veor d0,$A0 @ (a0+a1)·(b0+b1)-a0·b0-a1·b1 | ||
201 | vshl.u64 d1,d0,#32 | ||
202 | vshr.u64 d0,d0,#32 | ||
203 | veor $A0B0,d1 | ||
204 | veor $A1B1,d0 | ||
205 | vst1.32 {${A0B0}[0]},[r0,:32]! | ||
206 | vst1.32 {${A0B0}[1]},[r0,:32]! | ||
207 | vst1.32 {${A1B1}[0]},[r0,:32]! | ||
208 | vst1.32 {${A1B1}[1]},[r0,:32] | ||
209 | bx r12 | ||
210 | .align 4 | ||
211 | .Lialu: | ||
212 | #endif | ||
213 | ___ | ||
214 | $ret="r10"; # reassigned 1st argument | ||
215 | $code.=<<___; | ||
216 | stmdb sp!,{r4-r10,lr} | ||
217 | mov $ret,r0 @ reassign 1st argument | ||
218 | mov $b,r3 @ $b=b1 | ||
219 | ldr r3,[sp,#32] @ load b0 | ||
220 | mov $mask,#7<<2 | ||
221 | sub sp,sp,#32 @ allocate tab[8] | ||
222 | |||
223 | bl mul_1x1_ialu @ a1·b1 | ||
224 | str $lo,[$ret,#8] | ||
225 | str $hi,[$ret,#12] | ||
226 | |||
227 | eor $b,$b,r3 @ flip b0 and b1 | ||
228 | eor $a,$a,r2 @ flip a0 and a1 | ||
229 | eor r3,r3,$b | ||
230 | eor r2,r2,$a | ||
231 | eor $b,$b,r3 | ||
232 | eor $a,$a,r2 | ||
233 | bl mul_1x1_ialu @ a0·b0 | ||
234 | str $lo,[$ret] | ||
235 | str $hi,[$ret,#4] | ||
236 | |||
237 | eor $a,$a,r2 | ||
238 | eor $b,$b,r3 | ||
239 | bl mul_1x1_ialu @ (a1+a0)·(b1+b0) | ||
240 | ___ | ||
241 | @r=map("r$_",(6..9)); | ||
242 | $code.=<<___; | ||
243 | ldmia $ret,{@r[0]-@r[3]} | ||
244 | eor $lo,$lo,$hi | ||
245 | eor $hi,$hi,@r[1] | ||
246 | eor $lo,$lo,@r[0] | ||
247 | eor $hi,$hi,@r[2] | ||
248 | eor $lo,$lo,@r[3] | ||
249 | eor $hi,$hi,@r[3] | ||
250 | str $hi,[$ret,#8] | ||
251 | eor $lo,$lo,$hi | ||
252 | add sp,sp,#32 @ destroy tab[8] | ||
253 | str $lo,[$ret,#4] | ||
254 | |||
255 | #if __ARM_ARCH__>=5 | ||
256 | ldmia sp!,{r4-r10,pc} | ||
257 | #else | ||
258 | ldmia sp!,{r4-r10,lr} | ||
259 | tst lr,#1 | ||
260 | moveq pc,lr @ be binary compatible with V4, yet | ||
261 | bx lr @ interoperable with Thumb ISA:-) | ||
262 | #endif | ||
263 | .size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2 | ||
264 | #if __ARM_ARCH__>=7 | ||
265 | .align 5 | ||
266 | .LOPENSSL_armcap: | ||
267 | .word OPENSSL_armcap_P-(.Lpic+8) | ||
268 | #endif | ||
269 | .asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>" | ||
270 | .align 5 | ||
271 | |||
272 | .comm OPENSSL_armcap_P,4,4 | ||
273 | ___ | ||
274 | |||
275 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
276 | $code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 | ||
277 | print $code; | ||
278 | close STDOUT; # enforce flush | ||
diff --git a/src/lib/libcrypto/bn/asm/armv4-mont.pl b/src/lib/libcrypto/bn/asm/armv4-mont.pl
index 14e0d2d1dd..f78a8b5f0f 100644
--- a/src/lib/libcrypto/bn/asm/armv4-mont.pl
+++ b/src/lib/libcrypto/bn/asm/armv4-mont.pl
@@ -23,6 +23,9 @@
 # than 1/2KB. Windows CE port would be trivial, as it's exclusively
 # about decorations, ABI and instruction syntax are identical.
 
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
 $num="r0";	# starts as num argument, but holds &tp[num-1]
 $ap="r1";
 $bp="r2"; $bi="r2"; $rp="r2";
@@ -89,9 +92,9 @@ bn_mul_mont:
 .L1st:
 	ldr	$aj,[$ap],#4		@ ap[j],ap++
 	mov	$alo,$ahi
+	ldr	$nj,[$np],#4		@ np[j],np++
 	mov	$ahi,#0
 	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
-	ldr	$nj,[$np],#4		@ np[j],np++
 	mov	$nhi,#0
 	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
 	adds	$nlo,$nlo,$alo
@@ -101,21 +104,21 @@ bn_mul_mont:
 	bne	.L1st
 
 	adds	$nlo,$nlo,$ahi
+	ldr	$tp,[$_bp]		@ restore bp
 	mov	$nhi,#0
+	ldr	$n0,[$_n0]		@ restore n0
 	adc	$nhi,$nhi,#0
-	ldr	$tp,[$_bp]		@ restore bp
 	str	$nlo,[$num]		@ tp[num-1]=
-	ldr	$n0,[$_n0]		@ restore n0
 	str	$nhi,[$num,#4]		@ tp[num]=
 
 .Louter:
 	sub	$tj,$num,sp		@ "original" $num-1 value
 	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
-	sub	$np,$np,$tj		@ "rewind" np to &np[1]
 	ldr	$bi,[$tp,#4]!		@ *(++bp)
+	sub	$np,$np,$tj		@ "rewind" np to &np[1]
 	ldr	$aj,[$ap,#-4]		@ ap[0]
-	ldr	$nj,[$np,#-4]		@ np[0]
 	ldr	$alo,[sp]		@ tp[0]
+	ldr	$nj,[$np,#-4]		@ np[0]
 	ldr	$tj,[sp,#4]		@ tp[1]
 
 	mov	$ahi,#0
@@ -129,13 +132,13 @@ bn_mul_mont:
 .Linner:
 	ldr	$aj,[$ap],#4		@ ap[j],ap++
 	adds	$alo,$ahi,$tj		@ +=tp[j]
+	ldr	$nj,[$np],#4		@ np[j],np++
 	mov	$ahi,#0
 	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
-	ldr	$nj,[$np],#4		@ np[j],np++
 	mov	$nhi,#0
 	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
-	ldr	$tj,[$tp,#8]		@ tp[j+1]
 	adc	$ahi,$ahi,#0
+	ldr	$tj,[$tp,#8]		@ tp[j+1]
 	adds	$nlo,$nlo,$alo
 	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
 	adc	$nlo,$nhi,#0
@@ -144,13 +147,13 @@ bn_mul_mont:
 
 	adds	$nlo,$nlo,$ahi
 	mov	$nhi,#0
+	ldr	$tp,[$_bp]		@ restore bp
 	adc	$nhi,$nhi,#0
+	ldr	$n0,[$_n0]		@ restore n0
 	adds	$nlo,$nlo,$tj
-	adc	$nhi,$nhi,#0
-	ldr	$tp,[$_bp]		@ restore bp
 	ldr	$tj,[$_bpend]		@ restore &bp[num]
+	adc	$nhi,$nhi,#0
 	str	$nlo,[$num]		@ tp[num-1]=
-	ldr	$n0,[$_n0]		@ restore n0
 	str	$nhi,[$num,#4]		@ tp[num]=
 
 	cmp	$tp,$tj
diff --git a/src/lib/libcrypto/bn/asm/ia64-mont.pl b/src/lib/libcrypto/bn/asm/ia64-mont.pl
new file mode 100644
index 0000000000..e258658428
--- /dev/null
+++ b/src/lib/libcrypto/bn/asm/ia64-mont.pl
@@ -0,0 +1,851 @@
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | |||
10 | # January 2010 | ||
11 | # | ||
12 | # "Teaser" Montgomery multiplication module for IA-64. There are | ||
13 | # several possibilities for improvement: | ||
14 | # | ||
15 | # - modulo-scheduling outer loop would eliminate quite a number of | ||
16 | # stalls after ldf8, xma and getf.sig outside inner loop and | ||
17 | # improve shorter key performance; | ||
18 | # - shorter vector support [with input vectors being fetched only | ||
19 | # once] should be added; | ||
20 | # - 2x unroll with help of n0[1] would make the code scalable on | ||
21 | # "wider" IA-64, "wider" than Itanium 2 that is, which is not of | ||
22 | # acute interest, because upcoming Tukwila's individual cores are | ||
23 | # reportedly based on Itanium 2 design; | ||
24 | # - dedicated squaring procedure(?); | ||
25 | # | ||
26 | # January 2010 | ||
27 | # | ||
28 | # Shorter vector support is implemented by zero-padding ap and np | ||
29 | # vectors up to 8 elements, or 512 bits. This means that 256-bit | ||
30 | # inputs will be processed only 2 times faster than 512-bit inputs, | ||
31 | # not 4 [as one would expect, because algorithm complexity is n^2]. | ||
32 | # The reason for padding is that inputs shorter than 512 bits won't | ||
33 | # be processed faster anyway, because minimal critical path of the | ||
34 | # core loop happens to match 512-bit timing. Either way, it resulted | ||
35 | # in >100% improvement of 512-bit RSA sign benchmark and 50% - of | ||
36 | # 1024-bit one [in comparison to original version of *this* module]. | ||
37 | # | ||
38 | # So far 'openssl speed rsa dsa' output on 900MHz Itanium 2 *with* | ||
39 | # this module is: | ||
40 | # sign verify sign/s verify/s | ||
41 | # rsa 512 bits 0.000290s 0.000024s 3452.8 42031.4 | ||
42 | # rsa 1024 bits 0.000793s 0.000058s 1261.7 17172.0 | ||
43 | # rsa 2048 bits 0.005908s 0.000148s 169.3 6754.0 | ||
44 | # rsa 4096 bits 0.033456s 0.000469s 29.9 2133.6 | ||
45 | # dsa 512 bits 0.000253s 0.000198s 3949.9 5057.0 | ||
46 | # dsa 1024 bits 0.000585s 0.000607s 1708.4 1647.4 | ||
47 | # dsa 2048 bits 0.001453s 0.001703s 688.1 587.4 | ||
48 | # | ||
49 | # ... and *without* (but still with ia64.S): | ||
50 | # | ||
51 | # rsa 512 bits 0.000670s 0.000041s 1491.8 24145.5 | ||
52 | # rsa 1024 bits 0.001988s 0.000080s 502.9 12499.3 | ||
53 | # rsa 2048 bits 0.008702s 0.000189s 114.9 5293.9 | ||
54 | # rsa 4096 bits 0.043860s 0.000533s 22.8 1875.9 | ||
55 | # dsa 512 bits 0.000441s 0.000427s 2265.3 2340.6 | ||
56 | # dsa 1024 bits 0.000823s 0.000867s 1215.6 1153.2 | ||
57 | # dsa 2048 bits 0.001894s 0.002179s 528.1 458.9 | ||
58 | # | ||
59 | # As can be seen, RSA sign performance improves by 130%-30% | ||
60 | # [less for longer keys], while verify improves by 74%-13%. | ||
61 | # DSA performance improves by 115%-30%. | ||
62 | |||
63 | if ($^O eq "hpux") { | ||
64 | $ADDP="addp4"; | ||
65 | for (@ARGV) { $ADDP="add" if (/(\+DD|-mlp)64/); } # match +DD64/-mlp64 | ||
66 | } else { $ADDP="add"; } | ||
67 | |||
68 | $code=<<___; | ||
69 | .explicit | ||
70 | .text | ||
71 | |||
72 | // int bn_mul_mont (BN_ULONG *rp,const BN_ULONG *ap, | ||
73 | // const BN_ULONG *bp,const BN_ULONG *np, | ||
74 | // const BN_ULONG *n0p,int num); | ||
75 | .align 64 | ||
76 | .global bn_mul_mont# | ||
77 | .proc bn_mul_mont# | ||
78 | bn_mul_mont: | ||
79 | .prologue | ||
80 | .body | ||
81 | { .mmi; cmp4.le p6,p7=2,r37;; | ||
82 | (p6) cmp4.lt.unc p8,p9=8,r37 | ||
83 | mov ret0=r0 };; | ||
84 | { .bbb; | ||
85 | (p9) br.cond.dptk.many bn_mul_mont_8 | ||
86 | (p8) br.cond.dpnt.many bn_mul_mont_general | ||
87 | (p7) br.ret.spnt.many b0 };; | ||
88 | .endp bn_mul_mont# | ||
89 | |||
90 | prevfs=r2; prevpr=r3; prevlc=r10; prevsp=r11; | ||
91 | |||
92 | rptr=r8; aptr=r9; bptr=r14; nptr=r15; | ||
93 | tptr=r16; // &tp[0] | ||
94 | tp_1=r17; // &tp[-1] | ||
95 | num=r18; len=r19; lc=r20; | ||
96 | topbit=r21; // carry bit from tmp[num] | ||
97 | |||
98 | n0=f6; | ||
99 | m0=f7; | ||
100 | bi=f8; | ||
101 | |||
102 | .align 64 | ||
103 | .local bn_mul_mont_general# | ||
104 | .proc bn_mul_mont_general# | ||
105 | bn_mul_mont_general: | ||
106 | .prologue | ||
107 | { .mmi; .save ar.pfs,prevfs | ||
108 | alloc prevfs=ar.pfs,6,2,0,8 | ||
109 | $ADDP aptr=0,in1 | ||
110 | .save ar.lc,prevlc | ||
111 | mov prevlc=ar.lc } | ||
112 | { .mmi; .vframe prevsp | ||
113 | mov prevsp=sp | ||
114 | $ADDP bptr=0,in2 | ||
115 | .save pr,prevpr | ||
116 | mov prevpr=pr };; | ||
117 | |||
118 | .body | ||
119 | .rotf alo[6],nlo[4],ahi[8],nhi[6] | ||
120 | .rotr a[3],n[3],t[2] | ||
121 | |||
122 | { .mmi; ldf8 bi=[bptr],8 // (*bp++) | ||
123 | ldf8 alo[4]=[aptr],16 // ap[0] | ||
124 | $ADDP r30=8,in1 };; | ||
125 | { .mmi; ldf8 alo[3]=[r30],16 // ap[1] | ||
126 | ldf8 alo[2]=[aptr],16 // ap[2] | ||
127 | $ADDP in4=0,in4 };; | ||
128 | { .mmi; ldf8 alo[1]=[r30] // ap[3] | ||
129 | ldf8 n0=[in4] // n0 | ||
130 | $ADDP rptr=0,in0 } | ||
131 | { .mmi; $ADDP nptr=0,in3 | ||
132 | mov r31=16 | ||
133 | zxt4 num=in5 };; | ||
134 | { .mmi; ldf8 nlo[2]=[nptr],8 // np[0] | ||
135 | shladd len=num,3,r0 | ||
136 | shladd r31=num,3,r31 };; | ||
137 | { .mmi; ldf8 nlo[1]=[nptr],8 // np[1] | ||
138 | add lc=-5,num | ||
139 | sub r31=sp,r31 };; | ||
140 | { .mfb; and sp=-16,r31 // alloca | ||
141 | xmpy.hu ahi[2]=alo[4],bi // ap[0]*bp[0] | ||
142 | nop.b 0 } | ||
143 | { .mfb; nop.m 0 | ||
144 | xmpy.lu alo[4]=alo[4],bi | ||
145 | brp.loop.imp .L1st_ctop,.L1st_cend-16 | ||
146 | };; | ||
147 | { .mfi; nop.m 0 | ||
148 | xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[0] | ||
149 | add tp_1=8,sp } | ||
150 | { .mfi; nop.m 0 | ||
151 | xma.lu alo[3]=alo[3],bi,ahi[2] | ||
152 | mov pr.rot=0x20001f<<16 | ||
153 | // ------^----- (p40) at first (p23) | ||
154 | // ----------^^ p[16:20]=1 | ||
155 | };; | ||
156 | { .mfi; nop.m 0 | ||
157 | xmpy.lu m0=alo[4],n0 // (ap[0]*bp[0])*n0 | ||
158 | mov ar.lc=lc } | ||
159 | { .mfi; nop.m 0 | ||
160 | fcvt.fxu.s1 nhi[1]=f0 | ||
161 | mov ar.ec=8 };; | ||
162 | |||
163 | .align 32 | ||
164 | .L1st_ctop: | ||
165 | .pred.rel "mutex",p40,p42 | ||
166 | { .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++) | ||
167 | (p18) xma.hu ahi[0]=alo[2],bi,ahi[1] | ||
168 | (p40) add n[2]=n[2],a[2] } // (p23) } | ||
169 | { .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)(p16) | ||
170 | (p18) xma.lu alo[2]=alo[2],bi,ahi[1] | ||
171 | (p42) add n[2]=n[2],a[2],1 };; // (p23) | ||
172 | { .mfi; (p21) getf.sig a[0]=alo[5] | ||
173 | (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1] | ||
174 | (p42) cmp.leu p41,p39=n[2],a[2] } // (p23) | ||
175 | { .mfi; (p23) st8 [tp_1]=n[2],8 | ||
176 | (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1] | ||
177 | (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23) | ||
178 | { .mmb; (p21) getf.sig n[0]=nlo[3] | ||
179 | (p16) nop.m 0 | ||
180 | br.ctop.sptk .L1st_ctop };; | ||
181 | .L1st_cend: | ||
182 | |||
183 | { .mmi; getf.sig a[0]=ahi[6] // (p24) | ||
184 | getf.sig n[0]=nhi[4] | ||
185 | add num=-1,num };; // num-- | ||
186 | { .mmi; .pred.rel "mutex",p40,p42 | ||
187 | (p40) add n[0]=n[0],a[0] | ||
188 | (p42) add n[0]=n[0],a[0],1 | ||
189 | sub aptr=aptr,len };; // rewind | ||
190 | { .mmi; .pred.rel "mutex",p40,p42 | ||
191 | (p40) cmp.ltu p41,p39=n[0],a[0] | ||
192 | (p42) cmp.leu p41,p39=n[0],a[0] | ||
193 | sub nptr=nptr,len };; | ||
194 | { .mmi; .pred.rel "mutex",p39,p41 | ||
195 | (p39) add topbit=r0,r0 | ||
196 | (p41) add topbit=r0,r0,1 | ||
197 | nop.i 0 } | ||
198 | { .mmi; st8 [tp_1]=n[0] | ||
199 | add tptr=16,sp | ||
200 | add tp_1=8,sp };; | ||
201 | |||
202 | .Louter: | ||
203 | { .mmi; ldf8 bi=[bptr],8 // (*bp++) | ||
204 | ldf8 ahi[3]=[tptr] // tp[0] | ||
205 | add r30=8,aptr };; | ||
206 | { .mmi; ldf8 alo[4]=[aptr],16 // ap[0] | ||
207 | ldf8 alo[3]=[r30],16 // ap[1] | ||
208 | add r31=8,nptr };; | ||
209 | { .mfb; ldf8 alo[2]=[aptr],16 // ap[2] | ||
210 | xma.hu ahi[2]=alo[4],bi,ahi[3] // ap[0]*bp[i]+tp[0] | ||
211 | brp.loop.imp .Linner_ctop,.Linner_cend-16 | ||
212 | } | ||
213 | { .mfb; ldf8 alo[1]=[r30] // ap[3] | ||
214 | xma.lu alo[4]=alo[4],bi,ahi[3] | ||
215 | clrrrb.pr };; | ||
216 | { .mfi; ldf8 nlo[2]=[nptr],16 // np[0] | ||
217 | xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[i] | ||
218 | nop.i 0 } | ||
219 | { .mfi; ldf8 nlo[1]=[r31] // np[1] | ||
220 | xma.lu alo[3]=alo[3],bi,ahi[2] | ||
221 | mov pr.rot=0x20101f<<16 | ||
222 | // ------^----- (p40) at first (p23) | ||
223 | // --------^--- (p30) at first (p22) | ||
224 | // ----------^^ p[16:20]=1 | ||
225 | };; | ||
226 | { .mfi; st8 [tptr]=r0 // tp[0] is already accounted | ||
227 | xmpy.lu m0=alo[4],n0 // (ap[0]*bp[i]+tp[0])*n0 | ||
228 | mov ar.lc=lc } | ||
229 | { .mfi; | ||
230 | fcvt.fxu.s1 nhi[1]=f0 | ||
231 | mov ar.ec=8 };; | ||
232 | |||
233 | // This loop spins in 4*(n+7) ticks on Itanium 2 and should spin in | ||
234 | // 7*(n+7) ticks on Itanium (the one codenamed Merced). Factor of 7 | ||
235 | // in latter case accounts for two-tick pipeline stall, which means | ||
236 | // that its performance would be ~20% lower than optimal. No | ||
237 | // attempt was made to address this, because the original Itanium is | ||
238 | // hardly seen out in the wild... | ||
239 | .align 32 | ||
240 | .Linner_ctop: | ||
241 | .pred.rel "mutex",p40,p42 | ||
242 | .pred.rel "mutex",p30,p32 | ||
243 | { .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++) | ||
244 | (p18) xma.hu ahi[0]=alo[2],bi,ahi[1] | ||
245 | (p40) add n[2]=n[2],a[2] } // (p23) | ||
246 | { .mfi; (p16) nop.m 0 | ||
247 | (p18) xma.lu alo[2]=alo[2],bi,ahi[1] | ||
248 | (p42) add n[2]=n[2],a[2],1 };; // (p23) | ||
249 | { .mfi; (p21) getf.sig a[0]=alo[5] | ||
250 | (p16) nop.f 0 | ||
251 | (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23) | ||
252 | { .mfi; (p21) ld8 t[0]=[tptr],8 | ||
253 | (p16) nop.f 0 | ||
254 | (p42) cmp.leu p41,p39=n[2],a[2] };; // (p23) | ||
255 | { .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++) | ||
256 | (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1] | ||
257 | (p30) add a[1]=a[1],t[1] } // (p22) | ||
258 | { .mfi; (p16) nop.m 0 | ||
259 | (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1] | ||
260 | (p32) add a[1]=a[1],t[1],1 };; // (p22) | ||
261 | { .mmi; (p21) getf.sig n[0]=nlo[3] | ||
262 | (p16) nop.m 0 | ||
263 | (p30) cmp.ltu p31,p29=a[1],t[1] } // (p22) | ||
264 | { .mmb; (p23) st8 [tp_1]=n[2],8 | ||
265 | (p32) cmp.leu p31,p29=a[1],t[1] // (p22) | ||
266 | br.ctop.sptk .Linner_ctop };; | ||
267 | .Linner_cend: | ||
268 | |||
269 | { .mmi; getf.sig a[0]=ahi[6] // (p24) | ||
270 | getf.sig n[0]=nhi[4] | ||
271 | nop.i 0 };; | ||
272 | |||
273 | { .mmi; .pred.rel "mutex",p31,p33 | ||
274 | (p31) add a[0]=a[0],topbit | ||
275 | (p33) add a[0]=a[0],topbit,1 | ||
276 | mov topbit=r0 };; | ||
277 | { .mfi; .pred.rel "mutex",p31,p33 | ||
278 | (p31) cmp.ltu p32,p30=a[0],topbit | ||
279 | (p33) cmp.leu p32,p30=a[0],topbit | ||
280 | } | ||
281 | { .mfi; .pred.rel "mutex",p40,p42 | ||
282 | (p40) add n[0]=n[0],a[0] | ||
283 | (p42) add n[0]=n[0],a[0],1 | ||
284 | };; | ||
285 | { .mmi; .pred.rel "mutex",p44,p46 | ||
286 | (p40) cmp.ltu p41,p39=n[0],a[0] | ||
287 | (p42) cmp.leu p41,p39=n[0],a[0] | ||
288 | (p32) add topbit=r0,r0,1 } | ||
289 | |||
290 | { .mmi; st8 [tp_1]=n[0],8 | ||
291 | cmp4.ne p6,p0=1,num | ||
292 | sub aptr=aptr,len };; // rewind | ||
293 | { .mmi; sub nptr=nptr,len | ||
294 | (p41) add topbit=r0,r0,1 | ||
295 | add tptr=16,sp } | ||
296 | { .mmb; add tp_1=8,sp | ||
297 | add num=-1,num // num-- | ||
298 | (p6) br.cond.sptk.many .Louter };; | ||
299 | |||
300 | { .mbb; add lc=4,lc | ||
301 | brp.loop.imp .Lsub_ctop,.Lsub_cend-16 | ||
302 | clrrrb.pr };; | ||
303 | { .mii; nop.m 0 | ||
304 | mov pr.rot=0x10001<<16 | ||
305 | // ------^---- (p33) at first (p17) | ||
306 | mov ar.lc=lc } | ||
307 | { .mii; nop.m 0 | ||
308 | mov ar.ec=3 | ||
309 | nop.i 0 };; | ||
310 | |||
311 | .Lsub_ctop: | ||
312 | .pred.rel "mutex",p33,p35 | ||
313 | { .mfi; (p16) ld8 t[0]=[tptr],8 // t=*(tp++) | ||
314 | (p16) nop.f 0 | ||
315 | (p33) sub n[1]=t[1],n[1] } // (p17) | ||
316 | { .mfi; (p16) ld8 n[0]=[nptr],8 // n=*(np++) | ||
317 | (p16) nop.f 0 | ||
318 | (p35) sub n[1]=t[1],n[1],1 };; // (p17) | ||
319 | { .mib; (p18) st8 [rptr]=n[2],8 // *(rp++)=r | ||
320 | (p33) cmp.gtu p34,p32=n[1],t[1] // (p17) | ||
321 | (p18) nop.b 0 } | ||
322 | { .mib; (p18) nop.m 0 | ||
323 | (p35) cmp.geu p34,p32=n[1],t[1] // (p17) | ||
324 | br.ctop.sptk .Lsub_ctop };; | ||
325 | .Lsub_cend: | ||
326 | |||
327 | { .mmb; .pred.rel "mutex",p34,p36 | ||
328 | (p34) sub topbit=topbit,r0 // (p19) | ||
329 | (p36) sub topbit=topbit,r0,1 | ||
330 | brp.loop.imp .Lcopy_ctop,.Lcopy_cend-16 | ||
331 | } | ||
332 | { .mmb; sub rptr=rptr,len // rewind | ||
333 | sub tptr=tptr,len | ||
334 | clrrrb.pr };; | ||
335 | { .mmi; and aptr=tptr,topbit | ||
336 | andcm bptr=rptr,topbit | ||
337 | mov pr.rot=1<<16 };; | ||
338 | { .mii; or nptr=aptr,bptr | ||
339 | mov ar.lc=lc | ||
340 | mov ar.ec=3 };; | ||
341 | |||
342 | .Lcopy_ctop: | ||
343 | { .mmb; (p16) ld8 n[0]=[nptr],8 | ||
344 | (p18) st8 [tptr]=r0,8 | ||
345 | (p16) nop.b 0 } | ||
346 | { .mmb; (p16) nop.m 0 | ||
347 | (p18) st8 [rptr]=n[2],8 | ||
348 | br.ctop.sptk .Lcopy_ctop };; | ||
349 | .Lcopy_cend: | ||
350 | |||
351 | { .mmi; mov ret0=1 // signal "handled" | ||
352 | rum 1<<5 // clear um.mfh | ||
353 | mov ar.lc=prevlc } | ||
354 | { .mib; .restore sp | ||
355 | mov sp=prevsp | ||
356 | mov pr=prevpr,0x1ffff | ||
357 | br.ret.sptk.many b0 };; | ||
358 | .endp bn_mul_mont_general# | ||
359 | |||
360 | a1=r16; a2=r17; a3=r18; a4=r19; a5=r20; a6=r21; a7=r22; a8=r23; | ||
361 | n1=r24; n2=r25; n3=r26; n4=r27; n5=r28; n6=r29; n7=r30; n8=r31; | ||
362 | t0=r15; | ||
363 | |||
364 | ai0=f8; ai1=f9; ai2=f10; ai3=f11; ai4=f12; ai5=f13; ai6=f14; ai7=f15; | ||
365 | ni0=f16; ni1=f17; ni2=f18; ni3=f19; ni4=f20; ni5=f21; ni6=f22; ni7=f23; | ||
366 | |||
367 | .align 64 | ||
368 | .skip 48 // aligns loop body | ||
369 | .local bn_mul_mont_8# | ||
370 | .proc bn_mul_mont_8# | ||
371 | bn_mul_mont_8: | ||
372 | .prologue | ||
373 | { .mmi; .save ar.pfs,prevfs | ||
374 | alloc prevfs=ar.pfs,6,2,0,8 | ||
375 | .vframe prevsp | ||
376 | mov prevsp=sp | ||
377 | .save ar.lc,prevlc | ||
378 | mov prevlc=ar.lc } | ||
379 | { .mmi; add r17=-6*16,sp | ||
380 | add sp=-7*16,sp | ||
381 | .save pr,prevpr | ||
382 | mov prevpr=pr };; | ||
383 | |||
384 | { .mmi; .save.gf 0,0x10 | ||
385 | stf.spill [sp]=f16,-16 | ||
386 | .save.gf 0,0x20 | ||
387 | stf.spill [r17]=f17,32 | ||
388 | add r16=-5*16,prevsp};; | ||
389 | { .mmi; .save.gf 0,0x40 | ||
390 | stf.spill [r16]=f18,32 | ||
391 | .save.gf 0,0x80 | ||
392 | stf.spill [r17]=f19,32 | ||
393 | $ADDP aptr=0,in1 };; | ||
394 | { .mmi; .save.gf 0,0x100 | ||
395 | stf.spill [r16]=f20,32 | ||
396 | .save.gf 0,0x200 | ||
397 | stf.spill [r17]=f21,32 | ||
398 | $ADDP r29=8,in1 };; | ||
399 | { .mmi; .save.gf 0,0x400 | ||
400 | stf.spill [r16]=f22 | ||
401 | .save.gf 0,0x800 | ||
402 | stf.spill [r17]=f23 | ||
403 | $ADDP rptr=0,in0 };; | ||
404 | |||
405 | .body | ||
406 | .rotf bj[8],mj[2],tf[2],alo[10],ahi[10],nlo[10],nhi[10] | ||
407 | .rotr t[8] | ||
408 | |||
409 | // load input vectors padding them to 8 elements | ||
410 | { .mmi; ldf8 ai0=[aptr],16 // ap[0] | ||
411 | ldf8 ai1=[r29],16 // ap[1] | ||
412 | $ADDP bptr=0,in2 } | ||
413 | { .mmi; $ADDP r30=8,in2 | ||
414 | $ADDP nptr=0,in3 | ||
415 | $ADDP r31=8,in3 };; | ||
416 | { .mmi; ldf8 bj[7]=[bptr],16 // bp[0] | ||
417 | ldf8 bj[6]=[r30],16 // bp[1] | ||
418 | cmp4.le p4,p5=3,in5 } | ||
419 | { .mmi; ldf8 ni0=[nptr],16 // np[0] | ||
420 | ldf8 ni1=[r31],16 // np[1] | ||
421 | cmp4.le p6,p7=4,in5 };; | ||
422 | |||
423 | { .mfi; (p4)ldf8 ai2=[aptr],16 // ap[2] | ||
424 | (p5)fcvt.fxu ai2=f0 | ||
425 | cmp4.le p8,p9=5,in5 } | ||
426 | { .mfi; (p6)ldf8 ai3=[r29],16 // ap[3] | ||
427 | (p7)fcvt.fxu ai3=f0 | ||
428 | cmp4.le p10,p11=6,in5 } | ||
429 | { .mfi; (p4)ldf8 bj[5]=[bptr],16 // bp[2] | ||
430 | (p5)fcvt.fxu bj[5]=f0 | ||
431 | cmp4.le p12,p13=7,in5 } | ||
432 | { .mfi; (p6)ldf8 bj[4]=[r30],16 // bp[3] | ||
433 | (p7)fcvt.fxu bj[4]=f0 | ||
434 | cmp4.le p14,p15=8,in5 } | ||
435 | { .mfi; (p4)ldf8 ni2=[nptr],16 // np[2] | ||
436 | (p5)fcvt.fxu ni2=f0 | ||
437 | addp4 r28=-1,in5 } | ||
438 | { .mfi; (p6)ldf8 ni3=[r31],16 // np[3] | ||
439 | (p7)fcvt.fxu ni3=f0 | ||
440 | $ADDP in4=0,in4 };; | ||
441 | |||
442 | { .mfi; ldf8 n0=[in4] | ||
443 | fcvt.fxu tf[1]=f0 | ||
444 | nop.i 0 } | ||
445 | |||
446 | { .mfi; (p8)ldf8 ai4=[aptr],16 // ap[4] | ||
447 | (p9)fcvt.fxu ai4=f0 | ||
448 | mov t[0]=r0 } | ||
449 | { .mfi; (p10)ldf8 ai5=[r29],16 // ap[5] | ||
450 | (p11)fcvt.fxu ai5=f0 | ||
451 | mov t[1]=r0 } | ||
452 | { .mfi; (p8)ldf8 bj[3]=[bptr],16 // bp[4] | ||
453 | (p9)fcvt.fxu bj[3]=f0 | ||
454 | mov t[2]=r0 } | ||
455 | { .mfi; (p10)ldf8 bj[2]=[r30],16 // bp[5] | ||
456 | (p11)fcvt.fxu bj[2]=f0 | ||
457 | mov t[3]=r0 } | ||
458 | { .mfi; (p8)ldf8 ni4=[nptr],16 // np[4] | ||
459 | (p9)fcvt.fxu ni4=f0 | ||
460 | mov t[4]=r0 } | ||
461 | { .mfi; (p10)ldf8 ni5=[r31],16 // np[5] | ||
462 | (p11)fcvt.fxu ni5=f0 | ||
463 | mov t[5]=r0 };; | ||
464 | |||
465 | { .mfi; (p12)ldf8 ai6=[aptr],16 // ap[6] | ||
466 | (p13)fcvt.fxu ai6=f0 | ||
467 | mov t[6]=r0 } | ||
468 | { .mfi; (p14)ldf8 ai7=[r29],16 // ap[7] | ||
469 | (p15)fcvt.fxu ai7=f0 | ||
470 | mov t[7]=r0 } | ||
471 | { .mfi; (p12)ldf8 bj[1]=[bptr],16 // bp[6] | ||
472 | (p13)fcvt.fxu bj[1]=f0 | ||
473 | mov ar.lc=r28 } | ||
474 | { .mfi; (p14)ldf8 bj[0]=[r30],16 // bp[7] | ||
475 | (p15)fcvt.fxu bj[0]=f0 | ||
476 | mov ar.ec=1 } | ||
477 | { .mfi; (p12)ldf8 ni6=[nptr],16 // np[6] | ||
478 | (p13)fcvt.fxu ni6=f0 | ||
479 | mov pr.rot=1<<16 } | ||
480 | { .mfb; (p14)ldf8 ni7=[r31],16 // np[7] | ||
481 | (p15)fcvt.fxu ni7=f0 | ||
482 | brp.loop.imp .Louter_8_ctop,.Louter_8_cend-16 | ||
483 | };; | ||
484 | |||
485 | // The loop is scheduled for 32*n ticks on Itanium 2. An actual attempt | ||
486 | // to measure with the help of the Interval Time Counter indicated the | ||
487 | // factor is a tad higher: 33 or 34, if not 35. Exact measurement and | ||
488 | // addressing the issue is problematic, because I don't have access | ||
489 | // to platform-specific instruction-level profiler. On Itanium it | ||
490 | // should run in 56*n ticks, because of higher xma latency... | ||
491 | .Louter_8_ctop: | ||
492 | .pred.rel "mutex",p40,p42 | ||
493 | .pred.rel "mutex",p48,p50 | ||
494 | { .mfi; (p16) nop.m 0 // 0: | ||
495 | (p16) xma.hu ahi[0]=ai0,bj[7],tf[1] // ap[0]*b[i]+t[0] | ||
496 | (p40) add a3=a3,n3 } // (p17) a3+=n3 | ||
497 | { .mfi; (p42) add a3=a3,n3,1 | ||
498 | (p16) xma.lu alo[0]=ai0,bj[7],tf[1] | ||
499 | (p16) nop.i 0 };; | ||
500 | { .mii; (p17) getf.sig a7=alo[8] // 1: | ||
501 | (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3 | ||
502 | (p50) add t[6]=t[6],a3,1 };; | ||
503 | { .mfi; (p17) getf.sig a8=ahi[8] // 2: | ||
504 | (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0 | ||
505 | (p40) cmp.ltu p43,p41=a3,n3 } | ||
506 | { .mfi; (p42) cmp.leu p43,p41=a3,n3 | ||
507 | (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6] | ||
508 | (p16) nop.i 0 };; | ||
509 | { .mii; (p17) getf.sig n5=nlo[6] // 3: | ||
510 | (p48) cmp.ltu p51,p49=t[6],a3 | ||
511 | (p50) cmp.leu p51,p49=t[6],a3 };; | ||
512 | .pred.rel "mutex",p41,p43 | ||
513 | .pred.rel "mutex",p49,p51 | ||
514 | { .mfi; (p16) nop.m 0 // 4: | ||
515 | (p16) xma.hu ahi[1]=ai1,bj[7],ahi[0] // ap[1]*b[i] | ||
516 | (p41) add a4=a4,n4 } // (p17) a4+=n4 | ||
517 | { .mfi; (p43) add a4=a4,n4,1 | ||
518 | (p16) xma.lu alo[1]=ai1,bj[7],ahi[0] | ||
519 | (p16) nop.i 0 };; | ||
520 | { .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4 | ||
521 | (p16) xmpy.lu mj[0]=alo[0],n0 // (ap[0]*b[i]+t[0])*n0 | ||
522 | (p51) add t[5]=t[5],a4,1 };; | ||
523 | { .mfi; (p16) nop.m 0 // 6: | ||
524 | (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0 | ||
525 | (p41) cmp.ltu p42,p40=a4,n4 } | ||
526 | { .mfi; (p43) cmp.leu p42,p40=a4,n4 | ||
527 | (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7] | ||
528 | (p16) nop.i 0 };; | ||
529 | { .mii; (p17) getf.sig n6=nlo[7] // 7: | ||
530 | (p49) cmp.ltu p50,p48=t[5],a4 | ||
531 | (p51) cmp.leu p50,p48=t[5],a4 };; | ||
532 | .pred.rel "mutex",p40,p42 | ||
533 | .pred.rel "mutex",p48,p50 | ||
534 | { .mfi; (p16) nop.m 0 // 8: | ||
535 | (p16) xma.hu ahi[2]=ai2,bj[7],ahi[1] // ap[2]*b[i] | ||
536 | (p40) add a5=a5,n5 } // (p17) a5+=n5 | ||
537 | { .mfi; (p42) add a5=a5,n5,1 | ||
538 | (p16) xma.lu alo[2]=ai2,bj[7],ahi[1] | ||
539 | (p16) nop.i 0 };; | ||
540 | { .mii; (p16) getf.sig a1=alo[1] // 9: | ||
541 | (p48) add t[4]=t[4],a5 // p(17) t[4]+=a5 | ||
542 | (p50) add t[4]=t[4],a5,1 };; | ||
543 | { .mfi; (p16) nop.m 0 // 10: | ||
544 | (p16) xma.hu nhi[0]=ni0,mj[0],alo[0] // np[0]*m0 | ||
545 | (p40) cmp.ltu p43,p41=a5,n5 } | ||
546 | { .mfi; (p42) cmp.leu p43,p41=a5,n5 | ||
547 | (p16) xma.lu nlo[0]=ni0,mj[0],alo[0] | ||
548 | (p16) nop.i 0 };; | ||
549 | { .mii; (p17) getf.sig n7=nlo[8] // 11: | ||
550 | (p48) cmp.ltu p51,p49=t[4],a5 | ||
551 | (p50) cmp.leu p51,p49=t[4],a5 };; | ||
552 | .pred.rel "mutex",p41,p43 | ||
553 | .pred.rel "mutex",p49,p51 | ||
554 | { .mfi; (p17) getf.sig n8=nhi[8] // 12: | ||
555 | (p16) xma.hu ahi[3]=ai3,bj[7],ahi[2] // ap[3]*b[i] | ||
556 | (p41) add a6=a6,n6 } // (p17) a6+=n6 | ||
557 | { .mfi; (p43) add a6=a6,n6,1 | ||
558 | (p16) xma.lu alo[3]=ai3,bj[7],ahi[2] | ||
559 | (p16) nop.i 0 };; | ||
560 | { .mii; (p16) getf.sig a2=alo[2] // 13: | ||
561 | (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6 | ||
562 | (p51) add t[3]=t[3],a6,1 };; | ||
563 | { .mfi; (p16) nop.m 0 // 14: | ||
564 | (p16) xma.hu nhi[1]=ni1,mj[0],nhi[0] // np[1]*m0 | ||
565 | (p41) cmp.ltu p42,p40=a6,n6 } | ||
566 | { .mfi; (p43) cmp.leu p42,p40=a6,n6 | ||
567 | (p16) xma.lu nlo[1]=ni1,mj[0],nhi[0] | ||
568 | (p16) nop.i 0 };; | ||
569 | { .mii; (p16) nop.m 0 // 15: | ||
570 | (p49) cmp.ltu p50,p48=t[3],a6 | ||
571 | (p51) cmp.leu p50,p48=t[3],a6 };; | ||
572 | .pred.rel "mutex",p40,p42 | ||
573 | .pred.rel "mutex",p48,p50 | ||
574 | { .mfi; (p16) nop.m 0 // 16: | ||
575 | (p16) xma.hu ahi[4]=ai4,bj[7],ahi[3] // ap[4]*b[i] | ||
576 | (p40) add a7=a7,n7 } // (p17) a7+=n7 | ||
577 | { .mfi; (p42) add a7=a7,n7,1 | ||
578 | (p16) xma.lu alo[4]=ai4,bj[7],ahi[3] | ||
579 | (p16) nop.i 0 };; | ||
580 | { .mii; (p16) getf.sig a3=alo[3] // 17: | ||
581 | (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7 | ||
582 | (p50) add t[2]=t[2],a7,1 };; | ||
583 | { .mfi; (p16) nop.m 0 // 18: | ||
584 | (p16) xma.hu nhi[2]=ni2,mj[0],nhi[1] // np[2]*m0 | ||
585 | (p40) cmp.ltu p43,p41=a7,n7 } | ||
586 | { .mfi; (p42) cmp.leu p43,p41=a7,n7 | ||
587 | (p16) xma.lu nlo[2]=ni2,mj[0],nhi[1] | ||
588 | (p16) nop.i 0 };; | ||
589 | { .mii; (p16) getf.sig n1=nlo[1] // 19: | ||
590 | (p48) cmp.ltu p51,p49=t[2],a7 | ||
591 | (p50) cmp.leu p51,p49=t[2],a7 };; | ||
592 | .pred.rel "mutex",p41,p43 | ||
593 | .pred.rel "mutex",p49,p51 | ||
594 | { .mfi; (p16) nop.m 0 // 20: | ||
595 | (p16) xma.hu ahi[5]=ai5,bj[7],ahi[4] // ap[5]*b[i] | ||
596 | (p41) add a8=a8,n8 } // (p17) a8+=n8 | ||
597 | { .mfi; (p43) add a8=a8,n8,1 | ||
598 | (p16) xma.lu alo[5]=ai5,bj[7],ahi[4] | ||
599 | (p16) nop.i 0 };; | ||
600 | { .mii; (p16) getf.sig a4=alo[4] // 21: | ||
601 | (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8 | ||
602 | (p51) add t[1]=t[1],a8,1 };; | ||
603 | { .mfi; (p16) nop.m 0 // 22: | ||
604 | (p16) xma.hu nhi[3]=ni3,mj[0],nhi[2] // np[3]*m0 | ||
605 | (p41) cmp.ltu p42,p40=a8,n8 } | ||
606 | { .mfi; (p43) cmp.leu p42,p40=a8,n8 | ||
607 | (p16) xma.lu nlo[3]=ni3,mj[0],nhi[2] | ||
608 | (p16) nop.i 0 };; | ||
609 | { .mii; (p16) getf.sig n2=nlo[2] // 23: | ||
610 | (p49) cmp.ltu p50,p48=t[1],a8 | ||
611 | (p51) cmp.leu p50,p48=t[1],a8 };; | ||
612 | { .mfi; (p16) nop.m 0 // 24: | ||
613 | (p16) xma.hu ahi[6]=ai6,bj[7],ahi[5] // ap[6]*b[i] | ||
614 | (p16) add a1=a1,n1 } // (p16) a1+=n1 | ||
615 | { .mfi; (p16) nop.m 0 | ||
616 | (p16) xma.lu alo[6]=ai6,bj[7],ahi[5] | ||
617 | (p17) mov t[0]=r0 };; | ||
618 | { .mii; (p16) getf.sig a5=alo[5] // 25: | ||
619 | (p16) add t0=t[7],a1 // (p16) t[7]+=a1 | ||
620 | (p42) add t[0]=t[0],r0,1 };; | ||
621 | { .mfi; (p16) setf.sig tf[0]=t0 // 26: | ||
622 | (p16) xma.hu nhi[4]=ni4,mj[0],nhi[3] // np[4]*m0 | ||
623 | (p50) add t[0]=t[0],r0,1 } | ||
624 | { .mfi; (p16) cmp.ltu.unc p42,p40=a1,n1 | ||
625 | (p16) xma.lu nlo[4]=ni4,mj[0],nhi[3] | ||
626 | (p16) nop.i 0 };; | ||
627 | { .mii; (p16) getf.sig n3=nlo[3] // 27: | ||
628 | (p16) cmp.ltu.unc p50,p48=t0,a1 | ||
629 | (p16) nop.i 0 };; | ||
630 | .pred.rel "mutex",p40,p42 | ||
631 | .pred.rel "mutex",p48,p50 | ||
632 | { .mfi; (p16) nop.m 0 // 28: | ||
633 | (p16) xma.hu ahi[7]=ai7,bj[7],ahi[6] // ap[7]*b[i] | ||
634 | (p40) add a2=a2,n2 } // (p16) a2+=n2 | ||
635 | { .mfi; (p42) add a2=a2,n2,1 | ||
636 | (p16) xma.lu alo[7]=ai7,bj[7],ahi[6] | ||
637 | (p16) nop.i 0 };; | ||
638 | { .mii; (p16) getf.sig a6=alo[6] // 29: | ||
639 | (p48) add t[6]=t[6],a2 // (p16) t[6]+=a2 | ||
640 | (p50) add t[6]=t[6],a2,1 };; | ||
641 | { .mfi; (p16) nop.m 0 // 30: | ||
642 | (p16) xma.hu nhi[5]=ni5,mj[0],nhi[4] // np[5]*m0 | ||
643 | (p40) cmp.ltu p41,p39=a2,n2 } | ||
644 | { .mfi; (p42) cmp.leu p41,p39=a2,n2 | ||
645 | (p16) xma.lu nlo[5]=ni5,mj[0],nhi[4] | ||
646 | (p16) nop.i 0 };; | ||
647 | { .mfi; (p16) getf.sig n4=nlo[4] // 31: | ||
648 | (p16) nop.f 0 | ||
649 | (p48) cmp.ltu p49,p47=t[6],a2 } | ||
650 | { .mfb; (p50) cmp.leu p49,p47=t[6],a2 | ||
651 | (p16) nop.f 0 | ||
652 | br.ctop.sptk.many .Louter_8_ctop };; | ||
653 | .Louter_8_cend: | ||
654 | |||
655 | // The above loop has to execute one more time, without (p16), which | ||
656 | // is replaced with a merged move of np[8] to the GPR bank | ||
657 | .pred.rel "mutex",p40,p42 | ||
658 | .pred.rel "mutex",p48,p50 | ||
659 | { .mmi; (p0) getf.sig n1=ni0 // 0: | ||
660 | (p40) add a3=a3,n3 // (p17) a3+=n3 | ||
661 | (p42) add a3=a3,n3,1 };; | ||
662 | { .mii; (p17) getf.sig a7=alo[8] // 1: | ||
663 | (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3 | ||
664 | (p50) add t[6]=t[6],a3,1 };; | ||
665 | { .mfi; (p17) getf.sig a8=ahi[8] // 2: | ||
666 | (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0 | ||
667 | (p40) cmp.ltu p43,p41=a3,n3 } | ||
668 | { .mfi; (p42) cmp.leu p43,p41=a3,n3 | ||
669 | (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6] | ||
670 | (p0) nop.i 0 };; | ||
671 | { .mii; (p17) getf.sig n5=nlo[6] // 3: | ||
672 | (p48) cmp.ltu p51,p49=t[6],a3 | ||
673 | (p50) cmp.leu p51,p49=t[6],a3 };; | ||
674 | .pred.rel "mutex",p41,p43 | ||
675 | .pred.rel "mutex",p49,p51 | ||
676 | { .mmi; (p0) getf.sig n2=ni1 // 4: | ||
677 | (p41) add a4=a4,n4 // (p17) a4+=n4 | ||
678 | (p43) add a4=a4,n4,1 };; | ||
679 | { .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4 | ||
680 | (p0) nop.f 0 | ||
681 | (p51) add t[5]=t[5],a4,1 };; | ||
682 | { .mfi; (p0) getf.sig n3=ni2 // 6: | ||
683 | (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0 | ||
684 | (p41) cmp.ltu p42,p40=a4,n4 } | ||
685 | { .mfi; (p43) cmp.leu p42,p40=a4,n4 | ||
686 | (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7] | ||
687 | (p0) nop.i 0 };; | ||
688 | { .mii; (p17) getf.sig n6=nlo[7] // 7: | ||
689 | (p49) cmp.ltu p50,p48=t[5],a4 | ||
690 | (p51) cmp.leu p50,p48=t[5],a4 };; | ||
691 | .pred.rel "mutex",p40,p42 | ||
692 | .pred.rel "mutex",p48,p50 | ||
693 | { .mii; (p0) getf.sig n4=ni3 // 8: | ||
694 | (p40) add a5=a5,n5 // (p17) a5+=n5 | ||
695 | (p42) add a5=a5,n5,1 };; | ||
696 | { .mii; (p0) nop.m 0 // 9: | ||
697 | (p48) add t[4]=t[4],a5 // p(17) t[4]+=a5 | ||
698 | (p50) add t[4]=t[4],a5,1 };; | ||
699 | { .mii; (p0) nop.m 0 // 10: | ||
700 | (p40) cmp.ltu p43,p41=a5,n5 | ||
701 | (p42) cmp.leu p43,p41=a5,n5 };; | ||
702 | { .mii; (p17) getf.sig n7=nlo[8] // 11: | ||
703 | (p48) cmp.ltu p51,p49=t[4],a5 | ||
704 | (p50) cmp.leu p51,p49=t[4],a5 };; | ||
705 | .pred.rel "mutex",p41,p43 | ||
706 | .pred.rel "mutex",p49,p51 | ||
707 | { .mii; (p17) getf.sig n8=nhi[8] // 12: | ||
708 | (p41) add a6=a6,n6 // (p17) a6+=n6 | ||
709 | (p43) add a6=a6,n6,1 };; | ||
710 | { .mii; (p0) getf.sig n5=ni4 // 13: | ||
711 | (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6 | ||
712 | (p51) add t[3]=t[3],a6,1 };; | ||
713 | { .mii; (p0) nop.m 0 // 14: | ||
714 | (p41) cmp.ltu p42,p40=a6,n6 | ||
715 | (p43) cmp.leu p42,p40=a6,n6 };; | ||
716 | { .mii; (p0) getf.sig n6=ni5 // 15: | ||
717 | (p49) cmp.ltu p50,p48=t[3],a6 | ||
718 | (p51) cmp.leu p50,p48=t[3],a6 };; | ||
719 | .pred.rel "mutex",p40,p42 | ||
720 | .pred.rel "mutex",p48,p50 | ||
721 | { .mii; (p0) nop.m 0 // 16: | ||
722 | (p40) add a7=a7,n7 // (p17) a7+=n7 | ||
723 | (p42) add a7=a7,n7,1 };; | ||
724 | { .mii; (p0) nop.m 0 // 17: | ||
725 | (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7 | ||
726 | (p50) add t[2]=t[2],a7,1 };; | ||
727 | { .mii; (p0) nop.m 0 // 18: | ||
728 | (p40) cmp.ltu p43,p41=a7,n7 | ||
729 | (p42) cmp.leu p43,p41=a7,n7 };; | ||
730 | { .mii; (p0) getf.sig n7=ni6 // 19: | ||
731 | (p48) cmp.ltu p51,p49=t[2],a7 | ||
732 | (p50) cmp.leu p51,p49=t[2],a7 };; | ||
733 | .pred.rel "mutex",p41,p43 | ||
734 | .pred.rel "mutex",p49,p51 | ||
735 | { .mii; (p0) nop.m 0 // 20: | ||
736 | (p41) add a8=a8,n8 // (p17) a8+=n8 | ||
737 | (p43) add a8=a8,n8,1 };; | ||
738 | { .mmi; (p0) nop.m 0 // 21: | ||
739 | (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8 | ||
740 | (p51) add t[1]=t[1],a8,1 } | ||
741 | { .mmi; (p17) mov t[0]=r0 | ||
742 | (p41) cmp.ltu p42,p40=a8,n8 | ||
743 | (p43) cmp.leu p42,p40=a8,n8 };; | ||
744 | { .mmi; (p0) getf.sig n8=ni7 // 22: | ||
745 | (p49) cmp.ltu p50,p48=t[1],a8 | ||
746 | (p51) cmp.leu p50,p48=t[1],a8 } | ||
747 | { .mmi; (p42) add t[0]=t[0],r0,1 | ||
748 | (p0) add r16=-7*16,prevsp | ||
749 | (p0) add r17=-6*16,prevsp };; | ||
750 | |||
751 | // subtract np[8] from carrybit|tmp[8] | ||
752 | // carrybit|tmp[8] layout upon exit from above loop is: | ||
753 | // t[0]|t[1]|t[2]|t[3]|t[4]|t[5]|t[6]|t[7]|t0 (least significant) | ||
754 | { .mmi; (p50)add t[0]=t[0],r0,1 | ||
755 | add r18=-5*16,prevsp | ||
756 | sub n1=t0,n1 };; | ||
757 | { .mmi; cmp.gtu p34,p32=n1,t0;; | ||
758 | .pred.rel "mutex",p32,p34 | ||
759 | (p32)sub n2=t[7],n2 | ||
760 | (p34)sub n2=t[7],n2,1 };; | ||
761 | { .mii; (p32)cmp.gtu p35,p33=n2,t[7] | ||
762 | (p34)cmp.geu p35,p33=n2,t[7];; | ||
763 | .pred.rel "mutex",p33,p35 | ||
764 | (p33)sub n3=t[6],n3 } | ||
765 | { .mmi; (p35)sub n3=t[6],n3,1;; | ||
766 | (p33)cmp.gtu p34,p32=n3,t[6] | ||
767 | (p35)cmp.geu p34,p32=n3,t[6] };; | ||
768 | .pred.rel "mutex",p32,p34 | ||
769 | { .mii; (p32)sub n4=t[5],n4 | ||
770 | (p34)sub n4=t[5],n4,1;; | ||
771 | (p32)cmp.gtu p35,p33=n4,t[5] } | ||
772 | { .mmi; (p34)cmp.geu p35,p33=n4,t[5];; | ||
773 | .pred.rel "mutex",p33,p35 | ||
774 | (p33)sub n5=t[4],n5 | ||
775 | (p35)sub n5=t[4],n5,1 };; | ||
776 | { .mii; (p33)cmp.gtu p34,p32=n5,t[4] | ||
777 | (p35)cmp.geu p34,p32=n5,t[4];; | ||
778 | .pred.rel "mutex",p32,p34 | ||
779 | (p32)sub n6=t[3],n6 } | ||
780 | { .mmi; (p34)sub n6=t[3],n6,1;; | ||
781 | (p32)cmp.gtu p35,p33=n6,t[3] | ||
782 | (p34)cmp.geu p35,p33=n6,t[3] };; | ||
783 | .pred.rel "mutex",p33,p35 | ||
784 | { .mii; (p33)sub n7=t[2],n7 | ||
785 | (p35)sub n7=t[2],n7,1;; | ||
786 | (p33)cmp.gtu p34,p32=n7,t[2] } | ||
787 | { .mmi; (p35)cmp.geu p34,p32=n7,t[2];; | ||
788 | .pred.rel "mutex",p32,p34 | ||
789 | (p32)sub n8=t[1],n8 | ||
790 | (p34)sub n8=t[1],n8,1 };; | ||
791 | { .mii; (p32)cmp.gtu p35,p33=n8,t[1] | ||
792 | (p34)cmp.geu p35,p33=n8,t[1];; | ||
793 | .pred.rel "mutex",p33,p35 | ||
794 | (p33)sub a8=t[0],r0 } | ||
795 | { .mmi; (p35)sub a8=t[0],r0,1;; | ||
796 | (p33)cmp.gtu p34,p32=a8,t[0] | ||
797 | (p35)cmp.geu p34,p32=a8,t[0] };; | ||
798 | |||
799 | // save the result, either tmp[num] or tmp[num]-np[num] | ||
800 | .pred.rel "mutex",p32,p34 | ||
801 | { .mmi; (p32)st8 [rptr]=n1,8 | ||
802 | (p34)st8 [rptr]=t0,8 | ||
803 | add r19=-4*16,prevsp};; | ||
804 | { .mmb; (p32)st8 [rptr]=n2,8 | ||
805 | (p34)st8 [rptr]=t[7],8 | ||
806 | (p5)br.cond.dpnt.few .Ldone };; | ||
807 | { .mmb; (p32)st8 [rptr]=n3,8 | ||
808 | (p34)st8 [rptr]=t[6],8 | ||
809 | (p7)br.cond.dpnt.few .Ldone };; | ||
810 | { .mmb; (p32)st8 [rptr]=n4,8 | ||
811 | (p34)st8 [rptr]=t[5],8 | ||
812 | (p9)br.cond.dpnt.few .Ldone };; | ||
813 | { .mmb; (p32)st8 [rptr]=n5,8 | ||
814 | (p34)st8 [rptr]=t[4],8 | ||
815 | (p11)br.cond.dpnt.few .Ldone };; | ||
816 | { .mmb; (p32)st8 [rptr]=n6,8 | ||
817 | (p34)st8 [rptr]=t[3],8 | ||
818 | (p13)br.cond.dpnt.few .Ldone };; | ||
819 | { .mmb; (p32)st8 [rptr]=n7,8 | ||
820 | (p34)st8 [rptr]=t[2],8 | ||
821 | (p15)br.cond.dpnt.few .Ldone };; | ||
822 | { .mmb; (p32)st8 [rptr]=n8,8 | ||
823 | (p34)st8 [rptr]=t[1],8 | ||
824 | nop.b 0 };; | ||
825 | .Ldone: // epilogue | ||
826 | { .mmi; ldf.fill f16=[r16],64 | ||
827 | ldf.fill f17=[r17],64 | ||
828 | nop.i 0 } | ||
829 | { .mmi; ldf.fill f18=[r18],64 | ||
830 | ldf.fill f19=[r19],64 | ||
831 | mov pr=prevpr,0x1ffff };; | ||
832 | { .mmi; ldf.fill f20=[r16] | ||
833 | ldf.fill f21=[r17] | ||
834 | mov ar.lc=prevlc } | ||
835 | { .mmi; ldf.fill f22=[r18] | ||
836 | ldf.fill f23=[r19] | ||
837 | mov ret0=1 } // signal "handled" | ||
838 | { .mib; rum 1<<5 | ||
839 | .restore sp | ||
840 | mov sp=prevsp | ||
841 | br.ret.sptk.many b0 };; | ||
842 | .endp bn_mul_mont_8# | ||
843 | |||
844 | .type copyright#,\@object | ||
845 | copyright: | ||
846 | stringz "Montgomery multiplication for IA-64, CRYPTOGAMS by <appro\@openssl.org>" | ||
847 | ___ | ||
848 | |||
849 | $output=shift and open STDOUT,">$output"; | ||
850 | print $code; | ||
851 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/bn/asm/mips-mont.pl b/src/lib/libcrypto/bn/asm/mips-mont.pl
new file mode 100644
index 0000000000..b944a12b8e
--- /dev/null
+++ b/src/lib/libcrypto/bn/asm/mips-mont.pl
@@ -0,0 +1,426 @@
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | |||
10 | # This module is not of direct interest for OpenSSL, because it | ||
11 | # doesn't provide better performance for longer keys, at least not on | ||
12 | # in-order-execution cores. While 512-bit RSA sign operations can be | ||
13 | # 65% faster in 64-bit mode, 1024-bit ones are only 15% faster, and | ||
14 | # 4096-bit ones are up to 15% slower. In 32-bit mode it varies from | ||
15 | # 16% improvement for 512-bit RSA sign to -33% for 4096-bit RSA | ||
16 | # verify:-( All comparisons are against bn_mul_mont-free assembler. | ||
17 | # The module might be of interest to embedded system developers, as | ||
18 | # the code is smaller than 1KB, yet offers >3x improvement on MIPS64 | ||
19 | # and 75-30% [less for longer keys] on MIPS32 over compiler-generated | ||
20 | # code. | ||
21 | |||
22 | ###################################################################### | ||
23 | # There are a number of MIPS ABIs in use; O32 and N32/64 are the most | ||
24 | # widely used. Then there is a new contender: NUBI. It appears that if | ||
25 | # one picks the latter, it's possible to arrange code in an ABI-neutral | ||
26 | # manner. Therefore let's stick to the NUBI register layout: | ||
27 | # | ||
28 | ($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25)); | ||
29 | ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); | ||
30 | ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23)); | ||
31 | ($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31)); | ||
32 | # | ||
33 | # The return value is placed in $a0. Following coding rules facilitate | ||
34 | # interoperability: | ||
35 | # | ||
36 | # - never ever touch $tp, "thread pointer", former $gp; | ||
37 | # - copy return value to $t0, former $v0 [or to $a0 if you're adapting | ||
38 | # old code]; | ||
39 | # - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary; | ||
40 | # | ||
41 | # For reference here is register layout for N32/64 MIPS ABIs: | ||
42 | # | ||
43 | # ($zero,$at,$v0,$v1)=map("\$$_",(0..3)); | ||
44 | # ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); | ||
45 | # ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25)); | ||
46 | # ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23)); | ||
47 | # ($gp,$sp,$fp,$ra)=map("\$$_",(28..31)); | ||
48 | # | ||
49 | $flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64 | ||
50 | |||
51 | if ($flavour =~ /64|n32/i) { | ||
52 | $PTR_ADD="dadd"; # incidentally works even on n32 | ||
53 | $PTR_SUB="dsub"; # incidentally works even on n32 | ||
54 | $REG_S="sd"; | ||
55 | $REG_L="ld"; | ||
56 | $SZREG=8; | ||
57 | } else { | ||
58 | $PTR_ADD="add"; | ||
59 | $PTR_SUB="sub"; | ||
60 | $REG_S="sw"; | ||
61 | $REG_L="lw"; | ||
62 | $SZREG=4; | ||
63 | } | ||
64 | $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0x00fff000 : 0x00ff0000; | ||
65 | # | ||
66 | # <appro@openssl.org> | ||
67 | # | ||
68 | ###################################################################### | ||
69 | |||
70 | while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} | ||
71 | open STDOUT,">$output"; | ||
72 | |||
73 | if ($flavour =~ /64|n32/i) { | ||
74 | $LD="ld"; | ||
75 | $ST="sd"; | ||
76 | $MULTU="dmultu"; | ||
77 | $ADDU="daddu"; | ||
78 | $SUBU="dsubu"; | ||
79 | $BNSZ=8; | ||
80 | } else { | ||
81 | $LD="lw"; | ||
82 | $ST="sw"; | ||
83 | $MULTU="multu"; | ||
84 | $ADDU="addu"; | ||
85 | $SUBU="subu"; | ||
86 | $BNSZ=4; | ||
87 | } | ||
88 | |||
89 | # int bn_mul_mont( | ||
90 | $rp=$a0; # BN_ULONG *rp, | ||
91 | $ap=$a1; # const BN_ULONG *ap, | ||
92 | $bp=$a2; # const BN_ULONG *bp, | ||
93 | $np=$a3; # const BN_ULONG *np, | ||
94 | $n0=$a4; # const BN_ULONG *n0, | ||
95 | $num=$a5; # int num); | ||
96 | |||
97 | $lo0=$a6; | ||
98 | $hi0=$a7; | ||
99 | $lo1=$t1; | ||
100 | $hi1=$t2; | ||
101 | $aj=$s0; | ||
102 | $bi=$s1; | ||
103 | $nj=$s2; | ||
104 | $tp=$s3; | ||
105 | $alo=$s4; | ||
106 | $ahi=$s5; | ||
107 | $nlo=$s6; | ||
108 | $nhi=$s7; | ||
109 | $tj=$s8; | ||
110 | $i=$s9; | ||
111 | $j=$s10; | ||
112 | $m1=$s11; | ||
113 | |||
114 | $FRAMESIZE=14; | ||
115 | |||
116 | $code=<<___; | ||
117 | .text | ||
118 | |||
119 | .set noat | ||
120 | .set noreorder | ||
121 | |||
122 | .align 5 | ||
123 | .globl bn_mul_mont | ||
124 | .ent bn_mul_mont | ||
125 | bn_mul_mont: | ||
126 | ___ | ||
127 | $code.=<<___ if ($flavour =~ /o32/i); | ||
128 | lw $n0,16($sp) | ||
129 | lw $num,20($sp) | ||
130 | ___ | ||
131 | $code.=<<___; | ||
132 | slt $at,$num,4 | ||
133 | bnez $at,1f | ||
134 | li $t0,0 | ||
135 | slt $at,$num,17 # on in-order CPU | ||
136 | bnezl $at,bn_mul_mont_internal | ||
137 | nop | ||
138 | 1: jr $ra | ||
139 | li $a0,0 | ||
140 | .end bn_mul_mont | ||
141 | |||
142 | .align 5 | ||
143 | .ent bn_mul_mont_internal | ||
144 | bn_mul_mont_internal: | ||
145 | .frame $fp,$FRAMESIZE*$SZREG,$ra | ||
146 | .mask 0x40000000|$SAVED_REGS_MASK,-$SZREG | ||
147 | $PTR_SUB $sp,$FRAMESIZE*$SZREG | ||
148 | $REG_S $fp,($FRAMESIZE-1)*$SZREG($sp) | ||
149 | $REG_S $s11,($FRAMESIZE-2)*$SZREG($sp) | ||
150 | $REG_S $s10,($FRAMESIZE-3)*$SZREG($sp) | ||
151 | $REG_S $s9,($FRAMESIZE-4)*$SZREG($sp) | ||
152 | $REG_S $s8,($FRAMESIZE-5)*$SZREG($sp) | ||
153 | $REG_S $s7,($FRAMESIZE-6)*$SZREG($sp) | ||
154 | $REG_S $s6,($FRAMESIZE-7)*$SZREG($sp) | ||
155 | $REG_S $s5,($FRAMESIZE-8)*$SZREG($sp) | ||
156 | $REG_S $s4,($FRAMESIZE-9)*$SZREG($sp) | ||
157 | ___ | ||
158 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
159 | $REG_S $s3,($FRAMESIZE-10)*$SZREG($sp) | ||
160 | $REG_S $s2,($FRAMESIZE-11)*$SZREG($sp) | ||
161 | $REG_S $s1,($FRAMESIZE-12)*$SZREG($sp) | ||
162 | $REG_S $s0,($FRAMESIZE-13)*$SZREG($sp) | ||
163 | ___ | ||
164 | $code.=<<___; | ||
165 | move $fp,$sp | ||
166 | |||
167 | .set reorder | ||
168 | $LD $n0,0($n0) | ||
169 | $LD $bi,0($bp) # bp[0] | ||
170 | $LD $aj,0($ap) # ap[0] | ||
171 | $LD $nj,0($np) # np[0] | ||
172 | |||
173 | $PTR_SUB $sp,2*$BNSZ # place for two extra words | ||
174 | sll $num,`log($BNSZ)/log(2)` | ||
175 | li $at,-4096 | ||
176 | $PTR_SUB $sp,$num | ||
177 | and $sp,$at | ||
178 | |||
179 | $MULTU $aj,$bi | ||
180 | $LD $alo,$BNSZ($ap) | ||
181 | $LD $nlo,$BNSZ($np) | ||
182 | mflo $lo0 | ||
183 | mfhi $hi0 | ||
184 | $MULTU $lo0,$n0 | ||
185 | mflo $m1 | ||
186 | |||
187 | $MULTU $alo,$bi | ||
188 | mflo $alo | ||
189 | mfhi $ahi | ||
190 | |||
191 | $MULTU $nj,$m1 | ||
192 | mflo $lo1 | ||
193 | mfhi $hi1 | ||
194 | $MULTU $nlo,$m1 | ||
195 | $ADDU $lo1,$lo0 | ||
196 | sltu $at,$lo1,$lo0 | ||
197 | $ADDU $hi1,$at | ||
198 | mflo $nlo | ||
199 | mfhi $nhi | ||
200 | |||
201 | move $tp,$sp | ||
202 | li $j,2*$BNSZ | ||
203 | .align 4 | ||
204 | .L1st: | ||
205 | .set noreorder | ||
206 | $PTR_ADD $aj,$ap,$j | ||
207 | $PTR_ADD $nj,$np,$j | ||
208 | $LD $aj,($aj) | ||
209 | $LD $nj,($nj) | ||
210 | |||
211 | $MULTU $aj,$bi | ||
212 | $ADDU $lo0,$alo,$hi0 | ||
213 | $ADDU $lo1,$nlo,$hi1 | ||
214 | sltu $at,$lo0,$hi0 | ||
215 | sltu $t0,$lo1,$hi1 | ||
216 | $ADDU $hi0,$ahi,$at | ||
217 | $ADDU $hi1,$nhi,$t0 | ||
218 | mflo $alo | ||
219 | mfhi $ahi | ||
220 | |||
221 | $ADDU $lo1,$lo0 | ||
222 | sltu $at,$lo1,$lo0 | ||
223 | $MULTU $nj,$m1 | ||
224 | $ADDU $hi1,$at | ||
225 | addu $j,$BNSZ | ||
226 | $ST $lo1,($tp) | ||
227 | sltu $t0,$j,$num | ||
228 | mflo $nlo | ||
229 | mfhi $nhi | ||
230 | |||
231 | bnez $t0,.L1st | ||
232 | $PTR_ADD $tp,$BNSZ | ||
233 | .set reorder | ||
234 | |||
235 | $ADDU $lo0,$alo,$hi0 | ||
236 | sltu $at,$lo0,$hi0 | ||
237 | $ADDU $hi0,$ahi,$at | ||
238 | |||
239 | $ADDU $lo1,$nlo,$hi1 | ||
240 | sltu $t0,$lo1,$hi1 | ||
241 | $ADDU $hi1,$nhi,$t0 | ||
242 | $ADDU $lo1,$lo0 | ||
243 | sltu $at,$lo1,$lo0 | ||
244 | $ADDU $hi1,$at | ||
245 | |||
246 | $ST $lo1,($tp) | ||
247 | |||
248 | $ADDU $hi1,$hi0 | ||
249 | sltu $at,$hi1,$hi0 | ||
250 | $ST $hi1,$BNSZ($tp) | ||
251 | $ST $at,2*$BNSZ($tp) | ||
252 | |||
253 | li $i,$BNSZ | ||
254 | .align 4 | ||
255 | .Louter: | ||
256 | $PTR_ADD $bi,$bp,$i | ||
257 | $LD $bi,($bi) | ||
258 | $LD $aj,($ap) | ||
259 | $LD $alo,$BNSZ($ap) | ||
260 | $LD $tj,($sp) | ||
261 | |||
262 | $MULTU $aj,$bi | ||
263 | $LD $nj,($np) | ||
264 | $LD $nlo,$BNSZ($np) | ||
265 | mflo $lo0 | ||
266 | mfhi $hi0 | ||
267 | $ADDU $lo0,$tj | ||
268 | $MULTU $lo0,$n0 | ||
269 | sltu $at,$lo0,$tj | ||
270 | $ADDU $hi0,$at | ||
271 | mflo $m1 | ||
272 | |||
273 | $MULTU $alo,$bi | ||
274 | mflo $alo | ||
275 | mfhi $ahi | ||
276 | |||
277 | $MULTU $nj,$m1 | ||
278 | mflo $lo1 | ||
279 | mfhi $hi1 | ||
280 | |||
281 | $MULTU $nlo,$m1 | ||
282 | $ADDU $lo1,$lo0 | ||
283 | sltu $at,$lo1,$lo0 | ||
284 | $ADDU $hi1,$at | ||
285 | mflo $nlo | ||
286 | mfhi $nhi | ||
287 | |||
288 | move $tp,$sp | ||
289 | li $j,2*$BNSZ | ||
290 | $LD $tj,$BNSZ($tp) | ||
291 | .align 4 | ||
292 | .Linner: | ||
293 | .set noreorder | ||
294 | $PTR_ADD $aj,$ap,$j | ||
295 | $PTR_ADD $nj,$np,$j | ||
296 | $LD $aj,($aj) | ||
297 | $LD $nj,($nj) | ||
298 | |||
299 | $MULTU $aj,$bi | ||
300 | $ADDU $lo0,$alo,$hi0 | ||
301 | $ADDU $lo1,$nlo,$hi1 | ||
302 | sltu $at,$lo0,$hi0 | ||
303 | sltu $t0,$lo1,$hi1 | ||
304 | $ADDU $hi0,$ahi,$at | ||
305 | $ADDU $hi1,$nhi,$t0 | ||
306 | mflo $alo | ||
307 | mfhi $ahi | ||
308 | |||
309 | $ADDU $lo0,$tj | ||
310 | addu $j,$BNSZ | ||
311 | $MULTU $nj,$m1 | ||
312 | sltu $at,$lo0,$tj | ||
313 | $ADDU $lo1,$lo0 | ||
314 | $ADDU $hi0,$at | ||
315 | sltu $t0,$lo1,$lo0 | ||
316 | $LD $tj,2*$BNSZ($tp) | ||
317 | $ADDU $hi1,$t0 | ||
318 | sltu $at,$j,$num | ||
319 | mflo $nlo | ||
320 | mfhi $nhi | ||
321 | $ST $lo1,($tp) | ||
322 | bnez $at,.Linner | ||
323 | $PTR_ADD $tp,$BNSZ | ||
324 | .set reorder | ||
325 | |||
326 | $ADDU $lo0,$alo,$hi0 | ||
327 | sltu $at,$lo0,$hi0 | ||
328 | $ADDU $hi0,$ahi,$at | ||
329 | $ADDU $lo0,$tj | ||
330 | sltu $t0,$lo0,$tj | ||
331 | $ADDU $hi0,$t0 | ||
332 | |||
333 | $LD $tj,2*$BNSZ($tp) | ||
334 | $ADDU $lo1,$nlo,$hi1 | ||
335 | sltu $at,$lo1,$hi1 | ||
336 | $ADDU $hi1,$nhi,$at | ||
337 | $ADDU $lo1,$lo0 | ||
338 | sltu $t0,$lo1,$lo0 | ||
339 | $ADDU $hi1,$t0 | ||
340 | $ST $lo1,($tp) | ||
341 | |||
342 | $ADDU $lo1,$hi1,$hi0 | ||
343 | sltu $hi1,$lo1,$hi0 | ||
344 | $ADDU $lo1,$tj | ||
345 | sltu $at,$lo1,$tj | ||
346 | $ADDU $hi1,$at | ||
347 | $ST $lo1,$BNSZ($tp) | ||
348 | $ST $hi1,2*$BNSZ($tp) | ||
349 | |||
350 | addu $i,$BNSZ | ||
351 | sltu $t0,$i,$num | ||
352 | bnez $t0,.Louter | ||
353 | |||
354 | .set noreorder | ||
355 | $PTR_ADD $tj,$sp,$num # &tp[num] | ||
356 | move $tp,$sp | ||
357 | move $ap,$sp | ||
358 | li $hi0,0 # clear borrow bit | ||
359 | |||
360 | .align 4 | ||
361 | .Lsub: $LD $lo0,($tp) | ||
362 | $LD $lo1,($np) | ||
363 | $PTR_ADD $tp,$BNSZ | ||
364 | $PTR_ADD $np,$BNSZ | ||
365 | $SUBU $lo1,$lo0,$lo1 # tp[i]-np[i] | ||
366 | sgtu $at,$lo1,$lo0 | ||
367 | $SUBU $lo0,$lo1,$hi0 | ||
368 | sgtu $hi0,$lo0,$lo1 | ||
369 | $ST $lo0,($rp) | ||
370 | or $hi0,$at | ||
371 | sltu $at,$tp,$tj | ||
372 | bnez $at,.Lsub | ||
373 | $PTR_ADD $rp,$BNSZ | ||
374 | |||
375 | $SUBU $hi0,$hi1,$hi0 # handle uppermost overflow bit | ||
376 | move $tp,$sp | ||
377 | $PTR_SUB $rp,$num # restore rp | ||
378 | not $hi1,$hi0 | ||
379 | |||
380 | and $ap,$hi0,$sp | ||
381 | and $bp,$hi1,$rp | ||
382 | or $ap,$ap,$bp # ap=borrow?tp:rp | ||
383 | |||
384 | .align 4 | ||
385 | .Lcopy: $LD $aj,($ap) | ||
386 | $PTR_ADD $ap,$BNSZ | ||
387 | $ST $zero,($tp) | ||
388 | $PTR_ADD $tp,$BNSZ | ||
389 | sltu $at,$tp,$tj | ||
390 | $ST $aj,($rp) | ||
391 | bnez $at,.Lcopy | ||
392 | $PTR_ADD $rp,$BNSZ | ||
393 | |||
394 | li $a0,1 | ||
395 | li $t0,1 | ||
396 | |||
397 | .set noreorder | ||
398 | move $sp,$fp | ||
399 | $REG_L $fp,($FRAMESIZE-1)*$SZREG($sp) | ||
400 | $REG_L $s11,($FRAMESIZE-2)*$SZREG($sp) | ||
401 | $REG_L $s10,($FRAMESIZE-3)*$SZREG($sp) | ||
402 | $REG_L $s9,($FRAMESIZE-4)*$SZREG($sp) | ||
403 | $REG_L $s8,($FRAMESIZE-5)*$SZREG($sp) | ||
404 | $REG_L $s7,($FRAMESIZE-6)*$SZREG($sp) | ||
405 | $REG_L $s6,($FRAMESIZE-7)*$SZREG($sp) | ||
406 | $REG_L $s5,($FRAMESIZE-8)*$SZREG($sp) | ||
407 | $REG_L $s4,($FRAMESIZE-9)*$SZREG($sp) | ||
408 | ___ | ||
409 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
410 | $REG_L $s3,($FRAMESIZE-10)*$SZREG($sp) | ||
411 | $REG_L $s2,($FRAMESIZE-11)*$SZREG($sp) | ||
412 | $REG_L $s1,($FRAMESIZE-12)*$SZREG($sp) | ||
413 | $REG_L $s0,($FRAMESIZE-13)*$SZREG($sp) | ||
414 | ___ | ||
415 | $code.=<<___; | ||
416 | jr $ra | ||
417 | $PTR_ADD $sp,$FRAMESIZE*$SZREG | ||
418 | .end bn_mul_mont_internal | ||
419 | .rdata | ||
420 | .asciiz "Montgomery Multiplication for MIPS, CRYPTOGAMS by <appro\@openssl.org>" | ||
421 | ___ | ||
422 | |||
423 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
424 | |||
425 | print $code; | ||
426 | close STDOUT; | ||
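A note on the post-processing pass a few lines above: the substitution s/\`([^\`]*)\`/eval $1/gem evaluates any backtick-bracketed Perl expression left inside the collected assembly, which is how expressions such as `log($BNSZ)/log(2)` in the sll above become literal constants in the output. A standalone illustration of the idiom (hypothetical snippet, not part of the module):

    use strict;
    my $BNSZ = 8;                                # word size, 64-bit flavour
    my $line = "\tsub\t\$sp,\$sp,`2*$BNSZ`\n";   # backticks survive "" quoting
    $line =~ s/\`([^\`]*)\`/eval $1/gem;         # evaluates "2*8"
    print $line;                                 # emits: sub $sp,$sp,16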
diff --git a/src/lib/libcrypto/bn/asm/mips.pl b/src/lib/libcrypto/bn/asm/mips.pl new file mode 100644 index 0000000000..c162a3ec23 --- /dev/null +++ b/src/lib/libcrypto/bn/asm/mips.pl | |||
@@ -0,0 +1,2585 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. | ||
6 | # | ||
7 | # Rights for redistribution and usage in source and binary forms are | ||
8 | # granted according to the OpenSSL license. Warranty of any kind is | ||
9 | # disclaimed. | ||
10 | # ==================================================================== | ||
11 | |||
12 | |||
13 | # July 1999 | ||
14 | # | ||
15 | # This is a drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c. | ||
16 | # | ||
17 | # The module is designed to work with either of the "new" MIPS ABI(5), | ||
18 | # namely N32 or N64, offered by IRIX 6.x. It's not meant to work under | ||
19 | # IRIX 5.x, not only because 5.x doesn't support the new ABIs but also | ||
20 | # because 5.x kernels put the R4x00 CPU into 32-bit mode, so all those | ||
21 | # 64-bit instructions (daddu, dmultu, etc.) found below would only | ||
22 | # cause an illegal instruction exception:-( | ||
23 | # | ||
24 | # In addition, the code depends on preprocessor flags set up by the | ||
25 | # MIPSpro compiler driver (either as or cc) and therefore (probably?) | ||
26 | # can't be assembled by the GNU assembler. The GNU C driver manages | ||
27 | # fine though, as long as -mmips-as is specified or is the default | ||
28 | # option, because then it simply invokes /usr/bin/as, which in turn | ||
29 | # takes perfect care of the preprocessor definitions. Another neat | ||
30 | # feature offered by the MIPSpro assembler is an optimization pass. | ||
31 | # This gave me the opportunity to have the code look more regular, as | ||
32 | # all those architecture-dependent instruction rescheduling details | ||
33 | # were left to the assembler. Cool, huh? | ||
34 | # | ||
35 | # Performance improvement is astonishing! 'apps/openssl speed rsa dsa' | ||
36 | # goes way over 3 times faster! | ||
37 | # | ||
38 | # <appro@fy.chalmers.se> | ||
39 | |||
40 | # October 2010 | ||
41 | # | ||
42 | # Adapt the module for 32-bit ABIs and other OSes as well. The former | ||
43 | # was achieved by mechanically replacing 64-bit arithmetic instructions | ||
44 | # such as dmultu, daddu, etc. with their 32-bit counterparts and | ||
45 | # adjusting offsets denoting multiples of BN_ULONG. The above-mentioned | ||
46 | # >3x performance improvement naturally does not apply to 32-bit code | ||
47 | # [because there is no instruction a 32-bit compiler can't use]; one | ||
48 | # has to be content with a 40-85% improvement depending on benchmark | ||
49 | # and key length, more for longer keys. | ||
50 | |||
51 | $flavour = shift; | ||
52 | while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} | ||
53 | open STDOUT,">$output"; | ||
54 | |||
55 | if ($flavour =~ /64|n32/i) { | ||
56 | $LD="ld"; | ||
57 | $ST="sd"; | ||
58 | $MULTU="dmultu"; | ||
59 | $DIVU="ddivu"; | ||
60 | $ADDU="daddu"; | ||
61 | $SUBU="dsubu"; | ||
62 | $SRL="dsrl"; | ||
63 | $SLL="dsll"; | ||
64 | $BNSZ=8; | ||
65 | $PTR_ADD="daddu"; | ||
66 | $PTR_SUB="dsubu"; | ||
67 | $SZREG=8; | ||
68 | $REG_S="sd"; | ||
69 | $REG_L="ld"; | ||
70 | } else { | ||
71 | $LD="lw"; | ||
72 | $ST="sw"; | ||
73 | $MULTU="multu"; | ||
74 | $DIVU="divu"; | ||
75 | $ADDU="addu"; | ||
76 | $SUBU="subu"; | ||
77 | $SRL="srl"; | ||
78 | $SLL="sll"; | ||
79 | $BNSZ=4; | ||
80 | $PTR_ADD="addu"; | ||
81 | $PTR_SUB="subu"; | ||
82 | $SZREG=4; | ||
83 | $REG_S="sw"; | ||
84 | $REG_L="lw"; | ||
85 | $code=".set mips2\n"; | ||
86 | } | ||
87 | |||
88 | # Below is the N32/64 register layout used in the original module. | ||
89 | # | ||
90 | ($zero,$at,$v0,$v1)=map("\$$_",(0..3)); | ||
91 | ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); | ||
92 | ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25)); | ||
93 | ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23)); | ||
94 | ($gp,$sp,$fp,$ra)=map("\$$_",(28..31)); | ||
95 | ($ta0,$ta1,$ta2,$ta3)=($a4,$a5,$a6,$a7); | ||
96 | # | ||
97 | # No special adaptation is required for O32. NUBI on the other hand | ||
98 | # is handled by saving/restoring ($v1,$t0..$t3). | ||
99 | |||
100 | $gp=$v1 if ($flavour =~ /nubi/i); | ||
101 | |||
102 | $minus4=$v1; | ||
103 | |||
104 | $code.=<<___; | ||
105 | .rdata | ||
106 | .asciiz "mips3.s, Version 1.2" | ||
107 | .asciiz "MIPS II/III/IV ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>" | ||
108 | |||
109 | .text | ||
110 | .set noat | ||
111 | |||
112 | .align 5 | ||
113 | .globl bn_mul_add_words | ||
114 | .ent bn_mul_add_words | ||
115 | bn_mul_add_words: | ||
116 | .set noreorder | ||
117 | bgtz $a2,bn_mul_add_words_internal | ||
118 | move $v0,$zero | ||
119 | jr $ra | ||
120 | move $a0,$v0 | ||
121 | .end bn_mul_add_words | ||
122 | |||
123 | .align 5 | ||
124 | .ent bn_mul_add_words_internal | ||
125 | bn_mul_add_words_internal: | ||
126 | ___ | ||
127 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
128 | .frame $sp,6*$SZREG,$ra | ||
129 | .mask 0x8000f008,-$SZREG | ||
130 | .set noreorder | ||
131 | $PTR_SUB $sp,6*$SZREG | ||
132 | $REG_S $ra,5*$SZREG($sp) | ||
133 | $REG_S $t3,4*$SZREG($sp) | ||
134 | $REG_S $t2,3*$SZREG($sp) | ||
135 | $REG_S $t1,2*$SZREG($sp) | ||
136 | $REG_S $t0,1*$SZREG($sp) | ||
137 | $REG_S $gp,0*$SZREG($sp) | ||
138 | ___ | ||
139 | $code.=<<___; | ||
140 | .set reorder | ||
141 | li $minus4,-4 | ||
142 | and $ta0,$a2,$minus4 | ||
143 | $LD $t0,0($a1) | ||
144 | beqz $ta0,.L_bn_mul_add_words_tail | ||
145 | |||
146 | .L_bn_mul_add_words_loop: | ||
147 | $MULTU $t0,$a3 | ||
148 | $LD $t1,0($a0) | ||
149 | $LD $t2,$BNSZ($a1) | ||
150 | $LD $t3,$BNSZ($a0) | ||
151 | $LD $ta0,2*$BNSZ($a1) | ||
152 | $LD $ta1,2*$BNSZ($a0) | ||
153 | $ADDU $t1,$v0 | ||
154 | sltu $v0,$t1,$v0 # All manuals say it "compares 32-bit | ||
155 | # values", but it seems to work fine | ||
156 | # even on 64-bit registers. | ||
157 | mflo $at | ||
158 | mfhi $t0 | ||
159 | $ADDU $t1,$at | ||
160 | $ADDU $v0,$t0 | ||
161 | $MULTU $t2,$a3 | ||
162 | sltu $at,$t1,$at | ||
163 | $ST $t1,0($a0) | ||
164 | $ADDU $v0,$at | ||
165 | |||
166 | $LD $ta2,3*$BNSZ($a1) | ||
167 | $LD $ta3,3*$BNSZ($a0) | ||
168 | $ADDU $t3,$v0 | ||
169 | sltu $v0,$t3,$v0 | ||
170 | mflo $at | ||
171 | mfhi $t2 | ||
172 | $ADDU $t3,$at | ||
173 | $ADDU $v0,$t2 | ||
174 | $MULTU $ta0,$a3 | ||
175 | sltu $at,$t3,$at | ||
176 | $ST $t3,$BNSZ($a0) | ||
177 | $ADDU $v0,$at | ||
178 | |||
179 | subu $a2,4 | ||
180 | $PTR_ADD $a0,4*$BNSZ | ||
181 | $PTR_ADD $a1,4*$BNSZ | ||
182 | $ADDU $ta1,$v0 | ||
183 | sltu $v0,$ta1,$v0 | ||
184 | mflo $at | ||
185 | mfhi $ta0 | ||
186 | $ADDU $ta1,$at | ||
187 | $ADDU $v0,$ta0 | ||
188 | $MULTU $ta2,$a3 | ||
189 | sltu $at,$ta1,$at | ||
190 | $ST $ta1,-2*$BNSZ($a0) | ||
191 | $ADDU $v0,$at | ||
192 | |||
193 | |||
194 | and $ta0,$a2,$minus4 | ||
195 | $ADDU $ta3,$v0 | ||
196 | sltu $v0,$ta3,$v0 | ||
197 | mflo $at | ||
198 | mfhi $ta2 | ||
199 | $ADDU $ta3,$at | ||
200 | $ADDU $v0,$ta2 | ||
201 | sltu $at,$ta3,$at | ||
202 | $ST $ta3,-$BNSZ($a0) | ||
203 | $ADDU $v0,$at | ||
204 | .set noreorder | ||
205 | bgtzl $ta0,.L_bn_mul_add_words_loop | ||
206 | $LD $t0,0($a1) | ||
207 | |||
208 | beqz $a2,.L_bn_mul_add_words_return | ||
209 | nop | ||
210 | |||
211 | .L_bn_mul_add_words_tail: | ||
212 | .set reorder | ||
213 | $LD $t0,0($a1) | ||
214 | $MULTU $t0,$a3 | ||
215 | $LD $t1,0($a0) | ||
216 | subu $a2,1 | ||
217 | $ADDU $t1,$v0 | ||
218 | sltu $v0,$t1,$v0 | ||
219 | mflo $at | ||
220 | mfhi $t0 | ||
221 | $ADDU $t1,$at | ||
222 | $ADDU $v0,$t0 | ||
223 | sltu $at,$t1,$at | ||
224 | $ST $t1,0($a0) | ||
225 | $ADDU $v0,$at | ||
226 | beqz $a2,.L_bn_mul_add_words_return | ||
227 | |||
228 | $LD $t0,$BNSZ($a1) | ||
229 | $MULTU $t0,$a3 | ||
230 | $LD $t1,$BNSZ($a0) | ||
231 | subu $a2,1 | ||
232 | $ADDU $t1,$v0 | ||
233 | sltu $v0,$t1,$v0 | ||
234 | mflo $at | ||
235 | mfhi $t0 | ||
236 | $ADDU $t1,$at | ||
237 | $ADDU $v0,$t0 | ||
238 | sltu $at,$t1,$at | ||
239 | $ST $t1,$BNSZ($a0) | ||
240 | $ADDU $v0,$at | ||
241 | beqz $a2,.L_bn_mul_add_words_return | ||
242 | |||
243 | $LD $t0,2*$BNSZ($a1) | ||
244 | $MULTU $t0,$a3 | ||
245 | $LD $t1,2*$BNSZ($a0) | ||
246 | $ADDU $t1,$v0 | ||
247 | sltu $v0,$t1,$v0 | ||
248 | mflo $at | ||
249 | mfhi $t0 | ||
250 | $ADDU $t1,$at | ||
251 | $ADDU $v0,$t0 | ||
252 | sltu $at,$t1,$at | ||
253 | $ST $t1,2*$BNSZ($a0) | ||
254 | $ADDU $v0,$at | ||
255 | |||
256 | .L_bn_mul_add_words_return: | ||
257 | .set noreorder | ||
258 | ___ | ||
259 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
260 | $REG_L $t3,4*$SZREG($sp) | ||
261 | $REG_L $t2,3*$SZREG($sp) | ||
262 | $REG_L $t1,2*$SZREG($sp) | ||
263 | $REG_L $t0,1*$SZREG($sp) | ||
264 | $REG_L $gp,0*$SZREG($sp) | ||
265 | $PTR_ADD $sp,6*$SZREG | ||
266 | ___ | ||
267 | $code.=<<___; | ||
268 | jr $ra | ||
269 | move $a0,$v0 | ||
270 | .end bn_mul_add_words_internal | ||
271 | |||
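bn_mul_add_words above is the workhorse: it adds ap[0..num-1]*w into rp[0..num-1], propagating a carry word that it finally returns in $v0. The unrolled-by-4 loop and the three tail cases all implement the same word recurrence, modeled here as a hedged Perl sketch (hypothetical helper, little-endian word arrays, $bits = 8*BNSZ):

    use Math::BigInt;
    # Word recurrence of bn_mul_add_words (reference sketch):
    #   { c, rp[i] } = rp[i] + ap[i]*w + c;  final c is the return value.
    sub mul_add_words_ref {
        my ($rp, $ap, $num, $w, $bits) = @_;
        my $mask = Math::BigInt->bone()->blsft($bits)->bsub(1);
        my $c = Math::BigInt->bzero();
        for my $i (0 .. $num - 1) {
            my $t = Math::BigInt->new($ap->[$i])->bmul($w)
                                 ->badd($rp->[$i])->badd($c);
            $rp->[$i] = $t->copy()->band($mask);   # low word back to rp[i]
            $c = $t->brsft($bits);                 # carry into next word
        }
        return $c;                                 # what the asm leaves in $v0
    }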
272 | .align 5 | ||
273 | .globl bn_mul_words | ||
274 | .ent bn_mul_words | ||
275 | bn_mul_words: | ||
276 | .set noreorder | ||
277 | bgtz $a2,bn_mul_words_internal | ||
278 | move $v0,$zero | ||
279 | jr $ra | ||
280 | move $a0,$v0 | ||
281 | .end bn_mul_words | ||
282 | |||
283 | .align 5 | ||
284 | .ent bn_mul_words_internal | ||
285 | bn_mul_words_internal: | ||
286 | ___ | ||
287 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
288 | .frame $sp,6*$SZREG,$ra | ||
289 | .mask 0x8000f008,-$SZREG | ||
290 | .set noreorder | ||
291 | $PTR_SUB $sp,6*$SZREG | ||
292 | $REG_S $ra,5*$SZREG($sp) | ||
293 | $REG_S $t3,4*$SZREG($sp) | ||
294 | $REG_S $t2,3*$SZREG($sp) | ||
295 | $REG_S $t1,2*$SZREG($sp) | ||
296 | $REG_S $t0,1*$SZREG($sp) | ||
297 | $REG_S $gp,0*$SZREG($sp) | ||
298 | ___ | ||
299 | $code.=<<___; | ||
300 | .set reorder | ||
301 | li $minus4,-4 | ||
302 | and $ta0,$a2,$minus4 | ||
303 | $LD $t0,0($a1) | ||
304 | beqz $ta0,.L_bn_mul_words_tail | ||
305 | |||
306 | .L_bn_mul_words_loop: | ||
307 | $MULTU $t0,$a3 | ||
308 | $LD $t2,$BNSZ($a1) | ||
309 | $LD $ta0,2*$BNSZ($a1) | ||
310 | $LD $ta2,3*$BNSZ($a1) | ||
311 | mflo $at | ||
312 | mfhi $t0 | ||
313 | $ADDU $v0,$at | ||
314 | sltu $t1,$v0,$at | ||
315 | $MULTU $t2,$a3 | ||
316 | $ST $v0,0($a0) | ||
317 | $ADDU $v0,$t1,$t0 | ||
318 | |||
319 | subu $a2,4 | ||
320 | $PTR_ADD $a0,4*$BNSZ | ||
321 | $PTR_ADD $a1,4*$BNSZ | ||
322 | mflo $at | ||
323 | mfhi $t2 | ||
324 | $ADDU $v0,$at | ||
325 | sltu $t3,$v0,$at | ||
326 | $MULTU $ta0,$a3 | ||
327 | $ST $v0,-3*$BNSZ($a0) | ||
328 | $ADDU $v0,$t3,$t2 | ||
329 | |||
330 | mflo $at | ||
331 | mfhi $ta0 | ||
332 | $ADDU $v0,$at | ||
333 | sltu $ta1,$v0,$at | ||
334 | $MULTU $ta2,$a3 | ||
335 | $ST $v0,-2*$BNSZ($a0) | ||
336 | $ADDU $v0,$ta1,$ta0 | ||
337 | |||
338 | and $ta0,$a2,$minus4 | ||
339 | mflo $at | ||
340 | mfhi $ta2 | ||
341 | $ADDU $v0,$at | ||
342 | sltu $ta3,$v0,$at | ||
343 | $ST $v0,-$BNSZ($a0) | ||
344 | $ADDU $v0,$ta3,$ta2 | ||
345 | .set noreorder | ||
346 | bgtzl $ta0,.L_bn_mul_words_loop | ||
347 | $LD $t0,0($a1) | ||
348 | |||
349 | beqz $a2,.L_bn_mul_words_return | ||
350 | nop | ||
351 | |||
352 | .L_bn_mul_words_tail: | ||
353 | .set reorder | ||
354 | $LD $t0,0($a1) | ||
355 | $MULTU $t0,$a3 | ||
356 | subu $a2,1 | ||
357 | mflo $at | ||
358 | mfhi $t0 | ||
359 | $ADDU $v0,$at | ||
360 | sltu $t1,$v0,$at | ||
361 | $ST $v0,0($a0) | ||
362 | $ADDU $v0,$t1,$t0 | ||
363 | beqz $a2,.L_bn_mul_words_return | ||
364 | |||
365 | $LD $t0,$BNSZ($a1) | ||
366 | $MULTU $t0,$a3 | ||
367 | subu $a2,1 | ||
368 | mflo $at | ||
369 | mfhi $t0 | ||
370 | $ADDU $v0,$at | ||
371 | sltu $t1,$v0,$at | ||
372 | $ST $v0,$BNSZ($a0) | ||
373 | $ADDU $v0,$t1,$t0 | ||
374 | beqz $a2,.L_bn_mul_words_return | ||
375 | |||
376 | $LD $t0,2*$BNSZ($a1) | ||
377 | $MULTU $t0,$a3 | ||
378 | mflo $at | ||
379 | mfhi $t0 | ||
380 | $ADDU $v0,$at | ||
381 | sltu $t1,$v0,$at | ||
382 | $ST $v0,2*$BNSZ($a0) | ||
383 | $ADDU $v0,$t1,$t0 | ||
384 | |||
385 | .L_bn_mul_words_return: | ||
386 | .set noreorder | ||
387 | ___ | ||
388 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
389 | $REG_L $t3,4*$SZREG($sp) | ||
390 | $REG_L $t2,3*$SZREG($sp) | ||
391 | $REG_L $t1,2*$SZREG($sp) | ||
392 | $REG_L $t0,1*$SZREG($sp) | ||
393 | $REG_L $gp,0*$SZREG($sp) | ||
394 | $PTR_ADD $sp,6*$SZREG | ||
395 | ___ | ||
396 | $code.=<<___; | ||
397 | jr $ra | ||
398 | move $a0,$v0 | ||
399 | .end bn_mul_words_internal | ||
400 | |||
401 | .align 5 | ||
402 | .globl bn_sqr_words | ||
403 | .ent bn_sqr_words | ||
404 | bn_sqr_words: | ||
405 | .set noreorder | ||
406 | bgtz $a2,bn_sqr_words_internal | ||
407 | move $v0,$zero | ||
408 | jr $ra | ||
409 | move $a0,$v0 | ||
410 | .end bn_sqr_words | ||
411 | |||
412 | .align 5 | ||
413 | .ent bn_sqr_words_internal | ||
414 | bn_sqr_words_internal: | ||
415 | ___ | ||
416 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
417 | .frame $sp,6*$SZREG,$ra | ||
418 | .mask 0x8000f008,-$SZREG | ||
419 | .set noreorder | ||
420 | $PTR_SUB $sp,6*$SZREG | ||
421 | $REG_S $ra,5*$SZREG($sp) | ||
422 | $REG_S $t3,4*$SZREG($sp) | ||
423 | $REG_S $t2,3*$SZREG($sp) | ||
424 | $REG_S $t1,2*$SZREG($sp) | ||
425 | $REG_S $t0,1*$SZREG($sp) | ||
426 | $REG_S $gp,0*$SZREG($sp) | ||
427 | ___ | ||
428 | $code.=<<___; | ||
429 | .set reorder | ||
430 | li $minus4,-4 | ||
431 | and $ta0,$a2,$minus4 | ||
432 | $LD $t0,0($a1) | ||
433 | beqz $ta0,.L_bn_sqr_words_tail | ||
434 | |||
435 | .L_bn_sqr_words_loop: | ||
436 | $MULTU $t0,$t0 | ||
437 | $LD $t2,$BNSZ($a1) | ||
438 | $LD $ta0,2*$BNSZ($a1) | ||
439 | $LD $ta2,3*$BNSZ($a1) | ||
440 | mflo $t1 | ||
441 | mfhi $t0 | ||
442 | $ST $t1,0($a0) | ||
443 | $ST $t0,$BNSZ($a0) | ||
444 | |||
445 | $MULTU $t2,$t2 | ||
446 | subu $a2,4 | ||
447 | $PTR_ADD $a0,8*$BNSZ | ||
448 | $PTR_ADD $a1,4*$BNSZ | ||
449 | mflo $t3 | ||
450 | mfhi $t2 | ||
451 | $ST $t3,-6*$BNSZ($a0) | ||
452 | $ST $t2,-5*$BNSZ($a0) | ||
453 | |||
454 | $MULTU $ta0,$ta0 | ||
455 | mflo $ta1 | ||
456 | mfhi $ta0 | ||
457 | $ST $ta1,-4*$BNSZ($a0) | ||
458 | $ST $ta0,-3*$BNSZ($a0) | ||
459 | |||
460 | |||
461 | $MULTU $ta2,$ta2 | ||
462 | and $ta0,$a2,$minus4 | ||
463 | mflo $ta3 | ||
464 | mfhi $ta2 | ||
465 | $ST $ta3,-2*$BNSZ($a0) | ||
466 | $ST $ta2,-$BNSZ($a0) | ||
467 | |||
468 | .set noreorder | ||
469 | bgtzl $ta0,.L_bn_sqr_words_loop | ||
470 | $LD $t0,0($a1) | ||
471 | |||
472 | beqz $a2,.L_bn_sqr_words_return | ||
473 | nop | ||
474 | |||
475 | .L_bn_sqr_words_tail: | ||
476 | .set reorder | ||
477 | $LD $t0,0($a1) | ||
478 | $MULTU $t0,$t0 | ||
479 | subu $a2,1 | ||
480 | mflo $t1 | ||
481 | mfhi $t0 | ||
482 | $ST $t1,0($a0) | ||
483 | $ST $t0,$BNSZ($a0) | ||
484 | beqz $a2,.L_bn_sqr_words_return | ||
485 | |||
486 | $LD $t0,$BNSZ($a1) | ||
487 | $MULTU $t0,$t0 | ||
488 | subu $a2,1 | ||
489 | mflo $t1 | ||
490 | mfhi $t0 | ||
491 | $ST $t1,2*$BNSZ($a0) | ||
492 | $ST $t0,3*$BNSZ($a0) | ||
493 | beqz $a2,.L_bn_sqr_words_return | ||
494 | |||
495 | $LD $t0,2*$BNSZ($a1) | ||
496 | $MULTU $t0,$t0 | ||
497 | mflo $t1 | ||
498 | mfhi $t0 | ||
499 | $ST $t1,4*$BNSZ($a0) | ||
500 | $ST $t0,5*$BNSZ($a0) | ||
501 | |||
502 | .L_bn_sqr_words_return: | ||
503 | .set noreorder | ||
504 | ___ | ||
505 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
506 | $REG_L $t3,4*$SZREG($sp) | ||
507 | $REG_L $t2,3*$SZREG($sp) | ||
508 | $REG_L $t1,2*$SZREG($sp) | ||
509 | $REG_L $t0,1*$SZREG($sp) | ||
510 | $REG_L $gp,0*$SZREG($sp) | ||
511 | $PTR_ADD $sp,6*$SZREG | ||
512 | ___ | ||
513 | $code.=<<___; | ||
514 | jr $ra | ||
515 | move $a0,$v0 | ||
516 | |||
517 | .end bn_sqr_words_internal | ||
518 | |||
519 | .align 5 | ||
520 | .globl bn_add_words | ||
521 | .ent bn_add_words | ||
522 | bn_add_words: | ||
523 | .set noreorder | ||
524 | bgtz $a3,bn_add_words_internal | ||
525 | move $v0,$zero | ||
526 | jr $ra | ||
527 | move $a0,$v0 | ||
528 | .end bn_add_words | ||
529 | |||
530 | .align 5 | ||
531 | .ent bn_add_words_internal | ||
532 | bn_add_words_internal: | ||
533 | ___ | ||
534 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
535 | .frame $sp,6*$SZREG,$ra | ||
536 | .mask 0x8000f008,-$SZREG | ||
537 | .set noreorder | ||
538 | $PTR_SUB $sp,6*$SZREG | ||
539 | $REG_S $ra,5*$SZREG($sp) | ||
540 | $REG_S $t3,4*$SZREG($sp) | ||
541 | $REG_S $t2,3*$SZREG($sp) | ||
542 | $REG_S $t1,2*$SZREG($sp) | ||
543 | $REG_S $t0,1*$SZREG($sp) | ||
544 | $REG_S $gp,0*$SZREG($sp) | ||
545 | ___ | ||
546 | $code.=<<___; | ||
547 | .set reorder | ||
548 | li $minus4,-4 | ||
549 | and $at,$a3,$minus4 | ||
550 | $LD $t0,0($a1) | ||
551 | beqz $at,.L_bn_add_words_tail | ||
552 | |||
553 | .L_bn_add_words_loop: | ||
554 | $LD $ta0,0($a2) | ||
555 | subu $a3,4 | ||
556 | $LD $t1,$BNSZ($a1) | ||
557 | and $at,$a3,$minus4 | ||
558 | $LD $t2,2*$BNSZ($a1) | ||
559 | $PTR_ADD $a2,4*$BNSZ | ||
560 | $LD $t3,3*$BNSZ($a1) | ||
561 | $PTR_ADD $a0,4*$BNSZ | ||
562 | $LD $ta1,-3*$BNSZ($a2) | ||
563 | $PTR_ADD $a1,4*$BNSZ | ||
564 | $LD $ta2,-2*$BNSZ($a2) | ||
565 | $LD $ta3,-$BNSZ($a2) | ||
566 | $ADDU $ta0,$t0 | ||
567 | sltu $t8,$ta0,$t0 | ||
568 | $ADDU $t0,$ta0,$v0 | ||
569 | sltu $v0,$t0,$ta0 | ||
570 | $ST $t0,-4*$BNSZ($a0) | ||
571 | $ADDU $v0,$t8 | ||
572 | |||
573 | $ADDU $ta1,$t1 | ||
574 | sltu $t9,$ta1,$t1 | ||
575 | $ADDU $t1,$ta1,$v0 | ||
576 | sltu $v0,$t1,$ta1 | ||
577 | $ST $t1,-3*$BNSZ($a0) | ||
578 | $ADDU $v0,$t9 | ||
579 | |||
580 | $ADDU $ta2,$t2 | ||
581 | sltu $t8,$ta2,$t2 | ||
582 | $ADDU $t2,$ta2,$v0 | ||
583 | sltu $v0,$t2,$ta2 | ||
584 | $ST $t2,-2*$BNSZ($a0) | ||
585 | $ADDU $v0,$t8 | ||
586 | |||
587 | $ADDU $ta3,$t3 | ||
588 | sltu $t9,$ta3,$t3 | ||
589 | $ADDU $t3,$ta3,$v0 | ||
590 | sltu $v0,$t3,$ta3 | ||
591 | $ST $t3,-$BNSZ($a0) | ||
592 | $ADDU $v0,$t9 | ||
593 | |||
594 | .set noreorder | ||
595 | bgtzl $at,.L_bn_add_words_loop | ||
596 | $LD $t0,0($a1) | ||
597 | |||
598 | beqz $a3,.L_bn_add_words_return | ||
599 | nop | ||
600 | |||
601 | .L_bn_add_words_tail: | ||
602 | .set reorder | ||
603 | $LD $t0,0($a1) | ||
604 | $LD $ta0,0($a2) | ||
605 | $ADDU $ta0,$t0 | ||
606 | subu $a3,1 | ||
607 | sltu $t8,$ta0,$t0 | ||
608 | $ADDU $t0,$ta0,$v0 | ||
609 | sltu $v0,$t0,$ta0 | ||
610 | $ST $t0,0($a0) | ||
611 | $ADDU $v0,$t8 | ||
612 | beqz $a3,.L_bn_add_words_return | ||
613 | |||
614 | $LD $t1,$BNSZ($a1) | ||
615 | $LD $ta1,$BNSZ($a2) | ||
616 | $ADDU $ta1,$t1 | ||
617 | subu $a3,1 | ||
618 | sltu $t9,$ta1,$t1 | ||
619 | $ADDU $t1,$ta1,$v0 | ||
620 | sltu $v0,$t1,$ta1 | ||
621 | $ST $t1,$BNSZ($a0) | ||
622 | $ADDU $v0,$t9 | ||
623 | beqz $a3,.L_bn_add_words_return | ||
624 | |||
625 | $LD $t2,2*$BNSZ($a1) | ||
626 | $LD $ta2,2*$BNSZ($a2) | ||
627 | $ADDU $ta2,$t2 | ||
628 | sltu $t8,$ta2,$t2 | ||
629 | $ADDU $t2,$ta2,$v0 | ||
630 | sltu $v0,$t2,$ta2 | ||
631 | $ST $t2,2*$BNSZ($a0) | ||
632 | $ADDU $v0,$t8 | ||
633 | |||
634 | .L_bn_add_words_return: | ||
635 | .set noreorder | ||
636 | ___ | ||
637 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
638 | $REG_L $t3,4*$SZREG($sp) | ||
639 | $REG_L $t2,3*$SZREG($sp) | ||
640 | $REG_L $t1,2*$SZREG($sp) | ||
641 | $REG_L $t0,1*$SZREG($sp) | ||
642 | $REG_L $gp,0*$SZREG($sp) | ||
643 | $PTR_ADD $sp,6*$SZREG | ||
644 | ___ | ||
645 | $code.=<<___; | ||
646 | jr $ra | ||
647 | move $a0,$v0 | ||
648 | |||
649 | .end bn_add_words_internal | ||
650 | |||
651 | .align 5 | ||
652 | .globl bn_sub_words | ||
653 | .ent bn_sub_words | ||
654 | bn_sub_words: | ||
655 | .set noreorder | ||
656 | bgtz $a3,bn_sub_words_internal | ||
657 | move $v0,$zero | ||
658 | jr $ra | ||
659 | move $a0,$zero | ||
660 | .end bn_sub_words | ||
661 | |||
662 | .align 5 | ||
663 | .ent bn_sub_words_internal | ||
664 | bn_sub_words_internal: | ||
665 | ___ | ||
666 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
667 | .frame $sp,6*$SZREG,$ra | ||
668 | .mask 0x8000f008,-$SZREG | ||
669 | .set noreorder | ||
670 | $PTR_SUB $sp,6*$SZREG | ||
671 | $REG_S $ra,5*$SZREG($sp) | ||
672 | $REG_S $t3,4*$SZREG($sp) | ||
673 | $REG_S $t2,3*$SZREG($sp) | ||
674 | $REG_S $t1,2*$SZREG($sp) | ||
675 | $REG_S $t0,1*$SZREG($sp) | ||
676 | $REG_S $gp,0*$SZREG($sp) | ||
677 | ___ | ||
678 | $code.=<<___; | ||
679 | .set reorder | ||
680 | li $minus4,-4 | ||
681 | and $at,$a3,$minus4 | ||
682 | $LD $t0,0($a1) | ||
683 | beqz $at,.L_bn_sub_words_tail | ||
684 | |||
685 | .L_bn_sub_words_loop: | ||
686 | $LD $ta0,0($a2) | ||
687 | subu $a3,4 | ||
688 | $LD $t1,$BNSZ($a1) | ||
689 | and $at,$a3,$minus4 | ||
690 | $LD $t2,2*$BNSZ($a1) | ||
691 | $PTR_ADD $a2,4*$BNSZ | ||
692 | $LD $t3,3*$BNSZ($a1) | ||
693 | $PTR_ADD $a0,4*$BNSZ | ||
694 | $LD $ta1,-3*$BNSZ($a2) | ||
695 | $PTR_ADD $a1,4*$BNSZ | ||
696 | $LD $ta2,-2*$BNSZ($a2) | ||
697 | $LD $ta3,-$BNSZ($a2) | ||
698 | sltu $t8,$t0,$ta0 | ||
699 | $SUBU $ta0,$t0,$ta0 | ||
700 | $SUBU $t0,$ta0,$v0 | ||
701 | sgtu $v0,$t0,$ta0 | ||
702 | $ST $t0,-4*$BNSZ($a0) | ||
703 | $ADDU $v0,$t8 | ||
704 | |||
705 | sltu $t9,$t1,$ta1 | ||
706 | $SUBU $ta1,$t1,$ta1 | ||
707 | $SUBU $t1,$ta1,$v0 | ||
708 | sgtu $v0,$t1,$ta1 | ||
709 | $ST $t1,-3*$BNSZ($a0) | ||
710 | $ADDU $v0,$t9 | ||
711 | |||
712 | |||
713 | sltu $t8,$t2,$ta2 | ||
714 | $SUBU $ta2,$t2,$ta2 | ||
715 | $SUBU $t2,$ta2,$v0 | ||
716 | sgtu $v0,$t2,$ta2 | ||
717 | $ST $t2,-2*$BNSZ($a0) | ||
718 | $ADDU $v0,$t8 | ||
719 | |||
720 | sltu $t9,$t3,$ta3 | ||
721 | $SUBU $ta3,$t3,$ta3 | ||
722 | $SUBU $t3,$ta3,$v0 | ||
723 | sgtu $v0,$t3,$ta3 | ||
724 | $ST $t3,-$BNSZ($a0) | ||
725 | $ADDU $v0,$t9 | ||
726 | |||
727 | .set noreorder | ||
728 | bgtzl $at,.L_bn_sub_words_loop | ||
729 | $LD $t0,0($a1) | ||
730 | |||
731 | beqz $a3,.L_bn_sub_words_return | ||
732 | nop | ||
733 | |||
734 | .L_bn_sub_words_tail: | ||
735 | .set reorder | ||
736 | $LD $t0,0($a1) | ||
737 | $LD $ta0,0($a2) | ||
738 | subu $a3,1 | ||
739 | sltu $t8,$t0,$ta0 | ||
740 | $SUBU $ta0,$t0,$ta0 | ||
741 | $SUBU $t0,$ta0,$v0 | ||
742 | sgtu $v0,$t0,$ta0 | ||
743 | $ST $t0,0($a0) | ||
744 | $ADDU $v0,$t8 | ||
745 | beqz $a3,.L_bn_sub_words_return | ||
746 | |||
747 | $LD $t1,$BNSZ($a1) | ||
748 | subu $a3,1 | ||
749 | $LD $ta1,$BNSZ($a2) | ||
750 | sltu $t9,$t1,$ta1 | ||
751 | $SUBU $ta1,$t1,$ta1 | ||
752 | $SUBU $t1,$ta1,$v0 | ||
753 | sgtu $v0,$t1,$ta1 | ||
754 | $ST $t1,$BNSZ($a0) | ||
755 | $ADDU $v0,$t9 | ||
756 | beqz $a3,.L_bn_sub_words_return | ||
757 | |||
758 | $LD $t2,2*$BNSZ($a1) | ||
759 | $LD $ta2,2*$BNSZ($a2) | ||
760 | sltu $t8,$t2,$ta2 | ||
761 | $SUBU $ta2,$t2,$ta2 | ||
762 | $SUBU $t2,$ta2,$v0 | ||
763 | sgtu $v0,$t2,$ta2 | ||
764 | $ST $t2,2*$BNSZ($a0) | ||
765 | $ADDU $v0,$t8 | ||
766 | |||
767 | .L_bn_sub_words_return: | ||
768 | .set noreorder | ||
769 | ___ | ||
770 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
771 | $REG_L $t3,4*$SZREG($sp) | ||
772 | $REG_L $t2,3*$SZREG($sp) | ||
773 | $REG_L $t1,2*$SZREG($sp) | ||
774 | $REG_L $t0,1*$SZREG($sp) | ||
775 | $REG_L $gp,0*$SZREG($sp) | ||
776 | $PTR_ADD $sp,6*$SZREG | ||
777 | ___ | ||
778 | $code.=<<___; | ||
779 | jr $ra | ||
780 | move $a0,$v0 | ||
781 | .end bn_sub_words_internal | ||
782 | |||
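The subtraction loop above tracks the borrow with an sltu before each word subtract (did a[i] < b[i]?) and an sgtu after subtracting the incoming borrow (did the decrement wrap?); the two flags can never both be set, so a plain $ADDU combines them into the outgoing borrow. A word-level model of the same recurrence (hypothetical helper):

    use Math::BigInt;
    # Borrow recurrence of bn_sub_words (reference sketch):
    #   rp[i] = a[i] - b[i] - borrow (mod 2^bits); borrow out is 0 or 1.
    sub sub_words_ref {
        my ($rp, $ap, $bp, $num, $bits) = @_;
        my $mod = Math::BigInt->bone()->blsft($bits);
        my $borrow = 0;
        for my $i (0 .. $num - 1) {
            my $d = Math::BigInt->new($ap->[$i])->bsub($bp->[$i])->bsub($borrow);
            $borrow = $d->is_neg() ? 1 : 0;        # the sltu/sgtu pair, combined
            $rp->[$i] = $d->bmod($mod);
        }
        return $borrow;                            # returned in $v0
    }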
783 | .align 5 | ||
784 | .globl bn_div_3_words | ||
785 | .ent bn_div_3_words | ||
786 | bn_div_3_words: | ||
787 | .set noreorder | ||
788 | move $a3,$a0 # we know that bn_div_words does not | ||
789 | # touch $a3, $ta2, $ta3 and preserves $a2, | ||
790 | # so we can keep two arguments and the | ||
791 | # return address in registers instead | ||
792 | # of on the stack:-) | ||
793 | |||
794 | $LD $a0,($a3) | ||
795 | move $ta2,$a1 | ||
796 | bne $a0,$a2,bn_div_3_words_internal | ||
797 | $LD $a1,-$BNSZ($a3) | ||
798 | li $v0,-1 | ||
799 | jr $ra | ||
800 | move $a0,$v0 | ||
801 | .end bn_div_3_words | ||
802 | |||
803 | .align 5 | ||
804 | .ent bn_div_3_words_internal | ||
805 | bn_div_3_words_internal: | ||
806 | ___ | ||
807 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
808 | .frame $sp,6*$SZREG,$ra | ||
809 | .mask 0x8000f008,-$SZREG | ||
810 | .set noreorder | ||
811 | $PTR_SUB $sp,6*$SZREG | ||
812 | $REG_S $ra,5*$SZREG($sp) | ||
813 | $REG_S $t3,4*$SZREG($sp) | ||
814 | $REG_S $t2,3*$SZREG($sp) | ||
815 | $REG_S $t1,2*$SZREG($sp) | ||
816 | $REG_S $t0,1*$SZREG($sp) | ||
817 | $REG_S $gp,0*$SZREG($sp) | ||
818 | ___ | ||
819 | $code.=<<___; | ||
820 | .set reorder | ||
821 | move $ta3,$ra | ||
822 | bal bn_div_words | ||
823 | move $ra,$ta3 | ||
824 | $MULTU $ta2,$v0 | ||
825 | $LD $t2,-2*$BNSZ($a3) | ||
826 | move $ta0,$zero | ||
827 | mfhi $t1 | ||
828 | mflo $t0 | ||
829 | sltu $t8,$t1,$a1 | ||
830 | .L_bn_div_3_words_inner_loop: | ||
831 | bnez $t8,.L_bn_div_3_words_inner_loop_done | ||
832 | sgeu $at,$t2,$t0 | ||
833 | seq $t9,$t1,$a1 | ||
834 | and $at,$t9 | ||
835 | sltu $t3,$t0,$ta2 | ||
836 | $ADDU $a1,$a2 | ||
837 | $SUBU $t1,$t3 | ||
838 | $SUBU $t0,$ta2 | ||
839 | sltu $t8,$t1,$a1 | ||
840 | sltu $ta0,$a1,$a2 | ||
841 | or $t8,$ta0 | ||
842 | .set noreorder | ||
843 | beqzl $at,.L_bn_div_3_words_inner_loop | ||
844 | $SUBU $v0,1 | ||
845 | .set reorder | ||
846 | .L_bn_div_3_words_inner_loop_done: | ||
847 | .set noreorder | ||
848 | ___ | ||
849 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
850 | $REG_L $t3,4*$SZREG($sp) | ||
851 | $REG_L $t2,3*$SZREG($sp) | ||
852 | $REG_L $t1,2*$SZREG($sp) | ||
853 | $REG_L $t0,1*$SZREG($sp) | ||
854 | $REG_L $gp,0*$SZREG($sp) | ||
855 | $PTR_ADD $sp,6*$SZREG | ||
856 | ___ | ||
857 | $code.=<<___; | ||
858 | jr $ra | ||
859 | move $a0,$v0 | ||
860 | .end bn_div_3_words_internal | ||
861 | |||
862 | .align 5 | ||
863 | .globl bn_div_words | ||
864 | .ent bn_div_words | ||
865 | bn_div_words: | ||
866 | .set noreorder | ||
867 | bnez $a2,bn_div_words_internal | ||
868 | li $v0,-1 # I would rather signal div-by-zero | ||
869 | # which can be done with 'break 7' | ||
870 | jr $ra | ||
871 | move $a0,$v0 | ||
872 | .end bn_div_words | ||
873 | |||
874 | .align 5 | ||
875 | .ent bn_div_words_internal | ||
876 | bn_div_words_internal: | ||
877 | ___ | ||
878 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
879 | .frame $sp,6*$SZREG,$ra | ||
880 | .mask 0x8000f008,-$SZREG | ||
881 | .set noreorder | ||
882 | $PTR_SUB $sp,6*$SZREG | ||
883 | $REG_S $ra,5*$SZREG($sp) | ||
884 | $REG_S $t3,4*$SZREG($sp) | ||
885 | $REG_S $t2,3*$SZREG($sp) | ||
886 | $REG_S $t1,2*$SZREG($sp) | ||
887 | $REG_S $t0,1*$SZREG($sp) | ||
888 | $REG_S $gp,0*$SZREG($sp) | ||
889 | ___ | ||
890 | $code.=<<___; | ||
891 | move $v1,$zero | ||
892 | bltz $a2,.L_bn_div_words_body | ||
893 | move $t9,$v1 | ||
894 | $SLL $a2,1 | ||
895 | bgtz $a2,.-4 | ||
896 | addu $t9,1 | ||
897 | |||
898 | .set reorder | ||
899 | negu $t1,$t9 | ||
900 | li $t2,-1 | ||
901 | $SLL $t2,$t1 | ||
902 | and $t2,$a0 | ||
903 | $SRL $at,$a1,$t1 | ||
904 | .set noreorder | ||
905 | bnezl $t2,.+8 | ||
906 | break 6 # signal overflow | ||
907 | .set reorder | ||
908 | $SLL $a0,$t9 | ||
909 | $SLL $a1,$t9 | ||
910 | or $a0,$at | ||
911 | ___ | ||
912 | $QT=$ta0; | ||
913 | $HH=$ta1; | ||
914 | $DH=$v1; | ||
915 | $code.=<<___; | ||
916 | .L_bn_div_words_body: | ||
917 | $SRL $DH,$a2,4*$BNSZ # bits | ||
918 | sgeu $at,$a0,$a2 | ||
919 | .set noreorder | ||
920 | bnezl $at,.+8 | ||
921 | $SUBU $a0,$a2 | ||
922 | .set reorder | ||
923 | |||
924 | li $QT,-1 | ||
925 | $SRL $HH,$a0,4*$BNSZ # bits | ||
926 | $SRL $QT,4*$BNSZ # q=0xffffffff | ||
927 | beq $DH,$HH,.L_bn_div_words_skip_div1 | ||
928 | $DIVU $zero,$a0,$DH | ||
929 | mflo $QT | ||
930 | .L_bn_div_words_skip_div1: | ||
931 | $MULTU $a2,$QT | ||
932 | $SLL $t3,$a0,4*$BNSZ # bits | ||
933 | $SRL $at,$a1,4*$BNSZ # bits | ||
934 | or $t3,$at | ||
935 | mflo $t0 | ||
936 | mfhi $t1 | ||
937 | .L_bn_div_words_inner_loop1: | ||
938 | sltu $t2,$t3,$t0 | ||
939 | seq $t8,$HH,$t1 | ||
940 | sltu $at,$HH,$t1 | ||
941 | and $t2,$t8 | ||
942 | sltu $v0,$t0,$a2 | ||
943 | or $at,$t2 | ||
944 | .set noreorder | ||
945 | beqz $at,.L_bn_div_words_inner_loop1_done | ||
946 | $SUBU $t1,$v0 | ||
947 | $SUBU $t0,$a2 | ||
948 | b .L_bn_div_words_inner_loop1 | ||
949 | $SUBU $QT,1 | ||
950 | .set reorder | ||
951 | .L_bn_div_words_inner_loop1_done: | ||
952 | |||
953 | $SLL $a1,4*$BNSZ # bits | ||
954 | $SUBU $a0,$t3,$t0 | ||
955 | $SLL $v0,$QT,4*$BNSZ # bits | ||
956 | |||
957 | li $QT,-1 | ||
958 | $SRL $HH,$a0,4*$BNSZ # bits | ||
959 | $SRL $QT,4*$BNSZ # q=0xffffffff | ||
960 | beq $DH,$HH,.L_bn_div_words_skip_div2 | ||
961 | $DIVU $zero,$a0,$DH | ||
962 | mflo $QT | ||
963 | .L_bn_div_words_skip_div2: | ||
964 | $MULTU $a2,$QT | ||
965 | $SLL $t3,$a0,4*$BNSZ # bits | ||
966 | $SRL $at,$a1,4*$BNSZ # bits | ||
967 | or $t3,$at | ||
968 | mflo $t0 | ||
969 | mfhi $t1 | ||
970 | .L_bn_div_words_inner_loop2: | ||
971 | sltu $t2,$t3,$t0 | ||
972 | seq $t8,$HH,$t1 | ||
973 | sltu $at,$HH,$t1 | ||
974 | and $t2,$t8 | ||
975 | sltu $v1,$t0,$a2 | ||
976 | or $at,$t2 | ||
977 | .set noreorder | ||
978 | beqz $at,.L_bn_div_words_inner_loop2_done | ||
979 | $SUBU $t1,$v1 | ||
980 | $SUBU $t0,$a2 | ||
981 | b .L_bn_div_words_inner_loop2 | ||
982 | $SUBU $QT,1 | ||
983 | .set reorder | ||
984 | .L_bn_div_words_inner_loop2_done: | ||
985 | |||
986 | $SUBU $a0,$t3,$t0 | ||
987 | or $v0,$QT | ||
988 | $SRL $v1,$a0,$t9 # $v1 contains remainder if anybody wants it | ||
989 | $SRL $a2,$t9 # restore $a2 | ||
990 | |||
991 | .set noreorder | ||
992 | move $a1,$v1 | ||
993 | ___ | ||
994 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
995 | $REG_L $t3,4*$SZREG($sp) | ||
996 | $REG_L $t2,3*$SZREG($sp) | ||
997 | $REG_L $t1,2*$SZREG($sp) | ||
998 | $REG_L $t0,1*$SZREG($sp) | ||
999 | $REG_L $gp,0*$SZREG($sp) | ||
1000 | $PTR_ADD $sp,6*$SZREG | ||
1001 | ___ | ||
1002 | $code.=<<___; | ||
1003 | jr $ra | ||
1004 | move $a0,$v0 | ||
1005 | .end bn_div_words_internal | ||
1006 | ___ | ||
1007 | undef $HH; undef $QT; undef $DH; | ||
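bn_div_words above first normalizes the divisor so its top bit is set ('break 6' signals overflow if nonzero bits of h would be shifted out, i.e. the quotient would not fit in one word), then produces the quotient as two half-word digits via the classic estimate-and-correct loops (.L_bn_div_words_inner_loop1/2). What it computes, as a hedged sketch (hypothetical helper; the caller is expected to guarantee h < d so q fits in one word):

    use Math::BigInt;
    # Reference model of bn_div_words: one-word quotient of the double
    # word h:l by d, plus the remainder.
    sub div_words_ref {
        my ($h, $l, $d, $bits) = @_;
        my $n = Math::BigInt->new($h)->blsft($bits)->bior($l);
        my ($q, $r) = $n->bdiv(Math::BigInt->new($d));
        return ($q, $r);        # asm: quotient in $v0, remainder left in $v1/$a1
    }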
1008 | |||
1009 | ($a_0,$a_1,$a_2,$a_3)=($t0,$t1,$t2,$t3); | ||
1010 | ($b_0,$b_1,$b_2,$b_3)=($ta0,$ta1,$ta2,$ta3); | ||
1011 | |||
1012 | ($a_4,$a_5,$a_6,$a_7)=($s0,$s2,$s4,$a1); # once we load a[7], no use for $a1 | ||
1013 | ($b_4,$b_5,$b_6,$b_7)=($s1,$s3,$s5,$a2); # once we load b[7], no use for $a2 | ||
1014 | |||
1015 | ($t_1,$t_2,$c_1,$c_2,$c_3)=($t8,$t9,$v0,$v1,$a3); | ||
1016 | |||
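The comments of the form mul_add_c(a[i],b[j],c1,c2,c3) that annotate the comba code below all expand to the same short mflo/mfhi plus carry-propagation pattern: add the double-word product a[i]*b[j] into a three-word column accumulator, where the low/middle/top roles rotate among c1, c2, c3 from column to column (the argument order in each comment tracks which register currently holds the column's low word). A word-level model of one such step (hypothetical helper):

    use Math::BigInt;
    # One comba accumulation step, mul_add_c(a,b,c1,c2,c3): add the
    # double-word product a*b into the running three-word column sum.
    sub mul_add_c_ref {
        my ($a, $b, $c1, $c2, $c3, $bits) = @_;
        my $mask = Math::BigInt->bone()->blsft($bits)->bsub(1);
        my $t = Math::BigInt->new($a)->bmul($b)->badd($c1);  # mflo/mfhi + add
        $c1 = $t->copy()->band($mask);
        $t->brsft($bits)->badd($c2);                         # carry into c2
        $c2 = $t->copy()->band($mask);
        $c3 = $t->brsft($bits)->badd($c3);                   # top carry to c3
        return ($c1, $c2, $c3);
    }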
1017 | $code.=<<___; | ||
1018 | |||
1019 | .align 5 | ||
1020 | .globl bn_mul_comba8 | ||
1021 | .ent bn_mul_comba8 | ||
1022 | bn_mul_comba8: | ||
1023 | .set noreorder | ||
1024 | ___ | ||
1025 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
1026 | .frame $sp,12*$SZREG,$ra | ||
1027 | .mask 0x803ff008,-$SZREG | ||
1028 | $PTR_SUB $sp,12*$SZREG | ||
1029 | $REG_S $ra,11*$SZREG($sp) | ||
1030 | $REG_S $s5,10*$SZREG($sp) | ||
1031 | $REG_S $s4,9*$SZREG($sp) | ||
1032 | $REG_S $s3,8*$SZREG($sp) | ||
1033 | $REG_S $s2,7*$SZREG($sp) | ||
1034 | $REG_S $s1,6*$SZREG($sp) | ||
1035 | $REG_S $s0,5*$SZREG($sp) | ||
1036 | $REG_S $t3,4*$SZREG($sp) | ||
1037 | $REG_S $t2,3*$SZREG($sp) | ||
1038 | $REG_S $t1,2*$SZREG($sp) | ||
1039 | $REG_S $t0,1*$SZREG($sp) | ||
1040 | $REG_S $gp,0*$SZREG($sp) | ||
1041 | ___ | ||
1042 | $code.=<<___ if ($flavour !~ /nubi/i); | ||
1043 | .frame $sp,6*$SZREG,$ra | ||
1044 | .mask 0x003f0000,-$SZREG | ||
1045 | $PTR_SUB $sp,6*$SZREG | ||
1046 | $REG_S $s5,5*$SZREG($sp) | ||
1047 | $REG_S $s4,4*$SZREG($sp) | ||
1048 | $REG_S $s3,3*$SZREG($sp) | ||
1049 | $REG_S $s2,2*$SZREG($sp) | ||
1050 | $REG_S $s1,1*$SZREG($sp) | ||
1051 | $REG_S $s0,0*$SZREG($sp) | ||
1052 | ___ | ||
1053 | $code.=<<___; | ||
1054 | |||
1055 | .set reorder | ||
1056 | $LD $a_0,0($a1) # If compiled with the -mips3 option | ||
1057 | # on an R5000 box, the assembler barks | ||
1058 | # at this line with a "should not have | ||
1059 | # mult/div as last instruction in bb | ||
1060 | # (R10K bug)" warning. If anybody out | ||
1061 | # there has a clue about how to | ||
1062 | # circumvent this, do send me a note. | ||
1063 | # <appro\@fy.chalmers.se> | ||
1064 | |||
1065 | $LD $b_0,0($a2) | ||
1066 | $LD $a_1,$BNSZ($a1) | ||
1067 | $LD $a_2,2*$BNSZ($a1) | ||
1068 | $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3); | ||
1069 | $LD $a_3,3*$BNSZ($a1) | ||
1070 | $LD $b_1,$BNSZ($a2) | ||
1071 | $LD $b_2,2*$BNSZ($a2) | ||
1072 | $LD $b_3,3*$BNSZ($a2) | ||
1073 | mflo $c_1 | ||
1074 | mfhi $c_2 | ||
1075 | |||
1076 | $LD $a_4,4*$BNSZ($a1) | ||
1077 | $LD $a_5,5*$BNSZ($a1) | ||
1078 | $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1); | ||
1079 | $LD $a_6,6*$BNSZ($a1) | ||
1080 | $LD $a_7,7*$BNSZ($a1) | ||
1081 | $LD $b_4,4*$BNSZ($a2) | ||
1082 | $LD $b_5,5*$BNSZ($a2) | ||
1083 | mflo $t_1 | ||
1084 | mfhi $t_2 | ||
1085 | $ADDU $c_2,$t_1 | ||
1086 | sltu $at,$c_2,$t_1 | ||
1087 | $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1); | ||
1088 | $ADDU $c_3,$t_2,$at | ||
1089 | $LD $b_6,6*$BNSZ($a2) | ||
1090 | $LD $b_7,7*$BNSZ($a2) | ||
1091 | $ST $c_1,0($a0) # r[0]=c1; | ||
1092 | mflo $t_1 | ||
1093 | mfhi $t_2 | ||
1094 | $ADDU $c_2,$t_1 | ||
1095 | sltu $at,$c_2,$t_1 | ||
1096 | $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2); | ||
1097 | $ADDU $t_2,$at | ||
1098 | $ADDU $c_3,$t_2 | ||
1099 | sltu $c_1,$c_3,$t_2 | ||
1100 | $ST $c_2,$BNSZ($a0) # r[1]=c2; | ||
1101 | |||
1102 | mflo $t_1 | ||
1103 | mfhi $t_2 | ||
1104 | $ADDU $c_3,$t_1 | ||
1105 | sltu $at,$c_3,$t_1 | ||
1106 | $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2); | ||
1107 | $ADDU $t_2,$at | ||
1108 | $ADDU $c_1,$t_2 | ||
1109 | mflo $t_1 | ||
1110 | mfhi $t_2 | ||
1111 | $ADDU $c_3,$t_1 | ||
1112 | sltu $at,$c_3,$t_1 | ||
1113 | $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2); | ||
1114 | $ADDU $t_2,$at | ||
1115 | $ADDU $c_1,$t_2 | ||
1116 | sltu $c_2,$c_1,$t_2 | ||
1117 | mflo $t_1 | ||
1118 | mfhi $t_2 | ||
1119 | $ADDU $c_3,$t_1 | ||
1120 | sltu $at,$c_3,$t_1 | ||
1121 | $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3); | ||
1122 | $ADDU $t_2,$at | ||
1123 | $ADDU $c_1,$t_2 | ||
1124 | sltu $at,$c_1,$t_2 | ||
1125 | $ADDU $c_2,$at | ||
1126 | $ST $c_3,2*$BNSZ($a0) # r[2]=c3; | ||
1127 | |||
1128 | mflo $t_1 | ||
1129 | mfhi $t_2 | ||
1130 | $ADDU $c_1,$t_1 | ||
1131 | sltu $at,$c_1,$t_1 | ||
1132 | $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3); | ||
1133 | $ADDU $t_2,$at | ||
1134 | $ADDU $c_2,$t_2 | ||
1135 | sltu $c_3,$c_2,$t_2 | ||
1136 | mflo $t_1 | ||
1137 | mfhi $t_2 | ||
1138 | $ADDU $c_1,$t_1 | ||
1139 | sltu $at,$c_1,$t_1 | ||
1140 | $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3); | ||
1141 | $ADDU $t_2,$at | ||
1142 | $ADDU $c_2,$t_2 | ||
1143 | sltu $at,$c_2,$t_2 | ||
1144 | $ADDU $c_3,$at | ||
1145 | mflo $t_1 | ||
1146 | mfhi $t_2 | ||
1147 | $ADDU $c_1,$t_1 | ||
1148 | sltu $at,$c_1,$t_1 | ||
1149 | $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3); | ||
1150 | $ADDU $t_2,$at | ||
1151 | $ADDU $c_2,$t_2 | ||
1152 | sltu $at,$c_2,$t_2 | ||
1153 | $ADDU $c_3,$at | ||
1154 | mflo $t_1 | ||
1155 | mfhi $t_2 | ||
1156 | $ADDU $c_1,$t_1 | ||
1157 | sltu $at,$c_1,$t_1 | ||
1158 | $MULTU $a_4,$b_0 # mul_add_c(a[4],b[0],c2,c3,c1); | ||
1159 | $ADDU $t_2,$at | ||
1160 | $ADDU $c_2,$t_2 | ||
1161 | sltu $at,$c_2,$t_2 | ||
1162 | $ADDU $c_3,$at | ||
1163 | $ST $c_1,3*$BNSZ($a0) # r[3]=c1; | ||
1164 | |||
1165 | mflo $t_1 | ||
1166 | mfhi $t_2 | ||
1167 | $ADDU $c_2,$t_1 | ||
1168 | sltu $at,$c_2,$t_1 | ||
1169 | $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1); | ||
1170 | $ADDU $t_2,$at | ||
1171 | $ADDU $c_3,$t_2 | ||
1172 | sltu $c_1,$c_3,$t_2 | ||
1173 | mflo $t_1 | ||
1174 | mfhi $t_2 | ||
1175 | $ADDU $c_2,$t_1 | ||
1176 | sltu $at,$c_2,$t_1 | ||
1177 | $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1); | ||
1178 | $ADDU $t_2,$at | ||
1179 | $ADDU $c_3,$t_2 | ||
1180 | sltu $at,$c_3,$t_2 | ||
1181 | $ADDU $c_1,$at | ||
1182 | mflo $t_1 | ||
1183 | mfhi $t_2 | ||
1184 | $ADDU $c_2,$t_1 | ||
1185 | sltu $at,$c_2,$t_1 | ||
1186 | $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1); | ||
1187 | $ADDU $t_2,$at | ||
1188 | $ADDU $c_3,$t_2 | ||
1189 | sltu $at,$c_3,$t_2 | ||
1190 | $ADDU $c_1,$at | ||
1191 | mflo $t_1 | ||
1192 | mfhi $t_2 | ||
1193 | $ADDU $c_2,$t_1 | ||
1194 | sltu $at,$c_2,$t_1 | ||
1195 | $MULTU $a_0,$b_4 # mul_add_c(a[0],b[4],c2,c3,c1); | ||
1196 | $ADDU $t_2,$at | ||
1197 | $ADDU $c_3,$t_2 | ||
1198 | sltu $at,$c_3,$t_2 | ||
1199 | $ADDU $c_1,$at | ||
1200 | mflo $t_1 | ||
1201 | mfhi $t_2 | ||
1202 | $ADDU $c_2,$t_1 | ||
1203 | sltu $at,$c_2,$t_1 | ||
1204 | $MULTU $a_0,$b_5 # mul_add_c(a[0],b[5],c3,c1,c2); | ||
1205 | $ADDU $t_2,$at | ||
1206 | $ADDU $c_3,$t_2 | ||
1207 | sltu $at,$c_3,$t_2 | ||
1208 | $ADDU $c_1,$at | ||
1209 | $ST $c_2,4*$BNSZ($a0) # r[4]=c2; | ||
1210 | |||
1211 | mflo $t_1 | ||
1212 | mfhi $t_2 | ||
1213 | $ADDU $c_3,$t_1 | ||
1214 | sltu $at,$c_3,$t_1 | ||
1215 | $MULTU $a_1,$b_4 # mul_add_c(a[1],b[4],c3,c1,c2); | ||
1216 | $ADDU $t_2,$at | ||
1217 | $ADDU $c_1,$t_2 | ||
1218 | sltu $c_2,$c_1,$t_2 | ||
1219 | mflo $t_1 | ||
1220 | mfhi $t_2 | ||
1221 | $ADDU $c_3,$t_1 | ||
1222 | sltu $at,$c_3,$t_1 | ||
1223 | $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2); | ||
1224 | $ADDU $t_2,$at | ||
1225 | $ADDU $c_1,$t_2 | ||
1226 | sltu $at,$c_1,$t_2 | ||
1227 | $ADDU $c_2,$at | ||
1228 | mflo $t_1 | ||
1229 | mfhi $t_2 | ||
1230 | $ADDU $c_3,$t_1 | ||
1231 | sltu $at,$c_3,$t_1 | ||
1232 | $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2); | ||
1233 | $ADDU $t_2,$at | ||
1234 | $ADDU $c_1,$t_2 | ||
1235 | sltu $at,$c_1,$t_2 | ||
1236 | $ADDU $c_2,$at | ||
1237 | mflo $t_1 | ||
1238 | mfhi $t_2 | ||
1239 | $ADDU $c_3,$t_1 | ||
1240 | sltu $at,$c_3,$t_1 | ||
1241 | $MULTU $a_4,$b_1 # mul_add_c(a[4],b[1],c3,c1,c2); | ||
1242 | $ADDU $t_2,$at | ||
1243 | $ADDU $c_1,$t_2 | ||
1244 | sltu $at,$c_1,$t_2 | ||
1245 | $ADDU $c_2,$at | ||
1246 | mflo $t_1 | ||
1247 | mfhi $t_2 | ||
1248 | $ADDU $c_3,$t_1 | ||
1249 | sltu $at,$c_3,$t_1 | ||
1250 | $MULTU $a_5,$b_0 # mul_add_c(a[5],b[0],c3,c1,c2); | ||
1251 | $ADDU $t_2,$at | ||
1252 | $ADDU $c_1,$t_2 | ||
1253 | sltu $at,$c_1,$t_2 | ||
1254 | $ADDU $c_2,$at | ||
1255 | mflo $t_1 | ||
1256 | mfhi $t_2 | ||
1257 | $ADDU $c_3,$t_1 | ||
1258 | sltu $at,$c_3,$t_1 | ||
1259 | $MULTU $a_6,$b_0 # mul_add_c(a[6],b[0],c1,c2,c3); | ||
1260 | $ADDU $t_2,$at | ||
1261 | $ADDU $c_1,$t_2 | ||
1262 | sltu $at,$c_1,$t_2 | ||
1263 | $ADDU $c_2,$at | ||
1264 | $ST $c_3,5*$BNSZ($a0) # r[5]=c3; | ||
1265 | |||
1266 | mflo $t_1 | ||
1267 | mfhi $t_2 | ||
1268 | $ADDU $c_1,$t_1 | ||
1269 | sltu $at,$c_1,$t_1 | ||
1270 | $MULTU $a_5,$b_1 # mul_add_c(a[5],b[1],c1,c2,c3); | ||
1271 | $ADDU $t_2,$at | ||
1272 | $ADDU $c_2,$t_2 | ||
1273 | sltu $c_3,$c_2,$t_2 | ||
1274 | mflo $t_1 | ||
1275 | mfhi $t_2 | ||
1276 | $ADDU $c_1,$t_1 | ||
1277 | sltu $at,$c_1,$t_1 | ||
1278 | $MULTU $a_4,$b_2 # mul_add_c(a[4],b[2],c1,c2,c3); | ||
1279 | $ADDU $t_2,$at | ||
1280 | $ADDU $c_2,$t_2 | ||
1281 | sltu $at,$c_2,$t_2 | ||
1282 | $ADDU $c_3,$at | ||
1283 | mflo $t_1 | ||
1284 | mfhi $t_2 | ||
1285 | $ADDU $c_1,$t_1 | ||
1286 | sltu $at,$c_1,$t_1 | ||
1287 | $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3); | ||
1288 | $ADDU $t_2,$at | ||
1289 | $ADDU $c_2,$t_2 | ||
1290 | sltu $at,$c_2,$t_2 | ||
1291 | $ADDU $c_3,$at | ||
1292 | mflo $t_1 | ||
1293 | mfhi $t_2 | ||
1294 | $ADDU $c_1,$t_1 | ||
1295 | sltu $at,$c_1,$t_1 | ||
1296 | $MULTU $a_2,$b_4 # mul_add_c(a[2],b[4],c1,c2,c3); | ||
1297 | $ADDU $t_2,$at | ||
1298 | $ADDU $c_2,$t_2 | ||
1299 | sltu $at,$c_2,$t_2 | ||
1300 | $ADDU $c_3,$at | ||
1301 | mflo $t_1 | ||
1302 | mfhi $t_2 | ||
1303 | $ADDU $c_1,$t_1 | ||
1304 | sltu $at,$c_1,$t_1 | ||
1305 | $MULTU $a_1,$b_5 # mul_add_c(a[1],b[5],c1,c2,c3); | ||
1306 | $ADDU $t_2,$at | ||
1307 | $ADDU $c_2,$t_2 | ||
1308 | sltu $at,$c_2,$t_2 | ||
1309 | $ADDU $c_3,$at | ||
1310 | mflo $t_1 | ||
1311 | mfhi $t_2 | ||
1312 | $ADDU $c_1,$t_1 | ||
1313 | sltu $at,$c_1,$t_1 | ||
1314 | $MULTU $a_0,$b_6 # mul_add_c(a[0],b[6],c1,c2,c3); | ||
1315 | $ADDU $t_2,$at | ||
1316 | $ADDU $c_2,$t_2 | ||
1317 | sltu $at,$c_2,$t_2 | ||
1318 | $ADDU $c_3,$at | ||
1319 | mflo $t_1 | ||
1320 | mfhi $t_2 | ||
1321 | $ADDU $c_1,$t_1 | ||
1322 | sltu $at,$c_1,$t_1 | ||
1323 | $MULTU $a_0,$b_7 # mul_add_c(a[0],b[7],c2,c3,c1); | ||
1324 | $ADDU $t_2,$at | ||
1325 | $ADDU $c_2,$t_2 | ||
1326 | sltu $at,$c_2,$t_2 | ||
1327 | $ADDU $c_3,$at | ||
1328 | $ST $c_1,6*$BNSZ($a0) # r[6]=c1; | ||
1329 | |||
1330 | mflo $t_1 | ||
1331 | mfhi $t_2 | ||
1332 | $ADDU $c_2,$t_1 | ||
1333 | sltu $at,$c_2,$t_1 | ||
1334 | $MULTU $a_1,$b_6 # mul_add_c(a[1],b[6],c2,c3,c1); | ||
1335 | $ADDU $t_2,$at | ||
1336 | $ADDU $c_3,$t_2 | ||
1337 | sltu $c_1,$c_3,$t_2 | ||
1338 | mflo $t_1 | ||
1339 | mfhi $t_2 | ||
1340 | $ADDU $c_2,$t_1 | ||
1341 | sltu $at,$c_2,$t_1 | ||
1342 | $MULTU $a_2,$b_5 # mul_add_c(a[2],b[5],c2,c3,c1); | ||
1343 | $ADDU $t_2,$at | ||
1344 | $ADDU $c_3,$t_2 | ||
1345 | sltu $at,$c_3,$t_2 | ||
1346 | $ADDU $c_1,$at | ||
1347 | mflo $t_1 | ||
1348 | mfhi $t_2 | ||
1349 | $ADDU $c_2,$t_1 | ||
1350 | sltu $at,$c_2,$t_1 | ||
1351 | $MULTU $a_3,$b_4 # mul_add_c(a[3],b[4],c2,c3,c1); | ||
1352 | $ADDU $t_2,$at | ||
1353 | $ADDU $c_3,$t_2 | ||
1354 | sltu $at,$c_3,$t_2 | ||
1355 | $ADDU $c_1,$at | ||
1356 | mflo $t_1 | ||
1357 | mfhi $t_2 | ||
1358 | $ADDU $c_2,$t_1 | ||
1359 | sltu $at,$c_2,$t_1 | ||
1360 | $MULTU $a_4,$b_3 # mul_add_c(a[4],b[3],c2,c3,c1); | ||
1361 | $ADDU $t_2,$at | ||
1362 | $ADDU $c_3,$t_2 | ||
1363 | sltu $at,$c_3,$t_2 | ||
1364 | $ADDU $c_1,$at | ||
1365 | mflo $t_1 | ||
1366 | mfhi $t_2 | ||
1367 | $ADDU $c_2,$t_1 | ||
1368 | sltu $at,$c_2,$t_1 | ||
1369 | $MULTU $a_5,$b_2 # mul_add_c(a[5],b[2],c2,c3,c1); | ||
1370 | $ADDU $t_2,$at | ||
1371 | $ADDU $c_3,$t_2 | ||
1372 | sltu $at,$c_3,$t_2 | ||
1373 | $ADDU $c_1,$at | ||
1374 | mflo $t_1 | ||
1375 | mfhi $t_2 | ||
1376 | $ADDU $c_2,$t_1 | ||
1377 | sltu $at,$c_2,$t_1 | ||
1378 | $MULTU $a_6,$b_1 # mul_add_c(a[6],b[1],c2,c3,c1); | ||
1379 | $ADDU $t_2,$at | ||
1380 | $ADDU $c_3,$t_2 | ||
1381 | sltu $at,$c_3,$t_2 | ||
1382 | $ADDU $c_1,$at | ||
1383 | mflo $t_1 | ||
1384 | mfhi $t_2 | ||
1385 | $ADDU $c_2,$t_1 | ||
1386 | sltu $at,$c_2,$t_1 | ||
1387 | $MULTU $a_7,$b_0 # mul_add_c(a[7],b[0],c2,c3,c1); | ||
1388 | $ADDU $t_2,$at | ||
1389 | $ADDU $c_3,$t_2 | ||
1390 | sltu $at,$c_3,$t_2 | ||
1391 | $ADDU $c_1,$at | ||
1392 | mflo $t_1 | ||
1393 | mfhi $t_2 | ||
1394 | $ADDU $c_2,$t_1 | ||
1395 | sltu $at,$c_2,$t_1 | ||
1396 | $MULTU $a_7,$b_1 # mul_add_c(a[7],b[1],c3,c1,c2); | ||
1397 | $ADDU $t_2,$at | ||
1398 | $ADDU $c_3,$t_2 | ||
1399 | sltu $at,$c_3,$t_2 | ||
1400 | $ADDU $c_1,$at | ||
1401 | $ST $c_2,7*$BNSZ($a0) # r[7]=c2; | ||
1402 | |||
1403 | mflo $t_1 | ||
1404 | mfhi $t_2 | ||
1405 | $ADDU $c_3,$t_1 | ||
1406 | sltu $at,$c_3,$t_1 | ||
1407 | $MULTU $a_6,$b_2 # mul_add_c(a[6],b[2],c3,c1,c2); | ||
1408 | $ADDU $t_2,$at | ||
1409 | $ADDU $c_1,$t_2 | ||
1410 | sltu $c_2,$c_1,$t_2 | ||
1411 | mflo $t_1 | ||
1412 | mfhi $t_2 | ||
1413 | $ADDU $c_3,$t_1 | ||
1414 | sltu $at,$c_3,$t_1 | ||
1415 | $MULTU $a_5,$b_3 # mul_add_c(a[5],b[3],c3,c1,c2); | ||
1416 | $ADDU $t_2,$at | ||
1417 | $ADDU $c_1,$t_2 | ||
1418 | sltu $at,$c_1,$t_2 | ||
1419 | $ADDU $c_2,$at | ||
1420 | mflo $t_1 | ||
1421 | mfhi $t_2 | ||
1422 | $ADDU $c_3,$t_1 | ||
1423 | sltu $at,$c_3,$t_1 | ||
1424 | $MULTU $a_4,$b_4 # mul_add_c(a[4],b[4],c3,c1,c2); | ||
1425 | $ADDU $t_2,$at | ||
1426 | $ADDU $c_1,$t_2 | ||
1427 | sltu $at,$c_1,$t_2 | ||
1428 | $ADDU $c_2,$at | ||
1429 | mflo $t_1 | ||
1430 | mfhi $t_2 | ||
1431 | $ADDU $c_3,$t_1 | ||
1432 | sltu $at,$c_3,$t_1 | ||
1433 | $MULTU $a_3,$b_5 # mul_add_c(a[3],b[5],c3,c1,c2); | ||
1434 | $ADDU $t_2,$at | ||
1435 | $ADDU $c_1,$t_2 | ||
1436 | sltu $at,$c_1,$t_2 | ||
1437 | $ADDU $c_2,$at | ||
1438 | mflo $t_1 | ||
1439 | mfhi $t_2 | ||
1440 | $ADDU $c_3,$t_1 | ||
1441 | sltu $at,$c_3,$t_1 | ||
1442 | $MULTU $a_2,$b_6 # mul_add_c(a[2],b[6],c3,c1,c2); | ||
1443 | $ADDU $t_2,$at | ||
1444 | $ADDU $c_1,$t_2 | ||
1445 | sltu $at,$c_1,$t_2 | ||
1446 | $ADDU $c_2,$at | ||
1447 | mflo $t_1 | ||
1448 | mfhi $t_2 | ||
1449 | $ADDU $c_3,$t_1 | ||
1450 | sltu $at,$c_3,$t_1 | ||
1451 | $MULTU $a_1,$b_7 # mul_add_c(a[1],b[7],c3,c1,c2); | ||
1452 | $ADDU $t_2,$at | ||
1453 | $ADDU $c_1,$t_2 | ||
1454 | sltu $at,$c_1,$t_2 | ||
1455 | $ADDU $c_2,$at | ||
1456 | mflo $t_1 | ||
1457 | mfhi $t_2 | ||
1458 | $ADDU $c_3,$t_1 | ||
1459 | sltu $at,$c_3,$t_1 | ||
1460 | $MULTU $a_2,$b_7 # mul_add_c(a[2],b[7],c1,c2,c3); | ||
1461 | $ADDU $t_2,$at | ||
1462 | $ADDU $c_1,$t_2 | ||
1463 | sltu $at,$c_1,$t_2 | ||
1464 | $ADDU $c_2,$at | ||
1465 | $ST $c_3,8*$BNSZ($a0) # r[8]=c3; | ||
1466 | |||
1467 | mflo $t_1 | ||
1468 | mfhi $t_2 | ||
1469 | $ADDU $c_1,$t_1 | ||
1470 | sltu $at,$c_1,$t_1 | ||
1471 | $MULTU $a_3,$b_6 # mul_add_c(a[3],b[6],c1,c2,c3); | ||
1472 | $ADDU $t_2,$at | ||
1473 | $ADDU $c_2,$t_2 | ||
1474 | sltu $c_3,$c_2,$t_2 | ||
1475 | mflo $t_1 | ||
1476 | mfhi $t_2 | ||
1477 | $ADDU $c_1,$t_1 | ||
1478 | sltu $at,$c_1,$t_1 | ||
1479 | $MULTU $a_4,$b_5 # mul_add_c(a[4],b[5],c1,c2,c3); | ||
1480 | $ADDU $t_2,$at | ||
1481 | $ADDU $c_2,$t_2 | ||
1482 | sltu $at,$c_2,$t_2 | ||
1483 | $ADDU $c_3,$at | ||
1484 | mflo $t_1 | ||
1485 | mfhi $t_2 | ||
1486 | $ADDU $c_1,$t_1 | ||
1487 | sltu $at,$c_1,$t_1 | ||
1488 | $MULTU $a_5,$b_4 # mul_add_c(a[5],b[4],c1,c2,c3); | ||
1489 | $ADDU $t_2,$at | ||
1490 | $ADDU $c_2,$t_2 | ||
1491 | sltu $at,$c_2,$t_2 | ||
1492 | $ADDU $c_3,$at | ||
1493 | mflo $t_1 | ||
1494 | mfhi $t_2 | ||
1495 | $ADDU $c_1,$t_1 | ||
1496 | sltu $at,$c_1,$t_1 | ||
1497 | $MULTU $a_6,$b_3 # mul_add_c(a[6],b[3],c1,c2,c3); | ||
1498 | $ADDU $t_2,$at | ||
1499 | $ADDU $c_2,$t_2 | ||
1500 | sltu $at,$c_2,$t_2 | ||
1501 | $ADDU $c_3,$at | ||
1502 | mflo $t_1 | ||
1503 | mfhi $t_2 | ||
1504 | $ADDU $c_1,$t_1 | ||
1505 | sltu $at,$c_1,$t_1 | ||
1506 | $MULTU $a_7,$b_2 # mul_add_c(a[7],b[2],c1,c2,c3); | ||
1507 | $ADDU $t_2,$at | ||
1508 | $ADDU $c_2,$t_2 | ||
1509 | sltu $at,$c_2,$t_2 | ||
1510 | $ADDU $c_3,$at | ||
1511 | mflo $t_1 | ||
1512 | mfhi $t_2 | ||
1513 | $ADDU $c_1,$t_1 | ||
1514 | sltu $at,$c_1,$t_1 | ||
1515 | $MULTU $a_7,$b_3 # mul_add_c(a[7],b[3],c2,c3,c1); | ||
1516 | $ADDU $t_2,$at | ||
1517 | $ADDU $c_2,$t_2 | ||
1518 | sltu $at,$c_2,$t_2 | ||
1519 | $ADDU $c_3,$at | ||
1520 | $ST $c_1,9*$BNSZ($a0) # r[9]=c1; | ||
1521 | |||
1522 | mflo $t_1 | ||
1523 | mfhi $t_2 | ||
1524 | $ADDU $c_2,$t_1 | ||
1525 | sltu $at,$c_2,$t_1 | ||
1526 | $MULTU $a_6,$b_4 # mul_add_c(a[6],b[4],c2,c3,c1); | ||
1527 | $ADDU $t_2,$at | ||
1528 | $ADDU $c_3,$t_2 | ||
1529 | sltu $c_1,$c_3,$t_2 | ||
1530 | mflo $t_1 | ||
1531 | mfhi $t_2 | ||
1532 | $ADDU $c_2,$t_1 | ||
1533 | sltu $at,$c_2,$t_1 | ||
1534 | $MULTU $a_5,$b_5 # mul_add_c(a[5],b[5],c2,c3,c1); | ||
1535 | $ADDU $t_2,$at | ||
1536 | $ADDU $c_3,$t_2 | ||
1537 | sltu $at,$c_3,$t_2 | ||
1538 | $ADDU $c_1,$at | ||
1539 | mflo $t_1 | ||
1540 | mfhi $t_2 | ||
1541 | $ADDU $c_2,$t_1 | ||
1542 | sltu $at,$c_2,$t_1 | ||
1543 | $MULTU $a_4,$b_6 # mul_add_c(a[4],b[6],c2,c3,c1); | ||
1544 | $ADDU $t_2,$at | ||
1545 | $ADDU $c_3,$t_2 | ||
1546 | sltu $at,$c_3,$t_2 | ||
1547 | $ADDU $c_1,$at | ||
1548 | mflo $t_1 | ||
1549 | mfhi $t_2 | ||
1550 | $ADDU $c_2,$t_1 | ||
1551 | sltu $at,$c_2,$t_1 | ||
1552 | $MULTU $a_3,$b_7 # mul_add_c(a[3],b[7],c2,c3,c1); | ||
1553 | $ADDU $t_2,$at | ||
1554 | $ADDU $c_3,$t_2 | ||
1555 | sltu $at,$c_3,$t_2 | ||
1556 | $ADDU $c_1,$at | ||
1557 | mflo $t_1 | ||
1558 | mfhi $t_2 | ||
1559 | $ADDU $c_2,$t_1 | ||
1560 | sltu $at,$c_2,$t_1 | ||
1561 | $MULTU $a_4,$b_7 # mul_add_c(a[4],b[7],c3,c1,c2); | ||
1562 | $ADDU $t_2,$at | ||
1563 | $ADDU $c_3,$t_2 | ||
1564 | sltu $at,$c_3,$t_2 | ||
1565 | $ADDU $c_1,$at | ||
1566 | $ST $c_2,10*$BNSZ($a0) # r[10]=c2; | ||
1567 | |||
1568 | mflo $t_1 | ||
1569 | mfhi $t_2 | ||
1570 | $ADDU $c_3,$t_1 | ||
1571 | sltu $at,$c_3,$t_1 | ||
1572 | $MULTU $a_5,$b_6 # mul_add_c(a[5],b[6],c3,c1,c2); | ||
1573 | $ADDU $t_2,$at | ||
1574 | $ADDU $c_1,$t_2 | ||
1575 | sltu $c_2,$c_1,$t_2 | ||
1576 | mflo $t_1 | ||
1577 | mfhi $t_2 | ||
1578 | $ADDU $c_3,$t_1 | ||
1579 | sltu $at,$c_3,$t_1 | ||
1580 | $MULTU $a_6,$b_5 # mul_add_c(a[6],b[5],c3,c1,c2); | ||
1581 | $ADDU $t_2,$at | ||
1582 | $ADDU $c_1,$t_2 | ||
1583 | sltu $at,$c_1,$t_2 | ||
1584 | $ADDU $c_2,$at | ||
1585 | mflo $t_1 | ||
1586 | mfhi $t_2 | ||
1587 | $ADDU $c_3,$t_1 | ||
1588 | sltu $at,$c_3,$t_1 | ||
1589 | $MULTU $a_7,$b_4 # mul_add_c(a[7],b[4],c3,c1,c2); | ||
1590 | $ADDU $t_2,$at | ||
1591 | $ADDU $c_1,$t_2 | ||
1592 | sltu $at,$c_1,$t_2 | ||
1593 | $ADDU $c_2,$at | ||
1594 | mflo $t_1 | ||
1595 | mfhi $t_2 | ||
1596 | $ADDU $c_3,$t_1 | ||
1597 | sltu $at,$c_3,$t_1 | ||
1598 | $MULTU $a_7,$b_5 # mul_add_c(a[7],b[5],c1,c2,c3); | ||
1599 | $ADDU $t_2,$at | ||
1600 | $ADDU $c_1,$t_2 | ||
1601 | sltu $at,$c_1,$t_2 | ||
1602 | $ADDU $c_2,$at | ||
1603 | $ST $c_3,11*$BNSZ($a0) # r[11]=c3; | ||
1604 | |||
1605 | mflo $t_1 | ||
1606 | mfhi $t_2 | ||
1607 | $ADDU $c_1,$t_1 | ||
1608 | sltu $at,$c_1,$t_1 | ||
1609 | $MULTU $a_6,$b_6 # mul_add_c(a[6],b[6],c1,c2,c3); | ||
1610 | $ADDU $t_2,$at | ||
1611 | $ADDU $c_2,$t_2 | ||
1612 | sltu $c_3,$c_2,$t_2 | ||
1613 | mflo $t_1 | ||
1614 | mfhi $t_2 | ||
1615 | $ADDU $c_1,$t_1 | ||
1616 | sltu $at,$c_1,$t_1 | ||
1617 | $MULTU $a_5,$b_7 # mul_add_c(a[5],b[7],c1,c2,c3); | ||
1618 | $ADDU $t_2,$at | ||
1619 | $ADDU $c_2,$t_2 | ||
1620 | sltu $at,$c_2,$t_2 | ||
1621 | $ADDU $c_3,$at | ||
1622 | mflo $t_1 | ||
1623 | mfhi $t_2 | ||
1624 | $ADDU $c_1,$t_1 | ||
1625 | sltu $at,$c_1,$t_1 | ||
1626 | $MULTU $a_6,$b_7 # mul_add_c(a[6],b[7],c2,c3,c1); | ||
1627 | $ADDU $t_2,$at | ||
1628 | $ADDU $c_2,$t_2 | ||
1629 | sltu $at,$c_2,$t_2 | ||
1630 | $ADDU $c_3,$at | ||
1631 | $ST $c_1,12*$BNSZ($a0) # r[12]=c1; | ||
1632 | |||
1633 | mflo $t_1 | ||
1634 | mfhi $t_2 | ||
1635 | $ADDU $c_2,$t_1 | ||
1636 | sltu $at,$c_2,$t_1 | ||
1637 | $MULTU $a_7,$b_6 # mul_add_c(a[7],b[6],c2,c3,c1); | ||
1638 | $ADDU $t_2,$at | ||
1639 | $ADDU $c_3,$t_2 | ||
1640 | sltu $c_1,$c_3,$t_2 | ||
1641 | mflo $t_1 | ||
1642 | mfhi $t_2 | ||
1643 | $ADDU $c_2,$t_1 | ||
1644 | sltu $at,$c_2,$t_1 | ||
1645 | $MULTU $a_7,$b_7 # mul_add_c(a[7],b[7],c3,c1,c2); | ||
1646 | $ADDU $t_2,$at | ||
1647 | $ADDU $c_3,$t_2 | ||
1648 | sltu $at,$c_3,$t_2 | ||
1649 | $ADDU $c_1,$at | ||
1650 | $ST $c_2,13*$BNSZ($a0) # r[13]=c2; | ||
1651 | |||
1652 | mflo $t_1 | ||
1653 | mfhi $t_2 | ||
1654 | $ADDU $c_3,$t_1 | ||
1655 | sltu $at,$c_3,$t_1 | ||
1656 | $ADDU $t_2,$at | ||
1657 | $ADDU $c_1,$t_2 | ||
1658 | $ST $c_3,14*$BNSZ($a0) # r[14]=c3; | ||
1659 | $ST $c_1,15*$BNSZ($a0) # r[15]=c1; | ||
1660 | |||
1661 | .set noreorder | ||
1662 | ___ | ||
1663 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
1664 | $REG_L $s5,10*$SZREG($sp) | ||
1665 | $REG_L $s4,9*$SZREG($sp) | ||
1666 | $REG_L $s3,8*$SZREG($sp) | ||
1667 | $REG_L $s2,7*$SZREG($sp) | ||
1668 | $REG_L $s1,6*$SZREG($sp) | ||
1669 | $REG_L $s0,5*$SZREG($sp) | ||
1670 | $REG_L $t3,4*$SZREG($sp) | ||
1671 | $REG_L $t2,3*$SZREG($sp) | ||
1672 | $REG_L $t1,2*$SZREG($sp) | ||
1673 | $REG_L $t0,1*$SZREG($sp) | ||
1674 | $REG_L $gp,0*$SZREG($sp) | ||
1675 | jr $ra | ||
1676 | $PTR_ADD $sp,12*$SZREG | ||
1677 | ___ | ||
1678 | $code.=<<___ if ($flavour !~ /nubi/i); | ||
1679 | $REG_L $s5,5*$SZREG($sp) | ||
1680 | $REG_L $s4,4*$SZREG($sp) | ||
1681 | $REG_L $s3,3*$SZREG($sp) | ||
1682 | $REG_L $s2,2*$SZREG($sp) | ||
1683 | $REG_L $s1,1*$SZREG($sp) | ||
1684 | $REG_L $s0,0*$SZREG($sp) | ||
1685 | jr $ra | ||
1686 | $PTR_ADD $sp,6*$SZREG | ||
1687 | ___ | ||
1688 | $code.=<<___; | ||
1689 | .end bn_mul_comba8 | ||
1690 | |||
1691 | .align 5 | ||
1692 | .globl bn_mul_comba4 | ||
1693 | .ent bn_mul_comba4 | ||
1694 | bn_mul_comba4: | ||
1695 | ___ | ||
1696 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
1697 | .frame $sp,6*$SZREG,$ra | ||
1698 | .mask 0x8000f008,-$SZREG | ||
1699 | .set noreorder | ||
1700 | $PTR_SUB $sp,6*$SZREG | ||
1701 | $REG_S $ra,5*$SZREG($sp) | ||
1702 | $REG_S $t3,4*$SZREG($sp) | ||
1703 | $REG_S $t2,3*$SZREG($sp) | ||
1704 | $REG_S $t1,2*$SZREG($sp) | ||
1705 | $REG_S $t0,1*$SZREG($sp) | ||
1706 | $REG_S $gp,0*$SZREG($sp) | ||
1707 | ___ | ||
1708 | $code.=<<___; | ||
1709 | .set reorder | ||
1710 | $LD $a_0,0($a1) | ||
1711 | $LD $b_0,0($a2) | ||
1712 | $LD $a_1,$BNSZ($a1) | ||
1713 | $LD $a_2,2*$BNSZ($a1) | ||
1714 | $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3); | ||
1715 | $LD $a_3,3*$BNSZ($a1) | ||
1716 | $LD $b_1,$BNSZ($a2) | ||
1717 | $LD $b_2,2*$BNSZ($a2) | ||
1718 | $LD $b_3,3*$BNSZ($a2) | ||
1719 | mflo $c_1 | ||
1720 | mfhi $c_2 | ||
1721 | $ST $c_1,0($a0) | ||
1722 | |||
1723 | $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1); | ||
1724 | mflo $t_1 | ||
1725 | mfhi $t_2 | ||
1726 | $ADDU $c_2,$t_1 | ||
1727 | sltu $at,$c_2,$t_1 | ||
1728 | $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1); | ||
1729 | $ADDU $c_3,$t_2,$at | ||
1730 | mflo $t_1 | ||
1731 | mfhi $t_2 | ||
1732 | $ADDU $c_2,$t_1 | ||
1733 | sltu $at,$c_2,$t_1 | ||
1734 | $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2); | ||
1735 | $ADDU $t_2,$at | ||
1736 | $ADDU $c_3,$t_2 | ||
1737 | sltu $c_1,$c_3,$t_2 | ||
1738 | $ST $c_2,$BNSZ($a0) | ||
1739 | |||
1740 | mflo $t_1 | ||
1741 | mfhi $t_2 | ||
1742 | $ADDU $c_3,$t_1 | ||
1743 | sltu $at,$c_3,$t_1 | ||
1744 | $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2); | ||
1745 | $ADDU $t_2,$at | ||
1746 | $ADDU $c_1,$t_2 | ||
1747 | mflo $t_1 | ||
1748 | mfhi $t_2 | ||
1749 | $ADDU $c_3,$t_1 | ||
1750 | sltu $at,$c_3,$t_1 | ||
1751 | $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2); | ||
1752 | $ADDU $t_2,$at | ||
1753 | $ADDU $c_1,$t_2 | ||
1754 | sltu $c_2,$c_1,$t_2 | ||
1755 | mflo $t_1 | ||
1756 | mfhi $t_2 | ||
1757 | $ADDU $c_3,$t_1 | ||
1758 | sltu $at,$c_3,$t_1 | ||
1759 | $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3); | ||
1760 | $ADDU $t_2,$at | ||
1761 | $ADDU $c_1,$t_2 | ||
1762 | sltu $at,$c_1,$t_2 | ||
1763 | $ADDU $c_2,$at | ||
1764 | $ST $c_3,2*$BNSZ($a0) | ||
1765 | |||
1766 | mflo $t_1 | ||
1767 | mfhi $t_2 | ||
1768 | $ADDU $c_1,$t_1 | ||
1769 | sltu $at,$c_1,$t_1 | ||
1770 | $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3); | ||
1771 | $ADDU $t_2,$at | ||
1772 | $ADDU $c_2,$t_2 | ||
1773 | sltu $c_3,$c_2,$t_2 | ||
1774 | mflo $t_1 | ||
1775 | mfhi $t_2 | ||
1776 | $ADDU $c_1,$t_1 | ||
1777 | sltu $at,$c_1,$t_1 | ||
1778 | $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3); | ||
1779 | $ADDU $t_2,$at | ||
1780 | $ADDU $c_2,$t_2 | ||
1781 | sltu $at,$c_2,$t_2 | ||
1782 | $ADDU $c_3,$at | ||
1783 | mflo $t_1 | ||
1784 | mfhi $t_2 | ||
1785 | $ADDU $c_1,$t_1 | ||
1786 | sltu $at,$c_1,$t_1 | ||
1787 | $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3); | ||
1788 | $ADDU $t_2,$at | ||
1789 | $ADDU $c_2,$t_2 | ||
1790 | sltu $at,$c_2,$t_2 | ||
1791 | $ADDU $c_3,$at | ||
1792 | mflo $t_1 | ||
1793 | mfhi $t_2 | ||
1794 | $ADDU $c_1,$t_1 | ||
1795 | sltu $at,$c_1,$t_1 | ||
1796 | $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1); | ||
1797 | $ADDU $t_2,$at | ||
1798 | $ADDU $c_2,$t_2 | ||
1799 | sltu $at,$c_2,$t_2 | ||
1800 | $ADDU $c_3,$at | ||
1801 | $ST $c_1,3*$BNSZ($a0) | ||
1802 | |||
1803 | mflo $t_1 | ||
1804 | mfhi $t_2 | ||
1805 | $ADDU $c_2,$t_1 | ||
1806 | sltu $at,$c_2,$t_1 | ||
1807 | $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1); | ||
1808 | $ADDU $t_2,$at | ||
1809 | $ADDU $c_3,$t_2 | ||
1810 | sltu $c_1,$c_3,$t_2 | ||
1811 | mflo $t_1 | ||
1812 | mfhi $t_2 | ||
1813 | $ADDU $c_2,$t_1 | ||
1814 | sltu $at,$c_2,$t_1 | ||
1815 | $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1); | ||
1816 | $ADDU $t_2,$at | ||
1817 | $ADDU $c_3,$t_2 | ||
1818 | sltu $at,$c_3,$t_2 | ||
1819 | $ADDU $c_1,$at | ||
1820 | mflo $t_1 | ||
1821 | mfhi $t_2 | ||
1822 | $ADDU $c_2,$t_1 | ||
1823 | sltu $at,$c_2,$t_1 | ||
1824 | $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2); | ||
1825 | $ADDU $t_2,$at | ||
1826 | $ADDU $c_3,$t_2 | ||
1827 | sltu $at,$c_3,$t_2 | ||
1828 | $ADDU $c_1,$at | ||
1829 | $ST $c_2,4*$BNSZ($a0) | ||
1830 | |||
1831 | mflo $t_1 | ||
1832 | mfhi $t_2 | ||
1833 | $ADDU $c_3,$t_1 | ||
1834 | sltu $at,$c_3,$t_1 | ||
1835 | $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2); | ||
1836 | $ADDU $t_2,$at | ||
1837 | $ADDU $c_1,$t_2 | ||
1838 | sltu $c_2,$c_1,$t_2 | ||
1839 | mflo $t_1 | ||
1840 | mfhi $t_2 | ||
1841 | $ADDU $c_3,$t_1 | ||
1842 | sltu $at,$c_3,$t_1 | ||
1843 | $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3); | ||
1844 | $ADDU $t_2,$at | ||
1845 | $ADDU $c_1,$t_2 | ||
1846 | sltu $at,$c_1,$t_2 | ||
1847 | $ADDU $c_2,$at | ||
1848 | $ST $c_3,5*$BNSZ($a0) | ||
1849 | |||
1850 | mflo $t_1 | ||
1851 | mfhi $t_2 | ||
1852 | $ADDU $c_1,$t_1 | ||
1853 | sltu $at,$c_1,$t_1 | ||
1854 | $ADDU $t_2,$at | ||
1855 | $ADDU $c_2,$t_2 | ||
1856 | $ST $c_1,6*$BNSZ($a0) | ||
1857 | $ST $c_2,7*$BNSZ($a0) | ||
1858 | |||
1859 | .set noreorder | ||
1860 | ___ | ||
1861 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
1862 | $REG_L $t3,4*$SZREG($sp) | ||
1863 | $REG_L $t2,3*$SZREG($sp) | ||
1864 | $REG_L $t1,2*$SZREG($sp) | ||
1865 | $REG_L $t0,1*$SZREG($sp) | ||
1866 | $REG_L $gp,0*$SZREG($sp) | ||
1867 | $PTR_ADD $sp,6*$SZREG | ||
1868 | ___ | ||
1869 | $code.=<<___; | ||
1870 | jr $ra | ||
1871 | nop | ||
1872 | .end bn_mul_comba4 | ||
1873 | ___ | ||
1874 | |||
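Every `mul_add_c(a[i],b[j],cX,cY,cZ)` comment in these comba bodies marks one step of the same pattern: a word product is pulled out of HI/LO and folded into a rotating three-word accumulator. MIPS has no add-with-carry, so each carry is recovered after the fact with `sltu` (an unsigned sum that wrapped is smaller than either addend). A minimal C sketch of one such step, assuming 32-bit limbs; the helper name mirrors the comments and is not itself part of this file:

    #include <stdint.h>

    /* (c3:c2:c1) += a*b, the step each mflo/mfhi/$ADDU/sltu run
     * above implements.  The 64-bit product stands in for HI/LO. */
    static void mul_add_c(uint32_t a, uint32_t b,
                          uint32_t *c1, uint32_t *c2, uint32_t *c3)
    {
        uint64_t t  = (uint64_t)a * b;
        uint32_t lo = (uint32_t)t;          /* mflo $t_1 */
        uint32_t hi = (uint32_t)(t >> 32);  /* mfhi $t_2 */

        *c1 += lo;                          /* $ADDU $c_1,$t_1      */
        hi  += (*c1 < lo);                  /* sltu $at,$c_1,$t_1   */
        *c2 += hi;                          /* $ADDU $c_2,$t_2      */
        *c3 += (*c2 < hi);                  /* sltu; ripple into c3 */
    }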
1875 | ($a_4,$a_5,$a_6,$a_7)=($b_0,$b_1,$b_2,$b_3); | ||
1876 | |||
1877 | $code.=<<___; | ||
1878 | |||
1879 | .align 5 | ||
1880 | .globl bn_sqr_comba8 | ||
1881 | .ent bn_sqr_comba8 | ||
1882 | bn_sqr_comba8: | ||
1883 | ___ | ||
1884 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
1885 | .frame $sp,6*$SZREG,$ra | ||
1886 | .mask 0x8000f008,-$SZREG | ||
1887 | .set noreorder | ||
1888 | $PTR_SUB $sp,6*$SZREG | ||
1889 | $REG_S $ra,5*$SZREG($sp) | ||
1890 | $REG_S $t3,4*$SZREG($sp) | ||
1891 | $REG_S $t2,3*$SZREG($sp) | ||
1892 | $REG_S $t1,2*$SZREG($sp) | ||
1893 | $REG_S $t0,1*$SZREG($sp) | ||
1894 | $REG_S $gp,0*$SZREG($sp) | ||
1895 | ___ | ||
1896 | $code.=<<___; | ||
1897 | .set reorder | ||
1898 | $LD $a_0,0($a1) | ||
1899 | $LD $a_1,$BNSZ($a1) | ||
1900 | $LD $a_2,2*$BNSZ($a1) | ||
1901 | $LD $a_3,3*$BNSZ($a1) | ||
1902 | |||
1903 | $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3); | ||
1904 | $LD $a_4,4*$BNSZ($a1) | ||
1905 | $LD $a_5,5*$BNSZ($a1) | ||
1906 | $LD $a_6,6*$BNSZ($a1) | ||
1907 | $LD $a_7,7*$BNSZ($a1) | ||
1908 | mflo $c_1 | ||
1909 | mfhi $c_2 | ||
1910 | $ST $c_1,0($a0) | ||
1911 | |||
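	# Note: the mul_add_c2() steps below must add each cross product
	# a[i]*a[j] twice.  Before each doubling $SLL, the bit that would
	# be shifted out is saved with "slt $x,$t,$zero" (the sign bit is
	# exactly the bit about to be lost) and folded into the
	# next-higher word; the doubled halves then enter the usual
	# sltu carry chain.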
1912 | $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1); | ||
1913 | mflo $t_1 | ||
1914 | mfhi $t_2 | ||
1915 | slt $c_1,$t_2,$zero | ||
1916 | $SLL $t_2,1 | ||
1917 | $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2); | ||
1918 | slt $a2,$t_1,$zero | ||
1919 | $ADDU $t_2,$a2 | ||
1920 | $SLL $t_1,1 | ||
1921 | $ADDU $c_2,$t_1 | ||
1922 | sltu $at,$c_2,$t_1 | ||
1923 | $ADDU $c_3,$t_2,$at | ||
1924 | $ST $c_2,$BNSZ($a0) | ||
1925 | |||
1926 | mflo $t_1 | ||
1927 | mfhi $t_2 | ||
1928 | slt $c_2,$t_2,$zero | ||
1929 | $SLL $t_2,1 | ||
1930 | $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2); | ||
1931 | slt $a2,$t_1,$zero | ||
1932 | $ADDU $t_2,$a2 | ||
1933 | $SLL $t_1,1 | ||
1934 | $ADDU $c_3,$t_1 | ||
1935 | sltu $at,$c_3,$t_1 | ||
1936 | $ADDU $t_2,$at | ||
1937 | $ADDU $c_1,$t_2 | ||
1938 | sltu $at,$c_1,$t_2 | ||
1939 | $ADDU $c_2,$at | ||
1940 | mflo $t_1 | ||
1941 | mfhi $t_2 | ||
1942 | $ADDU $c_3,$t_1 | ||
1943 | sltu $at,$c_3,$t_1 | ||
1944 | $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3); | ||
1945 | $ADDU $t_2,$at | ||
1946 | $ADDU $c_1,$t_2 | ||
1947 | sltu $at,$c_1,$t_2 | ||
1948 | $ADDU $c_2,$at | ||
1949 | $ST $c_3,2*$BNSZ($a0) | ||
1950 | |||
1951 | mflo $t_1 | ||
1952 | mfhi $t_2 | ||
1953 | slt $c_3,$t_2,$zero | ||
1954 | $SLL $t_2,1 | ||
1955 | $MULTU $a_1,$a_2 # mul_add_c2(a[1],b[2],c1,c2,c3); | ||
1956 | slt $a2,$t_1,$zero | ||
1957 | $ADDU $t_2,$a2 | ||
1958 | $SLL $t_1,1 | ||
1959 | $ADDU $c_1,$t_1 | ||
1960 | sltu $at,$c_1,$t_1 | ||
1961 | $ADDU $t_2,$at | ||
1962 | $ADDU $c_2,$t_2 | ||
1963 | sltu $at,$c_2,$t_2 | ||
1964 | $ADDU $c_3,$at | ||
1965 | mflo $t_1 | ||
1966 | mfhi $t_2 | ||
1967 | slt $at,$t_2,$zero | ||
1968 | $ADDU $c_3,$at | ||
1969 | $MULTU $a_4,$a_0 # mul_add_c2(a[4],b[0],c2,c3,c1); | ||
1970 | $SLL $t_2,1 | ||
1971 | slt $a2,$t_1,$zero | ||
1972 | $ADDU $t_2,$a2 | ||
1973 | $SLL $t_1,1 | ||
1974 | $ADDU $c_1,$t_1 | ||
1975 | sltu $at,$c_1,$t_1 | ||
1976 | $ADDU $t_2,$at | ||
1977 | $ADDU $c_2,$t_2 | ||
1978 | sltu $at,$c_2,$t_2 | ||
1979 | $ADDU $c_3,$at | ||
1980 | $ST $c_1,3*$BNSZ($a0) | ||
1981 | |||
1982 | mflo $t_1 | ||
1983 | mfhi $t_2 | ||
1984 | slt $c_1,$t_2,$zero | ||
1985 | $SLL $t_2,1 | ||
1986 | $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1); | ||
1987 | slt $a2,$t_1,$zero | ||
1988 | $ADDU $t_2,$a2 | ||
1989 | $SLL $t_1,1 | ||
1990 | $ADDU $c_2,$t_1 | ||
1991 | sltu $at,$c_2,$t_1 | ||
1992 | $ADDU $t_2,$at | ||
1993 | $ADDU $c_3,$t_2 | ||
1994 | sltu $at,$c_3,$t_2 | ||
1995 | $ADDU $c_1,$at | ||
1996 | mflo $t_1 | ||
1997 | mfhi $t_2 | ||
1998 | slt $at,$t_2,$zero | ||
1999 | $ADDU $c_1,$at | ||
2000 | $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1); | ||
2001 | $SLL $t_2,1 | ||
2002 | slt $a2,$t_1,$zero | ||
2003 | $ADDU $t_2,$a2 | ||
2004 | $SLL $t_1,1 | ||
2005 | $ADDU $c_2,$t_1 | ||
2006 | sltu $at,$c_2,$t_1 | ||
2007 | $ADDU $t_2,$at | ||
2008 | $ADDU $c_3,$t_2 | ||
2009 | sltu $at,$c_3,$t_2 | ||
2010 | $ADDU $c_1,$at | ||
2011 | mflo $t_1 | ||
2012 | mfhi $t_2 | ||
2013 | $ADDU $c_2,$t_1 | ||
2014 | sltu $at,$c_2,$t_1 | ||
2015 | $MULTU $a_0,$a_5 # mul_add_c2(a[0],b[5],c3,c1,c2); | ||
2016 | $ADDU $t_2,$at | ||
2017 | $ADDU $c_3,$t_2 | ||
2018 | sltu $at,$c_3,$t_2 | ||
2019 | $ADDU $c_1,$at | ||
2020 | $ST $c_2,4*$BNSZ($a0) | ||
2021 | |||
2022 | mflo $t_1 | ||
2023 | mfhi $t_2 | ||
2024 | slt $c_2,$t_2,$zero | ||
2025 | $SLL $t_2,1 | ||
2026 | $MULTU $a_1,$a_4 # mul_add_c2(a[1],b[4],c3,c1,c2); | ||
2027 | slt $a2,$t_1,$zero | ||
2028 | $ADDU $t_2,$a2 | ||
2029 | $SLL $t_1,1 | ||
2030 | $ADDU $c_3,$t_1 | ||
2031 | sltu $at,$c_3,$t_1 | ||
2032 | $ADDU $t_2,$at | ||
2033 | $ADDU $c_1,$t_2 | ||
2034 | sltu $at,$c_1,$t_2 | ||
2035 | $ADDU $c_2,$at | ||
2036 | mflo $t_1 | ||
2037 | mfhi $t_2 | ||
2038 | slt $at,$t_2,$zero | ||
2039 | $ADDU $c_2,$at | ||
2040 | $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2); | ||
2041 | $SLL $t_2,1 | ||
2042 | slt $a2,$t_1,$zero | ||
2043 | $ADDU $t_2,$a2 | ||
2044 | $SLL $t_1,1 | ||
2045 | $ADDU $c_3,$t_1 | ||
2046 | sltu $at,$c_3,$t_1 | ||
2047 | $ADDU $t_2,$at | ||
2048 | $ADDU $c_1,$t_2 | ||
2049 | sltu $at,$c_1,$t_2 | ||
2050 | $ADDU $c_2,$at | ||
2051 | mflo $t_1 | ||
2052 | mfhi $t_2 | ||
2053 | slt $at,$t_2,$zero | ||
2054 | $MULTU $a_6,$a_0 # mul_add_c2(a[6],b[0],c1,c2,c3); | ||
2055 | $ADDU $c_2,$at | ||
2056 | $SLL $t_2,1 | ||
2057 | slt $a2,$t_1,$zero | ||
2058 | $ADDU $t_2,$a2 | ||
2059 | $SLL $t_1,1 | ||
2060 | $ADDU $c_3,$t_1 | ||
2061 | sltu $at,$c_3,$t_1 | ||
2062 | $ADDU $t_2,$at | ||
2063 | $ADDU $c_1,$t_2 | ||
2064 | sltu $at,$c_1,$t_2 | ||
2065 | $ADDU $c_2,$at | ||
2066 | $ST $c_3,5*$BNSZ($a0) | ||
2067 | |||
2068 | mflo $t_1 | ||
2069 | mfhi $t_2 | ||
2070 | slt $c_3,$t_2,$zero | ||
2071 | $SLL $t_2,1 | ||
2072 | $MULTU $a_5,$a_1 # mul_add_c2(a[5],b[1],c1,c2,c3); | ||
2073 | slt $a2,$t_1,$zero | ||
2074 | $ADDU $t_2,$a2 | ||
2075 | $SLL $t_1,1 | ||
2076 | $ADDU $c_1,$t_1 | ||
2077 | sltu $at,$c_1,$t_1 | ||
2078 | $ADDU $t_2,$at | ||
2079 | $ADDU $c_2,$t_2 | ||
2080 | sltu $at,$c_2,$t_2 | ||
2081 | $ADDU $c_3,$at | ||
2082 | mflo $t_1 | ||
2083 | mfhi $t_2 | ||
2084 | slt $at,$t_2,$zero | ||
2085 | $ADDU $c_3,$at | ||
2086 | $MULTU $a_4,$a_2 # mul_add_c2(a[4],b[2],c1,c2,c3); | ||
2087 | $SLL $t_2,1 | ||
2088 | slt $a2,$t_1,$zero | ||
2089 | $ADDU $t_2,$a2 | ||
2090 | $SLL $t_1,1 | ||
2091 | $ADDU $c_1,$t_1 | ||
2092 | sltu $at,$c_1,$t_1 | ||
2093 | $ADDU $t_2,$at | ||
2094 | $ADDU $c_2,$t_2 | ||
2095 | sltu $at,$c_2,$t_2 | ||
2096 | $ADDU $c_3,$at | ||
2097 | mflo $t_1 | ||
2098 | mfhi $t_2 | ||
2099 | slt $at,$t_2,$zero | ||
2100 | $ADDU $c_3,$at | ||
2101 | $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3); | ||
2102 | $SLL $t_2,1 | ||
2103 | slt $a2,$t_1,$zero | ||
2104 | $ADDU $t_2,$a2 | ||
2105 | $SLL $t_1,1 | ||
2106 | $ADDU $c_1,$t_1 | ||
2107 | sltu $at,$c_1,$t_1 | ||
2108 | $ADDU $t_2,$at | ||
2109 | $ADDU $c_2,$t_2 | ||
2110 | sltu $at,$c_2,$t_2 | ||
2111 | $ADDU $c_3,$at | ||
2112 | mflo $t_1 | ||
2113 | mfhi $t_2 | ||
2114 | $ADDU $c_1,$t_1 | ||
2115 | sltu $at,$c_1,$t_1 | ||
2116 | $MULTU $a_0,$a_7 # mul_add_c2(a[0],b[7],c2,c3,c1); | ||
2117 | $ADDU $t_2,$at | ||
2118 | $ADDU $c_2,$t_2 | ||
2119 | sltu $at,$c_2,$t_2 | ||
2120 | $ADDU $c_3,$at | ||
2121 | $ST $c_1,6*$BNSZ($a0) | ||
2122 | |||
2123 | mflo $t_1 | ||
2124 | mfhi $t_2 | ||
2125 | slt $c_1,$t_2,$zero | ||
2126 | $SLL $t_2,1 | ||
2127 | $MULTU $a_1,$a_6 # mul_add_c2(a[1],b[6],c2,c3,c1); | ||
2128 | slt $a2,$t_1,$zero | ||
2129 | $ADDU $t_2,$a2 | ||
2130 | $SLL $t_1,1 | ||
2131 | $ADDU $c_2,$t_1 | ||
2132 | sltu $at,$c_2,$t_1 | ||
2133 | $ADDU $t_2,$at | ||
2134 | $ADDU $c_3,$t_2 | ||
2135 | sltu $at,$c_3,$t_2 | ||
2136 | $ADDU $c_1,$at | ||
2137 | mflo $t_1 | ||
2138 | mfhi $t_2 | ||
2139 | slt $at,$t_2,$zero | ||
2140 | $ADDU $c_1,$at | ||
2141 | $MULTU $a_2,$a_5 # mul_add_c2(a[2],b[5],c2,c3,c1); | ||
2142 | $SLL $t_2,1 | ||
2143 | slt $a2,$t_1,$zero | ||
2144 | $ADDU $t_2,$a2 | ||
2145 | $SLL $t_1,1 | ||
2146 | $ADDU $c_2,$t_1 | ||
2147 | sltu $at,$c_2,$t_1 | ||
2148 | $ADDU $t_2,$at | ||
2149 | $ADDU $c_3,$t_2 | ||
2150 | sltu $at,$c_3,$t_2 | ||
2151 | $ADDU $c_1,$at | ||
2152 | mflo $t_1 | ||
2153 | mfhi $t_2 | ||
2154 | slt $at,$t_2,$zero | ||
2155 | $ADDU $c_1,$at | ||
2156 | $MULTU $a_3,$a_4 # mul_add_c2(a[3],b[4],c2,c3,c1); | ||
2157 | $SLL $t_2,1 | ||
2158 | slt $a2,$t_1,$zero | ||
2159 | $ADDU $t_2,$a2 | ||
2160 | $SLL $t_1,1 | ||
2161 | $ADDU $c_2,$t_1 | ||
2162 | sltu $at,$c_2,$t_1 | ||
2163 | $ADDU $t_2,$at | ||
2164 | $ADDU $c_3,$t_2 | ||
2165 | sltu $at,$c_3,$t_2 | ||
2166 | $ADDU $c_1,$at | ||
2167 | mflo $t_1 | ||
2168 | mfhi $t_2 | ||
2169 | slt $at,$t_2,$zero | ||
2170 | $ADDU $c_1,$at | ||
2171 | $MULTU $a_7,$a_1 # mul_add_c2(a[7],b[1],c3,c1,c2); | ||
2172 | $SLL $t_2,1 | ||
2173 | slt $a2,$t_1,$zero | ||
2174 | $ADDU $t_2,$a2 | ||
2175 | $SLL $t_1,1 | ||
2176 | $ADDU $c_2,$t_1 | ||
2177 | sltu $at,$c_2,$t_1 | ||
2178 | $ADDU $t_2,$at | ||
2179 | $ADDU $c_3,$t_2 | ||
2180 | sltu $at,$c_3,$t_2 | ||
2181 | $ADDU $c_1,$at | ||
2182 | $ST $c_2,7*$BNSZ($a0) | ||
2183 | |||
2184 | mflo $t_1 | ||
2185 | mfhi $t_2 | ||
2186 | slt $c_2,$t_2,$zero | ||
2187 | $SLL $t_2,1 | ||
2188 | $MULTU $a_6,$a_2 # mul_add_c2(a[6],b[2],c3,c1,c2); | ||
2189 | slt $a2,$t_1,$zero | ||
2190 | $ADDU $t_2,$a2 | ||
2191 | $SLL $t_1,1 | ||
2192 | $ADDU $c_3,$t_1 | ||
2193 | sltu $at,$c_3,$t_1 | ||
2194 | $ADDU $t_2,$at | ||
2195 | $ADDU $c_1,$t_2 | ||
2196 | sltu $at,$c_1,$t_2 | ||
2197 | $ADDU $c_2,$at | ||
2198 | mflo $t_1 | ||
2199 | mfhi $t_2 | ||
2200 | slt $at,$t_2,$zero | ||
2201 | $ADDU $c_2,$at | ||
2202 | $MULTU $a_5,$a_3 # mul_add_c2(a[5],b[3],c3,c1,c2); | ||
2203 | $SLL $t_2,1 | ||
2204 | slt $a2,$t_1,$zero | ||
2205 | $ADDU $t_2,$a2 | ||
2206 | $SLL $t_1,1 | ||
2207 | $ADDU $c_3,$t_1 | ||
2208 | sltu $at,$c_3,$t_1 | ||
2209 | $ADDU $t_2,$at | ||
2210 | $ADDU $c_1,$t_2 | ||
2211 | sltu $at,$c_1,$t_2 | ||
2212 | $ADDU $c_2,$at | ||
2213 | mflo $t_1 | ||
2214 | mfhi $t_2 | ||
2215 | slt $at,$t_2,$zero | ||
2216 | $ADDU $c_2,$at | ||
2217 | $MULTU $a_4,$a_4 # mul_add_c(a[4],b[4],c3,c1,c2); | ||
2218 | $SLL $t_2,1 | ||
2219 | slt $a2,$t_1,$zero | ||
2220 | $ADDU $t_2,$a2 | ||
2221 | $SLL $t_1,1 | ||
2222 | $ADDU $c_3,$t_1 | ||
2223 | sltu $at,$c_3,$t_1 | ||
2224 | $ADDU $t_2,$at | ||
2225 | $ADDU $c_1,$t_2 | ||
2226 | sltu $at,$c_1,$t_2 | ||
2227 | $ADDU $c_2,$at | ||
2228 | mflo $t_1 | ||
2229 | mfhi $t_2 | ||
2230 | $ADDU $c_3,$t_1 | ||
2231 | sltu $at,$c_3,$t_1 | ||
2232 | $MULTU $a_2,$a_7 # mul_add_c2(a[2],b[7],c1,c2,c3); | ||
2233 | $ADDU $t_2,$at | ||
2234 | $ADDU $c_1,$t_2 | ||
2235 | sltu $at,$c_1,$t_2 | ||
2236 | $ADDU $c_2,$at | ||
2237 | $ST $c_3,8*$BNSZ($a0) | ||
2238 | |||
2239 | mflo $t_1 | ||
2240 | mfhi $t_2 | ||
2241 | slt $c_3,$t_2,$zero | ||
2242 | $SLL $t_2,1 | ||
2243 | $MULTU $a_3,$a_6 # mul_add_c2(a[3],b[6],c1,c2,c3); | ||
2244 | slt $a2,$t_1,$zero | ||
2245 | $ADDU $t_2,$a2 | ||
2246 | $SLL $t_1,1 | ||
2247 | $ADDU $c_1,$t_1 | ||
2248 | sltu $at,$c_1,$t_1 | ||
2249 | $ADDU $t_2,$at | ||
2250 | $ADDU $c_2,$t_2 | ||
2251 | sltu $at,$c_2,$t_2 | ||
2252 | $ADDU $c_3,$at | ||
2253 | mflo $t_1 | ||
2254 | mfhi $t_2 | ||
2255 | slt $at,$t_2,$zero | ||
2256 | $ADDU $c_3,$at | ||
2257 | $MULTU $a_4,$a_5 # mul_add_c2(a[4],b[5],c1,c2,c3); | ||
2258 | $SLL $t_2,1 | ||
2259 | slt $a2,$t_1,$zero | ||
2260 | $ADDU $t_2,$a2 | ||
2261 | $SLL $t_1,1 | ||
2262 | $ADDU $c_1,$t_1 | ||
2263 | sltu $at,$c_1,$t_1 | ||
2264 | $ADDU $t_2,$at | ||
2265 | $ADDU $c_2,$t_2 | ||
2266 | sltu $at,$c_2,$t_2 | ||
2267 | $ADDU $c_3,$at | ||
2268 | mflo $t_1 | ||
2269 | mfhi $t_2 | ||
2270 | slt $at,$t_2,$zero | ||
2271 | $ADDU $c_3,$at | ||
2272 | $MULTU $a_7,$a_3 # mul_add_c2(a[7],b[3],c2,c3,c1); | ||
2273 | $SLL $t_2,1 | ||
2274 | slt $a2,$t_1,$zero | ||
2275 | $ADDU $t_2,$a2 | ||
2276 | $SLL $t_1,1 | ||
2277 | $ADDU $c_1,$t_1 | ||
2278 | sltu $at,$c_1,$t_1 | ||
2279 | $ADDU $t_2,$at | ||
2280 | $ADDU $c_2,$t_2 | ||
2281 | sltu $at,$c_2,$t_2 | ||
2282 | $ADDU $c_3,$at | ||
2283 | $ST $c_1,9*$BNSZ($a0) | ||
2284 | |||
2285 | mflo $t_1 | ||
2286 | mfhi $t_2 | ||
2287 | slt $c_1,$t_2,$zero | ||
2288 | $SLL $t_2,1 | ||
2289 | $MULTU $a_6,$a_4 # mul_add_c2(a[6],b[4],c2,c3,c1); | ||
2290 | slt $a2,$t_1,$zero | ||
2291 | $ADDU $t_2,$a2 | ||
2292 | $SLL $t_1,1 | ||
2293 | $ADDU $c_2,$t_1 | ||
2294 | sltu $at,$c_2,$t_1 | ||
2295 | $ADDU $t_2,$at | ||
2296 | $ADDU $c_3,$t_2 | ||
2297 | sltu $at,$c_3,$t_2 | ||
2298 | $ADDU $c_1,$at | ||
2299 | mflo $t_1 | ||
2300 | mfhi $t_2 | ||
2301 | slt $at,$t_2,$zero | ||
2302 | $ADDU $c_1,$at | ||
2303 | $MULTU $a_5,$a_5 # mul_add_c(a[5],b[5],c2,c3,c1); | ||
2304 | $SLL $t_2,1 | ||
2305 | slt $a2,$t_1,$zero | ||
2306 | $ADDU $t_2,$a2 | ||
2307 | $SLL $t_1,1 | ||
2308 | $ADDU $c_2,$t_1 | ||
2309 | sltu $at,$c_2,$t_1 | ||
2310 | $ADDU $t_2,$at | ||
2311 | $ADDU $c_3,$t_2 | ||
2312 | sltu $at,$c_3,$t_2 | ||
2313 | $ADDU $c_1,$at | ||
2314 | mflo $t_1 | ||
2315 | mfhi $t_2 | ||
2316 | $ADDU $c_2,$t_1 | ||
2317 | sltu $at,$c_2,$t_1 | ||
2318 | $MULTU $a_4,$a_7 # mul_add_c2(a[4],b[7],c3,c1,c2); | ||
2319 | $ADDU $t_2,$at | ||
2320 | $ADDU $c_3,$t_2 | ||
2321 | sltu $at,$c_3,$t_2 | ||
2322 | $ADDU $c_1,$at | ||
2323 | $ST $c_2,10*$BNSZ($a0) | ||
2324 | |||
2325 | mflo $t_1 | ||
2326 | mfhi $t_2 | ||
2327 | slt $c_2,$t_2,$zero | ||
2328 | $SLL $t_2,1 | ||
2329 | $MULTU $a_5,$a_6 # mul_add_c2(a[5],b[6],c3,c1,c2); | ||
2330 | slt $a2,$t_1,$zero | ||
2331 | $ADDU $t_2,$a2 | ||
2332 | $SLL $t_1,1 | ||
2333 | $ADDU $c_3,$t_1 | ||
2334 | sltu $at,$c_3,$t_1 | ||
2335 | $ADDU $t_2,$at | ||
2336 | $ADDU $c_1,$t_2 | ||
2337 | sltu $at,$c_1,$t_2 | ||
2338 | $ADDU $c_2,$at | ||
2339 | mflo $t_1 | ||
2340 | mfhi $t_2 | ||
2341 | slt $at,$t_2,$zero | ||
2342 | $ADDU $c_2,$at | ||
2343 | $MULTU $a_7,$a_5 # mul_add_c2(a[7],b[5],c1,c2,c3); | ||
2344 | $SLL $t_2,1 | ||
2345 | slt $a2,$t_1,$zero | ||
2346 | $ADDU $t_2,$a2 | ||
2347 | $SLL $t_1,1 | ||
2348 | $ADDU $c_3,$t_1 | ||
2349 | sltu $at,$c_3,$t_1 | ||
2350 | $ADDU $t_2,$at | ||
2351 | $ADDU $c_1,$t_2 | ||
2352 | sltu $at,$c_1,$t_2 | ||
2353 | $ADDU $c_2,$at | ||
2354 | $ST $c_3,11*$BNSZ($a0) | ||
2355 | |||
2356 | mflo $t_1 | ||
2357 | mfhi $t_2 | ||
2358 | slt $c_3,$t_2,$zero | ||
2359 | $SLL $t_2,1 | ||
2360 | $MULTU $a_6,$a_6 # mul_add_c(a[6],b[6],c1,c2,c3); | ||
2361 | slt $a2,$t_1,$zero | ||
2362 | $ADDU $t_2,$a2 | ||
2363 | $SLL $t_1,1 | ||
2364 | $ADDU $c_1,$t_1 | ||
2365 | sltu $at,$c_1,$t_1 | ||
2366 | $ADDU $t_2,$at | ||
2367 | $ADDU $c_2,$t_2 | ||
2368 | sltu $at,$c_2,$t_2 | ||
2369 | $ADDU $c_3,$at | ||
2370 | mflo $t_1 | ||
2371 | mfhi $t_2 | ||
2372 | $ADDU $c_1,$t_1 | ||
2373 | sltu $at,$c_1,$t_1 | ||
2374 | $MULTU $a_6,$a_7 # mul_add_c2(a[6],b[7],c2,c3,c1); | ||
2375 | $ADDU $t_2,$at | ||
2376 | $ADDU $c_2,$t_2 | ||
2377 | sltu $at,$c_2,$t_2 | ||
2378 | $ADDU $c_3,$at | ||
2379 | $ST $c_1,12*$BNSZ($a0) | ||
2380 | |||
2381 | mflo $t_1 | ||
2382 | mfhi $t_2 | ||
2383 | slt $c_1,$t_2,$zero | ||
2384 | $SLL $t_2,1 | ||
2385 | $MULTU $a_7,$a_7 # mul_add_c(a[7],b[7],c3,c1,c2); | ||
2386 | slt $a2,$t_1,$zero | ||
2387 | $ADDU $t_2,$a2 | ||
2388 | $SLL $t_1,1 | ||
2389 | $ADDU $c_2,$t_1 | ||
2390 | sltu $at,$c_2,$t_1 | ||
2391 | $ADDU $t_2,$at | ||
2392 | $ADDU $c_3,$t_2 | ||
2393 | sltu $at,$c_3,$t_2 | ||
2394 | $ADDU $c_1,$at | ||
2395 | $ST $c_2,13*$BNSZ($a0) | ||
2396 | |||
2397 | mflo $t_1 | ||
2398 | mfhi $t_2 | ||
2399 | $ADDU $c_3,$t_1 | ||
2400 | sltu $at,$c_3,$t_1 | ||
2401 | $ADDU $t_2,$at | ||
2402 | $ADDU $c_1,$t_2 | ||
2403 | $ST $c_3,14*$BNSZ($a0) | ||
2404 | $ST $c_1,15*$BNSZ($a0) | ||
2405 | |||
2406 | .set noreorder | ||
2407 | ___ | ||
2408 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
2409 | $REG_L $t3,4*$SZREG($sp) | ||
2410 | $REG_L $t2,3*$SZREG($sp) | ||
2411 | $REG_L $t1,2*$SZREG($sp) | ||
2412 | $REG_L $t0,1*$SZREG($sp) | ||
2413 | $REG_L $gp,0*$SZREG($sp) | ||
2414 | $PTR_ADD $sp,6*$SZREG | ||
2415 | ___ | ||
2416 | $code.=<<___; | ||
2417 | jr $ra | ||
2418 | nop | ||
2419 | .end bn_sqr_comba8 | ||
2420 | |||
2421 | .align 5 | ||
2422 | .globl bn_sqr_comba4 | ||
2423 | .ent bn_sqr_comba4 | ||
2424 | bn_sqr_comba4: | ||
2425 | ___ | ||
2426 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
2427 | .frame $sp,6*$SZREG,$ra | ||
2428 | .mask 0x8000f008,-$SZREG | ||
2429 | .set noreorder | ||
2430 | $PTR_SUB $sp,6*$SZREG | ||
2431 | $REG_S $ra,5*$SZREG($sp) | ||
2432 | $REG_S $t3,4*$SZREG($sp) | ||
2433 | $REG_S $t2,3*$SZREG($sp) | ||
2434 | $REG_S $t1,2*$SZREG($sp) | ||
2435 | $REG_S $t0,1*$SZREG($sp) | ||
2436 | $REG_S $gp,0*$SZREG($sp) | ||
2437 | ___ | ||
2438 | $code.=<<___; | ||
2439 | .set reorder | ||
2440 | $LD $a_0,0($a1) | ||
2441 | $LD $a_1,$BNSZ($a1) | ||
2442 | $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3); | ||
2443 | $LD $a_2,2*$BNSZ($a1) | ||
2444 | $LD $a_3,3*$BNSZ($a1) | ||
2445 | mflo $c_1 | ||
2446 | mfhi $c_2 | ||
2447 | $ST $c_1,0($a0) | ||
2448 | |||
2449 | $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1); | ||
2450 | mflo $t_1 | ||
2451 | mfhi $t_2 | ||
2452 | slt $c_1,$t_2,$zero | ||
2453 | $SLL $t_2,1 | ||
2454 | $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2); | ||
2455 | slt $a2,$t_1,$zero | ||
2456 | $ADDU $t_2,$a2 | ||
2457 | $SLL $t_1,1 | ||
2458 | $ADDU $c_2,$t_1 | ||
2459 | sltu $at,$c_2,$t_1 | ||
2460 | $ADDU $c_3,$t_2,$at | ||
2461 | $ST $c_2,$BNSZ($a0) | ||
2462 | |||
2463 | mflo $t_1 | ||
2464 | mfhi $t_2 | ||
2465 | slt $c_2,$t_2,$zero | ||
2466 | $SLL $t_2,1 | ||
2467 | $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2); | ||
2468 | slt $a2,$t_1,$zero | ||
2469 | $ADDU $t_2,$a2 | ||
2470 | $SLL $t_1,1 | ||
2471 | $ADDU $c_3,$t_1 | ||
2472 | sltu $at,$c_3,$t_1 | ||
2473 | $ADDU $t_2,$at | ||
2474 | $ADDU $c_1,$t_2 | ||
2475 | sltu $at,$c_1,$t_2 | ||
2476 | $ADDU $c_2,$at | ||
2477 | mflo $t_1 | ||
2478 | mfhi $t_2 | ||
2479 | $ADDU $c_3,$t_1 | ||
2480 | sltu $at,$c_3,$t_1 | ||
2481 | $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3); | ||
2482 | $ADDU $t_2,$at | ||
2483 | $ADDU $c_1,$t_2 | ||
2484 | sltu $at,$c_1,$t_2 | ||
2485 | $ADDU $c_2,$at | ||
2486 | $ST $c_3,2*$BNSZ($a0) | ||
2487 | |||
2488 | mflo $t_1 | ||
2489 | mfhi $t_2 | ||
2490 | slt $c_3,$t_2,$zero | ||
2491 | $SLL $t_2,1 | ||
2492 | $MULTU $a_1,$a_2 # mul_add_c2(a[1],b[2],c1,c2,c3); | ||
2493 | slt $a2,$t_1,$zero | ||
2494 | $ADDU $t_2,$a2 | ||
2495 | $SLL $t_1,1 | ||
2496 | $ADDU $c_1,$t_1 | ||
2497 | sltu $at,$c_1,$t_1 | ||
2498 | $ADDU $t_2,$at | ||
2499 | $ADDU $c_2,$t_2 | ||
2500 | sltu $at,$c_2,$t_2 | ||
2501 | $ADDU $c_3,$at | ||
2502 | mflo $t_1 | ||
2503 | mfhi $t_2 | ||
2504 | slt $at,$t_2,$zero | ||
2505 | $ADDU $c_3,$at | ||
2506 | $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1); | ||
2507 | $SLL $t_2,1 | ||
2508 | slt $a2,$t_1,$zero | ||
2509 | $ADDU $t_2,$a2 | ||
2510 | $SLL $t_1,1 | ||
2511 | $ADDU $c_1,$t_1 | ||
2512 | sltu $at,$c_1,$t_1 | ||
2513 | $ADDU $t_2,$at | ||
2514 | $ADDU $c_2,$t_2 | ||
2515 | sltu $at,$c_2,$t_2 | ||
2516 | $ADDU $c_3,$at | ||
2517 | $ST $c_1,3*$BNSZ($a0) | ||
2518 | |||
2519 | mflo $t_1 | ||
2520 | mfhi $t_2 | ||
2521 | slt $c_1,$t_2,$zero | ||
2522 | $SLL $t_2,1 | ||
2523 | $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1); | ||
2524 | slt $a2,$t_1,$zero | ||
2525 | $ADDU $t_2,$a2 | ||
2526 | $SLL $t_1,1 | ||
2527 | $ADDU $c_2,$t_1 | ||
2528 | sltu $at,$c_2,$t_1 | ||
2529 | $ADDU $t_2,$at | ||
2530 | $ADDU $c_3,$t_2 | ||
2531 | sltu $at,$c_3,$t_2 | ||
2532 | $ADDU $c_1,$at | ||
2533 | mflo $t_1 | ||
2534 | mfhi $t_2 | ||
2535 | $ADDU $c_2,$t_1 | ||
2536 | sltu $at,$c_2,$t_1 | ||
2537 | $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2); | ||
2538 | $ADDU $t_2,$at | ||
2539 | $ADDU $c_3,$t_2 | ||
2540 | sltu $at,$c_3,$t_2 | ||
2541 | $ADDU $c_1,$at | ||
2542 | $ST $c_2,4*$BNSZ($a0) | ||
2543 | |||
2544 | mflo $t_1 | ||
2545 | mfhi $t_2 | ||
2546 | slt $c_2,$t_2,$zero | ||
2547 | $SLL $t_2,1 | ||
2548 | $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3); | ||
2549 | slt $a2,$t_1,$zero | ||
2550 | $ADDU $t_2,$a2 | ||
2551 | $SLL $t_1,1 | ||
2552 | $ADDU $c_3,$t_1 | ||
2553 | sltu $at,$c_3,$t_1 | ||
2554 | $ADDU $t_2,$at | ||
2555 | $ADDU $c_1,$t_2 | ||
2556 | sltu $at,$c_1,$t_2 | ||
2557 | $ADDU $c_2,$at | ||
2558 | $ST $c_3,5*$BNSZ($a0) | ||
2559 | |||
2560 | mflo $t_1 | ||
2561 | mfhi $t_2 | ||
2562 | $ADDU $c_1,$t_1 | ||
2563 | sltu $at,$c_1,$t_1 | ||
2564 | $ADDU $t_2,$at | ||
2565 | $ADDU $c_2,$t_2 | ||
2566 | $ST $c_1,6*$BNSZ($a0) | ||
2567 | $ST $c_2,7*$BNSZ($a0) | ||
2568 | |||
2569 | .set noreorder | ||
2570 | ___ | ||
2571 | $code.=<<___ if ($flavour =~ /nubi/i); | ||
2572 | $REG_L $t3,4*$SZREG($sp) | ||
2573 | $REG_L $t2,3*$SZREG($sp) | ||
2574 | $REG_L $t1,2*$SZREG($sp) | ||
2575 | $REG_L $t0,1*$SZREG($sp) | ||
2576 | $REG_L $gp,0*$SZREG($sp) | ||
2577 | $PTR_ADD $sp,6*$SZREG | ||
2578 | ___ | ||
2579 | $code.=<<___; | ||
2580 | jr $ra | ||
2581 | nop | ||
2582 | .end bn_sqr_comba4 | ||
2583 | ___ | ||
2584 | print $code; | ||
2585 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/bn/asm/modexp512-x86_64.pl b/src/lib/libcrypto/bn/asm/modexp512-x86_64.pl
new file mode 100644
index 0000000000..54aeb01921
--- /dev/null
+++ b/src/lib/libcrypto/bn/asm/modexp512-x86_64.pl
@@ -0,0 +1,1496 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # Copyright (c) 2010-2011 Intel Corp. | ||
4 | # Author: Vinodh.Gopal@intel.com | ||
5 | # Jim Guilford | ||
6 | # Erdinc.Ozturk@intel.com | ||
7 | # Maxim.Perminov@intel.com | ||
8 | # | ||
9 | # More information about algorithm used can be found at: | ||
10 | # http://www.cse.buffalo.edu/srds2009/escs2009_submission_Gopal.pdf | ||
11 | # | ||
12 | # ==================================================================== | ||
13 | # Copyright (c) 2011 The OpenSSL Project. All rights reserved. | ||
14 | # | ||
15 | # Redistribution and use in source and binary forms, with or without | ||
16 | # modification, are permitted provided that the following conditions | ||
17 | # are met: | ||
18 | # | ||
19 | # 1. Redistributions of source code must retain the above copyright | ||
20 | # notice, this list of conditions and the following disclaimer. | ||
21 | # | ||
22 | # 2. Redistributions in binary form must reproduce the above copyright | ||
23 | # notice, this list of conditions and the following disclaimer in | ||
24 | # the documentation and/or other materials provided with the | ||
25 | # distribution. | ||
26 | # | ||
27 | # 3. All advertising materials mentioning features or use of this | ||
28 | # software must display the following acknowledgment: | ||
29 | # "This product includes software developed by the OpenSSL Project | ||
30 | # for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" | ||
31 | # | ||
32 | # 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to | ||
33 | # endorse or promote products derived from this software without | ||
34 | # prior written permission. For written permission, please contact | ||
35 | # licensing@OpenSSL.org. | ||
36 | # | ||
37 | # 5. Products derived from this software may not be called "OpenSSL" | ||
38 | # nor may "OpenSSL" appear in their names without prior written | ||
39 | # permission of the OpenSSL Project. | ||
40 | # | ||
41 | # 6. Redistributions of any form whatsoever must retain the following | ||
42 | # acknowledgment: | ||
43 | # "This product includes software developed by the OpenSSL Project | ||
44 | # for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" | ||
45 | # | ||
46 | # THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY | ||
47 | # EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
48 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
49 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR | ||
50 | # ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
51 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
52 | # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
53 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
54 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
55 | # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
56 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED | ||
57 | # OF THE POSSIBILITY OF SUCH DAMAGE. | ||
58 | # ==================================================================== | ||
59 | |||
60 | $flavour = shift; | ||
61 | $output = shift; | ||
62 | if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } | ||
63 | |||
64 | my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); | ||
65 | |||
66 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
67 | ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or | ||
68 | ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or | ||
69 | die "can't locate x86_64-xlate.pl"; | ||
70 | |||
71 | open STDOUT,"| $^X $xlate $flavour $output"; | ||
72 | |||
73 | use strict; | ||
74 | my $code=".text\n\n"; | ||
75 | my $m=0; | ||
76 | |||
77 | # | ||
78 | # Define x512 macros | ||
79 | # | ||
80 | |||
81 | #MULSTEP_512_ADD MACRO x7, x6, x5, x4, x3, x2, x1, x0, dst, src1, src2, add_src, tmp1, tmp2 | ||
82 | # | ||
83 | # uses rax, rdx, and args | ||
84 | sub MULSTEP_512_ADD | ||
85 | { | ||
86 | my ($x, $DST, $SRC2, $ASRC, $OP, $TMP)=@_; | ||
87 | my @X=@$x; # make a copy | ||
88 | $code.=<<___; | ||
89 | mov (+8*0)($SRC2), %rax | ||
90 | mul $OP # rdx:rax = %OP * [0] | ||
91 | mov ($ASRC), $X[0] | ||
92 | add %rax, $X[0] | ||
93 | adc \$0, %rdx | ||
94 | mov $X[0], $DST | ||
95 | ___ | ||
96 | for(my $i=1;$i<8;$i++) { | ||
97 | $code.=<<___; | ||
98 | mov %rdx, $TMP | ||
99 | |||
100 | mov (+8*$i)($SRC2), %rax | ||
101 | mul $OP # rdx:rax = %OP * [$i] | ||
102 | mov (+8*$i)($ASRC), $X[$i] | ||
103 | add %rax, $X[$i] | ||
104 | adc \$0, %rdx | ||
105 | add $TMP, $X[$i] | ||
106 | adc \$0, %rdx | ||
107 | ___ | ||
108 | } | ||
109 | $code.=<<___; | ||
110 | mov %rdx, $X[0] | ||
111 | ___ | ||
112 | } | ||
113 | |||
114 | #MULSTEP_512 MACRO x7, x6, x5, x4, x3, x2, x1, x0, dst, src2, src1_val, tmp | ||
115 | # | ||
116 | # uses rax, rdx, and args | ||
117 | sub MULSTEP_512 | ||
118 | { | ||
119 | my ($x, $DST, $SRC2, $OP, $TMP)=@_; | ||
120 | my @X=@$x; # make a copy | ||
121 | $code.=<<___; | ||
122 | mov (+8*0)($SRC2), %rax | ||
123 | mul $OP # rdx:rax = %OP * [0] | ||
124 | add %rax, $X[0] | ||
125 | adc \$0, %rdx | ||
126 | mov $X[0], $DST | ||
127 | ___ | ||
128 | for(my $i=1;$i<8;$i++) { | ||
129 | $code.=<<___; | ||
130 | mov %rdx, $TMP | ||
131 | |||
132 | mov (+8*$i)($SRC2), %rax | ||
133 | mul $OP # rdx:rax = %OP * [$i] | ||
134 | add %rax, $X[$i] | ||
135 | adc \$0, %rdx | ||
136 | add $TMP, $X[$i] | ||
137 | adc \$0, %rdx | ||
138 | ___ | ||
139 | } | ||
140 | $code.=<<___; | ||
141 | mov %rdx, $X[0] | ||
142 | ___ | ||
143 | } | ||
144 | |||
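Both MULSTEP macros expand to the same inner round: one qword `$OP` of the A operand is multiplied through all eight qwords of `$SRC2`, accumulated into the register window `@X`, and the lowest limb is retired to `$DST`; the caller then rotates `@X` with push/shift. A rough C equivalent of one round, assuming a compiler with `unsigned __int128` (function name hypothetical):

    #include <stdint.h>

    /* One MULSTEP_512 round: x += op * src2 limb-by-limb, retire
     * the low limb to *dst, and slide the 8-limb window down one. */
    static void mulstep_512(uint64_t x[8], uint64_t *dst,
                            const uint64_t src2[8], uint64_t op)
    {
        uint64_t carry = 0;
        for (int i = 0; i < 8; i++) {
            unsigned __int128 t = (unsigned __int128)op * src2[i]
                                + x[i] + carry;
            x[i]  = (uint64_t)t;
            carry = (uint64_t)(t >> 64);
        }
        *dst = x[0];                       /* "mov $X[0], $DST" */
        for (int i = 0; i < 7; i++)        /* push(@X,shift(@X)) */
            x[i] = x[i + 1];
        x[7] = carry;                      /* "mov %rdx, $X[0]"  */
    }

MULSTEP_512_ADD is the same round with a third operand streamed in from `$ASRC`.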
145 | # | ||
146 | # Swizzle Macros | ||
147 | # | ||
148 | |||
149 | # macro to copy data from flat space to swizzled table | ||
150 | #MACRO swizzle pDst, pSrc, tmp1, tmp2 | ||
151 | # pDst and pSrc are modified | ||
152 | sub swizzle | ||
153 | { | ||
154 | my ($pDst, $pSrc, $cnt, $d0)=@_; | ||
155 | $code.=<<___; | ||
156 | mov \$8, $cnt | ||
157 | loop_$m: | ||
158 | mov ($pSrc), $d0 | ||
159 | mov $d0#w, ($pDst) | ||
160 | shr \$16, $d0 | ||
161 | mov $d0#w, (+64*1)($pDst) | ||
162 | shr \$16, $d0 | ||
163 | mov $d0#w, (+64*2)($pDst) | ||
164 | shr \$16, $d0 | ||
165 | mov $d0#w, (+64*3)($pDst) | ||
166 | lea 8($pSrc), $pSrc | ||
167 | lea 64*4($pDst), $pDst | ||
168 | dec $cnt | ||
169 | jnz loop_$m | ||
170 | ___ | ||
171 | |||
172 | $m++; | ||
173 | } | ||
174 | |||
175 | # macro to copy data from swizzled table to flat space | ||
176 | #MACRO unswizzle pDst, pSrc, tmp*3 | ||
177 | sub unswizzle | ||
178 | { | ||
179 | my ($pDst, $pSrc, $cnt, $d0, $d1)=@_; | ||
180 | $code.=<<___; | ||
181 | mov \$4, $cnt | ||
182 | loop_$m: | ||
183 | movzxw (+64*3+256*0)($pSrc), $d0 | ||
184 | movzxw (+64*3+256*1)($pSrc), $d1 | ||
185 | shl \$16, $d0 | ||
186 | shl \$16, $d1 | ||
187 | mov (+64*2+256*0)($pSrc), $d0#w | ||
188 | mov (+64*2+256*1)($pSrc), $d1#w | ||
189 | shl \$16, $d0 | ||
190 | shl \$16, $d1 | ||
191 | mov (+64*1+256*0)($pSrc), $d0#w | ||
192 | mov (+64*1+256*1)($pSrc), $d1#w | ||
193 | shl \$16, $d0 | ||
194 | shl \$16, $d1 | ||
195 | mov (+64*0+256*0)($pSrc), $d0#w | ||
196 | mov (+64*0+256*1)($pSrc), $d1#w | ||
197 | mov $d0, (+8*0)($pDst) | ||
198 | mov $d1, (+8*1)($pDst) | ||
199 | lea 256*2($pSrc), $pSrc | ||
200 | lea 8*2($pDst), $pDst | ||
201 | sub \$1, $cnt | ||
202 | jnz loop_$m | ||
203 | ___ | ||
204 | |||
205 | $m++; | ||
206 | } | ||
207 | |||
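The swizzled table splits every qword into four 16-bit slices stored 64 bytes apart, so a fixed slice of all window entries lands in the same cache lines; this is the usual scatter arrangement for keeping the table lookup's memory trace independent of the secret window value. A C sketch of the store pattern swizzle() generates (function name hypothetical, strides copied from the macro):

    #include <stdint.h>
    #include <string.h>

    /* Scatter one 512-bit entry: qword i contributes four 16-bit
     * pieces at offsets 64*0..64*3, then both pointers advance as
     * in "lea 8($pSrc)" / "lea 64*4($pDst)". */
    static void swizzle_entry(uint8_t *dst, const uint64_t src[8])
    {
        for (int i = 0; i < 8; i++) {
            uint64_t d = src[i];
            for (int j = 0; j < 4; j++) {
                uint16_t w = (uint16_t)(d >> (16 * j));
                memcpy(dst + 64 * j, &w, sizeof(w));
            }
            dst += 64 * 4;
        }
    }

unswizzle() inverts this, rebuilding two qwords per iteration from the strided slices.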
208 | # | ||
209 | # Data Structures | ||
210 | # | ||
211 | |||
212 | # Reduce Data | ||
213 | # | ||
214 | # | ||
215 | # Offset Value | ||
216 | # 0C0 Carries | ||
217 | # 0B8 X2[10] | ||
218 | # 0B0 X2[9] | ||
219 | # 0A8 X2[8] | ||
220 | # 0A0 X2[7] | ||
221 | # 098 X2[6] | ||
222 | # 090 X2[5] | ||
223 | # 088 X2[4] | ||
224 | # 080 X2[3] | ||
225 | # 078 X2[2] | ||
226 | # 070 X2[1] | ||
227 | # 068 X2[0] | ||
228 | # 060 X1[12] P[10] | ||
229 | # 058 X1[11] P[9] Z[8] | ||
230 | # 050 X1[10] P[8] Z[7] | ||
231 | # 048 X1[9] P[7] Z[6] | ||
232 | # 040 X1[8] P[6] Z[5] | ||
233 | # 038 X1[7] P[5] Z[4] | ||
234 | # 030 X1[6] P[4] Z[3] | ||
235 | # 028 X1[5] P[3] Z[2] | ||
236 | # 020 X1[4] P[2] Z[1] | ||
237 | # 018 X1[3] P[1] Z[0] | ||
238 | # 010 X1[2] P[0] Y[2] | ||
239 | # 008 X1[1] Q[1] Y[1] | ||
240 | # 000 X1[0] Q[0] Y[0] | ||
241 | |||
242 | my $X1_offset = 0; # 13 qwords | ||
243 | my $X2_offset = $X1_offset + 13*8; # 11 qwords | ||
244 | my $Carries_offset = $X2_offset + 11*8; # 1 qword | ||
245 | my $Q_offset = 0; # 2 qwords | ||
246 | my $P_offset = $Q_offset + 2*8; # 11 qwords | ||
247 | my $Y_offset = 0; # 3 qwords | ||
248 | my $Z_offset = $Y_offset + 3*8; # 9 qwords | ||
249 | |||
250 | my $Red_Data_Size = $Carries_offset + 1*8; # (25 qwords) | ||
251 | |||
252 | # | ||
253 | # Stack Frame | ||
254 | # | ||
255 | # | ||
256 | # offset value | ||
257 | # ... <old stack contents> | ||
258 | # ... | ||
259 | # 280 Garray | ||
260 | |||
261 | # 278 tmp16[15] | ||
262 | # ... ... | ||
263 | # 200 tmp16[0] | ||
264 | |||
265 | # 1F8 tmp[7] | ||
266 | # ... ... | ||
267 | # 1C0 tmp[0] | ||
268 | |||
269 | # 1B8 GT[7] | ||
270 | # ... ... | ||
271 | # 180 GT[0] | ||
272 | |||
273 | # 178 Reduce Data | ||
274 | # ... ... | ||
275 | # 0B8 Reduce Data | ||
276 | # 0B0 reserved | ||
277 | # 0A8 reserved | ||
278 | # 0A0 reserved | ||
279 | # 098 reserved | ||
280 | # 090 reserved | ||
281 | # 088 reduce result addr | ||
282 | # 080 exp[8] | ||
283 | |||
284 | # ... | ||
285 | # 048 exp[1] | ||
286 | # 040 exp[0] | ||
287 | |||
288 | # 038 reserved | ||
289 | # 030 loop_idx | ||
290 | # 028 pg | ||
291 | # 020 i | ||
292 | # 018 pData ; arg 4 | ||
293 | # 010 pG ; arg 2 | ||
294 | # 008 pResult ; arg 1 | ||
295 | # 000 rsp ; stack pointer before subtract | ||
296 | |||
297 | my $rsp_offset = 0; | ||
298 | my $pResult_offset = 8*1 + $rsp_offset; | ||
299 | my $pG_offset = 8*1 + $pResult_offset; | ||
300 | my $pData_offset = 8*1 + $pG_offset; | ||
301 | my $i_offset = 8*1 + $pData_offset; | ||
302 | my $pg_offset = 8*1 + $i_offset; | ||
303 | my $loop_idx_offset = 8*1 + $pg_offset; | ||
304 | my $reserved1_offset = 8*1 + $loop_idx_offset; | ||
305 | my $exp_offset = 8*1 + $reserved1_offset; | ||
306 | my $red_result_addr_offset= 8*9 + $exp_offset; | ||
307 | my $reserved2_offset = 8*1 + $red_result_addr_offset; | ||
308 | my $Reduce_Data_offset = 8*5 + $reserved2_offset; | ||
309 | my $GT_offset = $Red_Data_Size + $Reduce_Data_offset; | ||
310 | my $tmp_offset = 8*8 + $GT_offset; | ||
311 | my $tmp16_offset = 8*8 + $tmp_offset; | ||
312 | my $garray_offset = 8*16 + $tmp16_offset; | ||
313 | my $mem_size = 8*8*32 + $garray_offset; | ||
314 | |||
315 | # | ||
316 | # Offsets within Reduce Data | ||
317 | # | ||
318 | # | ||
319 | # struct MODF_2FOLD_MONT_512_C1_DATA { | ||
320 | # UINT64 t[8][8]; | ||
321 | # UINT64 m[8]; | ||
322 | # UINT64 m1[8]; /* 2^768 % m */ | ||
323 | # UINT64 m2[8]; /* 2^640 % m */ | ||
324 | # UINT64 k1[2]; /* (- 1/m) % 2^128 */ | ||
325 | # }; | ||
326 | |||
327 | my $T = 0; | ||
328 | my $M = 512; # = 8 * 8 * 8 | ||
329 | my $M1 = 576; # = 8 * 8 * 9 /* += 8 * 8 */ | ||
330 | my $M2 = 640; # = 8 * 8 * 10 /* += 8 * 8 */ | ||
331 | my $K1 = 704; # = 8 * 8 * 11 /* += 8 * 8 */ | ||
332 | |||
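These constants drive the folding reduction from the paper cited at the top of the file, which mont_reduce below performs: the 1024-bit product is cut at bit 768 and its high part replaced by the precomputed residue m1, cut again at bit 640 against m2, and the remaining value finished with a 128-bit Montgomery step using k1. A sketch (widths match the 769- and 641-bit comments in mont_reduce):

    $x = x_h\,2^{768} + x_\ell \;\Rightarrow\; X_1 = x_h m_1 + x_\ell \equiv x \pmod{m}$   (769 bits)
    $X_1 = x'_h\,2^{640} + x'_\ell \;\Rightarrow\; X_2 = x'_h m_2 + x'_\ell \equiv X_1 \pmod{m}$   (641 bits)
    $Q = X_2 k_1 \bmod 2^{128}, \qquad R = (X_2 + Q\,m)/2^{128} \equiv X_2\,2^{-128} \pmod{m}$

The division by $2^{128}$ is exact because $k_1 \equiv -m^{-1} \pmod{2^{128}}$, so $X_2 + Q\,m \equiv 0 \pmod{2^{128}}$.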
333 | # | ||
334 | # FUNCTIONS | ||
335 | # | ||
336 | |||
337 | {{{ | ||
338 | # | ||
339 | # MULADD_128x512 : Function to multiply 128-bits (2 qwords) by 512-bits (8 qwords) | ||
340 | # and add 512-bits (8 qwords) | ||
341 | # to get 640 bits (10 qwords) | ||
342 | # Input: 128-bit mul source: [rdi+8*1], rbp | ||
343 | # 512-bit mul source: [rsi+8*n] | ||
344 | # 512-bit add source: r15, r14, ..., r9, r8 | ||
345 | # Output: r9, r8, r15, r14, r13, r12, r11, r10, [rcx+8*1], [rcx+8*0] | ||
346 | # Clobbers all regs except: rcx, rsi, rdi | ||
347 | $code.=<<___; | ||
348 | .type MULADD_128x512,\@abi-omnipotent | ||
349 | .align 16 | ||
350 | MULADD_128x512: | ||
351 | ___ | ||
352 | &MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx"); | ||
353 | $code.=<<___; | ||
354 | mov (+8*1)(%rdi), %rbp | ||
355 | ___ | ||
356 | &MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx"); | ||
357 | $code.=<<___; | ||
358 | ret | ||
359 | .size MULADD_128x512,.-MULADD_128x512 | ||
360 | ___ | ||
361 | }}} | ||
362 | |||
363 | {{{ | ||
364 | #MULADD_256x512 MACRO pDst, pA, pB, OP, TMP, X7, X6, X5, X4, X3, X2, X1, X0 | ||
365 | # | ||
366 | # Inputs: pDst: Destination (768 bits, 12 qwords) | ||
367 | # pA: Multiplicand (1024 bits, 16 qwords) | ||
368 | # pB: Multiplicand (512 bits, 8 qwords) | ||
369 | # Dst = Ah * B + Al | ||
370 | # where Ah is (in qwords) A[15:12] (256 bits) and Al is A[7:0] (512 bits) | ||
371 | # Results in X3 X2 X1 X0 X7 X6 X5 X4 Dst[3:0] | ||
372 | # Uses registers: arguments, RAX, RDX | ||
373 | sub MULADD_256x512 | ||
374 | { | ||
375 | my ($pDst, $pA, $pB, $OP, $TMP, $X)=@_; | ||
376 | $code.=<<___; | ||
377 | mov (+8*12)($pA), $OP | ||
378 | ___ | ||
379 | &MULSTEP_512_ADD($X, "(+8*0)($pDst)", $pB, $pA, $OP, $TMP); | ||
380 | push(@$X,shift(@$X)); | ||
381 | |||
382 | $code.=<<___; | ||
383 | mov (+8*13)($pA), $OP | ||
384 | ___ | ||
385 | &MULSTEP_512($X, "(+8*1)($pDst)", $pB, $OP, $TMP); | ||
386 | push(@$X,shift(@$X)); | ||
387 | |||
388 | $code.=<<___; | ||
389 | mov (+8*14)($pA), $OP | ||
390 | ___ | ||
391 | &MULSTEP_512($X, "(+8*2)($pDst)", $pB, $OP, $TMP); | ||
392 | push(@$X,shift(@$X)); | ||
393 | |||
394 | $code.=<<___; | ||
395 | mov (+8*15)($pA), $OP | ||
396 | ___ | ||
397 | &MULSTEP_512($X, "(+8*3)($pDst)", $pB, $OP, $TMP); | ||
398 | push(@$X,shift(@$X)); | ||
399 | } | ||
400 | |||
401 | # | ||
402 | # mont_reduce(UINT64 *x, /* 1024 bits, 16 qwords */ | ||
403 | # UINT64 *m, /* 512 bits, 8 qwords */ | ||
404 | # MODF_2FOLD_MONT_512_C1_DATA *data, | ||
405 | # UINT64 *r) /* 512 bits, 8 qwords */ | ||
406 | # Input: x (number to be reduced): tmp16 (Implicit) | ||
407 | # m (modulus): [pM] (Implicit) | ||
408 | # data (reduce data): [pData] (Implicit) | ||
409 | # Output: r (result): Address in [red_res_addr] | ||
410 | # result also in: r9, r8, r15, r14, r13, r12, r11, r10 | ||
411 | |||
412 | my @X=map("%r$_",(8..15)); | ||
413 | |||
414 | $code.=<<___; | ||
415 | .type mont_reduce,\@abi-omnipotent | ||
416 | .align 16 | ||
417 | mont_reduce: | ||
418 | ___ | ||
419 | |||
420 | my $STACK_DEPTH = 8; | ||
421 | # | ||
422 | # X1 = Xh * M1 + Xl | ||
423 | $code.=<<___; | ||
424 | lea (+$Reduce_Data_offset+$X1_offset+$STACK_DEPTH)(%rsp), %rdi # pX1 (Dst) 769 bits, 13 qwords | ||
425 | mov (+$pData_offset+$STACK_DEPTH)(%rsp), %rsi # pM1 (Bsrc) 512 bits, 8 qwords | ||
426 | add \$$M1, %rsi | ||
427 | lea (+$tmp16_offset+$STACK_DEPTH)(%rsp), %rcx # X (Asrc) 1024 bits, 16 qwords | ||
428 | |||
429 | ___ | ||
430 | |||
431 | &MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X); # rotates @X 4 times | ||
432 | # results in r11, r10, r9, r8, r15, r14, r13, r12, X1[3:0] | ||
433 | |||
434 | $code.=<<___; | ||
435 | xor %rax, %rax | ||
436 | # X1 += xl | ||
437 | add (+8*8)(%rcx), $X[4] | ||
438 | adc (+8*9)(%rcx), $X[5] | ||
439 | adc (+8*10)(%rcx), $X[6] | ||
440 | adc (+8*11)(%rcx), $X[7] | ||
441 | adc \$0, %rax | ||
442 | # X1 is now rax, r11-r8, r15-r12, tmp16[3:0] | ||
443 | |||
444 | # | ||
445 | # check for carry ;; carry stored in rax | ||
446 | mov $X[4], (+8*8)(%rdi) # rdi points to X1 | ||
447 | mov $X[5], (+8*9)(%rdi) | ||
448 | mov $X[6], %rbp | ||
449 | mov $X[7], (+8*11)(%rdi) | ||
450 | |||
451 | mov %rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp) | ||
452 | |||
453 | mov (+8*0)(%rdi), $X[4] | ||
454 | mov (+8*1)(%rdi), $X[5] | ||
455 | mov (+8*2)(%rdi), $X[6] | ||
456 | mov (+8*3)(%rdi), $X[7] | ||
457 | |||
458 | # X1 is now stored in: X1[11], rbp, X1[9:8], r15-r8 | ||
459 | # rdi -> X1 | ||
460 | # rsi -> M1 | ||
461 | |||
462 | # | ||
463 | # X2 = Xh * M2 + Xl | ||
464 | # do first part (X2 = Xh * M2) | ||
465 | add \$8*10, %rdi # rdi -> pXh ; 128 bits, 2 qwords | ||
466 | # Xh is actually { [rdi+8*1], rbp } | ||
467 | add \$`$M2-$M1`, %rsi # rsi -> M2 | ||
468 | lea (+$Reduce_Data_offset+$X2_offset+$STACK_DEPTH)(%rsp), %rcx # rcx -> pX2 ; 641 bits, 11 qwords | ||
469 | ___ | ||
470 | unshift(@X,pop(@X)); unshift(@X,pop(@X)); | ||
471 | $code.=<<___; | ||
472 | |||
473 | call MULADD_128x512 # args in rcx, rdi / rbp, rsi, r15-r8 | ||
474 | # result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0] | ||
475 | mov (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rax | ||
476 | |||
477 | # X2 += Xl | ||
478 | add (+8*8-8*10)(%rdi), $X[6] # (-8*10) is to adjust rdi -> Xh to Xl | ||
479 | adc (+8*9-8*10)(%rdi), $X[7] | ||
480 | mov $X[6], (+8*8)(%rcx) | ||
481 | mov $X[7], (+8*9)(%rcx) | ||
482 | |||
483 | adc %rax, %rax | ||
484 | mov %rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp) | ||
485 | |||
486 | lea (+$Reduce_Data_offset+$Q_offset+$STACK_DEPTH)(%rsp), %rdi # rdi -> pQ ; 128 bits, 2 qwords | ||
487 | add \$`$K1-$M2`, %rsi # rsi -> pK1 ; 128 bits, 2 qwords | ||
488 | |||
489 | # MUL_128x128t128 rdi, rcx, rsi ; Q = X2 * K1 (bottom half) | ||
490 | # B1:B0 = rsi[1:0] = K1[1:0] | ||
491 | # A1:A0 = rcx[1:0] = X2[1:0] | ||
492 | # Result = rdi[1],rbp = Q[1],rbp | ||
493 | mov (%rsi), %r8 # B0 | ||
494 | mov (+8*1)(%rsi), %rbx # B1 | ||
495 | |||
496 | mov (%rcx), %rax # A0 | ||
497 | mul %r8 # B0 | ||
498 | mov %rax, %rbp | ||
499 | mov %rdx, %r9 | ||
500 | |||
501 | mov (+8*1)(%rcx), %rax # A1 | ||
502 | mul %r8 # B0 | ||
503 | add %rax, %r9 | ||
504 | |||
505 | mov (%rcx), %rax # A0 | ||
506 | mul %rbx # B1 | ||
507 | add %rax, %r9 | ||
508 | |||
509 | mov %r9, (+8*1)(%rdi) | ||
510 | # end MUL_128x128t128 | ||
511 | |||
512 | sub \$`$K1-$M`, %rsi | ||
513 | |||
514 | mov (%rcx), $X[6] | ||
515 | mov (+8*1)(%rcx), $X[7] # r9:r8 = X2[1:0] | ||
516 | |||
517 | call MULADD_128x512 # args in rcx, rdi / rbp, rsi, r15-r8 | ||
518 | # result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0] | ||
519 | |||
520 | # load first half of m to rdx, rdi, rbx, rax | ||
521 | # moved this here for efficiency | ||
522 | mov (+8*0)(%rsi), %rax | ||
523 | mov (+8*1)(%rsi), %rbx | ||
524 | mov (+8*2)(%rsi), %rdi | ||
525 | mov (+8*3)(%rsi), %rdx | ||
526 | |||
527 | # continue with reduction | ||
528 | mov (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rbp | ||
529 | |||
530 | add (+8*8)(%rcx), $X[6] | ||
531 | adc (+8*9)(%rcx), $X[7] | ||
532 | |||
533 | #accumulate the final carry to rbp | ||
534 | adc %rbp, %rbp | ||
535 | |||
536 | # Add in overflow corrections: R = (X2>>128) += T[overflow] | ||
537 | # R = {r9, r8, r15, r14, ..., r10} | ||
538 | shl \$3, %rbp | ||
539 | mov (+$pData_offset+$STACK_DEPTH)(%rsp), %rcx # rcx -> Data (and points to T) | ||
540 | add %rcx, %rbp # pT ; 512 bits, 8 qwords, spread out | ||
541 | |||
542 | # rsi will be used to generate a mask after the addition | ||
543 | xor %rsi, %rsi | ||
544 | |||
545 | add (+8*8*0)(%rbp), $X[0] | ||
546 | adc (+8*8*1)(%rbp), $X[1] | ||
547 | adc (+8*8*2)(%rbp), $X[2] | ||
548 | adc (+8*8*3)(%rbp), $X[3] | ||
549 | adc (+8*8*4)(%rbp), $X[4] | ||
550 | adc (+8*8*5)(%rbp), $X[5] | ||
551 | adc (+8*8*6)(%rbp), $X[6] | ||
552 | adc (+8*8*7)(%rbp), $X[7] | ||
553 | |||
554 | # if there is a carry: rsi = 0xFFFFFFFFFFFFFFFF | ||
555 | # if carry is clear: rsi = 0x0000000000000000 | ||
556 | sbb \$0, %rsi | ||
557 | |||
558 | # if carry is clear, subtract 0. Otherwise, subtract 256 bits of m | ||
559 | and %rsi, %rax | ||
560 | and %rsi, %rbx | ||
561 | and %rsi, %rdi | ||
562 | and %rsi, %rdx | ||
563 | |||
564 | mov \$1, %rbp | ||
565 | sub %rax, $X[0] | ||
566 | sbb %rbx, $X[1] | ||
567 | sbb %rdi, $X[2] | ||
568 | sbb %rdx, $X[3] | ||
569 | |||
570 | # if there is a borrow: rbp = 0 | ||
571 | # if there is no borrow: rbp = 1 | ||
572 | # this is used to save the borrows in between the first half and the 2nd half of the subtraction of m | ||
573 | sbb \$0, %rbp | ||
574 | |||
575 | #load second half of m to rdx, rdi, rbx, rax | ||
576 | |||
577 | add \$$M, %rcx | ||
578 | mov (+8*4)(%rcx), %rax | ||
579 | mov (+8*5)(%rcx), %rbx | ||
580 | mov (+8*6)(%rcx), %rdi | ||
581 | mov (+8*7)(%rcx), %rdx | ||
582 | |||
583 | # use the rsi mask as before | ||
584 | # if carry is clear, subtract 0. Otherwise, subtract 256 bits of m | ||
585 | and %rsi, %rax | ||
586 | and %rsi, %rbx | ||
587 | and %rsi, %rdi | ||
588 | and %rsi, %rdx | ||
589 | |||
590 | # if rbp = 0, there was a borrow before, it is moved to the carry flag | ||
591 | # if rbp = 1, there was not a borrow before, carry flag is cleared | ||
592 | sub \$1, %rbp | ||
593 | |||
594 | sbb %rax, $X[4] | ||
595 | sbb %rbx, $X[5] | ||
596 | sbb %rdi, $X[6] | ||
597 | sbb %rdx, $X[7] | ||
598 | |||
599 | # write R back to memory | ||
600 | |||
601 | mov (+$red_result_addr_offset+$STACK_DEPTH)(%rsp), %rsi | ||
602 | mov $X[0], (+8*0)(%rsi) | ||
603 | mov $X[1], (+8*1)(%rsi) | ||
604 | mov $X[2], (+8*2)(%rsi) | ||
605 | mov $X[3], (+8*3)(%rsi) | ||
606 | mov $X[4], (+8*4)(%rsi) | ||
607 | mov $X[5], (+8*5)(%rsi) | ||
608 | mov $X[6], (+8*6)(%rsi) | ||
609 | mov $X[7], (+8*7)(%rsi) | ||
610 | |||
611 | ret | ||
612 | .size mont_reduce,.-mont_reduce | ||
613 | ___ | ||
614 | }}} | ||
615 | |||
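The tail of mont_reduce avoids branching on the final overflow: "sbb $0, %rsi" smears the carry into an all-zero or all-one mask, m is AND-ed with it, and the (possibly zeroed) modulus is subtracted in two 256-bit halves with the borrow bridged through rbp. The same idiom in C, as a sketch (one loop instead of two halves):

    #include <stdint.h>

    /* Branch-free "subtract m if carry": mask is 0 or ~0. */
    static void cond_sub_512(uint64_t r[8], const uint64_t m[8],
                             uint64_t carry)
    {
        uint64_t mask = 0 - (carry & 1);   /* "sbb $0, %rsi" */
        uint64_t borrow = 0;
        for (int i = 0; i < 8; i++) {
            unsigned __int128 t = (unsigned __int128)r[i]
                                - (m[i] & mask) - borrow;
            r[i]   = (uint64_t)t;
            borrow = (uint64_t)(t >> 64) & 1;
        }
    }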
616 | {{{ | ||
617 | #MUL_512x512 MACRO pDst, pA, pB, x7, x6, x5, x4, x3, x2, x1, x0, tmp*2 | ||
618 | # | ||
619 | # Inputs: pDst: Destination (1024 bits, 16 qwords) | ||
620 | # pA: Multiplicand (512 bits, 8 qwords) | ||
621 | # pB: Multiplicand (512 bits, 8 qwords) | ||
622 | # Uses registers rax, rdx, args | ||
623 | # B operand in [pB] and also in x7...x0 | ||
624 | sub MUL_512x512 | ||
625 | { | ||
626 | my ($pDst, $pA, $pB, $x, $OP, $TMP, $pDst_o)=@_; | ||
627 | my ($pDst, $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/); | ||
628 | my @X=@$x; # make a copy | ||
629 | |||
630 | $code.=<<___; | ||
631 | mov (+8*0)($pA), $OP | ||
632 | |||
633 | mov $X[0], %rax | ||
634 | mul $OP # rdx:rax = %OP * [0] | ||
635 | mov %rax, (+$pDst_o+8*0)($pDst) | ||
636 | mov %rdx, $X[0] | ||
637 | ___ | ||
638 | for(my $i=1;$i<8;$i++) { | ||
639 | $code.=<<___; | ||
640 | mov $X[$i], %rax | ||
641 | mul $OP # rdx:rax = %OP * [$i] | ||
642 | add %rax, $X[$i-1] | ||
643 | adc \$0, %rdx | ||
644 | mov %rdx, $X[$i] | ||
645 | ___ | ||
646 | } | ||
647 | |||
648 | for(my $i=1;$i<8;$i++) { | ||
649 | $code.=<<___; | ||
650 | mov (+8*$i)($pA), $OP | ||
651 | ___ | ||
652 | |||
653 | &MULSTEP_512(\@X, "(+$pDst_o+8*$i)($pDst)", $pB, $OP, $TMP); | ||
654 | push(@X,shift(@X)); | ||
655 | } | ||
656 | |||
657 | $code.=<<___; | ||
658 | mov $X[0], (+$pDst_o+8*8)($pDst) | ||
659 | mov $X[1], (+$pDst_o+8*9)($pDst) | ||
660 | mov $X[2], (+$pDst_o+8*10)($pDst) | ||
661 | mov $X[3], (+$pDst_o+8*11)($pDst) | ||
662 | mov $X[4], (+$pDst_o+8*12)($pDst) | ||
663 | mov $X[5], (+$pDst_o+8*13)($pDst) | ||
664 | mov $X[6], (+$pDst_o+8*14)($pDst) | ||
665 | mov $X[7], (+$pDst_o+8*15)($pDst) | ||
666 | ___ | ||
667 | } | ||
668 | |||
669 | # | ||
670 | # mont_mul_a3b : subroutine to compute (Src1 * Src2) % M (all 512-bits) | ||
671 | # Input: src1: Address of source 1: rdi | ||
672 | # src2: Address of source 2: rsi | ||
673 | # Output: dst: Address of destination: [red_res_addr] | ||
674 | # src2 and result also in: r9, r8, r15, r14, r13, r12, r11, r10 | ||
675 | # Temp: Clobbers [tmp16], all registers | ||
676 | $code.=<<___; | ||
677 | .type mont_mul_a3b,\@abi-omnipotent | ||
678 | .align 16 | ||
679 | mont_mul_a3b: | ||
680 | # | ||
681 | # multiply tmp = src1 * src2 | ||
682 | # For multiply: dst = rcx, src1 = rdi, src2 = rsi | ||
683 | # stack depth is extra 8 from call | ||
684 | ___ | ||
685 | &MUL_512x512("%rsp+$tmp16_offset+8", "%rdi", "%rsi", [map("%r$_",(10..15,8..9))], "%rbp", "%rbx"); | ||
686 | $code.=<<___; | ||
687 | # | ||
688 | # Dst = tmp % m | ||
689 | # Call reduce(tmp, m, data, dst) | ||
690 | |||
691 | # tail recursion optimization: jmp to mont_reduce and return from there | ||
692 | jmp mont_reduce | ||
693 | # call mont_reduce | ||
694 | # ret | ||
695 | .size mont_mul_a3b,.-mont_mul_a3b | ||
696 | ___ | ||
697 | }}} | ||
698 | |||
699 | {{{ | ||
700 | #SQR_512 MACRO pDest, pA, x7, x6, x5, x4, x3, x2, x1, x0, tmp*4 | ||
701 | # | ||
702 | # Input in memory [pA] and also in x7...x0 | ||
703 | # Uses all argument registers plus rax and rdx | ||
704 | # | ||
705 | # This version computes all of the off-diagonal terms into memory, | ||
706 | # and then it adds in the diagonal terms | ||
707 | |||
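SQR_512 halves the multiplication count by exploiting symmetry: the seven passes compute each off-diagonal product a[i]*a[j] (i < j) exactly once into memory, the accumulated result is doubled, and the eight diagonal squares are then added in. The same strategy as a generic C loop, a sketch using `unsigned __int128` (the assembly below is the fully unrolled form):

    #include <stdint.h>

    /* r = a^2 for 8-limb a: off-diagonal terms once, doubled,
     * then the diagonal squares a[i]^2 at position 2i. */
    static void sqr_512(uint64_t r[16], const uint64_t a[8])
    {
        unsigned __int128 t;
        uint64_t carry;
        for (int i = 0; i < 16; i++) r[i] = 0;

        for (int i = 0; i < 8; i++) {      /* cross terms, i < j */
            carry = 0;
            for (int j = i + 1; j < 8; j++) {
                t = (unsigned __int128)a[i] * a[j] + r[i + j] + carry;
                r[i + j] = (uint64_t)t;
                carry    = (uint64_t)(t >> 64);
            }
            r[i + 8] = carry;
        }

        carry = 0;                          /* double: r <<= 1 */
        for (int i = 0; i < 16; i++) {
            uint64_t top = r[i] >> 63;
            r[i] = (r[i] << 1) | carry;
            carry = top;
        }

        carry = 0;                          /* add in the diagonal */
        for (int i = 0; i < 8; i++) {
            t = (unsigned __int128)a[i] * a[i] + r[2*i] + carry;
            r[2*i] = (uint64_t)t;
            t = (t >> 64) + r[2*i + 1];
            r[2*i + 1] = (uint64_t)t;
            carry = (uint64_t)(t >> 64);
        }
    }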
708 | sub SQR_512 | ||
709 | { | ||
710 | my ($pDst, $pA, $x, $A, $tmp, $x7, $x6, $pDst_o)=@_; | ||
711 | my ($pDst, $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/); | ||
712 | my @X=@$x; # make a copy | ||
713 | $code.=<<___; | ||
714 | # ------------------ | ||
715 | # first pass 01...07 | ||
716 | # ------------------ | ||
717 | mov $X[0], $A | ||
718 | |||
719 | mov $X[1],%rax | ||
720 | mul $A | ||
721 | mov %rax, (+$pDst_o+8*1)($pDst) | ||
722 | ___ | ||
723 | for(my $i=2;$i<8;$i++) { | ||
724 | $code.=<<___; | ||
725 | mov %rdx, $X[$i-2] | ||
726 | mov $X[$i],%rax | ||
727 | mul $A | ||
728 | add %rax, $X[$i-2] | ||
729 | adc \$0, %rdx | ||
730 | ___ | ||
731 | } | ||
732 | $code.=<<___; | ||
733 | mov %rdx, $x7 | ||
734 | |||
735 | mov $X[0], (+$pDst_o+8*2)($pDst) | ||
736 | |||
737 | # ------------------ | ||
738 | # second pass 12...17 | ||
739 | # ------------------ | ||
740 | |||
741 | mov (+8*1)($pA), $A | ||
742 | |||
743 | mov (+8*2)($pA),%rax | ||
744 | mul $A | ||
745 | add %rax, $X[1] | ||
746 | adc \$0, %rdx | ||
747 | mov $X[1], (+$pDst_o+8*3)($pDst) | ||
748 | |||
749 | mov %rdx, $X[0] | ||
750 | mov (+8*3)($pA),%rax | ||
751 | mul $A | ||
752 | add %rax, $X[2] | ||
753 | adc \$0, %rdx | ||
754 | add $X[0], $X[2] | ||
755 | adc \$0, %rdx | ||
756 | mov $X[2], (+$pDst_o+8*4)($pDst) | ||
757 | |||
758 | mov %rdx, $X[0] | ||
759 | mov (+8*4)($pA),%rax | ||
760 | mul $A | ||
761 | add %rax, $X[3] | ||
762 | adc \$0, %rdx | ||
763 | add $X[0], $X[3] | ||
764 | adc \$0, %rdx | ||
765 | |||
766 | mov %rdx, $X[0] | ||
767 | mov (+8*5)($pA),%rax | ||
768 | mul $A | ||
769 | add %rax, $X[4] | ||
770 | adc \$0, %rdx | ||
771 | add $X[0], $X[4] | ||
772 | adc \$0, %rdx | ||
773 | |||
774 | mov %rdx, $X[0] | ||
775 | mov $X[6],%rax | ||
776 | mul $A | ||
777 | add %rax, $X[5] | ||
778 | adc \$0, %rdx | ||
779 | add $X[0], $X[5] | ||
780 | adc \$0, %rdx | ||
781 | |||
782 | mov %rdx, $X[0] | ||
783 | mov $X[7],%rax | ||
784 | mul $A | ||
785 | add %rax, $x7 | ||
786 | adc \$0, %rdx | ||
787 | add $X[0], $x7 | ||
788 | adc \$0, %rdx | ||
789 | |||
790 | mov %rdx, $X[1] | ||
791 | |||
792 | # ------------------ | ||
793 | # third pass 23...27 | ||
794 | # ------------------ | ||
795 | mov (+8*2)($pA), $A | ||
796 | |||
797 | mov (+8*3)($pA),%rax | ||
798 | mul $A | ||
799 | add %rax, $X[3] | ||
800 | adc \$0, %rdx | ||
801 | mov $X[3], (+$pDst_o+8*5)($pDst) | ||
802 | |||
803 | mov %rdx, $X[0] | ||
804 | mov (+8*4)($pA),%rax | ||
805 | mul $A | ||
806 | add %rax, $X[4] | ||
807 | adc \$0, %rdx | ||
808 | add $X[0], $X[4] | ||
809 | adc \$0, %rdx | ||
810 | mov $X[4], (+$pDst_o+8*6)($pDst) | ||
811 | |||
812 | mov %rdx, $X[0] | ||
813 | mov (+8*5)($pA),%rax | ||
814 | mul $A | ||
815 | add %rax, $X[5] | ||
816 | adc \$0, %rdx | ||
817 | add $X[0], $X[5] | ||
818 | adc \$0, %rdx | ||
819 | |||
820 | mov %rdx, $X[0] | ||
821 | mov $X[6],%rax | ||
822 | mul $A | ||
823 | add %rax, $x7 | ||
824 | adc \$0, %rdx | ||
825 | add $X[0], $x7 | ||
826 | adc \$0, %rdx | ||
827 | |||
828 | mov %rdx, $X[0] | ||
829 | mov $X[7],%rax | ||
830 | mul $A | ||
831 | add %rax, $X[1] | ||
832 | adc \$0, %rdx | ||
833 | add $X[0], $X[1] | ||
834 | adc \$0, %rdx | ||
835 | |||
836 | mov %rdx, $X[2] | ||
837 | |||
838 | # ------------------ | ||
839 | # fourth pass 34...37 | ||
840 | # ------------------ | ||
841 | |||
842 | mov (+8*3)($pA), $A | ||
843 | |||
844 | mov (+8*4)($pA),%rax | ||
845 | mul $A | ||
846 | add %rax, $X[5] | ||
847 | adc \$0, %rdx | ||
848 | mov $X[5], (+$pDst_o+8*7)($pDst) | ||
849 | |||
850 | mov %rdx, $X[0] | ||
851 | mov (+8*5)($pA),%rax | ||
852 | mul $A | ||
853 | add %rax, $x7 | ||
854 | adc \$0, %rdx | ||
855 | add $X[0], $x7 | ||
856 | adc \$0, %rdx | ||
857 | mov $x7, (+$pDst_o+8*8)($pDst) | ||
858 | |||
859 | mov %rdx, $X[0] | ||
860 | mov $X[6],%rax | ||
861 | mul $A | ||
862 | add %rax, $X[1] | ||
863 | adc \$0, %rdx | ||
864 | add $X[0], $X[1] | ||
865 | adc \$0, %rdx | ||
866 | |||
867 | mov %rdx, $X[0] | ||
868 | mov $X[7],%rax | ||
869 | mul $A | ||
870 | add %rax, $X[2] | ||
871 | adc \$0, %rdx | ||
872 | add $X[0], $X[2] | ||
873 | adc \$0, %rdx | ||
874 | |||
875 | mov %rdx, $X[5] | ||
876 | |||
877 | # ------------------ | ||
878 | # fifth pass 45...47 | ||
879 | # ------------------ | ||
880 | mov (+8*4)($pA), $A | ||
881 | |||
882 | mov (+8*5)($pA),%rax | ||
883 | mul $A | ||
884 | add %rax, $X[1] | ||
885 | adc \$0, %rdx | ||
886 | mov $X[1], (+$pDst_o+8*9)($pDst) | ||
887 | |||
888 | mov %rdx, $X[0] | ||
889 | mov $X[6],%rax | ||
890 | mul $A | ||
891 | add %rax, $X[2] | ||
892 | adc \$0, %rdx | ||
893 | add $X[0], $X[2] | ||
894 | adc \$0, %rdx | ||
895 | mov $X[2], (+$pDst_o+8*10)($pDst) | ||
896 | |||
897 | mov %rdx, $X[0] | ||
898 | mov $X[7],%rax | ||
899 | mul $A | ||
900 | add %rax, $X[5] | ||
901 | adc \$0, %rdx | ||
902 | add $X[0], $X[5] | ||
903 | adc \$0, %rdx | ||
904 | |||
905 | mov %rdx, $X[1] | ||
906 | |||
907 | # ------------------ | ||
908 | # sixth pass 56...57 | ||
909 | # ------------------ | ||
910 | mov (+8*5)($pA), $A | ||
911 | |||
912 | mov $X[6],%rax | ||
913 | mul $A | ||
914 | add %rax, $X[5] | ||
915 | adc \$0, %rdx | ||
916 | mov $X[5], (+$pDst_o+8*11)($pDst) | ||
917 | |||
918 | mov %rdx, $X[0] | ||
919 | mov $X[7],%rax | ||
920 | mul $A | ||
921 | add %rax, $X[1] | ||
922 | adc \$0, %rdx | ||
923 | add $X[0], $X[1] | ||
924 | adc \$0, %rdx | ||
925 | mov $X[1], (+$pDst_o+8*12)($pDst) | ||
926 | |||
927 | mov %rdx, $X[2] | ||
928 | |||
929 | # ------------------ | ||
930 | # seventh pass 67 | ||
931 | # ------------------ | ||
932 | mov $X[6], $A | ||
933 | |||
934 | mov $X[7],%rax | ||
935 | mul $A | ||
936 | add %rax, $X[2] | ||
937 | adc \$0, %rdx | ||
938 | mov $X[2], (+$pDst_o+8*13)($pDst) | ||
939 | |||
940 | mov %rdx, (+$pDst_o+8*14)($pDst) | ||
941 | |||
942 | # start finalize (add in squares, and double off-terms) | ||
943 | mov (+$pDst_o+8*1)($pDst), $X[0] | ||
944 | mov (+$pDst_o+8*2)($pDst), $X[1] | ||
945 | mov (+$pDst_o+8*3)($pDst), $X[2] | ||
946 | mov (+$pDst_o+8*4)($pDst), $X[3] | ||
947 | mov (+$pDst_o+8*5)($pDst), $X[4] | ||
948 | mov (+$pDst_o+8*6)($pDst), $X[5] | ||
949 | |||
950 | mov (+8*3)($pA), %rax | ||
951 | mul %rax | ||
952 | mov %rax, $x6 | ||
953 | mov %rdx, $X[6] | ||
954 | |||
955 | add $X[0], $X[0] | ||
956 | adc $X[1], $X[1] | ||
957 | adc $X[2], $X[2] | ||
958 | adc $X[3], $X[3] | ||
959 | adc $X[4], $X[4] | ||
960 | adc $X[5], $X[5] | ||
961 | adc \$0, $X[6] | ||
962 | |||
963 | mov (+8*0)($pA), %rax | ||
964 | mul %rax | ||
965 | mov %rax, (+$pDst_o+8*0)($pDst) | ||
966 | mov %rdx, $A | ||
967 | |||
968 | mov (+8*1)($pA), %rax | ||
969 | mul %rax | ||
970 | |||
971 | add $A, $X[0] | ||
972 | adc %rax, $X[1] | ||
973 | adc \$0, %rdx | ||
974 | |||
975 | mov %rdx, $A | ||
976 | mov $X[0], (+$pDst_o+8*1)($pDst) | ||
977 | mov $X[1], (+$pDst_o+8*2)($pDst) | ||
978 | |||
979 | mov (+8*2)($pA), %rax | ||
980 | mul %rax | ||
981 | |||
982 | add $A, $X[2] | ||
983 | adc %rax, $X[3] | ||
984 | adc \$0, %rdx | ||
985 | |||
986 | mov %rdx, $A | ||
987 | |||
988 | mov $X[2], (+$pDst_o+8*3)($pDst) | ||
989 | mov $X[3], (+$pDst_o+8*4)($pDst) | ||
990 | |||
991 | xor $tmp, $tmp | ||
992 | add $A, $X[4] | ||
993 | adc $x6, $X[5] | ||
994 | adc \$0, $tmp | ||
995 | |||
996 | mov $X[4], (+$pDst_o+8*5)($pDst) | ||
997 | mov $X[5], (+$pDst_o+8*6)($pDst) | ||
998 | |||
999 | # $tmp has 0/1 (the doubling carry) in column 7 | ||
1000 | # $X[6] (high half of a[3]^2) has a full value for column 7 | ||
1001 | |||
1002 | mov (+$pDst_o+8*7)($pDst), $X[0] | ||
1003 | mov (+$pDst_o+8*8)($pDst), $X[1] | ||
1004 | mov (+$pDst_o+8*9)($pDst), $X[2] | ||
1005 | mov (+$pDst_o+8*10)($pDst), $X[3] | ||
1006 | mov (+$pDst_o+8*11)($pDst), $X[4] | ||
1007 | mov (+$pDst_o+8*12)($pDst), $X[5] | ||
1008 | mov (+$pDst_o+8*13)($pDst), $x6 | ||
1009 | mov (+$pDst_o+8*14)($pDst), $x7 | ||
1010 | |||
1011 | mov $X[7], %rax | ||
1012 | mul %rax | ||
1013 | mov %rax, $X[7] | ||
1014 | mov %rdx, $A | ||
1015 | |||
1016 | add $X[0], $X[0] | ||
1017 | adc $X[1], $X[1] | ||
1018 | adc $X[2], $X[2] | ||
1019 | adc $X[3], $X[3] | ||
1020 | adc $X[4], $X[4] | ||
1021 | adc $X[5], $X[5] | ||
1022 | adc $x6, $x6 | ||
1023 | adc $x7, $x7 | ||
1024 | adc \$0, $A | ||
1025 | |||
1026 | add $tmp, $X[0] | ||
1027 | |||
1028 | mov (+8*4)($pA), %rax | ||
1029 | mul %rax | ||
1030 | |||
1031 | add $X[6], $X[0] | ||
1032 | adc %rax, $X[1] | ||
1033 | adc \$0, %rdx | ||
1034 | |||
1035 | mov %rdx, $tmp | ||
1036 | |||
1037 | mov $X[0], (+$pDst_o+8*7)($pDst) | ||
1038 | mov $X[1], (+$pDst_o+8*8)($pDst) | ||
1039 | |||
1040 | mov (+8*5)($pA), %rax | ||
1041 | mul %rax | ||
1042 | |||
1043 | add $tmp, $X[2] | ||
1044 | adc %rax, $X[3] | ||
1045 | adc \$0, %rdx | ||
1046 | |||
1047 | mov %rdx, $tmp | ||
1048 | |||
1049 | mov $X[2], (+$pDst_o+8*9)($pDst) | ||
1050 | mov $X[3], (+$pDst_o+8*10)($pDst) | ||
1051 | |||
1052 | mov (+8*6)($pA), %rax | ||
1053 | mul %rax | ||
1054 | |||
1055 | add $tmp, $X[4] | ||
1056 | adc %rax, $X[5] | ||
1057 | adc \$0, %rdx | ||
1058 | |||
1059 | mov $X[4], (+$pDst_o+8*11)($pDst) | ||
1060 | mov $X[5], (+$pDst_o+8*12)($pDst) | ||
1061 | |||
1062 | add %rdx, $x6 | ||
1063 | adc $X[7], $x7 | ||
1064 | adc \$0, $A | ||
1065 | |||
1066 | mov $x6, (+$pDst_o+8*13)($pDst) | ||
1067 | mov $x7, (+$pDst_o+8*14)($pDst) | ||
1068 | mov $A, (+$pDst_o+8*15)($pDst) | ||
1069 | ___ | ||
1070 | } | ||
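The finalize sequence above uses the classic squaring shortcut: the seven passes compute only the off-diagonal cross products a[i]*a[j] (i < j), after which every column is doubled and the diagonal squares a[i]^2 are folded in. A minimal Perl sketch of the same column arithmetic, with toy limbs small enough for native integers and carry propagation left out:

    sub sqr_limbs {
        my @a = @_;                          # little-endian limbs
        my @r = (0) x (2*@a);
        for my $i (0 .. $#a) {               # cross products only
            $r[$i+$_] += $a[$i] * $a[$_] for $i+1 .. $#a;
        }
        $r[$_] *= 2 for 0 .. $#r;            # double the off-terms
        $r[2*$_] += $a[$_] * $a[$_] for 0 .. $#a;  # add in the squares
        return @r;                           # column sums, carries omitted
    }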
1071 | |||
1072 | # | ||
1073 | # sqr_reduce: subroutine to compute Result = reduce(Result * Result) | ||
1074 | # | ||
1075 | # input and result also in: r9, r8, r15, r14, r13, r12, r11, r10 | ||
1076 | # | ||
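mont_reduce itself is outside this hunk; as a reminder of what reduce() stands for, here is a hedged, textbook word-level Montgomery REDC in Perl. It assumes R = 2^(64*limbs) and n0 = -m^{-1} mod 2^64; the module's own reduction is free to organize the work differently.

    use Math::BigInt;
    sub redc {
        my ($t, $m, $n0, $limbs) = @_;       # Math::BigInt; $t < $m * R
        my $w = Math::BigInt->new(1)->blsft(64);
        for (1 .. $limbs) {
            my $u = $t->copy->bmod($w)->bmul($n0)->bmod($w);
            $t->badd($u->bmul($m))->brsft(64);   # kill the low word
        }
        $t->bsub($m) if $t->bcmp($m) >= 0;
        return $t;                           # == input * R^{-1} mod m
    }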
1077 | $code.=<<___; | ||
1078 | .type sqr_reduce,\@abi-omnipotent | ||
1079 | .align 16 | ||
1080 | sqr_reduce: | ||
1081 | mov (+$pResult_offset+8)(%rsp), %rcx | ||
1082 | ___ | ||
1083 | &SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi"); | ||
1084 | $code.=<<___; | ||
1085 | # tail recursion optimization: jmp to mont_reduce and return from there | ||
1086 | jmp mont_reduce | ||
1087 | # call mont_reduce | ||
1088 | # ret | ||
1089 | .size sqr_reduce,.-sqr_reduce | ||
1090 | ___ | ||
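The commented-out call/ret pair spells out the trick: jumping into mont_reduce lets its ret return directly to sqr_reduce's caller, saving a call frame. Perl happens to have the same idiom in goto &sub; a toy sketch with hypothetical names:

    sub reduce_step { return $_[0] % 97 }   # stand-in for mont_reduce
    sub square_reduce {
        @_ = ($_[0] * $_[0]);               # square first...
        goto &reduce_step;                  # ...then tail-jump; its return
    }                                       # goes straight to our caller
    print square_reduce(12), "\n";          # prints 47 (144 % 97)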
1091 | }}} | ||
1092 | |||
1093 | # | ||
1094 | # MAIN FUNCTION | ||
1095 | # | ||
1096 | |||
1097 | #mod_exp_512(UINT64 *result, /* 512 bits, 8 qwords */ | ||
1098 | # UINT64 *g, /* 512 bits, 8 qwords */ | ||
1099 | # UINT64 *exp, /* 512 bits, 8 qwords */ | ||
1100 | # struct mod_ctx_512 *data) | ||
1101 | |||
1102 | # window size = 5 | ||
1103 | # table size = 2^5 = 32 | ||
1104 | #table_entries equ 32 | ||
1105 | #table_size equ table_entries * 8 | ||
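Before diving into the assembly, a hedged Perl model of the 5-bit fixed-window exponentiation being implemented (plain modular arithmetic here; the real code stays in Montgomery form throughout and fetches table entries through the swizzled layout):

    use Math::BigInt;
    sub mod_exp_window5 {
        my ($g, $e, $m) = @_;                # Math::BigInt each, $e > 0
        my @tab = (Math::BigInt->bone);      # tab[i] = g^i mod m, i = 0..31
        $tab[$_] = $tab[$_-1] * $g % $m for 1 .. 31;
        my $bits = length($e->as_bin) - 2;   # strip the leading "0b"
        my $top  = $bits % 5 || 5;           # first window may be short
        my $r    = $tab[ (($e >> ($bits - $top)) & 31)->numify ];
        for (my $i = $bits - $top; $i > 0; $i -= 5) {
            $r = $r * $r % $m for 1 .. 5;    # five squarings per window
            my $w = (($e >> ($i - 5)) & 31)->numify;
            $r = $r * $tab[$w] % $m;         # one table multiply
        }
        return $r;
    }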
1106 | $code.=<<___; | ||
1107 | .globl mod_exp_512 | ||
1108 | .type mod_exp_512,\@function,4 | ||
1109 | mod_exp_512: | ||
1110 | push %rbp | ||
1111 | push %rbx | ||
1112 | push %r12 | ||
1113 | push %r13 | ||
1114 | push %r14 | ||
1115 | push %r15 | ||
1116 | |||
1117 | # adjust stack down and then align it with cache boundary | ||
1118 | mov %rsp, %r8 | ||
1119 | sub \$$mem_size, %rsp | ||
1120 | and \$-64, %rsp | ||
1121 | |||
1122 | # store previous stack pointer and arguments | ||
1123 | mov %r8, (+$rsp_offset)(%rsp) | ||
1124 | mov %rdi, (+$pResult_offset)(%rsp) | ||
1125 | mov %rsi, (+$pG_offset)(%rsp) | ||
1126 | mov %rcx, (+$pData_offset)(%rsp) | ||
1127 | .Lbody: | ||
1128 | # transform g into montgomery space | ||
1129 | # GT = reduce(g * C2) = reduce(g * (2^256)) | ||
1130 | # reduce expects to have the input in [tmp16] | ||
1131 | pxor %xmm4, %xmm4 | ||
1132 | movdqu (+16*0)(%rsi), %xmm0 | ||
1133 | movdqu (+16*1)(%rsi), %xmm1 | ||
1134 | movdqu (+16*2)(%rsi), %xmm2 | ||
1135 | movdqu (+16*3)(%rsi), %xmm3 | ||
1136 | movdqa %xmm4, (+$tmp16_offset+16*0)(%rsp) | ||
1137 | movdqa %xmm4, (+$tmp16_offset+16*1)(%rsp) | ||
1138 | movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp) | ||
1139 | movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp) | ||
1140 | movdqa %xmm0, (+$tmp16_offset+16*2)(%rsp) | ||
1141 | movdqa %xmm1, (+$tmp16_offset+16*3)(%rsp) | ||
1142 | movdqa %xmm2, (+$tmp16_offset+16*4)(%rsp) | ||
1143 | movdqa %xmm3, (+$tmp16_offset+16*5)(%rsp) | ||
1144 | |||
1145 | # load pExp before rdx gets blown away | ||
1146 | movdqu (+16*0)(%rdx), %xmm0 | ||
1147 | movdqu (+16*1)(%rdx), %xmm1 | ||
1148 | movdqu (+16*2)(%rdx), %xmm2 | ||
1149 | movdqu (+16*3)(%rdx), %xmm3 | ||
1150 | |||
1151 | lea (+$GT_offset)(%rsp), %rbx | ||
1152 | mov %rbx, (+$red_result_addr_offset)(%rsp) | ||
1153 | call mont_reduce | ||
1154 | |||
1155 | # Initialize tmp = C | ||
1156 | lea (+$tmp_offset)(%rsp), %rcx | ||
1157 | xor %rax, %rax | ||
1158 | mov %rax, (+8*0)(%rcx) | ||
1159 | mov %rax, (+8*1)(%rcx) | ||
1160 | mov %rax, (+8*3)(%rcx) | ||
1161 | mov %rax, (+8*4)(%rcx) | ||
1162 | mov %rax, (+8*5)(%rcx) | ||
1163 | mov %rax, (+8*6)(%rcx) | ||
1164 | mov %rax, (+8*7)(%rcx) | ||
1165 | mov %rax, (+$exp_offset+8*8)(%rsp) | ||
1166 | movq \$1, (+8*2)(%rcx) | ||
1167 | |||
1168 | lea (+$garray_offset)(%rsp), %rbp | ||
1169 | mov %rcx, %rsi # pTmp | ||
1170 | mov %rbp, %rdi # Garray[][0] | ||
1171 | ___ | ||
1172 | |||
1173 | &swizzle("%rdi", "%rcx", "%rax", "%rbx"); | ||
1174 | |||
1175 | # for (rax = 31; rax != 0; rax--) { | ||
1176 | # tmp = reduce(tmp * G) | ||
1177 | # swizzle(pg, tmp); | ||
1178 | # pg += 2; } | ||
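swizzle() is defined earlier in the file and not shown in this hunk; judging from the 2-byte strides used on garray here, it scatters each table entry 16 bits at a time so that a lookup later touches the same set of cache lines no matter what the (secret) window value is. A hypothetical scalar model of such a layout, names and strides assumed:

    sub swizzle_store {
        my ($tab, $k, @limbs) = @_;          # entry $k of 32, 64-bit limbs
        for my $i (0 .. $#limbs) {
            for my $j (0 .. 3) {             # four 16-bit pieces per limb
                $tab->[($i*4 + $j)*32 + $k] = ($limbs[$i] >> 16*$j) & 0xffff;
            }
        }
    }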
1179 | $code.=<<___; | ||
1180 | mov \$31, %rax | ||
1181 | mov %rax, (+$i_offset)(%rsp) | ||
1182 | mov %rbp, (+$pg_offset)(%rsp) | ||
1183 | # rsi -> pTmp | ||
1184 | mov %rsi, (+$red_result_addr_offset)(%rsp) | ||
1185 | mov (+8*0)(%rsi), %r10 | ||
1186 | mov (+8*1)(%rsi), %r11 | ||
1187 | mov (+8*2)(%rsi), %r12 | ||
1188 | mov (+8*3)(%rsi), %r13 | ||
1189 | mov (+8*4)(%rsi), %r14 | ||
1190 | mov (+8*5)(%rsi), %r15 | ||
1191 | mov (+8*6)(%rsi), %r8 | ||
1192 | mov (+8*7)(%rsi), %r9 | ||
1193 | init_loop: | ||
1194 | lea (+$GT_offset)(%rsp), %rdi | ||
1195 | call mont_mul_a3b | ||
1196 | lea (+$tmp_offset)(%rsp), %rsi | ||
1197 | mov (+$pg_offset)(%rsp), %rbp | ||
1198 | add \$2, %rbp | ||
1199 | mov %rbp, (+$pg_offset)(%rsp) | ||
1200 | mov %rsi, %rcx # rcx = rsi = addr of tmp | ||
1201 | ___ | ||
1202 | |||
1203 | &swizzle("%rbp", "%rcx", "%rax", "%rbx"); | ||
1204 | $code.=<<___; | ||
1205 | mov (+$i_offset)(%rsp), %rax | ||
1206 | sub \$1, %rax | ||
1207 | mov %rax, (+$i_offset)(%rsp) | ||
1208 | jne init_loop | ||
1209 | |||
1210 | # | ||
1211 | # Copy exponent onto stack | ||
1212 | movdqa %xmm0, (+$exp_offset+16*0)(%rsp) | ||
1213 | movdqa %xmm1, (+$exp_offset+16*1)(%rsp) | ||
1214 | movdqa %xmm2, (+$exp_offset+16*2)(%rsp) | ||
1215 | movdqa %xmm3, (+$exp_offset+16*3)(%rsp) | ||
1216 | |||
1217 | |||
1218 | # | ||
1219 | # Do exponentiation | ||
1220 | # Initialize result to G[exp{511:507}] | ||
1221 | mov (+$exp_offset+62)(%rsp), %eax | ||
1222 | mov %rax, %rdx | ||
1223 | shr \$11, %rax | ||
1224 | and \$0x07FF, %edx | ||
1225 | mov %edx, (+$exp_offset+62)(%rsp) | ||
1226 | lea (+$garray_offset)(%rsp,%rax,2), %rsi | ||
1227 | mov (+$pResult_offset)(%rsp), %rdx | ||
1228 | ___ | ||
1229 | |||
1230 | &unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax"); | ||
1231 | |||
1232 | # | ||
1233 | # Loop variables | ||
1234 | # rcx = [loop_idx] = index: 510-5 to 0 by 5 | ||
1235 | $code.=<<___; | ||
1236 | movq \$505, (+$loop_idx_offset)(%rsp) | ||
1237 | |||
1238 | mov (+$pResult_offset)(%rsp), %rcx | ||
1239 | mov %rcx, (+$red_result_addr_offset)(%rsp) | ||
1240 | mov (+8*0)(%rcx), %r10 | ||
1241 | mov (+8*1)(%rcx), %r11 | ||
1242 | mov (+8*2)(%rcx), %r12 | ||
1243 | mov (+8*3)(%rcx), %r13 | ||
1244 | mov (+8*4)(%rcx), %r14 | ||
1245 | mov (+8*5)(%rcx), %r15 | ||
1246 | mov (+8*6)(%rcx), %r8 | ||
1247 | mov (+8*7)(%rcx), %r9 | ||
1248 | jmp sqr_2 | ||
1249 | |||
1250 | main_loop_a3b: | ||
1251 | call sqr_reduce | ||
1252 | call sqr_reduce | ||
1253 | call sqr_reduce | ||
1254 | sqr_2: | ||
1255 | call sqr_reduce | ||
1256 | call sqr_reduce | ||
1257 | |||
1258 | # | ||
1259 | # Do multiply, first look up proper value in Garray | ||
1260 | mov (+$loop_idx_offset)(%rsp), %rcx # bit index | ||
1261 | mov %rcx, %rax | ||
1262 | shr \$4, %rax # rax is word pointer | ||
1263 | mov (+$exp_offset)(%rsp,%rax,2), %edx | ||
1264 | and \$15, %rcx | ||
1265 | shrq %cl, %rdx | ||
1266 | and \$0x1F, %rdx | ||
1267 | |||
1268 | lea (+$garray_offset)(%rsp,%rdx,2), %rsi | ||
1269 | lea (+$tmp_offset)(%rsp), %rdx | ||
1270 | mov %rdx, %rdi | ||
1271 | ___ | ||
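The shrq/and pair above extracts the next 5-bit window from an exponent viewed as 16-bit words: the word index is bit_index>>4, the shift count is bit_index&15, and loading 32 bits at 16-bit granularity guarantees the window never runs past the loaded value. The same extraction in Perl (hypothetical helper):

    sub window5 {
        my ($exp, $idx) = @_;                # little-endian 16-bit words
        my $w   = $idx >> 4;                 # which word
        my $val = $exp->[$w] | (($exp->[$w+1] // 0) << 16);  # 32-bit load
        return ($val >> ($idx & 15)) & 0x1f;
    }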
1272 | |||
1273 | &unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax"); | ||
1274 | # rdi = tmp = pG | ||
1275 | |||
1276 | # | ||
1277 | # Call mod_mul_a1(pDst, pSrc1, pSrc2, pM, pData) | ||
1278 | # result result pG M Data | ||
1279 | $code.=<<___; | ||
1280 | mov (+$pResult_offset)(%rsp), %rsi | ||
1281 | call mont_mul_a3b | ||
1282 | |||
1283 | # | ||
1284 | # finish loop | ||
1285 | mov (+$loop_idx_offset)(%rsp), %rcx | ||
1286 | sub \$5, %rcx | ||
1287 | mov %rcx, (+$loop_idx_offset)(%rsp) | ||
1288 | jge main_loop_a3b | ||
1289 | |||
1290 | # | ||
1291 | |||
1292 | end_main_loop_a3b: | ||
1293 | # transform result out of Montgomery space | ||
1294 | # result = reduce(result) | ||
1295 | mov (+$pResult_offset)(%rsp), %rdx | ||
1296 | pxor %xmm4, %xmm4 | ||
1297 | movdqu (+16*0)(%rdx), %xmm0 | ||
1298 | movdqu (+16*1)(%rdx), %xmm1 | ||
1299 | movdqu (+16*2)(%rdx), %xmm2 | ||
1300 | movdqu (+16*3)(%rdx), %xmm3 | ||
1301 | movdqa %xmm4, (+$tmp16_offset+16*4)(%rsp) | ||
1302 | movdqa %xmm4, (+$tmp16_offset+16*5)(%rsp) | ||
1303 | movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp) | ||
1304 | movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp) | ||
1305 | movdqa %xmm0, (+$tmp16_offset+16*0)(%rsp) | ||
1306 | movdqa %xmm1, (+$tmp16_offset+16*1)(%rsp) | ||
1307 | movdqa %xmm2, (+$tmp16_offset+16*2)(%rsp) | ||
1308 | movdqa %xmm3, (+$tmp16_offset+16*3)(%rsp) | ||
1309 | call mont_reduce | ||
1310 | |||
1311 | # If result > m, subtract m | ||
1312 | # load result into r15:r8 | ||
1313 | mov (+$pResult_offset)(%rsp), %rax | ||
1314 | mov (+8*0)(%rax), %r8 | ||
1315 | mov (+8*1)(%rax), %r9 | ||
1316 | mov (+8*2)(%rax), %r10 | ||
1317 | mov (+8*3)(%rax), %r11 | ||
1318 | mov (+8*4)(%rax), %r12 | ||
1319 | mov (+8*5)(%rax), %r13 | ||
1320 | mov (+8*6)(%rax), %r14 | ||
1321 | mov (+8*7)(%rax), %r15 | ||
1322 | |||
1323 | # subtract m | ||
1324 | mov (+$pData_offset)(%rsp), %rbx | ||
1325 | add \$$M, %rbx | ||
1326 | |||
1327 | sub (+8*0)(%rbx), %r8 | ||
1328 | sbb (+8*1)(%rbx), %r9 | ||
1329 | sbb (+8*2)(%rbx), %r10 | ||
1330 | sbb (+8*3)(%rbx), %r11 | ||
1331 | sbb (+8*4)(%rbx), %r12 | ||
1332 | sbb (+8*5)(%rbx), %r13 | ||
1333 | sbb (+8*6)(%rbx), %r14 | ||
1334 | sbb (+8*7)(%rbx), %r15 | ||
1335 | |||
1336 | # if Carry is clear, replace result with difference | ||
1337 | mov (+8*0)(%rax), %rsi | ||
1338 | mov (+8*1)(%rax), %rdi | ||
1339 | mov (+8*2)(%rax), %rcx | ||
1340 | mov (+8*3)(%rax), %rdx | ||
1341 | cmovnc %r8, %rsi | ||
1342 | cmovnc %r9, %rdi | ||
1343 | cmovnc %r10, %rcx | ||
1344 | cmovnc %r11, %rdx | ||
1345 | mov %rsi, (+8*0)(%rax) | ||
1346 | mov %rdi, (+8*1)(%rax) | ||
1347 | mov %rcx, (+8*2)(%rax) | ||
1348 | mov %rdx, (+8*3)(%rax) | ||
1349 | |||
1350 | mov (+8*4)(%rax), %rsi | ||
1351 | mov (+8*5)(%rax), %rdi | ||
1352 | mov (+8*6)(%rax), %rcx | ||
1353 | mov (+8*7)(%rax), %rdx | ||
1354 | cmovnc %r12, %rsi | ||
1355 | cmovnc %r13, %rdi | ||
1356 | cmovnc %r14, %rcx | ||
1357 | cmovnc %r15, %rdx | ||
1358 | mov %rsi, (+8*4)(%rax) | ||
1359 | mov %rdi, (+8*5)(%rax) | ||
1360 | mov %rcx, (+8*6)(%rax) | ||
1361 | mov %rdx, (+8*7)(%rax) | ||
1362 | |||
1363 | mov (+$rsp_offset)(%rsp), %rsi | ||
1364 | mov 0(%rsi),%r15 | ||
1365 | mov 8(%rsi),%r14 | ||
1366 | mov 16(%rsi),%r13 | ||
1367 | mov 24(%rsi),%r12 | ||
1368 | mov 32(%rsi),%rbx | ||
1369 | mov 40(%rsi),%rbp | ||
1370 | lea 48(%rsi),%rsp | ||
1371 | .Lepilogue: | ||
1372 | ret | ||
1373 | .size mod_exp_512, . - mod_exp_512 | ||
1374 | ___ | ||
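The sbb/cmovnc run that closes mod_exp_512 is a branchless final reduction: compute result - m with borrow propagation, then keep the difference only when no borrow falls out the top. Functionally (though not in constant time) it amounts to:

    sub final_reduce {
        my ($r, $m) = @_;                    # array refs, 32-bit toy limbs
        my ($borrow, @d) = (0);
        for my $i (0 .. $#$r) {
            my $t = $r->[$i] - $m->[$i] - $borrow;
            $borrow = $t < 0 ? 1 : 0;
            push @d, $t & 0xffffffff;
        }
        return $borrow ? $r : \@d;           # no borrow => r >= m, take r - m
    }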
1375 | |||
1376 | if ($win64) { | ||
1377 | # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, | ||
1378 | # CONTEXT *context,DISPATCHER_CONTEXT *disp) | ||
1379 | my $rec="%rcx"; | ||
1380 | my $frame="%rdx"; | ||
1381 | my $context="%r8"; | ||
1382 | my $disp="%r9"; | ||
1383 | |||
1384 | $code.=<<___; | ||
1385 | .extern __imp_RtlVirtualUnwind | ||
1386 | .type mod_exp_512_se_handler,\@abi-omnipotent | ||
1387 | .align 16 | ||
1388 | mod_exp_512_se_handler: | ||
1389 | push %rsi | ||
1390 | push %rdi | ||
1391 | push %rbx | ||
1392 | push %rbp | ||
1393 | push %r12 | ||
1394 | push %r13 | ||
1395 | push %r14 | ||
1396 | push %r15 | ||
1397 | pushfq | ||
1398 | sub \$64,%rsp | ||
1399 | |||
1400 | mov 120($context),%rax # pull context->Rax | ||
1401 | mov 248($context),%rbx # pull context->Rip | ||
1402 | |||
1403 | lea .Lbody(%rip),%r10 | ||
1404 | cmp %r10,%rbx # context->Rip<prologue label | ||
1405 | jb .Lin_prologue | ||
1406 | |||
1407 | mov 152($context),%rax # pull context->Rsp | ||
1408 | |||
1409 | lea .Lepilogue(%rip),%r10 | ||
1410 | cmp %r10,%rbx # context->Rip>=epilogue label | ||
1411 | jae .Lin_prologue | ||
1412 | |||
1413 | mov $rsp_offset(%rax),%rax # pull saved Rsp | ||
1414 | |||
1415 | mov 32(%rax),%rbx | ||
1416 | mov 40(%rax),%rbp | ||
1417 | mov 24(%rax),%r12 | ||
1418 | mov 16(%rax),%r13 | ||
1419 | mov 8(%rax),%r14 | ||
1420 | mov 0(%rax),%r15 | ||
1421 | lea 48(%rax),%rax | ||
1422 | mov %rbx,144($context) # restore context->Rbx | ||
1423 | mov %rbp,160($context) # restore context->Rbp | ||
1424 | mov %r12,216($context) # restore context->R12 | ||
1425 | mov %r13,224($context) # restore context->R13 | ||
1426 | mov %r14,232($context) # restore context->R14 | ||
1427 | mov %r15,240($context) # restore context->R15 | ||
1428 | |||
1429 | .Lin_prologue: | ||
1430 | mov 8(%rax),%rdi | ||
1431 | mov 16(%rax),%rsi | ||
1432 | mov %rax,152($context) # restore context->Rsp | ||
1433 | mov %rsi,168($context) # restore context->Rsi | ||
1434 | mov %rdi,176($context) # restore context->Rdi | ||
1435 | |||
1436 | mov 40($disp),%rdi # disp->ContextRecord | ||
1437 | mov $context,%rsi # context | ||
1438 | mov \$154,%ecx # sizeof(CONTEXT) | ||
1439 | .long 0xa548f3fc # cld; rep movsq | ||
1440 | |||
1441 | mov $disp,%rsi | ||
1442 | xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER | ||
1443 | mov 8(%rsi),%rdx # arg2, disp->ImageBase | ||
1444 | mov 0(%rsi),%r8 # arg3, disp->ControlPc | ||
1445 | mov 16(%rsi),%r9 # arg4, disp->FunctionEntry | ||
1446 | mov 40(%rsi),%r10 # disp->ContextRecord | ||
1447 | lea 56(%rsi),%r11 # &disp->HandlerData | ||
1448 | lea 24(%rsi),%r12 # &disp->EstablisherFrame | ||
1449 | mov %r10,32(%rsp) # arg5 | ||
1450 | mov %r11,40(%rsp) # arg6 | ||
1451 | mov %r12,48(%rsp) # arg7 | ||
1452 | mov %rcx,56(%rsp) # arg8, (NULL) | ||
1453 | call *__imp_RtlVirtualUnwind(%rip) | ||
1454 | |||
1455 | mov \$1,%eax # ExceptionContinueSearch | ||
1456 | add \$64,%rsp | ||
1457 | popfq | ||
1458 | pop %r15 | ||
1459 | pop %r14 | ||
1460 | pop %r13 | ||
1461 | pop %r12 | ||
1462 | pop %rbp | ||
1463 | pop %rbx | ||
1464 | pop %rdi | ||
1465 | pop %rsi | ||
1466 | ret | ||
1467 | .size mod_exp_512_se_handler,.-mod_exp_512_se_handler | ||
1468 | |||
1469 | .section .pdata | ||
1470 | .align 4 | ||
1471 | .rva .LSEH_begin_mod_exp_512 | ||
1472 | .rva .LSEH_end_mod_exp_512 | ||
1473 | .rva .LSEH_info_mod_exp_512 | ||
1474 | |||
1475 | .section .xdata | ||
1476 | .align 8 | ||
1477 | .LSEH_info_mod_exp_512: | ||
1478 | .byte 9,0,0,0 | ||
1479 | .rva mod_exp_512_se_handler | ||
1480 | ___ | ||
1481 | } | ||
1482 | |||
1483 | sub reg_part { | ||
1484 | my ($reg,$conv)=@_; | ||
1485 | if ($reg =~ /%r[0-9]+/) { $reg .= $conv; } | ||
1486 | elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; } | ||
1487 | elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; } | ||
1488 | elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; } | ||
1489 | return $reg; | ||
1490 | } | ||
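A few concrete expansions of the #b/#w/#d shorthand, traced through the substitutions above:

    # reg_part("%r10", "b")  ->  "%r10b"   (numbered register: just append)
    # reg_part("%rax", "b")  ->  "%al"
    # reg_part("%rax", "w")  ->  "%ax"
    # reg_part("%rax", "d")  ->  "%eax"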
1491 | |||
1492 | $code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem; | ||
1493 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
1494 | $code =~ s/(\(\+[^)]+\))/eval $1/gem; | ||
1495 | print $code; | ||
1496 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/bn/asm/parisc-mont.pl b/src/lib/libcrypto/bn/asm/parisc-mont.pl new file mode 100644 index 0000000000..4a766a87fb --- /dev/null +++ b/src/lib/libcrypto/bn/asm/parisc-mont.pl | |||
@@ -0,0 +1,993 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | |||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | |||
10 | # On PA-7100LC this module performs ~50-90% better (less for longer | ||
11 | # keys) than code generated by gcc 3.2 for PA-RISC 1.1. The latter means | ||
12 | # that the compiler utilized the xmpyu instruction to perform 32x32=64-bit | ||
13 | # multiplication, which in turn means that "baseline" performance was | ||
14 | # optimal with respect to instruction set capabilities. A fair comparison | ||
15 | # with the vendor compiler is problematic, because OpenSSL doesn't define | ||
16 | # BN_LLONG [presumably] for historical reasons, which drives the compiler | ||
17 | # toward 4 times 16x16=32-bit multiplications [plus complementary | ||
18 | # shifts and additions] instead. This means that you should observe | ||
19 | # a severalfold improvement over code generated by the vendor compiler | ||
20 | # for PA-RISC 1.1, but the "baseline" is far from optimal. The actual | ||
21 | # improvement coefficient was never collected on PA-7100LC, or any | ||
22 | # other 1.1 CPU, because I don't have access to such a machine with | ||
23 | # the vendor compiler. But to give you a taste, the PA-RISC 1.1 code path | ||
24 | # reportedly outperformed code generated by cc +DA1.1 +O3 by factor | ||
25 | # of ~5x on PA-8600. | ||
26 | # | ||
27 | # On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is | ||
28 | # reportedly ~2x faster than vendor compiler generated code [according | ||
29 | # to comment in pa-risc2[W].s]. Here comes a catch. Execution core of | ||
30 | # this implementation is actually 32-bit one, in the sense that it | ||
31 | # operates on 32-bit values. But pa-risc2[W].s operates on arrays of | ||
32 | # 64-bit BN_LONGs... How do they interoperate then? No problem. This | ||
33 | # module picks halves of 64-bit values in reverse order and pretends | ||
34 | # they were 32-bit BN_LONGs. But can a 32-bit core compete with "pure" | ||
35 | # 64-bit code such as pa-risc2[W].s then? Well, the thing is that | ||
36 | # 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do, | ||
37 | # i.e. there is no "wider" multiplication like on most other 64-bit | ||
38 | # platforms. This means that even being effectively 32-bit, this | ||
39 | # implementation performs the "64-bit" computational task in the same amount | ||
40 | # of arithmetic operations, most notably multiplications. It requires | ||
41 | # more memory references, most notably to tp[num], but this doesn't | ||
42 | # seem to exhaust memory port capacity. And indeed, the dedicated PA-RISC | ||
43 | # 2.0 code path provides virtually the same performance as pa-risc2[W].s: | ||
44 | # it's ~10% better for shortest key length and ~10% worse for longest | ||
45 | # one. | ||
46 | # | ||
47 | # In case it wasn't clear. The module has two distinct code paths: | ||
48 | # PA-RISC 1.1 and PA-RISC 2.0 ones. Latter features carry-free 64-bit | ||
49 | # additions and 64-bit integer loads, not to mention specific | ||
50 | # instruction scheduling. In 64-bit build naturally only 2.0 code path | ||
51 | # is assembled. In 32-bit application context both code paths are | ||
52 | # assembled, PA-RISC 2.0 CPU is detected at run-time and proper path | ||
53 | # is taken automatically. Also, in 32-bit build the module imposes | ||
54 | # a couple of limitations: vector lengths have to be even and vector | ||
55 | # addresses have to be 64-bit aligned. Normally neither is a problem: | ||
56 | # most common key lengths are even and vectors are commonly malloc-ed, | ||
57 | # which ensures alignment. | ||
58 | # | ||
59 | # Special thanks to polarhome.com for providing an HP-UX account on a | ||
60 | # PA-RISC 1.1 machine, and to a correspondent who chose to remain | ||
61 | # anonymous for testing the code on a PA-RISC 2.0 machine. | ||
62 | |||
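The half-picking described above boils down to swapping the 32-bit halves of each 64-bit BN_ULONG so that, on big-endian PA-RISC, the halves stream past in little-endian limb order; the "flip word order" shrpd instructions further down do exactly this swap. A hedged scalar model:

    sub flip_word_order {                    # swap halves of each limb
        return map { (($_ & 0xffffffff) << 32) | (($_ >> 32) & 0xffffffff) } @_;
    }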
63 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
64 | |||
65 | $flavour = shift; | ||
66 | $output = shift; | ||
67 | |||
68 | open STDOUT,">$output"; | ||
69 | |||
70 | if ($flavour =~ /64/) { | ||
71 | $LEVEL ="2.0W"; | ||
72 | $SIZE_T =8; | ||
73 | $FRAME_MARKER =80; | ||
74 | $SAVED_RP =16; | ||
75 | $PUSH ="std"; | ||
76 | $PUSHMA ="std,ma"; | ||
77 | $POP ="ldd"; | ||
78 | $POPMB ="ldd,mb"; | ||
79 | $BN_SZ =$SIZE_T; | ||
80 | } else { | ||
81 | $LEVEL ="1.1"; #$LEVEL.="\n\t.ALLOW\t2.0"; | ||
82 | $SIZE_T =4; | ||
83 | $FRAME_MARKER =48; | ||
84 | $SAVED_RP =20; | ||
85 | $PUSH ="stw"; | ||
86 | $PUSHMA ="stwm"; | ||
87 | $POP ="ldw"; | ||
88 | $POPMB ="ldwm"; | ||
89 | $BN_SZ =$SIZE_T; | ||
90 | if (open CONF,"<${dir}../../opensslconf.h") { | ||
91 | while(<CONF>) { | ||
92 | if (m/#\s*define\s+SIXTY_FOUR_BIT/) { | ||
93 | $BN_SZ=8; | ||
94 | $LEVEL="2.0"; | ||
95 | last; | ||
96 | } | ||
97 | } | ||
98 | close CONF; | ||
99 | } | ||
100 | } | ||
101 | |||
102 | $FRAME=8*$SIZE_T+$FRAME_MARKER; # 8 saved regs + frame marker | ||
103 | # [+ argument transfer] | ||
104 | $LOCALS=$FRAME-$FRAME_MARKER; | ||
105 | $FRAME+=32; # local variables | ||
106 | |||
107 | $tp="%r31"; | ||
108 | $ti1="%r29"; | ||
109 | $ti0="%r28"; | ||
110 | |||
111 | $rp="%r26"; | ||
112 | $ap="%r25"; | ||
113 | $bp="%r24"; | ||
114 | $np="%r23"; | ||
115 | $n0="%r22"; # passed through stack in 32-bit | ||
116 | $num="%r21"; # passed through stack in 32-bit | ||
117 | $idx="%r20"; | ||
118 | $arrsz="%r19"; | ||
119 | |||
120 | $nm1="%r7"; | ||
121 | $nm0="%r6"; | ||
122 | $ab1="%r5"; | ||
123 | $ab0="%r4"; | ||
124 | |||
125 | $fp="%r3"; | ||
126 | $hi1="%r2"; | ||
127 | $hi0="%r1"; | ||
128 | |||
129 | $xfer=$n0; # accommodates [-16..15] offset in fld[dw]s | ||
130 | |||
131 | $fm0="%fr4"; $fti=$fm0; | ||
132 | $fbi="%fr5L"; | ||
133 | $fn0="%fr5R"; | ||
134 | $fai="%fr6"; $fab0="%fr7"; $fab1="%fr8"; | ||
135 | $fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11"; | ||
136 | |||
137 | $code=<<___; | ||
138 | .LEVEL $LEVEL | ||
139 | .SPACE \$TEXT\$ | ||
140 | .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY | ||
141 | |||
142 | .EXPORT bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR | ||
143 | .ALIGN 64 | ||
144 | bn_mul_mont | ||
145 | .PROC | ||
146 | .CALLINFO FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6 | ||
147 | .ENTRY | ||
148 | $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue | ||
149 | $PUSHMA %r3,$FRAME(%sp) | ||
150 | $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp) | ||
151 | $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp) | ||
152 | $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp) | ||
153 | $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp) | ||
154 | $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp) | ||
155 | $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp) | ||
156 | $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp) | ||
157 | ldo -$FRAME(%sp),$fp | ||
158 | ___ | ||
159 | $code.=<<___ if ($SIZE_T==4); | ||
160 | ldw `-$FRAME_MARKER-4`($fp),$n0 | ||
161 | ldw `-$FRAME_MARKER-8`($fp),$num | ||
162 | nop | ||
163 | nop ; alignment | ||
164 | ___ | ||
165 | $code.=<<___ if ($BN_SZ==4); | ||
166 | comiclr,<= 6,$num,%r0 ; are vectors long enough? | ||
167 | b L\$abort | ||
168 | ldi 0,%r28 ; signal "unhandled" | ||
169 | add,ev %r0,$num,$num ; is $num even? | ||
170 | b L\$abort | ||
171 | nop | ||
172 | or $ap,$np,$ti1 | ||
173 | extru,= $ti1,31,3,%r0 ; are ap and np 64-bit aligned? | ||
174 | b L\$abort | ||
175 | nop | ||
176 | nop ; alignment | ||
177 | nop | ||
178 | |||
179 | fldws 0($n0),${fn0} | ||
180 | fldws,ma 4($bp),${fbi} ; bp[0] | ||
181 | ___ | ||
182 | $code.=<<___ if ($BN_SZ==8); | ||
183 | comib,> 3,$num,L\$abort ; are vectors long enough? | ||
184 | ldi 0,%r28 ; signal "unhandled" | ||
185 | addl $num,$num,$num ; I operate on 32-bit values | ||
186 | |||
187 | fldws 4($n0),${fn0} ; only low part of n0 | ||
188 | fldws 4($bp),${fbi} ; bp[0] in flipped word order | ||
189 | ___ | ||
190 | $code.=<<___; | ||
191 | fldds 0($ap),${fai} ; ap[0,1] | ||
192 | fldds 0($np),${fni} ; np[0,1] | ||
193 | |||
194 | sh2addl $num,%r0,$arrsz | ||
195 | ldi 31,$hi0 | ||
196 | ldo 36($arrsz),$hi1 ; space for tp[num+1] | ||
197 | andcm $hi1,$hi0,$hi1 ; align | ||
198 | addl $hi1,%sp,%sp | ||
199 | $PUSH $fp,-$SIZE_T(%sp) | ||
200 | |||
201 | ldo `$LOCALS+16`($fp),$xfer | ||
202 | ldo `$LOCALS+32+4`($fp),$tp | ||
203 | |||
204 | xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[0] | ||
205 | xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[0] | ||
206 | xmpyu ${fn0},${fab0}R,${fm0} | ||
207 | |||
208 | addl $arrsz,$ap,$ap ; point at the end | ||
209 | addl $arrsz,$np,$np | ||
210 | subi 0,$arrsz,$idx ; j=0 | ||
211 | ldo 8($idx),$idx ; j++++ | ||
212 | |||
213 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m | ||
214 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m | ||
215 | fstds ${fab0},-16($xfer) | ||
216 | fstds ${fnm0},-8($xfer) | ||
217 | fstds ${fab1},0($xfer) | ||
218 | fstds ${fnm1},8($xfer) | ||
219 | flddx $idx($ap),${fai} ; ap[2,3] | ||
220 | flddx $idx($np),${fni} ; np[2,3] | ||
221 | ___ | ||
222 | $code.=<<___ if ($BN_SZ==4); | ||
223 | mtctl $hi0,%cr11 ; $hi0 still holds 31 | ||
224 | extrd,u,*= $hi0,%sar,1,$hi0 ; executes on PA-RISC 1.0 | ||
225 | b L\$parisc11 | ||
226 | nop | ||
227 | ___ | ||
228 | $code.=<<___; # PA-RISC 2.0 code-path | ||
229 | xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0] | ||
230 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m | ||
231 | ldd -16($xfer),$ab0 | ||
232 | fstds ${fab0},-16($xfer) | ||
233 | |||
234 | extrd,u $ab0,31,32,$hi0 | ||
235 | extrd,u $ab0,63,32,$ab0 | ||
236 | ldd -8($xfer),$nm0 | ||
237 | fstds ${fnm0},-8($xfer) | ||
238 | ldo 8($idx),$idx ; j++++ | ||
239 | addl $ab0,$nm0,$nm0 ; low part is discarded | ||
240 | extrd,u $nm0,31,32,$hi1 | ||
241 | |||
242 | L\$1st | ||
243 | xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0] | ||
244 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m | ||
245 | ldd 0($xfer),$ab1 | ||
246 | fstds ${fab1},0($xfer) | ||
247 | addl $hi0,$ab1,$ab1 | ||
248 | extrd,u $ab1,31,32,$hi0 | ||
249 | ldd 8($xfer),$nm1 | ||
250 | fstds ${fnm1},8($xfer) | ||
251 | extrd,u $ab1,63,32,$ab1 | ||
252 | addl $hi1,$nm1,$nm1 | ||
253 | flddx $idx($ap),${fai} ; ap[j,j+1] | ||
254 | flddx $idx($np),${fni} ; np[j,j+1] | ||
255 | addl $ab1,$nm1,$nm1 | ||
256 | extrd,u $nm1,31,32,$hi1 | ||
257 | |||
258 | xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0] | ||
259 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m | ||
260 | ldd -16($xfer),$ab0 | ||
261 | fstds ${fab0},-16($xfer) | ||
262 | addl $hi0,$ab0,$ab0 | ||
263 | extrd,u $ab0,31,32,$hi0 | ||
264 | ldd -8($xfer),$nm0 | ||
265 | fstds ${fnm0},-8($xfer) | ||
266 | extrd,u $ab0,63,32,$ab0 | ||
267 | addl $hi1,$nm0,$nm0 | ||
268 | stw $nm1,-4($tp) ; tp[j-1] | ||
269 | addl $ab0,$nm0,$nm0 | ||
270 | stw,ma $nm0,8($tp) ; tp[j-1] | ||
271 | addib,<> 8,$idx,L\$1st ; j++++ | ||
272 | extrd,u $nm0,31,32,$hi1 | ||
273 | |||
274 | xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0] | ||
275 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m | ||
276 | ldd 0($xfer),$ab1 | ||
277 | fstds ${fab1},0($xfer) | ||
278 | addl $hi0,$ab1,$ab1 | ||
279 | extrd,u $ab1,31,32,$hi0 | ||
280 | ldd 8($xfer),$nm1 | ||
281 | fstds ${fnm1},8($xfer) | ||
282 | extrd,u $ab1,63,32,$ab1 | ||
283 | addl $hi1,$nm1,$nm1 | ||
284 | ldd -16($xfer),$ab0 | ||
285 | addl $ab1,$nm1,$nm1 | ||
286 | ldd -8($xfer),$nm0 | ||
287 | extrd,u $nm1,31,32,$hi1 | ||
288 | |||
289 | addl $hi0,$ab0,$ab0 | ||
290 | extrd,u $ab0,31,32,$hi0 | ||
291 | stw $nm1,-4($tp) ; tp[j-1] | ||
292 | extrd,u $ab0,63,32,$ab0 | ||
293 | addl $hi1,$nm0,$nm0 | ||
294 | ldd 0($xfer),$ab1 | ||
295 | addl $ab0,$nm0,$nm0 | ||
296 | ldd,mb 8($xfer),$nm1 | ||
297 | extrd,u $nm0,31,32,$hi1 | ||
298 | stw,ma $nm0,8($tp) ; tp[j-1] | ||
299 | |||
300 | ldo -1($num),$num ; i-- | ||
301 | subi 0,$arrsz,$idx ; j=0 | ||
302 | ___ | ||
303 | $code.=<<___ if ($BN_SZ==4); | ||
304 | fldws,ma 4($bp),${fbi} ; bp[1] | ||
305 | ___ | ||
306 | $code.=<<___ if ($BN_SZ==8); | ||
307 | fldws 0($bp),${fbi} ; bp[1] in flipped word order | ||
308 | ___ | ||
309 | $code.=<<___; | ||
310 | flddx $idx($ap),${fai} ; ap[0,1] | ||
311 | flddx $idx($np),${fni} ; np[0,1] | ||
312 | fldws 8($xfer),${fti}R ; tp[0] | ||
313 | addl $hi0,$ab1,$ab1 | ||
314 | extrd,u $ab1,31,32,$hi0 | ||
315 | extrd,u $ab1,63,32,$ab1 | ||
316 | ldo 8($idx),$idx ; j++++ | ||
317 | xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1] | ||
318 | xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1] | ||
319 | addl $hi1,$nm1,$nm1 | ||
320 | addl $ab1,$nm1,$nm1 | ||
321 | extrd,u $nm1,31,32,$hi1 | ||
322 | fstws,mb ${fab0}L,-8($xfer) ; save high part | ||
323 | stw $nm1,-4($tp) ; tp[j-1] | ||
324 | |||
325 | fcpy,sgl %fr0,${fti}L ; zero high part | ||
326 | fcpy,sgl %fr0,${fab0}L | ||
327 | addl $hi1,$hi0,$hi0 | ||
328 | extrd,u $hi0,31,32,$hi1 | ||
329 | fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double | ||
330 | fcnvxf,dbl,dbl ${fab0},${fab0} | ||
331 | stw $hi0,0($tp) | ||
332 | stw $hi1,4($tp) | ||
333 | |||
334 | fadd,dbl ${fti},${fab0},${fab0} ; add tp[0] | ||
335 | fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int | ||
336 | xmpyu ${fn0},${fab0}R,${fm0} | ||
337 | ldo `$LOCALS+32+4`($fp),$tp | ||
338 | L\$outer | ||
339 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m | ||
340 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m | ||
341 | fstds ${fab0},-16($xfer) ; 33-bit value | ||
342 | fstds ${fnm0},-8($xfer) | ||
343 | flddx $idx($ap),${fai} ; ap[2] | ||
344 | flddx $idx($np),${fni} ; np[2] | ||
345 | ldo 8($idx),$idx ; j++++ | ||
346 | ldd -16($xfer),$ab0 ; 33-bit value | ||
347 | ldd -8($xfer),$nm0 | ||
348 | ldw 0($xfer),$hi0 ; high part | ||
349 | |||
350 | xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i] | ||
351 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m | ||
352 | extrd,u $ab0,31,32,$ti0 ; carry bit | ||
353 | extrd,u $ab0,63,32,$ab0 | ||
354 | fstds ${fab1},0($xfer) | ||
355 | addl $ti0,$hi0,$hi0 ; account carry bit | ||
356 | fstds ${fnm1},8($xfer) | ||
357 | addl $ab0,$nm0,$nm0 ; low part is discarded | ||
358 | ldw 0($tp),$ti1 ; tp[1] | ||
359 | extrd,u $nm0,31,32,$hi1 | ||
360 | fstds ${fab0},-16($xfer) | ||
361 | fstds ${fnm0},-8($xfer) | ||
362 | |||
363 | L\$inner | ||
364 | xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i] | ||
365 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m | ||
366 | ldd 0($xfer),$ab1 | ||
367 | fstds ${fab1},0($xfer) | ||
368 | addl $hi0,$ti1,$ti1 | ||
369 | addl $ti1,$ab1,$ab1 | ||
370 | ldd 8($xfer),$nm1 | ||
371 | fstds ${fnm1},8($xfer) | ||
372 | extrd,u $ab1,31,32,$hi0 | ||
373 | extrd,u $ab1,63,32,$ab1 | ||
374 | flddx $idx($ap),${fai} ; ap[j,j+1] | ||
375 | flddx $idx($np),${fni} ; np[j,j+1] | ||
376 | addl $hi1,$nm1,$nm1 | ||
377 | addl $ab1,$nm1,$nm1 | ||
378 | ldw 4($tp),$ti0 ; tp[j] | ||
379 | stw $nm1,-4($tp) ; tp[j-1] | ||
380 | |||
381 | xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i] | ||
382 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m | ||
383 | ldd -16($xfer),$ab0 | ||
384 | fstds ${fab0},-16($xfer) | ||
385 | addl $hi0,$ti0,$ti0 | ||
386 | addl $ti0,$ab0,$ab0 | ||
387 | ldd -8($xfer),$nm0 | ||
388 | fstds ${fnm0},-8($xfer) | ||
389 | extrd,u $ab0,31,32,$hi0 | ||
390 | extrd,u $nm1,31,32,$hi1 | ||
391 | ldw 8($tp),$ti1 ; tp[j] | ||
392 | extrd,u $ab0,63,32,$ab0 | ||
393 | addl $hi1,$nm0,$nm0 | ||
394 | addl $ab0,$nm0,$nm0 | ||
395 | stw,ma $nm0,8($tp) ; tp[j-1] | ||
396 | addib,<> 8,$idx,L\$inner ; j++++ | ||
397 | extrd,u $nm0,31,32,$hi1 | ||
398 | |||
399 | xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i] | ||
400 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m | ||
401 | ldd 0($xfer),$ab1 | ||
402 | fstds ${fab1},0($xfer) | ||
403 | addl $hi0,$ti1,$ti1 | ||
404 | addl $ti1,$ab1,$ab1 | ||
405 | ldd 8($xfer),$nm1 | ||
406 | fstds ${fnm1},8($xfer) | ||
407 | extrd,u $ab1,31,32,$hi0 | ||
408 | extrd,u $ab1,63,32,$ab1 | ||
409 | ldw 4($tp),$ti0 ; tp[j] | ||
410 | addl $hi1,$nm1,$nm1 | ||
411 | addl $ab1,$nm1,$nm1 | ||
412 | ldd -16($xfer),$ab0 | ||
413 | ldd -8($xfer),$nm0 | ||
414 | extrd,u $nm1,31,32,$hi1 | ||
415 | |||
416 | addl $hi0,$ab0,$ab0 | ||
417 | addl $ti0,$ab0,$ab0 | ||
418 | stw $nm1,-4($tp) ; tp[j-1] | ||
419 | extrd,u $ab0,31,32,$hi0 | ||
420 | ldw 8($tp),$ti1 ; tp[j] | ||
421 | extrd,u $ab0,63,32,$ab0 | ||
422 | addl $hi1,$nm0,$nm0 | ||
423 | ldd 0($xfer),$ab1 | ||
424 | addl $ab0,$nm0,$nm0 | ||
425 | ldd,mb 8($xfer),$nm1 | ||
426 | extrd,u $nm0,31,32,$hi1 | ||
427 | stw,ma $nm0,8($tp) ; tp[j-1] | ||
428 | |||
429 | addib,= -1,$num,L\$outerdone ; i-- | ||
430 | subi 0,$arrsz,$idx ; j=0 | ||
431 | ___ | ||
432 | $code.=<<___ if ($BN_SZ==4); | ||
433 | fldws,ma 4($bp),${fbi} ; bp[i] | ||
434 | ___ | ||
435 | $code.=<<___ if ($BN_SZ==8); | ||
436 | ldi 12,$ti0 ; bp[i] in flipped word order | ||
437 | addl,ev %r0,$num,$num | ||
438 | ldi -4,$ti0 | ||
439 | addl $ti0,$bp,$bp | ||
440 | fldws 0($bp),${fbi} | ||
441 | ___ | ||
442 | $code.=<<___; | ||
443 | flddx $idx($ap),${fai} ; ap[0] | ||
444 | addl $hi0,$ab1,$ab1 | ||
445 | flddx $idx($np),${fni} ; np[0] | ||
446 | fldws 8($xfer),${fti}R ; tp[0] | ||
447 | addl $ti1,$ab1,$ab1 | ||
448 | extrd,u $ab1,31,32,$hi0 | ||
449 | extrd,u $ab1,63,32,$ab1 | ||
450 | |||
451 | ldo 8($idx),$idx ; j++++ | ||
452 | xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i] | ||
453 | xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i] | ||
454 | ldw 4($tp),$ti0 ; tp[j] | ||
455 | |||
456 | addl $hi1,$nm1,$nm1 | ||
457 | fstws,mb ${fab0}L,-8($xfer) ; save high part | ||
458 | addl $ab1,$nm1,$nm1 | ||
459 | extrd,u $nm1,31,32,$hi1 | ||
460 | fcpy,sgl %fr0,${fti}L ; zero high part | ||
461 | fcpy,sgl %fr0,${fab0}L | ||
462 | stw $nm1,-4($tp) ; tp[j-1] | ||
463 | |||
464 | fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double | ||
465 | fcnvxf,dbl,dbl ${fab0},${fab0} | ||
466 | addl $hi1,$hi0,$hi0 | ||
467 | fadd,dbl ${fti},${fab0},${fab0} ; add tp[0] | ||
468 | addl $ti0,$hi0,$hi0 | ||
469 | extrd,u $hi0,31,32,$hi1 | ||
470 | fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int | ||
471 | stw $hi0,0($tp) | ||
472 | stw $hi1,4($tp) | ||
473 | xmpyu ${fn0},${fab0}R,${fm0} | ||
474 | |||
475 | b L\$outer | ||
476 | ldo `$LOCALS+32+4`($fp),$tp | ||
477 | |||
478 | L\$outerdone | ||
479 | addl $hi0,$ab1,$ab1 | ||
480 | addl $ti1,$ab1,$ab1 | ||
481 | extrd,u $ab1,31,32,$hi0 | ||
482 | extrd,u $ab1,63,32,$ab1 | ||
483 | |||
484 | ldw 4($tp),$ti0 ; tp[j] | ||
485 | |||
486 | addl $hi1,$nm1,$nm1 | ||
487 | addl $ab1,$nm1,$nm1 | ||
488 | extrd,u $nm1,31,32,$hi1 | ||
489 | stw $nm1,-4($tp) ; tp[j-1] | ||
490 | |||
491 | addl $hi1,$hi0,$hi0 | ||
492 | addl $ti0,$hi0,$hi0 | ||
493 | extrd,u $hi0,31,32,$hi1 | ||
494 | stw $hi0,0($tp) | ||
495 | stw $hi1,4($tp) | ||
496 | |||
497 | ldo `$LOCALS+32`($fp),$tp | ||
498 | sub %r0,%r0,%r0 ; clear borrow | ||
499 | ___ | ||
500 | $code.=<<___ if ($BN_SZ==4); | ||
501 | ldws,ma 4($tp),$ti0 | ||
502 | extru,= $rp,31,3,%r0 ; is rp 64-bit aligned? | ||
503 | b L\$sub_pa11 | ||
504 | addl $tp,$arrsz,$tp | ||
505 | L\$sub | ||
506 | ldwx $idx($np),$hi0 | ||
507 | subb $ti0,$hi0,$hi1 | ||
508 | ldwx $idx($tp),$ti0 | ||
509 | addib,<> 4,$idx,L\$sub | ||
510 | stws,ma $hi1,4($rp) | ||
511 | |||
512 | subb $ti0,%r0,$hi1 | ||
513 | ldo -4($tp),$tp | ||
514 | ___ | ||
515 | $code.=<<___ if ($BN_SZ==8); | ||
516 | ldd,ma 8($tp),$ti0 | ||
517 | L\$sub | ||
518 | ldd $idx($np),$hi0 | ||
519 | shrpd $ti0,$ti0,32,$ti0 ; flip word order | ||
520 | std $ti0,-8($tp) ; save flipped value | ||
521 | sub,db $ti0,$hi0,$hi1 | ||
522 | ldd,ma 8($tp),$ti0 | ||
523 | addib,<> 8,$idx,L\$sub | ||
524 | std,ma $hi1,8($rp) | ||
525 | |||
526 | extrd,u $ti0,31,32,$ti0 ; carry in flipped word order | ||
527 | sub,db $ti0,%r0,$hi1 | ||
528 | ldo -8($tp),$tp | ||
529 | ___ | ||
530 | $code.=<<___; | ||
531 | and $tp,$hi1,$ap | ||
532 | andcm $rp,$hi1,$bp | ||
533 | or $ap,$bp,$np | ||
534 | |||
535 | sub $rp,$arrsz,$rp ; rewind rp | ||
536 | subi 0,$arrsz,$idx | ||
537 | ldo `$LOCALS+32`($fp),$tp | ||
538 | L\$copy | ||
539 | ldd $idx($np),$hi0 | ||
540 | std,ma %r0,8($tp) | ||
541 | addib,<> 8,$idx,.-8 ; L\$copy | ||
542 | std,ma $hi0,8($rp) | ||
543 | ___ | ||
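The and/andcm/or triple feeding the copy loop above is a branch-free select: $hi1 ends up as an all-zero or all-ones word derived from the top limb and the final borrow, so the copy source is chosen between tp and rp without a data-dependent branch. In scalar form:

    sub mask_select {
        my ($mask, $a, $b) = @_;             # $mask is 0 or ~0 (all ones)
        return ($a & $mask) | ($b & ~$mask);
    }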
544 | |||
545 | if ($BN_SZ==4) { # PA-RISC 1.1 code-path | ||
546 | $ablo=$ab0; | ||
547 | $abhi=$ab1; | ||
548 | $nmlo0=$nm0; | ||
549 | $nmhi0=$nm1; | ||
550 | $nmlo1="%r9"; | ||
551 | $nmhi1="%r8"; | ||
552 | |||
553 | $code.=<<___; | ||
554 | b L\$done | ||
555 | nop | ||
556 | |||
557 | .ALIGN 8 | ||
558 | L\$parisc11 | ||
559 | xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0] | ||
560 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m | ||
561 | ldw -12($xfer),$ablo | ||
562 | ldw -16($xfer),$hi0 | ||
563 | ldw -4($xfer),$nmlo0 | ||
564 | ldw -8($xfer),$nmhi0 | ||
565 | fstds ${fab0},-16($xfer) | ||
566 | fstds ${fnm0},-8($xfer) | ||
567 | |||
568 | ldo 8($idx),$idx ; j++++ | ||
569 | add $ablo,$nmlo0,$nmlo0 ; discarded | ||
570 | addc %r0,$nmhi0,$hi1 | ||
571 | ldw 4($xfer),$ablo | ||
572 | ldw 0($xfer),$abhi | ||
573 | nop | ||
574 | |||
575 | L\$1st_pa11 | ||
576 | xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0] | ||
577 | flddx $idx($ap),${fai} ; ap[j,j+1] | ||
578 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m | ||
579 | flddx $idx($np),${fni} ; np[j,j+1] | ||
580 | add $hi0,$ablo,$ablo | ||
581 | ldw 12($xfer),$nmlo1 | ||
582 | addc %r0,$abhi,$hi0 | ||
583 | ldw 8($xfer),$nmhi1 | ||
584 | add $ablo,$nmlo1,$nmlo1 | ||
585 | fstds ${fab1},0($xfer) | ||
586 | addc %r0,$nmhi1,$nmhi1 | ||
587 | fstds ${fnm1},8($xfer) | ||
588 | add $hi1,$nmlo1,$nmlo1 | ||
589 | ldw -12($xfer),$ablo | ||
590 | addc %r0,$nmhi1,$hi1 | ||
591 | ldw -16($xfer),$abhi | ||
592 | |||
593 | xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0] | ||
594 | ldw -4($xfer),$nmlo0 | ||
595 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m | ||
596 | ldw -8($xfer),$nmhi0 | ||
597 | add $hi0,$ablo,$ablo | ||
598 | stw $nmlo1,-4($tp) ; tp[j-1] | ||
599 | addc %r0,$abhi,$hi0 | ||
600 | fstds ${fab0},-16($xfer) | ||
601 | add $ablo,$nmlo0,$nmlo0 | ||
602 | fstds ${fnm0},-8($xfer) | ||
603 | addc %r0,$nmhi0,$nmhi0 | ||
604 | ldw 0($xfer),$abhi | ||
605 | add $hi1,$nmlo0,$nmlo0 | ||
606 | ldw 4($xfer),$ablo | ||
607 | stws,ma $nmlo0,8($tp) ; tp[j-1] | ||
608 | addib,<> 8,$idx,L\$1st_pa11 ; j++++ | ||
609 | addc %r0,$nmhi0,$hi1 | ||
610 | |||
611 | ldw 8($xfer),$nmhi1 | ||
612 | ldw 12($xfer),$nmlo1 | ||
613 | xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0] | ||
614 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m | ||
615 | add $hi0,$ablo,$ablo | ||
616 | fstds ${fab1},0($xfer) | ||
617 | addc %r0,$abhi,$hi0 | ||
618 | fstds ${fnm1},8($xfer) | ||
619 | add $ablo,$nmlo1,$nmlo1 | ||
620 | ldw -16($xfer),$abhi | ||
621 | addc %r0,$nmhi1,$nmhi1 | ||
622 | ldw -12($xfer),$ablo | ||
623 | add $hi1,$nmlo1,$nmlo1 | ||
624 | ldw -8($xfer),$nmhi0 | ||
625 | addc %r0,$nmhi1,$hi1 | ||
626 | ldw -4($xfer),$nmlo0 | ||
627 | |||
628 | add $hi0,$ablo,$ablo | ||
629 | stw $nmlo1,-4($tp) ; tp[j-1] | ||
630 | addc %r0,$abhi,$hi0 | ||
631 | ldw 0($xfer),$abhi | ||
632 | add $ablo,$nmlo0,$nmlo0 | ||
633 | ldw 4($xfer),$ablo | ||
634 | addc %r0,$nmhi0,$nmhi0 | ||
635 | ldws,mb 8($xfer),$nmhi1 | ||
636 | add $hi1,$nmlo0,$nmlo0 | ||
637 | ldw 4($xfer),$nmlo1 | ||
638 | addc %r0,$nmhi0,$hi1 | ||
639 | stws,ma $nmlo0,8($tp) ; tp[j-1] | ||
640 | |||
641 | ldo -1($num),$num ; i-- | ||
642 | subi 0,$arrsz,$idx ; j=0 | ||
643 | |||
644 | fldws,ma 4($bp),${fbi} ; bp[1] | ||
645 | flddx $idx($ap),${fai} ; ap[0,1] | ||
646 | flddx $idx($np),${fni} ; np[0,1] | ||
647 | fldws 8($xfer),${fti}R ; tp[0] | ||
648 | add $hi0,$ablo,$ablo | ||
649 | addc %r0,$abhi,$hi0 | ||
650 | ldo 8($idx),$idx ; j++++ | ||
651 | xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1] | ||
652 | xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1] | ||
653 | add $hi1,$nmlo1,$nmlo1 | ||
654 | addc %r0,$nmhi1,$nmhi1 | ||
655 | add $ablo,$nmlo1,$nmlo1 | ||
656 | addc %r0,$nmhi1,$hi1 | ||
657 | fstws,mb ${fab0}L,-8($xfer) ; save high part | ||
658 | stw $nmlo1,-4($tp) ; tp[j-1] | ||
659 | |||
660 | fcpy,sgl %fr0,${fti}L ; zero high part | ||
661 | fcpy,sgl %fr0,${fab0}L | ||
662 | add $hi1,$hi0,$hi0 | ||
663 | addc %r0,%r0,$hi1 | ||
664 | fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double | ||
665 | fcnvxf,dbl,dbl ${fab0},${fab0} | ||
666 | stw $hi0,0($tp) | ||
667 | stw $hi1,4($tp) | ||
668 | |||
669 | fadd,dbl ${fti},${fab0},${fab0} ; add tp[0] | ||
670 | fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int | ||
671 | xmpyu ${fn0},${fab0}R,${fm0} | ||
672 | ldo `$LOCALS+32+4`($fp),$tp | ||
673 | L\$outer_pa11 | ||
674 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m | ||
675 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m | ||
676 | fstds ${fab0},-16($xfer) ; 33-bit value | ||
677 | fstds ${fnm0},-8($xfer) | ||
678 | flddx $idx($ap),${fai} ; ap[2,3] | ||
679 | flddx $idx($np),${fni} ; np[2,3] | ||
680 | ldw -16($xfer),$abhi ; carry bit actually | ||
681 | ldo 8($idx),$idx ; j++++ | ||
682 | ldw -12($xfer),$ablo | ||
683 | ldw -8($xfer),$nmhi0 | ||
684 | ldw -4($xfer),$nmlo0 | ||
685 | ldw 0($xfer),$hi0 ; high part | ||
686 | |||
687 | xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i] | ||
688 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m | ||
689 | fstds ${fab1},0($xfer) | ||
690 | addl $abhi,$hi0,$hi0 ; account carry bit | ||
691 | fstds ${fnm1},8($xfer) | ||
692 | add $ablo,$nmlo0,$nmlo0 ; discarded | ||
693 | ldw 0($tp),$ti1 ; tp[1] | ||
694 | addc %r0,$nmhi0,$hi1 | ||
695 | fstds ${fab0},-16($xfer) | ||
696 | fstds ${fnm0},-8($xfer) | ||
697 | ldw 4($xfer),$ablo | ||
698 | ldw 0($xfer),$abhi | ||
699 | |||
700 | L\$inner_pa11 | ||
701 | xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i] | ||
702 | flddx $idx($ap),${fai} ; ap[j,j+1] | ||
703 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m | ||
704 | flddx $idx($np),${fni} ; np[j,j+1] | ||
705 | add $hi0,$ablo,$ablo | ||
706 | ldw 4($tp),$ti0 ; tp[j] | ||
707 | addc %r0,$abhi,$abhi | ||
708 | ldw 12($xfer),$nmlo1 | ||
709 | add $ti1,$ablo,$ablo | ||
710 | ldw 8($xfer),$nmhi1 | ||
711 | addc %r0,$abhi,$hi0 | ||
712 | fstds ${fab1},0($xfer) | ||
713 | add $ablo,$nmlo1,$nmlo1 | ||
714 | fstds ${fnm1},8($xfer) | ||
715 | addc %r0,$nmhi1,$nmhi1 | ||
716 | ldw -12($xfer),$ablo | ||
717 | add $hi1,$nmlo1,$nmlo1 | ||
718 | ldw -16($xfer),$abhi | ||
719 | addc %r0,$nmhi1,$hi1 | ||
720 | |||
721 | xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i] | ||
722 | ldw 8($tp),$ti1 ; tp[j] | ||
723 | xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m | ||
724 | ldw -4($xfer),$nmlo0 | ||
725 | add $hi0,$ablo,$ablo | ||
726 | ldw -8($xfer),$nmhi0 | ||
727 | addc %r0,$abhi,$abhi | ||
728 | stw $nmlo1,-4($tp) ; tp[j-1] | ||
729 | add $ti0,$ablo,$ablo | ||
730 | fstds ${fab0},-16($xfer) | ||
731 | addc %r0,$abhi,$hi0 | ||
732 | fstds ${fnm0},-8($xfer) | ||
733 | add $ablo,$nmlo0,$nmlo0 | ||
734 | ldw 4($xfer),$ablo | ||
735 | addc %r0,$nmhi0,$nmhi0 | ||
736 | ldw 0($xfer),$abhi | ||
737 | add $hi1,$nmlo0,$nmlo0 | ||
738 | stws,ma $nmlo0,8($tp) ; tp[j-1] | ||
739 | addib,<> 8,$idx,L\$inner_pa11 ; j++++ | ||
740 | addc %r0,$nmhi0,$hi1 | ||
741 | |||
742 | xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i] | ||
743 | ldw 12($xfer),$nmlo1 | ||
744 | xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m | ||
745 | ldw 8($xfer),$nmhi1 | ||
746 | add $hi0,$ablo,$ablo | ||
747 | ldw 4($tp),$ti0 ; tp[j] | ||
748 | addc %r0,$abhi,$abhi | ||
749 | fstds ${fab1},0($xfer) | ||
750 | add $ti1,$ablo,$ablo | ||
751 | fstds ${fnm1},8($xfer) | ||
752 | addc %r0,$abhi,$hi0 | ||
753 | ldw -16($xfer),$abhi | ||
754 | add $ablo,$nmlo1,$nmlo1 | ||
755 | ldw -12($xfer),$ablo | ||
756 | addc %r0,$nmhi1,$nmhi1 | ||
757 | ldw -8($xfer),$nmhi0 | ||
758 | add $hi1,$nmlo1,$nmlo1 | ||
759 | ldw -4($xfer),$nmlo0 | ||
760 | addc %r0,$nmhi1,$hi1 | ||
761 | |||
762 | add $hi0,$ablo,$ablo | ||
763 | stw $nmlo1,-4($tp) ; tp[j-1] | ||
764 | addc %r0,$abhi,$abhi | ||
765 | add $ti0,$ablo,$ablo | ||
766 | ldw 8($tp),$ti1 ; tp[j] | ||
767 | addc %r0,$abhi,$hi0 | ||
768 | ldw 0($xfer),$abhi | ||
769 | add $ablo,$nmlo0,$nmlo0 | ||
770 | ldw 4($xfer),$ablo | ||
771 | addc %r0,$nmhi0,$nmhi0 | ||
772 | ldws,mb 8($xfer),$nmhi1 | ||
773 | add $hi1,$nmlo0,$nmlo0 | ||
774 | ldw 4($xfer),$nmlo1 | ||
775 | addc %r0,$nmhi0,$hi1 | ||
776 | stws,ma $nmlo0,8($tp) ; tp[j-1] | ||
777 | |||
778 | addib,= -1,$num,L\$outerdone_pa11; i-- | ||
779 | subi 0,$arrsz,$idx ; j=0 | ||
780 | |||
781 | fldws,ma 4($bp),${fbi} ; bp[i] | ||
782 | flddx $idx($ap),${fai} ; ap[0] | ||
783 | add $hi0,$ablo,$ablo | ||
784 | addc %r0,$abhi,$abhi | ||
785 | flddx $idx($np),${fni} ; np[0] | ||
786 | fldws 8($xfer),${fti}R ; tp[0] | ||
787 | add $ti1,$ablo,$ablo | ||
788 | addc %r0,$abhi,$hi0 | ||
789 | |||
790 | ldo 8($idx),$idx ; j++++ | ||
791 | xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i] | ||
792 | xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i] | ||
793 | ldw 4($tp),$ti0 ; tp[j] | ||
794 | |||
795 | add $hi1,$nmlo1,$nmlo1 | ||
796 | addc %r0,$nmhi1,$nmhi1 | ||
797 | fstws,mb ${fab0}L,-8($xfer) ; save high part | ||
798 | add $ablo,$nmlo1,$nmlo1 | ||
799 | addc %r0,$nmhi1,$hi1 | ||
800 | fcpy,sgl %fr0,${fti}L ; zero high part | ||
801 | fcpy,sgl %fr0,${fab0}L | ||
802 | stw $nmlo1,-4($tp) ; tp[j-1] | ||
803 | |||
804 | fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double | ||
805 | fcnvxf,dbl,dbl ${fab0},${fab0} | ||
806 | add $hi1,$hi0,$hi0 | ||
807 | addc %r0,%r0,$hi1 | ||
808 | fadd,dbl ${fti},${fab0},${fab0} ; add tp[0] | ||
809 | add $ti0,$hi0,$hi0 | ||
810 | addc %r0,$hi1,$hi1 | ||
811 | fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int | ||
812 | stw $hi0,0($tp) | ||
813 | stw $hi1,4($tp) | ||
814 | xmpyu ${fn0},${fab0}R,${fm0} | ||
815 | |||
816 | b L\$outer_pa11 | ||
817 | ldo `$LOCALS+32+4`($fp),$tp | ||
818 | |||
819 | L\$outerdone_pa11 | ||
820 | add $hi0,$ablo,$ablo | ||
821 | addc %r0,$abhi,$abhi | ||
822 | add $ti1,$ablo,$ablo | ||
823 | addc %r0,$abhi,$hi0 | ||
824 | |||
825 | ldw 4($tp),$ti0 ; tp[j] | ||
826 | |||
827 | add $hi1,$nmlo1,$nmlo1 | ||
828 | addc %r0,$nmhi1,$nmhi1 | ||
829 | add $ablo,$nmlo1,$nmlo1 | ||
830 | addc %r0,$nmhi1,$hi1 | ||
831 | stw $nmlo1,-4($tp) ; tp[j-1] | ||
832 | |||
833 | add $hi1,$hi0,$hi0 | ||
834 | addc %r0,%r0,$hi1 | ||
835 | add $ti0,$hi0,$hi0 | ||
836 | addc %r0,$hi1,$hi1 | ||
837 | stw $hi0,0($tp) | ||
838 | stw $hi1,4($tp) | ||
839 | |||
840 | ldo `$LOCALS+32+4`($fp),$tp | ||
841 | sub %r0,%r0,%r0 ; clear borrow | ||
842 | ldw -4($tp),$ti0 | ||
843 | addl $tp,$arrsz,$tp | ||
844 | L\$sub_pa11 | ||
845 | ldwx $idx($np),$hi0 | ||
846 | subb $ti0,$hi0,$hi1 | ||
847 | ldwx $idx($tp),$ti0 | ||
848 | addib,<> 4,$idx,L\$sub_pa11 | ||
849 | stws,ma $hi1,4($rp) | ||
850 | |||
851 | subb $ti0,%r0,$hi1 | ||
852 | ldo -4($tp),$tp | ||
853 | and $tp,$hi1,$ap | ||
854 | andcm $rp,$hi1,$bp | ||
855 | or $ap,$bp,$np | ||
856 | |||
857 | sub $rp,$arrsz,$rp ; rewind rp | ||
858 | subi 0,$arrsz,$idx | ||
859 | ldo `$LOCALS+32`($fp),$tp | ||
860 | L\$copy_pa11 | ||
861 | ldwx $idx($np),$hi0 | ||
862 | stws,ma %r0,4($tp) | ||
863 | addib,<> 4,$idx,L\$copy_pa11 | ||
864 | stws,ma $hi0,4($rp) | ||
865 | |||
866 | nop ; alignment | ||
867 | L\$done | ||
868 | ___ | ||
869 | } | ||
870 | |||
871 | $code.=<<___; | ||
872 | ldi 1,%r28 ; signal "handled" | ||
873 | ldo $FRAME($fp),%sp ; destroy tp[num+1] | ||
874 | |||
875 | $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue | ||
876 | $POP `-$FRAME+1*$SIZE_T`(%sp),%r4 | ||
877 | $POP `-$FRAME+2*$SIZE_T`(%sp),%r5 | ||
878 | $POP `-$FRAME+3*$SIZE_T`(%sp),%r6 | ||
879 | $POP `-$FRAME+4*$SIZE_T`(%sp),%r7 | ||
880 | $POP `-$FRAME+5*$SIZE_T`(%sp),%r8 | ||
881 | $POP `-$FRAME+6*$SIZE_T`(%sp),%r9 | ||
882 | $POP `-$FRAME+7*$SIZE_T`(%sp),%r10 | ||
883 | L\$abort | ||
884 | bv (%r2) | ||
885 | .EXIT | ||
886 | $POPMB -$FRAME(%sp),%r3 | ||
887 | .PROCEND | ||
888 | .STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>" | ||
889 | ___ | ||
890 | |||
891 | # Explicitly encode PA-RISC 2.0 instructions used in this module, so | ||
892 | # that it can be compiled with .LEVEL 1.0. It should be noted that I | ||
893 | # wouldn't have to do this if the GNU assembler understood the | ||
894 | # .ALLOW 2.0 directive... | ||
895 | |||
896 | my $ldd = sub { | ||
897 | my ($mod,$args) = @_; | ||
898 | my $orig = "ldd$mod\t$args"; | ||
899 | |||
900 | if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4 | ||
901 | { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3; | ||
902 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
903 | } | ||
904 | elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5 | ||
905 | { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3; | ||
906 | $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset | ||
907 | $opcode|=(1<<5) if ($mod =~ /^,m/); | ||
908 | $opcode|=(1<<13) if ($mod =~ /^,mb/); | ||
909 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
910 | } | ||
911 | else { "\t".$orig; } | ||
912 | }; | ||
913 | |||
914 | my $std = sub { | ||
915 | my ($mod,$args) = @_; | ||
916 | my $orig = "std$mod\t$args"; | ||
917 | |||
918 | if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 6 | ||
919 | { my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6); | ||
920 | $opcode|=(($2&0xF)<<1)|(($2&0x10)>>4); # encode offset | ||
921 | $opcode|=(1<<5) if ($mod =~ /^,m/); | ||
922 | $opcode|=(1<<13) if ($mod =~ /^,mb/); | ||
923 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
924 | } | ||
925 | else { "\t".$orig; } | ||
926 | }; | ||
927 | |||
928 | my $extrd = sub { | ||
929 | my ($mod,$args) = @_; | ||
930 | my $orig = "extrd$mod\t$args"; | ||
931 | |||
932 | # I only have the ",u" completer; it's implicitly encoded... | ||
933 | if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15 | ||
934 | { my $opcode=(0x36<<26)|($1<<21)|($4<<16); | ||
935 | my $len=32-$3; | ||
936 | $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos | ||
937 | $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len | ||
938 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
939 | } | ||
940 | elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12 | ||
941 | { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9); | ||
942 | my $len=32-$2; | ||
943 | $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len | ||
944 | $opcode |= (1<<13) if ($mod =~ /,\**=/); | ||
945 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
946 | } | ||
947 | else { "\t".$orig; } | ||
948 | }; | ||
949 | |||
950 | my $shrpd = sub { | ||
951 | my ($mod,$args) = @_; | ||
952 | my $orig = "shrpd$mod\t$args"; | ||
953 | |||
954 | if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14 | ||
955 | { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4; | ||
956 | my $cpos=63-$3; | ||
957 | $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa | ||
958 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
959 | } | ||
960 | else { "\t".$orig; } | ||
961 | }; | ||
962 | |||
963 | my $sub = sub { | ||
964 | my ($mod,$args) = @_; | ||
965 | my $orig = "sub$mod\t$args"; | ||
966 | |||
967 | if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) { | ||
968 | my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3; | ||
969 | $opcode|=(1<<10); # e1 | ||
970 | $opcode|=(1<<8); # e2 | ||
971 | $opcode|=(1<<5); # d | ||
972 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig | ||
973 | } | ||
974 | else { "\t".$orig; } | ||
975 | }; | ||
976 | |||
977 | sub assemble { | ||
978 | my ($mnemonic,$mod,$args)=@_; | ||
979 | my $opcode = eval("\$$mnemonic"); | ||
980 | |||
981 | ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args"; | ||
982 | } | ||
983 | |||
984 | foreach (split("\n",$code)) { | ||
985 | s/\`([^\`]*)\`/eval $1/ge; | ||
986 | # flip word order in 64-bit mode... | ||
987 | s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8); | ||
988 | # assemble 2.0 instructions in 32-bit mode... | ||
989 | s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4); | ||
990 | |||
991 | print $_,"\n"; | ||
992 | } | ||
993 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/bn/asm/ppc-mont.pl b/src/lib/libcrypto/bn/asm/ppc-mont.pl index 7849eae959..f9b6992ccc 100644 --- a/src/lib/libcrypto/bn/asm/ppc-mont.pl +++ b/src/lib/libcrypto/bn/asm/ppc-mont.pl | |||
@@ -31,7 +31,6 @@ if ($flavour =~ /32/) { | |||
31 | $BNSZ= $BITS/8; | 31 | $BNSZ= $BITS/8; |
32 | $SIZE_T=4; | 32 | $SIZE_T=4; |
33 | $RZONE= 224; | 33 | $RZONE= 224; |
34 | $FRAME= $SIZE_T*16; | ||
35 | 34 | ||
36 | $LD= "lwz"; # load | 35 | $LD= "lwz"; # load |
37 | $LDU= "lwzu"; # load and update | 36 | $LDU= "lwzu"; # load and update |
@@ -51,7 +50,6 @@ if ($flavour =~ /32/) { | |||
51 | $BNSZ= $BITS/8; | 50 | $BNSZ= $BITS/8; |
52 | $SIZE_T=8; | 51 | $SIZE_T=8; |
53 | $RZONE= 288; | 52 | $RZONE= 288; |
54 | $FRAME= $SIZE_T*16; | ||
55 | 53 | ||
56 | # same as above, but 64-bit mnemonics... | 54 | # same as above, but 64-bit mnemonics... |
57 | $LD= "ld"; # load | 55 | $LD= "ld"; # load |
@@ -69,6 +67,9 @@ if ($flavour =~ /32/) { | |||
69 | $POP= $LD; | 67 | $POP= $LD; |
70 | } else { die "nonsense $flavour"; } | 68 | } else { die "nonsense $flavour"; } |
71 | 69 | ||
70 | $FRAME=8*$SIZE_T+$RZONE; | ||
71 | $LOCALS=8*$SIZE_T; | ||
72 | |||
72 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | 73 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; |
73 | ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or | 74 | ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or |
74 | ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or | 75 | ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or |
@@ -89,18 +90,18 @@ $aj="r10"; | |||
89 | $nj="r11"; | 90 | $nj="r11"; |
90 | $tj="r12"; | 91 | $tj="r12"; |
91 | # non-volatile registers | 92 | # non-volatile registers |
92 | $i="r14"; | 93 | $i="r20"; |
93 | $j="r15"; | 94 | $j="r21"; |
94 | $tp="r16"; | 95 | $tp="r22"; |
95 | $m0="r17"; | 96 | $m0="r23"; |
96 | $m1="r18"; | 97 | $m1="r24"; |
97 | $lo0="r19"; | 98 | $lo0="r25"; |
98 | $hi0="r20"; | 99 | $hi0="r26"; |
99 | $lo1="r21"; | 100 | $lo1="r27"; |
100 | $hi1="r22"; | 101 | $hi1="r28"; |
101 | $alo="r23"; | 102 | $alo="r29"; |
102 | $ahi="r24"; | 103 | $ahi="r30"; |
103 | $nlo="r25"; | 104 | $nlo="r31"; |
104 | # | 105 | # |
105 | $nhi="r0"; | 106 | $nhi="r0"; |
106 | 107 | ||
@@ -108,42 +109,48 @@ $code=<<___; | |||
108 | .machine "any" | 109 | .machine "any" |
109 | .text | 110 | .text |
110 | 111 | ||
111 | .globl .bn_mul_mont | 112 | .globl .bn_mul_mont_int |
112 | .align 4 | 113 | .align 4 |
113 | .bn_mul_mont: | 114 | .bn_mul_mont_int: |
114 | cmpwi $num,4 | 115 | cmpwi $num,4 |
115 | mr $rp,r3 ; $rp is reassigned | 116 | mr $rp,r3 ; $rp is reassigned |
116 | li r3,0 | 117 | li r3,0 |
117 | bltlr | 118 | bltlr |
118 | 119 | ___ | |
120 | $code.=<<___ if ($BNSZ==4); | ||
121 | cmpwi $num,32 ; performance is no better for longer keys | ||
122 | bgelr | ||
123 | ___ | ||
124 | $code.=<<___; | ||
119 | slwi $num,$num,`log($BNSZ)/log(2)` | 125 | slwi $num,$num,`log($BNSZ)/log(2)` |
120 | li $tj,-4096 | 126 | li $tj,-4096 |
121 | addi $ovf,$num,`$FRAME+$RZONE` | 127 | addi $ovf,$num,$FRAME |
122 | subf $ovf,$ovf,$sp ; $sp-$ovf | 128 | subf $ovf,$ovf,$sp ; $sp-$ovf |
123 | and $ovf,$ovf,$tj ; minimize TLB usage | 129 | and $ovf,$ovf,$tj ; minimize TLB usage |
124 | subf $ovf,$sp,$ovf ; $ovf-$sp | 130 | subf $ovf,$sp,$ovf ; $ovf-$sp |
131 | mr $tj,$sp | ||
125 | srwi $num,$num,`log($BNSZ)/log(2)` | 132 | srwi $num,$num,`log($BNSZ)/log(2)` |
126 | $STUX $sp,$sp,$ovf | 133 | $STUX $sp,$sp,$ovf |
127 | 134 | ||
128 | $PUSH r14,`4*$SIZE_T`($sp) | 135 | $PUSH r20,`-12*$SIZE_T`($tj) |
129 | $PUSH r15,`5*$SIZE_T`($sp) | 136 | $PUSH r21,`-11*$SIZE_T`($tj) |
130 | $PUSH r16,`6*$SIZE_T`($sp) | 137 | $PUSH r22,`-10*$SIZE_T`($tj) |
131 | $PUSH r17,`7*$SIZE_T`($sp) | 138 | $PUSH r23,`-9*$SIZE_T`($tj) |
132 | $PUSH r18,`8*$SIZE_T`($sp) | 139 | $PUSH r24,`-8*$SIZE_T`($tj) |
133 | $PUSH r19,`9*$SIZE_T`($sp) | 140 | $PUSH r25,`-7*$SIZE_T`($tj) |
134 | $PUSH r20,`10*$SIZE_T`($sp) | 141 | $PUSH r26,`-6*$SIZE_T`($tj) |
135 | $PUSH r21,`11*$SIZE_T`($sp) | 142 | $PUSH r27,`-5*$SIZE_T`($tj) |
136 | $PUSH r22,`12*$SIZE_T`($sp) | 143 | $PUSH r28,`-4*$SIZE_T`($tj) |
137 | $PUSH r23,`13*$SIZE_T`($sp) | 144 | $PUSH r29,`-3*$SIZE_T`($tj) |
138 | $PUSH r24,`14*$SIZE_T`($sp) | 145 | $PUSH r30,`-2*$SIZE_T`($tj) |
139 | $PUSH r25,`15*$SIZE_T`($sp) | 146 | $PUSH r31,`-1*$SIZE_T`($tj) |
140 | 147 | ||
141 | $LD $n0,0($n0) ; pull n0[0] value | 148 | $LD $n0,0($n0) ; pull n0[0] value |
142 | addi $num,$num,-2 ; adjust $num for counter register | 149 | addi $num,$num,-2 ; adjust $num for counter register |
143 | 150 | ||
144 | $LD $m0,0($bp) ; m0=bp[0] | 151 | $LD $m0,0($bp) ; m0=bp[0] |
145 | $LD $aj,0($ap) ; ap[0] | 152 | $LD $aj,0($ap) ; ap[0] |
146 | addi $tp,$sp,$FRAME | 153 | addi $tp,$sp,$LOCALS |
147 | $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0] | 154 | $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0] |
148 | $UMULH $hi0,$aj,$m0 | 155 | $UMULH $hi0,$aj,$m0 |
149 | 156 | ||
@@ -205,8 +212,8 @@ L1st: | |||
205 | Louter: | 212 | Louter: |
206 | $LDX $m0,$bp,$i ; m0=bp[i] | 213 | $LDX $m0,$bp,$i ; m0=bp[i] |
207 | $LD $aj,0($ap) ; ap[0] | 214 | $LD $aj,0($ap) ; ap[0] |
208 | addi $tp,$sp,$FRAME | 215 | addi $tp,$sp,$LOCALS |
209 | $LD $tj,$FRAME($sp) ; tp[0] | 216 | $LD $tj,$LOCALS($sp); tp[0] |
210 | $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i] | 217 | $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i] |
211 | $UMULH $hi0,$aj,$m0 | 218 | $UMULH $hi0,$aj,$m0 |
212 | $LD $aj,$BNSZ($ap) ; ap[1] | 219 | $LD $aj,$BNSZ($ap) ; ap[1] |
@@ -273,7 +280,7 @@ Linner: | |||
273 | 280 | ||
274 | addi $num,$num,2 ; restore $num | 281 | addi $num,$num,2 ; restore $num |
275 | subfc $j,$j,$j ; j=0 and "clear" XER[CA] | 282 | subfc $j,$j,$j ; j=0 and "clear" XER[CA] |
276 | addi $tp,$sp,$FRAME | 283 | addi $tp,$sp,$LOCALS |
277 | mtctr $num | 284 | mtctr $num |
278 | 285 | ||
279 | .align 4 | 286 | .align 4 |
@@ -299,23 +306,27 @@ Lcopy: ; copy or in-place refresh | |||
299 | addi $j,$j,$BNSZ | 306 | addi $j,$j,$BNSZ |
300 | bdnz- Lcopy | 307 | bdnz- Lcopy |
301 | 308 | ||
302 | $POP r14,`4*$SIZE_T`($sp) | 309 | $POP $tj,0($sp) |
303 | $POP r15,`5*$SIZE_T`($sp) | ||
304 | $POP r16,`6*$SIZE_T`($sp) | ||
305 | $POP r17,`7*$SIZE_T`($sp) | ||
306 | $POP r18,`8*$SIZE_T`($sp) | ||
307 | $POP r19,`9*$SIZE_T`($sp) | ||
308 | $POP r20,`10*$SIZE_T`($sp) | ||
309 | $POP r21,`11*$SIZE_T`($sp) | ||
310 | $POP r22,`12*$SIZE_T`($sp) | ||
311 | $POP r23,`13*$SIZE_T`($sp) | ||
312 | $POP r24,`14*$SIZE_T`($sp) | ||
313 | $POP r25,`15*$SIZE_T`($sp) | ||
314 | $POP $sp,0($sp) | ||
315 | li r3,1 | 310 | li r3,1 |
311 | $POP r20,`-12*$SIZE_T`($tj) | ||
312 | $POP r21,`-11*$SIZE_T`($tj) | ||
313 | $POP r22,`-10*$SIZE_T`($tj) | ||
314 | $POP r23,`-9*$SIZE_T`($tj) | ||
315 | $POP r24,`-8*$SIZE_T`($tj) | ||
316 | $POP r25,`-7*$SIZE_T`($tj) | ||
317 | $POP r26,`-6*$SIZE_T`($tj) | ||
318 | $POP r27,`-5*$SIZE_T`($tj) | ||
319 | $POP r28,`-4*$SIZE_T`($tj) | ||
320 | $POP r29,`-3*$SIZE_T`($tj) | ||
321 | $POP r30,`-2*$SIZE_T`($tj) | ||
322 | $POP r31,`-1*$SIZE_T`($tj) | ||
323 | mr $sp,$tj | ||
316 | blr | 324 | blr |
317 | .long 0 | 325 | .long 0 |
318 | .asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@fy.chalmers.se>" | 326 | .byte 0,12,4,0,0x80,12,6,0 |
327 | .long 0 | ||
328 | |||
329 | .asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>" | ||
319 | ___ | 330 | ___ |
320 | 331 | ||
321 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | 332 | $code =~ s/\`([^\`]*)\`/eval $1/gem; |
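The rewritten prologue above moves the register save area to negative offsets from the caller's stack pointer, and keeps the li $tj,-4096 / addi / subf / and / $STUX sequence that carves the scratch area out of the stack on a 4096-byte boundary. A small Perl model of that carve-out arithmetic, with made-up input values:

    use strict;
    use warnings;

    # Drop the stack pointer by at least num-bytes+$FRAME and land on a
    # 4096-byte boundary, so the scratch area straddles as few pages as
    # possible ("minimize TLB usage").
    my ($sp, $bytes, $FRAME) = (0x7ffff8a4, 2048, 288);   # made-up values
    my $new_sp = ($sp - ($bytes + $FRAME)) & ~4095;
    printf "old sp %#010x -> new sp %#010x (carved %d bytes)\n",
           $sp, $new_sp, $sp - $new_sp;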
diff --git a/src/lib/libcrypto/bn/asm/ppc.pl b/src/lib/libcrypto/bn/asm/ppc.pl index f4093177e6..1249ce2299 100644 --- a/src/lib/libcrypto/bn/asm/ppc.pl +++ b/src/lib/libcrypto/bn/asm/ppc.pl | |||
@@ -389,7 +389,9 @@ $data=<<EOF; | |||
389 | $ST r9,`6*$BNSZ`(r3) #r[6]=c1 | 389 | $ST r9,`6*$BNSZ`(r3) #r[6]=c1 |
390 | $ST r10,`7*$BNSZ`(r3) #r[7]=c2 | 390 | $ST r10,`7*$BNSZ`(r3) #r[7]=c2 |
391 | blr | 391 | blr |
392 | .long 0x00000000 | 392 | .long 0 |
393 | .byte 0,12,0x14,0,0,0,2,0 | ||
394 | .long 0 | ||
393 | 395 | ||
394 | # | 396 | # |
395 | # NOTE: The following label name should be changed to | 397 | # NOTE: The following label name should be changed to |
@@ -814,8 +816,9 @@ $data=<<EOF; | |||
814 | 816 | ||
815 | 817 | ||
816 | blr | 818 | blr |
817 | 819 | .long 0 | |
818 | .long 0x00000000 | 820 | .byte 0,12,0x14,0,0,0,2,0 |
821 | .long 0 | ||
819 | 822 | ||
820 | # | 823 | # |
821 | # NOTE: The following label name should be changed to | 824 | # NOTE: The following label name should be changed to |
@@ -966,7 +969,9 @@ $data=<<EOF; | |||
966 | $ST r10,`6*$BNSZ`(r3) #r[6]=c1 | 969 | $ST r10,`6*$BNSZ`(r3) #r[6]=c1 |
967 | $ST r11,`7*$BNSZ`(r3) #r[7]=c2 | 970 | $ST r11,`7*$BNSZ`(r3) #r[7]=c2 |
968 | blr | 971 | blr |
969 | .long 0x00000000 | 972 | .long 0 |
973 | .byte 0,12,0x14,0,0,0,3,0 | ||
974 | .long 0 | ||
970 | 975 | ||
971 | # | 976 | # |
972 | # NOTE: The following label name should be changed to | 977 | # NOTE: The following label name should be changed to |
@@ -1502,7 +1507,9 @@ $data=<<EOF; | |||
1502 | $ST r12,`14*$BNSZ`(r3) #r[14]=c3; | 1507 | $ST r12,`14*$BNSZ`(r3) #r[14]=c3; |
1503 | $ST r10,`15*$BNSZ`(r3) #r[15]=c1; | 1508 | $ST r10,`15*$BNSZ`(r3) #r[15]=c1; |
1504 | blr | 1509 | blr |
1505 | .long 0x00000000 | 1510 | .long 0 |
1511 | .byte 0,12,0x14,0,0,0,3,0 | ||
1512 | .long 0 | ||
1506 | 1513 | ||
1507 | # | 1514 | # |
1508 | # NOTE: The following label name should be changed to | 1515 | # NOTE: The following label name should be changed to |
@@ -1550,8 +1557,9 @@ Lppcasm_sub_adios: | |||
1550 | subfze r3,r0 # if carry bit is set then r3 = 0 else -1 | 1557 | subfze r3,r0 # if carry bit is set then r3 = 0 else -1 |
1551 | andi. r3,r3,1 # keep only last bit. | 1558 | andi. r3,r3,1 # keep only last bit. |
1552 | blr | 1559 | blr |
1553 | .long 0x00000000 | 1560 | .long 0 |
1554 | 1561 | .byte 0,12,0x14,0,0,0,4,0 | |
1562 | .long 0 | ||
1555 | 1563 | ||
1556 | # | 1564 | # |
1557 | # NOTE: The following label name should be changed to | 1565 | # NOTE: The following label name should be changed to |
@@ -1594,7 +1602,9 @@ Lppcasm_add_mainloop: | |||
1594 | Lppcasm_add_adios: | 1602 | Lppcasm_add_adios: |
1595 | addze r3,r0 #return carry bit. | 1603 | addze r3,r0 #return carry bit. |
1596 | blr | 1604 | blr |
1597 | .long 0x00000000 | 1605 | .long 0 |
1606 | .byte 0,12,0x14,0,0,0,4,0 | ||
1607 | .long 0 | ||
1598 | 1608 | ||
1599 | # | 1609 | # |
1600 | # NOTE: The following label name should be changed to | 1610 | # NOTE: The following label name should be changed to |
@@ -1707,7 +1717,9 @@ Lppcasm_div8: | |||
1707 | Lppcasm_div9: | 1717 | Lppcasm_div9: |
1708 | or r3,r8,r0 | 1718 | or r3,r8,r0 |
1709 | blr | 1719 | blr |
1710 | .long 0x00000000 | 1720 | .long 0 |
1721 | .byte 0,12,0x14,0,0,0,3,0 | ||
1722 | .long 0 | ||
1711 | 1723 | ||
1712 | # | 1724 | # |
1713 | # NOTE: The following label name should be changed to | 1725 | # NOTE: The following label name should be changed to |
@@ -1746,8 +1758,9 @@ Lppcasm_sqr_mainloop: | |||
1746 | bdnz- Lppcasm_sqr_mainloop | 1758 | bdnz- Lppcasm_sqr_mainloop |
1747 | Lppcasm_sqr_adios: | 1759 | Lppcasm_sqr_adios: |
1748 | blr | 1760 | blr |
1749 | .long 0x00000000 | 1761 | .long 0 |
1750 | 1762 | .byte 0,12,0x14,0,0,0,3,0 | |
1763 | .long 0 | ||
1751 | 1764 | ||
1752 | # | 1765 | # |
1753 | # NOTE: The following label name should be changed to | 1766 | # NOTE: The following label name should be changed to |
@@ -1850,7 +1863,9 @@ Lppcasm_mw_REM: | |||
1850 | Lppcasm_mw_OVER: | 1863 | Lppcasm_mw_OVER: |
1851 | addi r3,r12,0 | 1864 | addi r3,r12,0 |
1852 | blr | 1865 | blr |
1853 | .long 0x00000000 | 1866 | .long 0 |
1867 | .byte 0,12,0x14,0,0,0,4,0 | ||
1868 | .long 0 | ||
1854 | 1869 | ||
1855 | # | 1870 | # |
1856 | # NOTE: The following label name should be changed to | 1871 | # NOTE: The following label name should be changed to |
@@ -1973,7 +1988,9 @@ Lppcasm_maw_leftover: | |||
1973 | Lppcasm_maw_adios: | 1988 | Lppcasm_maw_adios: |
1974 | addi r3,r12,0 | 1989 | addi r3,r12,0 |
1975 | blr | 1990 | blr |
1976 | .long 0x00000000 | 1991 | .long 0 |
1992 | .byte 0,12,0x14,0,0,0,4,0 | ||
1993 | .long 0 | ||
1977 | .align 4 | 1994 | .align 4 |
1978 | EOF | 1995 | EOF |
1979 | $data =~ s/\`([^\`]*)\`/eval $1/gem; | 1996 | $data =~ s/\`([^\`]*)\`/eval $1/gem; |
diff --git a/src/lib/libcrypto/bn/asm/ppc64-mont.pl b/src/lib/libcrypto/bn/asm/ppc64-mont.pl index 3449b35855..a14e769ad0 100644 --- a/src/lib/libcrypto/bn/asm/ppc64-mont.pl +++ b/src/lib/libcrypto/bn/asm/ppc64-mont.pl | |||
@@ -45,23 +45,40 @@ | |||
45 | # on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive | 45 | # on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive |
46 | # in absolute terms, but it's apparently the way Power 6 is... | 46 | # in absolute terms, but it's apparently the way Power 6 is... |
47 | 47 | ||
48 | # December 2009 | ||
49 | |||
50 | # Adapted for a 32-bit build, this module delivers a 25-120% - yes, | ||
51 | # more than *twice* for longer keys - performance improvement over | ||
52 | # 32-bit ppc-mont.pl on a 1.8GHz PPC970. However! This implementation | ||
53 | # uses 64-bit integer operations, and the trouble is that most PPC | ||
54 | # operating systems don't preserve upper halves of general purpose | ||
55 | # registers upon 32-bit signal delivery. They do preserve them upon | ||
56 | # context switch, but not upon signalling:-( This means asynchronous | ||
57 | # signals have to be blocked upon entry to this subroutine. Signal | ||
58 | # masking (and of course complementary unmasking) has quite an impact | ||
59 | # on performance, naturally larger for shorter keys. It's so severe | ||
60 | # that 512-bit key performance can be as low as 1/3 of the expected | ||
61 | # one. This is why this routine is engaged for longer key operations | ||
62 | # only on those OSes; see crypto/ppccap.c for further details. MacOS X | ||
63 | # is an exception and doesn't require signal masking, and that's | ||
64 | # where the above improvement coefficients were collected. For the | ||
65 | # others, an alternative would be to break the dependence on upper | ||
66 | # halves of GPRs by sticking to 32-bit integer operations... | ||
67 | |||
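The gating and the masking themselves live in crypto/ppccap.c, in C; purely as an illustration of the block/restore idiom the comment describes, here is a small Perl sketch (assuming a POSIX system):

    use POSIX qw(sigprocmask SIG_BLOCK SIG_SETMASK);

    # Block all asynchronous signals, run the region that relies on the
    # upper halves of 64-bit GPRs surviving, then restore the old mask.
    my $all = POSIX::SigSet->new();
    $all->fillset();
    my $old = POSIX::SigSet->new();

    sigprocmask(SIG_BLOCK, $all, $old) or die "sigprocmask: $!";
    # ... 64-bit-GPR critical region would run here ...
    sigprocmask(SIG_SETMASK, $old)     or die "sigprocmask: $!";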
48 | $flavour = shift; | 68 | $flavour = shift; |
49 | 69 | ||
50 | if ($flavour =~ /32/) { | 70 | if ($flavour =~ /32/) { |
51 | $SIZE_T=4; | 71 | $SIZE_T=4; |
52 | $RZONE= 224; | 72 | $RZONE= 224; |
53 | $FRAME= $SIZE_T*12+8*12; | 73 | $fname= "bn_mul_mont_fpu64"; |
54 | $fname= "bn_mul_mont_ppc64"; | ||
55 | 74 | ||
56 | $STUX= "stwux"; # store indexed and update | 75 | $STUX= "stwux"; # store indexed and update |
57 | $PUSH= "stw"; | 76 | $PUSH= "stw"; |
58 | $POP= "lwz"; | 77 | $POP= "lwz"; |
59 | die "not implemented yet"; | ||
60 | } elsif ($flavour =~ /64/) { | 78 | } elsif ($flavour =~ /64/) { |
61 | $SIZE_T=8; | 79 | $SIZE_T=8; |
62 | $RZONE= 288; | 80 | $RZONE= 288; |
63 | $FRAME= $SIZE_T*12+8*12; | 81 | $fname= "bn_mul_mont_fpu64"; |
64 | $fname= "bn_mul_mont"; | ||
65 | 82 | ||
66 | # same as above, but 64-bit mnemonics... | 83 | # same as above, but 64-bit mnemonics... |
67 | $STUX= "stdux"; # store indexed and update | 84 | $STUX= "stdux"; # store indexed and update |
@@ -76,7 +93,7 @@ die "can't locate ppc-xlate.pl"; | |||
76 | 93 | ||
77 | open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; | 94 | open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; |
78 | 95 | ||
79 | $FRAME=($FRAME+63)&~63; | 96 | $FRAME=64; # padded frame header |
80 | $TRANSFER=16*8; | 97 | $TRANSFER=16*8; |
81 | 98 | ||
82 | $carry="r0"; | 99 | $carry="r0"; |
@@ -93,16 +110,16 @@ $tp="r10"; | |||
93 | $j="r11"; | 110 | $j="r11"; |
94 | $i="r12"; | 111 | $i="r12"; |
95 | # non-volatile registers | 112 | # non-volatile registers |
96 | $nap_d="r14"; # interleaved ap and np in double format | 113 | $nap_d="r22"; # interleaved ap and np in double format |
97 | $a0="r15"; # ap[0] | 114 | $a0="r23"; # ap[0] |
98 | $t0="r16"; # temporary registers | 115 | $t0="r24"; # temporary registers |
99 | $t1="r17"; | 116 | $t1="r25"; |
100 | $t2="r18"; | 117 | $t2="r26"; |
101 | $t3="r19"; | 118 | $t3="r27"; |
102 | $t4="r20"; | 119 | $t4="r28"; |
103 | $t5="r21"; | 120 | $t5="r29"; |
104 | $t6="r22"; | 121 | $t6="r30"; |
105 | $t7="r23"; | 122 | $t7="r31"; |
106 | 123 | ||
107 | # PPC offers enough register bank capacity to unroll inner loops twice | 124 | # PPC offers enough register bank capacity to unroll inner loops twice |
108 | # | 125 | # |
@@ -132,28 +149,17 @@ $ba="f0"; $bb="f1"; $bc="f2"; $bd="f3"; | |||
132 | $na="f4"; $nb="f5"; $nc="f6"; $nd="f7"; | 149 | $na="f4"; $nb="f5"; $nc="f6"; $nd="f7"; |
133 | $dota="f8"; $dotb="f9"; | 150 | $dota="f8"; $dotb="f9"; |
134 | $A0="f10"; $A1="f11"; $A2="f12"; $A3="f13"; | 151 | $A0="f10"; $A1="f11"; $A2="f12"; $A3="f13"; |
135 | $N0="f14"; $N1="f15"; $N2="f16"; $N3="f17"; | 152 | $N0="f20"; $N1="f21"; $N2="f22"; $N3="f23"; |
136 | $T0a="f18"; $T0b="f19"; | 153 | $T0a="f24"; $T0b="f25"; |
137 | $T1a="f20"; $T1b="f21"; | 154 | $T1a="f26"; $T1b="f27"; |
138 | $T2a="f22"; $T2b="f23"; | 155 | $T2a="f28"; $T2b="f29"; |
139 | $T3a="f24"; $T3b="f25"; | 156 | $T3a="f30"; $T3b="f31"; |
140 | 157 | ||
141 | # sp----------->+-------------------------------+ | 158 | # sp----------->+-------------------------------+ |
142 | # | saved sp | | 159 | # | saved sp | |
143 | # +-------------------------------+ | 160 | # +-------------------------------+ |
144 | # | | | ||
145 | # +-------------------------------+ | ||
146 | # | 10 saved gpr, r14-r23 | | ||
147 | # . . | ||
148 | # . . | ||
149 | # +12*size_t +-------------------------------+ | ||
150 | # | 12 saved fpr, f14-f25 | | ||
151 | # . . | 161 | # . . |
152 | # . . | 162 | # +64 +-------------------------------+ |
153 | # +12*8 +-------------------------------+ | ||
154 | # | padding to 64 byte boundary | | ||
155 | # . . | ||
156 | # +X +-------------------------------+ | ||
157 | # | 16 gpr<->fpr transfer zone | | 163 | # | 16 gpr<->fpr transfer zone | |
158 | # . . | 164 | # . . |
159 | # . . | 165 | # . . |
@@ -173,6 +179,16 @@ $T3a="f24"; $T3b="f25"; | |||
173 | # . . | 179 | # . . |
174 | # . . | 180 | # . . |
175 | # +-------------------------------+ | 181 | # +-------------------------------+ |
182 | # . . | ||
183 | # -12*size_t +-------------------------------+ | ||
184 | # | 10 saved gpr, r22-r31 | | ||
185 | # . . | ||
186 | # . . | ||
187 | # -12*8 +-------------------------------+ | ||
188 | # | 12 saved fpr, f20-f31 | | ||
189 | # . . | ||
190 | # . . | ||
191 | # +-------------------------------+ | ||
176 | 192 | ||
177 | $code=<<___; | 193 | $code=<<___; |
178 | .machine "any" | 194 | .machine "any" |
@@ -181,14 +197,14 @@ $code=<<___; | |||
181 | .globl .$fname | 197 | .globl .$fname |
182 | .align 5 | 198 | .align 5 |
183 | .$fname: | 199 | .$fname: |
184 | cmpwi $num,4 | 200 | cmpwi $num,`3*8/$SIZE_T` |
185 | mr $rp,r3 ; $rp is reassigned | 201 | mr $rp,r3 ; $rp is reassigned |
186 | li r3,0 ; possible "not handled" return code | 202 | li r3,0 ; possible "not handled" return code |
187 | bltlr- | 203 | bltlr- |
188 | andi. r0,$num,1 ; $num has to be even | 204 | andi. r0,$num,`16/$SIZE_T-1` ; $num has to be "even" |
189 | bnelr- | 205 | bnelr- |
190 | 206 | ||
191 | slwi $num,$num,3 ; num*=8 | 207 | slwi $num,$num,`log($SIZE_T)/log(2)` ; num*=sizeof(BN_LONG) |
192 | li $i,-4096 | 208 | li $i,-4096 |
193 | slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num | 209 | slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num |
194 | add $tp,$tp,$num ; place for tp[num+1] | 210 | add $tp,$tp,$num ; place for tp[num+1] |
@@ -196,35 +212,50 @@ $code=<<___; | |||
196 | subf $tp,$tp,$sp ; $sp-$tp | 212 | subf $tp,$tp,$sp ; $sp-$tp |
197 | and $tp,$tp,$i ; minimize TLB usage | 213 | and $tp,$tp,$i ; minimize TLB usage |
198 | subf $tp,$sp,$tp ; $tp-$sp | 214 | subf $tp,$sp,$tp ; $tp-$sp |
215 | mr $i,$sp | ||
199 | $STUX $sp,$sp,$tp ; alloca | 216 | $STUX $sp,$sp,$tp ; alloca |
200 | 217 | ||
201 | $PUSH r14,`2*$SIZE_T`($sp) | 218 | $PUSH r22,`-12*8-10*$SIZE_T`($i) |
202 | $PUSH r15,`3*$SIZE_T`($sp) | 219 | $PUSH r23,`-12*8-9*$SIZE_T`($i) |
203 | $PUSH r16,`4*$SIZE_T`($sp) | 220 | $PUSH r24,`-12*8-8*$SIZE_T`($i) |
204 | $PUSH r17,`5*$SIZE_T`($sp) | 221 | $PUSH r25,`-12*8-7*$SIZE_T`($i) |
205 | $PUSH r18,`6*$SIZE_T`($sp) | 222 | $PUSH r26,`-12*8-6*$SIZE_T`($i) |
206 | $PUSH r19,`7*$SIZE_T`($sp) | 223 | $PUSH r27,`-12*8-5*$SIZE_T`($i) |
207 | $PUSH r20,`8*$SIZE_T`($sp) | 224 | $PUSH r28,`-12*8-4*$SIZE_T`($i) |
208 | $PUSH r21,`9*$SIZE_T`($sp) | 225 | $PUSH r29,`-12*8-3*$SIZE_T`($i) |
209 | $PUSH r22,`10*$SIZE_T`($sp) | 226 | $PUSH r30,`-12*8-2*$SIZE_T`($i) |
210 | $PUSH r23,`11*$SIZE_T`($sp) | 227 | $PUSH r31,`-12*8-1*$SIZE_T`($i) |
211 | stfd f14,`12*$SIZE_T+0`($sp) | 228 | stfd f20,`-12*8`($i) |
212 | stfd f15,`12*$SIZE_T+8`($sp) | 229 | stfd f21,`-11*8`($i) |
213 | stfd f16,`12*$SIZE_T+16`($sp) | 230 | stfd f22,`-10*8`($i) |
214 | stfd f17,`12*$SIZE_T+24`($sp) | 231 | stfd f23,`-9*8`($i) |
215 | stfd f18,`12*$SIZE_T+32`($sp) | 232 | stfd f24,`-8*8`($i) |
216 | stfd f19,`12*$SIZE_T+40`($sp) | 233 | stfd f25,`-7*8`($i) |
217 | stfd f20,`12*$SIZE_T+48`($sp) | 234 | stfd f26,`-6*8`($i) |
218 | stfd f21,`12*$SIZE_T+56`($sp) | 235 | stfd f27,`-5*8`($i) |
219 | stfd f22,`12*$SIZE_T+64`($sp) | 236 | stfd f28,`-4*8`($i) |
220 | stfd f23,`12*$SIZE_T+72`($sp) | 237 | stfd f29,`-3*8`($i) |
221 | stfd f24,`12*$SIZE_T+80`($sp) | 238 | stfd f30,`-2*8`($i) |
222 | stfd f25,`12*$SIZE_T+88`($sp) | 239 | stfd f31,`-1*8`($i) |
223 | 240 | ___ | |
241 | $code.=<<___ if ($SIZE_T==8); | ||
224 | ld $a0,0($ap) ; pull ap[0] value | 242 | ld $a0,0($ap) ; pull ap[0] value |
225 | ld $n0,0($n0) ; pull n0[0] value | 243 | ld $n0,0($n0) ; pull n0[0] value |
226 | ld $t3,0($bp) ; bp[0] | 244 | ld $t3,0($bp) ; bp[0] |
227 | 245 | ___ | |
246 | $code.=<<___ if ($SIZE_T==4); | ||
247 | mr $t1,$n0 | ||
248 | lwz $a0,0($ap) ; pull ap[0,1] value | ||
249 | lwz $t0,4($ap) | ||
250 | lwz $n0,0($t1) ; pull n0[0,1] value | ||
251 | lwz $t1,4($t1) | ||
252 | lwz $t3,0($bp) ; bp[0,1] | ||
253 | lwz $t2,4($bp) | ||
254 | insrdi $a0,$t0,32,0 | ||
255 | insrdi $n0,$t1,32,0 | ||
256 | insrdi $t3,$t2,32,0 | ||
257 | ___ | ||
258 | $code.=<<___; | ||
228 | addi $tp,$sp,`$FRAME+$TRANSFER+8+64` | 259 | addi $tp,$sp,`$FRAME+$TRANSFER+8+64` |
229 | li $i,-64 | 260 | li $i,-64 |
230 | add $nap_d,$tp,$num | 261 | add $nap_d,$tp,$num |
@@ -258,6 +289,8 @@ $code=<<___; | |||
258 | std $t5,`$FRAME+40`($sp) | 289 | std $t5,`$FRAME+40`($sp) |
259 | std $t6,`$FRAME+48`($sp) | 290 | std $t6,`$FRAME+48`($sp) |
260 | std $t7,`$FRAME+56`($sp) | 291 | std $t7,`$FRAME+56`($sp) |
292 | ___ | ||
293 | $code.=<<___ if ($SIZE_T==8); | ||
261 | lwz $t0,4($ap) ; load a[j] as 32-bit word pair | 294 | lwz $t0,4($ap) ; load a[j] as 32-bit word pair |
262 | lwz $t1,0($ap) | 295 | lwz $t1,0($ap) |
263 | lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair | 296 | lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair |
@@ -266,6 +299,18 @@ $code=<<___; | |||
266 | lwz $t5,0($np) | 299 | lwz $t5,0($np) |
267 | lwz $t6,12($np) ; load n[j+1] as 32-bit word pair | 300 | lwz $t6,12($np) ; load n[j+1] as 32-bit word pair |
268 | lwz $t7,8($np) | 301 | lwz $t7,8($np) |
302 | ___ | ||
303 | $code.=<<___ if ($SIZE_T==4); | ||
304 | lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs | ||
305 | lwz $t1,4($ap) | ||
306 | lwz $t2,8($ap) | ||
307 | lwz $t3,12($ap) | ||
308 | lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs | ||
309 | lwz $t5,4($np) | ||
310 | lwz $t6,8($np) | ||
311 | lwz $t7,12($np) | ||
312 | ___ | ||
313 | $code.=<<___; | ||
269 | lfd $ba,`$FRAME+0`($sp) | 314 | lfd $ba,`$FRAME+0`($sp) |
270 | lfd $bb,`$FRAME+8`($sp) | 315 | lfd $bb,`$FRAME+8`($sp) |
271 | lfd $bc,`$FRAME+16`($sp) | 316 | lfd $bc,`$FRAME+16`($sp) |
@@ -374,6 +419,8 @@ $code=<<___; | |||
374 | 419 | ||
375 | .align 5 | 420 | .align 5 |
376 | L1st: | 421 | L1st: |
422 | ___ | ||
423 | $code.=<<___ if ($SIZE_T==8); | ||
377 | lwz $t0,4($ap) ; load a[j] as 32-bit word pair | 424 | lwz $t0,4($ap) ; load a[j] as 32-bit word pair |
378 | lwz $t1,0($ap) | 425 | lwz $t1,0($ap) |
379 | lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair | 426 | lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair |
@@ -382,6 +429,18 @@ L1st: | |||
382 | lwz $t5,0($np) | 429 | lwz $t5,0($np) |
383 | lwz $t6,12($np) ; load n[j+1] as 32-bit word pair | 430 | lwz $t6,12($np) ; load n[j+1] as 32-bit word pair |
384 | lwz $t7,8($np) | 431 | lwz $t7,8($np) |
432 | ___ | ||
433 | $code.=<<___ if ($SIZE_T==4); | ||
434 | lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs | ||
435 | lwz $t1,4($ap) | ||
436 | lwz $t2,8($ap) | ||
437 | lwz $t3,12($ap) | ||
438 | lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs | ||
439 | lwz $t5,4($np) | ||
440 | lwz $t6,8($np) | ||
441 | lwz $t7,12($np) | ||
442 | ___ | ||
443 | $code.=<<___; | ||
385 | std $t0,`$FRAME+64`($sp) | 444 | std $t0,`$FRAME+64`($sp) |
386 | std $t1,`$FRAME+72`($sp) | 445 | std $t1,`$FRAME+72`($sp) |
387 | std $t2,`$FRAME+80`($sp) | 446 | std $t2,`$FRAME+80`($sp) |
@@ -559,7 +618,17 @@ L1st: | |||
559 | li $i,8 ; i=1 | 618 | li $i,8 ; i=1 |
560 | .align 5 | 619 | .align 5 |
561 | Louter: | 620 | Louter: |
621 | ___ | ||
622 | $code.=<<___ if ($SIZE_T==8); | ||
562 | ldx $t3,$bp,$i ; bp[i] | 623 | ldx $t3,$bp,$i ; bp[i] |
624 | ___ | ||
625 | $code.=<<___ if ($SIZE_T==4); | ||
626 | add $t0,$bp,$i | ||
627 | lwz $t3,0($t0) ; bp[i,i+1] | ||
628 | lwz $t0,4($t0) | ||
629 | insrdi $t3,$t0,32,0 | ||
630 | ___ | ||
631 | $code.=<<___; | ||
563 | ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0] | 632 | ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0] |
564 | mulld $t7,$a0,$t3 ; ap[0]*bp[i] | 633 | mulld $t7,$a0,$t3 ; ap[0]*bp[i] |
565 | 634 | ||
@@ -761,6 +830,13 @@ Linner: | |||
761 | stfd $T0b,`$FRAME+8`($sp) | 830 | stfd $T0b,`$FRAME+8`($sp) |
762 | add $t7,$t7,$carry | 831 | add $t7,$t7,$carry |
763 | addc $t3,$t0,$t1 | 832 | addc $t3,$t0,$t1 |
833 | ___ | ||
834 | $code.=<<___ if ($SIZE_T==4); # adjust XER[CA] | ||
835 | extrdi $t0,$t0,32,0 | ||
836 | extrdi $t1,$t1,32,0 | ||
837 | adde $t0,$t0,$t1 | ||
838 | ___ | ||
839 | $code.=<<___; | ||
764 | stfd $T1a,`$FRAME+16`($sp) | 840 | stfd $T1a,`$FRAME+16`($sp) |
765 | stfd $T1b,`$FRAME+24`($sp) | 841 | stfd $T1b,`$FRAME+24`($sp) |
766 | insrdi $t4,$t7,16,0 ; 64..127 bits | 842 | insrdi $t4,$t7,16,0 ; 64..127 bits |
@@ -768,6 +844,13 @@ Linner: | |||
768 | stfd $T2a,`$FRAME+32`($sp) | 844 | stfd $T2a,`$FRAME+32`($sp) |
769 | stfd $T2b,`$FRAME+40`($sp) | 845 | stfd $T2b,`$FRAME+40`($sp) |
770 | adde $t5,$t4,$t2 | 846 | adde $t5,$t4,$t2 |
847 | ___ | ||
848 | $code.=<<___ if ($SIZE_T==4); # adjust XER[CA] | ||
849 | extrdi $t4,$t4,32,0 | ||
850 | extrdi $t2,$t2,32,0 | ||
851 | adde $t4,$t4,$t2 | ||
852 | ___ | ||
853 | $code.=<<___; | ||
771 | stfd $T3a,`$FRAME+48`($sp) | 854 | stfd $T3a,`$FRAME+48`($sp) |
772 | stfd $T3b,`$FRAME+56`($sp) | 855 | stfd $T3b,`$FRAME+56`($sp) |
773 | addze $carry,$carry | 856 | addze $carry,$carry |
@@ -816,7 +899,21 @@ Linner: | |||
816 | ld $t7,`$FRAME+72`($sp) | 899 | ld $t7,`$FRAME+72`($sp) |
817 | 900 | ||
818 | addc $t3,$t0,$t1 | 901 | addc $t3,$t0,$t1 |
902 | ___ | ||
903 | $code.=<<___ if ($SIZE_T==4); # adjust XER[CA] | ||
904 | extrdi $t0,$t0,32,0 | ||
905 | extrdi $t1,$t1,32,0 | ||
906 | adde $t0,$t0,$t1 | ||
907 | ___ | ||
908 | $code.=<<___; | ||
819 | adde $t5,$t4,$t2 | 909 | adde $t5,$t4,$t2 |
910 | ___ | ||
911 | $code.=<<___ if ($SIZE_T==4); # adjust XER[CA] | ||
912 | extrdi $t4,$t4,32,0 | ||
913 | extrdi $t2,$t2,32,0 | ||
914 | adde $t4,$t4,$t2 | ||
915 | ___ | ||
916 | $code.=<<___; | ||
820 | addze $carry,$carry | 917 | addze $carry,$carry |
821 | 918 | ||
822 | std $t3,-16($tp) ; tp[j-1] | 919 | std $t3,-16($tp) ; tp[j-1] |
@@ -835,7 +932,9 @@ Linner: | |||
835 | subf $nap_d,$t7,$nap_d ; rewind pointer | 932 | subf $nap_d,$t7,$nap_d ; rewind pointer |
836 | cmpw $i,$num | 933 | cmpw $i,$num |
837 | blt- Louter | 934 | blt- Louter |
935 | ___ | ||
838 | 936 | ||
937 | $code.=<<___ if ($SIZE_T==8); | ||
839 | subf $np,$num,$np ; rewind np | 938 | subf $np,$num,$np ; rewind np |
840 | addi $j,$j,1 ; restore counter | 939 | addi $j,$j,1 ; restore counter |
841 | subfc $i,$i,$i ; j=0 and "clear" XER[CA] | 940 | subfc $i,$i,$i ; j=0 and "clear" XER[CA] |
@@ -883,34 +982,105 @@ Lcopy: ; copy or in-place refresh | |||
883 | stdx $i,$t4,$i | 982 | stdx $i,$t4,$i |
884 | addi $i,$i,16 | 983 | addi $i,$i,16 |
885 | bdnz- Lcopy | 984 | bdnz- Lcopy |
985 | ___ | ||
986 | $code.=<<___ if ($SIZE_T==4); | ||
987 | subf $np,$num,$np ; rewind np | ||
988 | addi $j,$j,1 ; restore counter | ||
989 | subfc $i,$i,$i ; j=0 and "clear" XER[CA] | ||
990 | addi $tp,$sp,`$FRAME+$TRANSFER` | ||
991 | addi $np,$np,-4 | ||
992 | addi $rp,$rp,-4 | ||
993 | addi $ap,$sp,`$FRAME+$TRANSFER+4` | ||
994 | mtctr $j | ||
995 | |||
996 | .align 4 | ||
997 | Lsub: ld $t0,8($tp) ; load tp[j..j+3] in 64-bit word order | ||
998 | ldu $t2,16($tp) | ||
999 | lwz $t4,4($np) ; load np[j..j+3] in 32-bit word order | ||
1000 | lwz $t5,8($np) | ||
1001 | lwz $t6,12($np) | ||
1002 | lwzu $t7,16($np) | ||
1003 | extrdi $t1,$t0,32,0 | ||
1004 | extrdi $t3,$t2,32,0 | ||
1005 | subfe $t4,$t4,$t0 ; tp[j]-np[j] | ||
1006 | stw $t0,4($ap) ; save tp[j..j+3] in 32-bit word order | ||
1007 | subfe $t5,$t5,$t1 ; tp[j+1]-np[j+1] | ||
1008 | stw $t1,8($ap) | ||
1009 | subfe $t6,$t6,$t2 ; tp[j+2]-np[j+2] | ||
1010 | stw $t2,12($ap) | ||
1011 | subfe $t7,$t7,$t3 ; tp[j+3]-np[j+3] | ||
1012 | stwu $t3,16($ap) | ||
1013 | stw $t4,4($rp) | ||
1014 | stw $t5,8($rp) | ||
1015 | stw $t6,12($rp) | ||
1016 | stwu $t7,16($rp) | ||
1017 | bdnz- Lsub | ||
1018 | |||
1019 | li $i,0 | ||
1020 | subfe $ovf,$i,$ovf ; handle upmost overflow bit | ||
1021 | addi $tp,$sp,`$FRAME+$TRANSFER+4` | ||
1022 | subf $rp,$num,$rp ; rewind rp | ||
1023 | and $ap,$tp,$ovf | ||
1024 | andc $np,$rp,$ovf | ||
1025 | or $ap,$ap,$np ; ap=borrow?tp:rp | ||
1026 | addi $tp,$sp,`$FRAME+$TRANSFER` | ||
1027 | mtctr $j | ||
1028 | |||
1029 | .align 4 | ||
1030 | Lcopy: ; copy or in-place refresh | ||
1031 | lwz $t0,4($ap) | ||
1032 | lwz $t1,8($ap) | ||
1033 | lwz $t2,12($ap) | ||
1034 | lwzu $t3,16($ap) | ||
1035 | std $i,8($nap_d) ; zap nap_d | ||
1036 | std $i,16($nap_d) | ||
1037 | std $i,24($nap_d) | ||
1038 | std $i,32($nap_d) | ||
1039 | std $i,40($nap_d) | ||
1040 | std $i,48($nap_d) | ||
1041 | std $i,56($nap_d) | ||
1042 | stdu $i,64($nap_d) | ||
1043 | stw $t0,4($rp) | ||
1044 | stw $t1,8($rp) | ||
1045 | stw $t2,12($rp) | ||
1046 | stwu $t3,16($rp) | ||
1047 | std $i,8($tp) ; zap tp at once | ||
1048 | stdu $i,16($tp) | ||
1049 | bdnz- Lcopy | ||
1050 | ___ | ||
886 | 1051 | ||
887 | $POP r14,`2*$SIZE_T`($sp) | 1052 | $code.=<<___; |
888 | $POP r15,`3*$SIZE_T`($sp) | 1053 | $POP $i,0($sp) |
889 | $POP r16,`4*$SIZE_T`($sp) | ||
890 | $POP r17,`5*$SIZE_T`($sp) | ||
891 | $POP r18,`6*$SIZE_T`($sp) | ||
892 | $POP r19,`7*$SIZE_T`($sp) | ||
893 | $POP r20,`8*$SIZE_T`($sp) | ||
894 | $POP r21,`9*$SIZE_T`($sp) | ||
895 | $POP r22,`10*$SIZE_T`($sp) | ||
896 | $POP r23,`11*$SIZE_T`($sp) | ||
897 | lfd f14,`12*$SIZE_T+0`($sp) | ||
898 | lfd f15,`12*$SIZE_T+8`($sp) | ||
899 | lfd f16,`12*$SIZE_T+16`($sp) | ||
900 | lfd f17,`12*$SIZE_T+24`($sp) | ||
901 | lfd f18,`12*$SIZE_T+32`($sp) | ||
902 | lfd f19,`12*$SIZE_T+40`($sp) | ||
903 | lfd f20,`12*$SIZE_T+48`($sp) | ||
904 | lfd f21,`12*$SIZE_T+56`($sp) | ||
905 | lfd f22,`12*$SIZE_T+64`($sp) | ||
906 | lfd f23,`12*$SIZE_T+72`($sp) | ||
907 | lfd f24,`12*$SIZE_T+80`($sp) | ||
908 | lfd f25,`12*$SIZE_T+88`($sp) | ||
909 | $POP $sp,0($sp) | ||
910 | li r3,1 ; signal "handled" | 1054 | li r3,1 ; signal "handled" |
1055 | $POP r22,`-12*8-10*$SIZE_T`($i) | ||
1056 | $POP r23,`-12*8-9*$SIZE_T`($i) | ||
1057 | $POP r24,`-12*8-8*$SIZE_T`($i) | ||
1058 | $POP r25,`-12*8-7*$SIZE_T`($i) | ||
1059 | $POP r26,`-12*8-6*$SIZE_T`($i) | ||
1060 | $POP r27,`-12*8-5*$SIZE_T`($i) | ||
1061 | $POP r28,`-12*8-4*$SIZE_T`($i) | ||
1062 | $POP r29,`-12*8-3*$SIZE_T`($i) | ||
1063 | $POP r30,`-12*8-2*$SIZE_T`($i) | ||
1064 | $POP r31,`-12*8-1*$SIZE_T`($i) | ||
1065 | lfd f20,`-12*8`($i) | ||
1066 | lfd f21,`-11*8`($i) | ||
1067 | lfd f22,`-10*8`($i) | ||
1068 | lfd f23,`-9*8`($i) | ||
1069 | lfd f24,`-8*8`($i) | ||
1070 | lfd f25,`-7*8`($i) | ||
1071 | lfd f26,`-6*8`($i) | ||
1072 | lfd f27,`-5*8`($i) | ||
1073 | lfd f28,`-4*8`($i) | ||
1074 | lfd f29,`-3*8`($i) | ||
1075 | lfd f30,`-2*8`($i) | ||
1076 | lfd f31,`-1*8`($i) | ||
1077 | mr $sp,$i | ||
911 | blr | 1078 | blr |
912 | .long 0 | 1079 | .long 0 |
913 | .asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@fy.chalmers.se>" | 1080 | .byte 0,12,4,0,0x8c,10,6,0 |
1081 | .long 0 | ||
1082 | |||
1083 | .asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@openssl.org>" | ||
914 | ___ | 1084 | ___ |
915 | 1085 | ||
916 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | 1086 | $code =~ s/\`([^\`]*)\`/eval $1/gem; |
diff --git a/src/lib/libcrypto/bn/asm/s390x-gf2m.pl b/src/lib/libcrypto/bn/asm/s390x-gf2m.pl new file mode 100644 index 0000000000..cd9f13eca2 --- /dev/null +++ b/src/lib/libcrypto/bn/asm/s390x-gf2m.pl | |||
@@ -0,0 +1,221 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # May 2011 | ||
11 | # | ||
12 | # The module implements bn_GF2m_mul_2x2 polynomial multiplication used | ||
13 | # in bn_gf2m.c. It's a low-hanging mechanical port from C for the | ||
14 | # time being... gcc 4.3 appeared to generate poor code, hence the | ||
15 | # effort. And indeed, the module delivers 55%-90%(*) improvement on | ||
16 | # the heaviest ECDSA verify and ECDH benchmarks for 163- and 571-bit | ||
17 | # key lengths on z990, 30%-55%(*) on z10, and 70%-110%(*) on z196. | ||
18 | # This is for the 64-bit build. In the 32-bit "highgprs" case the | ||
19 | # improvement is even higher; on z990, for example, it was measured | ||
20 | # at 80%-150%. ECDSA sign is a modest 9%-12% faster. Keep in mind | ||
21 | # that these coefficients are not the ones for bn_GF2m_mul_2x2 | ||
22 | # itself, as not all CPU time is burnt in it... | ||
23 | # | ||
24 | # (*) gcc 4.1 was observed to deliver better results than gcc 4.3, | ||
25 | # so that improvement coefficients can vary from one specific | ||
26 | # setup to another. | ||
27 | |||
28 | $flavour = shift; | ||
29 | |||
30 | if ($flavour =~ /3[12]/) { | ||
31 | $SIZE_T=4; | ||
32 | $g=""; | ||
33 | } else { | ||
34 | $SIZE_T=8; | ||
35 | $g="g"; | ||
36 | } | ||
37 | |||
38 | while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} | ||
39 | open STDOUT,">$output"; | ||
40 | |||
41 | $stdframe=16*$SIZE_T+4*8; | ||
42 | |||
43 | $rp="%r2"; | ||
44 | $a1="%r3"; | ||
45 | $a0="%r4"; | ||
46 | $b1="%r5"; | ||
47 | $b0="%r6"; | ||
48 | |||
49 | $ra="%r14"; | ||
50 | $sp="%r15"; | ||
51 | |||
52 | @T=("%r0","%r1"); | ||
53 | @i=("%r12","%r13"); | ||
54 | |||
55 | ($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(6..11)); | ||
56 | ($lo,$hi,$b)=map("%r$_",(3..5)); $a=$lo; $mask=$a8; | ||
57 | |||
58 | $code.=<<___; | ||
59 | .text | ||
60 | |||
61 | .type _mul_1x1,\@function | ||
62 | .align 16 | ||
63 | _mul_1x1: | ||
64 | lgr $a1,$a | ||
65 | sllg $a2,$a,1 | ||
66 | sllg $a4,$a,2 | ||
67 | sllg $a8,$a,3 | ||
68 | |||
69 | srag $lo,$a1,63 # broadcast 63rd bit | ||
70 | nihh $a1,0x1fff | ||
71 | srag @i[0],$a2,63 # broadcast 62nd bit | ||
72 | nihh $a2,0x3fff | ||
73 | srag @i[1],$a4,63 # broadcast 61st bit | ||
74 | nihh $a4,0x7fff | ||
75 | ngr $lo,$b | ||
76 | ngr @i[0],$b | ||
77 | ngr @i[1],$b | ||
78 | |||
79 | lghi @T[0],0 | ||
80 | lgr $a12,$a1 | ||
81 | stg @T[0],`$stdframe+0*8`($sp) # tab[0]=0 | ||
82 | xgr $a12,$a2 | ||
83 | stg $a1,`$stdframe+1*8`($sp) # tab[1]=a1 | ||
84 | lgr $a48,$a4 | ||
85 | stg $a2,`$stdframe+2*8`($sp) # tab[2]=a2 | ||
86 | xgr $a48,$a8 | ||
87 | stg $a12,`$stdframe+3*8`($sp) # tab[3]=a1^a2 | ||
88 | xgr $a1,$a4 | ||
89 | |||
90 | stg $a4,`$stdframe+4*8`($sp) # tab[4]=a4 | ||
91 | xgr $a2,$a4 | ||
92 | stg $a1,`$stdframe+5*8`($sp) # tab[5]=a1^a4 | ||
93 | xgr $a12,$a4 | ||
94 | stg $a2,`$stdframe+6*8`($sp) # tab[6]=a2^a4 | ||
95 | xgr $a1,$a48 | ||
96 | stg $a12,`$stdframe+7*8`($sp) # tab[7]=a1^a2^a4 | ||
97 | xgr $a2,$a48 | ||
98 | |||
99 | stg $a8,`$stdframe+8*8`($sp) # tab[8]=a8 | ||
100 | xgr $a12,$a48 | ||
101 | stg $a1,`$stdframe+9*8`($sp) # tab[9]=a1^a8 | ||
102 | xgr $a1,$a4 | ||
103 | stg $a2,`$stdframe+10*8`($sp) # tab[10]=a2^a8 | ||
104 | xgr $a2,$a4 | ||
105 | stg $a12,`$stdframe+11*8`($sp) # tab[11]=a1^a2^a8 | ||
106 | |||
107 | xgr $a12,$a4 | ||
108 | stg $a48,`$stdframe+12*8`($sp) # tab[12]=a4^a8 | ||
109 | srlg $hi,$lo,1 | ||
110 | stg $a1,`$stdframe+13*8`($sp) # tab[13]=a1^a4^a8 | ||
111 | sllg $lo,$lo,63 | ||
112 | stg $a2,`$stdframe+14*8`($sp) # tab[14]=a2^a4^a8 | ||
113 | srlg @T[0],@i[0],2 | ||
114 | stg $a12,`$stdframe+15*8`($sp) # tab[15]=a1^a2^a4^a8 | ||
115 | |||
116 | lghi $mask,`0xf<<3` | ||
117 | sllg $a1,@i[0],62 | ||
118 | sllg @i[0],$b,3 | ||
119 | srlg @T[1],@i[1],3 | ||
120 | ngr @i[0],$mask | ||
121 | sllg $a2,@i[1],61 | ||
122 | srlg @i[1],$b,4-3 | ||
123 | xgr $hi,@T[0] | ||
124 | ngr @i[1],$mask | ||
125 | xgr $lo,$a1 | ||
126 | xgr $hi,@T[1] | ||
127 | xgr $lo,$a2 | ||
128 | |||
129 | xg $lo,$stdframe(@i[0],$sp) | ||
130 | srlg @i[0],$b,8-3 | ||
131 | ngr @i[0],$mask | ||
132 | ___ | ||
133 | for($n=1;$n<14;$n++) { | ||
134 | $code.=<<___; | ||
135 | lg @T[1],$stdframe(@i[1],$sp) | ||
136 | srlg @i[1],$b,`($n+2)*4`-3 | ||
137 | sllg @T[0],@T[1],`$n*4` | ||
138 | ngr @i[1],$mask | ||
139 | srlg @T[1],@T[1],`64-$n*4` | ||
140 | xgr $lo,@T[0] | ||
141 | xgr $hi,@T[1] | ||
142 | ___ | ||
143 | push(@i,shift(@i)); push(@T,shift(@T)); | ||
144 | } | ||
145 | $code.=<<___; | ||
146 | lg @T[1],$stdframe(@i[1],$sp) | ||
147 | sllg @T[0],@T[1],`$n*4` | ||
148 | srlg @T[1],@T[1],`64-$n*4` | ||
149 | xgr $lo,@T[0] | ||
150 | xgr $hi,@T[1] | ||
151 | |||
152 | lg @T[0],$stdframe(@i[0],$sp) | ||
153 | sllg @T[1],@T[0],`($n+1)*4` | ||
154 | srlg @T[0],@T[0],`64-($n+1)*4` | ||
155 | xgr $lo,@T[1] | ||
156 | xgr $hi,@T[0] | ||
157 | |||
158 | br $ra | ||
159 | .size _mul_1x1,.-_mul_1x1 | ||
160 | |||
161 | .globl bn_GF2m_mul_2x2 | ||
162 | .type bn_GF2m_mul_2x2,\@function | ||
163 | .align 16 | ||
164 | bn_GF2m_mul_2x2: | ||
165 | stm${g} %r3,%r15,3*$SIZE_T($sp) | ||
166 | |||
167 | lghi %r1,-$stdframe-128 | ||
168 | la %r0,0($sp) | ||
169 | la $sp,0(%r1,$sp) # alloca | ||
170 | st${g} %r0,0($sp) # back chain | ||
171 | ___ | ||
172 | if ($SIZE_T==8) { | ||
173 | my @r=map("%r$_",(6..9)); | ||
174 | $code.=<<___; | ||
175 | bras $ra,_mul_1x1 # a1·b1 | ||
176 | stmg $lo,$hi,16($rp) | ||
177 | |||
178 | lg $a,`$stdframe+128+4*$SIZE_T`($sp) | ||
179 | lg $b,`$stdframe+128+6*$SIZE_T`($sp) | ||
180 | bras $ra,_mul_1x1 # a0·b0 | ||
181 | stmg $lo,$hi,0($rp) | ||
182 | |||
183 | lg $a,`$stdframe+128+3*$SIZE_T`($sp) | ||
184 | lg $b,`$stdframe+128+5*$SIZE_T`($sp) | ||
185 | xg $a,`$stdframe+128+4*$SIZE_T`($sp) | ||
186 | xg $b,`$stdframe+128+6*$SIZE_T`($sp) | ||
187 | bras $ra,_mul_1x1 # (a0+a1)·(b0+b1) | ||
188 | lmg @r[0],@r[3],0($rp) | ||
189 | |||
190 | xgr $lo,$hi | ||
191 | xgr $hi,@r[1] | ||
192 | xgr $lo,@r[0] | ||
193 | xgr $hi,@r[2] | ||
194 | xgr $lo,@r[3] | ||
195 | xgr $hi,@r[3] | ||
196 | xgr $lo,$hi | ||
197 | stg $hi,16($rp) | ||
198 | stg $lo,8($rp) | ||
199 | ___ | ||
200 | } else { | ||
201 | $code.=<<___; | ||
202 | sllg %r3,%r3,32 | ||
203 | sllg %r5,%r5,32 | ||
204 | or %r3,%r4 | ||
205 | or %r5,%r6 | ||
206 | bras $ra,_mul_1x1 | ||
207 | rllg $lo,$lo,32 | ||
208 | rllg $hi,$hi,32 | ||
209 | stmg $lo,$hi,0($rp) | ||
210 | ___ | ||
211 | } | ||
212 | $code.=<<___; | ||
213 | lm${g} %r6,%r15,`$stdframe+128+6*$SIZE_T`($sp) | ||
214 | br $ra | ||
215 | .size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2 | ||
216 | .string "GF(2^m) Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>" | ||
217 | ___ | ||
218 | |||
219 | $code =~ s/\`([^\`]*)\`/eval($1)/gem; | ||
220 | print $code; | ||
221 | close STDOUT; | ||
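The 64-bit path of bn_GF2m_mul_2x2 above is the classic three-multiplication Karatsuba split over GF(2): a1·b1, a0·b0 and (a0+a1)·(b0+b1), XOR-combined into the middle words. A standalone Perl model of the same combination on deliberately narrow 8-bit "words", so plain integers suffice (the names and widths are illustrative, not the library's):

    use strict;
    use warnings;

    sub mul_1x1 {                    # carry-less 8x8 -> 16-bit product
        my ($a, $b) = @_;
        my $r = 0;
        for my $i (0 .. 7) { $r ^= $a << $i if ($b >> $i) & 1; }
        return $r;
    }

    sub gf2m_mul_2x2 {               # (a1:a0) x (b1:b0) with 8-bit words
        my ($a1, $a0, $b1, $b0) = @_;
        my $hi  = mul_1x1($a1, $b1);
        my $lo  = mul_1x1($a0, $b0);
        my $mid = mul_1x1($a1 ^ $a0, $b1 ^ $b0) ^ $hi ^ $lo;
        # fold the middle term in at half-word offset
        return ($hi ^ ($mid >> 8), $lo ^ (($mid << 8) & 0xffff));
    }

    my ($h, $l) = gf2m_mul_2x2(0xc3, 0x5a, 0x7e, 0x99);
    printf "%04x%04x\n", $h, $l;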
diff --git a/src/lib/libcrypto/bn/asm/s390x-mont.pl b/src/lib/libcrypto/bn/asm/s390x-mont.pl index f61246f5b6..9fd64e81ee 100644 --- a/src/lib/libcrypto/bn/asm/s390x-mont.pl +++ b/src/lib/libcrypto/bn/asm/s390x-mont.pl | |||
@@ -32,6 +32,33 @@ | |||
32 | # Reschedule to minimize/avoid Address Generation Interlock hazard, | 32 | # Reschedule to minimize/avoid Address Generation Interlock hazard, |
33 | # make inner loops counter-based. | 33 | # make inner loops counter-based. |
34 | 34 | ||
35 | # November 2010. | ||
36 | # | ||
37 | # Adapt for the -m31 build. If the kernel supports the "highgprs" | ||
38 | # feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit | ||
39 | # instructions and achieve "64-bit" performance even in a 31-bit | ||
40 | # legacy application context. The feature is not specific to any | ||
41 | # particular processor, as long as it's a "z-CPU"; the latter implies | ||
42 | # that the code remains z/Architecture specific. Compatibility with | ||
43 | # 32-bit BN_ULONG is achieved by swapping words after 64-bit loads; | ||
44 | # follow the _dswap-s. On z990 it was measured to perform 2.6-2.2 | ||
45 | # times better than compiler-generated code, less for longer keys... | ||
46 | |||
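Concretely, the _dswap markers sprinkled through the code below expand, in 31-bit builds only, to a 64-bit rotate by 32 bits, which exchanges the two word halves of a register; the expansion is performed by the output filter at the bottom of this file. A standalone Perl model of both, assuming a 64-bit perl (the sample line and register are illustrative):

    use strict;
    use warnings;

    sub rot32 {                  # what "rllg reg,reg,32" does to a value
        my ($x) = @_;
        return (($x << 32) | ($x >> 32)) & 0xffff_ffff_ffff_ffff;
    }
    printf "%016x\n", rot32(0x0000000100000002);  # 0000000200000001

    # Simplified model of the source filter: in a 31-bit build each
    # "_dswap %rN" marker becomes the rotate; in a 64-bit build it vanishes.
    my $SIZE_T = 4;
    my @lines  = ("\tlg\t%r5,0(%r2)", "\t_dswap\t%r5");
    for my $line (@lines) {
        $line =~ s{_dswap\s+(%r[0-9]+)}
                  { $SIZE_T == 4 ? sprintf("rllg\t%s,%s,32", $1, $1) : "" }e;
        print $line, "\n";
    }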
47 | $flavour = shift; | ||
48 | |||
49 | if ($flavour =~ /3[12]/) { | ||
50 | $SIZE_T=4; | ||
51 | $g=""; | ||
52 | } else { | ||
53 | $SIZE_T=8; | ||
54 | $g="g"; | ||
55 | } | ||
56 | |||
57 | while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} | ||
58 | open STDOUT,">$output"; | ||
59 | |||
60 | $stdframe=16*$SIZE_T+4*8; | ||
61 | |||
35 | $mn0="%r0"; | 62 | $mn0="%r0"; |
36 | $num="%r1"; | 63 | $num="%r1"; |
37 | 64 | ||
@@ -60,34 +87,44 @@ $code.=<<___; | |||
60 | .globl bn_mul_mont | 87 | .globl bn_mul_mont |
61 | .type bn_mul_mont,\@function | 88 | .type bn_mul_mont,\@function |
62 | bn_mul_mont: | 89 | bn_mul_mont: |
63 | lgf $num,164($sp) # pull $num | 90 | lgf $num,`$stdframe+$SIZE_T-4`($sp) # pull $num |
64 | sla $num,3 # $num to enumerate bytes | 91 | sla $num,`log($SIZE_T)/log(2)` # $num to enumerate bytes |
65 | la $bp,0($num,$bp) | 92 | la $bp,0($num,$bp) |
66 | 93 | ||
67 | stg %r2,16($sp) | 94 | st${g} %r2,2*$SIZE_T($sp) |
68 | 95 | ||
69 | cghi $num,16 # | 96 | cghi $num,16 # |
70 | lghi %r2,0 # | 97 | lghi %r2,0 # |
71 | blr %r14 # if($num<16) return 0; | 98 | blr %r14 # if($num<16) return 0; |
99 | ___ | ||
100 | $code.=<<___ if ($flavour =~ /3[12]/); | ||
101 | tmll $num,4 | ||
102 | bnzr %r14 # if ($num&1) return 0; | ||
103 | ___ | ||
104 | $code.=<<___ if ($flavour !~ /3[12]/); | ||
72 | cghi $num,96 # | 105 | cghi $num,96 # |
73 | bhr %r14 # if($num>96) return 0; | 106 | bhr %r14 # if($num>96) return 0; |
107 | ___ | ||
108 | $code.=<<___; | ||
109 | stm${g} %r3,%r15,3*$SIZE_T($sp) | ||
74 | 110 | ||
75 | stmg %r3,%r15,24($sp) | 111 | lghi $rp,-$stdframe-8 # leave room for carry bit |
76 | |||
77 | lghi $rp,-160-8 # leave room for carry bit | ||
78 | lcgr $j,$num # -$num | 112 | lcgr $j,$num # -$num |
79 | lgr %r0,$sp | 113 | lgr %r0,$sp |
80 | la $rp,0($rp,$sp) | 114 | la $rp,0($rp,$sp) |
81 | la $sp,0($j,$rp) # alloca | 115 | la $sp,0($j,$rp) # alloca |
82 | stg %r0,0($sp) # back chain | 116 | st${g} %r0,0($sp) # back chain |
83 | 117 | ||
84 | sra $num,3 # restore $num | 118 | sra $num,3 # restore $num |
85 | la $bp,0($j,$bp) # restore $bp | 119 | la $bp,0($j,$bp) # restore $bp |
86 | ahi $num,-1 # adjust $num for inner loop | 120 | ahi $num,-1 # adjust $num for inner loop |
87 | lg $n0,0($n0) # pull n0 | 121 | lg $n0,0($n0) # pull n0 |
122 | _dswap $n0 | ||
88 | 123 | ||
89 | lg $bi,0($bp) | 124 | lg $bi,0($bp) |
125 | _dswap $bi | ||
90 | lg $alo,0($ap) | 126 | lg $alo,0($ap) |
127 | _dswap $alo | ||
91 | mlgr $ahi,$bi # ap[0]*bp[0] | 128 | mlgr $ahi,$bi # ap[0]*bp[0] |
92 | lgr $AHI,$ahi | 129 | lgr $AHI,$ahi |
93 | 130 | ||
@@ -95,6 +132,7 @@ bn_mul_mont: | |||
95 | msgr $mn0,$n0 | 132 | msgr $mn0,$n0 |
96 | 133 | ||
97 | lg $nlo,0($np) # | 134 | lg $nlo,0($np) # |
135 | _dswap $nlo | ||
98 | mlgr $nhi,$mn0 # np[0]*m1 | 136 | mlgr $nhi,$mn0 # np[0]*m1 |
99 | algr $nlo,$alo # +="tp[0]" | 137 | algr $nlo,$alo # +="tp[0]" |
100 | lghi $NHI,0 | 138 | lghi $NHI,0 |
@@ -106,12 +144,14 @@ bn_mul_mont: | |||
106 | .align 16 | 144 | .align 16 |
107 | .L1st: | 145 | .L1st: |
108 | lg $alo,0($j,$ap) | 146 | lg $alo,0($j,$ap) |
147 | _dswap $alo | ||
109 | mlgr $ahi,$bi # ap[j]*bp[0] | 148 | mlgr $ahi,$bi # ap[j]*bp[0] |
110 | algr $alo,$AHI | 149 | algr $alo,$AHI |
111 | lghi $AHI,0 | 150 | lghi $AHI,0 |
112 | alcgr $AHI,$ahi | 151 | alcgr $AHI,$ahi |
113 | 152 | ||
114 | lg $nlo,0($j,$np) | 153 | lg $nlo,0($j,$np) |
154 | _dswap $nlo | ||
115 | mlgr $nhi,$mn0 # np[j]*m1 | 155 | mlgr $nhi,$mn0 # np[j]*m1 |
116 | algr $nlo,$NHI | 156 | algr $nlo,$NHI |
117 | lghi $NHI,0 | 157 | lghi $NHI,0 |
@@ -119,22 +159,24 @@ bn_mul_mont: | |||
119 | algr $nlo,$alo | 159 | algr $nlo,$alo |
120 | alcgr $NHI,$nhi | 160 | alcgr $NHI,$nhi |
121 | 161 | ||
122 | stg $nlo,160-8($j,$sp) # tp[j-1]= | 162 | stg $nlo,$stdframe-8($j,$sp) # tp[j-1]= |
123 | la $j,8($j) # j++ | 163 | la $j,8($j) # j++ |
124 | brct $count,.L1st | 164 | brct $count,.L1st |
125 | 165 | ||
126 | algr $NHI,$AHI | 166 | algr $NHI,$AHI |
127 | lghi $AHI,0 | 167 | lghi $AHI,0 |
128 | alcgr $AHI,$AHI # upmost overflow bit | 168 | alcgr $AHI,$AHI # upmost overflow bit |
129 | stg $NHI,160-8($j,$sp) | 169 | stg $NHI,$stdframe-8($j,$sp) |
130 | stg $AHI,160($j,$sp) | 170 | stg $AHI,$stdframe($j,$sp) |
131 | la $bp,8($bp) # bp++ | 171 | la $bp,8($bp) # bp++ |
132 | 172 | ||
133 | .Louter: | 173 | .Louter: |
134 | lg $bi,0($bp) # bp[i] | 174 | lg $bi,0($bp) # bp[i] |
175 | _dswap $bi | ||
135 | lg $alo,0($ap) | 176 | lg $alo,0($ap) |
177 | _dswap $alo | ||
136 | mlgr $ahi,$bi # ap[0]*bp[i] | 178 | mlgr $ahi,$bi # ap[0]*bp[i] |
137 | alg $alo,160($sp) # +=tp[0] | 179 | alg $alo,$stdframe($sp) # +=tp[0] |
138 | lghi $AHI,0 | 180 | lghi $AHI,0 |
139 | alcgr $AHI,$ahi | 181 | alcgr $AHI,$ahi |
140 | 182 | ||
@@ -142,6 +184,7 @@ bn_mul_mont: | |||
142 | msgr $mn0,$n0 # tp[0]*n0 | 184 | msgr $mn0,$n0 # tp[0]*n0 |
143 | 185 | ||
144 | lg $nlo,0($np) # np[0] | 186 | lg $nlo,0($np) # np[0] |
187 | _dswap $nlo | ||
145 | mlgr $nhi,$mn0 # np[0]*m1 | 188 | mlgr $nhi,$mn0 # np[0]*m1 |
146 | algr $nlo,$alo # +="tp[0]" | 189 | algr $nlo,$alo # +="tp[0]" |
147 | lghi $NHI,0 | 190 | lghi $NHI,0 |
@@ -153,14 +196,16 @@ bn_mul_mont: | |||
153 | .align 16 | 196 | .align 16 |
154 | .Linner: | 197 | .Linner: |
155 | lg $alo,0($j,$ap) | 198 | lg $alo,0($j,$ap) |
199 | _dswap $alo | ||
156 | mlgr $ahi,$bi # ap[j]*bp[i] | 200 | mlgr $ahi,$bi # ap[j]*bp[i] |
157 | algr $alo,$AHI | 201 | algr $alo,$AHI |
158 | lghi $AHI,0 | 202 | lghi $AHI,0 |
159 | alcgr $ahi,$AHI | 203 | alcgr $ahi,$AHI |
160 | alg $alo,160($j,$sp)# +=tp[j] | 204 | alg $alo,$stdframe($j,$sp)# +=tp[j] |
161 | alcgr $AHI,$ahi | 205 | alcgr $AHI,$ahi |
162 | 206 | ||
163 | lg $nlo,0($j,$np) | 207 | lg $nlo,0($j,$np) |
208 | _dswap $nlo | ||
164 | mlgr $nhi,$mn0 # np[j]*m1 | 209 | mlgr $nhi,$mn0 # np[j]*m1 |
165 | algr $nlo,$NHI | 210 | algr $nlo,$NHI |
166 | lghi $NHI,0 | 211 | lghi $NHI,0 |
@@ -168,31 +213,33 @@ bn_mul_mont: | |||
168 | algr $nlo,$alo # +="tp[j]" | 213 | algr $nlo,$alo # +="tp[j]" |
169 | alcgr $NHI,$nhi | 214 | alcgr $NHI,$nhi |
170 | 215 | ||
171 | stg $nlo,160-8($j,$sp) # tp[j-1]= | 216 | stg $nlo,$stdframe-8($j,$sp) # tp[j-1]= |
172 | la $j,8($j) # j++ | 217 | la $j,8($j) # j++ |
173 | brct $count,.Linner | 218 | brct $count,.Linner |
174 | 219 | ||
175 | algr $NHI,$AHI | 220 | algr $NHI,$AHI |
176 | lghi $AHI,0 | 221 | lghi $AHI,0 |
177 | alcgr $AHI,$AHI | 222 | alcgr $AHI,$AHI |
178 | alg $NHI,160($j,$sp)# accumulate previous upmost overflow bit | 223 | alg $NHI,$stdframe($j,$sp)# accumulate previous upmost overflow bit |
179 | lghi $ahi,0 | 224 | lghi $ahi,0 |
180 | alcgr $AHI,$ahi # new upmost overflow bit | 225 | alcgr $AHI,$ahi # new upmost overflow bit |
181 | stg $NHI,160-8($j,$sp) | 226 | stg $NHI,$stdframe-8($j,$sp) |
182 | stg $AHI,160($j,$sp) | 227 | stg $AHI,$stdframe($j,$sp) |
183 | 228 | ||
184 | la $bp,8($bp) # bp++ | 229 | la $bp,8($bp) # bp++ |
185 | clg $bp,160+8+32($j,$sp) # compare to &bp[num] | 230 | cl${g} $bp,`$stdframe+8+4*$SIZE_T`($j,$sp) # compare to &bp[num] |
186 | jne .Louter | 231 | jne .Louter |
187 | 232 | ||
188 | lg $rp,160+8+16($j,$sp) # reincarnate rp | 233 | l${g} $rp,`$stdframe+8+2*$SIZE_T`($j,$sp) # reincarnate rp |
189 | la $ap,160($sp) | 234 | la $ap,$stdframe($sp) |
190 | ahi $num,1 # restore $num, incidentally clears "borrow" | 235 | ahi $num,1 # restore $num, incidentally clears "borrow" |
191 | 236 | ||
192 | la $j,0(%r0) | 237 | la $j,0(%r0) |
193 | lr $count,$num | 238 | lr $count,$num |
194 | .Lsub: lg $alo,0($j,$ap) | 239 | .Lsub: lg $alo,0($j,$ap) |
195 | slbg $alo,0($j,$np) | 240 | lg $nlo,0($j,$np) |
241 | _dswap $nlo | ||
242 | slbgr $alo,$nlo | ||
196 | stg $alo,0($j,$rp) | 243 | stg $alo,0($j,$rp) |
197 | la $j,8($j) | 244 | la $j,8($j) |
198 | brct $count,.Lsub | 245 | brct $count,.Lsub |
@@ -207,19 +254,24 @@ bn_mul_mont: | |||
207 | 254 | ||
208 | la $j,0(%r0) | 255 | la $j,0(%r0) |
209 | lgr $count,$num | 256 | lgr $count,$num |
210 | .Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh | 257 | .Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh |
211 | stg $j,160($j,$sp) # zap tp | 258 | _dswap $alo |
259 | stg $j,$stdframe($j,$sp) # zap tp | ||
212 | stg $alo,0($j,$rp) | 260 | stg $alo,0($j,$rp) |
213 | la $j,8($j) | 261 | la $j,8($j) |
214 | brct $count,.Lcopy | 262 | brct $count,.Lcopy |
215 | 263 | ||
216 | la %r1,160+8+48($j,$sp) | 264 | la %r1,`$stdframe+8+6*$SIZE_T`($j,$sp) |
217 | lmg %r6,%r15,0(%r1) | 265 | lm${g} %r6,%r15,0(%r1) |
218 | lghi %r2,1 # signal "processed" | 266 | lghi %r2,1 # signal "processed" |
219 | br %r14 | 267 | br %r14 |
220 | .size bn_mul_mont,.-bn_mul_mont | 268 | .size bn_mul_mont,.-bn_mul_mont |
221 | .string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>" | 269 | .string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>" |
222 | ___ | 270 | ___ |
223 | 271 | ||
224 | print $code; | 272 | foreach (split("\n",$code)) { |
273 | s/\`([^\`]*)\`/eval $1/ge; | ||
274 | s/_dswap\s+(%r[0-9]+)/sprintf("rllg\t%s,%s,32",$1,$1) if($SIZE_T==4)/e; | ||
275 | print $_,"\n"; | ||
276 | } | ||
225 | close STDOUT; | 277 | close STDOUT; |
diff --git a/src/lib/libcrypto/bn/asm/x86-gf2m.pl b/src/lib/libcrypto/bn/asm/x86-gf2m.pl new file mode 100644 index 0000000000..808a1e5969 --- /dev/null +++ b/src/lib/libcrypto/bn/asm/x86-gf2m.pl | |||
@@ -0,0 +1,313 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # May 2011 | ||
11 | # | ||
12 | # The module implements bn_GF2m_mul_2x2 polynomial multiplication used | ||
13 | # in bn_gf2m.c. It's a low-hanging mechanical port from C for the | ||
14 | # time being... Except that it has three code paths: pure integer | ||
15 | # code suitable for any x86 CPU, MMX code suitable for PIII and later, | ||
16 | # and PCLMULQDQ suitable for Westmere and later. Improvement varies | ||
17 | # from one benchmark and µ-arch to another. Below are interval values | ||
18 | # for 163- and 571-bit ECDH benchmarks relative to compiler-generated | ||
19 | # code: | ||
20 | # | ||
21 | # PIII 16%-30% | ||
22 | # P4 12%-12% | ||
23 | # Opteron 18%-40% | ||
24 | # Core2 19%-44% | ||
25 | # Atom 38%-64% | ||
26 | # Westmere 53%-121%(PCLMULQDQ)/20%-32%(MMX) | ||
27 | # Sandy Bridge 72%-127%(PCLMULQDQ)/27%-23%(MMX) | ||
28 | # | ||
29 | # Note that the above improvement coefficients are not coefficients | ||
30 | # for bn_GF2m_mul_2x2 itself. For example, a 120% ECDH improvement is | ||
31 | # the result of bn_GF2m_mul_2x2 being >4x faster. As it gets faster, | ||
32 | # the benchmark is more and more dominated by other subroutines, most | ||
33 | # notably by BN_GF2m_mod[_mul]_arr... | ||
34 | |||
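Both non-PCLMULQDQ paths below build the same 1x1 primitive: precompute the eight GF(2) multiples of a that a 3-bit window of b can select (the tab[0..7] stores), then scan b three bits at a time. A standalone Perl sketch on deliberately narrow 16-bit inputs - narrow so that, unlike the real code, no top-bit masking or sign-broadcast repair of lost high bits is needed:

    use strict;
    use warnings;

    sub mul_1x1_window {             # carry-less multiply, 16-bit inputs
        my ($a, $b) = @_;
        my @tab = (0) x 8;           # tab[i] = clmul(i, a) for i in 0..7
        for my $i (1 .. 7) {
            $tab[$i] ^= $a      if $i & 1;
            $tab[$i] ^= $a << 1 if $i & 2;
            $tab[$i] ^= $a << 2 if $i & 4;
        }
        my $r = 0;
        for (my $off = 0; $off < 16; $off += 3) {
            $r ^= $tab[($b >> $off) & 7] << $off;  # 3 bits of b at a time
        }
        return $r;
    }

    printf "%08x\n", mul_1x1_window(0xffff, 0xffff);  # 55555555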
35 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
36 | push(@INC,"${dir}","${dir}../../perlasm"); | ||
37 | require "x86asm.pl"; | ||
38 | |||
39 | &asm_init($ARGV[0],$0,$x86only = $ARGV[$#ARGV] eq "386"); | ||
40 | |||
41 | $sse2=0; | ||
42 | for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } | ||
43 | |||
44 | &external_label("OPENSSL_ia32cap_P") if ($sse2); | ||
45 | |||
46 | $a="eax"; | ||
47 | $b="ebx"; | ||
48 | ($a1,$a2,$a4)=("ecx","edx","ebp"); | ||
49 | |||
50 | $R="mm0"; | ||
51 | @T=("mm1","mm2"); | ||
52 | ($A,$B,$B30,$B31)=("mm2","mm3","mm4","mm5"); | ||
53 | @i=("esi","edi"); | ||
54 | |||
55 | if (!$x86only) { | ||
56 | &function_begin_B("_mul_1x1_mmx"); | ||
57 | &sub ("esp",32+4); | ||
58 | &mov ($a1,$a); | ||
59 | &lea ($a2,&DWP(0,$a,$a)); | ||
60 | &and ($a1,0x3fffffff); | ||
61 | &lea ($a4,&DWP(0,$a2,$a2)); | ||
62 | &mov (&DWP(0*4,"esp"),0); | ||
63 | &and ($a2,0x7fffffff); | ||
64 | &movd ($A,$a); | ||
65 | &movd ($B,$b); | ||
66 | &mov (&DWP(1*4,"esp"),$a1); # a1 | ||
67 | &xor ($a1,$a2); # a1^a2 | ||
68 | &pxor ($B31,$B31); | ||
69 | &pxor ($B30,$B30); | ||
70 | &mov (&DWP(2*4,"esp"),$a2); # a2 | ||
71 | &xor ($a2,$a4); # a2^a4 | ||
72 | &mov (&DWP(3*4,"esp"),$a1); # a1^a2 | ||
73 | &pcmpgtd($B31,$A); # broadcast 31st bit | ||
74 | &paddd ($A,$A); # $A<<=1 | ||
75 | &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4 | ||
76 | &mov (&DWP(4*4,"esp"),$a4); # a4 | ||
77 | &xor ($a4,$a2); # a2=a4^a2^a4 | ||
78 | &pand ($B31,$B); | ||
79 | &pcmpgtd($B30,$A); # broadcast 30th bit | ||
80 | &mov (&DWP(5*4,"esp"),$a1); # a1^a4 | ||
81 | &xor ($a4,$a1); # a1^a2^a4 | ||
82 | &psllq ($B31,31); | ||
83 | &pand ($B30,$B); | ||
84 | &mov (&DWP(6*4,"esp"),$a2); # a2^a4 | ||
85 | &mov (@i[0],0x7); | ||
86 | &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4 | ||
87 | &mov ($a4,@i[0]); | ||
88 | &and (@i[0],$b); | ||
89 | &shr ($b,3); | ||
90 | &mov (@i[1],$a4); | ||
91 | &psllq ($B30,30); | ||
92 | &and (@i[1],$b); | ||
93 | &shr ($b,3); | ||
94 | &movd ($R,&DWP(0,"esp",@i[0],4)); | ||
95 | &mov (@i[0],$a4); | ||
96 | &and (@i[0],$b); | ||
97 | &shr ($b,3); | ||
98 | for($n=1;$n<9;$n++) { | ||
99 | &movd (@T[1],&DWP(0,"esp",@i[1],4)); | ||
100 | &mov (@i[1],$a4); | ||
101 | &psllq (@T[1],3*$n); | ||
102 | &and (@i[1],$b); | ||
103 | &shr ($b,3); | ||
104 | &pxor ($R,@T[1]); | ||
105 | |||
106 | push(@i,shift(@i)); push(@T,shift(@T)); | ||
107 | } | ||
108 | &movd (@T[1],&DWP(0,"esp",@i[1],4)); | ||
109 | &pxor ($R,$B30); | ||
110 | &psllq (@T[1],3*$n++); | ||
111 | &pxor ($R,@T[1]); | ||
112 | |||
113 | &movd (@T[0],&DWP(0,"esp",@i[0],4)); | ||
114 | &pxor ($R,$B31); | ||
115 | &psllq (@T[0],3*$n); | ||
116 | &add ("esp",32+4); | ||
117 | &pxor ($R,@T[0]); | ||
118 | &ret (); | ||
119 | &function_end_B("_mul_1x1_mmx"); | ||
120 | } | ||
121 | |||
122 | ($lo,$hi)=("eax","edx"); | ||
123 | @T=("ecx","ebp"); | ||
124 | |||
125 | &function_begin_B("_mul_1x1_ialu"); | ||
126 | &sub ("esp",32+4); | ||
127 | &mov ($a1,$a); | ||
128 | &lea ($a2,&DWP(0,$a,$a)); | ||
129 | &lea ($a4,&DWP(0,"",$a,4)); | ||
130 | &and ($a1,0x3fffffff); | ||
131 | &lea (@i[1],&DWP(0,$lo,$lo)); | ||
132 | &sar ($lo,31); # broadcast 31st bit | ||
133 | &mov (&DWP(0*4,"esp"),0); | ||
134 | &and ($a2,0x7fffffff); | ||
135 | &mov (&DWP(1*4,"esp"),$a1); # a1 | ||
136 | &xor ($a1,$a2); # a1^a2 | ||
137 | &mov (&DWP(2*4,"esp"),$a2); # a2 | ||
138 | &xor ($a2,$a4); # a2^a4 | ||
139 | &mov (&DWP(3*4,"esp"),$a1); # a1^a2 | ||
140 | &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4 | ||
141 | &mov (&DWP(4*4,"esp"),$a4); # a4 | ||
142 | &xor ($a4,$a2); # a2=a4^a2^a4 | ||
143 | &mov (&DWP(5*4,"esp"),$a1); # a1^a4 | ||
144 | &xor ($a4,$a1); # a1^a2^a4 | ||
145 | &sar (@i[1],31); # broadcast 30th bit | ||
146 | &and ($lo,$b); | ||
147 | &mov (&DWP(6*4,"esp"),$a2); # a2^a4 | ||
148 | &and (@i[1],$b); | ||
149 | &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4 | ||
150 | &mov ($hi,$lo); | ||
151 | &shl ($lo,31); | ||
152 | &mov (@T[0],@i[1]); | ||
153 | &shr ($hi,1); | ||
154 | |||
155 | &mov (@i[0],0x7); | ||
156 | &shl (@i[1],30); | ||
157 | &and (@i[0],$b); | ||
158 | &shr (@T[0],2); | ||
159 | &xor ($lo,@i[1]); | ||
160 | |||
161 | &shr ($b,3); | ||
162 | &mov (@i[1],0x7); # 5-byte instruction!? | ||
163 | &and (@i[1],$b); | ||
164 | &shr ($b,3); | ||
165 | &xor ($hi,@T[0]); | ||
166 | &xor ($lo,&DWP(0,"esp",@i[0],4)); | ||
167 | &mov (@i[0],0x7); | ||
168 | &and (@i[0],$b); | ||
169 | &shr ($b,3); | ||
170 | for($n=1;$n<9;$n++) { | ||
171 | &mov (@T[1],&DWP(0,"esp",@i[1],4)); | ||
172 | &mov (@i[1],0x7); | ||
173 | &mov (@T[0],@T[1]); | ||
174 | &shl (@T[1],3*$n); | ||
175 | &and (@i[1],$b); | ||
176 | &shr (@T[0],32-3*$n); | ||
177 | &xor ($lo,@T[1]); | ||
178 | &shr ($b,3); | ||
179 | &xor ($hi,@T[0]); | ||
180 | |||
181 | push(@i,shift(@i)); push(@T,shift(@T)); | ||
182 | } | ||
183 | &mov (@T[1],&DWP(0,"esp",@i[1],4)); | ||
184 | &mov (@T[0],@T[1]); | ||
185 | &shl (@T[1],3*$n); | ||
186 | &mov (@i[1],&DWP(0,"esp",@i[0],4)); | ||
187 | &shr (@T[0],32-3*$n); $n++; | ||
188 | &mov (@i[0],@i[1]); | ||
189 | &xor ($lo,@T[1]); | ||
190 | &shl (@i[1],3*$n); | ||
191 | &xor ($hi,@T[0]); | ||
192 | &shr (@i[0],32-3*$n); | ||
193 | &xor ($lo,@i[1]); | ||
194 | &xor ($hi,@i[0]); | ||
195 | |||
196 | &add ("esp",32+4); | ||
197 | &ret (); | ||
198 | &function_end_B("_mul_1x1_ialu"); | ||
199 | |||
200 | # void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0); | ||
201 | &function_begin_B("bn_GF2m_mul_2x2"); | ||
202 | if (!$x86only) { | ||
203 | &picmeup("edx","OPENSSL_ia32cap_P"); | ||
204 | &mov ("eax",&DWP(0,"edx")); | ||
205 | &mov ("edx",&DWP(4,"edx")); | ||
206 | &test ("eax",1<<23); # check MMX bit | ||
207 | &jz (&label("ialu")); | ||
208 | if ($sse2) { | ||
209 | &test ("eax",1<<24); # check FXSR bit | ||
210 | &jz (&label("mmx")); | ||
211 | &test ("edx",1<<1); # check PCLMULQDQ bit | ||
212 | &jz (&label("mmx")); | ||
213 | |||
214 | &movups ("xmm0",&QWP(8,"esp")); | ||
215 | &shufps ("xmm0","xmm0",0b10110001); | ||
216 | &pclmulqdq ("xmm0","xmm0",1); | ||
217 | &mov ("eax",&DWP(4,"esp")); | ||
218 | &movups (&QWP(0,"eax"),"xmm0"); | ||
219 | &ret (); | ||
220 | |||
221 | &set_label("mmx",16); | ||
222 | } | ||
223 | &push ("ebp"); | ||
224 | &push ("ebx"); | ||
225 | &push ("esi"); | ||
226 | &push ("edi"); | ||
227 | &mov ($a,&wparam(1)); | ||
228 | &mov ($b,&wparam(3)); | ||
229 | &call ("_mul_1x1_mmx"); # a1·b1 | ||
230 | &movq ("mm7",$R); | ||
231 | |||
232 | &mov ($a,&wparam(2)); | ||
233 | &mov ($b,&wparam(4)); | ||
234 | &call ("_mul_1x1_mmx"); # a0·b0 | ||
235 | &movq ("mm6",$R); | ||
236 | |||
237 | &mov ($a,&wparam(1)); | ||
238 | &mov ($b,&wparam(3)); | ||
239 | &xor ($a,&wparam(2)); | ||
240 | &xor ($b,&wparam(4)); | ||
241 | &call ("_mul_1x1_mmx"); # (a0+a1)·(b0+b1) | ||
242 | &pxor ($R,"mm7"); | ||
243 | &mov ($a,&wparam(0)); | ||
244 | &pxor ($R,"mm6"); # (a0+a1)·(b0+b1)-a1·b1-a0·b0 | ||
245 | |||
246 | &movq ($A,$R); | ||
247 | &psllq ($R,32); | ||
248 | &pop ("edi"); | ||
249 | &psrlq ($A,32); | ||
250 | &pop ("esi"); | ||
251 | &pxor ($R,"mm6"); | ||
252 | &pop ("ebx"); | ||
253 | &pxor ($A,"mm7"); | ||
254 | &movq (&QWP(0,$a),$R); | ||
255 | &pop ("ebp"); | ||
256 | &movq (&QWP(8,$a),$A); | ||
257 | &emms (); | ||
258 | &ret (); | ||
259 | &set_label("ialu",16); | ||
260 | } | ||
261 | &push ("ebp"); | ||
262 | &push ("ebx"); | ||
263 | &push ("esi"); | ||
264 | &push ("edi"); | ||
265 | &stack_push(4+1); | ||
266 | |||
267 | &mov ($a,&wparam(1)); | ||
268 | &mov ($b,&wparam(3)); | ||
269 | &call ("_mul_1x1_ialu"); # a1·b1 | ||
270 | &mov (&DWP(8,"esp"),$lo); | ||
271 | &mov (&DWP(12,"esp"),$hi); | ||
272 | |||
273 | &mov ($a,&wparam(2)); | ||
274 | &mov ($b,&wparam(4)); | ||
275 | &call ("_mul_1x1_ialu"); # a0·b0 | ||
276 | &mov (&DWP(0,"esp"),$lo); | ||
277 | &mov (&DWP(4,"esp"),$hi); | ||
278 | |||
279 | &mov ($a,&wparam(1)); | ||
280 | &mov ($b,&wparam(3)); | ||
281 | &xor ($a,&wparam(2)); | ||
282 | &xor ($b,&wparam(4)); | ||
283 | &call ("_mul_1x1_ialu"); # (a0+a1)·(b0+b1) | ||
284 | |||
285 | &mov ("ebp",&wparam(0)); | ||
286 | @r=("ebx","ecx","edi","esi"); | ||
287 | &mov (@r[0],&DWP(0,"esp")); | ||
288 | &mov (@r[1],&DWP(4,"esp")); | ||
289 | &mov (@r[2],&DWP(8,"esp")); | ||
290 | &mov (@r[3],&DWP(12,"esp")); | ||
291 | |||
292 | &xor ($lo,$hi); | ||
293 | &xor ($hi,@r[1]); | ||
294 | &xor ($lo,@r[0]); | ||
295 | &mov (&DWP(0,"ebp"),@r[0]); | ||
296 | &xor ($hi,@r[2]); | ||
297 | &mov (&DWP(12,"ebp"),@r[3]); | ||
298 | &xor ($lo,@r[3]); | ||
299 | &stack_pop(4+1); | ||
300 | &xor ($hi,@r[3]); | ||
301 | &pop ("edi"); | ||
302 | &xor ($lo,$hi); | ||
303 | &pop ("esi"); | ||
304 | &mov (&DWP(8,"ebp"),$hi); | ||
305 | &pop ("ebx"); | ||
306 | &mov (&DWP(4,"ebp"),$lo); | ||
307 | &pop ("ebp"); | ||
308 | &ret (); | ||
309 | &function_end_B("bn_GF2m_mul_2x2"); | ||
310 | |||
311 | &asciz ("GF(2^m) Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>"); | ||
312 | |||
313 | &asm_finish(); | ||
diff --git a/src/lib/libcrypto/bn/asm/x86_64-gf2m.pl b/src/lib/libcrypto/bn/asm/x86_64-gf2m.pl new file mode 100644 index 0000000000..1658acbbdd --- /dev/null +++ b/src/lib/libcrypto/bn/asm/x86_64-gf2m.pl | |||
@@ -0,0 +1,389 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # May 2011 | ||
11 | # | ||
12 | # The module implements bn_GF2m_mul_2x2 polynomial multiplication used | ||
13 | # in bn_gf2m.c. It's a kind of low-hanging mechanical port from C for | ||
14 | # the time being... Except that it has two code paths: code suitable | ||
15 | # for any x86_64 CPU and a PCLMULQDQ one suitable for Westmere and | ||
16 | # later. Improvement varies from one benchmark and µ-arch to another. | ||
17 | # The vanilla code path is at most 20% faster than compiler-generated | ||
18 | # code [not very impressive], while the PCLMULQDQ one is a whole | ||
19 | # 85%-160% better on 163- and 571-bit ECDH benchmarks on Intel CPUs. | ||
20 | # Keep in mind that these coefficients are not the ones for | ||
21 | # bn_GF2m_mul_2x2 itself, as not all CPU time is burnt in it... | ||
22 | |||
23 | $flavour = shift; | ||
24 | $output = shift; | ||
25 | if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } | ||
26 | |||
27 | $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); | ||
28 | |||
29 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
30 | ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or | ||
31 | ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or | ||
32 | die "can't locate x86_64-xlate.pl"; | ||
33 | |||
34 | open STDOUT,"| $^X $xlate $flavour $output"; | ||
35 | |||
36 | ($lo,$hi)=("%rax","%rdx"); $a=$lo; | ||
37 | ($i0,$i1)=("%rsi","%rdi"); | ||
38 | ($t0,$t1)=("%rbx","%rcx"); | ||
39 | ($b,$mask)=("%rbp","%r8"); | ||
40 | ($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(9..15)); | ||
41 | ($R,$Tx)=("%xmm0","%xmm1"); | ||
42 | |||
43 | $code.=<<___; | ||
44 | .text | ||
45 | |||
46 | .type _mul_1x1,\@abi-omnipotent | ||
47 | .align 16 | ||
48 | _mul_1x1: | ||
49 | sub \$128+8,%rsp | ||
50 | mov \$-1,$a1 | ||
51 | lea ($a,$a),$i0 | ||
52 | shr \$3,$a1 | ||
53 | lea (,$a,4),$i1 | ||
54 | and $a,$a1 # a1=a&0x1fffffffffffffff | ||
55 | lea (,$a,8),$a8 | ||
56 | sar \$63,$a # broadcast 63rd bit | ||
57 | lea ($a1,$a1),$a2 | ||
58 | sar \$63,$i0 # broadcast 62nd bit | ||
59 | lea (,$a1,4),$a4 | ||
60 | and $b,$a | ||
61 | sar \$63,$i1 # broadcast 61st bit | ||
62 | mov $a,$hi # $a is $lo | ||
63 | shl \$63,$lo | ||
64 | and $b,$i0 | ||
65 | shr \$1,$hi | ||
66 | mov $i0,$t1 | ||
67 | shl \$62,$i0 | ||
68 | and $b,$i1 | ||
69 | shr \$2,$t1 | ||
70 | xor $i0,$lo | ||
71 | mov $i1,$t0 | ||
72 | shl \$61,$i1 | ||
73 | xor $t1,$hi | ||
74 | shr \$3,$t0 | ||
75 | xor $i1,$lo | ||
76 | xor $t0,$hi | ||
77 | |||
78 | mov $a1,$a12 | ||
79 | movq \$0,0(%rsp) # tab[0]=0 | ||
80 | xor $a2,$a12 # a1^a2 | ||
81 | mov $a1,8(%rsp) # tab[1]=a1 | ||
82 | mov $a4,$a48 | ||
83 | mov $a2,16(%rsp) # tab[2]=a2 | ||
84 | xor $a8,$a48 # a4^a8 | ||
85 | mov $a12,24(%rsp) # tab[3]=a1^a2 | ||
86 | |||
87 | xor $a4,$a1 | ||
88 | mov $a4,32(%rsp) # tab[4]=a4 | ||
89 | xor $a4,$a2 | ||
90 | mov $a1,40(%rsp) # tab[5]=a1^a4 | ||
91 | xor $a4,$a12 | ||
92 | mov $a2,48(%rsp) # tab[6]=a2^a4 | ||
93 | xor $a48,$a1 # a1^a4^a4^a8=a1^a8 | ||
94 | mov $a12,56(%rsp) # tab[7]=a1^a2^a4 | ||
95 | xor $a48,$a2 # a2^a4^a4^a8=a2^a8 | ||
96 | |||
97 | mov $a8,64(%rsp) # tab[8]=a8 | ||
98 | xor $a48,$a12 # a1^a2^a4^a4^a8=a1^a2^a8 | ||
99 | mov $a1,72(%rsp) # tab[9]=a1^a8 | ||
100 | xor $a4,$a1 # a1^a8^a4 | ||
101 | mov $a2,80(%rsp) # tab[10]=a2^a8 | ||
102 | xor $a4,$a2 # a2^a8^a4 | ||
103 | mov $a12,88(%rsp) # tab[11]=a1^a2^a8 | ||
104 | |||
105 | xor $a4,$a12 # a1^a2^a8^a4 | ||
106 | mov $a48,96(%rsp) # tab[12]=a4^a8 | ||
107 | mov $mask,$i0 | ||
108 | mov $a1,104(%rsp) # tab[13]=a1^a4^a8 | ||
109 | and $b,$i0 | ||
110 | mov $a2,112(%rsp) # tab[14]=a2^a4^a8 | ||
111 | shr \$4,$b | ||
112 | mov $a12,120(%rsp) # tab[15]=a1^a2^a4^a8 | ||
113 | mov $mask,$i1 | ||
114 | and $b,$i1 | ||
115 | shr \$4,$b | ||
116 | |||
117 | movq (%rsp,$i0,8),$R # half of calculations is done in SSE2 | ||
118 | mov $mask,$i0 | ||
119 | and $b,$i0 | ||
120 | shr \$4,$b | ||
121 | ___ | ||
122 | for ($n=1;$n<8;$n++) { | ||
123 | $code.=<<___; | ||
124 | mov (%rsp,$i1,8),$t1 | ||
125 | mov $mask,$i1 | ||
126 | mov $t1,$t0 | ||
127 | shl \$`8*$n-4`,$t1 | ||
128 | and $b,$i1 | ||
129 | movq (%rsp,$i0,8),$Tx | ||
130 | shr \$`64-(8*$n-4)`,$t0 | ||
131 | xor $t1,$lo | ||
132 | pslldq \$$n,$Tx | ||
133 | mov $mask,$i0 | ||
134 | shr \$4,$b | ||
135 | xor $t0,$hi | ||
136 | and $b,$i0 | ||
137 | shr \$4,$b | ||
138 | pxor $Tx,$R | ||
139 | ___ | ||
140 | } | ||
141 | $code.=<<___; | ||
142 | mov (%rsp,$i1,8),$t1 | ||
143 | mov $t1,$t0 | ||
144 | shl \$`8*$n-4`,$t1 | ||
145 | movq $R,$i0 | ||
146 | shr \$`64-(8*$n-4)`,$t0 | ||
147 | xor $t1,$lo | ||
148 | psrldq \$8,$R | ||
149 | xor $t0,$hi | ||
150 | movq $R,$i1 | ||
151 | xor $i0,$lo | ||
152 | xor $i1,$hi | ||
153 | |||
154 | add \$128+8,%rsp | ||
155 | ret | ||
156 | .Lend_mul_1x1: | ||
157 | .size _mul_1x1,.-_mul_1x1 | ||
158 | ___ | ||
159 | |||
160 | ($rp,$a1,$a0,$b1,$b0) = $win64? ("%rcx","%rdx","%r8", "%r9","%r10") : # Win64 order | ||
161 | ("%rdi","%rsi","%rdx","%rcx","%r8"); # Unix order | ||
162 | |||
163 | $code.=<<___; | ||
164 | .extern OPENSSL_ia32cap_P | ||
165 | .globl bn_GF2m_mul_2x2 | ||
166 | .type bn_GF2m_mul_2x2,\@abi-omnipotent | ||
167 | .align 16 | ||
168 | bn_GF2m_mul_2x2: | ||
169 | mov OPENSSL_ia32cap_P(%rip),%rax | ||
170 | bt \$33,%rax | ||
171 | jnc .Lvanilla_mul_2x2 | ||
172 | |||
173 | movq $a1,%xmm0 | ||
174 | movq $b1,%xmm1 | ||
175 | movq $a0,%xmm2 | ||
176 | ___ | ||
177 | $code.=<<___ if ($win64); | ||
178 | movq 40(%rsp),%xmm3 | ||
179 | ___ | ||
180 | $code.=<<___ if (!$win64); | ||
181 | movq $b0,%xmm3 | ||
182 | ___ | ||
183 | $code.=<<___; | ||
184 | movdqa %xmm0,%xmm4 | ||
185 | movdqa %xmm1,%xmm5 | ||
186 | pclmulqdq \$0,%xmm1,%xmm0 # a1·b1 | ||
187 | pxor %xmm2,%xmm4 | ||
188 | pxor %xmm3,%xmm5 | ||
189 | pclmulqdq \$0,%xmm3,%xmm2 # a0·b0 | ||
190 | pclmulqdq \$0,%xmm5,%xmm4 # (a0+a1)·(b0+b1) | ||
191 | xorps %xmm0,%xmm4 | ||
192 | xorps %xmm2,%xmm4 # (a0+a1)·(b0+b1)-a0·b0-a1·b1 | ||
193 | movdqa %xmm4,%xmm5 | ||
194 | pslldq \$8,%xmm4 | ||
195 | psrldq \$8,%xmm5 | ||
196 | pxor %xmm4,%xmm2 | ||
197 | pxor %xmm5,%xmm0 | ||
198 | movdqu %xmm2,0($rp) | ||
199 | movdqu %xmm0,16($rp) | ||
200 | ret | ||
201 | |||
202 | .align 16 | ||
203 | .Lvanilla_mul_2x2: | ||
204 | lea -8*17(%rsp),%rsp | ||
205 | ___ | ||
206 | $code.=<<___ if ($win64); | ||
207 | mov `8*17+40`(%rsp),$b0 | ||
208 | mov %rdi,8*15(%rsp) | ||
209 | mov %rsi,8*16(%rsp) | ||
210 | ___ | ||
211 | $code.=<<___; | ||
212 | mov %r14,8*10(%rsp) | ||
213 | mov %r13,8*11(%rsp) | ||
214 | mov %r12,8*12(%rsp) | ||
215 | mov %rbp,8*13(%rsp) | ||
216 | mov %rbx,8*14(%rsp) | ||
217 | .Lbody_mul_2x2: | ||
218 | mov $rp,32(%rsp) # save the arguments | ||
219 | mov $a1,40(%rsp) | ||
220 | mov $a0,48(%rsp) | ||
221 | mov $b1,56(%rsp) | ||
222 | mov $b0,64(%rsp) | ||
223 | |||
224 | mov \$0xf,$mask | ||
225 | mov $a1,$a | ||
226 | mov $b1,$b | ||
227 | call _mul_1x1 # a1·b1 | ||
228 | mov $lo,16(%rsp) | ||
229 | mov $hi,24(%rsp) | ||
230 | |||
231 | mov 48(%rsp),$a | ||
232 | mov 64(%rsp),$b | ||
233 | call _mul_1x1 # a0·b0 | ||
234 | mov $lo,0(%rsp) | ||
235 | mov $hi,8(%rsp) | ||
236 | |||
237 | mov 40(%rsp),$a | ||
238 | mov 56(%rsp),$b | ||
239 | xor 48(%rsp),$a | ||
240 | xor 64(%rsp),$b | ||
241 | call _mul_1x1 # (a0+a1)·(b0+b1) | ||
242 | ___ | ||
243 | @r=("%rbx","%rcx","%rdi","%rsi"); | ||
244 | $code.=<<___; | ||
245 | mov 0(%rsp),@r[0] | ||
246 | mov 8(%rsp),@r[1] | ||
247 | mov 16(%rsp),@r[2] | ||
248 | mov 24(%rsp),@r[3] | ||
249 | mov 32(%rsp),%rbp | ||
250 | |||
251 | xor $hi,$lo | ||
252 | xor @r[1],$hi | ||
253 | xor @r[0],$lo | ||
254 | mov @r[0],0(%rbp) | ||
255 | xor @r[2],$hi | ||
256 | mov @r[3],24(%rbp) | ||
257 | xor @r[3],$lo | ||
258 | xor @r[3],$hi | ||
259 | xor $hi,$lo | ||
260 | mov $hi,16(%rbp) | ||
261 | mov $lo,8(%rbp) | ||
262 | |||
263 | mov 8*10(%rsp),%r14 | ||
264 | mov 8*11(%rsp),%r13 | ||
265 | mov 8*12(%rsp),%r12 | ||
266 | mov 8*13(%rsp),%rbp | ||
267 | mov 8*14(%rsp),%rbx | ||
268 | ___ | ||
269 | $code.=<<___ if ($win64); | ||
270 | mov 8*15(%rsp),%rdi | ||
271 | mov 8*16(%rsp),%rsi | ||
272 | ___ | ||
273 | $code.=<<___; | ||
274 | lea 8*17(%rsp),%rsp | ||
275 | ret | ||
276 | .Lend_mul_2x2: | ||
277 | .size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2 | ||
278 | .asciz "GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>" | ||
279 | .align 16 | ||
280 | ___ | ||
281 | |||
282 | # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, | ||
283 | # CONTEXT *context,DISPATCHER_CONTEXT *disp) | ||
284 | if ($win64) { | ||
285 | $rec="%rcx"; | ||
286 | $frame="%rdx"; | ||
287 | $context="%r8"; | ||
288 | $disp="%r9"; | ||
289 | |||
290 | $code.=<<___; | ||
291 | .extern __imp_RtlVirtualUnwind | ||
292 | |||
293 | .type se_handler,\@abi-omnipotent | ||
294 | .align 16 | ||
295 | se_handler: | ||
296 | push %rsi | ||
297 | push %rdi | ||
298 | push %rbx | ||
299 | push %rbp | ||
300 | push %r12 | ||
301 | push %r13 | ||
302 | push %r14 | ||
303 | push %r15 | ||
304 | pushfq | ||
305 | sub \$64,%rsp | ||
306 | |||
307 | mov 152($context),%rax # pull context->Rsp | ||
308 | mov 248($context),%rbx # pull context->Rip | ||
309 | |||
310 | lea .Lbody_mul_2x2(%rip),%r10 | ||
311 | cmp %r10,%rbx # context->Rip<"prologue" label | ||
312 | jb .Lin_prologue | ||
313 | |||
314 | mov 8*10(%rax),%r14 # mimic epilogue | ||
315 | mov 8*11(%rax),%r13 | ||
316 | mov 8*12(%rax),%r12 | ||
317 | mov 8*13(%rax),%rbp | ||
318 | mov 8*14(%rax),%rbx | ||
319 | mov 8*15(%rax),%rdi | ||
320 | mov 8*16(%rax),%rsi | ||
321 | |||
322 | mov %rbx,144($context) # restore context->Rbx | ||
323 | mov %rbp,160($context) # restore context->Rbp | ||
324 | mov %rsi,168($context) # restore context->Rsi | ||
325 | mov %rdi,176($context) # restore context->Rdi | ||
326 | mov %r12,216($context) # restore context->R12 | ||
327 | mov %r13,224($context) # restore context->R13 | ||
328 | mov %r14,232($context) # restore context->R14 | ||
329 | |||
330 | .Lin_prologue: | ||
331 | lea 8*17(%rax),%rax | ||
332 | mov %rax,152($context) # restore context->Rsp | ||
333 | |||
334 | mov 40($disp),%rdi # disp->ContextRecord | ||
335 | mov $context,%rsi # context | ||
336 | mov \$154,%ecx # sizeof(CONTEXT) | ||
337 | .long 0xa548f3fc # cld; rep movsq | ||
338 | |||
339 | mov $disp,%rsi | ||
340 | xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER | ||
341 | mov 8(%rsi),%rdx # arg2, disp->ImageBase | ||
342 | mov 0(%rsi),%r8 # arg3, disp->ControlPc | ||
343 | mov 16(%rsi),%r9 # arg4, disp->FunctionEntry | ||
344 | mov 40(%rsi),%r10 # disp->ContextRecord | ||
345 | lea 56(%rsi),%r11 # &disp->HandlerData | ||
346 | lea 24(%rsi),%r12 # &disp->EstablisherFrame | ||
347 | mov %r10,32(%rsp) # arg5 | ||
348 | mov %r11,40(%rsp) # arg6 | ||
349 | mov %r12,48(%rsp) # arg7 | ||
350 | mov %rcx,56(%rsp) # arg8, (NULL) | ||
351 | call *__imp_RtlVirtualUnwind(%rip) | ||
352 | |||
353 | mov \$1,%eax # ExceptionContinueSearch | ||
354 | add \$64,%rsp | ||
355 | popfq | ||
356 | pop %r15 | ||
357 | pop %r14 | ||
358 | pop %r13 | ||
359 | pop %r12 | ||
360 | pop %rbp | ||
361 | pop %rbx | ||
362 | pop %rdi | ||
363 | pop %rsi | ||
364 | ret | ||
365 | .size se_handler,.-se_handler | ||
366 | |||
367 | .section .pdata | ||
368 | .align 4 | ||
369 | .rva _mul_1x1 | ||
370 | .rva .Lend_mul_1x1 | ||
371 | .rva .LSEH_info_1x1 | ||
372 | |||
373 | .rva .Lvanilla_mul_2x2 | ||
374 | .rva .Lend_mul_2x2 | ||
375 | .rva .LSEH_info_2x2 | ||
376 | .section .xdata | ||
377 | .align 8 | ||
378 | .LSEH_info_1x1: | ||
379 | .byte 0x01,0x07,0x02,0x00 | ||
380 | .byte 0x07,0x01,0x11,0x00 # sub rsp,128+8 | ||
381 | .LSEH_info_2x2: | ||
382 | .byte 9,0,0,0 | ||
383 | .rva se_handler | ||
384 | ___ | ||
385 | } | ||
386 | |||
387 | $code =~ s/\`([^\`]*)\`/eval($1)/gem; | ||
388 | print $code; | ||
389 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/bn/asm/x86_64-mont.pl b/src/lib/libcrypto/bn/asm/x86_64-mont.pl index 3b7a6f243f..5d79b35e1c 100755 --- a/src/lib/libcrypto/bn/asm/x86_64-mont.pl +++ b/src/lib/libcrypto/bn/asm/x86_64-mont.pl | |||
@@ -1,7 +1,7 @@ | |||
1 | #!/usr/bin/env perl | 1 | #!/usr/bin/env perl |
2 | 2 | ||
3 | # ==================================================================== | 3 | # ==================================================================== |
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | 4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL |
5 | # project. The module is, however, dual licensed under OpenSSL and | 5 | # project. The module is, however, dual licensed under OpenSSL and |
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | 6 | # CRYPTOGAMS licenses depending on where you obtain it. For further |
7 | # details see http://www.openssl.org/~appro/cryptogams/. | 7 | # details see http://www.openssl.org/~appro/cryptogams/. |
@@ -15,6 +15,20 @@ | |||
15 | # respectable 50%. It remains to be seen if loop unrolling and | 15 | # respectable 50%. It remains to be seen if loop unrolling and | ||
16 | # dedicated squaring routine can provide further improvement... | 16 | # dedicated squaring routine can provide further improvement... |
17 | 17 | ||
18 | # July 2011. | ||
19 | # | ||
20 | # Add dedicated squaring procedure. Performance improvement varies | ||
21 | # from platform to platform, but on average it's ~5%/15%/25%/33% | ||
22 | # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively. | ||
23 | |||
24 | # August 2011. | ||
25 | # | ||
26 | # Unroll and modulo-schedule inner loops in such a manner that they | ||
27 | # are "fallen through" for input lengths of 8, which is critical for | ||
28 | # 1024-bit RSA *sign*. Average performance improvement in comparison | ||
29 | # to the *initial* version of this module from 2005 is ~0%/30%/40%/45% | ||
30 | # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively. | ||
31 | |||
18 | $flavour = shift; | 32 | $flavour = shift; |
19 | $output = shift; | 33 | $output = shift; |
20 | if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } | 34 | if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } |
@@ -37,7 +51,6 @@ $n0="%r8"; # const BN_ULONG *n0, | |||
37 | $num="%r9"; # int num); | 51 | $num="%r9"; # int num); |
38 | $lo0="%r10"; | 52 | $lo0="%r10"; |
39 | $hi0="%r11"; | 53 | $hi0="%r11"; |
40 | $bp="%r12"; # reassign $bp | ||
41 | $hi1="%r13"; | 54 | $hi1="%r13"; |
42 | $i="%r14"; | 55 | $i="%r14"; |
43 | $j="%r15"; | 56 | $j="%r15"; |
@@ -51,6 +64,16 @@ $code=<<___; | |||
51 | .type bn_mul_mont,\@function,6 | 64 | .type bn_mul_mont,\@function,6 |
52 | .align 16 | 65 | .align 16 |
53 | bn_mul_mont: | 66 | bn_mul_mont: |
67 | test \$3,${num}d | ||
68 | jnz .Lmul_enter | ||
69 | cmp \$8,${num}d | ||
70 | jb .Lmul_enter | ||
71 | cmp $ap,$bp | ||
72 | jne .Lmul4x_enter | ||
73 | jmp .Lsqr4x_enter | ||
74 | |||
75 | .align 16 | ||
76 | .Lmul_enter: | ||
54 | push %rbx | 77 | push %rbx |
55 | push %rbp | 78 | push %rbp |
56 | push %r12 | 79 | push %r12 |
@@ -66,48 +89,66 @@ bn_mul_mont: | |||
66 | and \$-1024,%rsp # minimize TLB usage | 89 | and \$-1024,%rsp # minimize TLB usage |
67 | 90 | ||
68 | mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp | 91 | mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp |
69 | .Lprologue: | 92 | .Lmul_body: |
70 | mov %rdx,$bp # $bp reassigned, remember? | 93 | mov $bp,%r12 # reassign $bp |
71 | 94 | ___ | |
95 | $bp="%r12"; | ||
96 | $code.=<<___; | ||
72 | mov ($n0),$n0 # pull n0[0] value | 97 | mov ($n0),$n0 # pull n0[0] value |
98 | mov ($bp),$m0 # m0=bp[0] | ||
99 | mov ($ap),%rax | ||
73 | 100 | ||
74 | xor $i,$i # i=0 | 101 | xor $i,$i # i=0 |
75 | xor $j,$j # j=0 | 102 | xor $j,$j # j=0 |
76 | 103 | ||
77 | mov ($bp),$m0 # m0=bp[0] | 104 | mov $n0,$m1 |
78 | mov ($ap),%rax | ||
79 | mulq $m0 # ap[0]*bp[0] | 105 | mulq $m0 # ap[0]*bp[0] |
80 | mov %rax,$lo0 | 106 | mov %rax,$lo0 |
81 | mov %rdx,$hi0 | 107 | mov ($np),%rax |
82 | 108 | ||
83 | imulq $n0,%rax # "tp[0]"*n0 | 109 | imulq $lo0,$m1 # "tp[0]"*n0 |
84 | mov %rax,$m1 | 110 | mov %rdx,$hi0 |
85 | 111 | ||
86 | mulq ($np) # np[0]*m1 | 112 | mulq $m1 # np[0]*m1 |
87 | add $lo0,%rax # discarded | 113 | add %rax,$lo0 # discarded |
114 | mov 8($ap),%rax | ||
88 | adc \$0,%rdx | 115 | adc \$0,%rdx |
89 | mov %rdx,$hi1 | 116 | mov %rdx,$hi1 |
90 | 117 | ||
91 | lea 1($j),$j # j++ | 118 | lea 1($j),$j # j++ |
119 | jmp .L1st_enter | ||
120 | |||
121 | .align 16 | ||
92 | .L1st: | 122 | .L1st: |
123 | add %rax,$hi1 | ||
93 | mov ($ap,$j,8),%rax | 124 | mov ($ap,$j,8),%rax |
94 | mulq $m0 # ap[j]*bp[0] | ||
95 | add $hi0,%rax | ||
96 | adc \$0,%rdx | 125 | adc \$0,%rdx |
97 | mov %rax,$lo0 | 126 | add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0] |
127 | mov $lo0,$hi0 | ||
128 | adc \$0,%rdx | ||
129 | mov $hi1,-16(%rsp,$j,8) # tp[j-1] | ||
130 | mov %rdx,$hi1 | ||
131 | |||
132 | .L1st_enter: | ||
133 | mulq $m0 # ap[j]*bp[0] | ||
134 | add %rax,$hi0 | ||
98 | mov ($np,$j,8),%rax | 135 | mov ($np,$j,8),%rax |
99 | mov %rdx,$hi0 | 136 | adc \$0,%rdx |
137 | lea 1($j),$j # j++ | ||
138 | mov %rdx,$lo0 | ||
100 | 139 | ||
101 | mulq $m1 # np[j]*m1 | 140 | mulq $m1 # np[j]*m1 |
102 | add $hi1,%rax | 141 | cmp $num,$j |
103 | lea 1($j),$j # j++ | 142 | jne .L1st |
143 | |||
144 | add %rax,$hi1 | ||
145 | mov ($ap),%rax # ap[0] | ||
104 | adc \$0,%rdx | 146 | adc \$0,%rdx |
105 | add $lo0,%rax # np[j]*m1+ap[j]*bp[0] | 147 | add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0] |
106 | adc \$0,%rdx | 148 | adc \$0,%rdx |
107 | mov %rax,-16(%rsp,$j,8) # tp[j-1] | 149 | mov $hi1,-16(%rsp,$j,8) # tp[j-1] |
108 | cmp $num,$j | ||
109 | mov %rdx,$hi1 | 150 | mov %rdx,$hi1 |
110 | jl .L1st | 151 | mov $lo0,$hi0 |
111 | 152 | ||
112 | xor %rdx,%rdx | 153 | xor %rdx,%rdx |
113 | add $hi0,$hi1 | 154 | add $hi0,$hi1 |
@@ -116,50 +157,64 @@ bn_mul_mont: | |||
116 | mov %rdx,(%rsp,$num,8) # store upmost overflow bit | 157 | mov %rdx,(%rsp,$num,8) # store upmost overflow bit |
117 | 158 | ||
118 | lea 1($i),$i # i++ | 159 | lea 1($i),$i # i++ |
119 | .align 4 | 160 | jmp .Louter |
161 | .align 16 | ||
120 | .Louter: | 162 | .Louter: |
121 | xor $j,$j # j=0 | ||
122 | |||
123 | mov ($bp,$i,8),$m0 # m0=bp[i] | 163 | mov ($bp,$i,8),$m0 # m0=bp[i] |
124 | mov ($ap),%rax # ap[0] | 164 | xor $j,$j # j=0 |
165 | mov $n0,$m1 | ||
166 | mov (%rsp),$lo0 | ||
125 | mulq $m0 # ap[0]*bp[i] | 167 | mulq $m0 # ap[0]*bp[i] |
126 | add (%rsp),%rax # ap[0]*bp[i]+tp[0] | 168 | add %rax,$lo0 # ap[0]*bp[i]+tp[0] |
169 | mov ($np),%rax | ||
127 | adc \$0,%rdx | 170 | adc \$0,%rdx |
128 | mov %rax,$lo0 | ||
129 | mov %rdx,$hi0 | ||
130 | 171 | ||
131 | imulq $n0,%rax # tp[0]*n0 | 172 | imulq $lo0,$m1 # tp[0]*n0 |
132 | mov %rax,$m1 | 173 | mov %rdx,$hi0 |
133 | 174 | ||
134 | mulq ($np,$j,8) # np[0]*m1 | 175 | mulq $m1 # np[0]*m1 |
135 | add $lo0,%rax # discarded | 176 | add %rax,$lo0 # discarded |
136 | mov 8(%rsp),$lo0 # tp[1] | 177 | mov 8($ap),%rax |
137 | adc \$0,%rdx | 178 | adc \$0,%rdx |
179 | mov 8(%rsp),$lo0 # tp[1] | ||
138 | mov %rdx,$hi1 | 180 | mov %rdx,$hi1 |
139 | 181 | ||
140 | lea 1($j),$j # j++ | 182 | lea 1($j),$j # j++ |
141 | .align 4 | 183 | jmp .Linner_enter |
184 | |||
185 | .align 16 | ||
142 | .Linner: | 186 | .Linner: |
187 | add %rax,$hi1 | ||
143 | mov ($ap,$j,8),%rax | 188 | mov ($ap,$j,8),%rax |
144 | mulq $m0 # ap[j]*bp[i] | ||
145 | add $hi0,%rax | ||
146 | adc \$0,%rdx | 189 | adc \$0,%rdx |
147 | add %rax,$lo0 # ap[j]*bp[i]+tp[j] | 190 | add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j] |
191 | mov (%rsp,$j,8),$lo0 | ||
192 | adc \$0,%rdx | ||
193 | mov $hi1,-16(%rsp,$j,8) # tp[j-1] | ||
194 | mov %rdx,$hi1 | ||
195 | |||
196 | .Linner_enter: | ||
197 | mulq $m0 # ap[j]*bp[i] | ||
198 | add %rax,$hi0 | ||
148 | mov ($np,$j,8),%rax | 199 | mov ($np,$j,8),%rax |
149 | adc \$0,%rdx | 200 | adc \$0,%rdx |
201 | add $hi0,$lo0 # ap[j]*bp[i]+tp[j] | ||
150 | mov %rdx,$hi0 | 202 | mov %rdx,$hi0 |
203 | adc \$0,$hi0 | ||
204 | lea 1($j),$j # j++ | ||
151 | 205 | ||
152 | mulq $m1 # np[j]*m1 | 206 | mulq $m1 # np[j]*m1 |
153 | add $hi1,%rax | 207 | cmp $num,$j |
154 | lea 1($j),$j # j++ | 208 | jne .Linner |
155 | adc \$0,%rdx | 209 | |
156 | add $lo0,%rax # np[j]*m1+ap[j]*bp[i]+tp[j] | 210 | add %rax,$hi1 |
211 | mov ($ap),%rax # ap[0] | ||
157 | adc \$0,%rdx | 212 | adc \$0,%rdx |
213 | add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j] | ||
158 | mov (%rsp,$j,8),$lo0 | 214 | mov (%rsp,$j,8),$lo0 |
159 | cmp $num,$j | 215 | adc \$0,%rdx |
160 | mov %rax,-16(%rsp,$j,8) # tp[j-1] | 216 | mov $hi1,-16(%rsp,$j,8) # tp[j-1] |
161 | mov %rdx,$hi1 | 217 | mov %rdx,$hi1 |
162 | jl .Linner | ||
163 | 218 | ||
164 | xor %rdx,%rdx | 219 | xor %rdx,%rdx |
165 | add $hi0,$hi1 | 220 | add $hi0,$hi1 |
@@ -173,35 +228,449 @@ bn_mul_mont: | |||
173 | cmp $num,$i | 228 | cmp $num,$i |
174 | jl .Louter | 229 | jl .Louter |
175 | 230 | ||
176 | lea (%rsp),$ap # borrow ap for tp | ||
177 | lea -1($num),$j # j=num-1 | ||
178 | |||
179 | mov ($ap),%rax # tp[0] | ||
180 | xor $i,$i # i=0 and clear CF! | 231 | xor $i,$i # i=0 and clear CF! |
232 | mov (%rsp),%rax # tp[0] | ||
233 | lea (%rsp),$ap # borrow ap for tp | ||
234 | mov $num,$j # j=num | ||
181 | jmp .Lsub | 235 | jmp .Lsub |
182 | .align 16 | 236 | .align 16 |
183 | .Lsub: sbb ($np,$i,8),%rax | 237 | .Lsub: sbb ($np,$i,8),%rax |
184 | mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i] | 238 | mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i] |
185 | dec $j # doesn't affect CF! | ||
186 | mov 8($ap,$i,8),%rax # tp[i+1] | 239 | mov 8($ap,$i,8),%rax # tp[i+1] |
187 | lea 1($i),$i # i++ | 240 | lea 1($i),$i # i++ |
188 | jge .Lsub | 241 | dec $j # doesn't affect CF! | ||
242 | jnz .Lsub | ||
189 | 243 | ||
190 | sbb \$0,%rax # handle upmost overflow bit | 244 | sbb \$0,%rax # handle upmost overflow bit |
245 | xor $i,$i | ||
191 | and %rax,$ap | 246 | and %rax,$ap |
192 | not %rax | 247 | not %rax |
193 | mov $rp,$np | 248 | mov $rp,$np |
194 | and %rax,$np | 249 | and %rax,$np |
195 | lea -1($num),$j | 250 | mov $num,$j # j=num |
196 | or $np,$ap # ap=borrow?tp:rp | 251 | or $np,$ap # ap=borrow?tp:rp |
197 | .align 16 | 252 | .align 16 |
198 | .Lcopy: # copy or in-place refresh | 253 | .Lcopy: # copy or in-place refresh |
254 | mov ($ap,$i,8),%rax | ||
255 | mov $i,(%rsp,$i,8) # zap temporary vector | ||
256 | mov %rax,($rp,$i,8) # rp[i]=tp[i] | ||
257 | lea 1($i),$i | ||
258 | sub \$1,$j | ||
259 | jnz .Lcopy | ||
260 | |||
261 | mov 8(%rsp,$num,8),%rsi # restore %rsp | ||
262 | mov \$1,%rax | ||
263 | mov (%rsi),%r15 | ||
264 | mov 8(%rsi),%r14 | ||
265 | mov 16(%rsi),%r13 | ||
266 | mov 24(%rsi),%r12 | ||
267 | mov 32(%rsi),%rbp | ||
268 | mov 40(%rsi),%rbx | ||
269 | lea 48(%rsi),%rsp | ||
270 | .Lmul_epilogue: | ||
271 | ret | ||
272 | .size bn_mul_mont,.-bn_mul_mont | ||
273 | ___ | ||
274 | {{{ | ||
275 | my @A=("%r10","%r11"); | ||
276 | my @N=("%r13","%rdi"); | ||
277 | $code.=<<___; | ||
278 | .type bn_mul4x_mont,\@function,6 | ||
279 | .align 16 | ||
280 | bn_mul4x_mont: | ||
281 | .Lmul4x_enter: | ||
282 | push %rbx | ||
283 | push %rbp | ||
284 | push %r12 | ||
285 | push %r13 | ||
286 | push %r14 | ||
287 | push %r15 | ||
288 | |||
289 | mov ${num}d,${num}d | ||
290 | lea 4($num),%r10 | ||
291 | mov %rsp,%r11 | ||
292 | neg %r10 | ||
293 | lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4)) | ||
294 | and \$-1024,%rsp # minimize TLB usage | ||
295 | |||
296 | mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp | ||
297 | .Lmul4x_body: | ||
298 | mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp | ||
299 | mov %rdx,%r12 # reassign $bp | ||
300 | ___ | ||
301 | $bp="%r12"; | ||
302 | $code.=<<___; | ||
303 | mov ($n0),$n0 # pull n0[0] value | ||
304 | mov ($bp),$m0 # m0=bp[0] | ||
305 | mov ($ap),%rax | ||
306 | |||
307 | xor $i,$i # i=0 | ||
308 | xor $j,$j # j=0 | ||
309 | |||
310 | mov $n0,$m1 | ||
311 | mulq $m0 # ap[0]*bp[0] | ||
312 | mov %rax,$A[0] | ||
313 | mov ($np),%rax | ||
314 | |||
315 | imulq $A[0],$m1 # "tp[0]"*n0 | ||
316 | mov %rdx,$A[1] | ||
317 | |||
318 | mulq $m1 # np[0]*m1 | ||
319 | add %rax,$A[0] # discarded | ||
320 | mov 8($ap),%rax | ||
321 | adc \$0,%rdx | ||
322 | mov %rdx,$N[1] | ||
323 | |||
324 | mulq $m0 | ||
325 | add %rax,$A[1] | ||
326 | mov 8($np),%rax | ||
327 | adc \$0,%rdx | ||
328 | mov %rdx,$A[0] | ||
329 | |||
330 | mulq $m1 | ||
331 | add %rax,$N[1] | ||
332 | mov 16($ap),%rax | ||
333 | adc \$0,%rdx | ||
334 | add $A[1],$N[1] | ||
335 | lea 4($j),$j # j++ | ||
336 | adc \$0,%rdx | ||
337 | mov $N[1],(%rsp) | ||
338 | mov %rdx,$N[0] | ||
339 | jmp .L1st4x | ||
340 | .align 16 | ||
341 | .L1st4x: | ||
342 | mulq $m0 # ap[j]*bp[0] | ||
343 | add %rax,$A[0] | ||
344 | mov -16($np,$j,8),%rax | ||
345 | adc \$0,%rdx | ||
346 | mov %rdx,$A[1] | ||
347 | |||
348 | mulq $m1 # np[j]*m1 | ||
349 | add %rax,$N[0] | ||
350 | mov -8($ap,$j,8),%rax | ||
351 | adc \$0,%rdx | ||
352 | add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] | ||
353 | adc \$0,%rdx | ||
354 | mov $N[0],-24(%rsp,$j,8) # tp[j-1] | ||
355 | mov %rdx,$N[1] | ||
356 | |||
357 | mulq $m0 # ap[j]*bp[0] | ||
358 | add %rax,$A[1] | ||
359 | mov -8($np,$j,8),%rax | ||
360 | adc \$0,%rdx | ||
361 | mov %rdx,$A[0] | ||
362 | |||
363 | mulq $m1 # np[j]*m1 | ||
364 | add %rax,$N[1] | ||
199 | mov ($ap,$j,8),%rax | 365 | mov ($ap,$j,8),%rax |
200 | mov %rax,($rp,$j,8) # rp[i]=tp[i] | 366 | adc \$0,%rdx |
201 | mov $i,(%rsp,$j,8) # zap temporary vector | 367 | add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] |
368 | adc \$0,%rdx | ||
369 | mov $N[1],-16(%rsp,$j,8) # tp[j-1] | ||
370 | mov %rdx,$N[0] | ||
371 | |||
372 | mulq $m0 # ap[j]*bp[0] | ||
373 | add %rax,$A[0] | ||
374 | mov ($np,$j,8),%rax | ||
375 | adc \$0,%rdx | ||
376 | mov %rdx,$A[1] | ||
377 | |||
378 | mulq $m1 # np[j]*m1 | ||
379 | add %rax,$N[0] | ||
380 | mov 8($ap,$j,8),%rax | ||
381 | adc \$0,%rdx | ||
382 | add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] | ||
383 | adc \$0,%rdx | ||
384 | mov $N[0],-8(%rsp,$j,8) # tp[j-1] | ||
385 | mov %rdx,$N[1] | ||
386 | |||
387 | mulq $m0 # ap[j]*bp[0] | ||
388 | add %rax,$A[1] | ||
389 | mov 8($np,$j,8),%rax | ||
390 | adc \$0,%rdx | ||
391 | lea 4($j),$j # j++ | ||
392 | mov %rdx,$A[0] | ||
393 | |||
394 | mulq $m1 # np[j]*m1 | ||
395 | add %rax,$N[1] | ||
396 | mov -16($ap,$j,8),%rax | ||
397 | adc \$0,%rdx | ||
398 | add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] | ||
399 | adc \$0,%rdx | ||
400 | mov $N[1],-32(%rsp,$j,8) # tp[j-1] | ||
401 | mov %rdx,$N[0] | ||
402 | cmp $num,$j | ||
403 | jl .L1st4x | ||
404 | |||
405 | mulq $m0 # ap[j]*bp[0] | ||
406 | add %rax,$A[0] | ||
407 | mov -16($np,$j,8),%rax | ||
408 | adc \$0,%rdx | ||
409 | mov %rdx,$A[1] | ||
410 | |||
411 | mulq $m1 # np[j]*m1 | ||
412 | add %rax,$N[0] | ||
413 | mov -8($ap,$j,8),%rax | ||
414 | adc \$0,%rdx | ||
415 | add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] | ||
416 | adc \$0,%rdx | ||
417 | mov $N[0],-24(%rsp,$j,8) # tp[j-1] | ||
418 | mov %rdx,$N[1] | ||
419 | |||
420 | mulq $m0 # ap[j]*bp[0] | ||
421 | add %rax,$A[1] | ||
422 | mov -8($np,$j,8),%rax | ||
423 | adc \$0,%rdx | ||
424 | mov %rdx,$A[0] | ||
425 | |||
426 | mulq $m1 # np[j]*m1 | ||
427 | add %rax,$N[1] | ||
428 | mov ($ap),%rax # ap[0] | ||
429 | adc \$0,%rdx | ||
430 | add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] | ||
431 | adc \$0,%rdx | ||
432 | mov $N[1],-16(%rsp,$j,8) # tp[j-1] | ||
433 | mov %rdx,$N[0] | ||
434 | |||
435 | xor $N[1],$N[1] | ||
436 | add $A[0],$N[0] | ||
437 | adc \$0,$N[1] | ||
438 | mov $N[0],-8(%rsp,$j,8) | ||
439 | mov $N[1],(%rsp,$j,8) # store upmost overflow bit | ||
440 | |||
441 | lea 1($i),$i # i++ | ||
442 | .align 4 | ||
443 | .Louter4x: | ||
444 | mov ($bp,$i,8),$m0 # m0=bp[i] | ||
445 | xor $j,$j # j=0 | ||
446 | mov (%rsp),$A[0] | ||
447 | mov $n0,$m1 | ||
448 | mulq $m0 # ap[0]*bp[i] | ||
449 | add %rax,$A[0] # ap[0]*bp[i]+tp[0] | ||
450 | mov ($np),%rax | ||
451 | adc \$0,%rdx | ||
452 | |||
453 | imulq $A[0],$m1 # tp[0]*n0 | ||
454 | mov %rdx,$A[1] | ||
455 | |||
456 | mulq $m1 # np[0]*m1 | ||
457 | add %rax,$A[0] # "$N[0]", discarded | ||
458 | mov 8($ap),%rax | ||
459 | adc \$0,%rdx | ||
460 | mov %rdx,$N[1] | ||
461 | |||
462 | mulq $m0 # ap[j]*bp[i] | ||
463 | add %rax,$A[1] | ||
464 | mov 8($np),%rax | ||
465 | adc \$0,%rdx | ||
466 | add 8(%rsp),$A[1] # +tp[1] | ||
467 | adc \$0,%rdx | ||
468 | mov %rdx,$A[0] | ||
469 | |||
470 | mulq $m1 # np[j]*m1 | ||
471 | add %rax,$N[1] | ||
472 | mov 16($ap),%rax | ||
473 | adc \$0,%rdx | ||
474 | add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j] | ||
475 | lea 4($j),$j # j+=2 | ||
476 | adc \$0,%rdx | ||
477 | mov $N[1],(%rsp) # tp[j-1] | ||
478 | mov %rdx,$N[0] | ||
479 | jmp .Linner4x | ||
480 | .align 16 | ||
481 | .Linner4x: | ||
482 | mulq $m0 # ap[j]*bp[i] | ||
483 | add %rax,$A[0] | ||
484 | mov -16($np,$j,8),%rax | ||
485 | adc \$0,%rdx | ||
486 | add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j] | ||
487 | adc \$0,%rdx | ||
488 | mov %rdx,$A[1] | ||
489 | |||
490 | mulq $m1 # np[j]*m1 | ||
491 | add %rax,$N[0] | ||
492 | mov -8($ap,$j,8),%rax | ||
493 | adc \$0,%rdx | ||
494 | add $A[0],$N[0] | ||
495 | adc \$0,%rdx | ||
496 | mov $N[0],-24(%rsp,$j,8) # tp[j-1] | ||
497 | mov %rdx,$N[1] | ||
498 | |||
499 | mulq $m0 # ap[j]*bp[i] | ||
500 | add %rax,$A[1] | ||
501 | mov -8($np,$j,8),%rax | ||
502 | adc \$0,%rdx | ||
503 | add -8(%rsp,$j,8),$A[1] | ||
504 | adc \$0,%rdx | ||
505 | mov %rdx,$A[0] | ||
506 | |||
507 | mulq $m1 # np[j]*m1 | ||
508 | add %rax,$N[1] | ||
509 | mov ($ap,$j,8),%rax | ||
510 | adc \$0,%rdx | ||
511 | add $A[1],$N[1] | ||
512 | adc \$0,%rdx | ||
513 | mov $N[1],-16(%rsp,$j,8) # tp[j-1] | ||
514 | mov %rdx,$N[0] | ||
515 | |||
516 | mulq $m0 # ap[j]*bp[i] | ||
517 | add %rax,$A[0] | ||
518 | mov ($np,$j,8),%rax | ||
519 | adc \$0,%rdx | ||
520 | add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j] | ||
521 | adc \$0,%rdx | ||
522 | mov %rdx,$A[1] | ||
523 | |||
524 | mulq $m1 # np[j]*m1 | ||
525 | add %rax,$N[0] | ||
526 | mov 8($ap,$j,8),%rax | ||
527 | adc \$0,%rdx | ||
528 | add $A[0],$N[0] | ||
529 | adc \$0,%rdx | ||
530 | mov $N[0],-8(%rsp,$j,8) # tp[j-1] | ||
531 | mov %rdx,$N[1] | ||
532 | |||
533 | mulq $m0 # ap[j]*bp[i] | ||
534 | add %rax,$A[1] | ||
535 | mov 8($np,$j,8),%rax | ||
536 | adc \$0,%rdx | ||
537 | add 8(%rsp,$j,8),$A[1] | ||
538 | adc \$0,%rdx | ||
539 | lea 4($j),$j # j++ | ||
540 | mov %rdx,$A[0] | ||
541 | |||
542 | mulq $m1 # np[j]*m1 | ||
543 | add %rax,$N[1] | ||
544 | mov -16($ap,$j,8),%rax | ||
545 | adc \$0,%rdx | ||
546 | add $A[1],$N[1] | ||
547 | adc \$0,%rdx | ||
548 | mov $N[1],-32(%rsp,$j,8) # tp[j-1] | ||
549 | mov %rdx,$N[0] | ||
550 | cmp $num,$j | ||
551 | jl .Linner4x | ||
552 | |||
553 | mulq $m0 # ap[j]*bp[i] | ||
554 | add %rax,$A[0] | ||
555 | mov -16($np,$j,8),%rax | ||
556 | adc \$0,%rdx | ||
557 | add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j] | ||
558 | adc \$0,%rdx | ||
559 | mov %rdx,$A[1] | ||
560 | |||
561 | mulq $m1 # np[j]*m1 | ||
562 | add %rax,$N[0] | ||
563 | mov -8($ap,$j,8),%rax | ||
564 | adc \$0,%rdx | ||
565 | add $A[0],$N[0] | ||
566 | adc \$0,%rdx | ||
567 | mov $N[0],-24(%rsp,$j,8) # tp[j-1] | ||
568 | mov %rdx,$N[1] | ||
569 | |||
570 | mulq $m0 # ap[j]*bp[i] | ||
571 | add %rax,$A[1] | ||
572 | mov -8($np,$j,8),%rax | ||
573 | adc \$0,%rdx | ||
574 | add -8(%rsp,$j,8),$A[1] | ||
575 | adc \$0,%rdx | ||
576 | lea 1($i),$i # i++ | ||
577 | mov %rdx,$A[0] | ||
578 | |||
579 | mulq $m1 # np[j]*m1 | ||
580 | add %rax,$N[1] | ||
581 | mov ($ap),%rax # ap[0] | ||
582 | adc \$0,%rdx | ||
583 | add $A[1],$N[1] | ||
584 | adc \$0,%rdx | ||
585 | mov $N[1],-16(%rsp,$j,8) # tp[j-1] | ||
586 | mov %rdx,$N[0] | ||
587 | |||
588 | xor $N[1],$N[1] | ||
589 | add $A[0],$N[0] | ||
590 | adc \$0,$N[1] | ||
591 | add (%rsp,$num,8),$N[0] # pull upmost overflow bit | ||
592 | adc \$0,$N[1] | ||
593 | mov $N[0],-8(%rsp,$j,8) | ||
594 | mov $N[1],(%rsp,$j,8) # store upmost overflow bit | ||
595 | |||
596 | cmp $num,$i | ||
597 | jl .Louter4x | ||
598 | ___ | ||
599 | { | ||
600 | my @ri=("%rax","%rdx",$m0,$m1); | ||
601 | $code.=<<___; | ||
602 | mov 16(%rsp,$num,8),$rp # restore $rp | ||
603 | mov 0(%rsp),@ri[0] # tp[0] | ||
604 | pxor %xmm0,%xmm0 | ||
605 | mov 8(%rsp),@ri[1] # tp[1] | ||
606 | shr \$2,$num # num/=4 | ||
607 | lea (%rsp),$ap # borrow ap for tp | ||
608 | xor $i,$i # i=0 and clear CF! | ||
609 | |||
610 | sub 0($np),@ri[0] | ||
611 | mov 16($ap),@ri[2] # tp[2] | ||
612 | mov 24($ap),@ri[3] # tp[3] | ||
613 | sbb 8($np),@ri[1] | ||
614 | lea -1($num),$j # j=num/4-1 | ||
615 | jmp .Lsub4x | ||
616 | .align 16 | ||
617 | .Lsub4x: | ||
618 | mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
619 | mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
620 | sbb 16($np,$i,8),@ri[2] | ||
621 | mov 32($ap,$i,8),@ri[0] # tp[i+1] | ||
622 | mov 40($ap,$i,8),@ri[1] | ||
623 | sbb 24($np,$i,8),@ri[3] | ||
624 | mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
625 | mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
626 | sbb 32($np,$i,8),@ri[0] | ||
627 | mov 48($ap,$i,8),@ri[2] | ||
628 | mov 56($ap,$i,8),@ri[3] | ||
629 | sbb 40($np,$i,8),@ri[1] | ||
630 | lea 4($i),$i # i++ | ||
631 | dec $j # doesnn't affect CF! | ||
632 | jnz .Lsub4x | ||
633 | |||
634 | mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
635 | mov 32($ap,$i,8),@ri[0] # load overflow bit | ||
636 | sbb 16($np,$i,8),@ri[2] | ||
637 | mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
638 | sbb 24($np,$i,8),@ri[3] | ||
639 | mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
640 | |||
641 | sbb \$0,@ri[0] # handle upmost overflow bit | ||
642 | mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
643 | xor $i,$i # i=0 | ||
644 | and @ri[0],$ap | ||
645 | not @ri[0] | ||
646 | mov $rp,$np | ||
647 | and @ri[0],$np | ||
648 | lea -1($num),$j | ||
649 | or $np,$ap # ap=borrow?tp:rp | ||
650 | |||
651 | movdqu ($ap),%xmm1 | ||
652 | movdqa %xmm0,(%rsp) | ||
653 | movdqu %xmm1,($rp) | ||
654 | jmp .Lcopy4x | ||
655 | .align 16 | ||
656 | .Lcopy4x: # copy or in-place refresh | ||
657 | movdqu 16($ap,$i),%xmm2 | ||
658 | movdqu 32($ap,$i),%xmm1 | ||
659 | movdqa %xmm0,16(%rsp,$i) | ||
660 | movdqu %xmm2,16($rp,$i) | ||
661 | movdqa %xmm0,32(%rsp,$i) | ||
662 | movdqu %xmm1,32($rp,$i) | ||
663 | lea 32($i),$i | ||
202 | dec $j | 664 | dec $j |
203 | jge .Lcopy | 665 | jnz .Lcopy4x |
204 | 666 | ||
667 | shl \$2,$num | ||
668 | movdqu 16($ap,$i),%xmm2 | ||
669 | movdqa %xmm0,16(%rsp,$i) | ||
670 | movdqu %xmm2,16($rp,$i) | ||
671 | ___ | ||
672 | } | ||
673 | $code.=<<___; | ||
205 | mov 8(%rsp,$num,8),%rsi # restore %rsp | 674 | mov 8(%rsp,$num,8),%rsi # restore %rsp |
206 | mov \$1,%rax | 675 | mov \$1,%rax |
207 | mov (%rsi),%r15 | 676 | mov (%rsi),%r15 |
@@ -211,9 +680,823 @@ bn_mul_mont: | |||
211 | mov 32(%rsi),%rbp | 680 | mov 32(%rsi),%rbp |
212 | mov 40(%rsi),%rbx | 681 | mov 40(%rsi),%rbx |
213 | lea 48(%rsi),%rsp | 682 | lea 48(%rsi),%rsp |
214 | .Lepilogue: | 683 | .Lmul4x_epilogue: |
215 | ret | 684 | ret |
216 | .size bn_mul_mont,.-bn_mul_mont | 685 | .size bn_mul4x_mont,.-bn_mul4x_mont |
686 | ___ | ||
687 | }}} | ||
688 | {{{ | ||
689 | ###################################################################### | ||
690 | # void bn_sqr4x_mont( | ||
691 | my $rptr="%rdi"; # const BN_ULONG *rptr, | ||
692 | my $aptr="%rsi"; # const BN_ULONG *aptr, | ||
693 | my $bptr="%rdx"; # not used | ||
694 | my $nptr="%rcx"; # const BN_ULONG *nptr, | ||
695 | my $n0 ="%r8"; # const BN_ULONG *n0); | ||
696 | my $num ="%r9"; # int num, has to be divisible by 4 and | ||
697 | # not less than 8 | ||
698 | |||
699 | my ($i,$j,$tptr)=("%rbp","%rcx",$rptr); | ||
700 | my @A0=("%r10","%r11"); | ||
701 | my @A1=("%r12","%r13"); | ||
702 | my ($a0,$a1,$ai)=("%r14","%r15","%rbx"); | ||
703 | |||
704 | $code.=<<___; | ||
705 | .type bn_sqr4x_mont,\@function,6 | ||
706 | .align 16 | ||
707 | bn_sqr4x_mont: | ||
708 | .Lsqr4x_enter: | ||
709 | push %rbx | ||
710 | push %rbp | ||
711 | push %r12 | ||
712 | push %r13 | ||
713 | push %r14 | ||
714 | push %r15 | ||
715 | |||
716 | shl \$3,${num}d # convert $num to bytes | ||
717 | xor %r10,%r10 | ||
718 | mov %rsp,%r11 # put aside %rsp | ||
719 | sub $num,%r10 # -$num | ||
720 | mov ($n0),$n0 # *n0 | ||
721 | lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num) | ||
722 | and \$-1024,%rsp # minimize TLB usage | ||
723 | ############################################################## | ||
724 | # Stack layout | ||
725 | # | ||
726 | # +0 saved $num, used in reduction section | ||
727 | # +8 &t[2*$num], used in reduction section | ||
728 | # +32 saved $rptr | ||
729 | # +40 saved $nptr | ||
730 | # +48 saved *n0 | ||
731 | # +56 saved %rsp | ||
732 | # +64 t[2*$num] | ||
733 | # | ||
734 | mov $rptr,32(%rsp) # save $rptr | ||
735 | mov $nptr,40(%rsp) | ||
736 | mov $n0, 48(%rsp) | ||
737 | mov %r11, 56(%rsp) # save original %rsp | ||
738 | .Lsqr4x_body: | ||
739 | ############################################################## | ||
740 | # Squaring part: | ||
741 | # | ||
742 | # a) multiply-n-add everything but a[i]*a[i]; | ||
743 | # b) shift result of a) by 1 to the left and accumulate | ||
744 | # a[i]*a[i] products; | ||
745 | # | ||
746 | lea 32(%r10),$i # $i=-($num-32) | ||
747 | lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2] | ||
748 | |||
749 | mov $num,$j # $j=$num | ||
750 | |||
751 | # comments apply to $num==8 case | ||
752 | mov -32($aptr,$i),$a0 # a[0] | ||
753 | lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num] | ||
754 | mov -24($aptr,$i),%rax # a[1] | ||
755 | lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"] | ||
756 | mov -16($aptr,$i),$ai # a[2] | ||
757 | mov %rax,$a1 | ||
758 | |||
759 | mul $a0 # a[1]*a[0] | ||
760 | mov %rax,$A0[0] # a[1]*a[0] | ||
761 | mov $ai,%rax # a[2] | ||
762 | mov %rdx,$A0[1] | ||
763 | mov $A0[0],-24($tptr,$i) # t[1] | ||
764 | |||
765 | xor $A0[0],$A0[0] | ||
766 | mul $a0 # a[2]*a[0] | ||
767 | add %rax,$A0[1] | ||
768 | mov $ai,%rax | ||
769 | adc %rdx,$A0[0] | ||
770 | mov $A0[1],-16($tptr,$i) # t[2] | ||
771 | |||
772 | lea -16($i),$j # j=-16 | ||
773 | |||
774 | |||
775 | mov 8($aptr,$j),$ai # a[3] | ||
776 | mul $a1 # a[2]*a[1] | ||
777 | mov %rax,$A1[0] # a[2]*a[1]+t[3] | ||
778 | mov $ai,%rax | ||
779 | mov %rdx,$A1[1] | ||
780 | |||
781 | xor $A0[1],$A0[1] | ||
782 | add $A1[0],$A0[0] | ||
783 | lea 16($j),$j | ||
784 | adc \$0,$A0[1] | ||
785 | mul $a0 # a[3]*a[0] | ||
786 | add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3] | ||
787 | mov $ai,%rax | ||
788 | adc %rdx,$A0[1] | ||
789 | mov $A0[0],-8($tptr,$j) # t[3] | ||
790 | jmp .Lsqr4x_1st | ||
791 | |||
792 | .align 16 | ||
793 | .Lsqr4x_1st: | ||
794 | mov ($aptr,$j),$ai # a[4] | ||
795 | xor $A1[0],$A1[0] | ||
796 | mul $a1 # a[3]*a[1] | ||
797 | add %rax,$A1[1] # a[3]*a[1]+t[4] | ||
798 | mov $ai,%rax | ||
799 | adc %rdx,$A1[0] | ||
800 | |||
801 | xor $A0[0],$A0[0] | ||
802 | add $A1[1],$A0[1] | ||
803 | adc \$0,$A0[0] | ||
804 | mul $a0 # a[4]*a[0] | ||
805 | add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4] | ||
806 | mov $ai,%rax # a[3] | ||
807 | adc %rdx,$A0[0] | ||
808 | mov $A0[1],($tptr,$j) # t[4] | ||
809 | |||
810 | |||
811 | mov 8($aptr,$j),$ai # a[5] | ||
812 | xor $A1[1],$A1[1] | ||
813 | mul $a1 # a[4]*a[3] | ||
814 | add %rax,$A1[0] # a[4]*a[3]+t[5] | ||
815 | mov $ai,%rax | ||
816 | adc %rdx,$A1[1] | ||
817 | |||
818 | xor $A0[1],$A0[1] | ||
819 | add $A1[0],$A0[0] | ||
820 | adc \$0,$A0[1] | ||
821 | mul $a0 # a[5]*a[2] | ||
822 | add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5] | ||
823 | mov $ai,%rax | ||
824 | adc %rdx,$A0[1] | ||
825 | mov $A0[0],8($tptr,$j) # t[5] | ||
826 | |||
827 | mov 16($aptr,$j),$ai # a[6] | ||
828 | xor $A1[0],$A1[0] | ||
829 | mul $a1 # a[5]*a[3] | ||
830 | add %rax,$A1[1] # a[5]*a[3]+t[6] | ||
831 | mov $ai,%rax | ||
832 | adc %rdx,$A1[0] | ||
833 | |||
834 | xor $A0[0],$A0[0] | ||
835 | add $A1[1],$A0[1] | ||
836 | adc \$0,$A0[0] | ||
837 | mul $a0 # a[6]*a[2] | ||
838 | add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6] | ||
839 | mov $ai,%rax # a[3] | ||
840 | adc %rdx,$A0[0] | ||
841 | mov $A0[1],16($tptr,$j) # t[6] | ||
842 | |||
843 | |||
844 | mov 24($aptr,$j),$ai # a[7] | ||
845 | xor $A1[1],$A1[1] | ||
846 | mul $a1 # a[6]*a[5] | ||
847 | add %rax,$A1[0] # a[6]*a[5]+t[7] | ||
848 | mov $ai,%rax | ||
849 | adc %rdx,$A1[1] | ||
850 | |||
851 | xor $A0[1],$A0[1] | ||
852 | add $A1[0],$A0[0] | ||
853 | lea 32($j),$j | ||
854 | adc \$0,$A0[1] | ||
855 | mul $a0 # a[7]*a[4] | ||
856 | add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[7] | ||
857 | mov $ai,%rax | ||
858 | adc %rdx,$A0[1] | ||
859 | mov $A0[0],-8($tptr,$j) # t[7] | ||
860 | |||
861 | cmp \$0,$j | ||
862 | jne .Lsqr4x_1st | ||
863 | |||
864 | xor $A1[0],$A1[0] | ||
865 | add $A0[1],$A1[1] | ||
866 | adc \$0,$A1[0] | ||
867 | mul $a1 # a[7]*a[5] | ||
868 | add %rax,$A1[1] | ||
869 | adc %rdx,$A1[0] | ||
870 | |||
871 | mov $A1[1],($tptr) # t[8] | ||
872 | lea 16($i),$i | ||
873 | mov $A1[0],8($tptr) # t[9] | ||
874 | jmp .Lsqr4x_outer | ||
875 | |||
876 | .align 16 | ||
877 | .Lsqr4x_outer: # comments apply to $num==6 case | ||
878 | mov -32($aptr,$i),$a0 # a[0] | ||
879 | lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num] | ||
880 | mov -24($aptr,$i),%rax # a[1] | ||
881 | lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"] | ||
882 | mov -16($aptr,$i),$ai # a[2] | ||
883 | mov %rax,$a1 | ||
884 | |||
885 | mov -24($tptr,$i),$A0[0] # t[1] | ||
886 | xor $A0[1],$A0[1] | ||
887 | mul $a0 # a[1]*a[0] | ||
888 | add %rax,$A0[0] # a[1]*a[0]+t[1] | ||
889 | mov $ai,%rax # a[2] | ||
890 | adc %rdx,$A0[1] | ||
891 | mov $A0[0],-24($tptr,$i) # t[1] | ||
892 | |||
893 | xor $A0[0],$A0[0] | ||
894 | add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2] | ||
895 | adc \$0,$A0[0] | ||
896 | mul $a0 # a[2]*a[0] | ||
897 | add %rax,$A0[1] | ||
898 | mov $ai,%rax | ||
899 | adc %rdx,$A0[0] | ||
900 | mov $A0[1],-16($tptr,$i) # t[2] | ||
901 | |||
902 | lea -16($i),$j # j=-16 | ||
903 | xor $A1[0],$A1[0] | ||
904 | |||
905 | |||
906 | mov 8($aptr,$j),$ai # a[3] | ||
907 | xor $A1[1],$A1[1] | ||
908 | add 8($tptr,$j),$A1[0] | ||
909 | adc \$0,$A1[1] | ||
910 | mul $a1 # a[2]*a[1] | ||
911 | add %rax,$A1[0] # a[2]*a[1]+t[3] | ||
912 | mov $ai,%rax | ||
913 | adc %rdx,$A1[1] | ||
914 | |||
915 | xor $A0[1],$A0[1] | ||
916 | add $A1[0],$A0[0] | ||
917 | adc \$0,$A0[1] | ||
918 | mul $a0 # a[3]*a[0] | ||
919 | add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3] | ||
920 | mov $ai,%rax | ||
921 | adc %rdx,$A0[1] | ||
922 | mov $A0[0],8($tptr,$j) # t[3] | ||
923 | |||
924 | lea 16($j),$j | ||
925 | jmp .Lsqr4x_inner | ||
926 | |||
927 | .align 16 | ||
928 | .Lsqr4x_inner: | ||
929 | mov ($aptr,$j),$ai # a[4] | ||
930 | xor $A1[0],$A1[0] | ||
931 | add ($tptr,$j),$A1[1] | ||
932 | adc \$0,$A1[0] | ||
933 | mul $a1 # a[3]*a[1] | ||
934 | add %rax,$A1[1] # a[3]*a[1]+t[4] | ||
935 | mov $ai,%rax | ||
936 | adc %rdx,$A1[0] | ||
937 | |||
938 | xor $A0[0],$A0[0] | ||
939 | add $A1[1],$A0[1] | ||
940 | adc \$0,$A0[0] | ||
941 | mul $a0 # a[4]*a[0] | ||
942 | add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4] | ||
943 | mov $ai,%rax # a[3] | ||
944 | adc %rdx,$A0[0] | ||
945 | mov $A0[1],($tptr,$j) # t[4] | ||
946 | |||
947 | mov 8($aptr,$j),$ai # a[5] | ||
948 | xor $A1[1],$A1[1] | ||
949 | add 8($tptr,$j),$A1[0] | ||
950 | adc \$0,$A1[1] | ||
951 | mul $a1 # a[4]*a[3] | ||
952 | add %rax,$A1[0] # a[4]*a[3]+t[5] | ||
953 | mov $ai,%rax | ||
954 | adc %rdx,$A1[1] | ||
955 | |||
956 | xor $A0[1],$A0[1] | ||
957 | add $A1[0],$A0[0] | ||
958 | lea 16($j),$j # j++ | ||
959 | adc \$0,$A0[1] | ||
960 | mul $a0 # a[5]*a[2] | ||
961 | add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5] | ||
962 | mov $ai,%rax | ||
963 | adc %rdx,$A0[1] | ||
964 | mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below | ||
965 | |||
966 | cmp \$0,$j | ||
967 | jne .Lsqr4x_inner | ||
968 | |||
969 | xor $A1[0],$A1[0] | ||
970 | add $A0[1],$A1[1] | ||
971 | adc \$0,$A1[0] | ||
972 | mul $a1 # a[5]*a[3] | ||
973 | add %rax,$A1[1] | ||
974 | adc %rdx,$A1[0] | ||
975 | |||
976 | mov $A1[1],($tptr) # t[6], "preloaded t[2]" below | ||
977 | mov $A1[0],8($tptr) # t[7], "preloaded t[3]" below | ||
978 | |||
979 | add \$16,$i | ||
980 | jnz .Lsqr4x_outer | ||
981 | |||
982 | # comments apply to $num==4 case | ||
983 | mov -32($aptr),$a0 # a[0] | ||
984 | lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num] | ||
985 | mov -24($aptr),%rax # a[1] | ||
986 | lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"] | ||
987 | mov -16($aptr),$ai # a[2] | ||
988 | mov %rax,$a1 | ||
989 | |||
990 | xor $A0[1],$A0[1] | ||
991 | mul $a0 # a[1]*a[0] | ||
992 | add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1] | ||
993 | mov $ai,%rax # a[2] | ||
994 | adc %rdx,$A0[1] | ||
995 | mov $A0[0],-24($tptr) # t[1] | ||
996 | |||
997 | xor $A0[0],$A0[0] | ||
998 | add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2] | ||
999 | adc \$0,$A0[0] | ||
1000 | mul $a0 # a[2]*a[0] | ||
1001 | add %rax,$A0[1] | ||
1002 | mov $ai,%rax | ||
1003 | adc %rdx,$A0[0] | ||
1004 | mov $A0[1],-16($tptr) # t[2] | ||
1005 | |||
1006 | mov -8($aptr),$ai # a[3] | ||
1007 | mul $a1 # a[2]*a[1] | ||
1008 | add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3] | ||
1009 | mov $ai,%rax | ||
1010 | adc \$0,%rdx | ||
1011 | |||
1012 | xor $A0[1],$A0[1] | ||
1013 | add $A1[0],$A0[0] | ||
1014 | mov %rdx,$A1[1] | ||
1015 | adc \$0,$A0[1] | ||
1016 | mul $a0 # a[3]*a[0] | ||
1017 | add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3] | ||
1018 | mov $ai,%rax | ||
1019 | adc %rdx,$A0[1] | ||
1020 | mov $A0[0],-8($tptr) # t[3] | ||
1021 | |||
1022 | xor $A1[0],$A1[0] | ||
1023 | add $A0[1],$A1[1] | ||
1024 | adc \$0,$A1[0] | ||
1025 | mul $a1 # a[3]*a[1] | ||
1026 | add %rax,$A1[1] | ||
1027 | mov -16($aptr),%rax # a[2] | ||
1028 | adc %rdx,$A1[0] | ||
1029 | |||
1030 | mov $A1[1],($tptr) # t[4] | ||
1031 | mov $A1[0],8($tptr) # t[5] | ||
1032 | |||
1033 | mul $ai # a[2]*a[3] | ||
1034 | ___ | ||
1035 | { | ||
1036 | my ($shift,$carry)=($a0,$a1); | ||
1037 | my @S=(@A1,$ai,$n0); | ||
1038 | $code.=<<___; | ||
1039 | add \$16,$i | ||
1040 | xor $shift,$shift | ||
1041 | sub $num,$i # $i=16-$num | ||
1042 | xor $carry,$carry | ||
1043 | |||
1044 | add $A1[0],%rax # t[5] | ||
1045 | adc \$0,%rdx | ||
1046 | mov %rax,8($tptr) # t[5] | ||
1047 | mov %rdx,16($tptr) # t[6] | ||
1048 | mov $carry,24($tptr) # t[7] | ||
1049 | |||
1050 | mov -16($aptr,$i),%rax # a[0] | ||
1051 | lea 64(%rsp,$num,2),$tptr | ||
1052 | xor $A0[0],$A0[0] # t[0] | ||
1053 | mov -24($tptr,$i,2),$A0[1] # t[1] | ||
1054 | |||
1055 | lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift | ||
1056 | shr \$63,$A0[0] | ||
1057 | lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 | | ||
1058 | shr \$63,$A0[1] | ||
1059 | or $A0[0],$S[1] # | t[2*i]>>63 | ||
1060 | mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch | ||
1061 | mov $A0[1],$shift # shift=t[2*i+1]>>63 | ||
1062 | mul %rax # a[i]*a[i] | ||
1063 | neg $carry # mov $carry,cf | ||
1064 | mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch | ||
1065 | adc %rax,$S[0] | ||
1066 | mov -8($aptr,$i),%rax # a[i+1] # prefetch | ||
1067 | mov $S[0],-32($tptr,$i,2) | ||
1068 | adc %rdx,$S[1] | ||
1069 | |||
1070 | lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift | ||
1071 | mov $S[1],-24($tptr,$i,2) | ||
1072 | sbb $carry,$carry # mov cf,$carry | ||
1073 | shr \$63,$A0[0] | ||
1074 | lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 | | ||
1075 | shr \$63,$A0[1] | ||
1076 | or $A0[0],$S[3] # | t[2*i]>>63 | ||
1077 | mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch | ||
1078 | mov $A0[1],$shift # shift=t[2*i+1]>>63 | ||
1079 | mul %rax # a[i]*a[i] | ||
1080 | neg $carry # mov $carry,cf | ||
1081 | mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch | ||
1082 | adc %rax,$S[2] | ||
1083 | mov 0($aptr,$i),%rax # a[i+1] # prefetch | ||
1084 | mov $S[2],-16($tptr,$i,2) | ||
1085 | adc %rdx,$S[3] | ||
1086 | lea 16($i),$i | ||
1087 | mov $S[3],-40($tptr,$i,2) | ||
1088 | sbb $carry,$carry # mov cf,$carry | ||
1089 | jmp .Lsqr4x_shift_n_add | ||
1090 | |||
1091 | .align 16 | ||
1092 | .Lsqr4x_shift_n_add: | ||
1093 | lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift | ||
1094 | shr \$63,$A0[0] | ||
1095 | lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 | | ||
1096 | shr \$63,$A0[1] | ||
1097 | or $A0[0],$S[1] # | t[2*i]>>63 | ||
1098 | mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch | ||
1099 | mov $A0[1],$shift # shift=t[2*i+1]>>63 | ||
1100 | mul %rax # a[i]*a[i] | ||
1101 | neg $carry # mov $carry,cf | ||
1102 | mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch | ||
1103 | adc %rax,$S[0] | ||
1104 | mov -8($aptr,$i),%rax # a[i+1] # prefetch | ||
1105 | mov $S[0],-32($tptr,$i,2) | ||
1106 | adc %rdx,$S[1] | ||
1107 | |||
1108 | lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift | ||
1109 | mov $S[1],-24($tptr,$i,2) | ||
1110 | sbb $carry,$carry # mov cf,$carry | ||
1111 | shr \$63,$A0[0] | ||
1112 | lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 | | ||
1113 | shr \$63,$A0[1] | ||
1114 | or $A0[0],$S[3] # | t[2*i]>>63 | ||
1115 | mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch | ||
1116 | mov $A0[1],$shift # shift=t[2*i+1]>>63 | ||
1117 | mul %rax # a[i]*a[i] | ||
1118 | neg $carry # mov $carry,cf | ||
1119 | mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch | ||
1120 | adc %rax,$S[2] | ||
1121 | mov 0($aptr,$i),%rax # a[i+1] # prefetch | ||
1122 | mov $S[2],-16($tptr,$i,2) | ||
1123 | adc %rdx,$S[3] | ||
1124 | |||
1125 | lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift | ||
1126 | mov $S[3],-8($tptr,$i,2) | ||
1127 | sbb $carry,$carry # mov cf,$carry | ||
1128 | shr \$63,$A0[0] | ||
1129 | lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 | | ||
1130 | shr \$63,$A0[1] | ||
1131 | or $A0[0],$S[1] # | t[2*i]>>63 | ||
1132 | mov 16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch | ||
1133 | mov $A0[1],$shift # shift=t[2*i+1]>>63 | ||
1134 | mul %rax # a[i]*a[i] | ||
1135 | neg $carry # mov $carry,cf | ||
1136 | mov 24($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch | ||
1137 | adc %rax,$S[0] | ||
1138 | mov 8($aptr,$i),%rax # a[i+1] # prefetch | ||
1139 | mov $S[0],0($tptr,$i,2) | ||
1140 | adc %rdx,$S[1] | ||
1141 | |||
1142 | lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift | ||
1143 | mov $S[1],8($tptr,$i,2) | ||
1144 | sbb $carry,$carry # mov cf,$carry | ||
1145 | shr \$63,$A0[0] | ||
1146 | lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 | | ||
1147 | shr \$63,$A0[1] | ||
1148 | or $A0[0],$S[3] # | t[2*i]>>63 | ||
1149 | mov 32($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch | ||
1150 | mov $A0[1],$shift # shift=t[2*i+1]>>63 | ||
1151 | mul %rax # a[i]*a[i] | ||
1152 | neg $carry # mov $carry,cf | ||
1153 | mov 40($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch | ||
1154 | adc %rax,$S[2] | ||
1155 | mov 16($aptr,$i),%rax # a[i+1] # prefetch | ||
1156 | mov $S[2],16($tptr,$i,2) | ||
1157 | adc %rdx,$S[3] | ||
1158 | mov $S[3],24($tptr,$i,2) | ||
1159 | sbb $carry,$carry # mov cf,$carry | ||
1160 | add \$32,$i | ||
1161 | jnz .Lsqr4x_shift_n_add | ||
1162 | |||
1163 | lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift | ||
1164 | shr \$63,$A0[0] | ||
1165 | lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 | | ||
1166 | shr \$63,$A0[1] | ||
1167 | or $A0[0],$S[1] # | t[2*i]>>63 | ||
1168 | mov -16($tptr),$A0[0] # t[2*i+2] # prefetch | ||
1169 | mov $A0[1],$shift # shift=t[2*i+1]>>63 | ||
1170 | mul %rax # a[i]*a[i] | ||
1171 | neg $carry # mov $carry,cf | ||
1172 | mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch | ||
1173 | adc %rax,$S[0] | ||
1174 | mov -8($aptr),%rax # a[i+1] # prefetch | ||
1175 | mov $S[0],-32($tptr) | ||
1176 | adc %rdx,$S[1] | ||
1177 | |||
1178 | lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift | ||
1179 | mov $S[1],-24($tptr) | ||
1180 | sbb $carry,$carry # mov cf,$carry | ||
1181 | shr \$63,$A0[0] | ||
1182 | lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 | | ||
1183 | shr \$63,$A0[1] | ||
1184 | or $A0[0],$S[3] # | t[2*i]>>63 | ||
1185 | mul %rax # a[i]*a[i] | ||
1186 | neg $carry # mov $carry,cf | ||
1187 | adc %rax,$S[2] | ||
1188 | adc %rdx,$S[3] | ||
1189 | mov $S[2],-16($tptr) | ||
1190 | mov $S[3],-8($tptr) | ||
1191 | ___ | ||
1192 | } | ||
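
That closes the squaring phase. As laid out at .Lsqr4x_body, it is the textbook split into cross products plus a shift-and-add pass; a compact C reference follows (the assembly fuses step b) with the a[i]·a[i] multiplies inside .Lsqr4x_shift_n_add, while the sketch keeps the phases separate):

```c
#include <stdint.h>

/* Reference for the two-phase squaring: a) accumulate the cross
 * products a[i]*a[j], i<j, into t[]; b) double t[] with one left
 * shift and add the diagonal squares a[i]*a[i].  t[] has 2*num words. */
static void sqr_ref(uint64_t *t, const uint64_t *a, int num)
{
    unsigned __int128 v, s;
    uint64_t c;
    int i, j;

    for (j = 0; j < 2 * num; j++)
        t[j] = 0;

    for (i = 0; i < num; i++) {         /* a) cross products only */
        c = 0;
        for (j = i + 1; j < num; j++) {
            v = (unsigned __int128)a[i] * a[j] + t[i + j] + c;
            t[i + j] = (uint64_t)v;
            c = (uint64_t)(v >> 64);
        }
        t[i + num] = c;
    }

    for (j = 2 * num - 1; j > 0; j--)   /* b) shift left by one... */
        t[j] = (t[j] << 1) | (t[j - 1] >> 63);
    t[0] <<= 1;

    c = 0;                              /* ...and add the diagonal */
    for (i = 0; i < num; i++) {
        v = (unsigned __int128)a[i] * a[i];
        s = (unsigned __int128)t[2 * i] + (uint64_t)v + c;
        t[2 * i] = (uint64_t)s;
        s = (unsigned __int128)t[2 * i + 1] + (uint64_t)(v >> 64)
            + (uint64_t)(s >> 64);
        t[2 * i + 1] = (uint64_t)s;
        c = (uint64_t)(s >> 64);        /* absorbed by the next pair */
    }
}
```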
1193 | ############################################################## | ||
1194 | # Montgomery reduction part, "word-by-word" algorithm. | ||
1195 | # | ||
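A matching sketch of this standalone reduction pass, under the same conventions as the mul_mont_ref sketch earlier (t[] is the 2*num-word square, topbit the spill carry the code keeps in %rbp):

```c
/* Word-by-word Montgomery reduction of the 2*num-word t[]: num times,
 * cancel the bottom word with m = t[i]*n0, add m*n, and let the result
 * accumulate in the upper half; finish with the usual conditional
 * subtraction.  t[] is clobbered. */
static void redc_ref(uint64_t *rp, uint64_t *t, const uint64_t *np,
    uint64_t n0, int num)
{
    unsigned __int128 v;
    uint64_t topbit = 0, borrow, mask, c;
    int i, j;

    for (i = 0; i < num; i++) {
        uint64_t m = t[i] * n0;         /* mod 2^64 */

        c = 0;
        for (j = 0; j < num; j++) {
            v = (unsigned __int128)m * np[j] + t[i + j] + c;
            t[i + j] = (uint64_t)v;     /* t[i] becomes zero */
            c = (uint64_t)(v >> 64);
        }
        for (j = i + num; c != 0 && j < 2 * num; j++) {
            v = (unsigned __int128)t[j] + c;
            t[j] = (uint64_t)v;
            c = (uint64_t)(v >> 64);
        }
        topbit += c;                    /* at most one carry off the end */
    }

    borrow = 0;                         /* rp = t_high - n, then select */
    for (j = 0; j < num; j++) {
        v = (unsigned __int128)t[num + j] - np[j] - borrow;
        rp[j] = (uint64_t)v;
        borrow = (uint64_t)(v >> 64) & 1;
    }
    mask = 0 - (uint64_t)(topbit < borrow); /* all-ones: keep t_high */
    for (j = 0; j < num; j++)
        rp[j] = (t[num + j] & mask) | (rp[j] & ~mask);
}
```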
1196 | { | ||
1197 | my ($topbit,$nptr)=("%rbp",$aptr); | ||
1198 | my ($m0,$m1)=($a0,$a1); | ||
1199 | my @Ni=("%rbx","%r9"); | ||
1200 | $code.=<<___; | ||
1201 | mov 40(%rsp),$nptr # restore $nptr | ||
1202 | mov 48(%rsp),$n0 # restore *n0 | ||
1203 | xor $j,$j | ||
1204 | mov $num,0(%rsp) # save $num | ||
1205 | sub $num,$j # $j=-$num | ||
1206 | mov 64(%rsp),$A0[0] # t[0] # modsched # | ||
1207 | mov $n0,$m0 # # modsched # | ||
1208 | lea 64(%rsp,$num,2),%rax # end of t[] buffer | ||
1209 | lea 64(%rsp,$num),$tptr # end of t[] window | ||
1210 | mov %rax,8(%rsp) # save end of t[] buffer | ||
1211 | lea ($nptr,$num),$nptr # end of n[] buffer | ||
1212 | xor $topbit,$topbit # $topbit=0 | ||
1213 | |||
1214 | mov 0($nptr,$j),%rax # n[0] # modsched # | ||
1215 | mov 8($nptr,$j),$Ni[1] # n[1] # modsched # | ||
1216 | imulq $A0[0],$m0 # m0=t[0]*n0 # modsched # | ||
1217 | mov %rax,$Ni[0] # # modsched # | ||
1218 | jmp .Lsqr4x_mont_outer | ||
1219 | |||
1220 | .align 16 | ||
1221 | .Lsqr4x_mont_outer: | ||
1222 | xor $A0[1],$A0[1] | ||
1223 | mul $m0 # n[0]*m0 | ||
1224 | add %rax,$A0[0] # n[0]*m0+t[0] | ||
1225 | mov $Ni[1],%rax | ||
1226 | adc %rdx,$A0[1] | ||
1227 | mov $n0,$m1 | ||
1228 | |||
1229 | xor $A0[0],$A0[0] | ||
1230 | add 8($tptr,$j),$A0[1] | ||
1231 | adc \$0,$A0[0] | ||
1232 | mul $m0 # n[1]*m0 | ||
1233 | add %rax,$A0[1] # n[1]*m0+t[1] | ||
1234 | mov $Ni[0],%rax | ||
1235 | adc %rdx,$A0[0] | ||
1236 | |||
1237 | imulq $A0[1],$m1 | ||
1238 | |||
1239 | mov 16($nptr,$j),$Ni[0] # n[2] | ||
1240 | xor $A1[1],$A1[1] | ||
1241 | add $A0[1],$A1[0] | ||
1242 | adc \$0,$A1[1] | ||
1243 | mul $m1 # n[0]*m1 | ||
1244 | add %rax,$A1[0] # n[0]*m1+"t[1]" | ||
1245 | mov $Ni[0],%rax | ||
1246 | adc %rdx,$A1[1] | ||
1247 | mov $A1[0],8($tptr,$j) # "t[1]" | ||
1248 | |||
1249 | xor $A0[1],$A0[1] | ||
1250 | add 16($tptr,$j),$A0[0] | ||
1251 | adc \$0,$A0[1] | ||
1252 | mul $m0 # n[2]*m0 | ||
1253 | add %rax,$A0[0] # n[2]*m0+t[2] | ||
1254 | mov $Ni[1],%rax | ||
1255 | adc %rdx,$A0[1] | ||
1256 | |||
1257 | mov 24($nptr,$j),$Ni[1] # n[3] | ||
1258 | xor $A1[0],$A1[0] | ||
1259 | add $A0[0],$A1[1] | ||
1260 | adc \$0,$A1[0] | ||
1261 | mul $m1 # n[1]*m1 | ||
1262 | add %rax,$A1[1] # n[1]*m1+"t[2]" | ||
1263 | mov $Ni[1],%rax | ||
1264 | adc %rdx,$A1[0] | ||
1265 | mov $A1[1],16($tptr,$j) # "t[2]" | ||
1266 | |||
1267 | xor $A0[0],$A0[0] | ||
1268 | add 24($tptr,$j),$A0[1] | ||
1269 | lea 32($j),$j | ||
1270 | adc \$0,$A0[0] | ||
1271 | mul $m0 # n[3]*m0 | ||
1272 | add %rax,$A0[1] # n[3]*m0+t[3] | ||
1273 | mov $Ni[0],%rax | ||
1274 | adc %rdx,$A0[0] | ||
1275 | jmp .Lsqr4x_mont_inner | ||
1276 | |||
1277 | .align 16 | ||
1278 | .Lsqr4x_mont_inner: | ||
1279 | mov ($nptr,$j),$Ni[0] # n[4] | ||
1280 | xor $A1[1],$A1[1] | ||
1281 | add $A0[1],$A1[0] | ||
1282 | adc \$0,$A1[1] | ||
1283 | mul $m1 # n[2]*m1 | ||
1284 | add %rax,$A1[0] # n[2]*m1+"t[3]" | ||
1285 | mov $Ni[0],%rax | ||
1286 | adc %rdx,$A1[1] | ||
1287 | mov $A1[0],-8($tptr,$j) # "t[3]" | ||
1288 | |||
1289 | xor $A0[1],$A0[1] | ||
1290 | add ($tptr,$j),$A0[0] | ||
1291 | adc \$0,$A0[1] | ||
1292 | mul $m0 # n[4]*m0 | ||
1293 | add %rax,$A0[0] # n[4]*m0+t[4] | ||
1294 | mov $Ni[1],%rax | ||
1295 | adc %rdx,$A0[1] | ||
1296 | |||
1297 | mov 8($nptr,$j),$Ni[1] # n[5] | ||
1298 | xor $A1[0],$A1[0] | ||
1299 | add $A0[0],$A1[1] | ||
1300 | adc \$0,$A1[0] | ||
1301 | mul $m1 # n[3]*m1 | ||
1302 | add %rax,$A1[1] # n[3]*m1+"t[4]" | ||
1303 | mov $Ni[1],%rax | ||
1304 | adc %rdx,$A1[0] | ||
1305 | mov $A1[1],($tptr,$j) # "t[4]" | ||
1306 | |||
1307 | xor $A0[0],$A0[0] | ||
1308 | add 8($tptr,$j),$A0[1] | ||
1309 | adc \$0,$A0[0] | ||
1310 | mul $m0 # n[5]*m0 | ||
1311 | add %rax,$A0[1] # n[5]*m0+t[5] | ||
1312 | mov $Ni[0],%rax | ||
1313 | adc %rdx,$A0[0] | ||
1314 | |||
1315 | |||
1316 | mov 16($nptr,$j),$Ni[0] # n[6] | ||
1317 | xor $A1[1],$A1[1] | ||
1318 | add $A0[1],$A1[0] | ||
1319 | adc \$0,$A1[1] | ||
1320 | mul $m1 # n[4]*m1 | ||
1321 | add %rax,$A1[0] # n[4]*m1+"t[5]" | ||
1322 | mov $Ni[0],%rax | ||
1323 | adc %rdx,$A1[1] | ||
1324 | mov $A1[0],8($tptr,$j) # "t[5]" | ||
1325 | |||
1326 | xor $A0[1],$A0[1] | ||
1327 | add 16($tptr,$j),$A0[0] | ||
1328 | adc \$0,$A0[1] | ||
1329 | mul $m0 # n[6]*m0 | ||
1330 | add %rax,$A0[0] # n[6]*m0+t[6] | ||
1331 | mov $Ni[1],%rax | ||
1332 | adc %rdx,$A0[1] | ||
1333 | |||
1334 | mov 24($nptr,$j),$Ni[1] # n[7] | ||
1335 | xor $A1[0],$A1[0] | ||
1336 | add $A0[0],$A1[1] | ||
1337 | adc \$0,$A1[0] | ||
1338 | mul $m1 # n[5]*m1 | ||
1339 | add %rax,$A1[1] # n[5]*m1+"t[6]" | ||
1340 | mov $Ni[1],%rax | ||
1341 | adc %rdx,$A1[0] | ||
1342 | mov $A1[1],16($tptr,$j) # "t[6]" | ||
1343 | |||
1344 | xor $A0[0],$A0[0] | ||
1345 | add 24($tptr,$j),$A0[1] | ||
1346 | lea 32($j),$j | ||
1347 | adc \$0,$A0[0] | ||
1348 | mul $m0 # n[7]*m0 | ||
1349 | add %rax,$A0[1] # n[7]*m0+t[7] | ||
1350 | mov $Ni[0],%rax | ||
1351 | adc %rdx,$A0[0] | ||
1352 | cmp \$0,$j | ||
1353 | jne .Lsqr4x_mont_inner | ||
1354 | |||
1355 | sub 0(%rsp),$j # $j=-$num # modsched # | ||
1356 | mov $n0,$m0 # # modsched # | ||
1357 | |||
1358 | xor $A1[1],$A1[1] | ||
1359 | add $A0[1],$A1[0] | ||
1360 | adc \$0,$A1[1] | ||
1361 | mul $m1 # n[6]*m1 | ||
1362 | add %rax,$A1[0] # n[6]*m1+"t[7]" | ||
1363 | mov $Ni[1],%rax | ||
1364 | adc %rdx,$A1[1] | ||
1365 | mov $A1[0],-8($tptr) # "t[7]" | ||
1366 | |||
1367 | xor $A0[1],$A0[1] | ||
1368 | add ($tptr),$A0[0] # +t[8] | ||
1369 | adc \$0,$A0[1] | ||
1370 | mov 0($nptr,$j),$Ni[0] # n[0] # modsched # | ||
1371 | add $topbit,$A0[0] | ||
1372 | adc \$0,$A0[1] | ||
1373 | |||
1374 | imulq 16($tptr,$j),$m0 # m0=t[0]*n0 # modsched # | ||
1375 | xor $A1[0],$A1[0] | ||
1376 | mov 8($nptr,$j),$Ni[1] # n[1] # modsched # | ||
1377 | add $A0[0],$A1[1] | ||
1378 | mov 16($tptr,$j),$A0[0] # t[0] # modsched # | ||
1379 | adc \$0,$A1[0] | ||
1380 | mul $m1 # n[7]*m1 | ||
1381 | add %rax,$A1[1] # n[7]*m1+"t[8]" | ||
1382 | mov $Ni[0],%rax # # modsched # | ||
1383 | adc %rdx,$A1[0] | ||
1384 | mov $A1[1],($tptr) # "t[8]" | ||
1385 | |||
1386 | xor $topbit,$topbit | ||
1387 | add 8($tptr),$A1[0] # +t[9] | ||
1388 | adc $topbit,$topbit | ||
1389 | add $A0[1],$A1[0] | ||
1390 | lea 16($tptr),$tptr # "t[$num]>>128" | ||
1391 | adc \$0,$topbit | ||
1392 | mov $A1[0],-8($tptr) # "t[9]" | ||
1393 | cmp 8(%rsp),$tptr # are we done? | ||
1394 | jb .Lsqr4x_mont_outer | ||
1395 | |||
1396 | mov 0(%rsp),$num # restore $num | ||
1397 | mov $topbit,($tptr) # save $topbit | ||
1398 | ___ | ||
1399 | } | ||
1400 | ############################################################## | ||
1401 | # Post-condition, 4x unrolled copy from bn_mul_mont | ||
1402 | # | ||
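# The copy below never branches on the subtraction outcome: the final sbb
# leaves an all-zero or all-ones word, and masking selects the copy source
# as src = (tp & mask) | (rp & ~mask). A hypothetical Perl model of that
# select over 64-bit values (illustrative only; the real code applies it
# to the two pointers):
sub _ct_select_ref {
	my ($borrow,$tp,$rp) = @_;	# $borrow: 0 or 1
	my $mask = (-$borrow) & 0xffffffffffffffff;	# 0 or all-ones, as sbb leaves it
	return ($tp & $mask) | ($rp & ~$mask & 0xffffffffffffffff);
}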
1403 | { | ||
1404 | my ($tptr,$nptr)=("%rbx",$aptr); | ||
1405 | my @ri=("%rax","%rdx","%r10","%r11"); | ||
1406 | $code.=<<___; | ||
1407 | mov 64(%rsp,$num),@ri[0] # tp[0] | ||
1408 | lea 64(%rsp,$num),$tptr # upper half of t[2*$num] holds result | ||
1409 | mov 40(%rsp),$nptr # restore $nptr | ||
1410 | shr \$5,$num # num/4 | ||
1411 | mov 8($tptr),@ri[1] # t[1] | ||
1412 | xor $i,$i # i=0 and clear CF! | ||
1413 | |||
1414 | mov 32(%rsp),$rptr # restore $rptr | ||
1415 | sub 0($nptr),@ri[0] | ||
1416 | mov 16($tptr),@ri[2] # t[2] | ||
1417 | mov 24($tptr),@ri[3] # t[3] | ||
1418 | sbb 8($nptr),@ri[1] | ||
1419 | lea -1($num),$j # j=num/4-1 | ||
1420 | jmp .Lsqr4x_sub | ||
1421 | .align 16 | ||
1422 | .Lsqr4x_sub: | ||
1423 | mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i] | ||
1424 | mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i] | ||
1425 | sbb 16($nptr,$i,8),@ri[2] | ||
1426 | mov 32($tptr,$i,8),@ri[0] # tp[i+1] | ||
1427 | mov 40($tptr,$i,8),@ri[1] | ||
1428 | sbb 24($nptr,$i,8),@ri[3] | ||
1429 | mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i] | ||
1430 | mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i] | ||
1431 | sbb 32($nptr,$i,8),@ri[0] | ||
1432 | mov 48($tptr,$i,8),@ri[2] | ||
1433 | mov 56($tptr,$i,8),@ri[3] | ||
1434 | sbb 40($nptr,$i,8),@ri[1] | ||
1435 | lea 4($i),$i # i++ | ||
1436 | dec $j # doesn't affect CF! | ||
1437 | jnz .Lsqr4x_sub | ||
1438 | |||
1439 | mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i] | ||
1440 | mov 32($tptr,$i,8),@ri[0] # load overflow bit | ||
1441 | sbb 16($nptr,$i,8),@ri[2] | ||
1442 | mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i] | ||
1443 | sbb 24($nptr,$i,8),@ri[3] | ||
1444 | mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i] | ||
1445 | |||
1446 | sbb \$0,@ri[0] # handle upmost overflow bit | ||
1447 | mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i] | ||
1448 | xor $i,$i # i=0 | ||
1449 | and @ri[0],$tptr | ||
1450 | not @ri[0] | ||
1451 | mov $rptr,$nptr | ||
1452 | and @ri[0],$nptr | ||
1453 | lea -1($num),$j | ||
1454 | or $nptr,$tptr # tp=borrow?tp:rp | ||
1455 | |||
1456 | pxor %xmm0,%xmm0 | ||
1457 | lea 64(%rsp,$num,8),$nptr | ||
1458 | movdqu ($tptr),%xmm1 | ||
1459 | lea ($nptr,$num,8),$nptr | ||
1460 | movdqa %xmm0,64(%rsp) # zap lower half of temporary vector | ||
1461 | movdqa %xmm0,($nptr) # zap upper half of temporary vector | ||
1462 | movdqu %xmm1,($rptr) | ||
1463 | jmp .Lsqr4x_copy | ||
1464 | .align 16 | ||
1465 | .Lsqr4x_copy: # copy or in-place refresh | ||
1466 | movdqu 16($tptr,$i),%xmm2 | ||
1467 | movdqu 32($tptr,$i),%xmm1 | ||
1468 | movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector | ||
1469 | movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector | ||
1470 | movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector | ||
1471 | movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector | ||
1472 | movdqu %xmm2,16($rptr,$i) | ||
1473 | movdqu %xmm1,32($rptr,$i) | ||
1474 | lea 32($i),$i | ||
1475 | dec $j | ||
1476 | jnz .Lsqr4x_copy | ||
1477 | |||
1478 | movdqu 16($tptr,$i),%xmm2 | ||
1479 | movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector | ||
1480 | movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector | ||
1481 | movdqu %xmm2,16($rptr,$i) | ||
1482 | ___ | ||
1483 | } | ||
1484 | $code.=<<___; | ||
1485 | mov 56(%rsp),%rsi # restore %rsp | ||
1486 | mov \$1,%rax | ||
1487 | mov 0(%rsi),%r15 | ||
1488 | mov 8(%rsi),%r14 | ||
1489 | mov 16(%rsi),%r13 | ||
1490 | mov 24(%rsi),%r12 | ||
1491 | mov 32(%rsi),%rbp | ||
1492 | mov 40(%rsi),%rbx | ||
1493 | lea 48(%rsi),%rsp | ||
1494 | .Lsqr4x_epilogue: | ||
1495 | ret | ||
1496 | .size bn_sqr4x_mont,.-bn_sqr4x_mont | ||
1497 | ___ | ||
1498 | }}} | ||
1499 | $code.=<<___; | ||
217 | .asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>" | 1500 | .asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>" |
218 | .align 16 | 1501 | .align 16 |
219 | ___ | 1502 | ___ |
@@ -228,9 +1511,9 @@ $disp="%r9"; | |||
228 | 1511 | ||
229 | $code.=<<___; | 1512 | $code.=<<___; |
230 | .extern __imp_RtlVirtualUnwind | 1513 | .extern __imp_RtlVirtualUnwind |
231 | .type se_handler,\@abi-omnipotent | 1514 | .type mul_handler,\@abi-omnipotent |
232 | .align 16 | 1515 | .align 16 |
233 | se_handler: | 1516 | mul_handler: |
234 | push %rsi | 1517 | push %rsi |
235 | push %rdi | 1518 | push %rdi |
236 | push %rbx | 1519 | push %rbx |
@@ -245,15 +1528,20 @@ se_handler: | |||
245 | mov 120($context),%rax # pull context->Rax | 1528 | mov 120($context),%rax # pull context->Rax |
246 | mov 248($context),%rbx # pull context->Rip | 1529 | mov 248($context),%rbx # pull context->Rip |
247 | 1530 | ||
248 | lea .Lprologue(%rip),%r10 | 1531 | mov 8($disp),%rsi # disp->ImageBase |
249 | cmp %r10,%rbx # context->Rip<.Lprologue | 1532 | mov 56($disp),%r11 # disp->HandlerData |
250 | jb .Lin_prologue | 1533 | |
1534 | mov 0(%r11),%r10d # HandlerData[0] | ||
1535 | lea (%rsi,%r10),%r10 # end of prologue label | ||
1536 | cmp %r10,%rbx # context->Rip<end of prologue label | ||
1537 | jb .Lcommon_seh_tail | ||
251 | 1538 | ||
252 | mov 152($context),%rax # pull context->Rsp | 1539 | mov 152($context),%rax # pull context->Rsp |
253 | 1540 | ||
254 | lea .Lepilogue(%rip),%r10 | 1541 | mov 4(%r11),%r10d # HandlerData[1] |
255 | cmp %r10,%rbx # context->Rip>=.Lepilogue | 1542 | lea (%rsi,%r10),%r10 # epilogue label |
256 | jae .Lin_prologue | 1543 | cmp %r10,%rbx # context->Rip>=epilogue label |
1544 | jae .Lcommon_seh_tail | ||
257 | 1545 | ||
258 | mov 192($context),%r10 # pull $num | 1546 | mov 192($context),%r10 # pull $num |
259 | mov 8(%rax,%r10,8),%rax # pull saved stack pointer | 1547 | mov 8(%rax,%r10,8),%rax # pull saved stack pointer |
@@ -272,7 +1560,53 @@ se_handler: | |||
272 | mov %r14,232($context) # restore context->R14 | 1560 | mov %r14,232($context) # restore context->R14 |
273 | mov %r15,240($context) # restore context->R15 | 1561 | mov %r15,240($context) # restore context->R15 |
274 | 1562 | ||
275 | .Lin_prologue: | 1563 | jmp .Lcommon_seh_tail |
1564 | .size mul_handler,.-mul_handler | ||
1565 | |||
1566 | .type sqr_handler,\@abi-omnipotent | ||
1567 | .align 16 | ||
1568 | sqr_handler: | ||
1569 | push %rsi | ||
1570 | push %rdi | ||
1571 | push %rbx | ||
1572 | push %rbp | ||
1573 | push %r12 | ||
1574 | push %r13 | ||
1575 | push %r14 | ||
1576 | push %r15 | ||
1577 | pushfq | ||
1578 | sub \$64,%rsp | ||
1579 | |||
1580 | mov 120($context),%rax # pull context->Rax | ||
1581 | mov 248($context),%rbx # pull context->Rip | ||
1582 | |||
1583 | lea .Lsqr4x_body(%rip),%r10 | ||
1584 | cmp %r10,%rbx # context->Rip<.Lsqr4x_body | ||
1585 | jb .Lcommon_seh_tail | ||
1586 | |||
1587 | mov 152($context),%rax # pull context->Rsp | ||
1588 | |||
1589 | lea .Lsqr4x_epilogue(%rip),%r10 | ||
1590 | cmp %r10,%rbx # context->Rip>=.Lsqr4x_epilogue | ||
1591 | jae .Lcommon_seh_tail | ||
1592 | |||
1593 | mov 56(%rax),%rax # pull saved stack pointer | ||
1594 | lea 48(%rax),%rax | ||
1595 | |||
1596 | mov -8(%rax),%rbx | ||
1597 | mov -16(%rax),%rbp | ||
1598 | mov -24(%rax),%r12 | ||
1599 | mov -32(%rax),%r13 | ||
1600 | mov -40(%rax),%r14 | ||
1601 | mov -48(%rax),%r15 | ||
1602 | mov %rbx,144($context) # restore context->Rbx | ||
1603 | mov %rbp,160($context) # restore context->Rbp | ||
1604 | mov %r12,216($context) # restore context->R12 | ||
1605 | mov %r13,224($context) # restore context->R13 | ||
1606 | mov %r14,232($context) # restore context->R14 | ||
1607 | mov %r15,240($context) # restore context->R15 | ||
1608 | |||
1609 | .Lcommon_seh_tail: | ||
276 | mov 8(%rax),%rdi | 1610 | mov 8(%rax),%rdi |
277 | mov 16(%rax),%rsi | 1611 | mov 16(%rax),%rsi |
278 | mov %rax,152($context) # restore context->Rsp | 1612 | mov %rax,152($context) # restore context->Rsp |
@@ -310,7 +1644,7 @@ se_handler: | |||
310 | pop %rdi | 1644 | pop %rdi |
311 | pop %rsi | 1645 | pop %rsi |
312 | ret | 1646 | ret |
313 | .size se_handler,.-se_handler | 1647 | .size sqr_handler,.-sqr_handler |
314 | 1648 | ||
315 | .section .pdata | 1649 | .section .pdata |
316 | .align 4 | 1650 | .align 4 |
@@ -318,11 +1652,27 @@ se_handler: | |||
318 | .rva .LSEH_end_bn_mul_mont | 1652 | .rva .LSEH_end_bn_mul_mont |
319 | .rva .LSEH_info_bn_mul_mont | 1653 | .rva .LSEH_info_bn_mul_mont |
320 | 1654 | ||
1655 | .rva .LSEH_begin_bn_mul4x_mont | ||
1656 | .rva .LSEH_end_bn_mul4x_mont | ||
1657 | .rva .LSEH_info_bn_mul4x_mont | ||
1658 | |||
1659 | .rva .LSEH_begin_bn_sqr4x_mont | ||
1660 | .rva .LSEH_end_bn_sqr4x_mont | ||
1661 | .rva .LSEH_info_bn_sqr4x_mont | ||
1662 | |||
321 | .section .xdata | 1663 | .section .xdata |
322 | .align 8 | 1664 | .align 8 |
323 | .LSEH_info_bn_mul_mont: | 1665 | .LSEH_info_bn_mul_mont: |
324 | .byte 9,0,0,0 | 1666 | .byte 9,0,0,0 |
325 | .rva se_handler | 1667 | .rva mul_handler |
1668 | .rva .Lmul_body,.Lmul_epilogue # HandlerData[] | ||
1669 | .LSEH_info_bn_mul4x_mont: | ||
1670 | .byte 9,0,0,0 | ||
1671 | .rva mul_handler | ||
1672 | .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[] | ||
1673 | .LSEH_info_bn_sqr4x_mont: | ||
1674 | .byte 9,0,0,0 | ||
1675 | .rva sqr_handler | ||
326 | ___ | 1676 | ___ |
327 | } | 1677 | } |
328 | 1678 | ||
diff --git a/src/lib/libcrypto/bn/asm/x86_64-mont5.pl b/src/lib/libcrypto/bn/asm/x86_64-mont5.pl new file mode 100755 index 0000000000..057cda28aa --- /dev/null +++ b/src/lib/libcrypto/bn/asm/x86_64-mont5.pl | |||
@@ -0,0 +1,1070 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | |||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | |||
10 | # August 2011. | ||
11 | # | ||
12 | # Companion to x86_64-mont.pl that optimizes cache-timing attack | ||
13 | # countermeasures. The subroutines are produced by replacing bp[i] | ||
14 | # references in their x86_64-mont.pl counterparts with cache-neutral | ||
15 | # references to powers table computed in BN_mod_exp_mont_consttime. | ||
16 | # In addition, a subroutine that scatters elements of the powers table | ||
17 | # is implemented, so that scatter/gathering can be tuned without | ||
18 | # bn_exp.c modifications. | ||
19 | |||
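# The gathers below read every candidate cache line and keep the wanted
# quadword with pand/por masks, so the memory trace is the same for every
# index. A hypothetical Perl model of one gather step (names and the
# explicit loop are ours; the assembly does the masking branch-free in
# %xmm registers):
sub _gather_ref {
	my ($slots,$idx) = @_;		# $slots: ref to the quadwords of one stride
	my $out = 0;
	for my $k (0 .. $#$slots) {	# touch every slot, secret-independent
		my $mask = ($k == $idx) ? 0xffffffffffffffff : 0;
		$out |= $slots->[$k] & $mask;
	}
	return $out;
}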
20 | $flavour = shift; | ||
21 | $output = shift; | ||
22 | if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } | ||
23 | |||
24 | $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); | ||
25 | |||
26 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
27 | ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or | ||
28 | ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or | ||
29 | die "can't locate x86_64-xlate.pl"; | ||
30 | |||
31 | open STDOUT,"| $^X $xlate $flavour $output"; | ||
32 | |||
33 | # int bn_mul_mont_gather5( | ||
34 | $rp="%rdi"; # BN_ULONG *rp, | ||
35 | $ap="%rsi"; # const BN_ULONG *ap, | ||
36 | $bp="%rdx"; # const BN_ULONG *bp, | ||
37 | $np="%rcx"; # const BN_ULONG *np, | ||
38 | $n0="%r8"; # const BN_ULONG *n0, | ||
39 | $num="%r9"; # int num, | ||
40 | # int idx); # 0 to 2^5-1, "index" in $bp holding | ||
41 | # pre-computed powers of a', interlaced | ||
42 | # in such a manner that b[0] is $bp[idx], | ||
43 | # b[1] is $bp[2^5+idx], etc. | ||
44 | $lo0="%r10"; | ||
45 | $hi0="%r11"; | ||
46 | $hi1="%r13"; | ||
47 | $i="%r14"; | ||
48 | $j="%r15"; | ||
49 | $m0="%rbx"; | ||
50 | $m1="%rbp"; | ||
51 | |||
52 | $code=<<___; | ||
53 | .text | ||
54 | |||
55 | .globl bn_mul_mont_gather5 | ||
56 | .type bn_mul_mont_gather5,\@function,6 | ||
57 | .align 64 | ||
58 | bn_mul_mont_gather5: | ||
59 | test \$3,${num}d | ||
60 | jnz .Lmul_enter | ||
61 | cmp \$8,${num}d | ||
62 | jb .Lmul_enter | ||
63 | jmp .Lmul4x_enter | ||
64 | |||
65 | .align 16 | ||
66 | .Lmul_enter: | ||
67 | mov ${num}d,${num}d | ||
68 | mov `($win64?56:8)`(%rsp),%r10d # load 7th argument | ||
69 | push %rbx | ||
70 | push %rbp | ||
71 | push %r12 | ||
72 | push %r13 | ||
73 | push %r14 | ||
74 | push %r15 | ||
75 | ___ | ||
76 | $code.=<<___ if ($win64); | ||
77 | lea -0x28(%rsp),%rsp | ||
78 | movaps %xmm6,(%rsp) | ||
79 | movaps %xmm7,0x10(%rsp) | ||
80 | .Lmul_alloca: | ||
81 | ___ | ||
82 | $code.=<<___; | ||
83 | mov %rsp,%rax | ||
84 | lea 2($num),%r11 | ||
85 | neg %r11 | ||
86 | lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+2)) | ||
87 | and \$-1024,%rsp # minimize TLB usage | ||
88 | |||
89 | mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp | ||
90 | .Lmul_body: | ||
91 | mov $bp,%r12 # reassign $bp | ||
92 | ___ | ||
93 | $bp="%r12"; | ||
94 | $STRIDE=2**5*8; # 5 is "window size" | ||
95 | $N=$STRIDE/4; # should match cache line size | ||
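# The mask setup that follows decomposes the 7th argument as
# idx = line*8 + slot for a 256-byte stride spread over four 64-byte
# cache lines: %r11 keeps idx&7 (the quadword slot) and ~(idx>>3)&3
# indexes .Lmagic_masks so that exactly one of %xmm4..%xmm7 is all-ones.
# A hypothetical Perl model of the end result (sub name is ours; values
# assume $STRIDE=256, $N=64 as set above):
sub _mask_select_ref {
	my ($idx) = @_;			# 0 .. 31
	my $slot = $idx & 7;		# position within its cache line
	my $line = ($idx >> 3) & 3;	# which of the four cache lines holds it
	my @masks = map { $_ == $line ? 0xffffffffffffffff : 0 } 0 .. 3;
	return ($slot, @masks);		# @masks play the role of %xmm4..%xmm7
}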
96 | $code.=<<___; | ||
97 | mov %r10,%r11 | ||
98 | shr \$`log($N/8)/log(2)`,%r10 | ||
99 | and \$`$N/8-1`,%r11 | ||
100 | not %r10 | ||
101 | lea .Lmagic_masks(%rip),%rax | ||
102 | and \$`2**5/($N/8)-1`,%r10 # 5 is "window size" | ||
103 | lea 96($bp,%r11,8),$bp # pointer within 1st cache line | ||
104 | movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which | ||
105 | movq 8(%rax,%r10,8),%xmm5 # cache line contains element | ||
106 | movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument | ||
107 | movq 24(%rax,%r10,8),%xmm7 | ||
108 | |||
109 | movq `0*$STRIDE/4-96`($bp),%xmm0 | ||
110 | movq `1*$STRIDE/4-96`($bp),%xmm1 | ||
111 | pand %xmm4,%xmm0 | ||
112 | movq `2*$STRIDE/4-96`($bp),%xmm2 | ||
113 | pand %xmm5,%xmm1 | ||
114 | movq `3*$STRIDE/4-96`($bp),%xmm3 | ||
115 | pand %xmm6,%xmm2 | ||
116 | por %xmm1,%xmm0 | ||
117 | pand %xmm7,%xmm3 | ||
118 | por %xmm2,%xmm0 | ||
119 | lea $STRIDE($bp),$bp | ||
120 | por %xmm3,%xmm0 | ||
121 | |||
122 | movq %xmm0,$m0 # m0=bp[0] | ||
123 | |||
124 | mov ($n0),$n0 # pull n0[0] value | ||
125 | mov ($ap),%rax | ||
126 | |||
127 | xor $i,$i # i=0 | ||
128 | xor $j,$j # j=0 | ||
129 | |||
130 | movq `0*$STRIDE/4-96`($bp),%xmm0 | ||
131 | movq `1*$STRIDE/4-96`($bp),%xmm1 | ||
132 | pand %xmm4,%xmm0 | ||
133 | movq `2*$STRIDE/4-96`($bp),%xmm2 | ||
134 | pand %xmm5,%xmm1 | ||
135 | |||
136 | mov $n0,$m1 | ||
137 | mulq $m0 # ap[0]*bp[0] | ||
138 | mov %rax,$lo0 | ||
139 | mov ($np),%rax | ||
140 | |||
141 | movq `3*$STRIDE/4-96`($bp),%xmm3 | ||
142 | pand %xmm6,%xmm2 | ||
143 | por %xmm1,%xmm0 | ||
144 | pand %xmm7,%xmm3 | ||
145 | |||
146 | imulq $lo0,$m1 # "tp[0]"*n0 | ||
147 | mov %rdx,$hi0 | ||
148 | |||
149 | por %xmm2,%xmm0 | ||
150 | lea $STRIDE($bp),$bp | ||
151 | por %xmm3,%xmm0 | ||
152 | |||
153 | mulq $m1 # np[0]*m1 | ||
154 | add %rax,$lo0 # discarded | ||
155 | mov 8($ap),%rax | ||
156 | adc \$0,%rdx | ||
157 | mov %rdx,$hi1 | ||
158 | |||
159 | lea 1($j),$j # j++ | ||
160 | jmp .L1st_enter | ||
161 | |||
162 | .align 16 | ||
163 | .L1st: | ||
164 | add %rax,$hi1 | ||
165 | mov ($ap,$j,8),%rax | ||
166 | adc \$0,%rdx | ||
167 | add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0] | ||
168 | mov $lo0,$hi0 | ||
169 | adc \$0,%rdx | ||
170 | mov $hi1,-16(%rsp,$j,8) # tp[j-1] | ||
171 | mov %rdx,$hi1 | ||
172 | |||
173 | .L1st_enter: | ||
174 | mulq $m0 # ap[j]*bp[0] | ||
175 | add %rax,$hi0 | ||
176 | mov ($np,$j,8),%rax | ||
177 | adc \$0,%rdx | ||
178 | lea 1($j),$j # j++ | ||
179 | mov %rdx,$lo0 | ||
180 | |||
181 | mulq $m1 # np[j]*m1 | ||
182 | cmp $num,$j | ||
183 | jne .L1st | ||
184 | |||
185 | movq %xmm0,$m0 # bp[1] | ||
186 | |||
187 | add %rax,$hi1 | ||
188 | mov ($ap),%rax # ap[0] | ||
189 | adc \$0,%rdx | ||
190 | add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0] | ||
191 | adc \$0,%rdx | ||
192 | mov $hi1,-16(%rsp,$j,8) # tp[j-1] | ||
193 | mov %rdx,$hi1 | ||
194 | mov $lo0,$hi0 | ||
195 | |||
196 | xor %rdx,%rdx | ||
197 | add $hi0,$hi1 | ||
198 | adc \$0,%rdx | ||
199 | mov $hi1,-8(%rsp,$num,8) | ||
200 | mov %rdx,(%rsp,$num,8) # store upmost overflow bit | ||
201 | |||
202 | lea 1($i),$i # i++ | ||
203 | jmp .Louter | ||
204 | .align 16 | ||
205 | .Louter: | ||
206 | xor $j,$j # j=0 | ||
207 | mov $n0,$m1 | ||
208 | mov (%rsp),$lo0 | ||
209 | |||
210 | movq `0*$STRIDE/4-96`($bp),%xmm0 | ||
211 | movq `1*$STRIDE/4-96`($bp),%xmm1 | ||
212 | pand %xmm4,%xmm0 | ||
213 | movq `2*$STRIDE/4-96`($bp),%xmm2 | ||
214 | pand %xmm5,%xmm1 | ||
215 | |||
216 | mulq $m0 # ap[0]*bp[i] | ||
217 | add %rax,$lo0 # ap[0]*bp[i]+tp[0] | ||
218 | mov ($np),%rax | ||
219 | adc \$0,%rdx | ||
220 | |||
221 | movq `3*$STRIDE/4-96`($bp),%xmm3 | ||
222 | pand %xmm6,%xmm2 | ||
223 | por %xmm1,%xmm0 | ||
224 | pand %xmm7,%xmm3 | ||
225 | |||
226 | imulq $lo0,$m1 # tp[0]*n0 | ||
227 | mov %rdx,$hi0 | ||
228 | |||
229 | por %xmm2,%xmm0 | ||
230 | lea $STRIDE($bp),$bp | ||
231 | por %xmm3,%xmm0 | ||
232 | |||
233 | mulq $m1 # np[0]*m1 | ||
234 | add %rax,$lo0 # discarded | ||
235 | mov 8($ap),%rax | ||
236 | adc \$0,%rdx | ||
237 | mov 8(%rsp),$lo0 # tp[1] | ||
238 | mov %rdx,$hi1 | ||
239 | |||
240 | lea 1($j),$j # j++ | ||
241 | jmp .Linner_enter | ||
242 | |||
243 | .align 16 | ||
244 | .Linner: | ||
245 | add %rax,$hi1 | ||
246 | mov ($ap,$j,8),%rax | ||
247 | adc \$0,%rdx | ||
248 | add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j] | ||
249 | mov (%rsp,$j,8),$lo0 | ||
250 | adc \$0,%rdx | ||
251 | mov $hi1,-16(%rsp,$j,8) # tp[j-1] | ||
252 | mov %rdx,$hi1 | ||
253 | |||
254 | .Linner_enter: | ||
255 | mulq $m0 # ap[j]*bp[i] | ||
256 | add %rax,$hi0 | ||
257 | mov ($np,$j,8),%rax | ||
258 | adc \$0,%rdx | ||
259 | add $hi0,$lo0 # ap[j]*bp[i]+tp[j] | ||
260 | mov %rdx,$hi0 | ||
261 | adc \$0,$hi0 | ||
262 | lea 1($j),$j # j++ | ||
263 | |||
264 | mulq $m1 # np[j]*m1 | ||
265 | cmp $num,$j | ||
266 | jne .Linner | ||
267 | |||
268 | movq %xmm0,$m0 # bp[i+1] | ||
269 | |||
270 | add %rax,$hi1 | ||
271 | mov ($ap),%rax # ap[0] | ||
272 | adc \$0,%rdx | ||
273 | add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j] | ||
274 | mov (%rsp,$j,8),$lo0 | ||
275 | adc \$0,%rdx | ||
276 | mov $hi1,-16(%rsp,$j,8) # tp[j-1] | ||
277 | mov %rdx,$hi1 | ||
278 | |||
279 | xor %rdx,%rdx | ||
280 | add $hi0,$hi1 | ||
281 | adc \$0,%rdx | ||
282 | add $lo0,$hi1 # pull upmost overflow bit | ||
283 | adc \$0,%rdx | ||
284 | mov $hi1,-8(%rsp,$num,8) | ||
285 | mov %rdx,(%rsp,$num,8) # store upmost overflow bit | ||
286 | |||
287 | lea 1($i),$i # i++ | ||
288 | cmp $num,$i | ||
289 | jl .Louter | ||
290 | |||
291 | xor $i,$i # i=0 and clear CF! | ||
292 | mov (%rsp),%rax # tp[0] | ||
293 | lea (%rsp),$ap # borrow ap for tp | ||
294 | mov $num,$j # j=num | ||
295 | jmp .Lsub | ||
296 | .align 16 | ||
297 | .Lsub: sbb ($np,$i,8),%rax | ||
298 | mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
299 | mov 8($ap,$i,8),%rax # tp[i+1] | ||
300 | lea 1($i),$i # i++ | ||
301 | dec $j # doesn't affect CF! | ||
302 | jnz .Lsub | ||
303 | |||
304 | sbb \$0,%rax # handle upmost overflow bit | ||
305 | xor $i,$i | ||
306 | and %rax,$ap | ||
307 | not %rax | ||
308 | mov $rp,$np | ||
309 | and %rax,$np | ||
310 | mov $num,$j # j=num | ||
311 | or $np,$ap # ap=borrow?tp:rp | ||
312 | .align 16 | ||
313 | .Lcopy: # copy or in-place refresh | ||
314 | mov ($ap,$i,8),%rax | ||
315 | mov $i,(%rsp,$i,8) # zap temporary vector | ||
316 | mov %rax,($rp,$i,8) # rp[i]=tp[i] | ||
317 | lea 1($i),$i | ||
318 | sub \$1,$j | ||
319 | jnz .Lcopy | ||
320 | |||
321 | mov 8(%rsp,$num,8),%rsi # restore %rsp | ||
322 | mov \$1,%rax | ||
323 | ___ | ||
324 | $code.=<<___ if ($win64); | ||
325 | movaps (%rsi),%xmm6 | ||
326 | movaps 0x10(%rsi),%xmm7 | ||
327 | lea 0x28(%rsi),%rsi | ||
328 | ___ | ||
329 | $code.=<<___; | ||
330 | mov (%rsi),%r15 | ||
331 | mov 8(%rsi),%r14 | ||
332 | mov 16(%rsi),%r13 | ||
333 | mov 24(%rsi),%r12 | ||
334 | mov 32(%rsi),%rbp | ||
335 | mov 40(%rsi),%rbx | ||
336 | lea 48(%rsi),%rsp | ||
337 | .Lmul_epilogue: | ||
338 | ret | ||
339 | .size bn_mul_mont_gather5,.-bn_mul_mont_gather5 | ||
340 | ___ | ||
341 | {{{ | ||
342 | my @A=("%r10","%r11"); | ||
343 | my @N=("%r13","%rdi"); | ||
344 | $code.=<<___; | ||
345 | .type bn_mul4x_mont_gather5,\@function,6 | ||
346 | .align 16 | ||
347 | bn_mul4x_mont_gather5: | ||
348 | .Lmul4x_enter: | ||
349 | mov ${num}d,${num}d | ||
350 | mov `($win64?56:8)`(%rsp),%r10d # load 7th argument | ||
351 | push %rbx | ||
352 | push %rbp | ||
353 | push %r12 | ||
354 | push %r13 | ||
355 | push %r14 | ||
356 | push %r15 | ||
357 | ___ | ||
358 | $code.=<<___ if ($win64); | ||
359 | lea -0x28(%rsp),%rsp | ||
360 | movaps %xmm6,(%rsp) | ||
361 | movaps %xmm7,0x10(%rsp) | ||
362 | .Lmul4x_alloca: | ||
363 | ___ | ||
364 | $code.=<<___; | ||
365 | mov %rsp,%rax | ||
366 | lea 4($num),%r11 | ||
367 | neg %r11 | ||
368 | lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+4)) | ||
369 | and \$-1024,%rsp # minimize TLB usage | ||
370 | |||
371 | mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp | ||
372 | .Lmul4x_body: | ||
373 | mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp | ||
374 | mov %rdx,%r12 # reassign $bp | ||
375 | ___ | ||
376 | $bp="%r12"; | ||
377 | $STRIDE=2**5*8; # 5 is "window size" | ||
378 | $N=$STRIDE/4; # should match cache line size | ||
379 | $code.=<<___; | ||
380 | mov %r10,%r11 | ||
381 | shr \$`log($N/8)/log(2)`,%r10 | ||
382 | and \$`$N/8-1`,%r11 | ||
383 | not %r10 | ||
384 | lea .Lmagic_masks(%rip),%rax | ||
385 | and \$`2**5/($N/8)-1`,%r10 # 5 is "window size" | ||
386 | lea 96($bp,%r11,8),$bp # pointer within 1st cache line | ||
387 | movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which | ||
388 | movq 8(%rax,%r10,8),%xmm5 # cache line contains element | ||
389 | movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument | ||
390 | movq 24(%rax,%r10,8),%xmm7 | ||
391 | |||
392 | movq `0*$STRIDE/4-96`($bp),%xmm0 | ||
393 | movq `1*$STRIDE/4-96`($bp),%xmm1 | ||
394 | pand %xmm4,%xmm0 | ||
395 | movq `2*$STRIDE/4-96`($bp),%xmm2 | ||
396 | pand %xmm5,%xmm1 | ||
397 | movq `3*$STRIDE/4-96`($bp),%xmm3 | ||
398 | pand %xmm6,%xmm2 | ||
399 | por %xmm1,%xmm0 | ||
400 | pand %xmm7,%xmm3 | ||
401 | por %xmm2,%xmm0 | ||
402 | lea $STRIDE($bp),$bp | ||
403 | por %xmm3,%xmm0 | ||
404 | |||
405 | movq %xmm0,$m0 # m0=bp[0] | ||
406 | mov ($n0),$n0 # pull n0[0] value | ||
407 | mov ($ap),%rax | ||
408 | |||
409 | xor $i,$i # i=0 | ||
410 | xor $j,$j # j=0 | ||
411 | |||
412 | movq `0*$STRIDE/4-96`($bp),%xmm0 | ||
413 | movq `1*$STRIDE/4-96`($bp),%xmm1 | ||
414 | pand %xmm4,%xmm0 | ||
415 | movq `2*$STRIDE/4-96`($bp),%xmm2 | ||
416 | pand %xmm5,%xmm1 | ||
417 | |||
418 | mov $n0,$m1 | ||
419 | mulq $m0 # ap[0]*bp[0] | ||
420 | mov %rax,$A[0] | ||
421 | mov ($np),%rax | ||
422 | |||
423 | movq `3*$STRIDE/4-96`($bp),%xmm3 | ||
424 | pand %xmm6,%xmm2 | ||
425 | por %xmm1,%xmm0 | ||
426 | pand %xmm7,%xmm3 | ||
427 | |||
428 | imulq $A[0],$m1 # "tp[0]"*n0 | ||
429 | mov %rdx,$A[1] | ||
430 | |||
431 | por %xmm2,%xmm0 | ||
432 | lea $STRIDE($bp),$bp | ||
433 | por %xmm3,%xmm0 | ||
434 | |||
435 | mulq $m1 # np[0]*m1 | ||
436 | add %rax,$A[0] # discarded | ||
437 | mov 8($ap),%rax | ||
438 | adc \$0,%rdx | ||
439 | mov %rdx,$N[1] | ||
440 | |||
441 | mulq $m0 | ||
442 | add %rax,$A[1] | ||
443 | mov 8($np),%rax | ||
444 | adc \$0,%rdx | ||
445 | mov %rdx,$A[0] | ||
446 | |||
447 | mulq $m1 | ||
448 | add %rax,$N[1] | ||
449 | mov 16($ap),%rax | ||
450 | adc \$0,%rdx | ||
451 | add $A[1],$N[1] | ||
452 | lea 4($j),$j # j++ | ||
453 | adc \$0,%rdx | ||
454 | mov $N[1],(%rsp) | ||
455 | mov %rdx,$N[0] | ||
456 | jmp .L1st4x | ||
457 | .align 16 | ||
458 | .L1st4x: | ||
459 | mulq $m0 # ap[j]*bp[0] | ||
460 | add %rax,$A[0] | ||
461 | mov -16($np,$j,8),%rax | ||
462 | adc \$0,%rdx | ||
463 | mov %rdx,$A[1] | ||
464 | |||
465 | mulq $m1 # np[j]*m1 | ||
466 | add %rax,$N[0] | ||
467 | mov -8($ap,$j,8),%rax | ||
468 | adc \$0,%rdx | ||
469 | add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] | ||
470 | adc \$0,%rdx | ||
471 | mov $N[0],-24(%rsp,$j,8) # tp[j-1] | ||
472 | mov %rdx,$N[1] | ||
473 | |||
474 | mulq $m0 # ap[j]*bp[0] | ||
475 | add %rax,$A[1] | ||
476 | mov -8($np,$j,8),%rax | ||
477 | adc \$0,%rdx | ||
478 | mov %rdx,$A[0] | ||
479 | |||
480 | mulq $m1 # np[j]*m1 | ||
481 | add %rax,$N[1] | ||
482 | mov ($ap,$j,8),%rax | ||
483 | adc \$0,%rdx | ||
484 | add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] | ||
485 | adc \$0,%rdx | ||
486 | mov $N[1],-16(%rsp,$j,8) # tp[j-1] | ||
487 | mov %rdx,$N[0] | ||
488 | |||
489 | mulq $m0 # ap[j]*bp[0] | ||
490 | add %rax,$A[0] | ||
491 | mov ($np,$j,8),%rax | ||
492 | adc \$0,%rdx | ||
493 | mov %rdx,$A[1] | ||
494 | |||
495 | mulq $m1 # np[j]*m1 | ||
496 | add %rax,$N[0] | ||
497 | mov 8($ap,$j,8),%rax | ||
498 | adc \$0,%rdx | ||
499 | add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] | ||
500 | adc \$0,%rdx | ||
501 | mov $N[0],-8(%rsp,$j,8) # tp[j-1] | ||
502 | mov %rdx,$N[1] | ||
503 | |||
504 | mulq $m0 # ap[j]*bp[0] | ||
505 | add %rax,$A[1] | ||
506 | mov 8($np,$j,8),%rax | ||
507 | adc \$0,%rdx | ||
508 | lea 4($j),$j # j++ | ||
509 | mov %rdx,$A[0] | ||
510 | |||
511 | mulq $m1 # np[j]*m1 | ||
512 | add %rax,$N[1] | ||
513 | mov -16($ap,$j,8),%rax | ||
514 | adc \$0,%rdx | ||
515 | add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] | ||
516 | adc \$0,%rdx | ||
517 | mov $N[1],-32(%rsp,$j,8) # tp[j-1] | ||
518 | mov %rdx,$N[0] | ||
519 | cmp $num,$j | ||
520 | jl .L1st4x | ||
521 | |||
522 | mulq $m0 # ap[j]*bp[0] | ||
523 | add %rax,$A[0] | ||
524 | mov -16($np,$j,8),%rax | ||
525 | adc \$0,%rdx | ||
526 | mov %rdx,$A[1] | ||
527 | |||
528 | mulq $m1 # np[j]*m1 | ||
529 | add %rax,$N[0] | ||
530 | mov -8($ap,$j,8),%rax | ||
531 | adc \$0,%rdx | ||
532 | add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] | ||
533 | adc \$0,%rdx | ||
534 | mov $N[0],-24(%rsp,$j,8) # tp[j-1] | ||
535 | mov %rdx,$N[1] | ||
536 | |||
537 | mulq $m0 # ap[j]*bp[0] | ||
538 | add %rax,$A[1] | ||
539 | mov -8($np,$j,8),%rax | ||
540 | adc \$0,%rdx | ||
541 | mov %rdx,$A[0] | ||
542 | |||
543 | mulq $m1 # np[j]*m1 | ||
544 | add %rax,$N[1] | ||
545 | mov ($ap),%rax # ap[0] | ||
546 | adc \$0,%rdx | ||
547 | add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] | ||
548 | adc \$0,%rdx | ||
549 | mov $N[1],-16(%rsp,$j,8) # tp[j-1] | ||
550 | mov %rdx,$N[0] | ||
551 | |||
552 | movq %xmm0,$m0 # bp[1] | ||
553 | |||
554 | xor $N[1],$N[1] | ||
555 | add $A[0],$N[0] | ||
556 | adc \$0,$N[1] | ||
557 | mov $N[0],-8(%rsp,$j,8) | ||
558 | mov $N[1],(%rsp,$j,8) # store upmost overflow bit | ||
559 | |||
560 | lea 1($i),$i # i++ | ||
561 | .align 4 | ||
562 | .Louter4x: | ||
563 | xor $j,$j # j=0 | ||
564 | movq `0*$STRIDE/4-96`($bp),%xmm0 | ||
565 | movq `1*$STRIDE/4-96`($bp),%xmm1 | ||
566 | pand %xmm4,%xmm0 | ||
567 | movq `2*$STRIDE/4-96`($bp),%xmm2 | ||
568 | pand %xmm5,%xmm1 | ||
569 | |||
570 | mov (%rsp),$A[0] | ||
571 | mov $n0,$m1 | ||
572 | mulq $m0 # ap[0]*bp[i] | ||
573 | add %rax,$A[0] # ap[0]*bp[i]+tp[0] | ||
574 | mov ($np),%rax | ||
575 | adc \$0,%rdx | ||
576 | |||
577 | movq `3*$STRIDE/4-96`($bp),%xmm3 | ||
578 | pand %xmm6,%xmm2 | ||
579 | por %xmm1,%xmm0 | ||
580 | pand %xmm7,%xmm3 | ||
581 | |||
582 | imulq $A[0],$m1 # tp[0]*n0 | ||
583 | mov %rdx,$A[1] | ||
584 | |||
585 | por %xmm2,%xmm0 | ||
586 | lea $STRIDE($bp),$bp | ||
587 | por %xmm3,%xmm0 | ||
588 | |||
589 | mulq $m1 # np[0]*m1 | ||
590 | add %rax,$A[0] # "$N[0]", discarded | ||
591 | mov 8($ap),%rax | ||
592 | adc \$0,%rdx | ||
593 | mov %rdx,$N[1] | ||
594 | |||
595 | mulq $m0 # ap[j]*bp[i] | ||
596 | add %rax,$A[1] | ||
597 | mov 8($np),%rax | ||
598 | adc \$0,%rdx | ||
599 | add 8(%rsp),$A[1] # +tp[1] | ||
600 | adc \$0,%rdx | ||
601 | mov %rdx,$A[0] | ||
602 | |||
603 | mulq $m1 # np[j]*m1 | ||
604 | add %rax,$N[1] | ||
605 | mov 16($ap),%rax | ||
606 | adc \$0,%rdx | ||
607 | add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j] | ||
608 | lea 4($j),$j # j+=2 | ||
609 | adc \$0,%rdx | ||
610 | mov %rdx,$N[0] | ||
611 | jmp .Linner4x | ||
612 | .align 16 | ||
613 | .Linner4x: | ||
614 | mulq $m0 # ap[j]*bp[i] | ||
615 | add %rax,$A[0] | ||
616 | mov -16($np,$j,8),%rax | ||
617 | adc \$0,%rdx | ||
618 | add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j] | ||
619 | adc \$0,%rdx | ||
620 | mov %rdx,$A[1] | ||
621 | |||
622 | mulq $m1 # np[j]*m1 | ||
623 | add %rax,$N[0] | ||
624 | mov -8($ap,$j,8),%rax | ||
625 | adc \$0,%rdx | ||
626 | add $A[0],$N[0] | ||
627 | adc \$0,%rdx | ||
628 | mov $N[1],-32(%rsp,$j,8) # tp[j-1] | ||
629 | mov %rdx,$N[1] | ||
630 | |||
631 | mulq $m0 # ap[j]*bp[i] | ||
632 | add %rax,$A[1] | ||
633 | mov -8($np,$j,8),%rax | ||
634 | adc \$0,%rdx | ||
635 | add -8(%rsp,$j,8),$A[1] | ||
636 | adc \$0,%rdx | ||
637 | mov %rdx,$A[0] | ||
638 | |||
639 | mulq $m1 # np[j]*m1 | ||
640 | add %rax,$N[1] | ||
641 | mov ($ap,$j,8),%rax | ||
642 | adc \$0,%rdx | ||
643 | add $A[1],$N[1] | ||
644 | adc \$0,%rdx | ||
645 | mov $N[0],-24(%rsp,$j,8) # tp[j-1] | ||
646 | mov %rdx,$N[0] | ||
647 | |||
648 | mulq $m0 # ap[j]*bp[i] | ||
649 | add %rax,$A[0] | ||
650 | mov ($np,$j,8),%rax | ||
651 | adc \$0,%rdx | ||
652 | add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j] | ||
653 | adc \$0,%rdx | ||
654 | mov %rdx,$A[1] | ||
655 | |||
656 | mulq $m1 # np[j]*m1 | ||
657 | add %rax,$N[0] | ||
658 | mov 8($ap,$j,8),%rax | ||
659 | adc \$0,%rdx | ||
660 | add $A[0],$N[0] | ||
661 | adc \$0,%rdx | ||
662 | mov $N[1],-16(%rsp,$j,8) # tp[j-1] | ||
663 | mov %rdx,$N[1] | ||
664 | |||
665 | mulq $m0 # ap[j]*bp[i] | ||
666 | add %rax,$A[1] | ||
667 | mov 8($np,$j,8),%rax | ||
668 | adc \$0,%rdx | ||
669 | add 8(%rsp,$j,8),$A[1] | ||
670 | adc \$0,%rdx | ||
671 | lea 4($j),$j # j++ | ||
672 | mov %rdx,$A[0] | ||
673 | |||
674 | mulq $m1 # np[j]*m1 | ||
675 | add %rax,$N[1] | ||
676 | mov -16($ap,$j,8),%rax | ||
677 | adc \$0,%rdx | ||
678 | add $A[1],$N[1] | ||
679 | adc \$0,%rdx | ||
680 | mov $N[0],-40(%rsp,$j,8) # tp[j-1] | ||
681 | mov %rdx,$N[0] | ||
682 | cmp $num,$j | ||
683 | jl .Linner4x | ||
684 | |||
685 | mulq $m0 # ap[j]*bp[i] | ||
686 | add %rax,$A[0] | ||
687 | mov -16($np,$j,8),%rax | ||
688 | adc \$0,%rdx | ||
689 | add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j] | ||
690 | adc \$0,%rdx | ||
691 | mov %rdx,$A[1] | ||
692 | |||
693 | mulq $m1 # np[j]*m1 | ||
694 | add %rax,$N[0] | ||
695 | mov -8($ap,$j,8),%rax | ||
696 | adc \$0,%rdx | ||
697 | add $A[0],$N[0] | ||
698 | adc \$0,%rdx | ||
699 | mov $N[1],-32(%rsp,$j,8) # tp[j-1] | ||
700 | mov %rdx,$N[1] | ||
701 | |||
702 | mulq $m0 # ap[j]*bp[i] | ||
703 | add %rax,$A[1] | ||
704 | mov -8($np,$j,8),%rax | ||
705 | adc \$0,%rdx | ||
706 | add -8(%rsp,$j,8),$A[1] | ||
707 | adc \$0,%rdx | ||
708 | lea 1($i),$i # i++ | ||
709 | mov %rdx,$A[0] | ||
710 | |||
711 | mulq $m1 # np[j]*m1 | ||
712 | add %rax,$N[1] | ||
713 | mov ($ap),%rax # ap[0] | ||
714 | adc \$0,%rdx | ||
715 | add $A[1],$N[1] | ||
716 | adc \$0,%rdx | ||
717 | mov $N[0],-24(%rsp,$j,8) # tp[j-1] | ||
718 | mov %rdx,$N[0] | ||
719 | |||
720 | movq %xmm0,$m0 # bp[i+1] | ||
721 | mov $N[1],-16(%rsp,$j,8) # tp[j-1] | ||
722 | |||
723 | xor $N[1],$N[1] | ||
724 | add $A[0],$N[0] | ||
725 | adc \$0,$N[1] | ||
726 | add (%rsp,$num,8),$N[0] # pull upmost overflow bit | ||
727 | adc \$0,$N[1] | ||
728 | mov $N[0],-8(%rsp,$j,8) | ||
729 | mov $N[1],(%rsp,$j,8) # store upmost overflow bit | ||
730 | |||
731 | cmp $num,$i | ||
732 | jl .Louter4x | ||
733 | ___ | ||
734 | { | ||
735 | my @ri=("%rax","%rdx",$m0,$m1); | ||
736 | $code.=<<___; | ||
737 | mov 16(%rsp,$num,8),$rp # restore $rp | ||
738 | mov 0(%rsp),@ri[0] # tp[0] | ||
739 | pxor %xmm0,%xmm0 | ||
740 | mov 8(%rsp),@ri[1] # tp[1] | ||
741 | shr \$2,$num # num/=4 | ||
742 | lea (%rsp),$ap # borrow ap for tp | ||
743 | xor $i,$i # i=0 and clear CF! | ||
744 | |||
745 | sub 0($np),@ri[0] | ||
746 | mov 16($ap),@ri[2] # tp[2] | ||
747 | mov 24($ap),@ri[3] # tp[3] | ||
748 | sbb 8($np),@ri[1] | ||
749 | lea -1($num),$j # j=num/4-1 | ||
750 | jmp .Lsub4x | ||
751 | .align 16 | ||
752 | .Lsub4x: | ||
753 | mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
754 | mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
755 | sbb 16($np,$i,8),@ri[2] | ||
756 | mov 32($ap,$i,8),@ri[0] # tp[i+1] | ||
757 | mov 40($ap,$i,8),@ri[1] | ||
758 | sbb 24($np,$i,8),@ri[3] | ||
759 | mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
760 | mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
761 | sbb 32($np,$i,8),@ri[0] | ||
762 | mov 48($ap,$i,8),@ri[2] | ||
763 | mov 56($ap,$i,8),@ri[3] | ||
764 | sbb 40($np,$i,8),@ri[1] | ||
765 | lea 4($i),$i # i++ | ||
766 | dec $j # doesn't affect CF! | ||
767 | jnz .Lsub4x | ||
768 | |||
769 | mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
770 | mov 32($ap,$i,8),@ri[0] # load overflow bit | ||
771 | sbb 16($np,$i,8),@ri[2] | ||
772 | mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
773 | sbb 24($np,$i,8),@ri[3] | ||
774 | mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
775 | |||
776 | sbb \$0,@ri[0] # handle upmost overflow bit | ||
777 | mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i] | ||
778 | xor $i,$i # i=0 | ||
779 | and @ri[0],$ap | ||
780 | not @ri[0] | ||
781 | mov $rp,$np | ||
782 | and @ri[0],$np | ||
783 | lea -1($num),$j | ||
784 | or $np,$ap # ap=borrow?tp:rp | ||
785 | |||
786 | movdqu ($ap),%xmm1 | ||
787 | movdqa %xmm0,(%rsp) | ||
788 | movdqu %xmm1,($rp) | ||
789 | jmp .Lcopy4x | ||
790 | .align 16 | ||
791 | .Lcopy4x: # copy or in-place refresh | ||
792 | movdqu 16($ap,$i),%xmm2 | ||
793 | movdqu 32($ap,$i),%xmm1 | ||
794 | movdqa %xmm0,16(%rsp,$i) | ||
795 | movdqu %xmm2,16($rp,$i) | ||
796 | movdqa %xmm0,32(%rsp,$i) | ||
797 | movdqu %xmm1,32($rp,$i) | ||
798 | lea 32($i),$i | ||
799 | dec $j | ||
800 | jnz .Lcopy4x | ||
801 | |||
802 | shl \$2,$num | ||
803 | movdqu 16($ap,$i),%xmm2 | ||
804 | movdqa %xmm0,16(%rsp,$i) | ||
805 | movdqu %xmm2,16($rp,$i) | ||
806 | ___ | ||
807 | } | ||
808 | $code.=<<___; | ||
809 | mov 8(%rsp,$num,8),%rsi # restore %rsp | ||
810 | mov \$1,%rax | ||
811 | ___ | ||
812 | $code.=<<___ if ($win64); | ||
813 | movaps (%rsi),%xmm6 | ||
814 | movaps 0x10(%rsi),%xmm7 | ||
815 | lea 0x28(%rsi),%rsi | ||
816 | ___ | ||
817 | $code.=<<___; | ||
818 | mov (%rsi),%r15 | ||
819 | mov 8(%rsi),%r14 | ||
820 | mov 16(%rsi),%r13 | ||
821 | mov 24(%rsi),%r12 | ||
822 | mov 32(%rsi),%rbp | ||
823 | mov 40(%rsi),%rbx | ||
824 | lea 48(%rsi),%rsp | ||
825 | .Lmul4x_epilogue: | ||
826 | ret | ||
827 | .size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5 | ||
828 | ___ | ||
829 | }}} | ||
830 | |||
831 | { | ||
832 | my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order | ||
833 | ("%rdi","%rsi","%rdx","%rcx"); # Unix order | ||
834 | my $out=$inp; | ||
835 | my $STRIDE=2**5*8; | ||
836 | my $N=$STRIDE/4; | ||
837 | |||
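# bn_scatter5 below interleaves the powers table: word j of power $idx is
# stored at tbl[32*j + idx] (in quadwords), so gathering power $idx later
# walks the table with a fixed 256-byte stride. A hypothetical Perl model
# of the layout (array refs stand in for the BN_ULONG buffers):
sub _scatter_ref {
	my ($tbl,$inp,$idx) = @_;	# $idx: 0 .. 31
	$tbl->[32*$_ + $idx] = $inp->[$_] for 0 .. $#$inp;
}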
838 | $code.=<<___; | ||
839 | .globl bn_scatter5 | ||
840 | .type bn_scatter5,\@abi-omnipotent | ||
841 | .align 16 | ||
842 | bn_scatter5: | ||
843 | cmp \$0, $num | ||
844 | jz .Lscatter_epilogue | ||
845 | lea ($tbl,$idx,8),$tbl | ||
846 | .Lscatter: | ||
847 | mov ($inp),%rax | ||
848 | lea 8($inp),$inp | ||
849 | mov %rax,($tbl) | ||
850 | lea 32*8($tbl),$tbl | ||
851 | sub \$1,$num | ||
852 | jnz .Lscatter | ||
853 | .Lscatter_epilogue: | ||
854 | ret | ||
855 | .size bn_scatter5,.-bn_scatter5 | ||
856 | |||
857 | .globl bn_gather5 | ||
858 | .type bn_gather5,\@abi-omnipotent | ||
859 | .align 16 | ||
860 | bn_gather5: | ||
861 | ___ | ||
862 | $code.=<<___ if ($win64); | ||
863 | .LSEH_begin_bn_gather5: | ||
864 | # I can't trust assembler to use specific encoding:-( | ||
865 | .byte 0x48,0x83,0xec,0x28 #sub \$0x28,%rsp | ||
866 | .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp) | ||
867 | .byte 0x0f,0x29,0x7c,0x24,0x10 #movaps %xmm7,0x10(%rsp) | ||
868 | ___ | ||
869 | $code.=<<___; | ||
870 | mov $idx,%r11 | ||
871 | shr \$`log($N/8)/log(2)`,$idx | ||
872 | and \$`$N/8-1`,%r11 | ||
873 | not $idx | ||
874 | lea .Lmagic_masks(%rip),%rax | ||
875 | and \$`2**5/($N/8)-1`,$idx # 5 is "window size" | ||
876 | lea 96($tbl,%r11,8),$tbl # pointer within 1st cache line | ||
877 | movq 0(%rax,$idx,8),%xmm4 # set of masks denoting which | ||
878 | movq 8(%rax,$idx,8),%xmm5 # cache line contains element | ||
879 | movq 16(%rax,$idx,8),%xmm6 # denoted by 7th argument | ||
880 | movq 24(%rax,$idx,8),%xmm7 | ||
881 | jmp .Lgather | ||
882 | .align 16 | ||
883 | .Lgather: | ||
884 | movq `0*$STRIDE/4-96`($tbl),%xmm0 | ||
885 | movq `1*$STRIDE/4-96`($tbl),%xmm1 | ||
886 | pand %xmm4,%xmm0 | ||
887 | movq `2*$STRIDE/4-96`($tbl),%xmm2 | ||
888 | pand %xmm5,%xmm1 | ||
889 | movq `3*$STRIDE/4-96`($tbl),%xmm3 | ||
890 | pand %xmm6,%xmm2 | ||
891 | por %xmm1,%xmm0 | ||
892 | pand %xmm7,%xmm3 | ||
893 | por %xmm2,%xmm0 | ||
894 | lea $STRIDE($tbl),$tbl | ||
895 | por %xmm3,%xmm0 | ||
896 | |||
897 | movq %xmm0,($out) # m0=bp[0] | ||
898 | lea 8($out),$out | ||
899 | sub \$1,$num | ||
900 | jnz .Lgather | ||
901 | ___ | ||
902 | $code.=<<___ if ($win64); | ||
903 | movaps (%rsp),%xmm6 | ||
904 | movaps 0x10(%rsp),%xmm7 | ||
905 | lea 0x28(%rsp),%rsp | ||
906 | ___ | ||
907 | $code.=<<___; | ||
908 | ret | ||
909 | .LSEH_end_bn_gather5: | ||
910 | .size bn_gather5,.-bn_gather5 | ||
911 | ___ | ||
912 | } | ||
913 | $code.=<<___; | ||
914 | .align 64 | ||
915 | .Lmagic_masks: | ||
916 | .long 0,0, 0,0, 0,0, -1,-1 | ||
917 | .long 0,0, 0,0, 0,0, 0,0 | ||
918 | .asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>" | ||
919 | ___ | ||
920 | |||
921 | # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, | ||
922 | # CONTEXT *context,DISPATCHER_CONTEXT *disp) | ||
923 | if ($win64) { | ||
924 | $rec="%rcx"; | ||
925 | $frame="%rdx"; | ||
926 | $context="%r8"; | ||
927 | $disp="%r9"; | ||
928 | |||
929 | $code.=<<___; | ||
930 | .extern __imp_RtlVirtualUnwind | ||
931 | .type mul_handler,\@abi-omnipotent | ||
932 | .align 16 | ||
933 | mul_handler: | ||
934 | push %rsi | ||
935 | push %rdi | ||
936 | push %rbx | ||
937 | push %rbp | ||
938 | push %r12 | ||
939 | push %r13 | ||
940 | push %r14 | ||
941 | push %r15 | ||
942 | pushfq | ||
943 | sub \$64,%rsp | ||
944 | |||
945 | mov 120($context),%rax # pull context->Rax | ||
946 | mov 248($context),%rbx # pull context->Rip | ||
947 | |||
948 | mov 8($disp),%rsi # disp->ImageBase | ||
949 | mov 56($disp),%r11 # disp->HandlerData | ||
950 | |||
951 | mov 0(%r11),%r10d # HandlerData[0] | ||
952 | lea (%rsi,%r10),%r10 # end of prologue label | ||
953 | cmp %r10,%rbx # context->Rip<end of prologue label | ||
954 | jb .Lcommon_seh_tail | ||
955 | |||
956 | lea `40+48`(%rax),%rax | ||
957 | |||
958 | mov 4(%r11),%r10d # HandlerData[1] | ||
959 | lea (%rsi,%r10),%r10 # end of alloca label | ||
960 | cmp %r10,%rbx # context->Rip<end of alloca label | ||
961 | jb .Lcommon_seh_tail | ||
962 | |||
963 | mov 152($context),%rax # pull context->Rsp | ||
964 | |||
965 | mov 8(%r11),%r10d # HandlerData[2] | ||
966 | lea (%rsi,%r10),%r10 # epilogue label | ||
967 | cmp %r10,%rbx # context->Rip>=epilogue label | ||
968 | jae .Lcommon_seh_tail | ||
969 | |||
970 | mov 192($context),%r10 # pull $num | ||
971 | mov 8(%rax,%r10,8),%rax # pull saved stack pointer | ||
972 | |||
973 | movaps (%rax),%xmm0 | ||
974 | movaps 16(%rax),%xmm1 | ||
975 | lea `40+48`(%rax),%rax | ||
976 | |||
977 | mov -8(%rax),%rbx | ||
978 | mov -16(%rax),%rbp | ||
979 | mov -24(%rax),%r12 | ||
980 | mov -32(%rax),%r13 | ||
981 | mov -40(%rax),%r14 | ||
982 | mov -48(%rax),%r15 | ||
983 | mov %rbx,144($context) # restore context->Rbx | ||
984 | mov %rbp,160($context) # restore context->Rbp | ||
985 | mov %r12,216($context) # restore context->R12 | ||
986 | mov %r13,224($context) # restore context->R13 | ||
987 | mov %r14,232($context) # restore context->R14 | ||
988 | mov %r15,240($context) # restore context->R15 | ||
989 | movups %xmm0,512($context) # restore context->Xmm6 | ||
990 | movups %xmm1,528($context) # restore context->Xmm7 | ||
991 | |||
992 | .Lcommon_seh_tail: | ||
993 | mov 8(%rax),%rdi | ||
994 | mov 16(%rax),%rsi | ||
995 | mov %rax,152($context) # restore context->Rsp | ||
996 | mov %rsi,168($context) # restore context->Rsi | ||
997 | mov %rdi,176($context) # restore context->Rdi | ||
998 | |||
999 | mov 40($disp),%rdi # disp->ContextRecord | ||
1000 | mov $context,%rsi # context | ||
1001 | mov \$154,%ecx # sizeof(CONTEXT) | ||
1002 | .long 0xa548f3fc # cld; rep movsq | ||
1003 | |||
1004 | mov $disp,%rsi | ||
1005 | xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER | ||
1006 | mov 8(%rsi),%rdx # arg2, disp->ImageBase | ||
1007 | mov 0(%rsi),%r8 # arg3, disp->ControlPc | ||
1008 | mov 16(%rsi),%r9 # arg4, disp->FunctionEntry | ||
1009 | mov 40(%rsi),%r10 # disp->ContextRecord | ||
1010 | lea 56(%rsi),%r11 # &disp->HandlerData | ||
1011 | lea 24(%rsi),%r12 # &disp->EstablisherFrame | ||
1012 | mov %r10,32(%rsp) # arg5 | ||
1013 | mov %r11,40(%rsp) # arg6 | ||
1014 | mov %r12,48(%rsp) # arg7 | ||
1015 | mov %rcx,56(%rsp) # arg8, (NULL) | ||
1016 | call *__imp_RtlVirtualUnwind(%rip) | ||
1017 | |||
1018 | mov \$1,%eax # ExceptionContinueSearch | ||
1019 | add \$64,%rsp | ||
1020 | popfq | ||
1021 | pop %r15 | ||
1022 | pop %r14 | ||
1023 | pop %r13 | ||
1024 | pop %r12 | ||
1025 | pop %rbp | ||
1026 | pop %rbx | ||
1027 | pop %rdi | ||
1028 | pop %rsi | ||
1029 | ret | ||
1030 | .size mul_handler,.-mul_handler | ||
1031 | |||
1032 | .section .pdata | ||
1033 | .align 4 | ||
1034 | .rva .LSEH_begin_bn_mul_mont_gather5 | ||
1035 | .rva .LSEH_end_bn_mul_mont_gather5 | ||
1036 | .rva .LSEH_info_bn_mul_mont_gather5 | ||
1037 | |||
1038 | .rva .LSEH_begin_bn_mul4x_mont_gather5 | ||
1039 | .rva .LSEH_end_bn_mul4x_mont_gather5 | ||
1040 | .rva .LSEH_info_bn_mul4x_mont_gather5 | ||
1041 | |||
1042 | .rva .LSEH_begin_bn_gather5 | ||
1043 | .rva .LSEH_end_bn_gather5 | ||
1044 | .rva .LSEH_info_bn_gather5 | ||
1045 | |||
1046 | .section .xdata | ||
1047 | .align 8 | ||
1048 | .LSEH_info_bn_mul_mont_gather5: | ||
1049 | .byte 9,0,0,0 | ||
1050 | .rva mul_handler | ||
1051 | .rva .Lmul_alloca,.Lmul_body,.Lmul_epilogue # HandlerData[] | ||
1052 | .align 8 | ||
1053 | .LSEH_info_bn_mul4x_mont_gather5: | ||
1054 | .byte 9,0,0,0 | ||
1055 | .rva mul_handler | ||
1056 | .rva .Lmul4x_alloca,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[] | ||
1057 | .align 8 | ||
1058 | .LSEH_info_bn_gather5: | ||
1059 | .byte 0x01,0x0d,0x05,0x00 | ||
1060 | .byte 0x0d,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7 | ||
1061 | .byte 0x08,0x68,0x00,0x00 #movaps (rsp),xmm6 | ||
1062 | .byte 0x04,0x42,0x00,0x00 #sub rsp,0x28 | ||
1063 | .align 8 | ||
1064 | ___ | ||
1065 | } | ||
1066 | |||
1067 | $code =~ s/\`([^\`]*)\`/eval($1)/gem; | ||
1068 | |||
1069 | print $code; | ||
1070 | close STDOUT; | ||