Diffstat (limited to 'src/lib/libcrypto/bn/asm')
-rw-r--r--  src/lib/libcrypto/bn/asm/alpha-mont.pl      317
-rw-r--r--  src/lib/libcrypto/bn/asm/armv4-mont.pl      200
-rw-r--r--  src/lib/libcrypto/bn/asm/bn-586.pl          675
-rw-r--r--  src/lib/libcrypto/bn/asm/co-586.pl          286
-rw-r--r--  src/lib/libcrypto/bn/asm/ia64.S            1555
-rw-r--r--  src/lib/libcrypto/bn/asm/pa-risc2.s        1618
-rw-r--r--  src/lib/libcrypto/bn/asm/pa-risc2W.s       1605
-rw-r--r--  src/lib/libcrypto/bn/asm/ppc-mont.pl        323
-rw-r--r--  src/lib/libcrypto/bn/asm/ppc.pl            2078
-rw-r--r--  src/lib/libcrypto/bn/asm/ppc64-mont.pl      918
-rw-r--r--  src/lib/libcrypto/bn/asm/s390x-mont.pl      225
-rwxr-xr-x  src/lib/libcrypto/bn/asm/s390x.S            678
-rw-r--r--  src/lib/libcrypto/bn/asm/sparcv8.S         1458
-rw-r--r--  src/lib/libcrypto/bn/asm/sparcv8plus.S     1547
-rw-r--r--  src/lib/libcrypto/bn/asm/sparcv9-mont.pl    606
-rwxr-xr-x  src/lib/libcrypto/bn/asm/sparcv9a-mont.pl   882
-rw-r--r--  src/lib/libcrypto/bn/asm/via-mont.pl        242
-rwxr-xr-x  src/lib/libcrypto/bn/asm/x86-mont.pl        591
-rw-r--r--  src/lib/libcrypto/bn/asm/x86.pl              28
-rw-r--r--  src/lib/libcrypto/bn/asm/x86/add.pl          76
-rw-r--r--  src/lib/libcrypto/bn/asm/x86/comba.pl       277
-rw-r--r--  src/lib/libcrypto/bn/asm/x86/div.pl          15
-rw-r--r--  src/lib/libcrypto/bn/asm/x86/mul.pl          77
-rw-r--r--  src/lib/libcrypto/bn/asm/x86/mul_add.pl      87
-rw-r--r--  src/lib/libcrypto/bn/asm/x86/sqr.pl          60
-rw-r--r--  src/lib/libcrypto/bn/asm/x86/sub.pl          76
-rw-r--r--  src/lib/libcrypto/bn/asm/x86_64-gcc.c       597
-rwxr-xr-x  src/lib/libcrypto/bn/asm/x86_64-mont.pl     214
28 files changed, 0 insertions, 17311 deletions
diff --git a/src/lib/libcrypto/bn/asm/alpha-mont.pl b/src/lib/libcrypto/bn/asm/alpha-mont.pl
deleted file mode 100644
index 7a2cc3173b..0000000000
--- a/src/lib/libcrypto/bn/asm/alpha-mont.pl
+++ /dev/null
@@ -1,317 +0,0 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# On 21264 RSA sign performance improves by 70/35/20/15 percent for
# 512/1024/2048/4096 bit key lengths. This is against vendor compiler
# instructed to '-tune host' code with in-line assembler. Other
# benchmarks improve by 15-20%. To anchor it to something else, the
# code provides approximately the same performance per GHz as AMD64.
# I.e. if you compare 1GHz 21264 and 2GHz Opteron, you'll observe ~2x
# difference.

# int bn_mul_mont(
$rp="a0";	# BN_ULONG *rp,
$ap="a1";	# const BN_ULONG *ap,
$bp="a2";	# const BN_ULONG *bp,
$np="a3";	# const BN_ULONG *np,
$n0="a4";	# const BN_ULONG *n0,
$num="a5";	# int num);

$lo0="t0";
$hi0="t1";
$lo1="t2";
$hi1="t3";
$aj="t4";
$bi="t5";
$nj="t6";
$tp="t7";
$alo="t8";
$ahi="t9";
$nlo="t10";
$nhi="t11";
$tj="t12";
$i="s3";
$j="s4";
$m1="s5";

$code=<<___;
#include <asm.h>
#include <regdef.h>

.text

.set noat
.set noreorder

.globl bn_mul_mont
.align 5
.ent bn_mul_mont
bn_mul_mont:
	lda sp,-40(sp)
	stq ra,0(sp)
	stq s3,8(sp)
	stq s4,16(sp)
	stq s5,24(sp)
	stq fp,32(sp)
	mov sp,fp
	.mask 0x0400f000,-40
	.frame fp,40,ra
	.prologue 0

	.align 4
	.set reorder
	sextl $num,$num
	mov 0,v0
	cmplt $num,4,AT
	bne AT,.Lexit

	ldq $hi0,0($ap)	# ap[0]
	s8addq $num,16,AT
	ldq $aj,8($ap)
	subq sp,AT,sp
	ldq $bi,0($bp)	# bp[0]
	mov -4096,AT
	ldq $n0,0($n0)
	and sp,AT,sp

	mulq $hi0,$bi,$lo0
	ldq $hi1,0($np)	# np[0]
	umulh $hi0,$bi,$hi0
	ldq $nj,8($np)

	mulq $lo0,$n0,$m1

	mulq $hi1,$m1,$lo1
	umulh $hi1,$m1,$hi1

	addq $lo1,$lo0,$lo1
	cmpult $lo1,$lo0,AT
	addq $hi1,AT,$hi1

	mulq $aj,$bi,$alo
	mov 2,$j
	umulh $aj,$bi,$ahi
	mov sp,$tp

	mulq $nj,$m1,$nlo
	s8addq $j,$ap,$aj
	umulh $nj,$m1,$nhi
	s8addq $j,$np,$nj
.align 4
.L1st:
	.set noreorder
	ldq $aj,($aj)
	addl $j,1,$j
	ldq $nj,($nj)
	lda $tp,8($tp)

	addq $alo,$hi0,$lo0
	mulq $aj,$bi,$alo
	cmpult $lo0,$hi0,AT
	addq $nlo,$hi1,$lo1

	mulq $nj,$m1,$nlo
	addq $ahi,AT,$hi0
	cmpult $lo1,$hi1,v0
	cmplt $j,$num,$tj

	umulh $aj,$bi,$ahi
	addq $nhi,v0,$hi1
	addq $lo1,$lo0,$lo1
	s8addq $j,$ap,$aj

	umulh $nj,$m1,$nhi
	cmpult $lo1,$lo0,v0
	addq $hi1,v0,$hi1
	s8addq $j,$np,$nj

	stq $lo1,-8($tp)
	nop
	unop
	bne $tj,.L1st
	.set reorder

	addq $alo,$hi0,$lo0
	addq $nlo,$hi1,$lo1
	cmpult $lo0,$hi0,AT
	cmpult $lo1,$hi1,v0
	addq $ahi,AT,$hi0
	addq $nhi,v0,$hi1

	addq $lo1,$lo0,$lo1
	cmpult $lo1,$lo0,v0
	addq $hi1,v0,$hi1

	stq $lo1,0($tp)

	addq $hi1,$hi0,$hi1
	cmpult $hi1,$hi0,AT
	stq $hi1,8($tp)
	stq AT,16($tp)

	mov 1,$i
.align 4
.Louter:
	s8addq $i,$bp,$bi
	ldq $hi0,($ap)
	ldq $aj,8($ap)
	ldq $bi,($bi)
	ldq $hi1,($np)
	ldq $nj,8($np)
	ldq $tj,(sp)

	mulq $hi0,$bi,$lo0
	umulh $hi0,$bi,$hi0

	addq $lo0,$tj,$lo0
	cmpult $lo0,$tj,AT
	addq $hi0,AT,$hi0

	mulq $lo0,$n0,$m1

	mulq $hi1,$m1,$lo1
	umulh $hi1,$m1,$hi1

	addq $lo1,$lo0,$lo1
	cmpult $lo1,$lo0,AT
	mov 2,$j
	addq $hi1,AT,$hi1

	mulq $aj,$bi,$alo
	mov sp,$tp
	umulh $aj,$bi,$ahi

	mulq $nj,$m1,$nlo
	s8addq $j,$ap,$aj
	umulh $nj,$m1,$nhi
.align 4
.Linner:
	.set noreorder
	ldq $tj,8($tp)		#L0
	nop			#U1
	ldq $aj,($aj)		#L1
	s8addq $j,$np,$nj	#U0

	ldq $nj,($nj)		#L0
	nop			#U1
	addq $alo,$hi0,$lo0	#L1
	lda $tp,8($tp)

	mulq $aj,$bi,$alo	#U1
	cmpult $lo0,$hi0,AT	#L0
	addq $nlo,$hi1,$lo1	#L1
	addl $j,1,$j

	mulq $nj,$m1,$nlo	#U1
	addq $ahi,AT,$hi0	#L0
	addq $lo0,$tj,$lo0	#L1
	cmpult $lo1,$hi1,v0	#U0

	umulh $aj,$bi,$ahi	#U1
	cmpult $lo0,$tj,AT	#L0
	addq $lo1,$lo0,$lo1	#L1
	addq $nhi,v0,$hi1	#U0

	umulh $nj,$m1,$nhi	#U1
	s8addq $j,$ap,$aj	#L0
	cmpult $lo1,$lo0,v0	#L1
	cmplt $j,$num,$tj	#U0	# borrow $tj

	addq $hi0,AT,$hi0	#L0
	addq $hi1,v0,$hi1	#U1
	stq $lo1,-8($tp)	#L1
	bne $tj,.Linner		#U0
	.set reorder

	ldq $tj,8($tp)
	addq $alo,$hi0,$lo0
	addq $nlo,$hi1,$lo1
	cmpult $lo0,$hi0,AT
	cmpult $lo1,$hi1,v0
	addq $ahi,AT,$hi0
	addq $nhi,v0,$hi1

	addq $lo0,$tj,$lo0
	cmpult $lo0,$tj,AT
	addq $hi0,AT,$hi0

	ldq $tj,16($tp)
	addq $lo1,$lo0,$j
	cmpult $j,$lo0,v0
	addq $hi1,v0,$hi1

	addq $hi1,$hi0,$lo1
	stq $j,($tp)
	cmpult $lo1,$hi0,$hi1
	addq $lo1,$tj,$lo1
	cmpult $lo1,$tj,AT
	addl $i,1,$i
	addq $hi1,AT,$hi1
	stq $lo1,8($tp)
	cmplt $i,$num,$tj	# borrow $tj
	stq $hi1,16($tp)
	bne $tj,.Louter

	s8addq $num,sp,$tj	# &tp[num]
	mov $rp,$bp		# put rp aside
	mov sp,$tp
	mov sp,$ap
	mov 0,$hi0		# clear borrow bit

.align 4
.Lsub:	ldq $lo0,($tp)
	ldq $lo1,($np)
	lda $tp,8($tp)
	lda $np,8($np)
	subq $lo0,$lo1,$lo1	# tp[i]-np[i]
	cmpult $lo0,$lo1,AT
	subq $lo1,$hi0,$lo0
	cmpult $lo1,$lo0,$hi0
	or $hi0,AT,$hi0
	stq $lo0,($rp)
	cmpult $tp,$tj,v0
	lda $rp,8($rp)
	bne v0,.Lsub

	subq $hi1,$hi0,$hi0	# handle upmost overflow bit
	mov sp,$tp
	mov $bp,$rp		# restore rp

	and sp,$hi0,$ap
	bic $bp,$hi0,$bp
	bis $bp,$ap,$ap		# ap=borrow?tp:rp

.align 4
.Lcopy:	ldq $aj,($ap)		# copy or in-place refresh
	lda $tp,8($tp)
	lda $rp,8($rp)
	lda $ap,8($ap)
	stq zero,-8($tp)	# zap tp
	cmpult $tp,$tj,AT
	stq $aj,-8($rp)
	bne AT,.Lcopy
	mov 1,v0

.Lexit:
	.set noreorder
	mov fp,sp
	/*ldq ra,0(sp)*/
	ldq s3,8(sp)
	ldq s4,16(sp)
	ldq s5,24(sp)
	ldq fp,32(sp)
	lda sp,40(sp)
	ret (ra)
.end bn_mul_mont
.rdata
.asciiz "Montgomery Multiplication for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
___

print $code;
close STDOUT;
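As a reading aid for the Alpha code above, here is a minimal C sketch of the word-serial Montgomery multiplication that bn_mul_mont performs; the names, the MAXN cap and the 128-bit type are assumptions of this sketch (the assembler allocates tp[] on the stack and has no size cap), not code from the tree:

	#include <stdint.h>
	#include <string.h>

	typedef uint64_t BN_ULONG;
	typedef unsigned __int128 u128;	/* assumes a GCC/Clang-style 128-bit type */

	#define MAXN 64			/* illustration only */

	/*
	 * rp[] = ap[]*bp[]*R^-1 mod np[], R = 2^(64*num),
	 * n0 = -np[0]^-1 mod 2^64 (the word the caller passes via *n0).
	 */
	static void mont_mul_ref(BN_ULONG *rp, const BN_ULONG *ap,
	    const BN_ULONG *bp, const BN_ULONG *np, BN_ULONG n0, int num)
	{
		BN_ULONG t[MAXN + 2], r[MAXN];
		u128 c;
		int i, j;

		memset(t, 0, sizeof(t));
		for (i = 0; i < num; i++) {
			/* t += ap * bp[i]; cf. the "a" chain in .L1st/.Linner */
			for (c = 0, j = 0; j < num; j++) {
				c += (u128)ap[j] * bp[i] + t[j];
				t[j] = (BN_ULONG)c;
				c >>= 64;
			}
			c += t[num];
			t[num] = (BN_ULONG)c;
			t[num + 1] = (BN_ULONG)(c >> 64);

			/* t += (t[0]*n0 mod 2^64)*np; cf. mulq $lo0,$n0,$m1 */
			BN_ULONG m1 = t[0] * n0;
			for (c = 0, j = 0; j < num; j++) {
				c += (u128)np[j] * m1 + t[j];
				t[j] = (BN_ULONG)c;
				c >>= 64;
			}
			c += t[num];
			t[num] = (BN_ULONG)c;
			t[num + 1] += (BN_ULONG)(c >> 64);

			/* drop the now-zero low word: t >>= 64 */
			memmove(t, t + 1, (num + 1) * sizeof(BN_ULONG));
			t[num + 1] = 0;
		}

		/* conditional final subtraction; cf. .Lsub and .Lcopy */
		BN_ULONG borrow = 0;
		for (j = 0; j < num; j++) {
			u128 d = (u128)t[j] - np[j] - borrow;
			r[j] = (BN_ULONG)d;
			borrow = (BN_ULONG)(d >> 64) & 1;
		}
		BN_ULONG mask = t[num] - borrow; /* 0 -> take t-np, ~0 -> take t */
		for (j = 0; j < num; j++)
			rp[j] = (r[j] & ~mask) | (t[j] & mask);
	}

The final mask select is the branch-free and/bic/bis sequence at .Lcopy: when the subtraction borrowed, the all-ones mask keeps the unreduced tp[] instead of the subtracted result.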
diff --git a/src/lib/libcrypto/bn/asm/armv4-mont.pl b/src/lib/libcrypto/bn/asm/armv4-mont.pl
deleted file mode 100644
index 05d5dc1a48..0000000000
--- a/src/lib/libcrypto/bn/asm/armv4-mont.pl
+++ /dev/null
@@ -1,200 +0,0 @@
#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# January 2007.

# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code
# base and compiler generated code with in-lined umull and even umlal
# instructions. The latter means that this code didn't really have an
# "advantage" of utilizing some "secret" instruction.
#
# The code is interoperable with Thumb ISA and is rather compact, less
# than 1/2KB. Windows CE port would be trivial, as it's exclusively
# about decorations, ABI and instruction syntax are identical.

$num="r0";	# starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
$np="r3";
$tp="r4";
$aj="r5";
$nj="r6";
$tj="r7";
$n0="r8";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
$ahi="r11";	# fp
$nlo="r12";	# ip
###########	# r13 is stack pointer
$nhi="r14";	# lr
###########	# r15 is program counter

#### argument block layout relative to &tp[num-1], a.k.a. $num
$_rp="$num,#12*4";
# ap permanently resides in r1
$_bp="$num,#13*4";
# np permanently resides in r3
$_n0="$num,#14*4";
$_num="$num,#15*4"; $_bpend=$_num;

$code=<<___;
.text

.global bn_mul_mont
.type bn_mul_mont,%function

.align 2
bn_mul_mont:
	stmdb sp!,{r0,r2}	@ sp points at argument block
	ldr $num,[sp,#3*4]	@ load num
	cmp $num,#2
	movlt r0,#0
	addlt sp,sp,#2*4
	blt .Labrt

	stmdb sp!,{r4-r12,lr}	@ save 10 registers

	mov $num,$num,lsl#2	@ rescale $num for byte count
	sub sp,sp,$num		@ alloca(4*num)
	sub sp,sp,#4		@ +extra dword
	sub $num,$num,#4	@ "num=num-1"
	add $tp,$bp,$num	@ &bp[num-1]

	add $num,sp,$num	@ $num to point at &tp[num-1]
	ldr $n0,[$_n0]		@ &n0
	ldr $bi,[$bp]		@ bp[0]
	ldr $aj,[$ap],#4	@ ap[0],ap++
	ldr $nj,[$np],#4	@ np[0],np++
	ldr $n0,[$n0]		@ *n0
	str $tp,[$_bpend]	@ save &bp[num]

	umull $alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str $n0,[$_n0]		@ save n0 value
	mul $n0,$alo,$n0	@ "tp[0]"*n0
	mov $nlo,#0
	umlal $alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
	mov $tp,sp

.L1st:
	ldr $aj,[$ap],#4	@ ap[j],ap++
	mov $alo,$ahi
	mov $ahi,#0
	umlal $alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	ldr $nj,[$np],#4	@ np[j],np++
	mov $nhi,#0
	umlal $nlo,$nhi,$nj,$n0	@ np[j]*n0
	adds $nlo,$nlo,$alo
	str $nlo,[$tp],#4	@ tp[j-1]=,tp++
	adc $nlo,$nhi,#0
	cmp $tp,$num
	bne .L1st

	adds $nlo,$nlo,$ahi
	mov $nhi,#0
	adc $nhi,$nhi,#0
	ldr $tp,[$_bp]		@ restore bp
	str $nlo,[$num]		@ tp[num-1]=
	ldr $n0,[$_n0]		@ restore n0
	str $nhi,[$num,#4]	@ tp[num]=

.Louter:
	sub $tj,$num,sp		@ "original" $num-1 value
	sub $ap,$ap,$tj		@ "rewind" ap to &ap[1]
	sub $np,$np,$tj		@ "rewind" np to &np[1]
	ldr $bi,[$tp,#4]!	@ *(++bp)
	ldr $aj,[$ap,#-4]	@ ap[0]
	ldr $nj,[$np,#-4]	@ np[0]
	ldr $alo,[sp]		@ tp[0]
	ldr $tj,[sp,#4]		@ tp[1]

	mov $ahi,#0
	umlal $alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str $tp,[$_bp]		@ save bp
	mul $n0,$alo,$n0
	mov $nlo,#0
	umlal $alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
	mov $tp,sp

.Linner:
	ldr $aj,[$ap],#4	@ ap[j],ap++
	adds $alo,$ahi,$tj	@ +=tp[j]
	mov $ahi,#0
	umlal $alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	ldr $nj,[$np],#4	@ np[j],np++
	mov $nhi,#0
	umlal $nlo,$nhi,$nj,$n0	@ np[j]*n0
	ldr $tj,[$tp,#8]	@ tp[j+1]
	adc $ahi,$ahi,#0
	adds $nlo,$nlo,$alo
	str $nlo,[$tp],#4	@ tp[j-1]=,tp++
	adc $nlo,$nhi,#0
	cmp $tp,$num
	bne .Linner

	adds $nlo,$nlo,$ahi
	mov $nhi,#0
	adc $nhi,$nhi,#0
	adds $nlo,$nlo,$tj
	adc $nhi,$nhi,#0
	ldr $tp,[$_bp]		@ restore bp
	ldr $tj,[$_bpend]	@ restore &bp[num]
	str $nlo,[$num]		@ tp[num-1]=
	ldr $n0,[$_n0]		@ restore n0
	str $nhi,[$num,#4]	@ tp[num]=

	cmp $tp,$tj
	bne .Louter

	ldr $rp,[$_rp]		@ pull rp
	add $num,$num,#4	@ $num to point at &tp[num]
	sub $aj,$num,sp		@ "original" num value
	mov $tp,sp		@ "rewind" $tp
	mov $ap,$tp		@ "borrow" $ap
	sub $np,$np,$aj		@ "rewind" $np to &np[0]

	subs $tj,$tj,$tj	@ "clear" carry flag
.Lsub:	ldr $tj,[$tp],#4
	ldr $nj,[$np],#4
	sbcs $tj,$tj,$nj	@ tp[j]-np[j]
	str $tj,[$rp],#4	@ rp[j]=
	teq $tp,$num		@ preserve carry
	bne .Lsub
	sbcs $nhi,$nhi,#0	@ upmost carry
	mov $tp,sp		@ "rewind" $tp
	sub $rp,$rp,$aj		@ "rewind" $rp

	and $ap,$tp,$nhi
	bic $np,$rp,$nhi
	orr $ap,$ap,$np		@ ap=borrow?tp:rp

.Lcopy:	ldr $tj,[$ap],#4	@ copy or in-place refresh
	str sp,[$tp],#4		@ zap tp
	str $tj,[$rp],#4
	cmp $tp,$num
	bne .Lcopy

	add sp,$num,#4		@ skip over tp[num+1]
	ldmia sp!,{r4-r12,lr}	@ restore registers
	add sp,sp,#2*4		@ skip over {r0,r2}
	mov r0,#1
.Labrt:	tst lr,#1
	moveq pc,lr		@ be binary compatible with V4, yet
	bx lr			@ interoperable with Thumb ISA:-)
.size bn_mul_mont,.-bn_mul_mont
.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
___

$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
print $code;
close STDOUT;
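The loops above lean entirely on umlal, ARM's 32x32+64 multiply-accumulate. A one-line C model of it (a sketch, not code from the file) makes the two interleaved accumulator chains easier to follow:

	#include <stdint.h>

	/* Model of ARM umlal: 64-bit accumulator (hi:lo) += a*b, wrapping mod 2^64. */
	static inline void umlal(uint32_t *lo, uint32_t *hi, uint32_t a, uint32_t b)
	{
		uint64_t acc = (((uint64_t)*hi << 32) | *lo) + (uint64_t)a * b;

		*lo = (uint32_t)acc;
		*hi = (uint32_t)(acc >> 32);
	}

Each .L1st/.Linner step runs two such accumulations, ($alo,$ahi) for ap[j]*bp[i] and ($nlo,$nhi) for np[j]*n0, then merges the low halves with adds/adc so the running carry word rides along in $nlo.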
diff --git a/src/lib/libcrypto/bn/asm/bn-586.pl b/src/lib/libcrypto/bn/asm/bn-586.pl
deleted file mode 100644
index 26c2685a72..0000000000
--- a/src/lib/libcrypto/bn/asm/bn-586.pl
+++ /dev/null
@@ -1,675 +0,0 @@
#!/usr/local/bin/perl

push(@INC,"perlasm","../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],$0);

$sse2=0;
for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }

&external_label("OPENSSL_ia32cap_P") if ($sse2);

&bn_mul_add_words("bn_mul_add_words");
&bn_mul_words("bn_mul_words");
&bn_sqr_words("bn_sqr_words");
&bn_div_words("bn_div_words");
&bn_add_words("bn_add_words");
&bn_sub_words("bn_sub_words");
&bn_sub_part_words("bn_sub_part_words");

&asm_finish();

sub bn_mul_add_words
	{
	local($name)=@_;

	&function_begin($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");

	&comment("");
	$Low="eax";
	$High="edx";
	$a="ebx";
	$w="ebp";
	$r="edi";
	$c="esi";

	&xor($c,$c);		# clear carry
	&mov($r,&wparam(0));	#

	&mov("ecx",&wparam(2));	#
	&mov($a,&wparam(1));	#

	&and("ecx",0xfffffff8);	# num / 8
	&mov($w,&wparam(3));	#

	&push("ecx");		# Up the stack for a tmp variable

	&jz(&label("maw_finish"));

	if ($sse2) {
		&picmeup("eax","OPENSSL_ia32cap_P");
		&bt(&DWP(0,"eax"),26);
		&jnc(&label("maw_loop"));

		&movd("mm0",$w);		# mm0 = w
		&pxor("mm1","mm1");		# mm1 = carry_in

		&set_label("maw_sse2_loop",0);
		&movd("mm3",&DWP(0,$r,"",0));	# mm3 = r[0]
		&paddq("mm1","mm3");		# mm1 = carry_in + r[0]
		&movd("mm2",&DWP(0,$a,"",0));	# mm2 = a[0]
		&pmuludq("mm2","mm0");		# mm2 = w*a[0]
		&movd("mm4",&DWP(4,$a,"",0));	# mm4 = a[1]
		&pmuludq("mm4","mm0");		# mm4 = w*a[1]
		&movd("mm6",&DWP(8,$a,"",0));	# mm6 = a[2]
		&pmuludq("mm6","mm0");		# mm6 = w*a[2]
		&movd("mm7",&DWP(12,$a,"",0));	# mm7 = a[3]
		&pmuludq("mm7","mm0");		# mm7 = w*a[3]
		&paddq("mm1","mm2");		# mm1 = carry_in + r[0] + w*a[0]
		&movd("mm3",&DWP(4,$r,"",0));	# mm3 = r[1]
		&paddq("mm3","mm4");		# mm3 = r[1] + w*a[1]
		&movd("mm5",&DWP(8,$r,"",0));	# mm5 = r[2]
		&paddq("mm5","mm6");		# mm5 = r[2] + w*a[2]
		&movd("mm4",&DWP(12,$r,"",0));	# mm4 = r[3]
		&paddq("mm7","mm4");		# mm7 = r[3] + w*a[3]
		&movd(&DWP(0,$r,"",0),"mm1");
		&movd("mm2",&DWP(16,$a,"",0));	# mm2 = a[4]
		&pmuludq("mm2","mm0");		# mm2 = w*a[4]
		&psrlq("mm1",32);		# mm1 = carry0
		&movd("mm4",&DWP(20,$a,"",0));	# mm4 = a[5]
		&pmuludq("mm4","mm0");		# mm4 = w*a[5]
		&paddq("mm1","mm3");		# mm1 = carry0 + r[1] + w*a[1]
		&movd("mm6",&DWP(24,$a,"",0));	# mm6 = a[6]
		&pmuludq("mm6","mm0");		# mm6 = w*a[6]
		&movd(&DWP(4,$r,"",0),"mm1");
		&psrlq("mm1",32);		# mm1 = carry1
		&movd("mm3",&DWP(28,$a,"",0));	# mm3 = a[7]
		&add($a,32);
		&pmuludq("mm3","mm0");		# mm3 = w*a[7]
		&paddq("mm1","mm5");		# mm1 = carry1 + r[2] + w*a[2]
		&movd("mm5",&DWP(16,$r,"",0));	# mm5 = r[4]
		&paddq("mm2","mm5");		# mm2 = r[4] + w*a[4]
		&movd(&DWP(8,$r,"",0),"mm1");
		&psrlq("mm1",32);		# mm1 = carry2
		&paddq("mm1","mm7");		# mm1 = carry2 + r[3] + w*a[3]
		&movd("mm5",&DWP(20,$r,"",0));	# mm5 = r[5]
		&paddq("mm4","mm5");		# mm4 = r[5] + w*a[5]
		&movd(&DWP(12,$r,"",0),"mm1");
		&psrlq("mm1",32);		# mm1 = carry3
		&paddq("mm1","mm2");		# mm1 = carry3 + r[4] + w*a[4]
		&movd("mm5",&DWP(24,$r,"",0));	# mm5 = r[6]
		&paddq("mm6","mm5");		# mm6 = r[6] + w*a[6]
		&movd(&DWP(16,$r,"",0),"mm1");
		&psrlq("mm1",32);		# mm1 = carry4
		&paddq("mm1","mm4");		# mm1 = carry4 + r[5] + w*a[5]
		&movd("mm5",&DWP(28,$r,"",0));	# mm5 = r[7]
		&paddq("mm3","mm5");		# mm3 = r[7] + w*a[7]
		&movd(&DWP(20,$r,"",0),"mm1");
		&psrlq("mm1",32);		# mm1 = carry5
		&paddq("mm1","mm6");		# mm1 = carry5 + r[6] + w*a[6]
		&movd(&DWP(24,$r,"",0),"mm1");
		&psrlq("mm1",32);		# mm1 = carry6
		&paddq("mm1","mm3");		# mm1 = carry6 + r[7] + w*a[7]
		&movd(&DWP(28,$r,"",0),"mm1");
		&add($r,32);
		&psrlq("mm1",32);		# mm1 = carry_out

		&sub("ecx",8);
		&jnz(&label("maw_sse2_loop"));

		&movd($c,"mm1");		# c = carry_out
		&emms();

		&jmp(&label("maw_finish"));
	}

	&set_label("maw_loop",0);

	&mov(&swtmp(0),"ecx");	#

	for ($i=0; $i<32; $i+=4)
		{
		&comment("Round $i");

		&mov("eax",&DWP($i,$a,"",0));	# *a
		&mul($w);			# *a * w
		&add("eax",$c);			# L(t)+=c
		&mov($c,&DWP($i,$r,"",0));	# load *r
		&adc("edx",0);			# H(t)+=carry
		&add("eax",$c);			# L(t)+= *r
		&adc("edx",0);			# H(t)+=carry
		&mov(&DWP($i,$r,"",0),"eax");	# *r= L(t);
		&mov($c,"edx");			# c= H(t);
		}

	&comment("");
	&mov("ecx",&swtmp(0));	#
	&add($a,32);
	&add($r,32);
	&sub("ecx",8);
	&jnz(&label("maw_loop"));

	&set_label("maw_finish",0);
	&mov("ecx",&wparam(2));	# get num
	&and("ecx",7);
	&jnz(&label("maw_finish2"));	# helps branch prediction
	&jmp(&label("maw_end"));

	&set_label("maw_finish2",1);
	for ($i=0; $i<7; $i++)
		{
		&comment("Tail Round $i");
		&mov("eax",&DWP($i*4,$a,"",0));	# *a
		&mul($w);			# *a * w
		&add("eax",$c);			# L(t)+=c
		&mov($c,&DWP($i*4,$r,"",0));	# L(t)+= *r
		&adc("edx",0);			# H(t)+=carry
		&add("eax",$c);
		&adc("edx",0);			# H(t)+=carry
		&dec("ecx") if ($i != 7-1);
		&mov(&DWP($i*4,$r,"",0),"eax");	# *r= L(t);
		&mov($c,"edx");			# c= H(t);
		&jz(&label("maw_end")) if ($i != 7-1);
		}
	&set_label("maw_end",0);
	&mov("eax",$c);

	&pop("ecx");	# clear variable from

	&function_end($name);
	}

sub bn_mul_words
	{
	local($name)=@_;

	&function_begin($name,"");

	&comment("");
	$Low="eax";
	$High="edx";
	$a="ebx";
	$w="ecx";
	$r="edi";
	$c="esi";
	$num="ebp";

	&xor($c,$c);		# clear carry
	&mov($r,&wparam(0));	#
	&mov($a,&wparam(1));	#
	&mov($num,&wparam(2));	#
	&mov($w,&wparam(3));	#

	&and($num,0xfffffff8);	# num / 8
	&jz(&label("mw_finish"));

	&set_label("mw_loop",0);
	for ($i=0; $i<32; $i+=4)
		{
		&comment("Round $i");

		&mov("eax",&DWP($i,$a,"",0));	# *a
		&mul($w);			# *a * w
		&add("eax",$c);			# L(t)+=c
		# XXX

		&adc("edx",0);			# H(t)+=carry
		&mov(&DWP($i,$r,"",0),"eax");	# *r= L(t);

		&mov($c,"edx");			# c= H(t);
		}

	&comment("");
	&add($a,32);
	&add($r,32);
	&sub($num,8);
	&jz(&label("mw_finish"));
	&jmp(&label("mw_loop"));

	&set_label("mw_finish",0);
	&mov($num,&wparam(2));	# get num
	&and($num,7);
	&jnz(&label("mw_finish2"));
	&jmp(&label("mw_end"));

	&set_label("mw_finish2",1);
	for ($i=0; $i<7; $i++)
		{
		&comment("Tail Round $i");
		&mov("eax",&DWP($i*4,$a,"",0));	# *a
		&mul($w);			# *a * w
		&add("eax",$c);			# L(t)+=c
		# XXX
		&adc("edx",0);			# H(t)+=carry
		&mov(&DWP($i*4,$r,"",0),"eax");	# *r= L(t);
		&mov($c,"edx");			# c= H(t);
		&dec($num) if ($i != 7-1);
		&jz(&label("mw_end")) if ($i != 7-1);
		}
	&set_label("mw_end",0);
	&mov("eax",$c);

	&function_end($name);
	}

sub bn_sqr_words
	{
	local($name)=@_;

	&function_begin($name,"");

	&comment("");
	$r="esi";
	$a="edi";
	$num="ebx";

	&mov($r,&wparam(0));	#
	&mov($a,&wparam(1));	#
	&mov($num,&wparam(2));	#

	&and($num,0xfffffff8);	# num / 8
	&jz(&label("sw_finish"));

	&set_label("sw_loop",0);
	for ($i=0; $i<32; $i+=4)
		{
		&comment("Round $i");
		&mov("eax",&DWP($i,$a,"",0));	# *a
		# XXX
		&mul("eax");			# *a * *a
		&mov(&DWP($i*2,$r,"",0),"eax");	#
		&mov(&DWP($i*2+4,$r,"",0),"edx");#
		}

	&comment("");
	&add($a,32);
	&add($r,64);
	&sub($num,8);
	&jnz(&label("sw_loop"));

	&set_label("sw_finish",0);
	&mov($num,&wparam(2));	# get num
	&and($num,7);
	&jz(&label("sw_end"));

	for ($i=0; $i<7; $i++)
		{
		&comment("Tail Round $i");
		&mov("eax",&DWP($i*4,$a,"",0));	# *a
		# XXX
		&mul("eax");			# *a * *a
		&mov(&DWP($i*8,$r,"",0),"eax");	#
		&dec($num) if ($i != 7-1);
		&mov(&DWP($i*8+4,$r,"",0),"edx");
		&jz(&label("sw_end")) if ($i != 7-1);
		}
	&set_label("sw_end",0);

	&function_end($name);
	}

sub bn_div_words
	{
	local($name)=@_;

	&function_begin($name,"");
	&mov("edx",&wparam(0));	#
	&mov("eax",&wparam(1));	#
	&mov("ebx",&wparam(2));	#
	&div("ebx");
	&function_end($name);
	}

sub bn_add_words
	{
	local($name)=@_;

	&function_begin($name,"");

	&comment("");
	$a="esi";
	$b="edi";
	$c="eax";
	$r="ebx";
	$tmp1="ecx";
	$tmp2="edx";
	$num="ebp";

	&mov($r,&wparam(0));	# get r
	&mov($a,&wparam(1));	# get a
	&mov($b,&wparam(2));	# get b
	&mov($num,&wparam(3));	# get num
	&xor($c,$c);		# clear carry
	&and($num,0xfffffff8);	# num / 8

	&jz(&label("aw_finish"));

	&set_label("aw_loop",0);
	for ($i=0; $i<8; $i++)
		{
		&comment("Round $i");

		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&mov($tmp2,&DWP($i*4,$b,"",0));	# *b
		&add($tmp1,$c);
		&mov($c,0);
		&adc($c,$c);
		&add($tmp1,$tmp2);
		&adc($c,0);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		}

	&comment("");
	&add($a,32);
	&add($b,32);
	&add($r,32);
	&sub($num,8);
	&jnz(&label("aw_loop"));

	&set_label("aw_finish",0);
	&mov($num,&wparam(3));	# get num
	&and($num,7);
	&jz(&label("aw_end"));

	for ($i=0; $i<7; $i++)
		{
		&comment("Tail Round $i");
		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&mov($tmp2,&DWP($i*4,$b,"",0));	# *b
		&add($tmp1,$c);
		&mov($c,0);
		&adc($c,$c);
		&add($tmp1,$tmp2);
		&adc($c,0);
		&dec($num) if ($i != 6);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		&jz(&label("aw_end")) if ($i != 6);
		}
	&set_label("aw_end",0);

#	&mov("eax",$c);		# $c is "eax"

	&function_end($name);
	}

sub bn_sub_words
	{
	local($name)=@_;

	&function_begin($name,"");

	&comment("");
	$a="esi";
	$b="edi";
	$c="eax";
	$r="ebx";
	$tmp1="ecx";
	$tmp2="edx";
	$num="ebp";

	&mov($r,&wparam(0));	# get r
	&mov($a,&wparam(1));	# get a
	&mov($b,&wparam(2));	# get b
	&mov($num,&wparam(3));	# get num
	&xor($c,$c);		# clear carry
	&and($num,0xfffffff8);	# num / 8

	&jz(&label("aw_finish"));

	&set_label("aw_loop",0);
	for ($i=0; $i<8; $i++)
		{
		&comment("Round $i");

		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&mov($tmp2,&DWP($i*4,$b,"",0));	# *b
		&sub($tmp1,$c);
		&mov($c,0);
		&adc($c,$c);
		&sub($tmp1,$tmp2);
		&adc($c,0);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		}

	&comment("");
	&add($a,32);
	&add($b,32);
	&add($r,32);
	&sub($num,8);
	&jnz(&label("aw_loop"));

	&set_label("aw_finish",0);
	&mov($num,&wparam(3));	# get num
	&and($num,7);
	&jz(&label("aw_end"));

	for ($i=0; $i<7; $i++)
		{
		&comment("Tail Round $i");
		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&mov($tmp2,&DWP($i*4,$b,"",0));	# *b
		&sub($tmp1,$c);
		&mov($c,0);
		&adc($c,$c);
		&sub($tmp1,$tmp2);
		&adc($c,0);
		&dec($num) if ($i != 6);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		&jz(&label("aw_end")) if ($i != 6);
		}
	&set_label("aw_end",0);

#	&mov("eax",$c);		# $c is "eax"

	&function_end($name);
	}

sub bn_sub_part_words
	{
	local($name)=@_;

	&function_begin($name,"");

	&comment("");
	$a="esi";
	$b="edi";
	$c="eax";
	$r="ebx";
	$tmp1="ecx";
	$tmp2="edx";
	$num="ebp";

	&mov($r,&wparam(0));	# get r
	&mov($a,&wparam(1));	# get a
	&mov($b,&wparam(2));	# get b
	&mov($num,&wparam(3));	# get num
	&xor($c,$c);		# clear carry
	&and($num,0xfffffff8);	# num / 8

	&jz(&label("aw_finish"));

	&set_label("aw_loop",0);
	for ($i=0; $i<8; $i++)
		{
		&comment("Round $i");

		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&mov($tmp2,&DWP($i*4,$b,"",0));	# *b
		&sub($tmp1,$c);
		&mov($c,0);
		&adc($c,$c);
		&sub($tmp1,$tmp2);
		&adc($c,0);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		}

	&comment("");
	&add($a,32);
	&add($b,32);
	&add($r,32);
	&sub($num,8);
	&jnz(&label("aw_loop"));

	&set_label("aw_finish",0);
	&mov($num,&wparam(3));	# get num
	&and($num,7);
	&jz(&label("aw_end"));

	for ($i=0; $i<7; $i++)
		{
		&comment("Tail Round $i");
		&mov($tmp1,&DWP(0,$a,"",0));	# *a
		&mov($tmp2,&DWP(0,$b,"",0));	# *b
		&sub($tmp1,$c);
		&mov($c,0);
		&adc($c,$c);
		&sub($tmp1,$tmp2);
		&adc($c,0);
		&mov(&DWP(0,$r,"",0),$tmp1);	# *r
		&add($a, 4);
		&add($b, 4);
		&add($r, 4);
		&dec($num) if ($i != 6);
		&jz(&label("aw_end")) if ($i != 6);
		}
	&set_label("aw_end",0);

	&cmp(&wparam(4),0);
	&je(&label("pw_end"));

	&mov($num,&wparam(4));	# get dl
	&cmp($num,0);
	&je(&label("pw_end"));
	&jge(&label("pw_pos"));

	&comment("pw_neg");
	&mov($tmp2,0);
	&sub($tmp2,$num);
	&mov($num,$tmp2);
	&and($num,0xfffffff8);	# num / 8
	&jz(&label("pw_neg_finish"));

	&set_label("pw_neg_loop",0);
	for ($i=0; $i<8; $i++)
		{
		&comment("dl<0 Round $i");

		&mov($tmp1,0);
		&mov($tmp2,&DWP($i*4,$b,"",0));	# *b
		&sub($tmp1,$c);
		&mov($c,0);
		&adc($c,$c);
		&sub($tmp1,$tmp2);
		&adc($c,0);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		}

	&comment("");
	&add($b,32);
	&add($r,32);
	&sub($num,8);
	&jnz(&label("pw_neg_loop"));

	&set_label("pw_neg_finish",0);
	&mov($tmp2,&wparam(4));	# get dl
	&mov($num,0);
	&sub($num,$tmp2);
	&and($num,7);
	&jz(&label("pw_end"));

	for ($i=0; $i<7; $i++)
		{
		&comment("dl<0 Tail Round $i");
		&mov($tmp1,0);
		&mov($tmp2,&DWP($i*4,$b,"",0));	# *b
		&sub($tmp1,$c);
		&mov($c,0);
		&adc($c,$c);
		&sub($tmp1,$tmp2);
		&adc($c,0);
		&dec($num) if ($i != 6);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		&jz(&label("pw_end")) if ($i != 6);
		}

	&jmp(&label("pw_end"));

	&set_label("pw_pos",0);

	&and($num,0xfffffff8);	# num / 8
	&jz(&label("pw_pos_finish"));

	&set_label("pw_pos_loop",0);

	for ($i=0; $i<8; $i++)
		{
		&comment("dl>0 Round $i");

		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&sub($tmp1,$c);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		&jnc(&label("pw_nc".$i));
		}

	&comment("");
	&add($a,32);
	&add($r,32);
	&sub($num,8);
	&jnz(&label("pw_pos_loop"));

	&set_label("pw_pos_finish",0);
	&mov($num,&wparam(4));	# get dl
	&and($num,7);
	&jz(&label("pw_end"));

	for ($i=0; $i<7; $i++)
		{
		&comment("dl>0 Tail Round $i");
		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&sub($tmp1,$c);
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		&jnc(&label("pw_tail_nc".$i));
		&dec($num) if ($i != 6);
		&jz(&label("pw_end")) if ($i != 6);
		}
	&mov($c,1);
	&jmp(&label("pw_end"));

	&set_label("pw_nc_loop",0);
	for ($i=0; $i<8; $i++)
		{
		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		&set_label("pw_nc".$i,0);
		}

	&comment("");
	&add($a,32);
	&add($r,32);
	&sub($num,8);
	&jnz(&label("pw_nc_loop"));

	&mov($num,&wparam(4));	# get dl
	&and($num,7);
	&jz(&label("pw_nc_end"));

	for ($i=0; $i<7; $i++)
		{
		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
		&set_label("pw_tail_nc".$i,0);
		&dec($num) if ($i != 6);
		&jz(&label("pw_nc_end")) if ($i != 6);
		}

	&set_label("pw_nc_end",0);
	&mov($c,0);

	&set_label("pw_end",0);

#	&mov("eax",$c);		# $c is "eax"

	&function_end($name);
	}
deleted file mode 100644
index 5d962cb957..0000000000
--- a/src/lib/libcrypto/bn/asm/co-586.pl
+++ /dev/null
@@ -1,286 +0,0 @@
#!/usr/local/bin/perl

push(@INC,"perlasm","../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],$0);

&bn_mul_comba("bn_mul_comba8",8);
&bn_mul_comba("bn_mul_comba4",4);
&bn_sqr_comba("bn_sqr_comba8",8);
&bn_sqr_comba("bn_sqr_comba4",4);

&asm_finish();

sub mul_add_c
	{
	local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;

	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
	# words, and 1 to load the return value

	&comment("mul a[$ai]*b[$bi]");

	# "eax" and "edx" will always be pre-loaded.
	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
	# &mov("edx",&DWP($bi*4,$b,"",0));

	&mul("edx");
	&add($c0,"eax");
	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
	&mov("eax",&wparam(0)) if $pos > 0;		# load r[]
	###
	&adc($c1,"edx");
	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0;	# load next b
	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1;	# load next b
	###
	&adc($c2,0);
	# if pos > 1, it means it is the last loop
	&mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0;	# save r[];
	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next a
	}

sub sqr_add_c
	{
	local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;

	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
	# words, and 1 to load the return value

	&comment("sqr a[$ai]*a[$bi]");

	# "eax" and "edx" will always be pre-loaded.
	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
	# &mov("edx",&DWP($bi*4,$b,"",0));

	if ($ai == $bi)
		{ &mul("eax");}
	else
		{ &mul("edx");}
	&add($c0,"eax");
	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
	###
	&adc($c1,"edx");
	&mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
	###
	&adc($c2,0);
	# if pos > 1, it means it is the last loop
	&mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0;	# save r[];
	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next b
	}

sub sqr_add_c2
	{
	local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;

	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
	# words, and 1 to load the return value

	&comment("sqr a[$ai]*a[$bi]");

	# "eax" and "edx" will always be pre-loaded.
	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
	# &mov("edx",&DWP($bi*4,$a,"",0));

	if ($ai == $bi)
		{ &mul("eax");}
	else
		{ &mul("edx");}
	&add("eax","eax");
	###
	&adc("edx","edx");
	###
	&adc($c2,0);
	&add($c0,"eax");
	&adc($c1,"edx");
	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next b
	&adc($c2,0);
	&mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0;	# save r[];
	&mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
	###
	}

sub bn_mul_comba
	{
	local($name,$num)=@_;
	local($a,$b,$c0,$c1,$c2);
	local($i,$as,$ae,$bs,$be,$ai,$bi);
	local($tot,$end);

	&function_begin_B($name,"");

	$c0="ebx";
	$c1="ecx";
	$c2="ebp";
	$a="esi";
	$b="edi";

	$as=0;
	$ae=0;
	$bs=0;
	$be=0;
	$tot=$num+$num-1;

	&push("esi");
	&mov($a,&wparam(1));
	&push("edi");
	&mov($b,&wparam(2));
	&push("ebp");
	&push("ebx");

	&xor($c0,$c0);
	&mov("eax",&DWP(0,$a,"",0));	# load the first word of a
	&xor($c1,$c1);
	&mov("edx",&DWP(0,$b,"",0));	# load the first word of b

	for ($i=0; $i<$tot; $i++)
		{
		$ai=$as;
		$bi=$bs;
		$end=$be+1;

		&comment("################## Calculate word $i");

		for ($j=$bs; $j<$end; $j++)
			{
			&xor($c2,$c2) if ($j == $bs);
			if (($j+1) == $end)
				{
				$v=1;
				$v=2 if (($i+1) == $tot);
				}
			else
				{ $v=0; }
			if (($j+1) != $end)
				{
				$na=($ai-1);
				$nb=($bi+1);
				}
			else
				{
				$na=$as+($i < ($num-1));
				$nb=$bs+($i >= ($num-1));
				}
#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
			&mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
			if ($v)
				{
				&comment("saved r[$i]");
				# &mov("eax",&wparam(0));
				# &mov(&DWP($i*4,"eax","",0),$c0);
				($c0,$c1,$c2)=($c1,$c2,$c0);
				}
			$ai--;
			$bi++;
			}
		$as++ if ($i < ($num-1));
		$ae++ if ($i >= ($num-1));

		$bs++ if ($i >= ($num-1));
		$be++ if ($i < ($num-1));
		}
	&comment("save r[$i]");
	# &mov("eax",&wparam(0));
	&mov(&DWP($i*4,"eax","",0),$c0);

	&pop("ebx");
	&pop("ebp");
	&pop("edi");
	&pop("esi");
	&ret();
	&function_end_B($name);
	}

sub bn_sqr_comba
	{
	local($name,$num)=@_;
	local($r,$a,$c0,$c1,$c2)=@_;
	local($i,$as,$ae,$bs,$be,$ai,$bi);
	local($b,$tot,$end,$half);

	&function_begin_B($name,"");

	$c0="ebx";
	$c1="ecx";
	$c2="ebp";
	$a="esi";
	$r="edi";

	&push("esi");
	&push("edi");
	&push("ebp");
	&push("ebx");
	&mov($r,&wparam(0));
	&mov($a,&wparam(1));
	&xor($c0,$c0);
	&xor($c1,$c1);
	&mov("eax",&DWP(0,$a,"",0));	# load the first word

	$as=0;
	$ae=0;
	$bs=0;
	$be=0;
	$tot=$num+$num-1;

	for ($i=0; $i<$tot; $i++)
		{
		$ai=$as;
		$bi=$bs;
		$end=$be+1;

		&comment("############### Calculate word $i");
		for ($j=$bs; $j<$end; $j++)
			{
			&xor($c2,$c2) if ($j == $bs);
			if (($ai-1) < ($bi+1))
				{
				$v=1;
				$v=2 if ($i+1) == $tot;
				}
			else
				{ $v=0; }
			if (!$v)
				{
				$na=$ai-1;
				$nb=$bi+1;
				}
			else
				{
				$na=$as+($i < ($num-1));
				$nb=$bs+($i >= ($num-1));
				}
			if ($ai == $bi)
				{
				&sqr_add_c($r,$a,$ai,$bi,
					$c0,$c1,$c2,$v,$i,$na,$nb);
				}
			else
				{
				&sqr_add_c2($r,$a,$ai,$bi,
					$c0,$c1,$c2,$v,$i,$na,$nb);
				}
			if ($v)
				{
				&comment("saved r[$i]");
				#&mov(&DWP($i*4,$r,"",0),$c0);
				($c0,$c1,$c2)=($c1,$c2,$c0);
				last;
				}
			$ai--;
			$bi++;
			}
		$as++ if ($i < ($num-1));
		$ae++ if ($i >= ($num-1));

		$bs++ if ($i >= ($num-1));
		$be++ if ($i < ($num-1));
		}
	&mov(&DWP($i*4,$r,"",0),$c0);
	&pop("ebx");
	&pop("ebp");
	&pop("edi");
	&pop("esi");
	&ret();
	&function_end_B($name);
	}
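The generator above emits a column-wise ("comba") schedule: every partial product a[i]*b[j] with i+j == k is summed into a three-word accumulator, the rotating ebx/ecx/ebp triple, before r[k] is stored. A C sketch of the same schedule for the 4x4 case (an illustration with a bounds test where the generator walks start/end indices instead; not code from the file):

	#include <stdint.h>

	typedef uint32_t BN_ULONG;

	/* r[0..7] = a[0..3] * b[0..3], column by column. */
	static void mul_comba4_ref(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b)
	{
		BN_ULONG c0 = 0, c1 = 0, c2 = 0;

		for (int k = 0; k <= 6; k++) {		/* 2*4-1 columns */
			for (int i = 0; i <= k; i++) {
				int j = k - i;

				if (i > 3 || j > 3)
					continue;
				uint64_t t = (uint64_t)a[i] * b[j];
				BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 32);

				c0 += lo; hi += (c0 < lo);	/* add/adc */
				c1 += hi; c2 += (c1 < hi);	/* adc 0 */
			}
			r[k] = c0;
			c0 = c1; c1 = c2; c2 = 0;	/* rotate, as in the perl */
		}
		r[7] = c0;
	}

The squaring variant halves the multiplications by doubling each off-diagonal product, which is what sqr_add_c2's add eax,eax / adc edx,edx pair does.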
diff --git a/src/lib/libcrypto/bn/asm/ia64.S b/src/lib/libcrypto/bn/asm/ia64.S
deleted file mode 100644
index 951abc53ea..0000000000
--- a/src/lib/libcrypto/bn/asm/ia64.S
+++ /dev/null
@@ -1,1555 +0,0 @@
.explicit
.text
.ident "ia64.S, Version 2.1"
.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"

//
// ====================================================================
// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
// project.
//
// Rights for redistribution and usage in source and binary forms are
// granted according to the OpenSSL license. Warranty of any kind is
// disclaimed.
// ====================================================================
//
// Version 2.x is an Itanium2 re-tune. A few words about how Itanium2
// differs from Itanium from this module's viewpoint. Most notably, is
// it "wider" than Itanium? Can you experience loop scalability as
// discussed in commentary sections? Not really:-( Itanium2 has 6
// integer ALU ports, i.e. it's 2 ports wider, but it's not enough to
// spin twice as fast, as I need 8 IALU ports. The amount of floating
// point ports is the same, i.e. 2, while I need 4. In other words, to
// this module Itanium2 remains effectively as "wide" as Itanium. Yet
// it's essentially different in respect to this module, and a re-tune
// was required. Well, because some instruction latencies have changed.
// Most noticeably those intensively used:
//
//			Itanium	Itanium2
//	ldf8		9	6	L2 hit
//	ld8		2	1	L1 hit
//	getf		2	5
//	xma[->getf]	7[+1]	4[+0]
//	add[->st8]	1[+1]	1[+0]
//
// What does it mean? You might ratiocinate that the original code
// should run just faster... Because the sum of latencies is smaller...
// Wrong! Note that the getf latency increased. This means that if a
// loop is scheduled for lower latency (as they were), then it will
// suffer from a stall condition and the code will therefore turn
// anti-scalable, e.g. the original bn_mul_words spun at 5*n, or 2.5
// times slower than expected on Itanium2! What to do? Reschedule loops
// for Itanium2? But then Itanium would exhibit anti-scalability. So
// I've chosen to reschedule for worst latency for every instruction,
// aiming for best *all-round* performance.

// Q. How much faster does it get?
// A. Here is the output from 'openssl speed rsa dsa' for vanilla
//    0.9.6a compiled with gcc version 2.96 20000731 (Red Hat
//    Linux 7.1 2.96-81):
//
//	                  sign    verify    sign/s verify/s
//	rsa  512 bits   0.0036s   0.0003s    275.3   2999.2
//	rsa 1024 bits   0.0203s   0.0011s     49.3    894.1
//	rsa 2048 bits   0.1331s   0.0040s      7.5    250.9
//	rsa 4096 bits   0.9270s   0.0147s      1.1     68.1
//	                  sign    verify    sign/s verify/s
//	dsa  512 bits   0.0035s   0.0043s    288.3    234.8
//	dsa 1024 bits   0.0111s   0.0135s     90.0     74.2
//
//    And here is similar output but for this assembler
//    implementation:-)
//
//	                  sign    verify    sign/s verify/s
//	rsa  512 bits   0.0021s   0.0001s    549.4   9638.5
//	rsa 1024 bits   0.0055s   0.0002s    183.8   4481.1
//	rsa 2048 bits   0.0244s   0.0006s     41.4   1726.3
//	rsa 4096 bits   0.1295s   0.0018s      7.7    561.5
//	                  sign    verify    sign/s verify/s
//	dsa  512 bits   0.0012s   0.0013s    891.9    756.6
//	dsa 1024 bits   0.0023s   0.0028s    440.4    376.2
//
//    Yes, you may argue that it's not a fair comparison, as it's
//    possible to craft the C implementation with the BN_UMULT_HIGH
//    inline assembler macro. But of course! Here is the output
//    with the macro:
//
//	                  sign    verify    sign/s verify/s
//	rsa  512 bits   0.0020s   0.0002s    495.0   6561.0
//	rsa 1024 bits   0.0086s   0.0004s    116.2   2235.7
//	rsa 2048 bits   0.0519s   0.0015s     19.3    667.3
//	rsa 4096 bits   0.3464s   0.0053s      2.9    187.7
//	                  sign    verify    sign/s verify/s
//	dsa  512 bits   0.0016s   0.0020s    613.1    510.5
//	dsa 1024 bits   0.0045s   0.0054s    221.0    183.9
//
//    My code is still way faster, huh:-) And I believe that even
//    higher performance can be achieved. Note that as keys get
//    longer, the performance gain is larger. Why? According to the
//    profiler there is another player in the field, namely
//    BN_from_montgomery, consuming a larger and larger portion of CPU
//    time as keysize decreases. I therefore consider putting effort
//    into an assembler implementation of the following routine:
//
//	void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0)
//	{
//	int i,j;
//	BN_ULONG v,*nrp=&rp[nl];
//
//	for (i=0; i<nl; i++)
//		{
//		v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
//		nrp++;
//		rp++;
//		if (((nrp[-1]+=v)&BN_MASK2) < v)
//			for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ;
//		}
//	}
//
//    It might as well be beneficial to implement even combaX
//    variants, as it appears that it can literally unleash the
//    performance (see the comment section to bn_mul_comba8 below).
//
//    And finally, for your reference, the output for 0.9.6a compiled
//    with SGIcc version 0.01.0-12 (keep in mind that for the moment
//    of this writing it's not possible to convince SGIcc to use the
//    BN_UMULT_HIGH inline assembler macro, yet the code is fast,
//    i.e. for a compiler generated one:-):
//
//	                  sign    verify    sign/s verify/s
//	rsa  512 bits   0.0022s   0.0002s    452.7   5894.3
//	rsa 1024 bits   0.0097s   0.0005s    102.7   2002.9
//	rsa 2048 bits   0.0578s   0.0017s     17.3    600.2
//	rsa 4096 bits   0.3838s   0.0061s      2.6    164.5
//	                  sign    verify    sign/s verify/s
//	dsa  512 bits   0.0018s   0.0022s    547.3    459.6
//	dsa 1024 bits   0.0051s   0.0062s    196.6    161.3
//
//    Oh! Benchmarks were performed on a 733MHz Lion-class Itanium
//    system running Redhat Linux 7.1 (very special thanks to Ray
//    McCaffity of Williams Communications for providing an account).
//
// Q. What the heck is with 'rum 1<<5' at the end of every function?
// A. Well, by clearing the "upper FP registers written" bit of the
//    User Mask I want to excuse the kernel from preserving the upper
//    (f32-f128) FP register bank over a process context switch, thus
//    minimizing bus bandwidth consumption during the switch (i.e.
//    after a PKI operation completes and the program is off doing
//    something else like bulk symmetric encryption). Having said
//    this, I also want to point out that it might be a good idea
//    to compile the whole toolkit (as well as the majority of
//    programs for that matter) with the -mfixed-range=f32-f127
//    command line option. No, it doesn't prevent the compiler from
//    writing to the upper bank, but at least discourages it. If you
//    don't like the idea you have the option to compile the module
//    with -Drum=nop.m on the command line.
//

#if defined(_HPUX_SOURCE) && !defined(_LP64)
#define ADDP addp4
#else
#define ADDP add
#endif

#if 1
//
// bn_[add|sub]_words routines.
//
// Loops are spinning in 2*(n+5) ticks on Itanium (provided that the
// data reside in L1 cache, i.e. 2 ticks away). It's possible to
// compress the epilogue and get down to 2*n+6, but at the cost of
// scalability (the neat feature of this implementation is that it
// shall automagically spin in n+5 on "wider" IA-64 implementations:-)
// I consider that the epilogue is short enough as it is to trade tiny
// performance loss on Itanium for scalability.
//
// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
//
.global bn_add_words#
.proc bn_add_words#
.align 64
.skip 32	// makes the loop body aligned at 64-byte boundary
bn_add_words:
	.prologue
	.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,4,12,0,16
	cmp4.le p6,p0=r35,r0 };;
{ .mfb; mov r8=r0	// return value
(p6)	br.ret.spnt.many b0 };;

{ .mib; sub r10=r35,r0,1
	.save ar.lc,r3
	mov r3=ar.lc
	brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16
	}
{ .mib; ADDP r14=0,r32	// rp
	.save pr,r9
	mov r9=pr };;
	.body
{ .mii; ADDP r15=0,r33	// ap
	mov ar.lc=r10
	mov ar.ec=6 }
{ .mib; ADDP r16=0,r34	// bp
	mov pr.rot=1<<16 };;

.L_bn_add_words_ctop:
{ .mii; (p16) ld8 r32=[r16],8		// b=*(bp++)
	(p18) add r39=r37,r34
	(p19) cmp.ltu.unc p56,p0=r40,r38 }
{ .mfb; (p0) nop.m 0x0
	(p0) nop.f 0x0
	(p0) nop.b 0x0 }
{ .mii; (p16) ld8 r35=[r15],8		// a=*(ap++)
	(p58) cmp.eq.or p57,p0=-1,r41	// (p20)
	(p58) add r41=1,r41 }		// (p20)
{ .mfb; (p21) st8 [r14]=r42,8		// *(rp++)=r
	(p0) nop.f 0x0
	br.ctop.sptk .L_bn_add_words_ctop };;
.L_bn_add_words_cend:

{ .mii;
(p59)	add r8=1,r8	// return value
	mov pr=r9,0x1ffff
	mov ar.lc=r3 }
{ .mbb; nop.b 0x0
	br.ret.sptk.many b0 };;
.endp bn_add_words#

//
// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
//
.global bn_sub_words#
.proc bn_sub_words#
.align 64
.skip 32	// makes the loop body aligned at 64-byte boundary
bn_sub_words:
	.prologue
	.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,4,12,0,16
	cmp4.le p6,p0=r35,r0 };;
{ .mfb; mov r8=r0	// return value
(p6)	br.ret.spnt.many b0 };;

{ .mib; sub r10=r35,r0,1
	.save ar.lc,r3
	mov r3=ar.lc
	brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
	}
{ .mib; ADDP r14=0,r32	// rp
	.save pr,r9
	mov r9=pr };;
	.body
{ .mii; ADDP r15=0,r33	// ap
	mov ar.lc=r10
	mov ar.ec=6 }
{ .mib; ADDP r16=0,r34	// bp
	mov pr.rot=1<<16 };;

.L_bn_sub_words_ctop:
{ .mii; (p16) ld8 r32=[r16],8		// b=*(bp++)
	(p18) sub r39=r37,r34
	(p19) cmp.gtu.unc p56,p0=r40,r38 }
{ .mfb; (p0) nop.m 0x0
	(p0) nop.f 0x0
	(p0) nop.b 0x0 }
{ .mii; (p16) ld8 r35=[r15],8		// a=*(ap++)
	(p58) cmp.eq.or p57,p0=0,r41	// (p20)
	(p58) add r41=-1,r41 }		// (p20)
{ .mbb; (p21) st8 [r14]=r42,8		// *(rp++)=r
	(p0) nop.b 0x0
	br.ctop.sptk .L_bn_sub_words_ctop };;
.L_bn_sub_words_cend:

{ .mii;
(p59)	add r8=1,r8	// return value
	mov pr=r9,0x1ffff
	mov ar.lc=r3 }
{ .mbb; nop.b 0x0
	br.ret.sptk.many b0 };;
.endp bn_sub_words#
#endif
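(As a reading aid: the software-pipelined loops above implement plain carry-propagating addition/subtraction. A C sketch of bn_add_words, not code from the file; the assembler splits the "+ carry" into the predicated cmp.eq.or/add pair so each rotated pipeline stage only consumes one-tick IALU results.)

	#include <stdint.h>

	typedef uint64_t BN_ULONG;

	/* rp[] = ap[] + bp[]; returns the final carry. */
	BN_ULONG bn_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
	    const BN_ULONG *bp, int num)
	{
		BN_ULONG c = 0;

		for (int i = 0; i < num; i++) {
			BN_ULONG s = ap[i] + bp[i];
			BN_ULONG c1 = s < ap[i];	/* carry from a+b */

			s += c;
			c = c1 | (s < c);		/* plus carry-in overflow */
			rp[i] = s;
		}
		return c;
	}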
271
272#if 0
273#define XMA_TEMPTATION
274#endif
275
276#if 1
277//
278// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
279//
280.global bn_mul_words#
281.proc bn_mul_words#
282.align 64
283.skip 32 // makes the loop body aligned at 64-byte boundary
284bn_mul_words:
285 .prologue
286 .save ar.pfs,r2
287#ifdef XMA_TEMPTATION
288{ .mfi; alloc r2=ar.pfs,4,0,0,0 };;
289#else
290{ .mfi; alloc r2=ar.pfs,4,12,0,16 };;
291#endif
292{ .mib; mov r8=r0 // return value
293 cmp4.le p6,p0=r34,r0
294(p6) br.ret.spnt.many b0 };;
295
296{ .mii; sub r10=r34,r0,1
297 .save ar.lc,r3
298 mov r3=ar.lc
299 .save pr,r9
300 mov r9=pr };;
301
302 .body
303{ .mib; setf.sig f8=r35 // w
304 mov pr.rot=0x800001<<16
305 // ------^----- serves as (p50) at first (p27)
306 brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16
307 }
308
309#ifndef XMA_TEMPTATION
310
311{ .mmi; ADDP r14=0,r32 // rp
312 ADDP r15=0,r33 // ap
313 mov ar.lc=r10 }
314{ .mmi; mov r40=0 // serves as r35 at first (p27)
315 mov ar.ec=13 };;
316
317// This loop spins in 2*(n+12) ticks. It's scheduled for data in Itanium
318// L2 cache (i.e. 9 ticks away) as floating point load/store instructions
319// bypass L1 cache and L2 latency is actually best-case scenario for
320// ldf8. The loop is not scalable and shall run in 2*(n+12) even on
321// "wider" IA-64 implementations. It's a trade-off here. n+24 loop
322// would give us ~5% in *overall* performance improvement on "wider"
323// IA-64, but would hurt Itanium for about same because of longer
324// epilogue. As it's a matter of few percents in either case I've
325// chosen to trade the scalability for development time (you can see
326// this very instruction sequence in bn_mul_add_words loop which in
327// turn is scalable).
328.L_bn_mul_words_ctop:
329{ .mfi; (p25) getf.sig r36=f52 // low
330 (p21) xmpy.lu f48=f37,f8
331 (p28) cmp.ltu p54,p50=r41,r39 }
332{ .mfi; (p16) ldf8 f32=[r15],8
333 (p21) xmpy.hu f40=f37,f8
334 (p0) nop.i 0x0 };;
335{ .mii; (p25) getf.sig r32=f44 // high
336 .pred.rel "mutex",p50,p54
337 (p50) add r40=r38,r35 // (p27)
338 (p54) add r40=r38,r35,1 } // (p27)
339{ .mfb; (p28) st8 [r14]=r41,8
340 (p0) nop.f 0x0
341 br.ctop.sptk .L_bn_mul_words_ctop };;
342.L_bn_mul_words_cend:
343
344{ .mii; nop.m 0x0
345.pred.rel "mutex",p51,p55
346(p51) add r8=r36,r0
347(p55) add r8=r36,r0,1 }
348{ .mfb; nop.m 0x0
349 nop.f 0x0
350 nop.b 0x0 }
351
352#else // XMA_TEMPTATION
353
354 setf.sig f37=r0 // serves as carry at (p18) tick
355 mov ar.lc=r10
356 mov ar.ec=5;;
357
358// Most of you examining this code very likely wonder why in the name
359// of Intel the following loop is commented out? Indeed, it looks so
360// neat that you find it hard to believe that it's something wrong
361// with it, right? The catch is that every iteration depends on the
362// result from previous one and the latter isn't available instantly.
363// The loop therefore spins at the latency of xma minus 1, or in other
364// words at 6*(n+4) ticks:-( Compare to the "production" loop above
365// that runs in 2*(n+11) where the low latency problem is worked around
366// by moving the dependency to one-tick latent interger ALU. Note that
367// "distance" between ldf8 and xma is not latency of ldf8, but the
368// *difference* between xma and ldf8 latencies.
369.L_bn_mul_words_ctop:
370{ .mfi; (p16) ldf8 f32=[r33],8
371 (p18) xma.hu f38=f34,f8,f39 }
372{ .mfb; (p20) stf8 [r32]=f37,8
373 (p18) xma.lu f35=f34,f8,f39
374 br.ctop.sptk .L_bn_mul_words_ctop };;
375.L_bn_mul_words_cend:
376
377 getf.sig r8=f41 // the return value
378
379#endif // XMA_TEMPTATION
380
381{ .mii; nop.m 0x0
382 mov pr=r9,0x1ffff
383 mov ar.lc=r3 }
384{ .mfb; rum 1<<5 // clear um.mfh
385 nop.f 0x0
386 br.ret.sptk.many b0 };;
387.endp bn_mul_words#
388#endif
389
390#if 1
391//
392// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
393//
394.global bn_mul_add_words#
395.proc bn_mul_add_words#
396.align 64
397.skip 48 // makes the loop body aligned at 64-byte boundary
398bn_mul_add_words:
399 .prologue
400 .save ar.pfs,r2
401{ .mmi; alloc r2=ar.pfs,4,4,0,8
402 cmp4.le p6,p0=r34,r0
403 .save ar.lc,r3
404 mov r3=ar.lc };;
405{ .mib; mov r8=r0 // return value
406 sub r10=r34,r0,1
407(p6) br.ret.spnt.many b0 };;
408
409{ .mib; setf.sig f8=r35 // w
410 .save pr,r9
411 mov r9=pr
412 brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
413 }
414 .body
415{ .mmi; ADDP r14=0,r32 // rp
416 ADDP r15=0,r33 // ap
417 mov ar.lc=r10 }
418{ .mii; ADDP r16=0,r32 // rp copy
419 mov pr.rot=0x2001<<16
420 // ------^----- serves as (p40) at first (p27)
421 mov ar.ec=11 };;
422
423// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
424// Itanium 2. Yes, unlike previous versions it scales:-) Previous
425// version was peforming *all* additions in IALU and was starving
426// for those even on Itanium 2. In this version one addition is
427// moved to FPU and is folded with multiplication. This is at cost
428// of propogating the result from previous call to this subroutine
429// to L2 cache... In other words negligible even for shorter keys.
430// *Overall* performance improvement [over previous version] varies
431// from 11 to 22 percent depending on key length.
432.L_bn_mul_add_words_ctop:
433.pred.rel "mutex",p40,p42
434{ .mfi; (p23) getf.sig r36=f45 // low
435 (p20) xma.lu f42=f36,f8,f50 // low
436 (p40) add r39=r39,r35 } // (p27)
437{ .mfi; (p16) ldf8 f32=[r15],8 // *(ap++)
438 (p20) xma.hu f36=f36,f8,f50 // high
439 (p42) add r39=r39,r35,1 };; // (p27)
440{ .mmi; (p24) getf.sig r32=f40 // high
441 (p16) ldf8 f46=[r16],8 // *(rp1++)
442 (p40) cmp.ltu p41,p39=r39,r35 } // (p27)
443{ .mib; (p26) st8 [r14]=r39,8 // *(rp2++)
444 (p42) cmp.leu p41,p39=r39,r35 // (p27)
445 br.ctop.sptk .L_bn_mul_add_words_ctop};;
446.L_bn_mul_add_words_cend:
447
448{ .mmi; .pred.rel "mutex",p40,p42
449(p40) add r8=r35,r0
450(p42) add r8=r35,r0,1
451 mov pr=r9,0x1ffff }
452{ .mib; rum 1<<5 // clear um.mfh
453 mov ar.lc=r3
454 br.ret.sptk.many b0 };;
455.endp bn_mul_add_words#
456#endif
457
458#if 1
459//
460// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
461//
462.global bn_sqr_words#
463.proc bn_sqr_words#
464.align 64
465.skip 32 // makes the loop body aligned at 64-byte boundary
466bn_sqr_words:
467 .prologue
468 .save ar.pfs,r2
469{ .mii; alloc r2=ar.pfs,3,0,0,0
470 sxt4 r34=r34 };;
471{ .mii; cmp.le p6,p0=r34,r0
472 mov r8=r0 } // return value
473{ .mfb; ADDP r32=0,r32
474 nop.f 0x0
475(p6) br.ret.spnt.many b0 };;
476
477{ .mii; sub r10=r34,r0,1
478 .save ar.lc,r3
479 mov r3=ar.lc
480 .save pr,r9
481 mov r9=pr };;
482
483 .body
484{ .mib; ADDP r33=0,r33
485 mov pr.rot=1<<16
486 brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16
487 }
488{ .mii; add r34=8,r32
489 mov ar.lc=r10
490 mov ar.ec=18 };;
491
492// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's
493// possible to compress the epilogue (I'm getting tired to write this
494// comment over and over) and get down to 2*n+16 at the cost of
495// scalability. The decision will very likely be reconsidered after the
496// benchmark program is profiled. I.e. if perfomance gain on Itanium
497// will appear larger than loss on "wider" IA-64, then the loop should
498// be explicitely split and the epilogue compressed.
499.L_bn_sqr_words_ctop:
500{ .mfi; (p16) ldf8 f32=[r33],8
501 (p25) xmpy.lu f42=f41,f41
502 (p0) nop.i 0x0 }
503{ .mib; (p33) stf8 [r32]=f50,16
504 (p0) nop.i 0x0
505 (p0) nop.b 0x0 }
506{ .mfi; (p0) nop.m 0x0
507 (p25) xmpy.hu f52=f41,f41
508 (p0) nop.i 0x0 }
509{ .mib; (p33) stf8 [r34]=f60,16
510 (p0) nop.i 0x0
511 br.ctop.sptk .L_bn_sqr_words_ctop };;
512.L_bn_sqr_words_cend:
513
514{ .mii; nop.m 0x0
515 mov pr=r9,0x1ffff
516 mov ar.lc=r3 }
517{ .mfb; rum 1<<5 // clear um.mfh
518 nop.f 0x0
519 br.ret.sptk.many b0 };;
520.endp bn_sqr_words#
521#endif
522
523#if 1
524// Apparently we win nothing by implementing special bn_sqr_comba8.
525// Yes, it is possible to reduce the number of multiplications by
526// almost factor of two, but then the amount of additions would
527// increase by factor of two (as we would have to perform those
528// otherwise performed by xma ourselves). Normally we would trade
529// anyway as multiplications are way more expensive, but not this
530// time... Multiplication kernel is fully pipelined and as we drain
531// one 128-bit multiplication result per clock cycle multiplications
532// are effectively as inexpensive as additions. Special implementation
533// might become of interest for "wider" IA-64 implementation as you'll
534// be able to get through the multiplication phase faster (there won't
535// be any stall issues as discussed in the commentary section below and
536// you therefore will be able to employ all 4 FP units)... But these
537// Itanium days it's simply too hard to justify the effort so I just
538// drop down to bn_mul_comba8 code:-)
539//
540// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
541//
542.global bn_sqr_comba8#
543.proc bn_sqr_comba8#
544.align 64
545bn_sqr_comba8:
546 .prologue
547 .save ar.pfs,r2
548#if defined(_HPUX_SOURCE) && !defined(_LP64)
549{ .mii; alloc r2=ar.pfs,2,1,0,0
550 addp4 r33=0,r33
551 addp4 r32=0,r32 };;
552{ .mii;
553#else
554{ .mii; alloc r2=ar.pfs,2,1,0,0
555#endif
556 mov r34=r33
557 add r14=8,r33 };;
558 .body
559{ .mii; add r17=8,r34
560 add r15=16,r33
561 add r18=16,r34 }
562{ .mfb; add r16=24,r33
563 br .L_cheat_entry_point8 };;
564.endp bn_sqr_comba8#
565#endif
566
567#if 1
568// I've estimated this routine to run in ~120 ticks, but in reality
569// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
570// cycles consumed for instructions fetch? Or did I misinterpret some
571// clause in Itanium µ-architecture manual? Comments are welcomed and
572// highly appreciated.
573//
574// On Itanium 2 it takes ~190 ticks. This is because of stalls on
575// result from getf.sig. I do nothing about it at this point for
576// reasons depicted below.
577//
578// However! It should be noted that even 160 ticks is darn good result
579// as it's over 10 (yes, ten, spelled as t-e-n) times faster than the
580// C version (compiled with gcc with inline assembler). I really
581// kicked compiler's butt here, didn't I? Yeah! This brings us to the
582// following statement. It's damn shame that this routine isn't called
583// very often nowadays! According to the profiler most CPU time is
584// consumed by bn_mul_add_words called from BN_from_montgomery. In
585// order to estimate what we're missing, I've compared the performance
586// of this routine against "traditional" implementation, i.e. against
587// following routine:
588//
589// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
590// { r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]);
591// r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
592// r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
593// r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
594// r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
595// r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
596// r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
597// r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
598// }
599//
600// The one below is over 8 times faster than the one above:-( Even
601// more reasons to "combafy" bn_mul_add_mont...
602//
603// And yes, this routine really made me wish there were an optimizing
604// assembler! It also feels like it deserves a dedication.
605//
606// To my wife for being there and to my kids...
607//
608// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
609//
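// A hedged C model of the comba scheme implemented below (illustrative
// only; unsigned __int128 is used for exposition and BN_ULONG is taken
// as a 64-bit word): for each result word k, all partial products
// a[i]*b[j] with i+j == k are summed into a three-word accumulator and
// the low word is emitted:
//
//	void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
//	{
//		BN_ULONG c0 = 0, c1 = 0, c2 = 0;
//		for (int k = 0; k < 15; k++) {
//			int lo = k > 7 ? k - 7 : 0, hi = k < 7 ? k : 7;
//			for (int i = lo; i <= hi; i++) {
//				unsigned __int128 t = (unsigned __int128)a[i]*b[k-i];
//				BN_ULONG tl = (BN_ULONG)t, th = (BN_ULONG)(t >> 64);
//				if ((c0 += tl) < tl) th++;	// th cannot wrap
//				if ((c1 += th) < th) c2++;
//			}
//			r[k] = c0; c0 = c1; c1 = c2; c2 = 0;
//		}
//		r[15] = c0;
//	}
//
// The assembly instead streams one xma pair per tick through the FP
// pipeline and folds the carries with predicated adds as the results
// drain out via getf.sig.
//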
610#define carry1 r14
611#define carry2 r15
612#define carry3 r34
613.global bn_mul_comba8#
614.proc bn_mul_comba8#
615.align 64
616bn_mul_comba8:
617 .prologue
618 .save ar.pfs,r2
619#if defined(_HPUX_SOURCE) && !defined(_LP64)
620{ .mii; alloc r2=ar.pfs,3,0,0,0
621 addp4 r33=0,r33
622 addp4 r34=0,r34 };;
623{ .mii; addp4 r32=0,r32
624#else
625{ .mii; alloc r2=ar.pfs,3,0,0,0
626#endif
627 add r14=8,r33
628 add r17=8,r34 }
629 .body
630{ .mii; add r15=16,r33
631 add r18=16,r34
632 add r16=24,r33 }
633.L_cheat_entry_point8:
634{ .mmi; add r19=24,r34
635
636 ldf8 f32=[r33],32 };;
637
638{ .mmi; ldf8 f120=[r34],32
639 ldf8 f121=[r17],32 }
640{ .mmi; ldf8 f122=[r18],32
641 ldf8 f123=[r19],32 };;
642{ .mmi; ldf8 f124=[r34]
643 ldf8 f125=[r17] }
644{ .mmi; ldf8 f126=[r18]
645 ldf8 f127=[r19] }
646
647{ .mmi; ldf8 f33=[r14],32
648 ldf8 f34=[r15],32 }
649{ .mmi; ldf8 f35=[r16],32;;
650 ldf8 f36=[r33] }
651{ .mmi; ldf8 f37=[r14]
652 ldf8 f38=[r15] }
653{ .mfi; ldf8 f39=[r16]
654// -------\ Entering multiplier's heaven /-------
655// ------------\ /------------
656// -----------------\ /-----------------
657// ----------------------\/----------------------
658 xma.hu f41=f32,f120,f0 }
659{ .mfi; xma.lu f40=f32,f120,f0 };; // (*)
660{ .mfi; xma.hu f51=f32,f121,f0 }
661{ .mfi; xma.lu f50=f32,f121,f0 };;
662{ .mfi; xma.hu f61=f32,f122,f0 }
663{ .mfi; xma.lu f60=f32,f122,f0 };;
664{ .mfi; xma.hu f71=f32,f123,f0 }
665{ .mfi; xma.lu f70=f32,f123,f0 };;
666{ .mfi; xma.hu f81=f32,f124,f0 }
667{ .mfi; xma.lu f80=f32,f124,f0 };;
668{ .mfi; xma.hu f91=f32,f125,f0 }
669{ .mfi; xma.lu f90=f32,f125,f0 };;
670{ .mfi; xma.hu f101=f32,f126,f0 }
671{ .mfi; xma.lu f100=f32,f126,f0 };;
672{ .mfi; xma.hu f111=f32,f127,f0 }
673{ .mfi; xma.lu f110=f32,f127,f0 };;//
674// (*) You can argue that splitting at every second bundle would
675// prevent "wider" IA-64 implementations from achieving the peak
676// performance. Well, not really... The catch is that if you
677// intend to keep 4 FP units busy by splitting at every fourth
678// bundle and thus perform these 16 multiplications in 4 ticks,
679// the first bundle *below* would stall because the result from
680// the first xma bundle *above* won't be available for another 3
681// ticks (if not more; being an optimist, I assume that a "wider"
682// implementation will have the same latency:-). This stall will hold
683// you back and the performance would be as if every second bundle
684// were split *anyway*...
685{ .mfi; getf.sig r16=f40
686 xma.hu f42=f33,f120,f41
687 add r33=8,r32 }
688{ .mfi; xma.lu f41=f33,f120,f41 };;
689{ .mfi; getf.sig r24=f50
690 xma.hu f52=f33,f121,f51 }
691{ .mfi; xma.lu f51=f33,f121,f51 };;
692{ .mfi; st8 [r32]=r16,16
693 xma.hu f62=f33,f122,f61 }
694{ .mfi; xma.lu f61=f33,f122,f61 };;
695{ .mfi; xma.hu f72=f33,f123,f71 }
696{ .mfi; xma.lu f71=f33,f123,f71 };;
697{ .mfi; xma.hu f82=f33,f124,f81 }
698{ .mfi; xma.lu f81=f33,f124,f81 };;
699{ .mfi; xma.hu f92=f33,f125,f91 }
700{ .mfi; xma.lu f91=f33,f125,f91 };;
701{ .mfi; xma.hu f102=f33,f126,f101 }
702{ .mfi; xma.lu f101=f33,f126,f101 };;
703{ .mfi; xma.hu f112=f33,f127,f111 }
704{ .mfi; xma.lu f111=f33,f127,f111 };;//
705//-------------------------------------------------//
706{ .mfi; getf.sig r25=f41
707 xma.hu f43=f34,f120,f42 }
708{ .mfi; xma.lu f42=f34,f120,f42 };;
709{ .mfi; getf.sig r16=f60
710 xma.hu f53=f34,f121,f52 }
711{ .mfi; xma.lu f52=f34,f121,f52 };;
712{ .mfi; getf.sig r17=f51
713 xma.hu f63=f34,f122,f62
714 add r25=r25,r24 }
715{ .mfi; xma.lu f62=f34,f122,f62
716 mov carry1=0 };;
717{ .mfi; cmp.ltu p6,p0=r25,r24
718 xma.hu f73=f34,f123,f72 }
719{ .mfi; xma.lu f72=f34,f123,f72 };;
720{ .mfi; st8 [r33]=r25,16
721 xma.hu f83=f34,f124,f82
722(p6) add carry1=1,carry1 }
723{ .mfi; xma.lu f82=f34,f124,f82 };;
724{ .mfi; xma.hu f93=f34,f125,f92 }
725{ .mfi; xma.lu f92=f34,f125,f92 };;
726{ .mfi; xma.hu f103=f34,f126,f102 }
727{ .mfi; xma.lu f102=f34,f126,f102 };;
728{ .mfi; xma.hu f113=f34,f127,f112 }
729{ .mfi; xma.lu f112=f34,f127,f112 };;//
730//-------------------------------------------------//
731{ .mfi; getf.sig r18=f42
732 xma.hu f44=f35,f120,f43
733 add r17=r17,r16 }
734{ .mfi; xma.lu f43=f35,f120,f43 };;
735{ .mfi; getf.sig r24=f70
736 xma.hu f54=f35,f121,f53 }
737{ .mfi; mov carry2=0
738 xma.lu f53=f35,f121,f53 };;
739{ .mfi; getf.sig r25=f61
740 xma.hu f64=f35,f122,f63
741 cmp.ltu p7,p0=r17,r16 }
742{ .mfi; add r18=r18,r17
743 xma.lu f63=f35,f122,f63 };;
744{ .mfi; getf.sig r26=f52
745 xma.hu f74=f35,f123,f73
746(p7) add carry2=1,carry2 }
747{ .mfi; cmp.ltu p7,p0=r18,r17
748 xma.lu f73=f35,f123,f73
749 add r18=r18,carry1 };;
750{ .mfi;
751 xma.hu f84=f35,f124,f83
752(p7) add carry2=1,carry2 }
753{ .mfi; cmp.ltu p7,p0=r18,carry1
754 xma.lu f83=f35,f124,f83 };;
755{ .mfi; st8 [r32]=r18,16
756 xma.hu f94=f35,f125,f93
757(p7) add carry2=1,carry2 }
758{ .mfi; xma.lu f93=f35,f125,f93 };;
759{ .mfi; xma.hu f104=f35,f126,f103 }
760{ .mfi; xma.lu f103=f35,f126,f103 };;
761{ .mfi; xma.hu f114=f35,f127,f113 }
762{ .mfi; mov carry1=0
763 xma.lu f113=f35,f127,f113
764 add r25=r25,r24 };;//
765//-------------------------------------------------//
766{ .mfi; getf.sig r27=f43
767 xma.hu f45=f36,f120,f44
768 cmp.ltu p6,p0=r25,r24 }
769{ .mfi; xma.lu f44=f36,f120,f44
770 add r26=r26,r25 };;
771{ .mfi; getf.sig r16=f80
772 xma.hu f55=f36,f121,f54
773(p6) add carry1=1,carry1 }
774{ .mfi; xma.lu f54=f36,f121,f54 };;
775{ .mfi; getf.sig r17=f71
776 xma.hu f65=f36,f122,f64
777 cmp.ltu p6,p0=r26,r25 }
778{ .mfi; xma.lu f64=f36,f122,f64
779 add r27=r27,r26 };;
780{ .mfi; getf.sig r18=f62
781 xma.hu f75=f36,f123,f74
782(p6) add carry1=1,carry1 }
783{ .mfi; cmp.ltu p6,p0=r27,r26
784 xma.lu f74=f36,f123,f74
785 add r27=r27,carry2 };;
786{ .mfi; getf.sig r19=f53
787 xma.hu f85=f36,f124,f84
788(p6) add carry1=1,carry1 }
789{ .mfi; xma.lu f84=f36,f124,f84
790 cmp.ltu p6,p0=r27,carry2 };;
791{ .mfi; st8 [r33]=r27,16
792 xma.hu f95=f36,f125,f94
793(p6) add carry1=1,carry1 }
794{ .mfi; xma.lu f94=f36,f125,f94 };;
795{ .mfi; xma.hu f105=f36,f126,f104 }
796{ .mfi; mov carry2=0
797 xma.lu f104=f36,f126,f104
798 add r17=r17,r16 };;
799{ .mfi; xma.hu f115=f36,f127,f114
800 cmp.ltu p7,p0=r17,r16 }
801{ .mfi; xma.lu f114=f36,f127,f114
802 add r18=r18,r17 };;//
803//-------------------------------------------------//
804{ .mfi; getf.sig r20=f44
805 xma.hu f46=f37,f120,f45
806(p7) add carry2=1,carry2 }
807{ .mfi; cmp.ltu p7,p0=r18,r17
808 xma.lu f45=f37,f120,f45
809 add r19=r19,r18 };;
810{ .mfi; getf.sig r24=f90
811 xma.hu f56=f37,f121,f55 }
812{ .mfi; xma.lu f55=f37,f121,f55 };;
813{ .mfi; getf.sig r25=f81
814 xma.hu f66=f37,f122,f65
815(p7) add carry2=1,carry2 }
816{ .mfi; cmp.ltu p7,p0=r19,r18
817 xma.lu f65=f37,f122,f65
818 add r20=r20,r19 };;
819{ .mfi; getf.sig r26=f72
820 xma.hu f76=f37,f123,f75
821(p7) add carry2=1,carry2 }
822{ .mfi; cmp.ltu p7,p0=r20,r19
823 xma.lu f75=f37,f123,f75
824 add r20=r20,carry1 };;
825{ .mfi; getf.sig r27=f63
826 xma.hu f86=f37,f124,f85
827(p7) add carry2=1,carry2 }
828{ .mfi; xma.lu f85=f37,f124,f85
829 cmp.ltu p7,p0=r20,carry1 };;
830{ .mfi; getf.sig r28=f54
831 xma.hu f96=f37,f125,f95
832(p7) add carry2=1,carry2 }
833{ .mfi; st8 [r32]=r20,16
834 xma.lu f95=f37,f125,f95 };;
835{ .mfi; xma.hu f106=f37,f126,f105 }
836{ .mfi; mov carry1=0
837 xma.lu f105=f37,f126,f105
838 add r25=r25,r24 };;
839{ .mfi; xma.hu f116=f37,f127,f115
840 cmp.ltu p6,p0=r25,r24 }
841{ .mfi; xma.lu f115=f37,f127,f115
842 add r26=r26,r25 };;//
843//-------------------------------------------------//
844{ .mfi; getf.sig r29=f45
845 xma.hu f47=f38,f120,f46
846(p6) add carry1=1,carry1 }
847{ .mfi; cmp.ltu p6,p0=r26,r25
848 xma.lu f46=f38,f120,f46
849 add r27=r27,r26 };;
850{ .mfi; getf.sig r16=f100
851 xma.hu f57=f38,f121,f56
852(p6) add carry1=1,carry1 }
853{ .mfi; cmp.ltu p6,p0=r27,r26
854 xma.lu f56=f38,f121,f56
855 add r28=r28,r27 };;
856{ .mfi; getf.sig r17=f91
857 xma.hu f67=f38,f122,f66
858(p6) add carry1=1,carry1 }
859{ .mfi; cmp.ltu p6,p0=r28,r27
860 xma.lu f66=f38,f122,f66
861 add r29=r29,r28 };;
862{ .mfi; getf.sig r18=f82
863 xma.hu f77=f38,f123,f76
864(p6) add carry1=1,carry1 }
865{ .mfi; cmp.ltu p6,p0=r29,r28
866 xma.lu f76=f38,f123,f76
867 add r29=r29,carry2 };;
868{ .mfi; getf.sig r19=f73
869 xma.hu f87=f38,f124,f86
870(p6) add carry1=1,carry1 }
871{ .mfi; xma.lu f86=f38,f124,f86
872 cmp.ltu p6,p0=r29,carry2 };;
873{ .mfi; getf.sig r20=f64
874 xma.hu f97=f38,f125,f96
875(p6) add carry1=1,carry1 }
876{ .mfi; st8 [r33]=r29,16
877 xma.lu f96=f38,f125,f96 };;
878{ .mfi; getf.sig r21=f55
879 xma.hu f107=f38,f126,f106 }
880{ .mfi; mov carry2=0
881 xma.lu f106=f38,f126,f106
882 add r17=r17,r16 };;
883{ .mfi; xma.hu f117=f38,f127,f116
884 cmp.ltu p7,p0=r17,r16 }
885{ .mfi; xma.lu f116=f38,f127,f116
886 add r18=r18,r17 };;//
887//-------------------------------------------------//
888{ .mfi; getf.sig r22=f46
889 xma.hu f48=f39,f120,f47
890(p7) add carry2=1,carry2 }
891{ .mfi; cmp.ltu p7,p0=r18,r17
892 xma.lu f47=f39,f120,f47
893 add r19=r19,r18 };;
894{ .mfi; getf.sig r24=f110
895 xma.hu f58=f39,f121,f57
896(p7) add carry2=1,carry2 }
897{ .mfi; cmp.ltu p7,p0=r19,r18
898 xma.lu f57=f39,f121,f57
899 add r20=r20,r19 };;
900{ .mfi; getf.sig r25=f101
901 xma.hu f68=f39,f122,f67
902(p7) add carry2=1,carry2 }
903{ .mfi; cmp.ltu p7,p0=r20,r19
904 xma.lu f67=f39,f122,f67
905 add r21=r21,r20 };;
906{ .mfi; getf.sig r26=f92
907 xma.hu f78=f39,f123,f77
908(p7) add carry2=1,carry2 }
909{ .mfi; cmp.ltu p7,p0=r21,r20
910 xma.lu f77=f39,f123,f77
911 add r22=r22,r21 };;
912{ .mfi; getf.sig r27=f83
913 xma.hu f88=f39,f124,f87
914(p7) add carry2=1,carry2 }
915{ .mfi; cmp.ltu p7,p0=r22,r21
916 xma.lu f87=f39,f124,f87
917 add r22=r22,carry1 };;
918{ .mfi; getf.sig r28=f74
919 xma.hu f98=f39,f125,f97
920(p7) add carry2=1,carry2 }
921{ .mfi; xma.lu f97=f39,f125,f97
922 cmp.ltu p7,p0=r22,carry1 };;
923{ .mfi; getf.sig r29=f65
924 xma.hu f108=f39,f126,f107
925(p7) add carry2=1,carry2 }
926{ .mfi; st8 [r32]=r22,16
927 xma.lu f107=f39,f126,f107 };;
928{ .mfi; getf.sig r30=f56
929 xma.hu f118=f39,f127,f117 }
930{ .mfi; xma.lu f117=f39,f127,f117 };;//
931//-------------------------------------------------//
932// Leaving multiplier's heaven... Quite a ride, huh?
933
934{ .mii; getf.sig r31=f47
935 add r25=r25,r24
936 mov carry1=0 };;
937{ .mii; getf.sig r16=f111
938 cmp.ltu p6,p0=r25,r24
939 add r26=r26,r25 };;
940{ .mfb; getf.sig r17=f102 }
941{ .mii;
942(p6) add carry1=1,carry1
943 cmp.ltu p6,p0=r26,r25
944 add r27=r27,r26 };;
945{ .mfb; nop.m 0x0 }
946{ .mii;
947(p6) add carry1=1,carry1
948 cmp.ltu p6,p0=r27,r26
949 add r28=r28,r27 };;
950{ .mii; getf.sig r18=f93
951 add r17=r17,r16
952 mov carry3=0 }
953{ .mii;
954(p6) add carry1=1,carry1
955 cmp.ltu p6,p0=r28,r27
956 add r29=r29,r28 };;
957{ .mii; getf.sig r19=f84
958 cmp.ltu p7,p0=r17,r16 }
959{ .mii;
960(p6) add carry1=1,carry1
961 cmp.ltu p6,p0=r29,r28
962 add r30=r30,r29 };;
963{ .mii; getf.sig r20=f75
964 add r18=r18,r17 }
965{ .mii;
966(p6) add carry1=1,carry1
967 cmp.ltu p6,p0=r30,r29
968 add r31=r31,r30 };;
969{ .mfb; getf.sig r21=f66 }
970{ .mii; (p7) add carry3=1,carry3
971 cmp.ltu p7,p0=r18,r17
972 add r19=r19,r18 }
973{ .mfb; nop.m 0x0 }
974{ .mii;
975(p6) add carry1=1,carry1
976 cmp.ltu p6,p0=r31,r30
977 add r31=r31,carry2 };;
978{ .mfb; getf.sig r22=f57 }
979{ .mii; (p7) add carry3=1,carry3
980 cmp.ltu p7,p0=r19,r18
981 add r20=r20,r19 }
982{ .mfb; nop.m 0x0 }
983{ .mii;
984(p6) add carry1=1,carry1
985 cmp.ltu p6,p0=r31,carry2 };;
986{ .mfb; getf.sig r23=f48 }
987{ .mii; (p7) add carry3=1,carry3
988 cmp.ltu p7,p0=r20,r19
989 add r21=r21,r20 }
990{ .mii;
991(p6) add carry1=1,carry1 }
992{ .mfb; st8 [r33]=r31,16 };;
993
994{ .mfb; getf.sig r24=f112 }
995{ .mii; (p7) add carry3=1,carry3
996 cmp.ltu p7,p0=r21,r20
997 add r22=r22,r21 };;
998{ .mfb; getf.sig r25=f103 }
999{ .mii; (p7) add carry3=1,carry3
1000 cmp.ltu p7,p0=r22,r21
1001 add r23=r23,r22 };;
1002{ .mfb; getf.sig r26=f94 }
1003{ .mii; (p7) add carry3=1,carry3
1004 cmp.ltu p7,p0=r23,r22
1005 add r23=r23,carry1 };;
1006{ .mfb; getf.sig r27=f85 }
1007{ .mii; (p7) add carry3=1,carry3
1008 cmp.ltu p7,p8=r23,carry1};;
1009{ .mii; getf.sig r28=f76
1010 add r25=r25,r24
1011 mov carry1=0 }
1012{ .mii; st8 [r32]=r23,16
1013 (p7) add carry2=1,carry3
1014 (p8) add carry2=0,carry3 };;
1015
1016{ .mfb; nop.m 0x0 }
1017{ .mii; getf.sig r29=f67
1018 cmp.ltu p6,p0=r25,r24
1019 add r26=r26,r25 };;
1020{ .mfb; getf.sig r30=f58 }
1021{ .mii;
1022(p6) add carry1=1,carry1
1023 cmp.ltu p6,p0=r26,r25
1024 add r27=r27,r26 };;
1025{ .mfb; getf.sig r16=f113 }
1026{ .mii;
1027(p6) add carry1=1,carry1
1028 cmp.ltu p6,p0=r27,r26
1029 add r28=r28,r27 };;
1030{ .mfb; getf.sig r17=f104 }
1031{ .mii;
1032(p6) add carry1=1,carry1
1033 cmp.ltu p6,p0=r28,r27
1034 add r29=r29,r28 };;
1035{ .mfb; getf.sig r18=f95 }
1036{ .mii;
1037(p6) add carry1=1,carry1
1038 cmp.ltu p6,p0=r29,r28
1039 add r30=r30,r29 };;
1040{ .mii; getf.sig r19=f86
1041 add r17=r17,r16
1042 mov carry3=0 }
1043{ .mii;
1044(p6) add carry1=1,carry1
1045 cmp.ltu p6,p0=r30,r29
1046 add r30=r30,carry2 };;
1047{ .mii; getf.sig r20=f77
1048 cmp.ltu p7,p0=r17,r16
1049 add r18=r18,r17 }
1050{ .mii;
1051(p6) add carry1=1,carry1
1052 cmp.ltu p6,p0=r30,carry2 };;
1053{ .mfb; getf.sig r21=f68 }
1054{ .mii; st8 [r33]=r30,16
1055(p6) add carry1=1,carry1 };;
1056
1057{ .mfb; getf.sig r24=f114 }
1058{ .mii; (p7) add carry3=1,carry3
1059 cmp.ltu p7,p0=r18,r17
1060 add r19=r19,r18 };;
1061{ .mfb; getf.sig r25=f105 }
1062{ .mii; (p7) add carry3=1,carry3
1063 cmp.ltu p7,p0=r19,r18
1064 add r20=r20,r19 };;
1065{ .mfb; getf.sig r26=f96 }
1066{ .mii; (p7) add carry3=1,carry3
1067 cmp.ltu p7,p0=r20,r19
1068 add r21=r21,r20 };;
1069{ .mfb; getf.sig r27=f87 }
1070{ .mii; (p7) add carry3=1,carry3
1071 cmp.ltu p7,p0=r21,r20
1072 add r21=r21,carry1 };;
1073{ .mib; getf.sig r28=f78
1074 add r25=r25,r24 }
1075{ .mib; (p7) add carry3=1,carry3
1076 cmp.ltu p7,p8=r21,carry1};;
1077{ .mii; st8 [r32]=r21,16
1078 (p7) add carry2=1,carry3
1079 (p8) add carry2=0,carry3 }
1080
1081{ .mii; mov carry1=0
1082 cmp.ltu p6,p0=r25,r24
1083 add r26=r26,r25 };;
1084{ .mfb; getf.sig r16=f115 }
1085{ .mii;
1086(p6) add carry1=1,carry1
1087 cmp.ltu p6,p0=r26,r25
1088 add r27=r27,r26 };;
1089{ .mfb; getf.sig r17=f106 }
1090{ .mii;
1091(p6) add carry1=1,carry1
1092 cmp.ltu p6,p0=r27,r26
1093 add r28=r28,r27 };;
1094{ .mfb; getf.sig r18=f97 }
1095{ .mii;
1096(p6) add carry1=1,carry1
1097 cmp.ltu p6,p0=r28,r27
1098 add r28=r28,carry2 };;
1099{ .mib; getf.sig r19=f88
1100 add r17=r17,r16 }
1101{ .mib;
1102(p6) add carry1=1,carry1
1103 cmp.ltu p6,p0=r28,carry2 };;
1104{ .mii; st8 [r33]=r28,16
1105(p6) add carry1=1,carry1 }
1106
1107{ .mii; mov carry2=0
1108 cmp.ltu p7,p0=r17,r16
1109 add r18=r18,r17 };;
1110{ .mfb; getf.sig r24=f116 }
1111{ .mii; (p7) add carry2=1,carry2
1112 cmp.ltu p7,p0=r18,r17
1113 add r19=r19,r18 };;
1114{ .mfb; getf.sig r25=f107 }
1115{ .mii; (p7) add carry2=1,carry2
1116 cmp.ltu p7,p0=r19,r18
1117 add r19=r19,carry1 };;
1118{ .mfb; getf.sig r26=f98 }
1119{ .mii; (p7) add carry2=1,carry2
1120 cmp.ltu p7,p0=r19,carry1};;
1121{ .mii; st8 [r32]=r19,16
1122 (p7) add carry2=1,carry2 }
1123
1124{ .mfb; add r25=r25,r24 };;
1125
1126{ .mfb; getf.sig r16=f117 }
1127{ .mii; mov carry1=0
1128 cmp.ltu p6,p0=r25,r24
1129 add r26=r26,r25 };;
1130{ .mfb; getf.sig r17=f108 }
1131{ .mii;
1132(p6) add carry1=1,carry1
1133 cmp.ltu p6,p0=r26,r25
1134 add r26=r26,carry2 };;
1135{ .mfb; nop.m 0x0 }
1136{ .mii;
1137(p6) add carry1=1,carry1
1138 cmp.ltu p6,p0=r26,carry2 };;
1139{ .mii; st8 [r33]=r26,16
1140(p6) add carry1=1,carry1 }
1141
1142{ .mfb; add r17=r17,r16 };;
1143{ .mfb; getf.sig r24=f118 }
1144{ .mii; mov carry2=0
1145 cmp.ltu p7,p0=r17,r16
1146 add r17=r17,carry1 };;
1147{ .mii; (p7) add carry2=1,carry2
1148 cmp.ltu p7,p0=r17,carry1};;
1149{ .mii; st8 [r32]=r17
1150 (p7) add carry2=1,carry2 };;
1151{ .mfb; add r24=r24,carry2 };;
1152{ .mib; st8 [r33]=r24 }
1153
1154{ .mib; rum 1<<5 // clear um.mfh
1155 br.ret.sptk.many b0 };;
1156.endp bn_mul_comba8#
1157#undef carry3
1158#undef carry2
1159#undef carry1
1160#endif
1161
1162#if 1
1163// It's possible to make it faster (see the comment to bn_sqr_comba8),
1164// but I reckon it isn't worth the effort, basically because the routine
1165// (actually both of them) is practically never called... So I just play
1166// the same trick as with bn_sqr_comba8.
1167//
1168// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
1169//
1170.global bn_sqr_comba4#
1171.proc bn_sqr_comba4#
1172.align 64
1173bn_sqr_comba4:
1174 .prologue
1175 .save ar.pfs,r2
1176#if defined(_HPUX_SOURCE) && !defined(_LP64)
1177{ .mii; alloc r2=ar.pfs,2,1,0,0
1178 addp4 r32=0,r32
1179 addp4 r33=0,r33 };;
1180{ .mii;
1181#else
1182{ .mii; alloc r2=ar.pfs,2,1,0,0
1183#endif
1184 mov r34=r33
1185 add r14=8,r33 };;
1186 .body
1187{ .mii; add r17=8,r34
1188 add r15=16,r33
1189 add r18=16,r34 }
1190{ .mfb; add r16=24,r33
1191 br .L_cheat_entry_point4 };;
1192.endp bn_sqr_comba4#
1193#endif
1194
1195#if 1
1196// Runs in ~115 cycles and is ~4.5 times faster than C. Well, whatever...
1197//
1198// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
1199//
1200#define carry1 r14
1201#define carry2 r15
1202.global bn_mul_comba4#
1203.proc bn_mul_comba4#
1204.align 64
1205bn_mul_comba4:
1206 .prologue
1207 .save ar.pfs,r2
1208#if defined(_HPUX_SOURCE) && !defined(_LP64)
1209{ .mii; alloc r2=ar.pfs,3,0,0,0
1210 addp4 r33=0,r33
1211 addp4 r34=0,r34 };;
1212{ .mii; addp4 r32=0,r32
1213#else
1214{ .mii; alloc r2=ar.pfs,3,0,0,0
1215#endif
1216 add r14=8,r33
1217 add r17=8,r34 }
1218 .body
1219{ .mii; add r15=16,r33
1220 add r18=16,r34
1221 add r16=24,r33 };;
1222.L_cheat_entry_point4:
1223{ .mmi; add r19=24,r34
1224
1225 ldf8 f32=[r33] }
1226
1227{ .mmi; ldf8 f120=[r34]
1228 ldf8 f121=[r17] };;
1229{ .mmi; ldf8 f122=[r18]
1230 ldf8 f123=[r19] }
1231
1232{ .mmi; ldf8 f33=[r14]
1233 ldf8 f34=[r15] }
1234{ .mfi; ldf8 f35=[r16]
1235
1236 xma.hu f41=f32,f120,f0 }
1237{ .mfi; xma.lu f40=f32,f120,f0 };;
1238{ .mfi; xma.hu f51=f32,f121,f0 }
1239{ .mfi; xma.lu f50=f32,f121,f0 };;
1240{ .mfi; xma.hu f61=f32,f122,f0 }
1241{ .mfi; xma.lu f60=f32,f122,f0 };;
1242{ .mfi; xma.hu f71=f32,f123,f0 }
1243{ .mfi; xma.lu f70=f32,f123,f0 };;//
1244// A major stall takes place here, and at 3 more places below. The
1245// result from the first xma is not available for another 3 ticks.
1246{ .mfi; getf.sig r16=f40
1247 xma.hu f42=f33,f120,f41
1248 add r33=8,r32 }
1249{ .mfi; xma.lu f41=f33,f120,f41 };;
1250{ .mfi; getf.sig r24=f50
1251 xma.hu f52=f33,f121,f51 }
1252{ .mfi; xma.lu f51=f33,f121,f51 };;
1253{ .mfi; st8 [r32]=r16,16
1254 xma.hu f62=f33,f122,f61 }
1255{ .mfi; xma.lu f61=f33,f122,f61 };;
1256{ .mfi; xma.hu f72=f33,f123,f71 }
1257{ .mfi; xma.lu f71=f33,f123,f71 };;//
1258//-------------------------------------------------//
1259{ .mfi; getf.sig r25=f41
1260 xma.hu f43=f34,f120,f42 }
1261{ .mfi; xma.lu f42=f34,f120,f42 };;
1262{ .mfi; getf.sig r16=f60
1263 xma.hu f53=f34,f121,f52 }
1264{ .mfi; xma.lu f52=f34,f121,f52 };;
1265{ .mfi; getf.sig r17=f51
1266 xma.hu f63=f34,f122,f62
1267 add r25=r25,r24 }
1268{ .mfi; mov carry1=0
1269 xma.lu f62=f34,f122,f62 };;
1270{ .mfi; st8 [r33]=r25,16
1271 xma.hu f73=f34,f123,f72
1272 cmp.ltu p6,p0=r25,r24 }
1273{ .mfi; xma.lu f72=f34,f123,f72 };;//
1274//-------------------------------------------------//
1275{ .mfi; getf.sig r18=f42
1276 xma.hu f44=f35,f120,f43
1277(p6) add carry1=1,carry1 }
1278{ .mfi; add r17=r17,r16
1279 xma.lu f43=f35,f120,f43
1280 mov carry2=0 };;
1281{ .mfi; getf.sig r24=f70
1282 xma.hu f54=f35,f121,f53
1283 cmp.ltu p7,p0=r17,r16 }
1284{ .mfi; xma.lu f53=f35,f121,f53 };;
1285{ .mfi; getf.sig r25=f61
1286 xma.hu f64=f35,f122,f63
1287 add r18=r18,r17 }
1288{ .mfi; xma.lu f63=f35,f122,f63
1289(p7) add carry2=1,carry2 };;
1290{ .mfi; getf.sig r26=f52
1291 xma.hu f74=f35,f123,f73
1292 cmp.ltu p7,p0=r18,r17 }
1293{ .mfi; xma.lu f73=f35,f123,f73
1294 add r18=r18,carry1 };;
1295//-------------------------------------------------//
1296{ .mii; st8 [r32]=r18,16
1297(p7) add carry2=1,carry2
1298 cmp.ltu p7,p0=r18,carry1 };;
1299
1300{ .mfi; getf.sig r27=f43 // last major stall
1301(p7) add carry2=1,carry2 };;
1302{ .mii; getf.sig r16=f71
1303 add r25=r25,r24
1304 mov carry1=0 };;
1305{ .mii; getf.sig r17=f62
1306 cmp.ltu p6,p0=r25,r24
1307 add r26=r26,r25 };;
1308{ .mii;
1309(p6) add carry1=1,carry1
1310 cmp.ltu p6,p0=r26,r25
1311 add r27=r27,r26 };;
1312{ .mii;
1313(p6) add carry1=1,carry1
1314 cmp.ltu p6,p0=r27,r26
1315 add r27=r27,carry2 };;
1316{ .mii; getf.sig r18=f53
1317(p6) add carry1=1,carry1
1318 cmp.ltu p6,p0=r27,carry2 };;
1319{ .mfi; st8 [r33]=r27,16
1320(p6) add carry1=1,carry1 }
1321
1322{ .mii; getf.sig r19=f44
1323 add r17=r17,r16
1324 mov carry2=0 };;
1325{ .mii; getf.sig r24=f72
1326 cmp.ltu p7,p0=r17,r16
1327 add r18=r18,r17 };;
1328{ .mii; (p7) add carry2=1,carry2
1329 cmp.ltu p7,p0=r18,r17
1330 add r19=r19,r18 };;
1331{ .mii; (p7) add carry2=1,carry2
1332 cmp.ltu p7,p0=r19,r18
1333 add r19=r19,carry1 };;
1334{ .mii; getf.sig r25=f63
1335 (p7) add carry2=1,carry2
1336 cmp.ltu p7,p0=r19,carry1};;
1337{ .mii; st8 [r32]=r19,16
1338 (p7) add carry2=1,carry2 }
1339
1340{ .mii; getf.sig r26=f54
1341 add r25=r25,r24
1342 mov carry1=0 };;
1343{ .mii; getf.sig r16=f73
1344 cmp.ltu p6,p0=r25,r24
1345 add r26=r26,r25 };;
1346{ .mii;
1347(p6) add carry1=1,carry1
1348 cmp.ltu p6,p0=r26,r25
1349 add r26=r26,carry2 };;
1350{ .mii; getf.sig r17=f64
1351(p6) add carry1=1,carry1
1352 cmp.ltu p6,p0=r26,carry2 };;
1353{ .mii; st8 [r33]=r26,16
1354(p6) add carry1=1,carry1 }
1355
1356{ .mii; getf.sig r24=f74
1357 add r17=r17,r16
1358 mov carry2=0 };;
1359{ .mii; cmp.ltu p7,p0=r17,r16
1360 add r17=r17,carry1 };;
1361
1362{ .mii; (p7) add carry2=1,carry2
1363 cmp.ltu p7,p0=r17,carry1};;
1364{ .mii; st8 [r32]=r17,16
1365 (p7) add carry2=1,carry2 };;
1366
1367{ .mii; add r24=r24,carry2 };;
1368{ .mii; st8 [r33]=r24 }
1369
1370{ .mib; rum 1<<5 // clear um.mfh
1371 br.ret.sptk.many b0 };;
1372.endp bn_mul_comba4#
1373#undef carry2
1374#undef carry1
1375#endif
1376
1377#if 1
1378//
1379// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
1380//
1381// In a nutshell, it's a port of my MIPS III/IV implementation.
1382//
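// For reference, a one-line C equivalent (illustrative only, assuming
// a compiler with unsigned __int128 and h < d so the quotient fits in
// 64 bits -- the code below calls abort on overflow):
//
//	BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
//	{ return (BN_ULONG)((((unsigned __int128)h << 64) | l) / d); }
//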
1383#define AT r14
1384#define H r16
1385#define HH r20
1386#define L r17
1387#define D r18
1388#define DH r22
1389#define I r21
1390
1391#if 0
1392// Some preprocessors (most notably HP-UX) appear to be allergic to
1393// macros enclosed in parentheses [as these three originally were].
1394#define cont p16
1395#define break p0 // p20
1396#define equ p24
1397#else
1398cont=p16
1399break=p0
1400equ=p24
1401#endif
1402
1403.global abort#
1404.global bn_div_words#
1405.proc bn_div_words#
1406.align 64
1407bn_div_words:
1408 .prologue
1409 .save ar.pfs,r2
1410{ .mii; alloc r2=ar.pfs,3,5,0,8
1411 .save b0,r3
1412 mov r3=b0
1413 .save pr,r10
1414 mov r10=pr };;
1415{ .mmb; cmp.eq p6,p0=r34,r0
1416 mov r8=-1
1417(p6) br.ret.spnt.many b0 };;
1418
1419 .body
1420{ .mii; mov H=r32 // save h
1421 mov ar.ec=0 // don't rotate at exit
1422 mov pr.rot=0 }
1423{ .mii; mov L=r33 // save l
1424 mov r36=r0 };;
1425
1426.L_divw_shift: // -vv- note signed comparison
1427{ .mfi; (p0) cmp.lt p16,p0=r0,r34 // d
1428 (p0) shladd r33=r34,1,r0 }
1429{ .mfb; (p0) add r35=1,r36
1430 (p0) nop.f 0x0
1431(p16) br.wtop.dpnt .L_divw_shift };;
1432
1433{ .mii; mov D=r34
1434 shr.u DH=r34,32
1435 sub r35=64,r36 };;
1436{ .mii; setf.sig f7=DH
1437 shr.u AT=H,r35
1438 mov I=r36 };;
1439{ .mib; cmp.ne p6,p0=r0,AT
1440 shl H=H,r36
1441(p6) br.call.spnt.clr b0=abort };; // overflow, die...
1442
1443{ .mfi; fcvt.xuf.s1 f7=f7
1444 shr.u AT=L,r35 };;
1445{ .mii; shl L=L,r36
1446 or H=H,AT };;
1447
1448{ .mii; nop.m 0x0
1449 cmp.leu p6,p0=D,H;;
1450(p6) sub H=H,D }
1451
1452{ .mlx; setf.sig f14=D
1453 movl AT=0xffffffff };;
1454///////////////////////////////////////////////////////////
1455{ .mii; setf.sig f6=H
1456 shr.u HH=H,32;;
1457 cmp.eq p6,p7=HH,DH };;
1458{ .mfb;
1459(p6) setf.sig f8=AT
1460(p7) fcvt.xuf.s1 f6=f6
1461(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
1462
1463{ .mfi; getf.sig r33=f8 // q
1464 xmpy.lu f9=f8,f14 }
1465{ .mfi; xmpy.hu f10=f8,f14
1466 shrp H=H,L,32 };;
1467
1468{ .mmi; getf.sig r35=f9 // tl
1469 getf.sig r31=f10 };; // th
1470
1471.L_divw_1st_iter:
1472{ .mii; (p0) add r32=-1,r33
1473 (p0) cmp.eq equ,cont=HH,r31 };;
1474{ .mii; (p0) cmp.ltu p8,p0=r35,D
1475 (p0) sub r34=r35,D
1476 (equ) cmp.leu break,cont=r35,H };;
1477{ .mib; (cont) cmp.leu cont,break=HH,r31
1478 (p8) add r31=-1,r31
1479(cont) br.wtop.spnt .L_divw_1st_iter };;
1480///////////////////////////////////////////////////////////
1481{ .mii; sub H=H,r35
1482 shl r8=r33,32
1483 shl L=L,32 };;
1484///////////////////////////////////////////////////////////
1485{ .mii; setf.sig f6=H
1486 shr.u HH=H,32;;
1487 cmp.eq p6,p7=HH,DH };;
1488{ .mfb;
1489(p6) setf.sig f8=AT
1490(p7) fcvt.xuf.s1 f6=f6
1491(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
1492
1493{ .mfi; getf.sig r33=f8 // q
1494 xmpy.lu f9=f8,f14 }
1495{ .mfi; xmpy.hu f10=f8,f14
1496 shrp H=H,L,32 };;
1497
1498{ .mmi; getf.sig r35=f9 // tl
1499 getf.sig r31=f10 };; // th
1500
1501.L_divw_2nd_iter:
1502{ .mii; (p0) add r32=-1,r33
1503 (p0) cmp.eq equ,cont=HH,r31 };;
1504{ .mii; (p0) cmp.ltu p8,p0=r35,D
1505 (p0) sub r34=r35,D
1506 (equ) cmp.leu break,cont=r35,H };;
1507{ .mib; (cont) cmp.leu cont,break=HH,r31
1508 (p8) add r31=-1,r31
1509(cont) br.wtop.spnt .L_divw_2nd_iter };;
1510///////////////////////////////////////////////////////////
1511{ .mii; sub H=H,r35
1512 or r8=r8,r33
1513 mov ar.pfs=r2 };;
1514{ .mii; shr.u r9=H,I // remainder if anybody wants it
1515 mov pr=r10,0x1ffff }
1516{ .mfb; br.ret.sptk.many b0 };;
1517
1518// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
1519// procedure.
1520//
1521// inputs: f6 = (double)a, f7 = (double)b
1522// output: f8 = (int)(a/b)
1523// clobbered: f8,f9,f10,f11,pred
1524pred=p15
1525// One can argue that this snippet is copyrighted to Intel
1526// Corporation, as it's essentially identical to one of those
1527// found in the "Divide, Square Root and Remainder" section at
1528// http://www.intel.com/software/products/opensource/libraries/num.htm.
1529// Yes, I admit that the referred code was used as a template, but
1530// only after I realized that there is hardly any other instruction
1531// sequence which would perform this operation. I mean, I figure that
1532// any independent attempt to implement high-performance division
1533// will result in code virtually identical to the Intel code. It
1534// should be noted though that the division kernel below is 1 cycle
1535// faster than Intel's (note the commented splits:-), not to mention
1536// the original prologue (rather, the lack of one) and epilogue.
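//
// For reference, a hedged C model of the Newton-Raphson refinement
// below (illustrative only: the real code runs in the 82-bit FP
// register format, which is what makes q3 exact; frcpa here stands
// for the hardware reciprocal approximation, roughly 8 valid bits):
//
//	double y = frcpa(b);            // y0 ~ 1/b
//	double e = 1.0 - b * y;         // e0
//	double q = a * y;               // q0
//	double e2 = e * e;              // e1
//	q = q + e * q;                  // q1
//	y = y + e * y;                  // y1
//	q = q + e2 * q;                 // q2
//	y = y + e2 * y;                 // y2
//	double r = a - b * q;           // r2, exact thanks to fused fnma
//	q = q + r * y;                  // q3
//	return (uint64_t)q;             // trunc(q3)
//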
1537.align 32
1538.skip 16
1539.L_udiv64_32_b6:
1540 frcpa.s1 f8,pred=f6,f7;; // [0] y0 = 1 / b
1541
1542(pred) fnma.s1 f9=f7,f8,f1 // [5] e0 = 1 - b * y0
1543(pred) fmpy.s1 f10=f6,f8;; // [5] q0 = a * y0
1544(pred) fmpy.s1 f11=f9,f9 // [10] e1 = e0 * e0
1545(pred) fma.s1 f10=f9,f10,f10;; // [10] q1 = q0 + e0 * q0
1546(pred) fma.s1 f8=f9,f8,f8 //;; // [15] y1 = y0 + e0 * y0
1547(pred) fma.s1 f9=f11,f10,f10;; // [15] q2 = q1 + e1 * q1
1548(pred) fma.s1 f8=f11,f8,f8 //;; // [20] y2 = y1 + e1 * y1
1549(pred) fnma.s1 f10=f7,f9,f6;; // [20] r2 = a - b * q2
1550(pred) fma.s1 f8=f10,f8,f9;; // [25] q3 = q2 + r2 * y2
1551
1552 fcvt.fxu.trunc.s1 f8=f8 // [30] q = trunc(q3)
1553 br.ret.sptk.many b6;;
1554.endp bn_div_words#
1555#endif
diff --git a/src/lib/libcrypto/bn/asm/pa-risc2.s b/src/lib/libcrypto/bn/asm/pa-risc2.s
deleted file mode 100644
index f3b16290eb..0000000000
--- a/src/lib/libcrypto/bn/asm/pa-risc2.s
+++ /dev/null
@@ -1,1618 +0,0 @@
1;
2; PA-RISC 2.0 implementation of bn_asm code, based on the
3; 64-bit version of the code. This code is effectively the
4; same as the 64-bit version except the register model is
5; slightly different given all values must be 32-bit between
6; function calls. Thus the 64-bit return values are returned
7; in %ret0 and %ret1 vs just %ret0 as is done in the 64-bit code.
8;
9;
10; This code is approximately 2x faster than the C version
11; for RSA/DSA.
12;
13; See http://devresource.hp.com/ for more details on the PA-RISC
14; architecture. Also see the book "PA-RISC 2.0 Architecture"
15; by Gerry Kane for information on the instruction set architecture.
16;
17; Code written by Chris Ruemmler (with some help from the HP C
18; compiler).
19;
21; The code compiles with HP's assembler.
21;
22
23 .level 2.0N
24 .space $TEXT$
25 .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
26
27;
28; Global Register definitions used for the routines.
29;
30; Some information about HP's runtime architecture for 32-bits.
31;
32; "Caller save" means the calling function must save the register
33; if it wants the register to be preserved.
34; "Callee save" means if a function uses the register, it must save
35; the value before using it.
36;
37; For the floating point registers
38;
39; "caller save" registers: fr4-fr11, fr22-fr31
40; "callee save" registers: fr12-fr21
41; "special" registers: fr0-fr3 (status and exception registers)
42;
43; For the integer registers
44; value zero : r0
45; "caller save" registers: r1,r19-r26
46; "callee save" registers: r3-r18
47; return register : r2 (rp)
48; return values ; r28,r29 (ret0,ret1)
49; Stack pointer ; r30 (sp)
50; millicode return ptr ; r31 (also a caller save register)
51
52
53;
54; Arguments to the routines
55;
56r_ptr .reg %r26
57a_ptr .reg %r25
58b_ptr .reg %r24
59num .reg %r24
60n .reg %r23
61
62;
63; Note that the "w" argument for bn_mul_add_words and bn_mul_words
64; is passed on the stack at a delta of -56 from the top of stack
65; as the routine is entered.
66;
67
68;
69; Globals used in some routines
70;
71
72top_overflow .reg %r23
73high_mask .reg %r22 ; value 0xffffffff80000000L
74
75
76;------------------------------------------------------------------------------
77;
78; bn_mul_add_words
79;
80;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
81; int num, BN_ULONG w)
82;
83; arg0 = r_ptr
84; arg1 = a_ptr
85; arg3 = num
86; -56(sp) = w
87;
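;
; A hedged C model of what this routine computes (illustrative only;
; unsigned __int128 is used for exposition and BN_ULONG is taken as a
; 64-bit word -- the assembly below synthesizes each 64x64 product
; from four 32x32 XMPYUs as ht<<64 + (m+m1)<<32 + lt, with ht = ah*wh,
; m = al*wh, m1 = ah*wl and lt = al*wl):
;
; BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
; {
;     BN_ULONG c = 0;
;     while (num-- > 0) {
;         unsigned __int128 t = (unsigned __int128)*ap++ * w + *rp + c;
;         *rp++ = (BN_ULONG)t;        /* low word back into r[] */
;         c = (BN_ULONG)(t >> 64);    /* carry into the next word */
;     }
;     return c;
; }
;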
88; Local register definitions
89;
90
91fm1 .reg %fr22
92fm .reg %fr23
93ht_temp .reg %fr24
94ht_temp_1 .reg %fr25
95lt_temp .reg %fr26
96lt_temp_1 .reg %fr27
97fm1_1 .reg %fr28
98fm_1 .reg %fr29
99
100fw_h .reg %fr7L
101fw_l .reg %fr7R
102fw .reg %fr7
103
104fht_0 .reg %fr8L
105flt_0 .reg %fr8R
106t_float_0 .reg %fr8
107
108fht_1 .reg %fr9L
109flt_1 .reg %fr9R
110t_float_1 .reg %fr9
111
112tmp_0 .reg %r31
113tmp_1 .reg %r21
114m_0 .reg %r20
115m_1 .reg %r19
116ht_0 .reg %r1
117ht_1 .reg %r3
118lt_0 .reg %r4
119lt_1 .reg %r5
120m1_0 .reg %r6
121m1_1 .reg %r7
122rp_val .reg %r8
123rp_val_1 .reg %r9
124
125bn_mul_add_words
126 .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
127 .proc
128 .callinfo frame=128
129 .entry
130 .align 64
131
132 STD %r3,0(%sp) ; save r3
133 STD %r4,8(%sp) ; save r4
134 NOP ; Needed to make the loop 16-byte aligned
135 NOP ; needed to make the loop 16-byte aligned
136
137 STD %r5,16(%sp) ; save r5
138 NOP
139 STD %r6,24(%sp) ; save r6
140 STD %r7,32(%sp) ; save r7
141
142 STD %r8,40(%sp) ; save r8
143 STD %r9,48(%sp) ; save r9
144 COPY %r0,%ret1 ; return 0 by default
145 DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
146
147 CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
148 LDO 128(%sp),%sp ; bump stack
149
150 ;
151 ; The loop is unrolled twice, so if there is only 1 number
152 ; then go straight to the cleanup code.
153 ;
154 CMPIB,= 1,num,bn_mul_add_words_single_top
155 FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
156
157 ;
158 ; This loop is unrolled 2 times (64-byte aligned as well)
159 ;
160 ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
161	; two 32-bit multiplies can be issued per cycle.
162 ;
163bn_mul_add_words_unroll2
164
165 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
166 FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
167 LDD 0(r_ptr),rp_val ; rp[0]
168 LDD 8(r_ptr),rp_val_1 ; rp[1]
169
170 XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
171 XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
172 FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
173 FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
174
175 XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
176 XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
177 FSTD fm,-8(%sp) ; -8(sp) = m[0]
178 FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
179
180 XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
181 XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
182 FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
183 FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
184
185 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
186	XMPYU	flt_1,fw_l,lt_temp_1	; lt_temp_1 = lt*fw_l
187 FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
188 FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
189
190 LDD -8(%sp),m_0 ; m[0]
191 LDD -40(%sp),m_1 ; m[1]
192 LDD -16(%sp),m1_0 ; m1[0]
193 LDD -48(%sp),m1_1 ; m1[1]
194
195 LDD -24(%sp),ht_0 ; ht[0]
196 LDD -56(%sp),ht_1 ; ht[1]
197 ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
198 ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
199
200 LDD -32(%sp),lt_0
201 LDD -64(%sp),lt_1
202 CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
203 ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
204
205 CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
206 ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
207 EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
208 DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
209
210 EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
211 DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
212 ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
213 ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
214
215 ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
216 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
217 ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
218 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
219
220 ADD %ret1,lt_0,lt_0 ; lt[0] = lt[0] + c;
221 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
222 ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
223 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
224
225 LDO -2(num),num ; num = num - 2;
226 ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
227 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
228 STD lt_0,0(r_ptr) ; rp[0] = lt[0]
229
230 ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
231 ADD,DC ht_1,%r0,%ret1 ; ht[1]++
232 LDO 16(a_ptr),a_ptr ; a_ptr += 2
233
234 STD lt_1,8(r_ptr) ; rp[1] = lt[1]
235 CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
236 LDO 16(r_ptr),r_ptr ; r_ptr += 2
237
238 CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
239
240 ;
241 ; Top of loop aligned on 64-byte boundary
242 ;
243bn_mul_add_words_single_top
244 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
245 LDD 0(r_ptr),rp_val ; rp[0]
246 LDO 8(a_ptr),a_ptr ; a_ptr++
247 XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
248 FSTD fm1,-16(%sp) ; -16(sp) = m1
249 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
250 FSTD fm,-8(%sp) ; -8(sp) = m
251 XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
252 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
253 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
254 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
255
256 LDD -8(%sp),m_0
257 LDD -16(%sp),m1_0 ; m1 = temp1
258 ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
259 LDD -24(%sp),ht_0
260 LDD -32(%sp),lt_0
261
262 CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
263 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
264
265 EXTRD,U tmp_0,31,32,m_0 ; m>>32
266 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
267
268 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
269 ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
270 ADD,DC ht_0,%r0,ht_0 ; ht++
271 ADD %ret1,tmp_0,lt_0 ; lt = lt + c;
272 ADD,DC ht_0,%r0,ht_0 ; ht++
273 ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
274 ADD,DC ht_0,%r0,%ret1 ; ht++
275 STD lt_0,0(r_ptr) ; rp[0] = lt
276
277bn_mul_add_words_exit
278 .EXIT
279
280 EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
281 LDD -80(%sp),%r9 ; restore r9
282 LDD -88(%sp),%r8 ; restore r8
283 LDD -96(%sp),%r7 ; restore r7
284 LDD -104(%sp),%r6 ; restore r6
285 LDD -112(%sp),%r5 ; restore r5
286 LDD -120(%sp),%r4 ; restore r4
287 BVE (%rp)
288 LDD,MB -128(%sp),%r3 ; restore r3
289 .PROCEND ;in=23,24,25,26,29;out=28;
290
291;----------------------------------------------------------------------------
292;
293;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
294;
295; arg0 = rp
296; arg1 = ap
297; arg3 = num
298; w on stack at -56(sp)
299
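;
; As above but without the accumulate into rp[]: each rp[i] gets the
; low word of ap[i]*w + carry.  A hedged C model (illustrative only;
; unsigned __int128 for exposition, BN_ULONG taken as 64-bit here):
;
; BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
; {
;     BN_ULONG c = 0;
;     while (num-- > 0) {
;         unsigned __int128 t = (unsigned __int128)*ap++ * w + c;
;         *rp++ = (BN_ULONG)t;
;         c = (BN_ULONG)(t >> 64);
;     }
;     return c;
; }
;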
300bn_mul_words
301 .proc
302 .callinfo frame=128
303 .entry
304 .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
305 .align 64
306
307 STD %r3,0(%sp) ; save r3
308 STD %r4,8(%sp) ; save r4
309 NOP
310 STD %r5,16(%sp) ; save r5
311
312 STD %r6,24(%sp) ; save r6
313 STD %r7,32(%sp) ; save r7
314 COPY %r0,%ret1 ; return 0 by default
315 DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
316
317 CMPIB,>= 0,num,bn_mul_words_exit
318 LDO 128(%sp),%sp ; bump stack
319
320 ;
321	; See if there is only 1 word to do, and if so just do the cleanup
322 ;
323 CMPIB,= 1,num,bn_mul_words_single_top
324 FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
325
326 ;
327 ; This loop is unrolled 2 times (64-byte aligned as well)
328 ;
329 ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
330	; two 32-bit multiplies can be issued per cycle.
331 ;
332bn_mul_words_unroll2
333
334 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
335 FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
336 XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
337 XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
338
339 FSTD fm1,-16(%sp) ; -16(sp) = m1
340 FSTD fm1_1,-48(%sp) ; -48(sp) = m1
341 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
342	XMPYU	flt_1,fw_h,fm_1		; m[1] = lt*fw_h
343
344 FSTD fm,-8(%sp) ; -8(sp) = m
345 FSTD fm_1,-40(%sp) ; -40(sp) = m
346 XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
347	XMPYU	fht_1,fw_h,ht_temp_1	; ht_temp_1 = ht*fw_h
348
349 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
350 FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
351 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
352	XMPYU	flt_1,fw_l,lt_temp_1	; lt_temp_1 = lt*fw_l
353
354 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
355 FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
356 LDD -8(%sp),m_0
357 LDD -40(%sp),m_1
358
359 LDD -16(%sp),m1_0
360 LDD -48(%sp),m1_1
361 LDD -24(%sp),ht_0
362 LDD -56(%sp),ht_1
363
364 ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
365 ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
366 LDD -32(%sp),lt_0
367 LDD -64(%sp),lt_1
368
369 CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
370 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
371 CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
372 ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
373
374 EXTRD,U tmp_0,31,32,m_0 ; m>>32
375 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
376 EXTRD,U tmp_1,31,32,m_1 ; m>>32
377 DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
378
379 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
380 ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
381 ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
382 ADD,DC ht_0,%r0,ht_0 ; ht++
383
384 ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
385 ADD,DC ht_1,%r0,ht_1 ; ht++
386 ADD %ret1,lt_0,lt_0 ; lt = lt + c (ret1);
387 ADD,DC ht_0,%r0,ht_0 ; ht++
388
389 ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
390 ADD,DC ht_1,%r0,ht_1 ; ht++
391 STD lt_0,0(r_ptr) ; rp[0] = lt
392 STD lt_1,8(r_ptr) ; rp[1] = lt
393
394 COPY ht_1,%ret1 ; carry = ht
395 LDO -2(num),num ; num = num - 2;
396 LDO 16(a_ptr),a_ptr ; ap += 2
397 CMPIB,<= 2,num,bn_mul_words_unroll2
398	LDO	16(r_ptr),r_ptr		; rp += 2
399
400 CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
401
402 ;
403 ; Top of loop aligned on 64-byte boundary
404 ;
405bn_mul_words_single_top
406 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
407
408 XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
409 FSTD fm1,-16(%sp) ; -16(sp) = m1
410 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
411 FSTD fm,-8(%sp) ; -8(sp) = m
412 XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
413 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
414 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
415 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
416
417 LDD -8(%sp),m_0
418 LDD -16(%sp),m1_0
419 ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
420 LDD -24(%sp),ht_0
421 LDD -32(%sp),lt_0
422
423 CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
424 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
425
426 EXTRD,U tmp_0,31,32,m_0 ; m>>32
427 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
428
429 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
430 ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
431 ADD,DC ht_0,%r0,ht_0 ; ht++
432
433 ADD %ret1,lt_0,lt_0 ; lt = lt + c;
434 ADD,DC ht_0,%r0,ht_0 ; ht++
435
436 COPY ht_0,%ret1 ; copy carry
437 STD lt_0,0(r_ptr) ; rp[0] = lt
438
439bn_mul_words_exit
440 .EXIT
441 EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
442 LDD -96(%sp),%r7 ; restore r7
443 LDD -104(%sp),%r6 ; restore r6
444 LDD -112(%sp),%r5 ; restore r5
445 LDD -120(%sp),%r4 ; restore r4
446 BVE (%rp)
447 LDD,MB -128(%sp),%r3 ; restore r3
448 .PROCEND
449
450;----------------------------------------------------------------------------
451;
452;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
453;
454; arg0 = rp
455; arg1 = ap
456; arg2 = num
457;
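;
; A hedged C model of the per-word squaring implemented below
; (illustrative only; BN_ULONG taken as 64-bit): with a = h*2^32 + l,
; a^2 = h^2*2^64 + (h*l)*2^33 + l^2, so the cross product m = h*l is
; folded in shifted left by 33 -- which is exactly what the high_mask
; and DEPD,Z/EXTRD,U juggling below accomplishes:
;
; void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
; {
;     while (num-- > 0) {
;         BN_ULONG a = *ap++, h = a >> 32, l = a & 0xffffffffUL;
;         BN_ULONG lt = l*l, ht = h*h, m = h*l;
;         ht += m >> 31;                  /* high part of m<<33 */
;         BN_ULONG t = lt; lt += m << 33; /* low part of m<<33 */
;         if (lt < t) ht++;               /* propagate the carry */
;         *rp++ = lt; *rp++ = ht;
;     }
; }
;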
458
459bn_sqr_words
460 .proc
461 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
462 .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
463 .entry
464 .align 64
465
466 STD %r3,0(%sp) ; save r3
467 STD %r4,8(%sp) ; save r4
468 NOP
469 STD %r5,16(%sp) ; save r5
470
471 CMPIB,>= 0,num,bn_sqr_words_exit
472 LDO 128(%sp),%sp ; bump stack
473
474 ;
475	; If only 1, then go straight to cleanup
476 ;
477 CMPIB,= 1,num,bn_sqr_words_single_top
478 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
479
480 ;
481 ; This loop is unrolled 2 times (64-byte aligned as well)
482 ;
483
484bn_sqr_words_unroll2
485 FLDD 0(a_ptr),t_float_0 ; a[0]
486 FLDD 8(a_ptr),t_float_1 ; a[1]
487 XMPYU fht_0,flt_0,fm ; m[0]
488 XMPYU fht_1,flt_1,fm_1 ; m[1]
489
490 FSTD fm,-24(%sp) ; store m[0]
491 FSTD fm_1,-56(%sp) ; store m[1]
492 XMPYU flt_0,flt_0,lt_temp ; lt[0]
493 XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
494
495 FSTD lt_temp,-16(%sp) ; store lt[0]
496 FSTD lt_temp_1,-48(%sp) ; store lt[1]
497 XMPYU fht_0,fht_0,ht_temp ; ht[0]
498 XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
499
500 FSTD ht_temp,-8(%sp) ; store ht[0]
501 FSTD ht_temp_1,-40(%sp) ; store ht[1]
502 LDD -24(%sp),m_0
503 LDD -56(%sp),m_1
504
505 AND m_0,high_mask,tmp_0 ; m[0] & Mask
506 AND m_1,high_mask,tmp_1 ; m[1] & Mask
507 DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
508 DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
509
510 LDD -16(%sp),lt_0
511 LDD -48(%sp),lt_1
512 EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
513 EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
514
515 LDD -8(%sp),ht_0
516 LDD -40(%sp),ht_1
517 ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
518 ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
519
520 ADD lt_0,m_0,lt_0 ; lt = lt+m
521 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
522 STD lt_0,0(r_ptr) ; rp[0] = lt[0]
523	STD	ht_0,8(r_ptr)		; rp[1] = ht[0]
524
525 ADD lt_1,m_1,lt_1 ; lt = lt+m
526 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
527 STD lt_1,16(r_ptr) ; rp[2] = lt[1]
528 STD ht_1,24(r_ptr) ; rp[3] = ht[1]
529
530 LDO -2(num),num ; num = num - 2;
531 LDO 16(a_ptr),a_ptr ; ap += 2
532 CMPIB,<= 2,num,bn_sqr_words_unroll2
533 LDO 32(r_ptr),r_ptr ; rp += 4
534
535 CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
536
537 ;
538 ; Top of loop aligned on 64-byte boundary
539 ;
540bn_sqr_words_single_top
541 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
542
543 XMPYU fht_0,flt_0,fm ; m
544 FSTD fm,-24(%sp) ; store m
545
546 XMPYU flt_0,flt_0,lt_temp ; lt
547 FSTD lt_temp,-16(%sp) ; store lt
548
549 XMPYU fht_0,fht_0,ht_temp ; ht
550 FSTD ht_temp,-8(%sp) ; store ht
551
552 LDD -24(%sp),m_0 ; load m
553 AND m_0,high_mask,tmp_0 ; m & Mask
554 DEPD,Z m_0,30,31,m_0 ; m << 32+1
555 LDD -16(%sp),lt_0 ; lt
556
557 LDD -8(%sp),ht_0 ; ht
558 EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
559 ADD m_0,lt_0,lt_0 ; lt = lt+m
560 ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
561 ADD,DC ht_0,%r0,ht_0 ; ht++
562
563 STD lt_0,0(r_ptr) ; rp[0] = lt
564 STD ht_0,8(r_ptr) ; rp[1] = ht
565
566bn_sqr_words_exit
567 .EXIT
568 LDD -112(%sp),%r5 ; restore r5
569 LDD -120(%sp),%r4 ; restore r4
570 BVE (%rp)
571 LDD,MB -128(%sp),%r3
572 .PROCEND ;in=23,24,25,26,29;out=28;
573
574
575;----------------------------------------------------------------------------
576;
577;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
578;
579; arg0 = rp
580; arg1 = ap
581; arg2 = bp
582; arg3 = n
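;
; A hedged C model of the add-with-carry chain below (illustrative
; only; BN_ULONG taken as 64-bit):
;
; BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
; {
;     BN_ULONG c = 0;
;     while (n-- > 0) {
;         BN_ULONG t = *a++ + c;      /* t = a[i] + carry-in */
;         c = (t < c);                /* carry out of the first add */
;         BN_ULONG l = t + *b++;      /* l = t + b[i] */
;         c += (l < t);               /* fold in the second carry */
;         *r++ = l;
;     }
;     return c;                       /* final carry, 0 or 1 */
; }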
583
584t .reg %r22
585b .reg %r21
586l .reg %r20
587
588bn_add_words
589 .proc
590 .entry
591 .callinfo
592 .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
593 .align 64
594
595 CMPIB,>= 0,n,bn_add_words_exit
596 COPY %r0,%ret1 ; return 0 by default
597
598 ;
599 ; If 2 or more numbers do the loop
600 ;
601 CMPIB,= 1,n,bn_add_words_single_top
602 NOP
603
604 ;
605 ; This loop is unrolled 2 times (64-byte aligned as well)
606 ;
607bn_add_words_unroll2
608 LDD 0(a_ptr),t
609 LDD 0(b_ptr),b
610 ADD t,%ret1,t ; t = t+c;
611 ADD,DC %r0,%r0,%ret1 ; set c to carry
612 ADD t,b,l ; l = t + b[0]
613 ADD,DC %ret1,%r0,%ret1 ; c+= carry
614 STD l,0(r_ptr)
615
616 LDD 8(a_ptr),t
617 LDD 8(b_ptr),b
618 ADD t,%ret1,t ; t = t+c;
619 ADD,DC %r0,%r0,%ret1 ; set c to carry
620 ADD t,b,l ; l = t + b[0]
621 ADD,DC %ret1,%r0,%ret1 ; c+= carry
622 STD l,8(r_ptr)
623
624 LDO -2(n),n
625 LDO 16(a_ptr),a_ptr
626 LDO 16(b_ptr),b_ptr
627
628 CMPIB,<= 2,n,bn_add_words_unroll2
629 LDO 16(r_ptr),r_ptr
630
631 CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
632
633bn_add_words_single_top
634 LDD 0(a_ptr),t
635 LDD 0(b_ptr),b
636
637 ADD t,%ret1,t ; t = t+c;
638 ADD,DC %r0,%r0,%ret1 ; set c to carry (could use CMPCLR??)
639 ADD t,b,l ; l = t + b[0]
640 ADD,DC %ret1,%r0,%ret1 ; c+= carry
641 STD l,0(r_ptr)
642
643bn_add_words_exit
644 .EXIT
645 BVE (%rp)
646 EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
647 .PROCEND ;in=23,24,25,26,29;out=28;
648
649;----------------------------------------------------------------------------
650;
651;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
652;
653; arg0 = rp
654; arg1 = ap
655; arg2 = bp
656; arg3 = n
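;
; A hedged C model of the borrow chain below (illustrative only; the
; pair of CMPCLRs implements the "keep the old borrow on equality"
; rule):
;
; BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
; {
;     BN_ULONG c = 0;
;     while (n-- > 0) {
;         BN_ULONG t1 = *a++, t2 = *b++;
;         *r++ = t1 - t2 - c;
;         if (t1 != t2)           /* if equal, the old borrow stands */
;             c = (t1 < t2);
;     }
;     return c;
; }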
657
658t1 .reg %r22
659t2 .reg %r21
660sub_tmp1 .reg %r20
661sub_tmp2 .reg %r19
662
663
664bn_sub_words
665 .proc
666 .callinfo
667 .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
668 .entry
669 .align 64
670
671 CMPIB,>= 0,n,bn_sub_words_exit
672 COPY %r0,%ret1 ; return 0 by default
673
674 ;
675 ; If 2 or more numbers do the loop
676 ;
677 CMPIB,= 1,n,bn_sub_words_single_top
678 NOP
679
680 ;
681 ; This loop is unrolled 2 times (64-byte aligned as well)
682 ;
683bn_sub_words_unroll2
684 LDD 0(a_ptr),t1
685 LDD 0(b_ptr),t2
686 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
687 SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
688
689 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
690 LDO 1(%r0),sub_tmp2
691
692 CMPCLR,*= t1,t2,%r0
693 COPY sub_tmp2,%ret1
694 STD sub_tmp1,0(r_ptr)
695
696 LDD 8(a_ptr),t1
697 LDD 8(b_ptr),t2
698 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
699 SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
700 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
701 LDO 1(%r0),sub_tmp2
702
703 CMPCLR,*= t1,t2,%r0
704 COPY sub_tmp2,%ret1
705 STD sub_tmp1,8(r_ptr)
706
707 LDO -2(n),n
708 LDO 16(a_ptr),a_ptr
709 LDO 16(b_ptr),b_ptr
710
711 CMPIB,<= 2,n,bn_sub_words_unroll2
712 LDO 16(r_ptr),r_ptr
713
714 CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
715
716bn_sub_words_single_top
717 LDD 0(a_ptr),t1
718 LDD 0(b_ptr),t2
719 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
720 SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
721 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
722 LDO 1(%r0),sub_tmp2
723
724 CMPCLR,*= t1,t2,%r0
725 COPY sub_tmp2,%ret1
726
727 STD sub_tmp1,0(r_ptr)
728
729bn_sub_words_exit
730 .EXIT
731 BVE (%rp)
732 EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
733 .PROCEND ;in=23,24,25,26,29;out=28;
734
735;------------------------------------------------------------------------------
736;
737; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
738;
739; arg0 = h
740; arg1 = l
741; arg2 = d
742;
743; This is mainly just output from the HP C compiler.
744;
745;------------------------------------------------------------------------------
746bn_div_words
747 .PROC
748 .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN
749 .IMPORT BN_num_bits_word,CODE
750 ;--- not PIC .IMPORT __iob,DATA
751 ;--- not PIC .IMPORT fprintf,CODE
752 .IMPORT abort,CODE
753 .IMPORT $$div2U,MILLICODE
754 .CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
755 .ENTRY
756 STW %r2,-20(%r30) ;offset 0x8ec
757 STW,MA %r3,192(%r30) ;offset 0x8f0
758 STW %r4,-188(%r30) ;offset 0x8f4
759 DEPD %r5,31,32,%r6 ;offset 0x8f8
760 STD %r6,-184(%r30) ;offset 0x8fc
761 DEPD %r7,31,32,%r8 ;offset 0x900
762 STD %r8,-176(%r30) ;offset 0x904
763 STW %r9,-168(%r30) ;offset 0x908
764 LDD -248(%r30),%r3 ;offset 0x90c
765 COPY %r26,%r4 ;offset 0x910
766 COPY %r24,%r5 ;offset 0x914
767 DEPD %r25,31,32,%r4 ;offset 0x918
768 CMPB,*<> %r3,%r0,$0006000C ;offset 0x91c
769 DEPD %r23,31,32,%r5 ;offset 0x920
770 MOVIB,TR -1,%r29,$00060002 ;offset 0x924
771 EXTRD,U %r29,31,32,%r28 ;offset 0x928
772$0006002A
773 LDO -1(%r29),%r29 ;offset 0x92c
774 SUB %r23,%r7,%r23 ;offset 0x930
775$00060024
776 SUB %r4,%r31,%r25 ;offset 0x934
777 AND %r25,%r19,%r26 ;offset 0x938
778 CMPB,*<>,N %r0,%r26,$00060046 ;offset 0x93c
779 DEPD,Z %r25,31,32,%r20 ;offset 0x940
780 OR %r20,%r24,%r21 ;offset 0x944
781 CMPB,*<<,N %r21,%r23,$0006002A ;offset 0x948
782 SUB %r31,%r2,%r31 ;offset 0x94c
783$00060046
784$0006002E
785 DEPD,Z %r23,31,32,%r25 ;offset 0x950
786 EXTRD,U %r23,31,32,%r26 ;offset 0x954
787 AND %r25,%r19,%r24 ;offset 0x958
788 ADD,L %r31,%r26,%r31 ;offset 0x95c
789 CMPCLR,*>>= %r5,%r24,%r0 ;offset 0x960
790 LDO 1(%r31),%r31 ;offset 0x964
791$00060032
792 CMPB,*<<=,N %r31,%r4,$00060036 ;offset 0x968
793 LDO -1(%r29),%r29 ;offset 0x96c
794 ADD,L %r4,%r3,%r4 ;offset 0x970
795$00060036
796 ADDIB,=,N -1,%r8,$D0 ;offset 0x974
797 SUB %r5,%r24,%r28 ;offset 0x978
798$0006003A
799 SUB %r4,%r31,%r24 ;offset 0x97c
800 SHRPD %r24,%r28,32,%r4 ;offset 0x980
801 DEPD,Z %r29,31,32,%r9 ;offset 0x984
802 DEPD,Z %r28,31,32,%r5 ;offset 0x988
803$0006001C
804 EXTRD,U %r4,31,32,%r31 ;offset 0x98c
805 CMPB,*<>,N %r31,%r2,$00060020 ;offset 0x990
806 MOVB,TR %r6,%r29,$D1 ;offset 0x994
807 STD %r29,-152(%r30) ;offset 0x998
808$0006000C
809 EXTRD,U %r3,31,32,%r25 ;offset 0x99c
810 COPY %r3,%r26 ;offset 0x9a0
811 EXTRD,U %r3,31,32,%r9 ;offset 0x9a4
812 EXTRD,U %r4,31,32,%r8 ;offset 0x9a8
813 .CALL ARGW0=GR,ARGW1=GR,RTNVAL=GR ;in=25,26;out=28;
814 B,L BN_num_bits_word,%r2 ;offset 0x9ac
815 EXTRD,U %r5,31,32,%r7 ;offset 0x9b0
816 LDI 64,%r20 ;offset 0x9b4
817 DEPD %r7,31,32,%r5 ;offset 0x9b8
818 DEPD %r8,31,32,%r4 ;offset 0x9bc
819 DEPD %r9,31,32,%r3 ;offset 0x9c0
820 CMPB,= %r28,%r20,$00060012 ;offset 0x9c4
821 COPY %r28,%r24 ;offset 0x9c8
822 MTSARCM %r24 ;offset 0x9cc
823 DEPDI,Z -1,%sar,1,%r19 ;offset 0x9d0
824 CMPB,*>>,N %r4,%r19,$D2 ;offset 0x9d4
825$00060012
826 SUBI 64,%r24,%r31 ;offset 0x9d8
827 CMPCLR,*<< %r4,%r3,%r0 ;offset 0x9dc
828 SUB %r4,%r3,%r4 ;offset 0x9e0
829$00060016
830 CMPB,= %r31,%r0,$0006001A ;offset 0x9e4
831 COPY %r0,%r9 ;offset 0x9e8
832 MTSARCM %r31 ;offset 0x9ec
833 DEPD,Z %r3,%sar,64,%r3 ;offset 0x9f0
834 SUBI 64,%r31,%r26 ;offset 0x9f4
835 MTSAR %r26 ;offset 0x9f8
836 SHRPD %r4,%r5,%sar,%r4 ;offset 0x9fc
837 MTSARCM %r31 ;offset 0xa00
838 DEPD,Z %r5,%sar,64,%r5 ;offset 0xa04
839$0006001A
840 DEPDI,Z -1,31,32,%r19 ;offset 0xa08
841 AND %r3,%r19,%r29 ;offset 0xa0c
842 EXTRD,U %r29,31,32,%r2 ;offset 0xa10
843 DEPDI,Z -1,63,32,%r6 ;offset 0xa14
844 MOVIB,TR 2,%r8,$0006001C ;offset 0xa18
845 EXTRD,U %r3,63,32,%r7 ;offset 0xa1c
846$D2
847 ;--- not PIC ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20
848 ;--- not PIC LDIL LR'C$7,%r21 ;offset 0xa24
849 ;--- not PIC LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28
850 ;--- not PIC .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28;
851 ;--- not PIC B,L fprintf,%r2 ;offset 0xa2c
852 ;--- not PIC LDO RR'C$7(%r21),%r25 ;offset 0xa30
853 .CALL ;
854 B,L abort,%r2 ;offset 0xa34
855 NOP ;offset 0xa38
856 B $D3 ;offset 0xa3c
857 LDW -212(%r30),%r2 ;offset 0xa40
858$00060020
859 COPY %r4,%r26 ;offset 0xa44
860 EXTRD,U %r4,31,32,%r25 ;offset 0xa48
861 COPY %r2,%r24 ;offset 0xa4c
862 .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
863 B,L $$div2U,%r31 ;offset 0xa50
864 EXTRD,U %r2,31,32,%r23 ;offset 0xa54
865 DEPD %r28,31,32,%r29 ;offset 0xa58
866$00060022
867 STD %r29,-152(%r30) ;offset 0xa5c
868$D1
869 AND %r5,%r19,%r24 ;offset 0xa60
870 EXTRD,U %r24,31,32,%r24 ;offset 0xa64
871 STW %r2,-160(%r30) ;offset 0xa68
872 STW %r7,-128(%r30) ;offset 0xa6c
873 FLDD -152(%r30),%fr4 ;offset 0xa70
874 FLDD -152(%r30),%fr7 ;offset 0xa74
875 FLDW -160(%r30),%fr8L ;offset 0xa78
876 FLDW -128(%r30),%fr5L ;offset 0xa7c
877 XMPYU %fr8L,%fr7L,%fr10 ;offset 0xa80
878 FSTD %fr10,-136(%r30) ;offset 0xa84
879 XMPYU %fr8L,%fr7R,%fr22 ;offset 0xa88
880 FSTD %fr22,-144(%r30) ;offset 0xa8c
881 XMPYU %fr5L,%fr4L,%fr11 ;offset 0xa90
882 XMPYU %fr5L,%fr4R,%fr23 ;offset 0xa94
883 FSTD %fr11,-112(%r30) ;offset 0xa98
884 FSTD %fr23,-120(%r30) ;offset 0xa9c
885 LDD -136(%r30),%r28 ;offset 0xaa0
886 DEPD,Z %r28,31,32,%r31 ;offset 0xaa4
887 LDD -144(%r30),%r20 ;offset 0xaa8
888 ADD,L %r20,%r31,%r31 ;offset 0xaac
889 LDD -112(%r30),%r22 ;offset 0xab0
890 DEPD,Z %r22,31,32,%r22 ;offset 0xab4
891 LDD -120(%r30),%r21 ;offset 0xab8
892 B $00060024 ;offset 0xabc
893 ADD,L %r21,%r22,%r23 ;offset 0xac0
894$D0
895 OR %r9,%r29,%r29 ;offset 0xac4
896$00060040
897 EXTRD,U %r29,31,32,%r28 ;offset 0xac8
898$00060002
899$L2
900 LDW -212(%r30),%r2 ;offset 0xacc
901$D3
902 LDW -168(%r30),%r9 ;offset 0xad0
903 LDD -176(%r30),%r8 ;offset 0xad4
904 EXTRD,U %r8,31,32,%r7 ;offset 0xad8
905 LDD -184(%r30),%r6 ;offset 0xadc
906 EXTRD,U %r6,31,32,%r5 ;offset 0xae0
907 LDW -188(%r30),%r4 ;offset 0xae4
908 BVE (%r2) ;offset 0xae8
909 .EXIT
910 LDW,MB -192(%r30),%r3 ;offset 0xaec
911 .PROCEND ;in=23,25;out=28,29;fpin=105,107;
912
913
914
915
916;----------------------------------------------------------------------------
917;
918; Registers to hold 64-bit values to manipulate. The "L" part
919; of the register corresponds to the upper 32-bits, while the "R"
920; part corresponds to the lower 32-bits
921;
922; Note that when using b6 and b7, the code must save these before
923; using them because they are callee-save registers
924;
925;
926; Floating point registers used to save values that
927; are manipulated. These don't collide with ftemp1-4 and
928; are all caller-save registers
929;
930a0 .reg %fr22
931a0L .reg %fr22L
932a0R .reg %fr22R
933
934a1 .reg %fr23
935a1L .reg %fr23L
936a1R .reg %fr23R
937
938a2 .reg %fr24
939a2L .reg %fr24L
940a2R .reg %fr24R
941
942a3 .reg %fr25
943a3L .reg %fr25L
944a3R .reg %fr25R
945
946a4 .reg %fr26
947a4L .reg %fr26L
948a4R .reg %fr26R
949
950a5 .reg %fr27
951a5L .reg %fr27L
952a5R .reg %fr27R
953
954a6 .reg %fr28
955a6L .reg %fr28L
956a6R .reg %fr28R
957
958a7 .reg %fr29
959a7L .reg %fr29L
960a7R .reg %fr29R
961
962b0 .reg %fr30
963b0L .reg %fr30L
964b0R .reg %fr30R
965
966b1 .reg %fr31
967b1L .reg %fr31L
968b1R .reg %fr31R
969
970;
971; Temporary floating point variables, these are all caller save
972; registers
973;
974ftemp1 .reg %fr4
975ftemp2 .reg %fr5
976ftemp3 .reg %fr6
977ftemp4 .reg %fr7
978
979;
980; The B set of registers when used.
981;
982
983b2 .reg %fr8
984b2L .reg %fr8L
985b2R .reg %fr8R
986
987b3 .reg %fr9
988b3L .reg %fr9L
989b3R .reg %fr9R
990
991b4 .reg %fr10
992b4L .reg %fr10L
993b4R .reg %fr10R
994
995b5 .reg %fr11
996b5L .reg %fr11L
997b5R .reg %fr11R
998
999b6 .reg %fr12
1000b6L .reg %fr12L
1001b6R .reg %fr12R
1002
1003b7 .reg %fr13
1004b7L .reg %fr13L
1005b7R .reg %fr13R
1006
1007c1 .reg %r21 ; only reg
1008temp1 .reg %r20 ; only reg
1009temp2 .reg %r19 ; only reg
1010temp3 .reg %r31 ; only reg
1011
1012m1 .reg %r28
1013c2 .reg %r23
1014high_one .reg %r1
1015ht .reg %r6
1016lt .reg %r5
1017m .reg %r4
1018c3 .reg %r3
1019
1020SQR_ADD_C .macro A0L,A0R,C1,C2,C3
1021 XMPYU A0L,A0R,ftemp1 ; m
1022 FSTD ftemp1,-24(%sp) ; store m
1023
1024 XMPYU A0R,A0R,ftemp2 ; lt
1025 FSTD ftemp2,-16(%sp) ; store lt
1026
1027 XMPYU A0L,A0L,ftemp3 ; ht
1028 FSTD ftemp3,-8(%sp) ; store ht
1029
1030 LDD -24(%sp),m ; load m
1031 AND m,high_mask,temp2 ; m & Mask
1032 DEPD,Z m,30,31,temp3 ; m << 32+1
1033 LDD -16(%sp),lt ; lt
1034
1035 LDD -8(%sp),ht ; ht
1036 EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
1037 ADD temp3,lt,lt ; lt = lt+m
1038 ADD,L ht,temp1,ht ; ht += temp1
1039 ADD,DC ht,%r0,ht ; ht++
1040
1041 ADD C1,lt,C1 ; c1=c1+lt
1042 ADD,DC ht,%r0,ht ; ht++
1043
1044 ADD C2,ht,C2 ; c2=c2+ht
1045 ADD,DC C3,%r0,C3 ; c3++
1046.endm
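;
; In C terms, SQR_ADD_C adds the 128-bit square of one 64-bit word into
; the running column accumulator (C1,C2,C3). A minimal sketch, assuming
; BN_ULONG is a 64-bit unsigned long and that the compiler provides
; unsigned __int128 (which the HP compiler targeted here does not --
; hence the 32-bit XMPYU halves above):
;
;	/* c3:c2:c1 += a*a */
;	static void sqr_add_c(BN_ULONG a,
;	    BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
;	{
;		unsigned __int128 t = (unsigned __int128)a * a;
;		BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);
;
;		*c1 += lo;
;		hi += (*c1 < lo);	/* carry out of the low word    */
;		*c2 += hi;		/* hi <= 2^64-2, cannot wrap    */
;		*c3 += (*c2 < hi);	/* carry into the third word    */
;	}
;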
1047
1048SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
1049 XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
1050 FSTD ftemp1,-16(%sp) ;
1051 XMPYU A0R,A1L,ftemp2 ; m = bh*lt
1052 FSTD ftemp2,-8(%sp) ;
1053 XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
1054 FSTD ftemp3,-32(%sp)
1055 XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
1056 FSTD ftemp4,-24(%sp) ;
1057
1058 LDD -8(%sp),m ; r21 = m
1059 LDD -16(%sp),m1 ; r19 = m1
1060 ADD,L m,m1,m ; m+m1
1061
1062 DEPD,Z m,31,32,temp3 ; (m+m1<<32)
1063 LDD -24(%sp),ht ; r24 = ht
1064
1065 CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
1066 ADD,L ht,high_one,ht ; ht+=high_one
1067
1068 EXTRD,U m,31,32,temp1 ; m >> 32
1069 LDD -32(%sp),lt ; lt
1070 ADD,L ht,temp1,ht ; ht+= m>>32
1071 ADD lt,temp3,lt ; lt = lt+m1
1072 ADD,DC ht,%r0,ht ; ht++
1073
1074 ADD ht,ht,ht ; ht=ht+ht;
1075 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1076
1077 ADD lt,lt,lt ; lt=lt+lt;
1078 ADD,DC ht,%r0,ht ; add in carry (ht++)
1079
1080 ADD C1,lt,C1 ; c1=c1+lt
1081 ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
1082 LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
1083
1084 ADD C2,ht,C2 ; c2 = c2 + ht
1085 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1086.endm
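;
; SQR_ADD_C2 handles the off-diagonal terms, adding 2*a*b into the
; accumulator; the doubling is why the extra carry into C3 (the
; ADD,DC,*NUV / LDO pair above) is needed. A hedged C sketch under the
; same assumptions as the sqr_add_c sketch:
;
;	/* c3:c2:c1 += 2*a*b */
;	static void sqr_add_c2(BN_ULONG a, BN_ULONG b,
;	    BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
;	{
;		unsigned __int128 t = (unsigned __int128)a * b;
;		BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);
;
;		*c3 += hi >> 63;		/* bit 127 of 2*t       */
;		hi = (hi << 1) | (lo >> 63);	/* bits 64..126 of 2*t  */
;		lo <<= 1;
;		*c1 += lo;
;		if (*c1 < lo && ++hi == 0)	/* carry may ripple up  */
;			(*c3)++;
;		*c2 += hi;
;		*c3 += (*c2 < hi);
;	}
;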
1087
1088;
1089;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
1090; arg0 = r_ptr
1091; arg1 = a_ptr
1092;
1093
1094bn_sqr_comba8
1095 .PROC
1096 .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1097 .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1098 .ENTRY
1099 .align 64
1100
1101 STD %r3,0(%sp) ; save r3
1102 STD %r4,8(%sp) ; save r4
1103 STD %r5,16(%sp) ; save r5
1104 STD %r6,24(%sp) ; save r6
1105
1106 ;
1107 ; Zero out carries
1108 ;
1109 COPY %r0,c1
1110 COPY %r0,c2
1111 COPY %r0,c3
1112
1113 LDO 128(%sp),%sp ; bump stack
1114 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
1115 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1116
1117 ;
1118 ; Load up all of the values we are going to use
1119 ;
1120 FLDD 0(a_ptr),a0
1121 FLDD 8(a_ptr),a1
1122 FLDD 16(a_ptr),a2
1123 FLDD 24(a_ptr),a3
1124 FLDD 32(a_ptr),a4
1125 FLDD 40(a_ptr),a5
1126 FLDD 48(a_ptr),a6
1127 FLDD 56(a_ptr),a7
1128
1129 SQR_ADD_C a0L,a0R,c1,c2,c3
1130 STD c1,0(r_ptr) ; r[0] = c1;
1131 COPY %r0,c1
1132
1133 SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
1134 STD c2,8(r_ptr) ; r[1] = c2;
1135 COPY %r0,c2
1136
1137 SQR_ADD_C a1L,a1R,c3,c1,c2
1138 SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
1139 STD c3,16(r_ptr) ; r[2] = c3;
1140 COPY %r0,c3
1141
1142 SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
1143 SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
1144 STD c1,24(r_ptr) ; r[3] = c1;
1145 COPY %r0,c1
1146
1147 SQR_ADD_C a2L,a2R,c2,c3,c1
1148 SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
1149 SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
1150 STD c2,32(r_ptr) ; r[4] = c2;
1151 COPY %r0,c2
1152
1153 SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
1154 SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
1155 SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
1156 STD c3,40(r_ptr) ; r[5] = c3;
1157 COPY %r0,c3
1158
1159 SQR_ADD_C a3L,a3R,c1,c2,c3
1160 SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
1161 SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
1162 SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
1163 STD c1,48(r_ptr) ; r[6] = c1;
1164 COPY %r0,c1
1165
1166 SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
1167 SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
1168 SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
1169 SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
1170 STD c2,56(r_ptr) ; r[7] = c2;
1171 COPY %r0,c2
1172
1173 SQR_ADD_C a4L,a4R,c3,c1,c2
1174 SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
1175 SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
1176 SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
1177 STD c3,64(r_ptr) ; r[8] = c3;
1178 COPY %r0,c3
1179
1180 SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
1181 SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
1182 SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
1183 STD c1,72(r_ptr) ; r[9] = c1;
1184 COPY %r0,c1
1185
1186 SQR_ADD_C a5L,a5R,c2,c3,c1
1187 SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
1188 SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
1189 STD c2,80(r_ptr) ; r[10] = c2;
1190 COPY %r0,c2
1191
1192 SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
1193 SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
1194 STD c3,88(r_ptr) ; r[11] = c3;
1195 COPY %r0,c3
1196
1197 SQR_ADD_C a6L,a6R,c1,c2,c3
1198 SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
1199 STD c1,96(r_ptr) ; r[12] = c1;
1200 COPY %r0,c1
1201
1202 SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
1203 STD c2,104(r_ptr) ; r[13] = c2;
1204 COPY %r0,c2
1205
1206 SQR_ADD_C a7L,a7R,c3,c1,c2
1207 STD c3, 112(r_ptr) ; r[14] = c3
1208 STD c1, 120(r_ptr) ; r[15] = c1
1209
1210 .EXIT
1211 LDD -104(%sp),%r6 ; restore r6
1212 LDD -112(%sp),%r5 ; restore r5
1213 LDD -120(%sp),%r4 ; restore r4
1214 BVE (%rp)
1215 LDD,MB -128(%sp),%r3
1216
1217 .PROCEND
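;
; The comba schedule above computes result column k (r[k]) as the sum of
; all a[i]*a[j] with i+j == k, rotating the three carry words as it goes.
; A partial C sketch of the same structure, reusing the sqr_add_c and
; sqr_add_c2 sketches above:
;
;	void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
;	{
;		BN_ULONG c1 = 0, c2 = 0, c3 = 0;
;
;		sqr_add_c(a[0], &c1, &c2, &c3);
;		r[0] = c1; c1 = 0;
;		sqr_add_c2(a[1], a[0], &c2, &c3, &c1);
;		r[1] = c2; c2 = 0;
;		/* ... columns 2 through 13 follow the same pattern ... */
;		sqr_add_c(a[7], &c3, &c1, &c2);
;		r[14] = c3;
;		r[15] = c1;
;	}
;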
1218
1219;-----------------------------------------------------------------------------
1220;
1221;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
1222; arg0 = r_ptr
1223; arg1 = a_ptr
1224;
1225
1226bn_sqr_comba4
1227 .proc
1228 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1229 .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1230 .entry
1231 .align 64
1232 STD %r3,0(%sp) ; save r3
1233 STD %r4,8(%sp) ; save r4
1234 STD %r5,16(%sp) ; save r5
1235 STD %r6,24(%sp) ; save r6
1236
1237 ;
1238 ; Zero out carries
1239 ;
1240 COPY %r0,c1
1241 COPY %r0,c2
1242 COPY %r0,c3
1243
1244 LDO 128(%sp),%sp ; bump stack
1245 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
1246 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1247
1248 ;
1249 ; Load up all of the values we are going to use
1250 ;
1251 FLDD 0(a_ptr),a0
1252 FLDD 8(a_ptr),a1
1253 FLDD 16(a_ptr),a2
1254 FLDD 24(a_ptr),a3
1255 FLDD 32(a_ptr),a4
1256 FLDD 40(a_ptr),a5
1257 FLDD 48(a_ptr),a6
1258 FLDD 56(a_ptr),a7
1259
1260 SQR_ADD_C a0L,a0R,c1,c2,c3
1261
1262 STD c1,0(r_ptr) ; r[0] = c1;
1263 COPY %r0,c1
1264
1265 SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
1266
1267 STD c2,8(r_ptr) ; r[1] = c2;
1268 COPY %r0,c2
1269
1270 SQR_ADD_C a1L,a1R,c3,c1,c2
1271 SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
1272
1273 STD c3,16(r_ptr) ; r[2] = c3;
1274 COPY %r0,c3
1275
1276 SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
1277 SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
1278
1279 STD c1,24(r_ptr) ; r[3] = c1;
1280 COPY %r0,c1
1281
1282 SQR_ADD_C a2L,a2R,c2,c3,c1
1283 SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
1284
1285 STD c2,32(r_ptr) ; r[4] = c2;
1286 COPY %r0,c2
1287
1288 SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
1289 STD c3,40(r_ptr) ; r[5] = c3;
1290 COPY %r0,c3
1291
1292 SQR_ADD_C a3L,a3R,c1,c2,c3
1293 STD c1,48(r_ptr) ; r[6] = c1;
1294 STD c2,56(r_ptr) ; r[7] = c2;
1295
1296 .EXIT
1297 LDD -104(%sp),%r6 ; restore r6
1298 LDD -112(%sp),%r5 ; restore r5
1299 LDD -120(%sp),%r4 ; restore r4
1300 BVE (%rp)
1301 LDD,MB -128(%sp),%r3
1302
1303 .PROCEND
1304
1305
1306;---------------------------------------------------------------------------
1307
1308MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
1309 XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
1310 FSTD ftemp1,-16(%sp) ;
1311 XMPYU A0R,B0L,ftemp2 ; m = bh*lt
1312 FSTD ftemp2,-8(%sp) ;
1313 XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
1314 FSTD ftemp3,-32(%sp)
1315 XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
1316 FSTD ftemp4,-24(%sp) ;
1317
1318 LDD -8(%sp),m ; r21 = m
1319 LDD -16(%sp),m1 ; r19 = m1
1320 ADD,L m,m1,m ; m+m1
1321
1322 DEPD,Z m,31,32,temp3 ; (m+m1<<32)
1323 LDD -24(%sp),ht ; r24 = ht
1324
1325 CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
1326 ADD,L ht,high_one,ht ; ht+=high_one
1327
1328 EXTRD,U m,31,32,temp1 ; m >> 32
1329 LDD -32(%sp),lt ; lt
1330 ADD,L ht,temp1,ht ; ht+= m>>32
1331 ADD lt,temp3,lt ; lt = lt+m1
1332 ADD,DC ht,%r0,ht ; ht++
1333
1334 ADD C1,lt,C1 ; c1=c1+lt
1335	ADD,DC	ht,%r0,ht	; add in carry (ht++)
1336
1337 ADD C2,ht,C2 ; c2 = c2 + ht
1338 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1339.endm
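;
; MUL_ADD_C is the plain product accumulate used by the multiply combas:
; c3:c2:c1 += a*b, with no doubling. A minimal C sketch under the same
; assumptions as the squaring sketches above:
;
;	static void mul_add_c(BN_ULONG a, BN_ULONG b,
;	    BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
;	{
;		unsigned __int128 t = (unsigned __int128)a * b;
;		BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);
;
;		*c1 += lo;
;		hi += (*c1 < lo);	/* hi <= 2^64-2, cannot wrap */
;		*c2 += hi;
;		*c3 += (*c2 < hi);
;	}
;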
1340
1341
1342;
1343;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
1344; arg0 = r_ptr
1345; arg1 = a_ptr
1346; arg2 = b_ptr
1347;
1348
1349bn_mul_comba8
1350 .proc
1351 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1352 .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1353 .entry
1354 .align 64
1355
1356 STD %r3,0(%sp) ; save r3
1357 STD %r4,8(%sp) ; save r4
1358 STD %r5,16(%sp) ; save r5
1359 STD %r6,24(%sp) ; save r6
1360	FSTD	%fr12,32(%sp)	; save fr12
1361	FSTD	%fr13,40(%sp)	; save fr13
1362
1363 ;
1364 ; Zero out carries
1365 ;
1366 COPY %r0,c1
1367 COPY %r0,c2
1368 COPY %r0,c3
1369
1370 LDO 128(%sp),%sp ; bump stack
1371 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1372
1373 ;
1374 ; Load up all of the values we are going to use
1375 ;
1376 FLDD 0(a_ptr),a0
1377 FLDD 8(a_ptr),a1
1378 FLDD 16(a_ptr),a2
1379 FLDD 24(a_ptr),a3
1380 FLDD 32(a_ptr),a4
1381 FLDD 40(a_ptr),a5
1382 FLDD 48(a_ptr),a6
1383 FLDD 56(a_ptr),a7
1384
1385 FLDD 0(b_ptr),b0
1386 FLDD 8(b_ptr),b1
1387 FLDD 16(b_ptr),b2
1388 FLDD 24(b_ptr),b3
1389 FLDD 32(b_ptr),b4
1390 FLDD 40(b_ptr),b5
1391 FLDD 48(b_ptr),b6
1392 FLDD 56(b_ptr),b7
1393
1394 MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
1395 STD c1,0(r_ptr)
1396 COPY %r0,c1
1397
1398 MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
1399 MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
1400 STD c2,8(r_ptr)
1401 COPY %r0,c2
1402
1403 MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
1404 MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
1405 MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
1406 STD c3,16(r_ptr)
1407 COPY %r0,c3
1408
1409 MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
1410 MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
1411 MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
1412 MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
1413 STD c1,24(r_ptr)
1414 COPY %r0,c1
1415
1416 MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
1417 MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
1418 MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
1419 MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
1420 MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
1421 STD c2,32(r_ptr)
1422 COPY %r0,c2
1423
1424 MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
1425 MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
1426 MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
1427 MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
1428 MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
1429 MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
1430 STD c3,40(r_ptr)
1431 COPY %r0,c3
1432
1433 MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
1434 MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
1435 MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
1436 MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
1437 MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
1438 MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
1439 MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
1440 STD c1,48(r_ptr)
1441 COPY %r0,c1
1442
1443 MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
1444 MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
1445 MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
1446 MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
1447 MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
1448 MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
1449 MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
1450 MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
1451 STD c2,56(r_ptr)
1452 COPY %r0,c2
1453
1454 MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
1455 MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
1456 MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
1457 MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
1458 MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
1459 MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
1460 MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
1461 STD c3,64(r_ptr)
1462 COPY %r0,c3
1463
1464 MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
1465 MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
1466 MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
1467 MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
1468 MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
1469 MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
1470 STD c1,72(r_ptr)
1471 COPY %r0,c1
1472
1473 MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
1474 MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
1475 MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
1476 MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
1477 MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
1478 STD c2,80(r_ptr)
1479 COPY %r0,c2
1480
1481 MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
1482 MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
1483 MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
1484 MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
1485 STD c3,88(r_ptr)
1486 COPY %r0,c3
1487
1488 MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
1489 MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
1490 MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
1491 STD c1,96(r_ptr)
1492 COPY %r0,c1
1493
1494 MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
1495 MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
1496 STD c2,104(r_ptr)
1497 COPY %r0,c2
1498
1499 MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
1500 STD c3,112(r_ptr)
1501 STD c1,120(r_ptr)
1502
1503 .EXIT
1504 FLDD -88(%sp),%fr13
1505 FLDD -96(%sp),%fr12
1506 LDD -104(%sp),%r6 ; restore r6
1507 LDD -112(%sp),%r5 ; restore r5
1508 LDD -120(%sp),%r4 ; restore r4
1509 BVE (%rp)
1510 LDD,MB -128(%sp),%r3
1511
1512 .PROCEND
1513
1514;-----------------------------------------------------------------------------
1515;
1516;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
1517; arg0 = r_ptr
1518; arg1 = a_ptr
1519; arg2 = b_ptr
1520;
1521
1522bn_mul_comba4
1523 .proc
1524 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1525 .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1526 .entry
1527 .align 64
1528
1529 STD %r3,0(%sp) ; save r3
1530 STD %r4,8(%sp) ; save r4
1531 STD %r5,16(%sp) ; save r5
1532 STD %r6,24(%sp) ; save r6
1533	FSTD	%fr12,32(%sp)	; save fr12
1534	FSTD	%fr13,40(%sp)	; save fr13
1535
1536 ;
1537 ; Zero out carries
1538 ;
1539 COPY %r0,c1
1540 COPY %r0,c2
1541 COPY %r0,c3
1542
1543 LDO 128(%sp),%sp ; bump stack
1544 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1545
1546 ;
1547 ; Load up all of the values we are going to use
1548 ;
1549 FLDD 0(a_ptr),a0
1550 FLDD 8(a_ptr),a1
1551 FLDD 16(a_ptr),a2
1552 FLDD 24(a_ptr),a3
1553
1554 FLDD 0(b_ptr),b0
1555 FLDD 8(b_ptr),b1
1556 FLDD 16(b_ptr),b2
1557 FLDD 24(b_ptr),b3
1558
1559 MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
1560 STD c1,0(r_ptr)
1561 COPY %r0,c1
1562
1563 MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
1564 MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
1565 STD c2,8(r_ptr)
1566 COPY %r0,c2
1567
1568 MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
1569 MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
1570 MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
1571 STD c3,16(r_ptr)
1572 COPY %r0,c3
1573
1574 MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
1575 MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
1576 MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
1577 MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
1578 STD c1,24(r_ptr)
1579 COPY %r0,c1
1580
1581 MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
1582 MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
1583 MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
1584 STD c2,32(r_ptr)
1585 COPY %r0,c2
1586
1587 MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
1588 MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
1589 STD c3,40(r_ptr)
1590 COPY %r0,c3
1591
1592 MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
1593 STD c1,48(r_ptr)
1594 STD c2,56(r_ptr)
1595
1596 .EXIT
1597 FLDD -88(%sp),%fr13
1598 FLDD -96(%sp),%fr12
1599 LDD -104(%sp),%r6 ; restore r6
1600 LDD -112(%sp),%r5 ; restore r5
1601 LDD -120(%sp),%r4 ; restore r4
1602 BVE (%rp)
1603 LDD,MB -128(%sp),%r3
1604
1605 .PROCEND
1606
1607
1608;--- not PIC .SPACE $TEXT$
1609;--- not PIC .SUBSPA $CODE$
1610;--- not PIC .SPACE $PRIVATE$,SORT=16
1611;--- not PIC .IMPORT $global$,DATA
1612;--- not PIC .SPACE $TEXT$
1613;--- not PIC .SUBSPA $CODE$
1614;--- not PIC .SUBSPA $LIT$,ACCESS=0x2c
1615;--- not PIC C$7
1616;--- not PIC .ALIGN 8
1617;--- not PIC .STRINGZ "Division would overflow (%d)\n"
1618 .END
diff --git a/src/lib/libcrypto/bn/asm/pa-risc2W.s b/src/lib/libcrypto/bn/asm/pa-risc2W.s
deleted file mode 100644
index a99545754d..0000000000
--- a/src/lib/libcrypto/bn/asm/pa-risc2W.s
+++ /dev/null
@@ -1,1605 +0,0 @@
1;
2; PA-RISC 64-bit implementation of bn_asm code
3;
4; This code is approximately 2x faster than the C version
5; for RSA/DSA.
6;
7; See http://devresource.hp.com/ for more details on the PA-RISC
8; architecture. Also see the book "PA-RISC 2.0 Architecture"
9; by Gerry Kane for information on the instruction set architecture.
10;
11; Code written by Chris Ruemmler (with some help from the HP C
12; compiler).
13;
14; The code compiles with HP's assembler
15;
16
17 .level 2.0W
18 .space $TEXT$
19 .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
20
21;
22; Global Register definitions used for the routines.
23;
24; Some information about HP's runtime architecture for 64-bits.
25;
26; "Caller save" means the calling function must save the register
27; if it wants the register to be preserved.
28; "Callee save" means if a function uses the register, it must save
29; the value before using it.
30;
31; For the floating point registers
32;
33; "caller save" registers: fr4-fr11, fr22-fr31
34; "callee save" registers: fr12-fr21
35; "special" registers: fr0-fr3 (status and exception registers)
36;
37; For the integer registers
38; value zero : r0
39; "caller save" registers: r1,r19-r26
40; "callee save" registers: r3-r18
41; return register : r2 (rp)
42; return values : r28 (ret0,ret1)
43; Stack pointer : r30 (sp)
44; global data pointer : r27 (dp)
45; argument pointer : r29 (ap)
46; millicode return ptr : r31 (also a caller save register)
47
48
49;
50; Arguments to the routines
51;
52r_ptr .reg %r26
53a_ptr .reg %r25
54b_ptr .reg %r24
55num .reg %r24
56w .reg %r23
57n .reg %r23
58
59
60;
61; Globals used in some routines
62;
63
64top_overflow .reg %r29
65high_mask .reg %r22 ; value 0xffffffff80000000L
66
67
68;------------------------------------------------------------------------------
69;
70; bn_mul_add_words
71;
72;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
73; int num, BN_ULONG w)
74;
75; arg0 = r_ptr
76; arg1 = a_ptr
77; arg2 = num
78; arg3 = w
79;
80; Local register definitions
81;
82
83fm1 .reg %fr22
84fm .reg %fr23
85ht_temp .reg %fr24
86ht_temp_1 .reg %fr25
87lt_temp .reg %fr26
88lt_temp_1 .reg %fr27
89fm1_1 .reg %fr28
90fm_1 .reg %fr29
91
92fw_h .reg %fr7L
93fw_l .reg %fr7R
94fw .reg %fr7
95
96fht_0 .reg %fr8L
97flt_0 .reg %fr8R
98t_float_0 .reg %fr8
99
100fht_1 .reg %fr9L
101flt_1 .reg %fr9R
102t_float_1 .reg %fr9
103
104tmp_0 .reg %r31
105tmp_1 .reg %r21
106m_0 .reg %r20
107m_1 .reg %r19
108ht_0 .reg %r1
109ht_1 .reg %r3
110lt_0 .reg %r4
111lt_1 .reg %r5
112m1_0 .reg %r6
113m1_1 .reg %r7
114rp_val .reg %r8
115rp_val_1 .reg %r9
116
117bn_mul_add_words
118 .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
119 .proc
120 .callinfo frame=128
121 .entry
122 .align 64
123
124 STD %r3,0(%sp) ; save r3
125 STD %r4,8(%sp) ; save r4
126 NOP ; Needed to make the loop 16-byte aligned
127 NOP ; Needed to make the loop 16-byte aligned
128
129 STD %r5,16(%sp) ; save r5
130 STD %r6,24(%sp) ; save r6
131 STD %r7,32(%sp) ; save r7
132 STD %r8,40(%sp) ; save r8
133
134 STD %r9,48(%sp) ; save r9
135 COPY %r0,%ret0 ; return 0 by default
136 DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
137 STD w,56(%sp) ; store w on stack
138
139 CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
140 LDO 128(%sp),%sp ; bump stack
141
142 ;
143 ; The loop is unrolled twice, so if there is only 1 number
144 ; then go straight to the cleanup code.
145 ;
146 CMPIB,= 1,num,bn_mul_add_words_single_top
147 FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
148
149 ;
150 ; This loop is unrolled 2 times (64-byte aligned as well)
151 ;
152 ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
153 ; two 32-bit mutiplies can be issued per cycle.
154 ;
155bn_mul_add_words_unroll2
156
157 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
158	FLDD	8(a_ptr),t_float_1	; load up 64-bit value (fr9) ht(L)/lt(R)
159 LDD 0(r_ptr),rp_val ; rp[0]
160 LDD 8(r_ptr),rp_val_1 ; rp[1]
161
162 XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
163 XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
164 FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
165 FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
166
167 XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
168 XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
169 FSTD fm,-8(%sp) ; -8(sp) = m[0]
170 FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
171
172 XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
173 XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
174 FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
175 FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
176
177 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
178 XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
179 FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
180 FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
181
182 LDD -8(%sp),m_0 ; m[0]
183 LDD -40(%sp),m_1 ; m[1]
184 LDD -16(%sp),m1_0 ; m1[0]
185 LDD -48(%sp),m1_1 ; m1[1]
186
187 LDD -24(%sp),ht_0 ; ht[0]
188 LDD -56(%sp),ht_1 ; ht[1]
189 ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
190 ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
191
192 LDD -32(%sp),lt_0
193 LDD -64(%sp),lt_1
194 CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
195 ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
196
197 CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
198 ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
199 EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
200 DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
201
202 EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
203 DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
204 ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
205 ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
206
207 ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
208 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
209 ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
210 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
211
212 ADD %ret0,lt_0,lt_0 ; lt[0] = lt[0] + c;
213 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
214 ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
215 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
216
217 LDO -2(num),num ; num = num - 2;
218 ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
219 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
220 STD lt_0,0(r_ptr) ; rp[0] = lt[0]
221
222 ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
223 ADD,DC ht_1,%r0,%ret0 ; ht[1]++
224 LDO 16(a_ptr),a_ptr ; a_ptr += 2
225
226 STD lt_1,8(r_ptr) ; rp[1] = lt[1]
227 CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
228 LDO 16(r_ptr),r_ptr ; r_ptr += 2
229
230 CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
231
232 ;
233 ; Top of loop aligned on 64-byte boundary
234 ;
235bn_mul_add_words_single_top
236 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
237 LDD 0(r_ptr),rp_val ; rp[0]
238 LDO 8(a_ptr),a_ptr ; a_ptr++
239 XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
240 FSTD fm1,-16(%sp) ; -16(sp) = m1
241 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
242 FSTD fm,-8(%sp) ; -8(sp) = m
243 XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
244 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
245 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
246 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
247
248 LDD -8(%sp),m_0
249 LDD -16(%sp),m1_0 ; m1 = temp1
250 ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
251 LDD -24(%sp),ht_0
252 LDD -32(%sp),lt_0
253
254 CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
255 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
256
257 EXTRD,U tmp_0,31,32,m_0 ; m>>32
258 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
259
260 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
261 ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
262 ADD,DC ht_0,%r0,ht_0 ; ht++
263 ADD %ret0,tmp_0,lt_0 ; lt = lt + c;
264 ADD,DC ht_0,%r0,ht_0 ; ht++
265 ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
266 ADD,DC ht_0,%r0,%ret0 ; ht++
267 STD lt_0,0(r_ptr) ; rp[0] = lt
268
269bn_mul_add_words_exit
270 .EXIT
271 LDD -80(%sp),%r9 ; restore r9
272 LDD -88(%sp),%r8 ; restore r8
273 LDD -96(%sp),%r7 ; restore r7
274 LDD -104(%sp),%r6 ; restore r6
275 LDD -112(%sp),%r5 ; restore r5
276 LDD -120(%sp),%r4 ; restore r4
277 BVE (%rp)
278 LDD,MB -128(%sp),%r3 ; restore r3
279 .PROCEND ;in=23,24,25,26,29;out=28;
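;
; Semantically the routine above is rp[i] = rp[i] + ap[i]*w + carry over
; all num words, returning the final carry; the unrolled XMPYU code just
; performs each 64x64 multiply in 32-bit halves. A hedged C sketch,
; assuming BN_ULONG is a 64-bit unsigned long and unsigned __int128 is
; available (the HP compiler targeted here has neither need nor support):
;
;	BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap,
;	    int num, BN_ULONG w)
;	{
;		BN_ULONG c = 0;
;
;		while (num-- > 0) {
;			/* a*w + r + c always fits in 128 bits */
;			unsigned __int128 t =
;			    (unsigned __int128)*ap++ * w + *rp + c;
;			*rp++ = (BN_ULONG)t;
;			c = (BN_ULONG)(t >> 64);
;		}
;		return c;
;	}
;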
280
281;----------------------------------------------------------------------------
282;
283;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
284;
285; arg0 = rp
286; arg1 = ap
287; arg2 = num
288; arg3 = w
289
290bn_mul_words
291 .proc
292 .callinfo frame=128
293 .entry
294 .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
295 .align 64
296
297 STD %r3,0(%sp) ; save r3
298 STD %r4,8(%sp) ; save r4
299 STD %r5,16(%sp) ; save r5
300 STD %r6,24(%sp) ; save r6
301
302 STD %r7,32(%sp) ; save r7
303 COPY %r0,%ret0 ; return 0 by default
304 DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
305 STD w,56(%sp) ; w on stack
306
307 CMPIB,>= 0,num,bn_mul_words_exit
308 LDO 128(%sp),%sp ; bump stack
309
310 ;
311 ; See if only 1 word to do, thus just do cleanup
312 ;
313 CMPIB,= 1,num,bn_mul_words_single_top
314 FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
315
316 ;
317 ; This loop is unrolled 2 times (64-byte aligned as well)
318 ;
319 ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
320	; two 32-bit multiplies can be issued per cycle.
321 ;
322bn_mul_words_unroll2
323
324 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
325	FLDD	8(a_ptr),t_float_1	; load up 64-bit value (fr9) ht(L)/lt(R)
326 XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
327 XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
328
329 FSTD fm1,-16(%sp) ; -16(sp) = m1
330 FSTD fm1_1,-48(%sp) ; -48(sp) = m1
331 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
332 XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
333
334 FSTD fm,-8(%sp) ; -8(sp) = m
335 FSTD fm_1,-40(%sp) ; -40(sp) = m
336 XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
337 XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
338
339 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
340 FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
341 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
342 XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
343
344 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
345 FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
346 LDD -8(%sp),m_0
347 LDD -40(%sp),m_1
348
349 LDD -16(%sp),m1_0
350 LDD -48(%sp),m1_1
351 LDD -24(%sp),ht_0
352 LDD -56(%sp),ht_1
353
354 ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
355 ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
356 LDD -32(%sp),lt_0
357 LDD -64(%sp),lt_1
358
359 CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
360 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
361 CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
362 ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
363
364 EXTRD,U tmp_0,31,32,m_0 ; m>>32
365 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
366 EXTRD,U tmp_1,31,32,m_1 ; m>>32
367 DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
368
369 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
370 ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
371 ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
372 ADD,DC ht_0,%r0,ht_0 ; ht++
373
374 ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
375 ADD,DC ht_1,%r0,ht_1 ; ht++
376 ADD %ret0,lt_0,lt_0 ; lt = lt + c (ret0);
377 ADD,DC ht_0,%r0,ht_0 ; ht++
378
379 ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
380 ADD,DC ht_1,%r0,ht_1 ; ht++
381 STD lt_0,0(r_ptr) ; rp[0] = lt
382 STD lt_1,8(r_ptr) ; rp[1] = lt
383
384 COPY ht_1,%ret0 ; carry = ht
385 LDO -2(num),num ; num = num - 2;
386 LDO 16(a_ptr),a_ptr ; ap += 2
387 CMPIB,<= 2,num,bn_mul_words_unroll2
388	LDO	16(r_ptr),r_ptr		; rp += 2
389
390 CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
391
392 ;
393 ; Top of loop aligned on 64-byte boundary
394 ;
395bn_mul_words_single_top
396 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
397
398 XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
399 FSTD fm1,-16(%sp) ; -16(sp) = m1
400 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
401 FSTD fm,-8(%sp) ; -8(sp) = m
402 XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
403 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
404 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
405 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
406
407 LDD -8(%sp),m_0
408 LDD -16(%sp),m1_0
409 ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
410 LDD -24(%sp),ht_0
411 LDD -32(%sp),lt_0
412
413 CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
414 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
415
416 EXTRD,U tmp_0,31,32,m_0 ; m>>32
417 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
418
419 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
420 ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
421 ADD,DC ht_0,%r0,ht_0 ; ht++
422
423 ADD %ret0,lt_0,lt_0 ; lt = lt + c;
424 ADD,DC ht_0,%r0,ht_0 ; ht++
425
426 COPY ht_0,%ret0 ; copy carry
427 STD lt_0,0(r_ptr) ; rp[0] = lt
428
429bn_mul_words_exit
430 .EXIT
431 LDD -96(%sp),%r7 ; restore r7
432 LDD -104(%sp),%r6 ; restore r6
433 LDD -112(%sp),%r5 ; restore r5
434 LDD -120(%sp),%r4 ; restore r4
435 BVE (%rp)
436 LDD,MB -128(%sp),%r3 ; restore r3
437 .PROCEND ;in=23,24,25,26,29;out=28;
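;
; Same idea without the accumulate: rp[i] = ap[i]*w + carry. A minimal C
; sketch under the same assumptions as the bn_mul_add_words sketch above:
;
;	BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap,
;	    int num, BN_ULONG w)
;	{
;		BN_ULONG c = 0;
;
;		while (num-- > 0) {
;			unsigned __int128 t =
;			    (unsigned __int128)*ap++ * w + c;
;			*rp++ = (BN_ULONG)t;
;			c = (BN_ULONG)(t >> 64);
;		}
;		return c;
;	}
;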
438
439;----------------------------------------------------------------------------
440;
441;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
442;
443; arg0 = rp
444; arg1 = ap
445; arg2 = num
446;
447
448bn_sqr_words
449 .proc
450 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
451 .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
452 .entry
453 .align 64
454
455 STD %r3,0(%sp) ; save r3
456 STD %r4,8(%sp) ; save r4
457 NOP
458 STD %r5,16(%sp) ; save r5
459
460 CMPIB,>= 0,num,bn_sqr_words_exit
461 LDO 128(%sp),%sp ; bump stack
462
463 ;
464	; If only 1, then go straight to cleanup
465 ;
466 CMPIB,= 1,num,bn_sqr_words_single_top
467 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
468
469 ;
470 ; This loop is unrolled 2 times (64-byte aligned as well)
471 ;
472
473bn_sqr_words_unroll2
474 FLDD 0(a_ptr),t_float_0 ; a[0]
475 FLDD 8(a_ptr),t_float_1 ; a[1]
476 XMPYU fht_0,flt_0,fm ; m[0]
477 XMPYU fht_1,flt_1,fm_1 ; m[1]
478
479 FSTD fm,-24(%sp) ; store m[0]
480 FSTD fm_1,-56(%sp) ; store m[1]
481 XMPYU flt_0,flt_0,lt_temp ; lt[0]
482 XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
483
484 FSTD lt_temp,-16(%sp) ; store lt[0]
485 FSTD lt_temp_1,-48(%sp) ; store lt[1]
486 XMPYU fht_0,fht_0,ht_temp ; ht[0]
487 XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
488
489 FSTD ht_temp,-8(%sp) ; store ht[0]
490 FSTD ht_temp_1,-40(%sp) ; store ht[1]
491 LDD -24(%sp),m_0
492 LDD -56(%sp),m_1
493
494 AND m_0,high_mask,tmp_0 ; m[0] & Mask
495 AND m_1,high_mask,tmp_1 ; m[1] & Mask
496 DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
497 DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
498
499 LDD -16(%sp),lt_0
500 LDD -48(%sp),lt_1
501 EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
502 EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
503
504 LDD -8(%sp),ht_0
505 LDD -40(%sp),ht_1
506 ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
507 ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
508
509 ADD lt_0,m_0,lt_0 ; lt = lt+m
510 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
511 STD lt_0,0(r_ptr) ; rp[0] = lt[0]
512	STD	ht_0,8(r_ptr)	; rp[1] = ht[0]
513
514 ADD lt_1,m_1,lt_1 ; lt = lt+m
515 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
516 STD lt_1,16(r_ptr) ; rp[2] = lt[1]
517 STD ht_1,24(r_ptr) ; rp[3] = ht[1]
518
519 LDO -2(num),num ; num = num - 2;
520 LDO 16(a_ptr),a_ptr ; ap += 2
521 CMPIB,<= 2,num,bn_sqr_words_unroll2
522 LDO 32(r_ptr),r_ptr ; rp += 4
523
524 CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
525
526 ;
527 ; Top of loop aligned on 64-byte boundary
528 ;
529bn_sqr_words_single_top
530 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
531
532 XMPYU fht_0,flt_0,fm ; m
533 FSTD fm,-24(%sp) ; store m
534
535 XMPYU flt_0,flt_0,lt_temp ; lt
536 FSTD lt_temp,-16(%sp) ; store lt
537
538 XMPYU fht_0,fht_0,ht_temp ; ht
539 FSTD ht_temp,-8(%sp) ; store ht
540
541 LDD -24(%sp),m_0 ; load m
542 AND m_0,high_mask,tmp_0 ; m & Mask
543 DEPD,Z m_0,30,31,m_0 ; m << 32+1
544 LDD -16(%sp),lt_0 ; lt
545
546 LDD -8(%sp),ht_0 ; ht
547 EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
548 ADD m_0,lt_0,lt_0 ; lt = lt+m
549 ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
550 ADD,DC ht_0,%r0,ht_0 ; ht++
551
552 STD lt_0,0(r_ptr) ; rp[0] = lt
553 STD ht_0,8(r_ptr) ; rp[1] = ht
554
555bn_sqr_words_exit
556 .EXIT
557 LDD -112(%sp),%r5 ; restore r5
558 LDD -120(%sp),%r4 ; restore r4
559 BVE (%rp)
560 LDD,MB -128(%sp),%r3
561 .PROCEND ;in=23,24,25,26,29;out=28;
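;
; Each input word is squared into two output words, so rp[] holds 2*num
; words on return; the m << 32+1 shift above is the doubled cross product
; of the word's two 32-bit halves. A hedged C sketch, same assumptions as
; the bn_mul_add_words sketch:
;
;	void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
;	{
;		while (num-- > 0) {
;			unsigned __int128 t =
;			    (unsigned __int128)*ap * *ap;
;			rp[0] = (BN_ULONG)t;		/* low word of ap[i]^2  */
;			rp[1] = (BN_ULONG)(t >> 64);	/* high word            */
;			ap++;
;			rp += 2;
;		}
;	}
;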
562
563
564;----------------------------------------------------------------------------
565;
566;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
567;
568; arg0 = rp
569; arg1 = ap
570; arg2 = bp
571; arg3 = n
572
573t .reg %r22
574b .reg %r21
575l .reg %r20
576
577bn_add_words
578 .proc
579 .entry
580 .callinfo
581 .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
582 .align 64
583
584 CMPIB,>= 0,n,bn_add_words_exit
585 COPY %r0,%ret0 ; return 0 by default
586
587 ;
588 ; If 2 or more numbers do the loop
589 ;
590 CMPIB,= 1,n,bn_add_words_single_top
591 NOP
592
593 ;
594 ; This loop is unrolled 2 times (64-byte aligned as well)
595 ;
596bn_add_words_unroll2
597 LDD 0(a_ptr),t
598 LDD 0(b_ptr),b
599 ADD t,%ret0,t ; t = t+c;
600 ADD,DC %r0,%r0,%ret0 ; set c to carry
601 ADD t,b,l ; l = t + b[0]
602 ADD,DC %ret0,%r0,%ret0 ; c+= carry
603 STD l,0(r_ptr)
604
605 LDD 8(a_ptr),t
606 LDD 8(b_ptr),b
607 ADD t,%ret0,t ; t = t+c;
608 ADD,DC %r0,%r0,%ret0 ; set c to carry
609 ADD t,b,l ; l = t + b[0]
610 ADD,DC %ret0,%r0,%ret0 ; c+= carry
611 STD l,8(r_ptr)
612
613 LDO -2(n),n
614 LDO 16(a_ptr),a_ptr
615 LDO 16(b_ptr),b_ptr
616
617 CMPIB,<= 2,n,bn_add_words_unroll2
618 LDO 16(r_ptr),r_ptr
619
620 CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
621
622bn_add_words_single_top
623 LDD 0(a_ptr),t
624 LDD 0(b_ptr),b
625
626 ADD t,%ret0,t ; t = t+c;
627 ADD,DC %r0,%r0,%ret0 ; set c to carry (could use CMPCLR??)
628 ADD t,b,l ; l = t + b[0]
629 ADD,DC %ret0,%r0,%ret0 ; c+= carry
630 STD l,0(r_ptr)
631
632bn_add_words_exit
633 .EXIT
634 BVE (%rp)
635 NOP
636 .PROCEND ;in=23,24,25,26,29;out=28;
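;
; Two dependent ADD/ADD,DC pairs per word implement r[i] = a[i] + b[i] +
; carry; at most one of the two additions can carry out, so the running
; carry stays 0 or 1. A minimal C sketch, same assumptions as above:
;
;	BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;	{
;		BN_ULONG c = 0;
;		int i;
;
;		for (i = 0; i < n; i++) {
;			BN_ULONG t = a[i] + c;	/* wraps only if a[i] == ~0 */
;			c = (t < c);
;			r[i] = t + b[i];
;			c += (r[i] < b[i]);
;		}
;		return c;
;	}
;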
637
638;----------------------------------------------------------------------------
639;
640;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
641;
642; arg0 = rp
643; arg1 = ap
644; arg2 = bp
645; arg3 = n
646
647t1 .reg %r22
648t2 .reg %r21
649sub_tmp1 .reg %r20
650sub_tmp2 .reg %r19
651
652
653bn_sub_words
654 .proc
655 .callinfo
656 .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
657 .entry
658 .align 64
659
660 CMPIB,>= 0,n,bn_sub_words_exit
661 COPY %r0,%ret0 ; return 0 by default
662
663 ;
664 ; If 2 or more numbers do the loop
665 ;
666 CMPIB,= 1,n,bn_sub_words_single_top
667 NOP
668
669 ;
670 ; This loop is unrolled 2 times (64-byte aligned as well)
671 ;
672bn_sub_words_unroll2
673 LDD 0(a_ptr),t1
674 LDD 0(b_ptr),t2
675 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
676 SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
677
678 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
679 LDO 1(%r0),sub_tmp2
680
681 CMPCLR,*= t1,t2,%r0
682 COPY sub_tmp2,%ret0
683 STD sub_tmp1,0(r_ptr)
684
685 LDD 8(a_ptr),t1
686 LDD 8(b_ptr),t2
687 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
688 SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
689 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
690 LDO 1(%r0),sub_tmp2
691
692 CMPCLR,*= t1,t2,%r0
693 COPY sub_tmp2,%ret0
694 STD sub_tmp1,8(r_ptr)
695
696 LDO -2(n),n
697 LDO 16(a_ptr),a_ptr
698 LDO 16(b_ptr),b_ptr
699
700 CMPIB,<= 2,n,bn_sub_words_unroll2
701 LDO 16(r_ptr),r_ptr
702
703 CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
704
705bn_sub_words_single_top
706 LDD 0(a_ptr),t1
707 LDD 0(b_ptr),t2
708 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
709 SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
710 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
711 LDO 1(%r0),sub_tmp2
712
713 CMPCLR,*= t1,t2,%r0
714 COPY sub_tmp2,%ret0
715
716 STD sub_tmp1,0(r_ptr)
717
718bn_sub_words_exit
719 .EXIT
720 BVE (%rp)
721 NOP
722 .PROCEND ;in=23,24,25,26,29;out=28;
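;
; The CMPCLR pairs above compute the borrow without a branch: the new
; borrow is 1 if a[i] < b[i], unchanged if a[i] == b[i] (the incoming
; borrow decides), and 0 otherwise. A hedged C sketch, same assumptions:
;
;	BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;	{
;		BN_ULONG c = 0;		/* borrow */
;		int i;
;
;		for (i = 0; i < n; i++) {
;			r[i] = a[i] - b[i] - c;
;			c = (a[i] < b[i]) || (a[i] == b[i] && c);
;		}
;		return c;
;	}
;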
723
724;------------------------------------------------------------------------------
725;
726; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
727;
728; arg0 = h
729; arg1 = l
730; arg2 = d
731;
732; This is mainly just modified assembly from the compiler, thus the
733; lack of variable names.
734;
735;------------------------------------------------------------------------------
736bn_div_words
737 .proc
738 .callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
739 .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
740 .IMPORT BN_num_bits_word,CODE,NO_RELOCATION
741 .IMPORT __iob,DATA
742 .IMPORT fprintf,CODE,NO_RELOCATION
743 .IMPORT abort,CODE,NO_RELOCATION
744 .IMPORT $$div2U,MILLICODE
745 .entry
746 STD %r2,-16(%r30)
747 STD,MA %r3,352(%r30)
748 STD %r4,-344(%r30)
749 STD %r5,-336(%r30)
750 STD %r6,-328(%r30)
751 STD %r7,-320(%r30)
752 STD %r8,-312(%r30)
753 STD %r9,-304(%r30)
754 STD %r10,-296(%r30)
755
756 STD %r27,-288(%r30) ; save gp
757
758 COPY %r24,%r3 ; save d
759 COPY %r26,%r4 ; save h (high 64-bits)
760 LDO -1(%r0),%ret0 ; return -1 by default
761
762 CMPB,*= %r0,%arg2,$D3 ; if (d == 0)
763 COPY %r25,%r5 ; save l (low 64-bits)
764
765 LDO -48(%r30),%r29 ; create ap
766 .CALL ;in=26,29;out=28;
767 B,L BN_num_bits_word,%r2
768 COPY %r3,%r26
769 LDD -288(%r30),%r27 ; restore gp
770 LDI 64,%r21
771
772 CMPB,= %r21,%ret0,$00000012 ;if (i == 64) (forward)
773 COPY %ret0,%r24 ; i
774 MTSARCM %r24
775 DEPDI,Z -1,%sar,1,%r29
776 CMPB,*<<,N %r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward)
777
778$00000012
779 SUBI 64,%r24,%r31 ; i = 64 - i;
780 CMPCLR,*<< %r4,%r3,%r0 ; if (h >= d)
781 SUB %r4,%r3,%r4 ; h -= d
782 CMPB,= %r31,%r0,$0000001A ; if (i)
783 COPY %r0,%r10 ; ret = 0
784 MTSARCM %r31 ; i to shift
785 DEPD,Z %r3,%sar,64,%r3 ; d <<= i;
786	SUBI 64,%r31,%r19 ; 64 - i; redundant
787 MTSAR %r19 ; (64 -i) to shift
788 SHRPD %r4,%r5,%sar,%r4 ; l>> (64-i)
789 MTSARCM %r31 ; i to shift
790 DEPD,Z %r5,%sar,64,%r5 ; l <<= i;
791
792$0000001A
793 DEPDI,Z -1,31,32,%r19
794	EXTRD,U %r3,31,32,%r6 ; dh = d >> 32
795	EXTRD,U %r3,63,32,%r8 ; dl = d & 0xffffffff
796 LDO 2(%r0),%r9
797 STD %r3,-280(%r30) ; "d" to stack
798
799$0000001C
800 DEPDI,Z -1,63,32,%r29 ;
801 EXTRD,U %r4,31,32,%r31 ; h >> 32
802 CMPB,*=,N %r31,%r6,$D2 ; if ((h>>32) != dh)(forward) div
803 COPY %r4,%r26
804 EXTRD,U %r4,31,32,%r25
805 COPY %r6,%r24
806 .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
807 B,L $$div2U,%r2
808 EXTRD,U %r6,31,32,%r23
809 DEPD %r28,31,32,%r29
810$D2
811 STD %r29,-272(%r30) ; q
812 AND %r5,%r19,%r24 ; t & 0xffffffff00000000;
813 EXTRD,U %r24,31,32,%r24 ; ???
814 FLDD -272(%r30),%fr7 ; q
815 FLDD -280(%r30),%fr8 ; d
816 XMPYU %fr8L,%fr7L,%fr10
817 FSTD %fr10,-256(%r30)
818 XMPYU %fr8L,%fr7R,%fr22
819 FSTD %fr22,-264(%r30)
820 XMPYU %fr8R,%fr7L,%fr11
821 XMPYU %fr8R,%fr7R,%fr23
822 FSTD %fr11,-232(%r30)
823 FSTD %fr23,-240(%r30)
824 LDD -256(%r30),%r28
825 DEPD,Z %r28,31,32,%r2
826 LDD -264(%r30),%r20
827 ADD,L %r20,%r2,%r31
828 LDD -232(%r30),%r22
829 DEPD,Z %r22,31,32,%r22
830 LDD -240(%r30),%r21
831 B $00000024 ; enter loop
832 ADD,L %r21,%r22,%r23
833
834$0000002A
835 LDO -1(%r29),%r29
836 SUB %r23,%r8,%r23
837$00000024
838 SUB %r4,%r31,%r25
839 AND %r25,%r19,%r26
840 CMPB,*<>,N %r0,%r26,$00000046 ; (forward)
841 DEPD,Z %r25,31,32,%r20
842 OR %r20,%r24,%r21
843 CMPB,*<<,N %r21,%r23,$0000002A ;(backward)
844 SUB %r31,%r6,%r31
845;-------------Break path---------------------
846
847$00000046
848 DEPD,Z %r23,31,32,%r25 ;tl
849 EXTRD,U %r23,31,32,%r26 ;t
850	AND %r25,%r19,%r24 ;tl = (tl<<32)&0xffffffff00000000L
851 ADD,L %r31,%r26,%r31 ;th += t;
852 CMPCLR,*>>= %r5,%r24,%r0 ;if (l<tl)
853 LDO 1(%r31),%r31 ; th++;
854	CMPB,*<<=,N %r31,%r4,$00000036 ;if (h < th) (forward)
855 LDO -1(%r29),%r29 ;q--;
856 ADD,L %r4,%r3,%r4 ;h += d;
857$00000036
858 ADDIB,=,N -1,%r9,$D1 ;if (--count == 0) break (forward)
859 SUB %r5,%r24,%r28 ; l -= tl;
860 SUB %r4,%r31,%r24 ; h -= th;
861 SHRPD %r24,%r28,32,%r4 ; h = ((h<<32)|(l>>32));
862 DEPD,Z %r29,31,32,%r10 ; ret = q<<32
863 b $0000001C
864 DEPD,Z %r28,31,32,%r5 ; l = l << 32
865
866$D1
867 OR %r10,%r29,%r28 ; ret |= q
868$D3
869 LDD -368(%r30),%r2
870$D0
871 LDD -296(%r30),%r10
872 LDD -304(%r30),%r9
873 LDD -312(%r30),%r8
874 LDD -320(%r30),%r7
875 LDD -328(%r30),%r6
876 LDD -336(%r30),%r5
877 LDD -344(%r30),%r4
878 BVE (%r2)
879 .EXIT
880 LDD,MB -352(%r30),%r3
881
882bn_div_err_case
883 MFIA %r6
884 ADDIL L'bn_div_words-bn_div_err_case,%r6,%r1
885 LDO R'bn_div_words-bn_div_err_case(%r1),%r6
886 ADDIL LT'__iob,%r27,%r1
887 LDD RT'__iob(%r1),%r26
888 ADDIL L'C$4-bn_div_words,%r6,%r1
889 LDO R'C$4-bn_div_words(%r1),%r25
890 LDO 64(%r26),%r26
891 .CALL ;in=24,25,26,29;out=28;
892 B,L fprintf,%r2
893 LDO -48(%r30),%r29
894 LDD -288(%r30),%r27
895 .CALL ;in=29;
896 B,L abort,%r2
897 LDO -48(%r30),%r29
898 LDD -288(%r30),%r27
899 B $D0
900 LDD -368(%r30),%r2
901 .PROCEND ;in=24,25,26,29;out=28;
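;
; All of the normalization and millicode above implements a single
; 128/64 divide: h:l is the 128-bit dividend, d the divisor, and the
; quotient is estimated 32 bits at a time. With a 128-bit integer type
; the routine collapses to a one-liner; a hedged C sketch (error paths
; for d == 0 and quotient overflow omitted -- callers keep h < d so the
; quotient fits in one word):
;
;	BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
;	{
;		unsigned __int128 n =
;		    ((unsigned __int128)h << 64) | l;
;		return (BN_ULONG)(n / d);
;	}
;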
902
903;----------------------------------------------------------------------------
904;
905; Registers to hold 64-bit values to manipulate. The "L" part
906; of the register corresponds to the upper 32-bits, while the "R"
907; part corresponds to the lower 32-bits
908;
909; Note that when using b6 and b7, the code must save these before
910; using them because they are callee save registers
911;
912;
913; Floating point registers to use to save values that
914; are manipulated. These don't collide with ftemp1-6 and
915; are all caller save registers
916;
917a0 .reg %fr22
918a0L .reg %fr22L
919a0R .reg %fr22R
920
921a1 .reg %fr23
922a1L .reg %fr23L
923a1R .reg %fr23R
924
925a2 .reg %fr24
926a2L .reg %fr24L
927a2R .reg %fr24R
928
929a3 .reg %fr25
930a3L .reg %fr25L
931a3R .reg %fr25R
932
933a4 .reg %fr26
934a4L .reg %fr26L
935a4R .reg %fr26R
936
937a5 .reg %fr27
938a5L .reg %fr27L
939a5R .reg %fr27R
940
941a6 .reg %fr28
942a6L .reg %fr28L
943a6R .reg %fr28R
944
945a7 .reg %fr29
946a7L .reg %fr29L
947a7R .reg %fr29R
948
949b0 .reg %fr30
950b0L .reg %fr30L
951b0R .reg %fr30R
952
953b1 .reg %fr31
954b1L .reg %fr31L
955b1R .reg %fr31R
956
957;
958; Temporary floating point variables, these are all caller save
959; registers
960;
961ftemp1 .reg %fr4
962ftemp2 .reg %fr5
963ftemp3 .reg %fr6
964ftemp4 .reg %fr7
965
966;
967; The B set of registers when used.
968;
969
970b2 .reg %fr8
971b2L .reg %fr8L
972b2R .reg %fr8R
973
974b3 .reg %fr9
975b3L .reg %fr9L
976b3R .reg %fr9R
977
978b4 .reg %fr10
979b4L .reg %fr10L
980b4R .reg %fr10R
981
982b5 .reg %fr11
983b5L .reg %fr11L
984b5R .reg %fr11R
985
986b6 .reg %fr12
987b6L .reg %fr12L
988b6R .reg %fr12R
989
990b7 .reg %fr13
991b7L .reg %fr13L
992b7R .reg %fr13R
993
994c1 .reg %r21 ; only reg
995temp1 .reg %r20 ; only reg
996temp2 .reg %r19 ; only reg
997temp3 .reg %r31 ; only reg
998
999m1 .reg %r28
1000c2 .reg %r23
1001high_one .reg %r1
1002ht .reg %r6
1003lt .reg %r5
1004m .reg %r4
1005c3 .reg %r3
1006
1007SQR_ADD_C .macro A0L,A0R,C1,C2,C3
1008 XMPYU A0L,A0R,ftemp1 ; m
1009 FSTD ftemp1,-24(%sp) ; store m
1010
1011 XMPYU A0R,A0R,ftemp2 ; lt
1012 FSTD ftemp2,-16(%sp) ; store lt
1013
1014 XMPYU A0L,A0L,ftemp3 ; ht
1015 FSTD ftemp3,-8(%sp) ; store ht
1016
1017 LDD -24(%sp),m ; load m
1018 AND m,high_mask,temp2 ; m & Mask
1019 DEPD,Z m,30,31,temp3 ; m << 32+1
1020 LDD -16(%sp),lt ; lt
1021
1022 LDD -8(%sp),ht ; ht
1023 EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
1024 ADD temp3,lt,lt ; lt = lt+m
1025 ADD,L ht,temp1,ht ; ht += temp1
1026 ADD,DC ht,%r0,ht ; ht++
1027
1028 ADD C1,lt,C1 ; c1=c1+lt
1029 ADD,DC ht,%r0,ht ; ht++
1030
1031 ADD C2,ht,C2 ; c2=c2+ht
1032 ADD,DC C3,%r0,C3 ; c3++
1033.endm
1034
1035SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
1036 XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
1037 FSTD ftemp1,-16(%sp) ;
1038 XMPYU A0R,A1L,ftemp2 ; m = bh*lt
1039 FSTD ftemp2,-8(%sp) ;
1040 XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
1041 FSTD ftemp3,-32(%sp)
1042 XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
1043 FSTD ftemp4,-24(%sp) ;
1044
1045 LDD -8(%sp),m ; r21 = m
1046 LDD -16(%sp),m1 ; r19 = m1
1047 ADD,L m,m1,m ; m+m1
1048
1049 DEPD,Z m,31,32,temp3 ; (m+m1<<32)
1050 LDD -24(%sp),ht ; r24 = ht
1051
1052 CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
1053 ADD,L ht,high_one,ht ; ht+=high_one
1054
1055 EXTRD,U m,31,32,temp1 ; m >> 32
1056 LDD -32(%sp),lt ; lt
1057 ADD,L ht,temp1,ht ; ht+= m>>32
1058 ADD lt,temp3,lt ; lt = lt+m1
1059 ADD,DC ht,%r0,ht ; ht++
1060
1061 ADD ht,ht,ht ; ht=ht+ht;
1062 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1063
1064 ADD lt,lt,lt ; lt=lt+lt;
1065 ADD,DC ht,%r0,ht ; add in carry (ht++)
1066
1067 ADD C1,lt,C1 ; c1=c1+lt
1068 ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
1069 LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
1070
1071 ADD C2,ht,C2 ; c2 = c2 + ht
1072 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1073.endm
1074
1075;
1076;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
1077; arg0 = r_ptr
1078; arg1 = a_ptr
1079;
1080
1081bn_sqr_comba8
1082 .PROC
1083 .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1084 .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1085 .ENTRY
1086 .align 64
1087
1088 STD %r3,0(%sp) ; save r3
1089 STD %r4,8(%sp) ; save r4
1090 STD %r5,16(%sp) ; save r5
1091 STD %r6,24(%sp) ; save r6
1092
1093 ;
1094 ; Zero out carries
1095 ;
1096 COPY %r0,c1
1097 COPY %r0,c2
1098 COPY %r0,c3
1099
1100 LDO 128(%sp),%sp ; bump stack
1101 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
1102 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1103
1104 ;
1105 ; Load up all of the values we are going to use
1106 ;
1107 FLDD 0(a_ptr),a0
1108 FLDD 8(a_ptr),a1
1109 FLDD 16(a_ptr),a2
1110 FLDD 24(a_ptr),a3
1111 FLDD 32(a_ptr),a4
1112 FLDD 40(a_ptr),a5
1113 FLDD 48(a_ptr),a6
1114 FLDD 56(a_ptr),a7
1115
1116 SQR_ADD_C a0L,a0R,c1,c2,c3
1117 STD c1,0(r_ptr) ; r[0] = c1;
1118 COPY %r0,c1
1119
1120 SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
1121 STD c2,8(r_ptr) ; r[1] = c2;
1122 COPY %r0,c2
1123
1124 SQR_ADD_C a1L,a1R,c3,c1,c2
1125 SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
1126 STD c3,16(r_ptr) ; r[2] = c3;
1127 COPY %r0,c3
1128
1129 SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
1130 SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
1131 STD c1,24(r_ptr) ; r[3] = c1;
1132 COPY %r0,c1
1133
1134 SQR_ADD_C a2L,a2R,c2,c3,c1
1135 SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
1136 SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
1137 STD c2,32(r_ptr) ; r[4] = c2;
1138 COPY %r0,c2
1139
1140 SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
1141 SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
1142 SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
1143 STD c3,40(r_ptr) ; r[5] = c3;
1144 COPY %r0,c3
1145
1146 SQR_ADD_C a3L,a3R,c1,c2,c3
1147 SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
1148 SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
1149 SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
1150 STD c1,48(r_ptr) ; r[6] = c1;
1151 COPY %r0,c1
1152
1153 SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
1154 SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
1155 SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
1156 SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
1157 STD c2,56(r_ptr) ; r[7] = c2;
1158 COPY %r0,c2
1159
1160 SQR_ADD_C a4L,a4R,c3,c1,c2
1161 SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
1162 SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
1163 SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
1164 STD c3,64(r_ptr) ; r[8] = c3;
1165 COPY %r0,c3
1166
1167 SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
1168 SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
1169 SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
1170 STD c1,72(r_ptr) ; r[9] = c1;
1171 COPY %r0,c1
1172
1173 SQR_ADD_C a5L,a5R,c2,c3,c1
1174 SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
1175 SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
1176 STD c2,80(r_ptr) ; r[10] = c2;
1177 COPY %r0,c2
1178
1179 SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
1180 SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
1181 STD c3,88(r_ptr) ; r[11] = c3;
1182 COPY %r0,c3
1183
1184 SQR_ADD_C a6L,a6R,c1,c2,c3
1185 SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
1186 STD c1,96(r_ptr) ; r[12] = c1;
1187 COPY %r0,c1
1188
1189 SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
1190 STD c2,104(r_ptr) ; r[13] = c2;
1191 COPY %r0,c2
1192
1193 SQR_ADD_C a7L,a7R,c3,c1,c2
1194 STD c3, 112(r_ptr) ; r[14] = c3
1195 STD c1, 120(r_ptr) ; r[15] = c1
1196
1197 .EXIT
1198 LDD -104(%sp),%r6 ; restore r6
1199 LDD -112(%sp),%r5 ; restore r5
1200 LDD -120(%sp),%r4 ; restore r4
1201 BVE (%rp)
1202 LDD,MB -128(%sp),%r3
1203
1204 .PROCEND
1205
1206;-----------------------------------------------------------------------------
1207;
1208;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
1209; arg0 = r_ptr
1210; arg1 = a_ptr
1211;
1212
1213bn_sqr_comba4
1214 .proc
1215 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1216 .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1217 .entry
1218 .align 64
1219 STD %r3,0(%sp) ; save r3
1220 STD %r4,8(%sp) ; save r4
1221 STD %r5,16(%sp) ; save r5
1222 STD %r6,24(%sp) ; save r6
1223
1224 ;
1225 ; Zero out carries
1226 ;
1227 COPY %r0,c1
1228 COPY %r0,c2
1229 COPY %r0,c3
1230
1231 LDO 128(%sp),%sp ; bump stack
1232 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
1233 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1234
1235 ;
1236 ; Load up all of the values we are going to use
1237 ;
1238 FLDD 0(a_ptr),a0
1239 FLDD 8(a_ptr),a1
1240 FLDD 16(a_ptr),a2
1241 FLDD 24(a_ptr),a3
1242 FLDD 32(a_ptr),a4
1243 FLDD 40(a_ptr),a5
1244 FLDD 48(a_ptr),a6
1245 FLDD 56(a_ptr),a7
1246
1247 SQR_ADD_C a0L,a0R,c1,c2,c3
1248
1249 STD c1,0(r_ptr) ; r[0] = c1;
1250 COPY %r0,c1
1251
1252 SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
1253
1254 STD c2,8(r_ptr) ; r[1] = c2;
1255 COPY %r0,c2
1256
1257 SQR_ADD_C a1L,a1R,c3,c1,c2
1258 SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
1259
1260 STD c3,16(r_ptr) ; r[2] = c3;
1261 COPY %r0,c3
1262
1263 SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
1264 SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
1265
1266 STD c1,24(r_ptr) ; r[3] = c1;
1267 COPY %r0,c1
1268
1269 SQR_ADD_C a2L,a2R,c2,c3,c1
1270 SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
1271
1272 STD c2,32(r_ptr) ; r[4] = c2;
1273 COPY %r0,c2
1274
1275 SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
1276 STD c3,40(r_ptr) ; r[5] = c3;
1277 COPY %r0,c3
1278
1279 SQR_ADD_C a3L,a3R,c1,c2,c3
1280 STD c1,48(r_ptr) ; r[6] = c1;
1281 STD c2,56(r_ptr) ; r[7] = c2;
1282
1283 .EXIT
1284 LDD -104(%sp),%r6 ; restore r6
1285 LDD -112(%sp),%r5 ; restore r5
1286 LDD -120(%sp),%r4 ; restore r4
1287 BVE (%rp)
1288 LDD,MB -128(%sp),%r3
1289
1290 .PROCEND
1291
1292
1293;---------------------------------------------------------------------------
1294
1295MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
1296 XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
1297 FSTD ftemp1,-16(%sp) ;
1298 XMPYU A0R,B0L,ftemp2 ; m = bh*lt
1299 FSTD ftemp2,-8(%sp) ;
1300 XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
1301 FSTD ftemp3,-32(%sp)
1302 XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
1303 FSTD ftemp4,-24(%sp) ;
1304
1305 LDD -8(%sp),m ; r21 = m
1306 LDD -16(%sp),m1 ; r19 = m1
1307 ADD,L m,m1,m ; m+m1
1308
1309 DEPD,Z m,31,32,temp3 ; (m+m1<<32)
1310 LDD -24(%sp),ht ; r24 = ht
1311
1312 CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
1313 ADD,L ht,high_one,ht ; ht+=high_one
1314
1315 EXTRD,U m,31,32,temp1 ; m >> 32
1316 LDD -32(%sp),lt ; lt
1317 ADD,L ht,temp1,ht ; ht+= m>>32
1318 ADD lt,temp3,lt ; lt = lt+m1
1319 ADD,DC ht,%r0,ht ; ht++
1320
1321 ADD C1,lt,C1 ; c1=c1+lt
1322	ADD,DC	ht,%r0,ht	; add in carry (ht++)
1323
1324 ADD C2,ht,C2 ; c2 = c2 + ht
1325 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1326.endm
1327
1328
1329;
1330;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
1331; arg0 = r_ptr
1332; arg1 = a_ptr
1333; arg2 = b_ptr
1334;
1335
1336bn_mul_comba8
1337 .proc
1338 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1339 .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1340 .entry
1341 .align 64
1342
1343 STD %r3,0(%sp) ; save r3
1344 STD %r4,8(%sp) ; save r4
1345 STD %r5,16(%sp) ; save r5
1346 STD %r6,24(%sp) ; save r6
1347	FSTD	%fr12,32(%sp)	; save fr12
1348	FSTD	%fr13,40(%sp)	; save fr13
1349
1350 ;
1351 ; Zero out carries
1352 ;
1353 COPY %r0,c1
1354 COPY %r0,c2
1355 COPY %r0,c3
1356
1357 LDO 128(%sp),%sp ; bump stack
1358 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1359
1360 ;
1361 ; Load up all of the values we are going to use
1362 ;
1363 FLDD 0(a_ptr),a0
1364 FLDD 8(a_ptr),a1
1365 FLDD 16(a_ptr),a2
1366 FLDD 24(a_ptr),a3
1367 FLDD 32(a_ptr),a4
1368 FLDD 40(a_ptr),a5
1369 FLDD 48(a_ptr),a6
1370 FLDD 56(a_ptr),a7
1371
1372 FLDD 0(b_ptr),b0
1373 FLDD 8(b_ptr),b1
1374 FLDD 16(b_ptr),b2
1375 FLDD 24(b_ptr),b3
1376 FLDD 32(b_ptr),b4
1377 FLDD 40(b_ptr),b5
1378 FLDD 48(b_ptr),b6
1379 FLDD 56(b_ptr),b7
1380
1381 MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
1382 STD c1,0(r_ptr)
1383 COPY %r0,c1
1384
1385 MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
1386 MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
1387 STD c2,8(r_ptr)
1388 COPY %r0,c2
1389
1390 MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
1391 MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
1392 MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
1393 STD c3,16(r_ptr)
1394 COPY %r0,c3
1395
1396 MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
1397 MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
1398 MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
1399 MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
1400 STD c1,24(r_ptr)
1401 COPY %r0,c1
1402
1403 MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
1404 MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
1405 MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
1406 MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
1407 MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
1408 STD c2,32(r_ptr)
1409 COPY %r0,c2
1410
1411 MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
1412 MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
1413 MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
1414 MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
1415 MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
1416 MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
1417 STD c3,40(r_ptr)
1418 COPY %r0,c3
1419
1420 MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
1421 MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
1422 MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
1423 MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
1424 MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
1425 MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
1426 MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
1427 STD c1,48(r_ptr)
1428 COPY %r0,c1
1429
1430 MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
1431 MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
1432 MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
1433 MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
1434 MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
1435 MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
1436 MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
1437 MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
1438 STD c2,56(r_ptr)
1439 COPY %r0,c2
1440
1441 MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
1442 MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
1443 MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
1444 MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
1445 MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
1446 MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
1447 MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
1448 STD c3,64(r_ptr)
1449 COPY %r0,c3
1450
1451 MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
1452 MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
1453 MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
1454 MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
1455 MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
1456 MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
1457 STD c1,72(r_ptr)
1458 COPY %r0,c1
1459
1460 MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
1461 MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
1462 MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
1463 MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
1464 MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
1465 STD c2,80(r_ptr)
1466 COPY %r0,c2
1467
1468 MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
1469 MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
1470 MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
1471 MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
1472 STD c3,88(r_ptr)
1473 COPY %r0,c3
1474
1475 MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
1476 MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
1477 MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
1478 STD c1,96(r_ptr)
1479 COPY %r0,c1
1480
1481 MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
1482 MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
1483 STD c2,104(r_ptr)
1484 COPY %r0,c2
1485
1486 MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
1487 STD c3,112(r_ptr)
1488 STD c1,120(r_ptr)
1489
1490 .EXIT
1491 FLDD -88(%sp),%fr13
1492 FLDD -96(%sp),%fr12
1493 LDD -104(%sp),%r6 ; restore r6
1494 LDD -112(%sp),%r5 ; restore r5
1495 LDD -120(%sp),%r4 ; restore r4
1496 BVE (%rp)
1497 LDD,MB -128(%sp),%r3
1498
1499 .PROCEND
1500
1501;-----------------------------------------------------------------------------
1502;
1503;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
1504; arg0 = r_ptr
1505; arg1 = a_ptr
1506; arg2 = b_ptr
1507;
1508
1509bn_mul_comba4
1510 .proc
1511 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1512 .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1513 .entry
1514 .align 64
1515
1516 STD %r3,0(%sp) ; save r3
1517 STD %r4,8(%sp) ; save r4
1518 STD %r5,16(%sp) ; save r5
1519 STD %r6,24(%sp) ; save r6
1520	FSTD	%fr12,32(%sp)	; save fr12
1521	FSTD	%fr13,40(%sp)	; save fr13
1522
1523 ;
1524 ; Zero out carries
1525 ;
1526 COPY %r0,c1
1527 COPY %r0,c2
1528 COPY %r0,c3
1529
1530 LDO 128(%sp),%sp ; bump stack
1531 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1532
1533 ;
1534 ; Load up all of the values we are going to use
1535 ;
1536 FLDD 0(a_ptr),a0
1537 FLDD 8(a_ptr),a1
1538 FLDD 16(a_ptr),a2
1539 FLDD 24(a_ptr),a3
1540
1541 FLDD 0(b_ptr),b0
1542 FLDD 8(b_ptr),b1
1543 FLDD 16(b_ptr),b2
1544 FLDD 24(b_ptr),b3
1545
1546 MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
1547 STD c1,0(r_ptr)
1548 COPY %r0,c1
1549
1550 MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
1551 MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
1552 STD c2,8(r_ptr)
1553 COPY %r0,c2
1554
1555 MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
1556 MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
1557 MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
1558 STD c3,16(r_ptr)
1559 COPY %r0,c3
1560
1561 MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
1562 MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
1563 MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
1564 MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
1565 STD c1,24(r_ptr)
1566 COPY %r0,c1
1567
1568 MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
1569 MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
1570 MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
1571 STD c2,32(r_ptr)
1572 COPY %r0,c2
1573
1574 MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
1575 MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
1576 STD c3,40(r_ptr)
1577 COPY %r0,c3
1578
1579 MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
1580 STD c1,48(r_ptr)
1581 STD c2,56(r_ptr)
1582
1583 .EXIT
1584 FLDD -88(%sp),%fr13
1585 FLDD -96(%sp),%fr12
1586 LDD -104(%sp),%r6 ; restore r6
1587 LDD -112(%sp),%r5 ; restore r5
1588 LDD -120(%sp),%r4 ; restore r4
1589 BVE (%rp)
1590 LDD,MB -128(%sp),%r3
1591
1592 .PROCEND
1593
1594
1595 .SPACE $TEXT$
1596 .SUBSPA $CODE$
1597 .SPACE $PRIVATE$,SORT=16
1598 .IMPORT $global$,DATA
1599 .SPACE $TEXT$
1600 .SUBSPA $CODE$
1601 .SUBSPA $LIT$,ACCESS=0x2c
1602C$4
1603 .ALIGN 8
1604 .STRINGZ "Division would overflow (%d)\n"
1605 .END
diff --git a/src/lib/libcrypto/bn/asm/ppc-mont.pl b/src/lib/libcrypto/bn/asm/ppc-mont.pl
deleted file mode 100644
index 7849eae959..0000000000
--- a/src/lib/libcrypto/bn/asm/ppc-mont.pl
+++ /dev/null
@@ -1,323 +0,0 @@
1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# April 2006
11
12# "Teaser" Montgomery multiplication module for PowerPC. It's possible
13# to gain a bit more by modulo-scheduling the outer loop; a dedicated
14# squaring procedure should give a further 20%, and the code can be
15# adapted for a 32-bit application running on a 64-bit CPU. As for the
16# latter: it won't be able to achieve "native" 64-bit performance,
17# because in a 32-bit application context every addc instruction has to
18# be expanded into addc, two right shifts by 32 and finally adde, etc.
19# So far RSA *sign* performance improvement over pre-bn_mul_mont asm
20# for 64-bit application running on PPC970/G5 is:
21#
22# 512-bit +65%
23# 1024-bit +35%
24# 2048-bit +18%
25# 4096-bit +4%
26
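
For orientation, what bn_mul_mont computes can be modelled in C roughly as below: rp[] = ap[]*bp[]*R^-1 mod np[], with R = 2^(BN_BITS2*num). This is a simplified, unscheduled sketch of the 64-bit flavour under stated assumptions (the __int128 type and the VLA standing in for the stack frame are illustration only, not this module's method):

	typedef unsigned long BN_ULONG;		/* 64-bit flavour assumed */
	typedef unsigned __int128 u128;		/* compiler extension assumed */

	static int bn_mul_mont_ref(BN_ULONG *rp, const BN_ULONG *ap,
	                           const BN_ULONG *bp, const BN_ULONG *np,
	                           const BN_ULONG *n0, int num)
	{
		BN_ULONG tp[num + 1];	/* C99 VLA in place of the stack frame */
		BN_ULONG c, m, ovf, borrow;
		u128 t;
		int i, j;

		if (num < 4)
			return 0;	/* the asm bails out for tiny sizes */
		for (j = 0; j <= num; j++)
			tp[j] = 0;

		for (i = 0; i < num; i++) {
			for (c = 0, j = 0; j < num; j++) {	/* tp += ap*bp[i] */
				t = (u128)ap[j] * bp[i] + tp[j] + c;
				tp[j] = (BN_ULONG)t;
				c = (BN_ULONG)(t >> 64);
			}
			t = (u128)tp[num] + c;
			tp[num] = (BN_ULONG)t;
			ovf = (BN_ULONG)(t >> 64);

			m = tp[0] * n0[0];	/* Montgomery factor, mod 2^64 */
			for (c = 0, j = 0; j < num; j++) {	/* (tp += np*m) >>= 64 */
				t = (u128)np[j] * m + tp[j] + c;
				if (j > 0)
					tp[j - 1] = (BN_ULONG)t; /* low word of j==0 is 0 */
				c = (BN_ULONG)(t >> 64);
			}
			t = (u128)tp[num] + c;
			tp[num - 1] = (BN_ULONG)t;
			tp[num] = ovf + (BN_ULONG)(t >> 64);	/* upmost overflow */
		}

		for (borrow = 0, j = 0; j < num; j++) {	/* rp = tp - np */
			BN_ULONG d = tp[j] - np[j] - borrow;
			borrow = (tp[j] < np[j]) || (tp[j] == np[j] && borrow);
			rp[j] = d;
		}
		if (tp[num] == 0 && borrow)	/* tp < np: keep tp instead */
			for (j = 0; j < num; j++)
				rp[j] = tp[j];
		return 1;
	}
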
27$flavour = shift;
28
29if ($flavour =~ /32/) {
30 $BITS= 32;
31 $BNSZ= $BITS/8;
32 $SIZE_T=4;
33 $RZONE= 224;
34 $FRAME= $SIZE_T*16;
35
36 $LD= "lwz"; # load
37 $LDU= "lwzu"; # load and update
38 $LDX= "lwzx"; # load indexed
39 $ST= "stw"; # store
40 $STU= "stwu"; # store and update
41 $STX= "stwx"; # store indexed
42 $STUX= "stwux"; # store indexed and update
43 $UMULL= "mullw"; # unsigned multiply low
44 $UMULH= "mulhwu"; # unsigned multiply high
45 $UCMP= "cmplw"; # unsigned compare
46 $SHRI= "srwi"; # unsigned shift right by immediate
47 $PUSH= $ST;
48 $POP= $LD;
49} elsif ($flavour =~ /64/) {
50 $BITS= 64;
51 $BNSZ= $BITS/8;
52 $SIZE_T=8;
53 $RZONE= 288;
54 $FRAME= $SIZE_T*16;
55
56 # same as above, but 64-bit mnemonics...
57 $LD= "ld"; # load
58 $LDU= "ldu"; # load and update
59 $LDX= "ldx"; # load indexed
60 $ST= "std"; # store
61 $STU= "stdu"; # store and update
62 $STX= "stdx"; # store indexed
63 $STUX= "stdux"; # store indexed and update
64 $UMULL= "mulld"; # unsigned multiply low
65 $UMULH= "mulhdu"; # unsigned multiply high
66 $UCMP= "cmpld"; # unsigned compare
67 $SHRI= "srdi"; # unsigned shift right by immediate
68 $PUSH= $ST;
69 $POP= $LD;
70} else { die "nonsense $flavour"; }
71
72$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
73( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
74( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
75die "can't locate ppc-xlate.pl";
76
77open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
78
79$sp="r1";
80$toc="r2";
81$rp="r3"; $ovf="r3";
82$ap="r4";
83$bp="r5";
84$np="r6";
85$n0="r7";
86$num="r8";
87$rp="r9"; # $rp is reassigned
88$aj="r10";
89$nj="r11";
90$tj="r12";
91# non-volatile registers
92$i="r14";
93$j="r15";
94$tp="r16";
95$m0="r17";
96$m1="r18";
97$lo0="r19";
98$hi0="r20";
99$lo1="r21";
100$hi1="r22";
101$alo="r23";
102$ahi="r24";
103$nlo="r25";
104#
105$nhi="r0";
106
107$code=<<___;
108.machine "any"
109.text
110
111.globl .bn_mul_mont
112.align 4
113.bn_mul_mont:
114 cmpwi $num,4
115 mr $rp,r3 ; $rp is reassigned
116 li r3,0
117 bltlr
118
119 slwi $num,$num,`log($BNSZ)/log(2)`
120 li $tj,-4096
121 addi $ovf,$num,`$FRAME+$RZONE`
122 subf $ovf,$ovf,$sp ; $sp-$ovf
123 and $ovf,$ovf,$tj ; minimize TLB usage
124 subf $ovf,$sp,$ovf ; $ovf-$sp
125 srwi $num,$num,`log($BNSZ)/log(2)`
126 $STUX $sp,$sp,$ovf
127
128 $PUSH r14,`4*$SIZE_T`($sp)
129 $PUSH r15,`5*$SIZE_T`($sp)
130 $PUSH r16,`6*$SIZE_T`($sp)
131 $PUSH r17,`7*$SIZE_T`($sp)
132 $PUSH r18,`8*$SIZE_T`($sp)
133 $PUSH r19,`9*$SIZE_T`($sp)
134 $PUSH r20,`10*$SIZE_T`($sp)
135 $PUSH r21,`11*$SIZE_T`($sp)
136 $PUSH r22,`12*$SIZE_T`($sp)
137 $PUSH r23,`13*$SIZE_T`($sp)
138 $PUSH r24,`14*$SIZE_T`($sp)
139 $PUSH r25,`15*$SIZE_T`($sp)
140
141 $LD $n0,0($n0) ; pull n0[0] value
142 addi $num,$num,-2 ; adjust $num for counter register
143
144 $LD $m0,0($bp) ; m0=bp[0]
145 $LD $aj,0($ap) ; ap[0]
146 addi $tp,$sp,$FRAME
147 $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0]
148 $UMULH $hi0,$aj,$m0
149
150 $LD $aj,$BNSZ($ap) ; ap[1]
151 $LD $nj,0($np) ; np[0]
152
153 $UMULL $m1,$lo0,$n0 ; "tp[0]"*n0
154
155 $UMULL $alo,$aj,$m0 ; ap[1]*bp[0]
156 $UMULH $ahi,$aj,$m0
157
158 $UMULL $lo1,$nj,$m1 ; np[0]*m1
159 $UMULH $hi1,$nj,$m1
160 $LD $nj,$BNSZ($np) ; np[1]
161 addc $lo1,$lo1,$lo0
162 addze $hi1,$hi1
163
164 $UMULL $nlo,$nj,$m1 ; np[1]*m1
165 $UMULH $nhi,$nj,$m1
166
167 mtctr $num
168 li $j,`2*$BNSZ`
169.align 4
170L1st:
171 $LDX $aj,$ap,$j ; ap[j]
172 addc $lo0,$alo,$hi0
173 $LDX $nj,$np,$j ; np[j]
174 addze $hi0,$ahi
175 $UMULL $alo,$aj,$m0 ; ap[j]*bp[0]
176 addc $lo1,$nlo,$hi1
177 $UMULH $ahi,$aj,$m0
178 addze $hi1,$nhi
179 $UMULL $nlo,$nj,$m1 ; np[j]*m1
180 addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
181 $UMULH $nhi,$nj,$m1
182 addze $hi1,$hi1
183 $ST $lo1,0($tp) ; tp[j-1]
184
185 addi $j,$j,$BNSZ ; j++
186 addi $tp,$tp,$BNSZ ; tp++
187 bdnz- L1st
188;L1st
189 addc $lo0,$alo,$hi0
190 addze $hi0,$ahi
191
192 addc $lo1,$nlo,$hi1
193 addze $hi1,$nhi
194 addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
195 addze $hi1,$hi1
196 $ST $lo1,0($tp) ; tp[j-1]
197
198 li $ovf,0
199 addc $hi1,$hi1,$hi0
200 addze $ovf,$ovf ; upmost overflow bit
201 $ST $hi1,$BNSZ($tp)
202
203 li $i,$BNSZ
204.align 4
205Louter:
206 $LDX $m0,$bp,$i ; m0=bp[i]
207 $LD $aj,0($ap) ; ap[0]
208 addi $tp,$sp,$FRAME
209 $LD $tj,$FRAME($sp) ; tp[0]
210 $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i]
211 $UMULH $hi0,$aj,$m0
212 $LD $aj,$BNSZ($ap) ; ap[1]
213 $LD $nj,0($np) ; np[0]
214 addc $lo0,$lo0,$tj ; ap[0]*bp[i]+tp[0]
215 $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
216 addze $hi0,$hi0
217 $UMULL $m1,$lo0,$n0 ; tp[0]*n0
218 $UMULH $ahi,$aj,$m0
219 $UMULL $lo1,$nj,$m1 ; np[0]*m1
220 $UMULH $hi1,$nj,$m1
221 $LD $nj,$BNSZ($np) ; np[1]
222 addc $lo1,$lo1,$lo0
223 $UMULL $nlo,$nj,$m1 ; np[1]*m1
224 addze $hi1,$hi1
225 $UMULH $nhi,$nj,$m1
226
227 mtctr $num
228 li $j,`2*$BNSZ`
229.align 4
230Linner:
231 $LDX $aj,$ap,$j ; ap[j]
232 addc $lo0,$alo,$hi0
233 $LD $tj,$BNSZ($tp) ; tp[j]
234 addze $hi0,$ahi
235 $LDX $nj,$np,$j ; np[j]
236 addc $lo1,$nlo,$hi1
237 $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
238 addze $hi1,$nhi
239 $UMULH $ahi,$aj,$m0
240 addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
241 $UMULL $nlo,$nj,$m1 ; np[j]*m1
242 addze $hi0,$hi0
243 $UMULH $nhi,$nj,$m1
244 addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
245 addi $j,$j,$BNSZ ; j++
246 addze $hi1,$hi1
247 $ST $lo1,0($tp) ; tp[j-1]
248 addi $tp,$tp,$BNSZ ; tp++
249 bdnz- Linner
250;Linner
251 $LD $tj,$BNSZ($tp) ; tp[j]
252 addc $lo0,$alo,$hi0
253 addze $hi0,$ahi
254 addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
255 addze $hi0,$hi0
256
257 addc $lo1,$nlo,$hi1
258 addze $hi1,$nhi
259 addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
260 addze $hi1,$hi1
261 $ST $lo1,0($tp) ; tp[j-1]
262
263 addic $ovf,$ovf,-1 ; move upmost overflow to XER[CA]
264 li $ovf,0
265 adde $hi1,$hi1,$hi0
266 addze $ovf,$ovf
267 $ST $hi1,$BNSZ($tp)
268;
269 slwi $tj,$num,`log($BNSZ)/log(2)`
270 $UCMP $i,$tj
271 addi $i,$i,$BNSZ
272 ble- Louter
273
274 addi $num,$num,2 ; restore $num
275 subfc $j,$j,$j ; j=0 and "clear" XER[CA]
276 addi $tp,$sp,$FRAME
277 mtctr $num
278
279.align 4
280Lsub: $LDX $tj,$tp,$j
281 $LDX $nj,$np,$j
282 subfe $aj,$nj,$tj ; tp[j]-np[j]
283 $STX $aj,$rp,$j
284 addi $j,$j,$BNSZ
285 bdnz- Lsub
286
287 li $j,0
288 mtctr $num
289 subfe $ovf,$j,$ovf ; handle upmost overflow bit
290 and $ap,$tp,$ovf
291 andc $np,$rp,$ovf
292 or $ap,$ap,$np ; ap=borrow?tp:rp
293
294.align 4
295Lcopy: ; copy or in-place refresh
296 $LDX $tj,$ap,$j
297 $STX $tj,$rp,$j
298 $STX $j,$tp,$j ; zap at once
299 addi $j,$j,$BNSZ
300 bdnz- Lcopy
301
302 $POP r14,`4*$SIZE_T`($sp)
303 $POP r15,`5*$SIZE_T`($sp)
304 $POP r16,`6*$SIZE_T`($sp)
305 $POP r17,`7*$SIZE_T`($sp)
306 $POP r18,`8*$SIZE_T`($sp)
307 $POP r19,`9*$SIZE_T`($sp)
308 $POP r20,`10*$SIZE_T`($sp)
309 $POP r21,`11*$SIZE_T`($sp)
310 $POP r22,`12*$SIZE_T`($sp)
311 $POP r23,`13*$SIZE_T`($sp)
312 $POP r24,`14*$SIZE_T`($sp)
313 $POP r25,`15*$SIZE_T`($sp)
314 $POP $sp,0($sp)
315 li r3,1
316 blr
317 .long 0
318.asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@fy.chalmers.se>"
319___
320
321$code =~ s/\`([^\`]*)\`/eval $1/gem;
322print $code;
323close STDOUT;
diff --git a/src/lib/libcrypto/bn/asm/ppc.pl b/src/lib/libcrypto/bn/asm/ppc.pl
deleted file mode 100644
index 08e0053473..0000000000
--- a/src/lib/libcrypto/bn/asm/ppc.pl
+++ /dev/null
@@ -1,2078 +0,0 @@
1#!/usr/bin/env perl
2#
3# Implemented as a Perl wrapper as we want to support several different
4# architectures with a single file. We pick up the target based on the
5# file name we are asked to generate.
6#
7# It should be noted though that this perl code is nothing like
8# <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
9# as pre-processor to cover for platform differences in name decoration,
10# linker tables, 32-/64-bit instruction sets...
11#
12# As you might know, there are several PowerPC ABIs in use. Most notably,
13# Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
14# are similar enough to implement leaf(!) functions that are ABI
15# neutral. And that's what you find here: ABI-neutral leaf functions.
16# In case you wonder what that is...
17#
18# AIX performance
19#
20# MEASUREMENTS WITH cc ON a 200 MHz PowerPC 604e.
21#
22# The following is the performance of 32-bit compiler
23# generated code:
24#
25# OpenSSL 0.9.6c 21 dec 2001
26# built on: Tue Jun 11 11:06:51 EDT 2002
27# options:bn(64,32) ...
28#compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
29# sign verify sign/s verify/s
30#rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
31#rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
32#rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
33#rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
34#dsa 512 bits 0.0087s 0.0106s 114.3 94.5
35#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
36#
37# Same benchmark with this assembler code:
38#
39#rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
40#rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
41#rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
42#rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
43#dsa 512 bits 0.0052s 0.0062s 191.6 162.0
44#dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
45#
46# Number of operations increases by almost 75%
47#
48# Here are performance numbers for 64-bit compiler
49# generated code:
50#
51# OpenSSL 0.9.6g [engine] 9 Aug 2002
52# built on: Fri Apr 18 16:59:20 EDT 2003
53# options:bn(64,64) ...
54# compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
55# sign verify sign/s verify/s
56#rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
57#rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
58#rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
59#rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
60#dsa 512 bits 0.0026s 0.0032s 382.5 313.7
61#dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
62#
63# Same benchmark with this assembler code:
64#
65#rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
66#rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
67#rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
68#rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
69#dsa 512 bits 0.0016s 0.0020s 610.7 507.1
70#dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
71#
72# Again, performance increases by about 75%
73#
74# Mac OS X, Apple G5 1.8GHz (note this is 32-bit code)
75# OpenSSL 0.9.7c 30 Sep 2003
76#
77# Original code.
78#
79#rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
80#rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
81#rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
82#rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
83#dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
84#dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
85#dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
86#
87# Same benchmark with this assembler code:
88#
89#rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
90#rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
91#rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
92#rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
93#dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
94#dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
95#dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
96#
97# Performance increase of ~60%
98#
99# If you have comments or suggestions to improve code send
100# me a note at schari@us.ibm.com
101#
102
103$opf = shift;
104
105if ($opf =~ /32\.s/) {
106 $BITS= 32;
107 $BNSZ= $BITS/8;
108 $ISA= "\"ppc\"";
109
110 $LD= "lwz"; # load
111 $LDU= "lwzu"; # load and update
112 $ST= "stw"; # store
113 $STU= "stwu"; # store and update
114 $UMULL= "mullw"; # unsigned multiply low
115 $UMULH= "mulhwu"; # unsigned multiply high
116 $UDIV= "divwu"; # unsigned divide
117 $UCMPI= "cmplwi"; # unsigned compare with immediate
118 $UCMP= "cmplw"; # unsigned compare
119 $CNTLZ= "cntlzw"; # count leading zeros
120 $SHL= "slw"; # shift left
121 $SHR= "srw"; # unsigned shift right
122 $SHRI= "srwi"; # unsigned shift right by immediate
123 $SHLI= "slwi"; # shift left by immediate
124 $CLRU= "clrlwi"; # clear upper bits
125 $INSR= "insrwi"; # insert right
126 $ROTL= "rotlwi"; # rotate left by immediate
127 $TR= "tw"; # conditional trap
128} elsif ($opf =~ /64\.s/) {
129 $BITS= 64;
130 $BNSZ= $BITS/8;
131 $ISA= "\"ppc64\"";
132
133 # same as above, but 64-bit mnemonics...
134 $LD= "ld"; # load
135 $LDU= "ldu"; # load and update
136 $ST= "std"; # store
137 $STU= "stdu"; # store and update
138 $UMULL= "mulld"; # unsigned multiply low
139 $UMULH= "mulhdu"; # unsigned multiply high
140 $UDIV= "divdu"; # unsigned divide
141 $UCMPI= "cmpldi"; # unsigned compare with immediate
142 $UCMP= "cmpld"; # unsigned compare
143 $CNTLZ= "cntlzd"; # count leading zeros
144 $SHL= "sld"; # shift left
145 $SHR= "srd"; # unsigned shift right
146 $SHRI= "srdi"; # unsigned shift right by immediate
147 $SHLI= "sldi"; # shift left by immediate
148 $CLRU= "clrldi"; # clear upper bits
149 $INSR= "insrdi"; # insert right
150 $ROTL= "rotldi"; # rotate left by immediate
151 $TR= "td"; # conditional trap
152} else { die "nonsense $opf"; }
153
154( defined shift || open STDOUT,">$opf" ) || die "can't open $opf: $!";
155
156# function entry points from the AIX code
157#
158# There are other, more elegant, ways to handle this. We (IBM) chose
159# this approach as it plays well with scripts we run to 'namespace'
160# OpenSSL, i.e. we add a prefix to all the public symbols so we can
161# co-exist in the same process with other implementations of OpenSSL.
162# 'Cleverer' ways of doing these substitutions tend to hide information
163# we need to keep obvious.
164#
165my @items = ("bn_sqr_comba4",
166 "bn_sqr_comba8",
167 "bn_mul_comba4",
168 "bn_mul_comba8",
169 "bn_sub_words",
170 "bn_add_words",
171 "bn_div_words",
172 "bn_sqr_words",
173 "bn_mul_words",
174 "bn_mul_add_words");
175
176if ($opf =~ /linux/) { do_linux(); }
177elsif ($opf =~ /aix/) { do_aix(); }
178elsif ($opf =~ /osx/) { do_osx(); }
179else { do_bsd(); }
180
181sub do_linux {
182 $d=&data();
183
184 if ($BITS==64) {
185 foreach $t (@items) {
186 $d =~ s/\.$t:/\
187\t.section\t".opd","aw"\
188\t.align\t3\
189\t.globl\t$t\
190$t:\
191\t.quad\t.$t,.TOC.\@tocbase,0\
192\t.size\t$t,24\
193\t.previous\n\
194\t.type\t.$t,\@function\
195\t.globl\t.$t\
196.$t:/g;
197 }
198 }
199 else {
200 foreach $t (@items) {
201 $d=~s/\.$t/$t/g;
202 }
203 }
204 # hide internal labels to avoid pollution of name table...
205 $d=~s/Lppcasm_/.Lppcasm_/gm;
206 print $d;
207}
208
209sub do_aix {
210 # AIX assembler is smart enough to please the linker without
211 # making us do something special...
212 print &data();
213}
214
215# MacOSX 32 bit
216sub do_osx {
217 $d=&data();
218 # Change the bn symbol prefix from '.' to '_'
219 foreach $t (@items) {
220 $d=~s/\.$t/_$t/g;
221 }
222 # Change .machine to something OS X asm will accept
223 $d=~s/\.machine.*/.text/g;
224 $d=~s/\#/;/g; # change comment from '#' to ';'
225 print $d;
226}
227
228# BSD (Untested)
229sub do_bsd {
230 $d=&data();
231 foreach $t (@items) {
232 $d=~s/\.$t/_$t/g;
233 }
234 print $d;
235}
236
237sub data {
238 local($data)=<<EOF;
239#--------------------------------------------------------------------
240#
241#
242#
243#
244# File: ppc32.s
245#
246# Created by: Suresh Chari
247# IBM Thomas J. Watson Research Library
248# Hawthorne, NY
249#
250#
251# Description: Optimized assembly routines for OpenSSL crypto
252# on the 32-bit PowerPC platform.
253#
254#
255# Version History
256#
257# 2. Fixed bn_add, bn_sub and bn_div_words, added comments,
258# cleaned up code. Also made a single version which can
259# be used for both the AIX and Linux compilers. See NOTE
260# below.
261# 12/05/03 Suresh Chari
262# (with lots of help from) Andy Polyakov
263##
264# 1. Initial version 10/20/02 Suresh Chari
265#
266#
267# The following file works for the xlc,cc
268# and gcc compilers.
269#
270# NOTE: To get the file to link correctly with the gcc compiler
271# you have to change the names of the routines and remove
272# the first .(dot) character. This should automatically
273# be done in the build process.
274#
275# Hand optimized assembly code for the following routines
276#
277# bn_sqr_comba4
278# bn_sqr_comba8
279# bn_mul_comba4
280# bn_mul_comba8
281# bn_sub_words
282# bn_add_words
283# bn_div_words
284# bn_sqr_words
285# bn_mul_words
286# bn_mul_add_words
287#
288# NOTE: It is possible to optimize this code more for
289# specific PowerPC or Power architectures. On the Northstar
290# architecture the optimizations in this file do
291# NOT provide much improvement.
292#
293# If you have comments or suggestions to improve code send
294# me a note at schari\@us.ibm.com
295#
296#--------------------------------------------------------------------------
297#
298# Defines to be used in the assembly code.
299#
300.set r0,0 # we use it as storage for value of 0
301.set SP,1 # preserved
302.set RTOC,2 # preserved
303.set r3,3 # 1st argument/return value
304.set r4,4 # 2nd argument/volatile register
305.set r5,5 # 3rd argument/volatile register
306.set r6,6 # ...
307.set r7,7
308.set r8,8
309.set r9,9
310.set r10,10
311.set r11,11
312.set r12,12
313.set r13,13 # not used, nor any other "below" it...
314
315.set BO_IF_NOT,4
316.set BO_IF,12
317.set BO_dCTR_NZERO,16
318.set BO_dCTR_ZERO,18
319.set BO_ALWAYS,20
320.set CR0_LT,0;
321.set CR0_GT,1;
322.set CR0_EQ,2
323.set CR1_FX,4;
324.set CR1_FEX,5;
325.set CR1_VX,6
326.set LR,8
327
328# Declare function names to be global
329# NOTE: For gcc these names MUST be changed to remove
330# the first . i.e. for example change ".bn_sqr_comba4"
331# to "bn_sqr_comba4". This should be automatically done
332# in the build.
333
334 .globl .bn_sqr_comba4
335 .globl .bn_sqr_comba8
336 .globl .bn_mul_comba4
337 .globl .bn_mul_comba8
338 .globl .bn_sub_words
339 .globl .bn_add_words
340 .globl .bn_div_words
341 .globl .bn_sqr_words
342 .globl .bn_mul_words
343 .globl .bn_mul_add_words
344
345# .text section
346
347 .machine $ISA
348
349#
350# NOTE: The following label name should be changed to
351# "bn_sqr_comba4" i.e. remove the first dot
352# for the gcc compiler. This should be automatically
353# done in the build
354#
355
356.align 4
357.bn_sqr_comba4:
358#
359# Optimized version of bn_sqr_comba4.
360#
361# void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
362# r3 contains r
363# r4 contains a
364#
365# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
366#
367# r5,r6 are the two BN_ULONGs being multiplied.
368# r7,r8 are the results of the 32x32 giving 64 bit multiply.
369# r9,r10, r11 are the equivalents of c1,c2, c3.
370# Here's the assembly
371#
372#
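
The sqr_add_c and sqr_add_c2 names in the comments come from the portable comba code. In C terms they add a[i]^2, respectively the doubled cross product 2*a[i]*a[j], into the rotating three-word accumulator; the doubling is what the addc r7,r7,r7 / adde r8,r8,r8 / addze triples below implement. A rough sketch of the 32-bit flavour (illustrative names; adding the product twice is equivalent to doubling it with a carry catch):

	typedef unsigned int BN_ULONG;		/* 32-bit flavour assumed */
	typedef unsigned long long u64;

	/* (c3,c2,c1) += x*y */
	static void mul_add(BN_ULONG x, BN_ULONG y,
	                    BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
	{
		u64 t = (u64)x * y;
		BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 32);

		*c1 += lo;
		hi += (*c1 < lo);	/* hi <= 0xfffffffe here, so no wrap */
		*c2 += hi;
		*c3 += (*c2 < hi);
	}

	/* sqr_add_c(a,i,...):  (c3,c2,c1) += a[i]^2 */
	static void sqr_add_c(const BN_ULONG *a, int i,
	                      BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
	{
		mul_add(a[i], a[i], c1, c2, c3);
	}

	/* sqr_add_c2(a,i,j,...): (c3,c2,c1) += 2*a[i]*a[j], as two additions */
	static void sqr_add_c2(const BN_ULONG *a, int i, int j,
	                       BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
	{
		mul_add(a[i], a[j], c1, c2, c3);
		mul_add(a[i], a[j], c1, c2, c3);
	}
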
373 xor r0,r0,r0 # set r0 = 0. Used in the addze
374 # instructions below
375
376 #sqr_add_c(a,0,c1,c2,c3)
377 $LD r5,`0*$BNSZ`(r4)
378 $UMULL r9,r5,r5
379 $UMULH r10,r5,r5 #in first iteration. No need
380 #to add since c1=c2=c3=0.
381 # Note c3(r11) is NOT set to 0
382 # but will be.
383
384 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
385 # sqr_add_c2(a,1,0,c2,c3,c1);
386 $LD r6,`1*$BNSZ`(r4)
387 $UMULL r7,r5,r6
388 $UMULH r8,r5,r6
389
390 addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
391 adde r8,r8,r8
392 addze r9,r0 # catch carry if any.
393 # r9= r0(=0) and carry
394
395 addc r10,r7,r10 # now add to temp result.
396 addze r11,r8 # r8 added to r11 which is 0
397 addze r9,r9
398
399 $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
400 #sqr_add_c(a,1,c3,c1,c2)
401 $UMULL r7,r6,r6
402 $UMULH r8,r6,r6
403 addc r11,r7,r11
404 adde r9,r8,r9
405 addze r10,r0
406 #sqr_add_c2(a,2,0,c3,c1,c2)
407 $LD r6,`2*$BNSZ`(r4)
408 $UMULL r7,r5,r6
409 $UMULH r8,r5,r6
410
411 addc r7,r7,r7
412 adde r8,r8,r8
413 addze r10,r10
414
415 addc r11,r7,r11
416 adde r9,r8,r9
417 addze r10,r10
418 $ST r11,`2*$BNSZ`(r3) #r[2]=c3
419 #sqr_add_c2(a,3,0,c1,c2,c3);
420 $LD r6,`3*$BNSZ`(r4)
421 $UMULL r7,r5,r6
422 $UMULH r8,r5,r6
423 addc r7,r7,r7
424 adde r8,r8,r8
425 addze r11,r0
426
427 addc r9,r7,r9
428 adde r10,r8,r10
429 addze r11,r11
430 #sqr_add_c2(a,2,1,c1,c2,c3);
431 $LD r5,`1*$BNSZ`(r4)
432 $LD r6,`2*$BNSZ`(r4)
433 $UMULL r7,r5,r6
434 $UMULH r8,r5,r6
435
436 addc r7,r7,r7
437 adde r8,r8,r8
438 addze r11,r11
439 addc r9,r7,r9
440 adde r10,r8,r10
441 addze r11,r11
442 $ST r9,`3*$BNSZ`(r3) #r[3]=c1
443 #sqr_add_c(a,2,c2,c3,c1);
444 $UMULL r7,r6,r6
445 $UMULH r8,r6,r6
446 addc r10,r7,r10
447 adde r11,r8,r11
448 addze r9,r0
449 #sqr_add_c2(a,3,1,c2,c3,c1);
450 $LD r6,`3*$BNSZ`(r4)
451 $UMULL r7,r5,r6
452 $UMULH r8,r5,r6
453 addc r7,r7,r7
454 adde r8,r8,r8
455 addze r9,r9
456
457 addc r10,r7,r10
458 adde r11,r8,r11
459 addze r9,r9
460 $ST r10,`4*$BNSZ`(r3) #r[4]=c2
461 #sqr_add_c2(a,3,2,c3,c1,c2);
462 $LD r5,`2*$BNSZ`(r4)
463 $UMULL r7,r5,r6
464 $UMULH r8,r5,r6
465 addc r7,r7,r7
466 adde r8,r8,r8
467 addze r10,r0
468
469 addc r11,r7,r11
470 adde r9,r8,r9
471 addze r10,r10
472 $ST r11,`5*$BNSZ`(r3) #r[5] = c3
473 #sqr_add_c(a,3,c1,c2,c3);
474 $UMULL r7,r6,r6
475 $UMULH r8,r6,r6
476 addc r9,r7,r9
477 adde r10,r8,r10
478
479 $ST r9,`6*$BNSZ`(r3) #r[6]=c1
480 $ST r10,`7*$BNSZ`(r3) #r[7]=c2
481 bclr BO_ALWAYS,CR0_LT
482 .long 0x00000000
483
484#
485# NOTE: The following label name should be changed to
486# "bn_sqr_comba8" i.e. remove the first dot
487# for the gcc compiler. This should be automatically
488# done in the build
489#
490
491.align 4
492.bn_sqr_comba8:
493#
494# This is an optimized version of the bn_sqr_comba8 routine.
495# It makes heavy use of the adde instruction.
496#
497#
498# void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
499# r3 contains r
500# r4 contains a
501#
502# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
503#
504# r5,r6 are the two BN_ULONGs being multiplied.
505# r7,r8 are the results of the 32x32 giving 64 bit multiply.
506# r9,r10, r11 are the equivalents of c1,c2, c3.
507#
508# The possible optimization of loading all 8 longs of a into registers
509# doesn't provide any speedup.
510#
511
512 xor r0,r0,r0 #set r0 = 0.Used in addze
513 #instructions below.
514
515 #sqr_add_c(a,0,c1,c2,c3);
516 $LD r5,`0*$BNSZ`(r4)
517 $UMULL r9,r5,r5 #1st iteration: no carries.
518 $UMULH r10,r5,r5
519 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
520 #sqr_add_c2(a,1,0,c2,c3,c1);
521 $LD r6,`1*$BNSZ`(r4)
522 $UMULL r7,r5,r6
523 $UMULH r8,r5,r6
524
525	addc	r10,r7,r10	#add the two-register number
526	adde	r11,r8,r0	# (r8,r7) to the three-register
527	addze	r9,r0		# number (r9,r11,r10). NOTE: r0=0
528
529	addc	r10,r7,r10	#add the two-register number
530	adde	r11,r8,r11	# (r8,r7) to the three-register
531	addze	r9,r9		# number (r9,r11,r10).
532
533 $ST r10,`1*$BNSZ`(r3) # r[1]=c2
534
535 #sqr_add_c(a,1,c3,c1,c2);
536 $UMULL r7,r6,r6
537 $UMULH r8,r6,r6
538 addc r11,r7,r11
539 adde r9,r8,r9
540 addze r10,r0
541 #sqr_add_c2(a,2,0,c3,c1,c2);
542 $LD r6,`2*$BNSZ`(r4)
543 $UMULL r7,r5,r6
544 $UMULH r8,r5,r6
545
546 addc r11,r7,r11
547 adde r9,r8,r9
548 addze r10,r10
549
550 addc r11,r7,r11
551 adde r9,r8,r9
552 addze r10,r10
553
554 $ST r11,`2*$BNSZ`(r3) #r[2]=c3
555 #sqr_add_c2(a,3,0,c1,c2,c3);
556 $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
557 $UMULL r7,r5,r6
558 $UMULH r8,r5,r6
559
560 addc r9,r7,r9
561 adde r10,r8,r10
562 addze r11,r0
563
564 addc r9,r7,r9
565 adde r10,r8,r10
566 addze r11,r11
567 #sqr_add_c2(a,2,1,c1,c2,c3);
568 $LD r5,`1*$BNSZ`(r4)
569 $LD r6,`2*$BNSZ`(r4)
570 $UMULL r7,r5,r6
571 $UMULH r8,r5,r6
572
573 addc r9,r7,r9
574 adde r10,r8,r10
575 addze r11,r11
576
577 addc r9,r7,r9
578 adde r10,r8,r10
579 addze r11,r11
580
581 $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
582 #sqr_add_c(a,2,c2,c3,c1);
583 $UMULL r7,r6,r6
584 $UMULH r8,r6,r6
585
586 addc r10,r7,r10
587 adde r11,r8,r11
588 addze r9,r0
589 #sqr_add_c2(a,3,1,c2,c3,c1);
590 $LD r6,`3*$BNSZ`(r4)
591 $UMULL r7,r5,r6
592 $UMULH r8,r5,r6
593
594 addc r10,r7,r10
595 adde r11,r8,r11
596 addze r9,r9
597
598 addc r10,r7,r10
599 adde r11,r8,r11
600 addze r9,r9
601 #sqr_add_c2(a,4,0,c2,c3,c1);
602 $LD r5,`0*$BNSZ`(r4)
603 $LD r6,`4*$BNSZ`(r4)
604 $UMULL r7,r5,r6
605 $UMULH r8,r5,r6
606
607 addc r10,r7,r10
608 adde r11,r8,r11
609 addze r9,r9
610
611 addc r10,r7,r10
612 adde r11,r8,r11
613 addze r9,r9
614 $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
615 #sqr_add_c2(a,5,0,c3,c1,c2);
616 $LD r6,`5*$BNSZ`(r4)
617 $UMULL r7,r5,r6
618 $UMULH r8,r5,r6
619
620 addc r11,r7,r11
621 adde r9,r8,r9
622 addze r10,r0
623
624 addc r11,r7,r11
625 adde r9,r8,r9
626 addze r10,r10
627 #sqr_add_c2(a,4,1,c3,c1,c2);
628 $LD r5,`1*$BNSZ`(r4)
629 $LD r6,`4*$BNSZ`(r4)
630 $UMULL r7,r5,r6
631 $UMULH r8,r5,r6
632
633 addc r11,r7,r11
634 adde r9,r8,r9
635 addze r10,r10
636
637 addc r11,r7,r11
638 adde r9,r8,r9
639 addze r10,r10
640 #sqr_add_c2(a,3,2,c3,c1,c2);
641 $LD r5,`2*$BNSZ`(r4)
642 $LD r6,`3*$BNSZ`(r4)
643 $UMULL r7,r5,r6
644 $UMULH r8,r5,r6
645
646 addc r11,r7,r11
647 adde r9,r8,r9
648 addze r10,r10
649
650 addc r11,r7,r11
651 adde r9,r8,r9
652 addze r10,r10
653 $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
654 #sqr_add_c(a,3,c1,c2,c3);
655 $UMULL r7,r6,r6
656 $UMULH r8,r6,r6
657 addc r9,r7,r9
658 adde r10,r8,r10
659 addze r11,r0
660 #sqr_add_c2(a,4,2,c1,c2,c3);
661 $LD r6,`4*$BNSZ`(r4)
662 $UMULL r7,r5,r6
663 $UMULH r8,r5,r6
664
665 addc r9,r7,r9
666 adde r10,r8,r10
667 addze r11,r11
668
669 addc r9,r7,r9
670 adde r10,r8,r10
671 addze r11,r11
672 #sqr_add_c2(a,5,1,c1,c2,c3);
673 $LD r5,`1*$BNSZ`(r4)
674 $LD r6,`5*$BNSZ`(r4)
675 $UMULL r7,r5,r6
676 $UMULH r8,r5,r6
677
678 addc r9,r7,r9
679 adde r10,r8,r10
680 addze r11,r11
681
682 addc r9,r7,r9
683 adde r10,r8,r10
684 addze r11,r11
685 #sqr_add_c2(a,6,0,c1,c2,c3);
686 $LD r5,`0*$BNSZ`(r4)
687 $LD r6,`6*$BNSZ`(r4)
688 $UMULL r7,r5,r6
689 $UMULH r8,r5,r6
690 addc r9,r7,r9
691 adde r10,r8,r10
692 addze r11,r11
693 addc r9,r7,r9
694 adde r10,r8,r10
695 addze r11,r11
696 $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
697 #sqr_add_c2(a,7,0,c2,c3,c1);
698 $LD r6,`7*$BNSZ`(r4)
699 $UMULL r7,r5,r6
700 $UMULH r8,r5,r6
701
702 addc r10,r7,r10
703 adde r11,r8,r11
704 addze r9,r0
705 addc r10,r7,r10
706 adde r11,r8,r11
707 addze r9,r9
708 #sqr_add_c2(a,6,1,c2,c3,c1);
709 $LD r5,`1*$BNSZ`(r4)
710 $LD r6,`6*$BNSZ`(r4)
711 $UMULL r7,r5,r6
712 $UMULH r8,r5,r6
713
714 addc r10,r7,r10
715 adde r11,r8,r11
716 addze r9,r9
717 addc r10,r7,r10
718 adde r11,r8,r11
719 addze r9,r9
720 #sqr_add_c2(a,5,2,c2,c3,c1);
721 $LD r5,`2*$BNSZ`(r4)
722 $LD r6,`5*$BNSZ`(r4)
723 $UMULL r7,r5,r6
724 $UMULH r8,r5,r6
725 addc r10,r7,r10
726 adde r11,r8,r11
727 addze r9,r9
728 addc r10,r7,r10
729 adde r11,r8,r11
730 addze r9,r9
731 #sqr_add_c2(a,4,3,c2,c3,c1);
732 $LD r5,`3*$BNSZ`(r4)
733 $LD r6,`4*$BNSZ`(r4)
734 $UMULL r7,r5,r6
735 $UMULH r8,r5,r6
736
737 addc r10,r7,r10
738 adde r11,r8,r11
739 addze r9,r9
740 addc r10,r7,r10
741 adde r11,r8,r11
742 addze r9,r9
743 $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
744 #sqr_add_c(a,4,c3,c1,c2);
745 $UMULL r7,r6,r6
746 $UMULH r8,r6,r6
747 addc r11,r7,r11
748 adde r9,r8,r9
749 addze r10,r0
750 #sqr_add_c2(a,5,3,c3,c1,c2);
751 $LD r6,`5*$BNSZ`(r4)
752 $UMULL r7,r5,r6
753 $UMULH r8,r5,r6
754 addc r11,r7,r11
755 adde r9,r8,r9
756 addze r10,r10
757 addc r11,r7,r11
758 adde r9,r8,r9
759 addze r10,r10
760 #sqr_add_c2(a,6,2,c3,c1,c2);
761 $LD r5,`2*$BNSZ`(r4)
762 $LD r6,`6*$BNSZ`(r4)
763 $UMULL r7,r5,r6
764 $UMULH r8,r5,r6
765 addc r11,r7,r11
766 adde r9,r8,r9
767 addze r10,r10
768
769 addc r11,r7,r11
770 adde r9,r8,r9
771 addze r10,r10
772 #sqr_add_c2(a,7,1,c3,c1,c2);
773 $LD r5,`1*$BNSZ`(r4)
774 $LD r6,`7*$BNSZ`(r4)
775 $UMULL r7,r5,r6
776 $UMULH r8,r5,r6
777 addc r11,r7,r11
778 adde r9,r8,r9
779 addze r10,r10
780 addc r11,r7,r11
781 adde r9,r8,r9
782 addze r10,r10
783 $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
784 #sqr_add_c2(a,7,2,c1,c2,c3);
785 $LD r5,`2*$BNSZ`(r4)
786 $UMULL r7,r5,r6
787 $UMULH r8,r5,r6
788
789 addc r9,r7,r9
790 adde r10,r8,r10
791 addze r11,r0
792 addc r9,r7,r9
793 adde r10,r8,r10
794 addze r11,r11
795 #sqr_add_c2(a,6,3,c1,c2,c3);
796 $LD r5,`3*$BNSZ`(r4)
797 $LD r6,`6*$BNSZ`(r4)
798 $UMULL r7,r5,r6
799 $UMULH r8,r5,r6
800 addc r9,r7,r9
801 adde r10,r8,r10
802 addze r11,r11
803 addc r9,r7,r9
804 adde r10,r8,r10
805 addze r11,r11
806 #sqr_add_c2(a,5,4,c1,c2,c3);
807 $LD r5,`4*$BNSZ`(r4)
808 $LD r6,`5*$BNSZ`(r4)
809 $UMULL r7,r5,r6
810 $UMULH r8,r5,r6
811 addc r9,r7,r9
812 adde r10,r8,r10
813 addze r11,r11
814 addc r9,r7,r9
815 adde r10,r8,r10
816 addze r11,r11
817 $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
818 #sqr_add_c(a,5,c2,c3,c1);
819 $UMULL r7,r6,r6
820 $UMULH r8,r6,r6
821 addc r10,r7,r10
822 adde r11,r8,r11
823 addze r9,r0
824 #sqr_add_c2(a,6,4,c2,c3,c1);
825 $LD r6,`6*$BNSZ`(r4)
826 $UMULL r7,r5,r6
827 $UMULH r8,r5,r6
828 addc r10,r7,r10
829 adde r11,r8,r11
830 addze r9,r9
831 addc r10,r7,r10
832 adde r11,r8,r11
833 addze r9,r9
834 #sqr_add_c2(a,7,3,c2,c3,c1);
835 $LD r5,`3*$BNSZ`(r4)
836 $LD r6,`7*$BNSZ`(r4)
837 $UMULL r7,r5,r6
838 $UMULH r8,r5,r6
839 addc r10,r7,r10
840 adde r11,r8,r11
841 addze r9,r9
842 addc r10,r7,r10
843 adde r11,r8,r11
844 addze r9,r9
845 $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
846 #sqr_add_c2(a,7,4,c3,c1,c2);
847 $LD r5,`4*$BNSZ`(r4)
848 $UMULL r7,r5,r6
849 $UMULH r8,r5,r6
850 addc r11,r7,r11
851 adde r9,r8,r9
852 addze r10,r0
853 addc r11,r7,r11
854 adde r9,r8,r9
855 addze r10,r10
856 #sqr_add_c2(a,6,5,c3,c1,c2);
857 $LD r5,`5*$BNSZ`(r4)
858 $LD r6,`6*$BNSZ`(r4)
859 $UMULL r7,r5,r6
860 $UMULH r8,r5,r6
861 addc r11,r7,r11
862 adde r9,r8,r9
863 addze r10,r10
864 addc r11,r7,r11
865 adde r9,r8,r9
866 addze r10,r10
867 $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
868 #sqr_add_c(a,6,c1,c2,c3);
869 $UMULL r7,r6,r6
870 $UMULH r8,r6,r6
871 addc r9,r7,r9
872 adde r10,r8,r10
873 addze r11,r0
874 #sqr_add_c2(a,7,5,c1,c2,c3)
875 $LD r6,`7*$BNSZ`(r4)
876 $UMULL r7,r5,r6
877 $UMULH r8,r5,r6
878 addc r9,r7,r9
879 adde r10,r8,r10
880 addze r11,r11
881 addc r9,r7,r9
882 adde r10,r8,r10
883 addze r11,r11
884 $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
885
886 #sqr_add_c2(a,7,6,c2,c3,c1)
887 $LD r5,`6*$BNSZ`(r4)
888 $UMULL r7,r5,r6
889 $UMULH r8,r5,r6
890 addc r10,r7,r10
891 adde r11,r8,r11
892 addze r9,r0
893 addc r10,r7,r10
894 adde r11,r8,r11
895 addze r9,r9
896 $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
897 #sqr_add_c(a,7,c3,c1,c2);
898 $UMULL r7,r6,r6
899 $UMULH r8,r6,r6
900 addc r11,r7,r11
901 adde r9,r8,r9
902 $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
903 $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
904
905
906 bclr BO_ALWAYS,CR0_LT
907
908 .long 0x00000000
909
910#
911# NOTE: The following label name should be changed to
912# "bn_mul_comba4" i.e. remove the first dot
913# for the gcc compiler. This should be automatically
914# done in the build
915#
916
917.align 4
918.bn_mul_comba4:
919#
920# This is an optimized version of the bn_mul_comba4 routine.
921#
922# void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
923# r3 contains r
924# r4 contains a
925# r5 contains b
926# r6, r7 are the 2 BN_ULONGs being multiplied.
927# r8, r9 are the results of the 32x32 giving 64 multiply.
928# r10, r11, r12 are the equivalents of c1, c2, and c3.
929#
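
The straight-line sequence below is the standard comba pattern, column by column; the (c1,c2,c3) roles rotate so that the finished column is always in the first carry word. For orientation, the same order in C, reusing the BN_ULONG typedef and the mul_add helper from the bn_sqr_comba4 sketch above (a sketch, not the generated code):

	static void bn_mul_comba4_ref(BN_ULONG *r, const BN_ULONG *a,
	                              const BN_ULONG *b)
	{
		BN_ULONG c1 = 0, c2 = 0, c3 = 0;

		mul_add(a[0], b[0], &c1, &c2, &c3); r[0] = c1; c1 = 0;
		mul_add(a[0], b[1], &c2, &c3, &c1);
		mul_add(a[1], b[0], &c2, &c3, &c1); r[1] = c2; c2 = 0;
		mul_add(a[2], b[0], &c3, &c1, &c2);
		mul_add(a[1], b[1], &c3, &c1, &c2);
		mul_add(a[0], b[2], &c3, &c1, &c2); r[2] = c3; c3 = 0;
		mul_add(a[0], b[3], &c1, &c2, &c3);
		mul_add(a[1], b[2], &c1, &c2, &c3);
		mul_add(a[2], b[1], &c1, &c2, &c3);
		mul_add(a[3], b[0], &c1, &c2, &c3); r[3] = c1; c1 = 0;
		mul_add(a[3], b[1], &c2, &c3, &c1);
		mul_add(a[2], b[2], &c2, &c3, &c1);
		mul_add(a[1], b[3], &c2, &c3, &c1); r[4] = c2; c2 = 0;
		mul_add(a[2], b[3], &c3, &c1, &c2);
		mul_add(a[3], b[2], &c3, &c1, &c2); r[5] = c3; c3 = 0;
		mul_add(a[3], b[3], &c1, &c2, &c3); r[6] = c1;
		r[7] = c2;
	}
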
930 xor r0,r0,r0 #r0=0. Used in addze below.
931 #mul_add_c(a[0],b[0],c1,c2,c3);
932 $LD r6,`0*$BNSZ`(r4)
933 $LD r7,`0*$BNSZ`(r5)
934 $UMULL r10,r6,r7
935 $UMULH r11,r6,r7
936 $ST r10,`0*$BNSZ`(r3) #r[0]=c1
937 #mul_add_c(a[0],b[1],c2,c3,c1);
938 $LD r7,`1*$BNSZ`(r5)
939 $UMULL r8,r6,r7
940 $UMULH r9,r6,r7
941 addc r11,r8,r11
942 adde r12,r9,r0
943 addze r10,r0
944 #mul_add_c(a[1],b[0],c2,c3,c1);
945 $LD r6, `1*$BNSZ`(r4)
946 $LD r7, `0*$BNSZ`(r5)
947 $UMULL r8,r6,r7
948 $UMULH r9,r6,r7
949 addc r11,r8,r11
950 adde r12,r9,r12
951 addze r10,r10
952 $ST r11,`1*$BNSZ`(r3) #r[1]=c2
953 #mul_add_c(a[2],b[0],c3,c1,c2);
954 $LD r6,`2*$BNSZ`(r4)
955 $UMULL r8,r6,r7
956 $UMULH r9,r6,r7
957 addc r12,r8,r12
958 adde r10,r9,r10
959 addze r11,r0
960 #mul_add_c(a[1],b[1],c3,c1,c2);
961 $LD r6,`1*$BNSZ`(r4)
962 $LD r7,`1*$BNSZ`(r5)
963 $UMULL r8,r6,r7
964 $UMULH r9,r6,r7
965 addc r12,r8,r12
966 adde r10,r9,r10
967 addze r11,r11
968 #mul_add_c(a[0],b[2],c3,c1,c2);
969 $LD r6,`0*$BNSZ`(r4)
970 $LD r7,`2*$BNSZ`(r5)
971 $UMULL r8,r6,r7
972 $UMULH r9,r6,r7
973 addc r12,r8,r12
974 adde r10,r9,r10
975 addze r11,r11
976 $ST r12,`2*$BNSZ`(r3) #r[2]=c3
977 #mul_add_c(a[0],b[3],c1,c2,c3);
978 $LD r7,`3*$BNSZ`(r5)
979 $UMULL r8,r6,r7
980 $UMULH r9,r6,r7
981 addc r10,r8,r10
982 adde r11,r9,r11
983 addze r12,r0
984 #mul_add_c(a[1],b[2],c1,c2,c3);
985 $LD r6,`1*$BNSZ`(r4)
986 $LD r7,`2*$BNSZ`(r5)
987 $UMULL r8,r6,r7
988 $UMULH r9,r6,r7
989 addc r10,r8,r10
990 adde r11,r9,r11
991 addze r12,r12
992 #mul_add_c(a[2],b[1],c1,c2,c3);
993 $LD r6,`2*$BNSZ`(r4)
994 $LD r7,`1*$BNSZ`(r5)
995 $UMULL r8,r6,r7
996 $UMULH r9,r6,r7
997 addc r10,r8,r10
998 adde r11,r9,r11
999 addze r12,r12
1000 #mul_add_c(a[3],b[0],c1,c2,c3);
1001 $LD r6,`3*$BNSZ`(r4)
1002 $LD r7,`0*$BNSZ`(r5)
1003 $UMULL r8,r6,r7
1004 $UMULH r9,r6,r7
1005 addc r10,r8,r10
1006 adde r11,r9,r11
1007 addze r12,r12
1008 $ST r10,`3*$BNSZ`(r3) #r[3]=c1
1009 #mul_add_c(a[3],b[1],c2,c3,c1);
1010 $LD r7,`1*$BNSZ`(r5)
1011 $UMULL r8,r6,r7
1012 $UMULH r9,r6,r7
1013 addc r11,r8,r11
1014 adde r12,r9,r12
1015 addze r10,r0
1016 #mul_add_c(a[2],b[2],c2,c3,c1);
1017 $LD r6,`2*$BNSZ`(r4)
1018 $LD r7,`2*$BNSZ`(r5)
1019 $UMULL r8,r6,r7
1020 $UMULH r9,r6,r7
1021 addc r11,r8,r11
1022 adde r12,r9,r12
1023 addze r10,r10
1024 #mul_add_c(a[1],b[3],c2,c3,c1);
1025 $LD r6,`1*$BNSZ`(r4)
1026 $LD r7,`3*$BNSZ`(r5)
1027 $UMULL r8,r6,r7
1028 $UMULH r9,r6,r7
1029 addc r11,r8,r11
1030 adde r12,r9,r12
1031 addze r10,r10
1032 $ST r11,`4*$BNSZ`(r3) #r[4]=c2
1033 #mul_add_c(a[2],b[3],c3,c1,c2);
1034 $LD r6,`2*$BNSZ`(r4)
1035 $UMULL r8,r6,r7
1036 $UMULH r9,r6,r7
1037 addc r12,r8,r12
1038 adde r10,r9,r10
1039 addze r11,r0
1040 #mul_add_c(a[3],b[2],c3,c1,c2);
1041 $LD r6,`3*$BNSZ`(r4)
1042	$LD	r7,`2*$BNSZ`(r5)
1043 $UMULL r8,r6,r7
1044 $UMULH r9,r6,r7
1045 addc r12,r8,r12
1046 adde r10,r9,r10
1047 addze r11,r11
1048 $ST r12,`5*$BNSZ`(r3) #r[5]=c3
1049 #mul_add_c(a[3],b[3],c1,c2,c3);
1050 $LD r7,`3*$BNSZ`(r5)
1051 $UMULL r8,r6,r7
1052 $UMULH r9,r6,r7
1053 addc r10,r8,r10
1054 adde r11,r9,r11
1055
1056 $ST r10,`6*$BNSZ`(r3) #r[6]=c1
1057 $ST r11,`7*$BNSZ`(r3) #r[7]=c2
1058 bclr BO_ALWAYS,CR0_LT
1059 .long 0x00000000
1060
1061#
1062# NOTE: The following label name should be changed to
1063# "bn_mul_comba8" i.e. remove the first dot
1064# for the gcc compiler. This should be automatically
1065# done in the build
1066#
1067
1068.align 4
1069.bn_mul_comba8:
1070#
1071# Optimized version of the bn_mul_comba8 routine.
1072#
1073# void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
1074# r3 contains r
1075# r4 contains a
1076# r5 contains b
1077# r6, r7 are the 2 BN_ULONGs being multiplied.
1078# r8, r9 are the results of the 32x32 giving 64 multiply.
1079# r10, r11, r12 are the equivalents of c1, c2, and c3.
1080#
1081 xor r0,r0,r0 #r0=0. Used in addze below.
1082
1083 #mul_add_c(a[0],b[0],c1,c2,c3);
1084 $LD r6,`0*$BNSZ`(r4) #a[0]
1085 $LD r7,`0*$BNSZ`(r5) #b[0]
1086 $UMULL r10,r6,r7
1087 $UMULH r11,r6,r7
1088 $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
1089 #mul_add_c(a[0],b[1],c2,c3,c1);
1090 $LD r7,`1*$BNSZ`(r5)
1091 $UMULL r8,r6,r7
1092 $UMULH r9,r6,r7
1093 addc r11,r11,r8
1094	addze	r12,r9		# since we didn't set r12 to zero before.
1095 addze r10,r0
1096 #mul_add_c(a[1],b[0],c2,c3,c1);
1097 $LD r6,`1*$BNSZ`(r4)
1098 $LD r7,`0*$BNSZ`(r5)
1099 $UMULL r8,r6,r7
1100 $UMULH r9,r6,r7
1101 addc r11,r11,r8
1102 adde r12,r12,r9
1103 addze r10,r10
1104 $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
1105 #mul_add_c(a[2],b[0],c3,c1,c2);
1106 $LD r6,`2*$BNSZ`(r4)
1107 $UMULL r8,r6,r7
1108 $UMULH r9,r6,r7
1109 addc r12,r12,r8
1110 adde r10,r10,r9
1111 addze r11,r0
1112 #mul_add_c(a[1],b[1],c3,c1,c2);
1113 $LD r6,`1*$BNSZ`(r4)
1114 $LD r7,`1*$BNSZ`(r5)
1115 $UMULL r8,r6,r7
1116 $UMULH r9,r6,r7
1117 addc r12,r12,r8
1118 adde r10,r10,r9
1119 addze r11,r11
1120 #mul_add_c(a[0],b[2],c3,c1,c2);
1121 $LD r6,`0*$BNSZ`(r4)
1122 $LD r7,`2*$BNSZ`(r5)
1123 $UMULL r8,r6,r7
1124 $UMULH r9,r6,r7
1125 addc r12,r12,r8
1126 adde r10,r10,r9
1127 addze r11,r11
1128 $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
1129 #mul_add_c(a[0],b[3],c1,c2,c3);
1130 $LD r7,`3*$BNSZ`(r5)
1131 $UMULL r8,r6,r7
1132 $UMULH r9,r6,r7
1133 addc r10,r10,r8
1134 adde r11,r11,r9
1135 addze r12,r0
1136 #mul_add_c(a[1],b[2],c1,c2,c3);
1137 $LD r6,`1*$BNSZ`(r4)
1138 $LD r7,`2*$BNSZ`(r5)
1139 $UMULL r8,r6,r7
1140 $UMULH r9,r6,r7
1141 addc r10,r10,r8
1142 adde r11,r11,r9
1143 addze r12,r12
1144
1145 #mul_add_c(a[2],b[1],c1,c2,c3);
1146 $LD r6,`2*$BNSZ`(r4)
1147 $LD r7,`1*$BNSZ`(r5)
1148 $UMULL r8,r6,r7
1149 $UMULH r9,r6,r7
1150 addc r10,r10,r8
1151 adde r11,r11,r9
1152 addze r12,r12
1153 #mul_add_c(a[3],b[0],c1,c2,c3);
1154 $LD r6,`3*$BNSZ`(r4)
1155 $LD r7,`0*$BNSZ`(r5)
1156 $UMULL r8,r6,r7
1157 $UMULH r9,r6,r7
1158 addc r10,r10,r8
1159 adde r11,r11,r9
1160 addze r12,r12
1161 $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
1162 #mul_add_c(a[4],b[0],c2,c3,c1);
1163 $LD r6,`4*$BNSZ`(r4)
1164 $UMULL r8,r6,r7
1165 $UMULH r9,r6,r7
1166 addc r11,r11,r8
1167 adde r12,r12,r9
1168 addze r10,r0
1169 #mul_add_c(a[3],b[1],c2,c3,c1);
1170 $LD r6,`3*$BNSZ`(r4)
1171 $LD r7,`1*$BNSZ`(r5)
1172 $UMULL r8,r6,r7
1173 $UMULH r9,r6,r7
1174 addc r11,r11,r8
1175 adde r12,r12,r9
1176 addze r10,r10
1177 #mul_add_c(a[2],b[2],c2,c3,c1);
1178 $LD r6,`2*$BNSZ`(r4)
1179 $LD r7,`2*$BNSZ`(r5)
1180 $UMULL r8,r6,r7
1181 $UMULH r9,r6,r7
1182 addc r11,r11,r8
1183 adde r12,r12,r9
1184 addze r10,r10
1185 #mul_add_c(a[1],b[3],c2,c3,c1);
1186 $LD r6,`1*$BNSZ`(r4)
1187 $LD r7,`3*$BNSZ`(r5)
1188 $UMULL r8,r6,r7
1189 $UMULH r9,r6,r7
1190 addc r11,r11,r8
1191 adde r12,r12,r9
1192 addze r10,r10
1193 #mul_add_c(a[0],b[4],c2,c3,c1);
1194 $LD r6,`0*$BNSZ`(r4)
1195 $LD r7,`4*$BNSZ`(r5)
1196 $UMULL r8,r6,r7
1197 $UMULH r9,r6,r7
1198 addc r11,r11,r8
1199 adde r12,r12,r9
1200 addze r10,r10
1201 $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
1202 #mul_add_c(a[0],b[5],c3,c1,c2);
1203 $LD r7,`5*$BNSZ`(r5)
1204 $UMULL r8,r6,r7
1205 $UMULH r9,r6,r7
1206 addc r12,r12,r8
1207 adde r10,r10,r9
1208 addze r11,r0
1209 #mul_add_c(a[1],b[4],c3,c1,c2);
1210 $LD r6,`1*$BNSZ`(r4)
1211 $LD r7,`4*$BNSZ`(r5)
1212 $UMULL r8,r6,r7
1213 $UMULH r9,r6,r7
1214 addc r12,r12,r8
1215 adde r10,r10,r9
1216 addze r11,r11
1217 #mul_add_c(a[2],b[3],c3,c1,c2);
1218 $LD r6,`2*$BNSZ`(r4)
1219 $LD r7,`3*$BNSZ`(r5)
1220 $UMULL r8,r6,r7
1221 $UMULH r9,r6,r7
1222 addc r12,r12,r8
1223 adde r10,r10,r9
1224 addze r11,r11
1225 #mul_add_c(a[3],b[2],c3,c1,c2);
1226 $LD r6,`3*$BNSZ`(r4)
1227 $LD r7,`2*$BNSZ`(r5)
1228 $UMULL r8,r6,r7
1229 $UMULH r9,r6,r7
1230 addc r12,r12,r8
1231 adde r10,r10,r9
1232 addze r11,r11
1233 #mul_add_c(a[4],b[1],c3,c1,c2);
1234 $LD r6,`4*$BNSZ`(r4)
1235 $LD r7,`1*$BNSZ`(r5)
1236 $UMULL r8,r6,r7
1237 $UMULH r9,r6,r7
1238 addc r12,r12,r8
1239 adde r10,r10,r9
1240 addze r11,r11
1241 #mul_add_c(a[5],b[0],c3,c1,c2);
1242 $LD r6,`5*$BNSZ`(r4)
1243 $LD r7,`0*$BNSZ`(r5)
1244 $UMULL r8,r6,r7
1245 $UMULH r9,r6,r7
1246 addc r12,r12,r8
1247 adde r10,r10,r9
1248 addze r11,r11
1249 $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
1250 #mul_add_c(a[6],b[0],c1,c2,c3);
1251 $LD r6,`6*$BNSZ`(r4)
1252 $UMULL r8,r6,r7
1253 $UMULH r9,r6,r7
1254 addc r10,r10,r8
1255 adde r11,r11,r9
1256 addze r12,r0
1257 #mul_add_c(a[5],b[1],c1,c2,c3);
1258 $LD r6,`5*$BNSZ`(r4)
1259 $LD r7,`1*$BNSZ`(r5)
1260 $UMULL r8,r6,r7
1261 $UMULH r9,r6,r7
1262 addc r10,r10,r8
1263 adde r11,r11,r9
1264 addze r12,r12
1265 #mul_add_c(a[4],b[2],c1,c2,c3);
1266 $LD r6,`4*$BNSZ`(r4)
1267 $LD r7,`2*$BNSZ`(r5)
1268 $UMULL r8,r6,r7
1269 $UMULH r9,r6,r7
1270 addc r10,r10,r8
1271 adde r11,r11,r9
1272 addze r12,r12
1273 #mul_add_c(a[3],b[3],c1,c2,c3);
1274 $LD r6,`3*$BNSZ`(r4)
1275 $LD r7,`3*$BNSZ`(r5)
1276 $UMULL r8,r6,r7
1277 $UMULH r9,r6,r7
1278 addc r10,r10,r8
1279 adde r11,r11,r9
1280 addze r12,r12
1281 #mul_add_c(a[2],b[4],c1,c2,c3);
1282 $LD r6,`2*$BNSZ`(r4)
1283 $LD r7,`4*$BNSZ`(r5)
1284 $UMULL r8,r6,r7
1285 $UMULH r9,r6,r7
1286 addc r10,r10,r8
1287 adde r11,r11,r9
1288 addze r12,r12
1289 #mul_add_c(a[1],b[5],c1,c2,c3);
1290 $LD r6,`1*$BNSZ`(r4)
1291 $LD r7,`5*$BNSZ`(r5)
1292 $UMULL r8,r6,r7
1293 $UMULH r9,r6,r7
1294 addc r10,r10,r8
1295 adde r11,r11,r9
1296 addze r12,r12
1297 #mul_add_c(a[0],b[6],c1,c2,c3);
1298 $LD r6,`0*$BNSZ`(r4)
1299 $LD r7,`6*$BNSZ`(r5)
1300 $UMULL r8,r6,r7
1301 $UMULH r9,r6,r7
1302 addc r10,r10,r8
1303 adde r11,r11,r9
1304 addze r12,r12
1305 $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
1306 #mul_add_c(a[0],b[7],c2,c3,c1);
1307 $LD r7,`7*$BNSZ`(r5)
1308 $UMULL r8,r6,r7
1309 $UMULH r9,r6,r7
1310 addc r11,r11,r8
1311 adde r12,r12,r9
1312 addze r10,r0
1313 #mul_add_c(a[1],b[6],c2,c3,c1);
1314 $LD r6,`1*$BNSZ`(r4)
1315 $LD r7,`6*$BNSZ`(r5)
1316 $UMULL r8,r6,r7
1317 $UMULH r9,r6,r7
1318 addc r11,r11,r8
1319 adde r12,r12,r9
1320 addze r10,r10
1321 #mul_add_c(a[2],b[5],c2,c3,c1);
1322 $LD r6,`2*$BNSZ`(r4)
1323 $LD r7,`5*$BNSZ`(r5)
1324 $UMULL r8,r6,r7
1325 $UMULH r9,r6,r7
1326 addc r11,r11,r8
1327 adde r12,r12,r9
1328 addze r10,r10
1329 #mul_add_c(a[3],b[4],c2,c3,c1);
1330 $LD r6,`3*$BNSZ`(r4)
1331 $LD r7,`4*$BNSZ`(r5)
1332 $UMULL r8,r6,r7
1333 $UMULH r9,r6,r7
1334 addc r11,r11,r8
1335 adde r12,r12,r9
1336 addze r10,r10
1337 #mul_add_c(a[4],b[3],c2,c3,c1);
1338 $LD r6,`4*$BNSZ`(r4)
1339 $LD r7,`3*$BNSZ`(r5)
1340 $UMULL r8,r6,r7
1341 $UMULH r9,r6,r7
1342 addc r11,r11,r8
1343 adde r12,r12,r9
1344 addze r10,r10
1345 #mul_add_c(a[5],b[2],c2,c3,c1);
1346 $LD r6,`5*$BNSZ`(r4)
1347 $LD r7,`2*$BNSZ`(r5)
1348 $UMULL r8,r6,r7
1349 $UMULH r9,r6,r7
1350 addc r11,r11,r8
1351 adde r12,r12,r9
1352 addze r10,r10
1353 #mul_add_c(a[6],b[1],c2,c3,c1);
1354 $LD r6,`6*$BNSZ`(r4)
1355 $LD r7,`1*$BNSZ`(r5)
1356 $UMULL r8,r6,r7
1357 $UMULH r9,r6,r7
1358 addc r11,r11,r8
1359 adde r12,r12,r9
1360 addze r10,r10
1361 #mul_add_c(a[7],b[0],c2,c3,c1);
1362 $LD r6,`7*$BNSZ`(r4)
1363 $LD r7,`0*$BNSZ`(r5)
1364 $UMULL r8,r6,r7
1365 $UMULH r9,r6,r7
1366 addc r11,r11,r8
1367 adde r12,r12,r9
1368 addze r10,r10
1369 $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
1370 #mul_add_c(a[7],b[1],c3,c1,c2);
1371 $LD r7,`1*$BNSZ`(r5)
1372 $UMULL r8,r6,r7
1373 $UMULH r9,r6,r7
1374 addc r12,r12,r8
1375 adde r10,r10,r9
1376 addze r11,r0
1377 #mul_add_c(a[6],b[2],c3,c1,c2);
1378 $LD r6,`6*$BNSZ`(r4)
1379 $LD r7,`2*$BNSZ`(r5)
1380 $UMULL r8,r6,r7
1381 $UMULH r9,r6,r7
1382 addc r12,r12,r8
1383 adde r10,r10,r9
1384 addze r11,r11
1385 #mul_add_c(a[5],b[3],c3,c1,c2);
1386 $LD r6,`5*$BNSZ`(r4)
1387 $LD r7,`3*$BNSZ`(r5)
1388 $UMULL r8,r6,r7
1389 $UMULH r9,r6,r7
1390 addc r12,r12,r8
1391 adde r10,r10,r9
1392 addze r11,r11
1393 #mul_add_c(a[4],b[4],c3,c1,c2);
1394 $LD r6,`4*$BNSZ`(r4)
1395 $LD r7,`4*$BNSZ`(r5)
1396 $UMULL r8,r6,r7
1397 $UMULH r9,r6,r7
1398 addc r12,r12,r8
1399 adde r10,r10,r9
1400 addze r11,r11
1401 #mul_add_c(a[3],b[5],c3,c1,c2);
1402 $LD r6,`3*$BNSZ`(r4)
1403 $LD r7,`5*$BNSZ`(r5)
1404 $UMULL r8,r6,r7
1405 $UMULH r9,r6,r7
1406 addc r12,r12,r8
1407 adde r10,r10,r9
1408 addze r11,r11
1409 #mul_add_c(a[2],b[6],c3,c1,c2);
1410 $LD r6,`2*$BNSZ`(r4)
1411 $LD r7,`6*$BNSZ`(r5)
1412 $UMULL r8,r6,r7
1413 $UMULH r9,r6,r7
1414 addc r12,r12,r8
1415 adde r10,r10,r9
1416 addze r11,r11
1417 #mul_add_c(a[1],b[7],c3,c1,c2);
1418 $LD r6,`1*$BNSZ`(r4)
1419 $LD r7,`7*$BNSZ`(r5)
1420 $UMULL r8,r6,r7
1421 $UMULH r9,r6,r7
1422 addc r12,r12,r8
1423 adde r10,r10,r9
1424 addze r11,r11
1425 $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
1426 #mul_add_c(a[2],b[7],c1,c2,c3);
1427 $LD r6,`2*$BNSZ`(r4)
1428 $UMULL r8,r6,r7
1429 $UMULH r9,r6,r7
1430 addc r10,r10,r8
1431 adde r11,r11,r9
1432 addze r12,r0
1433 #mul_add_c(a[3],b[6],c1,c2,c3);
1434 $LD r6,`3*$BNSZ`(r4)
1435 $LD r7,`6*$BNSZ`(r5)
1436 $UMULL r8,r6,r7
1437 $UMULH r9,r6,r7
1438 addc r10,r10,r8
1439 adde r11,r11,r9
1440 addze r12,r12
1441 #mul_add_c(a[4],b[5],c1,c2,c3);
1442 $LD r6,`4*$BNSZ`(r4)
1443 $LD r7,`5*$BNSZ`(r5)
1444 $UMULL r8,r6,r7
1445 $UMULH r9,r6,r7
1446 addc r10,r10,r8
1447 adde r11,r11,r9
1448 addze r12,r12
1449 #mul_add_c(a[5],b[4],c1,c2,c3);
1450 $LD r6,`5*$BNSZ`(r4)
1451 $LD r7,`4*$BNSZ`(r5)
1452 $UMULL r8,r6,r7
1453 $UMULH r9,r6,r7
1454 addc r10,r10,r8
1455 adde r11,r11,r9
1456 addze r12,r12
1457 #mul_add_c(a[6],b[3],c1,c2,c3);
1458 $LD r6,`6*$BNSZ`(r4)
1459 $LD r7,`3*$BNSZ`(r5)
1460 $UMULL r8,r6,r7
1461 $UMULH r9,r6,r7
1462 addc r10,r10,r8
1463 adde r11,r11,r9
1464 addze r12,r12
1465 #mul_add_c(a[7],b[2],c1,c2,c3);
1466 $LD r6,`7*$BNSZ`(r4)
1467 $LD r7,`2*$BNSZ`(r5)
1468 $UMULL r8,r6,r7
1469 $UMULH r9,r6,r7
1470 addc r10,r10,r8
1471 adde r11,r11,r9
1472 addze r12,r12
1473 $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
1474 #mul_add_c(a[7],b[3],c2,c3,c1);
1475 $LD r7,`3*$BNSZ`(r5)
1476 $UMULL r8,r6,r7
1477 $UMULH r9,r6,r7
1478 addc r11,r11,r8
1479 adde r12,r12,r9
1480 addze r10,r0
1481 #mul_add_c(a[6],b[4],c2,c3,c1);
1482 $LD r6,`6*$BNSZ`(r4)
1483 $LD r7,`4*$BNSZ`(r5)
1484 $UMULL r8,r6,r7
1485 $UMULH r9,r6,r7
1486 addc r11,r11,r8
1487 adde r12,r12,r9
1488 addze r10,r10
1489 #mul_add_c(a[5],b[5],c2,c3,c1);
1490 $LD r6,`5*$BNSZ`(r4)
1491 $LD r7,`5*$BNSZ`(r5)
1492 $UMULL r8,r6,r7
1493 $UMULH r9,r6,r7
1494 addc r11,r11,r8
1495 adde r12,r12,r9
1496 addze r10,r10
1497 #mul_add_c(a[4],b[6],c2,c3,c1);
1498 $LD r6,`4*$BNSZ`(r4)
1499 $LD r7,`6*$BNSZ`(r5)
1500 $UMULL r8,r6,r7
1501 $UMULH r9,r6,r7
1502 addc r11,r11,r8
1503 adde r12,r12,r9
1504 addze r10,r10
1505 #mul_add_c(a[3],b[7],c2,c3,c1);
1506 $LD r6,`3*$BNSZ`(r4)
1507 $LD r7,`7*$BNSZ`(r5)
1508 $UMULL r8,r6,r7
1509 $UMULH r9,r6,r7
1510 addc r11,r11,r8
1511 adde r12,r12,r9
1512 addze r10,r10
1513 $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
1514 #mul_add_c(a[4],b[7],c3,c1,c2);
1515 $LD r6,`4*$BNSZ`(r4)
1516 $UMULL r8,r6,r7
1517 $UMULH r9,r6,r7
1518 addc r12,r12,r8
1519 adde r10,r10,r9
1520 addze r11,r0
1521 #mul_add_c(a[5],b[6],c3,c1,c2);
1522 $LD r6,`5*$BNSZ`(r4)
1523 $LD r7,`6*$BNSZ`(r5)
1524 $UMULL r8,r6,r7
1525 $UMULH r9,r6,r7
1526 addc r12,r12,r8
1527 adde r10,r10,r9
1528 addze r11,r11
1529 #mul_add_c(a[6],b[5],c3,c1,c2);
1530 $LD r6,`6*$BNSZ`(r4)
1531 $LD r7,`5*$BNSZ`(r5)
1532 $UMULL r8,r6,r7
1533 $UMULH r9,r6,r7
1534 addc r12,r12,r8
1535 adde r10,r10,r9
1536 addze r11,r11
1537 #mul_add_c(a[7],b[4],c3,c1,c2);
1538 $LD r6,`7*$BNSZ`(r4)
1539 $LD r7,`4*$BNSZ`(r5)
1540 $UMULL r8,r6,r7
1541 $UMULH r9,r6,r7
1542 addc r12,r12,r8
1543 adde r10,r10,r9
1544 addze r11,r11
1545 $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
1546 #mul_add_c(a[7],b[5],c1,c2,c3);
1547 $LD r7,`5*$BNSZ`(r5)
1548 $UMULL r8,r6,r7
1549 $UMULH r9,r6,r7
1550 addc r10,r10,r8
1551 adde r11,r11,r9
1552 addze r12,r0
1553 #mul_add_c(a[6],b[6],c1,c2,c3);
1554 $LD r6,`6*$BNSZ`(r4)
1555 $LD r7,`6*$BNSZ`(r5)
1556 $UMULL r8,r6,r7
1557 $UMULH r9,r6,r7
1558 addc r10,r10,r8
1559 adde r11,r11,r9
1560 addze r12,r12
1561 #mul_add_c(a[5],b[7],c1,c2,c3);
1562 $LD r6,`5*$BNSZ`(r4)
1563 $LD r7,`7*$BNSZ`(r5)
1564 $UMULL r8,r6,r7
1565 $UMULH r9,r6,r7
1566 addc r10,r10,r8
1567 adde r11,r11,r9
1568 addze r12,r12
1569 $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
1570 #mul_add_c(a[6],b[7],c2,c3,c1);
1571 $LD r6,`6*$BNSZ`(r4)
1572 $UMULL r8,r6,r7
1573 $UMULH r9,r6,r7
1574 addc r11,r11,r8
1575 adde r12,r12,r9
1576 addze r10,r0
1577 #mul_add_c(a[7],b[6],c2,c3,c1);
1578 $LD r6,`7*$BNSZ`(r4)
1579 $LD r7,`6*$BNSZ`(r5)
1580 $UMULL r8,r6,r7
1581 $UMULH r9,r6,r7
1582 addc r11,r11,r8
1583 adde r12,r12,r9
1584 addze r10,r10
1585 $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
1586 #mul_add_c(a[7],b[7],c3,c1,c2);
1587 $LD r7,`7*$BNSZ`(r5)
1588 $UMULL r8,r6,r7
1589 $UMULH r9,r6,r7
1590 addc r12,r12,r8
1591 adde r10,r10,r9
1592 $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
1593 $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
1594 bclr BO_ALWAYS,CR0_LT
1595 .long 0x00000000
1596
1597#
1598# NOTE: The following label name should be changed to
1599# "bn_sub_words" i.e. remove the first dot
1600# for the gcc compiler. This should be automatically
1601# done in the build
1602#
1603#
1604.align 4
1605.bn_sub_words:
1606#
1607# Handcoded version of bn_sub_words
1608#
1609#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
1610#
1611# r3 = r
1612# r4 = a
1613# r5 = b
1614# r6 = n
1615#
1616# Note: No loop unrolling done since this is not a performance
1617# critical loop.
1618
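
A minimal C model of the routine, for reference (the name is illustrative; the asm keeps the borrow in the XER carry bit via subfe instead of a variable):

	typedef unsigned int BN_ULONG;		/* 32-bit flavour assumed */

	BN_ULONG bn_sub_words_ref(BN_ULONG *r, const BN_ULONG *a,
	                          const BN_ULONG *b, int n)
	{
		BN_ULONG borrow = 0;
		int i;

		for (i = 0; i < n; i++) {
			BN_ULONG t = a[i] - b[i] - borrow;
			borrow = (a[i] < b[i]) || (a[i] == b[i] && borrow);
			r[i] = t;
		}
		return borrow;			/* 1 iff a < b */
	}
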
1619 xor r0,r0,r0 #set r0 = 0
1620#
1621# check for r6 = 0 AND set carry bit.
1622#
1623 subfc. r7,r0,r6 # If r6 is 0 then result is 0.
1624 # if r6 > 0 then result !=0
1625 # In either case carry bit is set.
1626 bc BO_IF,CR0_EQ,Lppcasm_sub_adios
1627 addi r4,r4,-$BNSZ
1628 addi r3,r3,-$BNSZ
1629 addi r5,r5,-$BNSZ
1630 mtctr r6
1631Lppcasm_sub_mainloop:
1632 $LDU r7,$BNSZ(r4)
1633 $LDU r8,$BNSZ(r5)
1634 subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8)
1635 # if carry = 1 this is r7-r8. Else it
1636 # is r7-r8 -1 as we need.
1637 $STU r6,$BNSZ(r3)
1638 bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sub_mainloop
1639Lppcasm_sub_adios:
1640 subfze r3,r0 # if carry bit is set then r3 = 0 else -1
1641 andi. r3,r3,1 # keep only last bit.
1642 bclr BO_ALWAYS,CR0_LT
1643 .long 0x00000000
1644
1645
1646#
1647# NOTE: The following label name should be changed to
1648# "bn_add_words" i.e. remove the first dot
1649# for the gcc compiler. This should be automatically
1650# done in the build
1651#
1652
1653.align 4
1654.bn_add_words:
1655#
1656# Handcoded version of bn_add_words
1657#
1658#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
1659#
1660# r3 = r
1661# r4 = a
1662# r5 = b
1663# r6 = n
1664#
1665# Note: No loop unrolling done since this is not a performance
1666# critical loop.
1667
1668 xor r0,r0,r0
1669#
1670# check for r6 = 0. Is this needed?
1671#
1672 addic. r6,r6,0 #test r6 and clear carry bit.
1673 bc BO_IF,CR0_EQ,Lppcasm_add_adios
1674 addi r4,r4,-$BNSZ
1675 addi r3,r3,-$BNSZ
1676 addi r5,r5,-$BNSZ
1677 mtctr r6
1678Lppcasm_add_mainloop:
1679 $LDU r7,$BNSZ(r4)
1680 $LDU r8,$BNSZ(r5)
1681 adde r8,r7,r8
1682 $STU r8,$BNSZ(r3)
1683 bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_add_mainloop
1684Lppcasm_add_adios:
1685 addze r3,r0 #return carry bit.
1686 bclr BO_ALWAYS,CR0_LT
1687 .long 0x00000000
1688
1689#
1690# NOTE: The following label name should be changed to
1691# "bn_div_words" i.e. remove the first dot
1692# for the gcc compiler. This should be automatically
1693# done in the build
1694#
1695
1696.align 4
1697.bn_div_words:
1698#
1699# This is a cleaned up version of code generated by
1700# the AIX compiler. The only optimization is to use
1701# the PPC instruction to count leading zeros instead
1702# of a call to num_bits_word. Since this was compiled
1703# only at level -O2, it could probably be squeezed further.
1704#
1705# r3 = h
1706# r4 = l
1707# r5 = d
1708
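
What the routine computes, in C terms, is the $BITS-bit quotient of the double word (h,l) by d, assuming h < d. Below is a sketch of the 32-bit flavour using a double-width type, which the asm does not have and therefore emulates by normalizing d and forming the quotient in two half-word steps; the asm also traps if h would make the quotient overflow:

	typedef unsigned int BN_ULONG;		/* 32-bit flavour assumed */
	typedef unsigned long long u64;

	BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
	{
		if (d == 0)
			return (BN_ULONG)-1;	/* matches the d==0 return below */
		return (BN_ULONG)((((u64)h << 32) | l) / d);
	}
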
1709 $UCMPI 0,r5,0 # compare r5 and 0
1710 bc BO_IF_NOT,CR0_EQ,Lppcasm_div1 # proceed if d!=0
1711 li r3,-1 # d=0 return -1
1712 bclr BO_ALWAYS,CR0_LT
1713Lppcasm_div1:
1714 xor r0,r0,r0 #r0=0
1715 li r8,$BITS
1716 $CNTLZ. r7,r5 #r7 = num leading 0s in d.
1717 bc BO_IF,CR0_EQ,Lppcasm_div2 #proceed if no leading zeros
1718 subf r8,r7,r8 #r8 = BN_num_bits_word(d)
1719 $SHR. r9,r3,r8 #are there any bits above r8'th?
1720	$TR	16,r9,r0		#if there are, signal to dump core...
1721Lppcasm_div2:
1722 $UCMP 0,r3,r5 #h>=d?
1723 bc BO_IF,CR0_LT,Lppcasm_div3 #goto Lppcasm_div3 if not
1724 subf r3,r5,r3 #h-=d ;
1725Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
1726 cmpi 0,0,r7,0 # is (i == 0)?
1727 bc BO_IF,CR0_EQ,Lppcasm_div4
1728 $SHL r3,r3,r7 # h = (h<< i)
1729 $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
1730 $SHL r5,r5,r7 # d<<=i
1731 or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
1732 $SHL r4,r4,r7 # l <<=i
1733Lppcasm_div4:
1734 $SHRI r9,r5,`$BITS/2` # r9 = dh
1735 # dl will be computed when needed
1736 # as it saves registers.
1737 li r6,2 #r6=2
1738 mtctr r6 #counter will be in count.
1739Lppcasm_divouterloop:
1740 $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
1741 $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
1742 # compute here for innerloop.
1743 $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
1744 bc BO_IF_NOT,CR0_EQ,Lppcasm_div5 # goto Lppcasm_div5 if not
1745
1746 li r8,-1
1747 $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
1748 b Lppcasm_div6
1749Lppcasm_div5:
1750 $UDIV r8,r3,r9 #q = h/dh
1751Lppcasm_div6:
1752 $UMULL r12,r9,r8 #th = q*dh
1753 $CLRU r10,r5,`$BITS/2` #r10=dl
1754 $UMULL r6,r8,r10 #tl = q*dl
1755
1756Lppcasm_divinnerloop:
1757 subf r10,r12,r3 #t = h -th
1758 $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
1759 addic. r7,r7,0 #test if r7 == 0. used below.
1760 # now want to compute
1761 # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
1762 # the following 2 instructions do that
1763 $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
1764 or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
1765 $UCMP 1,r6,r7 # compare (tl <= r7)
1766 bc BO_IF_NOT,CR0_EQ,Lppcasm_divinnerexit
1767 bc BO_IF_NOT,CR1_FEX,Lppcasm_divinnerexit
1768 addi r8,r8,-1 #q--
1769 subf r12,r9,r12 #th -=dh
1770 $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
1771 subf r6,r10,r6 #tl -=dl
1772 b Lppcasm_divinnerloop
1773Lppcasm_divinnerexit:
1774 $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
1775 $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
1776 $UCMP 1,r4,r11 # compare l and tl
1777 add r12,r12,r10 # th+=t
1778 bc BO_IF_NOT,CR1_FX,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
1779 addi r12,r12,1 # th++
1780Lppcasm_div7:
1781 subf r11,r11,r4 #r11=l-tl
1782 $UCMP 1,r3,r12 #compare h and th
1783 bc BO_IF_NOT,CR1_FX,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
1784 addi r8,r8,-1 # q--
1785 add r3,r5,r3 # h+=d
1786Lppcasm_div8:
1787 subf r12,r12,r3 #r12 = h-th
1788 $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
1789 # want to compute
1790 # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
1791 # the following 2 instructions will do this.
1792 $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
1793 $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
1794 bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_div9#if (count==0) break ;
1795 $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
1796 b Lppcasm_divouterloop
1797Lppcasm_div9:
1798 or r3,r8,r0
1799 bclr BO_ALWAYS,CR0_LT
1800 .long 0x00000000
1801
1802#
1803# NOTE: The following label name should be changed to
1804# "bn_sqr_words" i.e. remove the first dot
1805# for the gcc compiler. This should be automatically
1806# done in the build
1807#
1808.align 4
1809.bn_sqr_words:
1810#
1811# Optimized version of bn_sqr_words
1812#
1813# void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
1814#
1815# r3 = r
1816# r4 = a
1817# r5 = n
1818#
1819# r6 = a[i].
1820# r7,r8 = product.
1821#
1822# No unrolling done here. Not performance critical.
1823
1824 addic. r5,r5,0 #test r5.
1825 bc BO_IF,CR0_EQ,Lppcasm_sqr_adios
1826 addi r4,r4,-$BNSZ
1827 addi r3,r3,-$BNSZ
1828 mtctr r5
1829Lppcasm_sqr_mainloop:
1830 #sqr(r[0],r[1],a[0]);
1831 $LDU r6,$BNSZ(r4)
1832 $UMULL r7,r6,r6
1833 $UMULH r8,r6,r6
1834 $STU r7,$BNSZ(r3)
1835 $STU r8,$BNSZ(r3)
1836 bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sqr_mainloop
1837Lppcasm_sqr_adios:
1838 bclr BO_ALWAYS,CR0_LT
1839 .long 0x00000000
1840
1841
1842#
1843# NOTE: The following label name should be changed to
1844# "bn_mul_words" i.e. remove the first dot
1845# for the gcc compiler. This should be automatically
1846# done in the build
1847#
1848
1849.align 4
1850.bn_mul_words:
1851#
1852# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
1853#
1854# r3 = rp
1855# r4 = ap
1856# r5 = num
1857# r6 = w
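
In C the routine is a single multiply-and-carry loop, sketched below for the 32-bit flavour (illustrative name). The asm unrolls it four-way and defers each spin's carry into the next via adde, which is what the commented-out addze lines are about:

	typedef unsigned int BN_ULONG;		/* 32-bit flavour assumed */
	typedef unsigned long long u64;

	BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
	                          int num, BN_ULONG w)
	{
		BN_ULONG c = 0;
		int i;

		for (i = 0; i < num; i++) {
			u64 t = (u64)ap[i] * w + c;	/* rp[i] = ap[i]*w + carry */
			rp[i] = (BN_ULONG)t;
			c = (BN_ULONG)(t >> 32);
		}
		return c;
	}
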
1858 xor r0,r0,r0
1859 xor r12,r12,r12 # used for carry
1860 rlwinm. r7,r5,30,2,31 # num >> 2
1861 bc BO_IF,CR0_EQ,Lppcasm_mw_REM
1862 mtctr r7
1863Lppcasm_mw_LOOP:
1864 #mul(rp[0],ap[0],w,c1);
1865 $LD r8,`0*$BNSZ`(r4)
1866 $UMULL r9,r6,r8
1867 $UMULH r10,r6,r8
1868 addc r9,r9,r12
1869 #addze r10,r10 #carry is NOT ignored.
1870 #will be taken care of
1871 #in second spin below
1872 #using adde.
1873 $ST r9,`0*$BNSZ`(r3)
1874 #mul(rp[1],ap[1],w,c1);
1875 $LD r8,`1*$BNSZ`(r4)
1876 $UMULL r11,r6,r8
1877 $UMULH r12,r6,r8
1878 adde r11,r11,r10
1879 #addze r12,r12
1880 $ST r11,`1*$BNSZ`(r3)
1881 #mul(rp[2],ap[2],w,c1);
1882 $LD r8,`2*$BNSZ`(r4)
1883 $UMULL r9,r6,r8
1884 $UMULH r10,r6,r8
1885 adde r9,r9,r12
1886 #addze r10,r10
1887 $ST r9,`2*$BNSZ`(r3)
1888 #mul_add(rp[3],ap[3],w,c1);
1889 $LD r8,`3*$BNSZ`(r4)
1890 $UMULL r11,r6,r8
1891 $UMULH r12,r6,r8
1892 adde r11,r11,r10
1893 addze r12,r12 #this spin we collect carry into
1894 #r12
1895 $ST r11,`3*$BNSZ`(r3)
1896
1897 addi r3,r3,`4*$BNSZ`
1898 addi r4,r4,`4*$BNSZ`
1899 bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_mw_LOOP
1900
1901Lppcasm_mw_REM:
1902 andi. r5,r5,0x3
1903 bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
1904 #mul(rp[0],ap[0],w,c1);
1905 $LD r8,`0*$BNSZ`(r4)
1906 $UMULL r9,r6,r8
1907 $UMULH r10,r6,r8
1908 addc r9,r9,r12
1909 addze r10,r10
1910 $ST r9,`0*$BNSZ`(r3)
1911 addi r12,r10,0
1912
1913 addi r5,r5,-1
1914 cmpli 0,0,r5,0
1915 bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
1916
1917
1918 #mul(rp[1],ap[1],w,c1);
1919 $LD r8,`1*$BNSZ`(r4)
1920 $UMULL r9,r6,r8
1921 $UMULH r10,r6,r8
1922 addc r9,r9,r12
1923 addze r10,r10
1924 $ST r9,`1*$BNSZ`(r3)
1925 addi r12,r10,0
1926
1927 addi r5,r5,-1
1928 cmpli 0,0,r5,0
1929 bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
1930
1931 #mul_add(rp[2],ap[2],w,c1);
1932 $LD r8,`2*$BNSZ`(r4)
1933 $UMULL r9,r6,r8
1934 $UMULH r10,r6,r8
1935 addc r9,r9,r12
1936 addze r10,r10
1937 $ST r9,`2*$BNSZ`(r3)
1938 addi r12,r10,0
1939
1940Lppcasm_mw_OVER:
1941 addi r3,r12,0
1942 bclr BO_ALWAYS,CR0_LT
1943 .long 0x00000000
1944
1945#
1946# NOTE: The following label name should be changed to
1947# "bn_mul_add_words" i.e. remove the first dot
1948# for the gcc compiler. This should be automatically
1949# done in the build
1950#
1951
1952.align 4
1953.bn_mul_add_words:
1954#
1955# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
1956#
1957# r3 = rp
1958# r4 = ap
1959# r5 = num
1960# r6 = w
1961#
1962# empirical evidence suggests that the unrolled version performs best!!
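# returns the final carry word (r12, copied to r3 at Lppcasm_maw_adios)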
1963#
1964 xor r0,r0,r0 #r0 = 0
1965 xor r12,r12,r12 #r12 = 0 . used for carry
1966 rlwinm. r7,r5,30,2,31 # num >> 2
1967	bc	BO_IF,CR0_EQ,Lppcasm_maw_leftover	# if (num < 4) goto Lppcasm_maw_leftover
1968 mtctr r7
1969Lppcasm_maw_mainloop:
1970 #mul_add(rp[0],ap[0],w,c1);
1971 $LD r8,`0*$BNSZ`(r4)
1972 $LD r11,`0*$BNSZ`(r3)
1973 $UMULL r9,r6,r8
1974 $UMULH r10,r6,r8
1975 addc r9,r9,r12 #r12 is carry.
1976 addze r10,r10
1977 addc r9,r9,r11
1978 #addze r10,r10
1979 #the above instruction addze
1980 #is NOT needed. Carry will NOT
1981 #be ignored. It's not affected
1982 #by multiply and will be collected
1983 #in the next spin
1984 $ST r9,`0*$BNSZ`(r3)
1985
1986 #mul_add(rp[1],ap[1],w,c1);
1987 $LD r8,`1*$BNSZ`(r4)
1988 $LD r9,`1*$BNSZ`(r3)
1989 $UMULL r11,r6,r8
1990 $UMULH r12,r6,r8
1991 adde r11,r11,r10 #r10 is carry.
1992 addze r12,r12
1993 addc r11,r11,r9
1994 #addze r12,r12
1995 $ST r11,`1*$BNSZ`(r3)
1996
1997 #mul_add(rp[2],ap[2],w,c1);
1998 $LD r8,`2*$BNSZ`(r4)
1999 $UMULL r9,r6,r8
2000 $LD r11,`2*$BNSZ`(r3)
2001 $UMULH r10,r6,r8
2002 adde r9,r9,r12
2003 addze r10,r10
2004 addc r9,r9,r11
2005 #addze r10,r10
2006 $ST r9,`2*$BNSZ`(r3)
2007
2008 #mul_add(rp[3],ap[3],w,c1);
2009 $LD r8,`3*$BNSZ`(r4)
2010 $UMULL r11,r6,r8
2011 $LD r9,`3*$BNSZ`(r3)
2012 $UMULH r12,r6,r8
2013 adde r11,r11,r10
2014 addze r12,r12
2015 addc r11,r11,r9
2016 addze r12,r12
2017 $ST r11,`3*$BNSZ`(r3)
2018 addi r3,r3,`4*$BNSZ`
2019 addi r4,r4,`4*$BNSZ`
2020 bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_maw_mainloop
2021
2022Lppcasm_maw_leftover:
2023 andi. r5,r5,0x3
2024 bc BO_IF,CR0_EQ,Lppcasm_maw_adios
2025 addi r3,r3,-$BNSZ
2026 addi r4,r4,-$BNSZ
2027 #mul_add(rp[0],ap[0],w,c1);
2028 mtctr r5
2029 $LDU r8,$BNSZ(r4)
2030 $UMULL r9,r6,r8
2031 $UMULH r10,r6,r8
2032 $LDU r11,$BNSZ(r3)
2033 addc r9,r9,r11
2034 addze r10,r10
2035 addc r9,r9,r12
2036 addze r12,r10
2037 $ST r9,0(r3)
2038
2039 bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios
2040 #mul_add(rp[1],ap[1],w,c1);
2041 $LDU r8,$BNSZ(r4)
2042 $UMULL r9,r6,r8
2043 $UMULH r10,r6,r8
2044 $LDU r11,$BNSZ(r3)
2045 addc r9,r9,r11
2046 addze r10,r10
2047 addc r9,r9,r12
2048 addze r12,r10
2049 $ST r9,0(r3)
2050
2051 bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios
2052 #mul_add(rp[2],ap[2],w,c1);
2053 $LDU r8,$BNSZ(r4)
2054 $UMULL r9,r6,r8
2055 $UMULH r10,r6,r8
2056 $LDU r11,$BNSZ(r3)
2057 addc r9,r9,r11
2058 addze r10,r10
2059 addc r9,r9,r12
2060 addze r12,r10
2061 $ST r9,0(r3)
2062
2063Lppcasm_maw_adios:
2064 addi r3,r12,0
2065 bclr BO_ALWAYS,CR0_LT
2066 .long 0x00000000
2067 .align 4
2068EOF
2069 $data =~ s/\`([^\`]*)\`/eval $1/gem;
2070
2071 # if some assembler chokes on some simplified mnemonic,
2072 # this is the spot to fix it up, e.g.:
2073 # GNU as doesn't seem to accept cmplw, 32-bit unsigned compare
2074 $data =~ s/^(\s*)cmplw(\s+)([^,]+),(.*)/$1cmpl$2$3,0,$4/gm;
2075 # assembler X doesn't accept li, load immediate value
2076 #$data =~ s/^(\s*)li(\s+)([^,]+),(.*)/$1addi$2$3,0,$4/gm;
2077 return($data);
2078}
diff --git a/src/lib/libcrypto/bn/asm/ppc64-mont.pl b/src/lib/libcrypto/bn/asm/ppc64-mont.pl
deleted file mode 100644
index 3449b35855..0000000000
--- a/src/lib/libcrypto/bn/asm/ppc64-mont.pl
+++ /dev/null
@@ -1,918 +0,0 @@
1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# December 2007
11
12# The reason for the undertaken effort is basically the following. Even
13# though the Power 6 CPU operates at an incredible 4.7GHz clock, its PKI
14# performance was observed to be less than impressive, essentially as
15# fast as 1.8GHz PPC970, or 2.6 times(!) slower than one would hope.
16# Well, it's not surprising that IBM had to make some sacrifices to
17# boost the clock frequency that much, but no overall improvement?
18# Having observed how much difference switching to FPU made on
19# UltraSPARC, playing the same stunt on Power 6 appeared appropriate...
20# Unfortunately the resulting performance improvement is not as
21# impressive, ~30%, and in absolute terms is still very far from what
22# one would expect from 4.7GHz CPU. There is a chance that I'm doing
23# something wrong, but in the absence of assembler-level micro-profiling
24# data, or at least a decent platform guide, I can't tell... Or better
25# results might be achieved with VMX... Anyway, this module provides
26# *worse* performance on other PowerPC implementations, ~40-15% slower
27# on PPC970 depending on key length and ~40% slower on Power 5 for all
28# key lengths. As it's obviously inappropriate as "best all-round"
29# alternative, it has to be complemented with run-time CPU family
30# detection. Oh! It should also be noted that unlike on other PowerPC
31# implementations, the IALU ppc-mont.pl module performs *suboptimally* on
32# >=1024-bit key lengths on Power 6. It should also be noted that
33# *everything* said so far applies to 64-bit builds! As far as 32-bit
34# application executed on a 64-bit CPU goes, this module is likely to
35# become the preferred choice, because it's easy to adapt it for such
36# a case and *is* faster than 32-bit ppc-mont.pl on *all* processors.
37
38# February 2008
39
40# Micro-profiling assisted optimization results in ~15% improvement
41# over original ppc64-mont.pl version, or overall ~50% improvement
42# over ppc.pl module on Power 6. If compared to ppc-mont.pl on same
43# Power 6 CPU, this module is 5-150% faster depending on key length,
44# [hereafter] more for longer keys. But if compared to ppc-mont.pl
45# on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive
46# in absolute terms, but it's apparently the way Power 6 is...
47
48$flavour = shift;
49
50if ($flavour =~ /32/) {
51 $SIZE_T=4;
52 $RZONE= 224;
53 $FRAME= $SIZE_T*12+8*12;
54 $fname= "bn_mul_mont_ppc64";
55
56 $STUX= "stwux"; # store indexed and update
57 $PUSH= "stw";
58 $POP= "lwz";
59 die "not implemented yet";
60} elsif ($flavour =~ /64/) {
61 $SIZE_T=8;
62 $RZONE= 288;
63 $FRAME= $SIZE_T*12+8*12;
64 $fname= "bn_mul_mont";
65
66 # same as above, but 64-bit mnemonics...
67 $STUX= "stdux"; # store indexed and update
68 $PUSH= "std";
69 $POP= "ld";
70} else { die "nonsense $flavour"; }
71
72$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
73( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
74( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
75die "can't locate ppc-xlate.pl";
76
77open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
78
79$FRAME=($FRAME+63)&~63;
80$TRANSFER=16*8;
81
82$carry="r0";
83$sp="r1";
84$toc="r2";
85$rp="r3"; $ovf="r3";
86$ap="r4";
87$bp="r5";
88$np="r6";
89$n0="r7";
90$num="r8";
91$rp="r9"; # $rp is reassigned
92$tp="r10";
93$j="r11";
94$i="r12";
95# non-volatile registers
96$nap_d="r14"; # interleaved ap and np in double format
97$a0="r15"; # ap[0]
98$t0="r16"; # temporary registers
99$t1="r17";
100$t2="r18";
101$t3="r19";
102$t4="r20";
103$t5="r21";
104$t6="r22";
105$t7="r23";
106
107# PPC offers enough register bank capacity to unroll inner loops twice
108#
109# ..A3A2A1A0
110# dcba
111# -----------
112# A0a
113# A0b
114# A0c
115# A0d
116# A1a
117# A1b
118# A1c
119# A1d
120# A2a
121# A2b
122# A2c
123# A2d
124# A3a
125# A3b
126# A3c
127# A3d
128# ..a
129# ..b
130#
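# The following is a hypothetical Perl sketch of the scheme pictured
# above (illustrative only, assuming a 64-bit perl; limb_products is
# not used by this script): the a-words are handled as 32-bit halves
# and the b-word as 16-bit limbs, so every partial product stays below
# 2^48 and is therefore exact in a double's 53-bit mantissa.
sub limb_products {
	my ($a_word,$b_word) = @_;
	my @A = ($a_word&0xffffffff, $a_word>>32);	# 32-bit halves
	my @b = map { ($b_word>>16*$_)&0xffff } 0..3;	# limbs a,b,c,d
	my @p;
	for my $i (0..$#A) {
	    for my $j (0..$#b) {
		# bit position and exact value of partial product A{i}*b{j}
		push @p, { shift=>32*$i+16*$j, value=>$A[$i]*$b[$j] };
	    }
	}
	return @p;	# re-accumulated with integer carries by the caller
}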
131$ba="f0"; $bb="f1"; $bc="f2"; $bd="f3";
132$na="f4"; $nb="f5"; $nc="f6"; $nd="f7";
133$dota="f8"; $dotb="f9";
134$A0="f10"; $A1="f11"; $A2="f12"; $A3="f13";
135$N0="f14"; $N1="f15"; $N2="f16"; $N3="f17";
136$T0a="f18"; $T0b="f19";
137$T1a="f20"; $T1b="f21";
138$T2a="f22"; $T2b="f23";
139$T3a="f24"; $T3b="f25";
140
141# sp----------->+-------------------------------+
142# | saved sp |
143# +-------------------------------+
144# | |
145# +-------------------------------+
146# | 10 saved gpr, r14-r23 |
147# . .
148# . .
149# +12*size_t +-------------------------------+
150# | 12 saved fpr, f14-f25 |
151# . .
152# . .
153# +12*8 +-------------------------------+
154# | padding to 64 byte boundary |
155# . .
156# +X +-------------------------------+
157# | 16 gpr<->fpr transfer zone |
158# . .
159# . .
160# +16*8 +-------------------------------+
161# | __int64 tmp[-1] |
162# +-------------------------------+
163# | __int64 tmp[num] |
164# . .
165# . .
166# . .
167# +(num+1)*8 +-------------------------------+
168# | padding to 64 byte boundary |
169# . .
170# +X +-------------------------------+
171# | double nap_d[4*num] |
172# . .
173# . .
174# . .
175# +-------------------------------+
176
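# A rough model of the allocation pictured above (hypothetical helper;
# the real prologue additionally rounds the new $sp down to a 4KB
# boundary to minimize TLB usage):
sub mont_alloc_bytes {
	my ($num) = @_;			# words in the modulus
	my $bytes = 8*$num;
	return 4*$bytes			# double nap_d[4*num]
	     + $bytes + 8		# __int64 tmp[num+1]
	     + $FRAME + $TRANSFER	# saved regs + gpr<->fpr zone
	     + 64			# alignment padding
	     + $RZONE;			# ABI red zone
}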
177$code=<<___;
178.machine "any"
179.text
180
181.globl .$fname
182.align 5
183.$fname:
184 cmpwi $num,4
185 mr $rp,r3 ; $rp is reassigned
186 li r3,0 ; possible "not handled" return code
187 bltlr-
188 andi. r0,$num,1 ; $num has to be even
189 bnelr-
190
191 slwi $num,$num,3 ; num*=8
192 li $i,-4096
193 slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num
194 add $tp,$tp,$num ; place for tp[num+1]
195 addi $tp,$tp,`$FRAME+$TRANSFER+8+64+$RZONE`
196 subf $tp,$tp,$sp ; $sp-$tp
197 and $tp,$tp,$i ; minimize TLB usage
198 subf $tp,$sp,$tp ; $tp-$sp
199 $STUX $sp,$sp,$tp ; alloca
200
201 $PUSH r14,`2*$SIZE_T`($sp)
202 $PUSH r15,`3*$SIZE_T`($sp)
203 $PUSH r16,`4*$SIZE_T`($sp)
204 $PUSH r17,`5*$SIZE_T`($sp)
205 $PUSH r18,`6*$SIZE_T`($sp)
206 $PUSH r19,`7*$SIZE_T`($sp)
207 $PUSH r20,`8*$SIZE_T`($sp)
208 $PUSH r21,`9*$SIZE_T`($sp)
209 $PUSH r22,`10*$SIZE_T`($sp)
210 $PUSH r23,`11*$SIZE_T`($sp)
211 stfd f14,`12*$SIZE_T+0`($sp)
212 stfd f15,`12*$SIZE_T+8`($sp)
213 stfd f16,`12*$SIZE_T+16`($sp)
214 stfd f17,`12*$SIZE_T+24`($sp)
215 stfd f18,`12*$SIZE_T+32`($sp)
216 stfd f19,`12*$SIZE_T+40`($sp)
217 stfd f20,`12*$SIZE_T+48`($sp)
218 stfd f21,`12*$SIZE_T+56`($sp)
219 stfd f22,`12*$SIZE_T+64`($sp)
220 stfd f23,`12*$SIZE_T+72`($sp)
221 stfd f24,`12*$SIZE_T+80`($sp)
222 stfd f25,`12*$SIZE_T+88`($sp)
223
224 ld $a0,0($ap) ; pull ap[0] value
225 ld $n0,0($n0) ; pull n0[0] value
226 ld $t3,0($bp) ; bp[0]
227
228 addi $tp,$sp,`$FRAME+$TRANSFER+8+64`
229 li $i,-64
230 add $nap_d,$tp,$num
231 and $nap_d,$nap_d,$i ; align to 64 bytes
232
233 mulld $t7,$a0,$t3 ; ap[0]*bp[0]
234 ; nap_d is off by 1, because it's used with stfdu/lfdu
235 addi $nap_d,$nap_d,-8
236 srwi $j,$num,`3+1` ; counter register, num/2
237 mulld $t7,$t7,$n0 ; tp[0]*n0
238 addi $j,$j,-1
239 addi $tp,$sp,`$FRAME+$TRANSFER-8`
240 li $carry,0
241 mtctr $j
242
243 ; transfer bp[0] to FPU as 4x16-bit values
244 extrdi $t0,$t3,16,48
245 extrdi $t1,$t3,16,32
246 extrdi $t2,$t3,16,16
247 extrdi $t3,$t3,16,0
248 std $t0,`$FRAME+0`($sp)
249 std $t1,`$FRAME+8`($sp)
250 std $t2,`$FRAME+16`($sp)
251 std $t3,`$FRAME+24`($sp)
252 ; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values
253 extrdi $t4,$t7,16,48
254 extrdi $t5,$t7,16,32
255 extrdi $t6,$t7,16,16
256 extrdi $t7,$t7,16,0
257 std $t4,`$FRAME+32`($sp)
258 std $t5,`$FRAME+40`($sp)
259 std $t6,`$FRAME+48`($sp)
260 std $t7,`$FRAME+56`($sp)
261 lwz $t0,4($ap) ; load a[j] as 32-bit word pair
262 lwz $t1,0($ap)
263 lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
264 lwz $t3,8($ap)
265 lwz $t4,4($np) ; load n[j] as 32-bit word pair
266 lwz $t5,0($np)
267 lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
268 lwz $t7,8($np)
269 lfd $ba,`$FRAME+0`($sp)
270 lfd $bb,`$FRAME+8`($sp)
271 lfd $bc,`$FRAME+16`($sp)
272 lfd $bd,`$FRAME+24`($sp)
273 lfd $na,`$FRAME+32`($sp)
274 lfd $nb,`$FRAME+40`($sp)
275 lfd $nc,`$FRAME+48`($sp)
276 lfd $nd,`$FRAME+56`($sp)
277 std $t0,`$FRAME+64`($sp)
278 std $t1,`$FRAME+72`($sp)
279 std $t2,`$FRAME+80`($sp)
280 std $t3,`$FRAME+88`($sp)
281 std $t4,`$FRAME+96`($sp)
282 std $t5,`$FRAME+104`($sp)
283 std $t6,`$FRAME+112`($sp)
284 std $t7,`$FRAME+120`($sp)
285 fcfid $ba,$ba
286 fcfid $bb,$bb
287 fcfid $bc,$bc
288 fcfid $bd,$bd
289 fcfid $na,$na
290 fcfid $nb,$nb
291 fcfid $nc,$nc
292 fcfid $nd,$nd
293
294 lfd $A0,`$FRAME+64`($sp)
295 lfd $A1,`$FRAME+72`($sp)
296 lfd $A2,`$FRAME+80`($sp)
297 lfd $A3,`$FRAME+88`($sp)
298 lfd $N0,`$FRAME+96`($sp)
299 lfd $N1,`$FRAME+104`($sp)
300 lfd $N2,`$FRAME+112`($sp)
301 lfd $N3,`$FRAME+120`($sp)
302 fcfid $A0,$A0
303 fcfid $A1,$A1
304 fcfid $A2,$A2
305 fcfid $A3,$A3
306 fcfid $N0,$N0
307 fcfid $N1,$N1
308 fcfid $N2,$N2
309 fcfid $N3,$N3
310 addi $ap,$ap,16
311 addi $np,$np,16
312
313 fmul $T1a,$A1,$ba
314 fmul $T1b,$A1,$bb
315 stfd $A0,8($nap_d) ; save a[j] in double format
316 stfd $A1,16($nap_d)
317 fmul $T2a,$A2,$ba
318 fmul $T2b,$A2,$bb
319 stfd $A2,24($nap_d) ; save a[j+1] in double format
320 stfd $A3,32($nap_d)
321 fmul $T3a,$A3,$ba
322 fmul $T3b,$A3,$bb
323 stfd $N0,40($nap_d) ; save n[j] in double format
324 stfd $N1,48($nap_d)
325 fmul $T0a,$A0,$ba
326 fmul $T0b,$A0,$bb
327 stfd $N2,56($nap_d) ; save n[j+1] in double format
328 stfdu $N3,64($nap_d)
329
330 fmadd $T1a,$A0,$bc,$T1a
331 fmadd $T1b,$A0,$bd,$T1b
332 fmadd $T2a,$A1,$bc,$T2a
333 fmadd $T2b,$A1,$bd,$T2b
334 fmadd $T3a,$A2,$bc,$T3a
335 fmadd $T3b,$A2,$bd,$T3b
336 fmul $dota,$A3,$bc
337 fmul $dotb,$A3,$bd
338
339 fmadd $T1a,$N1,$na,$T1a
340 fmadd $T1b,$N1,$nb,$T1b
341 fmadd $T2a,$N2,$na,$T2a
342 fmadd $T2b,$N2,$nb,$T2b
343 fmadd $T3a,$N3,$na,$T3a
344 fmadd $T3b,$N3,$nb,$T3b
345 fmadd $T0a,$N0,$na,$T0a
346 fmadd $T0b,$N0,$nb,$T0b
347
348 fmadd $T1a,$N0,$nc,$T1a
349 fmadd $T1b,$N0,$nd,$T1b
350 fmadd $T2a,$N1,$nc,$T2a
351 fmadd $T2b,$N1,$nd,$T2b
352 fmadd $T3a,$N2,$nc,$T3a
353 fmadd $T3b,$N2,$nd,$T3b
354 fmadd $dota,$N3,$nc,$dota
355 fmadd $dotb,$N3,$nd,$dotb
356
357 fctid $T0a,$T0a
358 fctid $T0b,$T0b
359 fctid $T1a,$T1a
360 fctid $T1b,$T1b
361 fctid $T2a,$T2a
362 fctid $T2b,$T2b
363 fctid $T3a,$T3a
364 fctid $T3b,$T3b
365
366 stfd $T0a,`$FRAME+0`($sp)
367 stfd $T0b,`$FRAME+8`($sp)
368 stfd $T1a,`$FRAME+16`($sp)
369 stfd $T1b,`$FRAME+24`($sp)
370 stfd $T2a,`$FRAME+32`($sp)
371 stfd $T2b,`$FRAME+40`($sp)
372 stfd $T3a,`$FRAME+48`($sp)
373 stfd $T3b,`$FRAME+56`($sp)
374
375.align 5
376L1st:
377 lwz $t0,4($ap) ; load a[j] as 32-bit word pair
378 lwz $t1,0($ap)
379 lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
380 lwz $t3,8($ap)
381 lwz $t4,4($np) ; load n[j] as 32-bit word pair
382 lwz $t5,0($np)
383 lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
384 lwz $t7,8($np)
385 std $t0,`$FRAME+64`($sp)
386 std $t1,`$FRAME+72`($sp)
387 std $t2,`$FRAME+80`($sp)
388 std $t3,`$FRAME+88`($sp)
389 std $t4,`$FRAME+96`($sp)
390 std $t5,`$FRAME+104`($sp)
391 std $t6,`$FRAME+112`($sp)
392 std $t7,`$FRAME+120`($sp)
393 ld $t0,`$FRAME+0`($sp)
394 ld $t1,`$FRAME+8`($sp)
395 ld $t2,`$FRAME+16`($sp)
396 ld $t3,`$FRAME+24`($sp)
397 ld $t4,`$FRAME+32`($sp)
398 ld $t5,`$FRAME+40`($sp)
399 ld $t6,`$FRAME+48`($sp)
400 ld $t7,`$FRAME+56`($sp)
401 lfd $A0,`$FRAME+64`($sp)
402 lfd $A1,`$FRAME+72`($sp)
403 lfd $A2,`$FRAME+80`($sp)
404 lfd $A3,`$FRAME+88`($sp)
405 lfd $N0,`$FRAME+96`($sp)
406 lfd $N1,`$FRAME+104`($sp)
407 lfd $N2,`$FRAME+112`($sp)
408 lfd $N3,`$FRAME+120`($sp)
409 fcfid $A0,$A0
410 fcfid $A1,$A1
411 fcfid $A2,$A2
412 fcfid $A3,$A3
413 fcfid $N0,$N0
414 fcfid $N1,$N1
415 fcfid $N2,$N2
416 fcfid $N3,$N3
417 addi $ap,$ap,16
418 addi $np,$np,16
419
420 fmul $T1a,$A1,$ba
421 fmul $T1b,$A1,$bb
422 fmul $T2a,$A2,$ba
423 fmul $T2b,$A2,$bb
424 stfd $A0,8($nap_d) ; save a[j] in double format
425 stfd $A1,16($nap_d)
426 fmul $T3a,$A3,$ba
427 fmul $T3b,$A3,$bb
428 fmadd $T0a,$A0,$ba,$dota
429 fmadd $T0b,$A0,$bb,$dotb
430 stfd $A2,24($nap_d) ; save a[j+1] in double format
431 stfd $A3,32($nap_d)
432
433 fmadd $T1a,$A0,$bc,$T1a
434 fmadd $T1b,$A0,$bd,$T1b
435 fmadd $T2a,$A1,$bc,$T2a
436 fmadd $T2b,$A1,$bd,$T2b
437 stfd $N0,40($nap_d) ; save n[j] in double format
438 stfd $N1,48($nap_d)
439 fmadd $T3a,$A2,$bc,$T3a
440 fmadd $T3b,$A2,$bd,$T3b
441 add $t0,$t0,$carry ; can not overflow
442 fmul $dota,$A3,$bc
443 fmul $dotb,$A3,$bd
444 stfd $N2,56($nap_d) ; save n[j+1] in double format
445 stfdu $N3,64($nap_d)
446 srdi $carry,$t0,16
447 add $t1,$t1,$carry
448 srdi $carry,$t1,16
449
450 fmadd $T1a,$N1,$na,$T1a
451 fmadd $T1b,$N1,$nb,$T1b
452 insrdi $t0,$t1,16,32
453 fmadd $T2a,$N2,$na,$T2a
454 fmadd $T2b,$N2,$nb,$T2b
455 add $t2,$t2,$carry
456 fmadd $T3a,$N3,$na,$T3a
457 fmadd $T3b,$N3,$nb,$T3b
458 srdi $carry,$t2,16
459 fmadd $T0a,$N0,$na,$T0a
460 fmadd $T0b,$N0,$nb,$T0b
461 insrdi $t0,$t2,16,16
462 add $t3,$t3,$carry
463 srdi $carry,$t3,16
464
465 fmadd $T1a,$N0,$nc,$T1a
466 fmadd $T1b,$N0,$nd,$T1b
467 insrdi $t0,$t3,16,0 ; 0..63 bits
468 fmadd $T2a,$N1,$nc,$T2a
469 fmadd $T2b,$N1,$nd,$T2b
470 add $t4,$t4,$carry
471 fmadd $T3a,$N2,$nc,$T3a
472 fmadd $T3b,$N2,$nd,$T3b
473 srdi $carry,$t4,16
474 fmadd $dota,$N3,$nc,$dota
475 fmadd $dotb,$N3,$nd,$dotb
476 add $t5,$t5,$carry
477 srdi $carry,$t5,16
478 insrdi $t4,$t5,16,32
479
480 fctid $T0a,$T0a
481 fctid $T0b,$T0b
482 add $t6,$t6,$carry
483 fctid $T1a,$T1a
484 fctid $T1b,$T1b
485 srdi $carry,$t6,16
486 fctid $T2a,$T2a
487 fctid $T2b,$T2b
488 insrdi $t4,$t6,16,16
489 fctid $T3a,$T3a
490 fctid $T3b,$T3b
491 add $t7,$t7,$carry
492 insrdi $t4,$t7,16,0 ; 64..127 bits
493 srdi $carry,$t7,16 ; upper 33 bits
494
495 stfd $T0a,`$FRAME+0`($sp)
496 stfd $T0b,`$FRAME+8`($sp)
497 stfd $T1a,`$FRAME+16`($sp)
498 stfd $T1b,`$FRAME+24`($sp)
499 stfd $T2a,`$FRAME+32`($sp)
500 stfd $T2b,`$FRAME+40`($sp)
501 stfd $T3a,`$FRAME+48`($sp)
502 stfd $T3b,`$FRAME+56`($sp)
503 std $t0,8($tp) ; tp[j-1]
504 stdu $t4,16($tp) ; tp[j]
505 bdnz- L1st
506
507 fctid $dota,$dota
508 fctid $dotb,$dotb
509
510 ld $t0,`$FRAME+0`($sp)
511 ld $t1,`$FRAME+8`($sp)
512 ld $t2,`$FRAME+16`($sp)
513 ld $t3,`$FRAME+24`($sp)
514 ld $t4,`$FRAME+32`($sp)
515 ld $t5,`$FRAME+40`($sp)
516 ld $t6,`$FRAME+48`($sp)
517 ld $t7,`$FRAME+56`($sp)
518 stfd $dota,`$FRAME+64`($sp)
519 stfd $dotb,`$FRAME+72`($sp)
520
521 add $t0,$t0,$carry ; can not overflow
522 srdi $carry,$t0,16
523 add $t1,$t1,$carry
524 srdi $carry,$t1,16
525 insrdi $t0,$t1,16,32
526 add $t2,$t2,$carry
527 srdi $carry,$t2,16
528 insrdi $t0,$t2,16,16
529 add $t3,$t3,$carry
530 srdi $carry,$t3,16
531 insrdi $t0,$t3,16,0 ; 0..63 bits
532 add $t4,$t4,$carry
533 srdi $carry,$t4,16
534 add $t5,$t5,$carry
535 srdi $carry,$t5,16
536 insrdi $t4,$t5,16,32
537 add $t6,$t6,$carry
538 srdi $carry,$t6,16
539 insrdi $t4,$t6,16,16
540 add $t7,$t7,$carry
541 insrdi $t4,$t7,16,0 ; 64..127 bits
542 srdi $carry,$t7,16 ; upper 33 bits
543 ld $t6,`$FRAME+64`($sp)
544 ld $t7,`$FRAME+72`($sp)
545
546 std $t0,8($tp) ; tp[j-1]
547 stdu $t4,16($tp) ; tp[j]
548
549 add $t6,$t6,$carry ; can not overflow
550 srdi $carry,$t6,16
551 add $t7,$t7,$carry
552 insrdi $t6,$t7,48,0
553 srdi $ovf,$t7,48
554 std $t6,8($tp) ; tp[num-1]
555
556 slwi $t7,$num,2
557 subf $nap_d,$t7,$nap_d ; rewind pointer
558
559 li $i,8 ; i=1
560.align 5
561Louter:
562 ldx $t3,$bp,$i ; bp[i]
563 ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0]
564 mulld $t7,$a0,$t3 ; ap[0]*bp[i]
565
566 addi $tp,$sp,`$FRAME+$TRANSFER`
567 add $t7,$t7,$t6 ; ap[0]*bp[i]+tp[0]
568 li $carry,0
569 mulld $t7,$t7,$n0 ; tp[0]*n0
570 mtctr $j
571
572 ; transfer bp[i] to FPU as 4x16-bit values
573 extrdi $t0,$t3,16,48
574 extrdi $t1,$t3,16,32
575 extrdi $t2,$t3,16,16
576 extrdi $t3,$t3,16,0
577 std $t0,`$FRAME+0`($sp)
578 std $t1,`$FRAME+8`($sp)
579 std $t2,`$FRAME+16`($sp)
580 std $t3,`$FRAME+24`($sp)
581 ; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values
582 extrdi $t4,$t7,16,48
583 extrdi $t5,$t7,16,32
584 extrdi $t6,$t7,16,16
585 extrdi $t7,$t7,16,0
586 std $t4,`$FRAME+32`($sp)
587 std $t5,`$FRAME+40`($sp)
588 std $t6,`$FRAME+48`($sp)
589 std $t7,`$FRAME+56`($sp)
590
591 lfd $A0,8($nap_d) ; load a[j] in double format
592 lfd $A1,16($nap_d)
593 lfd $A2,24($nap_d) ; load a[j+1] in double format
594 lfd $A3,32($nap_d)
595 lfd $N0,40($nap_d) ; load n[j] in double format
596 lfd $N1,48($nap_d)
597 lfd $N2,56($nap_d) ; load n[j+1] in double format
598 lfdu $N3,64($nap_d)
599
600 lfd $ba,`$FRAME+0`($sp)
601 lfd $bb,`$FRAME+8`($sp)
602 lfd $bc,`$FRAME+16`($sp)
603 lfd $bd,`$FRAME+24`($sp)
604 lfd $na,`$FRAME+32`($sp)
605 lfd $nb,`$FRAME+40`($sp)
606 lfd $nc,`$FRAME+48`($sp)
607 lfd $nd,`$FRAME+56`($sp)
608
609 fcfid $ba,$ba
610 fcfid $bb,$bb
611 fcfid $bc,$bc
612 fcfid $bd,$bd
613 fcfid $na,$na
614 fcfid $nb,$nb
615 fcfid $nc,$nc
616 fcfid $nd,$nd
617
618 fmul $T1a,$A1,$ba
619 fmul $T1b,$A1,$bb
620 fmul $T2a,$A2,$ba
621 fmul $T2b,$A2,$bb
622 fmul $T3a,$A3,$ba
623 fmul $T3b,$A3,$bb
624 fmul $T0a,$A0,$ba
625 fmul $T0b,$A0,$bb
626
627 fmadd $T1a,$A0,$bc,$T1a
628 fmadd $T1b,$A0,$bd,$T1b
629 fmadd $T2a,$A1,$bc,$T2a
630 fmadd $T2b,$A1,$bd,$T2b
631 fmadd $T3a,$A2,$bc,$T3a
632 fmadd $T3b,$A2,$bd,$T3b
633 fmul $dota,$A3,$bc
634 fmul $dotb,$A3,$bd
635
636 fmadd $T1a,$N1,$na,$T1a
637 fmadd $T1b,$N1,$nb,$T1b
638 lfd $A0,8($nap_d) ; load a[j] in double format
639 lfd $A1,16($nap_d)
640 fmadd $T2a,$N2,$na,$T2a
641 fmadd $T2b,$N2,$nb,$T2b
642 lfd $A2,24($nap_d) ; load a[j+1] in double format
643 lfd $A3,32($nap_d)
644 fmadd $T3a,$N3,$na,$T3a
645 fmadd $T3b,$N3,$nb,$T3b
646 fmadd $T0a,$N0,$na,$T0a
647 fmadd $T0b,$N0,$nb,$T0b
648
649 fmadd $T1a,$N0,$nc,$T1a
650 fmadd $T1b,$N0,$nd,$T1b
651 fmadd $T2a,$N1,$nc,$T2a
652 fmadd $T2b,$N1,$nd,$T2b
653 fmadd $T3a,$N2,$nc,$T3a
654 fmadd $T3b,$N2,$nd,$T3b
655 fmadd $dota,$N3,$nc,$dota
656 fmadd $dotb,$N3,$nd,$dotb
657
658 fctid $T0a,$T0a
659 fctid $T0b,$T0b
660 fctid $T1a,$T1a
661 fctid $T1b,$T1b
662 fctid $T2a,$T2a
663 fctid $T2b,$T2b
664 fctid $T3a,$T3a
665 fctid $T3b,$T3b
666
667 stfd $T0a,`$FRAME+0`($sp)
668 stfd $T0b,`$FRAME+8`($sp)
669 stfd $T1a,`$FRAME+16`($sp)
670 stfd $T1b,`$FRAME+24`($sp)
671 stfd $T2a,`$FRAME+32`($sp)
672 stfd $T2b,`$FRAME+40`($sp)
673 stfd $T3a,`$FRAME+48`($sp)
674 stfd $T3b,`$FRAME+56`($sp)
675
676.align 5
677Linner:
678 fmul $T1a,$A1,$ba
679 fmul $T1b,$A1,$bb
680 fmul $T2a,$A2,$ba
681 fmul $T2b,$A2,$bb
682 lfd $N0,40($nap_d) ; load n[j] in double format
683 lfd $N1,48($nap_d)
684 fmul $T3a,$A3,$ba
685 fmul $T3b,$A3,$bb
686 fmadd $T0a,$A0,$ba,$dota
687 fmadd $T0b,$A0,$bb,$dotb
688 lfd $N2,56($nap_d) ; load n[j+1] in double format
689 lfdu $N3,64($nap_d)
690
691 fmadd $T1a,$A0,$bc,$T1a
692 fmadd $T1b,$A0,$bd,$T1b
693 fmadd $T2a,$A1,$bc,$T2a
694 fmadd $T2b,$A1,$bd,$T2b
695 lfd $A0,8($nap_d) ; load a[j] in double format
696 lfd $A1,16($nap_d)
697 fmadd $T3a,$A2,$bc,$T3a
698 fmadd $T3b,$A2,$bd,$T3b
699 fmul $dota,$A3,$bc
700 fmul $dotb,$A3,$bd
701 lfd $A2,24($nap_d) ; load a[j+1] in double format
702 lfd $A3,32($nap_d)
703
704 fmadd $T1a,$N1,$na,$T1a
705 fmadd $T1b,$N1,$nb,$T1b
706 ld $t0,`$FRAME+0`($sp)
707 ld $t1,`$FRAME+8`($sp)
708 fmadd $T2a,$N2,$na,$T2a
709 fmadd $T2b,$N2,$nb,$T2b
710 ld $t2,`$FRAME+16`($sp)
711 ld $t3,`$FRAME+24`($sp)
712 fmadd $T3a,$N3,$na,$T3a
713 fmadd $T3b,$N3,$nb,$T3b
714 add $t0,$t0,$carry ; can not overflow
715 ld $t4,`$FRAME+32`($sp)
716 ld $t5,`$FRAME+40`($sp)
717 fmadd $T0a,$N0,$na,$T0a
718 fmadd $T0b,$N0,$nb,$T0b
719 srdi $carry,$t0,16
720 add $t1,$t1,$carry
721 srdi $carry,$t1,16
722 ld $t6,`$FRAME+48`($sp)
723 ld $t7,`$FRAME+56`($sp)
724
725 fmadd $T1a,$N0,$nc,$T1a
726 fmadd $T1b,$N0,$nd,$T1b
727 insrdi $t0,$t1,16,32
728 ld $t1,8($tp) ; tp[j]
729 fmadd $T2a,$N1,$nc,$T2a
730 fmadd $T2b,$N1,$nd,$T2b
731 add $t2,$t2,$carry
732 fmadd $T3a,$N2,$nc,$T3a
733 fmadd $T3b,$N2,$nd,$T3b
734 srdi $carry,$t2,16
735 insrdi $t0,$t2,16,16
736 fmadd $dota,$N3,$nc,$dota
737 fmadd $dotb,$N3,$nd,$dotb
738 add $t3,$t3,$carry
739 ldu $t2,16($tp) ; tp[j+1]
740 srdi $carry,$t3,16
741 insrdi $t0,$t3,16,0 ; 0..63 bits
742 add $t4,$t4,$carry
743
744 fctid $T0a,$T0a
745 fctid $T0b,$T0b
746 srdi $carry,$t4,16
747 fctid $T1a,$T1a
748 fctid $T1b,$T1b
749 add $t5,$t5,$carry
750 fctid $T2a,$T2a
751 fctid $T2b,$T2b
752 srdi $carry,$t5,16
753 insrdi $t4,$t5,16,32
754 fctid $T3a,$T3a
755 fctid $T3b,$T3b
756 add $t6,$t6,$carry
757 srdi $carry,$t6,16
758 insrdi $t4,$t6,16,16
759
760 stfd $T0a,`$FRAME+0`($sp)
761 stfd $T0b,`$FRAME+8`($sp)
762 add $t7,$t7,$carry
763 addc $t3,$t0,$t1
764 stfd $T1a,`$FRAME+16`($sp)
765 stfd $T1b,`$FRAME+24`($sp)
766 insrdi $t4,$t7,16,0 ; 64..127 bits
767 srdi $carry,$t7,16 ; upper 33 bits
768 stfd $T2a,`$FRAME+32`($sp)
769 stfd $T2b,`$FRAME+40`($sp)
770 adde $t5,$t4,$t2
771 stfd $T3a,`$FRAME+48`($sp)
772 stfd $T3b,`$FRAME+56`($sp)
773 addze $carry,$carry
774 std $t3,-16($tp) ; tp[j-1]
775 std $t5,-8($tp) ; tp[j]
776 bdnz- Linner
777
778 fctid $dota,$dota
779 fctid $dotb,$dotb
780 ld $t0,`$FRAME+0`($sp)
781 ld $t1,`$FRAME+8`($sp)
782 ld $t2,`$FRAME+16`($sp)
783 ld $t3,`$FRAME+24`($sp)
784 ld $t4,`$FRAME+32`($sp)
785 ld $t5,`$FRAME+40`($sp)
786 ld $t6,`$FRAME+48`($sp)
787 ld $t7,`$FRAME+56`($sp)
788 stfd $dota,`$FRAME+64`($sp)
789 stfd $dotb,`$FRAME+72`($sp)
790
791 add $t0,$t0,$carry ; can not overflow
792 srdi $carry,$t0,16
793 add $t1,$t1,$carry
794 srdi $carry,$t1,16
795 insrdi $t0,$t1,16,32
796 add $t2,$t2,$carry
797 ld $t1,8($tp) ; tp[j]
798 srdi $carry,$t2,16
799 insrdi $t0,$t2,16,16
800 add $t3,$t3,$carry
801 ldu $t2,16($tp) ; tp[j+1]
802 srdi $carry,$t3,16
803 insrdi $t0,$t3,16,0 ; 0..63 bits
804 add $t4,$t4,$carry
805 srdi $carry,$t4,16
806 add $t5,$t5,$carry
807 srdi $carry,$t5,16
808 insrdi $t4,$t5,16,32
809 add $t6,$t6,$carry
810 srdi $carry,$t6,16
811 insrdi $t4,$t6,16,16
812 add $t7,$t7,$carry
813 insrdi $t4,$t7,16,0 ; 64..127 bits
814 srdi $carry,$t7,16 ; upper 33 bits
815 ld $t6,`$FRAME+64`($sp)
816 ld $t7,`$FRAME+72`($sp)
817
818 addc $t3,$t0,$t1
819 adde $t5,$t4,$t2
820 addze $carry,$carry
821
822 std $t3,-16($tp) ; tp[j-1]
823 std $t5,-8($tp) ; tp[j]
824
825	add	$carry,$carry,$ovf	; consume upmost overflow
826 add $t6,$t6,$carry ; can not overflow
827 srdi $carry,$t6,16
828 add $t7,$t7,$carry
829 insrdi $t6,$t7,48,0
830 srdi $ovf,$t7,48
831 std $t6,0($tp) ; tp[num-1]
832
833 slwi $t7,$num,2
834 addi $i,$i,8
835 subf $nap_d,$t7,$nap_d ; rewind pointer
836 cmpw $i,$num
837 blt- Louter
838
839 subf $np,$num,$np ; rewind np
840 addi $j,$j,1 ; restore counter
841 subfc $i,$i,$i ; j=0 and "clear" XER[CA]
842 addi $tp,$sp,`$FRAME+$TRANSFER+8`
843 addi $t4,$sp,`$FRAME+$TRANSFER+16`
844 addi $t5,$np,8
845 addi $t6,$rp,8
846 mtctr $j
847
848.align 4
849Lsub: ldx $t0,$tp,$i
850 ldx $t1,$np,$i
851 ldx $t2,$t4,$i
852 ldx $t3,$t5,$i
853 subfe $t0,$t1,$t0 ; tp[j]-np[j]
854 subfe $t2,$t3,$t2 ; tp[j+1]-np[j+1]
855 stdx $t0,$rp,$i
856 stdx $t2,$t6,$i
857 addi $i,$i,16
858 bdnz- Lsub
859
860 li $i,0
861 subfe $ovf,$i,$ovf ; handle upmost overflow bit
862 and $ap,$tp,$ovf
863 andc $np,$rp,$ovf
864 or $ap,$ap,$np ; ap=borrow?tp:rp
865 addi $t7,$ap,8
866 mtctr $j
867
868.align 4
869Lcopy: ; copy or in-place refresh
870 ldx $t0,$ap,$i
871 ldx $t1,$t7,$i
872 std $i,8($nap_d) ; zap nap_d
873 std $i,16($nap_d)
874 std $i,24($nap_d)
875 std $i,32($nap_d)
876 std $i,40($nap_d)
877 std $i,48($nap_d)
878 std $i,56($nap_d)
879 stdu $i,64($nap_d)
880 stdx $t0,$rp,$i
881 stdx $t1,$t6,$i
882 stdx $i,$tp,$i ; zap tp at once
883 stdx $i,$t4,$i
884 addi $i,$i,16
885 bdnz- Lcopy
886
887 $POP r14,`2*$SIZE_T`($sp)
888 $POP r15,`3*$SIZE_T`($sp)
889 $POP r16,`4*$SIZE_T`($sp)
890 $POP r17,`5*$SIZE_T`($sp)
891 $POP r18,`6*$SIZE_T`($sp)
892 $POP r19,`7*$SIZE_T`($sp)
893 $POP r20,`8*$SIZE_T`($sp)
894 $POP r21,`9*$SIZE_T`($sp)
895 $POP r22,`10*$SIZE_T`($sp)
896 $POP r23,`11*$SIZE_T`($sp)
897 lfd f14,`12*$SIZE_T+0`($sp)
898 lfd f15,`12*$SIZE_T+8`($sp)
899 lfd f16,`12*$SIZE_T+16`($sp)
900 lfd f17,`12*$SIZE_T+24`($sp)
901 lfd f18,`12*$SIZE_T+32`($sp)
902 lfd f19,`12*$SIZE_T+40`($sp)
903 lfd f20,`12*$SIZE_T+48`($sp)
904 lfd f21,`12*$SIZE_T+56`($sp)
905 lfd f22,`12*$SIZE_T+64`($sp)
906 lfd f23,`12*$SIZE_T+72`($sp)
907 lfd f24,`12*$SIZE_T+80`($sp)
908 lfd f25,`12*$SIZE_T+88`($sp)
909 $POP $sp,0($sp)
910 li r3,1 ; signal "handled"
911 blr
912 .long 0
913.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@fy.chalmers.se>"
914___
915
916$code =~ s/\`([^\`]*)\`/eval $1/gem;
917print $code;
918close STDOUT;
diff --git a/src/lib/libcrypto/bn/asm/s390x-mont.pl b/src/lib/libcrypto/bn/asm/s390x-mont.pl
deleted file mode 100644
index d23251033b..0000000000
--- a/src/lib/libcrypto/bn/asm/s390x-mont.pl
+++ /dev/null
@@ -1,225 +0,0 @@
1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# April 2007.
11#
12# Performance improvement over vanilla C code varies from 85% to 45%
13# depending on key length and benchmark. Unfortunately in this context
14# these are not very impressive results [for code that utilizes "wide"
15# 64x64=128-bit multiplication, which is not commonly available to C
16# programmers], at least a hand-coded bn_asm.c replacement is known to
17# provide 30-40% better results for the longest keys. Well, on second
18# thought it's not very surprising, because z-CPUs are single-issue
19# and execute _strictly_ in-order, while bn_mul_mont is more or less
20# dependent on the CPU's ability to pipeline instructions and keep several
21# of them "in-flight" at the same time. I mean while other methods,
22# for example Karatsuba, aim to minimize the amount of multiplications at
23# the cost of an increase in other operations, bn_mul_mont aims to neatly
24# "overlap" multiplications and the other operations [and on most
25# platforms even minimize the amount of the other operations, in
26# particular references to memory]. But it's possible to improve this
27# module performance by implementing dedicated squaring code-path and
28# possibly by unrolling loops...
29
30# January 2009.
31#
32# Reschedule to minimize/avoid Address Generation Interlock hazard,
33# make inner loops counter-based.
34
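# For reference, a hypothetical word-level model of the loop structure
# implemented below (illustrative only; mont_pass_ref is not part of
# this script). It uses a toy word size such as $bits=16 so products
# fit in native integers; $n0 is -np[0]^-1 mod 2^$bits and $tp holds
# $num+1 zero words on the first pass. The .Louter pass additionally
# folds the previous overflow word back in.
sub mont_pass_ref {
	my ($tp,$ap,$np,$bp_i,$n0,$bits) = @_;
	my $base = 1<<$bits;
	my $num  = scalar @$ap;
	my $m1 = (($tp->[0]+$ap->[0]*$bp_i)*$n0) % $base;	# "tp[0]"*n0
	my $c  = 0;
	for my $j (0..$num-1) {
	    my $t = $tp->[$j] + $ap->[$j]*$bp_i + $np->[$j]*$m1 + $c;
	    $tp->[$j-1] = $t % $base if $j>0;	# tp[j-1]= (t is 0 mod base at j=0)
	    $c = int($t/$base);
	}
	$tp->[$num-1] = $c % $base;	# upmost word
	$tp->[$num]   = int($c/$base);	# overflow bit
	return $tp;
}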
35$mn0="%r0";
36$num="%r1";
37
38# int bn_mul_mont(
39$rp="%r2"; # BN_ULONG *rp,
40$ap="%r3"; # const BN_ULONG *ap,
41$bp="%r4"; # const BN_ULONG *bp,
42$np="%r5"; # const BN_ULONG *np,
43$n0="%r6"; # const BN_ULONG *n0,
44#$num="160(%r15)" # int num);
45
46$bi="%r2"; # zaps rp
47$j="%r7";
48
49$ahi="%r8";
50$alo="%r9";
51$nhi="%r10";
52$nlo="%r11";
53$AHI="%r12";
54$NHI="%r13";
55$count="%r14";
56$sp="%r15";
57
58$code.=<<___;
59.text
60.globl bn_mul_mont
61.type bn_mul_mont,\@function
62bn_mul_mont:
63 lgf $num,164($sp) # pull $num
64 sla $num,3 # $num to enumerate bytes
65 la $bp,0($num,$bp)
66
67 stg %r2,16($sp)
68
69 cghi $num,16 #
70 lghi %r2,0 #
71 blr %r14 # if($num<16) return 0;
72 cghi $num,128 #
73 bhr %r14 # if($num>128) return 0;
74
75 stmg %r3,%r15,24($sp)
76
77 lghi $rp,-160-8 # leave room for carry bit
78 lcgr $j,$num # -$num
79 lgr %r0,$sp
80 la $rp,0($rp,$sp)
81 la $sp,0($j,$rp) # alloca
82 stg %r0,0($sp) # back chain
83
84 sra $num,3 # restore $num
85 la $bp,0($j,$bp) # restore $bp
86 ahi $num,-1 # adjust $num for inner loop
87 lg $n0,0($n0) # pull n0
88
89 lg $bi,0($bp)
90 lg $alo,0($ap)
91 mlgr $ahi,$bi # ap[0]*bp[0]
92 lgr $AHI,$ahi
93
94 lgr $mn0,$alo # "tp[0]"*n0
95 msgr $mn0,$n0
96
97 lg $nlo,0($np) #
98 mlgr $nhi,$mn0 # np[0]*m1
99 algr $nlo,$alo # +="tp[0]"
100 lghi $NHI,0
101 alcgr $NHI,$nhi
102
103 la $j,8(%r0) # j=1
104 lr $count,$num
105
106.align 16
107.L1st:
108 lg $alo,0($j,$ap)
109 mlgr $ahi,$bi # ap[j]*bp[0]
110 algr $alo,$AHI
111 lghi $AHI,0
112 alcgr $AHI,$ahi
113
114 lg $nlo,0($j,$np)
115 mlgr $nhi,$mn0 # np[j]*m1
116 algr $nlo,$NHI
117 lghi $NHI,0
118 alcgr $nhi,$NHI # +="tp[j]"
119 algr $nlo,$alo
120 alcgr $NHI,$nhi
121
122 stg $nlo,160-8($j,$sp) # tp[j-1]=
123 la $j,8($j) # j++
124 brct $count,.L1st
125
126 algr $NHI,$AHI
127 lghi $AHI,0
128 alcgr $AHI,$AHI # upmost overflow bit
129 stg $NHI,160-8($j,$sp)
130 stg $AHI,160($j,$sp)
131 la $bp,8($bp) # bp++
132
133.Louter:
134 lg $bi,0($bp) # bp[i]
135 lg $alo,0($ap)
136 mlgr $ahi,$bi # ap[0]*bp[i]
137 alg $alo,160($sp) # +=tp[0]
138 lghi $AHI,0
139 alcgr $AHI,$ahi
140
141 lgr $mn0,$alo
142 msgr $mn0,$n0 # tp[0]*n0
143
144 lg $nlo,0($np) # np[0]
145 mlgr $nhi,$mn0 # np[0]*m1
146 algr $nlo,$alo # +="tp[0]"
147 lghi $NHI,0
148 alcgr $NHI,$nhi
149
150 la $j,8(%r0) # j=1
151 lr $count,$num
152
153.align 16
154.Linner:
155 lg $alo,0($j,$ap)
156 mlgr $ahi,$bi # ap[j]*bp[i]
157 algr $alo,$AHI
158 lghi $AHI,0
159 alcgr $ahi,$AHI
160 alg $alo,160($j,$sp)# +=tp[j]
161 alcgr $AHI,$ahi
162
163 lg $nlo,0($j,$np)
164 mlgr $nhi,$mn0 # np[j]*m1
165 algr $nlo,$NHI
166 lghi $NHI,0
167 alcgr $nhi,$NHI
168 algr $nlo,$alo # +="tp[j]"
169 alcgr $NHI,$nhi
170
171 stg $nlo,160-8($j,$sp) # tp[j-1]=
172 la $j,8($j) # j++
173 brct $count,.Linner
174
175 algr $NHI,$AHI
176 lghi $AHI,0
177 alcgr $AHI,$AHI
178 alg $NHI,160($j,$sp)# accumulate previous upmost overflow bit
179 lghi $ahi,0
180 alcgr $AHI,$ahi # new upmost overflow bit
181 stg $NHI,160-8($j,$sp)
182 stg $AHI,160($j,$sp)
183
184 la $bp,8($bp) # bp++
185 clg $bp,160+8+32($j,$sp) # compare to &bp[num]
186 jne .Louter
187
188 lg $rp,160+8+16($j,$sp) # reincarnate rp
189 la $ap,160($sp)
190 ahi $num,1 # restore $num, incidentally clears "borrow"
191
192 la $j,0(%r0)
193 lr $count,$num
194.Lsub: lg $alo,0($j,$ap)
195 slbg $alo,0($j,$np)
196 stg $alo,0($j,$rp)
197 la $j,8($j)
198 brct $count,.Lsub
199 lghi $ahi,0
200 slbgr $AHI,$ahi # handle upmost carry
201
202 ngr $ap,$AHI
203 lghi $np,-1
204 xgr $np,$AHI
205 ngr $np,$rp
206 ogr $ap,$np # ap=borrow?tp:rp
207
208 la $j,0(%r0)
209 lgr $count,$num
210.Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh
211 stg $j,160($j,$sp) # zap tp
212 stg $alo,0($j,$rp)
213 la $j,8($j)
214 brct $count,.Lcopy
215
216 la %r1,160+8+48($j,$sp)
217 lmg %r6,%r15,0(%r1)
218 lghi %r2,1 # signal "processed"
219 br %r14
220.size bn_mul_mont,.-bn_mul_mont
221.string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
222___
223
224print $code;
225close STDOUT;
diff --git a/src/lib/libcrypto/bn/asm/s390x.S b/src/lib/libcrypto/bn/asm/s390x.S
deleted file mode 100755
index 8f45f5d513..0000000000
--- a/src/lib/libcrypto/bn/asm/s390x.S
+++ /dev/null
@@ -1,678 +0,0 @@
1.ident "s390x.S, version 1.0"
2// ====================================================================
3// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
4// project.
5//
6// Rights for redistribution and usage in source and binary forms are
7// granted according to the OpenSSL license. Warranty of any kind is
8// disclaimed.
9// ====================================================================
10
11.text
12
13#define zero %r0
14
15// BN_ULONG bn_mul_add_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
16.globl bn_mul_add_words
17.type bn_mul_add_words,@function
18.align 4
19bn_mul_add_words:
20 lghi zero,0 // zero = 0
21 la %r1,0(%r2) // put rp aside
22 lghi %r2,0 // i=0;
23 ltgfr %r4,%r4
24 bler %r14 // if (len<=0) return 0;
25
26 stmg %r6,%r10,48(%r15)
27 lghi %r8,0 // carry = 0
28 srag %r10,%r4,2 // cnt=len/4
29 jz .Loop1_madd
30
31.Loop4_madd:
32 lg %r7,0(%r2,%r3) // ap[i]
33 mlgr %r6,%r5 // *=w
34 algr %r7,%r8 // +=carry
35 alcgr %r6,zero
36 alg %r7,0(%r2,%r1) // +=rp[i]
37 alcgr %r6,zero
38 stg %r7,0(%r2,%r1) // rp[i]=
39
40 lg %r9,8(%r2,%r3)
41 mlgr %r8,%r5
42 algr %r9,%r6
43 alcgr %r8,zero
44 alg %r9,8(%r2,%r1)
45 alcgr %r8,zero
46 stg %r9,8(%r2,%r1)
47
48 lg %r7,16(%r2,%r3)
49 mlgr %r6,%r5
50 algr %r7,%r8
51 alcgr %r6,zero
52 alg %r7,16(%r2,%r1)
53 alcgr %r6,zero
54 stg %r7,16(%r2,%r1)
55
56 lg %r9,24(%r2,%r3)
57 mlgr %r8,%r5
58 algr %r9,%r6
59 alcgr %r8,zero
60 alg %r9,24(%r2,%r1)
61 alcgr %r8,zero
62 stg %r9,24(%r2,%r1)
63
64 la %r2,32(%r2) // i+=4
65 brct %r10,.Loop4_madd
66
67 lghi %r10,3
68 nr %r4,%r10 // cnt=len%4
69 jz .Lend_madd
70
71.Loop1_madd:
72 lg %r7,0(%r2,%r3) // ap[i]
73 mlgr %r6,%r5 // *=w
74 algr %r7,%r8 // +=carry
75 alcgr %r6,zero
76 alg %r7,0(%r2,%r1) // +=rp[i]
77 alcgr %r6,zero
78 stg %r7,0(%r2,%r1) // rp[i]=
79
80 lgr %r8,%r6
81 la %r2,8(%r2) // i++
82 brct %r4,.Loop1_madd
83
84.Lend_madd:
85 lgr %r2,%r8
86 lmg %r6,%r10,48(%r15)
87 br %r14
88.size bn_mul_add_words,.-bn_mul_add_words
89
90// BN_ULONG bn_mul_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
91.globl bn_mul_words
92.type bn_mul_words,@function
93.align 4
94bn_mul_words:
95 lghi zero,0 // zero = 0
96 la %r1,0(%r2) // put rp aside
97 lghi %r2,0 // i=0;
98 ltgfr %r4,%r4
99 bler %r14 // if (len<=0) return 0;
100
101 stmg %r6,%r10,48(%r15)
102 lghi %r8,0 // carry = 0
103 srag %r10,%r4,2 // cnt=len/4
104 jz .Loop1_mul
105
106.Loop4_mul:
107 lg %r7,0(%r2,%r3) // ap[i]
108 mlgr %r6,%r5 // *=w
109 algr %r7,%r8 // +=carry
110 alcgr %r6,zero
111 stg %r7,0(%r2,%r1) // rp[i]=
112
113 lg %r9,8(%r2,%r3)
114 mlgr %r8,%r5
115 algr %r9,%r6
116 alcgr %r8,zero
117 stg %r9,8(%r2,%r1)
118
119 lg %r7,16(%r2,%r3)
120 mlgr %r6,%r5
121 algr %r7,%r8
122 alcgr %r6,zero
123 stg %r7,16(%r2,%r1)
124
125 lg %r9,24(%r2,%r3)
126 mlgr %r8,%r5
127 algr %r9,%r6
128 alcgr %r8,zero
129 stg %r9,24(%r2,%r1)
130
131 la %r2,32(%r2) // i+=4
132 brct %r10,.Loop4_mul
133
134 lghi %r10,3
135 nr %r4,%r10 // cnt=len%4
136 jz .Lend_mul
137
138.Loop1_mul:
139 lg %r7,0(%r2,%r3) // ap[i]
140 mlgr %r6,%r5 // *=w
141 algr %r7,%r8 // +=carry
142 alcgr %r6,zero
143 stg %r7,0(%r2,%r1) // rp[i]=
144
145 lgr %r8,%r6
146 la %r2,8(%r2) // i++
147 brct %r4,.Loop1_mul
148
149.Lend_mul:
150 lgr %r2,%r8
151 lmg %r6,%r10,48(%r15)
152 br %r14
153.size bn_mul_words,.-bn_mul_words
154
155// void bn_sqr_words(BN_ULONG *r2,BN_ULONG *r2,int r4)
156.globl bn_sqr_words
157.type bn_sqr_words,@function
158.align 4
159bn_sqr_words:
160 ltgfr %r4,%r4
161 bler %r14
162
163 stmg %r6,%r7,48(%r15)
164 srag %r1,%r4,2 // cnt=len/4
165 jz .Loop1_sqr
166
167.Loop4_sqr:
168 lg %r7,0(%r3)
169 mlgr %r6,%r7
170 stg %r7,0(%r2)
171 stg %r6,8(%r2)
172
173 lg %r7,8(%r3)
174 mlgr %r6,%r7
175 stg %r7,16(%r2)
176 stg %r6,24(%r2)
177
178 lg %r7,16(%r3)
179 mlgr %r6,%r7
180 stg %r7,32(%r2)
181 stg %r6,40(%r2)
182
183 lg %r7,24(%r3)
184 mlgr %r6,%r7
185 stg %r7,48(%r2)
186 stg %r6,56(%r2)
187
188 la %r3,32(%r3)
189 la %r2,64(%r2)
190 brct %r1,.Loop4_sqr
191
192 lghi %r1,3
193 nr %r4,%r1 // cnt=len%4
194 jz .Lend_sqr
195
196.Loop1_sqr:
197 lg %r7,0(%r3)
198 mlgr %r6,%r7
199 stg %r7,0(%r2)
200 stg %r6,8(%r2)
201
202 la %r3,8(%r3)
203 la %r2,16(%r2)
204 brct %r4,.Loop1_sqr
205
206.Lend_sqr:
207 lmg %r6,%r7,48(%r15)
208 br %r14
209.size bn_sqr_words,.-bn_sqr_words
210
211// BN_ULONG bn_div_words(BN_ULONG h,BN_ULONG l,BN_ULONG d);
212.globl bn_div_words
213.type bn_div_words,@function
214.align 4
215bn_div_words:
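	// the 128-bit dividend h:l arrives in the even/odd pair %r2:%r3;
	// dlgr leaves the remainder in %r2 and the quotient in %r3, which
	// lgr then moves into the return register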
216 dlgr %r2,%r4
217 lgr %r2,%r3
218 br %r14
219.size bn_div_words,.-bn_div_words
220
221// BN_ULONG bn_add_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
222.globl bn_add_words
223.type bn_add_words,@function
224.align 4
225bn_add_words:
226 la %r1,0(%r2) // put rp aside
227 lghi %r2,0 // i=0
228 ltgfr %r5,%r5
229 bler %r14 // if (len<=0) return 0;
230
231 stg %r6,48(%r15)
232 lghi %r6,3
233 nr %r6,%r5 // len%4
234 sra %r5,2 // len/4, use sra because it sets condition code
235 jz .Loop1_add // carry is incidentally cleared if branch taken
236 algr %r2,%r2 // clear carry
237
238.Loop4_add:
239 lg %r0,0(%r2,%r3)
240 alcg %r0,0(%r2,%r4)
241 stg %r0,0(%r2,%r1)
242 lg %r0,8(%r2,%r3)
243 alcg %r0,8(%r2,%r4)
244 stg %r0,8(%r2,%r1)
245 lg %r0,16(%r2,%r3)
246 alcg %r0,16(%r2,%r4)
247 stg %r0,16(%r2,%r1)
248 lg %r0,24(%r2,%r3)
249 alcg %r0,24(%r2,%r4)
250 stg %r0,24(%r2,%r1)
251
252 la %r2,32(%r2) // i+=4
253 brct %r5,.Loop4_add
254
255 la %r6,1(%r6) // see if len%4 is zero ...
256 brct %r6,.Loop1_add // without touching condition code:-)
257
258.Lexit_add:
259 lghi %r2,0
260 alcgr %r2,%r2
261 lg %r6,48(%r15)
262 br %r14
263
264.Loop1_add:
265 lg %r0,0(%r2,%r3)
266 alcg %r0,0(%r2,%r4)
267 stg %r0,0(%r2,%r1)
268
269 la %r2,8(%r2) // i++
270 brct %r6,.Loop1_add
271
272 j .Lexit_add
273.size bn_add_words,.-bn_add_words
274
275// BN_ULONG bn_sub_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
276.globl bn_sub_words
277.type bn_sub_words,@function
278.align 4
279bn_sub_words:
280 la %r1,0(%r2) // put rp aside
281 lghi %r2,0 // i=0
282 ltgfr %r5,%r5
283 bler %r14 // if (len<=0) return 0;
284
285 stg %r6,48(%r15)
286 lghi %r6,3
287 nr %r6,%r5 // len%4
288 sra %r5,2 // len/4, use sra because it sets condition code
289 jnz .Loop4_sub // borrow is incidentally cleared if branch taken
290 slgr %r2,%r2 // clear borrow
291
292.Loop1_sub:
293 lg %r0,0(%r2,%r3)
294 slbg %r0,0(%r2,%r4)
295 stg %r0,0(%r2,%r1)
296
297 la %r2,8(%r2) // i++
298 brct %r6,.Loop1_sub
299 j .Lexit_sub
300
301.Loop4_sub:
302 lg %r0,0(%r2,%r3)
303 slbg %r0,0(%r2,%r4)
304 stg %r0,0(%r2,%r1)
305 lg %r0,8(%r2,%r3)
306 slbg %r0,8(%r2,%r4)
307 stg %r0,8(%r2,%r1)
308 lg %r0,16(%r2,%r3)
309 slbg %r0,16(%r2,%r4)
310 stg %r0,16(%r2,%r1)
311 lg %r0,24(%r2,%r3)
312 slbg %r0,24(%r2,%r4)
313 stg %r0,24(%r2,%r1)
314
315 la %r2,32(%r2) // i+=4
316 brct %r5,.Loop4_sub
317
318 la %r6,1(%r6) // see if len%4 is zero ...
319 brct %r6,.Loop1_sub // without touching condition code:-)
320
321.Lexit_sub:
322 lghi %r2,0
323 slbgr %r2,%r2
324 lcgr %r2,%r2
325 lg %r6,48(%r15)
326 br %r14
327.size bn_sub_words,.-bn_sub_words
328
329#define c1 %r1
330#define c2 %r5
331#define c3 %r8
332
333#define mul_add_c(ai,bi,c1,c2,c3) \
334 lg %r7,ai*8(%r3); \
335 mlg %r6,bi*8(%r4); \
336 algr c1,%r7; \
337 alcgr c2,%r6; \
338 alcgr c3,zero
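// Each expansion of mul_add_c accumulates the 128-bit product
// a[ai]*b[bi] into the current column: c1 takes the low word, c2 the
// high word and c3 soaks up carries out of c2; the three roles rotate
// from one result word to the next.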
339
340// void bn_mul_comba8(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
341.globl bn_mul_comba8
342.type bn_mul_comba8,@function
343.align 4
344bn_mul_comba8:
345 stmg %r6,%r8,48(%r15)
346
347 lghi c1,0
348 lghi c2,0
349 lghi c3,0
350 lghi zero,0
351
352 mul_add_c(0,0,c1,c2,c3);
353 stg c1,0*8(%r2)
354 lghi c1,0
355
356 mul_add_c(0,1,c2,c3,c1);
357 mul_add_c(1,0,c2,c3,c1);
358 stg c2,1*8(%r2)
359 lghi c2,0
360
361 mul_add_c(2,0,c3,c1,c2);
362 mul_add_c(1,1,c3,c1,c2);
363 mul_add_c(0,2,c3,c1,c2);
364 stg c3,2*8(%r2)
365 lghi c3,0
366
367 mul_add_c(0,3,c1,c2,c3);
368 mul_add_c(1,2,c1,c2,c3);
369 mul_add_c(2,1,c1,c2,c3);
370 mul_add_c(3,0,c1,c2,c3);
371 stg c1,3*8(%r2)
372 lghi c1,0
373
374 mul_add_c(4,0,c2,c3,c1);
375 mul_add_c(3,1,c2,c3,c1);
376 mul_add_c(2,2,c2,c3,c1);
377 mul_add_c(1,3,c2,c3,c1);
378 mul_add_c(0,4,c2,c3,c1);
379 stg c2,4*8(%r2)
380 lghi c2,0
381
382 mul_add_c(0,5,c3,c1,c2);
383 mul_add_c(1,4,c3,c1,c2);
384 mul_add_c(2,3,c3,c1,c2);
385 mul_add_c(3,2,c3,c1,c2);
386 mul_add_c(4,1,c3,c1,c2);
387 mul_add_c(5,0,c3,c1,c2);
388 stg c3,5*8(%r2)
389 lghi c3,0
390
391 mul_add_c(6,0,c1,c2,c3);
392 mul_add_c(5,1,c1,c2,c3);
393 mul_add_c(4,2,c1,c2,c3);
394 mul_add_c(3,3,c1,c2,c3);
395 mul_add_c(2,4,c1,c2,c3);
396 mul_add_c(1,5,c1,c2,c3);
397 mul_add_c(0,6,c1,c2,c3);
398 stg c1,6*8(%r2)
399 lghi c1,0
400
401 mul_add_c(0,7,c2,c3,c1);
402 mul_add_c(1,6,c2,c3,c1);
403 mul_add_c(2,5,c2,c3,c1);
404 mul_add_c(3,4,c2,c3,c1);
405 mul_add_c(4,3,c2,c3,c1);
406 mul_add_c(5,2,c2,c3,c1);
407 mul_add_c(6,1,c2,c3,c1);
408 mul_add_c(7,0,c2,c3,c1);
409 stg c2,7*8(%r2)
410 lghi c2,0
411
412 mul_add_c(7,1,c3,c1,c2);
413 mul_add_c(6,2,c3,c1,c2);
414 mul_add_c(5,3,c3,c1,c2);
415 mul_add_c(4,4,c3,c1,c2);
416 mul_add_c(3,5,c3,c1,c2);
417 mul_add_c(2,6,c3,c1,c2);
418 mul_add_c(1,7,c3,c1,c2);
419 stg c3,8*8(%r2)
420 lghi c3,0
421
422 mul_add_c(2,7,c1,c2,c3);
423 mul_add_c(3,6,c1,c2,c3);
424 mul_add_c(4,5,c1,c2,c3);
425 mul_add_c(5,4,c1,c2,c3);
426 mul_add_c(6,3,c1,c2,c3);
427 mul_add_c(7,2,c1,c2,c3);
428 stg c1,9*8(%r2)
429 lghi c1,0
430
431 mul_add_c(7,3,c2,c3,c1);
432 mul_add_c(6,4,c2,c3,c1);
433 mul_add_c(5,5,c2,c3,c1);
434 mul_add_c(4,6,c2,c3,c1);
435 mul_add_c(3,7,c2,c3,c1);
436 stg c2,10*8(%r2)
437 lghi c2,0
438
439 mul_add_c(4,7,c3,c1,c2);
440 mul_add_c(5,6,c3,c1,c2);
441 mul_add_c(6,5,c3,c1,c2);
442 mul_add_c(7,4,c3,c1,c2);
443 stg c3,11*8(%r2)
444 lghi c3,0
445
446 mul_add_c(7,5,c1,c2,c3);
447 mul_add_c(6,6,c1,c2,c3);
448 mul_add_c(5,7,c1,c2,c3);
449 stg c1,12*8(%r2)
450 lghi c1,0
451
452
453 mul_add_c(6,7,c2,c3,c1);
454 mul_add_c(7,6,c2,c3,c1);
455 stg c2,13*8(%r2)
456 lghi c2,0
457
458 mul_add_c(7,7,c3,c1,c2);
459 stg c3,14*8(%r2)
460 stg c1,15*8(%r2)
461
462 lmg %r6,%r8,48(%r15)
463 br %r14
464.size bn_mul_comba8,.-bn_mul_comba8
465
466// void bn_mul_comba4(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
467.globl bn_mul_comba4
468.type bn_mul_comba4,@function
469.align 4
470bn_mul_comba4:
471 stmg %r6,%r8,48(%r15)
472
473 lghi c1,0
474 lghi c2,0
475 lghi c3,0
476 lghi zero,0
477
478 mul_add_c(0,0,c1,c2,c3);
479	stg	c1,0*8(%r2)
480 lghi c1,0
481
482 mul_add_c(0,1,c2,c3,c1);
483 mul_add_c(1,0,c2,c3,c1);
484 stg c2,1*8(%r2)
485 lghi c2,0
486
487 mul_add_c(2,0,c3,c1,c2);
488 mul_add_c(1,1,c3,c1,c2);
489 mul_add_c(0,2,c3,c1,c2);
490 stg c3,2*8(%r2)
491 lghi c3,0
492
493 mul_add_c(0,3,c1,c2,c3);
494 mul_add_c(1,2,c1,c2,c3);
495 mul_add_c(2,1,c1,c2,c3);
496 mul_add_c(3,0,c1,c2,c3);
497 stg c1,3*8(%r2)
498 lghi c1,0
499
500 mul_add_c(3,1,c2,c3,c1);
501 mul_add_c(2,2,c2,c3,c1);
502 mul_add_c(1,3,c2,c3,c1);
503 stg c2,4*8(%r2)
504 lghi c2,0
505
506 mul_add_c(2,3,c3,c1,c2);
507 mul_add_c(3,2,c3,c1,c2);
508 stg c3,5*8(%r2)
509 lghi c3,0
510
511 mul_add_c(3,3,c1,c2,c3);
512 stg c1,6*8(%r2)
513 stg c2,7*8(%r2)
514
515	lmg	%r6,%r8,48(%r15)
516 br %r14
517.size bn_mul_comba4,.-bn_mul_comba4
518
519#define sqr_add_c(ai,c1,c2,c3) \
520 lg %r7,ai*8(%r3); \
521 mlgr %r6,%r7; \
522 algr c1,%r7; \
523 alcgr c2,%r6; \
524 alcgr c3,zero
525
526#define sqr_add_c2(ai,aj,c1,c2,c3) \
527 lg %r7,ai*8(%r3); \
528 mlg %r6,aj*8(%r3); \
529 algr c1,%r7; \
530 alcgr c2,%r6; \
531 alcgr c3,zero; \
532 algr c1,%r7; \
533 alcgr c2,%r6; \
534 alcgr c3,zero
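// sqr_add_c2 adds the product twice, covering both symmetric terms
// a[ai]*a[aj] and a[aj]*a[ai] of the square.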
535
536// void bn_sqr_comba8(BN_ULONG *r2,BN_ULONG *r3);
537.globl bn_sqr_comba8
538.type bn_sqr_comba8,@function
539.align 4
540bn_sqr_comba8:
541 stmg %r6,%r8,48(%r15)
542
543 lghi c1,0
544 lghi c2,0
545 lghi c3,0
546 lghi zero,0
547
548 sqr_add_c(0,c1,c2,c3);
549 stg c1,0*8(%r2)
550 lghi c1,0
551
552 sqr_add_c2(1,0,c2,c3,c1);
553 stg c2,1*8(%r2)
554 lghi c2,0
555
556 sqr_add_c(1,c3,c1,c2);
557 sqr_add_c2(2,0,c3,c1,c2);
558 stg c3,2*8(%r2)
559 lghi c3,0
560
561 sqr_add_c2(3,0,c1,c2,c3);
562 sqr_add_c2(2,1,c1,c2,c3);
563 stg c1,3*8(%r2)
564 lghi c1,0
565
566 sqr_add_c(2,c2,c3,c1);
567 sqr_add_c2(3,1,c2,c3,c1);
568 sqr_add_c2(4,0,c2,c3,c1);
569 stg c2,4*8(%r2)
570 lghi c2,0
571
572 sqr_add_c2(5,0,c3,c1,c2);
573 sqr_add_c2(4,1,c3,c1,c2);
574 sqr_add_c2(3,2,c3,c1,c2);
575 stg c3,5*8(%r2)
576 lghi c3,0
577
578 sqr_add_c(3,c1,c2,c3);
579 sqr_add_c2(4,2,c1,c2,c3);
580 sqr_add_c2(5,1,c1,c2,c3);
581 sqr_add_c2(6,0,c1,c2,c3);
582 stg c1,6*8(%r2)
583 lghi c1,0
584
585 sqr_add_c2(7,0,c2,c3,c1);
586 sqr_add_c2(6,1,c2,c3,c1);
587 sqr_add_c2(5,2,c2,c3,c1);
588 sqr_add_c2(4,3,c2,c3,c1);
589 stg c2,7*8(%r2)
590 lghi c2,0
591
592 sqr_add_c(4,c3,c1,c2);
593 sqr_add_c2(5,3,c3,c1,c2);
594 sqr_add_c2(6,2,c3,c1,c2);
595 sqr_add_c2(7,1,c3,c1,c2);
596 stg c3,8*8(%r2)
597 lghi c3,0
598
599 sqr_add_c2(7,2,c1,c2,c3);
600 sqr_add_c2(6,3,c1,c2,c3);
601 sqr_add_c2(5,4,c1,c2,c3);
602 stg c1,9*8(%r2)
603 lghi c1,0
604
605 sqr_add_c(5,c2,c3,c1);
606 sqr_add_c2(6,4,c2,c3,c1);
607 sqr_add_c2(7,3,c2,c3,c1);
608 stg c2,10*8(%r2)
609 lghi c2,0
610
611 sqr_add_c2(7,4,c3,c1,c2);
612 sqr_add_c2(6,5,c3,c1,c2);
613 stg c3,11*8(%r2)
614 lghi c3,0
615
616 sqr_add_c(6,c1,c2,c3);
617 sqr_add_c2(7,5,c1,c2,c3);
618 stg c1,12*8(%r2)
619 lghi c1,0
620
621 sqr_add_c2(7,6,c2,c3,c1);
622 stg c2,13*8(%r2)
623 lghi c2,0
624
625 sqr_add_c(7,c3,c1,c2);
626 stg c3,14*8(%r2)
627 stg c1,15*8(%r2)
628
629 lmg %r6,%r8,48(%r15)
630 br %r14
631.size bn_sqr_comba8,.-bn_sqr_comba8
632
633// void bn_sqr_comba4(BN_ULONG *r2,BN_ULONG *r3);
634.globl bn_sqr_comba4
635.type bn_sqr_comba4,@function
636.align 4
637bn_sqr_comba4:
638 stmg %r6,%r8,48(%r15)
639
640 lghi c1,0
641 lghi c2,0
642 lghi c3,0
643 lghi zero,0
644
645 sqr_add_c(0,c1,c2,c3);
646 stg c1,0*8(%r2)
647 lghi c1,0
648
649 sqr_add_c2(1,0,c2,c3,c1);
650 stg c2,1*8(%r2)
651 lghi c2,0
652
653 sqr_add_c(1,c3,c1,c2);
654 sqr_add_c2(2,0,c3,c1,c2);
655 stg c3,2*8(%r2)
656 lghi c3,0
657
658 sqr_add_c2(3,0,c1,c2,c3);
659 sqr_add_c2(2,1,c1,c2,c3);
660 stg c1,3*8(%r2)
661 lghi c1,0
662
663 sqr_add_c(2,c2,c3,c1);
664 sqr_add_c2(3,1,c2,c3,c1);
665 stg c2,4*8(%r2)
666 lghi c2,0
667
668 sqr_add_c2(3,2,c3,c1,c2);
669 stg c3,5*8(%r2)
670 lghi c3,0
671
672 sqr_add_c(3,c1,c2,c3);
673 stg c1,6*8(%r2)
674 stg c2,7*8(%r2)
675
676 lmg %r6,%r8,48(%r15)
677 br %r14
678.size bn_sqr_comba4,.-bn_sqr_comba4
diff --git a/src/lib/libcrypto/bn/asm/sparcv8.S b/src/lib/libcrypto/bn/asm/sparcv8.S
deleted file mode 100644
index 88c5dc480a..0000000000
--- a/src/lib/libcrypto/bn/asm/sparcv8.S
+++ /dev/null
@@ -1,1458 +0,0 @@
1.ident "sparcv8.s, Version 1.4"
2.ident "SPARC v8 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
3
4/*
5 * ====================================================================
6 * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
7 * project.
8 *
9 * Rights for redistribution and usage in source and binary forms are
10 * granted according to the OpenSSL license. Warranty of any kind is
11 * disclaimed.
12 * ====================================================================
13 */
14
15/*
16 * This is my modest contribution to the OpenSSL project (see
17 * http://www.openssl.org/ for more information about it) and is
18 * a drop-in SuperSPARC ISA replacement for crypto/bn/bn_asm.c
19 * module. For updates see http://fy.chalmers.se/~appro/hpe/.
20 *
21 * See bn_asm.sparc.v8plus.S for more details.
22 */
23
24/*
25 * Revision history.
26 *
27 * 1.1 - new loop unrolling model(*);
28 * 1.2 - made gas friendly;
29 * 1.3 - fixed problem with /usr/ccs/lib/cpp;
30 * 1.4 - some retunes;
31 *
32 * (*) see bn_asm.sparc.v8plus.S for details
33 */
34
35.section ".text",#alloc,#execinstr
36.file "bn_asm.sparc.v8.S"
37
38.align 32
39
40.global bn_mul_add_words
41/*
42 * BN_ULONG bn_mul_add_words(rp,ap,num,w)
43 * BN_ULONG *rp,*ap;
44 * int num;
45 * BN_ULONG w;
46 */
47bn_mul_add_words:
48 cmp %o2,0
49 bg,a .L_bn_mul_add_words_proceed
50 ld [%o1],%g2
51 retl
52 clr %o0
53
54.L_bn_mul_add_words_proceed:
55 andcc %o2,-4,%g0
56 bz .L_bn_mul_add_words_tail
57 clr %o5
58
59.L_bn_mul_add_words_loop:
60 ld [%o0],%o4
61 ld [%o1+4],%g3
62 umul %o3,%g2,%g2
63 rd %y,%g1
64 addcc %o4,%o5,%o4
65 addx %g1,0,%g1
66 addcc %o4,%g2,%o4
67 st %o4,[%o0]
68 addx %g1,0,%o5
69
70 ld [%o0+4],%o4
71 ld [%o1+8],%g2
72 umul %o3,%g3,%g3
73 dec 4,%o2
74 rd %y,%g1
75 addcc %o4,%o5,%o4
76 addx %g1,0,%g1
77 addcc %o4,%g3,%o4
78 st %o4,[%o0+4]
79 addx %g1,0,%o5
80
81 ld [%o0+8],%o4
82 ld [%o1+12],%g3
83 umul %o3,%g2,%g2
84 inc 16,%o1
85 rd %y,%g1
86 addcc %o4,%o5,%o4
87 addx %g1,0,%g1
88 addcc %o4,%g2,%o4
89 st %o4,[%o0+8]
90 addx %g1,0,%o5
91
92 ld [%o0+12],%o4
93 umul %o3,%g3,%g3
94 inc 16,%o0
95 rd %y,%g1
96 addcc %o4,%o5,%o4
97 addx %g1,0,%g1
98 addcc %o4,%g3,%o4
99 st %o4,[%o0-4]
100 addx %g1,0,%o5
101 andcc %o2,-4,%g0
102 bnz,a .L_bn_mul_add_words_loop
103 ld [%o1],%g2
104
105 tst %o2
106 bnz,a .L_bn_mul_add_words_tail
107 ld [%o1],%g2
108.L_bn_mul_add_words_return:
109 retl
110 mov %o5,%o0
111 nop
112
113.L_bn_mul_add_words_tail:
114 ld [%o0],%o4
115 umul %o3,%g2,%g2
116 addcc %o4,%o5,%o4
117 rd %y,%g1
118 addx %g1,0,%g1
119 addcc %o4,%g2,%o4
120 addx %g1,0,%o5
121 deccc %o2
122 bz .L_bn_mul_add_words_return
123 st %o4,[%o0]
124
125 ld [%o1+4],%g2
126 ld [%o0+4],%o4
127 umul %o3,%g2,%g2
128 rd %y,%g1
129 addcc %o4,%o5,%o4
130 addx %g1,0,%g1
131 addcc %o4,%g2,%o4
132 addx %g1,0,%o5
133 deccc %o2
134 bz .L_bn_mul_add_words_return
135 st %o4,[%o0+4]
136
137 ld [%o1+8],%g2
138 ld [%o0+8],%o4
139 umul %o3,%g2,%g2
140 rd %y,%g1
141 addcc %o4,%o5,%o4
142 addx %g1,0,%g1
143 addcc %o4,%g2,%o4
144 st %o4,[%o0+8]
145 retl
146 addx %g1,0,%o0
147
148.type bn_mul_add_words,#function
149.size bn_mul_add_words,(.-bn_mul_add_words)
150
151.align 32
152
153.global bn_mul_words
154/*
155 * BN_ULONG bn_mul_words(rp,ap,num,w)
156 * BN_ULONG *rp,*ap;
157 * int num;
158 * BN_ULONG w;
159 */
160bn_mul_words:
161 cmp %o2,0
162	bg,a	.L_bn_mul_words_proceed
163 ld [%o1],%g2
164 retl
165 clr %o0
166
167.L_bn_mul_words_proceed:
168 andcc %o2,-4,%g0
169 bz .L_bn_mul_words_tail
170 clr %o5
171
172.L_bn_mul_words_loop:
173 ld [%o1+4],%g3
174 umul %o3,%g2,%g2
175 addcc %g2,%o5,%g2
176 rd %y,%g1
177 addx %g1,0,%o5
178 st %g2,[%o0]
179
180 ld [%o1+8],%g2
181 umul %o3,%g3,%g3
182 addcc %g3,%o5,%g3
183 rd %y,%g1
184 dec 4,%o2
185 addx %g1,0,%o5
186 st %g3,[%o0+4]
187
188 ld [%o1+12],%g3
189 umul %o3,%g2,%g2
190 addcc %g2,%o5,%g2
191 rd %y,%g1
192 inc 16,%o1
193 st %g2,[%o0+8]
194 addx %g1,0,%o5
195
196 umul %o3,%g3,%g3
197 addcc %g3,%o5,%g3
198 rd %y,%g1
199 inc 16,%o0
200 addx %g1,0,%o5
201 st %g3,[%o0-4]
202 andcc %o2,-4,%g0
203 nop
204 bnz,a .L_bn_mul_words_loop
205 ld [%o1],%g2
206
207 tst %o2
208 bnz,a .L_bn_mul_words_tail
209 ld [%o1],%g2
210.L_bn_mul_words_return:
211 retl
212 mov %o5,%o0
213 nop
214
215.L_bn_mul_words_tail:
216 umul %o3,%g2,%g2
217 addcc %g2,%o5,%g2
218 rd %y,%g1
219 addx %g1,0,%o5
220 deccc %o2
221 bz .L_bn_mul_words_return
222 st %g2,[%o0]
223 nop
224
225 ld [%o1+4],%g2
226 umul %o3,%g2,%g2
227 addcc %g2,%o5,%g2
228 rd %y,%g1
229 addx %g1,0,%o5
230 deccc %o2
231 bz .L_bn_mul_words_return
232 st %g2,[%o0+4]
233
234 ld [%o1+8],%g2
235 umul %o3,%g2,%g2
236 addcc %g2,%o5,%g2
237 rd %y,%g1
238 st %g2,[%o0+8]
239 retl
240 addx %g1,0,%o0
241
242.type bn_mul_words,#function
243.size bn_mul_words,(.-bn_mul_words)
244
245.align 32
246.global bn_sqr_words
247/*
248 * void bn_sqr_words(r,a,n)
249 * BN_ULONG *r,*a;
250 * int n;
251 */
252bn_sqr_words:
253 cmp %o2,0
254	bg,a	.L_bn_sqr_words_proceed
255 ld [%o1],%g2
256 retl
257 clr %o0
258
259.L_bn_sqr_words_proceed:
260 andcc %o2,-4,%g0
261 bz .L_bn_sqr_words_tail
262 clr %o5
263
264.L_bn_sqr_words_loop:
265 ld [%o1+4],%g3
266 umul %g2,%g2,%o4
267 st %o4,[%o0]
268 rd %y,%o5
269 st %o5,[%o0+4]
270
271 ld [%o1+8],%g2
272 umul %g3,%g3,%o4
273 dec 4,%o2
274 st %o4,[%o0+8]
275 rd %y,%o5
276 st %o5,[%o0+12]
277 nop
278
279 ld [%o1+12],%g3
280 umul %g2,%g2,%o4
281 st %o4,[%o0+16]
282 rd %y,%o5
283 inc 16,%o1
284 st %o5,[%o0+20]
285
286 umul %g3,%g3,%o4
287 inc 32,%o0
288 st %o4,[%o0-8]
289 rd %y,%o5
290 st %o5,[%o0-4]
291 andcc %o2,-4,%g2
292 bnz,a .L_bn_sqr_words_loop
293 ld [%o1],%g2
294
295 tst %o2
296 nop
297 bnz,a .L_bn_sqr_words_tail
298 ld [%o1],%g2
299.L_bn_sqr_words_return:
300 retl
301 clr %o0
302
303.L_bn_sqr_words_tail:
304 umul %g2,%g2,%o4
305 st %o4,[%o0]
306 deccc %o2
307 rd %y,%o5
308 bz .L_bn_sqr_words_return
309 st %o5,[%o0+4]
310
311 ld [%o1+4],%g2
312 umul %g2,%g2,%o4
313 st %o4,[%o0+8]
314 deccc %o2
315 rd %y,%o5
316 nop
317 bz .L_bn_sqr_words_return
318 st %o5,[%o0+12]
319
320 ld [%o1+8],%g2
321 umul %g2,%g2,%o4
322 st %o4,[%o0+16]
323 rd %y,%o5
324 st %o5,[%o0+20]
325 retl
326 clr %o0
327
328.type bn_sqr_words,#function
329.size bn_sqr_words,(.-bn_sqr_words)
330
331.align 32
332
333.global bn_div_words
334/*
335 * BN_ULONG bn_div_words(h,l,d)
336 * BN_ULONG h,l,d;
337 */
338bn_div_words:
339 wr %o0,%y
340 udiv %o1,%o2,%o0
341 retl
342 nop
343
344.type bn_div_words,#function
345.size bn_div_words,(.-bn_div_words)
346
347.align 32
348
349.global bn_add_words
350/*
351 * BN_ULONG bn_add_words(rp,ap,bp,n)
352 * BN_ULONG *rp,*ap,*bp;
353 * int n;
354 */
355bn_add_words:
356 cmp %o3,0
357 bg,a .L_bn_add_words_proceed
358 ld [%o1],%o4
359 retl
360 clr %o0
361
362.L_bn_add_words_proceed:
363 andcc %o3,-4,%g0
364 bz .L_bn_add_words_tail
365 clr %g1
366 ba .L_bn_add_words_warn_loop
367 addcc %g0,0,%g0 ! clear carry flag
368
369.L_bn_add_words_loop:
370 ld [%o1],%o4
371.L_bn_add_words_warn_loop:
372 ld [%o2],%o5
373 ld [%o1+4],%g3
374 ld [%o2+4],%g4
375 dec 4,%o3
376 addxcc %o5,%o4,%o5
377 st %o5,[%o0]
378
379 ld [%o1+8],%o4
380 ld [%o2+8],%o5
381 inc 16,%o1
382 addxcc %g3,%g4,%g3
383 st %g3,[%o0+4]
384
385 ld [%o1-4],%g3
386 ld [%o2+12],%g4
387 inc 16,%o2
388 addxcc %o5,%o4,%o5
389 st %o5,[%o0+8]
390
391 inc 16,%o0
392 addxcc %g3,%g4,%g3
393 st %g3,[%o0-4]
394 addx %g0,0,%g1
395 andcc %o3,-4,%g0
396 bnz,a .L_bn_add_words_loop
397 addcc %g1,-1,%g0
398
399 tst %o3
400 bnz,a .L_bn_add_words_tail
401 ld [%o1],%o4
402.L_bn_add_words_return:
403 retl
404 mov %g1,%o0
405
406.L_bn_add_words_tail:
407 addcc %g1,-1,%g0
408 ld [%o2],%o5
409 addxcc %o5,%o4,%o5
410 addx %g0,0,%g1
411 deccc %o3
412 bz .L_bn_add_words_return
413 st %o5,[%o0]
414
415 ld [%o1+4],%o4
416 addcc %g1,-1,%g0
417 ld [%o2+4],%o5
418 addxcc %o5,%o4,%o5
419 addx %g0,0,%g1
420 deccc %o3
421 bz .L_bn_add_words_return
422 st %o5,[%o0+4]
423
424 ld [%o1+8],%o4
425 addcc %g1,-1,%g0
426 ld [%o2+8],%o5
427 addxcc %o5,%o4,%o5
428 st %o5,[%o0+8]
429 retl
430 addx %g0,0,%o0
431
432.type bn_add_words,#function
433.size bn_add_words,(.-bn_add_words)
434
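/*
 * Aside: the add/sub loops above must preserve the C flag across the
 * flag-clobbering bookkeeping (dec/inc), so they park it in %g1 with
 * `addx %g0,0,%g1` and regenerate it with `addcc %g1,-1,%g0`: adding
 * 0xffffffff to %g1 carries out exactly when %g1 is 1. A hedged C model
 * of the restore step:
 *
 *    #include <stdint.h>
 *
 *    static int restore_carry(uint32_t g1)  // g1 is 0 or 1
 *    {
 *        return (int)(((uint64_t)g1 + 0xffffffffu) >> 32); // 1 iff g1 == 1
 *    }
 */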
435.align 32
436
437.global bn_sub_words
438/*
439 * BN_ULONG bn_sub_words(rp,ap,bp,n)
440 * BN_ULONG *rp,*ap,*bp;
441 * int n;
442 */
443bn_sub_words:
444 cmp %o3,0
445 bg,a .L_bn_sub_words_proceed
446 ld [%o1],%o4
447 retl
448 clr %o0
449
450.L_bn_sub_words_proceed:
451 andcc %o3,-4,%g0
452 bz .L_bn_sub_words_tail
453 clr %g1
454 ba .L_bn_sub_words_warm_loop
455 addcc %g0,0,%g0 ! clear carry flag
456
457.L_bn_sub_words_loop:
458 ld [%o1],%o4
459.L_bn_sub_words_warm_loop:
460 ld [%o2],%o5
461 ld [%o1+4],%g3
462 ld [%o2+4],%g4
463 dec 4,%o3
464 subxcc %o4,%o5,%o5
465 st %o5,[%o0]
466
467 ld [%o1+8],%o4
468 ld [%o2+8],%o5
469 inc 16,%o1
470 subxcc %g3,%g4,%g4
471 st %g4,[%o0+4]
472
473 ld [%o1-4],%g3
474 ld [%o2+12],%g4
475 inc 16,%o2
476 subxcc %o4,%o5,%o5
477 st %o5,[%o0+8]
478
479 inc 16,%o0
480 subxcc %g3,%g4,%g4
481 st %g4,[%o0-4]
482 addx %g0,0,%g1
483 andcc %o3,-4,%g0
484 bnz,a .L_bn_sub_words_loop
485 addcc %g1,-1,%g0
486
487 tst %o3
488 nop
489 bnz,a .L_bn_sub_words_tail
490 ld [%o1],%o4
491.L_bn_sub_words_return:
492 retl
493 mov %g1,%o0
494
495.L_bn_sub_words_tail:
496 addcc %g1,-1,%g0
497 ld [%o2],%o5
498 subxcc %o4,%o5,%o5
499 addx %g0,0,%g1
500 deccc %o3
501 bz .L_bn_sub_words_return
502 st %o5,[%o0]
503 nop
504
505 ld [%o1+4],%o4
506 addcc %g1,-1,%g0
507 ld [%o2+4],%o5
508 subxcc %o4,%o5,%o5
509 addx %g0,0,%g1
510 deccc %o3
511 bz .L_bn_sub_words_return
512 st %o5,[%o0+4]
513
514 ld [%o1+8],%o4
515 addcc %g1,-1,%g0
516 ld [%o2+8],%o5
517 subxcc %o4,%o5,%o5
518 st %o5,[%o0+8]
519 retl
520 addx %g0,0,%o0
521
522.type bn_sub_words,#function
523.size bn_sub_words,(.-bn_sub_words)
524
525#define FRAME_SIZE -96
526
527/*
528 * Here is the register usage map for *all* routines below.
529 */
530#define t_1 %o0
531#define t_2 %o1
532#define c_1 %o2
533#define c_2 %o3
534#define c_3 %o4
535
536#define ap(I) [%i1+4*I]
537#define bp(I) [%i2+4*I]
538#define rp(I) [%i0+4*I]
539
540#define a_0 %l0
541#define a_1 %l1
542#define a_2 %l2
543#define a_3 %l3
544#define a_4 %l4
545#define a_5 %l5
546#define a_6 %l6
547#define a_7 %l7
548
549#define b_0 %i3
550#define b_1 %i4
551#define b_2 %i5
552#define b_3 %o5
553#define b_4 %g1
554#define b_5 %g2
555#define b_6 %g3
556#define b_7 %g4
557
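/*
 * Aside: each `!mul_add_c(a[i],b[j],cK,cL,cM)` annotation below denotes
 * adding one 64-bit product into a 96-bit column accumulator held in
 * three 32-bit words (their roles rotate from column to column). A C
 * sketch of that primitive, assuming a 32-bit BN_ULONG:
 *
 *    #include <stdint.h>
 *
 *    static void mul_add_c_sketch(uint32_t a, uint32_t b,
 *                                 uint32_t *c1, uint32_t *c2, uint32_t *c3)
 *    {
 *        uint64_t t = (uint64_t)a * b;            // umul + rd %y
 *        uint64_t s = (uint64_t)*c1 + (uint32_t)t;
 *        *c1 = (uint32_t)s;                       // addcc
 *        s = (uint64_t)*c2 + (uint32_t)(t >> 32) + (uint32_t)(s >> 32);
 *        *c2 = (uint32_t)s;                       // addxcc
 *        *c3 += (uint32_t)(s >> 32);              // addx
 *    }
 */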
558.align 32
559.global bn_mul_comba8
560/*
561 * void bn_mul_comba8(r,a,b)
562 * BN_ULONG *r,*a,*b;
563 */
564bn_mul_comba8:
565 save %sp,FRAME_SIZE,%sp
566 ld ap(0),a_0
567 ld bp(0),b_0
568 umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
569 ld bp(1),b_1
570 rd %y,c_2
571 st c_1,rp(0) !r[0]=c1;
572
573 umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
574 ld ap(1),a_1
575 addcc c_2,t_1,c_2
576 rd %y,t_2
577 addxcc %g0,t_2,c_3 !=
578 addx %g0,%g0,c_1
579 ld ap(2),a_2
580 umul a_1,b_0,t_1 !mul_add_c(a[1],b[0],c2,c3,c1);
581 addcc c_2,t_1,c_2 !=
582 rd %y,t_2
583 addxcc c_3,t_2,c_3
584 st c_2,rp(1) !r[1]=c2;
585 addx c_1,%g0,c_1 !=
586
587 umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
588 addcc c_3,t_1,c_3
589 rd %y,t_2
590 addxcc c_1,t_2,c_1 !=
591 addx %g0,%g0,c_2
592 ld bp(2),b_2
593 umul a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
594 addcc c_3,t_1,c_3 !=
595 rd %y,t_2
596 addxcc c_1,t_2,c_1
597 ld bp(3),b_3
598 addx c_2,%g0,c_2 !=
599 umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
600 addcc c_3,t_1,c_3
601 rd %y,t_2
602 addxcc c_1,t_2,c_1 !=
603 addx c_2,%g0,c_2
604 st c_3,rp(2) !r[2]=c3;
605
606 umul a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
607 addcc c_1,t_1,c_1 !=
608 rd %y,t_2
609 addxcc c_2,t_2,c_2
610 addx %g0,%g0,c_3
611 umul a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
612 addcc c_1,t_1,c_1
613 rd %y,t_2
614 addxcc c_2,t_2,c_2
615 addx c_3,%g0,c_3 !=
616 ld ap(3),a_3
617 umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
618 addcc c_1,t_1,c_1
619 rd %y,t_2 !=
620 addxcc c_2,t_2,c_2
621 addx c_3,%g0,c_3
622 ld ap(4),a_4
623 umul a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
624 addcc c_1,t_1,c_1
625 rd %y,t_2
626 addxcc c_2,t_2,c_2
627 addx c_3,%g0,c_3 !=
628 st c_1,rp(3) !r[3]=c1;
629
630 umul a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
631 addcc c_2,t_1,c_2
632 rd %y,t_2 !=
633 addxcc c_3,t_2,c_3
634 addx %g0,%g0,c_1
635 umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
636 addcc c_2,t_1,c_2 !=
637 rd %y,t_2
638 addxcc c_3,t_2,c_3
639 addx c_1,%g0,c_1
640 umul a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
641 addcc c_2,t_1,c_2
642 rd %y,t_2
643 addxcc c_3,t_2,c_3
644 addx c_1,%g0,c_1 !=
645 ld bp(4),b_4
646 umul a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
647 addcc c_2,t_1,c_2
648 rd %y,t_2 !=
649 addxcc c_3,t_2,c_3
650 addx c_1,%g0,c_1
651 ld bp(5),b_5
652 umul a_0,b_4,t_1 !=!mul_add_c(a[0],b[4],c2,c3,c1);
653 addcc c_2,t_1,c_2
654 rd %y,t_2
655 addxcc c_3,t_2,c_3
656 addx c_1,%g0,c_1 !=
657 st c_2,rp(4) !r[4]=c2;
658
659 umul a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
660 addcc c_3,t_1,c_3
661 rd %y,t_2 !=
662 addxcc c_1,t_2,c_1
663 addx %g0,%g0,c_2
664 umul a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
665 addcc c_3,t_1,c_3 !=
666 rd %y,t_2
667 addxcc c_1,t_2,c_1
668 addx c_2,%g0,c_2
669 umul a_2,b_3,t_1 !=!mul_add_c(a[2],b[3],c3,c1,c2);
670 addcc c_3,t_1,c_3
671 rd %y,t_2
672 addxcc c_1,t_2,c_1
673 addx c_2,%g0,c_2 !=
674 umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
675 addcc c_3,t_1,c_3
676 rd %y,t_2
677 addxcc c_1,t_2,c_1 !=
678 addx c_2,%g0,c_2
679 ld ap(5),a_5
680 umul a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
681 addcc c_3,t_1,c_3 !=
682 rd %y,t_2
683 addxcc c_1,t_2,c_1
684 ld ap(6),a_6
685 addx c_2,%g0,c_2 !=
686 umul a_5,b_0,t_1 !mul_add_c(a[5],b[0],c3,c1,c2);
687 addcc c_3,t_1,c_3
688 rd %y,t_2
689 addxcc c_1,t_2,c_1 !=
690 addx c_2,%g0,c_2
691 st c_3,rp(5) !r[5]=c3;
692
693 umul a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
694 addcc c_1,t_1,c_1 !=
695 rd %y,t_2
696 addxcc c_2,t_2,c_2
697 addx %g0,%g0,c_3
698 umul a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
699 addcc c_1,t_1,c_1
700 rd %y,t_2
701 addxcc c_2,t_2,c_2
702 addx c_3,%g0,c_3 !=
703 umul a_4,b_2,t_1 !mul_add_c(a[4],b[2],c1,c2,c3);
704 addcc c_1,t_1,c_1
705 rd %y,t_2
706 addxcc c_2,t_2,c_2 !=
707 addx c_3,%g0,c_3
708 umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
709 addcc c_1,t_1,c_1
710 rd %y,t_2 !=
711 addxcc c_2,t_2,c_2
712 addx c_3,%g0,c_3
713 umul a_2,b_4,t_1 !mul_add_c(a[2],b[4],c1,c2,c3);
714 addcc c_1,t_1,c_1 !=
715 rd %y,t_2
716 addxcc c_2,t_2,c_2
717 ld bp(6),b_6
718 addx c_3,%g0,c_3 !=
719 umul a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
720 addcc c_1,t_1,c_1
721 rd %y,t_2
722 addxcc c_2,t_2,c_2 !=
723 addx c_3,%g0,c_3
724 ld bp(7),b_7
725 umul a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
726 addcc c_1,t_1,c_1 !=
727 rd %y,t_2
728 addxcc c_2,t_2,c_2
729 st c_1,rp(6) !r[6]=c1;
730 addx c_3,%g0,c_3 !=
731
732 umul a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
733 addcc c_2,t_1,c_2
734 rd %y,t_2
735 addxcc c_3,t_2,c_3 !=
736 addx %g0,%g0,c_1
737 umul a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
738 addcc c_2,t_1,c_2
739 rd %y,t_2 !=
740 addxcc c_3,t_2,c_3
741 addx c_1,%g0,c_1
742 umul a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
743 addcc c_2,t_1,c_2 !=
744 rd %y,t_2
745 addxcc c_3,t_2,c_3
746 addx c_1,%g0,c_1
747 umul a_3,b_4,t_1 !=!mul_add_c(a[3],b[4],c2,c3,c1);
748 addcc c_2,t_1,c_2
749 rd %y,t_2
750 addxcc c_3,t_2,c_3
751 addx c_1,%g0,c_1 !=
752 umul a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
753 addcc c_2,t_1,c_2
754 rd %y,t_2
755 addxcc c_3,t_2,c_3 !=
756 addx c_1,%g0,c_1
757 umul a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
758 addcc c_2,t_1,c_2
759 rd %y,t_2 !=
760 addxcc c_3,t_2,c_3
761 addx c_1,%g0,c_1
762 ld ap(7),a_7
763 umul a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
764 addcc c_2,t_1,c_2
765 rd %y,t_2
766 addxcc c_3,t_2,c_3
767 addx c_1,%g0,c_1 !=
768 umul a_7,b_0,t_1 !mul_add_c(a[7],b[0],c2,c3,c1);
769 addcc c_2,t_1,c_2
770 rd %y,t_2
771 addxcc c_3,t_2,c_3 !=
772 addx c_1,%g0,c_1
773 st c_2,rp(7) !r[7]=c2;
774
775 umul a_7,b_1,t_1 !mul_add_c(a[7],b[1],c3,c1,c2);
776 addcc c_3,t_1,c_3 !=
777 rd %y,t_2
778 addxcc c_1,t_2,c_1
779 addx %g0,%g0,c_2
780 umul a_6,b_2,t_1 !=!mul_add_c(a[6],b[2],c3,c1,c2);
781 addcc c_3,t_1,c_3
782 rd %y,t_2
783 addxcc c_1,t_2,c_1
784 addx c_2,%g0,c_2 !=
785 umul a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
786 addcc c_3,t_1,c_3
787 rd %y,t_2
788 addxcc c_1,t_2,c_1 !=
789 addx c_2,%g0,c_2
790 umul a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
791 addcc c_3,t_1,c_3
792 rd %y,t_2 !=
793 addxcc c_1,t_2,c_1
794 addx c_2,%g0,c_2
795 umul a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
796 addcc c_3,t_1,c_3 !=
797 rd %y,t_2
798 addxcc c_1,t_2,c_1
799 addx c_2,%g0,c_2
800 umul a_2,b_6,t_1 !=!mul_add_c(a[2],b[6],c3,c1,c2);
801 addcc c_3,t_1,c_3
802 rd %y,t_2
803 addxcc c_1,t_2,c_1
804 addx c_2,%g0,c_2 !=
805 umul a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
806 addcc c_3,t_1,c_3
807 rd %y,t_2
808 addxcc c_1,t_2,c_1 !=
809 addx c_2,%g0,c_2
810 st c_3,rp(8) !r[8]=c3;
811
812 umul a_2,b_7,t_1 !mul_add_c(a[2],b[7],c1,c2,c3);
813 addcc c_1,t_1,c_1 !=
814 rd %y,t_2
815 addxcc c_2,t_2,c_2
816 addx %g0,%g0,c_3
817 umul a_3,b_6,t_1 !=!mul_add_c(a[3],b[6],c1,c2,c3);
818 addcc c_1,t_1,c_1
819 rd %y,t_2
820 addxcc c_2,t_2,c_2
821 addx c_3,%g0,c_3 !=
822 umul a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
823 addcc c_1,t_1,c_1
824 rd %y,t_2
825 addxcc c_2,t_2,c_2 !=
826 addx c_3,%g0,c_3
827 umul a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
828 addcc c_1,t_1,c_1
829 rd %y,t_2 !=
830 addxcc c_2,t_2,c_2
831 addx c_3,%g0,c_3
832 umul a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
833 addcc c_1,t_1,c_1 !=
834 rd %y,t_2
835 addxcc c_2,t_2,c_2
836 addx c_3,%g0,c_3
837 umul a_7,b_2,t_1 !=!mul_add_c(a[7],b[2],c1,c2,c3);
838 addcc c_1,t_1,c_1
839 rd %y,t_2
840 addxcc c_2,t_2,c_2
841 addx c_3,%g0,c_3 !=
842 st c_1,rp(9) !r[9]=c1;
843
844 umul a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
845 addcc c_2,t_1,c_2
846 rd %y,t_2 !=
847 addxcc c_3,t_2,c_3
848 addx %g0,%g0,c_1
849 umul a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
850 addcc c_2,t_1,c_2 !=
851 rd %y,t_2
852 addxcc c_3,t_2,c_3
853 addx c_1,%g0,c_1
854 umul a_5,b_5,t_1 !=!mul_add_c(a[5],b[5],c2,c3,c1);
855 addcc c_2,t_1,c_2
856 rd %y,t_2
857 addxcc c_3,t_2,c_3
858 addx c_1,%g0,c_1 !=
859 umul a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
860 addcc c_2,t_1,c_2
861 rd %y,t_2
862 addxcc c_3,t_2,c_3 !=
863 addx c_1,%g0,c_1
864 umul a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
865 addcc c_2,t_1,c_2
866 rd %y,t_2 !=
867 addxcc c_3,t_2,c_3
868 addx c_1,%g0,c_1
869 st c_2,rp(10) !r[10]=c2;
870
871 umul a_4,b_7,t_1 !=!mul_add_c(a[4],b[7],c3,c1,c2);
872 addcc c_3,t_1,c_3
873 rd %y,t_2
874 addxcc c_1,t_2,c_1
875 addx %g0,%g0,c_2 !=
876 umul a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
877 addcc c_3,t_1,c_3
878 rd %y,t_2
879 addxcc c_1,t_2,c_1 !=
880 addx c_2,%g0,c_2
881 umul a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
882 addcc c_3,t_1,c_3
883 rd %y,t_2 !=
884 addxcc c_1,t_2,c_1
885 addx c_2,%g0,c_2
886 umul a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
887 addcc c_3,t_1,c_3 !=
888 rd %y,t_2
889 addxcc c_1,t_2,c_1
890 st c_3,rp(11) !r[11]=c3;
891 addx c_2,%g0,c_2 !=
892
893 umul a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
894 addcc c_1,t_1,c_1
895 rd %y,t_2
896 addxcc c_2,t_2,c_2 !=
897 addx %g0,%g0,c_3
898 umul a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
899 addcc c_1,t_1,c_1
900 rd %y,t_2 !=
901 addxcc c_2,t_2,c_2
902 addx c_3,%g0,c_3
903 umul a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
904 addcc c_1,t_1,c_1 !=
905 rd %y,t_2
906 addxcc c_2,t_2,c_2
907 st c_1,rp(12) !r[12]=c1;
908 addx c_3,%g0,c_3 !=
909
910 umul a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
911 addcc c_2,t_1,c_2
912 rd %y,t_2
913 addxcc c_3,t_2,c_3 !=
914 addx %g0,%g0,c_1
915 umul a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
916 addcc c_2,t_1,c_2
917 rd %y,t_2 !=
918 addxcc c_3,t_2,c_3
919 addx c_1,%g0,c_1
920 st c_2,rp(13) !r[13]=c2;
921
922 umul a_7,b_7,t_1 !=!mul_add_c(a[7],b[7],c3,c1,c2);
923 addcc c_3,t_1,c_3
924 rd %y,t_2
925 addxcc c_1,t_2,c_1
926 nop !=
927 st c_3,rp(14) !r[14]=c3;
928 st c_1,rp(15) !r[15]=c1;
929
930 ret
931 restore %g0,%g0,%o0
932
933.type bn_mul_comba8,#function
934.size bn_mul_comba8,(.-bn_mul_comba8)
935
936.align 32
937
938.global bn_mul_comba4
939/*
940 * void bn_mul_comba4(r,a,b)
941 * BN_ULONG *r,*a,*b;
942 */
943bn_mul_comba4:
944 save %sp,FRAME_SIZE,%sp
945 ld ap(0),a_0
946 ld bp(0),b_0
947 umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
948 ld bp(1),b_1
949 rd %y,c_2
950 st c_1,rp(0) !r[0]=c1;
951
952 umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
953 ld ap(1),a_1
954 addcc c_2,t_1,c_2
955 rd %y,t_2 !=
956 addxcc %g0,t_2,c_3
957 addx %g0,%g0,c_1
958 ld ap(2),a_2
959 umul a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
960 addcc c_2,t_1,c_2
961 rd %y,t_2
962 addxcc c_3,t_2,c_3
963 addx c_1,%g0,c_1 !=
964 st c_2,rp(1) !r[1]=c2;
965
966 umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
967 addcc c_3,t_1,c_3
968 rd %y,t_2 !=
969 addxcc c_1,t_2,c_1
970 addx %g0,%g0,c_2
971 ld bp(2),b_2
972 umul a_1,b_1,t_1 !=!mul_add_c(a[1],b[1],c3,c1,c2);
973 addcc c_3,t_1,c_3
974 rd %y,t_2
975 addxcc c_1,t_2,c_1
976 addx c_2,%g0,c_2 !=
977 ld bp(3),b_3
978 umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
979 addcc c_3,t_1,c_3
980 rd %y,t_2 !=
981 addxcc c_1,t_2,c_1
982 addx c_2,%g0,c_2
983 st c_3,rp(2) !r[2]=c3;
984
985 umul a_0,b_3,t_1 !=!mul_add_c(a[0],b[3],c1,c2,c3);
986 addcc c_1,t_1,c_1
987 rd %y,t_2
988 addxcc c_2,t_2,c_2
989 addx %g0,%g0,c_3 !=
990 umul a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
991 addcc c_1,t_1,c_1
992 rd %y,t_2
993 addxcc c_2,t_2,c_2 !=
994 addx c_3,%g0,c_3
995 ld ap(3),a_3
996 umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
997 addcc c_1,t_1,c_1 !=
998 rd %y,t_2
999 addxcc c_2,t_2,c_2
1000 addx c_3,%g0,c_3
1001 umul a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);
1002 addcc c_1,t_1,c_1
1003 rd %y,t_2
1004 addxcc c_2,t_2,c_2
1005 addx c_3,%g0,c_3 !=
1006 st c_1,rp(3) !r[3]=c1;
1007
1008 umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
1009 addcc c_2,t_1,c_2
1010 rd %y,t_2 !=
1011 addxcc c_3,t_2,c_3
1012 addx %g0,%g0,c_1
1013 umul a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
1014 addcc c_2,t_1,c_2 !=
1015 rd %y,t_2
1016 addxcc c_3,t_2,c_3
1017 addx c_1,%g0,c_1
1018 umul a_1,b_3,t_1 !=!mul_add_c(a[1],b[3],c2,c3,c1);
1019 addcc c_2,t_1,c_2
1020 rd %y,t_2
1021 addxcc c_3,t_2,c_3
1022 addx c_1,%g0,c_1 !=
1023 st c_2,rp(4) !r[4]=c2;
1024
1025 umul a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
1026 addcc c_3,t_1,c_3
1027 rd %y,t_2 !=
1028 addxcc c_1,t_2,c_1
1029 addx %g0,%g0,c_2
1030 umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
1031 addcc c_3,t_1,c_3 !=
1032 rd %y,t_2
1033 addxcc c_1,t_2,c_1
1034 st c_3,rp(5) !r[5]=c3;
1035 addx c_2,%g0,c_2 !=
1036
1037 umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
1038 addcc c_1,t_1,c_1
1039 rd %y,t_2
1040 addxcc c_2,t_2,c_2 !=
1041 st c_1,rp(6) !r[6]=c1;
1042 st c_2,rp(7) !r[7]=c2;
1043
1044 ret
1045 restore %g0,%g0,%o0
1046
1047.type bn_mul_comba4,#function
1048.size bn_mul_comba4,(.-bn_mul_comba4)
1049
1050.align 32
1051
1052.global bn_sqr_comba8
1053bn_sqr_comba8:
1054 save %sp,FRAME_SIZE,%sp
1055 ld ap(0),a_0
1056 ld ap(1),a_1
1057 umul a_0,a_0,c_1 !=!sqr_add_c(a,0,c1,c2,c3);
1058 rd %y,c_2
1059 st c_1,rp(0) !r[0]=c1;
1060
1061 ld ap(2),a_2
1062 umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
1063 addcc c_2,t_1,c_2
1064 rd %y,t_2
1065 addxcc %g0,t_2,c_3
1066 addx %g0,%g0,c_1 !=
1067 addcc c_2,t_1,c_2
1068 addxcc c_3,t_2,c_3
1069 st c_2,rp(1) !r[1]=c2;
1070 addx c_1,%g0,c_1 !=
1071
1072 umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
1073 addcc c_3,t_1,c_3
1074 rd %y,t_2
1075 addxcc c_1,t_2,c_1 !=
1076 addx %g0,%g0,c_2
1077 addcc c_3,t_1,c_3
1078 addxcc c_1,t_2,c_1
1079 addx c_2,%g0,c_2 !=
1080 ld ap(3),a_3
1081 umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
1082 addcc c_3,t_1,c_3
1083 rd %y,t_2 !=
1084 addxcc c_1,t_2,c_1
1085 addx c_2,%g0,c_2
1086 st c_3,rp(2) !r[2]=c3;
1087
1088 umul a_0,a_3,t_1 !=!sqr_add_c2(a,3,0,c1,c2,c3);
1089 addcc c_1,t_1,c_1
1090 rd %y,t_2
1091 addxcc c_2,t_2,c_2
1092 addx %g0,%g0,c_3 !=
1093 addcc c_1,t_1,c_1
1094 addxcc c_2,t_2,c_2
1095 ld ap(4),a_4
1096 addx c_3,%g0,c_3 !=
1097 umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
1098 addcc c_1,t_1,c_1
1099 rd %y,t_2
1100 addxcc c_2,t_2,c_2 !=
1101 addx c_3,%g0,c_3
1102 addcc c_1,t_1,c_1
1103 addxcc c_2,t_2,c_2
1104 addx c_3,%g0,c_3 !=
1105 st c_1,rp(3) !r[3]=c1;
1106
1107 umul a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
1108 addcc c_2,t_1,c_2
1109 rd %y,t_2 !=
1110 addxcc c_3,t_2,c_3
1111 addx %g0,%g0,c_1
1112 addcc c_2,t_1,c_2
1113 addxcc c_3,t_2,c_3 !=
1114 addx c_1,%g0,c_1
1115 umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
1116 addcc c_2,t_1,c_2
1117 rd %y,t_2 !=
1118 addxcc c_3,t_2,c_3
1119 addx c_1,%g0,c_1
1120 addcc c_2,t_1,c_2
1121 addxcc c_3,t_2,c_3 !=
1122 addx c_1,%g0,c_1
1123 ld ap(5),a_5
1124 umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
1125 addcc c_2,t_1,c_2 !=
1126 rd %y,t_2
1127 addxcc c_3,t_2,c_3
1128 st c_2,rp(4) !r[4]=c2;
1129 addx c_1,%g0,c_1 !=
1130
1131 umul a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
1132 addcc c_3,t_1,c_3
1133 rd %y,t_2
1134 addxcc c_1,t_2,c_1 !=
1135 addx %g0,%g0,c_2
1136 addcc c_3,t_1,c_3
1137 addxcc c_1,t_2,c_1
1138 addx c_2,%g0,c_2 !=
1139 umul a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
1140 addcc c_3,t_1,c_3
1141 rd %y,t_2
1142 addxcc c_1,t_2,c_1 !=
1143 addx c_2,%g0,c_2
1144 addcc c_3,t_1,c_3
1145 addxcc c_1,t_2,c_1
1146 addx c_2,%g0,c_2 !=
1147 ld ap(6),a_6
1148 umul a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
1149 addcc c_3,t_1,c_3
1150 rd %y,t_2 !=
1151 addxcc c_1,t_2,c_1
1152 addx c_2,%g0,c_2
1153 addcc c_3,t_1,c_3
1154 addxcc c_1,t_2,c_1 !=
1155 addx c_2,%g0,c_2
1156 st c_3,rp(5) !r[5]=c3;
1157
1158 umul a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
1159 addcc c_1,t_1,c_1 !=
1160 rd %y,t_2
1161 addxcc c_2,t_2,c_2
1162 addx %g0,%g0,c_3
1163 addcc c_1,t_1,c_1 !=
1164 addxcc c_2,t_2,c_2
1165 addx c_3,%g0,c_3
1166 umul a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
1167 addcc c_1,t_1,c_1 !=
1168 rd %y,t_2
1169 addxcc c_2,t_2,c_2
1170 addx c_3,%g0,c_3
1171 addcc c_1,t_1,c_1 !=
1172 addxcc c_2,t_2,c_2
1173 addx c_3,%g0,c_3
1174 umul a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
1175 addcc c_1,t_1,c_1 !=
1176 rd %y,t_2
1177 addxcc c_2,t_2,c_2
1178 addx c_3,%g0,c_3
1179 addcc c_1,t_1,c_1 !=
1180 addxcc c_2,t_2,c_2
1181 addx c_3,%g0,c_3
1182 ld ap(7),a_7
1183 umul a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
1184 addcc c_1,t_1,c_1
1185 rd %y,t_2
1186 addxcc c_2,t_2,c_2
1187 addx c_3,%g0,c_3 !=
1188 st c_1,rp(6) !r[6]=c1;
1189
1190 umul a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
1191 addcc c_2,t_1,c_2
1192 rd %y,t_2 !=
1193 addxcc c_3,t_2,c_3
1194 addx %g0,%g0,c_1
1195 addcc c_2,t_1,c_2
1196 addxcc c_3,t_2,c_3 !=
1197 addx c_1,%g0,c_1
1198 umul a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
1199 addcc c_2,t_1,c_2
1200 rd %y,t_2 !=
1201 addxcc c_3,t_2,c_3
1202 addx c_1,%g0,c_1
1203 addcc c_2,t_1,c_2
1204 addxcc c_3,t_2,c_3 !=
1205 addx c_1,%g0,c_1
1206 umul a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
1207 addcc c_2,t_1,c_2
1208 rd %y,t_2 !=
1209 addxcc c_3,t_2,c_3
1210 addx c_1,%g0,c_1
1211 addcc c_2,t_1,c_2
1212 addxcc c_3,t_2,c_3 !=
1213 addx c_1,%g0,c_1
1214 umul a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
1215 addcc c_2,t_1,c_2
1216 rd %y,t_2 !=
1217 addxcc c_3,t_2,c_3
1218 addx c_1,%g0,c_1
1219 addcc c_2,t_1,c_2
1220 addxcc c_3,t_2,c_3 !=
1221 addx c_1,%g0,c_1
1222 st c_2,rp(7) !r[7]=c2;
1223
1224 umul a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
1225 addcc c_3,t_1,c_3 !=
1226 rd %y,t_2
1227 addxcc c_1,t_2,c_1
1228 addx %g0,%g0,c_2
1229 addcc c_3,t_1,c_3 !=
1230 addxcc c_1,t_2,c_1
1231 addx c_2,%g0,c_2
1232 umul a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
1233 addcc c_3,t_1,c_3 !=
1234 rd %y,t_2
1235 addxcc c_1,t_2,c_1
1236 addx c_2,%g0,c_2
1237 addcc c_3,t_1,c_3 !=
1238 addxcc c_1,t_2,c_1
1239 addx c_2,%g0,c_2
1240 umul a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
1241 addcc c_3,t_1,c_3 !=
1242 rd %y,t_2
1243 addxcc c_1,t_2,c_1
1244 addx c_2,%g0,c_2
1245 addcc c_3,t_1,c_3 !=
1246 addxcc c_1,t_2,c_1
1247 addx c_2,%g0,c_2
1248 umul a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
1249 addcc c_3,t_1,c_3 !=
1250 rd %y,t_2
1251 addxcc c_1,t_2,c_1
1252 st c_3,rp(8) !r[8]=c3;
1253 addx c_2,%g0,c_2 !=
1254
1255 umul a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
1256 addcc c_1,t_1,c_1
1257 rd %y,t_2
1258 addxcc c_2,t_2,c_2 !=
1259 addx %g0,%g0,c_3
1260 addcc c_1,t_1,c_1
1261 addxcc c_2,t_2,c_2
1262 addx c_3,%g0,c_3 !=
1263 umul a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
1264 addcc c_1,t_1,c_1
1265 rd %y,t_2
1266 addxcc c_2,t_2,c_2 !=
1267 addx c_3,%g0,c_3
1268 addcc c_1,t_1,c_1
1269 addxcc c_2,t_2,c_2
1270 addx c_3,%g0,c_3 !=
1271 umul a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
1272 addcc c_1,t_1,c_1
1273 rd %y,t_2
1274 addxcc c_2,t_2,c_2 !=
1275 addx c_3,%g0,c_3
1276 addcc c_1,t_1,c_1
1277 addxcc c_2,t_2,c_2
1278 addx c_3,%g0,c_3 !=
1279 st c_1,rp(9) !r[9]=c1;
1280
1281 umul a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
1282 addcc c_2,t_1,c_2
1283 rd %y,t_2 !=
1284 addxcc c_3,t_2,c_3
1285 addx %g0,%g0,c_1
1286 addcc c_2,t_1,c_2
1287 addxcc c_3,t_2,c_3 !=
1288 addx c_1,%g0,c_1
1289 umul a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
1290 addcc c_2,t_1,c_2
1291 rd %y,t_2 !=
1292 addxcc c_3,t_2,c_3
1293 addx c_1,%g0,c_1
1294 addcc c_2,t_1,c_2
1295 addxcc c_3,t_2,c_3 !=
1296 addx c_1,%g0,c_1
1297 umul a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
1298 addcc c_2,t_1,c_2
1299 rd %y,t_2 !=
1300 addxcc c_3,t_2,c_3
1301 addx c_1,%g0,c_1
1302 st c_2,rp(10) !r[10]=c2;
1303
1304 umul a_4,a_7,t_1 !=!sqr_add_c2(a,7,4,c3,c1,c2);
1305 addcc c_3,t_1,c_3
1306 rd %y,t_2
1307 addxcc c_1,t_2,c_1
1308 addx %g0,%g0,c_2 !=
1309 addcc c_3,t_1,c_3
1310 addxcc c_1,t_2,c_1
1311 addx c_2,%g0,c_2
1312 umul a_5,a_6,t_1 !=!sqr_add_c2(a,6,5,c3,c1,c2);
1313 addcc c_3,t_1,c_3
1314 rd %y,t_2
1315 addxcc c_1,t_2,c_1
1316 addx c_2,%g0,c_2 !=
1317 addcc c_3,t_1,c_3
1318 addxcc c_1,t_2,c_1
1319 st c_3,rp(11) !r[11]=c3;
1320 addx c_2,%g0,c_2 !=
1321
1322 umul a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
1323 addcc c_1,t_1,c_1
1324 rd %y,t_2
1325 addxcc c_2,t_2,c_2 !=
1326 addx %g0,%g0,c_3
1327 addcc c_1,t_1,c_1
1328 addxcc c_2,t_2,c_2
1329 addx c_3,%g0,c_3 !=
1330 umul a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
1331 addcc c_1,t_1,c_1
1332 rd %y,t_2
1333 addxcc c_2,t_2,c_2 !=
1334 addx c_3,%g0,c_3
1335 st c_1,rp(12) !r[12]=c1;
1336
1337 umul a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
1338 addcc c_2,t_1,c_2 !=
1339 rd %y,t_2
1340 addxcc c_3,t_2,c_3
1341 addx %g0,%g0,c_1
1342 addcc c_2,t_1,c_2 !=
1343 addxcc c_3,t_2,c_3
1344 st c_2,rp(13) !r[13]=c2;
1345 addx c_1,%g0,c_1 !=
1346
1347 umul a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
1348 addcc c_3,t_1,c_3
1349 rd %y,t_2
1350 addxcc c_1,t_2,c_1 !=
1351 st c_3,rp(14) !r[14]=c3;
1352 st c_1,rp(15) !r[15]=c1;
1353
1354 ret
1355 restore %g0,%g0,%o0
1356
1357.type bn_sqr_comba8,#function
1358.size bn_sqr_comba8,(.-bn_sqr_comba8)
1359
1360.align 32
1361
1362.global bn_sqr_comba4
1363/*
1364 * void bn_sqr_comba4(r,a)
1365 * BN_ULONG *r,*a;
1366 */
1367bn_sqr_comba4:
1368 save %sp,FRAME_SIZE,%sp
1369 ld ap(0),a_0
1370 umul a_0,a_0,c_1 !sqr_add_c(a,0,c1,c2,c3);
1371 ld ap(1),a_1 !=
1372 rd %y,c_2
1373 st c_1,rp(0) !r[0]=c1;
1374
1375 ld ap(2),a_2
1376 umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
1377 addcc c_2,t_1,c_2
1378 rd %y,t_2
1379 addxcc %g0,t_2,c_3
1380 addx %g0,%g0,c_1 !=
1381 addcc c_2,t_1,c_2
1382 addxcc c_3,t_2,c_3
1383 addx c_1,%g0,c_1 !=
1384 st c_2,rp(1) !r[1]=c2;
1385
1386 umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
1387 addcc c_3,t_1,c_3
1388 rd %y,t_2 !=
1389 addxcc c_1,t_2,c_1
1390 addx %g0,%g0,c_2
1391 addcc c_3,t_1,c_3
1392 addxcc c_1,t_2,c_1 !=
1393 addx c_2,%g0,c_2
1394 ld ap(3),a_3
1395 umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
1396 addcc c_3,t_1,c_3 !=
1397 rd %y,t_2
1398 addxcc c_1,t_2,c_1
1399 st c_3,rp(2) !r[2]=c3;
1400 addx c_2,%g0,c_2 !=
1401
1402 umul a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
1403 addcc c_1,t_1,c_1
1404 rd %y,t_2
1405 addxcc c_2,t_2,c_2 !=
1406 addx %g0,%g0,c_3
1407 addcc c_1,t_1,c_1
1408 addxcc c_2,t_2,c_2
1409 addx c_3,%g0,c_3 !=
1410 umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
1411 addcc c_1,t_1,c_1
1412 rd %y,t_2
1413 addxcc c_2,t_2,c_2 !=
1414 addx c_3,%g0,c_3
1415 addcc c_1,t_1,c_1
1416 addxcc c_2,t_2,c_2
1417 addx c_3,%g0,c_3 !=
1418 st c_1,rp(3) !r[3]=c1;
1419
1420 umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
1421 addcc c_2,t_1,c_2
1422 rd %y,t_2 !=
1423 addxcc c_3,t_2,c_3
1424 addx %g0,%g0,c_1
1425 addcc c_2,t_1,c_2
1426 addxcc c_3,t_2,c_3 !=
1427 addx c_1,%g0,c_1
1428 umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
1429 addcc c_2,t_1,c_2
1430 rd %y,t_2 !=
1431 addxcc c_3,t_2,c_3
1432 addx c_1,%g0,c_1
1433 st c_2,rp(4) !r[4]=c2;
1434
1435 umul a_2,a_3,t_1 !=!sqr_add_c2(a,3,2,c3,c1,c2);
1436 addcc c_3,t_1,c_3
1437 rd %y,t_2
1438 addxcc c_1,t_2,c_1
1439 addx %g0,%g0,c_2 !=
1440 addcc c_3,t_1,c_3
1441 addxcc c_1,t_2,c_1
1442 st c_3,rp(5) !r[5]=c3;
1443 addx c_2,%g0,c_2 !=
1444
1445 umul a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
1446 addcc c_1,t_1,c_1
1447 rd %y,t_2
1448 addxcc c_2,t_2,c_2 !=
1449 st c_1,rp(6) !r[6]=c1;
1450 st c_2,rp(7) !r[7]=c2;
1451
1452 ret
1453 restore %g0,%g0,%o0
1454
1455.type bn_sqr_comba4,#function
1456.size bn_sqr_comba4,(.-bn_sqr_comba4)
1457
1458.align 32
diff --git a/src/lib/libcrypto/bn/asm/sparcv8plus.S b/src/lib/libcrypto/bn/asm/sparcv8plus.S
deleted file mode 100644
index 8c56e2e7e7..0000000000
--- a/src/lib/libcrypto/bn/asm/sparcv8plus.S
+++ /dev/null
@@ -1,1547 +0,0 @@
1.ident "sparcv8plus.s, Version 1.4"
2.ident "SPARC v9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
3
4/*
5 * ====================================================================
6 * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
7 * project.
8 *
9 * Rights for redistribution and usage in source and binary forms are
10 * granted according to the OpenSSL license. Warranty of any kind is
11 * disclaimed.
12 * ====================================================================
13 */
14
15/*
16 * This is my modest contribution to the OpenSSL project (see
17 * http://www.openssl.org/ for more information about it) and is
18 * a drop-in UltraSPARC ISA replacement for crypto/bn/bn_asm.c
19 * module. For updates see http://fy.chalmers.se/~appro/hpe/.
20 *
21 * Questions-n-answers.
22 *
23 * Q. How to compile?
24 * A. With SC4.x/SC5.x:
25 *
26 * cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
27 *
28 * and with gcc:
29 *
30 * gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o
31 *
32 * or if above fails (it does if you have gas installed):
33 *
34 * gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o
35 *
36 * Quick-n-dirty way to fuse the module into the library.
37 * Provided that the library is already configured and built
38 * (in 0.9.2 case with no-asm option):
39 *
40 * # cd crypto/bn
41 * # cp /some/place/bn_asm.sparc.v8plus.S .
42 * # cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
43 * # make
44 * # cd ../..
45 * # make; make test
46 *
47 * Quick-n-dirty way to get rid of it:
48 *
49 * # cd crypto/bn
50 * # touch bn_asm.c
51 * # make
52 * # cd ../..
53 * # make; make test
54 *
55 * Q. V8plus architecture? What kind of beast is that?
56 * A. Well, it's rather a programming model than an architecture...
57 * It's actually a v9-compliant (i.e. *any* UltraSPARC) CPU run under
58 * special conditions, namely when the kernel doesn't preserve the upper
59 * 32 bits of otherwise 64-bit registers during a context switch.
60 *
61 * Q. Why just UltraSPARC? What about SuperSPARC?
62 * A. The original release did target UltraSPARC only. Now a SuperSPARC
63 * version is provided alongside. Both versions share the bn_*comba[48]
64 * implementations (see the comment later in the code for an explanation).
65 * But what's so special about this UltraSPARC implementation?
66 * Why didn't I let the compiler do the job? The trouble is that most
67 * available compilers (well, SC5.0 is the only exception) don't
68 * attempt to take advantage of UltraSPARC's 64-bitness under
69 * 32-bit kernels even though it's perfectly possible (see the next
70 * question).
71 *
72 * Q. 64-bit registers under 32-bit kernels? Didn't you just say it
73 * doesn't work?
74 * A. You can't address *all* registers as 64-bit wide:-( The catch is
75 * that you actually may rely upon %o0-%o5 and %g1-%g4 being fully
76 * preserved if you're in a leaf function, i.e. one that never calls
77 * any other function. All functions in this module are leaf and
78 * 10 registers is a handful. As a matter of fact the non-"comba"
79 * routines don't even require that much and I could afford not to
80 * allocate a stack frame of their own for 'em:-)
81 *
82 * Q. What about 64-bit kernels?
83 * A. What about 'em? Just kidding:-) A pure 64-bit version is currently
84 * under evaluation and development...
85 *
86 * Q. What about shared libraries?
87 * A. What about 'em? Kidding again:-) The code does *not* contain any
88 * position-dependent code and it's safe to include it in a shared
89 * library as is.
90 *
91 * Q. How much faster does it go?
92 * A. Do you have a good benchmark? In either case, below is what I
93 * experience with the crypto/bn/expspeed.c test program:
94 *
95 * v8plus module on U10/300MHz against bn_asm.c compiled with:
96 *
97 * cc-5.0 -xarch=v8plus -xO5 -xdepend +7-12%
98 * cc-4.2 -xarch=v8plus -xO5 -xdepend +25-35%
99 * egcs-1.1.2 -mcpu=ultrasparc -O3 +35-45%
100 *
101 * v8 module on SS10/60MHz against bn_asm.c compiled with:
102 *
103 * cc-5.0 -xarch=v8 -xO5 -xdepend +7-10%
104 * cc-4.2 -xarch=v8 -xO5 -xdepend +10%
105 * egcs-1.1.2 -mv8 -O3 +35-45%
106 *
107 * As you can see it's damn hard to beat the new Sun C compiler,
108 * and it's first and foremost GNU C users who will appreciate this
109 * assembler implementation:-)
110 */
111
112/*
113 * Revision history.
114 *
115 * 1.0 - initial release;
116 * 1.1 - new loop unrolling model(*);
117 * - some more fine tuning;
118 * 1.2 - made gas friendly;
119 * - updates to documentation concerning v9;
120 * - new performance comparison matrix;
121 * 1.3 - fixed problem with /usr/ccs/lib/cpp;
122 * 1.4 - native V9 bn_*_comba[48] implementation (15% more efficient)
123 * resulting in slight overall performance kick;
124 * - some retunes;
125 * - support for GNU as added;
126 *
127 * (*) Originally unrolled loop looked like this:
128 * for (;;) {
129 * op(p+0); if (--n==0) break;
130 * op(p+1); if (--n==0) break;
131 * op(p+2); if (--n==0) break;
132 * op(p+3); if (--n==0) break;
133 * p+=4;
134 * }
135 * I unroll according to the following:
136 * while (n&~3) {
137 * op(p+0); op(p+1); op(p+2); op(p+3);
138 * p+=4; n-=4;
139 * }
140 * if (n) {
141 * op(p+0); if (--n==0) return;
142 * op(p+1); if (--n==0) return;
143 * op(p+2); return;
144 * }
145 */
146
147/*
148 * GNU assembler can't stand stuw:-(
149 */
150#define stuw st
151
152.section ".text",#alloc,#execinstr
153.file "bn_asm.sparc.v8plus.S"
154
155.align 32
156
157.global bn_mul_add_words
158/*
159 * BN_ULONG bn_mul_add_words(rp,ap,num,w)
160 * BN_ULONG *rp,*ap;
161 * int num;
162 * BN_ULONG w;
163 */
164bn_mul_add_words:
165 sra %o2,%g0,%o2 ! signx %o2
166 brgz,a %o2,.L_bn_mul_add_words_proceed
167 lduw [%o1],%g2
168 retl
169 clr %o0
170 nop
171 nop
172 nop
173
174.L_bn_mul_add_words_proceed:
175 srl %o3,%g0,%o3 ! clruw %o3
176 andcc %o2,-4,%g0
177 bz,pn %icc,.L_bn_mul_add_words_tail
178 clr %o5
179
180.L_bn_mul_add_words_loop: ! wow! 32 aligned!
181 lduw [%o0],%g1
182 lduw [%o1+4],%g3
183 mulx %o3,%g2,%g2
184 add %g1,%o5,%o4
185 nop
186 add %o4,%g2,%o4
187 stuw %o4,[%o0]
188 srlx %o4,32,%o5
189
190 lduw [%o0+4],%g1
191 lduw [%o1+8],%g2
192 mulx %o3,%g3,%g3
193 add %g1,%o5,%o4
194 dec 4,%o2
195 add %o4,%g3,%o4
196 stuw %o4,[%o0+4]
197 srlx %o4,32,%o5
198
199 lduw [%o0+8],%g1
200 lduw [%o1+12],%g3
201 mulx %o3,%g2,%g2
202 add %g1,%o5,%o4
203 inc 16,%o1
204 add %o4,%g2,%o4
205 stuw %o4,[%o0+8]
206 srlx %o4,32,%o5
207
208 lduw [%o0+12],%g1
209 mulx %o3,%g3,%g3
210 add %g1,%o5,%o4
211 inc 16,%o0
212 add %o4,%g3,%o4
213 andcc %o2,-4,%g0
214 stuw %o4,[%o0-4]
215 srlx %o4,32,%o5
216 bnz,a,pt %icc,.L_bn_mul_add_words_loop
217 lduw [%o1],%g2
218
219 brnz,a,pn %o2,.L_bn_mul_add_words_tail
220 lduw [%o1],%g2
221.L_bn_mul_add_words_return:
222 retl
223 mov %o5,%o0
224
225.L_bn_mul_add_words_tail:
226 lduw [%o0],%g1
227 mulx %o3,%g2,%g2
228 add %g1,%o5,%o4
229 dec %o2
230 add %o4,%g2,%o4
231 srlx %o4,32,%o5
232 brz,pt %o2,.L_bn_mul_add_words_return
233 stuw %o4,[%o0]
234
235 lduw [%o1+4],%g2
236 lduw [%o0+4],%g1
237 mulx %o3,%g2,%g2
238 add %g1,%o5,%o4
239 dec %o2
240 add %o4,%g2,%o4
241 srlx %o4,32,%o5
242 brz,pt %o2,.L_bn_mul_add_words_return
243 stuw %o4,[%o0+4]
244
245 lduw [%o1+8],%g2
246 lduw [%o0+8],%g1
247 mulx %o3,%g2,%g2
248 add %g1,%o5,%o4
249 add %o4,%g2,%o4
250 stuw %o4,[%o0+8]
251 retl
252 srlx %o4,32,%o0
253
254.type bn_mul_add_words,#function
255.size bn_mul_add_words,(.-bn_mul_add_words)
256
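/*
 * Aside: under v8plus the %y dance disappears; `mulx` returns the whole
 * 64-bit product and `srlx ...,32` peels off the carry. One word of the
 * bn_mul_add_words loop above, as a C sketch (helper name illustrative):
 *
 *    #include <stdint.h>
 *
 *    static uint32_t mul_add_step(uint32_t *rp, uint32_t a,
 *                                 uint32_t w, uint32_t carry)
 *    {
 *        uint64_t t = (uint64_t)w * a + *rp + carry; // mulx + add + add
 *        *rp = (uint32_t)t;                          // stuw %o4,[%o0]
 *        return (uint32_t)(t >> 32);                 // srlx %o4,32,%o5
 *    }
 */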
257.align 32
258
259.global bn_mul_words
260/*
261 * BN_ULONG bn_mul_words(rp,ap,num,w)
262 * BN_ULONG *rp,*ap;
263 * int num;
264 * BN_ULONG w;
265 */
266bn_mul_words:
267 sra %o2,%g0,%o2 ! signx %o2
268 brgz,a %o2,.L_bn_mul_words_proceeed
269 lduw [%o1],%g2
270 retl
271 clr %o0
272 nop
273 nop
274 nop
275
276.L_bn_mul_words_proceeed:
277 srl %o3,%g0,%o3 ! clruw %o3
278 andcc %o2,-4,%g0
279 bz,pn %icc,.L_bn_mul_words_tail
280 clr %o5
281
282.L_bn_mul_words_loop: ! wow! 32 aligned!
283 lduw [%o1+4],%g3
284 mulx %o3,%g2,%g2
285 add %g2,%o5,%o4
286 nop
287 stuw %o4,[%o0]
288 srlx %o4,32,%o5
289
290 lduw [%o1+8],%g2
291 mulx %o3,%g3,%g3
292 add %g3,%o5,%o4
293 dec 4,%o2
294 stuw %o4,[%o0+4]
295 srlx %o4,32,%o5
296
297 lduw [%o1+12],%g3
298 mulx %o3,%g2,%g2
299 add %g2,%o5,%o4
300 inc 16,%o1
301 stuw %o4,[%o0+8]
302 srlx %o4,32,%o5
303
304 mulx %o3,%g3,%g3
305 add %g3,%o5,%o4
306 inc 16,%o0
307 stuw %o4,[%o0-4]
308 srlx %o4,32,%o5
309 andcc %o2,-4,%g0
310 bnz,a,pt %icc,.L_bn_mul_words_loop
311 lduw [%o1],%g2
312 nop
313 nop
314
315 brnz,a,pn %o2,.L_bn_mul_words_tail
316 lduw [%o1],%g2
317.L_bn_mul_words_return:
318 retl
319 mov %o5,%o0
320
321.L_bn_mul_words_tail:
322 mulx %o3,%g2,%g2
323 add %g2,%o5,%o4
324 dec %o2
325 srlx %o4,32,%o5
326 brz,pt %o2,.L_bn_mul_words_return
327 stuw %o4,[%o0]
328
329 lduw [%o1+4],%g2
330 mulx %o3,%g2,%g2
331 add %g2,%o5,%o4
332 dec %o2
333 srlx %o4,32,%o5
334 brz,pt %o2,.L_bn_mul_words_return
335 stuw %o4,[%o0+4]
336
337 lduw [%o1+8],%g2
338 mulx %o3,%g2,%g2
339 add %g2,%o5,%o4
340 stuw %o4,[%o0+8]
341 retl
342 srlx %o4,32,%o0
343
344.type bn_mul_words,#function
345.size bn_mul_words,(.-bn_mul_words)
346
347.align 32
348.global bn_sqr_words
349/*
350 * void bn_sqr_words(r,a,n)
351 * BN_ULONG *r,*a;
352 * int n;
353 */
354bn_sqr_words:
355 sra %o2,%g0,%o2 ! signx %o2
356 brgz,a %o2,.L_bn_sqr_words_proceeed
357 lduw [%o1],%g2
358 retl
359 clr %o0
360 nop
361 nop
362 nop
363
364.L_bn_sqr_words_proceeed:
365 andcc %o2,-4,%g0
366 nop
367 bz,pn %icc,.L_bn_sqr_words_tail
368 nop
369
370.L_bn_sqr_words_loop: ! wow! 32 aligned!
371 lduw [%o1+4],%g3
372 mulx %g2,%g2,%o4
373 stuw %o4,[%o0]
374 srlx %o4,32,%o5
375 stuw %o5,[%o0+4]
376 nop
377
378 lduw [%o1+8],%g2
379 mulx %g3,%g3,%o4
380 dec 4,%o2
381 stuw %o4,[%o0+8]
382 srlx %o4,32,%o5
383 stuw %o5,[%o0+12]
384
385 lduw [%o1+12],%g3
386 mulx %g2,%g2,%o4
387 srlx %o4,32,%o5
388 stuw %o4,[%o0+16]
389 inc 16,%o1
390 stuw %o5,[%o0+20]
391
392 mulx %g3,%g3,%o4
393 inc 32,%o0
394 stuw %o4,[%o0-8]
395 srlx %o4,32,%o5
396 andcc %o2,-4,%g2
397 stuw %o5,[%o0-4]
398 bnz,a,pt %icc,.L_bn_sqr_words_loop
399 lduw [%o1],%g2
400 nop
401
402 brnz,a,pn %o2,.L_bn_sqr_words_tail
403 lduw [%o1],%g2
404.L_bn_sqr_words_return:
405 retl
406 clr %o0
407
408.L_bn_sqr_words_tail:
409 mulx %g2,%g2,%o4
410 dec %o2
411 stuw %o4,[%o0]
412 srlx %o4,32,%o5
413 brz,pt %o2,.L_bn_sqr_words_return
414 stuw %o5,[%o0+4]
415
416 lduw [%o1+4],%g2
417 mulx %g2,%g2,%o4
418 dec %o2
419 stuw %o4,[%o0+8]
420 srlx %o4,32,%o5
421 brz,pt %o2,.L_bn_sqr_words_return
422 stuw %o5,[%o0+12]
423
424 lduw [%o1+8],%g2
425 mulx %g2,%g2,%o4
426 srlx %o4,32,%o5
427 stuw %o4,[%o0+16]
428 stuw %o5,[%o0+20]
429 retl
430 clr %o0
431
432.type bn_sqr_words,#function
433.size bn_sqr_words,(.-bn_sqr_words)
434
435.align 32
436.global bn_div_words
437/*
438 * BN_ULONG bn_div_words(h,l,d)
439 * BN_ULONG h,l,d;
440 */
441bn_div_words:
442 sllx %o0,32,%o0
443 or %o0,%o1,%o0
444 udivx %o0,%o2,%o0
445 retl
446 srl %o0,%g0,%o0 ! clruw %o0
447
448.type bn_div_words,#function
449.size bn_div_words,(.-bn_div_words)
450
451.align 32
452
453.global bn_add_words
454/*
455 * BN_ULONG bn_add_words(rp,ap,bp,n)
456 * BN_ULONG *rp,*ap,*bp;
457 * int n;
458 */
459bn_add_words:
460 sra %o3,%g0,%o3 ! signx %o3
461 brgz,a %o3,.L_bn_add_words_proceed
462 lduw [%o1],%o4
463 retl
464 clr %o0
465
466.L_bn_add_words_proceed:
467 andcc %o3,-4,%g0
468 bz,pn %icc,.L_bn_add_words_tail
469 addcc %g0,0,%g0 ! clear carry flag
470
471.L_bn_add_words_loop: ! wow! 32 aligned!
472 dec 4,%o3
473 lduw [%o2],%o5
474 lduw [%o1+4],%g1
475 lduw [%o2+4],%g2
476 lduw [%o1+8],%g3
477 lduw [%o2+8],%g4
478 addccc %o5,%o4,%o5
479 stuw %o5,[%o0]
480
481 lduw [%o1+12],%o4
482 lduw [%o2+12],%o5
483 inc 16,%o1
484 addccc %g1,%g2,%g1
485 stuw %g1,[%o0+4]
486
487 inc 16,%o2
488 addccc %g3,%g4,%g3
489 stuw %g3,[%o0+8]
490
491 inc 16,%o0
492 addccc %o5,%o4,%o5
493 stuw %o5,[%o0-4]
494 and %o3,-4,%g1
495 brnz,a,pt %g1,.L_bn_add_words_loop
496 lduw [%o1],%o4
497
498 brnz,a,pn %o3,.L_bn_add_words_tail
499 lduw [%o1],%o4
500.L_bn_add_words_return:
501 clr %o0
502 retl
503 movcs %icc,1,%o0
504 nop
505
506.L_bn_add_words_tail:
507 lduw [%o2],%o5
508 dec %o3
509 addccc %o5,%o4,%o5
510 brz,pt %o3,.L_bn_add_words_return
511 stuw %o5,[%o0]
512
513 lduw [%o1+4],%o4
514 lduw [%o2+4],%o5
515 dec %o3
516 addccc %o5,%o4,%o5
517 brz,pt %o3,.L_bn_add_words_return
518 stuw %o5,[%o0+4]
519
520 lduw [%o1+8],%o4
521 lduw [%o2+8],%o5
522 addccc %o5,%o4,%o5
523 stuw %o5,[%o0+8]
524 clr %o0
525 retl
526 movcs %icc,1,%o0
527
528.type bn_add_words,#function
529.size bn_add_words,(.-bn_add_words)
530
531.global bn_sub_words
532/*
533 * BN_ULONG bn_sub_words(rp,ap,bp,n)
534 * BN_ULONG *rp,*ap,*bp;
535 * int n;
536 */
537bn_sub_words:
538 sra %o3,%g0,%o3 ! signx %o3
539 brgz,a %o3,.L_bn_sub_words_proceed
540 lduw [%o1],%o4
541 retl
542 clr %o0
543
544.L_bn_sub_words_proceed:
545 andcc %o3,-4,%g0
546 bz,pn %icc,.L_bn_sub_words_tail
547 addcc %g0,0,%g0 ! clear carry flag
548
549.L_bn_sub_words_loop: ! wow! 32 aligned!
550 dec 4,%o3
551 lduw [%o2],%o5
552 lduw [%o1+4],%g1
553 lduw [%o2+4],%g2
554 lduw [%o1+8],%g3
555 lduw [%o2+8],%g4
556 subccc %o4,%o5,%o5
557 stuw %o5,[%o0]
558
559 lduw [%o1+12],%o4
560 lduw [%o2+12],%o5
561 inc 16,%o1
562 subccc %g1,%g2,%g2
563 stuw %g2,[%o0+4]
564
565 inc 16,%o2
566 subccc %g3,%g4,%g4
567 stuw %g4,[%o0+8]
568
569 inc 16,%o0
570 subccc %o4,%o5,%o5
571 stuw %o5,[%o0-4]
572 and %o3,-4,%g1
573 brnz,a,pt %g1,.L_bn_sub_words_loop
574 lduw [%o1],%o4
575
576 brnz,a,pn %o3,.L_bn_sub_words_tail
577 lduw [%o1],%o4
578.L_bn_sub_words_return:
579 clr %o0
580 retl
581 movcs %icc,1,%o0
582 nop
583
584.L_bn_sub_words_tail: ! wow! 32 aligned!
585 lduw [%o2],%o5
586 dec %o3
587 subccc %o4,%o5,%o5
588 brz,pt %o3,.L_bn_sub_words_return
589 stuw %o5,[%o0]
590
591 lduw [%o1+4],%o4
592 lduw [%o2+4],%o5
593 dec %o3
594 subccc %o4,%o5,%o5
595 brz,pt %o3,.L_bn_sub_words_return
596 stuw %o5,[%o0+4]
597
598 lduw [%o1+8],%o4
599 lduw [%o2+8],%o5
600 subccc %o4,%o5,%o5
601 stuw %o5,[%o0+8]
602 clr %o0
603 retl
604 movcs %icc,1,%o0
605
606.type bn_sub_words,#function
607.size bn_sub_words,(.-bn_sub_words)
608
609/*
610 * The code below depends on the fact that the upper parts of %l0-%l7
611 * and %i0-%i7 are zeroed by the kernel after a context switch. In
612 * previous versions this comment stated that "the trouble is that
613 * it's not feasible to implement the mumbo-jumbo in less V9
614 * instructions:-(", which apparently isn't true thanks to the
615 * 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement
616 * results not from the shorter code, but from the elimination of
617 * multicycle non-pairable 'rd %y,%rd' instructions.
618 *
619 * Andy.
620 */
621
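/*
 * Aside: concretely, the comba code below fuses the low and middle column
 * words into the 64-bit c_12, while c_3 collects the 64-bit carries
 * pre-shifted to bit 32 (t_2 holds 1<<32). A C model of the two recurring
 * patterns (a sketch of what the instruction sequences effect, not
 * original source):
 *
 *    #include <stdint.h>
 *
 *    static void comba_step(uint64_t a, uint64_t b, // zero-extended words
 *                           uint64_t *c_12, uint64_t *c_3)
 *    {
 *        uint64_t t_1 = a * b;              // mulx
 *        uint64_t old = *c_12;
 *        *c_12 += t_1;                      // addcc ...,%xcc
 *        if (*c_12 < old)                   // bcs,a %xcc,.+8
 *            *c_3 += (uint64_t)1 << 32;     // add c_3,t_2,c_3
 *    }
 *
 *    static uint32_t comba_emit(uint64_t *c_12, uint64_t *c_3)
 *    {
 *        uint32_t r = (uint32_t)*c_12;      // stuw t_1,rp(i)
 *        *c_12 = (*c_12 >> 32) | *c_3;      // srlx + or
 *        *c_3 = 0;                          // clr c_3 for the next column
 *        return r;
 *    }
 */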
622#define FRAME_SIZE -96
623
624/*
625 * Here is the register usage map for *all* routines below.
626 */
627#define t_1 %o0
628#define t_2 %o1
629#define c_12 %o2
630#define c_3 %o3
631
632#define ap(I) [%i1+4*I]
633#define bp(I) [%i2+4*I]
634#define rp(I) [%i0+4*I]
635
636#define a_0 %l0
637#define a_1 %l1
638#define a_2 %l2
639#define a_3 %l3
640#define a_4 %l4
641#define a_5 %l5
642#define a_6 %l6
643#define a_7 %l7
644
645#define b_0 %i3
646#define b_1 %i4
647#define b_2 %i5
648#define b_3 %o4
649#define b_4 %o5
650#define b_5 %o7
651#define b_6 %g1
652#define b_7 %g4
653
654.align 32
655.global bn_mul_comba8
656/*
657 * void bn_mul_comba8(r,a,b)
658 * BN_ULONG *r,*a,*b;
659 */
660bn_mul_comba8:
661 save %sp,FRAME_SIZE,%sp
662 mov 1,t_2
663 lduw ap(0),a_0
664 sllx t_2,32,t_2
665 lduw bp(0),b_0 !=
666 lduw bp(1),b_1
667 mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
668 srlx t_1,32,c_12
669 stuw t_1,rp(0) !=!r[0]=c1;
670
671 lduw ap(1),a_1
672 mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
673 addcc c_12,t_1,c_12
674 clr c_3 !=
675 bcs,a %xcc,.+8
676 add c_3,t_2,c_3
677 lduw ap(2),a_2
678 mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
679 addcc c_12,t_1,t_1
680 bcs,a %xcc,.+8
681 add c_3,t_2,c_3
682 srlx t_1,32,c_12 !=
683 stuw t_1,rp(1) !r[1]=c2;
684 or c_12,c_3,c_12
685
686 mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
687 addcc c_12,t_1,c_12 !=
688 clr c_3
689 bcs,a %xcc,.+8
690 add c_3,t_2,c_3
691 lduw bp(2),b_2 !=
692 mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
693 addcc c_12,t_1,c_12
694 bcs,a %xcc,.+8
695 add c_3,t_2,c_3 !=
696 lduw bp(3),b_3
697 mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
698 addcc c_12,t_1,t_1
699 bcs,a %xcc,.+8 !=
700 add c_3,t_2,c_3
701 srlx t_1,32,c_12
702 stuw t_1,rp(2) !r[2]=c3;
703 or c_12,c_3,c_12 !=
704
705 mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
706 addcc c_12,t_1,c_12
707 clr c_3
708 bcs,a %xcc,.+8 !=
709 add c_3,t_2,c_3
710 mulx a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
711 addcc c_12,t_1,c_12
712 bcs,a %xcc,.+8 !=
713 add c_3,t_2,c_3
714 lduw ap(3),a_3
715 mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
716 addcc c_12,t_1,c_12 !=
717 bcs,a %xcc,.+8
718 add c_3,t_2,c_3
719 lduw ap(4),a_4
720 mulx a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);!=
721 addcc c_12,t_1,t_1
722 bcs,a %xcc,.+8
723 add c_3,t_2,c_3
724 srlx t_1,32,c_12 !=
725 stuw t_1,rp(3) !r[3]=c1;
726 or c_12,c_3,c_12
727
728 mulx a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
729 addcc c_12,t_1,c_12 !=
730 clr c_3
731 bcs,a %xcc,.+8
732 add c_3,t_2,c_3
733 mulx a_3,b_1,t_1 !=!mul_add_c(a[3],b[1],c2,c3,c1);
734 addcc c_12,t_1,c_12
735 bcs,a %xcc,.+8
736 add c_3,t_2,c_3
737 mulx a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
738 addcc c_12,t_1,c_12
739 bcs,a %xcc,.+8
740 add c_3,t_2,c_3
741 lduw bp(4),b_4 !=
742 mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
743 addcc c_12,t_1,c_12
744 bcs,a %xcc,.+8
745 add c_3,t_2,c_3 !=
746 lduw bp(5),b_5
747 mulx a_0,b_4,t_1 !mul_add_c(a[0],b[4],c2,c3,c1);
748 addcc c_12,t_1,t_1
749 bcs,a %xcc,.+8 !=
750 add c_3,t_2,c_3
751 srlx t_1,32,c_12
752 stuw t_1,rp(4) !r[4]=c2;
753 or c_12,c_3,c_12 !=
754
755 mulx a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
756 addcc c_12,t_1,c_12
757 clr c_3
758 bcs,a %xcc,.+8 !=
759 add c_3,t_2,c_3
760 mulx a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
761 addcc c_12,t_1,c_12
762 bcs,a %xcc,.+8 !=
763 add c_3,t_2,c_3
764 mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
765 addcc c_12,t_1,c_12
766 bcs,a %xcc,.+8 !=
767 add c_3,t_2,c_3
768 mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
769 addcc c_12,t_1,c_12
770 bcs,a %xcc,.+8 !=
771 add c_3,t_2,c_3
772 lduw ap(5),a_5
773 mulx a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
774 addcc c_12,t_1,c_12 !=
775 bcs,a %xcc,.+8
776 add c_3,t_2,c_3
777 lduw ap(6),a_6
778 mulx a_5,b_0,t_1 !=!mul_add_c(a[5],b[0],c3,c1,c2);
779 addcc c_12,t_1,t_1
780 bcs,a %xcc,.+8
781 add c_3,t_2,c_3
782 srlx t_1,32,c_12 !=
783 stuw t_1,rp(5) !r[5]=c3;
784 or c_12,c_3,c_12
785
786 mulx a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
787 addcc c_12,t_1,c_12 !=
788 clr c_3
789 bcs,a %xcc,.+8
790 add c_3,t_2,c_3
791 mulx a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
792 addcc c_12,t_1,c_12
793 bcs,a %xcc,.+8
794 add c_3,t_2,c_3
795 mulx a_4,b_2,t_1 !=!mul_add_c(a[4],b[2],c1,c2,c3);
796 addcc c_12,t_1,c_12
797 bcs,a %xcc,.+8
798 add c_3,t_2,c_3
799 mulx a_3,b_3,t_1 !=!mul_add_c(a[3],b[3],c1,c2,c3);
800 addcc c_12,t_1,c_12
801 bcs,a %xcc,.+8
802 add c_3,t_2,c_3
803 mulx a_2,b_4,t_1 !=!mul_add_c(a[2],b[4],c1,c2,c3);
804 addcc c_12,t_1,c_12
805 bcs,a %xcc,.+8
806 add c_3,t_2,c_3
807 lduw bp(6),b_6 !=
808 mulx a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
809 addcc c_12,t_1,c_12
810 bcs,a %xcc,.+8
811 add c_3,t_2,c_3 !=
812 lduw bp(7),b_7
813 mulx a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
814 addcc c_12,t_1,t_1
815 bcs,a %xcc,.+8 !=
816 add c_3,t_2,c_3
817 srlx t_1,32,c_12
818 stuw t_1,rp(6) !r[6]=c1;
819 or c_12,c_3,c_12 !=
820
821 mulx a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
822 addcc c_12,t_1,c_12
823 clr c_3
824 bcs,a %xcc,.+8 !=
825 add c_3,t_2,c_3
826 mulx a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
827 addcc c_12,t_1,c_12
828 bcs,a %xcc,.+8 !=
829 add c_3,t_2,c_3
830 mulx a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
831 addcc c_12,t_1,c_12
832 bcs,a %xcc,.+8 !=
833 add c_3,t_2,c_3
834 mulx a_3,b_4,t_1 !mul_add_c(a[3],b[4],c2,c3,c1);
835 addcc c_12,t_1,c_12
836 bcs,a %xcc,.+8 !=
837 add c_3,t_2,c_3
838 mulx a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
839 addcc c_12,t_1,c_12
840 bcs,a %xcc,.+8 !=
841 add c_3,t_2,c_3
842 mulx a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
843 addcc c_12,t_1,c_12
844 bcs,a %xcc,.+8 !=
845 add c_3,t_2,c_3
846 lduw ap(7),a_7
847 mulx a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
848 addcc c_12,t_1,c_12
849 bcs,a %xcc,.+8
850 add c_3,t_2,c_3
851 mulx a_7,b_0,t_1 !=!mul_add_c(a[7],b[0],c2,c3,c1);
852 addcc c_12,t_1,t_1
853 bcs,a %xcc,.+8
854 add c_3,t_2,c_3
855 srlx t_1,32,c_12 !=
856 stuw t_1,rp(7) !r[7]=c2;
857 or c_12,c_3,c_12
858
859 mulx a_7,b_1,t_1 !=!mul_add_c(a[7],b[1],c3,c1,c2);
860 addcc c_12,t_1,c_12
861 clr c_3
862 bcs,a %xcc,.+8
863 add c_3,t_2,c_3 !=
864 mulx a_6,b_2,t_1 !mul_add_c(a[6],b[2],c3,c1,c2);
865 addcc c_12,t_1,c_12
866 bcs,a %xcc,.+8
867 add c_3,t_2,c_3 !=
868 mulx a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
869 addcc c_12,t_1,c_12
870 bcs,a %xcc,.+8
871 add c_3,t_2,c_3 !=
872 mulx a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
873 addcc c_12,t_1,c_12
874 bcs,a %xcc,.+8
875 add c_3,t_2,c_3 !=
876 mulx a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
877 addcc c_12,t_1,c_12
878 bcs,a %xcc,.+8
879 add c_3,t_2,c_3 !=
880 mulx a_2,b_6,t_1 !mul_add_c(a[2],b[6],c3,c1,c2);
881 addcc c_12,t_1,c_12
882 bcs,a %xcc,.+8
883 add c_3,t_2,c_3 !=
884 mulx a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
885 addcc c_12,t_1,t_1
886 bcs,a %xcc,.+8
887 add c_3,t_2,c_3 !=
888 srlx t_1,32,c_12
889 stuw t_1,rp(8) !r[8]=c3;
890 or c_12,c_3,c_12
891
892 mulx a_2,b_7,t_1 !=!mul_add_c(a[2],b[7],c1,c2,c3);
893 addcc c_12,t_1,c_12
894 clr c_3
895 bcs,a %xcc,.+8
896 add c_3,t_2,c_3 !=
897 mulx a_3,b_6,t_1 !mul_add_c(a[3],b[6],c1,c2,c3);
898 addcc c_12,t_1,c_12
899 bcs,a %xcc,.+8 !=
900 add c_3,t_2,c_3
901 mulx a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
902 addcc c_12,t_1,c_12
903 bcs,a %xcc,.+8 !=
904 add c_3,t_2,c_3
905 mulx a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
906 addcc c_12,t_1,c_12
907 bcs,a %xcc,.+8 !=
908 add c_3,t_2,c_3
909 mulx a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
910 addcc c_12,t_1,c_12
911 bcs,a %xcc,.+8 !=
912 add c_3,t_2,c_3
913 mulx a_7,b_2,t_1 !mul_add_c(a[7],b[2],c1,c2,c3);
914 addcc c_12,t_1,t_1
915 bcs,a %xcc,.+8 !=
916 add c_3,t_2,c_3
917 srlx t_1,32,c_12
918 stuw t_1,rp(9) !r[9]=c1;
919 or c_12,c_3,c_12 !=
920
921 mulx a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
922 addcc c_12,t_1,c_12
923 clr c_3
924 bcs,a %xcc,.+8 !=
925 add c_3,t_2,c_3
926 mulx a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
927 addcc c_12,t_1,c_12
928 bcs,a %xcc,.+8 !=
929 add c_3,t_2,c_3
930 mulx a_5,b_5,t_1 !mul_add_c(a[5],b[5],c2,c3,c1);
931 addcc c_12,t_1,c_12
932 bcs,a %xcc,.+8 !=
933 add c_3,t_2,c_3
934 mulx a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
935 addcc c_12,t_1,c_12
936 bcs,a %xcc,.+8 !=
937 add c_3,t_2,c_3
938 mulx a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
939 addcc c_12,t_1,t_1
940 bcs,a %xcc,.+8 !=
941 add c_3,t_2,c_3
942 srlx t_1,32,c_12
943 stuw t_1,rp(10) !r[10]=c2;
944 or c_12,c_3,c_12 !=
945
946 mulx a_4,b_7,t_1 !mul_add_c(a[4],b[7],c3,c1,c2);
947 addcc c_12,t_1,c_12
948 clr c_3
949 bcs,a %xcc,.+8 !=
950 add c_3,t_2,c_3
951 mulx a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
952 addcc c_12,t_1,c_12
953 bcs,a %xcc,.+8 !=
954 add c_3,t_2,c_3
955 mulx a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
956 addcc c_12,t_1,c_12
957 bcs,a %xcc,.+8 !=
958 add c_3,t_2,c_3
959 mulx a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
960 addcc c_12,t_1,t_1
961 bcs,a %xcc,.+8 !=
962 add c_3,t_2,c_3
963 srlx t_1,32,c_12
964 stuw t_1,rp(11) !r[11]=c3;
965 or c_12,c_3,c_12 !=
966
967 mulx a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
968 addcc c_12,t_1,c_12
969 clr c_3
970 bcs,a %xcc,.+8 !=
971 add c_3,t_2,c_3
972 mulx a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
973 addcc c_12,t_1,c_12
974 bcs,a %xcc,.+8 !=
975 add c_3,t_2,c_3
976 mulx a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
977 addcc c_12,t_1,t_1
978 bcs,a %xcc,.+8 !=
979 add c_3,t_2,c_3
980 srlx t_1,32,c_12
981 stuw t_1,rp(12) !r[12]=c1;
982 or c_12,c_3,c_12 !=
983
984 mulx a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
985 addcc c_12,t_1,c_12
986 clr c_3
987 bcs,a %xcc,.+8 !=
988 add c_3,t_2,c_3
989 mulx a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
990 addcc c_12,t_1,t_1
991 bcs,a %xcc,.+8 !=
992 add c_3,t_2,c_3
993 srlx t_1,32,c_12
994 stuw t_1,rp(13) !r[13]=c2;
995 or c_12,c_3,c_12 !=
996
997 mulx a_7,b_7,t_1 !mul_add_c(a[7],b[7],c3,c1,c2);
998 addcc c_12,t_1,t_1
999 srlx t_1,32,c_12 !=
1000 stuw t_1,rp(14) !r[14]=c3;
1001 stuw c_12,rp(15) !r[15]=c1;
1002
1003 ret
1004 restore %g0,%g0,%o0 !=
1005
1006.type bn_mul_comba8,#function
1007.size bn_mul_comba8,(.-bn_mul_comba8)
1008
1009.align 32
1010
1011.global bn_mul_comba4
1012/*
1013 * void bn_mul_comba4(r,a,b)
1014 * BN_ULONG *r,*a,*b;
1015 */
1016bn_mul_comba4:
1017 save %sp,FRAME_SIZE,%sp
1018 lduw ap(0),a_0
1019 mov 1,t_2
1020 lduw bp(0),b_0
1021 sllx t_2,32,t_2 !=
1022 lduw bp(1),b_1
1023 mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
1024 srlx t_1,32,c_12
1025 stuw t_1,rp(0) !=!r[0]=c1;
1026
1027 lduw ap(1),a_1
1028 mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
1029 addcc c_12,t_1,c_12
1030 clr c_3 !=
1031 bcs,a %xcc,.+8
1032 add c_3,t_2,c_3
1033 lduw ap(2),a_2
1034 mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
1035 addcc c_12,t_1,t_1
1036 bcs,a %xcc,.+8
1037 add c_3,t_2,c_3
1038 srlx t_1,32,c_12 !=
1039 stuw t_1,rp(1) !r[1]=c2;
1040 or c_12,c_3,c_12
1041
1042 mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
1043 addcc c_12,t_1,c_12 !=
1044 clr c_3
1045 bcs,a %xcc,.+8
1046 add c_3,t_2,c_3
1047 lduw bp(2),b_2 !=
1048 mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
1049 addcc c_12,t_1,c_12
1050 bcs,a %xcc,.+8
1051 add c_3,t_2,c_3 !=
1052 lduw bp(3),b_3
1053 mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
1054 addcc c_12,t_1,t_1
1055 bcs,a %xcc,.+8 !=
1056 add c_3,t_2,c_3
1057 srlx t_1,32,c_12
1058 stuw t_1,rp(2) !r[2]=c3;
1059 or c_12,c_3,c_12 !=
1060
1061 mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
1062 addcc c_12,t_1,c_12
1063 clr c_3
1064 bcs,a %xcc,.+8 !=
1065 add c_3,t_2,c_3
1066 mulx a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
1067 addcc c_12,t_1,c_12
1068 bcs,a %xcc,.+8 !=
1069 add c_3,t_2,c_3
1070 lduw ap(3),a_3
1071 mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
1072 addcc c_12,t_1,c_12 !=
1073 bcs,a %xcc,.+8
1074 add c_3,t_2,c_3
1075 mulx a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
1076 addcc c_12,t_1,t_1 !=
1077 bcs,a %xcc,.+8
1078 add c_3,t_2,c_3
1079 srlx t_1,32,c_12
1080 stuw t_1,rp(3) !=!r[3]=c1;
1081 or c_12,c_3,c_12
1082
1083 mulx a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
1084 addcc c_12,t_1,c_12
1085 clr c_3 !=
1086 bcs,a %xcc,.+8
1087 add c_3,t_2,c_3
1088 mulx a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
1089 addcc c_12,t_1,c_12 !=
1090 bcs,a %xcc,.+8
1091 add c_3,t_2,c_3
1092 mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
1093 addcc c_12,t_1,t_1 !=
1094 bcs,a %xcc,.+8
1095 add c_3,t_2,c_3
1096 srlx t_1,32,c_12
1097 stuw t_1,rp(4) !=!r[4]=c2;
1098 or c_12,c_3,c_12
1099
1100 mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
1101 addcc c_12,t_1,c_12
1102 clr c_3 !=
1103 bcs,a %xcc,.+8
1104 add c_3,t_2,c_3
1105 mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
1106 addcc c_12,t_1,t_1 !=
1107 bcs,a %xcc,.+8
1108 add c_3,t_2,c_3
1109 srlx t_1,32,c_12
1110 stuw t_1,rp(5) !=!r[5]=c3;
1111 or c_12,c_3,c_12
1112
1113 mulx a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
1114 addcc c_12,t_1,t_1
1115 srlx t_1,32,c_12 !=
1116 stuw t_1,rp(6) !r[6]=c1;
1117 stuw c_12,rp(7) !r[7]=c2;
1118
1119 ret
1120 restore %g0,%g0,%o0
1121
1122.type bn_mul_comba4,#function
1123.size bn_mul_comba4,(.-bn_mul_comba4)
1124
1125.align 32
1126
1127.global bn_sqr_comba8
1128bn_sqr_comba8:
1129 save %sp,FRAME_SIZE,%sp
1130 mov 1,t_2
1131 lduw ap(0),a_0
1132 sllx t_2,32,t_2
1133 lduw ap(1),a_1
1134 mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
1135 srlx t_1,32,c_12
1136 stuw t_1,rp(0) !r[0]=c1;
1137
1138 lduw ap(2),a_2
1139 mulx a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
1140 addcc c_12,t_1,c_12
1141 clr c_3
1142 bcs,a %xcc,.+8
1143 add c_3,t_2,c_3
1144 addcc c_12,t_1,t_1
1145 bcs,a %xcc,.+8
1146 add c_3,t_2,c_3
1147 srlx t_1,32,c_12
1148 stuw t_1,rp(1) !r[1]=c2;
1149 or c_12,c_3,c_12
1150
1151 mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
1152 addcc c_12,t_1,c_12
1153 clr c_3
1154 bcs,a %xcc,.+8
1155 add c_3,t_2,c_3
1156 addcc c_12,t_1,c_12
1157 bcs,a %xcc,.+8
1158 add c_3,t_2,c_3
1159 lduw ap(3),a_3
1160 mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
1161 addcc c_12,t_1,t_1
1162 bcs,a %xcc,.+8
1163 add c_3,t_2,c_3
1164 srlx t_1,32,c_12
1165 stuw t_1,rp(2) !r[2]=c3;
1166 or c_12,c_3,c_12
1167
1168 mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
1169 addcc c_12,t_1,c_12
1170 clr c_3
1171 bcs,a %xcc,.+8
1172 add c_3,t_2,c_3
1173 addcc c_12,t_1,c_12
1174 bcs,a %xcc,.+8
1175 add c_3,t_2,c_3
1176 lduw ap(4),a_4
1177 mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
1178 addcc c_12,t_1,c_12
1179 bcs,a %xcc,.+8
1180 add c_3,t_2,c_3
1181 addcc c_12,t_1,t_1
1182 bcs,a %xcc,.+8
1183 add c_3,t_2,c_3
1184 srlx t_1,32,c_12
1185 stuw t_1,rp(3) !r[3]=c1;
1186 or c_12,c_3,c_12
1187
1188 mulx a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
1189 addcc c_12,t_1,c_12
1190 clr c_3
1191 bcs,a %xcc,.+8
1192 add c_3,t_2,c_3
1193 addcc c_12,t_1,c_12
1194 bcs,a %xcc,.+8
1195 add c_3,t_2,c_3
1196 mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
1197 addcc c_12,t_1,c_12
1198 bcs,a %xcc,.+8
1199 add c_3,t_2,c_3
1200 addcc c_12,t_1,c_12
1201 bcs,a %xcc,.+8
1202 add c_3,t_2,c_3
1203 lduw ap(5),a_5
1204 mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
1205 addcc c_12,t_1,t_1
1206 bcs,a %xcc,.+8
1207 add c_3,t_2,c_3
1208 srlx t_1,32,c_12
1209 stuw t_1,rp(4) !r[4]=c2;
1210 or c_12,c_3,c_12
1211
1212 mulx a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
1213 addcc c_12,t_1,c_12
1214 clr c_3
1215 bcs,a %xcc,.+8
1216 add c_3,t_2,c_3
1217 addcc c_12,t_1,c_12
1218 bcs,a %xcc,.+8
1219 add c_3,t_2,c_3
1220 mulx a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
1221 addcc c_12,t_1,c_12
1222 bcs,a %xcc,.+8
1223 add c_3,t_2,c_3
1224 addcc c_12,t_1,c_12
1225 bcs,a %xcc,.+8
1226 add c_3,t_2,c_3
1227 lduw ap(6),a_6
1228 mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
1229 addcc c_12,t_1,c_12
1230 bcs,a %xcc,.+8
1231 add c_3,t_2,c_3
1232 addcc c_12,t_1,t_1
1233 bcs,a %xcc,.+8
1234 add c_3,t_2,c_3
1235 srlx t_1,32,c_12
1236 stuw t_1,rp(5) !r[5]=c3;
1237 or c_12,c_3,c_12
1238
1239 mulx a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
1240 addcc c_12,t_1,c_12
1241 clr c_3
1242 bcs,a %xcc,.+8
1243 add c_3,t_2,c_3
1244 addcc c_12,t_1,c_12
1245 bcs,a %xcc,.+8
1246 add c_3,t_2,c_3
1247 mulx a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
1248 addcc c_12,t_1,c_12
1249 bcs,a %xcc,.+8
1250 add c_3,t_2,c_3
1251 addcc c_12,t_1,c_12
1252 bcs,a %xcc,.+8
1253 add c_3,t_2,c_3
1254 mulx a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
1255 addcc c_12,t_1,c_12
1256 bcs,a %xcc,.+8
1257 add c_3,t_2,c_3
1258 addcc c_12,t_1,c_12
1259 bcs,a %xcc,.+8
1260 add c_3,t_2,c_3
1261 lduw ap(7),a_7
1262 mulx a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
1263 addcc c_12,t_1,t_1
1264 bcs,a %xcc,.+8
1265 add c_3,t_2,c_3
1266 srlx t_1,32,c_12
1267 stuw t_1,rp(6) !r[6]=c1;
1268 or c_12,c_3,c_12
1269
1270 mulx a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
1271 addcc c_12,t_1,c_12
1272 clr c_3
1273 bcs,a %xcc,.+8
1274 add c_3,t_2,c_3
1275 addcc c_12,t_1,c_12
1276 bcs,a %xcc,.+8
1277 add c_3,t_2,c_3
1278 mulx a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
1279 addcc c_12,t_1,c_12
1280 bcs,a %xcc,.+8
1281 add c_3,t_2,c_3
1282 addcc c_12,t_1,c_12
1283 bcs,a %xcc,.+8
1284 add c_3,t_2,c_3
1285 mulx a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
1286 addcc c_12,t_1,c_12
1287 bcs,a %xcc,.+8
1288 add c_3,t_2,c_3
1289 addcc c_12,t_1,c_12
1290 bcs,a %xcc,.+8
1291 add c_3,t_2,c_3
1292 mulx a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
1293 addcc c_12,t_1,c_12
1294 bcs,a %xcc,.+8
1295 add c_3,t_2,c_3
1296 addcc c_12,t_1,t_1
1297 bcs,a %xcc,.+8
1298 add c_3,t_2,c_3
1299 srlx t_1,32,c_12
1300 stuw t_1,rp(7) !r[7]=c2;
1301 or c_12,c_3,c_12
1302
1303 mulx a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
1304 addcc c_12,t_1,c_12
1305 clr c_3
1306 bcs,a %xcc,.+8
1307 add c_3,t_2,c_3
1308 addcc c_12,t_1,c_12
1309 bcs,a %xcc,.+8
1310 add c_3,t_2,c_3
1311 mulx a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
1312 addcc c_12,t_1,c_12
1313 bcs,a %xcc,.+8
1314 add c_3,t_2,c_3
1315 addcc c_12,t_1,c_12
1316 bcs,a %xcc,.+8
1317 add c_3,t_2,c_3
1318 mulx a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
1319 addcc c_12,t_1,c_12
1320 bcs,a %xcc,.+8
1321 add c_3,t_2,c_3
1322 addcc c_12,t_1,c_12
1323 bcs,a %xcc,.+8
1324 add c_3,t_2,c_3
1325 mulx a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
1326 addcc c_12,t_1,t_1
1327 bcs,a %xcc,.+8
1328 add c_3,t_2,c_3
1329 srlx t_1,32,c_12
1330 stuw t_1,rp(8) !r[8]=c3;
1331 or c_12,c_3,c_12
1332
1333 mulx a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
1334 addcc c_12,t_1,c_12
1335 clr c_3
1336 bcs,a %xcc,.+8
1337 add c_3,t_2,c_3
1338 addcc c_12,t_1,c_12
1339 bcs,a %xcc,.+8
1340 add c_3,t_2,c_3
1341 mulx a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
1342 addcc c_12,t_1,c_12
1343 bcs,a %xcc,.+8
1344 add c_3,t_2,c_3
1345 addcc c_12,t_1,c_12
1346 bcs,a %xcc,.+8
1347 add c_3,t_2,c_3
1348 mulx a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
1349 addcc c_12,t_1,c_12
1350 bcs,a %xcc,.+8
1351 add c_3,t_2,c_3
1352 addcc c_12,t_1,t_1
1353 bcs,a %xcc,.+8
1354 add c_3,t_2,c_3
1355 srlx t_1,32,c_12
1356 stuw t_1,rp(9) !r[9]=c1;
1357 or c_12,c_3,c_12
1358
1359 mulx a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
1360 addcc c_12,t_1,c_12
1361 clr c_3
1362 bcs,a %xcc,.+8
1363 add c_3,t_2,c_3
1364 addcc c_12,t_1,c_12
1365 bcs,a %xcc,.+8
1366 add c_3,t_2,c_3
1367 mulx a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
1368 addcc c_12,t_1,c_12
1369 bcs,a %xcc,.+8
1370 add c_3,t_2,c_3
1371 addcc c_12,t_1,c_12
1372 bcs,a %xcc,.+8
1373 add c_3,t_2,c_3
1374 mulx a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
1375 addcc c_12,t_1,t_1
1376 bcs,a %xcc,.+8
1377 add c_3,t_2,c_3
1378 srlx t_1,32,c_12
1379 stuw t_1,rp(10) !r[10]=c2;
1380 or c_12,c_3,c_12
1381
1382 mulx a_4,a_7,t_1 !sqr_add_c2(a,7,4,c3,c1,c2);
1383 addcc c_12,t_1,c_12
1384 clr c_3
1385 bcs,a %xcc,.+8
1386 add c_3,t_2,c_3
1387 addcc c_12,t_1,c_12
1388 bcs,a %xcc,.+8
1389 add c_3,t_2,c_3
1390 mulx a_5,a_6,t_1 !sqr_add_c2(a,6,5,c3,c1,c2);
1391 addcc c_12,t_1,c_12
1392 bcs,a %xcc,.+8
1393 add c_3,t_2,c_3
1394 addcc c_12,t_1,t_1
1395 bcs,a %xcc,.+8
1396 add c_3,t_2,c_3
1397 srlx t_1,32,c_12
1398 stuw t_1,rp(11) !r[11]=c3;
1399 or c_12,c_3,c_12
1400
1401 mulx a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
1402 addcc c_12,t_1,c_12
1403 clr c_3
1404 bcs,a %xcc,.+8
1405 add c_3,t_2,c_3
1406 addcc c_12,t_1,c_12
1407 bcs,a %xcc,.+8
1408 add c_3,t_2,c_3
1409 mulx a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
1410 addcc c_12,t_1,t_1
1411 bcs,a %xcc,.+8
1412 add c_3,t_2,c_3
1413 srlx t_1,32,c_12
1414 stuw t_1,rp(12) !r[12]=c1;
1415 or c_12,c_3,c_12
1416
1417 mulx a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
1418 addcc c_12,t_1,c_12
1419 clr c_3
1420 bcs,a %xcc,.+8
1421 add c_3,t_2,c_3
1422 addcc c_12,t_1,t_1
1423 bcs,a %xcc,.+8
1424 add c_3,t_2,c_3
1425 srlx t_1,32,c_12
1426 stuw t_1,rp(13) !r[13]=c2;
1427 or c_12,c_3,c_12
1428
1429 mulx a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
1430 addcc c_12,t_1,t_1
1431 srlx t_1,32,c_12
1432 stuw t_1,rp(14) !r[14]=c3;
1433 stuw c_12,rp(15) !r[15]=c1;
1434
1435 ret
1436 restore %g0,%g0,%o0
1437
1438.type bn_sqr_comba8,#function
1439.size bn_sqr_comba8,(.-bn_sqr_comba8)
1440
1441.align 32
1442
1443.global bn_sqr_comba4
1444/*
1445 * void bn_sqr_comba4(r,a)
1446 * BN_ULONG *r,*a;
1447 */
1448bn_sqr_comba4:
1449 save %sp,FRAME_SIZE,%sp
1450 mov 1,t_2
1451 lduw ap(0),a_0
1452 sllx t_2,32,t_2
1453 lduw ap(1),a_1
1454 mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
1455 srlx t_1,32,c_12
1456 stuw t_1,rp(0) !r[0]=c1;
1457
1458 lduw ap(2),a_2
1459 mulx a_0,a_1,t_1 !sqr_add_c2(a,1,0,c2,c3,c1);
1460 addcc c_12,t_1,c_12
1461 clr c_3
1462 bcs,a %xcc,.+8
1463 add c_3,t_2,c_3
1464 addcc c_12,t_1,t_1
1465 bcs,a %xcc,.+8
1466 add c_3,t_2,c_3
1467 srlx t_1,32,c_12
1468 stuw t_1,rp(1) !r[1]=c2;
1469 or c_12,c_3,c_12
1470
1471 mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
1472 addcc c_12,t_1,c_12
1473 clr c_3
1474 bcs,a %xcc,.+8
1475 add c_3,t_2,c_3
1476 addcc c_12,t_1,c_12
1477 bcs,a %xcc,.+8
1478 add c_3,t_2,c_3
1479 lduw ap(3),a_3
1480 mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
1481 addcc c_12,t_1,t_1
1482 bcs,a %xcc,.+8
1483 add c_3,t_2,c_3
1484 srlx t_1,32,c_12
1485 stuw t_1,rp(2) !r[2]=c3;
1486 or c_12,c_3,c_12
1487
1488 mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
1489 addcc c_12,t_1,c_12
1490 clr c_3
1491 bcs,a %xcc,.+8
1492 add c_3,t_2,c_3
1493 addcc c_12,t_1,c_12
1494 bcs,a %xcc,.+8
1495 add c_3,t_2,c_3
1496 mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
1497 addcc c_12,t_1,c_12
1498 bcs,a %xcc,.+8
1499 add c_3,t_2,c_3
1500 addcc c_12,t_1,t_1
1501 bcs,a %xcc,.+8
1502 add c_3,t_2,c_3
1503 srlx t_1,32,c_12
1504 stuw t_1,rp(3) !r[3]=c1;
1505 or c_12,c_3,c_12
1506
1507 mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
1508 addcc c_12,t_1,c_12
1509 clr c_3
1510 bcs,a %xcc,.+8
1511 add c_3,t_2,c_3
1512 addcc c_12,t_1,c_12
1513 bcs,a %xcc,.+8
1514 add c_3,t_2,c_3
1515 mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
1516 addcc c_12,t_1,t_1
1517 bcs,a %xcc,.+8
1518 add c_3,t_2,c_3
1519 srlx t_1,32,c_12
1520 stuw t_1,rp(4) !r[4]=c2;
1521 or c_12,c_3,c_12
1522
1523 mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
1524 addcc c_12,t_1,c_12
1525 clr c_3
1526 bcs,a %xcc,.+8
1527 add c_3,t_2,c_3
1528 addcc c_12,t_1,t_1
1529 bcs,a %xcc,.+8
1530 add c_3,t_2,c_3
1531 srlx t_1,32,c_12
1532 stuw t_1,rp(5) !r[5]=c3;
1533 or c_12,c_3,c_12
1534
1535 mulx a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
1536 addcc c_12,t_1,t_1
1537 srlx t_1,32,c_12
1538 stuw t_1,rp(6) !r[6]=c1;
1539 stuw c_12,rp(7) !r[7]=c2;
1540
1541 ret
1542 restore %g0,%g0,%o0
1543
1544.type bn_sqr_comba4,#function
1545.size bn_sqr_comba4,(.-bn_sqr_comba4)
1546
1547.align 32
diff --git a/src/lib/libcrypto/bn/asm/sparcv9-mont.pl b/src/lib/libcrypto/bn/asm/sparcv9-mont.pl
deleted file mode 100644
index b8fb1e8a25..0000000000
--- a/src/lib/libcrypto/bn/asm/sparcv9-mont.pl
+++ /dev/null
@@ -1,606 +0,0 @@
1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# December 2005
11#
12# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The reasons
13# for undertaking this effort are multiple. First of all, UltraSPARC is
14# not the whole SPARCv9 universe and other VIS-free implementations
15# deserve optimized code just as much. Secondly, the newly introduced
16# UltraSPARC T1, a.k.a. Niagara, has a shared FPU, and concurrent
17# FPU-intensive paths, such as sparcv9a-mont, would simply sink it. Yes,
18# T1 is equipped with several integrated RSA/DSA accelerator circuits
19# accessible through a kernel driver [only(*)], but having a decent
20# user-land software implementation is important too. Finally, there was
21# the desire to experiment with a dedicated squaring procedure. Yes,
22# this module implements one, because it was easiest to draft in
23# SPARCv9 instructions...
24
25# (*) An Engine accessing the driver in question is on my TODO list.
26# For reference, the accelerator is estimated to give a 6 to 10 times
27# improvement on single-threaded RSA sign. It should be noted
28# that a 6-10x improvement coefficient does not actually mean
29# anything extraordinary in terms of absolute [single-threaded]
30# performance, as the SPARCv9 instruction set is by all means the
31# least suitable for high-performance crypto among 64-bit
32# platforms. The 6-10x factor simply places T1 in the same
33# performance domain as, say, AMD64 and IA-64. The improvement of
34# RSA verify doesn't appear impressive at all, but it's the sign
35# operation which is far more critical/interesting.
36
37# You might notice that the inner loops are modulo-scheduled:-) This
38# has essentially negligible impact on UltraSPARC performance; it's
39# Fujitsu SPARC64 V users who should notice and hopefully appreciate
40# the advantage... Currently this module surpasses sparcv9a-mont.pl
41# by ~20% on UltraSPARC-III and later cores, but recall that the
42# sparcv9a module still has hidden potential [see the TODO list
43# there], which is estimated to be larger than 20%...
44
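For orientation, the loops below implement the classic word-serial (CIOS-style) Montgomery multiplication. Here is a minimal C sketch of that algorithm, assuming 32-bit limbs and n0 = -1/np mod 2^32 as bn_mul_mont expects; names and the scratch-vector handling are illustrative, and none of the modulo-scheduling described above is reflected.

    #include <stdint.h>
    #include <string.h>

    /* Reference sketch only: rp = ap*bp*2^(-32*num) mod np,
     * consuming one 32-bit word of bp per outer iteration.
     * tp is caller-provided scratch of num words. */
    static void
    mont_mul_ref(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
        const uint32_t *np, uint32_t n0, int num, uint32_t *tp)
    {
            uint64_t car0, car1, top = 0;
            uint32_t m1, borrow = 0;
            int i, j;

            memset(tp, 0, num * sizeof(*tp));
            for (i = 0; i < num; i++) {
                    /* m1 = "t[0]"*n0, chosen so the low word cancels */
                    m1 = (uint32_t)((tp[0] + (uint64_t)ap[0] * bp[i]) * n0);
                    car0 = (uint64_t)ap[0] * bp[i] + tp[0];
                    car1 = (uint64_t)np[0] * m1 + (uint32_t)car0;
                    car0 >>= 32; car1 >>= 32;
                    for (j = 1; j < num; j++) {
                            car0 += (uint64_t)ap[j] * bp[i] + tp[j];
                            car1 += (uint64_t)np[j] * m1 + (uint32_t)car0;
                            tp[j - 1] = (uint32_t)car1;
                            car0 >>= 32; car1 >>= 32;
                    }
                    top += car0 + car1;     /* the implicit /2^32 shift */
                    tp[num - 1] = (uint32_t)top;
                    top >>= 32;
            }
            /* conditional final subtraction, cf. .Lsub/.Lcopy below */
            for (j = 0; j < num; j++) {
                    uint64_t d = (uint64_t)tp[j] - np[j] - borrow;
                    rp[j] = (uint32_t)d;
                    borrow = (uint32_t)(d >> 32) & 1;
            }
            if (top < borrow)               /* t < n: keep t, not t-n */
                    memcpy(rp, tp, num * sizeof(*tp));
    }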
45# int bn_mul_mont(
46$rp="%i0"; # BN_ULONG *rp,
47$ap="%i1"; # const BN_ULONG *ap,
48$bp="%i2"; # const BN_ULONG *bp,
49$np="%i3"; # const BN_ULONG *np,
50$n0="%i4"; # const BN_ULONG *n0,
51$num="%i5"; # int num);
52
53$bits=32;
54for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
55if ($bits==64) { $bias=2047; $frame=192; }
56else { $bias=0; $frame=128; }
57
58$car0="%o0";
59$car1="%o1";
60$car2="%o2"; # 1 bit
61$acc0="%o3";
62$acc1="%o4";
63$mask="%g1"; # 32 bits, what a waste...
64$tmp0="%g4";
65$tmp1="%g5";
66
67$i="%l0";
68$j="%l1";
69$mul0="%l2";
70$mul1="%l3";
71$tp="%l4";
72$apj="%l5";
73$npj="%l6";
74$tpj="%l7";
75
76$fname="bn_mul_mont_int";
77
78$code=<<___;
79.section ".text",#alloc,#execinstr
80
81.global $fname
82.align 32
83$fname:
84 cmp %o5,4 ! 128 bits minimum
85 bge,pt %icc,.Lenter
86 sethi %hi(0xffffffff),$mask
87 retl
88 clr %o0
89.align 32
90.Lenter:
91 save %sp,-$frame,%sp
92 sll $num,2,$num ! num*=4
93 or $mask,%lo(0xffffffff),$mask
94 ld [$n0],$n0
95 cmp $ap,$bp
96 and $num,$mask,$num
97 ld [$bp],$mul0 ! bp[0]
98 nop
99
100 add %sp,$bias,%o7 ! real top of stack
101 ld [$ap],$car0 ! ap[0] ! redundant in squaring context
102 sub %o7,$num,%o7
103 ld [$ap+4],$apj ! ap[1]
104 and %o7,-1024,%o7
105 ld [$np],$car1 ! np[0]
106 sub %o7,$bias,%sp ! alloca
107 ld [$np+4],$npj ! np[1]
108 be,pt `$bits==32?"%icc":"%xcc"`,.Lbn_sqr_mont
109 mov 12,$j
110
111 mulx $car0,$mul0,$car0 ! ap[0]*bp[0]
112 mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0]
113 and $car0,$mask,$acc0
114 add %sp,$bias+$frame,$tp
115 ld [$ap+8],$apj !prologue!
116
117 mulx $n0,$acc0,$mul1 ! "t[0]"*n0
118 and $mul1,$mask,$mul1
119
120 mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
121 mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
122 srlx $car0,32,$car0
123 add $acc0,$car1,$car1
124 ld [$np+8],$npj !prologue!
125 srlx $car1,32,$car1
126 mov $tmp0,$acc0 !prologue!
127
128.L1st:
129 mulx $apj,$mul0,$tmp0
130 mulx $npj,$mul1,$tmp1
131 add $acc0,$car0,$car0
132 ld [$ap+$j],$apj ! ap[j]
133 and $car0,$mask,$acc0
134 add $acc1,$car1,$car1
135 ld [$np+$j],$npj ! np[j]
136 srlx $car0,32,$car0
137 add $acc0,$car1,$car1
138 add $j,4,$j ! j++
139 mov $tmp0,$acc0
140 st $car1,[$tp]
141 cmp $j,$num
142 mov $tmp1,$acc1
143 srlx $car1,32,$car1
144 bl %icc,.L1st
145 add $tp,4,$tp ! tp++
146!.L1st
147
148 mulx $apj,$mul0,$tmp0 !epilogue!
149 mulx $npj,$mul1,$tmp1
150 add $acc0,$car0,$car0
151 and $car0,$mask,$acc0
152 add $acc1,$car1,$car1
153 srlx $car0,32,$car0
154 add $acc0,$car1,$car1
155 st $car1,[$tp]
156 srlx $car1,32,$car1
157
158 add $tmp0,$car0,$car0
159 and $car0,$mask,$acc0
160 add $tmp1,$car1,$car1
161 srlx $car0,32,$car0
162 add $acc0,$car1,$car1
163 st $car1,[$tp+4]
164 srlx $car1,32,$car1
165
166 add $car0,$car1,$car1
167 st $car1,[$tp+8]
168 srlx $car1,32,$car2
169
170 mov 4,$i ! i++
171 ld [$bp+4],$mul0 ! bp[1]
172.Louter:
173 add %sp,$bias+$frame,$tp
174 ld [$ap],$car0 ! ap[0]
175 ld [$ap+4],$apj ! ap[1]
176 ld [$np],$car1 ! np[0]
177 ld [$np+4],$npj ! np[1]
178 ld [$tp],$tmp1 ! tp[0]
179 ld [$tp+4],$tpj ! tp[1]
180 mov 12,$j
181
182 mulx $car0,$mul0,$car0
183 mulx $apj,$mul0,$tmp0 !prologue!
184 add $tmp1,$car0,$car0
185 ld [$ap+8],$apj !prologue!
186 and $car0,$mask,$acc0
187
188 mulx $n0,$acc0,$mul1
189 and $mul1,$mask,$mul1
190
191 mulx $car1,$mul1,$car1
192 mulx $npj,$mul1,$acc1 !prologue!
193 srlx $car0,32,$car0
194 add $acc0,$car1,$car1
195 ld [$np+8],$npj !prologue!
196 srlx $car1,32,$car1
197 mov $tmp0,$acc0 !prologue!
198
199.Linner:
200 mulx $apj,$mul0,$tmp0
201 mulx $npj,$mul1,$tmp1
202 add $tpj,$car0,$car0
203 ld [$ap+$j],$apj ! ap[j]
204 add $acc0,$car0,$car0
205 add $acc1,$car1,$car1
206 ld [$np+$j],$npj ! np[j]
207 and $car0,$mask,$acc0
208 ld [$tp+8],$tpj ! tp[j]
209 srlx $car0,32,$car0
210 add $acc0,$car1,$car1
211 add $j,4,$j ! j++
212 mov $tmp0,$acc0
213 st $car1,[$tp] ! tp[j-1]
214 srlx $car1,32,$car1
215 mov $tmp1,$acc1
216 cmp $j,$num
217 bl %icc,.Linner
218 add $tp,4,$tp ! tp++
219!.Linner
220
221 mulx $apj,$mul0,$tmp0 !epilogue!
222 mulx $npj,$mul1,$tmp1
223 add $tpj,$car0,$car0
224 add $acc0,$car0,$car0
225 ld [$tp+8],$tpj ! tp[j]
226 and $car0,$mask,$acc0
227 add $acc1,$car1,$car1
228 srlx $car0,32,$car0
229 add $acc0,$car1,$car1
230 st $car1,[$tp] ! tp[j-1]
231 srlx $car1,32,$car1
232
233 add $tpj,$car0,$car0
234 add $tmp0,$car0,$car0
235 and $car0,$mask,$acc0
236 add $tmp1,$car1,$car1
237 add $acc0,$car1,$car1
238 st $car1,[$tp+4] ! tp[j-1]
239 srlx $car0,32,$car0
240 add $i,4,$i ! i++
241 srlx $car1,32,$car1
242
243 add $car0,$car1,$car1
244 cmp $i,$num
245 add $car2,$car1,$car1
246 st $car1,[$tp+8]
247
248 srlx $car1,32,$car2
249 bl,a %icc,.Louter
250 ld [$bp+$i],$mul0 ! bp[i]
251!.Louter
252
253 add $tp,12,$tp
254
255.Ltail:
256 add $np,$num,$np
257 add $rp,$num,$rp
258 mov $tp,$ap
259 sub %g0,$num,%o7 ! k=-num
260 ba .Lsub
261 subcc %g0,%g0,%g0 ! clear %icc.c
262.align 16
263.Lsub:
264 ld [$tp+%o7],%o0
265 ld [$np+%o7],%o1
266 subccc %o0,%o1,%o1 ! tp[j]-np[j]
267 add $rp,%o7,$i
268 add %o7,4,%o7
269 brnz %o7,.Lsub
270 st %o1,[$i]
271 subc $car2,0,$car2 ! handle upmost overflow bit
272 and $tp,$car2,$ap
273 andn $rp,$car2,$np
274 or $ap,$np,$ap
275 sub %g0,$num,%o7
276
277.Lcopy:
278 ld [$ap+%o7],%o0 ! copy or in-place refresh
279 st %g0,[$tp+%o7] ! zap tp
280 st %o0,[$rp+%o7]
281 add %o7,4,%o7
282 brnz %o7,.Lcopy
283 nop
284 mov 1,%i0
285 ret
286 restore
287___
288
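A note on the .Lsub/.Lcopy tail above: subc $car2,0,$car2 converts the upmost overflow bit minus the subtraction borrow into an all-zeros or all-ones word, and the and/andn/or triple then selects the source pointer for the copy loop without a data-dependent branch. The same idea in a hedged C sketch, with illustrative names; the mask is applied to values here rather than to pointers, which is equivalent.

    #include <stdint.h>

    /* Select t or t-n after the trial subtraction, branch-free. */
    static void
    mont_final_select(uint32_t *rp, uint32_t *tp, const uint32_t *np,
        uint32_t top, int num)      /* top = upmost overflow bit */
    {
            uint32_t borrow = 0, mask;
            int j;

            for (j = 0; j < num; j++) {
                    uint64_t d = (uint64_t)tp[j] - np[j] - borrow;
                    rp[j] = (uint32_t)d;            /* tentative t-n */
                    borrow = (uint32_t)(d >> 32) & 1;
            }
            mask = 0U - (top < borrow);     /* all-ones iff t < n */
            for (j = 0; j < num; j++) {
                    rp[j] = (tp[j] & mask) | (rp[j] & ~mask);
                    tp[j] = 0;              /* zap tp, as .Lcopy does */
            }
    }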
289########
290######## .Lbn_sqr_mont gives up to 20% *overall* improvement over
291######## code without the dedicated squaring procedure that follows.
292########
293$sbit="%i2"; # re-use $bp!
294
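The saving comes from symmetry: in a square every cross product a[i]*a[j] with i != j occurs twice, so only num*(num+1)/2 of the num^2 partial products need to be computed, with the cross terms doubled on the fly ($sbit carries the bit shifted out by that doubling). A column-wise comba sketch in C, assuming a compiler with unsigned __int128 such as gcc or clang; names are illustrative.

    #include <stdint.h>

    /* Reference sketch of comba squaring: r (2*num words) = a^2.
     * Cross products are computed once and counted twice. */
    static void
    sqr_comba_ref(uint32_t *r, const uint32_t *a, int num)
    {
            unsigned __int128 acc = 0;      /* wide column accumulator */
            int i, k;

            for (k = 0; k < 2 * num - 1; k++) {
                    for (i = (k < num) ? 0 : k - num + 1; i <= k / 2; i++) {
                            unsigned __int128 p =
                                (unsigned __int128)a[i] * a[k - i];
                            acc += (i == k - i) ? p : 2 * p;
                    }
                    r[k] = (uint32_t)acc;   /* one result word per column */
                    acc >>= 32;
            }
            r[2 * num - 1] = (uint32_t)acc;
    }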
295$code.=<<___;
296.align 32
297.Lbn_sqr_mont:
298 mulx $mul0,$mul0,$car0 ! ap[0]*ap[0]
299 mulx $apj,$mul0,$tmp0 !prologue!
300 and $car0,$mask,$acc0
301 add %sp,$bias+$frame,$tp
302 ld [$ap+8],$apj !prologue!
303
304 mulx $n0,$acc0,$mul1 ! "t[0]"*n0
305 srlx $car0,32,$car0
306 and $mul1,$mask,$mul1
307
308 mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
309 mulx $npj,$mul1,$acc1 !prologue!
310 and $car0,1,$sbit
311 ld [$np+8],$npj !prologue!
312 srlx $car0,1,$car0
313 add $acc0,$car1,$car1
314 srlx $car1,32,$car1
315 mov $tmp0,$acc0 !prologue!
316
317.Lsqr_1st:
318 mulx $apj,$mul0,$tmp0
319 mulx $npj,$mul1,$tmp1
320 add $acc0,$car0,$car0 ! ap[j]*a0+c0
321 add $acc1,$car1,$car1
322 ld [$ap+$j],$apj ! ap[j]
323 and $car0,$mask,$acc0
324 ld [$np+$j],$npj ! np[j]
325 srlx $car0,32,$car0
326 add $acc0,$acc0,$acc0
327 or $sbit,$acc0,$acc0
328 mov $tmp1,$acc1
329 srlx $acc0,32,$sbit
330 add $j,4,$j ! j++
331 and $acc0,$mask,$acc0
332 cmp $j,$num
333 add $acc0,$car1,$car1
334 st $car1,[$tp]
335 mov $tmp0,$acc0
336 srlx $car1,32,$car1
337 bl %icc,.Lsqr_1st
338 add $tp,4,$tp ! tp++
339!.Lsqr_1st
340
341 mulx $apj,$mul0,$tmp0 ! epilogue
342 mulx $npj,$mul1,$tmp1
343 add $acc0,$car0,$car0 ! ap[j]*a0+c0
344 add $acc1,$car1,$car1
345 and $car0,$mask,$acc0
346 srlx $car0,32,$car0
347 add $acc0,$acc0,$acc0
348 or $sbit,$acc0,$acc0
349 srlx $acc0,32,$sbit
350 and $acc0,$mask,$acc0
351 add $acc0,$car1,$car1
352 st $car1,[$tp]
353 srlx $car1,32,$car1
354
355 add $tmp0,$car0,$car0 ! ap[j]*a0+c0
356 add $tmp1,$car1,$car1
357 and $car0,$mask,$acc0
358 srlx $car0,32,$car0
359 add $acc0,$acc0,$acc0
360 or $sbit,$acc0,$acc0
361 srlx $acc0,32,$sbit
362 and $acc0,$mask,$acc0
363 add $acc0,$car1,$car1
364 st $car1,[$tp+4]
365 srlx $car1,32,$car1
366
367 add $car0,$car0,$car0
368 or $sbit,$car0,$car0
369 add $car0,$car1,$car1
370 st $car1,[$tp+8]
371 srlx $car1,32,$car2
372
373 ld [%sp+$bias+$frame],$tmp0 ! tp[0]
374 ld [%sp+$bias+$frame+4],$tmp1 ! tp[1]
375 ld [%sp+$bias+$frame+8],$tpj ! tp[2]
376 ld [$ap+4],$mul0 ! ap[1]
377 ld [$ap+8],$apj ! ap[2]
378 ld [$np],$car1 ! np[0]
379 ld [$np+4],$npj ! np[1]
380 mulx $n0,$tmp0,$mul1
381
382 mulx $mul0,$mul0,$car0
383 and $mul1,$mask,$mul1
384
385 mulx $car1,$mul1,$car1
386 mulx $npj,$mul1,$acc1
387 add $tmp0,$car1,$car1
388 and $car0,$mask,$acc0
389 ld [$np+8],$npj ! np[2]
390 srlx $car1,32,$car1
391 add $tmp1,$car1,$car1
392 srlx $car0,32,$car0
393 add $acc0,$car1,$car1
394 and $car0,1,$sbit
395 add $acc1,$car1,$car1
396 srlx $car0,1,$car0
397 mov 12,$j
398 st $car1,[%sp+$bias+$frame] ! tp[0]=
399 srlx $car1,32,$car1
400 add %sp,$bias+$frame+4,$tp
401
402.Lsqr_2nd:
403 mulx $apj,$mul0,$acc0
404 mulx $npj,$mul1,$acc1
405 add $acc0,$car0,$car0
406 add $tpj,$car1,$car1
407 ld [$ap+$j],$apj ! ap[j]
408 and $car0,$mask,$acc0
409 ld [$np+$j],$npj ! np[j]
410 srlx $car0,32,$car0
411 add $acc1,$car1,$car1
412 ld [$tp+8],$tpj ! tp[j]
413 add $acc0,$acc0,$acc0
414 add $j,4,$j ! j++
415 or $sbit,$acc0,$acc0
416 srlx $acc0,32,$sbit
417 and $acc0,$mask,$acc0
418 cmp $j,$num
419 add $acc0,$car1,$car1
420 st $car1,[$tp] ! tp[j-1]
421 srlx $car1,32,$car1
422 bl %icc,.Lsqr_2nd
423 add $tp,4,$tp ! tp++
424!.Lsqr_2nd
425
426 mulx $apj,$mul0,$acc0
427 mulx $npj,$mul1,$acc1
428 add $acc0,$car0,$car0
429 add $tpj,$car1,$car1
430 and $car0,$mask,$acc0
431 srlx $car0,32,$car0
432 add $acc1,$car1,$car1
433 add $acc0,$acc0,$acc0
434 or $sbit,$acc0,$acc0
435 srlx $acc0,32,$sbit
436 and $acc0,$mask,$acc0
437 add $acc0,$car1,$car1
438 st $car1,[$tp] ! tp[j-1]
439 srlx $car1,32,$car1
440
441 add $car0,$car0,$car0
442 or $sbit,$car0,$car0
443 add $car0,$car1,$car1
444 add $car2,$car1,$car1
445 st $car1,[$tp+4]
446 srlx $car1,32,$car2
447
448 ld [%sp+$bias+$frame],$tmp1 ! tp[0]
449 ld [%sp+$bias+$frame+4],$tpj ! tp[1]
450 ld [$ap+8],$mul0 ! ap[2]
451 ld [$np],$car1 ! np[0]
452 ld [$np+4],$npj ! np[1]
453 mulx $n0,$tmp1,$mul1
454 and $mul1,$mask,$mul1
455 mov 8,$i
456
457 mulx $mul0,$mul0,$car0
458 mulx $car1,$mul1,$car1
459 and $car0,$mask,$acc0
460 add $tmp1,$car1,$car1
461 srlx $car0,32,$car0
462 add %sp,$bias+$frame,$tp
463 srlx $car1,32,$car1
464 and $car0,1,$sbit
465 srlx $car0,1,$car0
466 mov 4,$j
467
468.Lsqr_outer:
469.Lsqr_inner1:
470 mulx $npj,$mul1,$acc1
471 add $tpj,$car1,$car1
472 add $j,4,$j
473 ld [$tp+8],$tpj
474 cmp $j,$i
475 add $acc1,$car1,$car1
476 ld [$np+$j],$npj
477 st $car1,[$tp]
478 srlx $car1,32,$car1
479 bl %icc,.Lsqr_inner1
480 add $tp,4,$tp
481!.Lsqr_inner1
482
483 add $j,4,$j
484 ld [$ap+$j],$apj ! ap[j]
485 mulx $npj,$mul1,$acc1
486 add $tpj,$car1,$car1
487 ld [$np+$j],$npj ! np[j]
488 add $acc0,$car1,$car1
489 ld [$tp+8],$tpj ! tp[j]
490 add $acc1,$car1,$car1
491 st $car1,[$tp]
492 srlx $car1,32,$car1
493
494 add $j,4,$j
495 cmp $j,$num
496 be,pn %icc,.Lsqr_no_inner2
497 add $tp,4,$tp
498
499.Lsqr_inner2:
500 mulx $apj,$mul0,$acc0
501 mulx $npj,$mul1,$acc1
502 add $tpj,$car1,$car1
503 add $acc0,$car0,$car0
504 ld [$ap+$j],$apj ! ap[j]
505 and $car0,$mask,$acc0
506 ld [$np+$j],$npj ! np[j]
507 srlx $car0,32,$car0
508 add $acc0,$acc0,$acc0
509 ld [$tp+8],$tpj ! tp[j]
510 or $sbit,$acc0,$acc0
511 add $j,4,$j ! j++
512 srlx $acc0,32,$sbit
513 and $acc0,$mask,$acc0
514 cmp $j,$num
515 add $acc0,$car1,$car1
516 add $acc1,$car1,$car1
517 st $car1,[$tp] ! tp[j-1]
518 srlx $car1,32,$car1
519 bl %icc,.Lsqr_inner2
520 add $tp,4,$tp ! tp++
521
522.Lsqr_no_inner2:
523 mulx $apj,$mul0,$acc0
524 mulx $npj,$mul1,$acc1
525 add $tpj,$car1,$car1
526 add $acc0,$car0,$car0
527 and $car0,$mask,$acc0
528 srlx $car0,32,$car0
529 add $acc0,$acc0,$acc0
530 or $sbit,$acc0,$acc0
531 srlx $acc0,32,$sbit
532 and $acc0,$mask,$acc0
533 add $acc0,$car1,$car1
534 add $acc1,$car1,$car1
535 st $car1,[$tp] ! tp[j-1]
536 srlx $car1,32,$car1
537
538 add $car0,$car0,$car0
539 or $sbit,$car0,$car0
540 add $car0,$car1,$car1
541 add $car2,$car1,$car1
542 st $car1,[$tp+4]
543 srlx $car1,32,$car2
544
545 add $i,4,$i ! i++
546 ld [%sp+$bias+$frame],$tmp1 ! tp[0]
547 ld [%sp+$bias+$frame+4],$tpj ! tp[1]
548 ld [$ap+$i],$mul0 ! ap[j]
549 ld [$np],$car1 ! np[0]
550 ld [$np+4],$npj ! np[1]
551 mulx $n0,$tmp1,$mul1
552 and $mul1,$mask,$mul1
553 add $i,4,$tmp0
554
555 mulx $mul0,$mul0,$car0
556 mulx $car1,$mul1,$car1
557 and $car0,$mask,$acc0
558 add $tmp1,$car1,$car1
559 srlx $car0,32,$car0
560 add %sp,$bias+$frame,$tp
561 srlx $car1,32,$car1
562 and $car0,1,$sbit
563 srlx $car0,1,$car0
564
565 cmp $tmp0,$num ! i<num-1
566 bl %icc,.Lsqr_outer
567 mov 4,$j
568
569.Lsqr_last:
570 mulx $npj,$mul1,$acc1
571 add $tpj,$car1,$car1
572 add $j,4,$j
573 ld [$tp+8],$tpj
574 cmp $j,$i
575 add $acc1,$car1,$car1
576 ld [$np+$j],$npj
577 st $car1,[$tp]
578 srlx $car1,32,$car1
579 bl %icc,.Lsqr_last
580 add $tp,4,$tp
581!.Lsqr_last
582
583 mulx $npj,$mul1,$acc1
584 add $tpj,$car1,$car1
585 add $acc0,$car1,$car1
586 add $acc1,$car1,$car1
587 st $car1,[$tp]
588 srlx $car1,32,$car1
589
590 add $car0,$car0,$car0 ! recover $car0
591 or $sbit,$car0,$car0
592 add $car0,$car1,$car1
593 add $car2,$car1,$car1
594 st $car1,[$tp+4]
595 srlx $car1,32,$car2
596
597 ba .Ltail
598 add $tp,8,$tp
599.type $fname,#function
600.size $fname,(.-$fname)
601.asciz	"Montgomery Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
602.align 32
603___
604$code =~ s/\`([^\`]*)\`/eval($1)/gem;
605print $code;
606close STDOUT;
diff --git a/src/lib/libcrypto/bn/asm/sparcv9a-mont.pl b/src/lib/libcrypto/bn/asm/sparcv9a-mont.pl
deleted file mode 100755
index a14205f2f0..0000000000
--- a/src/lib/libcrypto/bn/asm/sparcv9a-mont.pl
+++ /dev/null
@@ -1,882 +0,0 @@
1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# October 2005
11#
12# "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
13# Because unlike the integer multiplier, which simply stalls the whole
14# CPU, the FPU is fully pipelined and can effectively emit a 48-bit
15# partial product every cycle. Why not blended SPARC v9? One can argue
16# that making this module dependent on the UltraSPARC VIS extension
17# limits its binary compatibility. Well yes, it does exclude pre-V(!)
18# SPARC64 implementations from the compatibility matrix. But the rest,
19# the whole Sun UltraSPARC family and Fujitsu's brand new SPARC64 V,
20# all support the VIS extension instructions used in this module. This
21# is considered good enough not to care about HAL SPARC64 users [if
22# any], who have the integer-only pure SPARCv9 module to fall back on.
23
24# USI&II cores currently exhibit a uniform 2x improvement [over the
25# pre-bn_mul_mont codebase] for all key lengths and benchmarks. On
26# USIII performance improves by a few percent for shorter keys and
27# worsens by a few percent for longer ones, because the USIII integer
28# multiplier is >3x faster than the USI&II one, which is harder to
29# match [but see the TODO list below]. It should also be noted that
30# SPARC64 V features out-of-order execution, which *might* mean that
31# its integer multiplier is pipelined, which in turn *might* be
32# impossible to match... As an additional note, SPARC64 V implements
33# an FP Multiply-Add instruction, which is perfectly usable in this
34# context... In other words, as far as SPARC64 V goes, talk to the author:-)
35
36# The implementation implies the following "non-natural" limitations on
37# the input arguments:
38# - num may not be less than 4;
39# - num has to be even;
40# Failure to meet either condition has no fatal effects; it simply
41# doesn't give any performance gain.
42
43# TODO:
44# - modulo-schedule inner loop for better performance (on in-order
45# execution core such as UltraSPARC this shall result in further
46# noticeable(!) improvement);
47# - dedicated squaring procedure[?];
48
49######################################################################
50# November 2006
51#
52# Modulo-scheduled inner loops allow interleaving of floating point and
53# integer instructions and minimize Read-After-Write penalties. This
54# results in a *further* 20-50% performance improvement [depending on
55# key length, more for longer keys] on USI&II cores and 30-80% on
56# USIII&IV.
57
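Before the code, a hedged C sketch of the two arithmetic tricks the FP path relies on (illustrative names; the module keeps everything in FP registers via fxtod/fmuld/faddd/fdtox, which plain doubles stand in for here). First, a 32-bit limb times a 16-bit limb is below 2^48 and therefore exact in a double's 53-bit mantissa, and a handful of such products can be added before rounding could occur. Second, the integer side recombines four staggered column sums into one 64-bit word via the srlx-by-16 chains, producing the "34-bit carry" the comments below mention.

    #include <stdint.h>

    /* Exact 48-bit partial products via doubles: a*b for 32-bit a,b. */
    static uint64_t
    fp_mul_limb(uint32_t a, uint32_t b)
    {
            double a_d  = (double)a;                /* exact, < 2^32 */
            double b_lo = (double)(b & 0xffff);     /* 16-bit limbs */
            double b_hi = (double)(b >> 16);
            double p_lo = a_d * b_lo;               /* < 2^48, exact */
            double p_hi = a_d * b_hi;               /* < 2^48, exact */

            return (uint64_t)p_lo + ((uint64_t)p_hi << 16);
    }

    /* Recombine four staggered 16-bit-offset column sums (the fdtox
     * results) into one 64-bit word plus the carry into the next. */
    static uint64_t
    recombine(uint64_t o0, uint64_t o1, uint64_t o2, uint64_t o3,
        uint64_t *carry)
    {
            o1 += o0 >> 16;
            o2 += o1 >> 16;
            o3 += o2 >> 16;
            *carry = o3 >> 16;                      /* "34-bit carry" */
            return (o0 & 0xffff) | ((o1 & 0xffff) << 16) |
                ((o2 & 0xffff) << 32) | (o3 << 48);
    }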
58$fname="bn_mul_mont_fpu";
59$bits=32;
60for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
61
62if ($bits==64) {
63 $bias=2047;
64 $frame=192;
65} else {
66 $bias=0;
67 $frame=128; # 96 rounded up to largest known cache-line
68}
69$locals=64;
70
71# In order to provide for 32-/64-bit ABI duality, I keep integers wider
72# than 32 bits in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
73# exclusively for pointers, indexes and other small values...
74# int bn_mul_mont(
75$rp="%i0"; # BN_ULONG *rp,
76$ap="%i1"; # const BN_ULONG *ap,
77$bp="%i2"; # const BN_ULONG *bp,
78$np="%i3"; # const BN_ULONG *np,
79$n0="%i4"; # const BN_ULONG *n0,
80$num="%i5"; # int num);
81
82$tp="%l0"; # t[num]
83$ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved
84$ap_h="%l2"; # to these four vectors as double-precision FP values.
85$np_l="%l3"; # This way a bunch of fxtods are eliminated in second
86$np_h="%l4"; # loop and L1-cache aliasing is minimized...
87$i="%l5";
88$j="%l6";
89$mask="%l7"; # 16-bit mask, 0xffff
90
91$n0="%g4"; # reassigned(!) to "64-bit" register
92$carry="%i4"; # %i4 reused(!) for a carry bit
93
94# FP register naming chart
95#
96# ..HILO
97# dcba
98# --------
99# LOa
100# LOb
101# LOc
102# LOd
103# HIa
104# HIb
105# HIc
106# HId
107# ..a
108# ..b
109$ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6";
110$na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14";
111$alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19";
112$nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23";
113
114$dota="%f24"; $dotb="%f26";
115
116$aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
117$ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
118$nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
119$nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
120
121$ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load
122
123$code=<<___;
124.section ".text",#alloc,#execinstr
125
126.global $fname
127.align 32
128$fname:
129 save %sp,-$frame-$locals,%sp
130
131 cmp $num,4
132 bl,a,pn %icc,.Lret
133 clr %i0
134 andcc $num,1,%g0 ! $num has to be even...
135 bnz,a,pn %icc,.Lret
136 clr %i0 ! signal "unsupported input value"
137
138 srl $num,1,$num
139 sethi %hi(0xffff),$mask
140 ld [%i4+0],$n0 ! $n0 reassigned, remember?
141 or $mask,%lo(0xffff),$mask
142 ld [%i4+4],%o0
143 sllx %o0,32,%o0
144 or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
145
146 sll $num,3,$num ! num*=8
147
148 add %sp,$bias,%o0 ! real top of stack
149 sll $num,2,%o1
150 add %o1,$num,%o1 ! %o1=num*5
151 sub %o0,%o1,%o0
152 and %o0,-2048,%o0 ! optimize TLB utilization
153 sub %o0,$bias,%sp ! alloca(5*num*8)
154
155 rd %asi,%o7 ! save %asi
156 add %sp,$bias+$frame+$locals,$tp
157 add $tp,$num,$ap_l
158 add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
159 add $ap_l,$num,$ap_h
160 add $ap_h,$num,$np_l
161 add $np_l,$num,$np_h
162
163 wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads
164
165 add $rp,$num,$rp ! readjust input pointers to point
166 add $ap,$num,$ap ! at the ends too...
167 add $bp,$num,$bp
168 add $np,$num,$np
169
170 stx %o7,[%sp+$bias+$frame+48] ! save %asi
171
172 sub %g0,$num,$i ! i=-num
173 sub %g0,$num,$j ! j=-num
174
175 add $ap,$j,%o3
176 add $bp,$i,%o4
177
178 ld [%o3+4],%g1 ! bp[0]
179 ld [%o3+0],%o0
180 ld [%o4+4],%g5 ! ap[0]
181 sllx %g1,32,%g1
182 ld [%o4+0],%o1
183 sllx %g5,32,%g5
184 or %g1,%o0,%o0
185 or %g5,%o1,%o1
186
187 add $np,$j,%o5
188
189 mulx %o1,%o0,%o0 ! ap[0]*bp[0]
190 mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
191 stx %o0,[%sp+$bias+$frame+0]
192
193 ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
194 fzeros $alo
195 ld [%o3+4],$ahi_
196 fzeros $ahi
197 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
198 fzeros $nlo
199 ld [%o5+4],$nhi_
200 fzeros $nhi
201
202 ! transfer b[i] to FPU as 4x16-bit values
203 ldda [%o4+2]%asi,$ba
204 fxtod $alo,$alo
205 ldda [%o4+0]%asi,$bb
206 fxtod $ahi,$ahi
207 ldda [%o4+6]%asi,$bc
208 fxtod $nlo,$nlo
209 ldda [%o4+4]%asi,$bd
210 fxtod $nhi,$nhi
211
212 ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
213 ldda [%sp+$bias+$frame+6]%asi,$na
214 fxtod $ba,$ba
215 ldda [%sp+$bias+$frame+4]%asi,$nb
216 fxtod $bb,$bb
217 ldda [%sp+$bias+$frame+2]%asi,$nc
218 fxtod $bc,$bc
219 ldda [%sp+$bias+$frame+0]%asi,$nd
220 fxtod $bd,$bd
221
222 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
223 fxtod $na,$na
224 std $ahi,[$ap_h+$j]
225 fxtod $nb,$nb
226 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
227 fxtod $nc,$nc
228 std $nhi,[$np_h+$j]
229 fxtod $nd,$nd
230
231 fmuld $alo,$ba,$aloa
232 fmuld $nlo,$na,$nloa
233 fmuld $alo,$bb,$alob
234 fmuld $nlo,$nb,$nlob
235 fmuld $alo,$bc,$aloc
236 faddd $aloa,$nloa,$nloa
237 fmuld $nlo,$nc,$nloc
238 fmuld $alo,$bd,$alod
239 faddd $alob,$nlob,$nlob
240 fmuld $nlo,$nd,$nlod
241 fmuld $ahi,$ba,$ahia
242 faddd $aloc,$nloc,$nloc
243 fmuld $nhi,$na,$nhia
244 fmuld $ahi,$bb,$ahib
245 faddd $alod,$nlod,$nlod
246 fmuld $nhi,$nb,$nhib
247 fmuld $ahi,$bc,$ahic
248 faddd $ahia,$nhia,$nhia
249 fmuld $nhi,$nc,$nhic
250 fmuld $ahi,$bd,$ahid
251 faddd $ahib,$nhib,$nhib
252 fmuld $nhi,$nd,$nhid
253
254 faddd $ahic,$nhic,$dota ! $nhic
255 faddd $ahid,$nhid,$dotb ! $nhid
256
257 faddd $nloc,$nhia,$nloc
258 faddd $nlod,$nhib,$nlod
259
260 fdtox $nloa,$nloa
261 fdtox $nlob,$nlob
262 fdtox $nloc,$nloc
263 fdtox $nlod,$nlod
264
265 std $nloa,[%sp+$bias+$frame+0]
266 add $j,8,$j
267 std $nlob,[%sp+$bias+$frame+8]
268 add $ap,$j,%o4
269 std $nloc,[%sp+$bias+$frame+16]
270 add $np,$j,%o5
271 std $nlod,[%sp+$bias+$frame+24]
272
273 ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
274 fzeros $alo
275 ld [%o4+4],$ahi_
276 fzeros $ahi
277 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
278 fzeros $nlo
279 ld [%o5+4],$nhi_
280 fzeros $nhi
281
282 fxtod $alo,$alo
283 fxtod $ahi,$ahi
284 fxtod $nlo,$nlo
285 fxtod $nhi,$nhi
286
287 ldx [%sp+$bias+$frame+0],%o0
288 fmuld $alo,$ba,$aloa
289 ldx [%sp+$bias+$frame+8],%o1
290 fmuld $nlo,$na,$nloa
291 ldx [%sp+$bias+$frame+16],%o2
292 fmuld $alo,$bb,$alob
293 ldx [%sp+$bias+$frame+24],%o3
294 fmuld $nlo,$nb,$nlob
295
296 srlx %o0,16,%o7
297 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
298 fmuld $alo,$bc,$aloc
299 add %o7,%o1,%o1
300 std $ahi,[$ap_h+$j]
301 faddd $aloa,$nloa,$nloa
302 fmuld $nlo,$nc,$nloc
303 srlx %o1,16,%o7
304 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
305 fmuld $alo,$bd,$alod
306 add %o7,%o2,%o2
307 std $nhi,[$np_h+$j]
308 faddd $alob,$nlob,$nlob
309 fmuld $nlo,$nd,$nlod
310 srlx %o2,16,%o7
311 fmuld $ahi,$ba,$ahia
312 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
313 faddd $aloc,$nloc,$nloc
314 fmuld $nhi,$na,$nhia
315 !and %o0,$mask,%o0
316 !and %o1,$mask,%o1
317 !and %o2,$mask,%o2
318 !sllx %o1,16,%o1
319 !sllx %o2,32,%o2
320 !sllx %o3,48,%o7
321 !or %o1,%o0,%o0
322 !or %o2,%o0,%o0
323 !or %o7,%o0,%o0 ! 64-bit result
324 srlx %o3,16,%g1 ! 34-bit carry
325 fmuld $ahi,$bb,$ahib
326
327 faddd $alod,$nlod,$nlod
328 fmuld $nhi,$nb,$nhib
329 fmuld $ahi,$bc,$ahic
330 faddd $ahia,$nhia,$nhia
331 fmuld $nhi,$nc,$nhic
332 fmuld $ahi,$bd,$ahid
333 faddd $ahib,$nhib,$nhib
334 fmuld $nhi,$nd,$nhid
335
336 faddd $dota,$nloa,$nloa
337 faddd $dotb,$nlob,$nlob
338 faddd $ahic,$nhic,$dota ! $nhic
339 faddd $ahid,$nhid,$dotb ! $nhid
340
341 faddd $nloc,$nhia,$nloc
342 faddd $nlod,$nhib,$nlod
343
344 fdtox $nloa,$nloa
345 fdtox $nlob,$nlob
346 fdtox $nloc,$nloc
347 fdtox $nlod,$nlod
348
349 std $nloa,[%sp+$bias+$frame+0]
350 std $nlob,[%sp+$bias+$frame+8]
351 addcc $j,8,$j
352 std $nloc,[%sp+$bias+$frame+16]
353 bz,pn %icc,.L1stskip
354 std $nlod,[%sp+$bias+$frame+24]
355
356.align 32 ! incidentally already aligned !
357.L1st:
358 add $ap,$j,%o4
359 add $np,$j,%o5
360 ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
361 fzeros $alo
362 ld [%o4+4],$ahi_
363 fzeros $ahi
364 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
365 fzeros $nlo
366 ld [%o5+4],$nhi_
367 fzeros $nhi
368
369 fxtod $alo,$alo
370 fxtod $ahi,$ahi
371 fxtod $nlo,$nlo
372 fxtod $nhi,$nhi
373
374 ldx [%sp+$bias+$frame+0],%o0
375 fmuld $alo,$ba,$aloa
376 ldx [%sp+$bias+$frame+8],%o1
377 fmuld $nlo,$na,$nloa
378 ldx [%sp+$bias+$frame+16],%o2
379 fmuld $alo,$bb,$alob
380 ldx [%sp+$bias+$frame+24],%o3
381 fmuld $nlo,$nb,$nlob
382
383 srlx %o0,16,%o7
384 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
385 fmuld $alo,$bc,$aloc
386 add %o7,%o1,%o1
387 std $ahi,[$ap_h+$j]
388 faddd $aloa,$nloa,$nloa
389 fmuld $nlo,$nc,$nloc
390 srlx %o1,16,%o7
391 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
392 fmuld $alo,$bd,$alod
393 add %o7,%o2,%o2
394 std $nhi,[$np_h+$j]
395 faddd $alob,$nlob,$nlob
396 fmuld $nlo,$nd,$nlod
397 srlx %o2,16,%o7
398 fmuld $ahi,$ba,$ahia
399 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
400 and %o0,$mask,%o0
401 faddd $aloc,$nloc,$nloc
402 fmuld $nhi,$na,$nhia
403 and %o1,$mask,%o1
404 and %o2,$mask,%o2
405 fmuld $ahi,$bb,$ahib
406 sllx %o1,16,%o1
407 faddd $alod,$nlod,$nlod
408 fmuld $nhi,$nb,$nhib
409 sllx %o2,32,%o2
410 fmuld $ahi,$bc,$ahic
411 sllx %o3,48,%o7
412 or %o1,%o0,%o0
413 faddd $ahia,$nhia,$nhia
414 fmuld $nhi,$nc,$nhic
415 or %o2,%o0,%o0
416 fmuld $ahi,$bd,$ahid
417 or %o7,%o0,%o0 ! 64-bit result
418 faddd $ahib,$nhib,$nhib
419 fmuld $nhi,$nd,$nhid
420 addcc %g1,%o0,%o0
421 faddd $dota,$nloa,$nloa
422 srlx %o3,16,%g1 ! 34-bit carry
423 faddd $dotb,$nlob,$nlob
424 bcs,a %xcc,.+8
425 add %g1,1,%g1
426
427 stx %o0,[$tp] ! tp[j-1]=
428
429 faddd $ahic,$nhic,$dota ! $nhic
430 faddd $ahid,$nhid,$dotb ! $nhid
431
432 faddd $nloc,$nhia,$nloc
433 faddd $nlod,$nhib,$nlod
434
435 fdtox $nloa,$nloa
436 fdtox $nlob,$nlob
437 fdtox $nloc,$nloc
438 fdtox $nlod,$nlod
439
440 std $nloa,[%sp+$bias+$frame+0]
441 std $nlob,[%sp+$bias+$frame+8]
442 std $nloc,[%sp+$bias+$frame+16]
443 std $nlod,[%sp+$bias+$frame+24]
444
445 addcc $j,8,$j
446 bnz,pt %icc,.L1st
447 add $tp,8,$tp
448
449.L1stskip:
450 fdtox $dota,$dota
451 fdtox $dotb,$dotb
452
453 ldx [%sp+$bias+$frame+0],%o0
454 ldx [%sp+$bias+$frame+8],%o1
455 ldx [%sp+$bias+$frame+16],%o2
456 ldx [%sp+$bias+$frame+24],%o3
457
458 srlx %o0,16,%o7
459 std $dota,[%sp+$bias+$frame+32]
460 add %o7,%o1,%o1
461 std $dotb,[%sp+$bias+$frame+40]
462 srlx %o1,16,%o7
463 add %o7,%o2,%o2
464 srlx %o2,16,%o7
465 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
466 and %o0,$mask,%o0
467 and %o1,$mask,%o1
468 and %o2,$mask,%o2
469 sllx %o1,16,%o1
470 sllx %o2,32,%o2
471 sllx %o3,48,%o7
472 or %o1,%o0,%o0
473 or %o2,%o0,%o0
474 or %o7,%o0,%o0 ! 64-bit result
475 ldx [%sp+$bias+$frame+32],%o4
476 addcc %g1,%o0,%o0
477 ldx [%sp+$bias+$frame+40],%o5
478 srlx %o3,16,%g1 ! 34-bit carry
479 bcs,a %xcc,.+8
480 add %g1,1,%g1
481
482 stx %o0,[$tp] ! tp[j-1]=
483 add $tp,8,$tp
484
485 srlx %o4,16,%o7
486 add %o7,%o5,%o5
487 and %o4,$mask,%o4
488 sllx %o5,16,%o7
489 or %o7,%o4,%o4
490 addcc %g1,%o4,%o4
491 srlx %o5,48,%g1
492 bcs,a %xcc,.+8
493 add %g1,1,%g1
494
495 mov %g1,$carry
496 stx %o4,[$tp] ! tp[num-1]=
497
498 ba .Louter
499 add $i,8,$i
500.align 32
501.Louter:
502 sub %g0,$num,$j ! j=-num
503 add %sp,$bias+$frame+$locals,$tp
504
505 add $ap,$j,%o3
506 add $bp,$i,%o4
507
508 ld [%o3+4],%g1 ! bp[i]
509 ld [%o3+0],%o0
510 ld [%o4+4],%g5 ! ap[0]
511 sllx %g1,32,%g1
512 ld [%o4+0],%o1
513 sllx %g5,32,%g5
514 or %g1,%o0,%o0
515 or %g5,%o1,%o1
516
517 ldx [$tp],%o2 ! tp[0]
518 mulx %o1,%o0,%o0
519 addcc %o2,%o0,%o0
520 mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
521 stx %o0,[%sp+$bias+$frame+0]
522
523 ! transfer b[i] to FPU as 4x16-bit values
524 ldda [%o4+2]%asi,$ba
525 ldda [%o4+0]%asi,$bb
526 ldda [%o4+6]%asi,$bc
527 ldda [%o4+4]%asi,$bd
528
529 ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
530 ldda [%sp+$bias+$frame+6]%asi,$na
531 fxtod $ba,$ba
532 ldda [%sp+$bias+$frame+4]%asi,$nb
533 fxtod $bb,$bb
534 ldda [%sp+$bias+$frame+2]%asi,$nc
535 fxtod $bc,$bc
536 ldda [%sp+$bias+$frame+0]%asi,$nd
537 fxtod $bd,$bd
538 ldd [$ap_l+$j],$alo ! load a[j] in double format
539 fxtod $na,$na
540 ldd [$ap_h+$j],$ahi
541 fxtod $nb,$nb
542 ldd [$np_l+$j],$nlo ! load n[j] in double format
543 fxtod $nc,$nc
544 ldd [$np_h+$j],$nhi
545 fxtod $nd,$nd
546
547 fmuld $alo,$ba,$aloa
548 fmuld $nlo,$na,$nloa
549 fmuld $alo,$bb,$alob
550 fmuld $nlo,$nb,$nlob
551 fmuld $alo,$bc,$aloc
552 faddd $aloa,$nloa,$nloa
553 fmuld $nlo,$nc,$nloc
554 fmuld $alo,$bd,$alod
555 faddd $alob,$nlob,$nlob
556 fmuld $nlo,$nd,$nlod
557 fmuld $ahi,$ba,$ahia
558 faddd $aloc,$nloc,$nloc
559 fmuld $nhi,$na,$nhia
560 fmuld $ahi,$bb,$ahib
561 faddd $alod,$nlod,$nlod
562 fmuld $nhi,$nb,$nhib
563 fmuld $ahi,$bc,$ahic
564 faddd $ahia,$nhia,$nhia
565 fmuld $nhi,$nc,$nhic
566 fmuld $ahi,$bd,$ahid
567 faddd $ahib,$nhib,$nhib
568 fmuld $nhi,$nd,$nhid
569
570 faddd $ahic,$nhic,$dota ! $nhic
571 faddd $ahid,$nhid,$dotb ! $nhid
572
573 faddd $nloc,$nhia,$nloc
574 faddd $nlod,$nhib,$nlod
575
576 fdtox $nloa,$nloa
577 fdtox $nlob,$nlob
578 fdtox $nloc,$nloc
579 fdtox $nlod,$nlod
580
581 std $nloa,[%sp+$bias+$frame+0]
582 std $nlob,[%sp+$bias+$frame+8]
583 std $nloc,[%sp+$bias+$frame+16]
584 add $j,8,$j
585 std $nlod,[%sp+$bias+$frame+24]
586
587 ldd [$ap_l+$j],$alo ! load a[j] in double format
588 ldd [$ap_h+$j],$ahi
589 ldd [$np_l+$j],$nlo ! load n[j] in double format
590 ldd [$np_h+$j],$nhi
591
592 fmuld $alo,$ba,$aloa
593 fmuld $nlo,$na,$nloa
594 fmuld $alo,$bb,$alob
595 fmuld $nlo,$nb,$nlob
596 fmuld $alo,$bc,$aloc
597 ldx [%sp+$bias+$frame+0],%o0
598 faddd $aloa,$nloa,$nloa
599 fmuld $nlo,$nc,$nloc
600 ldx [%sp+$bias+$frame+8],%o1
601 fmuld $alo,$bd,$alod
602 ldx [%sp+$bias+$frame+16],%o2
603 faddd $alob,$nlob,$nlob
604 fmuld $nlo,$nd,$nlod
605 ldx [%sp+$bias+$frame+24],%o3
606 fmuld $ahi,$ba,$ahia
607
608 srlx %o0,16,%o7
609 faddd $aloc,$nloc,$nloc
610 fmuld $nhi,$na,$nhia
611 add %o7,%o1,%o1
612 fmuld $ahi,$bb,$ahib
613 srlx %o1,16,%o7
614 faddd $alod,$nlod,$nlod
615 fmuld $nhi,$nb,$nhib
616 add %o7,%o2,%o2
617 fmuld $ahi,$bc,$ahic
618 srlx %o2,16,%o7
619 faddd $ahia,$nhia,$nhia
620 fmuld $nhi,$nc,$nhic
621 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
622 ! why?
623 and %o0,$mask,%o0
624 fmuld $ahi,$bd,$ahid
625 and %o1,$mask,%o1
626 and %o2,$mask,%o2
627 faddd $ahib,$nhib,$nhib
628 fmuld $nhi,$nd,$nhid
629 sllx %o1,16,%o1
630 faddd $dota,$nloa,$nloa
631 sllx %o2,32,%o2
632 faddd $dotb,$nlob,$nlob
633 sllx %o3,48,%o7
634 or %o1,%o0,%o0
635 faddd $ahic,$nhic,$dota ! $nhic
636 or %o2,%o0,%o0
637 faddd $ahid,$nhid,$dotb ! $nhid
638 or %o7,%o0,%o0 ! 64-bit result
639 ldx [$tp],%o7
640 faddd $nloc,$nhia,$nloc
641 addcc %o7,%o0,%o0
642 ! end-of-why?
643 faddd $nlod,$nhib,$nlod
644 srlx %o3,16,%g1 ! 34-bit carry
645 fdtox $nloa,$nloa
646 bcs,a %xcc,.+8
647 add %g1,1,%g1
648
649 fdtox $nlob,$nlob
650 fdtox $nloc,$nloc
651 fdtox $nlod,$nlod
652
653 std $nloa,[%sp+$bias+$frame+0]
654 std $nlob,[%sp+$bias+$frame+8]
655 addcc $j,8,$j
656 std $nloc,[%sp+$bias+$frame+16]
657 bz,pn %icc,.Linnerskip
658 std $nlod,[%sp+$bias+$frame+24]
659
660 ba .Linner
661 nop
662.align 32
663.Linner:
664 ldd [$ap_l+$j],$alo ! load a[j] in double format
665 ldd [$ap_h+$j],$ahi
666 ldd [$np_l+$j],$nlo ! load n[j] in double format
667 ldd [$np_h+$j],$nhi
668
669 fmuld $alo,$ba,$aloa
670 fmuld $nlo,$na,$nloa
671 fmuld $alo,$bb,$alob
672 fmuld $nlo,$nb,$nlob
673 fmuld $alo,$bc,$aloc
674 ldx [%sp+$bias+$frame+0],%o0
675 faddd $aloa,$nloa,$nloa
676 fmuld $nlo,$nc,$nloc
677 ldx [%sp+$bias+$frame+8],%o1
678 fmuld $alo,$bd,$alod
679 ldx [%sp+$bias+$frame+16],%o2
680 faddd $alob,$nlob,$nlob
681 fmuld $nlo,$nd,$nlod
682 ldx [%sp+$bias+$frame+24],%o3
683 fmuld $ahi,$ba,$ahia
684
685 srlx %o0,16,%o7
686 faddd $aloc,$nloc,$nloc
687 fmuld $nhi,$na,$nhia
688 add %o7,%o1,%o1
689 fmuld $ahi,$bb,$ahib
690 srlx %o1,16,%o7
691 faddd $alod,$nlod,$nlod
692 fmuld $nhi,$nb,$nhib
693 add %o7,%o2,%o2
694 fmuld $ahi,$bc,$ahic
695 srlx %o2,16,%o7
696 faddd $ahia,$nhia,$nhia
697 fmuld $nhi,$nc,$nhic
698 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
699 and %o0,$mask,%o0
700 fmuld $ahi,$bd,$ahid
701 and %o1,$mask,%o1
702 and %o2,$mask,%o2
703 faddd $ahib,$nhib,$nhib
704 fmuld $nhi,$nd,$nhid
705 sllx %o1,16,%o1
706 faddd $dota,$nloa,$nloa
707 sllx %o2,32,%o2
708 faddd $dotb,$nlob,$nlob
709 sllx %o3,48,%o7
710 or %o1,%o0,%o0
711 faddd $ahic,$nhic,$dota ! $nhic
712 or %o2,%o0,%o0
713 faddd $ahid,$nhid,$dotb ! $nhid
714 or %o7,%o0,%o0 ! 64-bit result
715 faddd $nloc,$nhia,$nloc
716 addcc %g1,%o0,%o0
717 ldx [$tp+8],%o7 ! tp[j]
718 faddd $nlod,$nhib,$nlod
719 srlx %o3,16,%g1 ! 34-bit carry
720 fdtox $nloa,$nloa
721 bcs,a %xcc,.+8
722 add %g1,1,%g1
723 fdtox $nlob,$nlob
724 addcc %o7,%o0,%o0
725 fdtox $nloc,$nloc
726 bcs,a %xcc,.+8
727 add %g1,1,%g1
728
729 stx %o0,[$tp] ! tp[j-1]
730 fdtox $nlod,$nlod
731
732 std $nloa,[%sp+$bias+$frame+0]
733 std $nlob,[%sp+$bias+$frame+8]
734 std $nloc,[%sp+$bias+$frame+16]
735 addcc $j,8,$j
736 std $nlod,[%sp+$bias+$frame+24]
737 bnz,pt %icc,.Linner
738 add $tp,8,$tp
739
740.Linnerskip:
741 fdtox $dota,$dota
742 fdtox $dotb,$dotb
743
744 ldx [%sp+$bias+$frame+0],%o0
745 ldx [%sp+$bias+$frame+8],%o1
746 ldx [%sp+$bias+$frame+16],%o2
747 ldx [%sp+$bias+$frame+24],%o3
748
749 srlx %o0,16,%o7
750 std $dota,[%sp+$bias+$frame+32]
751 add %o7,%o1,%o1
752 std $dotb,[%sp+$bias+$frame+40]
753 srlx %o1,16,%o7
754 add %o7,%o2,%o2
755 srlx %o2,16,%o7
756 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
757 and %o0,$mask,%o0
758 and %o1,$mask,%o1
759 and %o2,$mask,%o2
760 sllx %o1,16,%o1
761 sllx %o2,32,%o2
762 sllx %o3,48,%o7
763 or %o1,%o0,%o0
764 or %o2,%o0,%o0
765 ldx [%sp+$bias+$frame+32],%o4
766 or %o7,%o0,%o0 ! 64-bit result
767 ldx [%sp+$bias+$frame+40],%o5
768 addcc %g1,%o0,%o0
769 ldx [$tp+8],%o7 ! tp[j]
770 srlx %o3,16,%g1 ! 34-bit carry
771 bcs,a %xcc,.+8
772 add %g1,1,%g1
773
774 addcc %o7,%o0,%o0
775 bcs,a %xcc,.+8
776 add %g1,1,%g1
777
778 stx %o0,[$tp] ! tp[j-1]
779 add $tp,8,$tp
780
781 srlx %o4,16,%o7
782 add %o7,%o5,%o5
783 and %o4,$mask,%o4
784 sllx %o5,16,%o7
785 or %o7,%o4,%o4
786 addcc %g1,%o4,%o4
787 srlx %o5,48,%g1
788 bcs,a %xcc,.+8
789 add %g1,1,%g1
790
791 addcc $carry,%o4,%o4
792 stx %o4,[$tp] ! tp[num-1]
793 mov %g1,$carry
794 bcs,a %xcc,.+8
795 add $carry,1,$carry
796
797 addcc $i,8,$i
798 bnz %icc,.Louter
799 nop
800
801 add $tp,8,$tp ! adjust tp to point at the end
802 orn %g0,%g0,%g4
803 sub %g0,$num,%o7 ! n=-num
804 ba .Lsub
805 subcc %g0,%g0,%g0 ! clear %icc.c
806
807.align 32
808.Lsub:
809 ldx [$tp+%o7],%o0
810 add $np,%o7,%g1
811 ld [%g1+0],%o2
812 ld [%g1+4],%o3
813 srlx %o0,32,%o1
814 subccc %o0,%o2,%o2
815 add $rp,%o7,%g1
816 subccc %o1,%o3,%o3
817 st %o2,[%g1+0]
818 add %o7,8,%o7
819 brnz,pt %o7,.Lsub
820 st %o3,[%g1+4]
821 subc $carry,0,%g4
822 sub %g0,$num,%o7 ! n=-num
823 ba .Lcopy
824 nop
825
826.align 32
827.Lcopy:
828 ldx [$tp+%o7],%o0
829 add $rp,%o7,%g1
830 ld [%g1+0],%o2
831 ld [%g1+4],%o3
832 stx %g0,[$tp+%o7]
833 and %o0,%g4,%o0
834 srlx %o0,32,%o1
835 andn %o2,%g4,%o2
836 andn %o3,%g4,%o3
837 or %o2,%o0,%o0
838 or %o3,%o1,%o1
839 st %o0,[%g1+0]
840 add %o7,8,%o7
841 brnz,pt %o7,.Lcopy
842 st %o1,[%g1+4]
843 sub %g0,$num,%o7 ! n=-num
844
845.Lzap:
846 stx %g0,[$ap_l+%o7]
847 stx %g0,[$ap_h+%o7]
848 stx %g0,[$np_l+%o7]
849 stx %g0,[$np_h+%o7]
850 add %o7,8,%o7
851 brnz,pt %o7,.Lzap
852 nop
853
854 ldx [%sp+$bias+$frame+48],%o7
855 wr %g0,%o7,%asi ! restore %asi
856
857 mov 1,%i0
858.Lret:
859 ret
860 restore
861.type $fname,#function
862.size $fname,(.-$fname)
863.asciz	"Montgomery Multiplication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
864.align 32
865___
866
867$code =~ s/\`([^\`]*)\`/eval($1)/gem;
868
869# The substitution below makes it possible to compile without demanding
870# VIS extensions on the command line, e.g. -xarch=v9 vs. -xarch=v9a. I
871# dare to do this because VIS capability is detected at run-time now
872# and this routine is not called on a CPU not capable of executing it.
873# Do note that fzeros is not the only VIS dependency! Another dependency
874# is implicit: it is just _a_ numerical value loaded into the %asi
875# register, which the assembler can't recognize as VIS-specific...
876$code =~ s/fzeros\s+%f([0-9]+)/
877 sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)
878 /gem;
879
880print $code;
881# flush
882close STDOUT;
diff --git a/src/lib/libcrypto/bn/asm/via-mont.pl b/src/lib/libcrypto/bn/asm/via-mont.pl
deleted file mode 100644
index c046a514c8..0000000000
--- a/src/lib/libcrypto/bn/asm/via-mont.pl
+++ /dev/null
@@ -1,242 +0,0 @@
1#!/usr/bin/env perl
2#
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9#
10# Wrapper around 'rep montmul', the VIA-specific instruction accessing
11# the PadLock Montgomery Multiplier. The wrapper is designed as a drop-in
12# replacement for OpenSSL bn_mul_mont [first implemented in 0.9.9].
13#
14# Below are interleaved outputs from 'openssl speed rsa dsa' for 4
15# different software configurations on a 1.5GHz VIA Esther processor.
16# Lines marked "software integer" denote the performance of the hand-
17# coded integer-only assembler found in OpenSSL 0.9.7. "Software SSE2"
18# refers to the hand-coded SSE2 Montgomery multiplication procedure
19# found in OpenSSL 0.9.9. "Hardware VIA SDK" refers to the padlock_pmm
20# routine from PadLock SDK 2.0.1, available for download from VIA, which
21# naturally utilizes the magic 'repz montmul' instruction. And finally
22# "hardware this" refers to *this* implementation, which also uses 'repz montmul'.
23#
24# sign verify sign/s verify/s
25# rsa 512 bits 0.001720s 0.000140s 581.4 7149.7 software integer
26# rsa 512 bits 0.000690s 0.000086s 1450.3 11606.0 software SSE2
27# rsa 512 bits 0.006136s 0.000201s 163.0 4974.5 hardware VIA SDK
28# rsa 512 bits 0.000712s 0.000050s 1404.9 19858.5 hardware this
29#
30# rsa 1024 bits 0.008518s 0.000413s 117.4 2420.8 software integer
31# rsa 1024 bits 0.004275s 0.000277s 233.9 3609.7 software SSE2
32# rsa 1024 bits 0.012136s 0.000260s 82.4 3844.5 hardware VIA SDK
33# rsa 1024 bits 0.002522s 0.000116s 396.5 8650.9 hardware this
34#
35# rsa 2048 bits 0.050101s 0.001371s 20.0 729.6 software integer
36# rsa 2048 bits 0.030273s 0.001008s 33.0 991.9 software SSE2
37# rsa 2048 bits 0.030833s 0.000976s 32.4 1025.1 hardware VIA SDK
38# rsa 2048 bits 0.011879s 0.000342s 84.2 2921.7 hardware this
39#
40# rsa 4096 bits 0.327097s 0.004859s 3.1 205.8 software integer
41# rsa 4096 bits 0.229318s 0.003859s 4.4 259.2 software SSE2
42# rsa 4096 bits 0.233953s 0.003274s 4.3 305.4 hardware VIA SDK
43# rsa 4096 bits 0.070493s 0.001166s 14.2 857.6 hardware this
44#
45# dsa 512 bits 0.001342s 0.001651s 745.2 605.7 software integer
46# dsa 512 bits 0.000844s 0.000987s 1185.3 1013.1 software SSE2
47# dsa 512 bits 0.001902s 0.002247s 525.6 444.9 hardware VIA SDK
48# dsa 512 bits 0.000458s 0.000524s 2182.2 1909.1 hardware this
49#
50# dsa 1024 bits 0.003964s 0.004926s 252.3 203.0 software integer
51# dsa 1024 bits 0.002686s 0.003166s 372.3 315.8 software SSE2
52# dsa 1024 bits 0.002397s 0.002823s 417.1 354.3 hardware VIA SDK
53# dsa 1024 bits 0.000978s 0.001170s 1022.2 855.0 hardware this
54#
55# dsa 2048 bits 0.013280s 0.016518s 75.3 60.5 software integer
56# dsa 2048 bits 0.009911s 0.011522s 100.9 86.8 software SSE2
57# dsa 2048 bits 0.009542s 0.011763s 104.8 85.0 hardware VIA SDK
58# dsa 2048 bits 0.002884s 0.003352s 346.8 298.3 hardware this
59#
60# To give you some other reference point here is output for 2.4GHz P4
61# running hand-coded SSE2 bn_mul_mont found in 0.9.9, i.e. "software
62# SSE2" in above terms.
63#
64# rsa 512 bits 0.000407s 0.000047s 2454.2 21137.0
65# rsa 1024 bits 0.002426s 0.000141s 412.1 7100.0
66# rsa 2048 bits 0.015046s 0.000491s 66.5 2034.9
67# rsa 4096 bits 0.109770s 0.002379s 9.1 420.3
68# dsa 512 bits 0.000438s 0.000525s 2281.1 1904.1
69# dsa 1024 bits 0.001346s 0.001595s 742.7 627.0
70# dsa 2048 bits 0.004745s 0.005582s 210.7 179.1
71#
72# Conclusions:
73# - VIA SDK leaves a *lot* of room for improvement (which this
74# implementation successfully fills:-);
75# - 'rep montmul' gives up to >3x performance improvement depending on
76# key length;
77# - in terms of absolute performance it delivers approximately as much
78# as modern out-of-order 32-bit cores [again, for longer keys].
79
80$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
81push(@INC,"${dir}","${dir}../../perlasm");
82require "x86asm.pl";
83
84&asm_init($ARGV[0],"via-mont.pl");
85
86# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num);
87$func="bn_mul_mont_padlock";
88
89$pad=16*1; # amount of reserved bytes on top of every vector
90
91# stack layout
92$mZeroPrime=&DWP(0,"esp"); # these are specified by VIA
93$A=&DWP(4,"esp");
94$B=&DWP(8,"esp");
95$T=&DWP(12,"esp");
96$M=&DWP(16,"esp");
97$scratch=&DWP(20,"esp");
98$rp=&DWP(24,"esp"); # these are mine
99$sp=&DWP(28,"esp");
100# &DWP(32,"esp") # 32 byte scratch area
101# &DWP(64+(4*$num+$pad)*0,"esp") # padded tp[num]
102# &DWP(64+(4*$num+$pad)*1,"esp") # padded copy of ap[num]
103# &DWP(64+(4*$num+$pad)*2,"esp") # padded copy of bp[num]
104# &DWP(64+(4*$num+$pad)*3,"esp") # padded copy of np[num]
105# Note that the SDK suggests unconditionally allocating 2K per vector.
106# This has quite an impact on performance; it naturally depends on key
107# length, but to give an example, 1024-bit private RSA key operations
108# suffer a >30% penalty. I allocate only as much as actually required...
109
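A hedged C sketch of the sizing and input checks only (names are illustrative; the descriptor field semantics are exactly those listed above): each of the four vectors, tp plus the padded copies of ap, bp and np, occupies 4*num bytes plus the 16-byte pad, preceded by a 64-byte header for the VIA descriptor and scratch area, matching the two lea instructions at the top of the function.

    #include <stddef.h>

    #define PAD 16  /* reserved bytes on top of every vector ($pad) */

    /* VIA's limits on num, expressed in 32-bit words as above. */
    static int
    montmul_num_ok(size_t num)
    {
            return num % 4 == 0 && num >= 8 && num <= 1024;
    }

    static size_t
    montmul_frame_size(size_t num)
    {
            size_t vec = 4 * num + PAD;     /* one padded vector */

            return 64 + 4 * vec;            /* header + 4 vectors */
    }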
110&function_begin($func);
111 &xor ("eax","eax");
112 &mov ("ecx",&wparam(5)); # num
113	# meet VIA's limitations for num [note that the specification
114	# expresses them in bits, while we work with a count of 32-bit words]
115 &test ("ecx",3);
116 &jnz (&label("leave")); # num % 4 != 0
117 &cmp ("ecx",8);
118 &jb (&label("leave")); # num < 8
119 &cmp ("ecx",1024);
120 &ja (&label("leave")); # num > 1024
121
122 &pushf ();
123 &cld ();
124
125 &mov ("edi",&wparam(0)); # rp
126 &mov ("eax",&wparam(1)); # ap
127 &mov ("ebx",&wparam(2)); # bp
128 &mov ("edx",&wparam(3)); # np
129 &mov ("esi",&wparam(4)); # n0
130 &mov ("esi",&DWP(0,"esi")); # *n0
131
132 &lea ("ecx",&DWP($pad,"","ecx",4)); # ecx becomes vector size in bytes
133 &lea ("ebp",&DWP(64,"","ecx",4)); # allocate 4 vectors + 64 bytes
134 &neg ("ebp");
135 &add ("ebp","esp");
136 &and ("ebp",-64); # align to cache-line
137 &xchg ("ebp","esp"); # alloca
138
139 &mov ($rp,"edi"); # save rp
140 &mov ($sp,"ebp"); # save esp
141
142 &mov ($mZeroPrime,"esi");
143 &lea ("esi",&DWP(64,"esp")); # tp
144 &mov ($T,"esi");
145 &lea ("edi",&DWP(32,"esp")); # scratch area
146 &mov ($scratch,"edi");
147 &mov ("esi","eax");
148
149 &lea ("ebp",&DWP(-$pad,"ecx"));
150 &shr ("ebp",2); # restore original num value in ebp
151
152 &xor ("eax","eax");
153
154 &mov ("ecx","ebp");
155 &lea ("ecx",&DWP((32+$pad)/4,"ecx"));# padded tp + scratch
156 &data_byte(0xf3,0xab); # rep stosl, bzero
157
158 &mov ("ecx","ebp");
159 &lea ("edi",&DWP(64+$pad,"esp","ecx",4));# pointer to ap copy
160 &mov ($A,"edi");
161 &data_byte(0xf3,0xa5); # rep movsl, memcpy
162 &mov ("ecx",$pad/4);
163 &data_byte(0xf3,0xab); # rep stosl, bzero pad
164 # edi points at the end of padded ap copy...
165
166 &mov ("ecx","ebp");
167 &mov ("esi","ebx");
168 &mov ($B,"edi");
169 &data_byte(0xf3,0xa5); # rep movsl, memcpy
170 &mov ("ecx",$pad/4);
171 &data_byte(0xf3,0xab); # rep stosl, bzero pad
172 # edi points at the end of padded bp copy...
173
174 &mov ("ecx","ebp");
175 &mov ("esi","edx");
176 &mov ($M,"edi");
177 &data_byte(0xf3,0xa5); # rep movsl, memcpy
178 &mov ("ecx",$pad/4);
179 &data_byte(0xf3,0xab); # rep stosl, bzero pad
180 # edi points at the end of padded np copy...
181
182 # let magic happen...
183 &mov ("ecx","ebp");
184 &mov ("esi","esp");
185 &shl ("ecx",5); # convert word counter to bit counter
186 &align (4);
187 &data_byte(0xf3,0x0f,0xa6,0xc0);# rep montmul
188
189 &mov ("ecx","ebp");
190 &lea ("esi",&DWP(64,"esp")); # tp
191 # edi still points at the end of padded np copy...
192 &neg ("ebp");
193 &lea ("ebp",&DWP(-$pad,"edi","ebp",4)); # so just "rewind"
194 &mov ("edi",$rp); # restore rp
195 &xor ("edx","edx"); # i=0 and clear CF
196
197&set_label("sub",8);
198 &mov ("eax",&DWP(0,"esi","edx",4));
199 &sbb ("eax",&DWP(0,"ebp","edx",4));
200 &mov (&DWP(0,"edi","edx",4),"eax"); # rp[i]=tp[i]-np[i]
201 &lea ("edx",&DWP(1,"edx")); # i++
202 &loop (&label("sub")); # doesn't affect CF!
203
204 &mov ("eax",&DWP(0,"esi","edx",4)); # upmost overflow bit
205 &sbb ("eax",0);
206 &and ("esi","eax");
207 &not ("eax");
208 &mov ("ebp","edi");
209 &and ("ebp","eax");
210 &or ("esi","ebp"); # tp=carry?tp:rp
211
212 &mov ("ecx","edx"); # num
213 &xor ("edx","edx"); # i=0
214
215&set_label("copy",8);
216 &mov ("eax",&DWP(0,"esi","edx",4));
217 &mov (&DWP(64,"esp","edx",4),"ecx"); # zap tp
218 &mov (&DWP(0,"edi","edx",4),"eax");
219 &lea ("edx",&DWP(1,"edx")); # i++
220 &loop (&label("copy"));
221
222 &mov ("ebp",$sp);
223 &xor ("eax","eax");
224
225 &mov ("ecx",64/4);
226 &mov ("edi","esp"); # zap frame including scratch area
227 &data_byte(0xf3,0xab); # rep stosl, bzero
228
229 # zap copies of ap, bp and np
230 &lea ("edi",&DWP(64+$pad,"esp","edx",4));# pointer to ap
231 &lea ("ecx",&DWP(3*$pad/4,"edx","edx",2));
232 &data_byte(0xf3,0xab); # rep stosl, bzero
233
234 &mov ("esp","ebp");
235 &inc ("eax"); # signal "done"
236 &popf ();
237&set_label("leave");
238&function_end($func);
239
240&asciz("Padlock Montgomery Multiplication, CRYPTOGAMS by <appro\@openssl.org>");
241
242&asm_finish();
diff --git a/src/lib/libcrypto/bn/asm/x86-mont.pl b/src/lib/libcrypto/bn/asm/x86-mont.pl
deleted file mode 100755
index 5cd3cd2ed5..0000000000
--- a/src/lib/libcrypto/bn/asm/x86-mont.pl
+++ /dev/null
@@ -1,591 +0,0 @@
1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# October 2005
11#
12# This is "teaser" code, as it can be improved in several ways...
13# First of all, a non-SSE2 path should be implemented (yes, for now it
14# performs Montgomery multiplication/convolution only on SSE2-capable
15# CPUs such as P4; others fall back to the original code). Then the
16# inner loop can be unrolled and modulo-scheduled to improve ILP and
17# possibly moved to the 128-bit XMM register bank (though that would
18# require input rearrangement and/or increase bus bandwidth
19# utilization). A dedicated squaring procedure should give a further
20# performance improvement... Yet, for a draft, the code improves the
21# rsa512 *sign* benchmark by 110%(!), rsa1024 by 70% and rsa4096 by 20%:-)
22
23# December 2006
24#
25# Modulo-scheduling the SSE2 loops results in a further 15-20%
26# improvement. Integer-only code [equipped with a dedicated squaring
27# procedure] gains ~40% on the rsa512 sign benchmark...
28
29$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
30push(@INC,"${dir}","${dir}../../perlasm");
31require "x86asm.pl";
32
33&asm_init($ARGV[0],$0);
34
35$sse2=0;
36for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
37
38&external_label("OPENSSL_ia32cap_P") if ($sse2);
39
40&function_begin("bn_mul_mont");
41
42$i="edx";
43$j="ecx";
44$ap="esi"; $tp="esi"; # overlapping variables!!!
45$rp="edi"; $bp="edi"; # overlapping variables!!!
46$np="ebp";
47$num="ebx";
48
49$_num=&DWP(4*0,"esp"); # stack top layout
50$_rp=&DWP(4*1,"esp");
51$_ap=&DWP(4*2,"esp");
52$_bp=&DWP(4*3,"esp");
53$_np=&DWP(4*4,"esp");
54$_n0=&DWP(4*5,"esp"); $_n0q=&QWP(4*5,"esp");
55$_sp=&DWP(4*6,"esp");
56$_bpend=&DWP(4*7,"esp");
57$frame=32; # size of above frame rounded up to 16n
58
59 &xor ("eax","eax");
60 &mov ("edi",&wparam(5)); # int num
61 &cmp ("edi",4);
62 &jl (&label("just_leave"));
63
64 &lea ("esi",&wparam(0)); # put aside pointer to argument block
65 &lea ("edx",&wparam(1)); # load ap
66 &mov ("ebp","esp"); # saved stack pointer!
67 &add ("edi",2); # extra two words on top of tp
68 &neg ("edi");
69 &lea ("esp",&DWP(-$frame,"esp","edi",4)); # alloca($frame+4*(num+2))
70 &neg ("edi");
71
72	# minimize cache contention by arranging a 2K window between the
73	# stack pointer and the ap argument [np is also a position-sensitive
74	# vector, but it's assumed to be near ap, as both are allocated at
75	# ~the same time].
76 &mov ("eax","esp");
77 &sub ("eax","edx");
78 &and ("eax",2047);
79 &sub ("esp","eax"); # this aligns sp and ap modulo 2048
80
81 &xor ("edx","esp");
82 &and ("edx",2048);
83 &xor ("edx",2048);
84 &sub ("esp","edx"); # this splits them apart modulo 4096
85
86 &and ("esp",-64); # align to cache line
87
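# In C terms, the three adjustments above amount to (a sketch; sp and
# ap stand for the stack pointer and the ap argument taken as integers,
# with uintptr_t from <stdint.h>):
#
#	sp -= (sp - ap) & 2047;			/* same offset modulo 2048 */
#	sp -= ((sp ^ ap) & 2048) ^ 2048;	/* opposite 2K halves of a 4K page */
#	sp &= ~(uintptr_t)63;			/* cache-line aligned */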
88 ################################# load argument block...
89 &mov ("eax",&DWP(0*4,"esi"));# BN_ULONG *rp
90 &mov ("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap
91 &mov ("ecx",&DWP(2*4,"esi"));# const BN_ULONG *bp
92 &mov ("edx",&DWP(3*4,"esi"));# const BN_ULONG *np
93 &mov ("esi",&DWP(4*4,"esi"));# const BN_ULONG *n0
94 #&mov ("edi",&DWP(5*4,"esi"));# int num
95
96 &mov ("esi",&DWP(0,"esi")); # pull n0[0]
97 &mov ($_rp,"eax"); # ... save a copy of argument block
98 &mov ($_ap,"ebx");
99 &mov ($_bp,"ecx");
100 &mov ($_np,"edx");
101 &mov ($_n0,"esi");
102 &lea ($num,&DWP(-3,"edi")); # num=num-1 to assist modulo-scheduling
103 #&mov ($_num,$num); # redundant as $num is not reused
104 &mov ($_sp,"ebp"); # saved stack pointer!
105
106if($sse2) {
107$acc0="mm0"; # mmx register bank layout
108$acc1="mm1";
109$car0="mm2";
110$car1="mm3";
111$mul0="mm4";
112$mul1="mm5";
113$temp="mm6";
114$mask="mm7";
115
116 &picmeup("eax","OPENSSL_ia32cap_P");
117 &bt (&DWP(0,"eax"),26);
118 &jnc (&label("non_sse2"));
119
120 &mov ("eax",-1);
121 &movd ($mask,"eax"); # mask 32 lower bits
122
123 &mov ($ap,$_ap); # load input pointers
124 &mov ($bp,$_bp);
125 &mov ($np,$_np);
126
127 &xor ($i,$i); # i=0
128 &xor ($j,$j); # j=0
129
130 &movd ($mul0,&DWP(0,$bp)); # bp[0]
131 &movd ($mul1,&DWP(0,$ap)); # ap[0]
132 &movd ($car1,&DWP(0,$np)); # np[0]
133
134 &pmuludq($mul1,$mul0); # ap[0]*bp[0]
135 &movq ($car0,$mul1);
136 &movq ($acc0,$mul1); # I wish movd worked for
137 &pand ($acc0,$mask); # inter-register transfers
138
139 &pmuludq($mul1,$_n0q); # *=n0
140
141 &pmuludq($car1,$mul1); # "t[0]"*np[0]*n0
142 &paddq ($car1,$acc0);
143
144 &movd ($acc1,&DWP(4,$np)); # np[1]
145 &movd ($acc0,&DWP(4,$ap)); # ap[1]
146
147 &psrlq ($car0,32);
148 &psrlq ($car1,32);
149
150 &inc ($j); # j++
151&set_label("1st",16);
152 &pmuludq($acc0,$mul0); # ap[j]*bp[0]
153 &pmuludq($acc1,$mul1); # np[j]*m1
154 &paddq ($car0,$acc0); # +=c0
155 &paddq ($car1,$acc1); # +=c1
156
157 &movq ($acc0,$car0);
158 &pand ($acc0,$mask);
159 &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1]
160 &paddq ($car1,$acc0); # +=ap[j]*bp[0];
161 &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1]
162 &psrlq ($car0,32);
163 &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[j-1]=
164 &psrlq ($car1,32);
165
166 &lea ($j,&DWP(1,$j));
167 &cmp ($j,$num);
168 &jl (&label("1st"));
169
170 &pmuludq($acc0,$mul0); # ap[num-1]*bp[0]
171 &pmuludq($acc1,$mul1); # np[num-1]*m1
172 &paddq ($car0,$acc0); # +=c0
173 &paddq ($car1,$acc1); # +=c1
174
175 &movq ($acc0,$car0);
176 &pand ($acc0,$mask);
177 &paddq ($car1,$acc0); # +=ap[num-1]*bp[0];
178 &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]=
179
180 &psrlq ($car0,32);
181 &psrlq ($car1,32);
182
183 &paddq ($car1,$car0);
184 &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1]
185
186 &inc ($i); # i++
187&set_label("outer");
188 &xor ($j,$j); # j=0
189
190 &movd ($mul0,&DWP(0,$bp,$i,4)); # bp[i]
191 &movd ($mul1,&DWP(0,$ap)); # ap[0]
192 &movd ($temp,&DWP($frame,"esp")); # tp[0]
193 &movd ($car1,&DWP(0,$np)); # np[0]
194 &pmuludq($mul1,$mul0); # ap[0]*bp[i]
195
196 &paddq ($mul1,$temp); # +=tp[0]
197 &movq ($acc0,$mul1);
198 &movq ($car0,$mul1);
199 &pand ($acc0,$mask);
200
201 &pmuludq($mul1,$_n0q); # *=n0
202
203 &pmuludq($car1,$mul1);
204 &paddq ($car1,$acc0);
205
206 &movd ($temp,&DWP($frame+4,"esp")); # tp[1]
207 &movd ($acc1,&DWP(4,$np)); # np[1]
208 &movd ($acc0,&DWP(4,$ap)); # ap[1]
209
210 &psrlq ($car0,32);
211 &psrlq ($car1,32);
212 &paddq ($car0,$temp); # +=tp[1]
213
214 &inc ($j); # j++
215 &dec ($num);
216&set_label("inner");
217 &pmuludq($acc0,$mul0); # ap[j]*bp[i]
218 &pmuludq($acc1,$mul1); # np[j]*m1
219 &paddq ($car0,$acc0); # +=c0
220 &paddq ($car1,$acc1); # +=c1
221
222 &movq ($acc0,$car0);
223 &movd ($temp,&DWP($frame+4,"esp",$j,4));# tp[j+1]
224 &pand ($acc0,$mask);
225 &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1]
226 &paddq ($car1,$acc0); # +=ap[j]*bp[i]+tp[j]
227 &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1]
228 &psrlq ($car0,32);
229 &movd (&DWP($frame-4,"esp",$j,4),$car1);# tp[j-1]=
230 &psrlq ($car1,32);
231 &paddq ($car0,$temp); # +=tp[j+1]
232
233 &dec ($num);
234 &lea ($j,&DWP(1,$j)); # j++
235 &jnz (&label("inner"));
236
237 &mov ($num,$j);
238 &pmuludq($acc0,$mul0); # ap[num-1]*bp[i]
239 &pmuludq($acc1,$mul1); # np[num-1]*m1
240 &paddq ($car0,$acc0); # +=c0
241 &paddq ($car1,$acc1); # +=c1
242
243 &movq ($acc0,$car0);
244 &pand ($acc0,$mask);
245 &paddq ($car1,$acc0); # +=ap[num-1]*bp[i]+tp[num-1]
246 &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]=
247 &psrlq ($car0,32);
248 &psrlq ($car1,32);
249
250 &movd ($temp,&DWP($frame+4,"esp",$num,4)); # += tp[num]
251 &paddq ($car1,$car0);
252 &paddq ($car1,$temp);
253 &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1]
254
255 &lea ($i,&DWP(1,$i)); # i++
256 &cmp ($i,$num);
257 &jle (&label("outer"));
258
259 &emms (); # done with mmx bank
260 &jmp (&label("common_tail"));
261
262&set_label("non_sse2",16);
263}
264
265if (0) {
266 &mov ("esp",$_sp);
267 &xor ("eax","eax"); # signal "not fast enough [yet]"
268 &jmp (&label("just_leave"));
269	# While the code below provides competitive performance for
270	# all key lengths on modern Intel cores, it's still more
271	# than 10% slower for 4096-bit keys elsewhere:-( "Competitive"
272	# means compared to the original integer-only assembler.
273	# 512-bit RSA sign is better by ~40%, but that's about all
274	# one can say across all CPUs...
275} else {
276$inp="esi"; # integer path uses these registers differently
277$word="edi";
278$carry="ebp";
279
280 &mov ($inp,$_ap);
281 &lea ($carry,&DWP(1,$num));
282 &mov ($word,$_bp);
283 &xor ($j,$j); # j=0
284 &mov ("edx",$inp);
285 &and ($carry,1); # see if num is even
286 &sub ("edx",$word); # see if ap==bp
287 &lea ("eax",&DWP(4,$word,$num,4)); # &bp[num]
288 &or ($carry,"edx");
289 &mov ($word,&DWP(0,$word)); # bp[0]
290 &jz (&label("bn_sqr_mont"));
291 &mov ($_bpend,"eax");
292 &mov ("eax",&DWP(0,$inp));
293 &xor ("edx","edx");
294
295&set_label("mull",16);
296 &mov ($carry,"edx");
297 &mul ($word); # ap[j]*bp[0]
298 &add ($carry,"eax");
299 &lea ($j,&DWP(1,$j));
300 &adc ("edx",0);
301 &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1]
302 &cmp ($j,$num);
303 &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
304 &jl (&label("mull"));
305
306 &mov ($carry,"edx");
307 &mul ($word); # ap[num-1]*bp[0]
308 &mov ($word,$_n0);
309 &add ("eax",$carry);
310 &mov ($inp,$_np);
311 &adc ("edx",0);
312 &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
313
314 &mov (&DWP($frame,"esp",$num,4),"eax"); # tp[num-1]=
315 &xor ($j,$j);
316 &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]=
317 &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]=
318
319 &mov ("eax",&DWP(0,$inp)); # np[0]
320 &mul ($word); # np[0]*m
321 &add ("eax",&DWP($frame,"esp")); # +=tp[0]
322 &mov ("eax",&DWP(4,$inp)); # np[1]
323 &adc ("edx",0);
324 &inc ($j);
325
326 &jmp (&label("2ndmadd"));
327
328&set_label("1stmadd",16);
329 &mov ($carry,"edx");
330 &mul ($word); # ap[j]*bp[i]
331 &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
332 &lea ($j,&DWP(1,$j));
333 &adc ("edx",0);
334 &add ($carry,"eax");
335 &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1]
336 &adc ("edx",0);
337 &cmp ($j,$num);
338 &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
339 &jl (&label("1stmadd"));
340
341 &mov ($carry,"edx");
342 &mul ($word); # ap[num-1]*bp[i]
343 &add ("eax",&DWP($frame,"esp",$num,4)); # +=tp[num-1]
344 &mov ($word,$_n0);
345 &adc ("edx",0);
346 &mov ($inp,$_np);
347 &add ($carry,"eax");
348 &adc ("edx",0);
349 &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
350
351 &xor ($j,$j);
352 &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
353 &mov (&DWP($frame,"esp",$num,4),$carry); # tp[num-1]=
354 &adc ($j,0);
355 &mov ("eax",&DWP(0,$inp)); # np[0]
356 &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]=
357 &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]=
358
359 &mul ($word); # np[0]*m
360 &add ("eax",&DWP($frame,"esp")); # +=tp[0]
361 &mov ("eax",&DWP(4,$inp)); # np[1]
362 &adc ("edx",0);
363 &mov ($j,1);
364
365&set_label("2ndmadd",16);
366 &mov ($carry,"edx");
367 &mul ($word); # np[j]*m
368 &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
369 &lea ($j,&DWP(1,$j));
370 &adc ("edx",0);
371 &add ($carry,"eax");
372 &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+1]
373 &adc ("edx",0);
374 &cmp ($j,$num);
375 &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j-1]=
376 &jl (&label("2ndmadd"));
377
378 &mov ($carry,"edx");
379 &mul ($word); # np[j]*m
380 &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1]
381 &adc ("edx",0);
382 &add ($carry,"eax");
383 &adc ("edx",0);
384 &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]=
385
386 &xor ("eax","eax");
387 &mov ($j,$_bp); # &bp[i]
388 &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
389 &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1]
390 &lea ($j,&DWP(4,$j));
391 &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]=
392 &cmp ($j,$_bpend);
393 &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]=
394 &je (&label("common_tail"));
395
396 &mov ($word,&DWP(0,$j)); # bp[i+1]
397 &mov ($inp,$_ap);
398 &mov ($_bp,$j); # &bp[++i]
399 &xor ($j,$j);
400 &xor ("edx","edx");
401 &mov ("eax",&DWP(0,$inp));
402 &jmp (&label("1stmadd"));
403
404&set_label("bn_sqr_mont",16);
405$sbit=$num;
406 &mov ($_num,$num);
407 &mov ($_bp,$j); # i=0
408
409 &mov ("eax",$word); # ap[0]
410 &mul ($word); # ap[0]*ap[0]
411 &mov (&DWP($frame,"esp"),"eax"); # tp[0]=
412 &mov ($sbit,"edx");
413 &shr ("edx",1);
414 &and ($sbit,1);
415 &inc ($j);
416&set_label("sqr",16);
417 &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j]
418 &mov ($carry,"edx");
419 &mul ($word); # ap[j]*ap[0]
420 &add ("eax",$carry);
421 &lea ($j,&DWP(1,$j));
422 &adc ("edx",0);
423 &lea ($carry,&DWP(0,$sbit,"eax",2));
424 &shr ("eax",31);
425 &cmp ($j,$_num);
426 &mov ($sbit,"eax");
427 &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
428 &jl (&label("sqr"));
429
430 &mov ("eax",&DWP(0,$inp,$j,4)); # ap[num-1]
431 &mov ($carry,"edx");
432 &mul ($word); # ap[num-1]*ap[0]
433 &add ("eax",$carry);
434 &mov ($word,$_n0);
435 &adc ("edx",0);
436 &mov ($inp,$_np);
437 &lea ($carry,&DWP(0,$sbit,"eax",2));
438 &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
439 &shr ("eax",31);
440 &mov (&DWP($frame,"esp",$j,4),$carry); # tp[num-1]=
441
442 &lea ($carry,&DWP(0,"eax","edx",2));
443 &mov ("eax",&DWP(0,$inp)); # np[0]
444 &shr ("edx",31);
445 &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num]=
446 &mov (&DWP($frame+8,"esp",$j,4),"edx"); # tp[num+1]=
447
448 &mul ($word); # np[0]*m
449 &add ("eax",&DWP($frame,"esp")); # +=tp[0]
450 &mov ($num,$j);
451 &adc ("edx",0);
452 &mov ("eax",&DWP(4,$inp)); # np[1]
453 &mov ($j,1);
454
455&set_label("3rdmadd",16);
456 &mov ($carry,"edx");
457 &mul ($word); # np[j]*m
458 &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
459 &adc ("edx",0);
460 &add ($carry,"eax");
461 &mov ("eax",&DWP(4,$inp,$j,4)); # np[j+1]
462 &adc ("edx",0);
463 &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j-1]=
464
465 &mov ($carry,"edx");
466 &mul ($word); # np[j+1]*m
467 &add ($carry,&DWP($frame+4,"esp",$j,4)); # +=tp[j+1]
468 &lea ($j,&DWP(2,$j));
469 &adc ("edx",0);
470 &add ($carry,"eax");
471 &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+2]
472 &adc ("edx",0);
473 &cmp ($j,$num);
474 &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j]=
475 &jl (&label("3rdmadd"));
476
477 &mov ($carry,"edx");
478 &mul ($word); # np[j]*m
479 &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1]
480 &adc ("edx",0);
481 &add ($carry,"eax");
482 &adc ("edx",0);
483 &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]=
484
485 &mov ($j,$_bp); # i
486 &xor ("eax","eax");
487 &mov ($inp,$_ap);
488 &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
489 &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1]
490 &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]=
491 &cmp ($j,$num);
492 &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]=
493 &je (&label("common_tail"));
494
495 &mov ($word,&DWP(4,$inp,$j,4)); # ap[i]
496 &lea ($j,&DWP(1,$j));
497 &mov ("eax",$word);
498 &mov ($_bp,$j); # ++i
499 &mul ($word); # ap[i]*ap[i]
500 &add ("eax",&DWP($frame,"esp",$j,4)); # +=tp[i]
501 &adc ("edx",0);
502 &mov (&DWP($frame,"esp",$j,4),"eax"); # tp[i]=
503 &xor ($carry,$carry);
504 &cmp ($j,$num);
505 &lea ($j,&DWP(1,$j));
506 &je (&label("sqrlast"));
507
508 &mov ($sbit,"edx"); # zaps $num
509 &shr ("edx",1);
510 &and ($sbit,1);
511&set_label("sqradd",16);
512 &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j]
513 &mov ($carry,"edx");
514 &mul ($word); # ap[j]*ap[i]
515 &add ("eax",$carry);
516 &lea ($carry,&DWP(0,"eax","eax"));
517 &adc ("edx",0);
518 &shr ("eax",31);
519 &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
520 &lea ($j,&DWP(1,$j));
521 &adc ("eax",0);
522 &add ($carry,$sbit);
523 &adc ("eax",0);
524 &cmp ($j,$_num);
525 &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
526 &mov ($sbit,"eax");
527 &jle (&label("sqradd"));
528
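# [For reference: squaring counts each cross product a[j]*a[i], i!=j,
# twice. Rather than multiplying twice, the loop above doubles the low
# product word and threads the bit shifted out ("sbit") into the next
# word. A C sketch of one doubling step (illustrative only):
#
#	static uint32_t dbl_word(uint32_t lo, uint32_t *sbit)
#	{
#		uint32_t two = (lo << 1) + *sbit; /* 2*lo + bit from previous word */
#		*sbit = lo >> 31;		  /* bit lost by the doubling */
#		return two;
#	}
# ]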
529 &mov ($carry,"edx");
530 &lea ("edx",&DWP(0,$sbit,"edx",2));
531 &shr ($carry,31);
532&set_label("sqrlast");
533 &mov ($word,$_n0);
534 &mov ($inp,$_np);
535 &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
536
537 &add ("edx",&DWP($frame,"esp",$j,4)); # +=tp[num]
538 &mov ("eax",&DWP(0,$inp)); # np[0]
539 &adc ($carry,0);
540 &mov (&DWP($frame,"esp",$j,4),"edx"); # tp[num]=
541 &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num+1]=
542
543 &mul ($word); # np[0]*m
544 &add ("eax",&DWP($frame,"esp")); # +=tp[0]
545 &lea ($num,&DWP(-1,$j));
546 &adc ("edx",0);
547 &mov ($j,1);
548 &mov ("eax",&DWP(4,$inp)); # np[1]
549
550 &jmp (&label("3rdmadd"));
551}
552
553&set_label("common_tail",16);
554 &mov ($np,$_np); # load modulus pointer
555 &mov ($rp,$_rp); # load result pointer
556 &lea ($tp,&DWP($frame,"esp")); # [$ap and $bp are zapped]
557
558 &mov ("eax",&DWP(0,$tp)); # tp[0]
559 &mov ($j,$num); # j=num-1
560 &xor ($i,$i); # i=0 and clear CF!
561
562&set_label("sub",16);
563 &sbb ("eax",&DWP(0,$np,$i,4));
564 &mov (&DWP(0,$rp,$i,4),"eax"); # rp[i]=tp[i]-np[i]
565 &dec ($j); # doesn't affect CF!
566 &mov ("eax",&DWP(4,$tp,$i,4)); # tp[i+1]
567 &lea ($i,&DWP(1,$i)); # i++
568 &jge (&label("sub"));
569
570 &sbb ("eax",0); # handle upmost overflow bit
571 &and ($tp,"eax");
572 &not ("eax");
573 &mov ($np,$rp);
574 &and ($np,"eax");
575 &or ($tp,$np); # tp=carry?tp:rp
576
577&set_label("copy",16); # copy or in-place refresh
578 &mov ("eax",&DWP(0,$tp,$num,4));
579 &mov (&DWP(0,$rp,$num,4),"eax"); # rp[i]=tp[i]
580 &mov (&DWP($frame,"esp",$num,4),$j); # zap temporary vector
581 &dec ($num);
582 &jge (&label("copy"));
583
584 &mov ("esp",$_sp); # pull saved stack pointer
585 &mov ("eax",1);
586&set_label("just_leave");
587&function_end("bn_mul_mont");
588
589&asciz("Montgomery Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
590
591&asm_finish();
diff --git a/src/lib/libcrypto/bn/asm/x86.pl b/src/lib/libcrypto/bn/asm/x86.pl
deleted file mode 100644
index 1bc4f1bb27..0000000000
--- a/src/lib/libcrypto/bn/asm/x86.pl
+++ /dev/null
@@ -1,28 +0,0 @@
1#!/usr/local/bin/perl
2
3push(@INC,"perlasm","../../perlasm");
4require "x86asm.pl";
5
6require("x86/mul_add.pl");
7require("x86/mul.pl");
8require("x86/sqr.pl");
9require("x86/div.pl");
10require("x86/add.pl");
11require("x86/sub.pl");
12require("x86/comba.pl");
13
14&asm_init($ARGV[0],$0);
15
16&bn_mul_add_words("bn_mul_add_words");
17&bn_mul_words("bn_mul_words");
18&bn_sqr_words("bn_sqr_words");
19&bn_div_words("bn_div_words");
20&bn_add_words("bn_add_words");
21&bn_sub_words("bn_sub_words");
22&bn_mul_comba("bn_mul_comba8",8);
23&bn_mul_comba("bn_mul_comba4",4);
24&bn_sqr_comba("bn_sqr_comba8",8);
25&bn_sqr_comba("bn_sqr_comba4",4);
26
27&asm_finish();
28
diff --git a/src/lib/libcrypto/bn/asm/x86/add.pl b/src/lib/libcrypto/bn/asm/x86/add.pl
deleted file mode 100644
index 0b5cf583e3..0000000000
--- a/src/lib/libcrypto/bn/asm/x86/add.pl
+++ /dev/null
@@ -1,76 +0,0 @@
1#!/usr/local/bin/perl
2# x86 assembler
3
4sub bn_add_words
5 {
6 local($name)=@_;
7
8 &function_begin($name,"");
9
10 &comment("");
11 $a="esi";
12 $b="edi";
13 $c="eax";
14 $r="ebx";
15 $tmp1="ecx";
16 $tmp2="edx";
17 $num="ebp";
18
19 &mov($r,&wparam(0)); # get r
20 &mov($a,&wparam(1)); # get a
21 &mov($b,&wparam(2)); # get b
22 &mov($num,&wparam(3)); # get num
23 &xor($c,$c); # clear carry
24	&and($num,0xfffffff8);	# round num down to a multiple of 8
25
26 &jz(&label("aw_finish"));
27
28 &set_label("aw_loop",0);
29 for ($i=0; $i<8; $i++)
30 {
31 &comment("Round $i");
32
33 &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
34 &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
35 &add($tmp1,$c);
36 &mov($c,0);
37 &adc($c,$c);
38 &add($tmp1,$tmp2);
39 &adc($c,0);
40 &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
41 }
42
43 &comment("");
44 &add($a,32);
45 &add($b,32);
46 &add($r,32);
47 &sub($num,8);
48 &jnz(&label("aw_loop"));
49
50 &set_label("aw_finish",0);
51 &mov($num,&wparam(3)); # get num
52 &and($num,7);
53 &jz(&label("aw_end"));
54
55 for ($i=0; $i<7; $i++)
56 {
57 &comment("Tail Round $i");
58 &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
59 &mov($tmp2,&DWP($i*4,$b,"",0));# *b
60 &add($tmp1,$c);
61 &mov($c,0);
62 &adc($c,$c);
63 &add($tmp1,$tmp2);
64 &adc($c,0);
65 &dec($num) if ($i != 6);
66	&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
67 &jz(&label("aw_end")) if ($i != 6);
68 }
69 &set_label("aw_end",0);
70
71# &mov("eax",$c); # $c is "eax"
72
73 &function_end($name);
74 }
75
761;
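# What the unrolled loop above computes, one word at a time; a minimal
# C sketch (BN_ULONG taken as uint32_t, from <stdint.h>):
#
#	/* r[] = a[] + b[], returning the final carry */
#	static uint32_t bn_add_words_ref(uint32_t *r, const uint32_t *a,
#	    const uint32_t *b, int n)
#	{
#		uint32_t c = 0;
#		for (int i = 0; i < n; i++) {
#			uint32_t t = a[i] + c;
#			c = (t < c);		/* carry out of a[i] + c */
#			r[i] = t + b[i];
#			c += (r[i] < t);	/* carry out of t + b[i] */
#		}
#		return c;
#	}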
diff --git a/src/lib/libcrypto/bn/asm/x86/comba.pl b/src/lib/libcrypto/bn/asm/x86/comba.pl
deleted file mode 100644
index 2291253629..0000000000
--- a/src/lib/libcrypto/bn/asm/x86/comba.pl
+++ /dev/null
@@ -1,277 +0,0 @@
1#!/usr/local/bin/perl
2# x86 assembler
3
4sub mul_add_c
5 {
6 local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
7
8	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
9	# words, and 1 to load the return value pointer
10
11 &comment("mul a[$ai]*b[$bi]");
12
13 # "eax" and "edx" will always be pre-loaded.
14 # &mov("eax",&DWP($ai*4,$a,"",0)) ;
15 # &mov("edx",&DWP($bi*4,$b,"",0));
16
17 &mul("edx");
18 &add($c0,"eax");
19	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
20	&mov("eax",&wparam(0)) if $pos > 0;		# load r[]
21	###
22	&adc($c1,"edx");
23	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0;	# load next b
24	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1;	# load next b
25	###
26	&adc($c2,0);
27	# if pos > 1, it means this is the last loop
28	&mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0;	# save r[];
29	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next a
30 }
31
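# Each mul_add_c call folds one 32x32->64 product into the rotating
# three-word accumulator; in C terms (a sketch, uint32_t/uint64_t from
# <stdint.h>):
#
#	/* (c2:c1:c0) += (uint64_t)a * b */
#	static void mul_add_c_ref(uint32_t a, uint32_t b,
#	    uint32_t *c0, uint32_t *c1, uint32_t *c2)
#	{
#		uint64_t t = (uint64_t)a * b;
#		uint64_t s = (uint64_t)*c0 + (uint32_t)t;	/* add low half */
#		*c0 = (uint32_t)s;
#		s = (uint64_t)*c1 + (t >> 32) + (s >> 32);	/* high half + carry */
#		*c1 = (uint32_t)s;
#		*c2 += (uint32_t)(s >> 32);			/* propagate into c2 */
#	}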
32sub sqr_add_c
33 {
34 local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
35
36	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
37	# words, and 1 to load the return value pointer
38
39 &comment("sqr a[$ai]*a[$bi]");
40
41 # "eax" and "edx" will always be pre-loaded.
42 # &mov("eax",&DWP($ai*4,$a,"",0)) ;
43 # &mov("edx",&DWP($bi*4,$b,"",0));
44
45 if ($ai == $bi)
46 { &mul("eax");}
47 else
48 { &mul("edx");}
49 &add($c0,"eax");
50 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
51 ###
52 &adc($c1,"edx");
53 &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
54 ###
55 &adc($c2,0);
56	# if pos > 1, it means this is the last loop
57 &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
58 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
59 }
60
61sub sqr_add_c2
62 {
63 local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
64
65	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
66	# words, and 1 to load the return value pointer
67
68 &comment("sqr a[$ai]*a[$bi]");
69
70 # "eax" and "edx" will always be pre-loaded.
71 # &mov("eax",&DWP($ai*4,$a,"",0)) ;
72 # &mov("edx",&DWP($bi*4,$a,"",0));
73
74 if ($ai == $bi)
75 { &mul("eax");}
76 else
77 { &mul("edx");}
78 &add("eax","eax");
79 ###
80 &adc("edx","edx");
81 ###
82 &adc($c2,0);
83 &add($c0,"eax");
84 &adc($c1,"edx");
85 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
86 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
87 &adc($c2,0);
88 &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
89 &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
90 ###
91 }
92
93sub bn_mul_comba
94 {
95 local($name,$num)=@_;
96 local($a,$b,$c0,$c1,$c2);
97 local($i,$as,$ae,$bs,$be,$ai,$bi);
98 local($tot,$end);
99
100 &function_begin_B($name,"");
101
102 $c0="ebx";
103 $c1="ecx";
104 $c2="ebp";
105 $a="esi";
106 $b="edi";
107
108 $as=0;
109 $ae=0;
110 $bs=0;
111 $be=0;
112 $tot=$num+$num-1;
113
114 &push("esi");
115 &mov($a,&wparam(1));
116 &push("edi");
117 &mov($b,&wparam(2));
118 &push("ebp");
119 &push("ebx");
120
121 &xor($c0,$c0);
122	&mov("eax",&DWP(0,$a,"",0));	# load the first word of a
123	&xor($c1,$c1);
124	&mov("edx",&DWP(0,$b,"",0));	# load the first word of b
125
126 for ($i=0; $i<$tot; $i++)
127 {
128 $ai=$as;
129 $bi=$bs;
130 $end=$be+1;
131
132 &comment("################## Calculate word $i");
133
134 for ($j=$bs; $j<$end; $j++)
135 {
136 &xor($c2,$c2) if ($j == $bs);
137 if (($j+1) == $end)
138 {
139 $v=1;
140 $v=2 if (($i+1) == $tot);
141 }
142 else
143 { $v=0; }
144 if (($j+1) != $end)
145 {
146 $na=($ai-1);
147 $nb=($bi+1);
148 }
149 else
150 {
151 $na=$as+($i < ($num-1));
152 $nb=$bs+($i >= ($num-1));
153 }
154#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
155 &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
156 if ($v)
157 {
158 &comment("saved r[$i]");
159 # &mov("eax",&wparam(0));
160 # &mov(&DWP($i*4,"eax","",0),$c0);
161 ($c0,$c1,$c2)=($c1,$c2,$c0);
162 }
163 $ai--;
164 $bi++;
165 }
166 $as++ if ($i < ($num-1));
167 $ae++ if ($i >= ($num-1));
168
169 $bs++ if ($i >= ($num-1));
170 $be++ if ($i < ($num-1));
171 }
172 &comment("save r[$i]");
173 # &mov("eax",&wparam(0));
174 &mov(&DWP($i*4,"eax","",0),$c0);
175
176 &pop("ebx");
177 &pop("ebp");
178 &pop("edi");
179 &pop("esi");
180 &ret();
181 &function_end_B($name);
182 }
183
184sub bn_sqr_comba
185 {
186 local($name,$num)=@_;
187	local($r,$a,$c0,$c1,$c2);
188 local($i,$as,$ae,$bs,$be,$ai,$bi);
189 local($b,$tot,$end,$half);
190
191 &function_begin_B($name,"");
192
193 $c0="ebx";
194 $c1="ecx";
195 $c2="ebp";
196 $a="esi";
197 $r="edi";
198
199 &push("esi");
200 &push("edi");
201 &push("ebp");
202 &push("ebx");
203 &mov($r,&wparam(0));
204 &mov($a,&wparam(1));
205 &xor($c0,$c0);
206 &xor($c1,$c1);
207 &mov("eax",&DWP(0,$a,"",0)); # load the first word
208
209 $as=0;
210 $ae=0;
211 $bs=0;
212 $be=0;
213 $tot=$num+$num-1;
214
215 for ($i=0; $i<$tot; $i++)
216 {
217 $ai=$as;
218 $bi=$bs;
219 $end=$be+1;
220
221 &comment("############### Calculate word $i");
222 for ($j=$bs; $j<$end; $j++)
223 {
224 &xor($c2,$c2) if ($j == $bs);
225 if (($ai-1) < ($bi+1))
226 {
227 $v=1;
228 $v=2 if ($i+1) == $tot;
229 }
230 else
231 { $v=0; }
232 if (!$v)
233 {
234 $na=$ai-1;
235 $nb=$bi+1;
236 }
237 else
238 {
239 $na=$as+($i < ($num-1));
240 $nb=$bs+($i >= ($num-1));
241 }
242 if ($ai == $bi)
243 {
244 &sqr_add_c($r,$a,$ai,$bi,
245 $c0,$c1,$c2,$v,$i,$na,$nb);
246 }
247 else
248 {
249 &sqr_add_c2($r,$a,$ai,$bi,
250 $c0,$c1,$c2,$v,$i,$na,$nb);
251 }
252 if ($v)
253 {
254 &comment("saved r[$i]");
255 #&mov(&DWP($i*4,$r,"",0),$c0);
256 ($c0,$c1,$c2)=($c1,$c2,$c0);
257 last;
258 }
259 $ai--;
260 $bi++;
261 }
262 $as++ if ($i < ($num-1));
263 $ae++ if ($i >= ($num-1));
264
265 $bs++ if ($i >= ($num-1));
266 $be++ if ($i < ($num-1));
267 }
268 &mov(&DWP($i*4,$r,"",0),$c0);
269 &pop("ebx");
270 &pop("ebp");
271 &pop("edi");
272 &pop("esi");
273 &ret();
274 &function_end_B($name);
275 }
276
2771;
diff --git a/src/lib/libcrypto/bn/asm/x86/div.pl b/src/lib/libcrypto/bn/asm/x86/div.pl
deleted file mode 100644
index 0e90152caa..0000000000
--- a/src/lib/libcrypto/bn/asm/x86/div.pl
+++ /dev/null
@@ -1,15 +0,0 @@
1#!/usr/local/bin/perl
2# x86 assembler
3
4sub bn_div_words
5 {
6 local($name)=@_;
7
8 &function_begin($name,"");
9 &mov("edx",&wparam(0)); #
10 &mov("eax",&wparam(1)); #
11 &mov("ebx",&wparam(2)); #
12 &div("ebx");
13 &function_end($name);
14 }
151;
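# The generated routine is a single x86 div; its contract in C (a
# sketch; like the hardware instruction it assumes h < d, so the
# quotient fits in 32 bits):
#
#	/* quotient of the 64-bit value (h:l) divided by d */
#	static uint32_t bn_div_words_ref(uint32_t h, uint32_t l, uint32_t d)
#	{
#		return (uint32_t)((((uint64_t)h << 32) | l) / d);
#	}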
diff --git a/src/lib/libcrypto/bn/asm/x86/mul.pl b/src/lib/libcrypto/bn/asm/x86/mul.pl
deleted file mode 100644
index 674cb9b055..0000000000
--- a/src/lib/libcrypto/bn/asm/x86/mul.pl
+++ /dev/null
@@ -1,77 +0,0 @@
1#!/usr/local/bin/perl
2# x86 assembler
3
4sub bn_mul_words
5 {
6 local($name)=@_;
7
8 &function_begin($name,"");
9
10 &comment("");
11 $Low="eax";
12 $High="edx";
13 $a="ebx";
14 $w="ecx";
15 $r="edi";
16 $c="esi";
17 $num="ebp";
18
19 &xor($c,$c); # clear carry
20 &mov($r,&wparam(0)); #
21 &mov($a,&wparam(1)); #
22 &mov($num,&wparam(2)); #
23 &mov($w,&wparam(3)); #
24
25	&and($num,0xfffffff8);	# round num down to a multiple of 8
26 &jz(&label("mw_finish"));
27
28 &set_label("mw_loop",0);
29 for ($i=0; $i<32; $i+=4)
30 {
31 &comment("Round $i");
32
33 &mov("eax",&DWP($i,$a,"",0)); # *a
34 &mul($w); # *a * w
35 &add("eax",$c); # L(t)+=c
36 # XXX
37
38 &adc("edx",0); # H(t)+=carry
39 &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
40
41 &mov($c,"edx"); # c= H(t);
42 }
43
44 &comment("");
45 &add($a,32);
46 &add($r,32);
47 &sub($num,8);
48 &jz(&label("mw_finish"));
49 &jmp(&label("mw_loop"));
50
51 &set_label("mw_finish",0);
52 &mov($num,&wparam(2)); # get num
53 &and($num,7);
54 &jnz(&label("mw_finish2"));
55 &jmp(&label("mw_end"));
56
57 &set_label("mw_finish2",1);
58 for ($i=0; $i<7; $i++)
59 {
60 &comment("Tail Round $i");
61 &mov("eax",&DWP($i*4,$a,"",0));# *a
62 &mul($w); # *a * w
63 &add("eax",$c); # L(t)+=c
64 # XXX
65 &adc("edx",0); # H(t)+=carry
66 &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t);
67 &mov($c,"edx"); # c= H(t);
68 &dec($num) if ($i != 7-1);
69 &jz(&label("mw_end")) if ($i != 7-1);
70 }
71 &set_label("mw_end",0);
72 &mov("eax",$c);
73
74 &function_end($name);
75 }
76
771;
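# The rolled-up equivalent of the loop above, as a C sketch
# (uint32_t/uint64_t from <stdint.h>):
#
#	/* r[] = a[] * w, returning the final high-word carry */
#	static uint32_t bn_mul_words_ref(uint32_t *r, const uint32_t *a,
#	    int num, uint32_t w)
#	{
#		uint32_t c = 0;
#		for (int i = 0; i < num; i++) {
#			uint64_t t = (uint64_t)a[i] * w + c; /* L(t)+=c, carry in H(t) */
#			r[i] = (uint32_t)t;
#			c = (uint32_t)(t >> 32);
#		}
#		return c;
#	}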
diff --git a/src/lib/libcrypto/bn/asm/x86/mul_add.pl b/src/lib/libcrypto/bn/asm/x86/mul_add.pl
deleted file mode 100644
index 61830d3a90..0000000000
--- a/src/lib/libcrypto/bn/asm/x86/mul_add.pl
+++ /dev/null
@@ -1,87 +0,0 @@
1#!/usr/local/bin/perl
2# x86 assembler
3
4sub bn_mul_add_words
5 {
6 local($name)=@_;
7
8 &function_begin($name,"");
9
10 &comment("");
11 $Low="eax";
12 $High="edx";
13 $a="ebx";
14 $w="ebp";
15 $r="edi";
16 $c="esi";
17
18 &xor($c,$c); # clear carry
19 &mov($r,&wparam(0)); #
20
21 &mov("ecx",&wparam(2)); #
22 &mov($a,&wparam(1)); #
23
24	&and("ecx",0xfffffff8);	# round num down to a multiple of 8
25 &mov($w,&wparam(3)); #
26
27	&push("ecx");		# make room on the stack for a tmp variable
28
29 &jz(&label("maw_finish"));
30
31 &set_label("maw_loop",0);
32
33 &mov(&swtmp(0),"ecx"); #
34
35 for ($i=0; $i<32; $i+=4)
36 {
37 &comment("Round $i");
38
39 &mov("eax",&DWP($i,$a,"",0)); # *a
40 &mul($w); # *a * w
41	&add("eax",$c);		# L(t)+=c
42	&mov($c,&DWP($i,$r,"",0));	# load *r
43	&adc("edx",0);		# H(t)+=carry
44	&add("eax",$c);		# L(t)+= *r
45 &adc("edx",0); # H(t)+=carry
46 &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
47 &mov($c,"edx"); # c= H(t);
48 }
49
50 &comment("");
51 &mov("ecx",&swtmp(0)); #
52 &add($a,32);
53 &add($r,32);
54 &sub("ecx",8);
55 &jnz(&label("maw_loop"));
56
57 &set_label("maw_finish",0);
58 &mov("ecx",&wparam(2)); # get num
59 &and("ecx",7);
60 &jnz(&label("maw_finish2")); # helps branch prediction
61 &jmp(&label("maw_end"));
62
63 &set_label("maw_finish2",1);
64 for ($i=0; $i<7; $i++)
65 {
66 &comment("Tail Round $i");
67 &mov("eax",&DWP($i*4,$a,"",0));# *a
68 &mul($w); # *a * w
69 &add("eax",$c); # L(t)+=c
70	&mov($c,&DWP($i*4,$r,"",0));	# load *r
71	&adc("edx",0);			# H(t)+=carry
72	&add("eax",$c);			# L(t)+= *r
73 &adc("edx",0); # H(t)+=carry
74 &dec("ecx") if ($i != 7-1);
75 &mov(&DWP($i*4,$r,"",0),"eax"); # *r= L(t);
76 &mov($c,"edx"); # c= H(t);
77 &jz(&label("maw_end")) if ($i != 7-1);
78 }
79 &set_label("maw_end",0);
80 &mov("eax",$c);
81
82	&pop("ecx");		# remove tmp variable from the stack
83
84 &function_end($name);
85 }
86
871;
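# The multiply-accumulate variant also adds the existing r[i]; note
# that a[i]*w + r[i] + c cannot overflow 64 bits. A C sketch:
#
#	/* r[] += a[] * w, returning the final carry */
#	static uint32_t bn_mul_add_words_ref(uint32_t *r, const uint32_t *a,
#	    int num, uint32_t w)
#	{
#		uint32_t c = 0;
#		for (int i = 0; i < num; i++) {
#			uint64_t t = (uint64_t)a[i] * w + r[i] + c;
#			r[i] = (uint32_t)t;
#			c = (uint32_t)(t >> 32);
#		}
#		return c;
#	}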
diff --git a/src/lib/libcrypto/bn/asm/x86/sqr.pl b/src/lib/libcrypto/bn/asm/x86/sqr.pl
deleted file mode 100644
index 1f90993cf6..0000000000
--- a/src/lib/libcrypto/bn/asm/x86/sqr.pl
+++ /dev/null
@@ -1,60 +0,0 @@
1#!/usr/local/bin/perl
2# x86 assembler
3
4sub bn_sqr_words
5 {
6 local($name)=@_;
7
8 &function_begin($name,"");
9
10 &comment("");
11 $r="esi";
12 $a="edi";
13 $num="ebx";
14
15 &mov($r,&wparam(0)); #
16 &mov($a,&wparam(1)); #
17 &mov($num,&wparam(2)); #
18
19	&and($num,0xfffffff8);	# round num down to a multiple of 8
20 &jz(&label("sw_finish"));
21
22 &set_label("sw_loop",0);
23 for ($i=0; $i<32; $i+=4)
24 {
25 &comment("Round $i");
26 &mov("eax",&DWP($i,$a,"",0)); # *a
27 # XXX
28 &mul("eax"); # *a * *a
29 &mov(&DWP($i*2,$r,"",0),"eax"); #
30 &mov(&DWP($i*2+4,$r,"",0),"edx");#
31 }
32
33 &comment("");
34 &add($a,32);
35 &add($r,64);
36 &sub($num,8);
37 &jnz(&label("sw_loop"));
38
39 &set_label("sw_finish",0);
40 &mov($num,&wparam(2)); # get num
41 &and($num,7);
42 &jz(&label("sw_end"));
43
44 for ($i=0; $i<7; $i++)
45 {
46 &comment("Tail Round $i");
47 &mov("eax",&DWP($i*4,$a,"",0)); # *a
48 # XXX
49 &mul("eax"); # *a * *a
50 &mov(&DWP($i*8,$r,"",0),"eax"); #
51 &dec($num) if ($i != 7-1);
52 &mov(&DWP($i*8+4,$r,"",0),"edx");
53 &jz(&label("sw_end")) if ($i != 7-1);
54 }
55 &set_label("sw_end",0);
56
57 &function_end($name);
58 }
59
601;
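# Each input word squares into two output words, so r[] is twice as
# long as a[]; a C sketch:
#
#	/* r[2i], r[2i+1] = low and high words of a[i]^2 */
#	static void bn_sqr_words_ref(uint32_t *r, const uint32_t *a, int n)
#	{
#		for (int i = 0; i < n; i++) {
#			uint64_t t = (uint64_t)a[i] * a[i];
#			r[2*i]     = (uint32_t)t;
#			r[2*i + 1] = (uint32_t)(t >> 32);
#		}
#	}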
diff --git a/src/lib/libcrypto/bn/asm/x86/sub.pl b/src/lib/libcrypto/bn/asm/x86/sub.pl
deleted file mode 100644
index 837b0e1b07..0000000000
--- a/src/lib/libcrypto/bn/asm/x86/sub.pl
+++ /dev/null
@@ -1,76 +0,0 @@
1#!/usr/local/bin/perl
2# x86 assembler
3
4sub bn_sub_words
5 {
6 local($name)=@_;
7
8 &function_begin($name,"");
9
10 &comment("");
11 $a="esi";
12 $b="edi";
13 $c="eax";
14 $r="ebx";
15 $tmp1="ecx";
16 $tmp2="edx";
17 $num="ebp";
18
19 &mov($r,&wparam(0)); # get r
20 &mov($a,&wparam(1)); # get a
21 &mov($b,&wparam(2)); # get b
22 &mov($num,&wparam(3)); # get num
23 &xor($c,$c); # clear carry
24	&and($num,0xfffffff8);	# round num down to a multiple of 8
25
26 &jz(&label("aw_finish"));
27
28 &set_label("aw_loop",0);
29 for ($i=0; $i<8; $i++)
30 {
31 &comment("Round $i");
32
33 &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
34 &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
35 &sub($tmp1,$c);
36 &mov($c,0);
37 &adc($c,$c);
38 &sub($tmp1,$tmp2);
39 &adc($c,0);
40 &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
41 }
42
43 &comment("");
44 &add($a,32);
45 &add($b,32);
46 &add($r,32);
47 &sub($num,8);
48 &jnz(&label("aw_loop"));
49
50 &set_label("aw_finish",0);
51 &mov($num,&wparam(3)); # get num
52 &and($num,7);
53 &jz(&label("aw_end"));
54
55 for ($i=0; $i<7; $i++)
56 {
57 &comment("Tail Round $i");
58 &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
59 &mov($tmp2,&DWP($i*4,$b,"",0));# *b
60 &sub($tmp1,$c);
61 &mov($c,0);
62 &adc($c,$c);
63 &sub($tmp1,$tmp2);
64 &adc($c,0);
65 &dec($num) if ($i != 6);
66	&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
67 &jz(&label("aw_end")) if ($i != 6);
68 }
69 &set_label("aw_end",0);
70
71# &mov("eax",$c); # $c is "eax"
72
73 &function_end($name);
74 }
75
761;
diff --git a/src/lib/libcrypto/bn/asm/x86_64-gcc.c b/src/lib/libcrypto/bn/asm/x86_64-gcc.c
deleted file mode 100644
index f13f52dd85..0000000000
--- a/src/lib/libcrypto/bn/asm/x86_64-gcc.c
+++ /dev/null
@@ -1,597 +0,0 @@
1#ifdef __SUNPRO_C
2# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
3#else
4/*
5 * x86_64 BIGNUM accelerator version 0.1, December 2002.
6 *
7 * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
8 * project.
9 *
10 * Rights for redistribution and usage in source and binary forms are
11 * granted according to the OpenSSL license. Warranty of any kind is
12 * disclaimed.
13 *
14 * Q. Version 0.1? It doesn't sound like Andy, he used to assign real
15 * versions, like 1.0...
16 * A. Well, that's because this code is basically a quick-n-dirty
17 * proof-of-concept hack. As you can see, it's implemented with
18 * inline assembler, which means that you're bound to GCC and that
19 * there may still be room for further improvement.
20 *
21 * Q. Why inline assembler?
22 * A. x86_64 features its own ABI, which I'm not familiar with. This is
23 * why I decided to let the compiler take care of the subroutine
24 * prologue/epilogue as well as register allocation. For reference,
25 * Win64 implements a different ABI for AMD64 than Linux does.
26 *
27 * Q. How much faster does it get?
28 * A. 'apps/openssl speed rsa dsa' output with no-asm:
29 *
30 * sign verify sign/s verify/s
31 * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2
32 * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0
33 * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8
34 * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6
35 * sign verify sign/s verify/s
36 * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3
37 * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2
38 * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0
39 *
40 * 'apps/openssl speed rsa dsa' output with this module:
41 *
42 * sign verify sign/s verify/s
43 * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9
44 * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7
45 * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0
46 * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8
47 * sign verify sign/s verify/s
48 * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3
49 * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4
50 * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6
51 *
52 * For reference, the IA-32 assembler implementation performs
53 * very much like 64-bit code compiled with no-asm on the same
54 * machine.
55 */
56
57#define BN_ULONG unsigned long
58
59/*
60 * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
61 * "g"(0) lets the compiler decide where it
62 * wants to keep the value of zero;
63 */
64#define mul_add(r,a,word,carry) do { \
65 register BN_ULONG high,low; \
66 asm ("mulq %3" \
67 : "=a"(low),"=d"(high) \
68 : "a"(word),"m"(a) \
69 : "cc"); \
70 asm ("addq %2,%0; adcq %3,%1" \
71 : "+r"(carry),"+d"(high)\
72 : "a"(low),"g"(0) \
73 : "cc"); \
74 asm ("addq %2,%0; adcq %3,%1" \
75 : "+m"(r),"+d"(high) \
76 : "r"(carry),"g"(0) \
77 : "cc"); \
78 carry=high; \
79 } while (0)
80
81#define mul(r,a,word,carry) do { \
82 register BN_ULONG high,low; \
83 asm ("mulq %3" \
84 : "=a"(low),"=d"(high) \
85 : "a"(word),"g"(a) \
86 : "cc"); \
87 asm ("addq %2,%0; adcq %3,%1" \
88 : "+r"(carry),"+d"(high)\
89 : "a"(low),"g"(0) \
90 : "cc"); \
91 (r)=carry, carry=high; \
92 } while (0)
93
94#define sqr(r0,r1,a) \
95 asm ("mulq %2" \
96 : "=a"(r0),"=d"(r1) \
97 : "a"(a) \
98 : "cc");
99
100BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
101 {
102 BN_ULONG c1=0;
103
104 if (num <= 0) return(c1);
105
106 while (num&~3)
107 {
108 mul_add(rp[0],ap[0],w,c1);
109 mul_add(rp[1],ap[1],w,c1);
110 mul_add(rp[2],ap[2],w,c1);
111 mul_add(rp[3],ap[3],w,c1);
112 ap+=4; rp+=4; num-=4;
113 }
114 if (num)
115 {
116 mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
117 mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
118 mul_add(rp[2],ap[2],w,c1); return c1;
119 }
120
121 return(c1);
122 }
123
124BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
125 {
126 BN_ULONG c1=0;
127
128 if (num <= 0) return(c1);
129
130 while (num&~3)
131 {
132 mul(rp[0],ap[0],w,c1);
133 mul(rp[1],ap[1],w,c1);
134 mul(rp[2],ap[2],w,c1);
135 mul(rp[3],ap[3],w,c1);
136 ap+=4; rp+=4; num-=4;
137 }
138 if (num)
139 {
140 mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
141 mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
142 mul(rp[2],ap[2],w,c1);
143 }
144 return(c1);
145 }
146
147void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
148 {
149 if (n <= 0) return;
150
151 while (n&~3)
152 {
153 sqr(r[0],r[1],a[0]);
154 sqr(r[2],r[3],a[1]);
155 sqr(r[4],r[5],a[2]);
156 sqr(r[6],r[7],a[3]);
157 a+=4; r+=8; n-=4;
158 }
159 if (n)
160 {
161 sqr(r[0],r[1],a[0]); if (--n == 0) return;
162 sqr(r[2],r[3],a[1]); if (--n == 0) return;
163 sqr(r[4],r[5],a[2]);
164 }
165 }
166
167BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
168{ BN_ULONG ret,waste;
169
170 asm ("divq %4"
171 : "=a"(ret),"=d"(waste)
172 : "a"(l),"d"(h),"g"(d)
173 : "cc");
174
175 return ret;
176}
177
178BN_ULONG bn_add_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
179{ BN_ULONG ret=0,i=0;
180
181 if (n <= 0) return 0;
182
183 asm (
184 " subq %2,%2 \n"
185 ".align 16 \n"
186 "1: movq (%4,%2,8),%0 \n"
187 " adcq (%5,%2,8),%0 \n"
188 " movq %0,(%3,%2,8) \n"
189 " leaq 1(%2),%2 \n"
190 " loop 1b \n"
191 " sbbq %0,%0 \n"
192 : "=&a"(ret),"+c"(n),"=&r"(i)
193 : "r"(rp),"r"(ap),"r"(bp)
194 : "cc"
195 );
196
197 return ret&1;
198}
199
200#ifndef SIMICS
201BN_ULONG bn_sub_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
202{ BN_ULONG ret=0,i=0;
203
204 if (n <= 0) return 0;
205
206 asm (
207 " subq %2,%2 \n"
208 ".align 16 \n"
209 "1: movq (%4,%2,8),%0 \n"
210 " sbbq (%5,%2,8),%0 \n"
211 " movq %0,(%3,%2,8) \n"
212 " leaq 1(%2),%2 \n"
213 " loop 1b \n"
214 " sbbq %0,%0 \n"
215 : "=&a"(ret),"+c"(n),"=&r"(i)
216 : "r"(rp),"r"(ap),"r"(bp)
217 : "cc"
218 );
219
220 return ret&1;
221}
222#else
223/* Simics 1.4<7 has buggy sbbq:-( */
224#define BN_MASK2 0xffffffffffffffffL
225BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
226 {
227 BN_ULONG t1,t2;
228 int c=0;
229
230 if (n <= 0) return((BN_ULONG)0);
231
232 for (;;)
233 {
234 t1=a[0]; t2=b[0];
235 r[0]=(t1-t2-c)&BN_MASK2;
236 if (t1 != t2) c=(t1 < t2);
237 if (--n <= 0) break;
238
239 t1=a[1]; t2=b[1];
240 r[1]=(t1-t2-c)&BN_MASK2;
241 if (t1 != t2) c=(t1 < t2);
242 if (--n <= 0) break;
243
244 t1=a[2]; t2=b[2];
245 r[2]=(t1-t2-c)&BN_MASK2;
246 if (t1 != t2) c=(t1 < t2);
247 if (--n <= 0) break;
248
249 t1=a[3]; t2=b[3];
250 r[3]=(t1-t2-c)&BN_MASK2;
251 if (t1 != t2) c=(t1 < t2);
252 if (--n <= 0) break;
253
254 a+=4;
255 b+=4;
256 r+=4;
257 }
258 return(c);
259 }
260#endif
261
262/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
263/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
264/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
265/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
266
267#if 0
268/* original macros are kept for reference purposes */
269#define mul_add_c(a,b,c0,c1,c2) { \
270 BN_ULONG ta=(a),tb=(b); \
271 t1 = ta * tb; \
272 t2 = BN_UMULT_HIGH(ta,tb); \
273 c0 += t1; t2 += (c0<t1)?1:0; \
274 c1 += t2; c2 += (c1<t2)?1:0; \
275 }
276
277#define mul_add_c2(a,b,c0,c1,c2) { \
278 BN_ULONG ta=(a),tb=(b),t0; \
279 t1 = BN_UMULT_HIGH(ta,tb); \
280 t0 = ta * tb; \
281 t2 = t1+t1; c2 += (t2<t1)?1:0; \
282 t1 = t0+t0; t2 += (t1<t0)?1:0; \
283 c0 += t1; t2 += (c0<t1)?1:0; \
284 c1 += t2; c2 += (c1<t2)?1:0; \
285 }
286#else
287#define mul_add_c(a,b,c0,c1,c2) do { \
288 asm ("mulq %3" \
289 : "=a"(t1),"=d"(t2) \
290 : "a"(a),"m"(b) \
291 : "cc"); \
292 asm ("addq %2,%0; adcq %3,%1" \
293 : "+r"(c0),"+d"(t2) \
294 : "a"(t1),"g"(0) \
295 : "cc"); \
296 asm ("addq %2,%0; adcq %3,%1" \
297 : "+r"(c1),"+r"(c2) \
298 : "d"(t2),"g"(0) \
299 : "cc"); \
300 } while (0)
301
302#define sqr_add_c(a,i,c0,c1,c2) do { \
303 asm ("mulq %2" \
304 : "=a"(t1),"=d"(t2) \
305 : "a"(a[i]) \
306 : "cc"); \
307 asm ("addq %2,%0; adcq %3,%1" \
308 : "+r"(c0),"+d"(t2) \
309 : "a"(t1),"g"(0) \
310 : "cc"); \
311 asm ("addq %2,%0; adcq %3,%1" \
312 : "+r"(c1),"+r"(c2) \
313 : "d"(t2),"g"(0) \
314 : "cc"); \
315 } while (0)
316
317#define mul_add_c2(a,b,c0,c1,c2) do { \
318 asm ("mulq %3" \
319 : "=a"(t1),"=d"(t2) \
320 : "a"(a),"m"(b) \
321 : "cc"); \
322 asm ("addq %0,%0; adcq %2,%1" \
323 : "+d"(t2),"+r"(c2) \
324 : "g"(0) \
325 : "cc"); \
326 asm ("addq %0,%0; adcq %2,%1" \
327 : "+a"(t1),"+d"(t2) \
328 : "g"(0) \
329 : "cc"); \
330 asm ("addq %2,%0; adcq %3,%1" \
331 : "+r"(c0),"+d"(t2) \
332 : "a"(t1),"g"(0) \
333 : "cc"); \
334 asm ("addq %2,%0; adcq %3,%1" \
335 : "+r"(c1),"+r"(c2) \
336 : "d"(t2),"g"(0) \
337 : "cc"); \
338 } while (0)
339#endif
340
341#define sqr_add_c2(a,i,j,c0,c1,c2) \
342 mul_add_c2((a)[i],(a)[j],c0,c1,c2)
343
344void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
345 {
346 BN_ULONG t1,t2;
347 BN_ULONG c1,c2,c3;
348
349 c1=0;
350 c2=0;
351 c3=0;
352 mul_add_c(a[0],b[0],c1,c2,c3);
353 r[0]=c1;
354 c1=0;
355 mul_add_c(a[0],b[1],c2,c3,c1);
356 mul_add_c(a[1],b[0],c2,c3,c1);
357 r[1]=c2;
358 c2=0;
359 mul_add_c(a[2],b[0],c3,c1,c2);
360 mul_add_c(a[1],b[1],c3,c1,c2);
361 mul_add_c(a[0],b[2],c3,c1,c2);
362 r[2]=c3;
363 c3=0;
364 mul_add_c(a[0],b[3],c1,c2,c3);
365 mul_add_c(a[1],b[2],c1,c2,c3);
366 mul_add_c(a[2],b[1],c1,c2,c3);
367 mul_add_c(a[3],b[0],c1,c2,c3);
368 r[3]=c1;
369 c1=0;
370 mul_add_c(a[4],b[0],c2,c3,c1);
371 mul_add_c(a[3],b[1],c2,c3,c1);
372 mul_add_c(a[2],b[2],c2,c3,c1);
373 mul_add_c(a[1],b[3],c2,c3,c1);
374 mul_add_c(a[0],b[4],c2,c3,c1);
375 r[4]=c2;
376 c2=0;
377 mul_add_c(a[0],b[5],c3,c1,c2);
378 mul_add_c(a[1],b[4],c3,c1,c2);
379 mul_add_c(a[2],b[3],c3,c1,c2);
380 mul_add_c(a[3],b[2],c3,c1,c2);
381 mul_add_c(a[4],b[1],c3,c1,c2);
382 mul_add_c(a[5],b[0],c3,c1,c2);
383 r[5]=c3;
384 c3=0;
385 mul_add_c(a[6],b[0],c1,c2,c3);
386 mul_add_c(a[5],b[1],c1,c2,c3);
387 mul_add_c(a[4],b[2],c1,c2,c3);
388 mul_add_c(a[3],b[3],c1,c2,c3);
389 mul_add_c(a[2],b[4],c1,c2,c3);
390 mul_add_c(a[1],b[5],c1,c2,c3);
391 mul_add_c(a[0],b[6],c1,c2,c3);
392 r[6]=c1;
393 c1=0;
394 mul_add_c(a[0],b[7],c2,c3,c1);
395 mul_add_c(a[1],b[6],c2,c3,c1);
396 mul_add_c(a[2],b[5],c2,c3,c1);
397 mul_add_c(a[3],b[4],c2,c3,c1);
398 mul_add_c(a[4],b[3],c2,c3,c1);
399 mul_add_c(a[5],b[2],c2,c3,c1);
400 mul_add_c(a[6],b[1],c2,c3,c1);
401 mul_add_c(a[7],b[0],c2,c3,c1);
402 r[7]=c2;
403 c2=0;
404 mul_add_c(a[7],b[1],c3,c1,c2);
405 mul_add_c(a[6],b[2],c3,c1,c2);
406 mul_add_c(a[5],b[3],c3,c1,c2);
407 mul_add_c(a[4],b[4],c3,c1,c2);
408 mul_add_c(a[3],b[5],c3,c1,c2);
409 mul_add_c(a[2],b[6],c3,c1,c2);
410 mul_add_c(a[1],b[7],c3,c1,c2);
411 r[8]=c3;
412 c3=0;
413 mul_add_c(a[2],b[7],c1,c2,c3);
414 mul_add_c(a[3],b[6],c1,c2,c3);
415 mul_add_c(a[4],b[5],c1,c2,c3);
416 mul_add_c(a[5],b[4],c1,c2,c3);
417 mul_add_c(a[6],b[3],c1,c2,c3);
418 mul_add_c(a[7],b[2],c1,c2,c3);
419 r[9]=c1;
420 c1=0;
421 mul_add_c(a[7],b[3],c2,c3,c1);
422 mul_add_c(a[6],b[4],c2,c3,c1);
423 mul_add_c(a[5],b[5],c2,c3,c1);
424 mul_add_c(a[4],b[6],c2,c3,c1);
425 mul_add_c(a[3],b[7],c2,c3,c1);
426 r[10]=c2;
427 c2=0;
428 mul_add_c(a[4],b[7],c3,c1,c2);
429 mul_add_c(a[5],b[6],c3,c1,c2);
430 mul_add_c(a[6],b[5],c3,c1,c2);
431 mul_add_c(a[7],b[4],c3,c1,c2);
432 r[11]=c3;
433 c3=0;
434 mul_add_c(a[7],b[5],c1,c2,c3);
435 mul_add_c(a[6],b[6],c1,c2,c3);
436 mul_add_c(a[5],b[7],c1,c2,c3);
437 r[12]=c1;
438 c1=0;
439 mul_add_c(a[6],b[7],c2,c3,c1);
440 mul_add_c(a[7],b[6],c2,c3,c1);
441 r[13]=c2;
442 c2=0;
443 mul_add_c(a[7],b[7],c3,c1,c2);
444 r[14]=c3;
445 r[15]=c1;
446 }
447
448void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
449 {
450 BN_ULONG t1,t2;
451 BN_ULONG c1,c2,c3;
452
453 c1=0;
454 c2=0;
455 c3=0;
456 mul_add_c(a[0],b[0],c1,c2,c3);
457 r[0]=c1;
458 c1=0;
459 mul_add_c(a[0],b[1],c2,c3,c1);
460 mul_add_c(a[1],b[0],c2,c3,c1);
461 r[1]=c2;
462 c2=0;
463 mul_add_c(a[2],b[0],c3,c1,c2);
464 mul_add_c(a[1],b[1],c3,c1,c2);
465 mul_add_c(a[0],b[2],c3,c1,c2);
466 r[2]=c3;
467 c3=0;
468 mul_add_c(a[0],b[3],c1,c2,c3);
469 mul_add_c(a[1],b[2],c1,c2,c3);
470 mul_add_c(a[2],b[1],c1,c2,c3);
471 mul_add_c(a[3],b[0],c1,c2,c3);
472 r[3]=c1;
473 c1=0;
474 mul_add_c(a[3],b[1],c2,c3,c1);
475 mul_add_c(a[2],b[2],c2,c3,c1);
476 mul_add_c(a[1],b[3],c2,c3,c1);
477 r[4]=c2;
478 c2=0;
479 mul_add_c(a[2],b[3],c3,c1,c2);
480 mul_add_c(a[3],b[2],c3,c1,c2);
481 r[5]=c3;
482 c3=0;
483 mul_add_c(a[3],b[3],c1,c2,c3);
484 r[6]=c1;
485 r[7]=c2;
486 }
487
488void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
489 {
490 BN_ULONG t1,t2;
491 BN_ULONG c1,c2,c3;
492
493 c1=0;
494 c2=0;
495 c3=0;
496 sqr_add_c(a,0,c1,c2,c3);
497 r[0]=c1;
498 c1=0;
499 sqr_add_c2(a,1,0,c2,c3,c1);
500 r[1]=c2;
501 c2=0;
502 sqr_add_c(a,1,c3,c1,c2);
503 sqr_add_c2(a,2,0,c3,c1,c2);
504 r[2]=c3;
505 c3=0;
506 sqr_add_c2(a,3,0,c1,c2,c3);
507 sqr_add_c2(a,2,1,c1,c2,c3);
508 r[3]=c1;
509 c1=0;
510 sqr_add_c(a,2,c2,c3,c1);
511 sqr_add_c2(a,3,1,c2,c3,c1);
512 sqr_add_c2(a,4,0,c2,c3,c1);
513 r[4]=c2;
514 c2=0;
515 sqr_add_c2(a,5,0,c3,c1,c2);
516 sqr_add_c2(a,4,1,c3,c1,c2);
517 sqr_add_c2(a,3,2,c3,c1,c2);
518 r[5]=c3;
519 c3=0;
520 sqr_add_c(a,3,c1,c2,c3);
521 sqr_add_c2(a,4,2,c1,c2,c3);
522 sqr_add_c2(a,5,1,c1,c2,c3);
523 sqr_add_c2(a,6,0,c1,c2,c3);
524 r[6]=c1;
525 c1=0;
526 sqr_add_c2(a,7,0,c2,c3,c1);
527 sqr_add_c2(a,6,1,c2,c3,c1);
528 sqr_add_c2(a,5,2,c2,c3,c1);
529 sqr_add_c2(a,4,3,c2,c3,c1);
530 r[7]=c2;
531 c2=0;
532 sqr_add_c(a,4,c3,c1,c2);
533 sqr_add_c2(a,5,3,c3,c1,c2);
534 sqr_add_c2(a,6,2,c3,c1,c2);
535 sqr_add_c2(a,7,1,c3,c1,c2);
536 r[8]=c3;
537 c3=0;
538 sqr_add_c2(a,7,2,c1,c2,c3);
539 sqr_add_c2(a,6,3,c1,c2,c3);
540 sqr_add_c2(a,5,4,c1,c2,c3);
541 r[9]=c1;
542 c1=0;
543 sqr_add_c(a,5,c2,c3,c1);
544 sqr_add_c2(a,6,4,c2,c3,c1);
545 sqr_add_c2(a,7,3,c2,c3,c1);
546 r[10]=c2;
547 c2=0;
548 sqr_add_c2(a,7,4,c3,c1,c2);
549 sqr_add_c2(a,6,5,c3,c1,c2);
550 r[11]=c3;
551 c3=0;
552 sqr_add_c(a,6,c1,c2,c3);
553 sqr_add_c2(a,7,5,c1,c2,c3);
554 r[12]=c1;
555 c1=0;
556 sqr_add_c2(a,7,6,c2,c3,c1);
557 r[13]=c2;
558 c2=0;
559 sqr_add_c(a,7,c3,c1,c2);
560 r[14]=c3;
561 r[15]=c1;
562 }
563
564void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
565 {
566 BN_ULONG t1,t2;
567 BN_ULONG c1,c2,c3;
568
569 c1=0;
570 c2=0;
571 c3=0;
572 sqr_add_c(a,0,c1,c2,c3);
573 r[0]=c1;
574 c1=0;
575 sqr_add_c2(a,1,0,c2,c3,c1);
576 r[1]=c2;
577 c2=0;
578 sqr_add_c(a,1,c3,c1,c2);
579 sqr_add_c2(a,2,0,c3,c1,c2);
580 r[2]=c3;
581 c3=0;
582 sqr_add_c2(a,3,0,c1,c2,c3);
583 sqr_add_c2(a,2,1,c1,c2,c3);
584 r[3]=c1;
585 c1=0;
586 sqr_add_c(a,2,c2,c3,c1);
587 sqr_add_c2(a,3,1,c2,c3,c1);
588 r[4]=c2;
589 c2=0;
590 sqr_add_c2(a,3,2,c3,c1,c2);
591 r[5]=c3;
592 c3=0;
593 sqr_add_c(a,3,c1,c2,c3);
594 r[6]=c1;
595 r[7]=c2;
596 }
597#endif
diff --git a/src/lib/libcrypto/bn/asm/x86_64-mont.pl b/src/lib/libcrypto/bn/asm/x86_64-mont.pl
deleted file mode 100755
index c43b69592a..0000000000
--- a/src/lib/libcrypto/bn/asm/x86_64-mont.pl
+++ /dev/null
@@ -1,214 +0,0 @@
1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# October 2005.
11#
12# Montgomery multiplication routine for x86_64. While it gives a modest
13# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
14# than twice (>2x) as fast. The most common rsa1024 sign is improved by
15# a respectable 50%. It remains to be seen if loop unrolling and a
16# dedicated squaring routine can provide further improvement...
17
18$output=shift;
19
20$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
21( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
22( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
23die "can't locate x86_64-xlate.pl";
24
25open STDOUT,"| $^X $xlate $output";
26
27# int bn_mul_mont(
28$rp="%rdi"; # BN_ULONG *rp,
29$ap="%rsi"; # const BN_ULONG *ap,
30$bp="%rdx"; # const BN_ULONG *bp,
31$np="%rcx"; # const BN_ULONG *np,
32$n0="%r8"; # const BN_ULONG *n0,
33$num="%r9"; # int num);
34$lo0="%r10";
35$hi0="%r11";
36$bp="%r12"; # reassign $bp
37$hi1="%r13";
38$i="%r14";
39$j="%r15";
40$m0="%rbx";
41$m1="%rbp";
42
43$code=<<___;
44.text
45
46.globl bn_mul_mont
47.type bn_mul_mont,\@function,6
48.align 16
49bn_mul_mont:
50 push %rbx
51 push %rbp
52 push %r12
53 push %r13
54 push %r14
55 push %r15
56
57 mov ${num}d,${num}d
58 lea 2($num),%rax
59 mov %rsp,%rbp
60 neg %rax
61 lea (%rsp,%rax,8),%rsp # tp=alloca(8*(num+2))
62 and \$-1024,%rsp # minimize TLB usage
63
64 mov %rbp,8(%rsp,$num,8) # tp[num+1]=%rsp
65 mov %rdx,$bp # $bp reassigned, remember?
66
67 mov ($n0),$n0 # pull n0[0] value
68
69 xor $i,$i # i=0
70 xor $j,$j # j=0
71
72 mov ($bp),$m0 # m0=bp[0]
73 mov ($ap),%rax
74 mulq $m0 # ap[0]*bp[0]
75 mov %rax,$lo0
76 mov %rdx,$hi0
77
78 imulq $n0,%rax # "tp[0]"*n0
79 mov %rax,$m1
80
81 mulq ($np) # np[0]*m1
82 add $lo0,%rax # discarded
83 adc \$0,%rdx
84 mov %rdx,$hi1
85
86 lea 1($j),$j # j++
87.L1st:
88 mov ($ap,$j,8),%rax
89 mulq $m0 # ap[j]*bp[0]
90 add $hi0,%rax
91 adc \$0,%rdx
92 mov %rax,$lo0
93 mov ($np,$j,8),%rax
94 mov %rdx,$hi0
95
96 mulq $m1 # np[j]*m1
97 add $hi1,%rax
98 lea 1($j),$j # j++
99 adc \$0,%rdx
100 add $lo0,%rax # np[j]*m1+ap[j]*bp[0]
101 adc \$0,%rdx
102 mov %rax,-16(%rsp,$j,8) # tp[j-1]
103 cmp $num,$j
104 mov %rdx,$hi1
105 jl .L1st
106
107 xor %rdx,%rdx
108 add $hi0,$hi1
109 adc \$0,%rdx
110 mov $hi1,-8(%rsp,$num,8)
111 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
112
113 lea 1($i),$i # i++
114.align 4
115.Louter:
116 xor $j,$j # j=0
117
118 mov ($bp,$i,8),$m0 # m0=bp[i]
119 mov ($ap),%rax # ap[0]
120 mulq $m0 # ap[0]*bp[i]
121 add (%rsp),%rax # ap[0]*bp[i]+tp[0]
122 adc \$0,%rdx
123 mov %rax,$lo0
124 mov %rdx,$hi0
125
126 imulq $n0,%rax # tp[0]*n0
127 mov %rax,$m1
128
129 mulq ($np,$j,8) # np[0]*m1
130 add $lo0,%rax # discarded
131 mov 8(%rsp),$lo0 # tp[1]
132 adc \$0,%rdx
133 mov %rdx,$hi1
134
135 lea 1($j),$j # j++
136.align 4
137.Linner:
138 mov ($ap,$j,8),%rax
139 mulq $m0 # ap[j]*bp[i]
140 add $hi0,%rax
141 adc \$0,%rdx
142 add %rax,$lo0 # ap[j]*bp[i]+tp[j]
143 mov ($np,$j,8),%rax
144 adc \$0,%rdx
145 mov %rdx,$hi0
146
147 mulq $m1 # np[j]*m1
148 add $hi1,%rax
149 lea 1($j),$j # j++
150 adc \$0,%rdx
151 add $lo0,%rax # np[j]*m1+ap[j]*bp[i]+tp[j]
152 adc \$0,%rdx
153 mov (%rsp,$j,8),$lo0
154 cmp $num,$j
155 mov %rax,-16(%rsp,$j,8) # tp[j-1]
156 mov %rdx,$hi1
157 jl .Linner
158
159 xor %rdx,%rdx
160 add $hi0,$hi1
161 adc \$0,%rdx
162 add $lo0,$hi1 # pull upmost overflow bit
163 adc \$0,%rdx
164 mov $hi1,-8(%rsp,$num,8)
165 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
166
167 lea 1($i),$i # i++
168 cmp $num,$i
169 jl .Louter
170
171 lea (%rsp),$ap # borrow ap for tp
172 lea -1($num),$j # j=num-1
173
174 mov ($ap),%rax # tp[0]
175 xor $i,$i # i=0 and clear CF!
176 jmp .Lsub
177.align 16
178.Lsub: sbb ($np,$i,8),%rax
179 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
180 dec $j # doesn't affect CF!
181 mov 8($ap,$i,8),%rax # tp[i+1]
182 lea 1($i),$i # i++
183 jge .Lsub
184
185 sbb \$0,%rax # handle upmost overflow bit
186 and %rax,$ap
187 not %rax
188 mov $rp,$np
189 and %rax,$np
190 lea -1($num),$j
191 or $np,$ap # ap=borrow?tp:rp
192.align 16
193.Lcopy: # copy or in-place refresh
194 mov ($ap,$j,8),%rax
195 mov %rax,($rp,$j,8) # rp[i]=tp[i]
196 mov $i,(%rsp,$j,8) # zap temporary vector
197 dec $j
198 jge .Lcopy
199
200 mov 8(%rsp,$num,8),%rsp # restore %rsp
201 mov \$1,%rax
202 pop %r15
203 pop %r14
204 pop %r13
205 pop %r12
206 pop %rbp
207 pop %rbx
208 ret
209.size bn_mul_mont,.-bn_mul_mont
210.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
211___
212
213print $code;
214close STDOUT;