path: root/src/lib/libcrypto/bn/asm/ppc.pl
author    cvs2svn <admin@example.com>  2025-04-14 17:32:06 +0000
committer cvs2svn <admin@example.com>  2025-04-14 17:32:06 +0000
commit    b1ddde874c215cc8891531ed92876f091b7eb83e (patch)
tree      edb6da6af7e865d488dc1a29309f1e1ec226e603  /src/lib/libcrypto/bn/asm/ppc.pl
parent    f0a36529837a161734c802ae4c42e84e42347be2 (diff)
download  openbsd-tb_20250414.tar.gz
          openbsd-tb_20250414.tar.bz2
          openbsd-tb_20250414.zip
This commit was manufactured by cvs2git to create tag 'tb_20250414'.
Diffstat (limited to 'src/lib/libcrypto/bn/asm/ppc.pl')
-rw-r--r--  src/lib/libcrypto/bn/asm/ppc.pl  1968
1 file changed, 0 insertions, 1968 deletions
diff --git a/src/lib/libcrypto/bn/asm/ppc.pl b/src/lib/libcrypto/bn/asm/ppc.pl
deleted file mode 100644
index c9b7f9477d..0000000000
--- a/src/lib/libcrypto/bn/asm/ppc.pl
+++ /dev/null
@@ -1,1968 +0,0 @@
1#!/usr/bin/env perl
2#
3# Implemented as a Perl wrapper as we want to support several different
4# architectures with a single file. We pick up the target based on the
5# file name we are asked to generate.
6#
7# It should be noted though that this perl code is nothing like
8# <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
9# as pre-processor to cover for platform differences in name decoration,
10# linker tables, 32-/64-bit instruction sets...
11#
12# As you might know, there are several PowerPC ABIs in use. Most notably,
13# Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
14# are similar enough to implement leaf(!) functions, which would be ABI
15# neutral. And that's what you find here: ABI-neutral leaf functions.
16# In case you wonder what that means: a leaf function calls no other function, so the ABIs' stack-frame and linkage differences never come into play.
17#
18# AIX performance
19#
20# MEASUREMENTS WITH cc ON a 200 MHz PowerPC 604e.
21#
22# The following is the performance of 32-bit compiler
23# generated code:
24#
25# OpenSSL 0.9.6c 21 dec 2001
26# built on: Tue Jun 11 11:06:51 EDT 2002
27# options:bn(64,32) ...
28#compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
29# sign verify sign/s verify/s
30#rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
31#rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
32#rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
33#rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
34#dsa 512 bits 0.0087s 0.0106s 114.3 94.5
35#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
36#
37# Same benchmark with this assembler code:
38#
39#rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
40#rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
41#rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
42#rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
43#dsa 512 bits 0.0052s 0.0062s 191.6 162.0
44#dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
45#
46# The number of operations per second increases by almost 75%
47#
48# Here are performance numbers for 64-bit compiler
49# generated code:
50#
51# OpenSSL 0.9.6g [engine] 9 Aug 2002
52# built on: Fri Apr 18 16:59:20 EDT 2003
53# options:bn(64,64) ...
54# compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
55# sign verify sign/s verify/s
56#rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
57#rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
58#rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
59#rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
60#dsa 512 bits 0.0026s 0.0032s 382.5 313.7
61#dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
62#
63# Same benchmark with this assembler code:
64#
65#rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
66#rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
67#rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
68#rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
69#dsa 512 bits 0.0016s 0.0020s 610.7 507.1
70#dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
71#
72# Again, performance increases by about 75%
73#
74# Mac OS X, Apple G5 1.8GHz (note: this is 32-bit code)
75# OpenSSL 0.9.7c 30 Sep 2003
76#
77# Original code.
78#
79#rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
80#rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
81#rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
82#rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
83#dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
84#dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
85#dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
86#
87# Same benchmark with this assembler code:
88#
89#rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
90#rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
91#rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
92#rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
93#dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
94#dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
95#dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
96#
97# Performance increase of ~60%
98#
99# If you have comments or suggestions to improve the code, send
100# me a note at schari@us.ibm.com
101#
102
103$flavour = shift;
104
105if ($flavour =~ /32/) {
106 $BITS= 32;
107 $BNSZ= $BITS/8;
108 $ISA= "\"ppc\"";
109
110 $LD= "lwz"; # load
111 $LDU= "lwzu"; # load and update
112 $ST= "stw"; # store
113 $STU= "stwu"; # store and update
114 $UMULL= "mullw"; # unsigned multiply low
115 $UMULH= "mulhwu"; # unsigned multiply high
116 $UDIV= "divwu"; # unsigned divide
117 $UCMPI= "cmplwi"; # unsigned compare with immediate
118 $UCMP= "cmplw"; # unsigned compare
119 $CNTLZ= "cntlzw"; # count leading zeros
120 $SHL= "slw"; # shift left
121 $SHR= "srw"; # unsigned shift right
122 $SHRI= "srwi"; # unsigned shift right by immediate
123 $SHLI= "slwi"; # shift left by immediate
124 $CLRU= "clrlwi"; # clear upper bits
125 $INSR= "insrwi"; # insert right
126 $ROTL= "rotlwi"; # rotate left by immediate
127 $TR= "tw"; # conditional trap
128} elsif ($flavour =~ /64/) {
129 $BITS= 64;
130 $BNSZ= $BITS/8;
131 $ISA= "\"ppc64\"";
132
133 # same as above, but 64-bit mnemonics...
134 $LD= "ld"; # load
135 $LDU= "ldu"; # load and update
136 $ST= "std"; # store
137 $STU= "stdu"; # store and update
138 $UMULL= "mulld"; # unsigned multiply low
139 $UMULH= "mulhdu"; # unsigned multiply high
140 $UDIV= "divdu"; # unsigned divide
141 $UCMPI= "cmpldi"; # unsigned compare with immediate
142 $UCMP= "cmpld"; # unsigned compare
143 $CNTLZ= "cntlzd"; # count leading zeros
144 $SHL= "sld"; # shift left
145 $SHR= "srd"; # unsigned shift right
146 $SHRI= "srdi"; # unsigned shift right by immediate
147 $SHLI= "sldi"; # shift left by immediate
148 $CLRU= "clrldi"; # clear upper bits
149 $INSR= "insrdi"; # insert right
150 $ROTL= "rotldi"; # rotate left by immediate
151 $TR= "td"; # conditional trap
152} else { die "nonsense $flavour"; }
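# To make the tables above concrete: the same templated source below expands
# differently depending on which flavour was selected. With a 32-bit flavour
# a line such as
#
#	$UMULH	r10,r5,r5
#
# comes out as "mulhwu r10,r5,r5", while a 64-bit flavour turns it into
# "mulhdu r10,r5,r5"; likewise $LD/$ST become lwz/stw or ld/std, and $BNSZ
# becomes 4 or 8 so that word offsets written as multiples of $BNSZ scale
# automatically.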
153
154$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
155( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
156( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
157die "can't locate ppc-xlate.pl";
158
159open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
160
161$data=<<EOF;
162#--------------------------------------------------------------------
163#
164#
165#
166#
167# File: ppc32.s
168#
169# Created by: Suresh Chari
170# IBM Thomas J. Watson Research Library
171# Hawthorne, NY
172#
173#
174# Description: Optimized assembly routines for OpenSSL crypto
175# on the 32-bit PowerPC platform.
176#
177#
178# Version History
179#
180# 2. Fixed bn_add, bn_sub and bn_div_words, added comments,
181# cleaned up code. Also made a single version which can
182# be used for both the AIX and Linux compilers. See NOTE
183# below.
184# 12/05/03 Suresh Chari
185# (with lots of help from) Andy Polyakov
186##
187# 1. Initial version 10/20/02 Suresh Chari
188#
189#
190# The following file works with the xlc, cc,
191# and gcc compilers.
192#
193# NOTE: To get the file to link correctly with the gcc compiler
194# you have to change the names of the routines and remove
195# the first .(dot) character. This should automatically
196# be done in the build process.
197#
198# Hand optimized assembly code for the following routines
199#
200# bn_sqr_comba4
201# bn_sqr_comba8
202# bn_mul_comba4
203# bn_mul_comba8
204# bn_sub_words
205# bn_add_words
206# bn_div_words
207# bn_sqr_words
208# bn_mul_words
209# bn_mul_add_words
210#
211# NOTE: It is possible to optimize this code more for
212# specific PowerPC or Power architectures. On the Northstar
213# architecture the optimizations in this file do
214# NOT provide much improvement.
215#
216# If you have comments or suggestions to improve the code, send
217# me a note at schari\@us.ibm.com
218#
219#--------------------------------------------------------------------------
220#
221# Defines to be used in the assembly code.
222#
223#.set r0,0 # we use it as storage for value of 0
224#.set SP,1 # preserved
225#.set RTOC,2 # preserved
226#.set r3,3 # 1st argument/return value
227#.set r4,4 # 2nd argument/volatile register
228#.set r5,5 # 3rd argument/volatile register
229#.set r6,6 # ...
230#.set r7,7
231#.set r8,8
232#.set r9,9
233#.set r10,10
234#.set r11,11
235#.set r12,12
236#.set r13,13 # not used, nor any other "below" it...
237
238# Declare function names to be global
239# NOTE: For gcc these names MUST be changed to remove
240# the first . i.e. for example change ".bn_sqr_comba4"
241# to "bn_sqr_comba4". This should be automatically done
242# in the build.
243
244 .globl .bn_sqr_comba4
245 .globl .bn_sqr_comba8
246 .globl .bn_mul_comba4
247 .globl .bn_mul_comba8
248 .globl .bn_sub_words
249 .globl .bn_add_words
250 .globl .bn_div_words
251 .globl .bn_sqr_words
252 .globl .bn_mul_words
253 .globl .bn_mul_add_words
254
255# .text section
256
257 .machine "any"
258
259#
260# NOTE: The following label name should be changed to
261# "bn_sqr_comba4" i.e. remove the first dot
262# for the gcc compiler. This should be automatically
263# done in the build
264#
265
266.align 4
267.bn_sqr_comba4:
268#
269# Optimized version of bn_sqr_comba4.
270#
271# void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
272# r3 contains r
273# r4 contains a
274#
275# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
276#
277# r5,r6 are the two BN_ULONGs being multiplied.
278# r7,r8 hold the result of the 32x32 -> 64-bit multiply.
279# r9,r10,r11 are the equivalents of c1,c2,c3.
280# Here's the assembly
281#
282#
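#
# As a reading aid, here is a rough C-level sketch of the single accumulation
# step that the sqr_add_c/mul_add_c annotations below denote (dword_t is an
# assumed double-width unsigned type; BN_BITS2 is the word size in bits):
#
#	/* (c3,c2,c1) += x*y */
#	dword_t t = (dword_t)x * y + c1;	/* UMULL/UMULH, addc */
#	c1 = (BN_ULONG)t;
#	t  = (t >> BN_BITS2) + c2;		/* adde  */
#	c2 = (BN_ULONG)t;
#	c3 += (BN_ULONG)(t >> BN_BITS2);	/* addze */
#
#	/* sqr_add_c2 adds the product twice (a[i]*a[j] occurs as both the
#	   (i,j) and (j,i) partial product), which is why (r7,r8) is doubled
#	   with addc/adde before being folded into the accumulator. */
#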
283 xor r0,r0,r0 # set r0 = 0. Used in the addze
284 # instructions below
285
286 #sqr_add_c(a,0,c1,c2,c3)
287 $LD r5,`0*$BNSZ`(r4)
288 $UMULL r9,r5,r5
289 $UMULH r10,r5,r5 #in first iteration. No need
290 #to add since c1=c2=c3=0.
291 # Note c3(r11) is NOT set to 0
292 # but will be.
293
294 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
295 # sqr_add_c2(a,1,0,c2,c3,c1);
296 $LD r6,`1*$BNSZ`(r4)
297 $UMULL r7,r5,r6
298 $UMULH r8,r5,r6
299
300 addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
301 adde r8,r8,r8
302 addze r9,r0 # catch carry if any.
303 # r9= r0(=0) and carry
304
305 addc r10,r7,r10 # now add to temp result.
306 addze r11,r8 # r8 added to r11 which is 0
307 addze r9,r9
308
309 $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
310 #sqr_add_c(a,1,c3,c1,c2)
311 $UMULL r7,r6,r6
312 $UMULH r8,r6,r6
313 addc r11,r7,r11
314 adde r9,r8,r9
315 addze r10,r0
316 #sqr_add_c2(a,2,0,c3,c1,c2)
317 $LD r6,`2*$BNSZ`(r4)
318 $UMULL r7,r5,r6
319 $UMULH r8,r5,r6
320
321 addc r7,r7,r7
322 adde r8,r8,r8
323 addze r10,r10
324
325 addc r11,r7,r11
326 adde r9,r8,r9
327 addze r10,r10
328 $ST r11,`2*$BNSZ`(r3) #r[2]=c3
329 #sqr_add_c2(a,3,0,c1,c2,c3);
330 $LD r6,`3*$BNSZ`(r4)
331 $UMULL r7,r5,r6
332 $UMULH r8,r5,r6
333 addc r7,r7,r7
334 adde r8,r8,r8
335 addze r11,r0
336
337 addc r9,r7,r9
338 adde r10,r8,r10
339 addze r11,r11
340 #sqr_add_c2(a,2,1,c1,c2,c3);
341 $LD r5,`1*$BNSZ`(r4)
342 $LD r6,`2*$BNSZ`(r4)
343 $UMULL r7,r5,r6
344 $UMULH r8,r5,r6
345
346 addc r7,r7,r7
347 adde r8,r8,r8
348 addze r11,r11
349 addc r9,r7,r9
350 adde r10,r8,r10
351 addze r11,r11
352 $ST r9,`3*$BNSZ`(r3) #r[3]=c1
353 #sqr_add_c(a,2,c2,c3,c1);
354 $UMULL r7,r6,r6
355 $UMULH r8,r6,r6
356 addc r10,r7,r10
357 adde r11,r8,r11
358 addze r9,r0
359 #sqr_add_c2(a,3,1,c2,c3,c1);
360 $LD r6,`3*$BNSZ`(r4)
361 $UMULL r7,r5,r6
362 $UMULH r8,r5,r6
363 addc r7,r7,r7
364 adde r8,r8,r8
365 addze r9,r9
366
367 addc r10,r7,r10
368 adde r11,r8,r11
369 addze r9,r9
370 $ST r10,`4*$BNSZ`(r3) #r[4]=c2
371 #sqr_add_c2(a,3,2,c3,c1,c2);
372 $LD r5,`2*$BNSZ`(r4)
373 $UMULL r7,r5,r6
374 $UMULH r8,r5,r6
375 addc r7,r7,r7
376 adde r8,r8,r8
377 addze r10,r0
378
379 addc r11,r7,r11
380 adde r9,r8,r9
381 addze r10,r10
382 $ST r11,`5*$BNSZ`(r3) #r[5] = c3
383 #sqr_add_c(a,3,c1,c2,c3);
384 $UMULL r7,r6,r6
385 $UMULH r8,r6,r6
386 addc r9,r7,r9
387 adde r10,r8,r10
388
389 $ST r9,`6*$BNSZ`(r3) #r[6]=c1
390 $ST r10,`7*$BNSZ`(r3) #r[7]=c2
391 blr
392
393#
394# NOTE: The following label name should be changed to
395# "bn_sqr_comba8" i.e. remove the first dot
396# for the gcc compiler. This should be automatically
397# done in the build
398#
399
400.align 4
401.bn_sqr_comba8:
402#
403# This is an optimized version of the bn_sqr_comba8 routine.
404# It makes heavy use of the adde instruction.
405#
406#
407# void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
408# r3 contains r
409# r4 contains a
410#
411# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
412#
413# r5,r6 are the two BN_ULONGs being multiplied.
414# r7,r8 hold the result of the 32x32 -> 64-bit multiply.
415# r9,r10,r11 are the equivalents of c1,c2,c3.
416#
417# A possible optimization of loading all 8 words of a into registers
418# doesn't provide any speedup
419#
420
421 xor r0,r0,r0 #set r0 = 0.Used in addze
422 #instructions below.
423
424 #sqr_add_c(a,0,c1,c2,c3);
425 $LD r5,`0*$BNSZ`(r4)
426 $UMULL r9,r5,r5 #1st iteration: no carries.
427 $UMULH r10,r5,r5
428 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
429 #sqr_add_c2(a,1,0,c2,c3,c1);
430 $LD r6,`1*$BNSZ`(r4)
431 $UMULL r7,r5,r6
432 $UMULH r8,r5,r6
433
434 addc r10,r7,r10 #add the two-register number
435 adde r11,r8,r0 # (r8,r7) to the three-register
436 addze r9,r0 # number (r9,r11,r10). NOTE: r0=0
437
438 addc r10,r7,r10 #add the two-register number
439 adde r11,r8,r11 # (r8,r7) to the three-register
440 addze r9,r9 # number (r9,r11,r10).
441
442 $ST r10,`1*$BNSZ`(r3) # r[1]=c2
443
444 #sqr_add_c(a,1,c3,c1,c2);
445 $UMULL r7,r6,r6
446 $UMULH r8,r6,r6
447 addc r11,r7,r11
448 adde r9,r8,r9
449 addze r10,r0
450 #sqr_add_c2(a,2,0,c3,c1,c2);
451 $LD r6,`2*$BNSZ`(r4)
452 $UMULL r7,r5,r6
453 $UMULH r8,r5,r6
454
455 addc r11,r7,r11
456 adde r9,r8,r9
457 addze r10,r10
458
459 addc r11,r7,r11
460 adde r9,r8,r9
461 addze r10,r10
462
463 $ST r11,`2*$BNSZ`(r3) #r[2]=c3
464 #sqr_add_c2(a,3,0,c1,c2,c3);
465 $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
466 $UMULL r7,r5,r6
467 $UMULH r8,r5,r6
468
469 addc r9,r7,r9
470 adde r10,r8,r10
471 addze r11,r0
472
473 addc r9,r7,r9
474 adde r10,r8,r10
475 addze r11,r11
476 #sqr_add_c2(a,2,1,c1,c2,c3);
477 $LD r5,`1*$BNSZ`(r4)
478 $LD r6,`2*$BNSZ`(r4)
479 $UMULL r7,r5,r6
480 $UMULH r8,r5,r6
481
482 addc r9,r7,r9
483 adde r10,r8,r10
484 addze r11,r11
485
486 addc r9,r7,r9
487 adde r10,r8,r10
488 addze r11,r11
489
490 $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
491 #sqr_add_c(a,2,c2,c3,c1);
492 $UMULL r7,r6,r6
493 $UMULH r8,r6,r6
494
495 addc r10,r7,r10
496 adde r11,r8,r11
497 addze r9,r0
498 #sqr_add_c2(a,3,1,c2,c3,c1);
499 $LD r6,`3*$BNSZ`(r4)
500 $UMULL r7,r5,r6
501 $UMULH r8,r5,r6
502
503 addc r10,r7,r10
504 adde r11,r8,r11
505 addze r9,r9
506
507 addc r10,r7,r10
508 adde r11,r8,r11
509 addze r9,r9
510 #sqr_add_c2(a,4,0,c2,c3,c1);
511 $LD r5,`0*$BNSZ`(r4)
512 $LD r6,`4*$BNSZ`(r4)
513 $UMULL r7,r5,r6
514 $UMULH r8,r5,r6
515
516 addc r10,r7,r10
517 adde r11,r8,r11
518 addze r9,r9
519
520 addc r10,r7,r10
521 adde r11,r8,r11
522 addze r9,r9
523 $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
524 #sqr_add_c2(a,5,0,c3,c1,c2);
525 $LD r6,`5*$BNSZ`(r4)
526 $UMULL r7,r5,r6
527 $UMULH r8,r5,r6
528
529 addc r11,r7,r11
530 adde r9,r8,r9
531 addze r10,r0
532
533 addc r11,r7,r11
534 adde r9,r8,r9
535 addze r10,r10
536 #sqr_add_c2(a,4,1,c3,c1,c2);
537 $LD r5,`1*$BNSZ`(r4)
538 $LD r6,`4*$BNSZ`(r4)
539 $UMULL r7,r5,r6
540 $UMULH r8,r5,r6
541
542 addc r11,r7,r11
543 adde r9,r8,r9
544 addze r10,r10
545
546 addc r11,r7,r11
547 adde r9,r8,r9
548 addze r10,r10
549 #sqr_add_c2(a,3,2,c3,c1,c2);
550 $LD r5,`2*$BNSZ`(r4)
551 $LD r6,`3*$BNSZ`(r4)
552 $UMULL r7,r5,r6
553 $UMULH r8,r5,r6
554
555 addc r11,r7,r11
556 adde r9,r8,r9
557 addze r10,r10
558
559 addc r11,r7,r11
560 adde r9,r8,r9
561 addze r10,r10
562 $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
563 #sqr_add_c(a,3,c1,c2,c3);
564 $UMULL r7,r6,r6
565 $UMULH r8,r6,r6
566 addc r9,r7,r9
567 adde r10,r8,r10
568 addze r11,r0
569 #sqr_add_c2(a,4,2,c1,c2,c3);
570 $LD r6,`4*$BNSZ`(r4)
571 $UMULL r7,r5,r6
572 $UMULH r8,r5,r6
573
574 addc r9,r7,r9
575 adde r10,r8,r10
576 addze r11,r11
577
578 addc r9,r7,r9
579 adde r10,r8,r10
580 addze r11,r11
581 #sqr_add_c2(a,5,1,c1,c2,c3);
582 $LD r5,`1*$BNSZ`(r4)
583 $LD r6,`5*$BNSZ`(r4)
584 $UMULL r7,r5,r6
585 $UMULH r8,r5,r6
586
587 addc r9,r7,r9
588 adde r10,r8,r10
589 addze r11,r11
590
591 addc r9,r7,r9
592 adde r10,r8,r10
593 addze r11,r11
594 #sqr_add_c2(a,6,0,c1,c2,c3);
595 $LD r5,`0*$BNSZ`(r4)
596 $LD r6,`6*$BNSZ`(r4)
597 $UMULL r7,r5,r6
598 $UMULH r8,r5,r6
599 addc r9,r7,r9
600 adde r10,r8,r10
601 addze r11,r11
602 addc r9,r7,r9
603 adde r10,r8,r10
604 addze r11,r11
605 $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
606 #sqr_add_c2(a,7,0,c2,c3,c1);
607 $LD r6,`7*$BNSZ`(r4)
608 $UMULL r7,r5,r6
609 $UMULH r8,r5,r6
610
611 addc r10,r7,r10
612 adde r11,r8,r11
613 addze r9,r0
614 addc r10,r7,r10
615 adde r11,r8,r11
616 addze r9,r9
617 #sqr_add_c2(a,6,1,c2,c3,c1);
618 $LD r5,`1*$BNSZ`(r4)
619 $LD r6,`6*$BNSZ`(r4)
620 $UMULL r7,r5,r6
621 $UMULH r8,r5,r6
622
623 addc r10,r7,r10
624 adde r11,r8,r11
625 addze r9,r9
626 addc r10,r7,r10
627 adde r11,r8,r11
628 addze r9,r9
629 #sqr_add_c2(a,5,2,c2,c3,c1);
630 $LD r5,`2*$BNSZ`(r4)
631 $LD r6,`5*$BNSZ`(r4)
632 $UMULL r7,r5,r6
633 $UMULH r8,r5,r6
634 addc r10,r7,r10
635 adde r11,r8,r11
636 addze r9,r9
637 addc r10,r7,r10
638 adde r11,r8,r11
639 addze r9,r9
640 #sqr_add_c2(a,4,3,c2,c3,c1);
641 $LD r5,`3*$BNSZ`(r4)
642 $LD r6,`4*$BNSZ`(r4)
643 $UMULL r7,r5,r6
644 $UMULH r8,r5,r6
645
646 addc r10,r7,r10
647 adde r11,r8,r11
648 addze r9,r9
649 addc r10,r7,r10
650 adde r11,r8,r11
651 addze r9,r9
652 $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
653 #sqr_add_c(a,4,c3,c1,c2);
654 $UMULL r7,r6,r6
655 $UMULH r8,r6,r6
656 addc r11,r7,r11
657 adde r9,r8,r9
658 addze r10,r0
659 #sqr_add_c2(a,5,3,c3,c1,c2);
660 $LD r6,`5*$BNSZ`(r4)
661 $UMULL r7,r5,r6
662 $UMULH r8,r5,r6
663 addc r11,r7,r11
664 adde r9,r8,r9
665 addze r10,r10
666 addc r11,r7,r11
667 adde r9,r8,r9
668 addze r10,r10
669 #sqr_add_c2(a,6,2,c3,c1,c2);
670 $LD r5,`2*$BNSZ`(r4)
671 $LD r6,`6*$BNSZ`(r4)
672 $UMULL r7,r5,r6
673 $UMULH r8,r5,r6
674 addc r11,r7,r11
675 adde r9,r8,r9
676 addze r10,r10
677
678 addc r11,r7,r11
679 adde r9,r8,r9
680 addze r10,r10
681 #sqr_add_c2(a,7,1,c3,c1,c2);
682 $LD r5,`1*$BNSZ`(r4)
683 $LD r6,`7*$BNSZ`(r4)
684 $UMULL r7,r5,r6
685 $UMULH r8,r5,r6
686 addc r11,r7,r11
687 adde r9,r8,r9
688 addze r10,r10
689 addc r11,r7,r11
690 adde r9,r8,r9
691 addze r10,r10
692 $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
693 #sqr_add_c2(a,7,2,c1,c2,c3);
694 $LD r5,`2*$BNSZ`(r4)
695 $UMULL r7,r5,r6
696 $UMULH r8,r5,r6
697
698 addc r9,r7,r9
699 adde r10,r8,r10
700 addze r11,r0
701 addc r9,r7,r9
702 adde r10,r8,r10
703 addze r11,r11
704 #sqr_add_c2(a,6,3,c1,c2,c3);
705 $LD r5,`3*$BNSZ`(r4)
706 $LD r6,`6*$BNSZ`(r4)
707 $UMULL r7,r5,r6
708 $UMULH r8,r5,r6
709 addc r9,r7,r9
710 adde r10,r8,r10
711 addze r11,r11
712 addc r9,r7,r9
713 adde r10,r8,r10
714 addze r11,r11
715 #sqr_add_c2(a,5,4,c1,c2,c3);
716 $LD r5,`4*$BNSZ`(r4)
717 $LD r6,`5*$BNSZ`(r4)
718 $UMULL r7,r5,r6
719 $UMULH r8,r5,r6
720 addc r9,r7,r9
721 adde r10,r8,r10
722 addze r11,r11
723 addc r9,r7,r9
724 adde r10,r8,r10
725 addze r11,r11
726 $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
727 #sqr_add_c(a,5,c2,c3,c1);
728 $UMULL r7,r6,r6
729 $UMULH r8,r6,r6
730 addc r10,r7,r10
731 adde r11,r8,r11
732 addze r9,r0
733 #sqr_add_c2(a,6,4,c2,c3,c1);
734 $LD r6,`6*$BNSZ`(r4)
735 $UMULL r7,r5,r6
736 $UMULH r8,r5,r6
737 addc r10,r7,r10
738 adde r11,r8,r11
739 addze r9,r9
740 addc r10,r7,r10
741 adde r11,r8,r11
742 addze r9,r9
743 #sqr_add_c2(a,7,3,c2,c3,c1);
744 $LD r5,`3*$BNSZ`(r4)
745 $LD r6,`7*$BNSZ`(r4)
746 $UMULL r7,r5,r6
747 $UMULH r8,r5,r6
748 addc r10,r7,r10
749 adde r11,r8,r11
750 addze r9,r9
751 addc r10,r7,r10
752 adde r11,r8,r11
753 addze r9,r9
754 $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
755 #sqr_add_c2(a,7,4,c3,c1,c2);
756 $LD r5,`4*$BNSZ`(r4)
757 $UMULL r7,r5,r6
758 $UMULH r8,r5,r6
759 addc r11,r7,r11
760 adde r9,r8,r9
761 addze r10,r0
762 addc r11,r7,r11
763 adde r9,r8,r9
764 addze r10,r10
765 #sqr_add_c2(a,6,5,c3,c1,c2);
766 $LD r5,`5*$BNSZ`(r4)
767 $LD r6,`6*$BNSZ`(r4)
768 $UMULL r7,r5,r6
769 $UMULH r8,r5,r6
770 addc r11,r7,r11
771 adde r9,r8,r9
772 addze r10,r10
773 addc r11,r7,r11
774 adde r9,r8,r9
775 addze r10,r10
776 $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
777 #sqr_add_c(a,6,c1,c2,c3);
778 $UMULL r7,r6,r6
779 $UMULH r8,r6,r6
780 addc r9,r7,r9
781 adde r10,r8,r10
782 addze r11,r0
783 #sqr_add_c2(a,7,5,c1,c2,c3)
784 $LD r6,`7*$BNSZ`(r4)
785 $UMULL r7,r5,r6
786 $UMULH r8,r5,r6
787 addc r9,r7,r9
788 adde r10,r8,r10
789 addze r11,r11
790 addc r9,r7,r9
791 adde r10,r8,r10
792 addze r11,r11
793 $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
794
795 #sqr_add_c2(a,7,6,c2,c3,c1)
796 $LD r5,`6*$BNSZ`(r4)
797 $UMULL r7,r5,r6
798 $UMULH r8,r5,r6
799 addc r10,r7,r10
800 adde r11,r8,r11
801 addze r9,r0
802 addc r10,r7,r10
803 adde r11,r8,r11
804 addze r9,r9
805 $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
806 #sqr_add_c(a,7,c3,c1,c2);
807 $UMULL r7,r6,r6
808 $UMULH r8,r6,r6
809 addc r11,r7,r11
810 adde r9,r8,r9
811 $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
812 $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
813
814
815 blr
816
817#
818# NOTE: The following label name should be changed to
819# "bn_mul_comba4" i.e. remove the first dot
820# for the gcc compiler. This should be automatically
821# done in the build
822#
823
824.align 4
825.bn_mul_comba4:
826#
827# This is an optimized version of the bn_mul_comba4 routine.
828#
829# void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
830# r3 contains r
831# r4 contains a
832# r5 contains b
833# r6, r7 are the 2 BN_ULONGs being multiplied.
834# r8, r9 hold the result of the 32x32 -> 64-bit multiply.
835# r10, r11, r12 are the equivalents of c1, c2, and c3.
836#
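#
# For reference, a plain C version of what this routine computes (schoolbook
# row-by-row order with an assumed double-width type dword_t; the assembly
# below produces the same r[0..7] column by column with the (c1,c2,c3)
# accumulator):
#
#	void bn_mul_comba4_ref(BN_ULONG r[8], const BN_ULONG a[4],
#	    const BN_ULONG b[4])
#	{
#		dword_t t;
#		int i, j;
#
#		for (i = 0; i < 8; i++)
#			r[i] = 0;
#		for (i = 0; i < 4; i++) {
#			t = 0;
#			for (j = 0; j < 4; j++) {
#				t += (dword_t)a[i] * b[j] + r[i + j];
#				r[i + j] = (BN_ULONG)t;
#				t >>= BN_BITS2;
#			}
#			r[i + 4] = (BN_ULONG)t;
#		}
#	}
#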
837 xor r0,r0,r0 #r0=0. Used in addze below.
838 #mul_add_c(a[0],b[0],c1,c2,c3);
839 $LD r6,`0*$BNSZ`(r4)
840 $LD r7,`0*$BNSZ`(r5)
841 $UMULL r10,r6,r7
842 $UMULH r11,r6,r7
843 $ST r10,`0*$BNSZ`(r3) #r[0]=c1
844 #mul_add_c(a[0],b[1],c2,c3,c1);
845 $LD r7,`1*$BNSZ`(r5)
846 $UMULL r8,r6,r7
847 $UMULH r9,r6,r7
848 addc r11,r8,r11
849 adde r12,r9,r0
850 addze r10,r0
851 #mul_add_c(a[1],b[0],c2,c3,c1);
852 $LD r6, `1*$BNSZ`(r4)
853 $LD r7, `0*$BNSZ`(r5)
854 $UMULL r8,r6,r7
855 $UMULH r9,r6,r7
856 addc r11,r8,r11
857 adde r12,r9,r12
858 addze r10,r10
859 $ST r11,`1*$BNSZ`(r3) #r[1]=c2
860 #mul_add_c(a[2],b[0],c3,c1,c2);
861 $LD r6,`2*$BNSZ`(r4)
862 $UMULL r8,r6,r7
863 $UMULH r9,r6,r7
864 addc r12,r8,r12
865 adde r10,r9,r10
866 addze r11,r0
867 #mul_add_c(a[1],b[1],c3,c1,c2);
868 $LD r6,`1*$BNSZ`(r4)
869 $LD r7,`1*$BNSZ`(r5)
870 $UMULL r8,r6,r7
871 $UMULH r9,r6,r7
872 addc r12,r8,r12
873 adde r10,r9,r10
874 addze r11,r11
875 #mul_add_c(a[0],b[2],c3,c1,c2);
876 $LD r6,`0*$BNSZ`(r4)
877 $LD r7,`2*$BNSZ`(r5)
878 $UMULL r8,r6,r7
879 $UMULH r9,r6,r7
880 addc r12,r8,r12
881 adde r10,r9,r10
882 addze r11,r11
883 $ST r12,`2*$BNSZ`(r3) #r[2]=c3
884 #mul_add_c(a[0],b[3],c1,c2,c3);
885 $LD r7,`3*$BNSZ`(r5)
886 $UMULL r8,r6,r7
887 $UMULH r9,r6,r7
888 addc r10,r8,r10
889 adde r11,r9,r11
890 addze r12,r0
891 #mul_add_c(a[1],b[2],c1,c2,c3);
892 $LD r6,`1*$BNSZ`(r4)
893 $LD r7,`2*$BNSZ`(r5)
894 $UMULL r8,r6,r7
895 $UMULH r9,r6,r7
896 addc r10,r8,r10
897 adde r11,r9,r11
898 addze r12,r12
899 #mul_add_c(a[2],b[1],c1,c2,c3);
900 $LD r6,`2*$BNSZ`(r4)
901 $LD r7,`1*$BNSZ`(r5)
902 $UMULL r8,r6,r7
903 $UMULH r9,r6,r7
904 addc r10,r8,r10
905 adde r11,r9,r11
906 addze r12,r12
907 #mul_add_c(a[3],b[0],c1,c2,c3);
908 $LD r6,`3*$BNSZ`(r4)
909 $LD r7,`0*$BNSZ`(r5)
910 $UMULL r8,r6,r7
911 $UMULH r9,r6,r7
912 addc r10,r8,r10
913 adde r11,r9,r11
914 addze r12,r12
915 $ST r10,`3*$BNSZ`(r3) #r[3]=c1
916 #mul_add_c(a[3],b[1],c2,c3,c1);
917 $LD r7,`1*$BNSZ`(r5)
918 $UMULL r8,r6,r7
919 $UMULH r9,r6,r7
920 addc r11,r8,r11
921 adde r12,r9,r12
922 addze r10,r0
923 #mul_add_c(a[2],b[2],c2,c3,c1);
924 $LD r6,`2*$BNSZ`(r4)
925 $LD r7,`2*$BNSZ`(r5)
926 $UMULL r8,r6,r7
927 $UMULH r9,r6,r7
928 addc r11,r8,r11
929 adde r12,r9,r12
930 addze r10,r10
931 #mul_add_c(a[1],b[3],c2,c3,c1);
932 $LD r6,`1*$BNSZ`(r4)
933 $LD r7,`3*$BNSZ`(r5)
934 $UMULL r8,r6,r7
935 $UMULH r9,r6,r7
936 addc r11,r8,r11
937 adde r12,r9,r12
938 addze r10,r10
939 $ST r11,`4*$BNSZ`(r3) #r[4]=c2
940 #mul_add_c(a[2],b[3],c3,c1,c2);
941 $LD r6,`2*$BNSZ`(r4)
942 $UMULL r8,r6,r7
943 $UMULH r9,r6,r7
944 addc r12,r8,r12
945 adde r10,r9,r10
946 addze r11,r0
947 #mul_add_c(a[3],b[2],c3,c1,c2);
948 $LD r6,`3*$BNSZ`(r4)
949 $LD r7,`2*$BNSZ`(r5)
950 $UMULL r8,r6,r7
951 $UMULH r9,r6,r7
952 addc r12,r8,r12
953 adde r10,r9,r10
954 addze r11,r11
955 $ST r12,`5*$BNSZ`(r3) #r[5]=c3
956 #mul_add_c(a[3],b[3],c1,c2,c3);
957 $LD r7,`3*$BNSZ`(r5)
958 $UMULL r8,r6,r7
959 $UMULH r9,r6,r7
960 addc r10,r8,r10
961 adde r11,r9,r11
962
963 $ST r10,`6*$BNSZ`(r3) #r[6]=c1
964 $ST r11,`7*$BNSZ`(r3) #r[7]=c2
965 blr
966
967#
968# NOTE: The following label name should be changed to
969# "bn_mul_comba8" i.e. remove the first dot
970# for the gcc compiler. This should be automatically
971# done in the build
972#
973
974.align 4
975.bn_mul_comba8:
976#
977# Optimized version of the bn_mul_comba8 routine.
978#
979# void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
980# r3 contains r
981# r4 contains a
982# r5 contains b
983# r6, r7 are the 2 BN_ULONGs being multiplied.
984# r8, r9 hold the result of the 32x32 -> 64-bit multiply.
985# r10, r11, r12 are the equivalents of c1, c2, and c3.
986#
987 xor r0,r0,r0 #r0=0. Used in addze below.
988
989 #mul_add_c(a[0],b[0],c1,c2,c3);
990 $LD r6,`0*$BNSZ`(r4) #a[0]
991 $LD r7,`0*$BNSZ`(r5) #b[0]
992 $UMULL r10,r6,r7
993 $UMULH r11,r6,r7
994 $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
995 #mul_add_c(a[0],b[1],c2,c3,c1);
996 $LD r7,`1*$BNSZ`(r5)
997 $UMULL r8,r6,r7
998 $UMULH r9,r6,r7
999 addc r11,r11,r8
1000 addze r12,r9 # since we didn't set r12 to zero before.
1001 addze r10,r0
1002 #mul_add_c(a[1],b[0],c2,c3,c1);
1003 $LD r6,`1*$BNSZ`(r4)
1004 $LD r7,`0*$BNSZ`(r5)
1005 $UMULL r8,r6,r7
1006 $UMULH r9,r6,r7
1007 addc r11,r11,r8
1008 adde r12,r12,r9
1009 addze r10,r10
1010 $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
1011 #mul_add_c(a[2],b[0],c3,c1,c2);
1012 $LD r6,`2*$BNSZ`(r4)
1013 $UMULL r8,r6,r7
1014 $UMULH r9,r6,r7
1015 addc r12,r12,r8
1016 adde r10,r10,r9
1017 addze r11,r0
1018 #mul_add_c(a[1],b[1],c3,c1,c2);
1019 $LD r6,`1*$BNSZ`(r4)
1020 $LD r7,`1*$BNSZ`(r5)
1021 $UMULL r8,r6,r7
1022 $UMULH r9,r6,r7
1023 addc r12,r12,r8
1024 adde r10,r10,r9
1025 addze r11,r11
1026 #mul_add_c(a[0],b[2],c3,c1,c2);
1027 $LD r6,`0*$BNSZ`(r4)
1028 $LD r7,`2*$BNSZ`(r5)
1029 $UMULL r8,r6,r7
1030 $UMULH r9,r6,r7
1031 addc r12,r12,r8
1032 adde r10,r10,r9
1033 addze r11,r11
1034 $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
1035 #mul_add_c(a[0],b[3],c1,c2,c3);
1036 $LD r7,`3*$BNSZ`(r5)
1037 $UMULL r8,r6,r7
1038 $UMULH r9,r6,r7
1039 addc r10,r10,r8
1040 adde r11,r11,r9
1041 addze r12,r0
1042 #mul_add_c(a[1],b[2],c1,c2,c3);
1043 $LD r6,`1*$BNSZ`(r4)
1044 $LD r7,`2*$BNSZ`(r5)
1045 $UMULL r8,r6,r7
1046 $UMULH r9,r6,r7
1047 addc r10,r10,r8
1048 adde r11,r11,r9
1049 addze r12,r12
1050
1051 #mul_add_c(a[2],b[1],c1,c2,c3);
1052 $LD r6,`2*$BNSZ`(r4)
1053 $LD r7,`1*$BNSZ`(r5)
1054 $UMULL r8,r6,r7
1055 $UMULH r9,r6,r7
1056 addc r10,r10,r8
1057 adde r11,r11,r9
1058 addze r12,r12
1059 #mul_add_c(a[3],b[0],c1,c2,c3);
1060 $LD r6,`3*$BNSZ`(r4)
1061 $LD r7,`0*$BNSZ`(r5)
1062 $UMULL r8,r6,r7
1063 $UMULH r9,r6,r7
1064 addc r10,r10,r8
1065 adde r11,r11,r9
1066 addze r12,r12
1067 $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
1068 #mul_add_c(a[4],b[0],c2,c3,c1);
1069 $LD r6,`4*$BNSZ`(r4)
1070 $UMULL r8,r6,r7
1071 $UMULH r9,r6,r7
1072 addc r11,r11,r8
1073 adde r12,r12,r9
1074 addze r10,r0
1075 #mul_add_c(a[3],b[1],c2,c3,c1);
1076 $LD r6,`3*$BNSZ`(r4)
1077 $LD r7,`1*$BNSZ`(r5)
1078 $UMULL r8,r6,r7
1079 $UMULH r9,r6,r7
1080 addc r11,r11,r8
1081 adde r12,r12,r9
1082 addze r10,r10
1083 #mul_add_c(a[2],b[2],c2,c3,c1);
1084 $LD r6,`2*$BNSZ`(r4)
1085 $LD r7,`2*$BNSZ`(r5)
1086 $UMULL r8,r6,r7
1087 $UMULH r9,r6,r7
1088 addc r11,r11,r8
1089 adde r12,r12,r9
1090 addze r10,r10
1091 #mul_add_c(a[1],b[3],c2,c3,c1);
1092 $LD r6,`1*$BNSZ`(r4)
1093 $LD r7,`3*$BNSZ`(r5)
1094 $UMULL r8,r6,r7
1095 $UMULH r9,r6,r7
1096 addc r11,r11,r8
1097 adde r12,r12,r9
1098 addze r10,r10
1099 #mul_add_c(a[0],b[4],c2,c3,c1);
1100 $LD r6,`0*$BNSZ`(r4)
1101 $LD r7,`4*$BNSZ`(r5)
1102 $UMULL r8,r6,r7
1103 $UMULH r9,r6,r7
1104 addc r11,r11,r8
1105 adde r12,r12,r9
1106 addze r10,r10
1107 $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
1108 #mul_add_c(a[0],b[5],c3,c1,c2);
1109 $LD r7,`5*$BNSZ`(r5)
1110 $UMULL r8,r6,r7
1111 $UMULH r9,r6,r7
1112 addc r12,r12,r8
1113 adde r10,r10,r9
1114 addze r11,r0
1115 #mul_add_c(a[1],b[4],c3,c1,c2);
1116 $LD r6,`1*$BNSZ`(r4)
1117 $LD r7,`4*$BNSZ`(r5)
1118 $UMULL r8,r6,r7
1119 $UMULH r9,r6,r7
1120 addc r12,r12,r8
1121 adde r10,r10,r9
1122 addze r11,r11
1123 #mul_add_c(a[2],b[3],c3,c1,c2);
1124 $LD r6,`2*$BNSZ`(r4)
1125 $LD r7,`3*$BNSZ`(r5)
1126 $UMULL r8,r6,r7
1127 $UMULH r9,r6,r7
1128 addc r12,r12,r8
1129 adde r10,r10,r9
1130 addze r11,r11
1131 #mul_add_c(a[3],b[2],c3,c1,c2);
1132 $LD r6,`3*$BNSZ`(r4)
1133 $LD r7,`2*$BNSZ`(r5)
1134 $UMULL r8,r6,r7
1135 $UMULH r9,r6,r7
1136 addc r12,r12,r8
1137 adde r10,r10,r9
1138 addze r11,r11
1139 #mul_add_c(a[4],b[1],c3,c1,c2);
1140 $LD r6,`4*$BNSZ`(r4)
1141 $LD r7,`1*$BNSZ`(r5)
1142 $UMULL r8,r6,r7
1143 $UMULH r9,r6,r7
1144 addc r12,r12,r8
1145 adde r10,r10,r9
1146 addze r11,r11
1147 #mul_add_c(a[5],b[0],c3,c1,c2);
1148 $LD r6,`5*$BNSZ`(r4)
1149 $LD r7,`0*$BNSZ`(r5)
1150 $UMULL r8,r6,r7
1151 $UMULH r9,r6,r7
1152 addc r12,r12,r8
1153 adde r10,r10,r9
1154 addze r11,r11
1155 $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
1156 #mul_add_c(a[6],b[0],c1,c2,c3);
1157 $LD r6,`6*$BNSZ`(r4)
1158 $UMULL r8,r6,r7
1159 $UMULH r9,r6,r7
1160 addc r10,r10,r8
1161 adde r11,r11,r9
1162 addze r12,r0
1163 #mul_add_c(a[5],b[1],c1,c2,c3);
1164 $LD r6,`5*$BNSZ`(r4)
1165 $LD r7,`1*$BNSZ`(r5)
1166 $UMULL r8,r6,r7
1167 $UMULH r9,r6,r7
1168 addc r10,r10,r8
1169 adde r11,r11,r9
1170 addze r12,r12
1171 #mul_add_c(a[4],b[2],c1,c2,c3);
1172 $LD r6,`4*$BNSZ`(r4)
1173 $LD r7,`2*$BNSZ`(r5)
1174 $UMULL r8,r6,r7
1175 $UMULH r9,r6,r7
1176 addc r10,r10,r8
1177 adde r11,r11,r9
1178 addze r12,r12
1179 #mul_add_c(a[3],b[3],c1,c2,c3);
1180 $LD r6,`3*$BNSZ`(r4)
1181 $LD r7,`3*$BNSZ`(r5)
1182 $UMULL r8,r6,r7
1183 $UMULH r9,r6,r7
1184 addc r10,r10,r8
1185 adde r11,r11,r9
1186 addze r12,r12
1187 #mul_add_c(a[2],b[4],c1,c2,c3);
1188 $LD r6,`2*$BNSZ`(r4)
1189 $LD r7,`4*$BNSZ`(r5)
1190 $UMULL r8,r6,r7
1191 $UMULH r9,r6,r7
1192 addc r10,r10,r8
1193 adde r11,r11,r9
1194 addze r12,r12
1195 #mul_add_c(a[1],b[5],c1,c2,c3);
1196 $LD r6,`1*$BNSZ`(r4)
1197 $LD r7,`5*$BNSZ`(r5)
1198 $UMULL r8,r6,r7
1199 $UMULH r9,r6,r7
1200 addc r10,r10,r8
1201 adde r11,r11,r9
1202 addze r12,r12
1203 #mul_add_c(a[0],b[6],c1,c2,c3);
1204 $LD r6,`0*$BNSZ`(r4)
1205 $LD r7,`6*$BNSZ`(r5)
1206 $UMULL r8,r6,r7
1207 $UMULH r9,r6,r7
1208 addc r10,r10,r8
1209 adde r11,r11,r9
1210 addze r12,r12
1211 $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
1212 #mul_add_c(a[0],b[7],c2,c3,c1);
1213 $LD r7,`7*$BNSZ`(r5)
1214 $UMULL r8,r6,r7
1215 $UMULH r9,r6,r7
1216 addc r11,r11,r8
1217 adde r12,r12,r9
1218 addze r10,r0
1219 #mul_add_c(a[1],b[6],c2,c3,c1);
1220 $LD r6,`1*$BNSZ`(r4)
1221 $LD r7,`6*$BNSZ`(r5)
1222 $UMULL r8,r6,r7
1223 $UMULH r9,r6,r7
1224 addc r11,r11,r8
1225 adde r12,r12,r9
1226 addze r10,r10
1227 #mul_add_c(a[2],b[5],c2,c3,c1);
1228 $LD r6,`2*$BNSZ`(r4)
1229 $LD r7,`5*$BNSZ`(r5)
1230 $UMULL r8,r6,r7
1231 $UMULH r9,r6,r7
1232 addc r11,r11,r8
1233 adde r12,r12,r9
1234 addze r10,r10
1235 #mul_add_c(a[3],b[4],c2,c3,c1);
1236 $LD r6,`3*$BNSZ`(r4)
1237 $LD r7,`4*$BNSZ`(r5)
1238 $UMULL r8,r6,r7
1239 $UMULH r9,r6,r7
1240 addc r11,r11,r8
1241 adde r12,r12,r9
1242 addze r10,r10
1243 #mul_add_c(a[4],b[3],c2,c3,c1);
1244 $LD r6,`4*$BNSZ`(r4)
1245 $LD r7,`3*$BNSZ`(r5)
1246 $UMULL r8,r6,r7
1247 $UMULH r9,r6,r7
1248 addc r11,r11,r8
1249 adde r12,r12,r9
1250 addze r10,r10
1251 #mul_add_c(a[5],b[2],c2,c3,c1);
1252 $LD r6,`5*$BNSZ`(r4)
1253 $LD r7,`2*$BNSZ`(r5)
1254 $UMULL r8,r6,r7
1255 $UMULH r9,r6,r7
1256 addc r11,r11,r8
1257 adde r12,r12,r9
1258 addze r10,r10
1259 #mul_add_c(a[6],b[1],c2,c3,c1);
1260 $LD r6,`6*$BNSZ`(r4)
1261 $LD r7,`1*$BNSZ`(r5)
1262 $UMULL r8,r6,r7
1263 $UMULH r9,r6,r7
1264 addc r11,r11,r8
1265 adde r12,r12,r9
1266 addze r10,r10
1267 #mul_add_c(a[7],b[0],c2,c3,c1);
1268 $LD r6,`7*$BNSZ`(r4)
1269 $LD r7,`0*$BNSZ`(r5)
1270 $UMULL r8,r6,r7
1271 $UMULH r9,r6,r7
1272 addc r11,r11,r8
1273 adde r12,r12,r9
1274 addze r10,r10
1275 $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
1276 #mul_add_c(a[7],b[1],c3,c1,c2);
1277 $LD r7,`1*$BNSZ`(r5)
1278 $UMULL r8,r6,r7
1279 $UMULH r9,r6,r7
1280 addc r12,r12,r8
1281 adde r10,r10,r9
1282 addze r11,r0
1283 #mul_add_c(a[6],b[2],c3,c1,c2);
1284 $LD r6,`6*$BNSZ`(r4)
1285 $LD r7,`2*$BNSZ`(r5)
1286 $UMULL r8,r6,r7
1287 $UMULH r9,r6,r7
1288 addc r12,r12,r8
1289 adde r10,r10,r9
1290 addze r11,r11
1291 #mul_add_c(a[5],b[3],c3,c1,c2);
1292 $LD r6,`5*$BNSZ`(r4)
1293 $LD r7,`3*$BNSZ`(r5)
1294 $UMULL r8,r6,r7
1295 $UMULH r9,r6,r7
1296 addc r12,r12,r8
1297 adde r10,r10,r9
1298 addze r11,r11
1299 #mul_add_c(a[4],b[4],c3,c1,c2);
1300 $LD r6,`4*$BNSZ`(r4)
1301 $LD r7,`4*$BNSZ`(r5)
1302 $UMULL r8,r6,r7
1303 $UMULH r9,r6,r7
1304 addc r12,r12,r8
1305 adde r10,r10,r9
1306 addze r11,r11
1307 #mul_add_c(a[3],b[5],c3,c1,c2);
1308 $LD r6,`3*$BNSZ`(r4)
1309 $LD r7,`5*$BNSZ`(r5)
1310 $UMULL r8,r6,r7
1311 $UMULH r9,r6,r7
1312 addc r12,r12,r8
1313 adde r10,r10,r9
1314 addze r11,r11
1315 #mul_add_c(a[2],b[6],c3,c1,c2);
1316 $LD r6,`2*$BNSZ`(r4)
1317 $LD r7,`6*$BNSZ`(r5)
1318 $UMULL r8,r6,r7
1319 $UMULH r9,r6,r7
1320 addc r12,r12,r8
1321 adde r10,r10,r9
1322 addze r11,r11
1323 #mul_add_c(a[1],b[7],c3,c1,c2);
1324 $LD r6,`1*$BNSZ`(r4)
1325 $LD r7,`7*$BNSZ`(r5)
1326 $UMULL r8,r6,r7
1327 $UMULH r9,r6,r7
1328 addc r12,r12,r8
1329 adde r10,r10,r9
1330 addze r11,r11
1331 $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
1332 #mul_add_c(a[2],b[7],c1,c2,c3);
1333 $LD r6,`2*$BNSZ`(r4)
1334 $UMULL r8,r6,r7
1335 $UMULH r9,r6,r7
1336 addc r10,r10,r8
1337 adde r11,r11,r9
1338 addze r12,r0
1339 #mul_add_c(a[3],b[6],c1,c2,c3);
1340 $LD r6,`3*$BNSZ`(r4)
1341 $LD r7,`6*$BNSZ`(r5)
1342 $UMULL r8,r6,r7
1343 $UMULH r9,r6,r7
1344 addc r10,r10,r8
1345 adde r11,r11,r9
1346 addze r12,r12
1347 #mul_add_c(a[4],b[5],c1,c2,c3);
1348 $LD r6,`4*$BNSZ`(r4)
1349 $LD r7,`5*$BNSZ`(r5)
1350 $UMULL r8,r6,r7
1351 $UMULH r9,r6,r7
1352 addc r10,r10,r8
1353 adde r11,r11,r9
1354 addze r12,r12
1355 #mul_add_c(a[5],b[4],c1,c2,c3);
1356 $LD r6,`5*$BNSZ`(r4)
1357 $LD r7,`4*$BNSZ`(r5)
1358 $UMULL r8,r6,r7
1359 $UMULH r9,r6,r7
1360 addc r10,r10,r8
1361 adde r11,r11,r9
1362 addze r12,r12
1363 #mul_add_c(a[6],b[3],c1,c2,c3);
1364 $LD r6,`6*$BNSZ`(r4)
1365 $LD r7,`3*$BNSZ`(r5)
1366 $UMULL r8,r6,r7
1367 $UMULH r9,r6,r7
1368 addc r10,r10,r8
1369 adde r11,r11,r9
1370 addze r12,r12
1371 #mul_add_c(a[7],b[2],c1,c2,c3);
1372 $LD r6,`7*$BNSZ`(r4)
1373 $LD r7,`2*$BNSZ`(r5)
1374 $UMULL r8,r6,r7
1375 $UMULH r9,r6,r7
1376 addc r10,r10,r8
1377 adde r11,r11,r9
1378 addze r12,r12
1379 $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
1380 #mul_add_c(a[7],b[3],c2,c3,c1);
1381 $LD r7,`3*$BNSZ`(r5)
1382 $UMULL r8,r6,r7
1383 $UMULH r9,r6,r7
1384 addc r11,r11,r8
1385 adde r12,r12,r9
1386 addze r10,r0
1387 #mul_add_c(a[6],b[4],c2,c3,c1);
1388 $LD r6,`6*$BNSZ`(r4)
1389 $LD r7,`4*$BNSZ`(r5)
1390 $UMULL r8,r6,r7
1391 $UMULH r9,r6,r7
1392 addc r11,r11,r8
1393 adde r12,r12,r9
1394 addze r10,r10
1395 #mul_add_c(a[5],b[5],c2,c3,c1);
1396 $LD r6,`5*$BNSZ`(r4)
1397 $LD r7,`5*$BNSZ`(r5)
1398 $UMULL r8,r6,r7
1399 $UMULH r9,r6,r7
1400 addc r11,r11,r8
1401 adde r12,r12,r9
1402 addze r10,r10
1403 #mul_add_c(a[4],b[6],c2,c3,c1);
1404 $LD r6,`4*$BNSZ`(r4)
1405 $LD r7,`6*$BNSZ`(r5)
1406 $UMULL r8,r6,r7
1407 $UMULH r9,r6,r7
1408 addc r11,r11,r8
1409 adde r12,r12,r9
1410 addze r10,r10
1411 #mul_add_c(a[3],b[7],c2,c3,c1);
1412 $LD r6,`3*$BNSZ`(r4)
1413 $LD r7,`7*$BNSZ`(r5)
1414 $UMULL r8,r6,r7
1415 $UMULH r9,r6,r7
1416 addc r11,r11,r8
1417 adde r12,r12,r9
1418 addze r10,r10
1419 $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
1420 #mul_add_c(a[4],b[7],c3,c1,c2);
1421 $LD r6,`4*$BNSZ`(r4)
1422 $UMULL r8,r6,r7
1423 $UMULH r9,r6,r7
1424 addc r12,r12,r8
1425 adde r10,r10,r9
1426 addze r11,r0
1427 #mul_add_c(a[5],b[6],c3,c1,c2);
1428 $LD r6,`5*$BNSZ`(r4)
1429 $LD r7,`6*$BNSZ`(r5)
1430 $UMULL r8,r6,r7
1431 $UMULH r9,r6,r7
1432 addc r12,r12,r8
1433 adde r10,r10,r9
1434 addze r11,r11
1435 #mul_add_c(a[6],b[5],c3,c1,c2);
1436 $LD r6,`6*$BNSZ`(r4)
1437 $LD r7,`5*$BNSZ`(r5)
1438 $UMULL r8,r6,r7
1439 $UMULH r9,r6,r7
1440 addc r12,r12,r8
1441 adde r10,r10,r9
1442 addze r11,r11
1443 #mul_add_c(a[7],b[4],c3,c1,c2);
1444 $LD r6,`7*$BNSZ`(r4)
1445 $LD r7,`4*$BNSZ`(r5)
1446 $UMULL r8,r6,r7
1447 $UMULH r9,r6,r7
1448 addc r12,r12,r8
1449 adde r10,r10,r9
1450 addze r11,r11
1451 $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
1452 #mul_add_c(a[7],b[5],c1,c2,c3);
1453 $LD r7,`5*$BNSZ`(r5)
1454 $UMULL r8,r6,r7
1455 $UMULH r9,r6,r7
1456 addc r10,r10,r8
1457 adde r11,r11,r9
1458 addze r12,r0
1459 #mul_add_c(a[6],b[6],c1,c2,c3);
1460 $LD r6,`6*$BNSZ`(r4)
1461 $LD r7,`6*$BNSZ`(r5)
1462 $UMULL r8,r6,r7
1463 $UMULH r9,r6,r7
1464 addc r10,r10,r8
1465 adde r11,r11,r9
1466 addze r12,r12
1467 #mul_add_c(a[5],b[7],c1,c2,c3);
1468 $LD r6,`5*$BNSZ`(r4)
1469 $LD r7,`7*$BNSZ`(r5)
1470 $UMULL r8,r6,r7
1471 $UMULH r9,r6,r7
1472 addc r10,r10,r8
1473 adde r11,r11,r9
1474 addze r12,r12
1475 $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
1476 #mul_add_c(a[6],b[7],c2,c3,c1);
1477 $LD r6,`6*$BNSZ`(r4)
1478 $UMULL r8,r6,r7
1479 $UMULH r9,r6,r7
1480 addc r11,r11,r8
1481 adde r12,r12,r9
1482 addze r10,r0
1483 #mul_add_c(a[7],b[6],c2,c3,c1);
1484 $LD r6,`7*$BNSZ`(r4)
1485 $LD r7,`6*$BNSZ`(r5)
1486 $UMULL r8,r6,r7
1487 $UMULH r9,r6,r7
1488 addc r11,r11,r8
1489 adde r12,r12,r9
1490 addze r10,r10
1491 $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
1492 #mul_add_c(a[7],b[7],c3,c1,c2);
1493 $LD r7,`7*$BNSZ`(r5)
1494 $UMULL r8,r6,r7
1495 $UMULH r9,r6,r7
1496 addc r12,r12,r8
1497 adde r10,r10,r9
1498 $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
1499 $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
1500 blr
1501
1502#
1503# NOTE: The following label name should be changed to
1504# "bn_sub_words" i.e. remove the first dot
1505# for the gcc compiler. This should be automatically
1506# done in the build
1507#
1508#
1509.align 4
1510.bn_sub_words:
1511#
1512# Handcoded version of bn_sub_words
1513#
1514#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
1515#
1516# r3 = r
1517# r4 = a
1518# r5 = b
1519# r6 = n
1520#
1521# Note: No loop unrolling done since this is not a performance
1522# critical loop.
1523
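#
# For reference, the C-level semantics (illustrative code, not taken from
# this file): r[i] = a[i] - b[i] - borrow, with the final borrow returned.
# The assembly keeps the borrow in the CA bit, where the PPC convention is
# that CA=1 means "no borrow"; hence the subfc./subfe pair and the final
# subfze/andi. that turn CA back into a 0-or-1 return value.
#
#	BN_ULONG bn_sub_words_ref(BN_ULONG *r, const BN_ULONG *a,
#	    const BN_ULONG *b, int n)
#	{
#		BN_ULONG borrow = 0, t;
#		int i;
#
#		for (i = 0; i < n; i++) {
#			t = a[i] - borrow;
#			borrow = (t > a[i]);	/* borrowed subtracting old borrow */
#			r[i] = t - b[i];
#			borrow |= (r[i] > t);	/* borrowed subtracting b[i] */
#		}
#		return borrow;
#	}
#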
1524 xor r0,r0,r0 #set r0 = 0
1525#
1526# check for r6 = 0 AND set carry bit.
1527#
1528 subfc. r7,r0,r6 # If r6 is 0 then result is 0.
1529 # if r6 > 0 then result !=0
1530 # In either case carry bit is set.
1531 beq Lppcasm_sub_adios
1532 addi r4,r4,-$BNSZ
1533 addi r3,r3,-$BNSZ
1534 addi r5,r5,-$BNSZ
1535 mtctr r6
1536Lppcasm_sub_mainloop:
1537 $LDU r7,$BNSZ(r4)
1538 $LDU r8,$BNSZ(r5)
1539 subfe r6,r8,r7 # r6 = r7 + carry bit + ones' complement of r8.
1540 # If carry = 1 this is r7-r8; otherwise it
1541 # is r7-r8-1, as we need.
1542 $STU r6,$BNSZ(r3)
1543 bdnz- Lppcasm_sub_mainloop
1544Lppcasm_sub_adios:
1545 subfze r3,r0 # if carry bit is set then r3 = 0 else -1
1546 andi. r3,r3,1 # keep only last bit.
1547 blr
1548
1549#
1550# NOTE: The following label name should be changed to
1551# "bn_add_words" i.e. remove the first dot
1552# for the gcc compiler. This should be automatically
1553# done in the build
1554#
1555
1556.align 4
1557.bn_add_words:
1558#
1559# Handcoded version of bn_add_words
1560#
1561#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
1562#
1563# r3 = r
1564# r4 = a
1565# r5 = b
1566# r6 = n
1567#
1568# Note: No loop unrolling done since this is not a performance
1569# critical loop.
1570
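#
# For reference, the C-level semantics (illustrative code, not taken from
# this file): r[i] = a[i] + b[i] + carry, with the final carry returned.
#
#	BN_ULONG bn_add_words_ref(BN_ULONG *r, const BN_ULONG *a,
#	    const BN_ULONG *b, int n)
#	{
#		BN_ULONG carry = 0, t;
#		int i;
#
#		for (i = 0; i < n; i++) {
#			t = a[i] + carry;
#			carry = (t < carry);	/* wrapped adding carry */
#			r[i] = t + b[i];
#			carry |= (r[i] < t);	/* wrapped adding b[i] */
#		}
#		return carry;
#	}
#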
1571 xor r0,r0,r0
1572#
1573# check for r6 = 0. Is this needed?
1574#
1575 addic. r6,r6,0 #test r6 and clear carry bit.
1576 beq Lppcasm_add_adios
1577 addi r4,r4,-$BNSZ
1578 addi r3,r3,-$BNSZ
1579 addi r5,r5,-$BNSZ
1580 mtctr r6
1581Lppcasm_add_mainloop:
1582 $LDU r7,$BNSZ(r4)
1583 $LDU r8,$BNSZ(r5)
1584 adde r8,r7,r8
1585 $STU r8,$BNSZ(r3)
1586 bdnz- Lppcasm_add_mainloop
1587Lppcasm_add_adios:
1588 addze r3,r0 #return carry bit.
1589 blr
1590
1591#
1592# NOTE: The following label name should be changed to
1593# "bn_div_words" i.e. remove the first dot
1594# for the gcc compiler. This should be automatically
1595# done in the build
1596#
1597
1598.align 4
1599.bn_div_words:
1600#
1601# This is a cleaned up version of code generated by
1602# the AIX compiler. The only optimization is to use
1603# the PPC instruction to count leading zeros instead
1604# of call to num_bits_word. Since this was compiled
1605# only at level -O2 we can possibly squeeze it more?
1606#
1607# r3 = h
1608# r4 = l
1609# r5 = d
1610
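#
# What this computes, in illustrative C (assuming a double-width type
# dword_t exists; the assembly below has none, so it normalizes d and builds
# the quotient from two BN_BITS4-sized digits instead):
#
#	/* callers arrange h < d, so the quotient fits in one word */
#	BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
#	{
#		dword_t n;
#
#		if (d == 0)
#			return (BN_ULONG)-1;
#		n = ((dword_t)h << BN_BITS2) | l;
#		return (BN_ULONG)(n / d);
#	}
#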
1611 $UCMPI 0,r5,0 # compare r5 and 0
1612 bne Lppcasm_div1 # proceed if d!=0
1613 li r3,-1 # d=0 return -1
1614 blr
1615Lppcasm_div1:
1616 xor r0,r0,r0 #r0=0
1617 li r8,$BITS
1618 $CNTLZ. r7,r5 #r7 = num leading 0s in d.
1619 beq Lppcasm_div2 #proceed if no leading zeros
1620 subf r8,r7,r8 #r8 = BN_num_bits_word(d)
1621 $SHR. r9,r3,r8 #are there any bits above r8'th?
1622 $TR 16,r9,r0 #if there are, signal to dump core...
1623Lppcasm_div2:
1624 $UCMP 0,r3,r5 #h>=d?
1625 blt Lppcasm_div3 #goto Lppcasm_div3 if not
1626 subf r3,r5,r3 #h-=d ;
1627Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
1628 cmpi 0,0,r7,0 # is (i == 0)?
1629 beq Lppcasm_div4
1630 $SHL r3,r3,r7 # h = (h<< i)
1631 $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
1632 $SHL r5,r5,r7 # d<<=i
1633 or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
1634 $SHL r4,r4,r7 # l <<=i
1635Lppcasm_div4:
1636 $SHRI r9,r5,`$BITS/2` # r9 = dh
1637 # dl will be computed when needed
1638 # as it saves registers.
1639 li r6,2 #r6=2
1640 mtctr r6 #loop counter goes into CTR.
1641Lppcasm_divouterloop:
1642 $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
1643 $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
1644 # compute here for innerloop.
1645 $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
1646 bne Lppcasm_div5 # goto Lppcasm_div5 if not
1647
1648 li r8,-1
1649 $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
1650 b Lppcasm_div6
1651Lppcasm_div5:
1652 $UDIV r8,r3,r9 #q = h/dh
1653Lppcasm_div6:
1654 $UMULL r12,r9,r8 #th = q*dh
1655 $CLRU r10,r5,`$BITS/2` #r10=dl
1656 $UMULL r6,r8,r10 #tl = q*dl
1657
1658Lppcasm_divinnerloop:
1659 subf r10,r12,r3 #t = h -th
1660 $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
1661 addic. r7,r7,0 #test if r7 == 0. used below.
1662 # now want to compute
1663 # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
1664 # the following 2 instructions do that
1665 $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
1666 or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
1667 $UCMP cr1,r6,r7 # compare (tl <= r7)
1668 bne Lppcasm_divinnerexit
1669 ble cr1,Lppcasm_divinnerexit
1670 addi r8,r8,-1 #q--
1671 subf r12,r9,r12 #th -=dh
1672 $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
1673 subf r6,r10,r6 #tl -=dl
1674 b Lppcasm_divinnerloop
1675Lppcasm_divinnerexit:
1676 $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
1677 $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
1678 $UCMP cr1,r4,r11 # compare l and tl
1679 add r12,r12,r10 # th+=t
1680 bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
1681 addi r12,r12,1 # th++
1682Lppcasm_div7:
1683 subf r11,r11,r4 #r11=l-tl
1684 $UCMP cr1,r3,r12 #compare h and th
1685 bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
1686 addi r8,r8,-1 # q--
1687 add r3,r5,r3 # h+=d
1688Lppcasm_div8:
1689 subf r12,r12,r3 #r12 = h-th
1690 $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
1691 # want to compute
1692 # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
1693 # the following 2 instructions will do this.
1694 $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
1695 $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
1696 bdz Lppcasm_div9 #if (count==0) break ;
1697 $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
1698 b Lppcasm_divouterloop
1699Lppcasm_div9:
1700 or r3,r8,r0
1701 blr
1702
1703#
1704# NOTE: The following label name should be changed to
1705# "bn_sqr_words" i.e. remove the first dot
1706# for the gcc compiler. This should be automatically
1707# done in the build
1708#
1709.align 4
1710.bn_sqr_words:
1711#
1712# Optimized version of bn_sqr_words
1713#
1714# void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
1715#
1716# r3 = r
1717# r4 = a
1718# r5 = n
1719#
1720# r6 = a[i].
1721# r7,r8 = product.
1722#
1723# No unrolling done here. Not performance critical.
1724
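#
# C-level semantics (illustrative, dword_t being an assumed double-width
# type): each a[i] is squared into a double-word result, low half first.
#
#	void bn_sqr_words_ref(BN_ULONG *r, const BN_ULONG *a, int n)
#	{
#		dword_t t;
#		int i;
#
#		for (i = 0; i < n; i++) {
#			t = (dword_t)a[i] * a[i];
#			r[2*i]   = (BN_ULONG)t;			/* UMULL */
#			r[2*i+1] = (BN_ULONG)(t >> BN_BITS2);	/* UMULH */
#		}
#	}
#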
1725 addic. r5,r5,0 #test r5.
1726 beq Lppcasm_sqr_adios
1727 addi r4,r4,-$BNSZ
1728 addi r3,r3,-$BNSZ
1729 mtctr r5
1730Lppcasm_sqr_mainloop:
1731 #sqr(r[0],r[1],a[0]);
1732 $LDU r6,$BNSZ(r4)
1733 $UMULL r7,r6,r6
1734 $UMULH r8,r6,r6
1735 $STU r7,$BNSZ(r3)
1736 $STU r8,$BNSZ(r3)
1737 bdnz- Lppcasm_sqr_mainloop
1738Lppcasm_sqr_adios:
1739 blr
1740
1741#
1742# NOTE: The following label name should be changed to
1743# "bn_mul_words" i.e. remove the first dot
1744# for the gcc compiler. This should be automatically
1745# done in the build
1746#
1747
1748.align 4
1749.bn_mul_words:
1750#
1751# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
1752#
1753# r3 = rp
1754# r4 = ap
1755# r5 = num
1756# r6 = w
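#
# C-level semantics (illustrative, dword_t being an assumed double-width
# type): rp[i] gets the low word of ap[i]*w plus the running carry, the high
# word becomes the next carry, and the final carry is returned.  The
# unrolled loop below keeps that carry flowing through the CA bit
# (addc/adde) four words at a time.
#
#	BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
#	    int num, BN_ULONG w)
#	{
#		BN_ULONG carry = 0;
#		dword_t t;
#		int i;
#
#		for (i = 0; i < num; i++) {
#			t = (dword_t)ap[i] * w + carry;
#			rp[i] = (BN_ULONG)t;
#			carry = (BN_ULONG)(t >> BN_BITS2);
#		}
#		return carry;
#	}
#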
1757 xor r0,r0,r0
1758 xor r12,r12,r12 # used for carry
1759 rlwinm. r7,r5,30,2,31 # num >> 2
1760 beq Lppcasm_mw_REM
1761 mtctr r7
1762Lppcasm_mw_LOOP:
1763 #mul(rp[0],ap[0],w,c1);
1764 $LD r8,`0*$BNSZ`(r4)
1765 $UMULL r9,r6,r8
1766 $UMULH r10,r6,r8
1767 addc r9,r9,r12
1768 #addze r10,r10 #carry is NOT ignored.
1769 #will be taken care of
1770 #in second spin below
1771 #using adde.
1772 $ST r9,`0*$BNSZ`(r3)
1773 #mul(rp[1],ap[1],w,c1);
1774 $LD r8,`1*$BNSZ`(r4)
1775 $UMULL r11,r6,r8
1776 $UMULH r12,r6,r8
1777 adde r11,r11,r10
1778 #addze r12,r12
1779 $ST r11,`1*$BNSZ`(r3)
1780 #mul(rp[2],ap[2],w,c1);
1781 $LD r8,`2*$BNSZ`(r4)
1782 $UMULL r9,r6,r8
1783 $UMULH r10,r6,r8
1784 adde r9,r9,r12
1785 #addze r10,r10
1786 $ST r9,`2*$BNSZ`(r3)
1787 #mul_add(rp[3],ap[3],w,c1);
1788 $LD r8,`3*$BNSZ`(r4)
1789 $UMULL r11,r6,r8
1790 $UMULH r12,r6,r8
1791 adde r11,r11,r10
1792 addze r12,r12 #this spin we collect carry into
1793 #r12
1794 $ST r11,`3*$BNSZ`(r3)
1795
1796 addi r3,r3,`4*$BNSZ`
1797 addi r4,r4,`4*$BNSZ`
1798 bdnz- Lppcasm_mw_LOOP
1799
1800Lppcasm_mw_REM:
1801 andi. r5,r5,0x3
1802 beq Lppcasm_mw_OVER
1803 #mul(rp[0],ap[0],w,c1);
1804 $LD r8,`0*$BNSZ`(r4)
1805 $UMULL r9,r6,r8
1806 $UMULH r10,r6,r8
1807 addc r9,r9,r12
1808 addze r10,r10
1809 $ST r9,`0*$BNSZ`(r3)
1810 addi r12,r10,0
1811
1812 addi r5,r5,-1
1813 cmpli 0,0,r5,0
1814 beq Lppcasm_mw_OVER
1815
1816
1817 #mul(rp[1],ap[1],w,c1);
1818 $LD r8,`1*$BNSZ`(r4)
1819 $UMULL r9,r6,r8
1820 $UMULH r10,r6,r8
1821 addc r9,r9,r12
1822 addze r10,r10
1823 $ST r9,`1*$BNSZ`(r3)
1824 addi r12,r10,0
1825
1826 addi r5,r5,-1
1827 cmpli 0,0,r5,0
1828 beq Lppcasm_mw_OVER
1829
1830 #mul_add(rp[2],ap[2],w,c1);
1831 $LD r8,`2*$BNSZ`(r4)
1832 $UMULL r9,r6,r8
1833 $UMULH r10,r6,r8
1834 addc r9,r9,r12
1835 addze r10,r10
1836 $ST r9,`2*$BNSZ`(r3)
1837 addi r12,r10,0
1838
1839Lppcasm_mw_OVER:
1840 addi r3,r12,0
1841 blr
1842
1843#
1844# NOTE: The following label name should be changed to
1845# "bn_mul_add_words" i.e. remove the first dot
1846# for the gcc compiler. This should be automatically
1847# done in the build
1848#
1849
1850.align 4
1851.bn_mul_add_words:
1852#
1853# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
1854#
1855# r3 = rp
1856# r4 = ap
1857# r5 = num
1858# r6 = w
1859#
1860# Empirical evidence suggests that the unrolled version performs best.
1861#
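#
# C-level semantics (illustrative, dword_t being an assumed double-width
# type): like bn_mul_words, but the product is also added into the existing
# rp[i]; the final carry is returned.
#
#	BN_ULONG bn_mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
#	    int num, BN_ULONG w)
#	{
#		BN_ULONG carry = 0;
#		dword_t t;
#		int i;
#
#		for (i = 0; i < num; i++) {
#			t = (dword_t)ap[i] * w + carry + rp[i];
#			rp[i] = (BN_ULONG)t;
#			carry = (BN_ULONG)(t >> BN_BITS2);
#		}
#		return carry;
#	}
#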
1862 xor r0,r0,r0 #r0 = 0
1863 xor r12,r12,r12 #r12 = 0 . used for carry
1864 rlwinm. r7,r5,30,2,31 # num >> 2
1865 beq Lppcasm_maw_leftover # if (num < 4) goto Lppcasm_maw_leftover
1866 mtctr r7
1867Lppcasm_maw_mainloop:
1868 #mul_add(rp[0],ap[0],w,c1);
1869 $LD r8,`0*$BNSZ`(r4)
1870 $LD r11,`0*$BNSZ`(r3)
1871 $UMULL r9,r6,r8
1872 $UMULH r10,r6,r8
1873 addc r9,r9,r12 #r12 is carry.
1874 addze r10,r10
1875 addc r9,r9,r11
1876 #addze r10,r10
1877 #the above instruction addze
1878 #is NOT needed. Carry will NOT
1879 #be ignored. It's not affected
1880 #by multiply and will be collected
1881 #in the next spin
1882 $ST r9,`0*$BNSZ`(r3)
1883
1884 #mul_add(rp[1],ap[1],w,c1);
1885 $LD r8,`1*$BNSZ`(r4)
1886 $LD r9,`1*$BNSZ`(r3)
1887 $UMULL r11,r6,r8
1888 $UMULH r12,r6,r8
1889 adde r11,r11,r10 #r10 is carry.
1890 addze r12,r12
1891 addc r11,r11,r9
1892 #addze r12,r12
1893 $ST r11,`1*$BNSZ`(r3)
1894
1895 #mul_add(rp[2],ap[2],w,c1);
1896 $LD r8,`2*$BNSZ`(r4)
1897 $UMULL r9,r6,r8
1898 $LD r11,`2*$BNSZ`(r3)
1899 $UMULH r10,r6,r8
1900 adde r9,r9,r12
1901 addze r10,r10
1902 addc r9,r9,r11
1903 #addze r10,r10
1904 $ST r9,`2*$BNSZ`(r3)
1905
1906 #mul_add(rp[3],ap[3],w,c1);
1907 $LD r8,`3*$BNSZ`(r4)
1908 $UMULL r11,r6,r8
1909 $LD r9,`3*$BNSZ`(r3)
1910 $UMULH r12,r6,r8
1911 adde r11,r11,r10
1912 addze r12,r12
1913 addc r11,r11,r9
1914 addze r12,r12
1915 $ST r11,`3*$BNSZ`(r3)
1916 addi r3,r3,`4*$BNSZ`
1917 addi r4,r4,`4*$BNSZ`
1918 bdnz- Lppcasm_maw_mainloop
1919
1920Lppcasm_maw_leftover:
1921 andi. r5,r5,0x3
1922 beq Lppcasm_maw_adios
1923 addi r3,r3,-$BNSZ
1924 addi r4,r4,-$BNSZ
1925 #mul_add(rp[0],ap[0],w,c1);
1926 mtctr r5
1927 $LDU r8,$BNSZ(r4)
1928 $UMULL r9,r6,r8
1929 $UMULH r10,r6,r8
1930 $LDU r11,$BNSZ(r3)
1931 addc r9,r9,r11
1932 addze r10,r10
1933 addc r9,r9,r12
1934 addze r12,r10
1935 $ST r9,0(r3)
1936
1937 bdz Lppcasm_maw_adios
1938 #mul_add(rp[1],ap[1],w,c1);
1939 $LDU r8,$BNSZ(r4)
1940 $UMULL r9,r6,r8
1941 $UMULH r10,r6,r8
1942 $LDU r11,$BNSZ(r3)
1943 addc r9,r9,r11
1944 addze r10,r10
1945 addc r9,r9,r12
1946 addze r12,r10
1947 $ST r9,0(r3)
1948
1949 bdz Lppcasm_maw_adios
1950 #mul_add(rp[2],ap[2],w,c1);
1951 $LDU r8,$BNSZ(r4)
1952 $UMULL r9,r6,r8
1953 $UMULH r10,r6,r8
1954 $LDU r11,$BNSZ(r3)
1955 addc r9,r9,r11
1956 addze r10,r10
1957 addc r9,r9,r12
1958 addze r12,r10
1959 $ST r9,0(r3)
1960
1961Lppcasm_maw_adios:
1962 addi r3,r12,0
1963 blr
1964 .align 4
1965EOF
1966$data =~ s/\`([^\`]*)\`/eval $1/gem;
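# To make the substitution above concrete: with a 32-bit flavour ($BNSZ == 4)
# a template line such as
#	$LD	r6,`3*$BNSZ`(r4)
# was interpolated into $data by the heredoc as
#	lwz	r6,`3*4`(r4)
# and evaluating the backticked expression leaves the final
#	lwz	r6,12(r4)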
1967print $data;
1968close STDOUT;