From 5a3c0a05c7f2c5d3c584b7c8d6aec836dd724c80 Mon Sep 17 00:00:00 2001 From: djm <> Date: Sat, 6 Sep 2008 12:15:56 +0000 Subject: import of OpenSSL 0.9.8h --- src/lib/libcrypto/bn/asm/bn-586.pl | 86 ++++++++++++- src/lib/libcrypto/bn/asm/ia64.S | 35 +++--- src/lib/libcrypto/bn/asm/x86_64-gcc.c | 4 + src/lib/libcrypto/bn/asm/x86_64-mont.pl | 214 ++++++++++++++++++++++++++++++++ 4 files changed, 317 insertions(+), 22 deletions(-) create mode 100755 src/lib/libcrypto/bn/asm/x86_64-mont.pl (limited to 'src/lib/libcrypto/bn/asm') diff --git a/src/lib/libcrypto/bn/asm/bn-586.pl b/src/lib/libcrypto/bn/asm/bn-586.pl index c4de4a2bee..26c2685a72 100644 --- a/src/lib/libcrypto/bn/asm/bn-586.pl +++ b/src/lib/libcrypto/bn/asm/bn-586.pl @@ -5,13 +5,18 @@ require "x86asm.pl"; &asm_init($ARGV[0],$0); +$sse2=0; +for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } + +&external_label("OPENSSL_ia32cap_P") if ($sse2); + &bn_mul_add_words("bn_mul_add_words"); &bn_mul_words("bn_mul_words"); &bn_sqr_words("bn_sqr_words"); &bn_div_words("bn_div_words"); &bn_add_words("bn_add_words"); &bn_sub_words("bn_sub_words"); -#&bn_sub_part_words("bn_sub_part_words"); +&bn_sub_part_words("bn_sub_part_words"); &asm_finish(); @@ -19,7 +24,7 @@ sub bn_mul_add_words { local($name)=@_; - &function_begin($name,""); + &function_begin($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":""); &comment(""); $Low="eax"; @@ -42,6 +47,83 @@ sub bn_mul_add_words &jz(&label("maw_finish")); + if ($sse2) { + &picmeup("eax","OPENSSL_ia32cap_P"); + &bt(&DWP(0,"eax"),26); + &jnc(&label("maw_loop")); + + &movd("mm0",$w); # mm0 = w + &pxor("mm1","mm1"); # mm1 = carry_in + + &set_label("maw_sse2_loop",0); + &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0] + &paddq("mm1","mm3"); # mm1 = carry_in + r[0] + &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0] + &pmuludq("mm2","mm0"); # mm2 = w*a[0] + &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1] + &pmuludq("mm4","mm0"); # mm4 = w*a[1] + &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2] + &pmuludq("mm6","mm0"); # mm6 = w*a[2] + &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3] + &pmuludq("mm7","mm0"); # mm7 = w*a[3] + &paddq("mm1","mm2"); # mm1 = carry_in + r[0] + w*a[0] + &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1] + &paddq("mm3","mm4"); # mm3 = r[1] + w*a[1] + &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2] + &paddq("mm5","mm6"); # mm5 = r[2] + w*a[2] + &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3] + &paddq("mm7","mm4"); # mm7 = r[3] + w*a[3] + &movd(&DWP(0,$r,"",0),"mm1"); + &movd("mm2",&DWP(16,$a,"",0)); # mm2 = a[4] + &pmuludq("mm2","mm0"); # mm2 = w*a[4] + &psrlq("mm1",32); # mm1 = carry0 + &movd("mm4",&DWP(20,$a,"",0)); # mm4 = a[5] + &pmuludq("mm4","mm0"); # mm4 = w*a[5] + &paddq("mm1","mm3"); # mm1 = carry0 + r[1] + w*a[1] + &movd("mm6",&DWP(24,$a,"",0)); # mm6 = a[6] + &pmuludq("mm6","mm0"); # mm6 = w*a[6] + &movd(&DWP(4,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry1 + &movd("mm3",&DWP(28,$a,"",0)); # mm3 = a[7] + &add($a,32); + &pmuludq("mm3","mm0"); # mm3 = w*a[7] + &paddq("mm1","mm5"); # mm1 = carry1 + r[2] + w*a[2] + &movd("mm5",&DWP(16,$r,"",0)); # mm5 = r[4] + &paddq("mm2","mm5"); # mm2 = r[4] + w*a[4] + &movd(&DWP(8,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry2 + &paddq("mm1","mm7"); # mm1 = carry2 + r[3] + w*a[3] + &movd("mm5",&DWP(20,$r,"",0)); # mm5 = r[5] + &paddq("mm4","mm5"); # mm4 = r[5] + w*a[5] + &movd(&DWP(12,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry3 + &paddq("mm1","mm2"); # mm1 = carry3 + r[4] + w*a[4] + &movd("mm5",&DWP(24,$r,"",0)); # mm5 = r[6] + &paddq("mm6","mm5"); # mm6 = r[6] + w*a[6] + 
&movd(&DWP(16,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry4 + &paddq("mm1","mm4"); # mm1 = carry4 + r[5] + w*a[5] + &movd("mm5",&DWP(28,$r,"",0)); # mm5 = r[7] + &paddq("mm3","mm5"); # mm3 = r[7] + w*a[7] + &movd(&DWP(20,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry5 + &paddq("mm1","mm6"); # mm1 = carry5 + r[6] + w*a[6] + &movd(&DWP(24,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry6 + &paddq("mm1","mm3"); # mm1 = carry6 + r[7] + w*a[7] + &movd(&DWP(28,$r,"",0),"mm1"); + &add($r,32); + &psrlq("mm1",32); # mm1 = carry_out + + &sub("ecx",8); + &jnz(&label("maw_sse2_loop")); + + &movd($c,"mm1"); # c = carry_out + &emms(); + + &jmp(&label("maw_finish")); + } + &set_label("maw_loop",0); &mov(&swtmp(0),"ecx"); # diff --git a/src/lib/libcrypto/bn/asm/ia64.S b/src/lib/libcrypto/bn/asm/ia64.S index 7b82b820e6..951abc53ea 100644 --- a/src/lib/libcrypto/bn/asm/ia64.S +++ b/src/lib/libcrypto/bn/asm/ia64.S @@ -171,21 +171,21 @@ .skip 32 // makes the loop body aligned at 64-byte boundary bn_add_words: .prologue - .fframe 0 .save ar.pfs,r2 { .mii; alloc r2=ar.pfs,4,12,0,16 cmp4.le p6,p0=r35,r0 };; { .mfb; mov r8=r0 // return value (p6) br.ret.spnt.many b0 };; - .save ar.lc,r3 { .mib; sub r10=r35,r0,1 + .save ar.lc,r3 mov r3=ar.lc brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16 } - .body { .mib; ADDP r14=0,r32 // rp + .save pr,r9 mov r9=pr };; + .body { .mii; ADDP r15=0,r33 // ap mov ar.lc=r10 mov ar.ec=6 } @@ -224,21 +224,21 @@ bn_add_words: .skip 32 // makes the loop body aligned at 64-byte boundary bn_sub_words: .prologue - .fframe 0 .save ar.pfs,r2 { .mii; alloc r2=ar.pfs,4,12,0,16 cmp4.le p6,p0=r35,r0 };; { .mfb; mov r8=r0 // return value (p6) br.ret.spnt.many b0 };; - .save ar.lc,r3 { .mib; sub r10=r35,r0,1 + .save ar.lc,r3 mov r3=ar.lc brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16 } - .body { .mib; ADDP r14=0,r32 // rp + .save pr,r9 mov r9=pr };; + .body { .mii; ADDP r15=0,r33 // ap mov ar.lc=r10 mov ar.ec=6 } @@ -283,7 +283,6 @@ bn_sub_words: .skip 32 // makes the loop body aligned at 64-byte boundary bn_mul_words: .prologue - .fframe 0 .save ar.pfs,r2 #ifdef XMA_TEMPTATION { .mfi; alloc r2=ar.pfs,4,0,0,0 };; @@ -294,9 +293,10 @@ bn_mul_words: cmp4.le p6,p0=r34,r0 (p6) br.ret.spnt.many b0 };; - .save ar.lc,r3 { .mii; sub r10=r34,r0,1 + .save ar.lc,r3 mov r3=ar.lc + .save pr,r9 mov r9=pr };; .body @@ -397,22 +397,21 @@ bn_mul_words: .skip 48 // makes the loop body aligned at 64-byte boundary bn_mul_add_words: .prologue - .fframe 0 .save ar.pfs,r2 - .save ar.lc,r3 - .save pr,r9 { .mmi; alloc r2=ar.pfs,4,4,0,8 cmp4.le p6,p0=r34,r0 + .save ar.lc,r3 mov r3=ar.lc };; { .mib; mov r8=r0 // return value sub r10=r34,r0,1 (p6) br.ret.spnt.many b0 };; - .body { .mib; setf.sig f8=r35 // w + .save pr,r9 mov r9=pr brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16 } + .body { .mmi; ADDP r14=0,r32 // rp ADDP r15=0,r33 // ap mov ar.lc=r10 } @@ -466,7 +465,6 @@ bn_mul_add_words: .skip 32 // makes the loop body aligned at 64-byte boundary bn_sqr_words: .prologue - .fframe 0 .save ar.pfs,r2 { .mii; alloc r2=ar.pfs,3,0,0,0 sxt4 r34=r34 };; @@ -476,9 +474,10 @@ bn_sqr_words: nop.f 0x0 (p6) br.ret.spnt.many b0 };; - .save ar.lc,r3 { .mii; sub r10=r34,r0,1 + .save ar.lc,r3 mov r3=ar.lc + .save pr,r9 mov r9=pr };; .body @@ -545,7 +544,6 @@ bn_sqr_words: .align 64 bn_sqr_comba8: .prologue - .fframe 0 .save ar.pfs,r2 #if defined(_HPUX_SOURCE) && !defined(_LP64) { .mii; alloc r2=ar.pfs,2,1,0,0 @@ -617,7 +615,6 @@ bn_sqr_comba8: .align 64 bn_mul_comba8: .prologue - .fframe 0 
.save ar.pfs,r2 #if defined(_HPUX_SOURCE) && !defined(_LP64) { .mii; alloc r2=ar.pfs,3,0,0,0 @@ -1175,7 +1172,6 @@ bn_mul_comba8: .align 64 bn_sqr_comba4: .prologue - .fframe 0 .save ar.pfs,r2 #if defined(_HPUX_SOURCE) && !defined(_LP64) { .mii; alloc r2=ar.pfs,2,1,0,0 @@ -1208,7 +1204,6 @@ bn_sqr_comba4: .align 64 bn_mul_comba4: .prologue - .fframe 0 .save ar.pfs,r2 #if defined(_HPUX_SOURCE) && !defined(_LP64) { .mii; alloc r2=ar.pfs,3,0,0,0 @@ -1411,11 +1406,11 @@ equ=p24 .align 64 bn_div_words: .prologue - .fframe 0 .save ar.pfs,r2 - .save b0,r3 { .mii; alloc r2=ar.pfs,3,5,0,8 + .save b0,r3 mov r3=b0 + .save pr,r10 mov r10=pr };; { .mmb; cmp.eq p6,p0=r34,r0 mov r8=-1 diff --git a/src/lib/libcrypto/bn/asm/x86_64-gcc.c b/src/lib/libcrypto/bn/asm/x86_64-gcc.c index 7378344251..f13f52dd85 100644 --- a/src/lib/libcrypto/bn/asm/x86_64-gcc.c +++ b/src/lib/libcrypto/bn/asm/x86_64-gcc.c @@ -1,3 +1,6 @@ +#ifdef __SUNPRO_C +# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */ +#else /* * x86_64 BIGNUM accelerator version 0.1, December 2002. * @@ -591,3 +594,4 @@ void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a) r[6]=c1; r[7]=c2; } +#endif diff --git a/src/lib/libcrypto/bn/asm/x86_64-mont.pl b/src/lib/libcrypto/bn/asm/x86_64-mont.pl new file mode 100755 index 0000000000..c43b69592a --- /dev/null +++ b/src/lib/libcrypto/bn/asm/x86_64-mont.pl @@ -0,0 +1,214 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2005. +# +# Montgomery multiplication routine for x86_64. While it gives modest +# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more +# than twice, >2x, as fast. Most common rsa1024 sign is improved by +# respectful 50%. It remains to be seen if loop unrolling and +# dedicated squaring routine can provide further improvement... + +$output=shift; + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or +die "can't locate x86_64-xlate.pl"; + +open STDOUT,"| $^X $xlate $output"; + +# int bn_mul_mont( +$rp="%rdi"; # BN_ULONG *rp, +$ap="%rsi"; # const BN_ULONG *ap, +$bp="%rdx"; # const BN_ULONG *bp, +$np="%rcx"; # const BN_ULONG *np, +$n0="%r8"; # const BN_ULONG *n0, +$num="%r9"; # int num); +$lo0="%r10"; +$hi0="%r11"; +$bp="%r12"; # reassign $bp +$hi1="%r13"; +$i="%r14"; +$j="%r15"; +$m0="%rbx"; +$m1="%rbp"; + +$code=<<___; +.text + +.globl bn_mul_mont +.type bn_mul_mont,\@function,6 +.align 16 +bn_mul_mont: + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + + mov ${num}d,${num}d + lea 2($num),%rax + mov %rsp,%rbp + neg %rax + lea (%rsp,%rax,8),%rsp # tp=alloca(8*(num+2)) + and \$-1024,%rsp # minimize TLB usage + + mov %rbp,8(%rsp,$num,8) # tp[num+1]=%rsp + mov %rdx,$bp # $bp reassigned, remember? 
+ + mov ($n0),$n0 # pull n0[0] value + + xor $i,$i # i=0 + xor $j,$j # j=0 + + mov ($bp),$m0 # m0=bp[0] + mov ($ap),%rax + mulq $m0 # ap[0]*bp[0] + mov %rax,$lo0 + mov %rdx,$hi0 + + imulq $n0,%rax # "tp[0]"*n0 + mov %rax,$m1 + + mulq ($np) # np[0]*m1 + add $lo0,%rax # discarded + adc \$0,%rdx + mov %rdx,$hi1 + + lea 1($j),$j # j++ +.L1st: + mov ($ap,$j,8),%rax + mulq $m0 # ap[j]*bp[0] + add $hi0,%rax + adc \$0,%rdx + mov %rax,$lo0 + mov ($np,$j,8),%rax + mov %rdx,$hi0 + + mulq $m1 # np[j]*m1 + add $hi1,%rax + lea 1($j),$j # j++ + adc \$0,%rdx + add $lo0,%rax # np[j]*m1+ap[j]*bp[0] + adc \$0,%rdx + mov %rax,-16(%rsp,$j,8) # tp[j-1] + cmp $num,$j + mov %rdx,$hi1 + jl .L1st + + xor %rdx,%rdx + add $hi0,$hi1 + adc \$0,%rdx + mov $hi1,-8(%rsp,$num,8) + mov %rdx,(%rsp,$num,8) # store upmost overflow bit + + lea 1($i),$i # i++ +.align 4 +.Louter: + xor $j,$j # j=0 + + mov ($bp,$i,8),$m0 # m0=bp[i] + mov ($ap),%rax # ap[0] + mulq $m0 # ap[0]*bp[i] + add (%rsp),%rax # ap[0]*bp[i]+tp[0] + adc \$0,%rdx + mov %rax,$lo0 + mov %rdx,$hi0 + + imulq $n0,%rax # tp[0]*n0 + mov %rax,$m1 + + mulq ($np,$j,8) # np[0]*m1 + add $lo0,%rax # discarded + mov 8(%rsp),$lo0 # tp[1] + adc \$0,%rdx + mov %rdx,$hi1 + + lea 1($j),$j # j++ +.align 4 +.Linner: + mov ($ap,$j,8),%rax + mulq $m0 # ap[j]*bp[i] + add $hi0,%rax + adc \$0,%rdx + add %rax,$lo0 # ap[j]*bp[i]+tp[j] + mov ($np,$j,8),%rax + adc \$0,%rdx + mov %rdx,$hi0 + + mulq $m1 # np[j]*m1 + add $hi1,%rax + lea 1($j),$j # j++ + adc \$0,%rdx + add $lo0,%rax # np[j]*m1+ap[j]*bp[i]+tp[j] + adc \$0,%rdx + mov (%rsp,$j,8),$lo0 + cmp $num,$j + mov %rax,-16(%rsp,$j,8) # tp[j-1] + mov %rdx,$hi1 + jl .Linner + + xor %rdx,%rdx + add $hi0,$hi1 + adc \$0,%rdx + add $lo0,$hi1 # pull upmost overflow bit + adc \$0,%rdx + mov $hi1,-8(%rsp,$num,8) + mov %rdx,(%rsp,$num,8) # store upmost overflow bit + + lea 1($i),$i # i++ + cmp $num,$i + jl .Louter + + lea (%rsp),$ap # borrow ap for tp + lea -1($num),$j # j=num-1 + + mov ($ap),%rax # tp[0] + xor $i,$i # i=0 and clear CF! + jmp .Lsub +.align 16 +.Lsub: sbb ($np,$i,8),%rax + mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i] + dec $j # doesn't affect CF! + mov 8($ap,$i,8),%rax # tp[i+1] + lea 1($i),$i # i++ + jge .Lsub + + sbb \$0,%rax # handle upmost overflow bit + and %rax,$ap + not %rax + mov $rp,$np + and %rax,$np + lea -1($num),$j + or $np,$ap # ap=borrow?tp:rp +.align 16 +.Lcopy: # copy or in-place refresh + mov ($ap,$j,8),%rax + mov %rax,($rp,$j,8) # rp[i]=tp[i] + mov $i,(%rsp,$j,8) # zap temporary vector + dec $j + jge .Lcopy + + mov 8(%rsp,$num,8),%rsp # restore %rsp + mov \$1,%rax + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + ret +.size bn_mul_mont,.-bn_mul_mont +.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by " +___ + +print $code; +close STDOUT; -- cgit v1.2.3-55-g6feb
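
Note on the new SSE2 path in bn-586.pl above: when OPENSSL_ia32cap_P reports SSE2, bn_mul_add_words switches to an MMX loop that folds eight limbs per iteration, using pmuludq for the 32x32->64 multiplies and mm1 as the running carry. The C fragment below is only a reference model of what that loop computes, not OpenSSL source; the function name mul_add_words_ref and the 32-bit BN_ULONG typedef are assumptions of the sketch.

#include <stdint.h>

typedef uint32_t BN_ULONG;	/* 32-bit limb, as on i386 */

/*
 * Reference model (a sketch, not the OpenSSL code) of the work the
 * SSE2 loop performs: rp[i] += ap[i] * w with full carry propagation,
 * returning the final carry.  The MMX code computes the same values
 * eight limbs at a time; mm1 plays the role of "carry" below.
 */
static BN_ULONG
mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
{
	uint64_t carry = 0;
	int i;

	for (i = 0; i < num; i++) {
		/* 32x32->64 multiply (pmuludq), plus old limb and carry */
		uint64_t t = (uint64_t)ap[i] * w + rp[i] + carry;
		rp[i] = (BN_ULONG)t;	/* low half back into the result */
		carry = t >> 32;	/* high half becomes next carry  */
	}
	return (BN_ULONG)carry;
}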
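
Note on x86_64-mont.pl above: bn_mul_mont computes rp = ap*bp*R^-1 mod np in Montgomery form, with R = 2^(64*num) for 64-bit limbs and n0[0] = -np[0]^-1 mod 2^64. The sketch below mirrors the structure of the assembly — an outer loop per word of bp, an interleaved inner accumulation of ap[j]*bp[i] and np[j]*m1, then a final conditional subtraction — but uses 32-bit limbs so it stays portable C without a 128-bit type. The name mont_mul_ref, the MONT_MAX_WORDS cap, and passing n0 by value are assumptions of the sketch, not the real bn_mul_mont prototype.

#include <stdint.h>
#include <string.h>

typedef uint32_t WORD;		/* the asm uses 64-bit limbs; 32-bit here */
typedef uint64_t DWORD;		/* double-width accumulator               */

#define MONT_MAX_WORDS	64	/* arbitrary cap for this sketch */

/*
 * Word-level Montgomery multiplication, rp = ap*bp*R^-1 mod np,
 * R = 2^(32*num), n0 = -np[0]^-1 mod 2^32.  Same shape as the
 * assembly: per word of bp, accumulate ap*bp[i], derive m1 from the
 * low word, add np*m1 so that the low word becomes zero, shift down
 * one word; at the end subtract np once if the result still exceeds it.
 */
static int
mont_mul_ref(WORD *rp, const WORD *ap, const WORD *bp,
    const WORD *np, WORD n0, int num)
{
	WORD tp[MONT_MAX_WORDS + 2], m1;
	DWORD t, carry, borrow;
	int i, j;

	if (num <= 0 || num > MONT_MAX_WORDS)
		return 0;
	memset(tp, 0, sizeof(WORD) * (num + 2));

	for (i = 0; i < num; i++) {
		/* tp += ap * bp[i] */
		carry = 0;
		for (j = 0; j < num; j++) {
			t = (DWORD)ap[j] * bp[i] + tp[j] + carry;
			tp[j] = (WORD)t;
			carry = t >> 32;
		}
		t = (DWORD)tp[num] + carry;
		tp[num] = (WORD)t;
		tp[num + 1] = (WORD)(t >> 32);

		/* m1 = tp[0]*n0 mod 2^32; adding np*m1 zeroes tp[0] */
		m1 = (WORD)((DWORD)tp[0] * n0);
		carry = 0;
		for (j = 0; j < num; j++) {
			t = (DWORD)np[j] * m1 + tp[j] + carry;
			tp[j] = (WORD)t;	/* tp[0] ends up 0 */
			carry = t >> 32;
		}
		t = (DWORD)tp[num] + carry;
		tp[num] = (WORD)t;
		tp[num + 1] += (WORD)(t >> 32);

		/* divide by 2^32: drop the (zero) low word */
		for (j = 0; j <= num; j++)
			tp[j] = tp[j + 1];
		tp[num + 1] = 0;
	}

	/* conditional subtraction, like .Lsub/.Lcopy in the assembly */
	borrow = 0;
	for (j = 0; j < num; j++) {
		t = (DWORD)tp[j] - np[j] - borrow;
		rp[j] = (WORD)t;
		borrow = (t >> 32) & 1;
	}
	if (tp[num] < borrow)		/* tp < np: keep tp instead */
		memcpy(rp, tp, sizeof(WORD) * num);
	return 1;
}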