diff options
Diffstat (limited to 'src/lib/libcrypto/sha/asm')
| -rw-r--r-- | src/lib/libcrypto/sha/asm/README | 1 | ||||
| -rw-r--r-- | src/lib/libcrypto/sha/asm/sha1-586.pl | 219 | ||||
| -rw-r--r-- | src/lib/libcrypto/sha/asm/sha1-ia64.pl | 305 | ||||
| -rwxr-xr-x | src/lib/libcrypto/sha/asm/sha1-x86_64.pl | 242 | ||||
| -rwxr-xr-x | src/lib/libcrypto/sha/asm/sha512-ia64.pl | 672 | ||||
| -rw-r--r-- | src/lib/libcrypto/sha/asm/sha512-sse2.pl | 404 | ||||
| -rwxr-xr-x | src/lib/libcrypto/sha/asm/sha512-x86_64.pl | 344 |
7 files changed, 2187 insertions, 0 deletions
diff --git a/src/lib/libcrypto/sha/asm/README b/src/lib/libcrypto/sha/asm/README new file mode 100644 index 0000000000..b7e755765f --- /dev/null +++ b/src/lib/libcrypto/sha/asm/README | |||
| @@ -0,0 +1 @@ | |||
| C2.pl works | |||
diff --git a/src/lib/libcrypto/sha/asm/sha1-586.pl b/src/lib/libcrypto/sha/asm/sha1-586.pl new file mode 100644 index 0000000000..a787dd37da --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha1-586.pl | |||
| @@ -0,0 +1,219 @@ | |||
| 1 | #!/usr/bin/env perl | ||
| 2 | |||
| 3 | # ==================================================================== | ||
| 4 | # [Re]written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
| 5 | # project. The module is, however, dual licensed under OpenSSL and | ||
| 6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
| 7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
| 8 | # ==================================================================== | ||
| 9 | |||
| 10 | # "[Re]written" was achieved in two major overhauls. In 2004 BODY_* | ||
| 11 | # functions were re-implemented to address P4 performance issue [see | ||
| 12 | # commentary below], and in 2006 the rest was rewritten in order to | ||
| 13 | # gain freedom to liberate licensing terms. | ||
| 14 | |||
| 15 | # It was noted that Intel IA-32 C compiler generates code which | ||
| 16 | # performs ~30% *faster* on P4 CPU than original *hand-coded* | ||
| 17 | # SHA1 assembler implementation. To address this problem (and | ||
| 18 | # prove that humans are still better than machines:-), the | ||
| 19 | # original code was overhauled, which resulted in following | ||
| 20 | # performance changes: | ||
| 21 | # | ||
| 22 | # compared with original compared with Intel cc | ||
| 23 | # assembler impl. generated code | ||
| 24 | # Pentium -16% +48% | ||
| 25 | # PIII/AMD +8% +16% | ||
| 26 | # P4 +85%(!) +45% | ||
| 27 | # | ||
| 28 | # As you can see Pentium came out as looser:-( Yet I reckoned that | ||
| 29 | # improvement on P4 outweights the loss and incorporate this | ||
| 30 | # re-tuned code to 0.9.7 and later. | ||
| 31 | # ---------------------------------------------------------------- | ||
| 32 | # <appro@fy.chalmers.se> | ||
| 33 | |||
| 34 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
| 35 | push(@INC,"${dir}","${dir}../../perlasm"); | ||
| 36 | require "x86asm.pl"; | ||
| 37 | |||
| 38 | &asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386"); | ||
| 39 | |||
| 40 | $A="eax"; | ||
| 41 | $B="ebx"; | ||
| 42 | $C="ecx"; | ||
| 43 | $D="edx"; | ||
| 44 | $E="edi"; | ||
| 45 | $T="esi"; | ||
| 46 | $tmp1="ebp"; | ||
| 47 | |||
| 48 | @V=($A,$B,$C,$D,$E,$T); | ||
| 49 | |||
| 50 | sub BODY_00_15 | ||
| 51 | { | ||
| 52 | local($n,$a,$b,$c,$d,$e,$f)=@_; | ||
| 53 | |||
| 54 | &comment("00_15 $n"); | ||
| 55 | |||
| 56 | &mov($f,$c); # f to hold F_00_19(b,c,d) | ||
| 57 | if ($n==0) { &mov($tmp1,$a); } | ||
| 58 | else { &mov($a,$tmp1); } | ||
| 59 | &rotl($tmp1,5); # tmp1=ROTATE(a,5) | ||
| 60 | &xor($f,$d); | ||
| 61 | &add($tmp1,$e); # tmp1+=e; | ||
| 62 | &and($f,$b); | ||
| 63 | &mov($e,&swtmp($n%16)); # e becomes volatile and is loaded | ||
| 64 | # with xi, also note that e becomes | ||
| 65 | # f in next round... | ||
| 66 | &xor($f,$d); # f holds F_00_19(b,c,d) | ||
| 67 | &rotr($b,2); # b=ROTATE(b,30) | ||
| 68 | &lea($tmp1,&DWP(0x5a827999,$tmp1,$e)); # tmp1+=K_00_19+xi | ||
| 69 | |||
| 70 | if ($n==15) { &add($f,$tmp1); } # f+=tmp1 | ||
| 71 | else { &add($tmp1,$f); } # f becomes a in next round | ||
| 72 | } | ||
| 73 | |||
| 74 | sub BODY_16_19 | ||
| 75 | { | ||
| 76 | local($n,$a,$b,$c,$d,$e,$f)=@_; | ||
| 77 | |||
| 78 | &comment("16_19 $n"); | ||
| 79 | |||
| 80 | &mov($f,&swtmp($n%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) | ||
| 81 | &mov($tmp1,$c); # tmp1 to hold F_00_19(b,c,d) | ||
| 82 | &xor($f,&swtmp(($n+2)%16)); | ||
| 83 | &xor($tmp1,$d); | ||
| 84 | &xor($f,&swtmp(($n+8)%16)); | ||
| 85 | &and($tmp1,$b); # tmp1 holds F_00_19(b,c,d) | ||
| 86 | &rotr($b,2); # b=ROTATE(b,30) | ||
| 87 | &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd | ||
| 88 | &rotl($f,1); # f=ROTATE(f,1) | ||
| 89 | &xor($tmp1,$d); # tmp1=F_00_19(b,c,d) | ||
| 90 | &mov(&swtmp($n%16),$f); # xi=f | ||
| 91 | &lea($f,&DWP(0x5a827999,$f,$e));# f+=K_00_19+e | ||
| 92 | &mov($e,$a); # e becomes volatile | ||
| 93 | &rotl($e,5); # e=ROTATE(a,5) | ||
| 94 | &add($f,$tmp1); # f+=F_00_19(b,c,d) | ||
| 95 | &add($f,$e); # f+=ROTATE(a,5) | ||
| 96 | } | ||
| 97 | |||
| 98 | sub BODY_20_39 | ||
| 99 | { | ||
| 100 | local($n,$a,$b,$c,$d,$e,$f)=@_; | ||
| 101 | local $K=($n<40)?0x6ed9eba1:0xca62c1d6; | ||
| 102 | |||
| 103 | &comment("20_39 $n"); | ||
| 104 | |||
| 105 | &mov($tmp1,$b); # tmp1 to hold F_20_39(b,c,d) | ||
| 106 | &mov($f,&swtmp($n%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) | ||
| 107 | &rotr($b,2); # b=ROTATE(b,30) | ||
| 108 | &xor($f,&swtmp(($n+2)%16)); | ||
| 109 | &xor($tmp1,$c); | ||
| 110 | &xor($f,&swtmp(($n+8)%16)); | ||
| 111 | &xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d) | ||
| 112 | &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd | ||
| 113 | &rotl($f,1); # f=ROTATE(f,1) | ||
| 114 | &add($tmp1,$e); | ||
| 115 | &mov(&swtmp($n%16),$f); # xi=f | ||
| 116 | &mov($e,$a); # e becomes volatile | ||
| 117 | &rotl($e,5); # e=ROTATE(a,5) | ||
| 118 | &lea($f,&DWP($K,$f,$tmp1)); # f+=K_20_39+e | ||
| 119 | &add($f,$e); # f+=ROTATE(a,5) | ||
| 120 | } | ||
| 121 | |||
| 122 | sub BODY_40_59 | ||
| 123 | { | ||
| 124 | local($n,$a,$b,$c,$d,$e,$f)=@_; | ||
| 125 | |||
| 126 | &comment("40_59 $n"); | ||
| 127 | |||
| 128 | &mov($f,&swtmp($n%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) | ||
| 129 | &mov($tmp1,&swtmp(($n+2)%16)); | ||
| 130 | &xor($f,$tmp1); | ||
| 131 | &mov($tmp1,&swtmp(($n+8)%16)); | ||
| 132 | &xor($f,$tmp1); | ||
| 133 | &mov($tmp1,&swtmp(($n+13)%16)); | ||
| 134 | &xor($f,$tmp1); # f holds xa^xb^xc^xd | ||
| 135 | &mov($tmp1,$b); # tmp1 to hold F_40_59(b,c,d) | ||
| 136 | &rotl($f,1); # f=ROTATE(f,1) | ||
| 137 | &or($tmp1,$c); | ||
| 138 | &mov(&swtmp($n%16),$f); # xi=f | ||
| 139 | &and($tmp1,$d); | ||
| 140 | &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e | ||
| 141 | &mov($e,$b); # e becomes volatile and is used | ||
| 142 | # to calculate F_40_59(b,c,d) | ||
| 143 | &rotr($b,2); # b=ROTATE(b,30) | ||
| 144 | &and($e,$c); | ||
| 145 | &or($tmp1,$e); # tmp1 holds F_40_59(b,c,d) | ||
| 146 | &mov($e,$a); | ||
| 147 | &rotl($e,5); # e=ROTATE(a,5) | ||
| 148 | &add($f,$tmp1); # f+=tmp1; | ||
| 149 | &add($f,$e); # f+=ROTATE(a,5) | ||
| 150 | } | ||
| 151 | |||
| 152 | &function_begin("sha1_block_data_order"); | ||
| 153 | &mov($tmp1,&wparam(0)); # SHA_CTX *c | ||
| 154 | &mov($T,&wparam(1)); # const void *input | ||
| 155 | &mov($A,&wparam(2)); # size_t num | ||
| 156 | &stack_push(16); # allocate X[16] | ||
| 157 | &shl($A,6); | ||
| 158 | &add($A,$T); | ||
| 159 | &mov(&wparam(2),$A); # pointer beyond the end of input | ||
| 160 | &mov($E,&DWP(16,$tmp1));# pre-load E | ||
| 161 | |||
| 162 | &set_label("loop",16); | ||
| 163 | |||
| 164 | # copy input chunk to X, but reversing byte order! | ||
| 165 | for ($i=0; $i<16; $i+=4) | ||
| 166 | { | ||
| 167 | &mov($A,&DWP(4*($i+0),$T)); | ||
| 168 | &mov($B,&DWP(4*($i+1),$T)); | ||
| 169 | &mov($C,&DWP(4*($i+2),$T)); | ||
| 170 | &mov($D,&DWP(4*($i+3),$T)); | ||
| 171 | &bswap($A); | ||
| 172 | &bswap($B); | ||
| 173 | &bswap($C); | ||
| 174 | &bswap($D); | ||
| 175 | &mov(&swtmp($i+0),$A); | ||
| 176 | &mov(&swtmp($i+1),$B); | ||
| 177 | &mov(&swtmp($i+2),$C); | ||
| 178 | &mov(&swtmp($i+3),$D); | ||
| 179 | } | ||
| 180 | &mov(&wparam(1),$T); # redundant in 1st spin | ||
| 181 | |||
| 182 | &mov($A,&DWP(0,$tmp1)); # load SHA_CTX | ||
| 183 | &mov($B,&DWP(4,$tmp1)); | ||
| 184 | &mov($C,&DWP(8,$tmp1)); | ||
| 185 | &mov($D,&DWP(12,$tmp1)); | ||
| 186 | # E is pre-loaded | ||
| 187 | |||
| 188 | for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); } | ||
| 189 | for(;$i<20;$i++) { &BODY_16_19($i,@V); unshift(@V,pop(@V)); } | ||
| 190 | for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } | ||
| 191 | for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } | ||
| 192 | for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } | ||
| 193 | |||
| 194 | (($V[5] eq $D) and ($V[0] eq $E)) or die; # double-check | ||
| 195 | |||
| 196 | &mov($tmp1,&wparam(0)); # re-load SHA_CTX* | ||
| 197 | &mov($D,&wparam(1)); # D is last "T" and is discarded | ||
| 198 | |||
| 199 | &add($E,&DWP(0,$tmp1)); # E is last "A"... | ||
| 200 | &add($T,&DWP(4,$tmp1)); | ||
| 201 | &add($A,&DWP(8,$tmp1)); | ||
| 202 | &add($B,&DWP(12,$tmp1)); | ||
| 203 | &add($C,&DWP(16,$tmp1)); | ||
| 204 | |||
| 205 | &mov(&DWP(0,$tmp1),$E); # update SHA_CTX | ||
| 206 | &add($D,64); # advance input pointer | ||
| 207 | &mov(&DWP(4,$tmp1),$T); | ||
| 208 | &cmp($D,&wparam(2)); # have we reached the end yet? | ||
| 209 | &mov(&DWP(8,$tmp1),$A); | ||
| 210 | &mov($E,$C); # C is last "E" which needs to be "pre-loaded" | ||
| 211 | &mov(&DWP(12,$tmp1),$B); | ||
| 212 | &mov($T,$D); # input pointer | ||
| 213 | &mov(&DWP(16,$tmp1),$C); | ||
| 214 | &jb(&label("loop")); | ||
| 215 | |||
| 216 | &stack_pop(16); | ||
| 217 | &function_end("sha1_block_data_order"); | ||
| 218 | |||
| 219 | &asm_finish(); | ||
diff --git a/src/lib/libcrypto/sha/asm/sha1-ia64.pl b/src/lib/libcrypto/sha/asm/sha1-ia64.pl new file mode 100644 index 0000000000..aa18c1089b --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha1-ia64.pl | |||
| @@ -0,0 +1,305 @@ | |||
| 1 | #!/usr/bin/env perl | ||
| 2 | # | ||
| 3 | # ==================================================================== | ||
| 4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
| 5 | # project. The module is, however, dual licensed under OpenSSL and | ||
| 6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
| 7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
| 8 | # ==================================================================== | ||
| 9 | # | ||
| 10 | # Eternal question is what's wrong with compiler generated code? The | ||
| 11 | # trick is that it's possible to reduce the number of shifts required | ||
| 12 | # to perform rotations by maintaining copy of 32-bit value in upper | ||
| 13 | # bits of 64-bit register. Just follow mux2 and shrp instructions... | ||
| 14 | # Performance under big-endian OS such as HP-UX is 179MBps*1GHz, which | ||
| 15 | # is >50% better than HP C and >2x better than gcc. | ||
| 16 | |||
| 17 | $code=<<___; | ||
| 18 | .ident \"sha1-ia64.s, version 1.2\" | ||
| 19 | .ident \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\" | ||
| 20 | .explicit | ||
| 21 | |||
| 22 | ___ | ||
| 23 | |||
| 24 | |||
| 25 | if ($^O eq "hpux") { | ||
| 26 | $ADDP="addp4"; | ||
| 27 | for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); } | ||
| 28 | } else { $ADDP="add"; } | ||
| 29 | for (@ARGV) { $big_endian=1 if (/\-DB_ENDIAN/); | ||
| 30 | $big_endian=0 if (/\-DL_ENDIAN/); } | ||
| 31 | if (!defined($big_endian)) | ||
| 32 | { $big_endian=(unpack('L',pack('N',1))==1); } | ||
| 33 | |||
| 34 | #$human=1; | ||
| 35 | if ($human) { # useful for visual code auditing... | ||
| 36 | ($A,$B,$C,$D,$E,$T) = ("A","B","C","D","E","T"); | ||
| 37 | ($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4"); | ||
| 38 | ($K_00_19, $K_20_39, $K_40_59, $K_60_79) = | ||
| 39 | ( "K_00_19","K_20_39","K_40_59","K_60_79" ); | ||
| 40 | @X= ( "X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", | ||
| 41 | "X8", "X9","X10","X11","X12","X13","X14","X15" ); | ||
| 42 | } | ||
| 43 | else { | ||
| 44 | ($A,$B,$C,$D,$E,$T) = ("loc0","loc1","loc2","loc3","loc4","loc5"); | ||
| 45 | ($h0,$h1,$h2,$h3,$h4) = ("loc6","loc7","loc8","loc9","loc10"); | ||
| 46 | ($K_00_19, $K_20_39, $K_40_59, $K_60_79) = | ||
| 47 | ( "r14", "r15", "loc11", "loc12" ); | ||
| 48 | @X= ( "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", | ||
| 49 | "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" ); | ||
| 50 | } | ||
| 51 | |||
| 52 | sub BODY_00_15 { | ||
| 53 | local *code=shift; | ||
| 54 | local ($i,$a,$b,$c,$d,$e,$f)=@_; | ||
| 55 | |||
| 56 | $code.=<<___ if ($i==0); | ||
| 57 | { .mmi; ld1 $X[$i&0xf]=[inp],2 // MSB | ||
| 58 | ld1 tmp2=[tmp3],2 };; | ||
| 59 | { .mmi; ld1 tmp0=[inp],2 | ||
| 60 | ld1 tmp4=[tmp3],2 // LSB | ||
| 61 | dep $X[$i&0xf]=$X[$i&0xf],tmp2,8,8 };; | ||
| 62 | ___ | ||
| 63 | if ($i<15) { | ||
| 64 | $code.=<<___; | ||
| 65 | { .mmi; ld1 $X[($i+1)&0xf]=[inp],2 // +1 | ||
| 66 | dep tmp1=tmp0,tmp4,8,8 };; | ||
| 67 | { .mmi; ld1 tmp2=[tmp3],2 // +1 | ||
| 68 | and tmp4=$c,$b | ||
| 69 | dep $X[$i&0xf]=$X[$i&0xf],tmp1,16,16 } //;; | ||
| 70 | { .mmi; andcm tmp1=$d,$b | ||
| 71 | add tmp0=$e,$K_00_19 | ||
| 72 | dep.z tmp5=$a,5,27 };; // a<<5 | ||
| 73 | { .mmi; or tmp4=tmp4,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) | ||
| 74 | add $f=tmp0,$X[$i&0xf] // f=xi+e+K_00_19 | ||
| 75 | extr.u tmp1=$a,27,5 };; // a>>27 | ||
| 76 | { .mmi; ld1 tmp0=[inp],2 // +1 | ||
| 77 | add $f=$f,tmp4 // f+=F_00_19(b,c,d) | ||
| 78 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) | ||
| 79 | { .mmi; ld1 tmp4=[tmp3],2 // +1 | ||
| 80 | or tmp5=tmp1,tmp5 // ROTATE(a,5) | ||
| 81 | mux2 tmp6=$a,0x44 };; // see b in next iteration | ||
| 82 | { .mii; add $f=$f,tmp5 // f+=ROTATE(a,5) | ||
| 83 | dep $X[($i+1)&0xf]=$X[($i+1)&0xf],tmp2,8,8 // +1 | ||
| 84 | mux2 $X[$i&0xf]=$X[$i&0xf],0x44 } //;; | ||
| 85 | |||
| 86 | ___ | ||
| 87 | } | ||
| 88 | else { | ||
| 89 | $code.=<<___; | ||
| 90 | { .mii; and tmp3=$c,$b | ||
| 91 | dep tmp1=tmp0,tmp4,8,8;; | ||
| 92 | dep $X[$i&0xf]=$X[$i&0xf],tmp1,16,16 } //;; | ||
| 93 | { .mmi; andcm tmp1=$d,$b | ||
| 94 | add tmp0=$e,$K_00_19 | ||
| 95 | dep.z tmp5=$a,5,27 };; // a<<5 | ||
| 96 | { .mmi; or tmp4=tmp3,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) | ||
| 97 | add $f=tmp0,$X[$i&0xf] // f=xi+e+K_00_19 | ||
| 98 | extr.u tmp1=$a,27,5 } // a>>27 | ||
| 99 | { .mmi; xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 | ||
| 100 | xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 | ||
| 101 | nop.i 0 };; | ||
| 102 | { .mmi; add $f=$f,tmp4 // f+=F_00_19(b,c,d) | ||
| 103 | xor tmp2=tmp2,tmp3 // +1 | ||
| 104 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) | ||
| 105 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) | ||
| 106 | mux2 tmp6=$a,0x44 };; // see b in next iteration | ||
| 107 | { .mii; add $f=$f,tmp1 // f+=ROTATE(a,5) | ||
| 108 | shrp $e=tmp2,tmp2,31 // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1) | ||
| 109 | mux2 $X[$i&0xf]=$X[$i&0xf],0x44 };; | ||
| 110 | |||
| 111 | ___ | ||
| 112 | } | ||
| 113 | } | ||
| 114 | |||
| 115 | sub BODY_16_19 { | ||
| 116 | local *code=shift; | ||
| 117 | local ($i,$a,$b,$c,$d,$e,$f)=@_; | ||
| 118 | |||
| 119 | $code.=<<___; | ||
| 120 | { .mmi; mov $X[$i&0xf]=$f // Xupdate | ||
| 121 | and tmp0=$c,$b | ||
| 122 | dep.z tmp5=$a,5,27 } // a<<5 | ||
| 123 | { .mmi; andcm tmp1=$d,$b | ||
| 124 | add tmp4=$e,$K_00_19 };; | ||
| 125 | { .mmi; or tmp0=tmp0,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) | ||
| 126 | add $f=$f,tmp4 // f+=e+K_00_19 | ||
| 127 | extr.u tmp1=$a,27,5 } // a>>27 | ||
| 128 | { .mmi; xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 | ||
| 129 | xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 | ||
| 130 | nop.i 0 };; | ||
| 131 | { .mmi; add $f=$f,tmp0 // f+=F_00_19(b,c,d) | ||
| 132 | xor tmp2=tmp2,tmp3 // +1 | ||
| 133 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) | ||
| 134 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) | ||
| 135 | mux2 tmp6=$a,0x44 };; // see b in next iteration | ||
| 136 | { .mii; add $f=$f,tmp1 // f+=ROTATE(a,5) | ||
| 137 | shrp $e=tmp2,tmp2,31 // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1) | ||
| 138 | nop.i 0 };; | ||
| 139 | |||
| 140 | ___ | ||
| 141 | } | ||
| 142 | |||
| 143 | sub BODY_20_39 { | ||
| 144 | local *code=shift; | ||
| 145 | local ($i,$a,$b,$c,$d,$e,$f,$Konst)=@_; | ||
| 146 | $Konst = $K_20_39 if (!defined($Konst)); | ||
| 147 | |||
| 148 | if ($i<79) { | ||
| 149 | $code.=<<___; | ||
| 150 | { .mib; mov $X[$i&0xf]=$f // Xupdate | ||
| 151 | dep.z tmp5=$a,5,27 } // a<<5 | ||
| 152 | { .mib; xor tmp0=$c,$b | ||
| 153 | add tmp4=$e,$Konst };; | ||
| 154 | { .mmi; xor tmp0=tmp0,$d // F_20_39(b,c,d)=b^c^d | ||
| 155 | add $f=$f,tmp4 // f+=e+K_20_39 | ||
| 156 | extr.u tmp1=$a,27,5 } // a>>27 | ||
| 157 | { .mmi; xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 | ||
| 158 | xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 | ||
| 159 | nop.i 0 };; | ||
| 160 | { .mmi; add $f=$f,tmp0 // f+=F_20_39(b,c,d) | ||
| 161 | xor tmp2=tmp2,tmp3 // +1 | ||
| 162 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) | ||
| 163 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) | ||
| 164 | mux2 tmp6=$a,0x44 };; // see b in next iteration | ||
| 165 | { .mii; add $f=$f,tmp1 // f+=ROTATE(a,5) | ||
| 166 | shrp $e=tmp2,tmp2,31 // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1) | ||
| 167 | nop.i 0 };; | ||
| 168 | |||
| 169 | ___ | ||
| 170 | } | ||
| 171 | else { | ||
| 172 | $code.=<<___; | ||
| 173 | { .mib; mov $X[$i&0xf]=$f // Xupdate | ||
| 174 | dep.z tmp5=$a,5,27 } // a<<5 | ||
| 175 | { .mib; xor tmp0=$c,$b | ||
| 176 | add tmp4=$e,$Konst };; | ||
| 177 | { .mib; xor tmp0=tmp0,$d // F_20_39(b,c,d)=b^c^d | ||
| 178 | extr.u tmp1=$a,27,5 } // a>>27 | ||
| 179 | { .mib; add $f=$f,tmp4 // f+=e+K_20_39 | ||
| 180 | add $h1=$h1,$a };; // wrap up | ||
| 181 | { .mmi; add $f=$f,tmp0 // f+=F_20_39(b,c,d) | ||
| 182 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) ;;? | ||
| 183 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) | ||
| 184 | add $h3=$h3,$c };; // wrap up | ||
| 185 | { .mib; add tmp3=1,inp // used in unaligned codepath | ||
| 186 | add $f=$f,tmp1 } // f+=ROTATE(a,5) | ||
| 187 | { .mib; add $h2=$h2,$b // wrap up | ||
| 188 | add $h4=$h4,$d };; // wrap up | ||
| 189 | |||
| 190 | ___ | ||
| 191 | } | ||
| 192 | } | ||
| 193 | |||
| 194 | sub BODY_40_59 { | ||
| 195 | local *code=shift; | ||
| 196 | local ($i,$a,$b,$c,$d,$e,$f)=@_; | ||
| 197 | |||
| 198 | $code.=<<___; | ||
| 199 | { .mmi; mov $X[$i&0xf]=$f // Xupdate | ||
| 200 | and tmp0=$c,$b | ||
| 201 | dep.z tmp5=$a,5,27 } // a<<5 | ||
| 202 | { .mmi; and tmp1=$d,$b | ||
| 203 | add tmp4=$e,$K_40_59 };; | ||
| 204 | { .mmi; or tmp0=tmp0,tmp1 // (b&c)|(b&d) | ||
| 205 | add $f=$f,tmp4 // f+=e+K_40_59 | ||
| 206 | extr.u tmp1=$a,27,5 } // a>>27 | ||
| 207 | { .mmi; and tmp4=$c,$d | ||
| 208 | xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 | ||
| 209 | xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 | ||
| 210 | };; | ||
| 211 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) | ||
| 212 | xor tmp2=tmp2,tmp3 // +1 | ||
| 213 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) | ||
| 214 | { .mmi; or tmp0=tmp0,tmp4 // F_40_59(b,c,d)=(b&c)|(b&d)|(c&d) | ||
| 215 | mux2 tmp6=$a,0x44 };; // see b in next iteration | ||
| 216 | { .mii; add $f=$f,tmp0 // f+=F_40_59(b,c,d) | ||
| 217 | shrp $e=tmp2,tmp2,31;; // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1) | ||
| 218 | add $f=$f,tmp1 };; // f+=ROTATE(a,5) | ||
| 219 | |||
| 220 | ___ | ||
| 221 | } | ||
| 222 | sub BODY_60_79 { &BODY_20_39(@_,$K_60_79); } | ||
| 223 | |||
| 224 | $code.=<<___; | ||
| 225 | .text | ||
| 226 | |||
| 227 | tmp0=r8; | ||
| 228 | tmp1=r9; | ||
| 229 | tmp2=r10; | ||
| 230 | tmp3=r11; | ||
| 231 | ctx=r32; // in0 | ||
| 232 | inp=r33; // in1 | ||
| 233 | |||
| 234 | // void sha1_block_data_order(SHA_CTX *c,const void *p,size_t num); | ||
| 235 | .global sha1_block_data_order# | ||
| 236 | .proc sha1_block_data_order# | ||
| 237 | .align 32 | ||
| 238 | sha1_block_data_order: | ||
| 239 | .prologue | ||
| 240 | { .mmi; alloc tmp1=ar.pfs,3,15,0,0 | ||
| 241 | $ADDP tmp0=4,ctx | ||
| 242 | .save ar.lc,r3 | ||
| 243 | mov r3=ar.lc } | ||
| 244 | { .mmi; $ADDP ctx=0,ctx | ||
| 245 | $ADDP inp=0,inp | ||
| 246 | mov r2=pr };; | ||
| 247 | tmp4=in2; | ||
| 248 | tmp5=loc13; | ||
| 249 | tmp6=loc14; | ||
| 250 | .body | ||
| 251 | { .mlx; ld4 $h0=[ctx],8 | ||
| 252 | movl $K_00_19=0x5a827999 } | ||
| 253 | { .mlx; ld4 $h1=[tmp0],8 | ||
| 254 | movl $K_20_39=0x6ed9eba1 };; | ||
| 255 | { .mlx; ld4 $h2=[ctx],8 | ||
| 256 | movl $K_40_59=0x8f1bbcdc } | ||
| 257 | { .mlx; ld4 $h3=[tmp0] | ||
| 258 | movl $K_60_79=0xca62c1d6 };; | ||
| 259 | { .mmi; ld4 $h4=[ctx],-16 | ||
| 260 | add in2=-1,in2 // adjust num for ar.lc | ||
| 261 | mov ar.ec=1 };; | ||
| 262 | { .mmi; nop.m 0 | ||
| 263 | add tmp3=1,inp | ||
| 264 | mov ar.lc=in2 };; // brp.loop.imp: too far | ||
| 265 | |||
| 266 | .Ldtop: | ||
| 267 | { .mmi; mov $A=$h0 | ||
| 268 | mov $B=$h1 | ||
| 269 | mux2 tmp6=$h1,0x44 } | ||
| 270 | { .mmi; mov $C=$h2 | ||
| 271 | mov $D=$h3 | ||
| 272 | mov $E=$h4 };; | ||
| 273 | |||
| 274 | ___ | ||
| 275 | |||
| 276 | { my $i,@V=($A,$B,$C,$D,$E,$T); | ||
| 277 | |||
| 278 | for($i=0;$i<16;$i++) { &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); } | ||
| 279 | for(;$i<20;$i++) { &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); } | ||
| 280 | for(;$i<40;$i++) { &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); } | ||
| 281 | for(;$i<60;$i++) { &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); } | ||
| 282 | for(;$i<80;$i++) { &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); } | ||
| 283 | |||
| 284 | (($V[5] eq $D) and ($V[0] eq $E)) or die; # double-check | ||
| 285 | } | ||
| 286 | |||
| 287 | $code.=<<___; | ||
| 288 | { .mmb; add $h0=$h0,$E | ||
| 289 | nop.m 0 | ||
| 290 | br.ctop.dptk.many .Ldtop };; | ||
| 291 | .Ldend: | ||
| 292 | { .mmi; add tmp0=4,ctx | ||
| 293 | mov ar.lc=r3 };; | ||
| 294 | { .mmi; st4 [ctx]=$h0,8 | ||
| 295 | st4 [tmp0]=$h1,8 };; | ||
| 296 | { .mmi; st4 [ctx]=$h2,8 | ||
| 297 | st4 [tmp0]=$h3 };; | ||
| 298 | { .mib; st4 [ctx]=$h4,-16 | ||
| 299 | mov pr=r2,0x1ffff | ||
| 300 | br.ret.sptk.many b0 };; | ||
| 301 | .endp sha1_block_data_order# | ||
| 302 | stringz "SHA1 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>" | ||
| 303 | ___ | ||
| 304 | |||
| 305 | print $code; | ||
diff --git a/src/lib/libcrypto/sha/asm/sha1-x86_64.pl b/src/lib/libcrypto/sha/asm/sha1-x86_64.pl new file mode 100755 index 0000000000..f7ed67a726 --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha1-x86_64.pl | |||
| @@ -0,0 +1,242 @@ | |||
| 1 | #!/usr/bin/env perl | ||
| 2 | # | ||
| 3 | # ==================================================================== | ||
| 4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
| 5 | # project. The module is, however, dual licensed under OpenSSL and | ||
| 6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
| 7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
| 8 | # ==================================================================== | ||
| 9 | # | ||
| 10 | # sha1_block procedure for x86_64. | ||
| 11 | # | ||
| 12 | # It was brought to my attention that on EM64T compiler-generated code | ||
| 13 | # was far behind 32-bit assembler implementation. This is unlike on | ||
| 14 | # Opteron where compiler-generated code was only 15% behind 32-bit | ||
| 15 | # assembler, which originally made it hard to motivate the effort. | ||
| 16 | # There was suggestion to mechanically translate 32-bit code, but I | ||
| 17 | # dismissed it, reasoning that x86_64 offers enough register bank | ||
| 18 | # capacity to fully utilize SHA-1 parallelism. Therefore this fresh | ||
| 19 | # implementation:-) However! While 64-bit code does performs better | ||
| 20 | # on Opteron, I failed to beat 32-bit assembler on EM64T core. Well, | ||
| 21 | # x86_64 does offer larger *addressable* bank, but out-of-order core | ||
| 22 | # reaches for even more registers through dynamic aliasing, and EM64T | ||
| 23 | # core must have managed to run-time optimize even 32-bit code just as | ||
| 24 | # good as 64-bit one. Performance improvement is summarized in the | ||
| 25 | # following table: | ||
| 26 | # | ||
| 27 | # gcc 3.4 32-bit asm cycles/byte | ||
| 28 | # Opteron +45% +20% 6.8 | ||
| 29 | # Xeon P4 +65% +0% 9.9 | ||
| 30 | # Core2 +60% +10% 7.0 | ||
| 31 | |||
| 32 | $output=shift; | ||
| 33 | |||
| 34 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
| 35 | ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or | ||
| 36 | ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or | ||
| 37 | die "can't locate x86_64-xlate.pl"; | ||
| 38 | |||
| 39 | open STDOUT,"| $^X $xlate $output"; | ||
| 40 | |||
| 41 | $ctx="%rdi"; # 1st arg | ||
| 42 | $inp="%rsi"; # 2nd arg | ||
| 43 | $num="%rdx"; # 3rd arg | ||
| 44 | |||
| 45 | # reassign arguments in order to produce more compact code | ||
| 46 | $ctx="%r8"; | ||
| 47 | $inp="%r9"; | ||
| 48 | $num="%r10"; | ||
| 49 | |||
| 50 | $xi="%eax"; | ||
| 51 | $t0="%ebx"; | ||
| 52 | $t1="%ecx"; | ||
| 53 | $A="%edx"; | ||
| 54 | $B="%esi"; | ||
| 55 | $C="%edi"; | ||
| 56 | $D="%ebp"; | ||
| 57 | $E="%r11d"; | ||
| 58 | $T="%r12d"; | ||
| 59 | |||
| 60 | @V=($A,$B,$C,$D,$E,$T); | ||
| 61 | |||
| 62 | sub PROLOGUE { | ||
| 63 | my $func=shift; | ||
| 64 | $code.=<<___; | ||
| 65 | .globl $func | ||
| 66 | .type $func,\@function,3 | ||
| 67 | .align 16 | ||
| 68 | $func: | ||
| 69 | push %rbx | ||
| 70 | push %rbp | ||
| 71 | push %r12 | ||
| 72 | mov %rsp,%rax | ||
| 73 | mov %rdi,$ctx # reassigned argument | ||
| 74 | sub \$`8+16*4`,%rsp | ||
| 75 | mov %rsi,$inp # reassigned argument | ||
| 76 | and \$-64,%rsp | ||
| 77 | mov %rdx,$num # reassigned argument | ||
| 78 | mov %rax,`16*4`(%rsp) | ||
| 79 | |||
| 80 | mov 0($ctx),$A | ||
| 81 | mov 4($ctx),$B | ||
| 82 | mov 8($ctx),$C | ||
| 83 | mov 12($ctx),$D | ||
| 84 | mov 16($ctx),$E | ||
| 85 | ___ | ||
| 86 | } | ||
| 87 | |||
| 88 | sub EPILOGUE { | ||
| 89 | my $func=shift; | ||
| 90 | $code.=<<___; | ||
| 91 | mov `16*4`(%rsp),%rsp | ||
| 92 | pop %r12 | ||
| 93 | pop %rbp | ||
| 94 | pop %rbx | ||
| 95 | ret | ||
| 96 | .size $func,.-$func | ||
| 97 | ___ | ||
| 98 | } | ||
| 99 | |||
| 100 | sub BODY_00_19 { | ||
| 101 | my ($i,$a,$b,$c,$d,$e,$f,$host)=@_; | ||
| 102 | my $j=$i+1; | ||
| 103 | $code.=<<___ if ($i==0); | ||
| 104 | mov `4*$i`($inp),$xi | ||
| 105 | `"bswap $xi" if(!defined($host))` | ||
| 106 | mov $xi,`4*$i`(%rsp) | ||
| 107 | ___ | ||
| 108 | $code.=<<___ if ($i<15); | ||
| 109 | lea 0x5a827999($xi,$e),$f | ||
| 110 | mov $c,$t0 | ||
| 111 | mov `4*$j`($inp),$xi | ||
| 112 | mov $a,$e | ||
| 113 | xor $d,$t0 | ||
| 114 | `"bswap $xi" if(!defined($host))` | ||
| 115 | rol \$5,$e | ||
| 116 | and $b,$t0 | ||
| 117 | mov $xi,`4*$j`(%rsp) | ||
| 118 | add $e,$f | ||
| 119 | xor $d,$t0 | ||
| 120 | rol \$30,$b | ||
| 121 | add $t0,$f | ||
| 122 | ___ | ||
| 123 | $code.=<<___ if ($i>=15); | ||
| 124 | lea 0x5a827999($xi,$e),$f | ||
| 125 | mov `4*($j%16)`(%rsp),$xi | ||
| 126 | mov $c,$t0 | ||
| 127 | mov $a,$e | ||
| 128 | xor `4*(($j+2)%16)`(%rsp),$xi | ||
| 129 | xor $d,$t0 | ||
| 130 | rol \$5,$e | ||
| 131 | xor `4*(($j+8)%16)`(%rsp),$xi | ||
| 132 | and $b,$t0 | ||
| 133 | add $e,$f | ||
| 134 | xor `4*(($j+13)%16)`(%rsp),$xi | ||
| 135 | xor $d,$t0 | ||
| 136 | rol \$30,$b | ||
| 137 | add $t0,$f | ||
| 138 | rol \$1,$xi | ||
| 139 | mov $xi,`4*($j%16)`(%rsp) | ||
| 140 | ___ | ||
| 141 | } | ||
| 142 | |||
| 143 | sub BODY_20_39 { | ||
| 144 | my ($i,$a,$b,$c,$d,$e,$f)=@_; | ||
| 145 | my $j=$i+1; | ||
| 146 | my $K=($i<40)?0x6ed9eba1:0xca62c1d6; | ||
| 147 | $code.=<<___ if ($i<79); | ||
| 148 | lea $K($xi,$e),$f | ||
| 149 | mov `4*($j%16)`(%rsp),$xi | ||
| 150 | mov $c,$t0 | ||
| 151 | mov $a,$e | ||
| 152 | xor `4*(($j+2)%16)`(%rsp),$xi | ||
| 153 | xor $b,$t0 | ||
| 154 | rol \$5,$e | ||
| 155 | xor `4*(($j+8)%16)`(%rsp),$xi | ||
| 156 | xor $d,$t0 | ||
| 157 | add $e,$f | ||
| 158 | xor `4*(($j+13)%16)`(%rsp),$xi | ||
| 159 | rol \$30,$b | ||
| 160 | add $t0,$f | ||
| 161 | rol \$1,$xi | ||
| 162 | ___ | ||
| 163 | $code.=<<___ if ($i<76); | ||
| 164 | mov $xi,`4*($j%16)`(%rsp) | ||
| 165 | ___ | ||
| 166 | $code.=<<___ if ($i==79); | ||
| 167 | lea $K($xi,$e),$f | ||
| 168 | mov $c,$t0 | ||
| 169 | mov $a,$e | ||
| 170 | xor $b,$t0 | ||
| 171 | rol \$5,$e | ||
| 172 | xor $d,$t0 | ||
| 173 | add $e,$f | ||
| 174 | rol \$30,$b | ||
| 175 | add $t0,$f | ||
| 176 | ___ | ||
| 177 | } | ||
| 178 | |||
| 179 | sub BODY_40_59 { | ||
| 180 | my ($i,$a,$b,$c,$d,$e,$f)=@_; | ||
| 181 | my $j=$i+1; | ||
| 182 | $code.=<<___; | ||
| 183 | lea 0x8f1bbcdc($xi,$e),$f | ||
| 184 | mov `4*($j%16)`(%rsp),$xi | ||
| 185 | mov $b,$t0 | ||
| 186 | mov $b,$t1 | ||
| 187 | xor `4*(($j+2)%16)`(%rsp),$xi | ||
| 188 | mov $a,$e | ||
| 189 | and $c,$t0 | ||
| 190 | xor `4*(($j+8)%16)`(%rsp),$xi | ||
| 191 | or $c,$t1 | ||
| 192 | rol \$5,$e | ||
| 193 | xor `4*(($j+13)%16)`(%rsp),$xi | ||
| 194 | and $d,$t1 | ||
| 195 | add $e,$f | ||
| 196 | rol \$1,$xi | ||
| 197 | or $t1,$t0 | ||
| 198 | rol \$30,$b | ||
| 199 | mov $xi,`4*($j%16)`(%rsp) | ||
| 200 | add $t0,$f | ||
| 201 | ___ | ||
| 202 | } | ||
| 203 | |||
| 204 | $code=".text\n"; | ||
| 205 | |||
| 206 | &PROLOGUE("sha1_block_data_order"); | ||
| 207 | $code.=".align 4\n.Lloop:\n"; | ||
| 208 | for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); } | ||
| 209 | for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } | ||
| 210 | for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } | ||
| 211 | for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } | ||
| 212 | $code.=<<___; | ||
| 213 | add 0($ctx),$E | ||
| 214 | add 4($ctx),$T | ||
| 215 | add 8($ctx),$A | ||
| 216 | add 12($ctx),$B | ||
| 217 | add 16($ctx),$C | ||
| 218 | mov $E,0($ctx) | ||
| 219 | mov $T,4($ctx) | ||
| 220 | mov $A,8($ctx) | ||
| 221 | mov $B,12($ctx) | ||
| 222 | mov $C,16($ctx) | ||
| 223 | |||
| 224 | xchg $E,$A # mov $E,$A | ||
| 225 | xchg $T,$B # mov $T,$B | ||
| 226 | xchg $E,$C # mov $A,$C | ||
| 227 | xchg $T,$D # mov $B,$D | ||
| 228 | # mov $C,$E | ||
| 229 | lea `16*4`($inp),$inp | ||
| 230 | sub \$1,$num | ||
| 231 | jnz .Lloop | ||
| 232 | ___ | ||
| 233 | &EPILOGUE("sha1_block_data_order"); | ||
| 234 | $code.=<<___; | ||
| 235 | .asciz "SHA1 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>" | ||
| 236 | ___ | ||
| 237 | |||
| 238 | #################################################################### | ||
| 239 | |||
| 240 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
| 241 | print $code; | ||
| 242 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/sha/asm/sha512-ia64.pl b/src/lib/libcrypto/sha/asm/sha512-ia64.pl new file mode 100755 index 0000000000..1c6ce56522 --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha512-ia64.pl | |||
| @@ -0,0 +1,672 @@ | |||
| 1 | #!/usr/bin/env perl | ||
| 2 | # | ||
| 3 | # ==================================================================== | ||
| 4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
| 5 | # project. The module is, however, dual licensed under OpenSSL and | ||
| 6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
| 7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
| 8 | # ==================================================================== | ||
| 9 | # | ||
| 10 | # SHA256/512_Transform for Itanium. | ||
| 11 | # | ||
| 12 | # sha512_block runs in 1003 cycles on Itanium 2, which is almost 50% | ||
| 13 | # faster than gcc and >60%(!) faster than code generated by HP-UX | ||
| 14 | # compiler (yes, HP-UX is generating slower code, because unlike gcc, | ||
| 15 | # it failed to deploy "shift right pair," 'shrp' instruction, which | ||
| 16 | # substitutes for 64-bit rotate). | ||
| 17 | # | ||
| 18 | # 924 cycles long sha256_block outperforms gcc by over factor of 2(!) | ||
| 19 | # and HP-UX compiler - by >40% (yes, gcc won sha512_block, but lost | ||
| 20 | # this one big time). Note that "formally" 924 is about 100 cycles | ||
| 21 | # too much. I mean it's 64 32-bit rounds vs. 80 virtually identical | ||
| 22 | # 64-bit ones and 1003*64/80 gives 802. Extra cycles, 2 per round, | ||
| 23 | # are spent on extra work to provide for 32-bit rotations. 32-bit | ||
| 24 | # rotations are still handled by 'shrp' instruction and for this | ||
| 25 | # reason lower 32 bits are deposited to upper half of 64-bit register | ||
| 26 | # prior 'shrp' issue. And in order to minimize the amount of such | ||
| 27 | # operations, X[16] values are *maintained* with copies of lower | ||
| 28 | # halves in upper halves, which is why you'll spot such instructions | ||
| 29 | # as custom 'mux2', "parallel 32-bit add," 'padd4' and "parallel | ||
| 30 | # 32-bit unsigned right shift," 'pshr4.u' instructions here. | ||
| 31 | # | ||
| 32 | # Rules of engagement. | ||
| 33 | # | ||
| 34 | # There is only one integer shifter meaning that if I have two rotate, | ||
| 35 | # deposit or extract instructions in adjacent bundles, they shall | ||
| 36 | # split [at run-time if they have to]. But note that variable and | ||
| 37 | # parallel shifts are performed by multi-media ALU and *are* pairable | ||
| 38 | # with rotates [and alike]. On the backside MMALU is rather slow: it | ||
| 39 | # takes 2 extra cycles before the result of integer operation is | ||
| 40 | # available *to* MMALU and 2(*) extra cycles before the result of MM | ||
| 41 | # operation is available "back" *to* integer ALU, not to mention that | ||
| 42 | # MMALU itself has 2 cycles latency. However! I explicitly scheduled | ||
| 43 | # these MM instructions to avoid MM stalls, so that all these extra | ||
| 44 | # latencies get "hidden" in instruction-level parallelism. | ||
| 45 | # | ||
| 46 | # (*) 2 cycles on Itanium 1 and 1 cycle on Itanium 2. But I schedule | ||
| 47 | # for 2 in order to provide for best *overall* performance, | ||
| 48 | # because on Itanium 1 stall on MM result is accompanied by | ||
| 49 | # pipeline flush, which takes 6 cycles:-( | ||
| 50 | # | ||
| 51 | # Resulting performance numbers for 900MHz Itanium 2 system: | ||
| 52 | # | ||
| 53 | # The 'numbers' are in 1000s of bytes per second processed. | ||
| 54 | # type 16 bytes 64 bytes 256 bytes 1024 bytes 8192 bytes | ||
| 55 | # sha1(*) 6210.14k 20376.30k 52447.83k 85870.05k 105478.12k | ||
| 56 | # sha256 7476.45k 20572.05k 41538.34k 56062.29k 62093.18k | ||
| 57 | # sha512 4996.56k 20026.28k 47597.20k 85278.79k 111501.31k | ||
| 58 | # | ||
| 59 | # (*) SHA1 numbers are for HP-UX compiler and are presented purely | ||
| 60 | # for reference purposes. I bet it can improved too... | ||
| 61 | # | ||
| 62 | # To generate code, pass the file name with either 256 or 512 in its | ||
| 63 | # name and compiler flags. | ||
| 64 | |||
| 65 | $output=shift; | ||
| 66 | |||
| 67 | if ($output =~ /512.*\.[s|asm]/) { | ||
| 68 | $SZ=8; | ||
| 69 | $BITS=8*$SZ; | ||
| 70 | $LDW="ld8"; | ||
| 71 | $STW="st8"; | ||
| 72 | $ADD="add"; | ||
| 73 | $SHRU="shr.u"; | ||
| 74 | $TABLE="K512"; | ||
| 75 | $func="sha512_block_data_order"; | ||
| 76 | @Sigma0=(28,34,39); | ||
| 77 | @Sigma1=(14,18,41); | ||
| 78 | @sigma0=(1, 8, 7); | ||
| 79 | @sigma1=(19,61, 6); | ||
| 80 | $rounds=80; | ||
| 81 | } elsif ($output =~ /256.*\.[s|asm]/) { | ||
| 82 | $SZ=4; | ||
| 83 | $BITS=8*$SZ; | ||
| 84 | $LDW="ld4"; | ||
| 85 | $STW="st4"; | ||
| 86 | $ADD="padd4"; | ||
| 87 | $SHRU="pshr4.u"; | ||
| 88 | $TABLE="K256"; | ||
| 89 | $func="sha256_block_data_order"; | ||
| 90 | @Sigma0=( 2,13,22); | ||
| 91 | @Sigma1=( 6,11,25); | ||
| 92 | @sigma0=( 7,18, 3); | ||
| 93 | @sigma1=(17,19,10); | ||
| 94 | $rounds=64; | ||
| 95 | } else { die "nonsense $output"; } | ||
| 96 | |||
| 97 | open STDOUT,">$output" || die "can't open $output: $!"; | ||
| 98 | |||
| 99 | if ($^O eq "hpux") { | ||
| 100 | $ADDP="addp4"; | ||
| 101 | for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); } | ||
| 102 | } else { $ADDP="add"; } | ||
| 103 | for (@ARGV) { $big_endian=1 if (/\-DB_ENDIAN/); | ||
| 104 | $big_endian=0 if (/\-DL_ENDIAN/); } | ||
| 105 | if (!defined($big_endian)) | ||
| 106 | { $big_endian=(unpack('L',pack('N',1))==1); } | ||
| 107 | |||
| 108 | $code=<<___; | ||
| 109 | .ident \"$output, version 1.1\" | ||
| 110 | .ident \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\" | ||
| 111 | .explicit | ||
| 112 | .text | ||
| 113 | |||
| 114 | pfssave=r2; | ||
| 115 | lcsave=r3; | ||
| 116 | prsave=r14; | ||
| 117 | K=r15; | ||
| 118 | A=r16; B=r17; C=r18; D=r19; | ||
| 119 | E=r20; F=r21; G=r22; H=r23; | ||
| 120 | T1=r24; T2=r25; | ||
| 121 | s0=r26; s1=r27; t0=r28; t1=r29; | ||
| 122 | Ktbl=r30; | ||
| 123 | ctx=r31; // 1st arg | ||
| 124 | input=r48; // 2nd arg | ||
| 125 | num=r49; // 3rd arg | ||
| 126 | sgm0=r50; sgm1=r51; // small constants | ||
| 127 | A_=r54; B_=r55; C_=r56; D_=r57; | ||
| 128 | E_=r58; F_=r59; G_=r60; H_=r61; | ||
| 129 | |||
| 130 | // void $func (SHA_CTX *ctx, const void *in,size_t num[,int host]) | ||
| 131 | .global $func# | ||
| 132 | .proc $func# | ||
| 133 | .align 32 | ||
| 134 | $func: | ||
| 135 | .prologue | ||
| 136 | .save ar.pfs,pfssave | ||
| 137 | { .mmi; alloc pfssave=ar.pfs,3,27,0,16 | ||
| 138 | $ADDP ctx=0,r32 // 1st arg | ||
| 139 | .save ar.lc,lcsave | ||
| 140 | mov lcsave=ar.lc } | ||
| 141 | { .mmi; $ADDP input=0,r33 // 2nd arg | ||
| 142 | mov num=r34 // 3rd arg | ||
| 143 | .save pr,prsave | ||
| 144 | mov prsave=pr };; | ||
| 145 | |||
| 146 | .body | ||
| 147 | { .mib; add r8=0*$SZ,ctx | ||
| 148 | add r9=1*$SZ,ctx | ||
| 149 | brp.loop.imp .L_first16,.L_first16_end-16 } | ||
| 150 | { .mib; add r10=2*$SZ,ctx | ||
| 151 | add r11=3*$SZ,ctx | ||
| 152 | brp.loop.imp .L_rest,.L_rest_end-16 };; | ||
| 153 | |||
| 154 | // load A-H | ||
| 155 | .Lpic_point: | ||
| 156 | { .mmi; $LDW A_=[r8],4*$SZ | ||
| 157 | $LDW B_=[r9],4*$SZ | ||
| 158 | mov Ktbl=ip } | ||
| 159 | { .mmi; $LDW C_=[r10],4*$SZ | ||
| 160 | $LDW D_=[r11],4*$SZ | ||
| 161 | mov sgm0=$sigma0[2] };; | ||
| 162 | { .mmi; $LDW E_=[r8] | ||
| 163 | $LDW F_=[r9] | ||
| 164 | add Ktbl=($TABLE#-.Lpic_point),Ktbl } | ||
| 165 | { .mmi; $LDW G_=[r10] | ||
| 166 | $LDW H_=[r11] | ||
| 167 | cmp.ne p0,p16=0,r0 };; // used in sha256_block | ||
| 168 | ___ | ||
| 169 | $code.=<<___ if ($BITS==64); | ||
| 170 | { .mii; and r8=7,input | ||
| 171 | and input=~7,input;; | ||
| 172 | cmp.eq p9,p0=1,r8 } | ||
| 173 | { .mmi; cmp.eq p10,p0=2,r8 | ||
| 174 | cmp.eq p11,p0=3,r8 | ||
| 175 | cmp.eq p12,p0=4,r8 } | ||
| 176 | { .mmi; cmp.eq p13,p0=5,r8 | ||
| 177 | cmp.eq p14,p0=6,r8 | ||
| 178 | cmp.eq p15,p0=7,r8 };; | ||
| 179 | ___ | ||
| 180 | $code.=<<___; | ||
| 181 | .L_outer: | ||
| 182 | .rotr X[16] | ||
| 183 | { .mmi; mov A=A_ | ||
| 184 | mov B=B_ | ||
| 185 | mov ar.lc=14 } | ||
| 186 | { .mmi; mov C=C_ | ||
| 187 | mov D=D_ | ||
| 188 | mov E=E_ } | ||
| 189 | { .mmi; mov F=F_ | ||
| 190 | mov G=G_ | ||
| 191 | mov ar.ec=2 } | ||
| 192 | { .mmi; ld1 X[15]=[input],$SZ // eliminated in 64-bit | ||
| 193 | mov H=H_ | ||
| 194 | mov sgm1=$sigma1[2] };; | ||
| 195 | |||
| 196 | ___ | ||
| 197 | $t0="t0", $t1="t1", $code.=<<___ if ($BITS==32); | ||
| 198 | .align 32 | ||
| 199 | .L_first16: | ||
| 200 | { .mmi; add r9=1-$SZ,input | ||
| 201 | add r10=2-$SZ,input | ||
| 202 | add r11=3-$SZ,input };; | ||
| 203 | { .mmi; ld1 r9=[r9] | ||
| 204 | ld1 r10=[r10] | ||
| 205 | dep.z $t1=E,32,32 } | ||
| 206 | { .mmi; $LDW K=[Ktbl],$SZ | ||
| 207 | ld1 r11=[r11] | ||
| 208 | zxt4 E=E };; | ||
| 209 | { .mii; or $t1=$t1,E | ||
| 210 | dep X[15]=X[15],r9,8,8 | ||
| 211 | dep r11=r10,r11,8,8 };; | ||
| 212 | { .mmi; and T1=F,E | ||
| 213 | and T2=A,B | ||
| 214 | dep X[15]=X[15],r11,16,16 } | ||
| 215 | { .mmi; andcm r8=G,E | ||
| 216 | and r9=A,C | ||
| 217 | mux2 $t0=A,0x44 };; // copy lower half to upper | ||
| 218 | { .mmi; (p16) ld1 X[15-1]=[input],$SZ // prefetch | ||
| 219 | xor T1=T1,r8 // T1=((e & f) ^ (~e & g)) | ||
| 220 | _rotr r11=$t1,$Sigma1[0] } // ROTR(e,14) | ||
| 221 | { .mib; and r10=B,C | ||
| 222 | xor T2=T2,r9 };; | ||
| 223 | ___ | ||
| 224 | $t0="A", $t1="E", $code.=<<___ if ($BITS==64); | ||
| 225 | // in 64-bit mode I load whole X[16] at once and take care of alignment... | ||
| 226 | { .mmi; add r8=1*$SZ,input | ||
| 227 | add r9=2*$SZ,input | ||
| 228 | add r10=3*$SZ,input };; | ||
| 229 | { .mmb; $LDW X[15]=[input],4*$SZ | ||
| 230 | $LDW X[14]=[r8],4*$SZ | ||
| 231 | (p9) br.cond.dpnt.many .L1byte };; | ||
| 232 | { .mmb; $LDW X[13]=[r9],4*$SZ | ||
| 233 | $LDW X[12]=[r10],4*$SZ | ||
| 234 | (p10) br.cond.dpnt.many .L2byte };; | ||
| 235 | { .mmb; $LDW X[11]=[input],4*$SZ | ||
| 236 | $LDW X[10]=[r8],4*$SZ | ||
| 237 | (p11) br.cond.dpnt.many .L3byte };; | ||
| 238 | { .mmb; $LDW X[ 9]=[r9],4*$SZ | ||
| 239 | $LDW X[ 8]=[r10],4*$SZ | ||
| 240 | (p12) br.cond.dpnt.many .L4byte };; | ||
| 241 | { .mmb; $LDW X[ 7]=[input],4*$SZ | ||
| 242 | $LDW X[ 6]=[r8],4*$SZ | ||
| 243 | (p13) br.cond.dpnt.many .L5byte };; | ||
| 244 | { .mmb; $LDW X[ 5]=[r9],4*$SZ | ||
| 245 | $LDW X[ 4]=[r10],4*$SZ | ||
| 246 | (p14) br.cond.dpnt.many .L6byte };; | ||
| 247 | { .mmb; $LDW X[ 3]=[input],4*$SZ | ||
| 248 | $LDW X[ 2]=[r8],4*$SZ | ||
| 249 | (p15) br.cond.dpnt.many .L7byte };; | ||
| 250 | { .mmb; $LDW X[ 1]=[r9],4*$SZ | ||
| 251 | $LDW X[ 0]=[r10],4*$SZ | ||
| 252 | br.many .L_first16 };; | ||
| 253 | .L1byte: | ||
| 254 | { .mmi; $LDW X[13]=[r9],4*$SZ | ||
| 255 | $LDW X[12]=[r10],4*$SZ | ||
| 256 | shrp X[15]=X[15],X[14],56 };; | ||
| 257 | { .mmi; $LDW X[11]=[input],4*$SZ | ||
| 258 | $LDW X[10]=[r8],4*$SZ | ||
| 259 | shrp X[14]=X[14],X[13],56 } | ||
| 260 | { .mmi; $LDW X[ 9]=[r9],4*$SZ | ||
| 261 | $LDW X[ 8]=[r10],4*$SZ | ||
| 262 | shrp X[13]=X[13],X[12],56 };; | ||
| 263 | { .mmi; $LDW X[ 7]=[input],4*$SZ | ||
| 264 | $LDW X[ 6]=[r8],4*$SZ | ||
| 265 | shrp X[12]=X[12],X[11],56 } | ||
| 266 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
| 267 | $LDW X[ 4]=[r10],4*$SZ | ||
| 268 | shrp X[11]=X[11],X[10],56 };; | ||
| 269 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
| 270 | $LDW X[ 2]=[r8],4*$SZ | ||
| 271 | shrp X[10]=X[10],X[ 9],56 } | ||
| 272 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
| 273 | $LDW X[ 0]=[r10],4*$SZ | ||
| 274 | shrp X[ 9]=X[ 9],X[ 8],56 };; | ||
| 275 | { .mii; $LDW T1=[input] | ||
| 276 | shrp X[ 8]=X[ 8],X[ 7],56 | ||
| 277 | shrp X[ 7]=X[ 7],X[ 6],56 } | ||
| 278 | { .mii; shrp X[ 6]=X[ 6],X[ 5],56 | ||
| 279 | shrp X[ 5]=X[ 5],X[ 4],56 };; | ||
| 280 | { .mii; shrp X[ 4]=X[ 4],X[ 3],56 | ||
| 281 | shrp X[ 3]=X[ 3],X[ 2],56 } | ||
| 282 | { .mii; shrp X[ 2]=X[ 2],X[ 1],56 | ||
| 283 | shrp X[ 1]=X[ 1],X[ 0],56 } | ||
| 284 | { .mib; shrp X[ 0]=X[ 0],T1,56 | ||
| 285 | br.many .L_first16 };; | ||
| 286 | .L2byte: | ||
| 287 | { .mmi; $LDW X[11]=[input],4*$SZ | ||
| 288 | $LDW X[10]=[r8],4*$SZ | ||
| 289 | shrp X[15]=X[15],X[14],48 } | ||
| 290 | { .mmi; $LDW X[ 9]=[r9],4*$SZ | ||
| 291 | $LDW X[ 8]=[r10],4*$SZ | ||
| 292 | shrp X[14]=X[14],X[13],48 };; | ||
| 293 | { .mmi; $LDW X[ 7]=[input],4*$SZ | ||
| 294 | $LDW X[ 6]=[r8],4*$SZ | ||
| 295 | shrp X[13]=X[13],X[12],48 } | ||
| 296 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
| 297 | $LDW X[ 4]=[r10],4*$SZ | ||
| 298 | shrp X[12]=X[12],X[11],48 };; | ||
| 299 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
| 300 | $LDW X[ 2]=[r8],4*$SZ | ||
| 301 | shrp X[11]=X[11],X[10],48 } | ||
| 302 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
| 303 | $LDW X[ 0]=[r10],4*$SZ | ||
| 304 | shrp X[10]=X[10],X[ 9],48 };; | ||
| 305 | { .mii; $LDW T1=[input] | ||
| 306 | shrp X[ 9]=X[ 9],X[ 8],48 | ||
| 307 | shrp X[ 8]=X[ 8],X[ 7],48 } | ||
| 308 | { .mii; shrp X[ 7]=X[ 7],X[ 6],48 | ||
| 309 | shrp X[ 6]=X[ 6],X[ 5],48 };; | ||
| 310 | { .mii; shrp X[ 5]=X[ 5],X[ 4],48 | ||
| 311 | shrp X[ 4]=X[ 4],X[ 3],48 } | ||
| 312 | { .mii; shrp X[ 3]=X[ 3],X[ 2],48 | ||
| 313 | shrp X[ 2]=X[ 2],X[ 1],48 } | ||
| 314 | { .mii; shrp X[ 1]=X[ 1],X[ 0],48 | ||
| 315 | shrp X[ 0]=X[ 0],T1,48 } | ||
| 316 | { .mfb; br.many .L_first16 };; | ||
| 317 | .L3byte: | ||
| 318 | { .mmi; $LDW X[ 9]=[r9],4*$SZ | ||
| 319 | $LDW X[ 8]=[r10],4*$SZ | ||
| 320 | shrp X[15]=X[15],X[14],40 };; | ||
| 321 | { .mmi; $LDW X[ 7]=[input],4*$SZ | ||
| 322 | $LDW X[ 6]=[r8],4*$SZ | ||
| 323 | shrp X[14]=X[14],X[13],40 } | ||
| 324 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
| 325 | $LDW X[ 4]=[r10],4*$SZ | ||
| 326 | shrp X[13]=X[13],X[12],40 };; | ||
| 327 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
| 328 | $LDW X[ 2]=[r8],4*$SZ | ||
| 329 | shrp X[12]=X[12],X[11],40 } | ||
| 330 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
| 331 | $LDW X[ 0]=[r10],4*$SZ | ||
| 332 | shrp X[11]=X[11],X[10],40 };; | ||
| 333 | { .mii; $LDW T1=[input] | ||
| 334 | shrp X[10]=X[10],X[ 9],40 | ||
| 335 | shrp X[ 9]=X[ 9],X[ 8],40 } | ||
| 336 | { .mii; shrp X[ 8]=X[ 8],X[ 7],40 | ||
| 337 | shrp X[ 7]=X[ 7],X[ 6],40 };; | ||
| 338 | { .mii; shrp X[ 6]=X[ 6],X[ 5],40 | ||
| 339 | shrp X[ 5]=X[ 5],X[ 4],40 } | ||
| 340 | { .mii; shrp X[ 4]=X[ 4],X[ 3],40 | ||
| 341 | shrp X[ 3]=X[ 3],X[ 2],40 } | ||
| 342 | { .mii; shrp X[ 2]=X[ 2],X[ 1],40 | ||
| 343 | shrp X[ 1]=X[ 1],X[ 0],40 } | ||
| 344 | { .mib; shrp X[ 0]=X[ 0],T1,40 | ||
| 345 | br.many .L_first16 };; | ||
| 346 | .L4byte: | ||
| 347 | { .mmi; $LDW X[ 7]=[input],4*$SZ | ||
| 348 | $LDW X[ 6]=[r8],4*$SZ | ||
| 349 | shrp X[15]=X[15],X[14],32 } | ||
| 350 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
| 351 | $LDW X[ 4]=[r10],4*$SZ | ||
| 352 | shrp X[14]=X[14],X[13],32 };; | ||
| 353 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
| 354 | $LDW X[ 2]=[r8],4*$SZ | ||
| 355 | shrp X[13]=X[13],X[12],32 } | ||
| 356 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
| 357 | $LDW X[ 0]=[r10],4*$SZ | ||
| 358 | shrp X[12]=X[12],X[11],32 };; | ||
| 359 | { .mii; $LDW T1=[input] | ||
| 360 | shrp X[11]=X[11],X[10],32 | ||
| 361 | shrp X[10]=X[10],X[ 9],32 } | ||
| 362 | { .mii; shrp X[ 9]=X[ 9],X[ 8],32 | ||
| 363 | shrp X[ 8]=X[ 8],X[ 7],32 };; | ||
| 364 | { .mii; shrp X[ 7]=X[ 7],X[ 6],32 | ||
| 365 | shrp X[ 6]=X[ 6],X[ 5],32 } | ||
| 366 | { .mii; shrp X[ 5]=X[ 5],X[ 4],32 | ||
| 367 | shrp X[ 4]=X[ 4],X[ 3],32 } | ||
| 368 | { .mii; shrp X[ 3]=X[ 3],X[ 2],32 | ||
| 369 | shrp X[ 2]=X[ 2],X[ 1],32 } | ||
| 370 | { .mii; shrp X[ 1]=X[ 1],X[ 0],32 | ||
| 371 | shrp X[ 0]=X[ 0],T1,32 } | ||
| 372 | { .mfb; br.many .L_first16 };; | ||
| 373 | .L5byte: | ||
| 374 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
| 375 | $LDW X[ 4]=[r10],4*$SZ | ||
| 376 | shrp X[15]=X[15],X[14],24 };; | ||
| 377 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
| 378 | $LDW X[ 2]=[r8],4*$SZ | ||
| 379 | shrp X[14]=X[14],X[13],24 } | ||
| 380 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
| 381 | $LDW X[ 0]=[r10],4*$SZ | ||
| 382 | shrp X[13]=X[13],X[12],24 };; | ||
| 383 | { .mii; $LDW T1=[input] | ||
| 384 | shrp X[12]=X[12],X[11],24 | ||
| 385 | shrp X[11]=X[11],X[10],24 } | ||
| 386 | { .mii; shrp X[10]=X[10],X[ 9],24 | ||
| 387 | shrp X[ 9]=X[ 9],X[ 8],24 };; | ||
| 388 | { .mii; shrp X[ 8]=X[ 8],X[ 7],24 | ||
| 389 | shrp X[ 7]=X[ 7],X[ 6],24 } | ||
| 390 | { .mii; shrp X[ 6]=X[ 6],X[ 5],24 | ||
| 391 | shrp X[ 5]=X[ 5],X[ 4],24 } | ||
| 392 | { .mii; shrp X[ 4]=X[ 4],X[ 3],24 | ||
| 393 | shrp X[ 3]=X[ 3],X[ 2],24 } | ||
| 394 | { .mii; shrp X[ 2]=X[ 2],X[ 1],24 | ||
| 395 | shrp X[ 1]=X[ 1],X[ 0],24 } | ||
| 396 | { .mib; shrp X[ 0]=X[ 0],T1,24 | ||
| 397 | br.many .L_first16 };; | ||
| 398 | .L6byte: | ||
| 399 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
| 400 | $LDW X[ 2]=[r8],4*$SZ | ||
| 401 | shrp X[15]=X[15],X[14],16 } | ||
| 402 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
| 403 | $LDW X[ 0]=[r10],4*$SZ | ||
| 404 | shrp X[14]=X[14],X[13],16 };; | ||
| 405 | { .mii; $LDW T1=[input] | ||
| 406 | shrp X[13]=X[13],X[12],16 | ||
| 407 | shrp X[12]=X[12],X[11],16 } | ||
| 408 | { .mii; shrp X[11]=X[11],X[10],16 | ||
| 409 | shrp X[10]=X[10],X[ 9],16 };; | ||
| 410 | { .mii; shrp X[ 9]=X[ 9],X[ 8],16 | ||
| 411 | shrp X[ 8]=X[ 8],X[ 7],16 } | ||
| 412 | { .mii; shrp X[ 7]=X[ 7],X[ 6],16 | ||
| 413 | shrp X[ 6]=X[ 6],X[ 5],16 } | ||
| 414 | { .mii; shrp X[ 5]=X[ 5],X[ 4],16 | ||
| 415 | shrp X[ 4]=X[ 4],X[ 3],16 } | ||
| 416 | { .mii; shrp X[ 3]=X[ 3],X[ 2],16 | ||
| 417 | shrp X[ 2]=X[ 2],X[ 1],16 } | ||
| 418 | { .mii; shrp X[ 1]=X[ 1],X[ 0],16 | ||
| 419 | shrp X[ 0]=X[ 0],T1,16 } | ||
| 420 | { .mfb; br.many .L_first16 };; | ||
| 421 | .L7byte: | ||
| 422 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
| 423 | $LDW X[ 0]=[r10],4*$SZ | ||
| 424 | shrp X[15]=X[15],X[14],8 };; | ||
| 425 | { .mii; $LDW T1=[input] | ||
| 426 | shrp X[14]=X[14],X[13],8 | ||
| 427 | shrp X[13]=X[13],X[12],8 } | ||
| 428 | { .mii; shrp X[12]=X[12],X[11],8 | ||
| 429 | shrp X[11]=X[11],X[10],8 };; | ||
| 430 | { .mii; shrp X[10]=X[10],X[ 9],8 | ||
| 431 | shrp X[ 9]=X[ 9],X[ 8],8 } | ||
| 432 | { .mii; shrp X[ 8]=X[ 8],X[ 7],8 | ||
| 433 | shrp X[ 7]=X[ 7],X[ 6],8 } | ||
| 434 | { .mii; shrp X[ 6]=X[ 6],X[ 5],8 | ||
| 435 | shrp X[ 5]=X[ 5],X[ 4],8 } | ||
| 436 | { .mii; shrp X[ 4]=X[ 4],X[ 3],8 | ||
| 437 | shrp X[ 3]=X[ 3],X[ 2],8 } | ||
| 438 | { .mii; shrp X[ 2]=X[ 2],X[ 1],8 | ||
| 439 | shrp X[ 1]=X[ 1],X[ 0],8 } | ||
| 440 | { .mib; shrp X[ 0]=X[ 0],T1,8 | ||
| 441 | br.many .L_first16 };; | ||
| 442 | |||
| 443 | .align 32 | ||
| 444 | .L_first16: | ||
| 445 | { .mmi; $LDW K=[Ktbl],$SZ | ||
| 446 | and T1=F,E | ||
| 447 | and T2=A,B } | ||
| 448 | { .mmi; //$LDW X[15]=[input],$SZ // X[i]=*input++ | ||
| 449 | andcm r8=G,E | ||
| 450 | and r9=A,C };; | ||
| 451 | { .mmi; xor T1=T1,r8 //T1=((e & f) ^ (~e & g)) | ||
| 452 | and r10=B,C | ||
| 453 | _rotr r11=$t1,$Sigma1[0] } // ROTR(e,14) | ||
| 454 | { .mmi; xor T2=T2,r9 | ||
| 455 | mux1 X[15]=X[15],\@rev };; // eliminated in big-endian | ||
| 456 | ___ | ||
| 457 | $code.=<<___; | ||
| 458 | { .mib; add T1=T1,H // T1=Ch(e,f,g)+h | ||
| 459 | _rotr r8=$t1,$Sigma1[1] } // ROTR(e,18) | ||
| 460 | { .mib; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c)) | ||
| 461 | mov H=G };; | ||
| 462 | { .mib; xor r11=r8,r11 | ||
| 463 | _rotr r9=$t1,$Sigma1[2] } // ROTR(e,41) | ||
| 464 | { .mib; mov G=F | ||
| 465 | mov F=E };; | ||
| 466 | { .mib; xor r9=r9,r11 // r9=Sigma1(e) | ||
| 467 | _rotr r10=$t0,$Sigma0[0] } // ROTR(a,28) | ||
| 468 | { .mib; add T1=T1,K // T1=Ch(e,f,g)+h+K512[i] | ||
| 469 | mov E=D };; | ||
| 470 | { .mib; add T1=T1,r9 // T1+=Sigma1(e) | ||
| 471 | _rotr r11=$t0,$Sigma0[1] } // ROTR(a,34) | ||
| 472 | { .mib; mov D=C | ||
| 473 | mov C=B };; | ||
| 474 | { .mib; add T1=T1,X[15] // T1+=X[i] | ||
| 475 | _rotr r8=$t0,$Sigma0[2] } // ROTR(a,39) | ||
| 476 | { .mib; xor r10=r10,r11 | ||
| 477 | mux2 X[15]=X[15],0x44 };; // eliminated in 64-bit | ||
| 478 | { .mmi; xor r10=r8,r10 // r10=Sigma0(a) | ||
| 479 | mov B=A | ||
| 480 | add A=T1,T2 };; | ||
| 481 | { .mib; add E=E,T1 | ||
| 482 | add A=A,r10 // T2=Maj(a,b,c)+Sigma0(a) | ||
| 483 | br.ctop.sptk .L_first16 };; | ||
| 484 | .L_first16_end: | ||
| 485 | |||
| 486 | { .mii; mov ar.lc=$rounds-17 | ||
| 487 | mov ar.ec=1 };; | ||
| 488 | |||
| 489 | .align 32 | ||
| 490 | .L_rest: | ||
| 491 | .rotr X[16] | ||
| 492 | { .mib; $LDW K=[Ktbl],$SZ | ||
| 493 | _rotr r8=X[15-1],$sigma0[0] } // ROTR(s0,1) | ||
| 494 | { .mib; $ADD X[15]=X[15],X[15-9] // X[i&0xF]+=X[(i+9)&0xF] | ||
| 495 | $SHRU s0=X[15-1],sgm0 };; // s0=X[(i+1)&0xF]>>7 | ||
| 496 | { .mib; and T1=F,E | ||
| 497 | _rotr r9=X[15-1],$sigma0[1] } // ROTR(s0,8) | ||
| 498 | { .mib; andcm r10=G,E | ||
| 499 | $SHRU s1=X[15-14],sgm1 };; // s1=X[(i+14)&0xF]>>6 | ||
| 500 | { .mmi; xor T1=T1,r10 // T1=((e & f) ^ (~e & g)) | ||
| 501 | xor r9=r8,r9 | ||
| 502 | _rotr r10=X[15-14],$sigma1[0] };;// ROTR(s1,19) | ||
| 503 | { .mib; and T2=A,B | ||
| 504 | _rotr r11=X[15-14],$sigma1[1] }// ROTR(s1,61) | ||
| 505 | { .mib; and r8=A,C };; | ||
| 506 | ___ | ||
| 507 | $t0="t0", $t1="t1", $code.=<<___ if ($BITS==32); | ||
| 508 | // I adhere to mmi; in order to hold Itanium 1 back and avoid 6 cycle | ||
| 509 | // pipeline flush in last bundle. Note that even on Itanium2 the | ||
| 510 | // latter stalls for one clock cycle... | ||
| 511 | { .mmi; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF]) | ||
| 512 | dep.z $t1=E,32,32 } | ||
| 513 | { .mmi; xor r10=r11,r10 | ||
| 514 | zxt4 E=E };; | ||
| 515 | { .mmi; or $t1=$t1,E | ||
| 516 | xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF]) | ||
| 517 | mux2 $t0=A,0x44 };; // copy lower half to upper | ||
| 518 | { .mmi; xor T2=T2,r8 | ||
| 519 | _rotr r9=$t1,$Sigma1[0] } // ROTR(e,14) | ||
| 520 | { .mmi; and r10=B,C | ||
| 521 | add T1=T1,H // T1=Ch(e,f,g)+h | ||
| 522 | $ADD X[15]=X[15],s0 };; // X[i&0xF]+=sigma0(X[(i+1)&0xF]) | ||
| 523 | ___ | ||
| 524 | $t0="A", $t1="E", $code.=<<___ if ($BITS==64); | ||
| 525 | { .mib; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF]) | ||
| 526 | _rotr r9=$t1,$Sigma1[0] } // ROTR(e,14) | ||
| 527 | { .mib; xor r10=r11,r10 | ||
| 528 | xor T2=T2,r8 };; | ||
| 529 | { .mib; xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF]) | ||
| 530 | add T1=T1,H } | ||
| 531 | { .mib; and r10=B,C | ||
| 532 | $ADD X[15]=X[15],s0 };; // X[i&0xF]+=sigma0(X[(i+1)&0xF]) | ||
| 533 | ___ | ||
| 534 | $code.=<<___; | ||
| 535 | { .mmi; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c)) | ||
| 536 | mov H=G | ||
| 537 | _rotr r8=$t1,$Sigma1[1] };; // ROTR(e,18) | ||
| 538 | { .mmi; xor r11=r8,r9 | ||
| 539 | $ADD X[15]=X[15],s1 // X[i&0xF]+=sigma1(X[(i+14)&0xF]) | ||
| 540 | _rotr r9=$t1,$Sigma1[2] } // ROTR(e,41) | ||
| 541 | { .mmi; mov G=F | ||
| 542 | mov F=E };; | ||
| 543 | { .mib; xor r9=r9,r11 // r9=Sigma1(e) | ||
| 544 | _rotr r10=$t0,$Sigma0[0] } // ROTR(a,28) | ||
| 545 | { .mib; add T1=T1,K // T1=Ch(e,f,g)+h+K512[i] | ||
| 546 | mov E=D };; | ||
| 547 | { .mib; add T1=T1,r9 // T1+=Sigma1(e) | ||
| 548 | _rotr r11=$t0,$Sigma0[1] } // ROTR(a,34) | ||
| 549 | { .mib; mov D=C | ||
| 550 | mov C=B };; | ||
| 551 | { .mmi; add T1=T1,X[15] // T1+=X[i] | ||
| 552 | xor r10=r10,r11 | ||
| 553 | _rotr r8=$t0,$Sigma0[2] };; // ROTR(a,39) | ||
| 554 | { .mmi; xor r10=r8,r10 // r10=Sigma0(a) | ||
| 555 | mov B=A | ||
| 556 | add A=T1,T2 };; | ||
| 557 | { .mib; add E=E,T1 | ||
| 558 | add A=A,r10 // T2=Maj(a,b,c)+Sigma0(a) | ||
| 559 | br.ctop.sptk .L_rest };; | ||
| 560 | .L_rest_end: | ||
| 561 | |||
| 562 | { .mmi; add A_=A_,A | ||
| 563 | add B_=B_,B | ||
| 564 | add C_=C_,C } | ||
| 565 | { .mmi; add D_=D_,D | ||
| 566 | add E_=E_,E | ||
| 567 | cmp.ltu p16,p0=1,num };; | ||
| 568 | { .mmi; add F_=F_,F | ||
| 569 | add G_=G_,G | ||
| 570 | add H_=H_,H } | ||
| 571 | { .mmb; add Ktbl=-$SZ*$rounds,Ktbl | ||
| 572 | (p16) add num=-1,num | ||
| 573 | (p16) br.dptk.many .L_outer };; | ||
| 574 | |||
| 575 | { .mib; add r8=0*$SZ,ctx | ||
| 576 | add r9=1*$SZ,ctx } | ||
| 577 | { .mib; add r10=2*$SZ,ctx | ||
| 578 | add r11=3*$SZ,ctx };; | ||
| 579 | { .mmi; $STW [r8]=A_,4*$SZ | ||
| 580 | $STW [r9]=B_,4*$SZ | ||
| 581 | mov ar.lc=lcsave } | ||
| 582 | { .mmi; $STW [r10]=C_,4*$SZ | ||
| 583 | $STW [r11]=D_,4*$SZ | ||
| 584 | mov pr=prsave,0x1ffff };; | ||
| 585 | { .mmb; $STW [r8]=E_ | ||
| 586 | $STW [r9]=F_ } | ||
| 587 | { .mmb; $STW [r10]=G_ | ||
| 588 | $STW [r11]=H_ | ||
| 589 | br.ret.sptk.many b0 };; | ||
| 590 | .endp $func# | ||
| 591 | ___ | ||
| 592 | |||
| 593 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
| 594 | $code =~ s/_rotr(\s+)([^=]+)=([^,]+),([0-9]+)/shrp$1$2=$3,$3,$4/gm; | ||
| 595 | if ($BITS==64) { | ||
| 596 | $code =~ s/mux2(\s+)\S+/nop.i$1 0x0/gm; | ||
| 597 | $code =~ s/mux1(\s+)\S+/nop.i$1 0x0/gm if ($big_endian); | ||
| 598 | $code =~ s/(shrp\s+X\[[^=]+)=([^,]+),([^,]+),([1-9]+)/$1=$3,$2,64-$4/gm | ||
| 599 | if (!$big_endian); | ||
| 600 | $code =~ s/ld1(\s+)X\[\S+/nop.m$1 0x0/gm; | ||
| 601 | } | ||
| 602 | |||
| 603 | print $code; | ||
| 604 | |||
| 605 | print<<___ if ($BITS==32); | ||
| 606 | .align 64 | ||
| 607 | .type K256#,\@object | ||
| 608 | K256: data4 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 | ||
| 609 | data4 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 | ||
| 610 | data4 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 | ||
| 611 | data4 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 | ||
| 612 | data4 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc | ||
| 613 | data4 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da | ||
| 614 | data4 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 | ||
| 615 | data4 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 | ||
| 616 | data4 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 | ||
| 617 | data4 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 | ||
| 618 | data4 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 | ||
| 619 | data4 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 | ||
| 620 | data4 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 | ||
| 621 | data4 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 | ||
| 622 | data4 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 | ||
| 623 | data4 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 | ||
| 624 | .size K256#,$SZ*$rounds | ||
| 625 | stringz "SHA256 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>" | ||
| 626 | ___ | ||
| 627 | print<<___ if ($BITS==64); | ||
| 628 | .align 64 | ||
| 629 | .type K512#,\@object | ||
| 630 | K512: data8 0x428a2f98d728ae22,0x7137449123ef65cd | ||
| 631 | data8 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc | ||
| 632 | data8 0x3956c25bf348b538,0x59f111f1b605d019 | ||
| 633 | data8 0x923f82a4af194f9b,0xab1c5ed5da6d8118 | ||
| 634 | data8 0xd807aa98a3030242,0x12835b0145706fbe | ||
| 635 | data8 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 | ||
| 636 | data8 0x72be5d74f27b896f,0x80deb1fe3b1696b1 | ||
| 637 | data8 0x9bdc06a725c71235,0xc19bf174cf692694 | ||
| 638 | data8 0xe49b69c19ef14ad2,0xefbe4786384f25e3 | ||
| 639 | data8 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 | ||
| 640 | data8 0x2de92c6f592b0275,0x4a7484aa6ea6e483 | ||
| 641 | data8 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 | ||
| 642 | data8 0x983e5152ee66dfab,0xa831c66d2db43210 | ||
| 643 | data8 0xb00327c898fb213f,0xbf597fc7beef0ee4 | ||
| 644 | data8 0xc6e00bf33da88fc2,0xd5a79147930aa725 | ||
| 645 | data8 0x06ca6351e003826f,0x142929670a0e6e70 | ||
| 646 | data8 0x27b70a8546d22ffc,0x2e1b21385c26c926 | ||
| 647 | data8 0x4d2c6dfc5ac42aed,0x53380d139d95b3df | ||
| 648 | data8 0x650a73548baf63de,0x766a0abb3c77b2a8 | ||
| 649 | data8 0x81c2c92e47edaee6,0x92722c851482353b | ||
| 650 | data8 0xa2bfe8a14cf10364,0xa81a664bbc423001 | ||
| 651 | data8 0xc24b8b70d0f89791,0xc76c51a30654be30 | ||
| 652 | data8 0xd192e819d6ef5218,0xd69906245565a910 | ||
| 653 | data8 0xf40e35855771202a,0x106aa07032bbd1b8 | ||
| 654 | data8 0x19a4c116b8d2d0c8,0x1e376c085141ab53 | ||
| 655 | data8 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 | ||
| 656 | data8 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb | ||
| 657 | data8 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 | ||
| 658 | data8 0x748f82ee5defb2fc,0x78a5636f43172f60 | ||
| 659 | data8 0x84c87814a1f0ab72,0x8cc702081a6439ec | ||
| 660 | data8 0x90befffa23631e28,0xa4506cebde82bde9 | ||
| 661 | data8 0xbef9a3f7b2c67915,0xc67178f2e372532b | ||
| 662 | data8 0xca273eceea26619c,0xd186b8c721c0c207 | ||
| 663 | data8 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 | ||
| 664 | data8 0x06f067aa72176fba,0x0a637dc5a2c898a6 | ||
| 665 | data8 0x113f9804bef90dae,0x1b710b35131c471b | ||
| 666 | data8 0x28db77f523047d84,0x32caab7b40c72493 | ||
| 667 | data8 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c | ||
| 668 | data8 0x4cc5d4becb3e42b6,0x597f299cfc657e2a | ||
| 669 | data8 0x5fcb6fab3ad6faec,0x6c44198c4a475817 | ||
| 670 | .size K512#,$SZ*$rounds | ||
| 671 | stringz "SHA512 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>" | ||
| 672 | ___ | ||
diff --git a/src/lib/libcrypto/sha/asm/sha512-sse2.pl b/src/lib/libcrypto/sha/asm/sha512-sse2.pl new file mode 100644 index 0000000000..10902bf673 --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha512-sse2.pl | |||
| @@ -0,0 +1,404 @@ | |||
| 1 | #!/usr/bin/env perl | ||
| 2 | # | ||
| 3 | # ==================================================================== | ||
| 4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
| 5 | # project. Rights for redistribution and usage in source and binary | ||
| 6 | # forms are granted according to the OpenSSL license. | ||
| 7 | # ==================================================================== | ||
| 8 | # | ||
| 9 | # SHA512_Transform_SSE2. | ||
| 10 | # | ||
| 11 | # As the name suggests, this is an IA-32 SSE2 implementation of | ||
| 12 | # SHA512_Transform. Motivating factor for the undertaken effort was that | ||
| 13 | # SHA512 was observed to *consistently* perform *significantly* poorer | ||
| 14 | # than SHA256 [2x and slower is common] on 32-bit platforms. On 64-bit | ||
| 15 | # platforms on the other hand SHA512 tend to outperform SHA256 [~50% | ||
| 16 | # seem to be common improvement factor]. All this is perfectly natural, | ||
| 17 | # as SHA512 is a 64-bit algorithm. But isn't IA-32 SSE2 essentially | ||
| 18 | # a 64-bit instruction set? Is it rich enough to implement SHA512? | ||
| 19 | # If answer was "no," then you wouldn't have been reading this... | ||
| 20 | # | ||
| 21 | # Throughput performance in MBps (larger is better): | ||
| 22 | # | ||
| 23 | # 2.4GHz P4 1.4GHz AMD32 1.4GHz AMD64(*) | ||
| 24 | # SHA256/gcc(*) 54 43 59 | ||
| 25 | # SHA512/gcc 17 23 92 | ||
| 26 | # SHA512/sse2 61(**) 57(**) | ||
| 27 | # SHA512/icc 26 28 | ||
| 28 | # SHA256/icc(*) 65 54 | ||
| 29 | # | ||
| 30 | # (*) AMD64 and SHA256 numbers are presented mostly for amusement or | ||
| 31 | # reference purposes. | ||
| 32 | # (**) I.e. it gives ~2-3x speed-up if compared with compiler generated | ||
| 33 | # code. One can argue that hand-coded *non*-SSE2 implementation | ||
| 34 | # would perform better than compiler generated one as well, and | ||
| 35 | # that comparison is therefore not exactly fair. Well, as SHA512 | ||
| 36 | # puts enormous pressure on IA-32 GP register bank, I reckon that | ||
| 37 | # hand-coded version wouldn't perform significantly better than | ||
| 38 | # one compiled with icc, ~20% perhaps... So that this code would | ||
| 39 | # still outperform it with distinguishing marginal. But feel free | ||
| 40 | # to prove me wrong:-) | ||
| 41 | # <appro@fy.chalmers.se> | ||
| 42 | push(@INC,"perlasm","../../perlasm"); | ||
| 43 | require "x86asm.pl"; | ||
| 44 | |||
| 45 | &asm_init($ARGV[0],"sha512-sse2.pl",$ARGV[$#ARGV] eq "386"); | ||
| 46 | |||
| 47 | $K512="esi"; # K512[80] table, found at the end... | ||
| 48 | #$W512="esp"; # $W512 is not just W512[16]: it comprises *two* copies | ||
| 49 | # of W512[16] and a copy of A-H variables... | ||
| 50 | $W512_SZ=8*(16+16+8); # see above... | ||
| 51 | #$Kidx="ebx"; # index in K512 table, advances from 0 to 80... | ||
| 52 | $Widx="edx"; # index in W512, wraps around at 16... | ||
| 53 | $data="edi"; # 16 qwords of input data... | ||
| 54 | $A="mm0"; # B-D and | ||
| 55 | $E="mm1"; # F-H are allocated dynamically... | ||
| 56 | $Aoff=256+0; # A-H offsets relative to $W512... | ||
| 57 | $Boff=256+8; | ||
| 58 | $Coff=256+16; | ||
| 59 | $Doff=256+24; | ||
| 60 | $Eoff=256+32; | ||
| 61 | $Foff=256+40; | ||
| 62 | $Goff=256+48; | ||
| 63 | $Hoff=256+56; | ||
| 64 | |||
# Emit the instructions for one SHA-512 round.
#   $kidx - index into the K512 constant table (0..79);
#   $widx - index into the on-stack W512 schedule (0..15; caller wraps it).
# Register contract: a lives in $A (%mm0), e in $E (%mm1); b-d and f-h
# live in memory at $W512+{B..H}off; %mm2-%mm7 are scratch.
# Fixes: the original carried an empty "()" prototype, which contradicts
# the two-argument call sites (harmless only because every caller uses
# the &-sigil, which bypasses prototypes), and unpacked @_ with
# dynamically-scoped local() where lexical my() is the correct choice.
sub SHA2_ROUND
{ my ($kidx,$widx)=@_;

    # One can argue that one could reorder instructions for better
    # performance. Well, I tried and it doesn't seem to make any
    # noticeable difference. Modern out-of-order execution cores
    # reorder instructions to their liking in either case and they
    # apparently do decent job. So we can keep the code more
    # readable/regular/comprehensible:-)

    # I adhere to 64-bit %mmX registers in order to avoid/not care
    # about #GP exceptions on misaligned 128-bit access, most
    # notably in paddq with memory operand. Not to mention that
    # SSE2 intructions operating on %mmX can be scheduled every
    # cycle [and not every second one if operating on %xmmN].

    &movq ("mm4",&QWP($Foff,$W512));    # load f
    &movq ("mm5",&QWP($Goff,$W512));    # load g
    &movq ("mm6",&QWP($Hoff,$W512));    # load h

    &movq ("mm2",$E);                   # %mm2 is sliding right
    &movq ("mm3",$E);                   # %mm3 is sliding left
    &psrlq ("mm2",14);
    &psllq ("mm3",23);
    &movq ("mm7","mm2");                # %mm7 is T1
    &pxor ("mm7","mm3");
    &psrlq ("mm2",4);
    &psllq ("mm3",23);
    &pxor ("mm7","mm2");
    &pxor ("mm7","mm3");
    &psrlq ("mm2",23);
    &psllq ("mm3",4);
    &pxor ("mm7","mm2");
    &pxor ("mm7","mm3");                # T1=Sigma1_512(e)

    &movq (&QWP($Foff,$W512),$E);       # f = e
    &movq (&QWP($Goff,$W512),"mm4");    # g = f
    &movq (&QWP($Hoff,$W512),"mm5");    # h = g

    &pxor ("mm4","mm5");                # f^=g
    &pand ("mm4",$E);                   # f&=e
    &pxor ("mm4","mm5");                # f^=g
    &paddq ("mm7","mm4");               # T1+=Ch(e,f,g)

    &movq ("mm2",&QWP($Boff,$W512));    # load b
    &movq ("mm3",&QWP($Coff,$W512));    # load c
    &movq ($E,&QWP($Doff,$W512));       # e = d

    &paddq ("mm7","mm6");               # T1+=h
    &paddq ("mm7",&QWP(0,$K512,$kidx,8));   # T1+=K512[i]
    &paddq ("mm7",&QWP(0,$W512,$widx,8));   # T1+=W512[i]
    &paddq ($E,"mm7");                  # e += T1

    &movq ("mm4",$A);                   # %mm4 is sliding right
    &movq ("mm5",$A);                   # %mm5 is sliding left
    &psrlq ("mm4",28);
    &psllq ("mm5",25);
    &movq ("mm6","mm4");                # %mm6 is T2
    &pxor ("mm6","mm5");
    &psrlq ("mm4",6);
    &psllq ("mm5",5);
    &pxor ("mm6","mm4");
    &pxor ("mm6","mm5");
    &psrlq ("mm4",5);
    &psllq ("mm5",6);
    &pxor ("mm6","mm4");
    &pxor ("mm6","mm5");                # T2=Sigma0_512(a)

    &movq (&QWP($Boff,$W512),$A);       # b = a
    &movq (&QWP($Coff,$W512),"mm2");    # c = b
    &movq (&QWP($Doff,$W512),"mm3");    # d = c

    &movq ("mm4",$A);                   # %mm4=a
    &por ($A,"mm3");                    # a=a|c
    &pand ("mm4","mm3");                # %mm4=a&c
    &pand ($A,"mm2");                   # a=(a|c)&b
    &por ("mm4",$A);                    # %mm4=(a&c)|((a|c)&b)
    &paddq ("mm6","mm4");               # T2+=Maj(a,b,c)

    &movq ($A,"mm7");                   # a=T1
    &paddq ($A,"mm6");                  # a+=T2
}
| 147 | |||
$func="sha512_block_sse2";

# void sha512_block_sse2(u64 state[8], const void *data, size_t nblocks)
# Function prologue: save callee-saved registers, make %esp-relative
# room for the W512 workspace (two schedule copies + A-H), locate K512
# PIC-safely, and run rounds 0-15 while byte-swapping the input.
&function_begin_B($func);
    if (0) {# Caller is expected to check if it's appropriate to
            # call this routine. Below 3 lines are retained for
            # debugging purposes...
    &picmeup("eax","OPENSSL_ia32cap");
    &bt (&DWP(0,"eax"),26);
    &jnc ("SHA512_Transform");
    }

    &push ("ebp");
    &mov ("ebp","esp");
    &push ("ebx");
    &push ("esi");
    &push ("edi");

    &mov ($Widx,&DWP(8,"ebp"));     # A-H state, 1st arg
    &mov ($data,&DWP(12,"ebp"));    # input data, 2nd arg
    &call (&label("pic_point"));    # make it PIC!
    &set_label("pic_point");
    &blindpop($K512);
    &lea ($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512));

    $W512 = "esp";                  # start using %esp as W512
    &sub ($W512,$W512_SZ);
    &and ($W512,-16);               # ensure 128-bit alignment

    # make private copy of A-H
    #       v assume the worst and stick to unaligned load
    &movdqu ("xmm0",&QWP(0,$Widx));
    &movdqu ("xmm1",&QWP(16,$Widx));
    &movdqu ("xmm2",&QWP(32,$Widx));
    &movdqu ("xmm3",&QWP(48,$Widx));

&align(8);
&set_label("_chunk_loop");

    &movdqa (&QWP($Aoff,$W512),"xmm0");     # a,b
    &movdqa (&QWP($Coff,$W512),"xmm1");     # c,d
    &movdqa (&QWP($Eoff,$W512),"xmm2");     # e,f
    &movdqa (&QWP($Goff,$W512),"xmm3");     # g,h

    &xor ($Widx,$Widx);

    &movdq2q($A,"xmm0");                    # load a
    &movdq2q($E,"xmm2");                    # load e

    # Why aren't loops unrolled? It makes sense to unroll if
    # execution time for loop body is comparable with branch
    # penalties and/or if whole data-set resides in register bank.
    # Neither is case here... Well, it would be possible to
    # eliminate few store operations, but it would hardly affect
    # so to say stop-watch performance, as there is a lot of
    # available memory slots to fill. It will only relieve some
    # pressure off memory bus...

    # flip input stream byte order...
    &mov ("eax",&DWP(0,$data,$Widx,8));
    &mov ("ebx",&DWP(4,$data,$Widx,8));
    &bswap ("eax");
    &bswap ("ebx");
    &mov (&DWP(0,$W512,$Widx,8),"ebx");         # W512[i]
    &mov (&DWP(4,$W512,$Widx,8),"eax");
    &mov (&DWP(128+0,$W512,$Widx,8),"ebx");     # copy of W512[i]
    &mov (&DWP(128+4,$W512,$Widx,8),"eax");

&align(8);
&set_label("_1st_loop");            # 0-15
    # flip input stream byte order...
    &mov ("eax",&DWP(0+8,$data,$Widx,8));
    &mov ("ebx",&DWP(4+8,$data,$Widx,8));
    &bswap ("eax");
    &bswap ("ebx");
    &mov (&DWP(0+8,$W512,$Widx,8),"ebx");       # W512[i]
    &mov (&DWP(4+8,$W512,$Widx,8),"eax");
    &mov (&DWP(128+0+8,$W512,$Widx,8),"ebx");   # copy of W512[i]
    &mov (&DWP(128+4+8,$W512,$Widx,8),"eax");
&set_label("_1st_looplet");
    &SHA2_ROUND($Widx,$Widx); &inc($Widx);

    # NOTE: this statement used to lack its terminating semicolon and
    # only compiled because Perl reparsed it as a bitwise "&" of the
    # following jl() call -- same emitted code, but by accident.
    &cmp ($Widx,15);
    &jl (&label("_1st_loop"));
    &je (&label("_1st_looplet"));   # playing similar trick on 2nd loop
                                    # does not improve performance...
$Kidx = "ebx";                  # start using %ebx as Kidx; it runs 16..79
                                # while $Widx keeps wrapping modulo 16
    &mov ($Kidx,$Widx);

&align(8);
&set_label("_2nd_loop");        # 16-79
    &and($Widx,0xf);            # wrap schedule index into the 16-entry window

    # 128-bit fragment! I update W512[i] and W512[i+1] in
    # parallel:-) Note that I refer to W512[(i&0xf)+N] and not to
    # W512[(i+N)&0xf]! This is exactly what I maintain the second
    # copy of W512[16] for...
    &movdqu ("xmm0",&QWP(8*1,$W512,$Widx,8));   # s0=W512[i+1]
    &movdqa ("xmm2","xmm0");    # %mm2 is sliding right
    &movdqa ("xmm3","xmm0");    # %mm3 is sliding left
    &psrlq ("xmm2",1);
    &psllq ("xmm3",56);
    &movdqa ("xmm0","xmm2");
    &pxor ("xmm0","xmm3");
    &psrlq ("xmm2",6);
    &psllq ("xmm3",7);
    &pxor ("xmm0","xmm2");
    &pxor ("xmm0","xmm3");
    &psrlq ("xmm2",1);
    &pxor ("xmm0","xmm2");      # s0 = sigma0_512(s0);

    &movdqa ("xmm1",&QWP(8*14,$W512,$Widx,8));  # s1=W512[i+14]
    &movdqa ("xmm4","xmm1");    # %mm4 is sliding right
    &movdqa ("xmm5","xmm1");    # %mm5 is sliding left
    &psrlq ("xmm4",6);
    &psllq ("xmm5",3);
    &movdqa ("xmm1","xmm4");
    &pxor ("xmm1","xmm5");
    &psrlq ("xmm4",13);
    &psllq ("xmm5",42);
    &pxor ("xmm1","xmm4");
    &pxor ("xmm1","xmm5");
    &psrlq ("xmm4",42);
    &pxor ("xmm1","xmm4");      # s1 = sigma1_512(s1);

    # + have to explictly load W512[i+9] as it's not 128-bit
    # v   aligned and paddq would throw an exception...
    &movdqu ("xmm6",&QWP(8*9,$W512,$Widx,8));
    &paddq ("xmm0","xmm1");     # s0 += s1
    &paddq ("xmm0","xmm6");     # s0 += W512[i+9]
    &paddq ("xmm0",&QWP(0,$W512,$Widx,8));      # s0 += W512[i]

    &movdqa (&QWP(0,$W512,$Widx,8),"xmm0");     # W512[i] = s0
    &movdqa (&QWP(16*8,$W512,$Widx,8),"xmm0");  # copy of W512[i]

    # as the above fragment was 128-bit, we "owe" 2 rounds...
    &SHA2_ROUND($Kidx,$Widx); &inc($Kidx); &inc($Widx);
    &SHA2_ROUND($Kidx,$Widx); &inc($Kidx); &inc($Widx);

    &cmp ($Kidx,80);
    &jl (&label("_2nd_loop"));

    # update A-H state: add the private working copy back into the
    # caller's state array (1st argument, re-fetched via %ebp)
    &mov ($Widx,&DWP(8,"ebp"));     # A-H state, 1st arg
    &movq (&QWP($Aoff,$W512),$A);   # write out a
    &movq (&QWP($Eoff,$W512),$E);   # write out e
    &movdqu ("xmm0",&QWP(0,$Widx));
    &movdqu ("xmm1",&QWP(16,$Widx));
    &movdqu ("xmm2",&QWP(32,$Widx));
    &movdqu ("xmm3",&QWP(48,$Widx));
    &paddq ("xmm0",&QWP($Aoff,$W512));  # 128-bit additions...
    &paddq ("xmm1",&QWP($Coff,$W512));
    &paddq ("xmm2",&QWP($Eoff,$W512));
    &paddq ("xmm3",&QWP($Goff,$W512));
    &movdqu (&QWP(0,$Widx),"xmm0");
    &movdqu (&QWP(16,$Widx),"xmm1");
    &movdqu (&QWP(32,$Widx),"xmm2");
    &movdqu (&QWP(48,$Widx),"xmm3");

    &add ($data,16*8);          # advance input data pointer
    &dec (&DWP(16,"ebp"));      # decrement 3rd arg (remaining block count)
    &jnz (&label("_chunk_loop"));

    # epilogue
    &emms ();   # required for at least ELF and Win32 ABIs
    &mov ("edi",&DWP(-12,"ebp"));
    &mov ("esi",&DWP(-8,"ebp"));
    &mov ("ebx",&DWP(-4,"ebp"));
    &leave ();
    &ret ();
# K512 round-constant table, 80 64-bit constants emitted as pairs of
# 32-bit words in (low,high) order -- i.e. little-endian qwords.
&align(64);
&set_label("K512");     # Yes! I keep it in the code segment!
    &data_word(0xd728ae22,0x428a2f98);  # u64
    &data_word(0x23ef65cd,0x71374491);  # u64
    &data_word(0xec4d3b2f,0xb5c0fbcf);  # u64
    &data_word(0x8189dbbc,0xe9b5dba5);  # u64
    &data_word(0xf348b538,0x3956c25b);  # u64
    &data_word(0xb605d019,0x59f111f1);  # u64
    &data_word(0xaf194f9b,0x923f82a4);  # u64
    &data_word(0xda6d8118,0xab1c5ed5);  # u64
    &data_word(0xa3030242,0xd807aa98);  # u64
    &data_word(0x45706fbe,0x12835b01);  # u64
    &data_word(0x4ee4b28c,0x243185be);  # u64
    &data_word(0xd5ffb4e2,0x550c7dc3);  # u64
    &data_word(0xf27b896f,0x72be5d74);  # u64
    &data_word(0x3b1696b1,0x80deb1fe);  # u64
    &data_word(0x25c71235,0x9bdc06a7);  # u64
    &data_word(0xcf692694,0xc19bf174);  # u64
    &data_word(0x9ef14ad2,0xe49b69c1);  # u64
    &data_word(0x384f25e3,0xefbe4786);  # u64
    &data_word(0x8b8cd5b5,0x0fc19dc6);  # u64
    &data_word(0x77ac9c65,0x240ca1cc);  # u64
    &data_word(0x592b0275,0x2de92c6f);  # u64
    &data_word(0x6ea6e483,0x4a7484aa);  # u64
    &data_word(0xbd41fbd4,0x5cb0a9dc);  # u64
    &data_word(0x831153b5,0x76f988da);  # u64
    &data_word(0xee66dfab,0x983e5152);  # u64
    &data_word(0x2db43210,0xa831c66d);  # u64
    &data_word(0x98fb213f,0xb00327c8);  # u64
    &data_word(0xbeef0ee4,0xbf597fc7);  # u64
    &data_word(0x3da88fc2,0xc6e00bf3);  # u64
    &data_word(0x930aa725,0xd5a79147);  # u64
    &data_word(0xe003826f,0x06ca6351);  # u64
    &data_word(0x0a0e6e70,0x14292967);  # u64
    &data_word(0x46d22ffc,0x27b70a85);  # u64
    &data_word(0x5c26c926,0x2e1b2138);  # u64
    &data_word(0x5ac42aed,0x4d2c6dfc);  # u64
    &data_word(0x9d95b3df,0x53380d13);  # u64
    &data_word(0x8baf63de,0x650a7354);  # u64
    &data_word(0x3c77b2a8,0x766a0abb);  # u64
    &data_word(0x47edaee6,0x81c2c92e);  # u64
    &data_word(0x1482353b,0x92722c85);  # u64
    &data_word(0x4cf10364,0xa2bfe8a1);  # u64
    &data_word(0xbc423001,0xa81a664b);  # u64
    &data_word(0xd0f89791,0xc24b8b70);  # u64
    &data_word(0x0654be30,0xc76c51a3);  # u64
    &data_word(0xd6ef5218,0xd192e819);  # u64
    &data_word(0x5565a910,0xd6990624);  # u64
    &data_word(0x5771202a,0xf40e3585);  # u64
    &data_word(0x32bbd1b8,0x106aa070);  # u64
    &data_word(0xb8d2d0c8,0x19a4c116);  # u64
    &data_word(0x5141ab53,0x1e376c08);  # u64
    &data_word(0xdf8eeb99,0x2748774c);  # u64
    &data_word(0xe19b48a8,0x34b0bcb5);  # u64
    &data_word(0xc5c95a63,0x391c0cb3);  # u64
    &data_word(0xe3418acb,0x4ed8aa4a);  # u64
    &data_word(0x7763e373,0x5b9cca4f);  # u64
    &data_word(0xd6b2b8a3,0x682e6ff3);  # u64
    &data_word(0x5defb2fc,0x748f82ee);  # u64
    &data_word(0x43172f60,0x78a5636f);  # u64
    &data_word(0xa1f0ab72,0x84c87814);  # u64
    &data_word(0x1a6439ec,0x8cc70208);  # u64
    &data_word(0x23631e28,0x90befffa);  # u64
    &data_word(0xde82bde9,0xa4506ceb);  # u64
    &data_word(0xb2c67915,0xbef9a3f7);  # u64
    &data_word(0xe372532b,0xc67178f2);  # u64
    &data_word(0xea26619c,0xca273ece);  # u64
    &data_word(0x21c0c207,0xd186b8c7);  # u64
    &data_word(0xcde0eb1e,0xeada7dd6);  # u64
    &data_word(0xee6ed178,0xf57d4f7f);  # u64
    &data_word(0x72176fba,0x06f067aa);  # u64
    &data_word(0xa2c898a6,0x0a637dc5);  # u64
    &data_word(0xbef90dae,0x113f9804);  # u64
    &data_word(0x131c471b,0x1b710b35);  # u64
    &data_word(0x23047d84,0x28db77f5);  # u64
    &data_word(0x40c72493,0x32caab7b);  # u64
    &data_word(0x15c9bebc,0x3c9ebe0a);  # u64
    &data_word(0x9c100d4c,0x431d67c4);  # u64
    &data_word(0xcb3e42b6,0x4cc5d4be);  # u64
    &data_word(0xfc657e2a,0x597f299c);  # u64
    &data_word(0x3ad6faec,0x5fcb6fab);  # u64
    &data_word(0x4a475817,0x6c44198c);  # u64

&function_end_B($func);

&asm_finish();
diff --git a/src/lib/libcrypto/sha/asm/sha512-x86_64.pl b/src/lib/libcrypto/sha/asm/sha512-x86_64.pl new file mode 100755 index 0000000000..b6252d31ec --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha512-x86_64.pl | |||
| @@ -0,0 +1,344 @@ | |||
| 1 | #!/usr/bin/env perl | ||
| 2 | # | ||
| 3 | # ==================================================================== | ||
| 4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
| 5 | # project. Rights for redistribution and usage in source and binary | ||
| 6 | # forms are granted according to the OpenSSL license. | ||
| 7 | # ==================================================================== | ||
| 8 | # | ||
| 9 | # sha256/512_block procedure for x86_64. | ||
| 10 | # | ||
| 11 | # 40% improvement over compiler-generated code on Opteron. On EM64T | ||
| 12 | # sha256 was observed to run >80% faster and sha512 - >40%. No magical | ||
| 13 | # tricks, just straight implementation... I really wonder why gcc | ||
| 14 | # [being armed with inline assembler] fails to generate as fast code. | ||
| 15 | # The only thing which is cool about this module is that it's very | ||
| 16 | # same instruction sequence used for both SHA-256 and SHA-512. In | ||
| 17 | # former case the instructions operate on 32-bit operands, while in | ||
| 18 | # latter - on 64-bit ones. All I had to do is to get one flavor right, | ||
| 19 | # the other one passed the test right away:-) | ||
| 20 | # | ||
| 21 | # sha256_block runs in ~1005 cycles on Opteron, which gives you | ||
| 22 | # asymptotic performance of 64*1000/1005=63.7MBps times CPU clock | ||
| 23 | # frequency in GHz. sha512_block runs in ~1275 cycles, which results | ||
| 24 | # in 128*1000/1275=100MBps per GHz. Is there room for improvement? | ||
| 25 | # Well, if you compare it to IA-64 implementation, which maintains | ||
| 26 | # X[16] in register bank[!], tends to 4 instructions per CPU clock | ||
| 27 | # cycle and runs in 1003 cycles, 1275 is very good result for 3-way | ||
| 28 | # issue Opteron pipeline and X[16] maintained in memory. So that *if* | ||
| 29 | # there is a way to improve it, *then* the only way would be to try to | ||
| 30 | # offload X[16] updates to SSE unit, but that would require "deeper" | ||
| 31 | # loop unroll, which in turn would naturally cause size blow-up, not | ||
| 32 | # to mention increased complexity! And once again, only *if* it's | ||
| 33 | # actually possible to noticeably improve overall ILP, instruction | ||
| 34 | # level parallelism, on a given CPU implementation in this case. | ||
| 35 | # | ||
| 36 | # Special note on Intel EM64T. While Opteron CPU exhibits perfect | ||
| 37 | # perfromance ratio of 1.5 between 64- and 32-bit flavors [see above], | ||
| 38 | # [currently available] EM64T CPUs apparently are far from it. On the | ||
| 39 | # contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit | ||
| 40 | # sha256_block:-( This is presumably because 64-bit shifts/rotates | ||
| 41 | # apparently are not atomic instructions, but implemented in microcode. | ||
| 42 | |||
$output=shift;          # output file name; also selects 256 vs 512 flavour

# Locate the x86_64 perlasm translator: first next to this script,
# then in the shared perlasm directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Pipe all generated code through the translator. The open was
# previously unchecked; a missing interpreter/translator would have
# silently produced empty output.
open STDOUT,"| $^X $xlate $output" or die "can't call $xlate: $!";

# Flavour-specific parameters: operand size, round count, rotate
# amounts for the Sigma/sigma functions, register allocation and the
# name of the round-constant table.
if ($output =~ /512/) {
    $func="sha512_block_data_order";
    $TABLE="K512";
    $SZ=8;
    @ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
                                    "%r8", "%r9", "%r10","%r11");
    ($T1,$a0,$a1,$a2)=("%r12","%r13","%r14","%r15");
    @Sigma0=(28,34,39);
    @Sigma1=(14,18,41);
    @sigma0=(1,  8, 7);
    @sigma1=(19,61, 6);
    $rounds=80;
} else {
    $func="sha256_block_data_order";
    $TABLE="K256";
    $SZ=4;
    @ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
                                    "%r8d","%r9d","%r10d","%r11d");
    ($T1,$a0,$a1,$a2)=("%r12d","%r13d","%r14d","%r15d");
    @Sigma0=( 2,13,22);
    @Sigma1=( 6,11,25);
    @sigma0=( 7,18, 3);
    @sigma1=(17,19,10);
    $rounds=64;
}

$ctx="%rdi";    # 1st arg
$round="%rdi";  # zaps $ctx
$inp="%rsi";    # 2nd arg
$Tbl="%rbp";

# %rsp-relative frame slots: saved ctx/inp/end pointers plus the
# caller's original %rsp, placed past the 16-entry schedule window.
$_ctx="16*$SZ+0*8(%rsp)";
$_inp="16*$SZ+1*8(%rsp)";
$_end="16*$SZ+2*8(%rsp)";
$_rsp="16*$SZ+3*8(%rsp)";
$framesz="16*$SZ+4*8";
| 88 | |||
| 89 | |||
# Emit one common round body (shared by rounds 0..$rounds-1): expects
# T1 pre-loaded with the current schedule word, stores it into the
# on-stack window, computes Sigma1(e)+Ch(e,f,g)+h+K[round]+W[round]
# and updates d and h. Increments $round.
# Fix: dropped the empty "()" prototype, which contradicted the
# nine-argument call sites (all callers use the &-sigil, which
# bypasses prototypes, so emitted code is unchanged).
sub ROUND_00_15
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___;
	mov	$e,$a0
	mov	$e,$a1
	mov	$f,$a2

	ror	\$$Sigma1[0],$a0
	ror	\$$Sigma1[1],$a1
	xor	$g,$a2			# f^g

	xor	$a1,$a0
	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a1
	and	$e,$a2			# (f^g)&e
	mov	$T1,`$SZ*($i&0xf)`(%rsp)

	xor	$a1,$a0			# Sigma1(e)
	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g
	add	$h,$T1			# T1+=h

	mov	$a,$h
	add	$a0,$T1			# T1+=Sigma1(e)

	add	$a2,$T1			# T1+=Ch(e,f,g)
	mov	$a,$a0
	mov	$a,$a1

	ror	\$$Sigma0[0],$h
	ror	\$$Sigma0[1],$a0
	mov	$a,$a2
	add	($Tbl,$round,$SZ),$T1	# T1+=K[round]

	xor	$a0,$h
	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a0
	or	$c,$a1			# a|c

	xor	$a0,$h			# h=Sigma0(a)
	and	$c,$a2			# a&c
	add	$T1,$d			# d+=T1

	and	$b,$a1			# (a|c)&b
	add	$T1,$h			# h+=T1

	or	$a2,$a1			# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1($round),$round	# round++

	add	$a1,$h			# h+=Maj(a,b,c)
___
}
| 140 | |||
# Emit one round for rounds >= 16: compute the message-schedule word
# T1 = sigma1(X[i+14]) + X[i+9] + sigma0(X[i+1]) + X[i] from the
# on-stack 16-entry window, then fall through to the common round
# body via ROUND_00_15.
# Fix: dropped the empty "()" prototype (same arity mismatch as
# ROUND_00_15; all callers use the &-sigil so behavior is unchanged).
sub ROUND_16_XX
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___;
	mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
	mov	`$SZ*(($i+14)&0xf)`(%rsp),$T1

	mov	$a0,$a2

	shr	\$$sigma0[2],$a0
	ror	\$$sigma0[0],$a2

	xor	$a2,$a0
	ror	\$`$sigma0[1]-$sigma0[0]`,$a2

	xor	$a2,$a0			# sigma0(X[(i+1)&0xf])
	mov	$T1,$a1

	shr	\$$sigma1[2],$T1
	ror	\$$sigma1[0],$a1

	xor	$a1,$T1
	ror	\$`$sigma1[1]-$sigma1[0]`,$a1

	xor	$a1,$T1			# sigma1(X[(i+14)&0xf])

	add	$a0,$T1

	add	`$SZ*(($i+9)&0xf)`(%rsp),$T1

	add	`$SZ*($i&0xf)`(%rsp),$T1
___
    &ROUND_00_15(@_);
}
| 175 | |||
# Static prologue: save callee-saved registers, carve out an aligned
# stack frame ($framesz, then aligned down to 64), compute the end
# pointer from the block count in %rdx, stash ctx/inp/end/%rsp in the
# frame, locate the constant table PIC-safely, and load A-H from ctx.
# The very same template serves SHA-256 and SHA-512 -- only $SZ, the
# rotate amounts and the K table differ.
$code=<<___;
.text

.globl	$func
.type	$func,\@function,4
.align	16
$func:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	mov	%rsp,%rbp		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$$framesz,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arh
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%rbp,$_rsp		# save copy of %rsp

	.picmeup $Tbl
	lea	$TABLE-.($Tbl),$Tbl

	mov	$SZ*0($ctx),$A
	mov	$SZ*1($ctx),$B
	mov	$SZ*2($ctx),$C
	mov	$SZ*3($ctx),$D
	mov	$SZ*4($ctx),$E
	mov	$SZ*5($ctx),$F
	mov	$SZ*6($ctx),$G
	mov	$SZ*7($ctx),$H
	jmp	.Lloop

.align	16
.Lloop:
	xor	$round,$round
___
# Rounds 0..15: load and byte-swap one input word per round, then emit
# the common round body. unshift/pop rotates @ROT so every round body
# is generated with the correctly permuted register assignment.
for($i=0;$i<16;$i++) {
	$code.="	mov	$SZ*$i($inp),$T1\n";
	$code.="	bswap	$T1\n";
	&ROUND_00_15($i,@ROT);
	unshift(@ROT,pop(@ROT));
}
$code.=<<___;
	jmp	.Lrounds_16_xx
.align	16
.Lrounds_16_xx:
___
# Rounds 16 and up: a 16-round body is emitted once ($i runs 16..31)
# and re-executed via the "jb .Lrounds_16_xx" below until $round
# reaches $rounds (64 or 80).
for(;$i<32;$i++) {
	&ROUND_16_XX($i,@ROT);
	unshift(@ROT,pop(@ROT));
}

# Block epilogue: fold the working variables back into ctx, advance
# the input pointer, loop while inp < end, then restore the saved
# stack pointer and callee-saved registers.
$code.=<<___;
	cmp	\$$rounds,$round
	jb	.Lrounds_16_xx

	mov	$_ctx,$ctx
	lea	16*$SZ($inp),$inp

	add	$SZ*0($ctx),$A
	add	$SZ*1($ctx),$B
	add	$SZ*2($ctx),$C
	add	$SZ*3($ctx),$D
	add	$SZ*4($ctx),$E
	add	$SZ*5($ctx),$F
	add	$SZ*6($ctx),$G
	add	$SZ*7($ctx),$H

	cmp	$_end,$inp

	mov	$A,$SZ*0($ctx)
	mov	$B,$SZ*1($ctx)
	mov	$C,$SZ*2($ctx)
	mov	$D,$SZ*3($ctx)
	mov	$E,$SZ*4($ctx)
	mov	$F,$SZ*5($ctx)
	mov	$G,$SZ*6($ctx)
	mov	$H,$SZ*7($ctx)
	jb	.Lloop

	mov	$_rsp,%rsp
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx

	ret
.size	$func,.-$func
___
| 271 | |||
# Round-constant tables, emitted in the flavour-appropriate width:
# 32-bit K256 words for SHA-256, 64-bit K512 qwords for SHA-512.
if ($SZ==4) {
$code.=<<___;
.align	64
.type	$TABLE,\@object
$TABLE:
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
___
} else {
$code.=<<___;
.align	64
.type	$TABLE,\@object
$TABLE:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
___
}
| 341 | |||
# Expand `...` compile-time arithmetic (relative rotate amounts and
# schedule offsets), emit everything, and shut down the translator
# pipe. close() on a piped STDOUT reports the child's exit status;
# it was previously unchecked, so a failing x86_64-xlate.pl could
# silently truncate the output.
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT or die "error closing STDOUT: $!";
