diff options
Diffstat (limited to 'src/lib/libcrypto/sha')
-rw-r--r-- | src/lib/libcrypto/sha/asm/sha1-ia64.pl | 346 | ||||
-rwxr-xr-x | src/lib/libcrypto/sha/asm/sha1-x86_64.pl | 242 | ||||
-rwxr-xr-x | src/lib/libcrypto/sha/asm/sha512-ia64.pl | 672 | ||||
-rwxr-xr-x | src/lib/libcrypto/sha/asm/sha512-x86_64.pl | 344 | ||||
-rw-r--r-- | src/lib/libcrypto/sha/sha256.c | 282 | ||||
-rw-r--r-- | src/lib/libcrypto/sha/sha512.c | 537 |
6 files changed, 2128 insertions, 295 deletions
diff --git a/src/lib/libcrypto/sha/asm/sha1-ia64.pl b/src/lib/libcrypto/sha/asm/sha1-ia64.pl index cb9dfad124..aa18c1089b 100644 --- a/src/lib/libcrypto/sha/asm/sha1-ia64.pl +++ b/src/lib/libcrypto/sha/asm/sha1-ia64.pl | |||
@@ -2,8 +2,9 @@ | |||
2 | # | 2 | # |
3 | # ==================================================================== | 3 | # ==================================================================== |
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | 4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL |
5 | # project. Rights for redistribution and usage in source and binary | 5 | # project. The module is, however, dual licensed under OpenSSL and |
6 | # forms are granted according to the OpenSSL license. | 6 | # CRYPTOGAMS licenses depending on where you obtain it. For further |
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
7 | # ==================================================================== | 8 | # ==================================================================== |
8 | # | 9 | # |
9 | # Eternal question is what's wrong with compiler generated code? The | 10 | # Eternal question is what's wrong with compiler generated code? The |
@@ -11,15 +12,10 @@ | |||
11 | # to perform rotations by maintaining copy of 32-bit value in upper | 12 | # to perform rotations by maintaining copy of 32-bit value in upper |
12 | # bits of 64-bit register. Just follow mux2 and shrp instructions... | 13 | # bits of 64-bit register. Just follow mux2 and shrp instructions... |
13 | # Performance under big-endian OS such as HP-UX is 179MBps*1GHz, which | 14 | # Performance under big-endian OS such as HP-UX is 179MBps*1GHz, which |
14 | # is >50% better than HP C and >2x better than gcc. As of this moment | 15 | # is >50% better than HP C and >2x better than gcc. |
15 | # performance under little-endian OS such as Linux and Windows will be | ||
16 | # a bit lower, because data has to be picked in reverse byte-order. | ||
17 | # It's possible to resolve this issue by implementing third function, | ||
18 | # sha1_block_asm_data_order_aligned, which would temporarily flip | ||
19 | # BE field in User Mask register... | ||
20 | 16 | ||
21 | $code=<<___; | 17 | $code=<<___; |
22 | .ident \"sha1-ia64.s, version 1.0\" | 18 | .ident \"sha1-ia64.s, version 1.2\" |
23 | .ident \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\" | 19 | .ident \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\" |
24 | .explicit | 20 | .explicit |
25 | 21 | ||
@@ -55,63 +51,55 @@ else { | |||
55 | 51 | ||
56 | sub BODY_00_15 { | 52 | sub BODY_00_15 { |
57 | local *code=shift; | 53 | local *code=shift; |
58 | local ($i,$a,$b,$c,$d,$e,$f,$unaligned)=@_; | 54 | local ($i,$a,$b,$c,$d,$e,$f)=@_; |
59 | 55 | ||
60 | if ($unaligned) { | 56 | $code.=<<___ if ($i==0); |
61 | $code.=<<___; | 57 | { .mmi; ld1 $X[$i&0xf]=[inp],2 // MSB |
62 | { .mmi; ld1 tmp0=[inp],2 // MSB | 58 | ld1 tmp2=[tmp3],2 };; |
63 | ld1 tmp1=[tmp3],2 };; | 59 | { .mmi; ld1 tmp0=[inp],2 |
64 | { .mmi; ld1 tmp2=[inp],2 | 60 | ld1 tmp4=[tmp3],2 // LSB |
65 | ld1 $X[$i&0xf]=[tmp3],2 // LSB | 61 | dep $X[$i&0xf]=$X[$i&0xf],tmp2,8,8 };; |
66 | dep tmp1=tmp0,tmp1,8,8 };; | ||
67 | { .mii; cmp.ne p16,p0=r0,r0 // no misaligned prefetch | ||
68 | dep $X[$i&0xf]=tmp2,$X[$i&0xf],8,8;; | ||
69 | dep $X[$i&0xf]=tmp1,$X[$i&0xf],16,16 };; | ||
70 | { .mmi; nop.m 0 | ||
71 | ___ | ||
72 | } | ||
73 | elsif ($i<15) { | ||
74 | $code.=<<___; | ||
75 | { .mmi; ld4 $X[($i+1)&0xf]=[inp],4 // prefetch | ||
76 | ___ | ||
77 | } | ||
78 | else { | ||
79 | $code.=<<___; | ||
80 | { .mmi; nop.m 0 | ||
81 | ___ | 62 | ___ |
82 | } | ||
83 | if ($i<15) { | 63 | if ($i<15) { |
84 | $code.=<<___; | 64 | $code.=<<___; |
85 | and tmp0=$c,$b | 65 | { .mmi; ld1 $X[($i+1)&0xf]=[inp],2 // +1 |
86 | dep.z tmp5=$a,5,27 } // a<<5 | 66 | dep tmp1=tmp0,tmp4,8,8 };; |
67 | { .mmi; ld1 tmp2=[tmp3],2 // +1 | ||
68 | and tmp4=$c,$b | ||
69 | dep $X[$i&0xf]=$X[$i&0xf],tmp1,16,16 } //;; | ||
87 | { .mmi; andcm tmp1=$d,$b | 70 | { .mmi; andcm tmp1=$d,$b |
88 | add tmp4=$e,$K_00_19 };; | 71 | add tmp0=$e,$K_00_19 |
89 | { .mmi; or tmp0=tmp0,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) | 72 | dep.z tmp5=$a,5,27 };; // a<<5 |
90 | add $f=tmp4,$X[$i&0xf] // f=xi+e+K_00_19 | 73 | { .mmi; or tmp4=tmp4,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) |
74 | add $f=tmp0,$X[$i&0xf] // f=xi+e+K_00_19 | ||
91 | extr.u tmp1=$a,27,5 };; // a>>27 | 75 | extr.u tmp1=$a,27,5 };; // a>>27 |
92 | { .mib; add $f=$f,tmp0 // f+=F_00_19(b,c,d) | 76 | { .mmi; ld1 tmp0=[inp],2 // +1 |
77 | add $f=$f,tmp4 // f+=F_00_19(b,c,d) | ||
93 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) | 78 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) |
94 | { .mib; or tmp1=tmp1,tmp5 // ROTATE(a,5) | 79 | { .mmi; ld1 tmp4=[tmp3],2 // +1 |
80 | or tmp5=tmp1,tmp5 // ROTATE(a,5) | ||
95 | mux2 tmp6=$a,0x44 };; // see b in next iteration | 81 | mux2 tmp6=$a,0x44 };; // see b in next iteration |
96 | { .mii; add $f=$f,tmp1 // f+=ROTATE(a,5) | 82 | { .mii; add $f=$f,tmp5 // f+=ROTATE(a,5) |
97 | mux2 $X[$i&0xf]=$X[$i&0xf],0x44 | 83 | dep $X[($i+1)&0xf]=$X[($i+1)&0xf],tmp2,8,8 // +1 |
98 | nop.i 0 };; | 84 | mux2 $X[$i&0xf]=$X[$i&0xf],0x44 } //;; |
99 | 85 | ||
100 | ___ | 86 | ___ |
101 | } | 87 | } |
102 | else { | 88 | else { |
103 | $code.=<<___; | 89 | $code.=<<___; |
104 | and tmp0=$c,$b | 90 | { .mii; and tmp3=$c,$b |
105 | dep.z tmp5=$a,5,27 } // a<<5 ;;? | 91 | dep tmp1=tmp0,tmp4,8,8;; |
92 | dep $X[$i&0xf]=$X[$i&0xf],tmp1,16,16 } //;; | ||
106 | { .mmi; andcm tmp1=$d,$b | 93 | { .mmi; andcm tmp1=$d,$b |
107 | add tmp4=$e,$K_00_19 };; | 94 | add tmp0=$e,$K_00_19 |
108 | { .mmi; or tmp0=tmp0,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) | 95 | dep.z tmp5=$a,5,27 };; // a<<5 |
109 | add $f=tmp4,$X[$i&0xf] // f=xi+e+K_00_19 | 96 | { .mmi; or tmp4=tmp3,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) |
97 | add $f=tmp0,$X[$i&0xf] // f=xi+e+K_00_19 | ||
110 | extr.u tmp1=$a,27,5 } // a>>27 | 98 | extr.u tmp1=$a,27,5 } // a>>27 |
111 | { .mmi; xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 | 99 | { .mmi; xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 |
112 | xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 | 100 | xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 |
113 | nop.i 0 };; | 101 | nop.i 0 };; |
114 | { .mmi; add $f=$f,tmp0 // f+=F_00_19(b,c,d) | 102 | { .mmi; add $f=$f,tmp4 // f+=F_00_19(b,c,d) |
115 | xor tmp2=tmp2,tmp3 // +1 | 103 | xor tmp2=tmp2,tmp3 // +1 |
116 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) | 104 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) |
117 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) | 105 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) |
@@ -190,9 +178,7 @@ $code.=<<___; | |||
190 | extr.u tmp1=$a,27,5 } // a>>27 | 178 | extr.u tmp1=$a,27,5 } // a>>27 |
191 | { .mib; add $f=$f,tmp4 // f+=e+K_20_39 | 179 | { .mib; add $f=$f,tmp4 // f+=e+K_20_39 |
192 | add $h1=$h1,$a };; // wrap up | 180 | add $h1=$h1,$a };; // wrap up |
193 | { .mmi; | 181 | { .mmi; add $f=$f,tmp0 // f+=F_20_39(b,c,d) |
194 | (p16) ld4.s $X[0]=[inp],4 // non-faulting prefetch | ||
195 | add $f=$f,tmp0 // f+=F_20_39(b,c,d) | ||
196 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) ;;? | 182 | shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) ;;? |
197 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) | 183 | { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) |
198 | add $h3=$h3,$c };; // wrap up | 184 | add $h3=$h3,$c };; // wrap up |
@@ -245,172 +231,15 @@ tmp3=r11; | |||
245 | ctx=r32; // in0 | 231 | ctx=r32; // in0 |
246 | inp=r33; // in1 | 232 | inp=r33; // in1 |
247 | 233 | ||
248 | // void sha1_block_asm_host_order(SHA_CTX *c,const void *p,size_t num); | 234 | // void sha1_block_data_order(SHA_CTX *c,const void *p,size_t num); |
249 | .global sha1_block_asm_host_order# | 235 | .global sha1_block_data_order# |
250 | .proc sha1_block_asm_host_order# | 236 | .proc sha1_block_data_order# |
251 | .align 32 | 237 | .align 32 |
252 | sha1_block_asm_host_order: | 238 | sha1_block_data_order: |
253 | .prologue | 239 | .prologue |
254 | .fframe 0 | ||
255 | .save ar.pfs,r0 | ||
256 | .save ar.lc,r3 | ||
257 | { .mmi; alloc tmp1=ar.pfs,3,15,0,0 | 240 | { .mmi; alloc tmp1=ar.pfs,3,15,0,0 |
258 | $ADDP tmp0=4,ctx | 241 | $ADDP tmp0=4,ctx |
259 | mov r3=ar.lc } | ||
260 | { .mmi; $ADDP ctx=0,ctx | ||
261 | $ADDP inp=0,inp | ||
262 | mov r2=pr };; | ||
263 | tmp4=in2; | ||
264 | tmp5=loc13; | ||
265 | tmp6=loc14; | ||
266 | .body | ||
267 | { .mlx; ld4 $h0=[ctx],8 | ||
268 | movl $K_00_19=0x5a827999 } | ||
269 | { .mlx; ld4 $h1=[tmp0],8 | ||
270 | movl $K_20_39=0x6ed9eba1 };; | ||
271 | { .mlx; ld4 $h2=[ctx],8 | ||
272 | movl $K_40_59=0x8f1bbcdc } | ||
273 | { .mlx; ld4 $h3=[tmp0] | ||
274 | movl $K_60_79=0xca62c1d6 };; | ||
275 | { .mmi; ld4 $h4=[ctx],-16 | ||
276 | add in2=-1,in2 // adjust num for ar.lc | ||
277 | mov ar.ec=1 };; | ||
278 | { .mmi; ld4 $X[0]=[inp],4 // prefetch | ||
279 | cmp.ne p16,p0=r0,in2 // prefecth at loop end | ||
280 | mov ar.lc=in2 };; // brp.loop.imp: too far | ||
281 | |||
282 | .Lhtop: | ||
283 | { .mmi; mov $A=$h0 | ||
284 | mov $B=$h1 | ||
285 | mux2 tmp6=$h1,0x44 } | ||
286 | { .mmi; mov $C=$h2 | ||
287 | mov $D=$h3 | ||
288 | mov $E=$h4 };; | ||
289 | |||
290 | ___ | ||
291 | |||
292 | &BODY_00_15(\$code, 0,$A,$B,$C,$D,$E,$T); | ||
293 | &BODY_00_15(\$code, 1,$T,$A,$B,$C,$D,$E); | ||
294 | &BODY_00_15(\$code, 2,$E,$T,$A,$B,$C,$D); | ||
295 | &BODY_00_15(\$code, 3,$D,$E,$T,$A,$B,$C); | ||
296 | &BODY_00_15(\$code, 4,$C,$D,$E,$T,$A,$B); | ||
297 | &BODY_00_15(\$code, 5,$B,$C,$D,$E,$T,$A); | ||
298 | &BODY_00_15(\$code, 6,$A,$B,$C,$D,$E,$T); | ||
299 | &BODY_00_15(\$code, 7,$T,$A,$B,$C,$D,$E); | ||
300 | &BODY_00_15(\$code, 8,$E,$T,$A,$B,$C,$D); | ||
301 | &BODY_00_15(\$code, 9,$D,$E,$T,$A,$B,$C); | ||
302 | &BODY_00_15(\$code,10,$C,$D,$E,$T,$A,$B); | ||
303 | &BODY_00_15(\$code,11,$B,$C,$D,$E,$T,$A); | ||
304 | &BODY_00_15(\$code,12,$A,$B,$C,$D,$E,$T); | ||
305 | &BODY_00_15(\$code,13,$T,$A,$B,$C,$D,$E); | ||
306 | &BODY_00_15(\$code,14,$E,$T,$A,$B,$C,$D); | ||
307 | &BODY_00_15(\$code,15,$D,$E,$T,$A,$B,$C); | ||
308 | |||
309 | &BODY_16_19(\$code,16,$C,$D,$E,$T,$A,$B); | ||
310 | &BODY_16_19(\$code,17,$B,$C,$D,$E,$T,$A); | ||
311 | &BODY_16_19(\$code,18,$A,$B,$C,$D,$E,$T); | ||
312 | &BODY_16_19(\$code,19,$T,$A,$B,$C,$D,$E); | ||
313 | |||
314 | &BODY_20_39(\$code,20,$E,$T,$A,$B,$C,$D); | ||
315 | &BODY_20_39(\$code,21,$D,$E,$T,$A,$B,$C); | ||
316 | &BODY_20_39(\$code,22,$C,$D,$E,$T,$A,$B); | ||
317 | &BODY_20_39(\$code,23,$B,$C,$D,$E,$T,$A); | ||
318 | &BODY_20_39(\$code,24,$A,$B,$C,$D,$E,$T); | ||
319 | &BODY_20_39(\$code,25,$T,$A,$B,$C,$D,$E); | ||
320 | &BODY_20_39(\$code,26,$E,$T,$A,$B,$C,$D); | ||
321 | &BODY_20_39(\$code,27,$D,$E,$T,$A,$B,$C); | ||
322 | &BODY_20_39(\$code,28,$C,$D,$E,$T,$A,$B); | ||
323 | &BODY_20_39(\$code,29,$B,$C,$D,$E,$T,$A); | ||
324 | &BODY_20_39(\$code,30,$A,$B,$C,$D,$E,$T); | ||
325 | &BODY_20_39(\$code,31,$T,$A,$B,$C,$D,$E); | ||
326 | &BODY_20_39(\$code,32,$E,$T,$A,$B,$C,$D); | ||
327 | &BODY_20_39(\$code,33,$D,$E,$T,$A,$B,$C); | ||
328 | &BODY_20_39(\$code,34,$C,$D,$E,$T,$A,$B); | ||
329 | &BODY_20_39(\$code,35,$B,$C,$D,$E,$T,$A); | ||
330 | &BODY_20_39(\$code,36,$A,$B,$C,$D,$E,$T); | ||
331 | &BODY_20_39(\$code,37,$T,$A,$B,$C,$D,$E); | ||
332 | &BODY_20_39(\$code,38,$E,$T,$A,$B,$C,$D); | ||
333 | &BODY_20_39(\$code,39,$D,$E,$T,$A,$B,$C); | ||
334 | |||
335 | &BODY_40_59(\$code,40,$C,$D,$E,$T,$A,$B); | ||
336 | &BODY_40_59(\$code,41,$B,$C,$D,$E,$T,$A); | ||
337 | &BODY_40_59(\$code,42,$A,$B,$C,$D,$E,$T); | ||
338 | &BODY_40_59(\$code,43,$T,$A,$B,$C,$D,$E); | ||
339 | &BODY_40_59(\$code,44,$E,$T,$A,$B,$C,$D); | ||
340 | &BODY_40_59(\$code,45,$D,$E,$T,$A,$B,$C); | ||
341 | &BODY_40_59(\$code,46,$C,$D,$E,$T,$A,$B); | ||
342 | &BODY_40_59(\$code,47,$B,$C,$D,$E,$T,$A); | ||
343 | &BODY_40_59(\$code,48,$A,$B,$C,$D,$E,$T); | ||
344 | &BODY_40_59(\$code,49,$T,$A,$B,$C,$D,$E); | ||
345 | &BODY_40_59(\$code,50,$E,$T,$A,$B,$C,$D); | ||
346 | &BODY_40_59(\$code,51,$D,$E,$T,$A,$B,$C); | ||
347 | &BODY_40_59(\$code,52,$C,$D,$E,$T,$A,$B); | ||
348 | &BODY_40_59(\$code,53,$B,$C,$D,$E,$T,$A); | ||
349 | &BODY_40_59(\$code,54,$A,$B,$C,$D,$E,$T); | ||
350 | &BODY_40_59(\$code,55,$T,$A,$B,$C,$D,$E); | ||
351 | &BODY_40_59(\$code,56,$E,$T,$A,$B,$C,$D); | ||
352 | &BODY_40_59(\$code,57,$D,$E,$T,$A,$B,$C); | ||
353 | &BODY_40_59(\$code,58,$C,$D,$E,$T,$A,$B); | ||
354 | &BODY_40_59(\$code,59,$B,$C,$D,$E,$T,$A); | ||
355 | |||
356 | &BODY_60_79(\$code,60,$A,$B,$C,$D,$E,$T); | ||
357 | &BODY_60_79(\$code,61,$T,$A,$B,$C,$D,$E); | ||
358 | &BODY_60_79(\$code,62,$E,$T,$A,$B,$C,$D); | ||
359 | &BODY_60_79(\$code,63,$D,$E,$T,$A,$B,$C); | ||
360 | &BODY_60_79(\$code,64,$C,$D,$E,$T,$A,$B); | ||
361 | &BODY_60_79(\$code,65,$B,$C,$D,$E,$T,$A); | ||
362 | &BODY_60_79(\$code,66,$A,$B,$C,$D,$E,$T); | ||
363 | &BODY_60_79(\$code,67,$T,$A,$B,$C,$D,$E); | ||
364 | &BODY_60_79(\$code,68,$E,$T,$A,$B,$C,$D); | ||
365 | &BODY_60_79(\$code,69,$D,$E,$T,$A,$B,$C); | ||
366 | &BODY_60_79(\$code,70,$C,$D,$E,$T,$A,$B); | ||
367 | &BODY_60_79(\$code,71,$B,$C,$D,$E,$T,$A); | ||
368 | &BODY_60_79(\$code,72,$A,$B,$C,$D,$E,$T); | ||
369 | &BODY_60_79(\$code,73,$T,$A,$B,$C,$D,$E); | ||
370 | &BODY_60_79(\$code,74,$E,$T,$A,$B,$C,$D); | ||
371 | &BODY_60_79(\$code,75,$D,$E,$T,$A,$B,$C); | ||
372 | &BODY_60_79(\$code,76,$C,$D,$E,$T,$A,$B); | ||
373 | &BODY_60_79(\$code,77,$B,$C,$D,$E,$T,$A); | ||
374 | &BODY_60_79(\$code,78,$A,$B,$C,$D,$E,$T); | ||
375 | &BODY_60_79(\$code,79,$T,$A,$B,$C,$D,$E); | ||
376 | |||
377 | $code.=<<___; | ||
378 | { .mmb; add $h0=$h0,$E | ||
379 | nop.m 0 | ||
380 | br.ctop.dptk.many .Lhtop };; | ||
381 | .Lhend: | ||
382 | { .mmi; add tmp0=4,ctx | ||
383 | mov ar.lc=r3 };; | ||
384 | { .mmi; st4 [ctx]=$h0,8 | ||
385 | st4 [tmp0]=$h1,8 };; | ||
386 | { .mmi; st4 [ctx]=$h2,8 | ||
387 | st4 [tmp0]=$h3 };; | ||
388 | { .mib; st4 [ctx]=$h4,-16 | ||
389 | mov pr=r2,0x1ffff | ||
390 | br.ret.sptk.many b0 };; | ||
391 | .endp sha1_block_asm_host_order# | ||
392 | ___ | ||
393 | |||
394 | |||
395 | $code.=<<___; | ||
396 | // void sha1_block_asm_data_order(SHA_CTX *c,const void *p,size_t num); | ||
397 | .global sha1_block_asm_data_order# | ||
398 | .proc sha1_block_asm_data_order# | ||
399 | .align 32 | ||
400 | sha1_block_asm_data_order: | ||
401 | ___ | ||
402 | $code.=<<___ if ($big_endian); | ||
403 | { .mmi; and r2=3,inp };; | ||
404 | { .mib; cmp.eq p6,p0=r0,r2 | ||
405 | (p6) br.dptk.many sha1_block_asm_host_order };; | ||
406 | ___ | ||
407 | $code.=<<___; | ||
408 | .prologue | ||
409 | .fframe 0 | ||
410 | .save ar.pfs,r0 | ||
411 | .save ar.lc,r3 | 242 | .save ar.lc,r3 |
412 | { .mmi; alloc tmp1=ar.pfs,3,15,0,0 | ||
413 | $ADDP tmp0=4,ctx | ||
414 | mov r3=ar.lc } | 243 | mov r3=ar.lc } |
415 | { .mmi; $ADDP ctx=0,ctx | 244 | { .mmi; $ADDP ctx=0,ctx |
416 | $ADDP inp=0,inp | 245 | $ADDP inp=0,inp |
@@ -444,90 +273,16 @@ tmp6=loc14; | |||
444 | 273 | ||
445 | ___ | 274 | ___ |
446 | 275 | ||
447 | &BODY_00_15(\$code, 0,$A,$B,$C,$D,$E,$T,1); | 276 | { my $i,@V=($A,$B,$C,$D,$E,$T); |
448 | &BODY_00_15(\$code, 1,$T,$A,$B,$C,$D,$E,1); | ||
449 | &BODY_00_15(\$code, 2,$E,$T,$A,$B,$C,$D,1); | ||
450 | &BODY_00_15(\$code, 3,$D,$E,$T,$A,$B,$C,1); | ||
451 | &BODY_00_15(\$code, 4,$C,$D,$E,$T,$A,$B,1); | ||
452 | &BODY_00_15(\$code, 5,$B,$C,$D,$E,$T,$A,1); | ||
453 | &BODY_00_15(\$code, 6,$A,$B,$C,$D,$E,$T,1); | ||
454 | &BODY_00_15(\$code, 7,$T,$A,$B,$C,$D,$E,1); | ||
455 | &BODY_00_15(\$code, 8,$E,$T,$A,$B,$C,$D,1); | ||
456 | &BODY_00_15(\$code, 9,$D,$E,$T,$A,$B,$C,1); | ||
457 | &BODY_00_15(\$code,10,$C,$D,$E,$T,$A,$B,1); | ||
458 | &BODY_00_15(\$code,11,$B,$C,$D,$E,$T,$A,1); | ||
459 | &BODY_00_15(\$code,12,$A,$B,$C,$D,$E,$T,1); | ||
460 | &BODY_00_15(\$code,13,$T,$A,$B,$C,$D,$E,1); | ||
461 | &BODY_00_15(\$code,14,$E,$T,$A,$B,$C,$D,1); | ||
462 | &BODY_00_15(\$code,15,$D,$E,$T,$A,$B,$C,1); | ||
463 | |||
464 | &BODY_16_19(\$code,16,$C,$D,$E,$T,$A,$B); | ||
465 | &BODY_16_19(\$code,17,$B,$C,$D,$E,$T,$A); | ||
466 | &BODY_16_19(\$code,18,$A,$B,$C,$D,$E,$T); | ||
467 | &BODY_16_19(\$code,19,$T,$A,$B,$C,$D,$E); | ||
468 | 277 | ||
469 | &BODY_20_39(\$code,20,$E,$T,$A,$B,$C,$D); | 278 | for($i=0;$i<16;$i++) { &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); } |
470 | &BODY_20_39(\$code,21,$D,$E,$T,$A,$B,$C); | 279 | for(;$i<20;$i++) { &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); } |
471 | &BODY_20_39(\$code,22,$C,$D,$E,$T,$A,$B); | 280 | for(;$i<40;$i++) { &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); } |
472 | &BODY_20_39(\$code,23,$B,$C,$D,$E,$T,$A); | 281 | for(;$i<60;$i++) { &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); } |
473 | &BODY_20_39(\$code,24,$A,$B,$C,$D,$E,$T); | 282 | for(;$i<80;$i++) { &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); } |
474 | &BODY_20_39(\$code,25,$T,$A,$B,$C,$D,$E); | ||
475 | &BODY_20_39(\$code,26,$E,$T,$A,$B,$C,$D); | ||
476 | &BODY_20_39(\$code,27,$D,$E,$T,$A,$B,$C); | ||
477 | &BODY_20_39(\$code,28,$C,$D,$E,$T,$A,$B); | ||
478 | &BODY_20_39(\$code,29,$B,$C,$D,$E,$T,$A); | ||
479 | &BODY_20_39(\$code,30,$A,$B,$C,$D,$E,$T); | ||
480 | &BODY_20_39(\$code,31,$T,$A,$B,$C,$D,$E); | ||
481 | &BODY_20_39(\$code,32,$E,$T,$A,$B,$C,$D); | ||
482 | &BODY_20_39(\$code,33,$D,$E,$T,$A,$B,$C); | ||
483 | &BODY_20_39(\$code,34,$C,$D,$E,$T,$A,$B); | ||
484 | &BODY_20_39(\$code,35,$B,$C,$D,$E,$T,$A); | ||
485 | &BODY_20_39(\$code,36,$A,$B,$C,$D,$E,$T); | ||
486 | &BODY_20_39(\$code,37,$T,$A,$B,$C,$D,$E); | ||
487 | &BODY_20_39(\$code,38,$E,$T,$A,$B,$C,$D); | ||
488 | &BODY_20_39(\$code,39,$D,$E,$T,$A,$B,$C); | ||
489 | 283 | ||
490 | &BODY_40_59(\$code,40,$C,$D,$E,$T,$A,$B); | 284 | (($V[5] eq $D) and ($V[0] eq $E)) or die; # double-check |
491 | &BODY_40_59(\$code,41,$B,$C,$D,$E,$T,$A); | 285 | } |
492 | &BODY_40_59(\$code,42,$A,$B,$C,$D,$E,$T); | ||
493 | &BODY_40_59(\$code,43,$T,$A,$B,$C,$D,$E); | ||
494 | &BODY_40_59(\$code,44,$E,$T,$A,$B,$C,$D); | ||
495 | &BODY_40_59(\$code,45,$D,$E,$T,$A,$B,$C); | ||
496 | &BODY_40_59(\$code,46,$C,$D,$E,$T,$A,$B); | ||
497 | &BODY_40_59(\$code,47,$B,$C,$D,$E,$T,$A); | ||
498 | &BODY_40_59(\$code,48,$A,$B,$C,$D,$E,$T); | ||
499 | &BODY_40_59(\$code,49,$T,$A,$B,$C,$D,$E); | ||
500 | &BODY_40_59(\$code,50,$E,$T,$A,$B,$C,$D); | ||
501 | &BODY_40_59(\$code,51,$D,$E,$T,$A,$B,$C); | ||
502 | &BODY_40_59(\$code,52,$C,$D,$E,$T,$A,$B); | ||
503 | &BODY_40_59(\$code,53,$B,$C,$D,$E,$T,$A); | ||
504 | &BODY_40_59(\$code,54,$A,$B,$C,$D,$E,$T); | ||
505 | &BODY_40_59(\$code,55,$T,$A,$B,$C,$D,$E); | ||
506 | &BODY_40_59(\$code,56,$E,$T,$A,$B,$C,$D); | ||
507 | &BODY_40_59(\$code,57,$D,$E,$T,$A,$B,$C); | ||
508 | &BODY_40_59(\$code,58,$C,$D,$E,$T,$A,$B); | ||
509 | &BODY_40_59(\$code,59,$B,$C,$D,$E,$T,$A); | ||
510 | |||
511 | &BODY_60_79(\$code,60,$A,$B,$C,$D,$E,$T); | ||
512 | &BODY_60_79(\$code,61,$T,$A,$B,$C,$D,$E); | ||
513 | &BODY_60_79(\$code,62,$E,$T,$A,$B,$C,$D); | ||
514 | &BODY_60_79(\$code,63,$D,$E,$T,$A,$B,$C); | ||
515 | &BODY_60_79(\$code,64,$C,$D,$E,$T,$A,$B); | ||
516 | &BODY_60_79(\$code,65,$B,$C,$D,$E,$T,$A); | ||
517 | &BODY_60_79(\$code,66,$A,$B,$C,$D,$E,$T); | ||
518 | &BODY_60_79(\$code,67,$T,$A,$B,$C,$D,$E); | ||
519 | &BODY_60_79(\$code,68,$E,$T,$A,$B,$C,$D); | ||
520 | &BODY_60_79(\$code,69,$D,$E,$T,$A,$B,$C); | ||
521 | &BODY_60_79(\$code,70,$C,$D,$E,$T,$A,$B); | ||
522 | &BODY_60_79(\$code,71,$B,$C,$D,$E,$T,$A); | ||
523 | &BODY_60_79(\$code,72,$A,$B,$C,$D,$E,$T); | ||
524 | &BODY_60_79(\$code,73,$T,$A,$B,$C,$D,$E); | ||
525 | &BODY_60_79(\$code,74,$E,$T,$A,$B,$C,$D); | ||
526 | &BODY_60_79(\$code,75,$D,$E,$T,$A,$B,$C); | ||
527 | &BODY_60_79(\$code,76,$C,$D,$E,$T,$A,$B); | ||
528 | &BODY_60_79(\$code,77,$B,$C,$D,$E,$T,$A); | ||
529 | &BODY_60_79(\$code,78,$A,$B,$C,$D,$E,$T); | ||
530 | &BODY_60_79(\$code,79,$T,$A,$B,$C,$D,$E); | ||
531 | 286 | ||
532 | $code.=<<___; | 287 | $code.=<<___; |
533 | { .mmb; add $h0=$h0,$E | 288 | { .mmb; add $h0=$h0,$E |
@@ -543,7 +298,8 @@ $code.=<<___; | |||
543 | { .mib; st4 [ctx]=$h4,-16 | 298 | { .mib; st4 [ctx]=$h4,-16 |
544 | mov pr=r2,0x1ffff | 299 | mov pr=r2,0x1ffff |
545 | br.ret.sptk.many b0 };; | 300 | br.ret.sptk.many b0 };; |
546 | .endp sha1_block_asm_data_order# | 301 | .endp sha1_block_data_order# |
302 | stringz "SHA1 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>" | ||
547 | ___ | 303 | ___ |
548 | 304 | ||
549 | print $code; | 305 | print $code; |
diff --git a/src/lib/libcrypto/sha/asm/sha1-x86_64.pl b/src/lib/libcrypto/sha/asm/sha1-x86_64.pl new file mode 100755 index 0000000000..f7ed67a726 --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha1-x86_64.pl | |||
@@ -0,0 +1,242 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # sha1_block procedure for x86_64. | ||
11 | # | ||
12 | # It was brought to my attention that on EM64T compiler-generated code | ||
13 | # was far behind 32-bit assembler implementation. This is unlike on | ||
14 | # Opteron where compiler-generated code was only 15% behind 32-bit | ||
15 | # assembler, which originally made it hard to motivate the effort. | ||
16 | # There was suggestion to mechanically translate 32-bit code, but I | ||
17 | # dismissed it, reasoning that x86_64 offers enough register bank | ||
18 | # capacity to fully utilize SHA-1 parallelism. Therefore this fresh | ||
19 | # implementation:-) However! While 64-bit code does performs better | ||
20 | # on Opteron, I failed to beat 32-bit assembler on EM64T core. Well, | ||
21 | # x86_64 does offer larger *addressable* bank, but out-of-order core | ||
22 | # reaches for even more registers through dynamic aliasing, and EM64T | ||
23 | # core must have managed to run-time optimize even 32-bit code just as | ||
24 | # good as 64-bit one. Performance improvement is summarized in the | ||
25 | # following table: | ||
26 | # | ||
27 | # gcc 3.4 32-bit asm cycles/byte | ||
28 | # Opteron +45% +20% 6.8 | ||
29 | # Xeon P4 +65% +0% 9.9 | ||
30 | # Core2 +60% +10% 7.0 | ||
31 | |||
32 | $output=shift; | ||
33 | |||
34 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
35 | ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or | ||
36 | ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or | ||
37 | die "can't locate x86_64-xlate.pl"; | ||
38 | |||
39 | open STDOUT,"| $^X $xlate $output"; | ||
40 | |||
41 | $ctx="%rdi"; # 1st arg | ||
42 | $inp="%rsi"; # 2nd arg | ||
43 | $num="%rdx"; # 3rd arg | ||
44 | |||
45 | # reassign arguments in order to produce more compact code | ||
46 | $ctx="%r8"; | ||
47 | $inp="%r9"; | ||
48 | $num="%r10"; | ||
49 | |||
50 | $xi="%eax"; | ||
51 | $t0="%ebx"; | ||
52 | $t1="%ecx"; | ||
53 | $A="%edx"; | ||
54 | $B="%esi"; | ||
55 | $C="%edi"; | ||
56 | $D="%ebp"; | ||
57 | $E="%r11d"; | ||
58 | $T="%r12d"; | ||
59 | |||
60 | @V=($A,$B,$C,$D,$E,$T); | ||
61 | |||
62 | sub PROLOGUE { | ||
63 | my $func=shift; | ||
64 | $code.=<<___; | ||
65 | .globl $func | ||
66 | .type $func,\@function,3 | ||
67 | .align 16 | ||
68 | $func: | ||
69 | push %rbx | ||
70 | push %rbp | ||
71 | push %r12 | ||
72 | mov %rsp,%rax | ||
73 | mov %rdi,$ctx # reassigned argument | ||
74 | sub \$`8+16*4`,%rsp | ||
75 | mov %rsi,$inp # reassigned argument | ||
76 | and \$-64,%rsp | ||
77 | mov %rdx,$num # reassigned argument | ||
78 | mov %rax,`16*4`(%rsp) | ||
79 | |||
80 | mov 0($ctx),$A | ||
81 | mov 4($ctx),$B | ||
82 | mov 8($ctx),$C | ||
83 | mov 12($ctx),$D | ||
84 | mov 16($ctx),$E | ||
85 | ___ | ||
86 | } | ||
87 | |||
88 | sub EPILOGUE { | ||
89 | my $func=shift; | ||
90 | $code.=<<___; | ||
91 | mov `16*4`(%rsp),%rsp | ||
92 | pop %r12 | ||
93 | pop %rbp | ||
94 | pop %rbx | ||
95 | ret | ||
96 | .size $func,.-$func | ||
97 | ___ | ||
98 | } | ||
99 | |||
100 | sub BODY_00_19 { | ||
101 | my ($i,$a,$b,$c,$d,$e,$f,$host)=@_; | ||
102 | my $j=$i+1; | ||
103 | $code.=<<___ if ($i==0); | ||
104 | mov `4*$i`($inp),$xi | ||
105 | `"bswap $xi" if(!defined($host))` | ||
106 | mov $xi,`4*$i`(%rsp) | ||
107 | ___ | ||
108 | $code.=<<___ if ($i<15); | ||
109 | lea 0x5a827999($xi,$e),$f | ||
110 | mov $c,$t0 | ||
111 | mov `4*$j`($inp),$xi | ||
112 | mov $a,$e | ||
113 | xor $d,$t0 | ||
114 | `"bswap $xi" if(!defined($host))` | ||
115 | rol \$5,$e | ||
116 | and $b,$t0 | ||
117 | mov $xi,`4*$j`(%rsp) | ||
118 | add $e,$f | ||
119 | xor $d,$t0 | ||
120 | rol \$30,$b | ||
121 | add $t0,$f | ||
122 | ___ | ||
123 | $code.=<<___ if ($i>=15); | ||
124 | lea 0x5a827999($xi,$e),$f | ||
125 | mov `4*($j%16)`(%rsp),$xi | ||
126 | mov $c,$t0 | ||
127 | mov $a,$e | ||
128 | xor `4*(($j+2)%16)`(%rsp),$xi | ||
129 | xor $d,$t0 | ||
130 | rol \$5,$e | ||
131 | xor `4*(($j+8)%16)`(%rsp),$xi | ||
132 | and $b,$t0 | ||
133 | add $e,$f | ||
134 | xor `4*(($j+13)%16)`(%rsp),$xi | ||
135 | xor $d,$t0 | ||
136 | rol \$30,$b | ||
137 | add $t0,$f | ||
138 | rol \$1,$xi | ||
139 | mov $xi,`4*($j%16)`(%rsp) | ||
140 | ___ | ||
141 | } | ||
142 | |||
143 | sub BODY_20_39 { | ||
144 | my ($i,$a,$b,$c,$d,$e,$f)=@_; | ||
145 | my $j=$i+1; | ||
146 | my $K=($i<40)?0x6ed9eba1:0xca62c1d6; | ||
147 | $code.=<<___ if ($i<79); | ||
148 | lea $K($xi,$e),$f | ||
149 | mov `4*($j%16)`(%rsp),$xi | ||
150 | mov $c,$t0 | ||
151 | mov $a,$e | ||
152 | xor `4*(($j+2)%16)`(%rsp),$xi | ||
153 | xor $b,$t0 | ||
154 | rol \$5,$e | ||
155 | xor `4*(($j+8)%16)`(%rsp),$xi | ||
156 | xor $d,$t0 | ||
157 | add $e,$f | ||
158 | xor `4*(($j+13)%16)`(%rsp),$xi | ||
159 | rol \$30,$b | ||
160 | add $t0,$f | ||
161 | rol \$1,$xi | ||
162 | ___ | ||
163 | $code.=<<___ if ($i<76); | ||
164 | mov $xi,`4*($j%16)`(%rsp) | ||
165 | ___ | ||
166 | $code.=<<___ if ($i==79); | ||
167 | lea $K($xi,$e),$f | ||
168 | mov $c,$t0 | ||
169 | mov $a,$e | ||
170 | xor $b,$t0 | ||
171 | rol \$5,$e | ||
172 | xor $d,$t0 | ||
173 | add $e,$f | ||
174 | rol \$30,$b | ||
175 | add $t0,$f | ||
176 | ___ | ||
177 | } | ||
178 | |||
179 | sub BODY_40_59 { | ||
180 | my ($i,$a,$b,$c,$d,$e,$f)=@_; | ||
181 | my $j=$i+1; | ||
182 | $code.=<<___; | ||
183 | lea 0x8f1bbcdc($xi,$e),$f | ||
184 | mov `4*($j%16)`(%rsp),$xi | ||
185 | mov $b,$t0 | ||
186 | mov $b,$t1 | ||
187 | xor `4*(($j+2)%16)`(%rsp),$xi | ||
188 | mov $a,$e | ||
189 | and $c,$t0 | ||
190 | xor `4*(($j+8)%16)`(%rsp),$xi | ||
191 | or $c,$t1 | ||
192 | rol \$5,$e | ||
193 | xor `4*(($j+13)%16)`(%rsp),$xi | ||
194 | and $d,$t1 | ||
195 | add $e,$f | ||
196 | rol \$1,$xi | ||
197 | or $t1,$t0 | ||
198 | rol \$30,$b | ||
199 | mov $xi,`4*($j%16)`(%rsp) | ||
200 | add $t0,$f | ||
201 | ___ | ||
202 | } | ||
203 | |||
204 | $code=".text\n"; | ||
205 | |||
206 | &PROLOGUE("sha1_block_data_order"); | ||
207 | $code.=".align 4\n.Lloop:\n"; | ||
208 | for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); } | ||
209 | for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } | ||
210 | for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } | ||
211 | for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } | ||
212 | $code.=<<___; | ||
213 | add 0($ctx),$E | ||
214 | add 4($ctx),$T | ||
215 | add 8($ctx),$A | ||
216 | add 12($ctx),$B | ||
217 | add 16($ctx),$C | ||
218 | mov $E,0($ctx) | ||
219 | mov $T,4($ctx) | ||
220 | mov $A,8($ctx) | ||
221 | mov $B,12($ctx) | ||
222 | mov $C,16($ctx) | ||
223 | |||
224 | xchg $E,$A # mov $E,$A | ||
225 | xchg $T,$B # mov $T,$B | ||
226 | xchg $E,$C # mov $A,$C | ||
227 | xchg $T,$D # mov $B,$D | ||
228 | # mov $C,$E | ||
229 | lea `16*4`($inp),$inp | ||
230 | sub \$1,$num | ||
231 | jnz .Lloop | ||
232 | ___ | ||
233 | &EPILOGUE("sha1_block_data_order"); | ||
234 | $code.=<<___; | ||
235 | .asciz "SHA1 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>" | ||
236 | ___ | ||
237 | |||
238 | #################################################################### | ||
239 | |||
240 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
241 | print $code; | ||
242 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/sha/asm/sha512-ia64.pl b/src/lib/libcrypto/sha/asm/sha512-ia64.pl new file mode 100755 index 0000000000..1c6ce56522 --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha512-ia64.pl | |||
@@ -0,0 +1,672 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # SHA256/512_Transform for Itanium. | ||
11 | # | ||
12 | # sha512_block runs in 1003 cycles on Itanium 2, which is almost 50% | ||
13 | # faster than gcc and >60%(!) faster than code generated by HP-UX | ||
14 | # compiler (yes, HP-UX is generating slower code, because unlike gcc, | ||
15 | # it failed to deploy "shift right pair," 'shrp' instruction, which | ||
16 | # substitutes for 64-bit rotate). | ||
17 | # | ||
18 | # 924 cycles long sha256_block outperforms gcc by over factor of 2(!) | ||
19 | # and HP-UX compiler - by >40% (yes, gcc won sha512_block, but lost | ||
20 | # this one big time). Note that "formally" 924 is about 100 cycles | ||
21 | # too much. I mean it's 64 32-bit rounds vs. 80 virtually identical | ||
22 | # 64-bit ones and 1003*64/80 gives 802. Extra cycles, 2 per round, | ||
23 | # are spent on extra work to provide for 32-bit rotations. 32-bit | ||
24 | # rotations are still handled by 'shrp' instruction and for this | ||
25 | # reason lower 32 bits are deposited to upper half of 64-bit register | ||
26 | # prior 'shrp' issue. And in order to minimize the amount of such | ||
27 | # operations, X[16] values are *maintained* with copies of lower | ||
28 | # halves in upper halves, which is why you'll spot such instructions | ||
29 | # as custom 'mux2', "parallel 32-bit add," 'padd4' and "parallel | ||
30 | # 32-bit unsigned right shift," 'pshr4.u' instructions here. | ||
31 | # | ||
32 | # Rules of engagement. | ||
33 | # | ||
34 | # There is only one integer shifter meaning that if I have two rotate, | ||
35 | # deposit or extract instructions in adjacent bundles, they shall | ||
36 | # split [at run-time if they have to]. But note that variable and | ||
37 | # parallel shifts are performed by multi-media ALU and *are* pairable | ||
38 | # with rotates [and alike]. On the backside MMALU is rather slow: it | ||
39 | # takes 2 extra cycles before the result of integer operation is | ||
40 | # available *to* MMALU and 2(*) extra cycles before the result of MM | ||
41 | # operation is available "back" *to* integer ALU, not to mention that | ||
42 | # MMALU itself has 2 cycles latency. However! I explicitly scheduled | ||
43 | # these MM instructions to avoid MM stalls, so that all these extra | ||
44 | # latencies get "hidden" in instruction-level parallelism. | ||
45 | # | ||
46 | # (*) 2 cycles on Itanium 1 and 1 cycle on Itanium 2. But I schedule | ||
47 | # for 2 in order to provide for best *overall* performance, | ||
48 | # because on Itanium 1 stall on MM result is accompanied by | ||
49 | # pipeline flush, which takes 6 cycles:-( | ||
50 | # | ||
51 | # Resulting performance numbers for 900MHz Itanium 2 system: | ||
52 | # | ||
53 | # The 'numbers' are in 1000s of bytes per second processed. | ||
54 | # type 16 bytes 64 bytes 256 bytes 1024 bytes 8192 bytes | ||
55 | # sha1(*) 6210.14k 20376.30k 52447.83k 85870.05k 105478.12k | ||
56 | # sha256 7476.45k 20572.05k 41538.34k 56062.29k 62093.18k | ||
57 | # sha512 4996.56k 20026.28k 47597.20k 85278.79k 111501.31k | ||
58 | # | ||
59 | # (*) SHA1 numbers are for HP-UX compiler and are presented purely | ||
60 | # for reference purposes. I bet it can improved too... | ||
61 | # | ||
62 | # To generate code, pass the file name with either 256 or 512 in its | ||
63 | # name and compiler flags. | ||
64 | |||
65 | $output=shift; | ||
66 | |||
67 | if ($output =~ /512.*\.[s|asm]/) { | ||
68 | $SZ=8; | ||
69 | $BITS=8*$SZ; | ||
70 | $LDW="ld8"; | ||
71 | $STW="st8"; | ||
72 | $ADD="add"; | ||
73 | $SHRU="shr.u"; | ||
74 | $TABLE="K512"; | ||
75 | $func="sha512_block_data_order"; | ||
76 | @Sigma0=(28,34,39); | ||
77 | @Sigma1=(14,18,41); | ||
78 | @sigma0=(1, 8, 7); | ||
79 | @sigma1=(19,61, 6); | ||
80 | $rounds=80; | ||
81 | } elsif ($output =~ /256.*\.[s|asm]/) { | ||
82 | $SZ=4; | ||
83 | $BITS=8*$SZ; | ||
84 | $LDW="ld4"; | ||
85 | $STW="st4"; | ||
86 | $ADD="padd4"; | ||
87 | $SHRU="pshr4.u"; | ||
88 | $TABLE="K256"; | ||
89 | $func="sha256_block_data_order"; | ||
90 | @Sigma0=( 2,13,22); | ||
91 | @Sigma1=( 6,11,25); | ||
92 | @sigma0=( 7,18, 3); | ||
93 | @sigma1=(17,19,10); | ||
94 | $rounds=64; | ||
95 | } else { die "nonsense $output"; } | ||
96 | |||
97 | open STDOUT,">$output" || die "can't open $output: $!"; | ||
98 | |||
99 | if ($^O eq "hpux") { | ||
100 | $ADDP="addp4"; | ||
101 | for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); } | ||
102 | } else { $ADDP="add"; } | ||
103 | for (@ARGV) { $big_endian=1 if (/\-DB_ENDIAN/); | ||
104 | $big_endian=0 if (/\-DL_ENDIAN/); } | ||
105 | if (!defined($big_endian)) | ||
106 | { $big_endian=(unpack('L',pack('N',1))==1); } | ||
107 | |||
108 | $code=<<___; | ||
109 | .ident \"$output, version 1.1\" | ||
110 | .ident \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\" | ||
111 | .explicit | ||
112 | .text | ||
113 | |||
114 | pfssave=r2; | ||
115 | lcsave=r3; | ||
116 | prsave=r14; | ||
117 | K=r15; | ||
118 | A=r16; B=r17; C=r18; D=r19; | ||
119 | E=r20; F=r21; G=r22; H=r23; | ||
120 | T1=r24; T2=r25; | ||
121 | s0=r26; s1=r27; t0=r28; t1=r29; | ||
122 | Ktbl=r30; | ||
123 | ctx=r31; // 1st arg | ||
124 | input=r48; // 2nd arg | ||
125 | num=r49; // 3rd arg | ||
126 | sgm0=r50; sgm1=r51; // small constants | ||
127 | A_=r54; B_=r55; C_=r56; D_=r57; | ||
128 | E_=r58; F_=r59; G_=r60; H_=r61; | ||
129 | |||
130 | // void $func (SHA_CTX *ctx, const void *in,size_t num[,int host]) | ||
131 | .global $func# | ||
132 | .proc $func# | ||
133 | .align 32 | ||
134 | $func: | ||
135 | .prologue | ||
136 | .save ar.pfs,pfssave | ||
137 | { .mmi; alloc pfssave=ar.pfs,3,27,0,16 | ||
138 | $ADDP ctx=0,r32 // 1st arg | ||
139 | .save ar.lc,lcsave | ||
140 | mov lcsave=ar.lc } | ||
141 | { .mmi; $ADDP input=0,r33 // 2nd arg | ||
142 | mov num=r34 // 3rd arg | ||
143 | .save pr,prsave | ||
144 | mov prsave=pr };; | ||
145 | |||
146 | .body | ||
147 | { .mib; add r8=0*$SZ,ctx | ||
148 | add r9=1*$SZ,ctx | ||
149 | brp.loop.imp .L_first16,.L_first16_end-16 } | ||
150 | { .mib; add r10=2*$SZ,ctx | ||
151 | add r11=3*$SZ,ctx | ||
152 | brp.loop.imp .L_rest,.L_rest_end-16 };; | ||
153 | |||
154 | // load A-H | ||
155 | .Lpic_point: | ||
156 | { .mmi; $LDW A_=[r8],4*$SZ | ||
157 | $LDW B_=[r9],4*$SZ | ||
158 | mov Ktbl=ip } | ||
159 | { .mmi; $LDW C_=[r10],4*$SZ | ||
160 | $LDW D_=[r11],4*$SZ | ||
161 | mov sgm0=$sigma0[2] };; | ||
162 | { .mmi; $LDW E_=[r8] | ||
163 | $LDW F_=[r9] | ||
164 | add Ktbl=($TABLE#-.Lpic_point),Ktbl } | ||
165 | { .mmi; $LDW G_=[r10] | ||
166 | $LDW H_=[r11] | ||
167 | cmp.ne p0,p16=0,r0 };; // used in sha256_block | ||
168 | ___ | ||
169 | $code.=<<___ if ($BITS==64); | ||
170 | { .mii; and r8=7,input | ||
171 | and input=~7,input;; | ||
172 | cmp.eq p9,p0=1,r8 } | ||
173 | { .mmi; cmp.eq p10,p0=2,r8 | ||
174 | cmp.eq p11,p0=3,r8 | ||
175 | cmp.eq p12,p0=4,r8 } | ||
176 | { .mmi; cmp.eq p13,p0=5,r8 | ||
177 | cmp.eq p14,p0=6,r8 | ||
178 | cmp.eq p15,p0=7,r8 };; | ||
179 | ___ | ||
180 | $code.=<<___; | ||
181 | .L_outer: | ||
182 | .rotr X[16] | ||
183 | { .mmi; mov A=A_ | ||
184 | mov B=B_ | ||
185 | mov ar.lc=14 } | ||
186 | { .mmi; mov C=C_ | ||
187 | mov D=D_ | ||
188 | mov E=E_ } | ||
189 | { .mmi; mov F=F_ | ||
190 | mov G=G_ | ||
191 | mov ar.ec=2 } | ||
192 | { .mmi; ld1 X[15]=[input],$SZ // eliminated in 64-bit | ||
193 | mov H=H_ | ||
194 | mov sgm1=$sigma1[2] };; | ||
195 | |||
196 | ___ | ||
197 | $t0="t0", $t1="t1", $code.=<<___ if ($BITS==32); | ||
198 | .align 32 | ||
199 | .L_first16: | ||
200 | { .mmi; add r9=1-$SZ,input | ||
201 | add r10=2-$SZ,input | ||
202 | add r11=3-$SZ,input };; | ||
203 | { .mmi; ld1 r9=[r9] | ||
204 | ld1 r10=[r10] | ||
205 | dep.z $t1=E,32,32 } | ||
206 | { .mmi; $LDW K=[Ktbl],$SZ | ||
207 | ld1 r11=[r11] | ||
208 | zxt4 E=E };; | ||
209 | { .mii; or $t1=$t1,E | ||
210 | dep X[15]=X[15],r9,8,8 | ||
211 | dep r11=r10,r11,8,8 };; | ||
212 | { .mmi; and T1=F,E | ||
213 | and T2=A,B | ||
214 | dep X[15]=X[15],r11,16,16 } | ||
215 | { .mmi; andcm r8=G,E | ||
216 | and r9=A,C | ||
217 | mux2 $t0=A,0x44 };; // copy lower half to upper | ||
218 | { .mmi; (p16) ld1 X[15-1]=[input],$SZ // prefetch | ||
219 | xor T1=T1,r8 // T1=((e & f) ^ (~e & g)) | ||
220 | _rotr r11=$t1,$Sigma1[0] } // ROTR(e,14) | ||
221 | { .mib; and r10=B,C | ||
222 | xor T2=T2,r9 };; | ||
223 | ___ | ||
224 | $t0="A", $t1="E", $code.=<<___ if ($BITS==64); | ||
225 | // in 64-bit mode I load whole X[16] at once and take care of alignment... | ||
226 | { .mmi; add r8=1*$SZ,input | ||
227 | add r9=2*$SZ,input | ||
228 | add r10=3*$SZ,input };; | ||
229 | { .mmb; $LDW X[15]=[input],4*$SZ | ||
230 | $LDW X[14]=[r8],4*$SZ | ||
231 | (p9) br.cond.dpnt.many .L1byte };; | ||
232 | { .mmb; $LDW X[13]=[r9],4*$SZ | ||
233 | $LDW X[12]=[r10],4*$SZ | ||
234 | (p10) br.cond.dpnt.many .L2byte };; | ||
235 | { .mmb; $LDW X[11]=[input],4*$SZ | ||
236 | $LDW X[10]=[r8],4*$SZ | ||
237 | (p11) br.cond.dpnt.many .L3byte };; | ||
238 | { .mmb; $LDW X[ 9]=[r9],4*$SZ | ||
239 | $LDW X[ 8]=[r10],4*$SZ | ||
240 | (p12) br.cond.dpnt.many .L4byte };; | ||
241 | { .mmb; $LDW X[ 7]=[input],4*$SZ | ||
242 | $LDW X[ 6]=[r8],4*$SZ | ||
243 | (p13) br.cond.dpnt.many .L5byte };; | ||
244 | { .mmb; $LDW X[ 5]=[r9],4*$SZ | ||
245 | $LDW X[ 4]=[r10],4*$SZ | ||
246 | (p14) br.cond.dpnt.many .L6byte };; | ||
247 | { .mmb; $LDW X[ 3]=[input],4*$SZ | ||
248 | $LDW X[ 2]=[r8],4*$SZ | ||
249 | (p15) br.cond.dpnt.many .L7byte };; | ||
250 | { .mmb; $LDW X[ 1]=[r9],4*$SZ | ||
251 | $LDW X[ 0]=[r10],4*$SZ | ||
252 | br.many .L_first16 };; | ||
253 | .L1byte: | ||
254 | { .mmi; $LDW X[13]=[r9],4*$SZ | ||
255 | $LDW X[12]=[r10],4*$SZ | ||
256 | shrp X[15]=X[15],X[14],56 };; | ||
257 | { .mmi; $LDW X[11]=[input],4*$SZ | ||
258 | $LDW X[10]=[r8],4*$SZ | ||
259 | shrp X[14]=X[14],X[13],56 } | ||
260 | { .mmi; $LDW X[ 9]=[r9],4*$SZ | ||
261 | $LDW X[ 8]=[r10],4*$SZ | ||
262 | shrp X[13]=X[13],X[12],56 };; | ||
263 | { .mmi; $LDW X[ 7]=[input],4*$SZ | ||
264 | $LDW X[ 6]=[r8],4*$SZ | ||
265 | shrp X[12]=X[12],X[11],56 } | ||
266 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
267 | $LDW X[ 4]=[r10],4*$SZ | ||
268 | shrp X[11]=X[11],X[10],56 };; | ||
269 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
270 | $LDW X[ 2]=[r8],4*$SZ | ||
271 | shrp X[10]=X[10],X[ 9],56 } | ||
272 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
273 | $LDW X[ 0]=[r10],4*$SZ | ||
274 | shrp X[ 9]=X[ 9],X[ 8],56 };; | ||
275 | { .mii; $LDW T1=[input] | ||
276 | shrp X[ 8]=X[ 8],X[ 7],56 | ||
277 | shrp X[ 7]=X[ 7],X[ 6],56 } | ||
278 | { .mii; shrp X[ 6]=X[ 6],X[ 5],56 | ||
279 | shrp X[ 5]=X[ 5],X[ 4],56 };; | ||
280 | { .mii; shrp X[ 4]=X[ 4],X[ 3],56 | ||
281 | shrp X[ 3]=X[ 3],X[ 2],56 } | ||
282 | { .mii; shrp X[ 2]=X[ 2],X[ 1],56 | ||
283 | shrp X[ 1]=X[ 1],X[ 0],56 } | ||
284 | { .mib; shrp X[ 0]=X[ 0],T1,56 | ||
285 | br.many .L_first16 };; | ||
286 | .L2byte: | ||
287 | { .mmi; $LDW X[11]=[input],4*$SZ | ||
288 | $LDW X[10]=[r8],4*$SZ | ||
289 | shrp X[15]=X[15],X[14],48 } | ||
290 | { .mmi; $LDW X[ 9]=[r9],4*$SZ | ||
291 | $LDW X[ 8]=[r10],4*$SZ | ||
292 | shrp X[14]=X[14],X[13],48 };; | ||
293 | { .mmi; $LDW X[ 7]=[input],4*$SZ | ||
294 | $LDW X[ 6]=[r8],4*$SZ | ||
295 | shrp X[13]=X[13],X[12],48 } | ||
296 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
297 | $LDW X[ 4]=[r10],4*$SZ | ||
298 | shrp X[12]=X[12],X[11],48 };; | ||
299 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
300 | $LDW X[ 2]=[r8],4*$SZ | ||
301 | shrp X[11]=X[11],X[10],48 } | ||
302 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
303 | $LDW X[ 0]=[r10],4*$SZ | ||
304 | shrp X[10]=X[10],X[ 9],48 };; | ||
305 | { .mii; $LDW T1=[input] | ||
306 | shrp X[ 9]=X[ 9],X[ 8],48 | ||
307 | shrp X[ 8]=X[ 8],X[ 7],48 } | ||
308 | { .mii; shrp X[ 7]=X[ 7],X[ 6],48 | ||
309 | shrp X[ 6]=X[ 6],X[ 5],48 };; | ||
310 | { .mii; shrp X[ 5]=X[ 5],X[ 4],48 | ||
311 | shrp X[ 4]=X[ 4],X[ 3],48 } | ||
312 | { .mii; shrp X[ 3]=X[ 3],X[ 2],48 | ||
313 | shrp X[ 2]=X[ 2],X[ 1],48 } | ||
314 | { .mii; shrp X[ 1]=X[ 1],X[ 0],48 | ||
315 | shrp X[ 0]=X[ 0],T1,48 } | ||
316 | { .mfb; br.many .L_first16 };; | ||
317 | .L3byte: | ||
318 | { .mmi; $LDW X[ 9]=[r9],4*$SZ | ||
319 | $LDW X[ 8]=[r10],4*$SZ | ||
320 | shrp X[15]=X[15],X[14],40 };; | ||
321 | { .mmi; $LDW X[ 7]=[input],4*$SZ | ||
322 | $LDW X[ 6]=[r8],4*$SZ | ||
323 | shrp X[14]=X[14],X[13],40 } | ||
324 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
325 | $LDW X[ 4]=[r10],4*$SZ | ||
326 | shrp X[13]=X[13],X[12],40 };; | ||
327 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
328 | $LDW X[ 2]=[r8],4*$SZ | ||
329 | shrp X[12]=X[12],X[11],40 } | ||
330 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
331 | $LDW X[ 0]=[r10],4*$SZ | ||
332 | shrp X[11]=X[11],X[10],40 };; | ||
333 | { .mii; $LDW T1=[input] | ||
334 | shrp X[10]=X[10],X[ 9],40 | ||
335 | shrp X[ 9]=X[ 9],X[ 8],40 } | ||
336 | { .mii; shrp X[ 8]=X[ 8],X[ 7],40 | ||
337 | shrp X[ 7]=X[ 7],X[ 6],40 };; | ||
338 | { .mii; shrp X[ 6]=X[ 6],X[ 5],40 | ||
339 | shrp X[ 5]=X[ 5],X[ 4],40 } | ||
340 | { .mii; shrp X[ 4]=X[ 4],X[ 3],40 | ||
341 | shrp X[ 3]=X[ 3],X[ 2],40 } | ||
342 | { .mii; shrp X[ 2]=X[ 2],X[ 1],40 | ||
343 | shrp X[ 1]=X[ 1],X[ 0],40 } | ||
344 | { .mib; shrp X[ 0]=X[ 0],T1,40 | ||
345 | br.many .L_first16 };; | ||
346 | .L4byte: | ||
347 | { .mmi; $LDW X[ 7]=[input],4*$SZ | ||
348 | $LDW X[ 6]=[r8],4*$SZ | ||
349 | shrp X[15]=X[15],X[14],32 } | ||
350 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
351 | $LDW X[ 4]=[r10],4*$SZ | ||
352 | shrp X[14]=X[14],X[13],32 };; | ||
353 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
354 | $LDW X[ 2]=[r8],4*$SZ | ||
355 | shrp X[13]=X[13],X[12],32 } | ||
356 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
357 | $LDW X[ 0]=[r10],4*$SZ | ||
358 | shrp X[12]=X[12],X[11],32 };; | ||
359 | { .mii; $LDW T1=[input] | ||
360 | shrp X[11]=X[11],X[10],32 | ||
361 | shrp X[10]=X[10],X[ 9],32 } | ||
362 | { .mii; shrp X[ 9]=X[ 9],X[ 8],32 | ||
363 | shrp X[ 8]=X[ 8],X[ 7],32 };; | ||
364 | { .mii; shrp X[ 7]=X[ 7],X[ 6],32 | ||
365 | shrp X[ 6]=X[ 6],X[ 5],32 } | ||
366 | { .mii; shrp X[ 5]=X[ 5],X[ 4],32 | ||
367 | shrp X[ 4]=X[ 4],X[ 3],32 } | ||
368 | { .mii; shrp X[ 3]=X[ 3],X[ 2],32 | ||
369 | shrp X[ 2]=X[ 2],X[ 1],32 } | ||
370 | { .mii; shrp X[ 1]=X[ 1],X[ 0],32 | ||
371 | shrp X[ 0]=X[ 0],T1,32 } | ||
372 | { .mfb; br.many .L_first16 };; | ||
373 | .L5byte: | ||
374 | { .mmi; $LDW X[ 5]=[r9],4*$SZ | ||
375 | $LDW X[ 4]=[r10],4*$SZ | ||
376 | shrp X[15]=X[15],X[14],24 };; | ||
377 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
378 | $LDW X[ 2]=[r8],4*$SZ | ||
379 | shrp X[14]=X[14],X[13],24 } | ||
380 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
381 | $LDW X[ 0]=[r10],4*$SZ | ||
382 | shrp X[13]=X[13],X[12],24 };; | ||
383 | { .mii; $LDW T1=[input] | ||
384 | shrp X[12]=X[12],X[11],24 | ||
385 | shrp X[11]=X[11],X[10],24 } | ||
386 | { .mii; shrp X[10]=X[10],X[ 9],24 | ||
387 | shrp X[ 9]=X[ 9],X[ 8],24 };; | ||
388 | { .mii; shrp X[ 8]=X[ 8],X[ 7],24 | ||
389 | shrp X[ 7]=X[ 7],X[ 6],24 } | ||
390 | { .mii; shrp X[ 6]=X[ 6],X[ 5],24 | ||
391 | shrp X[ 5]=X[ 5],X[ 4],24 } | ||
392 | { .mii; shrp X[ 4]=X[ 4],X[ 3],24 | ||
393 | shrp X[ 3]=X[ 3],X[ 2],24 } | ||
394 | { .mii; shrp X[ 2]=X[ 2],X[ 1],24 | ||
395 | shrp X[ 1]=X[ 1],X[ 0],24 } | ||
396 | { .mib; shrp X[ 0]=X[ 0],T1,24 | ||
397 | br.many .L_first16 };; | ||
398 | .L6byte: | ||
399 | { .mmi; $LDW X[ 3]=[input],4*$SZ | ||
400 | $LDW X[ 2]=[r8],4*$SZ | ||
401 | shrp X[15]=X[15],X[14],16 } | ||
402 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
403 | $LDW X[ 0]=[r10],4*$SZ | ||
404 | shrp X[14]=X[14],X[13],16 };; | ||
405 | { .mii; $LDW T1=[input] | ||
406 | shrp X[13]=X[13],X[12],16 | ||
407 | shrp X[12]=X[12],X[11],16 } | ||
408 | { .mii; shrp X[11]=X[11],X[10],16 | ||
409 | shrp X[10]=X[10],X[ 9],16 };; | ||
410 | { .mii; shrp X[ 9]=X[ 9],X[ 8],16 | ||
411 | shrp X[ 8]=X[ 8],X[ 7],16 } | ||
412 | { .mii; shrp X[ 7]=X[ 7],X[ 6],16 | ||
413 | shrp X[ 6]=X[ 6],X[ 5],16 } | ||
414 | { .mii; shrp X[ 5]=X[ 5],X[ 4],16 | ||
415 | shrp X[ 4]=X[ 4],X[ 3],16 } | ||
416 | { .mii; shrp X[ 3]=X[ 3],X[ 2],16 | ||
417 | shrp X[ 2]=X[ 2],X[ 1],16 } | ||
418 | { .mii; shrp X[ 1]=X[ 1],X[ 0],16 | ||
419 | shrp X[ 0]=X[ 0],T1,16 } | ||
420 | { .mfb; br.many .L_first16 };; | ||
421 | .L7byte: | ||
422 | { .mmi; $LDW X[ 1]=[r9],4*$SZ | ||
423 | $LDW X[ 0]=[r10],4*$SZ | ||
424 | shrp X[15]=X[15],X[14],8 };; | ||
425 | { .mii; $LDW T1=[input] | ||
426 | shrp X[14]=X[14],X[13],8 | ||
427 | shrp X[13]=X[13],X[12],8 } | ||
428 | { .mii; shrp X[12]=X[12],X[11],8 | ||
429 | shrp X[11]=X[11],X[10],8 };; | ||
430 | { .mii; shrp X[10]=X[10],X[ 9],8 | ||
431 | shrp X[ 9]=X[ 9],X[ 8],8 } | ||
432 | { .mii; shrp X[ 8]=X[ 8],X[ 7],8 | ||
433 | shrp X[ 7]=X[ 7],X[ 6],8 } | ||
434 | { .mii; shrp X[ 6]=X[ 6],X[ 5],8 | ||
435 | shrp X[ 5]=X[ 5],X[ 4],8 } | ||
436 | { .mii; shrp X[ 4]=X[ 4],X[ 3],8 | ||
437 | shrp X[ 3]=X[ 3],X[ 2],8 } | ||
438 | { .mii; shrp X[ 2]=X[ 2],X[ 1],8 | ||
439 | shrp X[ 1]=X[ 1],X[ 0],8 } | ||
440 | { .mib; shrp X[ 0]=X[ 0],T1,8 | ||
441 | br.many .L_first16 };; | ||
442 | |||
443 | .align 32 | ||
444 | .L_first16: | ||
445 | { .mmi; $LDW K=[Ktbl],$SZ | ||
446 | and T1=F,E | ||
447 | and T2=A,B } | ||
448 | { .mmi; //$LDW X[15]=[input],$SZ // X[i]=*input++ | ||
449 | andcm r8=G,E | ||
450 | and r9=A,C };; | ||
451 | { .mmi; xor T1=T1,r8 //T1=((e & f) ^ (~e & g)) | ||
452 | and r10=B,C | ||
453 | _rotr r11=$t1,$Sigma1[0] } // ROTR(e,14) | ||
454 | { .mmi; xor T2=T2,r9 | ||
455 | mux1 X[15]=X[15],\@rev };; // eliminated in big-endian | ||
456 | ___ | ||
457 | $code.=<<___; | ||
458 | { .mib; add T1=T1,H // T1=Ch(e,f,g)+h | ||
459 | _rotr r8=$t1,$Sigma1[1] } // ROTR(e,18) | ||
460 | { .mib; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c)) | ||
461 | mov H=G };; | ||
462 | { .mib; xor r11=r8,r11 | ||
463 | _rotr r9=$t1,$Sigma1[2] } // ROTR(e,41) | ||
464 | { .mib; mov G=F | ||
465 | mov F=E };; | ||
466 | { .mib; xor r9=r9,r11 // r9=Sigma1(e) | ||
467 | _rotr r10=$t0,$Sigma0[0] } // ROTR(a,28) | ||
468 | { .mib; add T1=T1,K // T1=Ch(e,f,g)+h+K512[i] | ||
469 | mov E=D };; | ||
470 | { .mib; add T1=T1,r9 // T1+=Sigma1(e) | ||
471 | _rotr r11=$t0,$Sigma0[1] } // ROTR(a,34) | ||
472 | { .mib; mov D=C | ||
473 | mov C=B };; | ||
474 | { .mib; add T1=T1,X[15] // T1+=X[i] | ||
475 | _rotr r8=$t0,$Sigma0[2] } // ROTR(a,39) | ||
476 | { .mib; xor r10=r10,r11 | ||
477 | mux2 X[15]=X[15],0x44 };; // eliminated in 64-bit | ||
478 | { .mmi; xor r10=r8,r10 // r10=Sigma0(a) | ||
479 | mov B=A | ||
480 | add A=T1,T2 };; | ||
481 | { .mib; add E=E,T1 | ||
482 | add A=A,r10 // T2=Maj(a,b,c)+Sigma0(a) | ||
483 | br.ctop.sptk .L_first16 };; | ||
484 | .L_first16_end: | ||
485 | |||
486 | { .mii; mov ar.lc=$rounds-17 | ||
487 | mov ar.ec=1 };; | ||
488 | |||
489 | .align 32 | ||
490 | .L_rest: | ||
491 | .rotr X[16] | ||
492 | { .mib; $LDW K=[Ktbl],$SZ | ||
493 | _rotr r8=X[15-1],$sigma0[0] } // ROTR(s0,1) | ||
494 | { .mib; $ADD X[15]=X[15],X[15-9] // X[i&0xF]+=X[(i+9)&0xF] | ||
495 | $SHRU s0=X[15-1],sgm0 };; // s0=X[(i+1)&0xF]>>7 | ||
496 | { .mib; and T1=F,E | ||
497 | _rotr r9=X[15-1],$sigma0[1] } // ROTR(s0,8) | ||
498 | { .mib; andcm r10=G,E | ||
499 | $SHRU s1=X[15-14],sgm1 };; // s1=X[(i+14)&0xF]>>6 | ||
500 | { .mmi; xor T1=T1,r10 // T1=((e & f) ^ (~e & g)) | ||
501 | xor r9=r8,r9 | ||
502 | _rotr r10=X[15-14],$sigma1[0] };;// ROTR(s1,19) | ||
503 | { .mib; and T2=A,B | ||
504 | _rotr r11=X[15-14],$sigma1[1] }// ROTR(s1,61) | ||
505 | { .mib; and r8=A,C };; | ||
506 | ___ | ||
507 | $t0="t0", $t1="t1", $code.=<<___ if ($BITS==32); | ||
508 | // I adhere to mmi; in order to hold Itanium 1 back and avoid 6 cycle | ||
509 | // pipeline flush in last bundle. Note that even on Itanium2 the | ||
510 | // latter stalls for one clock cycle... | ||
511 | { .mmi; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF]) | ||
512 | dep.z $t1=E,32,32 } | ||
513 | { .mmi; xor r10=r11,r10 | ||
514 | zxt4 E=E };; | ||
515 | { .mmi; or $t1=$t1,E | ||
516 | xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF]) | ||
517 | mux2 $t0=A,0x44 };; // copy lower half to upper | ||
518 | { .mmi; xor T2=T2,r8 | ||
519 | _rotr r9=$t1,$Sigma1[0] } // ROTR(e,14) | ||
520 | { .mmi; and r10=B,C | ||
521 | add T1=T1,H // T1=Ch(e,f,g)+h | ||
522 | $ADD X[15]=X[15],s0 };; // X[i&0xF]+=sigma0(X[(i+1)&0xF]) | ||
523 | ___ | ||
524 | $t0="A", $t1="E", $code.=<<___ if ($BITS==64); | ||
525 | { .mib; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF]) | ||
526 | _rotr r9=$t1,$Sigma1[0] } // ROTR(e,14) | ||
527 | { .mib; xor r10=r11,r10 | ||
528 | xor T2=T2,r8 };; | ||
529 | { .mib; xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF]) | ||
530 | add T1=T1,H } | ||
531 | { .mib; and r10=B,C | ||
532 | $ADD X[15]=X[15],s0 };; // X[i&0xF]+=sigma0(X[(i+1)&0xF]) | ||
533 | ___ | ||
534 | $code.=<<___; | ||
535 | { .mmi; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c)) | ||
536 | mov H=G | ||
537 | _rotr r8=$t1,$Sigma1[1] };; // ROTR(e,18) | ||
538 | { .mmi; xor r11=r8,r9 | ||
539 | $ADD X[15]=X[15],s1 // X[i&0xF]+=sigma1(X[(i+14)&0xF]) | ||
540 | _rotr r9=$t1,$Sigma1[2] } // ROTR(e,41) | ||
541 | { .mmi; mov G=F | ||
542 | mov F=E };; | ||
543 | { .mib; xor r9=r9,r11 // r9=Sigma1(e) | ||
544 | _rotr r10=$t0,$Sigma0[0] } // ROTR(a,28) | ||
545 | { .mib; add T1=T1,K // T1=Ch(e,f,g)+h+K512[i] | ||
546 | mov E=D };; | ||
547 | { .mib; add T1=T1,r9 // T1+=Sigma1(e) | ||
548 | _rotr r11=$t0,$Sigma0[1] } // ROTR(a,34) | ||
549 | { .mib; mov D=C | ||
550 | mov C=B };; | ||
551 | { .mmi; add T1=T1,X[15] // T1+=X[i] | ||
552 | xor r10=r10,r11 | ||
553 | _rotr r8=$t0,$Sigma0[2] };; // ROTR(a,39) | ||
554 | { .mmi; xor r10=r8,r10 // r10=Sigma0(a) | ||
555 | mov B=A | ||
556 | add A=T1,T2 };; | ||
557 | { .mib; add E=E,T1 | ||
558 | add A=A,r10 // T2=Maj(a,b,c)+Sigma0(a) | ||
559 | br.ctop.sptk .L_rest };; | ||
560 | .L_rest_end: | ||
561 | |||
562 | { .mmi; add A_=A_,A | ||
563 | add B_=B_,B | ||
564 | add C_=C_,C } | ||
565 | { .mmi; add D_=D_,D | ||
566 | add E_=E_,E | ||
567 | cmp.ltu p16,p0=1,num };; | ||
568 | { .mmi; add F_=F_,F | ||
569 | add G_=G_,G | ||
570 | add H_=H_,H } | ||
571 | { .mmb; add Ktbl=-$SZ*$rounds,Ktbl | ||
572 | (p16) add num=-1,num | ||
573 | (p16) br.dptk.many .L_outer };; | ||
574 | |||
575 | { .mib; add r8=0*$SZ,ctx | ||
576 | add r9=1*$SZ,ctx } | ||
577 | { .mib; add r10=2*$SZ,ctx | ||
578 | add r11=3*$SZ,ctx };; | ||
579 | { .mmi; $STW [r8]=A_,4*$SZ | ||
580 | $STW [r9]=B_,4*$SZ | ||
581 | mov ar.lc=lcsave } | ||
582 | { .mmi; $STW [r10]=C_,4*$SZ | ||
583 | $STW [r11]=D_,4*$SZ | ||
584 | mov pr=prsave,0x1ffff };; | ||
585 | { .mmb; $STW [r8]=E_ | ||
586 | $STW [r9]=F_ } | ||
587 | { .mmb; $STW [r10]=G_ | ||
588 | $STW [r11]=H_ | ||
589 | br.ret.sptk.many b0 };; | ||
590 | .endp $func# | ||
591 | ___ | ||
592 | |||
593 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
594 | $code =~ s/_rotr(\s+)([^=]+)=([^,]+),([0-9]+)/shrp$1$2=$3,$3,$4/gm; | ||
595 | if ($BITS==64) { | ||
596 | $code =~ s/mux2(\s+)\S+/nop.i$1 0x0/gm; | ||
597 | $code =~ s/mux1(\s+)\S+/nop.i$1 0x0/gm if ($big_endian); | ||
598 | $code =~ s/(shrp\s+X\[[^=]+)=([^,]+),([^,]+),([1-9]+)/$1=$3,$2,64-$4/gm | ||
599 | if (!$big_endian); | ||
600 | $code =~ s/ld1(\s+)X\[\S+/nop.m$1 0x0/gm; | ||
601 | } | ||
602 | |||
603 | print $code; | ||
604 | |||
605 | print<<___ if ($BITS==32); | ||
606 | .align 64 | ||
607 | .type K256#,\@object | ||
608 | K256: data4 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 | ||
609 | data4 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 | ||
610 | data4 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 | ||
611 | data4 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 | ||
612 | data4 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc | ||
613 | data4 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da | ||
614 | data4 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 | ||
615 | data4 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 | ||
616 | data4 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 | ||
617 | data4 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 | ||
618 | data4 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 | ||
619 | data4 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 | ||
620 | data4 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 | ||
621 | data4 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 | ||
622 | data4 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 | ||
623 | data4 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 | ||
624 | .size K256#,$SZ*$rounds | ||
625 | stringz "SHA256 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>" | ||
626 | ___ | ||
627 | print<<___ if ($BITS==64); | ||
628 | .align 64 | ||
629 | .type K512#,\@object | ||
630 | K512: data8 0x428a2f98d728ae22,0x7137449123ef65cd | ||
631 | data8 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc | ||
632 | data8 0x3956c25bf348b538,0x59f111f1b605d019 | ||
633 | data8 0x923f82a4af194f9b,0xab1c5ed5da6d8118 | ||
634 | data8 0xd807aa98a3030242,0x12835b0145706fbe | ||
635 | data8 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 | ||
636 | data8 0x72be5d74f27b896f,0x80deb1fe3b1696b1 | ||
637 | data8 0x9bdc06a725c71235,0xc19bf174cf692694 | ||
638 | data8 0xe49b69c19ef14ad2,0xefbe4786384f25e3 | ||
639 | data8 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 | ||
640 | data8 0x2de92c6f592b0275,0x4a7484aa6ea6e483 | ||
641 | data8 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 | ||
642 | data8 0x983e5152ee66dfab,0xa831c66d2db43210 | ||
643 | data8 0xb00327c898fb213f,0xbf597fc7beef0ee4 | ||
644 | data8 0xc6e00bf33da88fc2,0xd5a79147930aa725 | ||
645 | data8 0x06ca6351e003826f,0x142929670a0e6e70 | ||
646 | data8 0x27b70a8546d22ffc,0x2e1b21385c26c926 | ||
647 | data8 0x4d2c6dfc5ac42aed,0x53380d139d95b3df | ||
648 | data8 0x650a73548baf63de,0x766a0abb3c77b2a8 | ||
649 | data8 0x81c2c92e47edaee6,0x92722c851482353b | ||
650 | data8 0xa2bfe8a14cf10364,0xa81a664bbc423001 | ||
651 | data8 0xc24b8b70d0f89791,0xc76c51a30654be30 | ||
652 | data8 0xd192e819d6ef5218,0xd69906245565a910 | ||
653 | data8 0xf40e35855771202a,0x106aa07032bbd1b8 | ||
654 | data8 0x19a4c116b8d2d0c8,0x1e376c085141ab53 | ||
655 | data8 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 | ||
656 | data8 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb | ||
657 | data8 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 | ||
658 | data8 0x748f82ee5defb2fc,0x78a5636f43172f60 | ||
659 | data8 0x84c87814a1f0ab72,0x8cc702081a6439ec | ||
660 | data8 0x90befffa23631e28,0xa4506cebde82bde9 | ||
661 | data8 0xbef9a3f7b2c67915,0xc67178f2e372532b | ||
662 | data8 0xca273eceea26619c,0xd186b8c721c0c207 | ||
663 | data8 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 | ||
664 | data8 0x06f067aa72176fba,0x0a637dc5a2c898a6 | ||
665 | data8 0x113f9804bef90dae,0x1b710b35131c471b | ||
666 | data8 0x28db77f523047d84,0x32caab7b40c72493 | ||
667 | data8 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c | ||
668 | data8 0x4cc5d4becb3e42b6,0x597f299cfc657e2a | ||
669 | data8 0x5fcb6fab3ad6faec,0x6c44198c4a475817 | ||
670 | .size K512#,$SZ*$rounds | ||
671 | stringz "SHA512 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>" | ||
672 | ___ | ||
diff --git a/src/lib/libcrypto/sha/asm/sha512-x86_64.pl b/src/lib/libcrypto/sha/asm/sha512-x86_64.pl new file mode 100755 index 0000000000..b6252d31ec --- /dev/null +++ b/src/lib/libcrypto/sha/asm/sha512-x86_64.pl | |||
@@ -0,0 +1,344 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. Rights for redistribution and usage in source and binary | ||
6 | # forms are granted according to the OpenSSL license. | ||
7 | # ==================================================================== | ||
8 | # | ||
9 | # sha256/512_block procedure for x86_64. | ||
10 | # | ||
11 | # 40% improvement over compiler-generated code on Opteron. On EM64T | ||
12 | # sha256 was observed to run >80% faster and sha512 - >40%. No magical | ||
13 | # tricks, just straight implementation... I really wonder why gcc | ||
14 | # [being armed with inline assembler] fails to generate as fast code. | ||
15 | # The only thing which is cool about this module is that it's very | ||
16 | # same instruction sequence used for both SHA-256 and SHA-512. In | ||
17 | # former case the instructions operate on 32-bit operands, while in | ||
18 | # latter - on 64-bit ones. All I had to do is to get one flavor right, | ||
19 | # the other one passed the test right away:-) | ||
20 | # | ||
21 | # sha256_block runs in ~1005 cycles on Opteron, which gives you | ||
22 | # asymptotic performance of 64*1000/1005=63.7MBps times CPU clock | ||
23 | # frequency in GHz. sha512_block runs in ~1275 cycles, which results | ||
24 | # in 128*1000/1275=100MBps per GHz. Is there room for improvement? | ||
25 | # Well, if you compare it to IA-64 implementation, which maintains | ||
26 | # X[16] in register bank[!], tends to 4 instructions per CPU clock | ||
27 | # cycle and runs in 1003 cycles, 1275 is very good result for 3-way | ||
28 | # issue Opteron pipeline and X[16] maintained in memory. So that *if* | ||
29 | # there is a way to improve it, *then* the only way would be to try to | ||
30 | # offload X[16] updates to SSE unit, but that would require "deeper" | ||
31 | # loop unroll, which in turn would naturally cause size blow-up, not | ||
32 | # to mention increased complexity! And once again, only *if* it's | ||
33 | # actually possible to noticeably improve overall ILP, instruction | ||
34 | # level parallelism, on a given CPU implementation in this case. | ||
35 | # | ||
36 | # Special note on Intel EM64T. While Opteron CPU exhibits perfect | ||
37 | # performance ratio of 1.5 between 64- and 32-bit flavors [see above], | ||
38 | # [currently available] EM64T CPUs apparently are far from it. On the | ||
39 | # contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit | ||
40 | # sha256_block:-( This is presumably because 64-bit shifts/rotates | ||
41 | # apparently are not atomic instructions, but implemented in microcode. | ||
42 | |||
$output=shift;

# Locate the x86_64-xlate.pl translator relative to this script:
# first alongside it, then in ../../perlasm.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Pipe all generated code through the translator.  The original did not
# check this open, so a missing or broken translator silently produced
# an empty assembler file; fail loudly instead.  Low-precedence "or" is
# required here -- "||" would bind to the string argument.
open STDOUT,"| $^X $xlate $output" or die "can't call $xlate: $!";

# One generator, two hash functions: SHA-512 uses 64-bit registers and
# 80 rounds, SHA-256 the 32-bit sub-registers and 64 rounds.  The
# Sigma/sigma entries are the FIPS 180-2 right-rotate/shift counts.
if ($output =~ /512/) {
	$func="sha512_block_data_order";
	$TABLE="K512";
	$SZ=8;
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
					"%r8", "%r9", "%r10","%r11");
	($T1,$a0,$a1,$a2)=("%r12","%r13","%r14","%r15");
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1,  8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
} else {
	$func="sha256_block_data_order";
	$TABLE="K256";
	$SZ=4;
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
					"%r8d","%r9d","%r10d","%r11d");
	($T1,$a0,$a1,$a2)=("%r12d","%r13d","%r14d","%r15d");
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
}

$ctx="%rdi";	# 1st arg
$round="%rdi";	# zaps $ctx
$inp="%rsi";	# 2nd arg
$Tbl="%rbp";

# Stack-frame layout: 16 schedule words, then saved ctx/inp/end/rsp.
$_ctx="16*$SZ+0*8(%rsp)";
$_inp="16*$SZ+1*8(%rsp)";
$_end="16*$SZ+2*8(%rsp)";
$_rsp="16*$SZ+3*8(%rsp)";
$framesz="16*$SZ+4*8";
88 | |||
89 | |||
# Emit one SHA-2 round body for rounds 0..15.  On entry $T1 already holds
# the message word X[i]; the sequence stores it into the on-stack schedule,
# folds in h, Sigma1(e), Ch(e,f,g) and K[round], then updates d and h and
# advances $round.  Note the cumulative-rotate trick: the second "ror" of
# $a0/$a1 rotates by the *difference* of two Sigma counts, so the total
# rotation equals the larger FIPS constant.
sub ROUND_00_15()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___;
	mov	$e,$a0
	mov	$e,$a1
	mov	$f,$a2

	ror	\$$Sigma1[0],$a0
	ror	\$$Sigma1[1],$a1
	xor	$g,$a2			# f^g

	xor	$a1,$a0
	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a1
	and	$e,$a2			# (f^g)&e
	mov	$T1,`$SZ*($i&0xf)`(%rsp)

	xor	$a1,$a0			# Sigma1(e)
	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g
	add	$h,$T1			# T1+=h

	mov	$a,$h
	add	$a0,$T1			# T1+=Sigma1(e)

	add	$a2,$T1			# T1+=Ch(e,f,g)
	mov	$a,$a0
	mov	$a,$a1

	ror	\$$Sigma0[0],$h
	ror	\$$Sigma0[1],$a0
	mov	$a,$a2
	add	($Tbl,$round,$SZ),$T1	# T1+=K[round]

	xor	$a0,$h
	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a0
	or	$c,$a1			# a|c

	xor	$a0,$h			# h=Sigma0(a)
	and	$c,$a2			# a&c
	add	$T1,$d			# d+=T1

	and	$b,$a1			# (a|c)&b
	add	$T1,$h			# h+=T1

	or	$a2,$a1			# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1($round),$round	# round++

	add	$a1,$h			# h+=Maj(a,b,c)
___
}
140 | |||
# Emit the message-schedule expansion for rounds 16 and up:
#   X[i&15] += sigma0(X[(i+1)&15]) + sigma1(X[(i+14)&15]) + X[(i+9)&15]
# computed from the on-stack 16-word window, leaving the new word in $T1,
# then fall through to the shared round body above.  The same
# difference-rotate trick as in ROUND_00_15 is used for the sigma rotates.
sub ROUND_16_XX()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___;
	mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
	mov	`$SZ*(($i+14)&0xf)`(%rsp),$T1

	mov	$a0,$a2

	shr	\$$sigma0[2],$a0
	ror	\$$sigma0[0],$a2

	xor	$a2,$a0
	ror	\$`$sigma0[1]-$sigma0[0]`,$a2

	xor	$a2,$a0			# sigma0(X[(i+1)&0xf])
	mov	$T1,$a1

	shr	\$$sigma1[2],$T1
	ror	\$$sigma1[0],$a1

	xor	$a1,$T1
	ror	\$`$sigma1[1]-$sigma1[0]`,$a1

	xor	$a1,$T1			# sigma1(X[(i+14)&0xf])

	add	$a0,$T1

	add	`$SZ*(($i+9)&0xf)`(%rsp),$T1

	add	`$SZ*($i&0xf)`(%rsp),$T1
___
	&ROUND_00_15(@_);
}
175 | |||
# Function prologue: save callee-saved registers, carve out the aligned
# stack frame, stash the three arguments (end pointer is derived from
# inp + num blocks), locate the K table PIC-safely, and load the eight
# chaining values a..h from the context.
$code=<<___;
.text

.globl	$func
.type	$func,\@function,4
.align	16
$func:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	mov	%rsp,%rbp		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$$framesz,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arh
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%rbp,$_rsp		# save copy of %rsp

	.picmeup $Tbl
	lea	$TABLE-.($Tbl),$Tbl

	mov	$SZ*0($ctx),$A
	mov	$SZ*1($ctx),$B
	mov	$SZ*2($ctx),$C
	mov	$SZ*3($ctx),$D
	mov	$SZ*4($ctx),$E
	mov	$SZ*5($ctx),$F
	mov	$SZ*6($ctx),$G
	mov	$SZ*7($ctx),$H
	jmp	.Lloop

.align	16
.Lloop:
	xor	$round,$round
___
# Rounds 0..15: load each input word (byte-swapped to the big-endian
# order SHA-2 specifies), then rotate the register assignment list so
# each round's output registers become the next round's inputs --
# register renaming done at generation time instead of with moves.
	for($i=0;$i<16;$i++) {
		$code.="	mov	$SZ*$i($inp),$T1\n";
		$code.="	bswap	$T1\n";
		&ROUND_00_15($i,@ROT);
		unshift(@ROT,pop(@ROT));
	}
$code.=<<___;
	jmp	.Lrounds_16_xx
.align	16
.Lrounds_16_xx:
___
# Only 16 schedule-expansion rounds are unrolled; the cmp/jb below loops
# over them until $round reaches $rounds (64 or 80).
	for(;$i<32;$i++) {
		&ROUND_16_XX($i,@ROT);
		unshift(@ROT,pop(@ROT));
	}

# Epilogue of the block loop: add the working variables back into the
# context, advance inp, and loop while inp is below the end pointer.
$code.=<<___;
	cmp	\$$rounds,$round
	jb	.Lrounds_16_xx

	mov	$_ctx,$ctx
	lea	16*$SZ($inp),$inp

	add	$SZ*0($ctx),$A
	add	$SZ*1($ctx),$B
	add	$SZ*2($ctx),$C
	add	$SZ*3($ctx),$D
	add	$SZ*4($ctx),$E
	add	$SZ*5($ctx),$F
	add	$SZ*6($ctx),$G
	add	$SZ*7($ctx),$H

	cmp	$_end,$inp

	mov	$A,$SZ*0($ctx)
	mov	$B,$SZ*1($ctx)
	mov	$C,$SZ*2($ctx)
	mov	$D,$SZ*3($ctx)
	mov	$E,$SZ*4($ctx)
	mov	$F,$SZ*5($ctx)
	mov	$G,$SZ*6($ctx)
	mov	$H,$SZ*7($ctx)
	jb	.Lloop

	mov	$_rsp,%rsp
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx

	ret
.size	$func,.-$func
___
271 | |||
# Emit the round-constant table: K256 as .long words for SHA-256,
# K512 as .quad words for SHA-512 (FIPS 180-2 section 4.2 constants).
if ($SZ==4) {
$code.=<<___;
.align	64
.type	$TABLE,\@object
$TABLE:
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
___
} else {
$code.=<<___;
.align	64
.type	$TABLE,\@object
$TABLE:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
___
}
341 | |||
# Constant-fold the `...` expressions embedded by the ROUND_* subs,
# then emit everything through the xlate pipe opened on STDOUT.
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT;
diff --git a/src/lib/libcrypto/sha/sha256.c b/src/lib/libcrypto/sha/sha256.c new file mode 100644 index 0000000000..867f90cc97 --- /dev/null +++ b/src/lib/libcrypto/sha/sha256.c | |||
@@ -0,0 +1,282 @@ | |||
1 | /* crypto/sha/sha256.c */ | ||
2 | /* ==================================================================== | ||
3 | * Copyright (c) 2004 The OpenSSL Project. All rights reserved | ||
4 | * according to the OpenSSL license [found in ../../LICENSE]. | ||
5 | * ==================================================================== | ||
6 | */ | ||
7 | #include <openssl/opensslconf.h> | ||
8 | #if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256) | ||
9 | |||
10 | #include <stdlib.h> | ||
11 | #include <string.h> | ||
12 | |||
13 | #include <openssl/crypto.h> | ||
14 | #include <openssl/sha.h> | ||
15 | #include <openssl/opensslv.h> | ||
16 | |||
17 | const char SHA256_version[]="SHA-256" OPENSSL_VERSION_PTEXT; | ||
18 | |||
19 | int SHA224_Init (SHA256_CTX *c) | ||
20 | { | ||
21 | c->h[0]=0xc1059ed8UL; c->h[1]=0x367cd507UL; | ||
22 | c->h[2]=0x3070dd17UL; c->h[3]=0xf70e5939UL; | ||
23 | c->h[4]=0xffc00b31UL; c->h[5]=0x68581511UL; | ||
24 | c->h[6]=0x64f98fa7UL; c->h[7]=0xbefa4fa4UL; | ||
25 | c->Nl=0; c->Nh=0; | ||
26 | c->num=0; c->md_len=SHA224_DIGEST_LENGTH; | ||
27 | return 1; | ||
28 | } | ||
29 | |||
30 | int SHA256_Init (SHA256_CTX *c) | ||
31 | { | ||
32 | c->h[0]=0x6a09e667UL; c->h[1]=0xbb67ae85UL; | ||
33 | c->h[2]=0x3c6ef372UL; c->h[3]=0xa54ff53aUL; | ||
34 | c->h[4]=0x510e527fUL; c->h[5]=0x9b05688cUL; | ||
35 | c->h[6]=0x1f83d9abUL; c->h[7]=0x5be0cd19UL; | ||
36 | c->Nl=0; c->Nh=0; | ||
37 | c->num=0; c->md_len=SHA256_DIGEST_LENGTH; | ||
38 | return 1; | ||
39 | } | ||
40 | |||
41 | unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md) | ||
42 | { | ||
43 | SHA256_CTX c; | ||
44 | static unsigned char m[SHA224_DIGEST_LENGTH]; | ||
45 | |||
46 | if (md == NULL) md=m; | ||
47 | SHA224_Init(&c); | ||
48 | SHA256_Update(&c,d,n); | ||
49 | SHA256_Final(md,&c); | ||
50 | OPENSSL_cleanse(&c,sizeof(c)); | ||
51 | return(md); | ||
52 | } | ||
53 | |||
54 | unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md) | ||
55 | { | ||
56 | SHA256_CTX c; | ||
57 | static unsigned char m[SHA256_DIGEST_LENGTH]; | ||
58 | |||
59 | if (md == NULL) md=m; | ||
60 | SHA256_Init(&c); | ||
61 | SHA256_Update(&c,d,n); | ||
62 | SHA256_Final(md,&c); | ||
63 | OPENSSL_cleanse(&c,sizeof(c)); | ||
64 | return(md); | ||
65 | } | ||
66 | |||
/* SHA-224 shares SHA-256's collector and finalization; only the IV and
 * c->md_len (set by SHA224_Init) differ. */
int SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
{   return SHA256_Update (c,data,len);   }
int SHA224_Final (unsigned char *md, SHA256_CTX *c)
{   return SHA256_Final (md,c);   }
71 | |||
/* Parameterize the generic md32_common.h collector/padding machinery
 * for SHA-256 (big-endian input words, 64-byte blocks). */
#define	DATA_ORDER_IS_BIG_ENDIAN

#define	HASH_LONG		SHA_LONG
#define	HASH_CTX		SHA256_CTX
#define	HASH_CBLOCK		SHA_CBLOCK
/*
 * Note that FIPS180-2 discusses "Truncation of the Hash Function Output."
 * default: case below covers for it. It's not clear however if it's
 * permitted to truncate to amount of bytes not divisible by 4. I bet not,
 * but if it is, then default: case shall be extended. For reference.
 * Idea behind separate cases for pre-defined lengths is to let the
 * compiler decide if it's appropriate to unroll small loops.
 */
#define	HASH_MAKE_STRING(c,s)	do {	\
	unsigned long ll;		\
	unsigned int  xn;		\
	switch ((c)->md_len)		\
	{   case SHA224_DIGEST_LENGTH:	\
		for (xn=0;xn<SHA224_DIGEST_LENGTH/4;xn++)	\
		{   ll=(c)->h[xn]; HOST_l2c(ll,(s));   }	\
		break;			\
	    case SHA256_DIGEST_LENGTH:	\
		for (xn=0;xn<SHA256_DIGEST_LENGTH/4;xn++)	\
		{   ll=(c)->h[xn]; HOST_l2c(ll,(s));   }	\
		break;			\
	    default:			\
		if ((c)->md_len > SHA256_DIGEST_LENGTH)	\
		    return 0;		\
		for (xn=0;xn<(c)->md_len/4;xn++)	\
		{   ll=(c)->h[xn]; HOST_l2c(ll,(s));   }	\
		break;			\
	}				\
	} while (0)

#define	HASH_UPDATE		SHA256_Update
#define	HASH_TRANSFORM		SHA256_Transform
#define	HASH_FINAL		SHA256_Final
#define	HASH_BLOCK_DATA_ORDER	sha256_block_data_order
/* Block transform is supplied by assembler when SHA256_ASM is defined,
 * otherwise by one of the static C implementations below. */
#ifndef SHA256_ASM
static
#endif
void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num);

#include "md32_common.h"
116 | |||
117 | #ifndef SHA256_ASM | ||
/* SHA-256 round constants, FIPS 180-2 section 4.2.2: first 32 bits of
 * the fractional parts of the cube roots of the first 64 primes. */
static const SHA_LONG K256[64] = {
	0x428a2f98UL,0x71374491UL,0xb5c0fbcfUL,0xe9b5dba5UL,
	0x3956c25bUL,0x59f111f1UL,0x923f82a4UL,0xab1c5ed5UL,
	0xd807aa98UL,0x12835b01UL,0x243185beUL,0x550c7dc3UL,
	0x72be5d74UL,0x80deb1feUL,0x9bdc06a7UL,0xc19bf174UL,
	0xe49b69c1UL,0xefbe4786UL,0x0fc19dc6UL,0x240ca1ccUL,
	0x2de92c6fUL,0x4a7484aaUL,0x5cb0a9dcUL,0x76f988daUL,
	0x983e5152UL,0xa831c66dUL,0xb00327c8UL,0xbf597fc7UL,
	0xc6e00bf3UL,0xd5a79147UL,0x06ca6351UL,0x14292967UL,
	0x27b70a85UL,0x2e1b2138UL,0x4d2c6dfcUL,0x53380d13UL,
	0x650a7354UL,0x766a0abbUL,0x81c2c92eUL,0x92722c85UL,
	0xa2bfe8a1UL,0xa81a664bUL,0xc24b8b70UL,0xc76c51a3UL,
	0xd192e819UL,0xd6990624UL,0xf40e3585UL,0x106aa070UL,
	0x19a4c116UL,0x1e376c08UL,0x2748774cUL,0x34b0bcb5UL,
	0x391c0cb3UL,0x4ed8aa4aUL,0x5b9cca4fUL,0x682e6ff3UL,
	0x748f82eeUL,0x78a5636fUL,0x84c87814UL,0x8cc70208UL,
	0x90befffaUL,0xa4506cebUL,0xbef9a3f7UL,0xc67178f2UL };
135 | |||
/*
 * FIPS specification refers to right rotations, while our ROTATE macro
 * is left one. This is why you might notice that rotation coefficients
 * differ from those observed in FIPS document by 32-N...
 * (e.g. Sigma0's FIPS counts 2,13,22 appear here as 30,19,10.)
 */
#define Sigma0(x)	(ROTATE((x),30) ^ ROTATE((x),19) ^ ROTATE((x),10))
#define Sigma1(x)	(ROTATE((x),26) ^ ROTATE((x),21) ^ ROTATE((x),7))
#define sigma0(x)	(ROTATE((x),25) ^ ROTATE((x),14) ^ ((x)>>3))
#define sigma1(x)	(ROTATE((x),15) ^ ROTATE((x),13) ^ ((x)>>10))

/* Choice and majority functions, FIPS 180-2 section 4.1.2. */
#define Ch(x,y,z)	(((x) & (y)) ^ ((~(x)) & (z)))
#define Maj(x,y,z)	(((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
148 | |||
149 | #ifdef OPENSSL_SMALL_FOOTPRINT | ||
150 | |||
/*
 * Compact SHA-256 compression: processes num 64-byte blocks from in.
 * Keeps the 16-word schedule X[] as a rolling window and iterates the
 * 64 rounds in plain loops instead of unrolling them.
 */
static void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num)
	{
	unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1,T2;
	SHA_LONG X[16],l;
	int i;
	const unsigned char *data=in;

	while (num--) {

	a = ctx->h[0];	b = ctx->h[1];	c = ctx->h[2];	d = ctx->h[3];
	e = ctx->h[4];	f = ctx->h[5];	g = ctx->h[6];	h = ctx->h[7];

	for (i=0;i<16;i++)
		{
		/* big-endian load of the next input word */
		HOST_c2l(data,l); T1 = X[i] = l;
		T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
		T2 = Sigma0(a) + Maj(a,b,c);
		h = g;	g = f;	f = e;	e = d + T1;
		d = c;	c = b;	b = a;	a = T1 + T2;
		}

	for (;i<64;i++)
		{
		s0 = X[(i+1)&0x0f];	s0 = sigma0(s0);
		s1 = X[(i+14)&0x0f];	s1 = sigma1(s1);

		/* schedule: X[i%16] += sigma0(X[(i+1)%16]) + sigma1(X[(i+14)%16]) + X[(i+9)%16] */
		T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf];
		T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
		T2 = Sigma0(a) + Maj(a,b,c);
		h = g;	g = f;	f = e;	e = d + T1;
		d = c;	c = b;	b = a;	a = T1 + T2;
		}

	/* fold the block's result back into the chaining values */
	ctx->h[0] += a;	ctx->h[1] += b;	ctx->h[2] += c;	ctx->h[3] += d;
	ctx->h[4] += e;	ctx->h[5] += f;	ctx->h[6] += g;	ctx->h[7] += h;

	}
	}
189 | |||
190 | #else | ||
191 | |||
/* One SHA-256 round.  T1 must already hold the schedule word (plus, for
 * rounds >= 16, the expansion done by ROUND_16_63); only d and h are
 * written -- callers rotate the a..h argument list between rounds. */
#define	ROUND_00_15(i,a,b,c,d,e,f,g,h)		do {	\
	T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];	\
	h = Sigma0(a) + Maj(a,b,c);			\
	d += T1;	h += T1;		} while (0)

/* Rounds 16..63 additionally advance the 16-word rolling schedule. */
#define	ROUND_16_63(i,a,b,c,d,e,f,g,h,X)	do {	\
	s0 = X[(i+1)&0x0f];	s0 = sigma0(s0);	\
	s1 = X[(i+14)&0x0f];	s1 = sigma1(s1);	\
	T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f];	\
	ROUND_00_15(i,a,b,c,d,e,f,g,h);		} while (0)
202 | |||
/*
 * Unrolled SHA-256 compression: processes num 64-byte blocks from in.
 * On big-endian hosts with 4-byte SHA_LONG and aligned input the words
 * are read directly; otherwise they are assembled byte-by-byte with
 * HOST_c2l.  Endianness is probed at run time via the union below.
 */
static void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num)
	{
	unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1;
	SHA_LONG X[16];
	int i;
	const unsigned char *data=in;
	const union { long one; char little; } is_endian = {1};

	while (num--) {

	a = ctx->h[0];	b = ctx->h[1];	c = ctx->h[2];	d = ctx->h[3];
	e = ctx->h[4];	f = ctx->h[5];	g = ctx->h[6];	h = ctx->h[7];

	/* fast path: input already in host (big-endian) word order */
	if (!is_endian.little && sizeof(SHA_LONG)==4 && ((size_t)in%4)==0)
		{
		const SHA_LONG *W=(const SHA_LONG *)data;

		T1 = X[0] = W[0];	ROUND_00_15(0,a,b,c,d,e,f,g,h);
		T1 = X[1] = W[1];	ROUND_00_15(1,h,a,b,c,d,e,f,g);
		T1 = X[2] = W[2];	ROUND_00_15(2,g,h,a,b,c,d,e,f);
		T1 = X[3] = W[3];	ROUND_00_15(3,f,g,h,a,b,c,d,e);
		T1 = X[4] = W[4];	ROUND_00_15(4,e,f,g,h,a,b,c,d);
		T1 = X[5] = W[5];	ROUND_00_15(5,d,e,f,g,h,a,b,c);
		T1 = X[6] = W[6];	ROUND_00_15(6,c,d,e,f,g,h,a,b);
		T1 = X[7] = W[7];	ROUND_00_15(7,b,c,d,e,f,g,h,a);
		T1 = X[8] = W[8];	ROUND_00_15(8,a,b,c,d,e,f,g,h);
		T1 = X[9] = W[9];	ROUND_00_15(9,h,a,b,c,d,e,f,g);
		T1 = X[10] = W[10];	ROUND_00_15(10,g,h,a,b,c,d,e,f);
		T1 = X[11] = W[11];	ROUND_00_15(11,f,g,h,a,b,c,d,e);
		T1 = X[12] = W[12];	ROUND_00_15(12,e,f,g,h,a,b,c,d);
		T1 = X[13] = W[13];	ROUND_00_15(13,d,e,f,g,h,a,b,c);
		T1 = X[14] = W[14];	ROUND_00_15(14,c,d,e,f,g,h,a,b);
		T1 = X[15] = W[15];	ROUND_00_15(15,b,c,d,e,f,g,h,a);

		data += SHA256_CBLOCK;
		}
	else
		{
		SHA_LONG l;

		/* byte-order-neutral path; HOST_c2l also advances data */
		HOST_c2l(data,l); T1 = X[0] = l;  ROUND_00_15(0,a,b,c,d,e,f,g,h);
		HOST_c2l(data,l); T1 = X[1] = l;  ROUND_00_15(1,h,a,b,c,d,e,f,g);
		HOST_c2l(data,l); T1 = X[2] = l;  ROUND_00_15(2,g,h,a,b,c,d,e,f);
		HOST_c2l(data,l); T1 = X[3] = l;  ROUND_00_15(3,f,g,h,a,b,c,d,e);
		HOST_c2l(data,l); T1 = X[4] = l;  ROUND_00_15(4,e,f,g,h,a,b,c,d);
		HOST_c2l(data,l); T1 = X[5] = l;  ROUND_00_15(5,d,e,f,g,h,a,b,c);
		HOST_c2l(data,l); T1 = X[6] = l;  ROUND_00_15(6,c,d,e,f,g,h,a,b);
		HOST_c2l(data,l); T1 = X[7] = l;  ROUND_00_15(7,b,c,d,e,f,g,h,a);
		HOST_c2l(data,l); T1 = X[8] = l;  ROUND_00_15(8,a,b,c,d,e,f,g,h);
		HOST_c2l(data,l); T1 = X[9] = l;  ROUND_00_15(9,h,a,b,c,d,e,f,g);
		HOST_c2l(data,l); T1 = X[10] = l; ROUND_00_15(10,g,h,a,b,c,d,e,f);
		HOST_c2l(data,l); T1 = X[11] = l; ROUND_00_15(11,f,g,h,a,b,c,d,e);
		HOST_c2l(data,l); T1 = X[12] = l; ROUND_00_15(12,e,f,g,h,a,b,c,d);
		HOST_c2l(data,l); T1 = X[13] = l; ROUND_00_15(13,d,e,f,g,h,a,b,c);
		HOST_c2l(data,l); T1 = X[14] = l; ROUND_00_15(14,c,d,e,f,g,h,a,b);
		HOST_c2l(data,l); T1 = X[15] = l; ROUND_00_15(15,b,c,d,e,f,g,h,a);
		}

	/* rounds 16..63, eight at a time so the register rotation of the
	 * a..h arguments returns to its starting assignment */
	for (i=16;i<64;i+=8)
		{
		ROUND_16_63(i+0,a,b,c,d,e,f,g,h,X);
		ROUND_16_63(i+1,h,a,b,c,d,e,f,g,X);
		ROUND_16_63(i+2,g,h,a,b,c,d,e,f,X);
		ROUND_16_63(i+3,f,g,h,a,b,c,d,e,X);
		ROUND_16_63(i+4,e,f,g,h,a,b,c,d,X);
		ROUND_16_63(i+5,d,e,f,g,h,a,b,c,X);
		ROUND_16_63(i+6,c,d,e,f,g,h,a,b,X);
		ROUND_16_63(i+7,b,c,d,e,f,g,h,a,X);
		}

	ctx->h[0] += a;	ctx->h[1] += b;	ctx->h[2] += c;	ctx->h[3] += d;
	ctx->h[4] += e;	ctx->h[5] += f;	ctx->h[6] += g;	ctx->h[7] += h;

	}
	}
278 | |||
279 | #endif | ||
280 | #endif /* SHA256_ASM */ | ||
281 | |||
282 | #endif /* OPENSSL_NO_SHA256 */ | ||
diff --git a/src/lib/libcrypto/sha/sha512.c b/src/lib/libcrypto/sha/sha512.c new file mode 100644 index 0000000000..987fc07c99 --- /dev/null +++ b/src/lib/libcrypto/sha/sha512.c | |||
@@ -0,0 +1,537 @@ | |||
1 | /* crypto/sha/sha512.c */ | ||
2 | /* ==================================================================== | ||
3 | * Copyright (c) 2004 The OpenSSL Project. All rights reserved | ||
4 | * according to the OpenSSL license [found in ../../LICENSE]. | ||
5 | * ==================================================================== | ||
6 | */ | ||
7 | #include <openssl/opensslconf.h> | ||
8 | #if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA512) | ||
9 | /* | ||
10 | * IMPLEMENTATION NOTES. | ||
11 | * | ||
12 | * As you might have noticed 32-bit hash algorithms: | ||
13 | * | ||
14 | * - permit SHA_LONG to be wider than 32-bit (case on CRAY); | ||
15 | * - optimized versions implement two transform functions: one operating | ||
16 | * on [aligned] data in host byte order and one - on data in input | ||
17 | * stream byte order; | ||
18 | * - share common byte-order neutral collector and padding function | ||
19 | * implementations, ../md32_common.h; | ||
20 | * | ||
21 | * Neither of the above applies to this SHA-512 implementation. Reasons | ||
22 | * [in reverse order] are: | ||
23 | * | ||
24 | * - it's the only 64-bit hash algorithm for the moment of this writing, | ||
25 | * there is no need for common collector/padding implementation [yet]; | ||
26 | * - by supporting only one transform function [which operates on | ||
27 | * *aligned* data in input stream byte order, big-endian in this case] | ||
28 | * we minimize burden of maintenance in two ways: a) collector/padding | ||
29 | * function is simpler; b) only one transform function to stare at; | ||
30 | * - SHA_LONG64 is required to be exactly 64-bit in order to be able to | ||
31 | * apply a number of optimizations to mitigate potential performance | ||
32 | * penalties caused by previous design decision; | ||
33 | * | ||
34 | * Caveat lector. | ||
35 | * | ||
36 | * Implementation relies on the fact that "long long" is 64-bit on | ||
37 | * both 32- and 64-bit platforms. If some compiler vendor comes up | ||
38 | * with 128-bit long long, adjustment to sha.h would be required. | ||
39 | * As this implementation relies on 64-bit integer type, it's totally | ||
40 | * inappropriate for platforms which don't support it, most notably | ||
41 | * 16-bit platforms. | ||
42 | * <appro@fy.chalmers.se> | ||
43 | */ | ||
44 | #include <stdlib.h> | ||
45 | #include <string.h> | ||
46 | |||
47 | #include <openssl/crypto.h> | ||
48 | #include <openssl/sha.h> | ||
49 | #include <openssl/opensslv.h> | ||
50 | |||
51 | #include "cryptlib.h" | ||
52 | |||
53 | const char SHA512_version[]="SHA-512" OPENSSL_VERSION_PTEXT; | ||
54 | |||
55 | #if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \ | ||
56 | defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) || \ | ||
57 | defined(__s390__) || defined(__s390x__) || \ | ||
58 | defined(SHA512_ASM) | ||
59 | #define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA | ||
60 | #endif | ||
61 | |||
62 | int SHA384_Init (SHA512_CTX *c) | ||
63 | { | ||
64 | c->h[0]=U64(0xcbbb9d5dc1059ed8); | ||
65 | c->h[1]=U64(0x629a292a367cd507); | ||
66 | c->h[2]=U64(0x9159015a3070dd17); | ||
67 | c->h[3]=U64(0x152fecd8f70e5939); | ||
68 | c->h[4]=U64(0x67332667ffc00b31); | ||
69 | c->h[5]=U64(0x8eb44a8768581511); | ||
70 | c->h[6]=U64(0xdb0c2e0d64f98fa7); | ||
71 | c->h[7]=U64(0x47b5481dbefa4fa4); | ||
72 | c->Nl=0; c->Nh=0; | ||
73 | c->num=0; c->md_len=SHA384_DIGEST_LENGTH; | ||
74 | return 1; | ||
75 | } | ||
76 | |||
77 | int SHA512_Init (SHA512_CTX *c) | ||
78 | { | ||
79 | c->h[0]=U64(0x6a09e667f3bcc908); | ||
80 | c->h[1]=U64(0xbb67ae8584caa73b); | ||
81 | c->h[2]=U64(0x3c6ef372fe94f82b); | ||
82 | c->h[3]=U64(0xa54ff53a5f1d36f1); | ||
83 | c->h[4]=U64(0x510e527fade682d1); | ||
84 | c->h[5]=U64(0x9b05688c2b3e6c1f); | ||
85 | c->h[6]=U64(0x1f83d9abfb41bd6b); | ||
86 | c->h[7]=U64(0x5be0cd19137e2179); | ||
87 | c->Nl=0; c->Nh=0; | ||
88 | c->num=0; c->md_len=SHA512_DIGEST_LENGTH; | ||
89 | return 1; | ||
90 | } | ||
91 | |||
/* Single block-transform entry point; supplied by assembler when
 * SHA512_ASM is defined, otherwise defined statically below. */
#ifndef SHA512_ASM
static
#endif
void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num);
96 | |||
/*
 * SHA512_Final - pad the buffered partial block, append the 128-bit
 * big-endian bit count (Nh:Nl), run the final transform(s) and write
 * c->md_len bytes of digest to md.  Returns 1, or 0 when md is NULL or
 * md_len is not a recognized SHA-384/SHA-512 length.
 */
int SHA512_Final (unsigned char *md, SHA512_CTX *c)
	{
	unsigned char *p=(unsigned char *)c->u.p;
	size_t n=c->num;

	p[n]=0x80;	/* There always is a room for one */
	n++;
	/* no room left for the 16-byte length field: zero-fill and
	 * process this block, then start a fresh all-zero one */
	if (n > (sizeof(c->u)-16))
		memset (p+n,0,sizeof(c->u)-n), n=0,
		sha512_block_data_order (c,p,1);

	memset (p+n,0,sizeof(c->u)-16-n);
#ifdef	B_ENDIAN
	/* big-endian host: store the two 64-bit counters directly */
	c->u.d[SHA_LBLOCK-2] = c->Nh;
	c->u.d[SHA_LBLOCK-1] = c->Nl;
#else
	/* serialize the bit count big-endian byte-by-byte */
	p[sizeof(c->u)-1]  = (unsigned char)(c->Nl);
	p[sizeof(c->u)-2]  = (unsigned char)(c->Nl>>8);
	p[sizeof(c->u)-3]  = (unsigned char)(c->Nl>>16);
	p[sizeof(c->u)-4]  = (unsigned char)(c->Nl>>24);
	p[sizeof(c->u)-5]  = (unsigned char)(c->Nl>>32);
	p[sizeof(c->u)-6]  = (unsigned char)(c->Nl>>40);
	p[sizeof(c->u)-7]  = (unsigned char)(c->Nl>>48);
	p[sizeof(c->u)-8]  = (unsigned char)(c->Nl>>56);
	p[sizeof(c->u)-9]  = (unsigned char)(c->Nh);
	p[sizeof(c->u)-10] = (unsigned char)(c->Nh>>8);
	p[sizeof(c->u)-11] = (unsigned char)(c->Nh>>16);
	p[sizeof(c->u)-12] = (unsigned char)(c->Nh>>24);
	p[sizeof(c->u)-13] = (unsigned char)(c->Nh>>32);
	p[sizeof(c->u)-14] = (unsigned char)(c->Nh>>40);
	p[sizeof(c->u)-15] = (unsigned char)(c->Nh>>48);
	p[sizeof(c->u)-16] = (unsigned char)(c->Nh>>56);
#endif

	sha512_block_data_order (c,p,1);

	if (md==0) return 0;

	switch (c->md_len)
		{
		/* Let compiler decide if it's appropriate to unroll... */
		case SHA384_DIGEST_LENGTH:
			for (n=0;n<SHA384_DIGEST_LENGTH/8;n++)
				{
				SHA_LONG64 t = c->h[n];

				*(md++)	= (unsigned char)(t>>56);
				*(md++)	= (unsigned char)(t>>48);
				*(md++)	= (unsigned char)(t>>40);
				*(md++)	= (unsigned char)(t>>32);
				*(md++)	= (unsigned char)(t>>24);
				*(md++)	= (unsigned char)(t>>16);
				*(md++)	= (unsigned char)(t>>8);
				*(md++)	= (unsigned char)(t);
				}
			break;
		case SHA512_DIGEST_LENGTH:
			for (n=0;n<SHA512_DIGEST_LENGTH/8;n++)
				{
				SHA_LONG64 t = c->h[n];

				*(md++)	= (unsigned char)(t>>56);
				*(md++)	= (unsigned char)(t>>48);
				*(md++)	= (unsigned char)(t>>40);
				*(md++)	= (unsigned char)(t>>32);
				*(md++)	= (unsigned char)(t>>24);
				*(md++)	= (unsigned char)(t>>16);
				*(md++)	= (unsigned char)(t>>8);
				*(md++)	= (unsigned char)(t);
				}
			break;
		/* ... as well as make sure md_len is not abused. */
		default:	return 0;
		}

	return 1;
	}
174 | |||
/* SHA-384 shares SHA-512's finalization; c->md_len (set by SHA384_Init)
 * selects the 48-byte output above. */
int SHA384_Final (unsigned char *md,SHA512_CTX *c)
{   return SHA512_Final (md,c);   }
177 | |||
178 | int SHA512_Update (SHA512_CTX *c, const void *_data, size_t len) | ||
179 | { | ||
180 | SHA_LONG64 l; | ||
181 | unsigned char *p=c->u.p; | ||
182 | const unsigned char *data=(const unsigned char *)_data; | ||
183 | |||
184 | if (len==0) return 1; | ||
185 | |||
186 | l = (c->Nl+(((SHA_LONG64)len)<<3))&U64(0xffffffffffffffff); | ||
187 | if (l < c->Nl) c->Nh++; | ||
188 | if (sizeof(len)>=8) c->Nh+=(((SHA_LONG64)len)>>61); | ||
189 | c->Nl=l; | ||
190 | |||
191 | if (c->num != 0) | ||
192 | { | ||
193 | size_t n = sizeof(c->u) - c->num; | ||
194 | |||
195 | if (len < n) | ||
196 | { | ||
197 | memcpy (p+c->num,data,len), c->num += len; | ||
198 | return 1; | ||
199 | } | ||
200 | else { | ||
201 | memcpy (p+c->num,data,n), c->num = 0; | ||
202 | len-=n, data+=n; | ||
203 | sha512_block_data_order (c,p,1); | ||
204 | } | ||
205 | } | ||
206 | |||
207 | if (len >= sizeof(c->u)) | ||
208 | { | ||
209 | #ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA | ||
210 | if ((size_t)data%sizeof(c->u.d[0]) != 0) | ||
211 | while (len >= sizeof(c->u)) | ||
212 | memcpy (p,data,sizeof(c->u)), | ||
213 | sha512_block_data_order (c,p,1), | ||
214 | len -= sizeof(c->u), | ||
215 | data += sizeof(c->u); | ||
216 | else | ||
217 | #endif | ||
218 | sha512_block_data_order (c,data,len/sizeof(c->u)), | ||
219 | data += len, | ||
220 | len %= sizeof(c->u), | ||
221 | data -= len; | ||
222 | } | ||
223 | |||
224 | if (len != 0) memcpy (p,data,len), c->num = (int)len; | ||
225 | |||
226 | return 1; | ||
227 | } | ||
228 | |||
229 | int SHA384_Update (SHA512_CTX *c, const void *data, size_t len) | ||
230 | { return SHA512_Update (c,data,len); } | ||
231 | |||
232 | void SHA512_Transform (SHA512_CTX *c, const unsigned char *data) | ||
233 | { sha512_block_data_order (c,data,1); } | ||
234 | |||
235 | unsigned char *SHA384(const unsigned char *d, size_t n, unsigned char *md) | ||
236 | { | ||
237 | SHA512_CTX c; | ||
238 | static unsigned char m[SHA384_DIGEST_LENGTH]; | ||
239 | |||
240 | if (md == NULL) md=m; | ||
241 | SHA384_Init(&c); | ||
242 | SHA512_Update(&c,d,n); | ||
243 | SHA512_Final(md,&c); | ||
244 | OPENSSL_cleanse(&c,sizeof(c)); | ||
245 | return(md); | ||
246 | } | ||
247 | |||
248 | unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md) | ||
249 | { | ||
250 | SHA512_CTX c; | ||
251 | static unsigned char m[SHA512_DIGEST_LENGTH]; | ||
252 | |||
253 | if (md == NULL) md=m; | ||
254 | SHA512_Init(&c); | ||
255 | SHA512_Update(&c,d,n); | ||
256 | SHA512_Final(md,&c); | ||
257 | OPENSSL_cleanse(&c,sizeof(c)); | ||
258 | return(md); | ||
259 | } | ||
260 | |||
261 | #ifndef SHA512_ASM | ||
/* SHA-384/512 round constants: the first 64 bits of the fractional
 * parts of the cube roots of the first eighty primes (FIPS 180-2). */
static const SHA_LONG64 K512[80] = {
        U64(0x428a2f98d728ae22),U64(0x7137449123ef65cd),
        U64(0xb5c0fbcfec4d3b2f),U64(0xe9b5dba58189dbbc),
        U64(0x3956c25bf348b538),U64(0x59f111f1b605d019),
        U64(0x923f82a4af194f9b),U64(0xab1c5ed5da6d8118),
        U64(0xd807aa98a3030242),U64(0x12835b0145706fbe),
        U64(0x243185be4ee4b28c),U64(0x550c7dc3d5ffb4e2),
        U64(0x72be5d74f27b896f),U64(0x80deb1fe3b1696b1),
        U64(0x9bdc06a725c71235),U64(0xc19bf174cf692694),
        U64(0xe49b69c19ef14ad2),U64(0xefbe4786384f25e3),
        U64(0x0fc19dc68b8cd5b5),U64(0x240ca1cc77ac9c65),
        U64(0x2de92c6f592b0275),U64(0x4a7484aa6ea6e483),
        U64(0x5cb0a9dcbd41fbd4),U64(0x76f988da831153b5),
        U64(0x983e5152ee66dfab),U64(0xa831c66d2db43210),
        U64(0xb00327c898fb213f),U64(0xbf597fc7beef0ee4),
        U64(0xc6e00bf33da88fc2),U64(0xd5a79147930aa725),
        U64(0x06ca6351e003826f),U64(0x142929670a0e6e70),
        U64(0x27b70a8546d22ffc),U64(0x2e1b21385c26c926),
        U64(0x4d2c6dfc5ac42aed),U64(0x53380d139d95b3df),
        U64(0x650a73548baf63de),U64(0x766a0abb3c77b2a8),
        U64(0x81c2c92e47edaee6),U64(0x92722c851482353b),
        U64(0xa2bfe8a14cf10364),U64(0xa81a664bbc423001),
        U64(0xc24b8b70d0f89791),U64(0xc76c51a30654be30),
        U64(0xd192e819d6ef5218),U64(0xd69906245565a910),
        U64(0xf40e35855771202a),U64(0x106aa07032bbd1b8),
        U64(0x19a4c116b8d2d0c8),U64(0x1e376c085141ab53),
        U64(0x2748774cdf8eeb99),U64(0x34b0bcb5e19b48a8),
        U64(0x391c0cb3c5c95a63),U64(0x4ed8aa4ae3418acb),
        U64(0x5b9cca4f7763e373),U64(0x682e6ff3d6b2b8a3),
        U64(0x748f82ee5defb2fc),U64(0x78a5636f43172f60),
        U64(0x84c87814a1f0ab72),U64(0x8cc702081a6439ec),
        U64(0x90befffa23631e28),U64(0xa4506cebde82bde9),
        U64(0xbef9a3f7b2c67915),U64(0xc67178f2e372532b),
        U64(0xca273eceea26619c),U64(0xd186b8c721c0c207),
        U64(0xeada7dd6cde0eb1e),U64(0xf57d4f7fee6ed178),
        U64(0x06f067aa72176fba),U64(0x0a637dc5a2c898a6),
        U64(0x113f9804bef90dae),U64(0x1b710b35131c471b),
        U64(0x28db77f523047d84),U64(0x32caab7b40c72493),
        U64(0x3c9ebe0a15c9bebc),U64(0x431d67c49c100d4c),
        U64(0x4cc5d4becb3e42b6),U64(0x597f299cfc657e2a),
        U64(0x5fcb6fab3ad6faec),U64(0x6c44198c4a475817) };
303 | |||
#ifndef PEDANTIC
/*
 * Platform-specific fast paths for ROTR (native 64-bit rotate) and
 * PULL64 (byte-swapping big-endian 64-bit load).  Targets not covered
 * here fall through to the generic C definitions further below.
 */
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if defined(__x86_64) || defined(__x86_64__)
     /* x86-64: single rorq instruction. */
#   define ROTR(a,n)	({ unsigned long ret;		\
				asm ("rorq %1,%0"	\
				: "=r"(ret)		\
				: "J"(n),"0"(a)		\
				: "cc"); ret;		})
#   if !defined(B_ENDIAN)
     /* Little-endian x86-64: load then bswapq. */
#    define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x)));	\
				asm ("bswapq %0"	\
				: "=r"(ret)		\
				: "0"(ret)); ret;	})
#   endif
#  elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
#   if defined(I386_ONLY)
     /* Pre-486 i386 (no bswap): swap bytes of each 32-bit half with
      * xchg/rol, then splice the halves into a 64-bit value. */
#    define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
			 unsigned int hi=p[0],lo=p[1];		\
				asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
				    "roll $16,%%eax; roll $16,%%edx; "\
				    "xchgb %%ah,%%al;xchgb %%dh,%%dl;" \
				: "=a"(lo),"=d"(hi)		\
				: "0"(lo),"1"(hi) : "cc");	\
				((SHA_LONG64)hi)<<32|lo;	})
#   else
     /* i486+: bswapl each 32-bit half. */
#    define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
			 unsigned int hi=p[0],lo=p[1];	\
				asm ("bswapl %0; bswapl %1;"	\
				: "=r"(lo),"=r"(hi)		\
				: "0"(lo),"1"(hi));		\
				((SHA_LONG64)hi)<<32|lo;	})
#   endif
#  elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
     /* 64-bit PowerPC: rotrdi rotate-right-doubleword. */
#   define ROTR(a,n)	({ unsigned long ret;		\
				asm ("rotrdi %0,%1,%2"	\
				: "=r"(ret)		\
				: "r"(a),"K"(n)); ret;	})
#  endif
# elif defined(_MSC_VER)
#  if defined(_WIN64)	/* applies to both IA-64 and AMD64 */
#   define ROTR(a,n)	_rotr64((a),n)
#  endif
#  if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#   if defined(I386_ONLY)
    /* Big-endian 64-bit load for pre-486 MSVC/x86.  The byte-swapped
     * result is left in edx:eax -- presumably relying on the MSVC x86
     * 64-bit return convention; there is deliberately no C return
     * statement (NOTE(review): do not "fix" by adding one). */
    static SHA_LONG64 __fastcall __pull64be(const void *x)
	{	_asm	mov	edx, [ecx + 0]
		_asm	mov	eax, [ecx + 4]
		_asm	xchg	dh,dl
		_asm	xchg	ah,al
		_asm	rol	edx,16
		_asm	rol	eax,16
		_asm	xchg	dh,dl
		_asm	xchg	ah,al
	}
#   else
    /* Same contract as above, using bswap on i486+. */
    static SHA_LONG64 __fastcall __pull64be(const void *x)
	{	_asm	mov	edx, [ecx + 0]
		_asm	mov	eax, [ecx + 4]
		_asm	bswap	edx
		_asm	bswap	eax
	}
#   endif
#   define PULL64(x) __pull64be(&(x))
#   if _MSC_VER<=1200
     /* VC6 and older mis-inline the asm helper; forbid inlining. */
#    pragma inline_depth(0)
#   endif
#  endif
# endif
#endif
373 | |||
#ifndef PULL64
/* Generic big-endian 64-bit load: assemble the value byte by byte, so it
 * works regardless of host endianness and alignment. */
#define B(x,j)    (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8))
#define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7))
#endif
378 | |||
#ifndef ROTR
/* Portable fallback: rotate the 64-bit value x right by s bits.
 * Both arguments are fully parenthesized; the previous definition
 * expanded a bare `s` ((x)>>s, 64-s), so an expression argument such
 * as ROTR(x,a+b) produced a wrong -- or out-of-range, hence undefined
 * -- shift count.  s must be in 1..63. */
#define ROTR(x,s)	(((x)>>(s)) | ((x)<<(64-(s))))
#endif
382 | |||
/* The SHA-384/512 logical functions (FIPS 180-2, section 4.1.3).
 * Sigma0/Sigma1 mix the working variables; sigma0/sigma1 expand the
 * message schedule. */
#define Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
#define Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
#define sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
#define sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))

/* Ch: x selects bitwise between y and z; Maj: bitwise majority of x,y,z. */
#define Ch(x,y,z)	(((x) & (y)) ^ ((~(x)) & (z)))
#define Maj(x,y,z)	(((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
390 | |||
#if defined(OPENSSL_IA32_SSE2) && !defined(OPENSSL_NO_ASM) && !defined(I386_ONLY)
/* On IA-32 builds, divert block processing to the SSE2 assembler routine
 * when the CPU capability word has bit 26 (SSE2) set; otherwise break out
 * of the do/while and continue with the C implementation. */
#define GO_FOR_SSE2(ctx,in,num)		do {		\
	void	sha512_block_sse2(void *,const void *,size_t);	\
	if (!(OPENSSL_ia32cap_P & (1<<26))) break;	\
	sha512_block_sse2(ctx->h,in,num); return;	\
					} while (0)
#endif
398 | |||
399 | #ifdef OPENSSL_SMALL_FOOTPRINT | ||
400 | |||
401 | static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num) | ||
402 | { | ||
403 | const SHA_LONG64 *W=in; | ||
404 | SHA_LONG64 a,b,c,d,e,f,g,h,s0,s1,T1,T2; | ||
405 | SHA_LONG64 X[16]; | ||
406 | int i; | ||
407 | |||
408 | #ifdef GO_FOR_SSE2 | ||
409 | GO_FOR_SSE2(ctx,in,num); | ||
410 | #endif | ||
411 | |||
412 | while (num--) { | ||
413 | |||
414 | a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3]; | ||
415 | e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7]; | ||
416 | |||
417 | for (i=0;i<16;i++) | ||
418 | { | ||
419 | #ifdef B_ENDIAN | ||
420 | T1 = X[i] = W[i]; | ||
421 | #else | ||
422 | T1 = X[i] = PULL64(W[i]); | ||
423 | #endif | ||
424 | T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; | ||
425 | T2 = Sigma0(a) + Maj(a,b,c); | ||
426 | h = g; g = f; f = e; e = d + T1; | ||
427 | d = c; c = b; b = a; a = T1 + T2; | ||
428 | } | ||
429 | |||
430 | for (;i<80;i++) | ||
431 | { | ||
432 | s0 = X[(i+1)&0x0f]; s0 = sigma0(s0); | ||
433 | s1 = X[(i+14)&0x0f]; s1 = sigma1(s1); | ||
434 | |||
435 | T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf]; | ||
436 | T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; | ||
437 | T2 = Sigma0(a) + Maj(a,b,c); | ||
438 | h = g; g = f; f = e; e = d + T1; | ||
439 | d = c; c = b; b = a; a = T1 + T2; | ||
440 | } | ||
441 | |||
442 | ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d; | ||
443 | ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h; | ||
444 | |||
445 | W+=SHA_LBLOCK; | ||
446 | } | ||
447 | } | ||
448 | |||
449 | #else | ||
450 | |||
/* One round for t = 0..15.  T1 must already hold the schedule word for
 * this round.  The caller rotates the names a..h between invocations,
 * so only the d and h slots are written here. */
#define ROUND_00_15(i,a,b,c,d,e,f,g,h)		do {	\
	T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i];	\
	h = Sigma0(a) + Maj(a,b,c);			\
	d += T1;	h += T1;		} while (0)

/* One round for t = 16..79: expand the message schedule in place
 * (indices mod 16) and then perform the round proper. */
#define ROUND_16_80(i,a,b,c,d,e,f,g,h,X)	do {	\
	s0 = X[(i+1)&0x0f];	s0 = sigma0(s0);	\
	s1 = X[(i+14)&0x0f];	s1 = sigma1(s1);	\
	T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f];	\
	ROUND_00_15(i,a,b,c,d,e,f,g,h);		} while (0)
461 | |||
/*
 * sha512_block_data_order - SHA-384/512 compression function (unrolled).
 *
 * Consumes `num` consecutive 128-byte blocks at `in`, updating the eight
 * chaining values in ctx->h.  Rounds are unrolled eight at a time; the
 * working variables a..h are rotated through the macro argument lists
 * instead of being shuffled at run time, and T1 carries each freshly
 * loaded/expanded schedule word into ROUND_00_15.
 */
static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
	{
	const SHA_LONG64 *W=in;
	SHA_LONG64 a,b,c,d,e,f,g,h,s0,s1,T1;
	SHA_LONG64 X[16];
	int i;

#ifdef GO_FOR_SSE2
	GO_FOR_SSE2(ctx,in,num);	/* may return via the SSE2 path */
#endif

	while (num--) {

	a = ctx->h[0];	b = ctx->h[1];	c = ctx->h[2];	d = ctx->h[3];
	e = ctx->h[4];	f = ctx->h[5];	g = ctx->h[6];	h = ctx->h[7];

	/* Rounds 0..15: load the message block into the schedule X[]
	 * (byte-swapping on little-endian hosts) while rounding. */
#ifdef B_ENDIAN
	T1 = X[0] = W[0];	ROUND_00_15(0,a,b,c,d,e,f,g,h);
	T1 = X[1] = W[1];	ROUND_00_15(1,h,a,b,c,d,e,f,g);
	T1 = X[2] = W[2];	ROUND_00_15(2,g,h,a,b,c,d,e,f);
	T1 = X[3] = W[3];	ROUND_00_15(3,f,g,h,a,b,c,d,e);
	T1 = X[4] = W[4];	ROUND_00_15(4,e,f,g,h,a,b,c,d);
	T1 = X[5] = W[5];	ROUND_00_15(5,d,e,f,g,h,a,b,c);
	T1 = X[6] = W[6];	ROUND_00_15(6,c,d,e,f,g,h,a,b);
	T1 = X[7] = W[7];	ROUND_00_15(7,b,c,d,e,f,g,h,a);
	T1 = X[8] = W[8];	ROUND_00_15(8,a,b,c,d,e,f,g,h);
	T1 = X[9] = W[9];	ROUND_00_15(9,h,a,b,c,d,e,f,g);
	T1 = X[10] = W[10];	ROUND_00_15(10,g,h,a,b,c,d,e,f);
	T1 = X[11] = W[11];	ROUND_00_15(11,f,g,h,a,b,c,d,e);
	T1 = X[12] = W[12];	ROUND_00_15(12,e,f,g,h,a,b,c,d);
	T1 = X[13] = W[13];	ROUND_00_15(13,d,e,f,g,h,a,b,c);
	T1 = X[14] = W[14];	ROUND_00_15(14,c,d,e,f,g,h,a,b);
	T1 = X[15] = W[15];	ROUND_00_15(15,b,c,d,e,f,g,h,a);
#else
	T1 = X[0] = PULL64(W[0]);	ROUND_00_15(0,a,b,c,d,e,f,g,h);
	T1 = X[1] = PULL64(W[1]);	ROUND_00_15(1,h,a,b,c,d,e,f,g);
	T1 = X[2] = PULL64(W[2]);	ROUND_00_15(2,g,h,a,b,c,d,e,f);
	T1 = X[3] = PULL64(W[3]);	ROUND_00_15(3,f,g,h,a,b,c,d,e);
	T1 = X[4] = PULL64(W[4]);	ROUND_00_15(4,e,f,g,h,a,b,c,d);
	T1 = X[5] = PULL64(W[5]);	ROUND_00_15(5,d,e,f,g,h,a,b,c);
	T1 = X[6] = PULL64(W[6]);	ROUND_00_15(6,c,d,e,f,g,h,a,b);
	T1 = X[7] = PULL64(W[7]);	ROUND_00_15(7,b,c,d,e,f,g,h,a);
	T1 = X[8] = PULL64(W[8]);	ROUND_00_15(8,a,b,c,d,e,f,g,h);
	T1 = X[9] = PULL64(W[9]);	ROUND_00_15(9,h,a,b,c,d,e,f,g);
	T1 = X[10] = PULL64(W[10]);	ROUND_00_15(10,g,h,a,b,c,d,e,f);
	T1 = X[11] = PULL64(W[11]);	ROUND_00_15(11,f,g,h,a,b,c,d,e);
	T1 = X[12] = PULL64(W[12]);	ROUND_00_15(12,e,f,g,h,a,b,c,d);
	T1 = X[13] = PULL64(W[13]);	ROUND_00_15(13,d,e,f,g,h,a,b,c);
	T1 = X[14] = PULL64(W[14]);	ROUND_00_15(14,c,d,e,f,g,h,a,b);
	T1 = X[15] = PULL64(W[15]);	ROUND_00_15(15,b,c,d,e,f,g,h,a);
#endif

	/* Rounds 16..79: schedule expansion folded into the rounds,
	 * eight rounds per iteration with rotated argument lists. */
	for (i=16;i<80;i+=8)
		{
		ROUND_16_80(i+0,a,b,c,d,e,f,g,h,X);
		ROUND_16_80(i+1,h,a,b,c,d,e,f,g,X);
		ROUND_16_80(i+2,g,h,a,b,c,d,e,f,X);
		ROUND_16_80(i+3,f,g,h,a,b,c,d,e,X);
		ROUND_16_80(i+4,e,f,g,h,a,b,c,d,X);
		ROUND_16_80(i+5,d,e,f,g,h,a,b,c,X);
		ROUND_16_80(i+6,c,d,e,f,g,h,a,b,X);
		ROUND_16_80(i+7,b,c,d,e,f,g,h,a,X);
		}

	/* Fold the round output into the chaining state. */
	ctx->h[0] += a;	ctx->h[1] += b;	ctx->h[2] += c;	ctx->h[3] += d;
	ctx->h[4] += e;	ctx->h[5] += f;	ctx->h[6] += g;	ctx->h[7] += h;

	W+=SHA_LBLOCK;
	}
	}
532 | |||
533 | #endif | ||
534 | |||
535 | #endif /* SHA512_ASM */ | ||
536 | |||
537 | #endif /* OPENSSL_NO_SHA512 */ | ||