Diffstat (limited to 'src/lib/libcrypto/sha/asm/sha512-x86_64.pl')
-rwxr-xr-x	src/lib/libcrypto/sha/asm/sha512-x86_64.pl	450
1 file changed, 0 insertions(+), 450 deletions(-)
diff --git a/src/lib/libcrypto/sha/asm/sha512-x86_64.pl b/src/lib/libcrypto/sha/asm/sha512-x86_64.pl
deleted file mode 100755
index f611a2d898..0000000000
--- a/src/lib/libcrypto/sha/asm/sha512-x86_64.pl
+++ /dev/null
@@ -1,450 +0,0 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# sha256/512_block procedure for x86_64.
#
# 40% improvement over compiler-generated code on Opteron. On EM64T
# sha256 was observed to run >80% faster and sha512 >40% faster. No
# magical tricks, just a straight implementation... I really wonder
# why gcc [being armed with inline assembler] fails to generate code
# as fast. The only thing which is cool about this module is that the
# very same instruction sequence is used for both SHA-256 and SHA-512.
# In the former case the instructions operate on 32-bit operands, in
# the latter on 64-bit ones. All I had to do was get one flavor right;
# the other one passed the test right away:-)
#
# sha256_block runs in ~1005 cycles on Opteron, which gives you an
# asymptotic performance of 64*1000/1005=63.7MBps times the CPU clock
# frequency in GHz. sha512_block runs in ~1275 cycles, which results
# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
# Well, if you compare it to the IA-64 implementation, which maintains
# X[16] in the register bank[!], tends towards 4 instructions per CPU
# clock cycle and runs in 1003 cycles, then 1275 is a very good result
# for the 3-way issue Opteron pipeline with X[16] maintained in memory.
# So *if* there is a way to improve it, *then* the only way would be
# to offload the X[16] updates to the SSE unit, but that would require
# a "deeper" loop unroll, which in turn would naturally cause size
# blow-up, not to mention increased complexity! And once again, only
# *if* it's actually possible to noticeably improve overall ILP,
# instruction level parallelism, on the given CPU implementation.
#
# Special note on Intel EM64T. While the Opteron CPU exhibits a perfect
# performance ratio of 1.5 between the 64- and 32-bit flavors [see
# above], [currently available] EM64T CPUs are apparently far from it.
# On the contrary, the 64-bit version, sha512_block, is ~30% *slower*
# than the 32-bit sha256_block:-( This is presumably because 64-bit
# shifts/rotates are apparently not atomic instructions, but are
# implemented in microcode.

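# Hedged aside (ours, not part of the original module): the MBps figures
# quoted above are simply bytes-per-block over cycles-per-block, scaled by
# the clock frequency; the helper below is a hypothetical illustration.
sub _throughput_mbps_sketch {
	my ($block_bytes, $cycles_per_block, $ghz) = @_;
	# e.g. SHA-256: 64*1000/1005 = ~63.7 MBps per GHz,
	#      SHA-512: 128*1000/1275 = ~100 MBps per GHz
	return $block_bytes * 1000 / $cycles_per_block * $ghz;
}
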
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open STDOUT,"| $^X $xlate $flavour $output";

if ($output =~ /512/) {
	$func="sha512_block_data_order";
	$TABLE="K512";
	$SZ=8;
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
					"%r8", "%r9", "%r10","%r11");
	($T1,$a0,$a1,$a2)=("%r12","%r13","%r14","%r15");
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1, 8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
} else {
	$func="sha256_block_data_order";
	$TABLE="K256";
	$SZ=4;
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
					"%r8d","%r9d","%r10d","%r11d");
	($T1,$a0,$a1,$a2)=("%r12d","%r13d","%r14d","%r15d");
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
}
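
# Hedged aside (ours, not part of the original module): the @Sigma*/@sigma*
# lists above are the FIPS 180-4 rotation/shift amounts; only these amounts,
# the word size $SZ, the round count and the K table differ between the two
# flavors. As a plain-Perl illustration, 32-bit Sigma0(a) is just
# ROTR^2 xor ROTR^13 xor ROTR^22 (helper name is ours):
sub _Sigma0_sha256_sketch {
	my ($a) = @_;
	my $ror = sub { my ($x,$n) = @_; (($x >> $n) | ($x << (32 - $n))) & 0xffffffff };
	return $ror->($a,2) ^ $ror->($a,13) ^ $ror->($a,22);
}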

$ctx="%rdi";	# 1st arg
$round="%rdi";	# zaps $ctx
$inp="%rsi";	# 2nd arg
$Tbl="%rbp";

$_ctx="16*$SZ+0*8(%rsp)";
$_inp="16*$SZ+1*8(%rsp)";
$_end="16*$SZ+2*8(%rsp)";
$_rsp="16*$SZ+3*8(%rsp)";
$framesz="16*$SZ+4*8";

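# Hedged note (ours, not in the original): the frame laid out above is 16
# $SZ-sized slots used as a ring buffer for the message schedule X[0..15],
# followed by four 8-byte save slots. For the SHA-256 flavor ($SZ=4) that
# works out to X[] at 0..63(%rsp), $_ctx at 64, $_inp at 72, $_end at 80,
# the caller's %rsp copy at 88, and $framesz = 16*4+4*8 = 96 bytes.
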

sub ROUND_00_15()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___;
	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a0
	mov	$f,$a2
	mov	$T1,`$SZ*($i&0xf)`(%rsp)

	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a1
	xor	$e,$a0
	xor	$g,$a2			# f^g

	ror	\$`$Sigma1[1]-$Sigma1[0]`,$a0
	add	$h,$T1			# T1+=h
	xor	$a,$a1

	add	($Tbl,$round,$SZ),$T1	# T1+=K[round]
	and	$e,$a2			# (f^g)&e
	mov	$b,$h

	ror	\$`$Sigma0[1]-$Sigma0[0]`,$a1
	xor	$e,$a0
	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g

	xor	$c,$h			# b^c
	xor	$a,$a1
	add	$a2,$T1			# T1+=Ch(e,f,g)
	mov	$b,$a2

	ror	\$$Sigma1[0],$a0	# Sigma1(e)
	and	$a,$h			# h=(b^c)&a
	and	$c,$a2			# b&c

	ror	\$$Sigma0[0],$a1	# Sigma0(a)
	add	$a0,$T1			# T1+=Sigma1(e)
	add	$a2,$h			# h+=b&c (completes +=Maj(a,b,c))

	add	$T1,$d			# d+=T1
	add	$T1,$h			# h+=T1
	lea	1($round),$round	# round++
	add	$a1,$h			# h+=Sigma0(a)

___
}
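
# Hedged reference (ours, not part of the original module): ROUND_00_15 emits
# one interleaved SHA round. In textbook form, with W[round] already in $T1,
# it computes T1 = h + Sigma1(e) + Ch(e,f,g) + K[round] + W[round] and
# T2 = Sigma0(a) + Maj(a,b,c), then d += T1 and the old h slot becomes
# a = T1 + T2. Sigma1(e) is accumulated in $a0 by rotating by *differences*
# of the amounts: since rotation distributes over xor,
# ((e>>>14 ^ e)>>>5 ^ e)>>>6 == e>>>25 ^ e>>>11 ^ e>>>6 in the 32-bit case,
# and $a1 builds Sigma0(a) the same way. A plain-Perl sketch of one 32-bit
# round (helper name is ours):
sub _sha256_round_sketch {
	my ($K, $W, $a,$b,$c,$d,$e,$f,$g,$h) = @_;
	my $ror = sub { my ($x,$n) = @_; (($x >> $n) | ($x << (32 - $n))) & 0xffffffff };
	my $S1  = $ror->($e,6) ^ $ror->($e,11) ^ $ror->($e,25);
	my $ch  = (($f ^ $g) & $e) ^ $g;		# same form as the asm above
	my $T1  = ($h + $S1 + $ch + $K + $W) & 0xffffffff;
	my $S0  = $ror->($a,2) ^ $ror->($a,13) ^ $ror->($a,22);
	my $maj = (($b ^ $c) & $a) ^ ($b & $c);		# == Maj(a,b,c)
	return ($T1, ($S0 + $maj + $T1) & 0xffffffff);	# (add T1 to d; new a)
}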

sub ROUND_16_XX()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___;
	mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
	mov	`$SZ*(($i+14)&0xf)`(%rsp),$a1
	mov	$a0,$T1
	mov	$a1,$a2

	ror	\$`$sigma0[1]-$sigma0[0]`,$T1
	xor	$a0,$T1
	shr	\$$sigma0[2],$a0

	ror	\$$sigma0[0],$T1
	xor	$T1,$a0			# sigma0(X[(i+1)&0xf])
	mov	`$SZ*(($i+9)&0xf)`(%rsp),$T1

	ror	\$`$sigma1[1]-$sigma1[0]`,$a2
	xor	$a1,$a2
	shr	\$$sigma1[2],$a1

	ror	\$$sigma1[0],$a2
	add	$a0,$T1
	xor	$a2,$a1			# sigma1(X[(i+14)&0xf])

	add	`$SZ*($i&0xf)`(%rsp),$T1
	mov	$e,$a0
	add	$a1,$T1
	mov	$a,$a1
___
	&ROUND_00_15(@_);
}
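
# Hedged reference (ours, not part of the original module): ROUND_16_XX first
# extends the message schedule in the 16-slot stack ring buffer,
# W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16], leaving the new
# word in $T1, then falls through to ROUND_00_15. A plain-Perl sketch for the
# 32-bit flavor, where @W holds the sixteen most recent words W[i-16..i-1]
# (helper name is ours):
sub _sha256_schedule_sketch {
	my @W = @_;
	my $ror = sub { my ($x,$n) = @_; (($x >> $n) | ($x << (32 - $n))) & 0xffffffff };
	my $s0 = $ror->($W[1],7)   ^ $ror->($W[1],18)  ^ ($W[1]  >> 3);		# sigma0(W[i-15])
	my $s1 = $ror->($W[14],17) ^ $ror->($W[14],19) ^ ($W[14] >> 10);	# sigma1(W[i-2])
	return ($W[0] + $s0 + $W[9] + $s1) & 0xffffffff;			# new W[i]
}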

$code=<<___;
.text

.globl	$func
.type	$func,\@function,4
.align	16
$func:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$$framesz,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
.Lprologue:

	lea	$TABLE(%rip),$Tbl

	mov	$SZ*0($ctx),$A
	mov	$SZ*1($ctx),$B
	mov	$SZ*2($ctx),$C
	mov	$SZ*3($ctx),$D
	mov	$SZ*4($ctx),$E
	mov	$SZ*5($ctx),$F
	mov	$SZ*6($ctx),$G
	mov	$SZ*7($ctx),$H
	jmp	.Lloop

.align	16
.Lloop:
	xor	$round,$round
___
	for($i=0;$i<16;$i++) {
		$code.="	mov	$SZ*$i($inp),$T1\n";
		$code.="	mov	@ROT[4],$a0\n";
		$code.="	mov	@ROT[0],$a1\n";
		$code.="	bswap	$T1\n";
		&ROUND_00_15($i,@ROT);
		unshift(@ROT,pop(@ROT));
	}
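
# Note (ours, not in the original): in the loop above, bswap converts each
# big-endian input word to native byte order before ROUND_00_15 stores it
# into the stack ring buffer, and unshift(@ROT,pop(@ROT)) rotates the
# register-name list one position, so the register that just received the
# new "a" value (the old $h slot) is simply relabeled $a for the next round;
# the a..h shuffle therefore costs no data movement.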
$code.=<<___;
	jmp	.Lrounds_16_xx
.align	16
.Lrounds_16_xx:
___
	for(;$i<32;$i++) {
		&ROUND_16_XX($i,@ROT);
		unshift(@ROT,pop(@ROT));
	}

$code.=<<___;
	cmp	\$$rounds,$round
	jb	.Lrounds_16_xx

	mov	$_ctx,$ctx
	lea	16*$SZ($inp),$inp

	add	$SZ*0($ctx),$A
	add	$SZ*1($ctx),$B
	add	$SZ*2($ctx),$C
	add	$SZ*3($ctx),$D
	add	$SZ*4($ctx),$E
	add	$SZ*5($ctx),$F
	add	$SZ*6($ctx),$G
	add	$SZ*7($ctx),$H

	cmp	$_end,$inp

	mov	$A,$SZ*0($ctx)
	mov	$B,$SZ*1($ctx)
	mov	$C,$SZ*2($ctx)
	mov	$D,$SZ*3($ctx)
	mov	$E,$SZ*4($ctx)
	mov	$F,$SZ*5($ctx)
	mov	$G,$SZ*6($ctx)
	mov	$H,$SZ*7($ctx)
	jb	.Lloop

	mov	$_rsp,%rsi
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lepilogue:
	ret
.size	$func,.-$func
___
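
# Note (ours, not in the original): the adds and stores near the end of the
# code above implement the Davies-Meyer feed-forward: after the 64 (or 80)
# rounds the working variables are added back into the eight state words at
# *$ctx, $inp advances by one 16*$SZ-byte block, and "jb .Lloop" repeats
# until $inp reaches the precomputed end pointer; the epilogue then restores
# the callee-saved registers through the saved %rsp copy.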

if ($SZ==4) {
$code.=<<___;
.align	64
.type	$TABLE,\@object
$TABLE:
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
___
} else {
$code.=<<___;
.align	64
.type	$TABLE,\@object
$TABLE:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
___
}
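
# Hedged aside (ours, not part of the original module): the K256/K512 words
# above are the first 32 (resp. 64) fractional bits of the cube roots of the
# first 64 (resp. 80) primes, per FIPS 180-4. For the 32-bit table a double
# carries enough fraction bits in practice to reproduce them; the 64-bit
# entries would need wider arithmetic. Helper name and approach are ours:
sub _k256_sketch {
	my @p = (2);
	for (my $n = 3; @p < 64; $n += 2) {		# first 64 primes
		push @p, $n unless grep { $n % $_ == 0 } @p;
	}
	return map { my $c = $_ ** (1/3);		# frac(cbrt(prime)) * 2^32
		     sprintf "0x%08x", int(($c - int($c)) * 2**32) } @p;
}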

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lprologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lin_prologue

	mov	16*$SZ+3*8(%rax),%rax	# pull $_rsp
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quad-words
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_$func
	.rva	.LSEH_end_$func
	.rva	.LSEH_info_$func

.section	.xdata
.align	8
.LSEH_info_$func:
	.byte	9,0,0,0
	.rva	se_handler
___
}
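
# Note (ours, not in the original): on Win64 the .pdata entry above registers
# the $func address range with the OS unwinder, and the .xdata UNWIND_INFO
# points at se_handler, which recovers the callee-saved registers through the
# saved %rsp copy in the frame before handing control back to
# RtlVirtualUnwind to continue unwinding through the caller.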

$code =~ s/\`([^\`]*)\`/eval $1/gem;
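# Note (ours, not in the original): the substitution above evaluates every
# `...`-quoted expression in $code at generation time, so for the SHA-512
# flavor "ror \$`$Sigma1[2]-$Sigma1[1]`,$a0" comes out as "ror $23,%r13"
# (41-18=23) before the result is piped through x86_64-xlate.pl via the
# redirected STDOUT.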
print $code;
close STDOUT;