path: root/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
Diffstat (limited to 'src/lib/libcrypto/rc4/asm/rc4-x86_64.pl')
-rwxr-xr-x	src/lib/libcrypto/rc4/asm/rc4-x86_64.pl	294
1 file changed, 61 insertions(+), 233 deletions(-)
diff --git a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
index d6eac205e9..544386bf53 100755
--- a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
+++ b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
@@ -7,8 +7,6 @@
 # details see http://www.openssl.org/~appro/cryptogams/.
 # ====================================================================
 #
-# July 2004
-#
 # 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
 # "hand-coded assembler"] doesn't stand for the whole improvement
 # coefficient. It turned out that eliminating RC4_CHAR from config
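
For orientation, everything these comments tune is RC4's tiny pseudo-random generation loop; the RC4_INT/RC4_CHAR choice is just whether each S[i] lives in a 32-bit slot or a byte. A reference sketch in Perl (hypothetical helper, semantics only, not code from this file):

# Hypothetical reference RC4 PRGA -- the assembly in this file computes
# exactly this, up to eight bytes per unrolled round.
sub rc4_prga_xor {
    my ($S, $x, $y, $in) = @_;          # $S: 256-entry state array
    my $out = '';
    for my $c (unpack("C*", $in)) {
        $x = ($x + 1) & 0xff;
        $y = ($y + $S->[$x]) & 0xff;
        @$S[$x, $y] = @$S[$y, $x];      # swap S[x] and S[y]
        $out .= chr($c ^ $S->[($S->[$x] + $S->[$y]) & 0xff]);
    }
    return ($out, $x, $y);
}
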
@@ -21,8 +19,6 @@
 # to operate on partial registers, it turned out to be the best bet.
 # At least for AMD... How IA32E would perform remains to be seen...
 
-# November 2004
-#
 # As was shown by Marc Bevand reordering of couple of load operations
 # results in even higher performance gain of 3.3x:-) At least on
 # Opteron... For reference, 1x in this case is RC4_CHAR C-code
@@ -30,8 +26,6 @@
 # Latter means that if you want to *estimate* what to expect from
 # *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz.
 
-# November 2004
-#
 # Intel P4 EM64T core was found to run the AMD64 code really slow...
 # The only way to achieve comparable performance on P4 was to keep
 # RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
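
The "multiply 54 by 3.3 and clock frequency in GHz" estimate at the top of this hunk works out as below; the 2.2 GHz clock is an assumed example, and the surrounding comment implies the 54 baseline is MB/s per GHz.

# Worked example of the comment's throughput estimate (assumptions in
# the comments; none of these numbers come from measuring this code).
my $ghz = 2.2;                          # hypothetical Opteron clock
printf "~%.0f MB/s expected\n", 54 * 3.3 * $ghz;    # ~392 MB/s
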
@@ -39,14 +33,10 @@
 # on either AMD and Intel platforms, I implement both cases. See
 # rc4_skey.c for further details...
 
-# April 2005
-#
 # P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
 # those with add/sub results in 50% performance improvement of folded
 # loop...
 
-# May 2005
-#
 # As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
 # performance by >30% [unlike P4 32-bit case that is]. But this is
 # provided that loads are reordered even more aggressively! Both code
@@ -60,8 +50,6 @@
 # is not implemented, then this final RC4_CHAR code-path should be
 # preferred, as it provides better *all-round* performance].
 
-# March 2007
-#
 # Intel Core2 was observed to perform poorly on both code paths:-( It
 # apparently suffers from some kind of partial register stall, which
 # occurs in 64-bit mode only [as virtually identical 32-bit loop was
@@ -70,37 +58,6 @@
 # fit for Core2 and therefore the code was modified to skip cloop8 on
 # this CPU.
 
-# May 2010
-#
-# Intel Westmere was observed to perform suboptimally. Adding yet
-# another movzb to cloop1 improved performance by almost 50%! Core2
-# performance is improved too, but nominally...
-
-# May 2011
-#
-# The only code path that was not modified is P4-specific one. Non-P4
-# Intel code path optimization is heavily based on submission by Maxim
-# Perminov, Maxim Locktyukhin and Jim Guilford of Intel. I've used
-# some of the ideas even in attempt to optmize the original RC4_INT
-# code path... Current performance in cycles per processed byte (less
-# is better) and improvement coefficients relative to previous
-# version of this module are:
-#
-#		Opteron		5.3/+0%(*)
-#		P4		6.5
-#		Core2		6.2/+15%(**)
-#		Westmere	4.2/+60%
-#		Sandy Bridge	4.2/+120%
-#		Atom		9.3/+80%
-#
-# (*)	But corresponding loop has less instructions, which should have
-#	positive effect on upcoming Bulldozer, which has one less ALU.
-#	For reference, Intel code runs at 6.8 cpb rate on Opteron.
-# (**)	Note that Core2 result is ~15% lower than corresponding result
-#	for 32-bit code, meaning that it's possible to improve it,
-#	but more than likely at the cost of the others (see rc4-586.pl
-#	to get the idea)...
-
 $flavour = shift;
 $output = shift;
 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
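
The two-argument handling in the context lines above follows the usual CRYPTOGAMS convention: a lone argument containing a dot is taken as the output file, otherwise the first argument names the assembler flavour. A standalone sketch of that parsing (assumed usage, not part of the diff):

# Demo of the $flavour/$output convention: one dotted argument means
# "output file only", otherwise arg1 selects the assembler dialect.
for my $args (["elf", "rc4-x86_64.s"], ["rc4-x86_64.s"]) {
    my ($flavour, $output) = @$args;
    if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
    printf "flavour=%-4s output=%s\n", $flavour // "-", $output;
}
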
@@ -119,10 +76,13 @@ $len="%rsi"; # arg2
 $inp="%rdx";	# arg3
 $out="%rcx";	# arg4
 
-{
+@XX=("%r8","%r10");
+@TX=("%r9","%r11");
+$YY="%r12";
+$TY="%r13";
+
 $code=<<___;
 .text
-.extern	OPENSSL_ia32cap_P
 
 .globl	RC4
 .type	RC4,\@function,4
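
The register names assigned above are spliced into heredoc assembly throughout the file, with `#b`/`#d` width markers resolved by a regex at the very end. A condensed, runnable sketch of the idiom:

# Perlasm heredoc idiom: Perl variables interpolate into the assembly
# text; "#d"/"#b" pick a register width afterwards.
my $dat  = "%rdi";
my @XX   = ("%r8", "%r10");
my $code = <<___;
	add	\$8,$dat
	movl	-8($dat),$XX[0]#d
___
$code =~ s/#([bwd])/$1/gm;   # %r8#d -> %r8d, as at the end of this file
print $code;                 # emits: add $8,%rdi / movl -8(%rdi),%r8d
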
@@ -135,173 +95,48 @@ RC4: or $len,$len
 	push	%r12
 	push	%r13
 .Lprologue:
-	mov	$len,%r11
-	mov	$inp,%r12
-	mov	$out,%r13
-___
-my $len="%r11";		# reassign input arguments
-my $inp="%r12";
-my $out="%r13";
 
-my @XX=("%r10","%rsi");
-my @TX=("%rax","%rbx");
-my $YY="%rcx";
-my $TY="%rdx";
-
-$code.=<<___;
-	xor	$XX[0],$XX[0]
-	xor	$YY,$YY
-
-	lea	8($dat),$dat
-	mov	-8($dat),$XX[0]#b
-	mov	-4($dat),$YY#b
+	add	\$8,$dat
+	movl	-8($dat),$XX[0]#d
+	movl	-4($dat),$YY#d
 	cmpl	\$-1,256($dat)
 	je	.LRC4_CHAR
-	mov	OPENSSL_ia32cap_P(%rip),%r8d
-	xor	$TX[1],$TX[1]
 	inc	$XX[0]#b
-	sub	$XX[0],$TX[1]
-	sub	$inp,$out
 	movl	($dat,$XX[0],4),$TX[0]#d
-	test	\$-16,$len
+	test	\$-8,$len
 	jz	.Lloop1
-	bt	\$30,%r8d	# Intel CPU?
-	jc	.Lintel
-	and	\$7,$TX[1]
-	lea	1($XX[0]),$XX[1]
-	jz	.Loop8
-	sub	$TX[1],$len
-.Loop8_warmup:
-	add	$TX[0]#b,$YY#b
-	movl	($dat,$YY,4),$TY#d
-	movl	$TX[0]#d,($dat,$YY,4)
-	movl	$TY#d,($dat,$XX[0],4)
-	add	$TY#b,$TX[0]#b
-	inc	$XX[0]#b
-	movl	($dat,$TX[0],4),$TY#d
-	movl	($dat,$XX[0],4),$TX[0]#d
-	xorb	($inp),$TY#b
-	movb	$TY#b,($out,$inp)
-	lea	1($inp),$inp
-	dec	$TX[1]
-	jnz	.Loop8_warmup
-
-	lea	1($XX[0]),$XX[1]
-	jmp	.Loop8
+	jmp	.Lloop8
 .align	16
-.Loop8:
+.Lloop8:
 ___
 for ($i=0;$i<8;$i++) {
-$code.=<<___ if ($i==7);
-	add	\$8,$XX[1]#b
-___
 $code.=<<___;
 	add	$TX[0]#b,$YY#b
+	mov	$XX[0],$XX[1]
 	movl	($dat,$YY,4),$TY#d
+	ror	\$8,%rax			# ror is redundant when $i=0
+	inc	$XX[1]#b
+	movl	($dat,$XX[1],4),$TX[1]#d
+	cmp	$XX[1],$YY
 	movl	$TX[0]#d,($dat,$YY,4)
-	movl	`4*($i==7?-1:$i)`($dat,$XX[1],4),$TX[1]#d
-	ror	\$8,%r8			# ror is redundant when $i=0
-	movl	$TY#d,4*$i($dat,$XX[0],4)
+	cmove	$TX[0],$TX[1]
+	movl	$TY#d,($dat,$XX[0],4)
 	add	$TX[0]#b,$TY#b
-	movb	($dat,$TY,4),%r8b
+	movb	($dat,$TY,4),%al
 ___
-push(@TX,shift(@TX)); #push(@XX,shift(@XX)); # "rotate" registers
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
 }
 $code.=<<___;
-	add	\$8,$XX[0]#b
-	ror	\$8,%r8
+	ror	\$8,%rax
 	sub	\$8,$len
 
-	xor	($inp),%r8
-	mov	%r8,($out,$inp)
-	lea	8($inp),$inp
+	xor	($inp),%rax
+	add	\$8,$inp
+	mov	%rax,($out)
+	add	\$8,$out
 
 	test	\$-8,$len
-	jnz	.Loop8
-	cmp	\$0,$len
-	jne	.Lloop1
-	jmp	.Lexit
-
-.align	16
-.Lintel:
-	test	\$-32,$len
-	jz	.Lloop1
-	and	\$15,$TX[1]
-	jz	.Loop16_is_hot
-	sub	$TX[1],$len
-.Loop16_warmup:
-	add	$TX[0]#b,$YY#b
-	movl	($dat,$YY,4),$TY#d
-	movl	$TX[0]#d,($dat,$YY,4)
-	movl	$TY#d,($dat,$XX[0],4)
-	add	$TY#b,$TX[0]#b
-	inc	$XX[0]#b
-	movl	($dat,$TX[0],4),$TY#d
-	movl	($dat,$XX[0],4),$TX[0]#d
-	xorb	($inp),$TY#b
-	movb	$TY#b,($out,$inp)
-	lea	1($inp),$inp
-	dec	$TX[1]
-	jnz	.Loop16_warmup
-
-	mov	$YY,$TX[1]
-	xor	$YY,$YY
-	mov	$TX[1]#b,$YY#b
-
-.Loop16_is_hot:
-	lea	($dat,$XX[0],4),$XX[1]
-___
-sub RC4_loop {
-  my $i=shift;
-  my $j=$i<0?0:$i;
-  my $xmm="%xmm".($j&1);
-
-	$code.="	add	\$16,$XX[0]#b\n"	if ($i==15);
-	$code.="	movdqu	($inp),%xmm2\n"		if ($i==15);
-	$code.="	add	$TX[0]#b,$YY#b\n"	if ($i<=0);
-	$code.="	movl	($dat,$YY,4),$TY#d\n";
-	$code.="	pxor	%xmm0,%xmm2\n"		if ($i==0);
-	$code.="	psllq	\$8,%xmm1\n"		if ($i==0);
-	$code.="	pxor	$xmm,$xmm\n"		if ($i<=1);
-	$code.="	movl	$TX[0]#d,($dat,$YY,4)\n";
-	$code.="	add	$TY#b,$TX[0]#b\n";
-	$code.="	movl	`4*($j+1)`($XX[1]),$TX[1]#d\n"	if ($i<15);
-	$code.="	movz	$TX[0]#b,$TX[0]#d\n";
-	$code.="	movl	$TY#d,4*$j($XX[1])\n";
-	$code.="	pxor	%xmm1,%xmm2\n"		if ($i==0);
-	$code.="	lea	($dat,$XX[0],4),$XX[1]\n"	if ($i==15);
-	$code.="	add	$TX[1]#b,$YY#b\n"	if ($i<15);
-	$code.="	pinsrw	\$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n";
-	$code.="	movdqu	%xmm2,($out,$inp)\n"	if ($i==0);
-	$code.="	lea	16($inp),$inp\n"	if ($i==0);
-}
-	RC4_loop(-1);
-$code.=<<___;
-	jmp	.Loop16_enter
-.align	16
-.Loop16:
-___
-
-for ($i=0;$i<16;$i++) {
-	$code.=".Loop16_enter:\n"	if ($i==1);
-	RC4_loop($i);
-	push(@TX,shift(@TX));		# "rotate" registers
-}
-$code.=<<___;
-	mov	$YY,$TX[1]
-	xor	$YY,$YY			# keyword to partial register
-	sub	\$16,$len
-	mov	$TX[1]#b,$YY#b
-	test	\$-16,$len
-	jnz	.Loop16
-
-	psllq	\$8,%xmm1
-	pxor	%xmm0,%xmm2
-	pxor	%xmm1,%xmm2
-	movdqu	%xmm2,($out,$inp)
-	lea	16($inp),$inp
-
+	jnz	.Lloop8
 	cmp	\$0,$len
 	jne	.Lloop1
 	jmp	.Lexit
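
The `push(@TX,shift(@TX)); push(@XX,shift(@XX))` line in this hunk is what lets eight copies of the loop body overlap: the name arrays rotate between emitted iterations, so one round's store and the next round's load land in alternating registers without extra moves. The idiom in isolation:

# The "rotate registers" trick by itself (illustrative only).
my @TX = ("%r9", "%r11");
for my $i (0 .. 3) {
    print "round $i: current=$TX[0], preloading into $TX[1]\n";
    push(@TX, shift(@TX));      # ("%r9","%r11") -> ("%r11","%r9")
}
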
@@ -317,8 +152,9 @@ $code.=<<___;
 	movl	($dat,$TX[0],4),$TY#d
 	movl	($dat,$XX[0],4),$TX[0]#d
 	xorb	($inp),$TY#b
-	movb	$TY#b,($out,$inp)
-	lea	1($inp),$inp
+	inc	$inp
+	movb	$TY#b,($out)
+	inc	$out
 	dec	$len
 	jnz	.Lloop1
 	jmp	.Lexit
@@ -329,11 +165,13 @@ $code.=<<___;
 	movzb	($dat,$XX[0]),$TX[0]#d
 	test	\$-8,$len
 	jz	.Lcloop1
+	cmpl	\$0,260($dat)
+	jnz	.Lcloop1
 	jmp	.Lcloop8
 .align	16
 .Lcloop8:
-	mov	($inp),%r8d
-	mov	4($inp),%r9d
+	mov	($inp),%eax
+	mov	4($inp),%ebx
 ___
 # unroll 2x4-wise, because 64-bit rotates kill Intel P4...
 for ($i=0;$i<4;$i++) {
@@ -350,8 +188,8 @@ $code.=<<___;
 	mov	$TX[0],$TX[1]
 .Lcmov$i:
 	add	$TX[0]#b,$TY#b
-	xor	($dat,$TY),%r8b
-	ror	\$8,%r8d
+	xor	($dat,$TY),%al
+	ror	\$8,%eax
 ___
 push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
 }
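
Both the `cmp`/`cmove` pair in the new .Lloop8 and the `.Lcmov$i` branches in this hunk guard the same RC4 hazard: when the next index x+1 happens to equal y, the value preloaded from S[x+1] is stale, because the swap just overwrote that slot. A hypothetical Perl rendering of the fixup:

# Aliasing fixup behind "cmp; cmove" / ".Lcmov": if the swap landed on
# the slot we preloaded, reuse the freshly stored byte instead.
my ($yy, $xx_next) = (7, 7);                    # the colliding case
my ($tx_cur, $tx_preloaded) = (0x41, 0x13);
$tx_preloaded = $tx_cur if $yy == $xx_next;     # what cmove implements
printf "next TX = 0x%02x\n", $tx_preloaded;     # 0x41, not stale 0x13
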
@@ -369,16 +207,16 @@ $code.=<<___;
 	mov	$TX[0],$TX[1]
 .Lcmov$i:
 	add	$TX[0]#b,$TY#b
-	xor	($dat,$TY),%r9b
-	ror	\$8,%r9d
+	xor	($dat,$TY),%bl
+	ror	\$8,%ebx
 ___
 push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
 }
 $code.=<<___;
 	lea	-8($len),$len
-	mov	%r8d,($out)
+	mov	%eax,($out)
 	lea	8($inp),$inp
-	mov	%r9d,4($out)
+	mov	%ebx,4($out)
 	lea	8($out),$out
 
 	test	\$-8,$len
@@ -391,7 +229,6 @@ $code.=<<___;
 .align	16
 .Lcloop1:
 	add	$TX[0]#b,$YY#b
-	movzb	$YY#b,$YY#d
 	movzb	($dat,$YY),$TY#d
 	movb	$TX[0]#b,($dat,$YY)
 	movb	$TY#b,($dat,$XX[0])
@@ -423,16 +260,16 @@ $code.=<<___;
 	ret
 .size	RC4,.-RC4
 ___
-}
 
 $idx="%r8";
 $ido="%r9";
 
 $code.=<<___;
-.globl	private_RC4_set_key
-.type	private_RC4_set_key,\@function,3
+.extern	OPENSSL_ia32cap_P
+.globl	RC4_set_key
+.type	RC4_set_key,\@function,3
 .align	16
-private_RC4_set_key:
+RC4_set_key:
 	lea	8($dat),$dat
 	lea	($inp,$len),$inp
 	neg	$len
@@ -442,10 +279,13 @@ private_RC4_set_key:
 	xor	%r10,%r10
 	xor	%r11,%r11
 
-	mov	OPENSSL_ia32cap_P(%rip),$idx#d
-	bt	\$20,$idx#d	# RC4_CHAR?
-	jc	.Lc1stloop
-	jmp	.Lw1stloop
+	mov	PIC_GOT(OPENSSL_ia32cap_P),$idx#d
+	bt	\$20,$idx#d
+	jnc	.Lw1stloop
+	bt	\$30,$idx#d
+	setc	$ido#b
+	mov	$ido#d,260($dat)
+	jmp	.Lc1stloop
 
 .align	16
 .Lw1stloop:
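
The word/char loop pairs selected here implement the standard RC4 key schedule over int or byte state; the new code additionally records the `bt \$30` probe result at offset 260 of the key structure for the stream routine to test. Reference KSA semantics in Perl (hypothetical helper, not this file's code):

# Reference RC4 key schedule: what the .Lw1stloop/.Lc1stloop setup and
# the mixing loops that follow compute, layout differences aside.
sub rc4_set_key {
    my ($key) = @_;
    my @K = unpack("C*", $key);
    my @S = (0 .. 255);
    my $j = 0;
    for my $i (0 .. 255) {
        $j = ($j + $S[$i] + $K[$i % @K]) & 0xff;
        @S[$i, $j] = @S[$j, $i];    # swap
    }
    return \@S;                     # x and y start at 0
}
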
@@ -499,29 +339,27 @@ private_RC4_set_key:
 	mov	%eax,-8($dat)
 	mov	%eax,-4($dat)
 	ret
-.size	private_RC4_set_key,.-private_RC4_set_key
+.size	RC4_set_key,.-RC4_set_key
 
 .globl	RC4_options
 .type	RC4_options,\@abi-omnipotent
 .align	16
 RC4_options:
 	lea	.Lopts(%rip),%rax
-	mov	OPENSSL_ia32cap_P(%rip),%edx
+	mov	PIC_GOT(OPENSSL_ia32cap_P),%edx
 	bt	\$20,%edx
-	jc	.L8xchar
-	bt	\$30,%edx
 	jnc	.Ldone
-	add	\$25,%rax
-	ret
-.L8xchar:
 	add	\$12,%rax
+	bt	\$30,%edx
+	jnc	.Ldone
+	add	\$13,%rax
 .Ldone:
 	ret
 .align	64
 .Lopts:
 .asciz	"rc4(8x,int)"
 .asciz	"rc4(8x,char)"
-.asciz	"rc4(16x,int)"
+.asciz	"rc4(1x,char)"
 .asciz	"RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
 .align	64
 .size	RC4_options,.-RC4_options
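
The `add \$12` / `add \$13` arithmetic indexes into the `.asciz` strings laid out back-to-back at `.Lopts`, each occupying length+1 bytes. Checking the offsets:

# "rc4(8x,int)" is 12 bytes with its NUL, so the next string starts at
# offset 12; "rc4(8x,char)" is 13 more, putting "rc4(1x,char)" at 25.
my $off = 0;
for my $s ("rc4(8x,int)", "rc4(8x,char)", "rc4(1x,char)") {
    printf "offset %2d: %s\n", $off, $s;
    $off += length($s) + 1;     # +1 for the NUL terminator
}
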
@@ -644,32 +482,22 @@ key_se_handler:
 	.rva	.LSEH_end_RC4
 	.rva	.LSEH_info_RC4
 
-	.rva	.LSEH_begin_private_RC4_set_key
-	.rva	.LSEH_end_private_RC4_set_key
-	.rva	.LSEH_info_private_RC4_set_key
+	.rva	.LSEH_begin_RC4_set_key
+	.rva	.LSEH_end_RC4_set_key
+	.rva	.LSEH_info_RC4_set_key
 
 .section	.xdata
 .align	8
 .LSEH_info_RC4:
 	.byte	9,0,0,0
 	.rva	stream_se_handler
-.LSEH_info_private_RC4_set_key:
+.LSEH_info_RC4_set_key:
 	.byte	9,0,0,0
 	.rva	key_se_handler
 ___
 }
 
-sub reg_part {
-my ($reg,$conv)=@_;
-    if ($reg =~ /%r[0-9]+/)	{ $reg .= $conv; }
-    elsif ($conv eq "b")	{ $reg =~ s/%[er]([^x]+)x?/%$1l/; }
-    elsif ($conv eq "w")	{ $reg =~ s/%[er](.+)/%$1/; }
-    elsif ($conv eq "d")	{ $reg =~ s/%[er](.+)/%e$1/; }
-    return $reg;
-}
-
-$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
+$code =~ s/#([bwd])/$1/gm;
 $code =~ s/\`([^\`]*)\`/eval $1/gem;
-
 print $code;
 
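
The deleted `reg_part()` helper could size legacy registers as well (`%rax#b` becomes `%al`), while the surviving one-line substitution merely strips the `#`; that suffices here because sized operands are now confined to the numbered registers and `%al`/`%eax` are written out literally. Both behaviors side by side (`reg_part` reproduced from the hunk above):

# Old helper vs. new substitution on the same kind of token:
sub reg_part {
    my ($reg, $conv) = @_;
    if    ($reg =~ /%r[0-9]+/) { $reg .= $conv; }             # %r8#b  -> %r8b
    elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; }  # %rax#b -> %al
    elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; }
    elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; }
    return $reg;
}
my $old = "%rax#b"; $old =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/ge;
my $new = "%r8#b";  $new =~ s/#([bwd])/$1/g;
print "$old $new\n";    # prints "%al %r8b"
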