path: root/src/lib/libcrypto/rc4
author     djm <>  2008-09-06 12:15:56 +0000
committer  djm <>  2008-09-06 12:15:56 +0000
commit     12867252827c8efaa8ddd1fa3b3d6e321e2bcdef (patch)
tree       b7a1f167ae5aeff4cfd8a18b598b68fe98a066fd /src/lib/libcrypto/rc4
parent     f519f07de9bfb123f2b32aa3965e6f73c8364b80 (diff)
parent     5a3c0a05c7f2c5d3c584b7c8d6aec836dd724c80 (diff)
This commit was generated by cvs2git to track changes on a CVS vendor
branch.
Diffstat (limited to 'src/lib/libcrypto/rc4')
-rwxr-xr-x  src/lib/libcrypto/rc4/asm/rc4-x86_64.pl | 286
1 file changed, 250 insertions(+), 36 deletions(-)
diff --git a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
index b628daca70..2d47320485 100755
--- a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
+++ b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
@@ -2,29 +2,70 @@
 #
 # ====================================================================
 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. Rights for redistribution and usage in source and binary
-# forms are granted according to the OpenSSL license.
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
 # ====================================================================
 #
-# Unlike 0.9.7f this code expects RC4_CHAR back in config line! See
-# commentary section in corresponding script in development branch
-# for background information about this option carousel. For those
-# who don't have energy to figure out these gory details, here is
-# basis in form of performance matrix relative to the original
-# 0.9.7e C code-base:
-#
-#		0.9.7e	0.9.7f	this
-# AMD64		1x	3.3x	2.4x
-# EM64T		1x	0.8x	1.5x
-#
-# In other words idea is to trade -25% AMD64 performance to compensate
-# for deterioration and gain +90% on EM64T core. Development branch
-# maintains best performance for either target, i.e. 3.3x for AMD64
-# and 1.5x for EM64T.
+# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
+# "hand-coded assembler"] doesn't stand for the whole improvement
+# coefficient. It turned out that eliminating RC4_CHAR from config
+# line results in ~40% improvement (yes, even for C implementation).
+# Presumably it has everything to do with AMD cache architecture and
+# RAW or whatever penalties. Once again! The module *requires* config
+# line *without* RC4_CHAR! As for coding "secret," I bet on partial
+# register arithmetics. For example instead of 'inc %r8; and $255,%r8'
+# I simply 'inc %r8b'. Even though optimization manual discourages
+# to operate on partial registers, it turned out to be the best bet.
+# At least for AMD... How IA32E would perform remains to be seen...
+
+# As was shown by Marc Bevand reordering of couple of load operations
+# results in even higher performance gain of 3.3x:-) At least on
+# Opteron... For reference, 1x in this case is RC4_CHAR C-code
+# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
+# Latter means that if you want to *estimate* what to expect from
+# *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz.
+
+# Intel P4 EM64T core was found to run the AMD64 code really slow...
+# The only way to achieve comparable performance on P4 was to keep
+# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
+# compose blended code, which would perform even within 30% marginal
+# on either AMD and Intel platforms, I implement both cases. See
+# rc4_skey.c for further details...
+
+# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
+# those with add/sub results in 50% performance improvement of folded
+# loop...
+
+# As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
+# performance by >30% [unlike P4 32-bit case that is]. But this is
+# provided that loads are reordered even more aggressively! Both code
+# pathes, AMD64 and EM64T, reorder loads in essentially same manner
+# as my IA-64 implementation. On Opteron this resulted in modest 5%
+# improvement [I had to test it], while final Intel P4 performance
+# achieves respectful 432MBps on 2.8GHz processor now. For reference.
+# If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than
+# RC4_INT code-path. While if executed on Opteron, it's only 25%
+# slower than the RC4_INT one [meaning that if CPU µ-arch detection
+# is not implemented, then this final RC4_CHAR code-path should be
+# preferred, as it provides better *all-round* performance].
+
+# Intel Core2 was observed to perform poorly on both code paths:-( It
+# apparently suffers from some kind of partial register stall, which
+# occurs in 64-bit mode only [as virtually identical 32-bit loop was
+# observed to outperform 64-bit one by almost 50%]. Adding two movzb to
+# cloop1 boosts its performance by 80%! This loop appears to be optimal
+# fit for Core2 and therefore the code was modified to skip cloop8 on
+# this CPU.
 
 $output=shift;
 
-open STDOUT,">$output" || die "can't open $output: $!";
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $output";
 
 $dat="%rdi";	# arg1
 $len="%rsi";	# arg2
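The throughput estimate described in the new comment block is simple arithmetic; the short Perl sketch below just spells it out (the 2.2 GHz clock is an assumed example figure, not something taken from the commit):

    #!/usr/bin/perl
    # Estimate per the comment: ~54 MBps per 1 GHz for RC4_CHAR C code,
    # times the ~3.3x speedup of the reordered assembler, times the clock.
    my $base_mbps_per_ghz = 54;    # baseline quoted in the comment
    my $speedup           = 3.3;   # assembler vs. RC4_CHAR C code
    my $clock_ghz         = 2.2;   # assumed example Opteron clock
    printf "expected RC4 throughput: ~%.0f MBps\n",
           $base_mbps_per_ghz * $speedup * $clock_ghz;   # prints ~392 MBps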
@@ -36,29 +77,101 @@ $out="%rcx";	# arg4
 $YY="%r12";
 $TY="%r13";
 
-$code=<<___;;
+$code=<<___;
 .text
 
 .globl	RC4
-.type	RC4,\@function
+.type	RC4,\@function,4
 .align	16
 RC4:	or	$len,$len
 	jne	.Lentry
-	repret
+	ret
 .Lentry:
 	push	%r12
 	push	%r13
 
-	add	\$2,$dat
-	movzb	-2($dat),$XX[0]#d
-	movzb	-1($dat),$YY#d
+	add	\$8,$dat
+	movl	-8($dat),$XX[0]#d
+	movl	-4($dat),$YY#d
+	cmpl	\$-1,256($dat)
+	je	.LRC4_CHAR
+	inc	$XX[0]#b
+	movl	($dat,$XX[0],4),$TX[0]#d
+	test	\$-8,$len
+	jz	.Lloop1
+	jmp	.Lloop8
+.align	16
+.Lloop8:
+___
+for ($i=0;$i<8;$i++) {
+$code.=<<___;
+	add	$TX[0]#b,$YY#b
+	mov	$XX[0],$XX[1]
+	movl	($dat,$YY,4),$TY#d
+	ror	\$8,%rax	# ror is redundant when $i=0
+	inc	$XX[1]#b
+	movl	($dat,$XX[1],4),$TX[1]#d
+	cmp	$XX[1],$YY
+	movl	$TX[0]#d,($dat,$YY,4)
+	cmove	$TX[0],$TX[1]
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TX[0]#b,$TY#b
+	movb	($dat,$TY,4),%al
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+}
+$code.=<<___;
+	ror	\$8,%rax
+	sub	\$8,$len
+
+	xor	($inp),%rax
+	add	\$8,$inp
+	mov	%rax,($out)
+	add	\$8,$out
 
+	test	\$-8,$len
+	jnz	.Lloop8
+	cmp	\$0,$len
+	jne	.Lloop1
+___
+$code.=<<___;
+.Lexit:
+	sub	\$1,$XX[0]#b
+	movl	$XX[0]#d,-8($dat)
+	movl	$YY#d,-4($dat)
+
+	pop	%r13
+	pop	%r12
+	ret
+.align	16
+.Lloop1:
+	add	$TX[0]#b,$YY#b
+	movl	($dat,$YY,4),$TY#d
+	movl	$TX[0]#d,($dat,$YY,4)
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TY#b,$TX[0]#b
+	inc	$XX[0]#b
+	movl	($dat,$TX[0],4),$TY#d
+	movl	($dat,$XX[0],4),$TX[0]#d
+	xorb	($inp),$TY#b
+	inc	$inp
+	movb	$TY#b,($out)
+	inc	$out
+	dec	$len
+	jnz	.Lloop1
+	jmp	.Lexit
+
+.align	16
+.LRC4_CHAR:
 	add	\$1,$XX[0]#b
 	movzb	($dat,$XX[0]),$TX[0]#d
 	test	\$-8,$len
 	jz	.Lcloop1
+	cmp	\$0,260($dat)
+	jnz	.Lcloop1
 	push	%rbx
-.align	16	# incidentally aligned already
+	jmp	.Lcloop8
+.align	16
 .Lcloop8:
 	mov	($inp),%eax
 	mov	4($inp),%ebx
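For reference, the S-box update that the unrolled .Lloop8 path performs eight times per iteration (and .Lloop1 once per byte) is the standard RC4 byte step. The plain-Perl restatement below is only an illustrative sketch, not code from the module; it uses an ordinary 256-element array where the RC4_INT path uses 4-byte table entries (hence the ($dat,...,4) addressing above):

    # One RC4 output byte: advance x, accumulate into y, swap S[x]/S[y],
    # then XOR the input byte with S[(S[x]+S[y]) & 0xff].
    sub rc4_byte {
        my ($S, $x, $y, $in) = @_;        # $S: ref to 256-entry state; $x, $y: scalar refs
        $$x = ($$x + 1) & 0xff;           # inc $XX#b
        $$y = ($$y + $S->[$$x]) & 0xff;   # add $TX#b,$YY#b
        @$S[$$x, $$y] = @$S[$$y, $$x];    # the two movl stores swap S[x] and S[y]
        return $in ^ $S->[($S->[$$x] + $S->[$$y]) & 0xff];
    }

The cmove in the unrolled body covers the corner case where the next x index aliases y, so the value just stored is used instead of the stale one loaded earlier.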
@@ -114,15 +227,9 @@ $code.=<<___;
 	pop	%rbx
 	cmp	\$0,$len
 	jne	.Lcloop1
-.Lexit:
-	sub	\$1,$XX[0]#b
-	movb	$XX[0]#b,-2($dat)
-	movb	$YY#b,-1($dat)
-
-	pop	%r13
-	pop	%r12
-	repret
-
+	jmp	.Lexit
+___
+$code.=<<___;
 .align	16
 .Lcloop1:
 	add	$TX[0]#b,$YY#b
@@ -131,6 +238,8 @@ $code.=<<___;
131 movb $TY#b,($dat,$XX[0]) 238 movb $TY#b,($dat,$XX[0])
132 add $TX[0]#b,$TY#b 239 add $TX[0]#b,$TY#b
133 add \$1,$XX[0]#b 240 add \$1,$XX[0]#b
241 movzb $TY#b,$TY#d
242 movzb $XX[0]#b,$XX[0]#d
134 movzb ($dat,$TY),$TY#d 243 movzb ($dat,$TY),$TY#d
135 movzb ($dat,$XX[0]),$TX[0]#d 244 movzb ($dat,$XX[0]),$TX[0]#d
136 xorb ($inp),$TY#b 245 xorb ($inp),$TY#b
@@ -143,8 +252,113 @@ $code.=<<___;
 .size	RC4,.-RC4
 ___
 
-$code =~ s/#([bwd])/$1/gm;
+$idx="%r8";
+$ido="%r9";
+
+$code.=<<___;
+.extern	OPENSSL_ia32cap_P
+.globl	RC4_set_key
+.type	RC4_set_key,\@function,3
+.align	16
+RC4_set_key:
+	lea	8($dat),$dat
+	lea	($inp,$len),$inp
+	neg	$len
+	mov	$len,%rcx
+	xor	%eax,%eax
+	xor	$ido,$ido
+	xor	%r10,%r10
+	xor	%r11,%r11
 
-$code =~ s/repret/.byte\t0xF3,0xC3/gm;
+	mov	OPENSSL_ia32cap_P(%rip),$idx#d
+	bt	\$20,$idx#d
+	jnc	.Lw1stloop
+	bt	\$30,$idx#d
+	setc	$ido#b
+	mov	$ido#d,260($dat)
+	jmp	.Lc1stloop
+
+.align	16
+.Lw1stloop:
+	mov	%eax,($dat,%rax,4)
+	add	\$1,%al
+	jnc	.Lw1stloop
+
+	xor	$ido,$ido
+	xor	$idx,$idx
+.align	16
+.Lw2ndloop:
+	mov	($dat,$ido,4),%r10d
+	add	($inp,$len,1),$idx#b
+	add	%r10b,$idx#b
+	add	\$1,$len
+	mov	($dat,$idx,4),%r11d
+	cmovz	%rcx,$len
+	mov	%r10d,($dat,$idx,4)
+	mov	%r11d,($dat,$ido,4)
+	add	\$1,$ido#b
+	jnc	.Lw2ndloop
+	jmp	.Lexit_key
+
+.align	16
+.Lc1stloop:
+	mov	%al,($dat,%rax)
+	add	\$1,%al
+	jnc	.Lc1stloop
+
+	xor	$ido,$ido
+	xor	$idx,$idx
+.align	16
+.Lc2ndloop:
+	mov	($dat,$ido),%r10b
+	add	($inp,$len),$idx#b
+	add	%r10b,$idx#b
+	add	\$1,$len
+	mov	($dat,$idx),%r11b
+	jnz	.Lcnowrap
+	mov	%rcx,$len
+.Lcnowrap:
+	mov	%r10b,($dat,$idx)
+	mov	%r11b,($dat,$ido)
+	add	\$1,$ido#b
+	jnc	.Lc2ndloop
+	movl	\$-1,256($dat)
+
+.align	16
+.Lexit_key:
+	xor	%eax,%eax
+	mov	%eax,-8($dat)
+	mov	%eax,-4($dat)
+	ret
+.size	RC4_set_key,.-RC4_set_key
+
+.globl	RC4_options
+.type	RC4_options,\@function,0
+.align	16
+RC4_options:
+	.picmeup %rax
+	lea	.Lopts-.(%rax),%rax
+	mov	OPENSSL_ia32cap_P(%rip),%edx
+	bt	\$20,%edx
+	jnc	.Ldone
+	add	\$12,%rax
+	bt	\$30,%edx
+	jnc	.Ldone
+	add	\$13,%rax
+.Ldone:
+	ret
+.align	64
+.Lopts:
+.asciz	"rc4(8x,int)"
+.asciz	"rc4(8x,char)"
+.asciz	"rc4(1x,char)"
+.asciz	"RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+.size	RC4_options,.-RC4_options
+___
+
+$code =~ s/#([bwd])/$1/gm;
 
 print $code;
+
+close STDOUT;
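The added RC4_set_key fills the table with the identity permutation (.Lw1stloop/.Lc1stloop) and then runs the usual RC4 key-scheduling swap pass (.Lw2ndloop/.Lc2ndloop); the two paths differ only in whether table entries are 4-byte or 1-byte, chosen at run time from OPENSSL_ia32cap_P and recorded at 256($dat)/260($dat) for RC4 and RC4_options to inspect. A plain-Perl sketch of that schedule, purely for illustration and not code from the module:

    # Standard RC4 key schedule over a 256-entry state array.
    sub rc4_set_key {
        my ($key) = @_;                  # key as a byte string
        my @S = (0 .. 255);              # identity fill (.Lw1stloop / .Lc1stloop)
        my ($j, $l) = (0, length $key);
        for my $i (0 .. 255) {           # swap pass (.Lw2ndloop / .Lc2ndloop)
            $j = ($j + $S[$i] + ord substr($key, $i % $l, 1)) & 0xff;
            @S[$i, $j] = @S[$j, $i];     # swap S[i] and S[j]
        }
        return \@S;                      # x and y start at 0, cf. -8($dat)/-4($dat)
    }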