path: root/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
Diffstat (limited to 'src/lib/libcrypto/rc4/asm/rc4-x86_64.pl')
-rwxr-xr-x  src/lib/libcrypto/rc4/asm/rc4-x86_64.pl  363
1 file changed, 363 insertions, 0 deletions
diff --git a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
new file mode 100755
index 0000000000..92c52f3433
--- /dev/null
+++ b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
@@ -0,0 +1,363 @@
1#!/usr/bin/env perl
2#
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9#
10# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
11# "hand-coded assembler"] doesn't account for the whole improvement factor.
12# It turned out that eliminating RC4_CHAR from the config line results in
13# ~40% improvement (yes, even for the C implementation). Presumably it has
14# everything to do with AMD cache architecture and RAW (read-after-write)
15# or similar penalties. Once again: the module *requires* a config line
16# *without* RC4_CHAR! As for the coding "secret," I bet on partial register
17# arithmetic. For example, instead of 'inc %r8; and $255,%r8' I simply use
18# 'inc %r8b'. Even though the optimization manual discourages operating on
19# partial registers, it turned out to be the best bet. At least for AMD...
20# How IA32E would perform remains to be seen...
21
22# As was shown by Marc Bevand, reordering a couple of load operations
23# results in an even higher performance gain of 3.3x:-) At least on
24# Opteron... For reference, 1x in this case is RC4_CHAR C code compiled
25# with gcc 3.3.2, which performs at ~54MBps per 1GHz clock. The latter
26# means that if you want to *estimate* what to expect from *your*
27# Opteron, multiply 54 by 3.3 and by the clock frequency in GHz.
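# (Illustrative arithmetic only, added as a worked example: by that rule of
# thumb a 2.2GHz Opteron would be expected to manage about 54*3.3*2.2 =
# 392MBps.)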
28
29# The Intel P4 EM64T core was found to run the AMD64 code really
30# slowly... The only way to achieve comparable performance on P4 was to
31# keep RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
32# compose blended code that would perform within even a 30% margin on
33# both AMD and Intel platforms, I implement both cases. See rc4_skey.c
34# for further details...
35
36# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec.
37# Replacing those with add/sub results in a 50% performance improvement
38# of the folded loop...
39
40# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
41# performance by >30% [unlike the P4 32-bit case, that is]. But this is
42# provided that loads are reordered even more aggressively! Both code
43# paths, AMD64 and EM64T, reorder loads in essentially the same manner
44# as my IA-64 implementation. On Opteron this resulted in a modest 5%
45# improvement [I had to test it], while the final Intel P4 performance
46# now achieves a respectable 432MBps on a 2.8GHz processor. For
47# reference: if executed on Xeon, the current RC4_CHAR code path is
48# 2.7x faster than the RC4_INT code path, while if executed on Opteron
49# it's only 25% slower than the RC4_INT one [meaning that if CPU µ-arch
50# detection is not implemented, then this final RC4_CHAR code path
51# should be preferred, as it provides better *all-round* performance].
52
53# Intel Core2 was observed to perform poorly on both code paths:-( It
54# apparently suffers from some kind of partial register stall, which
55# occurs in 64-bit mode only [a virtually identical 32-bit loop was
56# observed to outperform the 64-bit one by almost 50%]. Adding two
57# movzb to cloop1 boosts its performance by 80%! That loop appears to
58# be an optimal fit for Core2, and the code was therefore modified to
59# skip cloop8 on this CPU.
60
61$output=shift;
62
63$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
64( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
65( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
66die "can't locate x86_64-xlate.pl";
67
68open STDOUT,"| $^X $xlate $output";
69
70$dat="%rdi"; # arg1
71$len="%rsi"; # arg2
72$inp="%rdx"; # arg3
73$out="%rcx"; # arg4
74
75@XX=("%r8","%r10");
76@TX=("%r9","%r11");
77$YY="%r12";
78$TY="%r13";
79
80$code=<<___;
81.text
82
83.globl RC4
84.type RC4,\@function,4
85.align 16
86RC4: or $len,$len
87 jne .Lentry
88 ret
89.Lentry:
90 push %r12
91 push %r13
92
93 add \$8,$dat
94 movl -8($dat),$XX[0]#d
95 movl -4($dat),$YY#d
96 cmpl \$-1,256($dat)
97 je .LRC4_CHAR
98 inc $XX[0]#b
99 movl ($dat,$XX[0],4),$TX[0]#d
100 test \$-8,$len
101 jz .Lloop1
102 jmp .Lloop8
103.align 16
104.Lloop8:
105___
106for ($i=0;$i<8;$i++) {
107$code.=<<___;
108 add $TX[0]#b,$YY#b
109 mov $XX[0],$XX[1]
110 movl ($dat,$YY,4),$TY#d
111 ror \$8,%rax # ror is redundant when $i=0
112 inc $XX[1]#b
113 movl ($dat,$XX[1],4),$TX[1]#d
114 cmp $XX[1],$YY
115 movl $TX[0]#d,($dat,$YY,4)
116 cmove $TX[0],$TX[1]
117 movl $TY#d,($dat,$XX[0],4)
118 add $TX[0]#b,$TY#b
119 movb ($dat,$TY,4),%al
120___
121push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
122}
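# For orientation, a minimal pure-Perl reference of the PRGA rounds that the
# unrolled block above emits (a sketch only; rc4_prga_ref is a hypothetical
# helper added for illustration and is never called by this generator).
# Each of the eight assembler rounds performs one iteration of this loop,
# with x in $XX, y in $YY and S[x]/S[y] cached in $TX/$TY; the assembler
# collects eight keystream bytes in %rax and xors them against the input
# eight at a time, whereas this sketch xors byte by byte.
sub rc4_prga_ref {
    my ($S,$x,$y,$in) = @_;     # $S: ref to 256-entry array, $in: byte string
    my $out = "";
    for my $byte (unpack("C*",$in)) {
        $x = ($x+1)&0xff;       # the asm gets this wrap for free via 'inc %r8b'
        my $tx = $S->[$x];
        $y = ($y+$tx)&0xff;
        my $ty = $S->[$y];
        $S->[$y] = $tx;         # swap S[x] and S[y]
        $S->[$x] = $ty;
        $out .= chr($S->[($tx+$ty)&0xff] ^ $byte);
    }
    return ($out,$x,$y);
}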
123$code.=<<___;
124 ror \$8,%rax
125 sub \$8,$len
126
127 xor ($inp),%rax
128 add \$8,$inp
129 mov %rax,($out)
130 add \$8,$out
131
132 test \$-8,$len
133 jnz .Lloop8
134 cmp \$0,$len
135 jne .Lloop1
136___
137$code.=<<___;
138.Lexit:
139 sub \$1,$XX[0]#b
140 movl $XX[0]#d,-8($dat)
141 movl $YY#d,-4($dat)
142
143 pop %r13
144 pop %r12
145 ret
146.align 16
147.Lloop1:
148 add $TX[0]#b,$YY#b
149 movl ($dat,$YY,4),$TY#d
150 movl $TX[0]#d,($dat,$YY,4)
151 movl $TY#d,($dat,$XX[0],4)
152 add $TY#b,$TX[0]#b
153 inc $XX[0]#b
154 movl ($dat,$TX[0],4),$TY#d
155 movl ($dat,$XX[0],4),$TX[0]#d
156 xorb ($inp),$TY#b
157 inc $inp
158 movb $TY#b,($out)
159 inc $out
160 dec $len
161 jnz .Lloop1
162 jmp .Lexit
163
164.align 16
165.LRC4_CHAR:
166 add \$1,$XX[0]#b
167 movzb ($dat,$XX[0]),$TX[0]#d
168 test \$-8,$len
169 jz .Lcloop1
170 cmp \$0,260($dat)
171 jnz .Lcloop1
172 push %rbx
173 jmp .Lcloop8
174.align 16
175.Lcloop8:
176 mov ($inp),%eax
177 mov 4($inp),%ebx
178___
179# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
180for ($i=0;$i<4;$i++) {
181$code.=<<___;
182 add $TX[0]#b,$YY#b
183 lea 1($XX[0]),$XX[1]
184 movzb ($dat,$YY),$TY#d
185 movzb $XX[1]#b,$XX[1]#d
186 movzb ($dat,$XX[1]),$TX[1]#d
187 movb $TX[0]#b,($dat,$YY)
188 cmp $XX[1],$YY
189 movb $TY#b,($dat,$XX[0])
190 jne .Lcmov$i # Intel cmov is sloooow...
191 mov $TX[0],$TX[1]
192.Lcmov$i:
193 add $TX[0]#b,$TY#b
194 xor ($dat,$TY),%al
195 ror \$8,%eax
196___
197push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
198}
199for ($i=4;$i<8;$i++) {
200$code.=<<___;
201 add $TX[0]#b,$YY#b
202 lea 1($XX[0]),$XX[1]
203 movzb ($dat,$YY),$TY#d
204 movzb $XX[1]#b,$XX[1]#d
205 movzb ($dat,$XX[1]),$TX[1]#d
206 movb $TX[0]#b,($dat,$YY)
207 cmp $XX[1],$YY
208 movb $TY#b,($dat,$XX[0])
209 jne .Lcmov$i # Intel cmov is sloooow...
210 mov $TX[0],$TX[1]
211.Lcmov$i:
212 add $TX[0]#b,$TY#b
213 xor ($dat,$TY),%bl
214 ror \$8,%ebx
215___
216push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
217}
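# (Orientation note added for readability: the eight input bytes are loaded
# into %eax/%ebx up front, and each round xors one keystream byte into the
# low byte of its 32-bit accumulator and rotates it by 8; two 32-bit
# accumulators are used because, as noted above, 64-bit rotates hurt the P4.
# The cmp/jne/mov sequence around .Lcmov$i stands in for a cmov: when x+1
# happens to equal y, the store to S[y] has just overwritten S[x+1], so
# $TX[1] must be refreshed from $TX[0]; a branch is used since cmov is slow
# on this core.)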
218$code.=<<___;
219 lea -8($len),$len
220 mov %eax,($out)
221 lea 8($inp),$inp
222 mov %ebx,4($out)
223 lea 8($out),$out
224
225 test \$-8,$len
226 jnz .Lcloop8
227 pop %rbx
228 cmp \$0,$len
229 jne .Lcloop1
230 jmp .Lexit
231___
232$code.=<<___;
233.align 16
234.Lcloop1:
235 add $TX[0]#b,$YY#b
236 movzb ($dat,$YY),$TY#d
237 movb $TX[0]#b,($dat,$YY)
238 movb $TY#b,($dat,$XX[0])
239 add $TX[0]#b,$TY#b
240 add \$1,$XX[0]#b
241 movzb $TY#b,$TY#d
242 movzb $XX[0]#b,$XX[0]#d
243 movzb ($dat,$TY),$TY#d
244 movzb ($dat,$XX[0]),$TX[0]#d
245 xorb ($inp),$TY#b
246 lea 1($inp),$inp
247 movb $TY#b,($out)
248 lea 1($out),$out
249 sub \$1,$len
250 jnz .Lcloop1
251 jmp .Lexit
252.size RC4,.-RC4
253___
254
255$idx="%r8";
256$ido="%r9";
257
258$code.=<<___;
259.extern OPENSSL_ia32cap_P
260.globl RC4_set_key
261.type RC4_set_key,\@function,3
262.align 16
263RC4_set_key:
264 lea 8($dat),$dat
265 lea ($inp,$len),$inp
266 neg $len
267 mov $len,%rcx
268 xor %eax,%eax
269 xor $ido,$ido
270 xor %r10,%r10
271 xor %r11,%r11
272 mov PIC_GOT(OPENSSL_ia32cap_P),$idx#d
273 bt \$20,$idx#d
274 jnc .Lw1stloop
275 bt \$30,$idx#d
276 setc $ido#b
277 mov $ido#d,260($dat)
278 jmp .Lc1stloop
279
280.align 16
281.Lw1stloop:
282 mov %eax,($dat,%rax,4)
283 add \$1,%al
284 jnc .Lw1stloop
285
286 xor $ido,$ido
287 xor $idx,$idx
288.align 16
289.Lw2ndloop:
290 mov ($dat,$ido,4),%r10d
291 add ($inp,$len,1),$idx#b
292 add %r10b,$idx#b
293 add \$1,$len
294 mov ($dat,$idx,4),%r11d
295 cmovz %rcx,$len
296 mov %r10d,($dat,$idx,4)
297 mov %r11d,($dat,$ido,4)
298 add \$1,$ido#b
299 jnc .Lw2ndloop
300 jmp .Lexit_key
301
302.align 16
303.Lc1stloop:
304 mov %al,($dat,%rax)
305 add \$1,%al
306 jnc .Lc1stloop
307
308 xor $ido,$ido
309 xor $idx,$idx
310.align 16
311.Lc2ndloop:
312 mov ($dat,$ido),%r10b
313 add ($inp,$len),$idx#b
314 add %r10b,$idx#b
315 add \$1,$len
316 mov ($dat,$idx),%r11b
317 jnz .Lcnowrap
318 mov %rcx,$len
319.Lcnowrap:
320 mov %r10b,($dat,$idx)
321 mov %r11b,($dat,$ido)
322 add \$1,$ido#b
323 jnc .Lc2ndloop
324 movl \$-1,256($dat)
325
326.align 16
327.Lexit_key:
328 xor %eax,%eax
329 mov %eax,-8($dat)
330 mov %eax,-4($dat)
331 ret
332.size RC4_set_key,.-RC4_set_key
333
334.globl RC4_options
335.type RC4_options,\@function,0
336.align 16
337RC4_options:
338 .picmeup %rax
339 lea .Lopts-.(%rax),%rax
340 mov PIC_GOT(OPENSSL_ia32cap_P),%edx
341 bt \$20,%edx
342 jnc .Ldone
343 add \$12,%rax
344 bt \$30,%edx
345 jnc .Ldone
346 add \$13,%rax
347.Ldone:
348 ret
349.align 64
350.Lopts:
351.asciz "rc4(8x,int)"
352.asciz "rc4(8x,char)"
353.asciz "rc4(1x,char)"
354.asciz "RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
355.align 64
356.size RC4_options,.-RC4_options
357___
358
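# strip the '#' from the #b/#w/#d register annotations used above, so that
# e.g. "$XX[0]#b" (i.e. "%r8#b") becomes the byte register "%r8b" and
# "$YY#d" becomes "%r12d"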
359$code =~ s/#([bwd])/$1/gm;
360
361print $code;
362
363close STDOUT;