path: root/src/lib/libcrypto/rc4/asm
Diffstat (limited to 'src/lib/libcrypto/rc4/asm')
-rw-r--r--	src/lib/libcrypto/rc4/asm/rc4-586.pl	230
-rwxr-xr-x	src/lib/libcrypto/rc4/asm/rc4-x86_64.pl	365
2 files changed, 0 insertions, 595 deletions
diff --git a/src/lib/libcrypto/rc4/asm/rc4-586.pl b/src/lib/libcrypto/rc4/asm/rc4-586.pl
deleted file mode 100644
index ef7eee766c..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-586.pl
+++ /dev/null
@@ -1,230 +0,0 @@
#!/usr/local/bin/perl

# At some point it became apparent that the original SSLeay RC4
# assembler implementation performs suboptimally on the latest IA-32
# microarchitectures. After re-tuning, performance changed as
# follows:
#
#	Pentium		+0%
#	Pentium III	+17%
#	AMD		+52%(*)
#	P4		+180%(**)
#
# (*)	This number is actually a trade-off:-) It's possible to
#	achieve +72%, but at the cost of -48% off PIII performance.
#	In other words, code performing a further 13% faster on AMD
#	would perform almost 2 times slower on Intel PIII...
#	For reference! This code delivers ~80% of rc4-amd64.pl
#	performance on the same Opteron machine.
# (**)	This number requires a compressed key schedule set up by
#	RC4_set_key and therefore doesn't apply to 0.9.7 [the option for
#	a compressed key schedule is implemented in 0.9.8 and later;
#	see the commentary section in rc4_skey.c for further details].
#
# <appro@fy.chalmers.se>

push(@INC,"perlasm","../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"rc4-586.pl");

$x="eax";
$y="ebx";
$tx="ecx";
$ty="edx";
$in="esi";
$out="edi";
$d="ebp";

&RC4("RC4");

&asm_finish();

sub RC4_loop
	{
	local($n,$p,$char)=@_;

	&comment("Round $n");

	if ($char)
		{
		if ($p >= 0)
			{
			&mov($ty, &swtmp(2));
			&cmp($ty, $in);
			&jbe(&label("finished"));
			&inc($in);
			}
		else
			{
			&add($ty, 8);
			&inc($in);
			&cmp($ty, $in);
			&jb(&label("finished"));
			&mov(&swtmp(2), $ty);
			}
		}
	# Moved out
	# &mov( $tx, &DWP(0,$d,$x,4)) if $p < 0;

	&add( &LB($y), &LB($tx));
	&mov( $ty, &DWP(0,$d,$y,4));
	# XXX
	&mov( &DWP(0,$d,$x,4),$ty);
	&add( $ty, $tx);
	&mov( &DWP(0,$d,$y,4),$tx);
	&and( $ty, 0xff);
	&inc( &LB($x));			# NEXT ROUND
	&mov( $tx, &DWP(0,$d,$x,4)) if $p < 1; # NEXT ROUND
	&mov( $ty, &DWP(0,$d,$ty,4));

	if (!$char)
		{
		# moved up into last round
		if ($p >= 1)
			{
			&add( $out, 8)
			}
		&movb( &BP($n,"esp","",0), &LB($ty));
		}
	else
		{
		# Note in+=8 has occurred
		&movb( &HB($ty), &BP(-1,$in,"",0));
		# XXX
		&xorb(&LB($ty), &HB($ty));
		# XXX
		&movb(&BP($n,$out,"",0),&LB($ty));
		}
	}


sub RC4
	{
	local($name)=@_;

	&function_begin_B($name,"");

	&mov($ty,&wparam(1));		# len
	&cmp($ty,0);
	&jne(&label("proceed"));
	&ret();
	&set_label("proceed");

	&comment("");

	&push("ebp");
	&push("ebx");
	&push("esi");
	&xor( $x, $x);			# avoid partial register stalls
	&push("edi");
	&xor( $y, $y);			# avoid partial register stalls
	&mov( $d, &wparam(0));		# key
	&mov( $in, &wparam(2));

	&movb( &LB($x), &BP(0,$d,"",1));
	&movb( &LB($y), &BP(4,$d,"",1));

	&mov( $out, &wparam(3));
	&inc( &LB($x));

	&stack_push(3);			# 3 temp variables
	&add( $d, 8);

	# detect compressed schedule, see commentary section in rc4_skey.c...
	# in 0.9.7 context ~50 bytes below RC4_CHAR label remain redundant,
	# as compressed key schedule is set up in 0.9.8 and later.
	&cmp(&DWP(256,$d),-1);
	&je(&label("RC4_CHAR"));

	&lea( $ty, &DWP(-8,$ty,$in));

	# check for 0 length input

	&mov( &swtmp(2), $ty);		# this is now address to exit at
	&mov( $tx, &DWP(0,$d,$x,4));

	&cmp( $ty, $in);
	&jb( &label("end"));		# less than 8 bytes

	&set_label("start");

	# filling DELAY SLOT
	&add( $in, 8);

	&RC4_loop(0,-1,0);
	&RC4_loop(1,0,0);
	&RC4_loop(2,0,0);
	&RC4_loop(3,0,0);
	&RC4_loop(4,0,0);
	&RC4_loop(5,0,0);
	&RC4_loop(6,0,0);
	&RC4_loop(7,1,0);

	&comment("apply the cipher text");
	# xor the cipher data with input

	#&add( $out, 8); #moved up into last round

	&mov( $tx, &swtmp(0));
	&mov( $ty, &DWP(-8,$in,"",0));
	&xor( $tx, $ty);
	&mov( $ty, &DWP(-4,$in,"",0));
	&mov( &DWP(-8,$out,"",0), $tx);
	&mov( $tx, &swtmp(1));
	&xor( $tx, $ty);
	&mov( $ty, &swtmp(2));		# load end ptr;
	&mov( &DWP(-4,$out,"",0), $tx);
	&mov( $tx, &DWP(0,$d,$x,4));
	&cmp($in, $ty);
	&jbe(&label("start"));

	&set_label("end");

	# There is quite a bit of extra crap in RC4_loop() for this
	# first round
	&RC4_loop(0,-1,1);
	&RC4_loop(1,0,1);
	&RC4_loop(2,0,1);
	&RC4_loop(3,0,1);
	&RC4_loop(4,0,1);
	&RC4_loop(5,0,1);
	&RC4_loop(6,1,1);

	&jmp(&label("finished"));

	&align(16);
	# this is essentially Intel P4 specific codepath, see rc4_skey.c,
	# and is engaged in 0.9.8 and later context...
	&set_label("RC4_CHAR");

	&lea ($ty,&DWP(0,$in,$ty));
	&mov (&swtmp(2),$ty);
	&movz ($tx,&BP(0,$d,$x));

	# strangely enough unrolled loop performs over 20% slower...
	&set_label("RC4_CHAR_loop");
	&add (&LB($y),&LB($tx));
	&movz ($ty,&BP(0,$d,$y));
	&movb (&BP(0,$d,$y),&LB($tx));
	&movb (&BP(0,$d,$x),&LB($ty));
	&add (&LB($ty),&LB($tx));
	&movz ($ty,&BP(0,$d,$ty));
	&add (&LB($x),1);
	&xorb (&LB($ty),&BP(0,$in));
	&lea ($in,&DWP(1,$in));
	&movz ($tx,&BP(0,$d,$x));
	&cmp ($in,&swtmp(2));
	&movb (&BP(0,$out),&LB($ty));
	&lea ($out,&DWP(1,$out));
	&jb (&label("RC4_CHAR_loop"));

	&set_label("finished");
	&dec( $x);
	&stack_pop(3);
	&movb( &BP(-4,$d,"",0),&LB($y));
	&movb( &BP(-8,$d,"",0),&LB($x));

	&function_end($name);
	}
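For readers who want to check the register shuffling above against the cipher itself: each call to RC4_loop computes one round of the standard RC4 PRGA. The following is a minimal plain-Perl sketch of that round; it is an illustration only, not taken from the deleted file, and the hypothetical rc4_prga_round, $S, $x and $y simply stand for the 256-entry state and the two indices kept at the head of the RC4_KEY structure.

sub rc4_prga_round {
	my ($S, $x, $y, $byte) = @_;	# $S: reference to the 256-element state array
	$x = ($x + 1) & 0xff;		# x = x + 1
	$y = ($y + $S->[$x]) & 0xff;	# y = y + S[x]
	@$S[$x, $y] = @$S[$y, $x];	# swap S[x] and S[y]
	my $k = $S->[($S->[$x] + $S->[$y]) & 0xff];
	return ($x, $y, $byte ^ $k);	# keystream byte XORed into the input byte
}
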
diff --git a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
deleted file mode 100755
index 3a54623495..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
+++ /dev/null
@@ -1,365 +0,0 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't stand for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from the config
# line results in ~40% improvement (yes, even for the C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* a config
# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
# I simply 'inc %r8b'. Even though the optimization manual discourages
# operating on partial registers, it turned out to be the best bet.
# At least for AMD... How IA32E would perform remains to be seen...

# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# The latter means that if you want to *estimate* what to expect from
# *your* Opteron, multiply 54 by 3.3 and by the clock frequency in GHz.

# The Intel P4 EM64T core was found to run the AMD64 code really slowly...
# The only way to achieve comparable performance on P4 was to keep
# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
# compose blended code that would perform within a 30% margin on
# both AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details...

# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
# those with add/sub results in a 50% performance improvement of the
# folded loop...

# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
# performance by >30% [unlike the P4 32-bit case, that is]. But this is
# provided that loads are reordered even more aggressively! Both code
# paths, AMD64 and EM64T, reorder loads in essentially the same manner
# as my IA-64 implementation. On Opteron this resulted in a modest 5%
# improvement [I had to test it], while final Intel P4 performance
# now achieves a respectable 432MBps on a 2.8GHz processor. For reference:
# if executed on Xeon, the current RC4_CHAR code path is 2.7x faster than
# the RC4_INT code path, while if executed on Opteron, it's only 25%
# slower than the RC4_INT one [meaning that if CPU µ-arch detection
# is not implemented, then this final RC4_CHAR code path should be
# preferred, as it provides better *all-round* performance].

# Intel Core2 was observed to perform poorly on both code paths:-( It
# apparently suffers from some kind of partial register stall, which
# occurs in 64-bit mode only [a virtually identical 32-bit loop was
# observed to outperform the 64-bit one by almost 50%]. Adding two movzb
# to cloop1 boosts its performance by 80%! This loop appears to be an
# optimal fit for Core2, and the code was therefore modified to skip
# cloop8 on this CPU.

$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open STDOUT,"| $^X $xlate $output";

$dat="%rdi";	# arg1
$len="%rsi";	# arg2
$inp="%rdx";	# arg3
$out="%rcx";	# arg4

@XX=("%r8","%r10");
@TX=("%r9","%r11");
$YY="%r12";
$TY="%r13";

$code=<<___;
.text

.globl	RC4
.type	RC4,\@function,4
.align	16
RC4:	or	$len,$len
	jne	.Lentry
	ret
.Lentry:
	push	%r12
	push	%r13

	add	\$8,$dat
	movl	-8($dat),$XX[0]#d
	movl	-4($dat),$YY#d
	cmpl	\$-1,256($dat)
	je	.LRC4_CHAR
	inc	$XX[0]#b
	movl	($dat,$XX[0],4),$TX[0]#d
	test	\$-8,$len
	jz	.Lloop1
	jmp	.Lloop8
.align	16
.Lloop8:
___
for ($i=0;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	mov	$XX[0],$XX[1]
	movl	($dat,$YY,4),$TY#d
	ror	\$8,%rax		# ror is redundant when $i=0
	inc	$XX[1]#b
	movl	($dat,$XX[1],4),$TX[1]#d
	cmp	$XX[1],$YY
	movl	$TX[0]#d,($dat,$YY,4)
	cmove	$TX[0],$TX[1]
	movl	$TY#d,($dat,$XX[0],4)
	add	$TX[0]#b,$TY#b
	movb	($dat,$TY,4),%al
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
$code.=<<___;
	ror	\$8,%rax
	sub	\$8,$len

	xor	($inp),%rax
	add	\$8,$inp
	mov	%rax,($out)
	add	\$8,$out

	test	\$-8,$len
	jnz	.Lloop8
	cmp	\$0,$len
	jne	.Lloop1
___
$code.=<<___;
.Lexit:
	sub	\$1,$XX[0]#b
	movl	$XX[0]#d,-8($dat)
	movl	$YY#d,-4($dat)

	pop	%r13
	pop	%r12
	ret
.align	16
.Lloop1:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	inc	$inp
	movb	$TY#b,($out)
	inc	$out
	dec	$len
	jnz	.Lloop1
	jmp	.Lexit

.align	16
.LRC4_CHAR:
	add	\$1,$XX[0]#b
	movzb	($dat,$XX[0]),$TX[0]#d
	test	\$-8,$len
	jz	.Lcloop1
	cmp	\$0,260($dat)
	jnz	.Lcloop1
	push	%rbx
	jmp	.Lcloop8
.align	16
.Lcloop8:
	mov	($inp),%eax
	mov	4($inp),%ebx
___
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
for ($i=0;$i<4;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i		# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%al
	ror	\$8,%eax
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
for ($i=4;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i		# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%bl
	ror	\$8,%ebx
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
$code.=<<___;
	lea	-8($len),$len
	mov	%eax,($out)
	lea	8($inp),$inp
	mov	%ebx,4($out)
	lea	8($out),$out

	test	\$-8,$len
	jnz	.Lcloop8
	pop	%rbx
	cmp	\$0,$len
	jne	.Lcloop1
	jmp	.Lexit
___
$code.=<<___;
.align	16
.Lcloop1:
	add	$TX[0]#b,$YY#b
	movzb	($dat,$YY),$TY#d
	movb	$TX[0]#b,($dat,$YY)
	movb	$TY#b,($dat,$XX[0])
	add	$TX[0]#b,$TY#b
	add	\$1,$XX[0]#b
	movzb	$TY#b,$TY#d
	movzb	$XX[0]#b,$XX[0]#d
	movzb	($dat,$TY),$TY#d
	movzb	($dat,$XX[0]),$TX[0]#d
	xorb	($inp),$TY#b
	lea	1($inp),$inp
	movb	$TY#b,($out)
	lea	1($out),$out
	sub	\$1,$len
	jnz	.Lcloop1
	jmp	.Lexit
.size	RC4,.-RC4
___

$idx="%r8";
$ido="%r9";

$code.=<<___;
.extern	OPENSSL_ia32cap_P
.globl	RC4_set_key
.type	RC4_set_key,\@function,3
.align	16
RC4_set_key:
	lea	8($dat),$dat
	lea	($inp,$len),$inp
	neg	$len
	mov	$len,%rcx
	xor	%eax,%eax
	xor	$ido,$ido
	xor	%r10,%r10
	xor	%r11,%r11
	mov	PIC_GOT(OPENSSL_ia32cap_P),$idx#d
	bt	\$20,$idx#d
	jnc	.Lw1stloop
	bt	\$30,$idx#d
	setc	$ido#b
	mov	$ido#d,260($dat)
	jmp	.Lc1stloop

.align	16
.Lw1stloop:
	mov	%eax,($dat,%rax,4)
	add	\$1,%al
	jnc	.Lw1stloop

	xor	$ido,$ido
	xor	$idx,$idx
.align	16
.Lw2ndloop:
	mov	($dat,$ido,4),%r10d
	add	($inp,$len,1),$idx#b
	add	%r10b,$idx#b
	add	\$1,$len
	mov	($dat,$idx,4),%r11d
	cmovz	%rcx,$len
	mov	%r10d,($dat,$idx,4)
	mov	%r11d,($dat,$ido,4)
	add	\$1,$ido#b
	jnc	.Lw2ndloop
	jmp	.Lexit_key

.align	16
.Lc1stloop:
	mov	%al,($dat,%rax)
	add	\$1,%al
	jnc	.Lc1stloop

	xor	$ido,$ido
	xor	$idx,$idx
.align	16
.Lc2ndloop:
	mov	($dat,$ido),%r10b
	add	($inp,$len),$idx#b
	add	%r10b,$idx#b
	add	\$1,$len
	mov	($dat,$idx),%r11b
	jnz	.Lcnowrap
	mov	%rcx,$len
.Lcnowrap:
	mov	%r10b,($dat,$idx)
	mov	%r11b,($dat,$ido)
	add	\$1,$ido#b
	jnc	.Lc2ndloop
	movl	\$-1,256($dat)

.align	16
.Lexit_key:
	xor	%eax,%eax
	mov	%eax,-8($dat)
	mov	%eax,-4($dat)
	ret
.size	RC4_set_key,.-RC4_set_key

.globl	RC4_options
.type	RC4_options,\@function,0
.align	16
RC4_options:
	.picmeup %rax
	lea	.Lopts-.(%rax),%rax
	mov	PIC_GOT(OPENSSL_ia32cap_P),%edx
	bt	\$20,%edx
	jnc	.Ldone
	add	\$12,%rax
	bt	\$30,%edx
	jnc	.Ldone
	add	\$13,%rax
.Ldone:
	ret
.align	64
.Lopts:
.asciz	"rc4(8x,int)"
.asciz	"rc4(8x,char)"
.asciz	"rc4(1x,char)"
.asciz	"RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
.size	RC4_options,.-RC4_options
___

$code =~ s/#([bwd])/$1/gm;

$code =~ s/RC4_set_key/private_RC4_set_key/g if ($ENV{FIPSCANLIB} ne "");

print $code;

close STDOUT;
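
For reference, the .Lw1stloop/.Lw2ndloop pair in RC4_set_key above is the textbook RC4 key schedule over the "int" (4-byte-per-entry) layout; .Lc1stloop/.Lc2ndloop does the same over the byte-per-entry layout and marks it by storing -1 at offset 256, which is what the RC4 entry point above (and the rc4-586.pl path) tests for. A minimal plain-Perl sketch of that key schedule follows, as an illustration only: the rc4_set_key helper below is hypothetical and not part of either deleted file.

sub rc4_set_key {
	my ($key) = @_;			# key bytes, passed as a string
	my @K = unpack("C*", $key);	# key as a list of byte values
	my @S = (0 .. 255);		# first loop: identity permutation
	my $j = 0;
	for my $i (0 .. 255) {		# second loop: mix the key into the state
		$j = ($j + $S[$i] + $K[$i % @K]) & 0xff;
		@S[$i, $j] = @S[$j, $i];	# swap S[i] and S[j]
	}
	return \@S;			# indices x and y start out at 0
}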