Diffstat (limited to 'src/lib/libcrypto/rc4')
-rw-r--r--   src/lib/libcrypto/rc4/asm/rc4-586.pl    | 230
-rwxr-xr-x   src/lib/libcrypto/rc4/asm/rc4-x86_64.pl | 365
-rw-r--r--   src/lib/libcrypto/rc4/rc4.h             |  90
-rw-r--r--   src/lib/libcrypto/rc4/rc4_enc.c         | 315
-rw-r--r--   src/lib/libcrypto/rc4/rc4_locl.h        |   5
-rw-r--r--   src/lib/libcrypto/rc4/rc4_skey.c        | 165
6 files changed, 0 insertions, 1170 deletions
diff --git a/src/lib/libcrypto/rc4/asm/rc4-586.pl b/src/lib/libcrypto/rc4/asm/rc4-586.pl
deleted file mode 100644
index ef7eee766c..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-586.pl
+++ /dev/null
@@ -1,230 +0,0 @@
1#!/usr/local/bin/perl
2
3# At some point it became apparent that the original SSLeay RC4
4# assembler implementation performs suboptimaly on latest IA-32
5# microarchitectures. After re-tuning performance has changed as
6# following:
7#
8# Pentium +0%
9# Pentium III +17%
10# AMD +52%(*)
11# P4 +180%(**)
12#
13# (*) This number is actually a trade-off:-) It's possible to
14# achieve +72%, but at the cost of -48% off PIII performance.
15# In other words code performing further 13% faster on AMD
16# would perform almost 2 times slower on Intel PIII...
17# For reference! This code delivers ~80% of rc4-amd64.pl
18# performance on the same Opteron machine.
19# (**) This number requires compressed key schedule set up by
20# RC4_set_key and therefore doesn't apply to 0.9.7 [option for
21# compressed key schedule is implemented in 0.9.8 and later,
22# see commentary section in rc4_skey.c for further details].
23#
24# <appro@fy.chalmers.se>
25
26push(@INC,"perlasm","../../perlasm");
27require "x86asm.pl";
28
29&asm_init($ARGV[0],"rc4-586.pl");
30
31$x="eax";
32$y="ebx";
33$tx="ecx";
34$ty="edx";
35$in="esi";
36$out="edi";
37$d="ebp";
38
39&RC4("RC4");
40
41&asm_finish();
42
43sub RC4_loop
44 {
45 local($n,$p,$char)=@_;
46
47 &comment("Round $n");
48
49 if ($char)
50 {
51 if ($p >= 0)
52 {
53 &mov($ty, &swtmp(2));
54 &cmp($ty, $in);
55 &jbe(&label("finished"));
56 &inc($in);
57 }
58 else
59 {
60 &add($ty, 8);
61 &inc($in);
62 &cmp($ty, $in);
63 &jb(&label("finished"));
64 &mov(&swtmp(2), $ty);
65 }
66 }
67 # Moved out
68 # &mov( $tx, &DWP(0,$d,$x,4)) if $p < 0;
69
70 &add( &LB($y), &LB($tx));
71 &mov( $ty, &DWP(0,$d,$y,4));
72 # XXX
73 &mov( &DWP(0,$d,$x,4),$ty);
74 &add( $ty, $tx);
75 &mov( &DWP(0,$d,$y,4),$tx);
76 &and( $ty, 0xff);
77 &inc( &LB($x)); # NEXT ROUND
78 &mov( $tx, &DWP(0,$d,$x,4)) if $p < 1; # NEXT ROUND
79 &mov( $ty, &DWP(0,$d,$ty,4));
80
81 if (!$char)
82 {
83 #moved up into last round
84 if ($p >= 1)
85 {
86 &add( $out, 8)
87 }
88 &movb( &BP($n,"esp","",0), &LB($ty));
89 }
90 else
91 {
92 # Note in+=8 has occured
93 &movb( &HB($ty), &BP(-1,$in,"",0));
94 # XXX
95 &xorb(&LB($ty), &HB($ty));
96 # XXX
97 &movb(&BP($n,$out,"",0),&LB($ty));
98 }
99 }
100
101
102sub RC4
103 {
104 local($name)=@_;
105
106 &function_begin_B($name,"");
107
108 &mov($ty,&wparam(1)); # len
109 &cmp($ty,0);
110 &jne(&label("proceed"));
111 &ret();
112 &set_label("proceed");
113
114 &comment("");
115
116 &push("ebp");
117 &push("ebx");
118 &push("esi");
119 &xor( $x, $x); # avoid partial register stalls
120 &push("edi");
121 &xor( $y, $y); # avoid partial register stalls
122 &mov( $d, &wparam(0)); # key
123 &mov( $in, &wparam(2));
124
125 &movb( &LB($x), &BP(0,$d,"",1));
126 &movb( &LB($y), &BP(4,$d,"",1));
127
128 &mov( $out, &wparam(3));
129 &inc( &LB($x));
130
131 &stack_push(3); # 3 temp variables
132 &add( $d, 8);
133
134 # detect compressed schedule, see commentary section in rc4_skey.c...
135 # in 0.9.7 context ~50 bytes below RC4_CHAR label remain redundant,
136 # as compressed key schedule is set up in 0.9.8 and later.
137 &cmp(&DWP(256,$d),-1);
138 &je(&label("RC4_CHAR"));
139
140 &lea( $ty, &DWP(-8,$ty,$in));
141
142 # check for 0 length input
143
144 &mov( &swtmp(2), $ty); # this is now address to exit at
145 &mov( $tx, &DWP(0,$d,$x,4));
146
147 &cmp( $ty, $in);
148 &jb( &label("end")); # less than 8 bytes
149
150 &set_label("start");
151
152 # filling DELAY SLOT
153 &add( $in, 8);
154
155 &RC4_loop(0,-1,0);
156 &RC4_loop(1,0,0);
157 &RC4_loop(2,0,0);
158 &RC4_loop(3,0,0);
159 &RC4_loop(4,0,0);
160 &RC4_loop(5,0,0);
161 &RC4_loop(6,0,0);
162 &RC4_loop(7,1,0);
163
164 &comment("apply the cipher text");
165 # xor the cipher data with input
166
167 #&add( $out, 8); #moved up into last round
168
169 &mov( $tx, &swtmp(0));
170 &mov( $ty, &DWP(-8,$in,"",0));
171 &xor( $tx, $ty);
172 &mov( $ty, &DWP(-4,$in,"",0));
173 &mov( &DWP(-8,$out,"",0), $tx);
174 &mov( $tx, &swtmp(1));
175 &xor( $tx, $ty);
176 &mov( $ty, &swtmp(2)); # load end ptr;
177 &mov( &DWP(-4,$out,"",0), $tx);
178 &mov( $tx, &DWP(0,$d,$x,4));
179 &cmp($in, $ty);
180 &jbe(&label("start"));
181
182 &set_label("end");
183
184 # There is quite a bit of extra crap in RC4_loop() for this
185 # first round
186 &RC4_loop(0,-1,1);
187 &RC4_loop(1,0,1);
188 &RC4_loop(2,0,1);
189 &RC4_loop(3,0,1);
190 &RC4_loop(4,0,1);
191 &RC4_loop(5,0,1);
192 &RC4_loop(6,1,1);
193
194 &jmp(&label("finished"));
195
196 &align(16);
197 # this is essentially Intel P4 specific codepath, see rc4_skey.c,
198 # and is engaged in 0.9.8 and later context...
199 &set_label("RC4_CHAR");
200
201 &lea ($ty,&DWP(0,$in,$ty));
202 &mov (&swtmp(2),$ty);
203 &movz ($tx,&BP(0,$d,$x));
204
205 # strangely enough unrolled loop performs over 20% slower...
206 &set_label("RC4_CHAR_loop");
207 &add (&LB($y),&LB($tx));
208 &movz ($ty,&BP(0,$d,$y));
209 &movb (&BP(0,$d,$y),&LB($tx));
210 &movb (&BP(0,$d,$x),&LB($ty));
211 &add (&LB($ty),&LB($tx));
212 &movz ($ty,&BP(0,$d,$ty));
213 &add (&LB($x),1);
214 &xorb (&LB($ty),&BP(0,$in));
215 &lea ($in,&DWP(1,$in));
216 &movz ($tx,&BP(0,$d,$x));
217 &cmp ($in,&swtmp(2));
218 &movb (&BP(0,$out),&LB($ty));
219 &lea ($out,&DWP(1,$out));
220 &jb (&label("RC4_CHAR_loop"));
221
222 &set_label("finished");
223 &dec( $x);
224 &stack_pop(3);
225 &movb( &BP(-4,$d,"",0),&LB($y));
226 &movb( &BP(-8,$d,"",0),&LB($x));
227
228 &function_end($name);
229 }
230
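
For reference, the "detect compressed schedule" test above (comparing the dword at offset 256 past the schedule against -1) matches the marker that RC4_set_key writes when it builds the byte-wide P4 schedule (see rc4_skey.c further down in this diff). A hypothetical C equivalent of that check, assuming the RC4_KEY layout from rc4.h and sizeof(RC4_INT) > 1 as on the x86 builds this assembler targets, might read:

#include <openssl/rc4.h>

/* Illustrative only, not part of this commit: returns nonzero when
 * RC4_set_key stored the compressed (byte-wide) schedule and marked
 * it by writing -1 at byte offset 256 of key->data, which is what
 * the cmp(&DWP(256,$d),-1) above tests. */
static int
rc4_key_is_compressed(const RC4_KEY *key)
{
	return key->data[256 / sizeof(RC4_INT)] == (RC4_INT)-1;
}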
diff --git a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
deleted file mode 100755
index 3a54623495..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
+++ /dev/null
@@ -1,365 +0,0 @@
1#!/usr/bin/env perl
2#
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9#
10# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
11# "hand-coded assembler"] doesn't stand for the whole improvement
12# coefficient. It turned out that eliminating RC4_CHAR from config
13# line results in ~40% improvement (yes, even for C implementation).
14# Presumably it has everything to do with AMD cache architecture and
15# RAW or whatever penalties. Once again! The module *requires* config
16# line *without* RC4_CHAR! As for coding "secret," I bet on partial
17# register arithmetics. For example instead of 'inc %r8; and $255,%r8'
18# I simply 'inc %r8b'. Even though optimization manual discourages
19# to operate on partial registers, it turned out to be the best bet.
20# At least for AMD... How IA32E would perform remains to be seen...
21
22# As was shown by Marc Bevand reordering of couple of load operations
23# results in even higher performance gain of 3.3x:-) At least on
24# Opteron... For reference, 1x in this case is RC4_CHAR C-code
25# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
26# Latter means that if you want to *estimate* what to expect from
27# *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz.
28
29# Intel P4 EM64T core was found to run the AMD64 code really slow...
30# The only way to achieve comparable performance on P4 was to keep
31# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
32# compose blended code, which would perform even within 30% marginal
33# on either AMD and Intel platforms, I implement both cases. See
34# rc4_skey.c for further details...
35
36# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
37# those with add/sub results in 50% performance improvement of folded
38# loop...
39
40# As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
41# performance by >30% [unlike P4 32-bit case that is]. But this is
42# provided that loads are reordered even more aggressively! Both code
43# pathes, AMD64 and EM64T, reorder loads in essentially same manner
44# as my IA-64 implementation. On Opteron this resulted in modest 5%
45# improvement [I had to test it], while final Intel P4 performance
46# achieves respectful 432MBps on 2.8GHz processor now. For reference.
47# If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than
48# RC4_INT code-path. While if executed on Opteron, it's only 25%
49# slower than the RC4_INT one [meaning that if CPU µ-arch detection
50# is not implemented, then this final RC4_CHAR code-path should be
51# preferred, as it provides better *all-round* performance].
52
53# Intel Core2 was observed to perform poorly on both code paths:-( It
54# apparently suffers from some kind of partial register stall, which
55# occurs in 64-bit mode only [as virtually identical 32-bit loop was
56# observed to outperform 64-bit one by almost 50%]. Adding two movzb to
57# cloop1 boosts its performance by 80%! This loop appears to be optimal
58# fit for Core2 and therefore the code was modified to skip cloop8 on
59# this CPU.
60
61$output=shift;
62
63$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
64( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
65( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
66die "can't locate x86_64-xlate.pl";
67
68open STDOUT,"| $^X $xlate $output";
69
70$dat="%rdi"; # arg1
71$len="%rsi"; # arg2
72$inp="%rdx"; # arg3
73$out="%rcx"; # arg4
74
75@XX=("%r8","%r10");
76@TX=("%r9","%r11");
77$YY="%r12";
78$TY="%r13";
79
80$code=<<___;
81.text
82
83.globl RC4
84.type RC4,\@function,4
85.align 16
86RC4: or $len,$len
87 jne .Lentry
88 ret
89.Lentry:
90 push %r12
91 push %r13
92
93 add \$8,$dat
94 movl -8($dat),$XX[0]#d
95 movl -4($dat),$YY#d
96 cmpl \$-1,256($dat)
97 je .LRC4_CHAR
98 inc $XX[0]#b
99 movl ($dat,$XX[0],4),$TX[0]#d
100 test \$-8,$len
101 jz .Lloop1
102 jmp .Lloop8
103.align 16
104.Lloop8:
105___
106for ($i=0;$i<8;$i++) {
107$code.=<<___;
108 add $TX[0]#b,$YY#b
109 mov $XX[0],$XX[1]
110 movl ($dat,$YY,4),$TY#d
111 ror \$8,%rax # ror is redundant when $i=0
112 inc $XX[1]#b
113 movl ($dat,$XX[1],4),$TX[1]#d
114 cmp $XX[1],$YY
115 movl $TX[0]#d,($dat,$YY,4)
116 cmove $TX[0],$TX[1]
117 movl $TY#d,($dat,$XX[0],4)
118 add $TX[0]#b,$TY#b
119 movb ($dat,$TY,4),%al
120___
121push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
122}
123$code.=<<___;
124 ror \$8,%rax
125 sub \$8,$len
126
127 xor ($inp),%rax
128 add \$8,$inp
129 mov %rax,($out)
130 add \$8,$out
131
132 test \$-8,$len
133 jnz .Lloop8
134 cmp \$0,$len
135 jne .Lloop1
136___
137$code.=<<___;
138.Lexit:
139 sub \$1,$XX[0]#b
140 movl $XX[0]#d,-8($dat)
141 movl $YY#d,-4($dat)
142
143 pop %r13
144 pop %r12
145 ret
146.align 16
147.Lloop1:
148 add $TX[0]#b,$YY#b
149 movl ($dat,$YY,4),$TY#d
150 movl $TX[0]#d,($dat,$YY,4)
151 movl $TY#d,($dat,$XX[0],4)
152 add $TY#b,$TX[0]#b
153 inc $XX[0]#b
154 movl ($dat,$TX[0],4),$TY#d
155 movl ($dat,$XX[0],4),$TX[0]#d
156 xorb ($inp),$TY#b
157 inc $inp
158 movb $TY#b,($out)
159 inc $out
160 dec $len
161 jnz .Lloop1
162 jmp .Lexit
163
164.align 16
165.LRC4_CHAR:
166 add \$1,$XX[0]#b
167 movzb ($dat,$XX[0]),$TX[0]#d
168 test \$-8,$len
169 jz .Lcloop1
170 cmp \$0,260($dat)
171 jnz .Lcloop1
172 push %rbx
173 jmp .Lcloop8
174.align 16
175.Lcloop8:
176 mov ($inp),%eax
177 mov 4($inp),%ebx
178___
179# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
180for ($i=0;$i<4;$i++) {
181$code.=<<___;
182 add $TX[0]#b,$YY#b
183 lea 1($XX[0]),$XX[1]
184 movzb ($dat,$YY),$TY#d
185 movzb $XX[1]#b,$XX[1]#d
186 movzb ($dat,$XX[1]),$TX[1]#d
187 movb $TX[0]#b,($dat,$YY)
188 cmp $XX[1],$YY
189 movb $TY#b,($dat,$XX[0])
190 jne .Lcmov$i # Intel cmov is sloooow...
191 mov $TX[0],$TX[1]
192.Lcmov$i:
193 add $TX[0]#b,$TY#b
194 xor ($dat,$TY),%al
195 ror \$8,%eax
196___
197push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
198}
199for ($i=4;$i<8;$i++) {
200$code.=<<___;
201 add $TX[0]#b,$YY#b
202 lea 1($XX[0]),$XX[1]
203 movzb ($dat,$YY),$TY#d
204 movzb $XX[1]#b,$XX[1]#d
205 movzb ($dat,$XX[1]),$TX[1]#d
206 movb $TX[0]#b,($dat,$YY)
207 cmp $XX[1],$YY
208 movb $TY#b,($dat,$XX[0])
209 jne .Lcmov$i # Intel cmov is sloooow...
210 mov $TX[0],$TX[1]
211.Lcmov$i:
212 add $TX[0]#b,$TY#b
213 xor ($dat,$TY),%bl
214 ror \$8,%ebx
215___
216push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
217}
218$code.=<<___;
219 lea -8($len),$len
220 mov %eax,($out)
221 lea 8($inp),$inp
222 mov %ebx,4($out)
223 lea 8($out),$out
224
225 test \$-8,$len
226 jnz .Lcloop8
227 pop %rbx
228 cmp \$0,$len
229 jne .Lcloop1
230 jmp .Lexit
231___
232$code.=<<___;
233.align 16
234.Lcloop1:
235 add $TX[0]#b,$YY#b
236 movzb ($dat,$YY),$TY#d
237 movb $TX[0]#b,($dat,$YY)
238 movb $TY#b,($dat,$XX[0])
239 add $TX[0]#b,$TY#b
240 add \$1,$XX[0]#b
241 movzb $TY#b,$TY#d
242 movzb $XX[0]#b,$XX[0]#d
243 movzb ($dat,$TY),$TY#d
244 movzb ($dat,$XX[0]),$TX[0]#d
245 xorb ($inp),$TY#b
246 lea 1($inp),$inp
247 movb $TY#b,($out)
248 lea 1($out),$out
249 sub \$1,$len
250 jnz .Lcloop1
251 jmp .Lexit
252.size RC4,.-RC4
253___
254
255$idx="%r8";
256$ido="%r9";
257
258$code.=<<___;
259.extern OPENSSL_ia32cap_P
260.globl RC4_set_key
261.type RC4_set_key,\@function,3
262.align 16
263RC4_set_key:
264 lea 8($dat),$dat
265 lea ($inp,$len),$inp
266 neg $len
267 mov $len,%rcx
268 xor %eax,%eax
269 xor $ido,$ido
270 xor %r10,%r10
271 xor %r11,%r11
272 mov PIC_GOT(OPENSSL_ia32cap_P),$idx#d
273 bt \$20,$idx#d
274 jnc .Lw1stloop
275 bt \$30,$idx#d
276 setc $ido#b
277 mov $ido#d,260($dat)
278 jmp .Lc1stloop
279
280.align 16
281.Lw1stloop:
282 mov %eax,($dat,%rax,4)
283 add \$1,%al
284 jnc .Lw1stloop
285
286 xor $ido,$ido
287 xor $idx,$idx
288.align 16
289.Lw2ndloop:
290 mov ($dat,$ido,4),%r10d
291 add ($inp,$len,1),$idx#b
292 add %r10b,$idx#b
293 add \$1,$len
294 mov ($dat,$idx,4),%r11d
295 cmovz %rcx,$len
296 mov %r10d,($dat,$idx,4)
297 mov %r11d,($dat,$ido,4)
298 add \$1,$ido#b
299 jnc .Lw2ndloop
300 jmp .Lexit_key
301
302.align 16
303.Lc1stloop:
304 mov %al,($dat,%rax)
305 add \$1,%al
306 jnc .Lc1stloop
307
308 xor $ido,$ido
309 xor $idx,$idx
310.align 16
311.Lc2ndloop:
312 mov ($dat,$ido),%r10b
313 add ($inp,$len),$idx#b
314 add %r10b,$idx#b
315 add \$1,$len
316 mov ($dat,$idx),%r11b
317 jnz .Lcnowrap
318 mov %rcx,$len
319.Lcnowrap:
320 mov %r10b,($dat,$idx)
321 mov %r11b,($dat,$ido)
322 add \$1,$ido#b
323 jnc .Lc2ndloop
324 movl \$-1,256($dat)
325
326.align 16
327.Lexit_key:
328 xor %eax,%eax
329 mov %eax,-8($dat)
330 mov %eax,-4($dat)
331 ret
332.size RC4_set_key,.-RC4_set_key
333
334.globl RC4_options
335.type RC4_options,\@function,0
336.align 16
337RC4_options:
338 .picmeup %rax
339 lea .Lopts-.(%rax),%rax
340 mov PIC_GOT(OPENSSL_ia32cap_P),%edx
341 bt \$20,%edx
342 jnc .Ldone
343 add \$12,%rax
344 bt \$30,%edx
345 jnc .Ldone
346 add \$13,%rax
347.Ldone:
348 ret
349.align 64
350.Lopts:
351.asciz "rc4(8x,int)"
352.asciz "rc4(8x,char)"
353.asciz "rc4(1x,char)"
354.asciz "RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
355.align 64
356.size RC4_options,.-RC4_options
357___
358
359$code =~ s/#([bwd])/$1/gm;
360
361$code =~ s/RC4_set_key/private_RC4_set_key/g if ($ENV{FIPSCANLIB} ne "");
362
363print $code;
364
365close STDOUT;
diff --git a/src/lib/libcrypto/rc4/rc4.h b/src/lib/libcrypto/rc4/rc4.h
deleted file mode 100644
index 2d8620d33b..0000000000
--- a/src/lib/libcrypto/rc4/rc4.h
+++ /dev/null
@@ -1,90 +0,0 @@
1/* crypto/rc4/rc4.h */
2/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
3 * All rights reserved.
4 *
5 * This package is an SSL implementation written
6 * by Eric Young (eay@cryptsoft.com).
7 * The implementation was written so as to conform with Netscapes SSL.
8 *
9 * This library is free for commercial and non-commercial use as long as
10 * the following conditions are aheared to. The following conditions
11 * apply to all code found in this distribution, be it the RC4, RSA,
12 * lhash, DES, etc., code; not just the SSL code. The SSL documentation
13 * included with this distribution is covered by the same copyright terms
14 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
15 *
16 * Copyright remains Eric Young's, and as such any Copyright notices in
17 * the code are not to be removed.
18 * If this package is used in a product, Eric Young should be given attribution
19 * as the author of the parts of the library used.
20 * This can be in the form of a textual message at program startup or
21 * in documentation (online or textual) provided with the package.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 3. All advertising materials mentioning features or use of this software
32 * must display the following acknowledgement:
33 * "This product includes cryptographic software written by
34 * Eric Young (eay@cryptsoft.com)"
35 * The word 'cryptographic' can be left out if the rouines from the library
36 * being used are not cryptographic related :-).
37 * 4. If you include any Windows specific code (or a derivative thereof) from
38 * the apps directory (application code) you must include an acknowledgement:
39 * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
40 *
41 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 *
53 * The licence and distribution terms for any publically available version or
54 * derivative of this code cannot be changed. i.e. this code cannot simply be
55 * copied and put under another distribution licence
56 * [including the GNU Public Licence.]
57 */
58
59#ifndef HEADER_RC4_H
60#define HEADER_RC4_H
61
62#include <openssl/opensslconf.h> /* OPENSSL_NO_RC4, RC4_INT */
63#ifdef OPENSSL_NO_RC4
64#error RC4 is disabled.
65#endif
66
67#ifdef __cplusplus
68extern "C" {
69#endif
70
71typedef struct rc4_key_st
72 {
73 RC4_INT x,y;
74 RC4_INT data[256];
75 } RC4_KEY;
76
77
78const char *RC4_options(void);
79#ifdef OPENSSL_FIPS
80void private_RC4_set_key(RC4_KEY *key, int len, const unsigned char *data);
81#endif
82void RC4_set_key(RC4_KEY *key, int len, const unsigned char *data);
83void RC4(RC4_KEY *key, unsigned long len, const unsigned char *indata,
84 unsigned char *outdata);
85
86#ifdef __cplusplus
87}
88#endif
89
90#endif
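
The header above is the entire public surface of the module: RC4_set_key() expands the key bytes into an RC4_KEY and RC4() XORs len bytes of keystream into the output buffer. A minimal, hypothetical caller (illustrative only; real key material and error handling omitted, and decryption is simply a second pass with a freshly scheduled key) would look like:

#include <string.h>
#include <openssl/rc4.h>

int
main(void)
{
	static const unsigned char k[16] = "0123456789abcdef";
	unsigned char buf[5] = { 'h', 'e', 'l', 'l', 'o' };
	unsigned char enc[5], dec[5];
	RC4_KEY key;

	RC4_set_key(&key, sizeof(k), k);	/* build the key schedule */
	RC4(&key, sizeof(buf), buf, enc);	/* encrypt: XOR with keystream */

	RC4_set_key(&key, sizeof(k), k);	/* reset state */
	RC4(&key, sizeof(enc), enc, dec);	/* decrypt: same operation */

	return memcmp(dec, buf, sizeof(buf)) != 0;
}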
diff --git a/src/lib/libcrypto/rc4/rc4_enc.c b/src/lib/libcrypto/rc4/rc4_enc.c
deleted file mode 100644
index 0660ea60a2..0000000000
--- a/src/lib/libcrypto/rc4/rc4_enc.c
+++ /dev/null
@@ -1,315 +0,0 @@
1/* crypto/rc4/rc4_enc.c */
2/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
3 * All rights reserved.
4 *
5 * This package is an SSL implementation written
6 * by Eric Young (eay@cryptsoft.com).
7 * The implementation was written so as to conform with Netscapes SSL.
8 *
9 * This library is free for commercial and non-commercial use as long as
10 * the following conditions are aheared to. The following conditions
11 * apply to all code found in this distribution, be it the RC4, RSA,
12 * lhash, DES, etc., code; not just the SSL code. The SSL documentation
13 * included with this distribution is covered by the same copyright terms
14 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
15 *
16 * Copyright remains Eric Young's, and as such any Copyright notices in
17 * the code are not to be removed.
18 * If this package is used in a product, Eric Young should be given attribution
19 * as the author of the parts of the library used.
20 * This can be in the form of a textual message at program startup or
21 * in documentation (online or textual) provided with the package.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 3. All advertising materials mentioning features or use of this software
32 * must display the following acknowledgement:
33 * "This product includes cryptographic software written by
34 * Eric Young (eay@cryptsoft.com)"
35 * The word 'cryptographic' can be left out if the rouines from the library
36 * being used are not cryptographic related :-).
37 * 4. If you include any Windows specific code (or a derivative thereof) from
38 * the apps directory (application code) you must include an acknowledgement:
39 * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
40 *
41 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 *
53 * The licence and distribution terms for any publically available version or
54 * derivative of this code cannot be changed. i.e. this code cannot simply be
55 * copied and put under another distribution licence
56 * [including the GNU Public Licence.]
57 */
58
59#include <openssl/rc4.h>
60#include "rc4_locl.h"
61
62/* RC4 as implemented from a posting from
63 * Newsgroups: sci.crypt
64 * From: sterndark@netcom.com (David Sterndark)
65 * Subject: RC4 Algorithm revealed.
66 * Message-ID: <sternCvKL4B.Hyy@netcom.com>
67 * Date: Wed, 14 Sep 1994 06:35:31 GMT
68 */
69
70void RC4(RC4_KEY *key, unsigned long len, const unsigned char *indata,
71 unsigned char *outdata)
72 {
73 register RC4_INT *d;
74 register RC4_INT x,y,tx,ty;
75 int i;
76
77 x=key->x;
78 y=key->y;
79 d=key->data;
80
81#if defined(RC4_CHUNK)
82 /*
83 * The original reason for implementing this(*) was the fact that
84 * pre-21164a Alpha CPUs don't have byte load/store instructions
85 * and e.g. a byte store has to be done with 64-bit load, shift,
86 * and, or and finally 64-bit store. Peaking data and operating
87 * at natural word size made it possible to reduce amount of
88 * instructions as well as to perform early read-ahead without
89 * suffering from RAW (read-after-write) hazard. This resulted
90 * in ~40%(**) performance improvement on 21064 box with gcc.
91 * But it's not only Alpha users who win here:-) Thanks to the
92 * early-n-wide read-ahead this implementation also exhibits
93 * >40% speed-up on SPARC and 20-30% on 64-bit MIPS (depending
94 * on sizeof(RC4_INT)).
95 *
96 * (*) "this" means code which recognizes the case when input
97 * and output pointers appear to be aligned at natural CPU
98 * word boundary
99 * (**) i.e. according to 'apps/openssl speed rc4' benchmark,
100 * crypto/rc4/rc4speed.c exhibits almost 70% speed-up...
101 *
102 * Cavets.
103 *
104 * - RC4_CHUNK="unsigned long long" should be a #1 choice for
105 * UltraSPARC. Unfortunately gcc generates very slow code
106 * (2.5-3 times slower than one generated by Sun's WorkShop
107 * C) and therefore gcc (at least 2.95 and earlier) should
108 * always be told that RC4_CHUNK="unsigned long".
109 *
110 * <appro@fy.chalmers.se>
111 */
112
113# define RC4_STEP ( \
114 x=(x+1) &0xff, \
115 tx=d[x], \
116 y=(tx+y)&0xff, \
117 ty=d[y], \
118 d[y]=tx, \
119 d[x]=ty, \
120 (RC4_CHUNK)d[(tx+ty)&0xff]\
121 )
122
123 if ( ( ((unsigned long)indata & (sizeof(RC4_CHUNK)-1)) |
124 ((unsigned long)outdata & (sizeof(RC4_CHUNK)-1)) ) == 0 )
125 {
126 RC4_CHUNK ichunk,otp;
127 const union { long one; char little; } is_endian = {1};
128
129 /*
130 * I reckon we can afford to implement both endian
131 * cases and to decide which way to take at run-time
132 * because the machine code appears to be very compact
133 * and redundant 1-2KB is perfectly tolerable (i.e.
134 * in case the compiler fails to eliminate it:-). By
135 * suggestion from Terrel Larson <terr@terralogic.net>
136 * who also stands for the is_endian union:-)
137 *
138 * Special notes.
139 *
140 * - is_endian is declared automatic as doing otherwise
141 * (declaring static) prevents gcc from eliminating
142 * the redundant code;
143 * - compilers (those I've tried) don't seem to have
144 * problems eliminating either the operators guarded
145 * by "if (sizeof(RC4_CHUNK)==8)" or the condition
146 * expressions themselves so I've got 'em to replace
147 * corresponding #ifdefs from the previous version;
148 * - I chose to let the redundant switch cases when
149 * sizeof(RC4_CHUNK)!=8 be (were also #ifdefed
150 * before);
151 * - in case you wonder "&(sizeof(RC4_CHUNK)*8-1)" in
152 * [LB]ESHFT guards against "shift is out of range"
153 * warnings when sizeof(RC4_CHUNK)!=8
154 *
155 * <appro@fy.chalmers.se>
156 */
157 if (!is_endian.little)
158 { /* BIG-ENDIAN CASE */
159# define BESHFT(c) (((sizeof(RC4_CHUNK)-(c)-1)*8)&(sizeof(RC4_CHUNK)*8-1))
160 for (;len&~(sizeof(RC4_CHUNK)-1);len-=sizeof(RC4_CHUNK))
161 {
162 ichunk = *(RC4_CHUNK *)indata;
163 otp = RC4_STEP<<BESHFT(0);
164 otp |= RC4_STEP<<BESHFT(1);
165 otp |= RC4_STEP<<BESHFT(2);
166 otp |= RC4_STEP<<BESHFT(3);
167 if (sizeof(RC4_CHUNK)==8)
168 {
169 otp |= RC4_STEP<<BESHFT(4);
170 otp |= RC4_STEP<<BESHFT(5);
171 otp |= RC4_STEP<<BESHFT(6);
172 otp |= RC4_STEP<<BESHFT(7);
173 }
174 *(RC4_CHUNK *)outdata = otp^ichunk;
175 indata += sizeof(RC4_CHUNK);
176 outdata += sizeof(RC4_CHUNK);
177 }
178 if (len)
179 {
180 RC4_CHUNK mask=(RC4_CHUNK)-1, ochunk;
181
182 ichunk = *(RC4_CHUNK *)indata;
183 ochunk = *(RC4_CHUNK *)outdata;
184 otp = 0;
185 i = BESHFT(0);
186 mask <<= (sizeof(RC4_CHUNK)-len)<<3;
187 switch (len&(sizeof(RC4_CHUNK)-1))
188 {
189 case 7: otp = RC4_STEP<<i, i-=8;
190 case 6: otp |= RC4_STEP<<i, i-=8;
191 case 5: otp |= RC4_STEP<<i, i-=8;
192 case 4: otp |= RC4_STEP<<i, i-=8;
193 case 3: otp |= RC4_STEP<<i, i-=8;
194 case 2: otp |= RC4_STEP<<i, i-=8;
195 case 1: otp |= RC4_STEP<<i, i-=8;
196 case 0: ; /*
197 * it's never the case,
198 * but it has to be here
199 * for ultrix?
200 */
201 }
202 ochunk &= ~mask;
203 ochunk |= (otp^ichunk) & mask;
204 *(RC4_CHUNK *)outdata = ochunk;
205 }
206 key->x=x;
207 key->y=y;
208 return;
209 }
210 else
211 { /* LITTLE-ENDIAN CASE */
212# define LESHFT(c) (((c)*8)&(sizeof(RC4_CHUNK)*8-1))
213 for (;len&~(sizeof(RC4_CHUNK)-1);len-=sizeof(RC4_CHUNK))
214 {
215 ichunk = *(RC4_CHUNK *)indata;
216 otp = RC4_STEP;
217 otp |= RC4_STEP<<8;
218 otp |= RC4_STEP<<16;
219 otp |= RC4_STEP<<24;
220 if (sizeof(RC4_CHUNK)==8)
221 {
222 otp |= RC4_STEP<<LESHFT(4);
223 otp |= RC4_STEP<<LESHFT(5);
224 otp |= RC4_STEP<<LESHFT(6);
225 otp |= RC4_STEP<<LESHFT(7);
226 }
227 *(RC4_CHUNK *)outdata = otp^ichunk;
228 indata += sizeof(RC4_CHUNK);
229 outdata += sizeof(RC4_CHUNK);
230 }
231 if (len)
232 {
233 RC4_CHUNK mask=(RC4_CHUNK)-1, ochunk;
234
235 ichunk = *(RC4_CHUNK *)indata;
236 ochunk = *(RC4_CHUNK *)outdata;
237 otp = 0;
238 i = 0;
239 mask >>= (sizeof(RC4_CHUNK)-len)<<3;
240 switch (len&(sizeof(RC4_CHUNK)-1))
241 {
242 case 7: otp = RC4_STEP, i+=8;
243 case 6: otp |= RC4_STEP<<i, i+=8;
244 case 5: otp |= RC4_STEP<<i, i+=8;
245 case 4: otp |= RC4_STEP<<i, i+=8;
246 case 3: otp |= RC4_STEP<<i, i+=8;
247 case 2: otp |= RC4_STEP<<i, i+=8;
248 case 1: otp |= RC4_STEP<<i, i+=8;
249 case 0: ; /*
250 * it's never the case,
251 * but it has to be here
252 * for ultrix?
253 */
254 }
255 ochunk &= ~mask;
256 ochunk |= (otp^ichunk) & mask;
257 *(RC4_CHUNK *)outdata = ochunk;
258 }
259 key->x=x;
260 key->y=y;
261 return;
262 }
263 }
264#endif
265#define LOOP(in,out) \
266 x=((x+1)&0xff); \
267 tx=d[x]; \
268 y=(tx+y)&0xff; \
269 d[x]=ty=d[y]; \
270 d[y]=tx; \
271 (out) = d[(tx+ty)&0xff]^ (in);
272
273#ifndef RC4_INDEX
274#define RC4_LOOP(a,b,i) LOOP(*((a)++),*((b)++))
275#else
276#define RC4_LOOP(a,b,i) LOOP(a[i],b[i])
277#endif
278
279 i=(int)(len>>3L);
280 if (i)
281 {
282 for (;;)
283 {
284 RC4_LOOP(indata,outdata,0);
285 RC4_LOOP(indata,outdata,1);
286 RC4_LOOP(indata,outdata,2);
287 RC4_LOOP(indata,outdata,3);
288 RC4_LOOP(indata,outdata,4);
289 RC4_LOOP(indata,outdata,5);
290 RC4_LOOP(indata,outdata,6);
291 RC4_LOOP(indata,outdata,7);
292#ifdef RC4_INDEX
293 indata+=8;
294 outdata+=8;
295#endif
296 if (--i == 0) break;
297 }
298 }
299 i=(int)len&0x07;
300 if (i)
301 {
302 for (;;)
303 {
304 RC4_LOOP(indata,outdata,0); if (--i == 0) break;
305 RC4_LOOP(indata,outdata,1); if (--i == 0) break;
306 RC4_LOOP(indata,outdata,2); if (--i == 0) break;
307 RC4_LOOP(indata,outdata,3); if (--i == 0) break;
308 RC4_LOOP(indata,outdata,4); if (--i == 0) break;
309 RC4_LOOP(indata,outdata,5); if (--i == 0) break;
310 RC4_LOOP(indata,outdata,6); if (--i == 0) break;
311 }
312 }
313 key->x=x;
314 key->y=y;
315 }
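
The byte-at-a-time fallback at the end of RC4() above is driven entirely by the LOOP/RC4_LOOP macros; expanded into a plain function (a sketch with a hypothetical name, using unsigned int in place of RC4_INT purely for readability), the stream generation it performs is:

/* Illustrative expansion of the LOOP macro above: the classic RC4
 * PRGA, one output byte per iteration; not the build's actual code
 * path. */
static void
rc4_prga_sketch(unsigned int d[256], unsigned int *px, unsigned int *py,
    const unsigned char *in, unsigned char *out, unsigned long len)
{
	unsigned int x = *px, y = *py, tx, ty;
	unsigned long n;

	for (n = 0; n < len; n++) {
		x = (x + 1) & 0xff;
		tx = d[x];
		y = (tx + y) & 0xff;
		ty = d[y];
		d[x] = ty;		/* swap S[x] and S[y] */
		d[y] = tx;
		out[n] = (unsigned char)(d[(tx + ty) & 0xff] ^ in[n]);
	}
	*px = x;			/* write indices back, as RC4() does */
	*py = y;			/* into key->x and key->y */
}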
diff --git a/src/lib/libcrypto/rc4/rc4_locl.h b/src/lib/libcrypto/rc4/rc4_locl.h
deleted file mode 100644
index c712e1632e..0000000000
--- a/src/lib/libcrypto/rc4/rc4_locl.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifndef HEADER_RC4_LOCL_H
2#define HEADER_RC4_LOCL_H
3#include <openssl/opensslconf.h>
4#include <cryptlib.h>
5#endif
diff --git a/src/lib/libcrypto/rc4/rc4_skey.c b/src/lib/libcrypto/rc4/rc4_skey.c
deleted file mode 100644
index 4478d1a4b3..0000000000
--- a/src/lib/libcrypto/rc4/rc4_skey.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/* crypto/rc4/rc4_skey.c */
2/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
3 * All rights reserved.
4 *
5 * This package is an SSL implementation written
6 * by Eric Young (eay@cryptsoft.com).
7 * The implementation was written so as to conform with Netscapes SSL.
8 *
9 * This library is free for commercial and non-commercial use as long as
10 * the following conditions are aheared to. The following conditions
11 * apply to all code found in this distribution, be it the RC4, RSA,
12 * lhash, DES, etc., code; not just the SSL code. The SSL documentation
13 * included with this distribution is covered by the same copyright terms
14 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
15 *
16 * Copyright remains Eric Young's, and as such any Copyright notices in
17 * the code are not to be removed.
18 * If this package is used in a product, Eric Young should be given attribution
19 * as the author of the parts of the library used.
20 * This can be in the form of a textual message at program startup or
21 * in documentation (online or textual) provided with the package.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 3. All advertising materials mentioning features or use of this software
32 * must display the following acknowledgement:
33 * "This product includes cryptographic software written by
34 * Eric Young (eay@cryptsoft.com)"
35 * The word 'cryptographic' can be left out if the rouines from the library
36 * being used are not cryptographic related :-).
37 * 4. If you include any Windows specific code (or a derivative thereof) from
38 * the apps directory (application code) you must include an acknowledgement:
39 * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
40 *
41 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 *
53 * The licence and distribution terms for any publically available version or
54 * derivative of this code cannot be changed. i.e. this code cannot simply be
55 * copied and put under another distribution licence
56 * [including the GNU Public Licence.]
57 */
58
59#include <openssl/rc4.h>
60#include "rc4_locl.h"
61#include <openssl/opensslv.h>
62#include <openssl/crypto.h>
63#ifdef OPENSSL_FIPS
64#include <openssl/fips.h>
65#endif
66
67
68const char RC4_version[]="RC4" OPENSSL_VERSION_PTEXT;
69
70const char *RC4_options(void)
71 {
72#ifdef RC4_INDEX
73 if (sizeof(RC4_INT) == 1)
74 return("rc4(idx,char)");
75 else
76 return("rc4(idx,int)");
77#else
78 if (sizeof(RC4_INT) == 1)
79 return("rc4(ptr,char)");
80 else
81 return("rc4(ptr,int)");
82#endif
83 }
84
85/* RC4 as implemented from a posting from
86 * Newsgroups: sci.crypt
87 * From: sterndark@netcom.com (David Sterndark)
88 * Subject: RC4 Algorithm revealed.
89 * Message-ID: <sternCvKL4B.Hyy@netcom.com>
90 * Date: Wed, 14 Sep 1994 06:35:31 GMT
91 */
92
93#ifdef OPENSSL_FIPS
94void private_RC4_set_key(RC4_KEY *key, int len, const unsigned char *data)
95#else
96void RC4_set_key(RC4_KEY *key, int len, const unsigned char *data)
97#endif
98 {
99 register RC4_INT tmp;
100 register int id1,id2;
101 register RC4_INT *d;
102 unsigned int i;
103
104 d= &(key->data[0]);
105 key->x = 0;
106 key->y = 0;
107 id1=id2=0;
108
109#define SK_LOOP(d,n) { \
110 tmp=d[(n)]; \
111 id2 = (data[id1] + tmp + id2) & 0xff; \
112 if (++id1 == len) id1=0; \
113 d[(n)]=d[id2]; \
114 d[id2]=tmp; }
115
116#if defined(OPENSSL_CPUID_OBJ) && !defined(OPENSSL_NO_ASM)
117# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
118 defined(__INTEL__) || \
119 defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64)
120 if (sizeof(RC4_INT) > 1) {
121 /*
122 * Unlike all other x86 [and x86_64] implementations,
123 * Intel P4 core [including EM64T] was found to perform
124 * poorly with wider RC4_INT. Performance improvement
125 * for IA-32 hand-coded assembler turned out to be 2.8x
126 * if re-coded for RC4_CHAR! It's however inappropriate
127 * to just switch to RC4_CHAR for x86[_64], as non-P4
128 * implementations suffer from significant performance
129 * losses then, e.g. PIII exhibits >2x deterioration,
130 * and so does Opteron. In order to assure optimal
131 * all-round performance, we detect P4 at run-time by
132 * checking upon reserved bit 20 in CPU capability
133 * vector and set up compressed key schedule, which is
134 * recognized by correspondingly updated assembler
135 * module... Bit 20 is set up by OPENSSL_ia32_cpuid.
136 *
137 * <appro@fy.chalmers.se>
138 */
139#ifdef OPENSSL_FIPS
140 unsigned long *ia32cap_ptr = OPENSSL_ia32cap_loc();
141 if (ia32cap_ptr && (*ia32cap_ptr & (1<<28))) {
142#else
143 if (OPENSSL_ia32cap_P & (1<<28)) {
144#endif
145 unsigned char *cp=(unsigned char *)d;
146
147 for (i=0;i<256;i++) cp[i]=i;
148 for (i=0;i<256;i++) SK_LOOP(cp,i);
149 /* mark schedule as compressed! */
150 d[256/sizeof(RC4_INT)]=-1;
151 return;
152 }
153 }
154# endif
155#endif
156 for (i=0; i < 256; i++) d[i]=i;
157 for (i=0; i < 256; i+=4)
158 {
159 SK_LOOP(d,i+0);
160 SK_LOOP(d,i+1);
161 SK_LOOP(d,i+2);
162 SK_LOOP(d,i+3);
163 }
164 }
165
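
For comparison with the assembler key-setup routines removed above, the portable path of RC4_set_key() with SK_LOOP expanded is the standard RC4 key schedule; a sketch (hypothetical name, unsigned int in place of RC4_INT, the compressed P4 schedule omitted) is:

/* Illustrative expansion of SK_LOOP above: identity-initialize the
 * state, then swap entries driven by the key bytes, wrapping the key
 * index when it reaches len; not the build's actual code path. */
static void
rc4_ksa_sketch(unsigned int d[256], const unsigned char *data, int len)
{
	unsigned int tmp;
	int i, id1 = 0, id2 = 0;

	for (i = 0; i < 256; i++)
		d[i] = i;
	for (i = 0; i < 256; i++) {
		tmp = d[i];
		id2 = (data[id1] + tmp + id2) & 0xff;
		if (++id1 == len)
			id1 = 0;
		d[i] = d[id2];
		d[id2] = tmp;
	}
}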