author     cvs2svn <admin@example.com>    2014-02-27 21:04:58 +0000
committer  cvs2svn <admin@example.com>    2014-02-27 21:04:58 +0000
commit     726818f36b5221c023cd04c4b90bdbc08e94cd96 (patch)
tree       cf8221f3aa5bf5a578ddf1ecf5677ad08c04d342 /src/lib/libcrypto/rc4
parent     3b6d92e82b1421b811bcdec7f7fdfb31eeef18de (diff)
download   openbsd-OPENBSD_5_5_BASE.tar.gz
           openbsd-OPENBSD_5_5_BASE.tar.bz2
           openbsd-OPENBSD_5_5_BASE.zip
This commit was manufactured by cvs2git to create tag 'OPENBSD_5_5_BASE'.
Diffstat (limited to 'src/lib/libcrypto/rc4')

-rw-r--r--  src/lib/libcrypto/rc4/asm/rc4-586.pl          410
-rw-r--r--  src/lib/libcrypto/rc4/asm/rc4-ia64.pl         755
-rw-r--r--  src/lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl   631
-rw-r--r--  src/lib/libcrypto/rc4/asm/rc4-parisc.pl       313
-rw-r--r--  src/lib/libcrypto/rc4/asm/rc4-s390x.pl        234
-rwxr-xr-x  src/lib/libcrypto/rc4/asm/rc4-x86_64.pl       676
-rw-r--r--  src/lib/libcrypto/rc4/rc4.h                    90
-rw-r--r--  src/lib/libcrypto/rc4/rc4_enc.c               315
-rw-r--r--  src/lib/libcrypto/rc4/rc4_locl.h                5
-rw-r--r--  src/lib/libcrypto/rc4/rc4_skey.c              116

10 files changed, 0 insertions, 3545 deletions
diff --git a/src/lib/libcrypto/rc4/asm/rc4-586.pl b/src/lib/libcrypto/rc4/asm/rc4-586.pl
deleted file mode 100644
index 5c9ac6ad28..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-586.pl
+++ /dev/null
@@ -1,410 +0,0 @@
1 | #!/usr/bin/env perl | ||
2 | |||
3 | # ==================================================================== | ||
4 | # [Re]written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | |||
10 | # At some point it became apparent that the original SSLeay RC4 | ||
11 | # assembler implementation performs suboptimally on latest IA-32 | ||
12 | # microarchitectures. After re-tuning performance has changed as | ||
13 | # follows: | ||
14 | # | ||
15 | # Pentium -10% | ||
16 | # Pentium III +12% | ||
17 | # AMD +50%(*) | ||
18 | # P4 +250%(**) | ||
19 | # | ||
20 | # (*) This number is actually a trade-off:-) It's possible to | ||
21 | # achieve +72%, but at the cost of -48% off PIII performance. | ||
22 | # In other words code performing further 13% faster on AMD | ||
23 | # would perform almost 2 times slower on Intel PIII... | ||
24 | # For reference! This code delivers ~80% of rc4-amd64.pl | ||
25 | # performance on the same Opteron machine. | ||
26 | # (**) This number requires compressed key schedule set up by | ||
27 | # RC4_set_key [see commentary below for further details]. | ||
28 | # | ||
29 | # <appro@fy.chalmers.se> | ||
30 | |||
31 | # May 2011 | ||
32 | # | ||
33 | # Optimize for Core2 and Westmere [and incidentally Opteron]. Current | ||
34 | # performance in cycles per processed byte (less is better) and | ||
35 | # improvement relative to previous version of this module is: | ||
36 | # | ||
37 | # Pentium 10.2 # original numbers | ||
38 | # Pentium III 7.8(*) | ||
39 | # Intel P4 7.5 | ||
40 | # | ||
41 | # Opteron 6.1/+20% # new MMX numbers | ||
42 | # Core2 5.3/+67%(**) | ||
43 | # Westmere 5.1/+94%(**) | ||
44 | # Sandy Bridge 5.0/+8% | ||
45 | # Atom 12.6/+6% | ||
46 | # | ||
47 | # (*) PIII can actually deliver 6.6 cycles per byte with MMX code, | ||
48 | # but this specific code performs poorly on Core2. And vice | ||
49 | # versa, below MMX/SSE code delivering 5.8/7.1 on Core2 performs | ||
50 | # poorly on PIII, at 8.0/14.5:-( As PIII is not a "hot" CPU | ||
51 | # [anymore], I chose to discard PIII-specific code path and opt | ||
52 | # for original IALU-only code, which is why MMX/SSE code path | ||
53 | # is guarded by SSE2 bit (see below), not MMX/SSE. | ||
54 | # (**) Performance vs. block size on Core2 and Westmere had a maximum | ||
55 | # at ... 64 bytes block size. And it was quite a maximum, 40-60% | ||
56 | # in comparison to largest 8KB block size. Above improvement | ||
57 | # coefficients are for the largest block size. | ||
58 | |||
59 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
60 | push(@INC,"${dir}","${dir}../../perlasm"); | ||
61 | require "x86asm.pl"; | ||
62 | |||
63 | &asm_init($ARGV[0],"rc4-586.pl"); | ||
64 | |||
65 | $xx="eax"; | ||
66 | $yy="ebx"; | ||
67 | $tx="ecx"; | ||
68 | $ty="edx"; | ||
69 | $inp="esi"; | ||
70 | $out="ebp"; | ||
71 | $dat="edi"; | ||
72 | |||
73 | sub RC4_loop { | ||
74 | my $i=shift; | ||
75 | my $func = ($i==0)?*mov:*or; | ||
76 | |||
77 | &add (&LB($yy),&LB($tx)); | ||
78 | &mov ($ty,&DWP(0,$dat,$yy,4)); | ||
79 | &mov (&DWP(0,$dat,$yy,4),$tx); | ||
80 | &mov (&DWP(0,$dat,$xx,4),$ty); | ||
81 | &add ($ty,$tx); | ||
82 | &inc (&LB($xx)); | ||
83 | &and ($ty,0xff); | ||
84 | &ror ($out,8) if ($i!=0); | ||
85 | if ($i<3) { | ||
86 | &mov ($tx,&DWP(0,$dat,$xx,4)); | ||
87 | } else { | ||
88 | &mov ($tx,&wparam(3)); # reload [re-biased] out | ||
89 | } | ||
90 | &$func ($out,&DWP(0,$dat,$ty,4)); | ||
91 | } | ||
92 | |||
93 | if ($alt=0) { | ||
94 | # >20% faster on Atom and Sandy Bridge[!], 8% faster on Opteron, | ||
95 | # but ~40% slower on Core2 and Westmere... Attempt to add movz | ||
96 | # brings down Opteron by 25%, Atom and Sandy Bridge by 15%, yet | ||
97 | # on Core2 with movz it's almost 20% slower than below alternative | ||
98 | # code... Yes, it's a total mess... | ||
99 | my @XX=($xx,$out); | ||
100 | $RC4_loop_mmx = sub { # SSE actually... | ||
101 | my $i=shift; | ||
102 | my $j=$i<=0?0:$i>>1; | ||
103 | my $mm=$i<=0?"mm0":"mm".($i&1); | ||
104 | |||
105 | &add (&LB($yy),&LB($tx)); | ||
106 | &lea (@XX[1],&DWP(1,@XX[0])); | ||
107 | &pxor ("mm2","mm0") if ($i==0); | ||
108 | &psllq ("mm1",8) if ($i==0); | ||
109 | &and (@XX[1],0xff); | ||
110 | &pxor ("mm0","mm0") if ($i<=0); | ||
111 | &mov ($ty,&DWP(0,$dat,$yy,4)); | ||
112 | &mov (&DWP(0,$dat,$yy,4),$tx); | ||
113 | &pxor ("mm1","mm2") if ($i==0); | ||
114 | &mov (&DWP(0,$dat,$XX[0],4),$ty); | ||
115 | &add (&LB($ty),&LB($tx)); | ||
116 | &movd (@XX[0],"mm7") if ($i==0); | ||
117 | &mov ($tx,&DWP(0,$dat,@XX[1],4)); | ||
118 | &pxor ("mm1","mm1") if ($i==1); | ||
119 | &movq ("mm2",&QWP(0,$inp)) if ($i==1); | ||
120 | &movq (&QWP(-8,(@XX[0],$inp)),"mm1") if ($i==0); | ||
121 | &pinsrw ($mm,&DWP(0,$dat,$ty,4),$j); | ||
122 | |||
123 | push (@XX,shift(@XX)) if ($i>=0); | ||
124 | } | ||
125 | } else { | ||
126 | # Using pinsrw here improves performance on Intel CPUs by 2-3%, but | ||
127 | # brings down AMD by 7%... | ||
128 | $RC4_loop_mmx = sub { | ||
129 | my $i=shift; | ||
130 | |||
131 | &add (&LB($yy),&LB($tx)); | ||
132 | &psllq ("mm1",8*(($i-1)&7)) if (abs($i)!=1); | ||
133 | &mov ($ty,&DWP(0,$dat,$yy,4)); | ||
134 | &mov (&DWP(0,$dat,$yy,4),$tx); | ||
135 | &mov (&DWP(0,$dat,$xx,4),$ty); | ||
136 | &inc ($xx); | ||
137 | &add ($ty,$tx); | ||
138 | &movz ($xx,&LB($xx)); # (*) | ||
139 | &movz ($ty,&LB($ty)); # (*) | ||
140 | &pxor ("mm2",$i==1?"mm0":"mm1") if ($i>=0); | ||
141 | &movq ("mm0",&QWP(0,$inp)) if ($i<=0); | ||
142 | &movq (&QWP(-8,($out,$inp)),"mm2") if ($i==0); | ||
143 | &mov ($tx,&DWP(0,$dat,$xx,4)); | ||
144 | &movd ($i>0?"mm1":"mm2",&DWP(0,$dat,$ty,4)); | ||
145 | |||
146 | # (*) This is the key to Core2 and Westmere performance. | ||
147 | # Without movz, out-of-order execution logic confuses | ||
148 | # itself and fails to reorder loads and stores. Problem | ||
149 | # appears to be fixed in Sandy Bridge... | ||
150 | } | ||
151 | } | ||
152 | |||
153 | &external_label("OPENSSL_ia32cap_P"); | ||
154 | |||
155 | # void RC4(RC4_KEY *key,size_t len,const unsigned char *inp,unsigned char *out); | ||
156 | &function_begin("RC4"); | ||
157 | &mov ($dat,&wparam(0)); # load key schedule pointer | ||
158 | &mov ($ty, &wparam(1)); # load len | ||
159 | &mov ($inp,&wparam(2)); # load inp | ||
160 | &mov ($out,&wparam(3)); # load out | ||
161 | |||
162 | &xor ($xx,$xx); # avoid partial register stalls | ||
163 | &xor ($yy,$yy); | ||
164 | |||
165 | &cmp ($ty,0); # safety net | ||
166 | &je (&label("abort")); | ||
167 | |||
168 | &mov (&LB($xx),&BP(0,$dat)); # load key->x | ||
169 | &mov (&LB($yy),&BP(4,$dat)); # load key->y | ||
170 | &add ($dat,8); | ||
171 | |||
172 | &lea ($tx,&DWP(0,$inp,$ty)); | ||
173 | &sub ($out,$inp); # re-bias out | ||
174 | &mov (&wparam(1),$tx); # save input+len | ||
175 | |||
176 | &inc (&LB($xx)); | ||
177 | |||
178 | # detect compressed key schedule... | ||
179 | &cmp (&DWP(256,$dat),-1); | ||
180 | &je (&label("RC4_CHAR")); | ||
181 | |||
182 | &mov ($tx,&DWP(0,$dat,$xx,4)); | ||
183 | |||
184 | &and ($ty,-4); # how many 4-byte chunks? | ||
185 | &jz (&label("loop1")); | ||
186 | |||
187 | &test ($ty,-8); | ||
188 | &mov (&wparam(3),$out); # $out as accumulator in these loops | ||
189 | &jz (&label("go4loop4")); | ||
190 | |||
191 | &picmeup($out,"OPENSSL_ia32cap_P"); | ||
192 | &bt (&DWP(0,$out),26); # check SSE2 bit [could have been MMX] | ||
193 | &jnc (&label("go4loop4")); | ||
194 | |||
195 | &mov ($out,&wparam(3)) if (!$alt); | ||
196 | &movd ("mm7",&wparam(3)) if ($alt); | ||
197 | &and ($ty,-8); | ||
198 | &lea ($ty,&DWP(-8,$inp,$ty)); | ||
199 | &mov (&DWP(-4,$dat),$ty); # save input+(len/8)*8-8 | ||
200 | |||
201 | &$RC4_loop_mmx(-1); | ||
202 | &jmp(&label("loop_mmx_enter")); | ||
203 | |||
204 | &set_label("loop_mmx",16); | ||
205 | &$RC4_loop_mmx(0); | ||
206 | &set_label("loop_mmx_enter"); | ||
207 | for ($i=1;$i<8;$i++) { &$RC4_loop_mmx($i); } | ||
208 | &mov ($ty,$yy); | ||
209 | &xor ($yy,$yy); # this is second key to Core2 | ||
210 | &mov (&LB($yy),&LB($ty)); # and Westmere performance... | ||
211 | &cmp ($inp,&DWP(-4,$dat)); | ||
212 | &lea ($inp,&DWP(8,$inp)); | ||
213 | &jb (&label("loop_mmx")); | ||
214 | |||
215 | if ($alt) { | ||
216 | &movd ($out,"mm7"); | ||
217 | &pxor ("mm2","mm0"); | ||
218 | &psllq ("mm1",8); | ||
219 | &pxor ("mm1","mm2"); | ||
220 | &movq (&QWP(-8,$out,$inp),"mm1"); | ||
221 | } else { | ||
222 | &psllq ("mm1",56); | ||
223 | &pxor ("mm2","mm1"); | ||
224 | &movq (&QWP(-8,$out,$inp),"mm2"); | ||
225 | } | ||
226 | &emms (); | ||
227 | |||
228 | &cmp ($inp,&wparam(1)); # compare to input+len | ||
229 | &je (&label("done")); | ||
230 | &jmp (&label("loop1")); | ||
231 | |||
232 | &set_label("go4loop4",16); | ||
233 | &lea ($ty,&DWP(-4,$inp,$ty)); | ||
234 | &mov (&wparam(2),$ty); # save input+(len/4)*4-4 | ||
235 | |||
236 | &set_label("loop4"); | ||
237 | for ($i=0;$i<4;$i++) { RC4_loop($i); } | ||
238 | &ror ($out,8); | ||
239 | &xor ($out,&DWP(0,$inp)); | ||
240 | &cmp ($inp,&wparam(2)); # compare to input+(len/4)*4-4 | ||
241 | &mov (&DWP(0,$tx,$inp),$out);# $tx holds re-biased out here | ||
242 | &lea ($inp,&DWP(4,$inp)); | ||
243 | &mov ($tx,&DWP(0,$dat,$xx,4)); | ||
244 | &jb (&label("loop4")); | ||
245 | |||
246 | &cmp ($inp,&wparam(1)); # compare to input+len | ||
247 | &je (&label("done")); | ||
248 | &mov ($out,&wparam(3)); # restore $out | ||
249 | |||
250 | &set_label("loop1",16); | ||
251 | &add (&LB($yy),&LB($tx)); | ||
252 | &mov ($ty,&DWP(0,$dat,$yy,4)); | ||
253 | &mov (&DWP(0,$dat,$yy,4),$tx); | ||
254 | &mov (&DWP(0,$dat,$xx,4),$ty); | ||
255 | &add ($ty,$tx); | ||
256 | &inc (&LB($xx)); | ||
257 | &and ($ty,0xff); | ||
258 | &mov ($ty,&DWP(0,$dat,$ty,4)); | ||
259 | &xor (&LB($ty),&BP(0,$inp)); | ||
260 | &lea ($inp,&DWP(1,$inp)); | ||
261 | &mov ($tx,&DWP(0,$dat,$xx,4)); | ||
262 | &cmp ($inp,&wparam(1)); # compare to input+len | ||
263 | &mov (&BP(-1,$out,$inp),&LB($ty)); | ||
264 | &jb (&label("loop1")); | ||
265 | |||
266 | &jmp (&label("done")); | ||
267 | |||
268 | # this is essentially Intel P4 specific codepath... | ||
269 | &set_label("RC4_CHAR",16); | ||
270 | &movz ($tx,&BP(0,$dat,$xx)); | ||
271 | # strangely enough unrolled loop performs over 20% slower... | ||
272 | &set_label("cloop1"); | ||
273 | &add (&LB($yy),&LB($tx)); | ||
274 | &movz ($ty,&BP(0,$dat,$yy)); | ||
275 | &mov (&BP(0,$dat,$yy),&LB($tx)); | ||
276 | &mov (&BP(0,$dat,$xx),&LB($ty)); | ||
277 | &add (&LB($ty),&LB($tx)); | ||
278 | &movz ($ty,&BP(0,$dat,$ty)); | ||
279 | &add (&LB($xx),1); | ||
280 | &xor (&LB($ty),&BP(0,$inp)); | ||
281 | &lea ($inp,&DWP(1,$inp)); | ||
282 | &movz ($tx,&BP(0,$dat,$xx)); | ||
283 | &cmp ($inp,&wparam(1)); | ||
284 | &mov (&BP(-1,$out,$inp),&LB($ty)); | ||
285 | &jb (&label("cloop1")); | ||
286 | |||
287 | &set_label("done"); | ||
288 | &dec (&LB($xx)); | ||
289 | &mov (&DWP(-4,$dat),$yy); # save key->y | ||
290 | &mov (&BP(-8,$dat),&LB($xx)); # save key->x | ||
291 | &set_label("abort"); | ||
292 | &function_end("RC4"); | ||
293 | |||
294 | ######################################################################## | ||
295 | |||
296 | $inp="esi"; | ||
297 | $out="edi"; | ||
298 | $idi="ebp"; | ||
299 | $ido="ecx"; | ||
300 | $idx="edx"; | ||
301 | |||
302 | # void RC4_set_key(RC4_KEY *key,int len,const unsigned char *data); | ||
303 | &function_begin("private_RC4_set_key"); | ||
304 | &mov ($out,&wparam(0)); # load key | ||
305 | &mov ($idi,&wparam(1)); # load len | ||
306 | &mov ($inp,&wparam(2)); # load data | ||
307 | &picmeup($idx,"OPENSSL_ia32cap_P"); | ||
308 | |||
309 | &lea ($out,&DWP(2*4,$out)); # &key->data | ||
310 | &lea ($inp,&DWP(0,$inp,$idi)); # $inp to point at the end | ||
311 | &neg ($idi); | ||
312 | &xor ("eax","eax"); | ||
313 | &mov (&DWP(-4,$out),$idi); # borrow key->y | ||
314 | |||
315 | &bt (&DWP(0,$idx),20); # check for bit#20 | ||
316 | &jc (&label("c1stloop")); | ||
317 | |||
318 | &set_label("w1stloop",16); | ||
319 | &mov (&DWP(0,$out,"eax",4),"eax"); # key->data[i]=i; | ||
320 | &add (&LB("eax"),1); # i++; | ||
321 | &jnc (&label("w1stloop")); | ||
322 | |||
323 | &xor ($ido,$ido); | ||
324 | &xor ($idx,$idx); | ||
325 | |||
326 | &set_label("w2ndloop",16); | ||
327 | &mov ("eax",&DWP(0,$out,$ido,4)); | ||
328 | &add (&LB($idx),&BP(0,$inp,$idi)); | ||
329 | &add (&LB($idx),&LB("eax")); | ||
330 | &add ($idi,1); | ||
331 | &mov ("ebx",&DWP(0,$out,$idx,4)); | ||
332 | &jnz (&label("wnowrap")); | ||
333 | &mov ($idi,&DWP(-4,$out)); | ||
334 | &set_label("wnowrap"); | ||
335 | &mov (&DWP(0,$out,$idx,4),"eax"); | ||
336 | &mov (&DWP(0,$out,$ido,4),"ebx"); | ||
337 | &add (&LB($ido),1); | ||
338 | &jnc (&label("w2ndloop")); | ||
339 | &jmp (&label("exit")); | ||
340 | |||
341 | # Unlike all other x86 [and x86_64] implementations, Intel P4 core | ||
342 | # [including EM64T] was found to perform poorly with above "32-bit" key | ||
343 | # schedule, a.k.a. RC4_INT. Performance improvement for IA-32 hand-coded | ||
344 | # assembler turned out to be 3.5x if re-coded for compressed 8-bit one, | ||
345 | # a.k.a. RC4_CHAR! It's however inappropriate to just switch to 8-bit | ||
346 | # schedule for x86[_64], because non-P4 implementations suffer from | ||
347 | # significant performance losses then, e.g. PIII exhibits >2x | ||
348 | # deterioration, and so does Opteron. In order to assure optimal | ||
349 | # all-round performance, we detect P4 at run-time and set up compressed | ||
350 | # key schedule, which is recognized by RC4 procedure. | ||
351 | |||
352 | &set_label("c1stloop",16); | ||
353 | &mov (&BP(0,$out,"eax"),&LB("eax")); # key->data[i]=i; | ||
354 | &add (&LB("eax"),1); # i++; | ||
355 | &jnc (&label("c1stloop")); | ||
356 | |||
357 | &xor ($ido,$ido); | ||
358 | &xor ($idx,$idx); | ||
359 | &xor ("ebx","ebx"); | ||
360 | |||
361 | &set_label("c2ndloop",16); | ||
362 | &mov (&LB("eax"),&BP(0,$out,$ido)); | ||
363 | &add (&LB($idx),&BP(0,$inp,$idi)); | ||
364 | &add (&LB($idx),&LB("eax")); | ||
365 | &add ($idi,1); | ||
366 | &mov (&LB("ebx"),&BP(0,$out,$idx)); | ||
367 | &jnz (&label("cnowrap")); | ||
368 | &mov ($idi,&DWP(-4,$out)); | ||
369 | &set_label("cnowrap"); | ||
370 | &mov (&BP(0,$out,$idx),&LB("eax")); | ||
371 | &mov (&BP(0,$out,$ido),&LB("ebx")); | ||
372 | &add (&LB($ido),1); | ||
373 | &jnc (&label("c2ndloop")); | ||
374 | |||
375 | &mov (&DWP(256,$out),-1); # mark schedule as compressed | ||
376 | |||
377 | &set_label("exit"); | ||
378 | &xor ("eax","eax"); | ||
379 | &mov (&DWP(-8,$out),"eax"); # key->x=0; | ||
380 | &mov (&DWP(-4,$out),"eax"); # key->y=0; | ||
381 | &function_end("private_RC4_set_key"); | ||
382 | |||
383 | # const char *RC4_options(void); | ||
384 | &function_begin_B("RC4_options"); | ||
385 | &call (&label("pic_point")); | ||
386 | &set_label("pic_point"); | ||
387 | &blindpop("eax"); | ||
388 | &lea ("eax",&DWP(&label("opts")."-".&label("pic_point"),"eax")); | ||
389 | &picmeup("edx","OPENSSL_ia32cap_P"); | ||
390 | &mov ("edx",&DWP(0,"edx")); | ||
391 | &bt ("edx",20); | ||
392 | &jc (&label("1xchar")); | ||
393 | &bt ("edx",26); | ||
394 | &jnc (&label("ret")); | ||
395 | &add ("eax",25); | ||
396 | &ret (); | ||
397 | &set_label("1xchar"); | ||
398 | &add ("eax",12); | ||
399 | &set_label("ret"); | ||
400 | &ret (); | ||
401 | &set_label("opts",64); | ||
402 | &asciz ("rc4(4x,int)"); | ||
403 | &asciz ("rc4(1x,char)"); | ||
404 | &asciz ("rc4(8x,mmx)"); | ||
405 | &asciz ("RC4 for x86, CRYPTOGAMS by <appro\@openssl.org>"); | ||
406 | &align (64); | ||
407 | &function_end_B("RC4_options"); | ||
408 | |||
409 | &asm_finish(); | ||
410 | |||
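For orientation, here is a minimal portable-C sketch of what the byte-at-a-time "loop1"/"cloop1" paths of rc4-586.pl above compute. The rc4_state struct and the function name are illustrative stand-ins, not OpenSSL's actual RC4_KEY layout or API (the real schedule is kept as RC4_INT words, or as a compressed 8-bit table when a P4 is detected at run time).

#include <stddef.h>

typedef struct {
	unsigned char x, y;          /* i and j of the RC4 state */
	unsigned char data[256];     /* permutation S[] */
} rc4_state;                         /* illustrative, not OpenSSL's RC4_KEY */

void
rc4_sketch(rc4_state *key, size_t len, const unsigned char *inp,
    unsigned char *out)
{
	unsigned char x = key->x, y = key->y;
	size_t i;

	for (i = 0; i < len; i++) {
		unsigned char tx, ty;

		x = (unsigned char)(x + 1);           /* i = (i + 1) & 0xff */
		tx = key->data[x];
		y = (unsigned char)(y + tx);          /* j = (j + S[i]) & 0xff */
		ty = key->data[y];
		key->data[y] = tx;                    /* swap S[i] and S[j] */
		key->data[x] = ty;
		out[i] = inp[i] ^ key->data[(unsigned char)(tx + ty)];
	}
	key->x = x;
	key->y = y;
}

The assembly above gets its speed from unrolling this loop four (IALU) or eight (MMX/SSE) times and keeping the whole 256-entry state hot in L1 cache; the C version only pins down the semantics.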
diff --git a/src/lib/libcrypto/rc4/asm/rc4-ia64.pl b/src/lib/libcrypto/rc4/asm/rc4-ia64.pl
deleted file mode 100644
index 49cd5b5e69..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-ia64.pl
+++ /dev/null
@@ -1,755 +0,0 @@
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by David Mosberger <David.Mosberger@acm.org> based on the | ||
5 | # Itanium optimized Crypto code which was released by HP Labs at | ||
6 | # http://www.hpl.hp.com/research/linux/crypto/. | ||
7 | # | ||
8 | # Copyright (c) 2005 Hewlett-Packard Development Company, L.P. | ||
9 | # | ||
10 | # Permission is hereby granted, free of charge, to any person obtaining | ||
11 | # a copy of this software and associated documentation files (the | ||
12 | # "Software"), to deal in the Software without restriction, including | ||
13 | # without limitation the rights to use, copy, modify, merge, publish, | ||
14 | # distribute, sublicense, and/or sell copies of the Software, and to | ||
15 | # permit persons to whom the Software is furnished to do so, subject to | ||
16 | # the following conditions: | ||
17 | # | ||
18 | # The above copyright notice and this permission notice shall be | ||
19 | # included in all copies or substantial portions of the Software. | ||
20 | |||
21 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
22 | # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
23 | # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
24 | # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | ||
25 | # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
26 | # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
27 | # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ | ||
28 | |||
29 | |||
30 | |||
31 | # This is a little helper program which generates a software-pipelined | ||
32 | # loop for RC4 encryption. The basic algorithm looks like this: | ||
33 | # | ||
34 | # for (counter = 0; counter < len; ++counter) | ||
35 | # { | ||
36 | # in = inp[counter]; | ||
37 | # SI = S[I]; | ||
38 | # J = (SI + J) & 0xff; | ||
39 | # SJ = S[J]; | ||
40 | # T = (SI + SJ) & 0xff; | ||
41 | # S[I] = SJ, S[J] = SI; | ||
42 | # ST = S[T]; | ||
43 | # outp[counter] = in ^ ST; | ||
44 | # I = (I + 1) & 0xff; | ||
45 | # } | ||
46 | # | ||
47 | # Pipelining this loop isn't easy, because the stores to the S[] array | ||
48 | # need to be observed in the right order. The loop generated by the | ||
49 | # code below has the following pipeline diagram: | ||
50 | # | ||
51 | # cycle | ||
52 | # | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |16 |17 | | ||
53 | # iter | ||
54 | # 1: xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx | ||
55 | # 2: xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx | ||
56 | # 3: xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx | ||
57 | # | ||
58 | # where: | ||
59 | # LDI = load of S[I] | ||
60 | # LDJ = load of S[J] | ||
61 | # SWP = swap of S[I] and S[J] | ||
62 | # LDT = load of S[T] | ||
63 | # | ||
64 | # Note that in the above diagram, the major trouble-spot is that LDI | ||
65 | # of the 2nd iteration is performed BEFORE the SWP of the first | ||
66 | # iteration. Fortunately, this is easy to detect (I of the 1st | ||
67 | # iteration will be equal to J of the 2nd iteration) and when this | ||
68 | # happens, we simply forward the proper value from the 1st iteration | ||
69 | # to the 2nd one. The proper value in this case is simply the value | ||
70 | # of S[I] from the first iteration (thanks to the fact that SWP | ||
71 | # simply swaps the contents of S[I] and S[J]). | ||
72 | # | ||
73 | # Another potential trouble-spot is in cycle 7, where SWP of the 1st | ||
74 | # iteration issues at the same time as the LDI of the 3rd iteration. | ||
75 | # However, thanks to IA-64 execution semantics, this can be taken | ||
76 | # care of simply by placing LDI later in the instruction-group than | ||
77 | # SWP. IA-64 CPUs will automatically forward the value if they | ||
78 | # detect that the SWP and LDI are accessing the same memory-location. | ||
79 | |||
80 | # The core-loop that can be pipelined then looks like this (annotated | ||
81 | # with McKinley/Madison issue port & latency numbers, assuming L1 | ||
82 | # cache hits for the most part): | ||
83 | |||
84 | # operation: instruction: issue-ports: latency | ||
85 | # ------------------ ----------------------------- ------------- ------- | ||
86 | |||
87 | # Data = *inp++ ld1 data = [inp], 1 M0-M1 1 cyc c0 | ||
88 | # shladd Iptr = I, KeyTable, 3 M0-M3, I0, I1 1 cyc | ||
89 | # I = (I + 1) & 0xff padd1 nextI = I, one M0-M3, I0, I1 3 cyc | ||
90 | # ;; | ||
91 | # SI = S[I] ld8 SI = [Iptr] M0-M1 1 cyc c1 * after SWAP! | ||
92 | # ;; | ||
93 | # cmp.eq.unc pBypass = I, J * after J is valid! | ||
94 | # J = SI + J add J = J, SI M0-M3, I0, I1 1 cyc c2 | ||
95 | # (pBypass) br.cond.spnt Bypass | ||
96 | # ;; | ||
97 | # --------------------------------------------------------------------------------------- | ||
98 | # J = J & 0xff zxt1 J = J I0, I1, 1 cyc c3 | ||
99 | # ;; | ||
100 | # shladd Jptr = J, KeyTable, 3 M0-M3, I0, I1 1 cyc c4 | ||
101 | # ;; | ||
102 | # SJ = S[J] ld8 SJ = [Jptr] M0-M1 1 cyc c5 | ||
103 | # ;; | ||
104 | # --------------------------------------------------------------------------------------- | ||
105 | # T = (SI + SJ) add T = SI, SJ M0-M3, I0, I1 1 cyc c6 | ||
106 | # ;; | ||
107 | # T = T & 0xff zxt1 T = T I0, I1 1 cyc | ||
108 | # S[I] = SJ st8 [Iptr] = SJ M2-M3 c7 | ||
109 | # S[J] = SI st8 [Jptr] = SI M2-M3 | ||
110 | # ;; | ||
111 | # shladd Tptr = T, KeyTable, 3 M0-M3, I0, I1 1 cyc c8 | ||
112 | # ;; | ||
113 | # --------------------------------------------------------------------------------------- | ||
114 | # T = S[T] ld8 T = [Tptr] M0-M1 1 cyc c9 | ||
115 | # ;; | ||
116 | # data ^= T xor data = data, T M0-M3, I0, I1 1 cyc c10 | ||
117 | # ;; | ||
118 | # *out++ = Data ^ T dep word = word, data, 8, POS I0, I1 1 cyc c11 | ||
119 | # ;; | ||
120 | # --------------------------------------------------------------------------------------- | ||
121 | |||
122 | # There are several points worth making here: | ||
123 | |||
124 | # - Note that due to the bypass/forwarding-path, the first two | ||
125 | # phases of the loop are strangely mingled together. In | ||
126 | # particular, note that the first stage of the pipeline is | ||
127 | # using the value of "J", as calculated by the second stage. | ||
128 | # - Each bundle-pair will have exactly 6 instructions. | ||
129 | # - Pipelined, the loop can execute in 3 cycles/iteration and | ||
130 | # 4 stages. However, McKinley/Madison can issue "st1" to | ||
131 | # the same bank at a rate of at most one per 4 cycles. Thus, | ||
132 | # instead of storing each byte, we accumulate them in a word | ||
133 | # and then write them back at once with a single "st8" (this | ||
134 | # implies that the setup code needs to ensure that the output | ||
135 | # buffer is properly aligned, if need be, by encoding the | ||
136 | # first few bytes separately). | ||
137 | # - There is no space for a "br.ctop" instruction. For this | ||
138 | # reason we can't use modulo-loop support in IA-64 and have | ||
139 | # to do a traditional, purely software-pipelined loop. | ||
140 | # - We can't replace any of the remaining "add/zxt1" pairs with | ||
141 | # "padd1" because the latency for that instruction is too high | ||
142 | # and would push the loop to the point where more bypasses | ||
143 | # would be needed, which we don't have space for. | ||
144 | # - The above loop runs at around 3.26 cycles/byte, or roughly | ||
145 | # 440 MByte/sec on a 1.5GHz Madison. This is well below the | ||
146 | # system bus bandwidth and hence with judicious use of | ||
147 | # "lfetch" this loop can run at (almost) peak speed even when | ||
148 | # the input and output data reside in memory. The | ||
149 | # max. latency that can be tolerated is (PREFETCH_DISTANCE * | ||
150 | # L2_LINE_SIZE * 3 cyc), or about 384 cycles assuming (at | ||
151 | # least) 1-ahead prefetching of 128 byte cache-lines. Note | ||
152 | # that we do NOT prefetch into L1, since that would only | ||
153 | # interfere with the S[] table values stored there. This is | ||
154 | # acceptable because there is a 10 cycle latency between | ||
155 | # load and first use of the input data. | ||
156 | # - We use a branch to out-of-line bypass-code because of cycle-pressure: | ||
157 | # we calculate the next J, check for the need to activate the | ||
158 | # bypass path, and activate the bypass path ALL IN THE SAME | ||
159 | # CYCLE. If we didn't have these constraints, we could do | ||
160 | # the bypass with a simple conditional move instruction. | ||
161 | # Fortunately, the bypass paths get activated relatively | ||
162 | # infrequently, so the extra branches don't cost all that much | ||
163 | # (about 0.04 cycles/byte, measured on a 16396 byte file with | ||
164 | # random input data). | ||
165 | # | ||
166 | |||
167 | $phases = 4; # number of stages/phases in the pipelined-loop | ||
168 | $unroll_count = 6; # number of times we unrolled it | ||
169 | $pComI = (1 << 0); | ||
170 | $pComJ = (1 << 1); | ||
171 | $pComT = (1 << 2); | ||
172 | $pOut = (1 << 3); | ||
173 | |||
174 | $NData = 4; | ||
175 | $NIP = 3; | ||
176 | $NJP = 2; | ||
177 | $NI = 2; | ||
178 | $NSI = 3; | ||
179 | $NSJ = 2; | ||
180 | $NT = 2; | ||
181 | $NOutWord = 2; | ||
182 | |||
183 | # | ||
184 | # $threshold is the minimum length before we attempt to use the | ||
185 | # big software-pipelined loop. It MUST be greater-or-equal | ||
186 | # to: | ||
187 | # PHASES * (UNROLL_COUNT + 1) + 7 | ||
188 | # | ||
189 | # The "+ 7" comes from the fact we may have to encode up to | ||
190 | # 7 bytes separately before the output pointer is aligned. | ||
191 | # | ||
192 | $threshold = (3 * ($phases * ($unroll_count + 1)) + 7); | ||
193 | |||
194 | sub I { | ||
195 | local *code = shift; | ||
196 | local $format = shift; | ||
197 | $code .= sprintf ("\t\t".$format."\n", @_); | ||
198 | } | ||
199 | |||
200 | sub P { | ||
201 | local *code = shift; | ||
202 | local $format = shift; | ||
203 | $code .= sprintf ($format."\n", @_); | ||
204 | } | ||
205 | |||
206 | sub STOP { | ||
207 | local *code = shift; | ||
208 | $code .=<<___; | ||
209 | ;; | ||
210 | ___ | ||
211 | } | ||
212 | |||
213 | sub emit_body { | ||
214 | local *c = shift; | ||
215 | local *bypass = shift; | ||
216 | local ($iteration, $p) = @_; | ||
217 | |||
218 | local $i0 = $iteration; | ||
219 | local $i1 = $iteration - 1; | ||
220 | local $i2 = $iteration - 2; | ||
221 | local $i3 = $iteration - 3; | ||
222 | local $iw0 = ($iteration - 3) / 8; | ||
223 | local $iw1 = ($iteration > 3) ? ($iteration - 4) / 8 : 1; | ||
224 | local $byte_num = ($iteration - 3) % 8; | ||
225 | local $label = $iteration + 1; | ||
226 | local $pAny = ($p & 0xf) == 0xf; | ||
227 | local $pByp = (($p & $pComI) && ($iteration > 0)); | ||
228 | |||
229 | $c.=<<___; | ||
230 | ////////////////////////////////////////////////// | ||
231 | ___ | ||
232 | |||
233 | if (($p & 0xf) == 0) { | ||
234 | $c.="#ifdef HOST_IS_BIG_ENDIAN\n"; | ||
235 | &I(\$c,"shr.u OutWord[%u] = OutWord[%u], 32;;", | ||
236 | $iw1 % $NOutWord, $iw1 % $NOutWord); | ||
237 | $c.="#endif\n"; | ||
238 | &I(\$c, "st4 [OutPtr] = OutWord[%u], 4", $iw1 % $NOutWord); | ||
239 | return; | ||
240 | } | ||
241 | |||
242 | # Cycle 0 | ||
243 | &I(\$c, "{ .mmi") if ($pAny); | ||
244 | &I(\$c, "ld1 Data[%u] = [InPtr], 1", $i0 % $NData) if ($p & $pComI); | ||
245 | &I(\$c, "padd1 I[%u] = One, I[%u]", $i0 % $NI, $i1 % $NI)if ($p & $pComI); | ||
246 | &I(\$c, "zxt1 J = J") if ($p & $pComJ); | ||
247 | &I(\$c, "}") if ($pAny); | ||
248 | &I(\$c, "{ .mmi") if ($pAny); | ||
249 | &I(\$c, "LKEY T[%u] = [T[%u]]", $i1 % $NT, $i1 % $NT) if ($p & $pOut); | ||
250 | &I(\$c, "add T[%u] = SI[%u], SJ[%u]", | ||
251 | $i0 % $NT, $i2 % $NSI, $i1 % $NSJ) if ($p & $pComT); | ||
252 | &I(\$c, "KEYADDR(IPr[%u], I[%u])", $i0 % $NIP, $i1 % $NI) if ($p & $pComI); | ||
253 | &I(\$c, "}") if ($pAny); | ||
254 | &STOP(\$c); | ||
255 | |||
256 | # Cycle 1 | ||
257 | &I(\$c, "{ .mmi") if ($pAny); | ||
258 | &I(\$c, "SKEY [IPr[%u]] = SJ[%u]", $i2 % $NIP, $i1%$NSJ)if ($p & $pComT); | ||
259 | &I(\$c, "SKEY [JP[%u]] = SI[%u]", $i1 % $NJP, $i2%$NSI) if ($p & $pComT); | ||
260 | &I(\$c, "zxt1 T[%u] = T[%u]", $i0 % $NT, $i0 % $NT) if ($p & $pComT); | ||
261 | &I(\$c, "}") if ($pAny); | ||
262 | &I(\$c, "{ .mmi") if ($pAny); | ||
263 | &I(\$c, "LKEY SI[%u] = [IPr[%u]]", $i0 % $NSI, $i0%$NIP)if ($p & $pComI); | ||
264 | &I(\$c, "KEYADDR(JP[%u], J)", $i0 % $NJP) if ($p & $pComJ); | ||
265 | &I(\$c, "xor Data[%u] = Data[%u], T[%u]", | ||
266 | $i3 % $NData, $i3 % $NData, $i1 % $NT) if ($p & $pOut); | ||
267 | &I(\$c, "}") if ($pAny); | ||
268 | &STOP(\$c); | ||
269 | |||
270 | # Cycle 2 | ||
271 | &I(\$c, "{ .mmi") if ($pAny); | ||
272 | &I(\$c, "LKEY SJ[%u] = [JP[%u]]", $i0 % $NSJ, $i0%$NJP) if ($p & $pComJ); | ||
273 | &I(\$c, "cmp.eq pBypass, p0 = I[%u], J", $i1 % $NI) if ($pByp); | ||
274 | &I(\$c, "dep OutWord[%u] = Data[%u], OutWord[%u], BYTE_POS(%u), 8", | ||
275 | $iw0%$NOutWord, $i3%$NData, $iw1%$NOutWord, $byte_num) if ($p & $pOut); | ||
276 | &I(\$c, "}") if ($pAny); | ||
277 | &I(\$c, "{ .mmb") if ($pAny); | ||
278 | &I(\$c, "add J = J, SI[%u]", $i0 % $NSI) if ($p & $pComI); | ||
279 | &I(\$c, "KEYADDR(T[%u], T[%u])", $i0 % $NT, $i0 % $NT) if ($p & $pComT); | ||
280 | &P(\$c, "(pBypass)\tbr.cond.spnt.many .rc4Bypass%u",$label)if ($pByp); | ||
281 | &I(\$c, "}") if ($pAny); | ||
282 | &STOP(\$c); | ||
283 | |||
284 | &P(\$c, ".rc4Resume%u:", $label) if ($pByp); | ||
285 | if ($byte_num == 0 && $iteration >= $phases) { | ||
286 | &I(\$c, "st8 [OutPtr] = OutWord[%u], 8", | ||
287 | $iw1 % $NOutWord) if ($p & $pOut); | ||
288 | if ($iteration == (1 + $unroll_count) * $phases - 1) { | ||
289 | if ($unroll_count == 6) { | ||
290 | &I(\$c, "mov OutWord[%u] = OutWord[%u]", | ||
291 | $iw1 % $NOutWord, $iw0 % $NOutWord); | ||
292 | } | ||
293 | &I(\$c, "lfetch.nt1 [InPrefetch], %u", | ||
294 | $unroll_count * $phases); | ||
295 | &I(\$c, "lfetch.excl.nt1 [OutPrefetch], %u", | ||
296 | $unroll_count * $phases); | ||
297 | &I(\$c, "br.cloop.sptk.few .rc4Loop"); | ||
298 | } | ||
299 | } | ||
300 | |||
301 | if ($pByp) { | ||
302 | &P(\$bypass, ".rc4Bypass%u:", $label); | ||
303 | &I(\$bypass, "sub J = J, SI[%u]", $i0 % $NSI); | ||
304 | &I(\$bypass, "nop 0"); | ||
305 | &I(\$bypass, "nop 0"); | ||
306 | &I(\$bypass, ";;"); | ||
307 | &I(\$bypass, "add J = J, SI[%u]", $i1 % $NSI); | ||
308 | &I(\$bypass, "mov SI[%u] = SI[%u]", $i0 % $NSI, $i1 % $NSI); | ||
309 | &I(\$bypass, "br.sptk.many .rc4Resume%u\n", $label); | ||
310 | &I(\$bypass, ";;"); | ||
311 | } | ||
312 | } | ||
313 | |||
314 | $code=<<___; | ||
315 | .ident \"rc4-ia64.s, version 3.0\" | ||
316 | .ident \"Copyright (c) 2005 Hewlett-Packard Development Company, L.P.\" | ||
317 | |||
318 | #define LCSave r8 | ||
319 | #define PRSave r9 | ||
320 | |||
321 | /* Inputs become invalid once rotation begins! */ | ||
322 | |||
323 | #define StateTable in0 | ||
324 | #define DataLen in1 | ||
325 | #define InputBuffer in2 | ||
326 | #define OutputBuffer in3 | ||
327 | |||
328 | #define KTable r14 | ||
329 | #define J r15 | ||
330 | #define InPtr r16 | ||
331 | #define OutPtr r17 | ||
332 | #define InPrefetch r18 | ||
333 | #define OutPrefetch r19 | ||
334 | #define One r20 | ||
335 | #define LoopCount r21 | ||
336 | #define Remainder r22 | ||
337 | #define IFinal r23 | ||
338 | #define EndPtr r24 | ||
339 | |||
340 | #define tmp0 r25 | ||
341 | #define tmp1 r26 | ||
342 | |||
343 | #define pBypass p6 | ||
344 | #define pDone p7 | ||
345 | #define pSmall p8 | ||
346 | #define pAligned p9 | ||
347 | #define pUnaligned p10 | ||
348 | |||
349 | #define pComputeI pPhase[0] | ||
350 | #define pComputeJ pPhase[1] | ||
351 | #define pComputeT pPhase[2] | ||
352 | #define pOutput pPhase[3] | ||
353 | |||
354 | #define RetVal r8 | ||
355 | #define L_OK p7 | ||
356 | #define L_NOK p8 | ||
357 | |||
358 | #define _NINPUTS 4 | ||
359 | #define _NOUTPUT 0 | ||
360 | |||
361 | #define _NROTATE 24 | ||
362 | #define _NLOCALS (_NROTATE - _NINPUTS - _NOUTPUT) | ||
363 | |||
364 | #ifndef SZ | ||
365 | # define SZ 4 // this must be set to sizeof(RC4_INT) | ||
366 | #endif | ||
367 | |||
368 | #if SZ == 1 | ||
369 | # define LKEY ld1 | ||
370 | # define SKEY st1 | ||
371 | # define KEYADDR(dst, i) add dst = i, KTable | ||
372 | #elif SZ == 2 | ||
373 | # define LKEY ld2 | ||
374 | # define SKEY st2 | ||
375 | # define KEYADDR(dst, i) shladd dst = i, 1, KTable | ||
376 | #elif SZ == 4 | ||
377 | # define LKEY ld4 | ||
378 | # define SKEY st4 | ||
379 | # define KEYADDR(dst, i) shladd dst = i, 2, KTable | ||
380 | #else | ||
381 | # define LKEY ld8 | ||
382 | # define SKEY st8 | ||
383 | # define KEYADDR(dst, i) shladd dst = i, 3, KTable | ||
384 | #endif | ||
385 | |||
386 | #if defined(_HPUX_SOURCE) && !defined(_LP64) | ||
387 | # define ADDP addp4 | ||
388 | #else | ||
389 | # define ADDP add | ||
390 | #endif | ||
391 | |||
392 | /* Define a macro for the bit number of the n-th byte: */ | ||
393 | |||
394 | #if defined(_HPUX_SOURCE) || defined(B_ENDIAN) | ||
395 | # define HOST_IS_BIG_ENDIAN | ||
396 | # define BYTE_POS(n) (56 - (8 * (n))) | ||
397 | #else | ||
398 | # define BYTE_POS(n) (8 * (n)) | ||
399 | #endif | ||
400 | |||
401 | /* | ||
402 | We must perform the first phase of the pipeline explicitly since | ||
403 | we will always load from the state table the first time. The br.cexit | ||
404 | will never be taken, regardless of the number of bytes, because | ||
405 | the epilogue count is 4. | ||
406 | */ | ||
407 | /* MODSCHED_RC4 macro was split to _PROLOGUE and _LOOP, because HP-UX | ||
408 | assembler failed on original macro with syntax error. <appro> */ | ||
409 | #define MODSCHED_RC4_PROLOGUE \\ | ||
410 | { \\ | ||
411 | ld1 Data[0] = [InPtr], 1; \\ | ||
412 | add IFinal = 1, I[1]; \\ | ||
413 | KEYADDR(IPr[0], I[1]); \\ | ||
414 | } ;; \\ | ||
415 | { \\ | ||
416 | LKEY SI[0] = [IPr[0]]; \\ | ||
417 | mov pr.rot = 0x10000; \\ | ||
418 | mov ar.ec = 4; \\ | ||
419 | } ;; \\ | ||
420 | { \\ | ||
421 | add J = J, SI[0]; \\ | ||
422 | zxt1 I[0] = IFinal; \\ | ||
423 | br.cexit.spnt.few .+16; /* never taken */ \\ | ||
424 | } ;; | ||
425 | #define MODSCHED_RC4_LOOP(label) \\ | ||
426 | label: \\ | ||
427 | { .mmi; \\ | ||
428 | (pComputeI) ld1 Data[0] = [InPtr], 1; \\ | ||
429 | (pComputeI) add IFinal = 1, I[1]; \\ | ||
430 | (pComputeJ) zxt1 J = J; \\ | ||
431 | }{ .mmi; \\ | ||
432 | (pOutput) LKEY T[1] = [T[1]]; \\ | ||
433 | (pComputeT) add T[0] = SI[2], SJ[1]; \\ | ||
434 | (pComputeI) KEYADDR(IPr[0], I[1]); \\ | ||
435 | } ;; \\ | ||
436 | { .mmi; \\ | ||
437 | (pComputeT) SKEY [IPr[2]] = SJ[1]; \\ | ||
438 | (pComputeT) SKEY [JP[1]] = SI[2]; \\ | ||
439 | (pComputeT) zxt1 T[0] = T[0]; \\ | ||
440 | }{ .mmi; \\ | ||
441 | (pComputeI) LKEY SI[0] = [IPr[0]]; \\ | ||
442 | (pComputeJ) KEYADDR(JP[0], J); \\ | ||
443 | (pComputeI) cmp.eq.unc pBypass, p0 = I[1], J; \\ | ||
444 | } ;; \\ | ||
445 | { .mmi; \\ | ||
446 | (pComputeJ) LKEY SJ[0] = [JP[0]]; \\ | ||
447 | (pOutput) xor Data[3] = Data[3], T[1]; \\ | ||
448 | nop 0x0; \\ | ||
449 | }{ .mmi; \\ | ||
450 | (pComputeT) KEYADDR(T[0], T[0]); \\ | ||
451 | (pBypass) mov SI[0] = SI[1]; \\ | ||
452 | (pComputeI) zxt1 I[0] = IFinal; \\ | ||
453 | } ;; \\ | ||
454 | { .mmb; \\ | ||
455 | (pOutput) st1 [OutPtr] = Data[3], 1; \\ | ||
456 | (pComputeI) add J = J, SI[0]; \\ | ||
457 | br.ctop.sptk.few label; \\ | ||
458 | } ;; | ||
459 | |||
460 | .text | ||
461 | |||
462 | .align 32 | ||
463 | |||
464 | .type RC4, \@function | ||
465 | .global RC4 | ||
466 | |||
467 | .proc RC4 | ||
468 | .prologue | ||
469 | |||
470 | RC4: | ||
471 | { | ||
472 | .mmi | ||
473 | alloc r2 = ar.pfs, _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE | ||
474 | |||
475 | .rotr Data[4], I[2], IPr[3], SI[3], JP[2], SJ[2], T[2], \\ | ||
476 | OutWord[2] | ||
477 | .rotp pPhase[4] | ||
478 | |||
479 | ADDP InPrefetch = 0, InputBuffer | ||
480 | ADDP KTable = 0, StateTable | ||
481 | } | ||
482 | { | ||
483 | .mmi | ||
484 | ADDP InPtr = 0, InputBuffer | ||
485 | ADDP OutPtr = 0, OutputBuffer | ||
486 | mov RetVal = r0 | ||
487 | } | ||
488 | ;; | ||
489 | { | ||
490 | .mmi | ||
491 | lfetch.nt1 [InPrefetch], 0x80 | ||
492 | ADDP OutPrefetch = 0, OutputBuffer | ||
493 | } | ||
494 | { // Return 0 if the input length is nonsensical | ||
495 | .mib | ||
496 | ADDP StateTable = 0, StateTable | ||
497 | cmp.ge.unc L_NOK, L_OK = r0, DataLen | ||
498 | (L_NOK) br.ret.sptk.few rp | ||
499 | } | ||
500 | ;; | ||
501 | { | ||
502 | .mib | ||
503 | cmp.eq.or L_NOK, L_OK = r0, InPtr | ||
504 | cmp.eq.or L_NOK, L_OK = r0, OutPtr | ||
505 | nop 0x0 | ||
506 | } | ||
507 | { | ||
508 | .mib | ||
509 | cmp.eq.or L_NOK, L_OK = r0, StateTable | ||
510 | nop 0x0 | ||
511 | (L_NOK) br.ret.sptk.few rp | ||
512 | } | ||
513 | ;; | ||
514 | LKEY I[1] = [KTable], SZ | ||
515 | /* Prefetch the state-table. It contains 256 elements of size SZ */ | ||
516 | |||
517 | #if SZ == 1 | ||
518 | ADDP tmp0 = 1*128, StateTable | ||
519 | #elif SZ == 2 | ||
520 | ADDP tmp0 = 3*128, StateTable | ||
521 | ADDP tmp1 = 2*128, StateTable | ||
522 | #elif SZ == 4 | ||
523 | ADDP tmp0 = 7*128, StateTable | ||
524 | ADDP tmp1 = 6*128, StateTable | ||
525 | #elif SZ == 8 | ||
526 | ADDP tmp0 = 15*128, StateTable | ||
527 | ADDP tmp1 = 14*128, StateTable | ||
528 | #endif | ||
529 | ;; | ||
530 | #if SZ >= 8 | ||
531 | lfetch.fault.nt1 [tmp0], -256 // 15 | ||
532 | lfetch.fault.nt1 [tmp1], -256;; | ||
533 | lfetch.fault.nt1 [tmp0], -256 // 13 | ||
534 | lfetch.fault.nt1 [tmp1], -256;; | ||
535 | lfetch.fault.nt1 [tmp0], -256 // 11 | ||
536 | lfetch.fault.nt1 [tmp1], -256;; | ||
537 | lfetch.fault.nt1 [tmp0], -256 // 9 | ||
538 | lfetch.fault.nt1 [tmp1], -256;; | ||
539 | #endif | ||
540 | #if SZ >= 4 | ||
541 | lfetch.fault.nt1 [tmp0], -256 // 7 | ||
542 | lfetch.fault.nt1 [tmp1], -256;; | ||
543 | lfetch.fault.nt1 [tmp0], -256 // 5 | ||
544 | lfetch.fault.nt1 [tmp1], -256;; | ||
545 | #endif | ||
546 | #if SZ >= 2 | ||
547 | lfetch.fault.nt1 [tmp0], -256 // 3 | ||
548 | lfetch.fault.nt1 [tmp1], -256;; | ||
549 | #endif | ||
550 | { | ||
551 | .mii | ||
552 | lfetch.fault.nt1 [tmp0] // 1 | ||
553 | add I[1]=1,I[1];; | ||
554 | zxt1 I[1]=I[1] | ||
555 | } | ||
556 | { | ||
557 | .mmi | ||
558 | lfetch.nt1 [InPrefetch], 0x80 | ||
559 | lfetch.excl.nt1 [OutPrefetch], 0x80 | ||
560 | .save pr, PRSave | ||
561 | mov PRSave = pr | ||
562 | } ;; | ||
563 | { | ||
564 | .mmi | ||
565 | lfetch.excl.nt1 [OutPrefetch], 0x80 | ||
566 | LKEY J = [KTable], SZ | ||
567 | ADDP EndPtr = DataLen, InPtr | ||
568 | } ;; | ||
569 | { | ||
570 | .mmi | ||
571 | ADDP EndPtr = -1, EndPtr // Make it point to | ||
572 | // last data byte. | ||
573 | mov One = 1 | ||
574 | .save ar.lc, LCSave | ||
575 | mov LCSave = ar.lc | ||
576 | .body | ||
577 | } ;; | ||
578 | { | ||
579 | .mmb | ||
580 | sub Remainder = 0, OutPtr | ||
581 | cmp.gtu pSmall, p0 = $threshold, DataLen | ||
582 | (pSmall) br.cond.dpnt .rc4Remainder // Data too small for | ||
583 | // big loop. | ||
584 | } ;; | ||
585 | { | ||
586 | .mmi | ||
587 | and Remainder = 0x7, Remainder | ||
588 | ;; | ||
589 | cmp.eq pAligned, pUnaligned = Remainder, r0 | ||
590 | nop 0x0 | ||
591 | } ;; | ||
592 | { | ||
593 | .mmb | ||
594 | .pred.rel "mutex",pUnaligned,pAligned | ||
595 | (pUnaligned) add Remainder = -1, Remainder | ||
596 | (pAligned) sub Remainder = EndPtr, InPtr | ||
597 | (pAligned) br.cond.dptk.many .rc4Aligned | ||
598 | } ;; | ||
599 | { | ||
600 | .mmi | ||
601 | nop 0x0 | ||
602 | nop 0x0 | ||
603 | mov.i ar.lc = Remainder | ||
604 | } | ||
605 | |||
606 | /* Do the initial few bytes via the compact, modulo-scheduled loop | ||
607 | until the output pointer is 8-byte-aligned. */ | ||
608 | |||
609 | MODSCHED_RC4_PROLOGUE | ||
610 | MODSCHED_RC4_LOOP(.RC4AlignLoop) | ||
611 | |||
612 | { | ||
613 | .mib | ||
614 | sub Remainder = EndPtr, InPtr | ||
615 | zxt1 IFinal = IFinal | ||
616 | clrrrb // Clear CFM.rrb.pr so | ||
617 | ;; // next "mov pr.rot = N" | ||
618 | // does the right thing. | ||
619 | } | ||
620 | { | ||
621 | .mmi | ||
622 | mov I[1] = IFinal | ||
623 | nop 0x0 | ||
624 | nop 0x0 | ||
625 | } ;; | ||
626 | |||
627 | |||
628 | .rc4Aligned: | ||
629 | |||
630 | /* | ||
631 | Unrolled loop count = (Remainder - ($unroll_count+1)*$phases)/($unroll_count*$phases) | ||
632 | */ | ||
633 | |||
634 | { | ||
635 | .mlx | ||
636 | add LoopCount = 1 - ($unroll_count + 1)*$phases, Remainder | ||
637 | movl Remainder = 0xaaaaaaaaaaaaaaab | ||
638 | } ;; | ||
639 | { | ||
640 | .mmi | ||
641 | setf.sig f6 = LoopCount // M2, M3 6 cyc | ||
642 | setf.sig f7 = Remainder // M2, M3 6 cyc | ||
643 | nop 0x0 | ||
644 | } ;; | ||
645 | { | ||
646 | .mfb | ||
647 | nop 0x0 | ||
648 | xmpy.hu f6 = f6, f7 | ||
649 | nop 0x0 | ||
650 | } ;; | ||
651 | { | ||
652 | .mmi | ||
653 | getf.sig LoopCount = f6;; // M2 5 cyc | ||
654 | nop 0x0 | ||
655 | shr.u LoopCount = LoopCount, 4 | ||
656 | } ;; | ||
657 | { | ||
658 | .mmi | ||
659 | nop 0x0 | ||
660 | nop 0x0 | ||
661 | mov.i ar.lc = LoopCount | ||
662 | } ;; | ||
663 | |||
664 | /* Now comes the unrolled loop: */ | ||
665 | |||
666 | .rc4Prologue: | ||
667 | ___ | ||
668 | |||
669 | $iteration = 0; | ||
670 | |||
671 | # Generate the prologue: | ||
672 | $predicates = 1; | ||
673 | for ($i = 0; $i < $phases; ++$i) { | ||
674 | &emit_body (\$code, \$bypass, $iteration++, $predicates); | ||
675 | $predicates = ($predicates << 1) | 1; | ||
676 | } | ||
677 | |||
678 | $code.=<<___; | ||
679 | .rc4Loop: | ||
680 | ___ | ||
681 | |||
682 | # Generate the body: | ||
683 | for ($i = 0; $i < $unroll_count*$phases; ++$i) { | ||
684 | &emit_body (\$code, \$bypass, $iteration++, $predicates); | ||
685 | } | ||
686 | |||
687 | $code.=<<___; | ||
688 | .rc4Epilogue: | ||
689 | ___ | ||
690 | |||
691 | # Generate the epilogue: | ||
692 | for ($i = 0; $i < $phases; ++$i) { | ||
693 | $predicates <<= 1; | ||
694 | &emit_body (\$code, \$bypass, $iteration++, $predicates); | ||
695 | } | ||
696 | |||
697 | $code.=<<___; | ||
698 | { | ||
699 | .mmi | ||
700 | lfetch.nt1 [EndPtr] // fetch line with last byte | ||
701 | mov IFinal = I[1] | ||
702 | nop 0x0 | ||
703 | } | ||
704 | |||
705 | .rc4Remainder: | ||
706 | { | ||
707 | .mmi | ||
708 | sub Remainder = EndPtr, InPtr // Calculate | ||
709 | // # of bytes | ||
710 | // left - 1 | ||
711 | nop 0x0 | ||
712 | nop 0x0 | ||
713 | } ;; | ||
714 | { | ||
715 | .mib | ||
716 | cmp.eq pDone, p0 = -1, Remainder // done already? | ||
717 | mov.i ar.lc = Remainder | ||
718 | (pDone) br.cond.dptk.few .rc4Complete | ||
719 | } | ||
720 | |||
721 | /* Do the remaining bytes via the compact, modulo-scheduled loop */ | ||
722 | |||
723 | MODSCHED_RC4_PROLOGUE | ||
724 | MODSCHED_RC4_LOOP(.RC4RestLoop) | ||
725 | |||
726 | .rc4Complete: | ||
727 | { | ||
728 | .mmi | ||
729 | add KTable = -SZ, KTable | ||
730 | add IFinal = -1, IFinal | ||
731 | mov ar.lc = LCSave | ||
732 | } ;; | ||
733 | { | ||
734 | .mii | ||
735 | SKEY [KTable] = J,-SZ | ||
736 | zxt1 IFinal = IFinal | ||
737 | mov pr = PRSave, 0x1FFFF | ||
738 | } ;; | ||
739 | { | ||
740 | .mib | ||
741 | SKEY [KTable] = IFinal | ||
742 | add RetVal = 1, r0 | ||
743 | br.ret.sptk.few rp | ||
744 | } ;; | ||
745 | ___ | ||
746 | |||
747 | # Last but not least, emit the code for the bypass-code of the unrolled loop: | ||
748 | |||
749 | $code.=$bypass; | ||
750 | |||
751 | $code.=<<___; | ||
752 | .endp RC4 | ||
753 | ___ | ||
754 | |||
755 | print $code; | ||
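The output-batching trick described in the rc4-ia64.pl comments above (accumulate eight cipher bytes in a register and write them with one st8, because st1 to the same bank issues at most once per ~4 cycles on McKinley/Madison) can be sketched in C roughly as follows. This is only an illustration under a little-endian assumption; the real code uses the dep instruction with BYTE_POS() to handle both byte orders and first aligns the output pointer.

#include <stdint.h>
#include <string.h>

void
store_batched(uint8_t *out, const uint8_t byte[8])
{
	uint64_t word = 0;
	int n;

	/* deposit each byte into its slot of the 64-bit word,
	 * roughly: dep OutWord = Data, OutWord, BYTE_POS(n), 8 */
	for (n = 0; n < 8; n++)
		word |= (uint64_t)byte[n] << (8 * n);

	/* one 8-byte store instead of eight 1-byte stores,
	 * roughly: st8 [OutPtr] = OutWord */
	memcpy(out, &word, sizeof(word));
}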
diff --git a/src/lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl
deleted file mode 100644
index 7f684092d4..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl
+++ /dev/null
@@ -1,631 +0,0 @@
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | |||
10 | # June 2011 | ||
11 | # | ||
12 | # This is RC4+MD5 "stitch" implementation. The idea, as spelled in | ||
13 | # http://download.intel.com/design/intarch/papers/323686.pdf, is that | ||
14 | # since both algorithms exhibit instruction-level parallelism, ILP, | ||
15 | # below theoretical maximum, interleaving them makes it possible to utilize | ||
16 | # processor resources better and achieve better performance. RC4 | ||
17 | # instruction sequence is virtually identical to rc4-x86_64.pl, which | ||
18 | # is heavily based on submission by Maxim Perminov, Maxim Locktyukhin | ||
19 | # and Jim Guilford of Intel. MD5 is a fresh implementation aiming to | ||
20 | # minimize register usage, which was used as "main thread" with RC4 | ||
21 | # weaved into it, one RC4 round per one MD5 round. In addition to the | ||
22 | # stitched subroutine the script can generate standalone replacement | ||
23 | # md5_block_asm_data_order and RC4. Below are performance numbers in | ||
24 | # cycles per processed byte (less is better) for the standalone | ||
25 | # subroutines, their sum, and the stitched one: | ||
26 | # | ||
27 | # RC4 MD5 RC4+MD5 stitch gain | ||
28 | # Opteron 6.5(*) 5.4 11.9 7.0 +70%(*) | ||
29 | # Core2 6.5 5.8 12.3 7.7 +60% | ||
30 | # Westmere 4.3 5.2 9.5 7.0 +36% | ||
31 | # Sandy Bridge 4.2 5.5 9.7 6.8 +43% | ||
32 | # Atom 9.3 6.5 15.8 11.1 +42% | ||
33 | # | ||
34 | # (*) rc4-x86_64.pl delivers 5.3 on Opteron, so real improvement | ||
35 | # is +53%... | ||
36 | |||
37 | my ($rc4,$md5)=(1,1); # what to generate? | ||
38 | my $D="#" if (!$md5); # if set to "#", MD5 is stitched into RC4(), | ||
39 | # but its result is discarded. Idea here is | ||
40 | # to be able to use 'openssl speed rc4' for | ||
41 | # benchmarking the stitched subroutine... | ||
42 | |||
43 | my $flavour = shift; | ||
44 | my $output = shift; | ||
45 | if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } | ||
46 | |||
47 | my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); | ||
48 | |||
49 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate; | ||
50 | ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or | ||
51 | ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or | ||
52 | die "can't locate x86_64-xlate.pl"; | ||
53 | |||
54 | open STDOUT,"| $^X $xlate $flavour $output"; | ||
55 | |||
56 | my ($dat,$in0,$out,$ctx,$inp,$len, $func,$nargs); | ||
57 | |||
58 | if ($rc4 && !$md5) { | ||
59 | ($dat,$len,$in0,$out) = ("%rdi","%rsi","%rdx","%rcx"); | ||
60 | $func="RC4"; $nargs=4; | ||
61 | } elsif ($md5 && !$rc4) { | ||
62 | ($ctx,$inp,$len) = ("%rdi","%rsi","%rdx"); | ||
63 | $func="md5_block_asm_data_order"; $nargs=3; | ||
64 | } else { | ||
65 | ($dat,$in0,$out,$ctx,$inp,$len) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9"); | ||
66 | $func="rc4_md5_enc"; $nargs=6; | ||
67 | # void rc4_md5_enc( | ||
68 | # RC4_KEY *key, # | ||
69 | # const void *in0, # RC4 input | ||
70 | # void *out, # RC4 output | ||
71 | # MD5_CTX *ctx, # | ||
72 | # const void *inp, # MD5 input | ||
73 | # size_t len); # number of 64-byte blocks | ||
74 | } | ||
75 | |||
76 | my @K=( 0xd76aa478,0xe8c7b756,0x242070db,0xc1bdceee, | ||
77 | 0xf57c0faf,0x4787c62a,0xa8304613,0xfd469501, | ||
78 | 0x698098d8,0x8b44f7af,0xffff5bb1,0x895cd7be, | ||
79 | 0x6b901122,0xfd987193,0xa679438e,0x49b40821, | ||
80 | |||
81 | 0xf61e2562,0xc040b340,0x265e5a51,0xe9b6c7aa, | ||
82 | 0xd62f105d,0x02441453,0xd8a1e681,0xe7d3fbc8, | ||
83 | 0x21e1cde6,0xc33707d6,0xf4d50d87,0x455a14ed, | ||
84 | 0xa9e3e905,0xfcefa3f8,0x676f02d9,0x8d2a4c8a, | ||
85 | |||
86 | 0xfffa3942,0x8771f681,0x6d9d6122,0xfde5380c, | ||
87 | 0xa4beea44,0x4bdecfa9,0xf6bb4b60,0xbebfbc70, | ||
88 | 0x289b7ec6,0xeaa127fa,0xd4ef3085,0x04881d05, | ||
89 | 0xd9d4d039,0xe6db99e5,0x1fa27cf8,0xc4ac5665, | ||
90 | |||
91 | 0xf4292244,0x432aff97,0xab9423a7,0xfc93a039, | ||
92 | 0x655b59c3,0x8f0ccc92,0xffeff47d,0x85845dd1, | ||
93 | 0x6fa87e4f,0xfe2ce6e0,0xa3014314,0x4e0811a1, | ||
94 | 0xf7537e82,0xbd3af235,0x2ad7d2bb,0xeb86d391 ); | ||
95 | |||
96 | my @V=("%r8d","%r9d","%r10d","%r11d"); # MD5 registers | ||
97 | my $tmp="%r12d"; | ||
98 | |||
99 | my @XX=("%rbp","%rsi"); # RC4 registers | ||
100 | my @TX=("%rax","%rbx"); | ||
101 | my $YY="%rcx"; | ||
102 | my $TY="%rdx"; | ||
103 | |||
104 | my $MOD=32; # 16, 32 or 64 | ||
105 | |||
106 | $code.=<<___; | ||
107 | .text | ||
108 | .align 16 | ||
109 | |||
110 | .globl $func | ||
111 | .type $func,\@function,$nargs | ||
112 | $func: | ||
113 | cmp \$0,$len | ||
114 | je .Labort | ||
115 | push %rbx | ||
116 | push %rbp | ||
117 | push %r12 | ||
118 | push %r13 | ||
119 | push %r14 | ||
120 | push %r15 | ||
121 | sub \$40,%rsp | ||
122 | .Lbody: | ||
123 | ___ | ||
124 | if ($rc4) { | ||
125 | $code.=<<___; | ||
126 | $D#md5# mov $ctx,%r11 # reassign arguments | ||
127 | mov $len,%r12 | ||
128 | mov $in0,%r13 | ||
129 | mov $out,%r14 | ||
130 | $D#md5# mov $inp,%r15 | ||
131 | ___ | ||
132 | $ctx="%r11" if ($md5); # reassign arguments | ||
133 | $len="%r12"; | ||
134 | $in0="%r13"; | ||
135 | $out="%r14"; | ||
136 | $inp="%r15" if ($md5); | ||
137 | $inp=$in0 if (!$md5); | ||
138 | $code.=<<___; | ||
139 | xor $XX[0],$XX[0] | ||
140 | xor $YY,$YY | ||
141 | |||
142 | lea 8($dat),$dat | ||
143 | mov -8($dat),$XX[0]#b | ||
144 | mov -4($dat),$YY#b | ||
145 | |||
146 | inc $XX[0]#b | ||
147 | sub $in0,$out | ||
148 | movl ($dat,$XX[0],4),$TX[0]#d | ||
149 | ___ | ||
150 | $code.=<<___ if (!$md5); | ||
151 | xor $TX[1],$TX[1] | ||
152 | test \$-128,$len | ||
153 | jz .Loop1 | ||
154 | sub $XX[0],$TX[1] | ||
155 | and \$`$MOD-1`,$TX[1] | ||
156 | jz .Loop${MOD}_is_hot | ||
157 | sub $TX[1],$len | ||
158 | .Loop${MOD}_warmup: | ||
159 | add $TX[0]#b,$YY#b | ||
160 | movl ($dat,$YY,4),$TY#d | ||
161 | movl $TX[0]#d,($dat,$YY,4) | ||
162 | movl $TY#d,($dat,$XX[0],4) | ||
163 | add $TY#b,$TX[0]#b | ||
164 | inc $XX[0]#b | ||
165 | movl ($dat,$TX[0],4),$TY#d | ||
166 | movl ($dat,$XX[0],4),$TX[0]#d | ||
167 | xorb ($in0),$TY#b | ||
168 | movb $TY#b,($out,$in0) | ||
169 | lea 1($in0),$in0 | ||
170 | dec $TX[1] | ||
171 | jnz .Loop${MOD}_warmup | ||
172 | |||
173 | mov $YY,$TX[1] | ||
174 | xor $YY,$YY | ||
175 | mov $TX[1]#b,$YY#b | ||
176 | |||
177 | .Loop${MOD}_is_hot: | ||
178 | mov $len,32(%rsp) # save original $len | ||
179 | shr \$6,$len # number of 64-byte blocks | ||
180 | ___ | ||
181 | if ($D && !$md5) { # stitch in dummy MD5 | ||
182 | $md5=1; | ||
183 | $ctx="%r11"; | ||
184 | $inp="%r15"; | ||
185 | $code.=<<___; | ||
186 | mov %rsp,$ctx | ||
187 | mov $in0,$inp | ||
188 | ___ | ||
189 | } | ||
190 | } | ||
191 | $code.=<<___; | ||
192 | #rc4# add $TX[0]#b,$YY#b | ||
193 | #rc4# lea ($dat,$XX[0],4),$XX[1] | ||
194 | shl \$6,$len | ||
195 | add $inp,$len # pointer to the end of input | ||
196 | mov $len,16(%rsp) | ||
197 | |||
198 | #md5# mov $ctx,24(%rsp) # save pointer to MD5_CTX | ||
199 | #md5# mov 0*4($ctx),$V[0] # load current hash value from MD5_CTX | ||
200 | #md5# mov 1*4($ctx),$V[1] | ||
201 | #md5# mov 2*4($ctx),$V[2] | ||
202 | #md5# mov 3*4($ctx),$V[3] | ||
203 | jmp .Loop | ||
204 | |||
205 | .align 16 | ||
206 | .Loop: | ||
207 | #md5# mov $V[0],0*4(%rsp) # put aside current hash value | ||
208 | #md5# mov $V[1],1*4(%rsp) | ||
209 | #md5# mov $V[2],2*4(%rsp) | ||
210 | #md5# mov $V[3],$tmp # forward reference | ||
211 | #md5# mov $V[3],3*4(%rsp) | ||
212 | ___ | ||
213 | |||
214 | sub R0 { | ||
215 | my ($i,$a,$b,$c,$d)=@_; | ||
216 | my @rot0=(7,12,17,22); | ||
217 | my $j=$i%16; | ||
218 | my $k=$i%$MOD; | ||
219 | my $xmm="%xmm".($j&1); | ||
220 | $code.=" movdqu ($in0),%xmm2\n" if ($rc4 && $j==15); | ||
221 | $code.=" add \$$MOD,$XX[0]#b\n" if ($rc4 && $j==15 && $k==$MOD-1); | ||
222 | $code.=" pxor $xmm,$xmm\n" if ($rc4 && $j<=1); | ||
223 | $code.=<<___; | ||
224 | #rc4# movl ($dat,$YY,4),$TY#d | ||
225 | #md5# xor $c,$tmp | ||
226 | #rc4# movl $TX[0]#d,($dat,$YY,4) | ||
227 | #md5# and $b,$tmp | ||
228 | #md5# add 4*`$j`($inp),$a | ||
229 | #rc4# add $TY#b,$TX[0]#b | ||
230 | #rc4# movl `4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d | ||
231 | #md5# add \$$K[$i],$a | ||
232 | #md5# xor $d,$tmp | ||
233 | #rc4# movz $TX[0]#b,$TX[0]#d | ||
234 | #rc4# movl $TY#d,4*$k($XX[1]) | ||
235 | #md5# add $tmp,$a | ||
236 | #rc4# add $TX[1]#b,$YY#b | ||
237 | #md5# rol \$$rot0[$j%4],$a | ||
238 | #md5# mov `$j==15?"$b":"$c"`,$tmp # forward reference | ||
239 | #rc4# pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n | ||
240 | #md5# add $b,$a | ||
241 | ___ | ||
242 | $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1); | ||
243 | mov $YY,$XX[1] | ||
244 | xor $YY,$YY # keyword to partial register | ||
245 | mov $XX[1]#b,$YY#b | ||
246 | lea ($dat,$XX[0],4),$XX[1] | ||
247 | ___ | ||
248 | $code.=<<___ if ($rc4 && $j==15); | ||
249 | psllq \$8,%xmm1 | ||
250 | pxor %xmm0,%xmm2 | ||
251 | pxor %xmm1,%xmm2 | ||
252 | ___ | ||
253 | } | ||
254 | sub R1 { | ||
255 | my ($i,$a,$b,$c,$d)=@_; | ||
256 | my @rot1=(5,9,14,20); | ||
257 | my $j=$i%16; | ||
258 | my $k=$i%$MOD; | ||
259 | my $xmm="%xmm".($j&1); | ||
260 | $code.=" movdqu 16($in0),%xmm3\n" if ($rc4 && $j==15); | ||
261 | $code.=" add \$$MOD,$XX[0]#b\n" if ($rc4 && $j==15 && $k==$MOD-1); | ||
262 | $code.=" pxor $xmm,$xmm\n" if ($rc4 && $j<=1); | ||
263 | $code.=<<___; | ||
264 | #rc4# movl ($dat,$YY,4),$TY#d | ||
265 | #md5# xor $b,$tmp | ||
266 | #rc4# movl $TX[0]#d,($dat,$YY,4) | ||
267 | #md5# and $d,$tmp | ||
268 | #md5# add 4*`((1+5*$j)%16)`($inp),$a | ||
269 | #rc4# add $TY#b,$TX[0]#b | ||
270 | #rc4# movl `4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d | ||
271 | #md5# add \$$K[$i],$a | ||
272 | #md5# xor $c,$tmp | ||
273 | #rc4# movz $TX[0]#b,$TX[0]#d | ||
274 | #rc4# movl $TY#d,4*$k($XX[1]) | ||
275 | #md5# add $tmp,$a | ||
276 | #rc4# add $TX[1]#b,$YY#b | ||
277 | #md5# rol \$$rot1[$j%4],$a | ||
278 | #md5# mov `$j==15?"$c":"$b"`,$tmp # forward reference | ||
279 | #rc4# pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n | ||
280 | #md5# add $b,$a | ||
281 | ___ | ||
282 | $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1); | ||
283 | mov $YY,$XX[1] | ||
284 | xor $YY,$YY # keyword to partial register | ||
285 | mov $XX[1]#b,$YY#b | ||
286 | lea ($dat,$XX[0],4),$XX[1] | ||
287 | ___ | ||
288 | $code.=<<___ if ($rc4 && $j==15); | ||
289 | psllq \$8,%xmm1 | ||
290 | pxor %xmm0,%xmm3 | ||
291 | pxor %xmm1,%xmm3 | ||
292 | ___ | ||
293 | } | ||
294 | sub R2 { | ||
295 | my ($i,$a,$b,$c,$d)=@_; | ||
296 | my @rot2=(4,11,16,23); | ||
297 | my $j=$i%16; | ||
298 | my $k=$i%$MOD; | ||
299 | my $xmm="%xmm".($j&1); | ||
300 | $code.=" movdqu 32($in0),%xmm4\n" if ($rc4 && $j==15); | ||
301 | $code.=" add \$$MOD,$XX[0]#b\n" if ($rc4 && $j==15 && $k==$MOD-1); | ||
302 | $code.=" pxor $xmm,$xmm\n" if ($rc4 && $j<=1); | ||
303 | $code.=<<___; | ||
304 | #rc4# movl ($dat,$YY,4),$TY#d | ||
305 | #md5# xor $c,$tmp | ||
306 | #rc4# movl $TX[0]#d,($dat,$YY,4) | ||
307 | #md5# xor $b,$tmp | ||
308 | #md5# add 4*`((5+3*$j)%16)`($inp),$a | ||
309 | #rc4# add $TY#b,$TX[0]#b | ||
310 | #rc4# movl `4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d | ||
311 | #md5# add \$$K[$i],$a | ||
312 | #rc4# movz $TX[0]#b,$TX[0]#d | ||
313 | #md5# add $tmp,$a | ||
314 | #rc4# movl $TY#d,4*$k($XX[1]) | ||
315 | #rc4# add $TX[1]#b,$YY#b | ||
316 | #md5# rol \$$rot2[$j%4],$a | ||
317 | #md5# mov `$j==15?"\\\$-1":"$c"`,$tmp # forward reference | ||
318 | #rc4# pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n | ||
319 | #md5# add $b,$a | ||
320 | ___ | ||
321 | $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1); | ||
322 | mov $YY,$XX[1] | ||
323 | xor $YY,$YY # keyword to partial register | ||
324 | mov $XX[1]#b,$YY#b | ||
325 | lea ($dat,$XX[0],4),$XX[1] | ||
326 | ___ | ||
327 | $code.=<<___ if ($rc4 && $j==15); | ||
328 | psllq \$8,%xmm1 | ||
329 | pxor %xmm0,%xmm4 | ||
330 | pxor %xmm1,%xmm4 | ||
331 | ___ | ||
332 | } | ||
333 | sub R3 { | ||
334 | my ($i,$a,$b,$c,$d)=@_; | ||
335 | my @rot3=(6,10,15,21); | ||
336 | my $j=$i%16; | ||
337 | my $k=$i%$MOD; | ||
338 | my $xmm="%xmm".($j&1); | ||
339 | $code.=" movdqu 48($in0),%xmm5\n" if ($rc4 && $j==15); | ||
340 | $code.=" add \$$MOD,$XX[0]#b\n" if ($rc4 && $j==15 && $k==$MOD-1); | ||
341 | $code.=" pxor $xmm,$xmm\n" if ($rc4 && $j<=1); | ||
342 | $code.=<<___; | ||
343 | #rc4# movl ($dat,$YY,4),$TY#d | ||
344 | #md5# xor $d,$tmp | ||
345 | #rc4# movl $TX[0]#d,($dat,$YY,4) | ||
346 | #md5# or $b,$tmp | ||
347 | #md5# add 4*`((7*$j)%16)`($inp),$a | ||
348 | #rc4# add $TY#b,$TX[0]#b | ||
349 | #rc4# movl `4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d | ||
350 | #md5# add \$$K[$i],$a | ||
351 | #rc4# movz $TX[0]#b,$TX[0]#d | ||
352 | #md5# xor $c,$tmp | ||
353 | #rc4# movl $TY#d,4*$k($XX[1]) | ||
354 | #md5# add $tmp,$a | ||
355 | #rc4# add $TX[1]#b,$YY#b | ||
356 | #md5# rol \$$rot3[$j%4],$a | ||
357 | #md5# mov \$-1,$tmp # forward reference | ||
358 | #rc4# pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n | ||
359 | #md5# add $b,$a | ||
360 | ___ | ||
361 | $code.=<<___ if ($rc4 && $j==15); | ||
362 | mov $XX[0],$XX[1] | ||
363 | xor $XX[0],$XX[0] # keyword to partial register | ||
364 | mov $XX[1]#b,$XX[0]#b | ||
365 | mov $YY,$XX[1] | ||
366 | xor $YY,$YY # keyword to partial register | ||
367 | mov $XX[1]#b,$YY#b | ||
368 | lea ($dat,$XX[0],4),$XX[1] | ||
369 | psllq \$8,%xmm1 | ||
370 | pxor %xmm0,%xmm5 | ||
371 | pxor %xmm1,%xmm5 | ||
372 | ___ | ||
373 | } | ||
374 | |||
375 | my $i=0; | ||
376 | for(;$i<16;$i++) { R0($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); } | ||
377 | for(;$i<32;$i++) { R1($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); } | ||
378 | for(;$i<48;$i++) { R2($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); } | ||
379 | for(;$i<64;$i++) { R3($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); } | ||
380 | |||
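R0() through R3() above emit the 64 MD5 steps (rounds F, G, H and I, with the standard per-round rotation constants and message-word schedules), threading the RC4 state updates between them. For orientation, one round-1 step, as produced by the #md5#-tagged lines of R0() with the RC4 interleaving stripped, corresponds to the C sketch below; rotl32, md5_f_step and the argument names are illustrative, not identifiers taken from this module.

    #include <stdint.h>

    /* One MD5 round-1 ("F") step; mirrors the xor/and/xor + add/rol/add
     * sequence emitted by R0(), minus the interleaved #rc4# instructions. */
    static inline uint32_t rotl32(uint32_t v, int s)
    {
            return (v << s) | (v >> (32 - s));
    }

    static uint32_t md5_f_step(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                               uint32_t x_j, uint32_t k_i, int s)
    {
            uint32_t f = ((c ^ d) & b) ^ d;          /* F(b,c,d) = (b&c)|(~b&d) */

            return b + rotl32(a + f + x_j + k_i, s); /* s is 7, 12, 17 or 22 */
    }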
381 | $code.=<<___; | ||
382 | #md5# add 0*4(%rsp),$V[0] # accumulate hash value | ||
383 | #md5# add 1*4(%rsp),$V[1] | ||
384 | #md5# add 2*4(%rsp),$V[2] | ||
385 | #md5# add 3*4(%rsp),$V[3] | ||
386 | |||
387 | #rc4# movdqu %xmm2,($out,$in0) # write RC4 output | ||
388 | #rc4# movdqu %xmm3,16($out,$in0) | ||
389 | #rc4# movdqu %xmm4,32($out,$in0) | ||
390 | #rc4# movdqu %xmm5,48($out,$in0) | ||
391 | #md5# lea 64($inp),$inp | ||
392 | #rc4# lea 64($in0),$in0 | ||
393 | cmp 16(%rsp),$inp # are we done? | ||
394 | jb .Loop | ||
395 | |||
396 | #md5# mov 24(%rsp),$len # restore pointer to MD5_CTX | ||
397 | #rc4# sub $TX[0]#b,$YY#b # correct $YY | ||
398 | #md5# mov $V[0],0*4($len) # write MD5_CTX | ||
399 | #md5# mov $V[1],1*4($len) | ||
400 | #md5# mov $V[2],2*4($len) | ||
401 | #md5# mov $V[3],3*4($len) | ||
402 | ___ | ||
403 | $code.=<<___ if ($rc4 && (!$md5 || $D)); | ||
404 | mov 32(%rsp),$len # restore original $len | ||
405 | and \$63,$len # remaining bytes | ||
406 | jnz .Loop1 | ||
407 | jmp .Ldone | ||
408 | |||
409 | .align 16 | ||
410 | .Loop1: | ||
411 | add $TX[0]#b,$YY#b | ||
412 | movl ($dat,$YY,4),$TY#d | ||
413 | movl $TX[0]#d,($dat,$YY,4) | ||
414 | movl $TY#d,($dat,$XX[0],4) | ||
415 | add $TY#b,$TX[0]#b | ||
416 | inc $XX[0]#b | ||
417 | movl ($dat,$TX[0],4),$TY#d | ||
418 | movl ($dat,$XX[0],4),$TX[0]#d | ||
419 | xorb ($in0),$TY#b | ||
420 | movb $TY#b,($out,$in0) | ||
421 | lea 1($in0),$in0 | ||
422 | dec $len | ||
423 | jnz .Loop1 | ||
424 | |||
425 | .Ldone: | ||
426 | ___ | ||
427 | $code.=<<___; | ||
428 | #rc4# sub \$1,$XX[0]#b | ||
429 | #rc4# movl $XX[0]#d,-8($dat) | ||
430 | #rc4# movl $YY#d,-4($dat) | ||
431 | |||
432 | mov 40(%rsp),%r15 | ||
433 | mov 48(%rsp),%r14 | ||
434 | mov 56(%rsp),%r13 | ||
435 | mov 64(%rsp),%r12 | ||
436 | mov 72(%rsp),%rbp | ||
437 | mov 80(%rsp),%rbx | ||
438 | lea 88(%rsp),%rsp | ||
439 | .Lepilogue: | ||
440 | .Labort: | ||
441 | ret | ||
442 | .size $func,.-$func | ||
443 | ___ | ||
444 | |||
445 | if ($rc4 && $D) { # sole purpose of this section is to provide | ||
446 | # option to use the generated module as drop-in | ||
447 | # replacement for rc4-x86_64.pl for debugging | ||
448 | # and testing purposes... | ||
449 | my ($idx,$ido)=("%r8","%r9"); | ||
450 | my ($dat,$len,$inp)=("%rdi","%rsi","%rdx"); | ||
451 | |||
452 | $code.=<<___; | ||
453 | .globl RC4_set_key | ||
454 | .type RC4_set_key,\@function,3 | ||
455 | .align 16 | ||
456 | RC4_set_key: | ||
457 | lea 8($dat),$dat | ||
458 | lea ($inp,$len),$inp | ||
459 | neg $len | ||
460 | mov $len,%rcx | ||
461 | xor %eax,%eax | ||
462 | xor $ido,$ido | ||
463 | xor %r10,%r10 | ||
464 | xor %r11,%r11 | ||
465 | jmp .Lw1stloop | ||
466 | |||
467 | .align 16 | ||
468 | .Lw1stloop: | ||
469 | mov %eax,($dat,%rax,4) | ||
470 | add \$1,%al | ||
471 | jnc .Lw1stloop | ||
472 | |||
473 | xor $ido,$ido | ||
474 | xor $idx,$idx | ||
475 | .align 16 | ||
476 | .Lw2ndloop: | ||
477 | mov ($dat,$ido,4),%r10d | ||
478 | add ($inp,$len,1),$idx#b | ||
479 | add %r10b,$idx#b | ||
480 | add \$1,$len | ||
481 | mov ($dat,$idx,4),%r11d | ||
482 | cmovz %rcx,$len | ||
483 | mov %r10d,($dat,$idx,4) | ||
484 | mov %r11d,($dat,$ido,4) | ||
485 | add \$1,$ido#b | ||
486 | jnc .Lw2ndloop | ||
487 | |||
488 | xor %eax,%eax | ||
489 | mov %eax,-8($dat) | ||
490 | mov %eax,-4($dat) | ||
491 | ret | ||
492 | .size RC4_set_key,.-RC4_set_key | ||
493 | |||
494 | .globl RC4_options | ||
495 | .type RC4_options,\@abi-omnipotent | ||
496 | .align 16 | ||
497 | RC4_options: | ||
498 | lea .Lopts(%rip),%rax | ||
499 | ret | ||
500 | .align 64 | ||
501 | .Lopts: | ||
502 | .asciz "rc4(64x,int)" | ||
503 | .align 64 | ||
504 | .size RC4_options,.-RC4_options | ||
505 | ___ | ||
506 | } | ||
507 | # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, | ||
508 | # CONTEXT *context,DISPATCHER_CONTEXT *disp) | ||
509 | if ($win64) { | ||
510 | my $rec="%rcx"; | ||
511 | my $frame="%rdx"; | ||
512 | my $context="%r8"; | ||
513 | my $disp="%r9"; | ||
514 | |||
515 | $code.=<<___; | ||
516 | .extern __imp_RtlVirtualUnwind | ||
517 | .type se_handler,\@abi-omnipotent | ||
518 | .align 16 | ||
519 | se_handler: | ||
520 | push %rsi | ||
521 | push %rdi | ||
522 | push %rbx | ||
523 | push %rbp | ||
524 | push %r12 | ||
525 | push %r13 | ||
526 | push %r14 | ||
527 | push %r15 | ||
528 | pushfq | ||
529 | sub \$64,%rsp | ||
530 | |||
531 | mov 120($context),%rax # pull context->Rax | ||
532 | mov 248($context),%rbx # pull context->Rip | ||
533 | |||
534 | lea .Lbody(%rip),%r10 | ||
535 | cmp %r10,%rbx # context->Rip<.Lbody | ||
536 | jb .Lin_prologue | ||
537 | |||
538 | mov 152($context),%rax # pull context->Rsp | ||
539 | |||
540 | lea .Lepilogue(%rip),%r10 | ||
541 | cmp %r10,%rbx # context->Rip>=.Lepilogue | ||
542 | jae .Lin_prologue | ||
543 | |||
544 | mov 40(%rax),%r15 | ||
545 | mov 48(%rax),%r14 | ||
546 | mov 56(%rax),%r13 | ||
547 | mov 64(%rax),%r12 | ||
548 | mov 72(%rax),%rbp | ||
549 | mov 80(%rax),%rbx | ||
550 | lea 88(%rax),%rax | ||
551 | |||
552 | mov %rbx,144($context) # restore context->Rbx | ||
553 | mov %rbp,160($context) # restore context->Rbp | ||
554 | mov %r12,216($context) # restore context->R12 | ||
555 | mov %r13,224($context) # restore context->R13 | ||
556 | mov %r14,232($context) # restore context->R14 | ||
557 | mov %r15,240($context) # restore context->R15 | ||
558 | |||
559 | .Lin_prologue: | ||
560 | mov 8(%rax),%rdi | ||
561 | mov 16(%rax),%rsi | ||
562 | mov %rax,152($context) # restore context->Rsp | ||
563 | mov %rsi,168($context) # restore context->Rsi | ||
564 | mov %rdi,176($context) # restore context->Rdi | ||
565 | |||
566 | mov 40($disp),%rdi # disp->ContextRecord | ||
567 | mov $context,%rsi # context | ||
568 | mov \$154,%ecx # sizeof(CONTEXT) | ||
569 | .long 0xa548f3fc # cld; rep movsq | ||
570 | |||
571 | mov $disp,%rsi | ||
572 | xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER | ||
573 | mov 8(%rsi),%rdx # arg2, disp->ImageBase | ||
574 | mov 0(%rsi),%r8 # arg3, disp->ControlPc | ||
575 | mov 16(%rsi),%r9 # arg4, disp->FunctionEntry | ||
576 | mov 40(%rsi),%r10 # disp->ContextRecord | ||
577 | lea 56(%rsi),%r11 # &disp->HandlerData | ||
578 | lea 24(%rsi),%r12 # &disp->EstablisherFrame | ||
579 | mov %r10,32(%rsp) # arg5 | ||
580 | mov %r11,40(%rsp) # arg6 | ||
581 | mov %r12,48(%rsp) # arg7 | ||
582 | mov %rcx,56(%rsp) # arg8, (NULL) | ||
583 | call *__imp_RtlVirtualUnwind(%rip) | ||
584 | |||
585 | mov \$1,%eax # ExceptionContinueSearch | ||
586 | add \$64,%rsp | ||
587 | popfq | ||
588 | pop %r15 | ||
589 | pop %r14 | ||
590 | pop %r13 | ||
591 | pop %r12 | ||
592 | pop %rbp | ||
593 | pop %rbx | ||
594 | pop %rdi | ||
595 | pop %rsi | ||
596 | ret | ||
597 | .size se_handler,.-se_handler | ||
598 | |||
599 | .section .pdata | ||
600 | .align 4 | ||
601 | .rva .LSEH_begin_$func | ||
602 | .rva .LSEH_end_$func | ||
603 | .rva .LSEH_info_$func | ||
604 | |||
605 | .section .xdata | ||
606 | .align 8 | ||
607 | .LSEH_info_$func: | ||
608 | .byte 9,0,0,0 | ||
609 | .rva se_handler | ||
610 | ___ | ||
611 | } | ||
612 | |||
613 | sub reg_part { | ||
614 | my ($reg,$conv)=@_; | ||
615 | if ($reg =~ /%r[0-9]+/) { $reg .= $conv; } | ||
616 | elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; } | ||
617 | elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; } | ||
618 | elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; } | ||
619 | return $reg; | ||
620 | } | ||
621 | |||
622 | $code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem; | ||
623 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
624 | $code =~ s/pinsrw\s+\$0,/movd /gm; | ||
625 | |||
626 | $code =~ s/#md5#//gm if ($md5); | ||
627 | $code =~ s/#rc4#//gm if ($rc4); | ||
628 | |||
629 | print $code; | ||
630 | |||
631 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/rc4/asm/rc4-parisc.pl b/src/lib/libcrypto/rc4/asm/rc4-parisc.pl deleted file mode 100644 index 9165067080..0000000000 --- a/src/lib/libcrypto/rc4/asm/rc4-parisc.pl +++ /dev/null | |||
@@ -1,313 +0,0 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | |||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | |||
10 | # RC4 for PA-RISC. | ||
11 | |||
12 | # June 2009. | ||
13 | # | ||
14 | # Performance is 33% better than gcc 3.2 generated code on PA-7100LC. | ||
15 | # For reference, [4x] unrolled loop is >40% faster than folded one. | ||
16 | # It's possible to unroll the loop 8 times on PA-RISC 2.0, but the | ||
17 | # improvement is not believed to be sufficient to justify the effort... | ||

18 | # | ||
19 | # Special thanks to polarhome.com for providing HP-UX account. | ||
20 | |||
21 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
22 | |||
23 | $flavour = shift; | ||
24 | $output = shift; | ||
25 | open STDOUT,">$output"; | ||
26 | |||
27 | if ($flavour =~ /64/) { | ||
28 | $LEVEL ="2.0W"; | ||
29 | $SIZE_T =8; | ||
30 | $FRAME_MARKER =80; | ||
31 | $SAVED_RP =16; | ||
32 | $PUSH ="std"; | ||
33 | $PUSHMA ="std,ma"; | ||
34 | $POP ="ldd"; | ||
35 | $POPMB ="ldd,mb"; | ||
36 | } else { | ||
37 | $LEVEL ="1.0"; | ||
38 | $SIZE_T =4; | ||
39 | $FRAME_MARKER =48; | ||
40 | $SAVED_RP =20; | ||
41 | $PUSH ="stw"; | ||
42 | $PUSHMA ="stwm"; | ||
43 | $POP ="ldw"; | ||
44 | $POPMB ="ldwm"; | ||
45 | } | ||
46 | |||
47 | $FRAME=4*$SIZE_T+$FRAME_MARKER; # 4 saved regs + frame marker | ||
48 | # [+ argument transfer] | ||
49 | $SZ=1; # defaults to RC4_CHAR | ||
50 | if (open CONF,"<${dir}../../opensslconf.h") { | ||
51 | while(<CONF>) { | ||
52 | if (m/#\s*define\s+RC4_INT\s+(.*)/) { | ||
53 | $SZ = ($1=~/char$/) ? 1 : 4; | ||
54 | last; | ||
55 | } | ||
56 | } | ||
57 | close CONF; | ||
58 | } | ||
59 | |||
60 | if ($SZ==1) { # RC4_CHAR | ||
61 | $LD="ldb"; | ||
62 | $LDX="ldbx"; | ||
63 | $MKX="addl"; | ||
64 | $ST="stb"; | ||
65 | } else { # RC4_INT (~5% faster than RC4_CHAR on PA-7100LC) | ||
66 | $LD="ldw"; | ||
67 | $LDX="ldwx,s"; | ||
68 | $MKX="sh2addl"; | ||
69 | $ST="stw"; | ||
70 | } | ||
71 | |||
72 | $key="%r26"; | ||
73 | $len="%r25"; | ||
74 | $inp="%r24"; | ||
75 | $out="%r23"; | ||
76 | |||
77 | @XX=("%r19","%r20"); | ||
78 | @TX=("%r21","%r22"); | ||
79 | $YY="%r28"; | ||
80 | $TY="%r29"; | ||
81 | |||
82 | $acc="%r1"; | ||
83 | $ix="%r2"; | ||
84 | $iy="%r3"; | ||
85 | $dat0="%r4"; | ||
86 | $dat1="%r5"; | ||
87 | $rem="%r6"; | ||
88 | $mask="%r31"; | ||
89 | |||
90 | sub unrolledloopbody { | ||
91 | for ($i=0;$i<4;$i++) { | ||
92 | $code.=<<___; | ||
93 | ldo 1($XX[0]),$XX[1] | ||
94 | `sprintf("$LDX %$TY(%$key),%$dat1") if ($i>0)` | ||
95 | and $mask,$XX[1],$XX[1] | ||
96 | $LDX $YY($key),$TY | ||
97 | $MKX $YY,$key,$ix | ||
98 | $LDX $XX[1]($key),$TX[1] | ||
99 | $MKX $XX[0],$key,$iy | ||
100 | $ST $TX[0],0($ix) | ||
101 | comclr,<> $XX[1],$YY,%r0 ; conditional | ||
102 | copy $TX[0],$TX[1] ; move | ||
103 | `sprintf("%sdep %$dat1,%d,8,%$acc",$i==1?"z":"",8*($i-1)+7) if ($i>0)` | ||
104 | $ST $TY,0($iy) | ||
105 | addl $TX[0],$TY,$TY | ||
106 | addl $TX[1],$YY,$YY | ||
107 | and $mask,$TY,$TY | ||
108 | and $mask,$YY,$YY | ||
109 | ___ | ||
110 | push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers | ||
111 | } } | ||
112 | |||
113 | sub foldedloop { | ||
114 | my ($label,$count)=@_; | ||
115 | $code.=<<___; | ||
116 | $label | ||
117 | $MKX $YY,$key,$iy | ||
118 | $LDX $YY($key),$TY | ||
119 | $MKX $XX[0],$key,$ix | ||
120 | $ST $TX[0],0($iy) | ||
121 | ldo 1($XX[0]),$XX[0] | ||
122 | $ST $TY,0($ix) | ||
123 | addl $TX[0],$TY,$TY | ||
124 | ldbx $inp($out),$dat1 | ||
125 | and $mask,$TY,$TY | ||
126 | and $mask,$XX[0],$XX[0] | ||
127 | $LDX $TY($key),$acc | ||
128 | $LDX $XX[0]($key),$TX[0] | ||
129 | ldo 1($out),$out | ||
130 | xor $dat1,$acc,$acc | ||
131 | addl $TX[0],$YY,$YY | ||
132 | stb $acc,-1($out) | ||
133 | addib,<> -1,$count,$label ; $count is always small | ||
134 | and $mask,$YY,$YY | ||
135 | ___ | ||
136 | } | ||
137 | |||
138 | $code=<<___; | ||
139 | .LEVEL $LEVEL | ||
140 | .SPACE \$TEXT\$ | ||
141 | .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY | ||
142 | |||
143 | .EXPORT RC4,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR | ||
144 | RC4 | ||
145 | .PROC | ||
146 | .CALLINFO FRAME=`$FRAME-4*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=6 | ||
147 | .ENTRY | ||
148 | $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue | ||
149 | $PUSHMA %r3,$FRAME(%sp) | ||
150 | $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp) | ||
151 | $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp) | ||
152 | $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp) | ||
153 | |||
154 | cmpib,*= 0,$len,L\$abort | ||
155 | sub $inp,$out,$inp ; distance between $inp and $out | ||
156 | |||
157 | $LD `0*$SZ`($key),$XX[0] | ||
158 | $LD `1*$SZ`($key),$YY | ||
159 | ldo `2*$SZ`($key),$key | ||
160 | |||
161 | ldi 0xff,$mask | ||
162 | ldi 3,$dat0 | ||
163 | |||
164 | ldo 1($XX[0]),$XX[0] ; warm up loop | ||
165 | and $mask,$XX[0],$XX[0] | ||
166 | $LDX $XX[0]($key),$TX[0] | ||
167 | addl $TX[0],$YY,$YY | ||
168 | cmpib,*>>= 6,$len,L\$oop1 ; is $len large enough to bother? | ||
169 | and $mask,$YY,$YY | ||
170 | |||
171 | and,<> $out,$dat0,$rem ; is $out aligned? | ||
172 | b L\$alignedout | ||
173 | subi 4,$rem,$rem | ||
174 | sub $len,$rem,$len | ||
175 | ___ | ||
176 | &foldedloop("L\$alignout",$rem); # process till $out is aligned | ||
177 | |||
178 | $code.=<<___; | ||
179 | L\$alignedout ; $len is at least 4 here | ||
180 | and,<> $inp,$dat0,$acc ; is $inp aligned? | ||
181 | b L\$oop4 | ||
182 | sub $inp,$acc,$rem ; align $inp | ||
183 | |||
184 | sh3addl $acc,%r0,$acc | ||
185 | subi 32,$acc,$acc | ||
186 | mtctl $acc,%cr11 ; load %sar with vshd align factor | ||
187 | ldwx $rem($out),$dat0 | ||
188 | ldo 4($rem),$rem | ||
189 | L\$oop4misalignedinp | ||
190 | ___ | ||
191 | &unrolledloopbody(); | ||
192 | $code.=<<___; | ||
193 | $LDX $TY($key),$ix | ||
194 | ldwx $rem($out),$dat1 | ||
195 | ldo -4($len),$len | ||
196 | or $ix,$acc,$acc ; last piece, no need to dep | ||
197 | vshd $dat0,$dat1,$iy ; align data | ||
198 | copy $dat1,$dat0 | ||
199 | xor $iy,$acc,$acc | ||
200 | stw $acc,0($out) | ||
201 | cmpib,*<< 3,$len,L\$oop4misalignedinp | ||
202 | ldo 4($out),$out | ||
203 | cmpib,*= 0,$len,L\$done | ||
204 | nop | ||
205 | b L\$oop1 | ||
206 | nop | ||
207 | |||
208 | .ALIGN 8 | ||
209 | L\$oop4 | ||
210 | ___ | ||
211 | &unrolledloopbody(); | ||
212 | $code.=<<___; | ||
213 | $LDX $TY($key),$ix | ||
214 | ldwx $inp($out),$dat0 | ||
215 | ldo -4($len),$len | ||
216 | or $ix,$acc,$acc ; last piece, no need to dep | ||
217 | xor $dat0,$acc,$acc | ||
218 | stw $acc,0($out) | ||
219 | cmpib,*<< 3,$len,L\$oop4 | ||
220 | ldo 4($out),$out | ||
221 | cmpib,*= 0,$len,L\$done | ||
222 | nop | ||
223 | ___ | ||
224 | &foldedloop("L\$oop1",$len); | ||
225 | $code.=<<___; | ||
226 | L\$done | ||
227 | $POP `-$FRAME-$SAVED_RP`(%sp),%r2 | ||
228 | ldo -1($XX[0]),$XX[0] ; chill out loop | ||
229 | sub $YY,$TX[0],$YY | ||
230 | and $mask,$XX[0],$XX[0] | ||
231 | and $mask,$YY,$YY | ||
232 | $ST $XX[0],`-2*$SZ`($key) | ||
233 | $ST $YY,`-1*$SZ`($key) | ||
234 | $POP `-$FRAME+1*$SIZE_T`(%sp),%r4 | ||
235 | $POP `-$FRAME+2*$SIZE_T`(%sp),%r5 | ||
236 | $POP `-$FRAME+3*$SIZE_T`(%sp),%r6 | ||
237 | L\$abort | ||
238 | bv (%r2) | ||
239 | .EXIT | ||
240 | $POPMB -$FRAME(%sp),%r3 | ||
241 | .PROCEND | ||
242 | ___ | ||
243 | |||
244 | $code.=<<___; | ||
245 | |||
246 | .EXPORT private_RC4_set_key,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR | ||
247 | .ALIGN 8 | ||
248 | private_RC4_set_key | ||
249 | .PROC | ||
250 | .CALLINFO NO_CALLS | ||
251 | .ENTRY | ||
252 | $ST %r0,`0*$SZ`($key) | ||
253 | $ST %r0,`1*$SZ`($key) | ||
254 | ldo `2*$SZ`($key),$key | ||
255 | copy %r0,@XX[0] | ||
256 | L\$1st | ||
257 | $ST @XX[0],0($key) | ||
258 | ldo 1(@XX[0]),@XX[0] | ||
259 | bb,>= @XX[0],`31-8`,L\$1st ; @XX[0]<256 | ||
260 | ldo $SZ($key),$key | ||
261 | |||
262 | ldo `-256*$SZ`($key),$key ; rewind $key | ||
263 | addl $len,$inp,$inp ; $inp to point at the end | ||
264 | sub %r0,$len,%r23 ; inverse index | ||
265 | copy %r0,@XX[0] | ||
266 | copy %r0,@XX[1] | ||
267 | ldi 0xff,$mask | ||
268 | |||
269 | L\$2nd | ||
270 | $LDX @XX[0]($key),@TX[0] | ||
271 | ldbx %r23($inp),@TX[1] | ||
272 | addi,nuv 1,%r23,%r23 ; increment and conditional | ||
273 | sub %r0,$len,%r23 ; inverse index | ||
274 | addl @TX[0],@XX[1],@XX[1] | ||
275 | addl @TX[1],@XX[1],@XX[1] | ||
276 | and $mask,@XX[1],@XX[1] | ||
277 | $MKX @XX[0],$key,$TY | ||
278 | $LDX @XX[1]($key),@TX[1] | ||
279 | $MKX @XX[1],$key,$YY | ||
280 | ldo 1(@XX[0]),@XX[0] | ||
281 | $ST @TX[0],0($YY) | ||
282 | bb,>= @XX[0],`31-8`,L\$2nd ; @XX[0]<256 | ||
283 | $ST @TX[1],0($TY) | ||
284 | |||
285 | bv,n (%r2) | ||
286 | .EXIT | ||
287 | nop | ||
288 | .PROCEND | ||
289 | |||
290 | .EXPORT RC4_options,ENTRY | ||
291 | .ALIGN 8 | ||
292 | RC4_options | ||
293 | .PROC | ||
294 | .CALLINFO NO_CALLS | ||
295 | .ENTRY | ||
296 | blr %r0,%r28 | ||
297 | ldi 3,%r1 | ||
298 | L\$pic | ||
299 | andcm %r28,%r1,%r28 | ||
300 | bv (%r2) | ||
301 | .EXIT | ||
302 | ldo L\$opts-L\$pic(%r28),%r28 | ||
303 | .PROCEND | ||
304 | .ALIGN 8 | ||
305 | L\$opts | ||
306 | .STRINGZ "rc4(4x,`$SZ==1?"char":"int"`)" | ||
307 | .STRINGZ "RC4 for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>" | ||
308 | ___ | ||
309 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
310 | $code =~ s/cmpib,\*/comib,/gm if ($SIZE_T==4); | ||
311 | |||
312 | print $code; | ||
313 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/rc4/asm/rc4-s390x.pl b/src/lib/libcrypto/rc4/asm/rc4-s390x.pl deleted file mode 100644 index 7528ece13c..0000000000 --- a/src/lib/libcrypto/rc4/asm/rc4-s390x.pl +++ /dev/null | |||
@@ -1,234 +0,0 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # February 2009 | ||
11 | # | ||
12 | # Performance is 2x that of gcc 3.4.6 on z10. The coding "secret" is to | ||
13 | # "cluster" Address Generation Interlocks, so that one pipeline stall | ||
14 | # resolves several dependencies. | ||
15 | |||
16 | # November 2010. | ||
17 | # | ||
18 | # Adapt for -m31 build. If kernel supports what's called "highgprs" | ||
19 | # feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit | ||
20 | # instructions and achieve "64-bit" performance even in 31-bit legacy | ||
21 | # application context. The feature is not specific to any particular | ||
22 | # processor, as long as it's "z-CPU". Latter implies that the code | ||
23 | # remains z/Architecture specific. On z990 it was measured to perform | ||
24 | # 50% better than code generated by gcc 4.3. | ||
25 | |||
26 | $flavour = shift; | ||
27 | |||
28 | if ($flavour =~ /3[12]/) { | ||
29 | $SIZE_T=4; | ||
30 | $g=""; | ||
31 | } else { | ||
32 | $SIZE_T=8; | ||
33 | $g="g"; | ||
34 | } | ||
35 | |||
36 | while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} | ||
37 | open STDOUT,">$output"; | ||
38 | |||
39 | $rp="%r14"; | ||
40 | $sp="%r15"; | ||
41 | $code=<<___; | ||
42 | .text | ||
43 | |||
44 | ___ | ||
45 | |||
46 | # void RC4(RC4_KEY *key,size_t len,const void *inp,void *out) | ||
47 | { | ||
48 | $acc="%r0"; | ||
49 | $cnt="%r1"; | ||
50 | $key="%r2"; | ||
51 | $len="%r3"; | ||
52 | $inp="%r4"; | ||
53 | $out="%r5"; | ||
54 | |||
55 | @XX=("%r6","%r7"); | ||
56 | @TX=("%r8","%r9"); | ||
57 | $YY="%r10"; | ||
58 | $TY="%r11"; | ||
59 | |||
60 | $code.=<<___; | ||
61 | .globl RC4 | ||
62 | .type RC4,\@function | ||
63 | .align 64 | ||
64 | RC4: | ||
65 | stm${g} %r6,%r11,6*$SIZE_T($sp) | ||
66 | ___ | ||
67 | $code.=<<___ if ($flavour =~ /3[12]/); | ||
68 | llgfr $len,$len | ||
69 | ___ | ||
70 | $code.=<<___; | ||
71 | llgc $XX[0],0($key) | ||
72 | llgc $YY,1($key) | ||
73 | la $XX[0],1($XX[0]) | ||
74 | nill $XX[0],0xff | ||
75 | srlg $cnt,$len,3 | ||
76 | ltgr $cnt,$cnt | ||
77 | llgc $TX[0],2($XX[0],$key) | ||
78 | jz .Lshort | ||
79 | j .Loop8 | ||
80 | |||
81 | .align 64 | ||
82 | .Loop8: | ||
83 | ___ | ||
84 | for ($i=0;$i<8;$i++) { | ||
85 | $code.=<<___; | ||
86 | la $YY,0($YY,$TX[0]) # $i | ||
87 | nill $YY,255 | ||
88 | la $XX[1],1($XX[0]) | ||
89 | nill $XX[1],255 | ||
90 | ___ | ||
91 | $code.=<<___ if ($i==1); | ||
92 | llgc $acc,2($TY,$key) | ||
93 | ___ | ||
94 | $code.=<<___ if ($i>1); | ||
95 | sllg $acc,$acc,8 | ||
96 | ic $acc,2($TY,$key) | ||
97 | ___ | ||
98 | $code.=<<___; | ||
99 | llgc $TY,2($YY,$key) | ||
100 | stc $TX[0],2($YY,$key) | ||
101 | llgc $TX[1],2($XX[1],$key) | ||
102 | stc $TY,2($XX[0],$key) | ||
103 | cr $XX[1],$YY | ||
104 | jne .Lcmov$i | ||
105 | la $TX[1],0($TX[0]) | ||
106 | .Lcmov$i: | ||
107 | la $TY,0($TY,$TX[0]) | ||
108 | nill $TY,255 | ||
109 | ___ | ||
110 | push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers | ||
111 | } | ||
112 | |||
113 | $code.=<<___; | ||
114 | lg $TX[1],0($inp) | ||
115 | sllg $acc,$acc,8 | ||
116 | la $inp,8($inp) | ||
117 | ic $acc,2($TY,$key) | ||
118 | xgr $acc,$TX[1] | ||
119 | stg $acc,0($out) | ||
120 | la $out,8($out) | ||
121 | brctg $cnt,.Loop8 | ||
122 | |||
123 | .Lshort: | ||
124 | lghi $acc,7 | ||
125 | ngr $len,$acc | ||
126 | jz .Lexit | ||
127 | j .Loop1 | ||
128 | |||
129 | .align 16 | ||
130 | .Loop1: | ||
131 | la $YY,0($YY,$TX[0]) | ||
132 | nill $YY,255 | ||
133 | llgc $TY,2($YY,$key) | ||
134 | stc $TX[0],2($YY,$key) | ||
135 | stc $TY,2($XX[0],$key) | ||
136 | ar $TY,$TX[0] | ||
137 | ahi $XX[0],1 | ||
138 | nill $TY,255 | ||
139 | nill $XX[0],255 | ||
140 | llgc $acc,0($inp) | ||
141 | la $inp,1($inp) | ||
142 | llgc $TY,2($TY,$key) | ||
143 | llgc $TX[0],2($XX[0],$key) | ||
144 | xr $acc,$TY | ||
145 | stc $acc,0($out) | ||
146 | la $out,1($out) | ||
147 | brct $len,.Loop1 | ||
148 | |||
149 | .Lexit: | ||
150 | ahi $XX[0],-1 | ||
151 | stc $XX[0],0($key) | ||
152 | stc $YY,1($key) | ||
153 | lm${g} %r6,%r11,6*$SIZE_T($sp) | ||
154 | br $rp | ||
155 | .size RC4,.-RC4 | ||
156 | .string "RC4 for s390x, CRYPTOGAMS by <appro\@openssl.org>" | ||
157 | |||
158 | ___ | ||
159 | } | ||
160 | |||
161 | # void RC4_set_key(RC4_KEY *key,unsigned int len,const void *inp) | ||
162 | { | ||
163 | $cnt="%r0"; | ||
164 | $idx="%r1"; | ||
165 | $key="%r2"; | ||
166 | $len="%r3"; | ||
167 | $inp="%r4"; | ||
168 | $acc="%r5"; | ||
169 | $dat="%r6"; | ||
170 | $ikey="%r7"; | ||
171 | $iinp="%r8"; | ||
172 | |||
173 | $code.=<<___; | ||
174 | .globl private_RC4_set_key | ||
175 | .type private_RC4_set_key,\@function | ||
176 | .align 64 | ||
177 | private_RC4_set_key: | ||
178 | stm${g} %r6,%r8,6*$SIZE_T($sp) | ||
179 | lhi $cnt,256 | ||
180 | la $idx,0(%r0) | ||
181 | sth $idx,0($key) | ||
182 | .align 4 | ||
183 | .L1stloop: | ||
184 | stc $idx,2($idx,$key) | ||
185 | la $idx,1($idx) | ||
186 | brct $cnt,.L1stloop | ||
187 | |||
188 | lghi $ikey,-256 | ||
189 | lr $cnt,$len | ||
190 | la $iinp,0(%r0) | ||
191 | la $idx,0(%r0) | ||
192 | .align 16 | ||
193 | .L2ndloop: | ||
194 | llgc $acc,2+256($ikey,$key) | ||
195 | llgc $dat,0($iinp,$inp) | ||
196 | la $idx,0($idx,$acc) | ||
197 | la $ikey,1($ikey) | ||
198 | la $idx,0($idx,$dat) | ||
199 | nill $idx,255 | ||
200 | la $iinp,1($iinp) | ||
201 | tml $ikey,255 | ||
202 | llgc $dat,2($idx,$key) | ||
203 | stc $dat,2+256-1($ikey,$key) | ||
204 | stc $acc,2($idx,$key) | ||
205 | jz .Ldone | ||
206 | brct $cnt,.L2ndloop | ||
207 | lr $cnt,$len | ||
208 | la $iinp,0(%r0) | ||
209 | j .L2ndloop | ||
210 | .Ldone: | ||
211 | lm${g} %r6,%r8,6*$SIZE_T($sp) | ||
212 | br $rp | ||
213 | .size private_RC4_set_key,.-private_RC4_set_key | ||
214 | |||
215 | ___ | ||
216 | } | ||
217 | |||
218 | # const char *RC4_options() | ||
219 | $code.=<<___; | ||
220 | .globl RC4_options | ||
221 | .type RC4_options,\@function | ||
222 | .align 16 | ||
223 | RC4_options: | ||
224 | larl %r2,.Loptions | ||
225 | br %r14 | ||
226 | .size RC4_options,.-RC4_options | ||
227 | .section .rodata | ||
228 | .Loptions: | ||
229 | .align 8 | ||
230 | .string "rc4(8x,char)" | ||
231 | ___ | ||
232 | |||
233 | print $code; | ||
234 | close STDOUT; # force flush | ||
diff --git a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl deleted file mode 100755 index d6eac205e9..0000000000 --- a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl +++ /dev/null | |||
@@ -1,676 +0,0 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # July 2004 | ||
11 | # | ||
12 | # 2.22x RC4 tune-up:-) It should be noted though that my hand [as in | ||
13 | # "hand-coded assembler"] doesn't stand for the whole improvement | ||
14 | # coefficient. It turned out that eliminating RC4_CHAR from config | ||
15 | # line results in ~40% improvement (yes, even for C implementation). | ||
16 | # Presumably it has everything to do with AMD cache architecture and | ||
17 | # RAW or whatever penalties. Once again! The module *requires* config | ||
18 | # line *without* RC4_CHAR! As for coding "secret," I bet on partial | ||
19 | # register arithmetics. For example instead of 'inc %r8; and $255,%r8' | ||
20 | # I simply 'inc %r8b'. Even though optimization manual discourages | ||
21 | # to operate on partial registers, it turned out to be the best bet. | ||
22 | # At least for AMD... How IA32E would perform remains to be seen... | ||
23 | |||
24 | # November 2004 | ||
25 | # | ||
26 | # As was shown by Marc Bevand reordering of couple of load operations | ||
27 | # results in even higher performance gain of 3.3x:-) At least on | ||
28 | # Opteron... For reference, 1x in this case is RC4_CHAR C-code | ||
29 | # compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock. | ||
30 | # Latter means that if you want to *estimate* what to expect from | ||
31 | # *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz. | ||
32 | |||
33 | # November 2004 | ||
34 | # | ||
35 | # Intel P4 EM64T core was found to run the AMD64 code really slow... | ||
36 | # The only way to achieve comparable performance on P4 was to keep | ||
37 | # RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to | ||
38 | # compose blended code that would perform within a 30% margin on | ||
39 | # both AMD and Intel platforms, I implement both cases. See | ||
40 | # rc4_skey.c for further details... | ||
41 | |||
42 | # April 2005 | ||
43 | # | ||
44 | # P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing | ||
45 | # those with add/sub results in 50% performance improvement of folded | ||
46 | # loop... | ||
47 | |||
48 | # May 2005 | ||
49 | # | ||
50 | # As was shown by Zou Nanhai loop unrolling can improve Intel EM64T | ||
51 | # performance by >30% [unlike P4 32-bit case that is]. But this is | ||
52 | # provided that loads are reordered even more aggressively! Both code | ||
53 | # paths, AMD64 and EM64T, reorder loads in essentially the same manner | ||
54 | # as my IA-64 implementation. On Opteron this resulted in modest 5% | ||
55 | # improvement [I had to test it], while final Intel P4 performance | ||
56 | # achieves respectful 432MBps on 2.8GHz processor now. For reference. | ||
57 | # If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than | ||
58 | # RC4_INT code-path. While if executed on Opteron, it's only 25% | ||
59 | # slower than the RC4_INT one [meaning that if CPU µ-arch detection | ||
60 | # is not implemented, then this final RC4_CHAR code-path should be | ||
61 | # preferred, as it provides better *all-round* performance]. | ||
62 | |||
63 | # March 2007 | ||
64 | # | ||
65 | # Intel Core2 was observed to perform poorly on both code paths:-( It | ||
66 | # apparently suffers from some kind of partial register stall, which | ||
67 | # occurs in 64-bit mode only [as virtually identical 32-bit loop was | ||
68 | # observed to outperform 64-bit one by almost 50%]. Adding two movzb to | ||
69 | # cloop1 boosts its performance by 80%! This loop appears to be optimal | ||
70 | # fit for Core2 and therefore the code was modified to skip cloop8 on | ||
71 | # this CPU. | ||
72 | |||
73 | # May 2010 | ||
74 | # | ||
75 | # Intel Westmere was observed to perform suboptimally. Adding yet | ||
76 | # another movzb to cloop1 improved performance by almost 50%! Core2 | ||
77 | # performance is improved too, but nominally... | ||
78 | |||
79 | # May 2011 | ||
80 | # | ||
81 | # The only code path that was not modified is P4-specific one. Non-P4 | ||
82 | # Intel code path optimization is heavily based on submission by Maxim | ||
83 | # Perminov, Maxim Locktyukhin and Jim Guilford of Intel. I've used | ||
84 | # some of the ideas even in an attempt to optimize the original RC4_INT | ||
85 | # code path... Current performance in cycles per processed byte (less | ||
86 | # is better) and improvement coefficients relative to previous | ||
87 | # version of this module are: | ||
88 | # | ||
89 | # Opteron 5.3/+0%(*) | ||
90 | # P4 6.5 | ||
91 | # Core2 6.2/+15%(**) | ||
92 | # Westmere 4.2/+60% | ||
93 | # Sandy Bridge 4.2/+120% | ||
94 | # Atom 9.3/+80% | ||
95 | # | ||
96 | # (*) But the corresponding loop has fewer instructions, which should have | ||
97 | # positive effect on upcoming Bulldozer, which has one less ALU. | ||
98 | # For reference, Intel code runs at 6.8 cpb rate on Opteron. | ||
99 | # (**) Note that Core2 result is ~15% lower than corresponding result | ||
100 | # for 32-bit code, meaning that it's possible to improve it, | ||
101 | # but more than likely at the cost of the others (see rc4-586.pl | ||
102 | # to get the idea)... | ||
103 | |||
104 | $flavour = shift; | ||
105 | $output = shift; | ||
106 | if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } | ||
107 | |||
108 | $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); | ||
109 | |||
110 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
111 | ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or | ||
112 | ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or | ||
113 | die "can't locate x86_64-xlate.pl"; | ||
114 | |||
115 | open STDOUT,"| $^X $xlate $flavour $output"; | ||
116 | |||
117 | $dat="%rdi"; # arg1 | ||
118 | $len="%rsi"; # arg2 | ||
119 | $inp="%rdx"; # arg3 | ||
120 | $out="%rcx"; # arg4 | ||
121 | |||
122 | { | ||
123 | $code=<<___; | ||
124 | .text | ||
125 | .extern OPENSSL_ia32cap_P | ||
126 | |||
127 | .globl RC4 | ||
128 | .type RC4,\@function,4 | ||
129 | .align 16 | ||
130 | RC4: or $len,$len | ||
131 | jne .Lentry | ||
132 | ret | ||
133 | .Lentry: | ||
134 | push %rbx | ||
135 | push %r12 | ||
136 | push %r13 | ||
137 | .Lprologue: | ||
138 | mov $len,%r11 | ||
139 | mov $inp,%r12 | ||
140 | mov $out,%r13 | ||
141 | ___ | ||
142 | my $len="%r11"; # reassign input arguments | ||
143 | my $inp="%r12"; | ||
144 | my $out="%r13"; | ||
145 | |||
146 | my @XX=("%r10","%rsi"); | ||
147 | my @TX=("%rax","%rbx"); | ||
148 | my $YY="%rcx"; | ||
149 | my $TY="%rdx"; | ||
150 | |||
151 | $code.=<<___; | ||
152 | xor $XX[0],$XX[0] | ||
153 | xor $YY,$YY | ||
154 | |||
155 | lea 8($dat),$dat | ||
156 | mov -8($dat),$XX[0]#b | ||
157 | mov -4($dat),$YY#b | ||
158 | cmpl \$-1,256($dat) | ||
159 | je .LRC4_CHAR | ||
160 | mov OPENSSL_ia32cap_P(%rip),%r8d | ||
161 | xor $TX[1],$TX[1] | ||
162 | inc $XX[0]#b | ||
163 | sub $XX[0],$TX[1] | ||
164 | sub $inp,$out | ||
165 | movl ($dat,$XX[0],4),$TX[0]#d | ||
166 | test \$-16,$len | ||
167 | jz .Lloop1 | ||
168 | bt \$30,%r8d # Intel CPU? | ||
169 | jc .Lintel | ||
170 | and \$7,$TX[1] | ||
171 | lea 1($XX[0]),$XX[1] | ||
172 | jz .Loop8 | ||
173 | sub $TX[1],$len | ||
174 | .Loop8_warmup: | ||
175 | add $TX[0]#b,$YY#b | ||
176 | movl ($dat,$YY,4),$TY#d | ||
177 | movl $TX[0]#d,($dat,$YY,4) | ||
178 | movl $TY#d,($dat,$XX[0],4) | ||
179 | add $TY#b,$TX[0]#b | ||
180 | inc $XX[0]#b | ||
181 | movl ($dat,$TX[0],4),$TY#d | ||
182 | movl ($dat,$XX[0],4),$TX[0]#d | ||
183 | xorb ($inp),$TY#b | ||
184 | movb $TY#b,($out,$inp) | ||
185 | lea 1($inp),$inp | ||
186 | dec $TX[1] | ||
187 | jnz .Loop8_warmup | ||
188 | |||
189 | lea 1($XX[0]),$XX[1] | ||
190 | jmp .Loop8 | ||
191 | .align 16 | ||
192 | .Loop8: | ||
193 | ___ | ||
194 | for ($i=0;$i<8;$i++) { | ||
195 | $code.=<<___ if ($i==7); | ||
196 | add \$8,$XX[1]#b | ||
197 | ___ | ||
198 | $code.=<<___; | ||
199 | add $TX[0]#b,$YY#b | ||
200 | movl ($dat,$YY,4),$TY#d | ||
201 | movl $TX[0]#d,($dat,$YY,4) | ||
202 | movl `4*($i==7?-1:$i)`($dat,$XX[1],4),$TX[1]#d | ||
203 | ror \$8,%r8 # ror is redundant when $i=0 | ||
204 | movl $TY#d,4*$i($dat,$XX[0],4) | ||
205 | add $TX[0]#b,$TY#b | ||
206 | movb ($dat,$TY,4),%r8b | ||
207 | ___ | ||
208 | push(@TX,shift(@TX)); #push(@XX,shift(@XX)); # "rotate" registers | ||
209 | } | ||
210 | $code.=<<___; | ||
211 | add \$8,$XX[0]#b | ||
212 | ror \$8,%r8 | ||
213 | sub \$8,$len | ||
214 | |||
215 | xor ($inp),%r8 | ||
216 | mov %r8,($out,$inp) | ||
217 | lea 8($inp),$inp | ||
218 | |||
219 | test \$-8,$len | ||
220 | jnz .Loop8 | ||
221 | cmp \$0,$len | ||
222 | jne .Lloop1 | ||
223 | jmp .Lexit | ||
224 | |||
225 | .align 16 | ||
226 | .Lintel: | ||
227 | test \$-32,$len | ||
228 | jz .Lloop1 | ||
229 | and \$15,$TX[1] | ||
230 | jz .Loop16_is_hot | ||
231 | sub $TX[1],$len | ||
232 | .Loop16_warmup: | ||
233 | add $TX[0]#b,$YY#b | ||
234 | movl ($dat,$YY,4),$TY#d | ||
235 | movl $TX[0]#d,($dat,$YY,4) | ||
236 | movl $TY#d,($dat,$XX[0],4) | ||
237 | add $TY#b,$TX[0]#b | ||
238 | inc $XX[0]#b | ||
239 | movl ($dat,$TX[0],4),$TY#d | ||
240 | movl ($dat,$XX[0],4),$TX[0]#d | ||
241 | xorb ($inp),$TY#b | ||
242 | movb $TY#b,($out,$inp) | ||
243 | lea 1($inp),$inp | ||
244 | dec $TX[1] | ||
245 | jnz .Loop16_warmup | ||
246 | |||
247 | mov $YY,$TX[1] | ||
248 | xor $YY,$YY | ||
249 | mov $TX[1]#b,$YY#b | ||
250 | |||
251 | .Loop16_is_hot: | ||
252 | lea ($dat,$XX[0],4),$XX[1] | ||
253 | ___ | ||
254 | sub RC4_loop { | ||
255 | my $i=shift; | ||
256 | my $j=$i<0?0:$i; | ||
257 | my $xmm="%xmm".($j&1); | ||
258 | |||
259 | $code.=" add \$16,$XX[0]#b\n" if ($i==15); | ||
260 | $code.=" movdqu ($inp),%xmm2\n" if ($i==15); | ||
261 | $code.=" add $TX[0]#b,$YY#b\n" if ($i<=0); | ||
262 | $code.=" movl ($dat,$YY,4),$TY#d\n"; | ||
263 | $code.=" pxor %xmm0,%xmm2\n" if ($i==0); | ||
264 | $code.=" psllq \$8,%xmm1\n" if ($i==0); | ||
265 | $code.=" pxor $xmm,$xmm\n" if ($i<=1); | ||
266 | $code.=" movl $TX[0]#d,($dat,$YY,4)\n"; | ||
267 | $code.=" add $TY#b,$TX[0]#b\n"; | ||
268 | $code.=" movl `4*($j+1)`($XX[1]),$TX[1]#d\n" if ($i<15); | ||
269 | $code.=" movz $TX[0]#b,$TX[0]#d\n"; | ||
270 | $code.=" movl $TY#d,4*$j($XX[1])\n"; | ||
271 | $code.=" pxor %xmm1,%xmm2\n" if ($i==0); | ||
272 | $code.=" lea ($dat,$XX[0],4),$XX[1]\n" if ($i==15); | ||
273 | $code.=" add $TX[1]#b,$YY#b\n" if ($i<15); | ||
274 | $code.=" pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n"; | ||
275 | $code.=" movdqu %xmm2,($out,$inp)\n" if ($i==0); | ||
276 | $code.=" lea 16($inp),$inp\n" if ($i==0); | ||
277 | $code.=" movl ($XX[1]),$TX[1]#d\n" if ($i==15); | ||
278 | } | ||
279 | RC4_loop(-1); | ||
280 | $code.=<<___; | ||
281 | jmp .Loop16_enter | ||
282 | .align 16 | ||
283 | .Loop16: | ||
284 | ___ | ||
285 | |||
286 | for ($i=0;$i<16;$i++) { | ||
287 | $code.=".Loop16_enter:\n" if ($i==1); | ||
288 | RC4_loop($i); | ||
289 | push(@TX,shift(@TX)); # "rotate" registers | ||
290 | } | ||
291 | $code.=<<___; | ||
292 | mov $YY,$TX[1] | ||
293 | xor $YY,$YY # keyword to partial register | ||
294 | sub \$16,$len | ||
295 | mov $TX[1]#b,$YY#b | ||
296 | test \$-16,$len | ||
297 | jnz .Loop16 | ||
298 | |||
299 | psllq \$8,%xmm1 | ||
300 | pxor %xmm0,%xmm2 | ||
301 | pxor %xmm1,%xmm2 | ||
302 | movdqu %xmm2,($out,$inp) | ||
303 | lea 16($inp),$inp | ||
304 | |||
305 | cmp \$0,$len | ||
306 | jne .Lloop1 | ||
307 | jmp .Lexit | ||
308 | |||
309 | .align 16 | ||
310 | .Lloop1: | ||
311 | add $TX[0]#b,$YY#b | ||
312 | movl ($dat,$YY,4),$TY#d | ||
313 | movl $TX[0]#d,($dat,$YY,4) | ||
314 | movl $TY#d,($dat,$XX[0],4) | ||
315 | add $TY#b,$TX[0]#b | ||
316 | inc $XX[0]#b | ||
317 | movl ($dat,$TX[0],4),$TY#d | ||
318 | movl ($dat,$XX[0],4),$TX[0]#d | ||
319 | xorb ($inp),$TY#b | ||
320 | movb $TY#b,($out,$inp) | ||
321 | lea 1($inp),$inp | ||
322 | dec $len | ||
323 | jnz .Lloop1 | ||
324 | jmp .Lexit | ||
325 | |||
326 | .align 16 | ||
327 | .LRC4_CHAR: | ||
328 | add \$1,$XX[0]#b | ||
329 | movzb ($dat,$XX[0]),$TX[0]#d | ||
330 | test \$-8,$len | ||
331 | jz .Lcloop1 | ||
332 | jmp .Lcloop8 | ||
333 | .align 16 | ||
334 | .Lcloop8: | ||
335 | mov ($inp),%r8d | ||
336 | mov 4($inp),%r9d | ||
337 | ___ | ||
338 | # unroll 2x4-wise, because 64-bit rotates kill Intel P4... | ||
339 | for ($i=0;$i<4;$i++) { | ||
340 | $code.=<<___; | ||
341 | add $TX[0]#b,$YY#b | ||
342 | lea 1($XX[0]),$XX[1] | ||
343 | movzb ($dat,$YY),$TY#d | ||
344 | movzb $XX[1]#b,$XX[1]#d | ||
345 | movzb ($dat,$XX[1]),$TX[1]#d | ||
346 | movb $TX[0]#b,($dat,$YY) | ||
347 | cmp $XX[1],$YY | ||
348 | movb $TY#b,($dat,$XX[0]) | ||
349 | jne .Lcmov$i # Intel cmov is sloooow... | ||
350 | mov $TX[0],$TX[1] | ||
351 | .Lcmov$i: | ||
352 | add $TX[0]#b,$TY#b | ||
353 | xor ($dat,$TY),%r8b | ||
354 | ror \$8,%r8d | ||
355 | ___ | ||
356 | push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers | ||
357 | } | ||
358 | for ($i=4;$i<8;$i++) { | ||
359 | $code.=<<___; | ||
360 | add $TX[0]#b,$YY#b | ||
361 | lea 1($XX[0]),$XX[1] | ||
362 | movzb ($dat,$YY),$TY#d | ||
363 | movzb $XX[1]#b,$XX[1]#d | ||
364 | movzb ($dat,$XX[1]),$TX[1]#d | ||
365 | movb $TX[0]#b,($dat,$YY) | ||
366 | cmp $XX[1],$YY | ||
367 | movb $TY#b,($dat,$XX[0]) | ||
368 | jne .Lcmov$i # Intel cmov is sloooow... | ||
369 | mov $TX[0],$TX[1] | ||
370 | .Lcmov$i: | ||
371 | add $TX[0]#b,$TY#b | ||
372 | xor ($dat,$TY),%r9b | ||
373 | ror \$8,%r9d | ||
374 | ___ | ||
375 | push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers | ||
376 | } | ||
377 | $code.=<<___; | ||
378 | lea -8($len),$len | ||
379 | mov %r8d,($out) | ||
380 | lea 8($inp),$inp | ||
381 | mov %r9d,4($out) | ||
382 | lea 8($out),$out | ||
383 | |||
384 | test \$-8,$len | ||
385 | jnz .Lcloop8 | ||
386 | cmp \$0,$len | ||
387 | jne .Lcloop1 | ||
388 | jmp .Lexit | ||
389 | ___ | ||
390 | $code.=<<___; | ||
391 | .align 16 | ||
392 | .Lcloop1: | ||
393 | add $TX[0]#b,$YY#b | ||
394 | movzb $YY#b,$YY#d | ||
395 | movzb ($dat,$YY),$TY#d | ||
396 | movb $TX[0]#b,($dat,$YY) | ||
397 | movb $TY#b,($dat,$XX[0]) | ||
398 | add $TX[0]#b,$TY#b | ||
399 | add \$1,$XX[0]#b | ||
400 | movzb $TY#b,$TY#d | ||
401 | movzb $XX[0]#b,$XX[0]#d | ||
402 | movzb ($dat,$TY),$TY#d | ||
403 | movzb ($dat,$XX[0]),$TX[0]#d | ||
404 | xorb ($inp),$TY#b | ||
405 | lea 1($inp),$inp | ||
406 | movb $TY#b,($out) | ||
407 | lea 1($out),$out | ||
408 | sub \$1,$len | ||
409 | jnz .Lcloop1 | ||
410 | jmp .Lexit | ||
411 | |||
412 | .align 16 | ||
413 | .Lexit: | ||
414 | sub \$1,$XX[0]#b | ||
415 | movl $XX[0]#d,-8($dat) | ||
416 | movl $YY#d,-4($dat) | ||
417 | |||
418 | mov (%rsp),%r13 | ||
419 | mov 8(%rsp),%r12 | ||
420 | mov 16(%rsp),%rbx | ||
421 | add \$24,%rsp | ||
422 | .Lepilogue: | ||
423 | ret | ||
424 | .size RC4,.-RC4 | ||
425 | ___ | ||
426 | } | ||
427 | |||
428 | $idx="%r8"; | ||
429 | $ido="%r9"; | ||
430 | |||
431 | $code.=<<___; | ||
432 | .globl private_RC4_set_key | ||
433 | .type private_RC4_set_key,\@function,3 | ||
434 | .align 16 | ||
435 | private_RC4_set_key: | ||
436 | lea 8($dat),$dat | ||
437 | lea ($inp,$len),$inp | ||
438 | neg $len | ||
439 | mov $len,%rcx | ||
440 | xor %eax,%eax | ||
441 | xor $ido,$ido | ||
442 | xor %r10,%r10 | ||
443 | xor %r11,%r11 | ||
444 | |||
445 | mov OPENSSL_ia32cap_P(%rip),$idx#d | ||
446 | bt \$20,$idx#d # RC4_CHAR? | ||
447 | jc .Lc1stloop | ||
448 | jmp .Lw1stloop | ||
449 | |||
450 | .align 16 | ||
451 | .Lw1stloop: | ||
452 | mov %eax,($dat,%rax,4) | ||
453 | add \$1,%al | ||
454 | jnc .Lw1stloop | ||
455 | |||
456 | xor $ido,$ido | ||
457 | xor $idx,$idx | ||
458 | .align 16 | ||
459 | .Lw2ndloop: | ||
460 | mov ($dat,$ido,4),%r10d | ||
461 | add ($inp,$len,1),$idx#b | ||
462 | add %r10b,$idx#b | ||
463 | add \$1,$len | ||
464 | mov ($dat,$idx,4),%r11d | ||
465 | cmovz %rcx,$len | ||
466 | mov %r10d,($dat,$idx,4) | ||
467 | mov %r11d,($dat,$ido,4) | ||
468 | add \$1,$ido#b | ||
469 | jnc .Lw2ndloop | ||
470 | jmp .Lexit_key | ||
471 | |||
472 | .align 16 | ||
473 | .Lc1stloop: | ||
474 | mov %al,($dat,%rax) | ||
475 | add \$1,%al | ||
476 | jnc .Lc1stloop | ||
477 | |||
478 | xor $ido,$ido | ||
479 | xor $idx,$idx | ||
480 | .align 16 | ||
481 | .Lc2ndloop: | ||
482 | mov ($dat,$ido),%r10b | ||
483 | add ($inp,$len),$idx#b | ||
484 | add %r10b,$idx#b | ||
485 | add \$1,$len | ||
486 | mov ($dat,$idx),%r11b | ||
487 | jnz .Lcnowrap | ||
488 | mov %rcx,$len | ||
489 | .Lcnowrap: | ||
490 | mov %r10b,($dat,$idx) | ||
491 | mov %r11b,($dat,$ido) | ||
492 | add \$1,$ido#b | ||
493 | jnc .Lc2ndloop | ||
494 | movl \$-1,256($dat) | ||
495 | |||
496 | .align 16 | ||
497 | .Lexit_key: | ||
498 | xor %eax,%eax | ||
499 | mov %eax,-8($dat) | ||
500 | mov %eax,-4($dat) | ||
501 | ret | ||
502 | .size private_RC4_set_key,.-private_RC4_set_key | ||
503 | |||
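The .Lw1stloop/.Lw2ndloop pair above (and its .Lc1stloop/.Lc2ndloop byte variant) is the textbook RC4 key schedule. In portable C it amounts to the following sketch; rc4_ksa and its parameter names are illustrative, not part of this module.

    /* Portable sketch of the key schedule implemented above; illustrative only. */
    static void rc4_ksa(unsigned char S[256], const unsigned char *key, int keylen)
    {
            int i, j = 0;
            unsigned char t;

            for (i = 0; i < 256; i++)            /* .Lw1stloop: identity fill */
                    S[i] = (unsigned char)i;
            for (i = 0; i < 256; i++) {          /* .Lw2ndloop: key-driven swaps */
                    j = (j + S[i] + key[i % keylen]) & 0xff;
                    t = S[i]; S[i] = S[j]; S[j] = t;
            }
    }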
504 | .globl RC4_options | ||
505 | .type RC4_options,\@abi-omnipotent | ||
506 | .align 16 | ||
507 | RC4_options: | ||
508 | lea .Lopts(%rip),%rax | ||
509 | mov OPENSSL_ia32cap_P(%rip),%edx | ||
510 | bt \$20,%edx | ||
511 | jc .L8xchar | ||
512 | bt \$30,%edx | ||
513 | jnc .Ldone | ||
514 | add \$25,%rax | ||
515 | ret | ||
516 | .L8xchar: | ||
517 | add \$12,%rax | ||
518 | .Ldone: | ||
519 | ret | ||
520 | .align 64 | ||
521 | .Lopts: | ||
522 | .asciz "rc4(8x,int)" | ||
523 | .asciz "rc4(8x,char)" | ||
524 | .asciz "rc4(16x,int)" | ||
525 | .asciz "RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>" | ||
526 | .align 64 | ||
527 | .size RC4_options,.-RC4_options | ||
528 | ___ | ||
529 | |||
530 | # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, | ||
531 | # CONTEXT *context,DISPATCHER_CONTEXT *disp) | ||
532 | if ($win64) { | ||
533 | $rec="%rcx"; | ||
534 | $frame="%rdx"; | ||
535 | $context="%r8"; | ||
536 | $disp="%r9"; | ||
537 | |||
538 | $code.=<<___; | ||
539 | .extern __imp_RtlVirtualUnwind | ||
540 | .type stream_se_handler,\@abi-omnipotent | ||
541 | .align 16 | ||
542 | stream_se_handler: | ||
543 | push %rsi | ||
544 | push %rdi | ||
545 | push %rbx | ||
546 | push %rbp | ||
547 | push %r12 | ||
548 | push %r13 | ||
549 | push %r14 | ||
550 | push %r15 | ||
551 | pushfq | ||
552 | sub \$64,%rsp | ||
553 | |||
554 | mov 120($context),%rax # pull context->Rax | ||
555 | mov 248($context),%rbx # pull context->Rip | ||
556 | |||
557 | lea .Lprologue(%rip),%r10 | ||
558 | cmp %r10,%rbx # context->Rip<prologue label | ||
559 | jb .Lin_prologue | ||
560 | |||
561 | mov 152($context),%rax # pull context->Rsp | ||
562 | |||
563 | lea .Lepilogue(%rip),%r10 | ||
564 | cmp %r10,%rbx # context->Rip>=epilogue label | ||
565 | jae .Lin_prologue | ||
566 | |||
567 | lea 24(%rax),%rax | ||
568 | |||
569 | mov -8(%rax),%rbx | ||
570 | mov -16(%rax),%r12 | ||
571 | mov -24(%rax),%r13 | ||
572 | mov %rbx,144($context) # restore context->Rbx | ||
573 | mov %r12,216($context) # restore context->R12 | ||
574 | mov %r13,224($context) # restore context->R13 | ||
575 | |||
576 | .Lin_prologue: | ||
577 | mov 8(%rax),%rdi | ||
578 | mov 16(%rax),%rsi | ||
579 | mov %rax,152($context) # restore context->Rsp | ||
580 | mov %rsi,168($context) # restore context->Rsi | ||
581 | mov %rdi,176($context) # restore context->Rdi | ||
582 | |||
583 | jmp .Lcommon_seh_exit | ||
584 | .size stream_se_handler,.-stream_se_handler | ||
585 | |||
586 | .type key_se_handler,\@abi-omnipotent | ||
587 | .align 16 | ||
588 | key_se_handler: | ||
589 | push %rsi | ||
590 | push %rdi | ||
591 | push %rbx | ||
592 | push %rbp | ||
593 | push %r12 | ||
594 | push %r13 | ||
595 | push %r14 | ||
596 | push %r15 | ||
597 | pushfq | ||
598 | sub \$64,%rsp | ||
599 | |||
600 | mov 152($context),%rax # pull context->Rsp | ||
601 | mov 8(%rax),%rdi | ||
602 | mov 16(%rax),%rsi | ||
603 | mov %rsi,168($context) # restore context->Rsi | ||
604 | mov %rdi,176($context) # restore context->Rdi | ||
605 | |||
606 | .Lcommon_seh_exit: | ||
607 | |||
608 | mov 40($disp),%rdi # disp->ContextRecord | ||
609 | mov $context,%rsi # context | ||
610 | mov \$154,%ecx # sizeof(CONTEXT) | ||
611 | .long 0xa548f3fc # cld; rep movsq | ||
612 | |||
613 | mov $disp,%rsi | ||
614 | xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER | ||
615 | mov 8(%rsi),%rdx # arg2, disp->ImageBase | ||
616 | mov 0(%rsi),%r8 # arg3, disp->ControlPc | ||
617 | mov 16(%rsi),%r9 # arg4, disp->FunctionEntry | ||
618 | mov 40(%rsi),%r10 # disp->ContextRecord | ||
619 | lea 56(%rsi),%r11 # &disp->HandlerData | ||
620 | lea 24(%rsi),%r12 # &disp->EstablisherFrame | ||
621 | mov %r10,32(%rsp) # arg5 | ||
622 | mov %r11,40(%rsp) # arg6 | ||
623 | mov %r12,48(%rsp) # arg7 | ||
624 | mov %rcx,56(%rsp) # arg8, (NULL) | ||
625 | call *__imp_RtlVirtualUnwind(%rip) | ||
626 | |||
627 | mov \$1,%eax # ExceptionContinueSearch | ||
628 | add \$64,%rsp | ||
629 | popfq | ||
630 | pop %r15 | ||
631 | pop %r14 | ||
632 | pop %r13 | ||
633 | pop %r12 | ||
634 | pop %rbp | ||
635 | pop %rbx | ||
636 | pop %rdi | ||
637 | pop %rsi | ||
638 | ret | ||
639 | .size key_se_handler,.-key_se_handler | ||
640 | |||
641 | .section .pdata | ||
642 | .align 4 | ||
643 | .rva .LSEH_begin_RC4 | ||
644 | .rva .LSEH_end_RC4 | ||
645 | .rva .LSEH_info_RC4 | ||
646 | |||
647 | .rva .LSEH_begin_private_RC4_set_key | ||
648 | .rva .LSEH_end_private_RC4_set_key | ||
649 | .rva .LSEH_info_private_RC4_set_key | ||
650 | |||
651 | .section .xdata | ||
652 | .align 8 | ||
653 | .LSEH_info_RC4: | ||
654 | .byte 9,0,0,0 | ||
655 | .rva stream_se_handler | ||
656 | .LSEH_info_private_RC4_set_key: | ||
657 | .byte 9,0,0,0 | ||
658 | .rva key_se_handler | ||
659 | ___ | ||
660 | } | ||
661 | |||
662 | sub reg_part { | ||
663 | my ($reg,$conv)=@_; | ||
664 | if ($reg =~ /%r[0-9]+/) { $reg .= $conv; } | ||
665 | elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; } | ||
666 | elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; } | ||
667 | elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; } | ||
668 | return $reg; | ||
669 | } | ||
670 | |||
671 | $code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem; | ||
672 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
673 | |||
674 | print $code; | ||
675 | |||
676 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/rc4/rc4.h b/src/lib/libcrypto/rc4/rc4.h deleted file mode 100644 index 88ceb46bc5..0000000000 --- a/src/lib/libcrypto/rc4/rc4.h +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | /* crypto/rc4/rc4.h */ | ||
2 | /* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com) | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This package is an SSL implementation written | ||
6 | * by Eric Young (eay@cryptsoft.com). | ||
7 | * The implementation was written so as to conform with Netscapes SSL. | ||
8 | * | ||
9 | * This library is free for commercial and non-commercial use as long as | ||
10 | * the following conditions are aheared to. The following conditions | ||
11 | * apply to all code found in this distribution, be it the RC4, RSA, | ||
12 | * lhash, DES, etc., code; not just the SSL code. The SSL documentation | ||
13 | * included with this distribution is covered by the same copyright terms | ||
14 | * except that the holder is Tim Hudson (tjh@cryptsoft.com). | ||
15 | * | ||
16 | * Copyright remains Eric Young's, and as such any Copyright notices in | ||
17 | * the code are not to be removed. | ||
18 | * If this package is used in a product, Eric Young should be given attribution | ||
19 | * as the author of the parts of the library used. | ||
20 | * This can be in the form of a textual message at program startup or | ||
21 | * in documentation (online or textual) provided with the package. | ||
22 | * | ||
23 | * Redistribution and use in source and binary forms, with or without | ||
24 | * modification, are permitted provided that the following conditions | ||
25 | * are met: | ||
26 | * 1. Redistributions of source code must retain the copyright | ||
27 | * notice, this list of conditions and the following disclaimer. | ||
28 | * 2. Redistributions in binary form must reproduce the above copyright | ||
29 | * notice, this list of conditions and the following disclaimer in the | ||
30 | * documentation and/or other materials provided with the distribution. | ||
31 | * 3. All advertising materials mentioning features or use of this software | ||
32 | * must display the following acknowledgement: | ||
33 | * "This product includes cryptographic software written by | ||
34 | * Eric Young (eay@cryptsoft.com)" | ||
35 | * The word 'cryptographic' can be left out if the rouines from the library | ||
36 | * being used are not cryptographic related :-). | ||
37 | * 4. If you include any Windows specific code (or a derivative thereof) from | ||
38 | * the apps directory (application code) you must include an acknowledgement: | ||
39 | * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" | ||
40 | * | ||
41 | * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND | ||
42 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
43 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
44 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | ||
45 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
46 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
47 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
48 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
49 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
50 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
51 | * SUCH DAMAGE. | ||
52 | * | ||
53 | * The licence and distribution terms for any publically available version or | ||
54 | * derivative of this code cannot be changed. i.e. this code cannot simply be | ||
55 | * copied and put under another distribution licence | ||
56 | * [including the GNU Public Licence.] | ||
57 | */ | ||
58 | |||
59 | #ifndef HEADER_RC4_H | ||
60 | #define HEADER_RC4_H | ||
61 | |||
62 | #include <openssl/opensslconf.h> /* OPENSSL_NO_RC4, RC4_INT */ | ||
63 | #ifdef OPENSSL_NO_RC4 | ||
64 | #error RC4 is disabled. | ||
65 | #endif | ||
66 | |||
67 | #include <stddef.h> | ||
68 | |||
69 | #ifdef __cplusplus | ||
70 | extern "C" { | ||
71 | #endif | ||
72 | |||
73 | typedef struct rc4_key_st | ||
74 | { | ||
75 | RC4_INT x,y; | ||
76 | RC4_INT data[256]; | ||
77 | } RC4_KEY; | ||
78 | |||
79 | |||
80 | const char *RC4_options(void); | ||
81 | void RC4_set_key(RC4_KEY *key, int len, const unsigned char *data); | ||
82 | void private_RC4_set_key(RC4_KEY *key, int len, const unsigned char *data); | ||
83 | void RC4(RC4_KEY *key, size_t len, const unsigned char *indata, | ||
84 | unsigned char *outdata); | ||
85 | |||
86 | #ifdef __cplusplus | ||
87 | } | ||
88 | #endif | ||
89 | |||
90 | #endif | ||
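The four declarations above are the whole public RC4 interface. A minimal usage sketch in C, assuming a build with RC4 enabled; encryption and decryption are the same operation with the same key and stream position.

    #include <string.h>
    #include <openssl/rc4.h>

    int main(void)
    {
            static const unsigned char key[] = "0123456789abcdef";
            unsigned char msg[] = "attack at dawn";
            unsigned char ct[sizeof(msg)], pt[sizeof(msg)];
            RC4_KEY ks;

            RC4_set_key(&ks, (int)(sizeof(key) - 1), key);
            RC4(&ks, sizeof(msg), msg, ct);              /* encrypt */

            RC4_set_key(&ks, (int)(sizeof(key) - 1), key); /* reset stream position */
            RC4(&ks, sizeof(ct), ct, pt);                /* decrypt */

            return memcmp(msg, pt, sizeof(msg)) != 0;
    }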
diff --git a/src/lib/libcrypto/rc4/rc4_enc.c b/src/lib/libcrypto/rc4/rc4_enc.c deleted file mode 100644 index 8c4fc6c7a3..0000000000 --- a/src/lib/libcrypto/rc4/rc4_enc.c +++ /dev/null | |||
@@ -1,315 +0,0 @@ | |||
1 | /* crypto/rc4/rc4_enc.c */ | ||
2 | /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This package is an SSL implementation written | ||
6 | * by Eric Young (eay@cryptsoft.com). | ||
7 | * The implementation was written so as to conform with Netscapes SSL. | ||
8 | * | ||
9 | * This library is free for commercial and non-commercial use as long as | ||
10 | * the following conditions are aheared to. The following conditions | ||
11 | * apply to all code found in this distribution, be it the RC4, RSA, | ||
12 | * lhash, DES, etc., code; not just the SSL code. The SSL documentation | ||
13 | * included with this distribution is covered by the same copyright terms | ||
14 | * except that the holder is Tim Hudson (tjh@cryptsoft.com). | ||
15 | * | ||
16 | * Copyright remains Eric Young's, and as such any Copyright notices in | ||
17 | * the code are not to be removed. | ||
18 | * If this package is used in a product, Eric Young should be given attribution | ||
19 | * as the author of the parts of the library used. | ||
20 | * This can be in the form of a textual message at program startup or | ||
21 | * in documentation (online or textual) provided with the package. | ||
22 | * | ||
23 | * Redistribution and use in source and binary forms, with or without | ||
24 | * modification, are permitted provided that the following conditions | ||
25 | * are met: | ||
26 | * 1. Redistributions of source code must retain the copyright | ||
27 | * notice, this list of conditions and the following disclaimer. | ||
28 | * 2. Redistributions in binary form must reproduce the above copyright | ||
29 | * notice, this list of conditions and the following disclaimer in the | ||
30 | * documentation and/or other materials provided with the distribution. | ||
31 | * 3. All advertising materials mentioning features or use of this software | ||
32 | * must display the following acknowledgement: | ||
33 | * "This product includes cryptographic software written by | ||
34 | * Eric Young (eay@cryptsoft.com)" | ||
35 | * The word 'cryptographic' can be left out if the routines from the library | ||
36 | * being used are not cryptographically related :-). | ||
37 | * 4. If you include any Windows specific code (or a derivative thereof) from | ||
38 | * the apps directory (application code) you must include an acknowledgement: | ||
39 | * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" | ||
40 | * | ||
41 | * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND | ||
42 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
43 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
44 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | ||
45 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
46 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
47 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
48 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
49 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
50 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
51 | * SUCH DAMAGE. | ||
52 | * | ||
53 | * The licence and distribution terms for any publicly available version or | ||
54 | * derivative of this code cannot be changed. i.e. this code cannot simply be | ||
55 | * copied and put under another distribution licence | ||
56 | * [including the GNU Public Licence.] | ||
57 | */ | ||
58 | |||
59 | #include <openssl/rc4.h> | ||
60 | #include "rc4_locl.h" | ||
61 | |||
62 | /* RC4 as implemented from a posting from | ||
63 | * Newsgroups: sci.crypt | ||
64 | * From: sterndark@netcom.com (David Sterndark) | ||
65 | * Subject: RC4 Algorithm revealed. | ||
66 | * Message-ID: <sternCvKL4B.Hyy@netcom.com> | ||
67 | * Date: Wed, 14 Sep 1994 06:35:31 GMT | ||
68 | */ | ||
69 | |||
70 | void RC4(RC4_KEY *key, size_t len, const unsigned char *indata, | ||
71 | unsigned char *outdata) | ||
72 | { | ||
73 | register RC4_INT *d; | ||
74 | register RC4_INT x,y,tx,ty; | ||
75 | size_t i; | ||
76 | |||
77 | x=key->x; | ||
78 | y=key->y; | ||
79 | d=key->data; | ||
80 | |||
81 | #if defined(RC4_CHUNK) | ||
82 | /* | ||
83 | * The original reason for implementing this(*) was the fact that | ||
84 | * pre-21164a Alpha CPUs don't have byte load/store instructions | ||
85 | * and e.g. a byte store has to be done with a 64-bit load, shift, | ||
86 | * AND, OR and finally a 64-bit store. Peeking at data and operating | ||
87 | * at natural word size made it possible to reduce the number of | ||
88 | * instructions as well as to perform early read-ahead without | ||
89 | * suffering from RAW (read-after-write) hazard. This resulted | ||
90 | * in ~40%(**) performance improvement on 21064 box with gcc. | ||
91 | * But it's not only Alpha users who win here:-) Thanks to the | ||
92 | * early-n-wide read-ahead this implementation also exhibits | ||
93 | * >40% speed-up on SPARC and 20-30% on 64-bit MIPS (depending | ||
94 | * on sizeof(RC4_INT)). | ||
95 | * | ||
96 | * (*) "this" means code which recognizes the case when input | ||
97 | * and output pointers appear to be aligned at natural CPU | ||
98 | * word boundary | ||
99 | * (**) i.e. according to 'apps/openssl speed rc4' benchmark, | ||
100 | * crypto/rc4/rc4speed.c exhibits almost 70% speed-up... | ||
101 | * | ||
102 | * Caveats. | ||
103 | * | ||
104 | * - RC4_CHUNK="unsigned long long" should be a #1 choice for | ||
105 | * UltraSPARC. Unfortunately gcc generates very slow code | ||
106 | * (2.5-3 times slower than one generated by Sun's WorkShop | ||
107 | * C) and therefore gcc (at least 2.95 and earlier) should | ||
108 | * always be told that RC4_CHUNK="unsigned long". | ||
109 | * | ||
110 | * <appro@fy.chalmers.se> | ||
111 | */ | ||
112 | |||
113 | # define RC4_STEP ( \ | ||
114 | x=(x+1) &0xff, \ | ||
115 | tx=d[x], \ | ||
116 | y=(tx+y)&0xff, \ | ||
117 | ty=d[y], \ | ||
118 | d[y]=tx, \ | ||
119 | d[x]=ty, \ | ||
120 | (RC4_CHUNK)d[(tx+ty)&0xff]\ | ||
121 | ) | ||
122 | |||
123 | if ( ( ((size_t)indata & (sizeof(RC4_CHUNK)-1)) | | ||
124 | ((size_t)outdata & (sizeof(RC4_CHUNK)-1)) ) == 0 ) | ||
125 | { | ||
126 | RC4_CHUNK ichunk,otp; | ||
127 | const union { long one; char little; } is_endian = {1}; | ||
128 | |||
129 | /* | ||
130 | * I reckon we can afford to implement both endian | ||
131 | * cases and to decide which way to take at run-time | ||
132 | * because the machine code appears to be very compact | ||
133 | * and redundant 1-2KB is perfectly tolerable (i.e. | ||
134 | * in case the compiler fails to eliminate it:-). By | ||
135 | * suggestion from Terrel Larson <terr@terralogic.net> | ||
136 | * who also stands for the is_endian union:-) | ||
137 | * | ||
138 | * Special notes. | ||
139 | * | ||
140 | * - is_endian is declared automatic as doing otherwise | ||
141 | * (declaring static) prevents gcc from eliminating | ||
142 | * the redundant code; | ||
143 | * - compilers (those I've tried) don't seem to have | ||
144 | * problems eliminating either the operators guarded | ||
145 | * by "if (sizeof(RC4_CHUNK)==8)" or the condition | ||
146 | * expressions themselves so I've got 'em to replace | ||
147 | * corresponding #ifdefs from the previous version; | ||
148 | * - I chose to let the redundant switch cases when | ||
149 | * sizeof(RC4_CHUNK)!=8 be (were also #ifdefed | ||
150 | * before); | ||
151 | * - in case you wonder "&(sizeof(RC4_CHUNK)*8-1)" in | ||
152 | * [LB]ESHFT guards against "shift is out of range" | ||
153 | * warnings when sizeof(RC4_CHUNK)!=8 | ||
154 | * | ||
155 | * <appro@fy.chalmers.se> | ||
156 | */ | ||
157 | if (!is_endian.little) | ||
158 | { /* BIG-ENDIAN CASE */ | ||
159 | # define BESHFT(c) (((sizeof(RC4_CHUNK)-(c)-1)*8)&(sizeof(RC4_CHUNK)*8-1)) | ||
160 | for (;len&(0-sizeof(RC4_CHUNK));len-=sizeof(RC4_CHUNK)) | ||
161 | { | ||
162 | ichunk = *(RC4_CHUNK *)indata; | ||
163 | otp = RC4_STEP<<BESHFT(0); | ||
164 | otp |= RC4_STEP<<BESHFT(1); | ||
165 | otp |= RC4_STEP<<BESHFT(2); | ||
166 | otp |= RC4_STEP<<BESHFT(3); | ||
167 | if (sizeof(RC4_CHUNK)==8) | ||
168 | { | ||
169 | otp |= RC4_STEP<<BESHFT(4); | ||
170 | otp |= RC4_STEP<<BESHFT(5); | ||
171 | otp |= RC4_STEP<<BESHFT(6); | ||
172 | otp |= RC4_STEP<<BESHFT(7); | ||
173 | } | ||
174 | *(RC4_CHUNK *)outdata = otp^ichunk; | ||
175 | indata += sizeof(RC4_CHUNK); | ||
176 | outdata += sizeof(RC4_CHUNK); | ||
177 | } | ||
178 | if (len) | ||
179 | { | ||
180 | RC4_CHUNK mask=(RC4_CHUNK)-1, ochunk; | ||
181 | |||
182 | ichunk = *(RC4_CHUNK *)indata; | ||
183 | ochunk = *(RC4_CHUNK *)outdata; | ||
184 | otp = 0; | ||
185 | i = BESHFT(0); | ||
186 | mask <<= (sizeof(RC4_CHUNK)-len)<<3; | ||
187 | switch (len&(sizeof(RC4_CHUNK)-1)) | ||
188 | { | ||
189 | case 7: otp = RC4_STEP<<i, i-=8; | ||
190 | case 6: otp |= RC4_STEP<<i, i-=8; | ||
191 | case 5: otp |= RC4_STEP<<i, i-=8; | ||
192 | case 4: otp |= RC4_STEP<<i, i-=8; | ||
193 | case 3: otp |= RC4_STEP<<i, i-=8; | ||
194 | case 2: otp |= RC4_STEP<<i, i-=8; | ||
195 | case 1: otp |= RC4_STEP<<i, i-=8; | ||
196 | case 0: ; /* | ||
197 | * it's never the case, | ||
198 | * but it has to be here | ||
199 | * for ultrix? | ||
200 | */ | ||
201 | } | ||
202 | ochunk &= ~mask; | ||
203 | ochunk |= (otp^ichunk) & mask; | ||
204 | *(RC4_CHUNK *)outdata = ochunk; | ||
205 | } | ||
206 | key->x=x; | ||
207 | key->y=y; | ||
208 | return; | ||
209 | } | ||
210 | else | ||
211 | { /* LITTLE-ENDIAN CASE */ | ||
212 | # define LESHFT(c) (((c)*8)&(sizeof(RC4_CHUNK)*8-1)) | ||
213 | for (;len&(0-sizeof(RC4_CHUNK));len-=sizeof(RC4_CHUNK)) | ||
214 | { | ||
215 | ichunk = *(RC4_CHUNK *)indata; | ||
216 | otp = RC4_STEP; | ||
217 | otp |= RC4_STEP<<8; | ||
218 | otp |= RC4_STEP<<16; | ||
219 | otp |= RC4_STEP<<24; | ||
220 | if (sizeof(RC4_CHUNK)==8) | ||
221 | { | ||
222 | otp |= RC4_STEP<<LESHFT(4); | ||
223 | otp |= RC4_STEP<<LESHFT(5); | ||
224 | otp |= RC4_STEP<<LESHFT(6); | ||
225 | otp |= RC4_STEP<<LESHFT(7); | ||
226 | } | ||
227 | *(RC4_CHUNK *)outdata = otp^ichunk; | ||
228 | indata += sizeof(RC4_CHUNK); | ||
229 | outdata += sizeof(RC4_CHUNK); | ||
230 | } | ||
231 | if (len) | ||
232 | { | ||
233 | RC4_CHUNK mask=(RC4_CHUNK)-1, ochunk; | ||
234 | |||
235 | ichunk = *(RC4_CHUNK *)indata; | ||
236 | ochunk = *(RC4_CHUNK *)outdata; | ||
237 | otp = 0; | ||
238 | i = 0; | ||
239 | mask >>= (sizeof(RC4_CHUNK)-len)<<3; | ||
240 | switch (len&(sizeof(RC4_CHUNK)-1)) | ||
241 | { | ||
242 | case 7: otp = RC4_STEP, i+=8; | ||
243 | case 6: otp |= RC4_STEP<<i, i+=8; | ||
244 | case 5: otp |= RC4_STEP<<i, i+=8; | ||
245 | case 4: otp |= RC4_STEP<<i, i+=8; | ||
246 | case 3: otp |= RC4_STEP<<i, i+=8; | ||
247 | case 2: otp |= RC4_STEP<<i, i+=8; | ||
248 | case 1: otp |= RC4_STEP<<i, i+=8; | ||
249 | case 0: ; /* | ||
250 | * it's never the case, | ||
251 | * but it has to be here | ||
252 | * for ultrix? | ||
253 | */ | ||
254 | } | ||
255 | ochunk &= ~mask; | ||
256 | ochunk |= (otp^ichunk) & mask; | ||
257 | *(RC4_CHUNK *)outdata = ochunk; | ||
258 | } | ||
259 | key->x=x; | ||
260 | key->y=y; | ||
261 | return; | ||
262 | } | ||
263 | } | ||
264 | #endif | ||
265 | #define LOOP(in,out) \ | ||
266 | x=((x+1)&0xff); \ | ||
267 | tx=d[x]; \ | ||
268 | y=(tx+y)&0xff; \ | ||
269 | d[x]=ty=d[y]; \ | ||
270 | d[y]=tx; \ | ||
271 | (out) = d[(tx+ty)&0xff]^ (in); | ||
272 | |||
273 | #ifndef RC4_INDEX | ||
274 | #define RC4_LOOP(a,b,i) LOOP(*((a)++),*((b)++)) | ||
275 | #else | ||
276 | #define RC4_LOOP(a,b,i) LOOP(a[i],b[i]) | ||
277 | #endif | ||
278 | |||
279 | i=len>>3; | ||
280 | if (i) | ||
281 | { | ||
282 | for (;;) | ||
283 | { | ||
284 | RC4_LOOP(indata,outdata,0); | ||
285 | RC4_LOOP(indata,outdata,1); | ||
286 | RC4_LOOP(indata,outdata,2); | ||
287 | RC4_LOOP(indata,outdata,3); | ||
288 | RC4_LOOP(indata,outdata,4); | ||
289 | RC4_LOOP(indata,outdata,5); | ||
290 | RC4_LOOP(indata,outdata,6); | ||
291 | RC4_LOOP(indata,outdata,7); | ||
292 | #ifdef RC4_INDEX | ||
293 | indata+=8; | ||
294 | outdata+=8; | ||
295 | #endif | ||
296 | if (--i == 0) break; | ||
297 | } | ||
298 | } | ||
299 | i=len&0x07; | ||
300 | if (i) | ||
301 | { | ||
302 | for (;;) | ||
303 | { | ||
304 | RC4_LOOP(indata,outdata,0); if (--i == 0) break; | ||
305 | RC4_LOOP(indata,outdata,1); if (--i == 0) break; | ||
306 | RC4_LOOP(indata,outdata,2); if (--i == 0) break; | ||
307 | RC4_LOOP(indata,outdata,3); if (--i == 0) break; | ||
308 | RC4_LOOP(indata,outdata,4); if (--i == 0) break; | ||
309 | RC4_LOOP(indata,outdata,5); if (--i == 0) break; | ||
310 | RC4_LOOP(indata,outdata,6); if (--i == 0) break; | ||
311 | } | ||
312 | } | ||
313 | key->x=x; | ||
314 | key->y=y; | ||
315 | } | ||
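
For readers untangling the macros above, the following is an illustrative restatement in plain C of the single keystream step that both RC4_STEP and LOOP expand to; the helper name is hypothetical and the function is not part of the deleted file. The chunked path merely packs four or eight such keystream bytes into one aligned machine word with BESHFT/LESHFT before XORing it against the input, which is where the early, wide read-ahead speed-up discussed in the comments comes from.

/*
 * Illustrative sketch only: one RC4 PRGA step, equivalent to the
 * RC4_STEP/LOOP macros above but written with plain bytes instead of
 * RC4_INT.  The caller XORs the returned keystream byte with the input.
 */
unsigned char
rc4_prga_step(unsigned char s[256], unsigned char *x, unsigned char *y)
{
	unsigned char tx, ty;

	*x = (*x + 1) & 0xff;		/* i = i + 1 */
	tx = s[*x];
	*y = (*y + tx) & 0xff;		/* j = j + S[i] */
	ty = s[*y];
	s[*x] = ty;			/* swap S[i] and S[j] */
	s[*y] = tx;
	return (s[(tx + ty) & 0xff]);	/* keystream byte S[S[i] + S[j]] */
}
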
diff --git a/src/lib/libcrypto/rc4/rc4_locl.h b/src/lib/libcrypto/rc4/rc4_locl.h deleted file mode 100644 index c712e1632e..0000000000 --- a/src/lib/libcrypto/rc4/rc4_locl.h +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | #ifndef HEADER_RC4_LOCL_H | ||
2 | #define HEADER_RC4_LOCL_H | ||
3 | #include <openssl/opensslconf.h> | ||
4 | #include <cryptlib.h> | ||
5 | #endif | ||
diff --git a/src/lib/libcrypto/rc4/rc4_skey.c b/src/lib/libcrypto/rc4/rc4_skey.c deleted file mode 100644 index fda27636e7..0000000000 --- a/src/lib/libcrypto/rc4/rc4_skey.c +++ /dev/null | |||
@@ -1,116 +0,0 @@ | |||
1 | /* crypto/rc4/rc4_skey.c */ | ||
2 | /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This package is an SSL implementation written | ||
6 | * by Eric Young (eay@cryptsoft.com). | ||
7 | * The implementation was written so as to conform with Netscape's SSL. | ||
8 | * | ||
9 | * This library is free for commercial and non-commercial use as long as | ||
10 | * the following conditions are adhered to. The following conditions | ||
11 | * apply to all code found in this distribution, be it the RC4, RSA, | ||
12 | * lhash, DES, etc., code; not just the SSL code. The SSL documentation | ||
13 | * included with this distribution is covered by the same copyright terms | ||
14 | * except that the holder is Tim Hudson (tjh@cryptsoft.com). | ||
15 | * | ||
16 | * Copyright remains Eric Young's, and as such any Copyright notices in | ||
17 | * the code are not to be removed. | ||
18 | * If this package is used in a product, Eric Young should be given attribution | ||
19 | * as the author of the parts of the library used. | ||
20 | * This can be in the form of a textual message at program startup or | ||
21 | * in documentation (online or textual) provided with the package. | ||
22 | * | ||
23 | * Redistribution and use in source and binary forms, with or without | ||
24 | * modification, are permitted provided that the following conditions | ||
25 | * are met: | ||
26 | * 1. Redistributions of source code must retain the copyright | ||
27 | * notice, this list of conditions and the following disclaimer. | ||
28 | * 2. Redistributions in binary form must reproduce the above copyright | ||
29 | * notice, this list of conditions and the following disclaimer in the | ||
30 | * documentation and/or other materials provided with the distribution. | ||
31 | * 3. All advertising materials mentioning features or use of this software | ||
32 | * must display the following acknowledgement: | ||
33 | * "This product includes cryptographic software written by | ||
34 | * Eric Young (eay@cryptsoft.com)" | ||
35 | * The word 'cryptographic' can be left out if the routines from the library | ||
36 | * being used are not cryptographically related :-). | ||
37 | * 4. If you include any Windows specific code (or a derivative thereof) from | ||
38 | * the apps directory (application code) you must include an acknowledgement: | ||
39 | * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" | ||
40 | * | ||
41 | * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND | ||
42 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
43 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
44 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | ||
45 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
46 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
47 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
48 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
49 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
50 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
51 | * SUCH DAMAGE. | ||
52 | * | ||
53 | * The licence and distribution terms for any publicly available version or | ||
54 | * derivative of this code cannot be changed. i.e. this code cannot simply be | ||
55 | * copied and put under another distribution licence | ||
56 | * [including the GNU Public Licence.] | ||
57 | */ | ||
58 | |||
59 | #include <openssl/rc4.h> | ||
60 | #include "rc4_locl.h" | ||
61 | #include <openssl/opensslv.h> | ||
62 | |||
63 | const char RC4_version[]="RC4" OPENSSL_VERSION_PTEXT; | ||
64 | |||
65 | const char *RC4_options(void) | ||
66 | { | ||
67 | #ifdef RC4_INDEX | ||
68 | if (sizeof(RC4_INT) == 1) | ||
69 | return("rc4(idx,char)"); | ||
70 | else | ||
71 | return("rc4(idx,int)"); | ||
72 | #else | ||
73 | if (sizeof(RC4_INT) == 1) | ||
74 | return("rc4(ptr,char)"); | ||
75 | else | ||
76 | return("rc4(ptr,int)"); | ||
77 | #endif | ||
78 | } | ||
79 | |||
80 | /* RC4 as implemented from a posting from | ||
81 | * Newsgroups: sci.crypt | ||
82 | * From: sterndark@netcom.com (David Sterndark) | ||
83 | * Subject: RC4 Algorithm revealed. | ||
84 | * Message-ID: <sternCvKL4B.Hyy@netcom.com> | ||
85 | * Date: Wed, 14 Sep 1994 06:35:31 GMT | ||
86 | */ | ||
87 | |||
88 | void private_RC4_set_key(RC4_KEY *key, int len, const unsigned char *data) | ||
89 | { | ||
90 | register RC4_INT tmp; | ||
91 | register int id1,id2; | ||
92 | register RC4_INT *d; | ||
93 | unsigned int i; | ||
94 | |||
95 | d= &(key->data[0]); | ||
96 | key->x = 0; | ||
97 | key->y = 0; | ||
98 | id1=id2=0; | ||
99 | |||
100 | #define SK_LOOP(d,n) { \ | ||
101 | tmp=d[(n)]; \ | ||
102 | id2 = (data[id1] + tmp + id2) & 0xff; \ | ||
103 | if (++id1 == len) id1=0; \ | ||
104 | d[(n)]=d[id2]; \ | ||
105 | d[id2]=tmp; } | ||
106 | |||
107 | for (i=0; i < 256; i++) d[i]=i; | ||
108 | for (i=0; i < 256; i+=4) | ||
109 | { | ||
110 | SK_LOOP(d,i+0); | ||
111 | SK_LOOP(d,i+1); | ||
112 | SK_LOOP(d,i+2); | ||
113 | SK_LOOP(d,i+3); | ||
114 | } | ||
115 | } | ||
116 | |||
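
To make the key schedule above easier to follow, here is an illustrative plain-C restatement of what private_RC4_set_key() computes; the helper name is hypothetical and the function is not part of the deleted file. SK_LOOP is just this loop unrolled by four, with id1 wrapping around the key exactly like the i % keylen index below.

/*
 * Illustrative sketch only: the RC4 key-scheduling algorithm (KSA)
 * implemented by private_RC4_set_key() above, with plain bytes instead
 * of RC4_INT and without the 4-way unroll.
 */
void
rc4_ksa(unsigned char s[256], const unsigned char *key, int keylen)
{
	unsigned char tmp;
	int i, j = 0;

	for (i = 0; i < 256; i++)
		s[i] = (unsigned char)i;	/* start from the identity permutation */
	for (i = 0; i < 256; i++) {
		j = (j + s[i] + key[i % keylen]) & 0xff;
		tmp = s[i];			/* swap S[i] and S[j] */
		s[i] = s[j];
		s[j] = tmp;
	}
}

Feeding the resulting permutation, with x = y = 0, into the PRGA step sketched after rc4_enc.c above reproduces the keystream that RC4() generates.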