Diffstat (limited to 'src/lib/libcrypto/rc4/asm')
-rw-r--r--	src/lib/libcrypto/rc4/asm/rc4-586.pl	| 229 ----------------
-rwxr-xr-x	src/lib/libcrypto/rc4/asm/rc4-x86_64.pl	| 150 ----------
2 files changed, 0 insertions(+), 379 deletions(-)
diff --git a/src/lib/libcrypto/rc4/asm/rc4-586.pl b/src/lib/libcrypto/rc4/asm/rc4-586.pl
deleted file mode 100644
index d6e98f0811..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-586.pl
+++ /dev/null
@@ -1,229 +0,0 @@
#!/usr/local/bin/perl

# At some point it became apparent that the original SSLeay RC4
# assembler implementation performs suboptimally on the latest IA-32
# microarchitectures. After re-tuning, performance changed as
# follows:
#
#	Pentium		+0%
#	Pentium III	+17%
#	AMD		+52%(*)
#	P4		+180%(**)
#
# (*)	This number is actually a trade-off:-) It's possible to
#	achieve +72%, but at the cost of -48% off PIII performance.
#	In other words, code performing a further 13% faster on AMD
#	would perform almost 2 times slower on Intel PIII...
#	For reference! This code delivers ~80% of rc4-amd64.pl
#	performance on the same Opteron machine.
# (**)	This number requires the compressed key schedule set up by
#	RC4_set_key, and therefore doesn't apply to 0.9.7 [the option
#	for a compressed key schedule is implemented in 0.9.8 and
#	later; see the commentary section in rc4_skey.c for details].
#
#				<appro@fy.chalmers.se>

push(@INC,"perlasm","../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"rc4-586.pl");

$x="eax";
$y="ebx";
$tx="ecx";
$ty="edx";
$in="esi";
$out="edi";
$d="ebp";

&RC4("RC4");

&asm_finish();

sub RC4_loop
	{
	local($n,$p,$char)=@_;

	&comment("Round $n");

	if ($char)
		{
		if ($p >= 0)
			{
			&mov(	$ty,	&swtmp(2));
			&cmp(	$ty,	$in);
			&jbe(	&label("finished"));
			&inc(	$in);
			}
		else
			{
			&add(	$ty,	8);
			&inc(	$in);
			&cmp(	$ty,	$in);
			&jb(	&label("finished"));
			&mov(	&swtmp(2),	$ty);
			}
		}
	# Moved out
	# &mov(	$tx,	&DWP(0,$d,$x,4)) if $p < 0;

	&add(	&LB($y),	&LB($tx));
	&mov(	$ty,	&DWP(0,$d,$y,4));
	# XXX
	&mov(	&DWP(0,$d,$x,4),$ty);
	&add(	$ty,	$tx);
	&mov(	&DWP(0,$d,$y,4),$tx);
	&and(	$ty,	0xff);
	&inc(	&LB($x));			# NEXT ROUND
	&mov(	$tx,	&DWP(0,$d,$x,4)) if $p < 1;	# NEXT ROUND
	&mov(	$ty,	&DWP(0,$d,$ty,4));

	if (!$char)
		{
		# moved up into last round
		if ($p >= 1)
			{
			&add(	$out,	8);
			}
		&movb(	&BP($n,"esp","",0),	&LB($ty));
		}
	else
		{
		# Note that in+=8 has occurred
		&movb(	&HB($ty),	&BP(-1,$in,"",0));
		# XXX
		&xorb(&LB($ty),	&HB($ty));
		# XXX
		&movb(&BP($n,$out,"",0),&LB($ty));
		}
	}


sub RC4
	{
	local($name)=@_;

	&function_begin_B($name,"");

	&mov($ty,&wparam(1));		# len
	&cmp($ty,0);
	&jne(&label("proceed"));
	&ret();
	&set_label("proceed");

	&comment("");

	&push("ebp");
	&push("ebx");
	&push("esi");
	&xor(	$x,	$x);		# avoid partial register stalls
	&push("edi");
	&xor(	$y,	$y);		# avoid partial register stalls
	&mov(	$d,	&wparam(0));	# key
	&mov(	$in,	&wparam(2));

	&movb(	&LB($x),	&BP(0,$d,"",1));
	&movb(	&LB($y),	&BP(4,$d,"",1));

	&mov(	$out,	&wparam(3));
	&inc(	&LB($x));

	&stack_push(3);			# 3 temp variables
	&add(	$d,	8);

	# detect the compressed schedule, see the commentary section in
	# rc4_skey.c... in a 0.9.7 context the ~50 bytes below the RC4_CHAR
	# label remain redundant, as the schedule is set up in 0.9.8 and later.
	&cmp(&DWP(256,$d),-1);
	&je(&label("RC4_CHAR"));

	&lea(	$ty,	&DWP(-8,$ty,$in));

	# check for 0 length input

	&mov(	&swtmp(2),	$ty);	# this is now the address to exit at
	&mov(	$tx,	&DWP(0,$d,$x,4));

	&cmp(	$ty,	$in);
	&jb(	&label("end"));		# less than 8 bytes

	&set_label("start");

	# filling DELAY SLOT
	&add(	$in,	8);

	&RC4_loop(0,-1,0);
	&RC4_loop(1,0,0);
	&RC4_loop(2,0,0);
	&RC4_loop(3,0,0);
	&RC4_loop(4,0,0);
	&RC4_loop(5,0,0);
	&RC4_loop(6,0,0);
	&RC4_loop(7,1,0);

	&comment("apply the cipher text");
	# xor the cipher data with input

	#&add(	$out,	8);		# moved up into last round

	&mov(	$tx,	&swtmp(0));
	&mov(	$ty,	&DWP(-8,$in,"",0));
	&xor(	$tx,	$ty);
	&mov(	$ty,	&DWP(-4,$in,"",0));
	&mov(	&DWP(-8,$out,"",0),	$tx);
	&mov(	$tx,	&swtmp(1));
	&xor(	$tx,	$ty);
	&mov(	$ty,	&swtmp(2));	# load end ptr
	&mov(	&DWP(-4,$out,"",0),	$tx);
	&mov(	$tx,	&DWP(0,$d,$x,4));
	&cmp($in,	$ty);
	&jbe(&label("start"));

	&set_label("end");

	# There is quite a bit of extra crap in RC4_loop() for this
	# first round
	&RC4_loop(0,-1,1);
	&RC4_loop(1,0,1);
	&RC4_loop(2,0,1);
	&RC4_loop(3,0,1);
	&RC4_loop(4,0,1);
	&RC4_loop(5,0,1);
	&RC4_loop(6,1,1);

	&jmp(&label("finished"));

	&align(16);
	# this is essentially an Intel P4-specific codepath, see rc4_skey.c;
	# it is engaged in a 0.9.8-and-later context...
	&set_label("RC4_CHAR");

	&lea	($ty,&DWP(0,$in,$ty));
	&mov	(&swtmp(2),$ty);

	# strangely enough, the unrolled loop performs over 20% slower...
	&set_label("RC4_CHAR_loop");
	&movz	($tx,&BP(0,$d,$x));
	&add	(&LB($y),&LB($tx));
	&movz	($ty,&BP(0,$d,$y));
	&movb	(&BP(0,$d,$y),&LB($tx));
	&movb	(&BP(0,$d,$x),&LB($ty));
	&add	(&LB($ty),&LB($tx));
	&movz	($ty,&BP(0,$d,$ty));
	&xorb	(&LB($ty),&BP(0,$in));
	&movb	(&BP(0,$out),&LB($ty));
	&inc	(&LB($x));
	&inc	($in);
	&inc	($out);
	&cmp	($in,&swtmp(2));
	&jb	(&label("RC4_CHAR_loop"));

	&set_label("finished");
	&dec(	$x);
	&stack_pop(3);
	&movb(	&BP(-4,$d,"",0),&LB($y));
	&movb(	&BP(-8,$d,"",0),&LB($x));

	&function_end($name);
	}

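For orientation between the two deleted files: RC4_loop() above emits x86 instructions for one round of the standard RC4 PRGA (increment x, accumulate y, swap the two state entries, fetch and XOR the keystream byte), unrolled eight-fold per iteration. A minimal plain-Perl sketch of that round, assuming the expanded (non-compressed) 256-entry key schedule that the integer codepath uses; the names rc4_prga and $S are illustrative, not taken from the deleted script:

sub rc4_prga {
	my ($S, $x, $y, $in) = @_;	# $S: ref to the 256-entry state array
	my $out = '';
	for my $byte (unpack("C*", $in)) {
		$x = ($x + 1) & 0xff;
		$y = ($y + $S->[$x]) & 0xff;
		@$S[$x, $y] = @$S[$y, $x];	# swap S[x] and S[y]
		my $k = $S->[($S->[$x] + $S->[$y]) & 0xff];
		$out .= chr($byte ^ $k);	# XOR keystream byte into input
	}
	return ($out, $x, $y);
}

The unrolled x86 version buys its speed by keeping x, y, S[x] and S[y] in registers across rounds and batching the output XOR eight bytes at a time, but the data flow is exactly the loop body above.
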
diff --git a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
deleted file mode 100755
index b628daca70..0000000000
--- a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
+++ /dev/null
@@ -1,150 +0,0 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# Unlike 0.9.7f, this code expects RC4_CHAR back in the config line!
# See the commentary section in the corresponding script in the
# development branch for background information about this option
# carousel. For those who don't have the energy to figure out these
# gory details, here is the basis in the form of a performance matrix
# relative to the original 0.9.7e C code-base:
#
#		0.9.7e	0.9.7f	this
# AMD64		1x	3.3x	2.4x
# EM64T		1x	0.8x	1.5x
#
# In other words, the idea is to trade 25% of AMD64 performance to
# compensate for the deterioration and to gain 90% on the EM64T core.
# The development branch maintains the best performance for either
# target, i.e. 3.3x for AMD64 and 1.5x for EM64T.

$output=shift;

open STDOUT,">$output" or die "can't open $output: $!";

$dat="%rdi";	# arg1
$len="%rsi";	# arg2
$inp="%rdx";	# arg3
$out="%rcx";	# arg4

@XX=("%r8","%r10");
@TX=("%r9","%r11");
$YY="%r12";
$TY="%r13";

$code=<<___;
.text

.globl	RC4
.type	RC4,\@function
.align	16
RC4:	or	$len,$len
	jne	.Lentry
	repret
.Lentry:
	push	%r12
	push	%r13

	add	\$2,$dat
	movzb	-2($dat),$XX[0]#d
	movzb	-1($dat),$YY#d

	add	\$1,$XX[0]#b
	movzb	($dat,$XX[0]),$TX[0]#d
	test	\$-8,$len
	jz	.Lcloop1
	push	%rbx
.align	16			# incidentally aligned already
.Lcloop8:
	mov	($inp),%eax
	mov	4($inp),%ebx
___
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
for ($i=0;$i<4;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i		# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%al
	ror	\$8,%eax
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
for ($i=4;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i		# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%bl
	ror	\$8,%ebx
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
$code.=<<___;
	lea	-8($len),$len
	mov	%eax,($out)
	lea	8($inp),$inp
	mov	%ebx,4($out)
	lea	8($out),$out

	test	\$-8,$len
	jnz	.Lcloop8
	pop	%rbx
	cmp	\$0,$len
	jne	.Lcloop1
.Lexit:
	sub	\$1,$XX[0]#b
	movb	$XX[0]#b,-2($dat)
	movb	$YY#b,-1($dat)

	pop	%r13
	pop	%r12
	repret

.align	16
.Lcloop1:
	add	$TX[0]#b,$YY#b
	movzb	($dat,$YY),$TY#d
	movb	$TX[0]#b,($dat,$YY)
	movb	$TY#b,($dat,$XX[0])
	add	$TX[0]#b,$TY#b
	add	\$1,$XX[0]#b
	movzb	($dat,$TY),$TY#d
	movzb	($dat,$XX[0]),$TX[0]#d
	xorb	($inp),$TY#b
	lea	1($inp),$inp
	movb	$TY#b,($out)
	lea	1($out),$out
	sub	\$1,$len
	jnz	.Lcloop1
	jmp	.Lexit
.size	RC4,.-RC4
___

$code =~ s/#([bwd])/$1/gm;

$code =~ s/repret/.byte\t0xF3,0xC3/gm;

print $code;
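A note on the two closing substitutions, since they are easy to miss: the #b/#w/#d suffixes let each template name one 64-bit register and select its byte/word/dword alias after the fact, and repret is emitted as the literal two-byte sequence 0xF3 0xC3 ("rep ret"), a form widely used at the time to sidestep a branch-prediction penalty that AMD cores took on a bare one-byte ret. A standalone illustration of the same postprocessing (the sample instructions below are hypothetical, not taken from the deleted script):

my $sample = <<'___';
	movzb	(%rdi,%r9),%r13#d
	add	%r13#b,%r12#b
	repret
___
$sample =~ s/#([bwd])/$1/gm;			# %r13#d -> %r13d, %r13#b -> %r13b
$sample =~ s/repret/.byte\t0xF3,0xC3/gm;	# encode "rep ret" as raw bytes
print $sample;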