Diffstat (limited to 'src/lib/libcrypto/modes/asm')
-rw-r--r--   src/lib/libcrypto/modes/asm/ghash-alpha.pl     444
-rw-r--r--   src/lib/libcrypto/modes/asm/ghash-armv4.pl     430
-rw-r--r--   src/lib/libcrypto/modes/asm/ghash-parisc.pl    740
-rw-r--r--   src/lib/libcrypto/modes/asm/ghash-sparcv9.pl   351
-rw-r--r--   src/lib/libcrypto/modes/asm/ghash-x86.pl      1326
-rw-r--r--   src/lib/libcrypto/modes/asm/ghash-x86_64.pl    812
6 files changed, 0 insertions, 4103 deletions
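
All six removed files are Perl scripts that emit platform assembler for the same "4-bit" (256-byte per-key table) GHASH primitive: one multiplication in GF(2^128) plus a streamed hashing loop built on it. As orientation for the diffs below, here is a compact C model of that primitive, patterned after the portable gcm_gmult_4bit() in gcm128.c (which the ARM file refers to). It is an illustrative sketch, not the removed code, and it assumes the per-key table Htable[i] = i*H has already been precomputed (gcm_init_4bit() in gcm128.c does this):

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } u128;

    /* Reduction constants for the nibble shifted out of Z at each step;
     * entry i is the nibble i folded through the GHASH reduction byte 0xE1,
     * positioned so it can be XORed into the top of Z.hi. */
    static const uint16_t rem_4bit[16] = {
        0x0000, 0x1C20, 0x3840, 0x2460, 0x7080, 0x6CA0, 0x48C0, 0x54E0,
        0xE100, 0xFD20, 0xD940, 0xC560, 0x9180, 0x8DA0, 0xA9C0, 0xB5E0
    };

    /* Xi <- Xi * H in GF(2^128); Xi is stored as 16 big-endian bytes and
     * Htable[i] = i*H is precomputed from the hash key H. */
    static void gmult_4bit(uint8_t Xi[16], const u128 Htable[16])
    {
        u128 Z;
        int cnt = 15;
        unsigned int rem, nlo = Xi[15], nhi = nlo >> 4;

        nlo &= 0x0f;
        Z = Htable[nlo];                 /* lowest nibble of the last byte */

        for (;;) {
            /* Z >>= 4, folding the 4 bits shifted out back in at the top */
            rem  = (unsigned int)Z.lo & 0x0f;
            Z.lo = (Z.hi << 60) | (Z.lo >> 4);
            Z.hi = (Z.hi >> 4) ^ ((uint64_t)rem_4bit[rem] << 48);
            Z.hi ^= Htable[nhi].hi;      /* fold in the byte's high nibble */
            Z.lo ^= Htable[nhi].lo;

            if (--cnt < 0)
                break;

            nlo = Xi[cnt];
            nhi = nlo >> 4;
            nlo &= 0x0f;

            rem  = (unsigned int)Z.lo & 0x0f;
            Z.lo = (Z.hi << 60) | (Z.lo >> 4);
            Z.hi = (Z.hi >> 4) ^ ((uint64_t)rem_4bit[rem] << 48);
            Z.hi ^= Htable[nlo].hi;      /* fold in the next byte's low nibble */
            Z.lo ^= Htable[nlo].lo;
        }

        for (int i = 0; i < 8; i++) {    /* store Z back in big-endian order */
            Xi[i]     = (uint8_t)(Z.hi >> (56 - 8 * i));
            Xi[8 + i] = (uint8_t)(Z.lo >> (56 - 8 * i));
        }
    }

The assembler modules below unroll and modulo-schedule this loop; their gcm_ghash_4bit() entry points additionally XOR each 16-byte input block into Xi before the multiplication.
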
diff --git a/src/lib/libcrypto/modes/asm/ghash-alpha.pl b/src/lib/libcrypto/modes/asm/ghash-alpha.pl
deleted file mode 100644
index 9d847006c4..0000000000
--- a/src/lib/libcrypto/modes/asm/ghash-alpha.pl
+++ /dev/null
@@ -1,444 +0,0 @@
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # March 2010 | ||
11 | # | ||
12 | # The module implements "4-bit" GCM GHASH function and underlying | ||
13 | # single multiplication operation in GF(2^128). "4-bit" means that it | ||
14 | # uses 256 bytes per-key table [+128 bytes shared table]. Even though | ||
15 | # loops are aggressively modulo-scheduled in respect to references to | ||
16 | # Htbl and Z.hi updates for 8 cycles per byte, measured performance is | ||
17 | # ~12 cycles per processed byte on 21264 CPU. It seems to be a dynamic | ||
18 | # scheduling "glitch," because uprofile(1) indicates uniform sample | ||
19 | # distribution, as if all instruction bundles execute in 1.5 cycles. | ||
20 | # Meaning that it could have been even faster, yet 12 cycles is ~60% | ||
21 | # better than gcc-generated code and ~80% than code generated by vendor | ||
22 | # compiler. | ||
23 | |||
24 | $cnt="v0"; # $0 | ||
25 | $t0="t0"; | ||
26 | $t1="t1"; | ||
27 | $t2="t2"; | ||
28 | $Thi0="t3"; # $4 | ||
29 | $Tlo0="t4"; | ||
30 | $Thi1="t5"; | ||
31 | $Tlo1="t6"; | ||
32 | $rem="t7"; # $8 | ||
33 | ################# | ||
34 | $Xi="a0"; # $16, input argument block | ||
35 | $Htbl="a1"; | ||
36 | $inp="a2"; | ||
37 | $len="a3"; | ||
38 | $nlo="a4"; # $20 | ||
39 | $nhi="a5"; | ||
40 | $Zhi="t8"; | ||
41 | $Zlo="t9"; | ||
42 | $Xhi="t10"; # $24 | ||
43 | $Xlo="t11"; | ||
44 | $remp="t12"; | ||
45 | $rem_4bit="AT"; # $28 | ||
46 | |||
47 | { my $N; | ||
48 | sub loop() { | ||
49 | |||
50 | $N++; | ||
51 | $code.=<<___; | ||
52 | .align 4 | ||
53 | extbl $Xlo,7,$nlo | ||
54 | and $nlo,0xf0,$nhi | ||
55 | sll $nlo,4,$nlo | ||
56 | and $nlo,0xf0,$nlo | ||
57 | |||
58 | addq $nlo,$Htbl,$nlo | ||
59 | ldq $Zlo,8($nlo) | ||
60 | addq $nhi,$Htbl,$nhi | ||
61 | ldq $Zhi,0($nlo) | ||
62 | |||
63 | and $Zlo,0x0f,$remp | ||
64 | sll $Zhi,60,$t0 | ||
65 | lda $cnt,6(zero) | ||
66 | extbl $Xlo,6,$nlo | ||
67 | |||
68 | ldq $Tlo1,8($nhi) | ||
69 | s8addq $remp,$rem_4bit,$remp | ||
70 | ldq $Thi1,0($nhi) | ||
71 | srl $Zlo,4,$Zlo | ||
72 | |||
73 | ldq $rem,0($remp) | ||
74 | srl $Zhi,4,$Zhi | ||
75 | xor $t0,$Zlo,$Zlo | ||
76 | and $nlo,0xf0,$nhi | ||
77 | |||
78 | xor $Tlo1,$Zlo,$Zlo | ||
79 | sll $nlo,4,$nlo | ||
80 | xor $Thi1,$Zhi,$Zhi | ||
81 | and $nlo,0xf0,$nlo | ||
82 | |||
83 | addq $nlo,$Htbl,$nlo | ||
84 | ldq $Tlo0,8($nlo) | ||
85 | addq $nhi,$Htbl,$nhi | ||
86 | ldq $Thi0,0($nlo) | ||
87 | |||
88 | .Looplo$N: | ||
89 | and $Zlo,0x0f,$remp | ||
90 | sll $Zhi,60,$t0 | ||
91 | subq $cnt,1,$cnt | ||
92 | srl $Zlo,4,$Zlo | ||
93 | |||
94 | ldq $Tlo1,8($nhi) | ||
95 | xor $rem,$Zhi,$Zhi | ||
96 | ldq $Thi1,0($nhi) | ||
97 | s8addq $remp,$rem_4bit,$remp | ||
98 | |||
99 | ldq $rem,0($remp) | ||
100 | srl $Zhi,4,$Zhi | ||
101 | xor $t0,$Zlo,$Zlo | ||
102 | extbl $Xlo,$cnt,$nlo | ||
103 | |||
104 | and $nlo,0xf0,$nhi | ||
105 | xor $Thi0,$Zhi,$Zhi | ||
106 | xor $Tlo0,$Zlo,$Zlo | ||
107 | sll $nlo,4,$nlo | ||
108 | |||
109 | |||
110 | and $Zlo,0x0f,$remp | ||
111 | sll $Zhi,60,$t0 | ||
112 | and $nlo,0xf0,$nlo | ||
113 | srl $Zlo,4,$Zlo | ||
114 | |||
115 | s8addq $remp,$rem_4bit,$remp | ||
116 | xor $rem,$Zhi,$Zhi | ||
117 | addq $nlo,$Htbl,$nlo | ||
118 | addq $nhi,$Htbl,$nhi | ||
119 | |||
120 | ldq $rem,0($remp) | ||
121 | srl $Zhi,4,$Zhi | ||
122 | ldq $Tlo0,8($nlo) | ||
123 | xor $t0,$Zlo,$Zlo | ||
124 | |||
125 | xor $Tlo1,$Zlo,$Zlo | ||
126 | xor $Thi1,$Zhi,$Zhi | ||
127 | ldq $Thi0,0($nlo) | ||
128 | bne $cnt,.Looplo$N | ||
129 | |||
130 | |||
131 | and $Zlo,0x0f,$remp | ||
132 | sll $Zhi,60,$t0 | ||
133 | lda $cnt,7(zero) | ||
134 | srl $Zlo,4,$Zlo | ||
135 | |||
136 | ldq $Tlo1,8($nhi) | ||
137 | xor $rem,$Zhi,$Zhi | ||
138 | ldq $Thi1,0($nhi) | ||
139 | s8addq $remp,$rem_4bit,$remp | ||
140 | |||
141 | ldq $rem,0($remp) | ||
142 | srl $Zhi,4,$Zhi | ||
143 | xor $t0,$Zlo,$Zlo | ||
144 | extbl $Xhi,$cnt,$nlo | ||
145 | |||
146 | and $nlo,0xf0,$nhi | ||
147 | xor $Thi0,$Zhi,$Zhi | ||
148 | xor $Tlo0,$Zlo,$Zlo | ||
149 | sll $nlo,4,$nlo | ||
150 | |||
151 | and $Zlo,0x0f,$remp | ||
152 | sll $Zhi,60,$t0 | ||
153 | and $nlo,0xf0,$nlo | ||
154 | srl $Zlo,4,$Zlo | ||
155 | |||
156 | s8addq $remp,$rem_4bit,$remp | ||
157 | xor $rem,$Zhi,$Zhi | ||
158 | addq $nlo,$Htbl,$nlo | ||
159 | addq $nhi,$Htbl,$nhi | ||
160 | |||
161 | ldq $rem,0($remp) | ||
162 | srl $Zhi,4,$Zhi | ||
163 | ldq $Tlo0,8($nlo) | ||
164 | xor $t0,$Zlo,$Zlo | ||
165 | |||
166 | xor $Tlo1,$Zlo,$Zlo | ||
167 | xor $Thi1,$Zhi,$Zhi | ||
168 | ldq $Thi0,0($nlo) | ||
169 | unop | ||
170 | |||
171 | |||
172 | .Loophi$N: | ||
173 | and $Zlo,0x0f,$remp | ||
174 | sll $Zhi,60,$t0 | ||
175 | subq $cnt,1,$cnt | ||
176 | srl $Zlo,4,$Zlo | ||
177 | |||
178 | ldq $Tlo1,8($nhi) | ||
179 | xor $rem,$Zhi,$Zhi | ||
180 | ldq $Thi1,0($nhi) | ||
181 | s8addq $remp,$rem_4bit,$remp | ||
182 | |||
183 | ldq $rem,0($remp) | ||
184 | srl $Zhi,4,$Zhi | ||
185 | xor $t0,$Zlo,$Zlo | ||
186 | extbl $Xhi,$cnt,$nlo | ||
187 | |||
188 | and $nlo,0xf0,$nhi | ||
189 | xor $Thi0,$Zhi,$Zhi | ||
190 | xor $Tlo0,$Zlo,$Zlo | ||
191 | sll $nlo,4,$nlo | ||
192 | |||
193 | |||
194 | and $Zlo,0x0f,$remp | ||
195 | sll $Zhi,60,$t0 | ||
196 | and $nlo,0xf0,$nlo | ||
197 | srl $Zlo,4,$Zlo | ||
198 | |||
199 | s8addq $remp,$rem_4bit,$remp | ||
200 | xor $rem,$Zhi,$Zhi | ||
201 | addq $nlo,$Htbl,$nlo | ||
202 | addq $nhi,$Htbl,$nhi | ||
203 | |||
204 | ldq $rem,0($remp) | ||
205 | srl $Zhi,4,$Zhi | ||
206 | ldq $Tlo0,8($nlo) | ||
207 | xor $t0,$Zlo,$Zlo | ||
208 | |||
209 | xor $Tlo1,$Zlo,$Zlo | ||
210 | xor $Thi1,$Zhi,$Zhi | ||
211 | ldq $Thi0,0($nlo) | ||
212 | bne $cnt,.Loophi$N | ||
213 | |||
214 | |||
215 | and $Zlo,0x0f,$remp | ||
216 | sll $Zhi,60,$t0 | ||
217 | srl $Zlo,4,$Zlo | ||
218 | |||
219 | ldq $Tlo1,8($nhi) | ||
220 | xor $rem,$Zhi,$Zhi | ||
221 | ldq $Thi1,0($nhi) | ||
222 | s8addq $remp,$rem_4bit,$remp | ||
223 | |||
224 | ldq $rem,0($remp) | ||
225 | srl $Zhi,4,$Zhi | ||
226 | xor $t0,$Zlo,$Zlo | ||
227 | |||
228 | xor $Tlo0,$Zlo,$Zlo | ||
229 | xor $Thi0,$Zhi,$Zhi | ||
230 | |||
231 | and $Zlo,0x0f,$remp | ||
232 | sll $Zhi,60,$t0 | ||
233 | srl $Zlo,4,$Zlo | ||
234 | |||
235 | s8addq $remp,$rem_4bit,$remp | ||
236 | xor $rem,$Zhi,$Zhi | ||
237 | |||
238 | ldq $rem,0($remp) | ||
239 | srl $Zhi,4,$Zhi | ||
240 | xor $Tlo1,$Zlo,$Zlo | ||
241 | xor $Thi1,$Zhi,$Zhi | ||
242 | xor $t0,$Zlo,$Zlo | ||
243 | xor $rem,$Zhi,$Zhi | ||
244 | ___ | ||
245 | }} | ||
246 | |||
247 | $code=<<___; | ||
248 | #include <machine/asm.h> | ||
249 | |||
250 | .text | ||
251 | |||
252 | .set noat | ||
253 | .set noreorder | ||
254 | .globl gcm_gmult_4bit | ||
255 | .align 4 | ||
256 | .ent gcm_gmult_4bit | ||
257 | gcm_gmult_4bit: | ||
258 | .frame sp,0,ra | ||
259 | .prologue 0 | ||
260 | |||
261 | ldq $Xlo,8($Xi) | ||
262 | ldq $Xhi,0($Xi) | ||
263 | |||
264 | lda $rem_4bit,rem_4bit | ||
265 | ___ | ||
266 | |||
267 | &loop(); | ||
268 | |||
269 | $code.=<<___; | ||
270 | srl $Zlo,24,$t0 # byte swap | ||
271 | srl $Zlo,8,$t1 | ||
272 | |||
273 | sll $Zlo,8,$t2 | ||
274 | sll $Zlo,24,$Zlo | ||
275 | zapnot $t0,0x11,$t0 | ||
276 | zapnot $t1,0x22,$t1 | ||
277 | |||
278 | zapnot $Zlo,0x88,$Zlo | ||
279 | or $t0,$t1,$t0 | ||
280 | zapnot $t2,0x44,$t2 | ||
281 | |||
282 | or $Zlo,$t0,$Zlo | ||
283 | srl $Zhi,24,$t0 | ||
284 | srl $Zhi,8,$t1 | ||
285 | |||
286 | or $Zlo,$t2,$Zlo | ||
287 | sll $Zhi,8,$t2 | ||
288 | sll $Zhi,24,$Zhi | ||
289 | |||
290 | srl $Zlo,32,$Xlo | ||
291 | sll $Zlo,32,$Zlo | ||
292 | |||
293 | zapnot $t0,0x11,$t0 | ||
294 | zapnot $t1,0x22,$t1 | ||
295 | or $Zlo,$Xlo,$Xlo | ||
296 | |||
297 | zapnot $Zhi,0x88,$Zhi | ||
298 | or $t0,$t1,$t0 | ||
299 | zapnot $t2,0x44,$t2 | ||
300 | |||
301 | or $Zhi,$t0,$Zhi | ||
302 | or $Zhi,$t2,$Zhi | ||
303 | |||
304 | srl $Zhi,32,$Xhi | ||
305 | sll $Zhi,32,$Zhi | ||
306 | |||
307 | or $Zhi,$Xhi,$Xhi | ||
308 | stq $Xlo,8($Xi) | ||
309 | stq $Xhi,0($Xi) | ||
310 | |||
311 | ret (ra) | ||
312 | .end gcm_gmult_4bit | ||
313 | ___ | ||
314 | |||
315 | $inhi="s0"; | ||
316 | $inlo="s1"; | ||
317 | |||
318 | $code.=<<___; | ||
319 | .globl gcm_ghash_4bit | ||
320 | .align 4 | ||
321 | .ent gcm_ghash_4bit | ||
322 | gcm_ghash_4bit: | ||
323 | lda sp,-32(sp) | ||
324 | stq ra,0(sp) | ||
325 | stq s0,8(sp) | ||
326 | stq s1,16(sp) | ||
327 | .mask 0x04000600,-32 | ||
328 | .frame sp,32,ra | ||
329 | .prologue 0 | ||
330 | |||
331 | ldq_u $inhi,0($inp) | ||
332 | ldq_u $Thi0,7($inp) | ||
333 | ldq_u $inlo,8($inp) | ||
334 | ldq_u $Tlo0,15($inp) | ||
335 | ldq $Xhi,0($Xi) | ||
336 | ldq $Xlo,8($Xi) | ||
337 | |||
338 | lda $rem_4bit,rem_4bit | ||
339 | |||
340 | .Louter: | ||
341 | extql $inhi,$inp,$inhi | ||
342 | extqh $Thi0,$inp,$Thi0 | ||
343 | or $inhi,$Thi0,$inhi | ||
344 | lda $inp,16($inp) | ||
345 | |||
346 | extql $inlo,$inp,$inlo | ||
347 | extqh $Tlo0,$inp,$Tlo0 | ||
348 | or $inlo,$Tlo0,$inlo | ||
349 | subq $len,16,$len | ||
350 | |||
351 | xor $Xlo,$inlo,$Xlo | ||
352 | xor $Xhi,$inhi,$Xhi | ||
353 | ___ | ||
354 | |||
355 | &loop(); | ||
356 | |||
357 | $code.=<<___; | ||
358 | srl $Zlo,24,$t0 # byte swap | ||
359 | srl $Zlo,8,$t1 | ||
360 | |||
361 | sll $Zlo,8,$t2 | ||
362 | sll $Zlo,24,$Zlo | ||
363 | zapnot $t0,0x11,$t0 | ||
364 | zapnot $t1,0x22,$t1 | ||
365 | |||
366 | zapnot $Zlo,0x88,$Zlo | ||
367 | or $t0,$t1,$t0 | ||
368 | zapnot $t2,0x44,$t2 | ||
369 | |||
370 | or $Zlo,$t0,$Zlo | ||
371 | srl $Zhi,24,$t0 | ||
372 | srl $Zhi,8,$t1 | ||
373 | |||
374 | or $Zlo,$t2,$Zlo | ||
375 | sll $Zhi,8,$t2 | ||
376 | sll $Zhi,24,$Zhi | ||
377 | |||
378 | srl $Zlo,32,$Xlo | ||
379 | sll $Zlo,32,$Zlo | ||
380 | beq $len,.Ldone | ||
381 | |||
382 | zapnot $t0,0x11,$t0 | ||
383 | zapnot $t1,0x22,$t1 | ||
384 | or $Zlo,$Xlo,$Xlo | ||
385 | ldq_u $inhi,0($inp) | ||
386 | |||
387 | zapnot $Zhi,0x88,$Zhi | ||
388 | or $t0,$t1,$t0 | ||
389 | zapnot $t2,0x44,$t2 | ||
390 | ldq_u $Thi0,7($inp) | ||
391 | |||
392 | or $Zhi,$t0,$Zhi | ||
393 | or $Zhi,$t2,$Zhi | ||
394 | ldq_u $inlo,8($inp) | ||
395 | ldq_u $Tlo0,15($inp) | ||
396 | |||
397 | srl $Zhi,32,$Xhi | ||
398 | sll $Zhi,32,$Zhi | ||
399 | |||
400 | or $Zhi,$Xhi,$Xhi | ||
401 | br zero,.Louter | ||
402 | |||
403 | .Ldone: | ||
404 | zapnot $t0,0x11,$t0 | ||
405 | zapnot $t1,0x22,$t1 | ||
406 | or $Zlo,$Xlo,$Xlo | ||
407 | |||
408 | zapnot $Zhi,0x88,$Zhi | ||
409 | or $t0,$t1,$t0 | ||
410 | zapnot $t2,0x44,$t2 | ||
411 | |||
412 | or $Zhi,$t0,$Zhi | ||
413 | or $Zhi,$t2,$Zhi | ||
414 | |||
415 | srl $Zhi,32,$Xhi | ||
416 | sll $Zhi,32,$Zhi | ||
417 | |||
418 | or $Zhi,$Xhi,$Xhi | ||
419 | |||
420 | stq $Xlo,8($Xi) | ||
421 | stq $Xhi,0($Xi) | ||
422 | |||
423 | .set noreorder | ||
424 | /*ldq ra,0(sp)*/ | ||
425 | ldq s0,8(sp) | ||
426 | ldq s1,16(sp) | ||
427 | lda sp,32(sp) | ||
428 | ret (ra) | ||
429 | .end gcm_ghash_4bit | ||
430 | |||
431 | .section .rodata | ||
432 | .align 4 | ||
433 | rem_4bit: | ||
434 | .long 0,0x0000<<16, 0,0x1C20<<16, 0,0x3840<<16, 0,0x2460<<16 | ||
435 | .long 0,0x7080<<16, 0,0x6CA0<<16, 0,0x48C0<<16, 0,0x54E0<<16 | ||
436 | .long 0,0xE100<<16, 0,0xFD20<<16, 0,0xD940<<16, 0,0xC560<<16 | ||
437 | .long 0,0x9180<<16, 0,0x8DA0<<16, 0,0xA9C0<<16, 0,0xB5E0<<16 | ||
438 | .previous | ||
439 | |||
440 | ___ | ||
441 | $output=shift and open STDOUT,">$output"; | ||
442 | print $code; | ||
443 | close STDOUT; | ||
444 | |||
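
Every one of the removed modules carries the same rem_4bit reduction table (the .rodata block above). The entries are not arbitrary: entry i is the 4-bit value i multiplied into the GHASH reduction byte 0xE1 at the appropriate bit offset. A hypothetical throw-away generator, not part of the original scripts, that reproduces the constants:

    #include <stdio.h>

    int main(void)
    {
        /* rem_4bit[i]: nibble i folded through the reduction byte 0xE1 */
        for (unsigned int i = 0; i < 16; i++) {
            unsigned int v = 0;
            for (unsigned int j = 0; j < 4; j++)
                if (i & (1u << j))
                    v ^= 0xE1u << (5 + j);     /* contribution of bit j */
            printf("0x%04X%c", v, (i % 4 == 3) ? '\n' : ' ');
        }
        return 0;   /* prints 0x0000 0x1C20 0x3840 0x2460 ... 0xB5E0 */
    }
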
diff --git a/src/lib/libcrypto/modes/asm/ghash-armv4.pl b/src/lib/libcrypto/modes/asm/ghash-armv4.pl
deleted file mode 100644
index 2d57806b46..0000000000
--- a/src/lib/libcrypto/modes/asm/ghash-armv4.pl
+++ /dev/null
@@ -1,430 +0,0 @@
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # April 2010 | ||
11 | # | ||
12 | # The module implements "4-bit" GCM GHASH function and underlying | ||
13 | # single multiplication operation in GF(2^128). "4-bit" means that it | ||
14 | # uses 256 bytes per-key table [+32 bytes shared table]. There is no | ||
15 | # experimental performance data available yet. The only approximation | ||
16 | # that can be made at this point is based on code size. Inner loop is | ||
17 | # 32 instructions long and on single-issue core should execute in <40 | ||
18 | # cycles. Having verified that gcc 3.4 didn't unroll corresponding | ||
19 | # loop, this assembler loop body was found to be ~3x smaller than | ||
20 | # compiler-generated one... | ||
21 | # | ||
22 | # July 2010 | ||
23 | # | ||
24 | # Rescheduling for dual-issue pipeline resulted in 8.5% improvement on | ||
25 | # Cortex A8 core and ~25 cycles per processed byte (which was observed | ||
26 | # to be ~3 times faster than gcc-generated code:-) | ||
27 | # | ||
28 | # February 2011 | ||
29 | # | ||
30 | # Profiler-assisted and platform-specific optimization resulted in 7% | ||
31 | # improvement on Cortex A8 core and ~23.5 cycles per byte. | ||
32 | # | ||
33 | # March 2011 | ||
34 | # | ||
35 | # Add NEON implementation featuring polynomial multiplication, i.e. no | ||
36 | # lookup tables involved. On Cortex A8 it was measured to process one | ||
37 | # byte in 15 cycles or 55% faster than integer-only code. | ||
38 | |||
39 | # ==================================================================== | ||
40 | # Note about "528B" variant. In ARM case it makes lesser sense to | ||
41 | # implement it for following reasons: | ||
42 | # | ||
43 | # - performance improvement won't be anywhere near 50%, because 128- | ||
44 | # bit shift operation is neatly fused with 128-bit xor here, and | ||
45 | # "538B" variant would eliminate only 4-5 instructions out of 32 | ||
46 | # in the inner loop (meaning that estimated improvement is ~15%); | ||
47 | # - ARM-based systems are often embedded ones and extra memory | ||
48 | # consumption might be unappreciated (for so little improvement); | ||
49 | # | ||
50 | # Byte order [in]dependence. ========================================= | ||
51 | # | ||
52 | # Caller is expected to maintain specific *dword* order in Htable, | ||
53 | # namely with *least* significant dword of 128-bit value at *lower* | ||
54 | # address. This differs completely from C code and has everything to | ||
55 | # do with ldm instruction and order in which dwords are "consumed" by | ||
56 | # algorithm. *Byte* order within these dwords in turn is whatever | ||
57 | # *native* byte order on current platform. See gcm128.c for working | ||
58 | # example... | ||
59 | |||
60 | while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} | ||
61 | open STDOUT,">$output"; | ||
62 | |||
63 | $Xi="r0"; # argument block | ||
64 | $Htbl="r1"; | ||
65 | $inp="r2"; | ||
66 | $len="r3"; | ||
67 | |||
68 | $Zll="r4"; # variables | ||
69 | $Zlh="r5"; | ||
70 | $Zhl="r6"; | ||
71 | $Zhh="r7"; | ||
72 | $Tll="r8"; | ||
73 | $Tlh="r9"; | ||
74 | $Thl="r10"; | ||
75 | $Thh="r11"; | ||
76 | $nlo="r12"; | ||
77 | ################# r13 is stack pointer | ||
78 | $nhi="r14"; | ||
79 | ################# r15 is program counter | ||
80 | |||
81 | $rem_4bit=$inp; # used in gcm_gmult_4bit | ||
82 | $cnt=$len; | ||
83 | |||
84 | sub Zsmash() { | ||
85 | my $i=12; | ||
86 | my @args=@_; | ||
87 | for ($Zll,$Zlh,$Zhl,$Zhh) { | ||
88 | $code.=<<___; | ||
89 | #if __ARM_ARCH__>=7 && defined(__ARMEL__) | ||
90 | rev $_,$_ | ||
91 | str $_,[$Xi,#$i] | ||
92 | #elif defined(__ARMEB__) | ||
93 | str $_,[$Xi,#$i] | ||
94 | #else | ||
95 | mov $Tlh,$_,lsr#8 | ||
96 | strb $_,[$Xi,#$i+3] | ||
97 | mov $Thl,$_,lsr#16 | ||
98 | strb $Tlh,[$Xi,#$i+2] | ||
99 | mov $Thh,$_,lsr#24 | ||
100 | strb $Thl,[$Xi,#$i+1] | ||
101 | strb $Thh,[$Xi,#$i] | ||
102 | #endif | ||
103 | ___ | ||
104 | $code.="\t".shift(@args)."\n"; | ||
105 | $i-=4; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | $code=<<___; | ||
110 | #include "arm_arch.h" | ||
111 | |||
112 | .text | ||
113 | .syntax unified | ||
114 | .code 32 | ||
115 | |||
116 | .type rem_4bit,%object | ||
117 | .align 5 | ||
118 | rem_4bit: | ||
119 | .short 0x0000,0x1C20,0x3840,0x2460 | ||
120 | .short 0x7080,0x6CA0,0x48C0,0x54E0 | ||
121 | .short 0xE100,0xFD20,0xD940,0xC560 | ||
122 | .short 0x9180,0x8DA0,0xA9C0,0xB5E0 | ||
123 | .size rem_4bit,.-rem_4bit | ||
124 | |||
125 | .type rem_4bit_get,%function | ||
126 | rem_4bit_get: | ||
127 | sub $rem_4bit,pc,#8 | ||
128 | sub $rem_4bit,$rem_4bit,#32 @ &rem_4bit | ||
129 | b .Lrem_4bit_got | ||
130 | nop | ||
131 | .size rem_4bit_get,.-rem_4bit_get | ||
132 | |||
133 | .global gcm_ghash_4bit | ||
134 | .type gcm_ghash_4bit,%function | ||
135 | gcm_ghash_4bit: | ||
136 | sub r12,pc,#8 | ||
137 | add $len,$inp,$len @ $len to point at the end | ||
138 | stmdb sp!,{r3-r11,lr} @ save $len/end too | ||
139 | sub r12,r12,#48 @ &rem_4bit | ||
140 | |||
141 | ldmia r12,{r4-r11} @ copy rem_4bit ... | ||
142 | stmdb sp!,{r4-r11} @ ... to stack | ||
143 | |||
144 | ldrb $nlo,[$inp,#15] | ||
145 | ldrb $nhi,[$Xi,#15] | ||
146 | .Louter: | ||
147 | eor $nlo,$nlo,$nhi | ||
148 | and $nhi,$nlo,#0xf0 | ||
149 | and $nlo,$nlo,#0x0f | ||
150 | mov $cnt,#14 | ||
151 | |||
152 | add $Zhh,$Htbl,$nlo,lsl#4 | ||
153 | ldmia $Zhh,{$Zll-$Zhh} @ load Htbl[nlo] | ||
154 | add $Thh,$Htbl,$nhi | ||
155 | ldrb $nlo,[$inp,#14] | ||
156 | |||
157 | and $nhi,$Zll,#0xf @ rem | ||
158 | ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi] | ||
159 | add $nhi,$nhi,$nhi | ||
160 | eor $Zll,$Tll,$Zll,lsr#4 | ||
161 | ldrh $Tll,[sp,$nhi] @ rem_4bit[rem] | ||
162 | eor $Zll,$Zll,$Zlh,lsl#28 | ||
163 | ldrb $nhi,[$Xi,#14] | ||
164 | eor $Zlh,$Tlh,$Zlh,lsr#4 | ||
165 | eor $Zlh,$Zlh,$Zhl,lsl#28 | ||
166 | eor $Zhl,$Thl,$Zhl,lsr#4 | ||
167 | eor $Zhl,$Zhl,$Zhh,lsl#28 | ||
168 | eor $Zhh,$Thh,$Zhh,lsr#4 | ||
169 | eor $nlo,$nlo,$nhi | ||
170 | and $nhi,$nlo,#0xf0 | ||
171 | and $nlo,$nlo,#0x0f | ||
172 | eor $Zhh,$Zhh,$Tll,lsl#16 | ||
173 | |||
174 | .Linner: | ||
175 | add $Thh,$Htbl,$nlo,lsl#4 | ||
176 | and $nlo,$Zll,#0xf @ rem | ||
177 | subs $cnt,$cnt,#1 | ||
178 | add $nlo,$nlo,$nlo | ||
179 | ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo] | ||
180 | eor $Zll,$Tll,$Zll,lsr#4 | ||
181 | eor $Zll,$Zll,$Zlh,lsl#28 | ||
182 | eor $Zlh,$Tlh,$Zlh,lsr#4 | ||
183 | eor $Zlh,$Zlh,$Zhl,lsl#28 | ||
184 | ldrh $Tll,[sp,$nlo] @ rem_4bit[rem] | ||
185 | eor $Zhl,$Thl,$Zhl,lsr#4 | ||
186 | ldrbpl $nlo,[$inp,$cnt] | ||
187 | eor $Zhl,$Zhl,$Zhh,lsl#28 | ||
188 | eor $Zhh,$Thh,$Zhh,lsr#4 | ||
189 | |||
190 | add $Thh,$Htbl,$nhi | ||
191 | and $nhi,$Zll,#0xf @ rem | ||
192 | eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem] | ||
193 | add $nhi,$nhi,$nhi | ||
194 | ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi] | ||
195 | eor $Zll,$Tll,$Zll,lsr#4 | ||
196 | ldrbpl $Tll,[$Xi,$cnt] | ||
197 | eor $Zll,$Zll,$Zlh,lsl#28 | ||
198 | eor $Zlh,$Tlh,$Zlh,lsr#4 | ||
199 | ldrh $Tlh,[sp,$nhi] | ||
200 | eor $Zlh,$Zlh,$Zhl,lsl#28 | ||
201 | eor $Zhl,$Thl,$Zhl,lsr#4 | ||
202 | eor $Zhl,$Zhl,$Zhh,lsl#28 | ||
203 | eorpl $nlo,$nlo,$Tll | ||
204 | eor $Zhh,$Thh,$Zhh,lsr#4 | ||
205 | andpl $nhi,$nlo,#0xf0 | ||
206 | andpl $nlo,$nlo,#0x0f | ||
207 | eor $Zhh,$Zhh,$Tlh,lsl#16 @ ^= rem_4bit[rem] | ||
208 | bpl .Linner | ||
209 | |||
210 | ldr $len,[sp,#32] @ re-load $len/end | ||
211 | add $inp,$inp,#16 | ||
212 | mov $nhi,$Zll | ||
213 | ___ | ||
214 | &Zsmash("cmp\t$inp,$len","ldrbne\t$nlo,[$inp,#15]"); | ||
215 | $code.=<<___; | ||
216 | bne .Louter | ||
217 | |||
218 | add sp,sp,#36 | ||
219 | #if __ARM_ARCH__>=5 | ||
220 | ldmia sp!,{r4-r11,pc} | ||
221 | #else | ||
222 | ldmia sp!,{r4-r11,lr} | ||
223 | tst lr,#1 | ||
224 | moveq pc,lr @ be binary compatible with V4, yet | ||
225 | bx lr @ interoperable with Thumb ISA:-) | ||
226 | #endif | ||
227 | .size gcm_ghash_4bit,.-gcm_ghash_4bit | ||
228 | |||
229 | .global gcm_gmult_4bit | ||
230 | .type gcm_gmult_4bit,%function | ||
231 | gcm_gmult_4bit: | ||
232 | stmdb sp!,{r4-r11,lr} | ||
233 | ldrb $nlo,[$Xi,#15] | ||
234 | b rem_4bit_get | ||
235 | .Lrem_4bit_got: | ||
236 | and $nhi,$nlo,#0xf0 | ||
237 | and $nlo,$nlo,#0x0f | ||
238 | mov $cnt,#14 | ||
239 | |||
240 | add $Zhh,$Htbl,$nlo,lsl#4 | ||
241 | ldmia $Zhh,{$Zll-$Zhh} @ load Htbl[nlo] | ||
242 | ldrb $nlo,[$Xi,#14] | ||
243 | |||
244 | add $Thh,$Htbl,$nhi | ||
245 | and $nhi,$Zll,#0xf @ rem | ||
246 | ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi] | ||
247 | add $nhi,$nhi,$nhi | ||
248 | eor $Zll,$Tll,$Zll,lsr#4 | ||
249 | ldrh $Tll,[$rem_4bit,$nhi] @ rem_4bit[rem] | ||
250 | eor $Zll,$Zll,$Zlh,lsl#28 | ||
251 | eor $Zlh,$Tlh,$Zlh,lsr#4 | ||
252 | eor $Zlh,$Zlh,$Zhl,lsl#28 | ||
253 | eor $Zhl,$Thl,$Zhl,lsr#4 | ||
254 | eor $Zhl,$Zhl,$Zhh,lsl#28 | ||
255 | eor $Zhh,$Thh,$Zhh,lsr#4 | ||
256 | and $nhi,$nlo,#0xf0 | ||
257 | eor $Zhh,$Zhh,$Tll,lsl#16 | ||
258 | and $nlo,$nlo,#0x0f | ||
259 | |||
260 | .Loop: | ||
261 | add $Thh,$Htbl,$nlo,lsl#4 | ||
262 | and $nlo,$Zll,#0xf @ rem | ||
263 | subs $cnt,$cnt,#1 | ||
264 | add $nlo,$nlo,$nlo | ||
265 | ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo] | ||
266 | eor $Zll,$Tll,$Zll,lsr#4 | ||
267 | eor $Zll,$Zll,$Zlh,lsl#28 | ||
268 | eor $Zlh,$Tlh,$Zlh,lsr#4 | ||
269 | eor $Zlh,$Zlh,$Zhl,lsl#28 | ||
270 | ldrh $Tll,[$rem_4bit,$nlo] @ rem_4bit[rem] | ||
271 | eor $Zhl,$Thl,$Zhl,lsr#4 | ||
272 | ldrbpl $nlo,[$Xi,$cnt] | ||
273 | eor $Zhl,$Zhl,$Zhh,lsl#28 | ||
274 | eor $Zhh,$Thh,$Zhh,lsr#4 | ||
275 | |||
276 | add $Thh,$Htbl,$nhi | ||
277 | and $nhi,$Zll,#0xf @ rem | ||
278 | eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem] | ||
279 | add $nhi,$nhi,$nhi | ||
280 | ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi] | ||
281 | eor $Zll,$Tll,$Zll,lsr#4 | ||
282 | eor $Zll,$Zll,$Zlh,lsl#28 | ||
283 | eor $Zlh,$Tlh,$Zlh,lsr#4 | ||
284 | ldrh $Tll,[$rem_4bit,$nhi] @ rem_4bit[rem] | ||
285 | eor $Zlh,$Zlh,$Zhl,lsl#28 | ||
286 | eor $Zhl,$Thl,$Zhl,lsr#4 | ||
287 | eor $Zhl,$Zhl,$Zhh,lsl#28 | ||
288 | eor $Zhh,$Thh,$Zhh,lsr#4 | ||
289 | andpl $nhi,$nlo,#0xf0 | ||
290 | andpl $nlo,$nlo,#0x0f | ||
291 | eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem] | ||
292 | bpl .Loop | ||
293 | ___ | ||
294 | &Zsmash(); | ||
295 | $code.=<<___; | ||
296 | #if __ARM_ARCH__>=5 | ||
297 | ldmia sp!,{r4-r11,pc} | ||
298 | #else | ||
299 | ldmia sp!,{r4-r11,lr} | ||
300 | tst lr,#1 | ||
301 | moveq pc,lr @ be binary compatible with V4, yet | ||
302 | bx lr @ interoperable with Thumb ISA:-) | ||
303 | #endif | ||
304 | .size gcm_gmult_4bit,.-gcm_gmult_4bit | ||
305 | ___ | ||
306 | { | ||
307 | my $cnt=$Htbl; # $Htbl is used once in the very beginning | ||
308 | |||
309 | my ($Hhi, $Hlo, $Zo, $T, $xi, $mod) = map("d$_",(0..7)); | ||
310 | my ($Qhi, $Qlo, $Z, $R, $zero, $Qpost, $IN) = map("q$_",(8..15)); | ||
311 | |||
312 | # Z:Zo keeps 128-bit result shifted by 1 to the right, with bottom bit | ||
313 | # in Zo. Or should I say "top bit", because GHASH is specified in | ||
314 | # reverse bit order? Otherwise straightforward 128-bt H by one input | ||
315 | # byte multiplication and modulo-reduction, times 16. | ||
316 | |||
317 | sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; } | ||
318 | sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; } | ||
319 | sub Q() { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; } | ||
320 | |||
321 | $code.=<<___; | ||
322 | #if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT) | ||
323 | .fpu neon | ||
324 | |||
325 | .global gcm_gmult_neon | ||
326 | .type gcm_gmult_neon,%function | ||
327 | .align 4 | ||
328 | gcm_gmult_neon: | ||
329 | sub $Htbl,#16 @ point at H in GCM128_CTX | ||
330 | vld1.64 `&Dhi("$IN")`,[$Xi,:64]!@ load Xi | ||
331 | vmov.i32 $mod,#0xe1 @ our irreducible polynomial | ||
332 | vld1.64 `&Dlo("$IN")`,[$Xi,:64]! | ||
333 | vshr.u64 $mod,#32 | ||
334 | vldmia $Htbl,{$Hhi-$Hlo} @ load H | ||
335 | veor $zero,$zero | ||
336 | #ifdef __ARMEL__ | ||
337 | vrev64.8 $IN,$IN | ||
338 | #endif | ||
339 | veor $Qpost,$Qpost | ||
340 | veor $R,$R | ||
341 | mov $cnt,#16 | ||
342 | veor $Z,$Z | ||
343 | mov $len,#16 | ||
344 | veor $Zo,$Zo | ||
345 | vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte | ||
346 | b .Linner_neon | ||
347 | .size gcm_gmult_neon,.-gcm_gmult_neon | ||
348 | |||
349 | .global gcm_ghash_neon | ||
350 | .type gcm_ghash_neon,%function | ||
351 | .align 4 | ||
352 | gcm_ghash_neon: | ||
353 | vld1.64 `&Dhi("$Z")`,[$Xi,:64]! @ load Xi | ||
354 | vmov.i32 $mod,#0xe1 @ our irreducible polynomial | ||
355 | vld1.64 `&Dlo("$Z")`,[$Xi,:64]! | ||
356 | vshr.u64 $mod,#32 | ||
357 | vldmia $Xi,{$Hhi-$Hlo} @ load H | ||
358 | veor $zero,$zero | ||
359 | nop | ||
360 | #ifdef __ARMEL__ | ||
361 | vrev64.8 $Z,$Z | ||
362 | #endif | ||
363 | .Louter_neon: | ||
364 | vld1.64 `&Dhi($IN)`,[$inp]! @ load inp | ||
365 | veor $Qpost,$Qpost | ||
366 | vld1.64 `&Dlo($IN)`,[$inp]! | ||
367 | veor $R,$R | ||
368 | mov $cnt,#16 | ||
369 | #ifdef __ARMEL__ | ||
370 | vrev64.8 $IN,$IN | ||
371 | #endif | ||
372 | veor $Zo,$Zo | ||
373 | veor $IN,$Z @ inp^=Xi | ||
374 | veor $Z,$Z | ||
375 | vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte | ||
376 | .Linner_neon: | ||
377 | subs $cnt,$cnt,#1 | ||
378 | vmull.p8 $Qlo,$Hlo,$xi @ H.lo·Xi[i] | ||
379 | vmull.p8 $Qhi,$Hhi,$xi @ H.hi·Xi[i] | ||
380 | vext.8 $IN,$zero,#1 @ IN>>=8 | ||
381 | |||
382 | veor $Z,$Qpost @ modulo-scheduled part | ||
383 | vshl.i64 `&Dlo("$R")`,#48 | ||
384 | vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte | ||
385 | veor $T,`&Dlo("$Qlo")`,`&Dlo("$Z")` | ||
386 | |||
387 | veor `&Dhi("$Z")`,`&Dlo("$R")` | ||
388 | vuzp.8 $Qlo,$Qhi | ||
389 | vsli.8 $Zo,$T,#1 @ compose the "carry" byte | ||
390 | vext.8 $Z,$zero,#1 @ Z>>=8 | ||
391 | |||
392 | vmull.p8 $R,$Zo,$mod @ "carry"·0xe1 | ||
393 | vshr.u8 $Zo,$T,#7 @ save Z's bottom bit | ||
394 | vext.8 $Qpost,$Qlo,$zero,#1 @ Qlo>>=8 | ||
395 | veor $Z,$Qhi | ||
396 | bne .Linner_neon | ||
397 | |||
398 | veor $Z,$Qpost @ modulo-scheduled artefact | ||
399 | vshl.i64 `&Dlo("$R")`,#48 | ||
400 | veor `&Dhi("$Z")`,`&Dlo("$R")` | ||
401 | |||
402 | @ finalization, normalize Z:Zo | ||
403 | vand $Zo,$mod @ suffices to mask the bit | ||
404 | vshr.u64 `&Dhi(&Q("$Zo"))`,`&Dlo("$Z")`,#63 | ||
405 | vshl.i64 $Z,#1 | ||
406 | subs $len,#16 | ||
407 | vorr $Z,`&Q("$Zo")` @ Z=Z:Zo<<1 | ||
408 | bne .Louter_neon | ||
409 | |||
410 | #ifdef __ARMEL__ | ||
411 | vrev64.8 $Z,$Z | ||
412 | #endif | ||
413 | sub $Xi,#16 | ||
414 | vst1.64 `&Dhi("$Z")`,[$Xi,:64]! @ write out Xi | ||
415 | vst1.64 `&Dlo("$Z")`,[$Xi,:64] | ||
416 | |||
417 | bx lr | ||
418 | .size gcm_ghash_neon,.-gcm_ghash_neon | ||
419 | #endif | ||
420 | ___ | ||
421 | } | ||
422 | $code.=<<___; | ||
423 | .asciz "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>" | ||
424 | .align 2 | ||
425 | ___ | ||
426 | |||
427 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
428 | $code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 | ||
429 | print $code; | ||
430 | close STDOUT; # enforce flush | ||
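
For reference, the entry points these modules define (gcm_gmult_4bit/gcm_ghash_4bit in every file, plus the NEON pair in ghash-armv4.pl above) follow the internal GHASH interface of gcm128.c. The declarations below are shown for orientation only and assume gcm128.c's u64/u128/u8 typedefs; per the "Byte order [in]dependence" note above, the ARM caller must also keep the least significant dword of each 128-bit Htable entry at the lower address.

    /* Xi: 128-bit running hash value; Htable: 16-entry per-key table of
     * multiples of H; inp/len: data to hash, len a multiple of 16 bytes. */
    void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
    void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
                        const u8 *inp, size_t len);

    /* NEON variants emitted by ghash-armv4.pl for __ARM_ARCH__>=7 */
    void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
    void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16],
                        const u8 *inp, size_t len);
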
diff --git a/src/lib/libcrypto/modes/asm/ghash-parisc.pl b/src/lib/libcrypto/modes/asm/ghash-parisc.pl
deleted file mode 100644
index 3f98513105..0000000000
--- a/src/lib/libcrypto/modes/asm/ghash-parisc.pl
+++ /dev/null
@@ -1,740 +0,0 @@
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # April 2010 | ||
11 | # | ||
12 | # The module implements "4-bit" GCM GHASH function and underlying | ||
13 | # single multiplication operation in GF(2^128). "4-bit" means that it | ||
14 | # uses 256 bytes per-key table [+128 bytes shared table]. On PA-7100LC | ||
15 | # it processes one byte in 19.6 cycles, which is more than twice as | ||
16 | # fast as code generated by gcc 3.2. PA-RISC 2.0 loop is scheduled for | ||
17 | # 8 cycles, but measured performance on PA-8600 system is ~9 cycles per | ||
18 | # processed byte. This is ~2.2x faster than 64-bit code generated by | ||
19 | # vendor compiler (which used to be very hard to beat:-). | ||
20 | # | ||
21 | # Special thanks to polarhome.com for providing HP-UX account. | ||
22 | |||
23 | $flavour = shift; | ||
24 | $output = shift; | ||
25 | open STDOUT,">$output"; | ||
26 | |||
27 | if ($flavour =~ /64/) { | ||
28 | $LEVEL ="2.0W"; | ||
29 | $SIZE_T =8; | ||
30 | $FRAME_MARKER =80; | ||
31 | $SAVED_RP =16; | ||
32 | $PUSH ="std"; | ||
33 | $PUSHMA ="std,ma"; | ||
34 | $POP ="ldd"; | ||
35 | $POPMB ="ldd,mb"; | ||
36 | $NREGS =6; | ||
37 | } else { | ||
38 | $LEVEL ="1.0"; #"\n\t.ALLOW\t2.0"; | ||
39 | $SIZE_T =4; | ||
40 | $FRAME_MARKER =48; | ||
41 | $SAVED_RP =20; | ||
42 | $PUSH ="stw"; | ||
43 | $PUSHMA ="stwm"; | ||
44 | $POP ="ldw"; | ||
45 | $POPMB ="ldwm"; | ||
46 | $NREGS =11; | ||
47 | } | ||
48 | |||
49 | $FRAME=10*$SIZE_T+$FRAME_MARKER;# NREGS saved regs + frame marker | ||
50 | # [+ argument transfer] | ||
51 | |||
52 | ################# volatile registers | ||
53 | $Xi="%r26"; # argument block | ||
54 | $Htbl="%r25"; | ||
55 | $inp="%r24"; | ||
56 | $len="%r23"; | ||
57 | $Hhh=$Htbl; # variables | ||
58 | $Hll="%r22"; | ||
59 | $Zhh="%r21"; | ||
60 | $Zll="%r20"; | ||
61 | $cnt="%r19"; | ||
62 | $rem_4bit="%r28"; | ||
63 | $rem="%r29"; | ||
64 | $mask0xf0="%r31"; | ||
65 | |||
66 | ################# preserved registers | ||
67 | $Thh="%r1"; | ||
68 | $Tll="%r2"; | ||
69 | $nlo="%r3"; | ||
70 | $nhi="%r4"; | ||
71 | $byte="%r5"; | ||
72 | if ($SIZE_T==4) { | ||
73 | $Zhl="%r6"; | ||
74 | $Zlh="%r7"; | ||
75 | $Hhl="%r8"; | ||
76 | $Hlh="%r9"; | ||
77 | $Thl="%r10"; | ||
78 | $Tlh="%r11"; | ||
79 | } | ||
80 | $rem2="%r6"; # used in PA-RISC 2.0 code | ||
81 | |||
82 | $code.=<<___; | ||
83 | .LEVEL $LEVEL | ||
84 | .text | ||
85 | |||
86 | .EXPORT gcm_gmult_4bit,ENTRY,ARGW0=GR,ARGW1=GR | ||
87 | .ALIGN 64 | ||
88 | gcm_gmult_4bit | ||
89 | .PROC | ||
90 | .CALLINFO FRAME=`$FRAME-10*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=$NREGS | ||
91 | .ENTRY | ||
92 | $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue | ||
93 | $PUSHMA %r3,$FRAME(%sp) | ||
94 | $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp) | ||
95 | $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp) | ||
96 | $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp) | ||
97 | ___ | ||
98 | $code.=<<___ if ($SIZE_T==4); | ||
99 | $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp) | ||
100 | $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp) | ||
101 | $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp) | ||
102 | $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp) | ||
103 | $PUSH %r11,`-$FRAME+8*$SIZE_T`(%sp) | ||
104 | ___ | ||
105 | $code.=<<___; | ||
106 | addl $inp,$len,$len | ||
107 | #ifdef __PIC__ | ||
108 | addil LT'L\$rem_4bit, %r19 | ||
109 | ldw RT'L\$rem_4bit(%r1), $rem_4bit | ||
110 | #else | ||
111 | ldil L'L\$rem_4bit, %t1 | ||
112 | ldo R'L\$rem_4bit(%t1), $rem_4bit | ||
113 | #endif | ||
114 | ldi 0xf0,$mask0xf0 | ||
115 | ___ | ||
116 | $code.=<<___ if ($SIZE_T==4); | ||
117 | #ifndef __OpenBSD__ | ||
118 | ldi 31,$rem | ||
119 | mtctl $rem,%cr11 | ||
120 | extrd,u,*= $rem,%sar,1,$rem ; executes on PA-RISC 1.0 | ||
121 | b L\$parisc1_gmult | ||
122 | nop | ||
123 | ___ | ||
124 | |||
125 | $code.=<<___; | ||
126 | ldb 15($Xi),$nlo | ||
127 | ldo 8($Htbl),$Hll | ||
128 | |||
129 | and $mask0xf0,$nlo,$nhi | ||
130 | depd,z $nlo,59,4,$nlo | ||
131 | |||
132 | ldd $nlo($Hll),$Zll | ||
133 | ldd $nlo($Hhh),$Zhh | ||
134 | |||
135 | depd,z $Zll,60,4,$rem | ||
136 | shrpd $Zhh,$Zll,4,$Zll | ||
137 | extrd,u $Zhh,59,60,$Zhh | ||
138 | ldb 14($Xi),$nlo | ||
139 | |||
140 | ldd $nhi($Hll),$Tll | ||
141 | ldd $nhi($Hhh),$Thh | ||
142 | and $mask0xf0,$nlo,$nhi | ||
143 | depd,z $nlo,59,4,$nlo | ||
144 | |||
145 | xor $Tll,$Zll,$Zll | ||
146 | xor $Thh,$Zhh,$Zhh | ||
147 | ldd $rem($rem_4bit),$rem | ||
148 | b L\$oop_gmult_pa2 | ||
149 | ldi 13,$cnt | ||
150 | |||
151 | .ALIGN 8 | ||
152 | L\$oop_gmult_pa2 | ||
153 | xor $rem,$Zhh,$Zhh ; moved here to work around gas bug | ||
154 | depd,z $Zll,60,4,$rem | ||
155 | |||
156 | shrpd $Zhh,$Zll,4,$Zll | ||
157 | extrd,u $Zhh,59,60,$Zhh | ||
158 | ldd $nlo($Hll),$Tll | ||
159 | ldd $nlo($Hhh),$Thh | ||
160 | |||
161 | xor $Tll,$Zll,$Zll | ||
162 | xor $Thh,$Zhh,$Zhh | ||
163 | ldd $rem($rem_4bit),$rem | ||
164 | |||
165 | xor $rem,$Zhh,$Zhh | ||
166 | depd,z $Zll,60,4,$rem | ||
167 | ldbx $cnt($Xi),$nlo | ||
168 | |||
169 | shrpd $Zhh,$Zll,4,$Zll | ||
170 | extrd,u $Zhh,59,60,$Zhh | ||
171 | ldd $nhi($Hll),$Tll | ||
172 | ldd $nhi($Hhh),$Thh | ||
173 | |||
174 | and $mask0xf0,$nlo,$nhi | ||
175 | depd,z $nlo,59,4,$nlo | ||
176 | ldd $rem($rem_4bit),$rem | ||
177 | |||
178 | xor $Tll,$Zll,$Zll | ||
179 | addib,uv -1,$cnt,L\$oop_gmult_pa2 | ||
180 | xor $Thh,$Zhh,$Zhh | ||
181 | |||
182 | xor $rem,$Zhh,$Zhh | ||
183 | depd,z $Zll,60,4,$rem | ||
184 | |||
185 | shrpd $Zhh,$Zll,4,$Zll | ||
186 | extrd,u $Zhh,59,60,$Zhh | ||
187 | ldd $nlo($Hll),$Tll | ||
188 | ldd $nlo($Hhh),$Thh | ||
189 | |||
190 | xor $Tll,$Zll,$Zll | ||
191 | xor $Thh,$Zhh,$Zhh | ||
192 | ldd $rem($rem_4bit),$rem | ||
193 | |||
194 | xor $rem,$Zhh,$Zhh | ||
195 | depd,z $Zll,60,4,$rem | ||
196 | |||
197 | shrpd $Zhh,$Zll,4,$Zll | ||
198 | extrd,u $Zhh,59,60,$Zhh | ||
199 | ldd $nhi($Hll),$Tll | ||
200 | ldd $nhi($Hhh),$Thh | ||
201 | |||
202 | xor $Tll,$Zll,$Zll | ||
203 | xor $Thh,$Zhh,$Zhh | ||
204 | ldd $rem($rem_4bit),$rem | ||
205 | |||
206 | xor $rem,$Zhh,$Zhh | ||
207 | std $Zll,8($Xi) | ||
208 | std $Zhh,0($Xi) | ||
209 | ___ | ||
210 | |||
211 | $code.=<<___ if ($SIZE_T==4); | ||
212 | b L\$done_gmult | ||
213 | nop | ||
214 | |||
215 | L\$parisc1_gmult | ||
216 | #endif | ||
217 | ldb 15($Xi),$nlo | ||
218 | ldo 12($Htbl),$Hll | ||
219 | ldo 8($Htbl),$Hlh | ||
220 | ldo 4($Htbl),$Hhl | ||
221 | |||
222 | and $mask0xf0,$nlo,$nhi | ||
223 | zdep $nlo,27,4,$nlo | ||
224 | |||
225 | ldwx $nlo($Hll),$Zll | ||
226 | ldwx $nlo($Hlh),$Zlh | ||
227 | ldwx $nlo($Hhl),$Zhl | ||
228 | ldwx $nlo($Hhh),$Zhh | ||
229 | zdep $Zll,28,4,$rem | ||
230 | ldb 14($Xi),$nlo | ||
231 | ldwx $rem($rem_4bit),$rem | ||
232 | shrpw $Zlh,$Zll,4,$Zll | ||
233 | ldwx $nhi($Hll),$Tll | ||
234 | shrpw $Zhl,$Zlh,4,$Zlh | ||
235 | ldwx $nhi($Hlh),$Tlh | ||
236 | shrpw $Zhh,$Zhl,4,$Zhl | ||
237 | ldwx $nhi($Hhl),$Thl | ||
238 | extru $Zhh,27,28,$Zhh | ||
239 | ldwx $nhi($Hhh),$Thh | ||
240 | xor $rem,$Zhh,$Zhh | ||
241 | and $mask0xf0,$nlo,$nhi | ||
242 | zdep $nlo,27,4,$nlo | ||
243 | |||
244 | xor $Tll,$Zll,$Zll | ||
245 | ldwx $nlo($Hll),$Tll | ||
246 | xor $Tlh,$Zlh,$Zlh | ||
247 | ldwx $nlo($Hlh),$Tlh | ||
248 | xor $Thl,$Zhl,$Zhl | ||
249 | b L\$oop_gmult_pa1 | ||
250 | ldi 13,$cnt | ||
251 | |||
252 | .ALIGN 8 | ||
253 | L\$oop_gmult_pa1 | ||
254 | zdep $Zll,28,4,$rem | ||
255 | ldwx $nlo($Hhl),$Thl | ||
256 | xor $Thh,$Zhh,$Zhh | ||
257 | ldwx $rem($rem_4bit),$rem | ||
258 | shrpw $Zlh,$Zll,4,$Zll | ||
259 | ldwx $nlo($Hhh),$Thh | ||
260 | shrpw $Zhl,$Zlh,4,$Zlh | ||
261 | ldbx $cnt($Xi),$nlo | ||
262 | xor $Tll,$Zll,$Zll | ||
263 | ldwx $nhi($Hll),$Tll | ||
264 | shrpw $Zhh,$Zhl,4,$Zhl | ||
265 | xor $Tlh,$Zlh,$Zlh | ||
266 | ldwx $nhi($Hlh),$Tlh | ||
267 | extru $Zhh,27,28,$Zhh | ||
268 | xor $Thl,$Zhl,$Zhl | ||
269 | ldwx $nhi($Hhl),$Thl | ||
270 | xor $rem,$Zhh,$Zhh | ||
271 | zdep $Zll,28,4,$rem | ||
272 | xor $Thh,$Zhh,$Zhh | ||
273 | ldwx $nhi($Hhh),$Thh | ||
274 | shrpw $Zlh,$Zll,4,$Zll | ||
275 | ldwx $rem($rem_4bit),$rem | ||
276 | shrpw $Zhl,$Zlh,4,$Zlh | ||
277 | shrpw $Zhh,$Zhl,4,$Zhl | ||
278 | and $mask0xf0,$nlo,$nhi | ||
279 | extru $Zhh,27,28,$Zhh | ||
280 | zdep $nlo,27,4,$nlo | ||
281 | xor $Tll,$Zll,$Zll | ||
282 | ldwx $nlo($Hll),$Tll | ||
283 | xor $Tlh,$Zlh,$Zlh | ||
284 | ldwx $nlo($Hlh),$Tlh | ||
285 | xor $rem,$Zhh,$Zhh | ||
286 | addib,uv -1,$cnt,L\$oop_gmult_pa1 | ||
287 | xor $Thl,$Zhl,$Zhl | ||
288 | |||
289 | zdep $Zll,28,4,$rem | ||
290 | ldwx $nlo($Hhl),$Thl | ||
291 | xor $Thh,$Zhh,$Zhh | ||
292 | ldwx $rem($rem_4bit),$rem | ||
293 | shrpw $Zlh,$Zll,4,$Zll | ||
294 | ldwx $nlo($Hhh),$Thh | ||
295 | shrpw $Zhl,$Zlh,4,$Zlh | ||
296 | xor $Tll,$Zll,$Zll | ||
297 | ldwx $nhi($Hll),$Tll | ||
298 | shrpw $Zhh,$Zhl,4,$Zhl | ||
299 | xor $Tlh,$Zlh,$Zlh | ||
300 | ldwx $nhi($Hlh),$Tlh | ||
301 | extru $Zhh,27,28,$Zhh | ||
302 | xor $rem,$Zhh,$Zhh | ||
303 | xor $Thl,$Zhl,$Zhl | ||
304 | ldwx $nhi($Hhl),$Thl | ||
305 | xor $Thh,$Zhh,$Zhh | ||
306 | ldwx $nhi($Hhh),$Thh | ||
307 | zdep $Zll,28,4,$rem | ||
308 | ldwx $rem($rem_4bit),$rem | ||
309 | shrpw $Zlh,$Zll,4,$Zll | ||
310 | shrpw $Zhl,$Zlh,4,$Zlh | ||
311 | shrpw $Zhh,$Zhl,4,$Zhl | ||
312 | extru $Zhh,27,28,$Zhh | ||
313 | xor $Tll,$Zll,$Zll | ||
314 | xor $Tlh,$Zlh,$Zlh | ||
315 | xor $rem,$Zhh,$Zhh | ||
316 | stw $Zll,12($Xi) | ||
317 | xor $Thl,$Zhl,$Zhl | ||
318 | stw $Zlh,8($Xi) | ||
319 | xor $Thh,$Zhh,$Zhh | ||
320 | stw $Zhl,4($Xi) | ||
321 | stw $Zhh,0($Xi) | ||
322 | ___ | ||
323 | $code.=<<___; | ||
324 | L\$done_gmult | ||
325 | $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue | ||
326 | $POP `-$FRAME+1*$SIZE_T`(%sp),%r4 | ||
327 | $POP `-$FRAME+2*$SIZE_T`(%sp),%r5 | ||
328 | $POP `-$FRAME+3*$SIZE_T`(%sp),%r6 | ||
329 | ___ | ||
330 | $code.=<<___ if ($SIZE_T==4); | ||
331 | $POP `-$FRAME+4*$SIZE_T`(%sp),%r7 | ||
332 | $POP `-$FRAME+5*$SIZE_T`(%sp),%r8 | ||
333 | $POP `-$FRAME+6*$SIZE_T`(%sp),%r9 | ||
334 | $POP `-$FRAME+7*$SIZE_T`(%sp),%r10 | ||
335 | $POP `-$FRAME+8*$SIZE_T`(%sp),%r11 | ||
336 | ___ | ||
337 | $code.=<<___; | ||
338 | bv (%r2) | ||
339 | .EXIT | ||
340 | $POPMB -$FRAME(%sp),%r3 | ||
341 | .PROCEND | ||
342 | |||
343 | .EXPORT gcm_ghash_4bit,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR | ||
344 | .ALIGN 64 | ||
345 | gcm_ghash_4bit | ||
346 | .PROC | ||
347 | .CALLINFO FRAME=`$FRAME-10*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=11 | ||
348 | .ENTRY | ||
349 | $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue | ||
350 | $PUSHMA %r3,$FRAME(%sp) | ||
351 | $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp) | ||
352 | $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp) | ||
353 | $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp) | ||
354 | ___ | ||
355 | $code.=<<___ if ($SIZE_T==4); | ||
356 | $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp) | ||
357 | $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp) | ||
358 | $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp) | ||
359 | $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp) | ||
360 | $PUSH %r11,`-$FRAME+8*$SIZE_T`(%sp) | ||
361 | ___ | ||
362 | $code.=<<___; | ||
363 | addl $inp,$len,$len | ||
364 | #ifdef __PIC__ | ||
365 | addil LT'L\$rem_4bit, %r19 | ||
366 | ldw RT'L\$rem_4bit(%r1), $rem_4bit | ||
367 | #else | ||
368 | ldil L'L\$rem_4bit, %t1 | ||
369 | ldo R'L\$rem_4bit(%t1), $rem_4bit | ||
370 | #endif | ||
371 | ldi 0xf0,$mask0xf0 | ||
372 | ___ | ||
373 | $code.=<<___ if ($SIZE_T==4); | ||
374 | #ifndef __OpenBSD__ | ||
375 | ldi 31,$rem | ||
376 | mtctl $rem,%cr11 | ||
377 | extrd,u,*= $rem,%sar,1,$rem ; executes on PA-RISC 1.0 | ||
378 | b L\$parisc1_ghash | ||
379 | nop | ||
380 | ___ | ||
381 | |||
382 | $code.=<<___; | ||
383 | ldb 15($Xi),$nlo | ||
384 | ldo 8($Htbl),$Hll | ||
385 | |||
386 | L\$outer_ghash_pa2 | ||
387 | ldb 15($inp),$nhi | ||
388 | xor $nhi,$nlo,$nlo | ||
389 | and $mask0xf0,$nlo,$nhi | ||
390 | depd,z $nlo,59,4,$nlo | ||
391 | |||
392 | ldd $nlo($Hll),$Zll | ||
393 | ldd $nlo($Hhh),$Zhh | ||
394 | |||
395 | depd,z $Zll,60,4,$rem | ||
396 | shrpd $Zhh,$Zll,4,$Zll | ||
397 | extrd,u $Zhh,59,60,$Zhh | ||
398 | ldb 14($Xi),$nlo | ||
399 | ldb 14($inp),$byte | ||
400 | |||
401 | ldd $nhi($Hll),$Tll | ||
402 | ldd $nhi($Hhh),$Thh | ||
403 | xor $byte,$nlo,$nlo | ||
404 | and $mask0xf0,$nlo,$nhi | ||
405 | depd,z $nlo,59,4,$nlo | ||
406 | |||
407 | xor $Tll,$Zll,$Zll | ||
408 | xor $Thh,$Zhh,$Zhh | ||
409 | ldd $rem($rem_4bit),$rem | ||
410 | b L\$oop_ghash_pa2 | ||
411 | ldi 13,$cnt | ||
412 | |||
413 | .ALIGN 8 | ||
414 | L\$oop_ghash_pa2 | ||
415 | xor $rem,$Zhh,$Zhh ; moved here to work around gas bug | ||
416 | depd,z $Zll,60,4,$rem2 | ||
417 | |||
418 | shrpd $Zhh,$Zll,4,$Zll | ||
419 | extrd,u $Zhh,59,60,$Zhh | ||
420 | ldd $nlo($Hll),$Tll | ||
421 | ldd $nlo($Hhh),$Thh | ||
422 | |||
423 | xor $Tll,$Zll,$Zll | ||
424 | xor $Thh,$Zhh,$Zhh | ||
425 | ldbx $cnt($Xi),$nlo | ||
426 | ldbx $cnt($inp),$byte | ||
427 | |||
428 | depd,z $Zll,60,4,$rem | ||
429 | shrpd $Zhh,$Zll,4,$Zll | ||
430 | ldd $rem2($rem_4bit),$rem2 | ||
431 | |||
432 | xor $rem2,$Zhh,$Zhh | ||
433 | xor $byte,$nlo,$nlo | ||
434 | ldd $nhi($Hll),$Tll | ||
435 | ldd $nhi($Hhh),$Thh | ||
436 | |||
437 | and $mask0xf0,$nlo,$nhi | ||
438 | depd,z $nlo,59,4,$nlo | ||
439 | |||
440 | extrd,u $Zhh,59,60,$Zhh | ||
441 | xor $Tll,$Zll,$Zll | ||
442 | |||
443 | ldd $rem($rem_4bit),$rem | ||
444 | addib,uv -1,$cnt,L\$oop_ghash_pa2 | ||
445 | xor $Thh,$Zhh,$Zhh | ||
446 | |||
447 | xor $rem,$Zhh,$Zhh | ||
448 | depd,z $Zll,60,4,$rem2 | ||
449 | |||
450 | shrpd $Zhh,$Zll,4,$Zll | ||
451 | extrd,u $Zhh,59,60,$Zhh | ||
452 | ldd $nlo($Hll),$Tll | ||
453 | ldd $nlo($Hhh),$Thh | ||
454 | |||
455 | xor $Tll,$Zll,$Zll | ||
456 | xor $Thh,$Zhh,$Zhh | ||
457 | |||
458 | depd,z $Zll,60,4,$rem | ||
459 | shrpd $Zhh,$Zll,4,$Zll | ||
460 | ldd $rem2($rem_4bit),$rem2 | ||
461 | |||
462 | xor $rem2,$Zhh,$Zhh | ||
463 | ldd $nhi($Hll),$Tll | ||
464 | ldd $nhi($Hhh),$Thh | ||
465 | |||
466 | extrd,u $Zhh,59,60,$Zhh | ||
467 | xor $Tll,$Zll,$Zll | ||
468 | xor $Thh,$Zhh,$Zhh | ||
469 | ldd $rem($rem_4bit),$rem | ||
470 | |||
471 | xor $rem,$Zhh,$Zhh | ||
472 | std $Zll,8($Xi) | ||
473 | ldo 16($inp),$inp | ||
474 | std $Zhh,0($Xi) | ||
475 | cmpb,*<> $inp,$len,L\$outer_ghash_pa2 | ||
476 | copy $Zll,$nlo | ||
477 | ___ | ||
478 | |||
479 | $code.=<<___ if ($SIZE_T==4); | ||
480 | b L\$done_ghash | ||
481 | nop | ||
482 | |||
483 | L\$parisc1_ghash | ||
484 | #endif | ||
485 | ldb 15($Xi),$nlo | ||
486 | ldo 12($Htbl),$Hll | ||
487 | ldo 8($Htbl),$Hlh | ||
488 | ldo 4($Htbl),$Hhl | ||
489 | |||
490 | L\$outer_ghash_pa1 | ||
491 | ldb 15($inp),$byte | ||
492 | xor $byte,$nlo,$nlo | ||
493 | and $mask0xf0,$nlo,$nhi | ||
494 | zdep $nlo,27,4,$nlo | ||
495 | |||
496 | ldwx $nlo($Hll),$Zll | ||
497 | ldwx $nlo($Hlh),$Zlh | ||
498 | ldwx $nlo($Hhl),$Zhl | ||
499 | ldwx $nlo($Hhh),$Zhh | ||
500 | zdep $Zll,28,4,$rem | ||
501 | ldb 14($Xi),$nlo | ||
502 | ldb 14($inp),$byte | ||
503 | ldwx $rem($rem_4bit),$rem | ||
504 | shrpw $Zlh,$Zll,4,$Zll | ||
505 | ldwx $nhi($Hll),$Tll | ||
506 | shrpw $Zhl,$Zlh,4,$Zlh | ||
507 | ldwx $nhi($Hlh),$Tlh | ||
508 | shrpw $Zhh,$Zhl,4,$Zhl | ||
509 | ldwx $nhi($Hhl),$Thl | ||
510 | extru $Zhh,27,28,$Zhh | ||
511 | ldwx $nhi($Hhh),$Thh | ||
512 | xor $byte,$nlo,$nlo | ||
513 | xor $rem,$Zhh,$Zhh | ||
514 | and $mask0xf0,$nlo,$nhi | ||
515 | zdep $nlo,27,4,$nlo | ||
516 | |||
517 | xor $Tll,$Zll,$Zll | ||
518 | ldwx $nlo($Hll),$Tll | ||
519 | xor $Tlh,$Zlh,$Zlh | ||
520 | ldwx $nlo($Hlh),$Tlh | ||
521 | xor $Thl,$Zhl,$Zhl | ||
522 | b L\$oop_ghash_pa1 | ||
523 | ldi 13,$cnt | ||
524 | |||
525 | .ALIGN 8 | ||
526 | L\$oop_ghash_pa1 | ||
527 | zdep $Zll,28,4,$rem | ||
528 | ldwx $nlo($Hhl),$Thl | ||
529 | xor $Thh,$Zhh,$Zhh | ||
530 | ldwx $rem($rem_4bit),$rem | ||
531 | shrpw $Zlh,$Zll,4,$Zll | ||
532 | ldwx $nlo($Hhh),$Thh | ||
533 | shrpw $Zhl,$Zlh,4,$Zlh | ||
534 | ldbx $cnt($Xi),$nlo | ||
535 | xor $Tll,$Zll,$Zll | ||
536 | ldwx $nhi($Hll),$Tll | ||
537 | shrpw $Zhh,$Zhl,4,$Zhl | ||
538 | ldbx $cnt($inp),$byte | ||
539 | xor $Tlh,$Zlh,$Zlh | ||
540 | ldwx $nhi($Hlh),$Tlh | ||
541 | extru $Zhh,27,28,$Zhh | ||
542 | xor $Thl,$Zhl,$Zhl | ||
543 | ldwx $nhi($Hhl),$Thl | ||
544 | xor $rem,$Zhh,$Zhh | ||
545 | zdep $Zll,28,4,$rem | ||
546 | xor $Thh,$Zhh,$Zhh | ||
547 | ldwx $nhi($Hhh),$Thh | ||
548 | shrpw $Zlh,$Zll,4,$Zll | ||
549 | ldwx $rem($rem_4bit),$rem | ||
550 | shrpw $Zhl,$Zlh,4,$Zlh | ||
551 | xor $byte,$nlo,$nlo | ||
552 | shrpw $Zhh,$Zhl,4,$Zhl | ||
553 | and $mask0xf0,$nlo,$nhi | ||
554 | extru $Zhh,27,28,$Zhh | ||
555 | zdep $nlo,27,4,$nlo | ||
556 | xor $Tll,$Zll,$Zll | ||
557 | ldwx $nlo($Hll),$Tll | ||
558 | xor $Tlh,$Zlh,$Zlh | ||
559 | ldwx $nlo($Hlh),$Tlh | ||
560 | xor $rem,$Zhh,$Zhh | ||
561 | addib,uv -1,$cnt,L\$oop_ghash_pa1 | ||
562 | xor $Thl,$Zhl,$Zhl | ||
563 | |||
564 | zdep $Zll,28,4,$rem | ||
565 | ldwx $nlo($Hhl),$Thl | ||
566 | xor $Thh,$Zhh,$Zhh | ||
567 | ldwx $rem($rem_4bit),$rem | ||
568 | shrpw $Zlh,$Zll,4,$Zll | ||
569 | ldwx $nlo($Hhh),$Thh | ||
570 | shrpw $Zhl,$Zlh,4,$Zlh | ||
571 | xor $Tll,$Zll,$Zll | ||
572 | ldwx $nhi($Hll),$Tll | ||
573 | shrpw $Zhh,$Zhl,4,$Zhl | ||
574 | xor $Tlh,$Zlh,$Zlh | ||
575 | ldwx $nhi($Hlh),$Tlh | ||
576 | extru $Zhh,27,28,$Zhh | ||
577 | xor $rem,$Zhh,$Zhh | ||
578 | xor $Thl,$Zhl,$Zhl | ||
579 | ldwx $nhi($Hhl),$Thl | ||
580 | xor $Thh,$Zhh,$Zhh | ||
581 | ldwx $nhi($Hhh),$Thh | ||
582 | zdep $Zll,28,4,$rem | ||
583 | ldwx $rem($rem_4bit),$rem | ||
584 | shrpw $Zlh,$Zll,4,$Zll | ||
585 | shrpw $Zhl,$Zlh,4,$Zlh | ||
586 | shrpw $Zhh,$Zhl,4,$Zhl | ||
587 | extru $Zhh,27,28,$Zhh | ||
588 | xor $Tll,$Zll,$Zll | ||
589 | xor $Tlh,$Zlh,$Zlh | ||
590 | xor $rem,$Zhh,$Zhh | ||
591 | stw $Zll,12($Xi) | ||
592 | xor $Thl,$Zhl,$Zhl | ||
593 | stw $Zlh,8($Xi) | ||
594 | xor $Thh,$Zhh,$Zhh | ||
595 | stw $Zhl,4($Xi) | ||
596 | ldo 16($inp),$inp | ||
597 | stw $Zhh,0($Xi) | ||
598 | comb,<> $inp,$len,L\$outer_ghash_pa1 | ||
599 | copy $Zll,$nlo | ||
600 | ___ | ||
601 | $code.=<<___; | ||
602 | L\$done_ghash | ||
603 | $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue | ||
604 | $POP `-$FRAME+1*$SIZE_T`(%sp),%r4 | ||
605 | $POP `-$FRAME+2*$SIZE_T`(%sp),%r5 | ||
606 | $POP `-$FRAME+3*$SIZE_T`(%sp),%r6 | ||
607 | ___ | ||
608 | $code.=<<___ if ($SIZE_T==4); | ||
609 | $POP `-$FRAME+4*$SIZE_T`(%sp),%r7 | ||
610 | $POP `-$FRAME+5*$SIZE_T`(%sp),%r8 | ||
611 | $POP `-$FRAME+6*$SIZE_T`(%sp),%r9 | ||
612 | $POP `-$FRAME+7*$SIZE_T`(%sp),%r10 | ||
613 | $POP `-$FRAME+8*$SIZE_T`(%sp),%r11 | ||
614 | ___ | ||
615 | $code.=<<___; | ||
616 | bv (%r2) | ||
617 | .EXIT | ||
618 | $POPMB -$FRAME(%sp),%r3 | ||
619 | .PROCEND | ||
620 | |||
621 | .section .rodata | ||
622 | .ALIGN 64 | ||
623 | L\$rem_4bit | ||
624 | .WORD `0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`,0 | ||
625 | .WORD `0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`,0 | ||
626 | .WORD `0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`,0 | ||
627 | .WORD `0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`,0 | ||
628 | .previous | ||
629 | |||
630 | .ALIGN 64 | ||
631 | ___ | ||
632 | |||
633 | # Explicitly encode PA-RISC 2.0 instructions used in this module, so | ||
634 | # that it can be compiled with .LEVEL 1.0. It should be noted that I | ||
635 | # wouldn't have to do this, if GNU assembler understood .ALLOW 2.0 | ||
636 | # directive... | ||
637 | |||
638 | my $ldd = sub { | ||
639 | my ($mod,$args) = @_; | ||
640 | my $orig = "ldd$mod\t$args"; | ||
641 | |||
642 | if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4 | ||
643 | { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3; | ||
644 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
645 | } | ||
646 | elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5 | ||
647 | { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3; | ||
648 | $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset | ||
649 | $opcode|=(1<<5) if ($mod =~ /^,m/); | ||
650 | $opcode|=(1<<13) if ($mod =~ /^,mb/); | ||
651 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
652 | } | ||
653 | else { "\t".$orig; } | ||
654 | }; | ||
655 | |||
656 | my $std = sub { | ||
657 | my ($mod,$args) = @_; | ||
658 | my $orig = "std$mod\t$args"; | ||
659 | |||
660 | if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 3 suffices | ||
661 | { my $opcode=(0x1c<<26)|($3<<21)|($1<<16)|(($2&0x1FF8)<<1)|(($2>>13)&1); | ||
662 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
663 | } | ||
664 | else { "\t".$orig; } | ||
665 | }; | ||
666 | |||
667 | my $extrd = sub { | ||
668 | my ($mod,$args) = @_; | ||
669 | my $orig = "extrd$mod\t$args"; | ||
670 | |||
671 | # I only have ",u" completer, it's implicitly encoded... | ||
672 | if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15 | ||
673 | { my $opcode=(0x36<<26)|($1<<21)|($4<<16); | ||
674 | my $len=32-$3; | ||
675 | $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos | ||
676 | $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len | ||
677 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
678 | } | ||
679 | elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12 | ||
680 | { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9); | ||
681 | my $len=32-$2; | ||
682 | $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len | ||
683 | $opcode |= (1<<13) if ($mod =~ /,\**=/); | ||
684 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
685 | } | ||
686 | else { "\t".$orig; } | ||
687 | }; | ||
688 | |||
689 | my $shrpd = sub { | ||
690 | my ($mod,$args) = @_; | ||
691 | my $orig = "shrpd$mod\t$args"; | ||
692 | |||
693 | if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14 | ||
694 | { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4; | ||
695 | my $cpos=63-$3; | ||
696 | $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa | ||
697 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
698 | } | ||
699 | elsif ($args =~ /%r([0-9]+),%r([0-9]+),%sar,%r([0-9]+)/) # format 11 | ||
700 | { sprintf "\t.WORD\t0x%08x\t; %s", | ||
701 | (0x34<<26)|($2<<21)|($1<<16)|(1<<9)|$3,$orig; | ||
702 | } | ||
703 | else { "\t".$orig; } | ||
704 | }; | ||
705 | |||
706 | my $depd = sub { | ||
707 | my ($mod,$args) = @_; | ||
708 | my $orig = "depd$mod\t$args"; | ||
709 | |||
710 | # I only have ",z" completer, it's implicitly encoded... | ||
711 | if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 16 | ||
712 | { my $opcode=(0x3c<<26)|($4<<21)|($1<<16); | ||
713 | my $cpos=63-$2; | ||
714 | my $len=32-$3; | ||
715 | $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode pos | ||
716 | $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len | ||
717 | sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; | ||
718 | } | ||
719 | else { "\t".$orig; } | ||
720 | }; | ||
721 | |||
722 | sub assemble { | ||
723 | my ($mnemonic,$mod,$args)=@_; | ||
724 | my $opcode = eval("\$$mnemonic"); | ||
725 | |||
726 | ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args"; | ||
727 | } | ||
728 | |||
729 | foreach (split("\n",$code)) { | ||
730 | s/\`([^\`]*)\`/eval $1/ge; | ||
731 | if ($SIZE_T==4) { | ||
732 | s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e; | ||
733 | s/cmpb,\*/comb,/; | ||
734 | s/,\*/,/; | ||
735 | } | ||
736 | s/\bbv\b/bve/ if ($SIZE_T==8); | ||
737 | print $_,"\n"; | ||
738 | } | ||
739 | |||
740 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/modes/asm/ghash-sparcv9.pl b/src/lib/libcrypto/modes/asm/ghash-sparcv9.pl
deleted file mode 100644
index ce75045f09..0000000000
--- a/src/lib/libcrypto/modes/asm/ghash-sparcv9.pl
+++ /dev/null
@@ -1,351 +0,0 @@
1 | #!/usr/bin/env perl | ||
2 | |||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | |||
10 | # March 2010 | ||
11 | # | ||
12 | # The module implements "4-bit" GCM GHASH function and underlying | ||
13 | # single multiplication operation in GF(2^128). "4-bit" means that it | ||
14 | # uses 256 bytes per-key table [+128 bytes shared table]. Performance | ||
15 | # results are for streamed GHASH subroutine on UltraSPARC pre-Tx CPU | ||
16 | # and are expressed in cycles per processed byte, less is better: | ||
17 | # | ||
18 | # gcc 3.3.x cc 5.2 this assembler | ||
19 | # | ||
20 | # 32-bit build 81.4 43.3 12.6 (+546%/+244%) | ||
21 | # 64-bit build 20.2 21.2 12.6 (+60%/+68%) | ||
22 | # | ||
23 | # Here is data collected on UltraSPARC T1 system running Linux: | ||
24 | # | ||
25 | # gcc 4.4.1 this assembler | ||
26 | # | ||
27 | # 32-bit build 566 50 (+1000%) | ||
28 | # 64-bit build 56 50 (+12%) | ||
29 | # | ||
30 | # I don't quite understand why difference between 32-bit and 64-bit | ||
31 | # compiler-generated code is so big. Compilers *were* instructed to | ||
32 | # generate code for UltraSPARC and should have used 64-bit registers | ||
33 | # for Z vector (see C code) even in 32-bit build... Oh well, it only | ||
34 | # means more impressive improvement coefficients for this assembler | ||
35 | # module;-) Loops are aggressively modulo-scheduled in respect to | ||
36 | # references to input data and Z.hi updates to achieve 12 cycles | ||
37 | # timing. To anchor to something else, sha1-sparcv9.pl spends 11.6 | ||
38 | # cycles to process one byte on UltraSPARC pre-Tx CPU and ~24 on T1. | ||
39 | |||
40 | $bits=32; | ||
41 | for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); } | ||
42 | if ($bits==64) { $bias=2047; $frame=192; } | ||
43 | else { $bias=0; $frame=112; } | ||
44 | |||
45 | $output=shift; | ||
46 | open STDOUT,">$output"; | ||
47 | |||
48 | $Zhi="%o0"; # 64-bit values | ||
49 | $Zlo="%o1"; | ||
50 | $Thi="%o2"; | ||
51 | $Tlo="%o3"; | ||
52 | $rem="%o4"; | ||
53 | $tmp="%o5"; | ||
54 | |||
55 | $nhi="%l0"; # small values and pointers | ||
56 | $nlo="%l1"; | ||
57 | $xi0="%l2"; | ||
58 | $xi1="%l3"; | ||
59 | $rem_4bit="%l4"; | ||
60 | $remi="%l5"; | ||
61 | $Htblo="%l6"; | ||
62 | $cnt="%l7"; | ||
63 | |||
64 | $Xi="%i0"; # input argument block | ||
65 | $Htbl="%i1"; | ||
66 | $inp="%i2"; | ||
67 | $len="%i3"; | ||
68 | |||
69 | $code.=<<___; | ||
70 | .section ".rodata",#alloc | ||
71 | |||
72 | .align 64 | ||
73 | rem_4bit: | ||
74 | .long `0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`,0 | ||
75 | .long `0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`,0 | ||
76 | .long `0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`,0 | ||
77 | .long `0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`,0 | ||
78 | .type rem_4bit,#object | ||
79 | .size rem_4bit,(.-rem_4bit) | ||
80 | |||
81 | .section ".text",#alloc,#execinstr | ||
82 | .globl gcm_ghash_4bit | ||
83 | .align 32 | ||
84 | gcm_ghash_4bit: | ||
85 | save %sp,-$frame,%sp | ||
86 | #ifdef __PIC__ | ||
87 | sethi %hi(_GLOBAL_OFFSET_TABLE_-4), $tmp | ||
88 | rd %pc, $rem | ||
89 | or $tmp, %lo(_GLOBAL_OFFSET_TABLE_+4), $tmp | ||
90 | add $tmp, $rem, $tmp | ||
91 | #endif | ||
92 | |||
93 | ldub [$inp+15],$nlo | ||
94 | ldub [$Xi+15],$xi0 | ||
95 | ldub [$Xi+14],$xi1 | ||
96 | add $len,$inp,$len | ||
97 | add $Htbl,8,$Htblo | ||
98 | |||
99 | #ifdef __PIC__ | ||
100 | set rem_4bit, $rem_4bit | ||
101 | ldx [$rem_4bit+$tmp], $rem_4bit | ||
102 | #else | ||
103 | set rem_4bit, $rem_4bit | ||
104 | #endif | ||
105 | |||
106 | .Louter: | ||
107 | xor $xi0,$nlo,$nlo | ||
108 | and $nlo,0xf0,$nhi | ||
109 | and $nlo,0x0f,$nlo | ||
110 | sll $nlo,4,$nlo | ||
111 | ldx [$Htblo+$nlo],$Zlo | ||
112 | ldx [$Htbl+$nlo],$Zhi | ||
113 | |||
114 | ldub [$inp+14],$nlo | ||
115 | |||
116 | ldx [$Htblo+$nhi],$Tlo | ||
117 | and $Zlo,0xf,$remi | ||
118 | ldx [$Htbl+$nhi],$Thi | ||
119 | sll $remi,3,$remi | ||
120 | ldx [$rem_4bit+$remi],$rem | ||
121 | srlx $Zlo,4,$Zlo | ||
122 | mov 13,$cnt | ||
123 | sllx $Zhi,60,$tmp | ||
124 | xor $Tlo,$Zlo,$Zlo | ||
125 | srlx $Zhi,4,$Zhi | ||
126 | xor $Zlo,$tmp,$Zlo | ||
127 | |||
128 | xor $xi1,$nlo,$nlo | ||
129 | and $Zlo,0xf,$remi | ||
130 | and $nlo,0xf0,$nhi | ||
131 | and $nlo,0x0f,$nlo | ||
132 | ba .Lghash_inner | ||
133 | sll $nlo,4,$nlo | ||
134 | .align 32 | ||
135 | .Lghash_inner: | ||
136 | ldx [$Htblo+$nlo],$Tlo | ||
137 | sll $remi,3,$remi | ||
138 | xor $Thi,$Zhi,$Zhi | ||
139 | ldx [$Htbl+$nlo],$Thi | ||
140 | srlx $Zlo,4,$Zlo | ||
141 | xor $rem,$Zhi,$Zhi | ||
142 | ldx [$rem_4bit+$remi],$rem | ||
143 | sllx $Zhi,60,$tmp | ||
144 | xor $Tlo,$Zlo,$Zlo | ||
145 | ldub [$inp+$cnt],$nlo | ||
146 | srlx $Zhi,4,$Zhi | ||
147 | xor $Zlo,$tmp,$Zlo | ||
148 | ldub [$Xi+$cnt],$xi1 | ||
149 | xor $Thi,$Zhi,$Zhi | ||
150 | and $Zlo,0xf,$remi | ||
151 | |||
152 | ldx [$Htblo+$nhi],$Tlo | ||
153 | sll $remi,3,$remi | ||
154 | xor $rem,$Zhi,$Zhi | ||
155 | ldx [$Htbl+$nhi],$Thi | ||
156 | srlx $Zlo,4,$Zlo | ||
157 | ldx [$rem_4bit+$remi],$rem | ||
158 | sllx $Zhi,60,$tmp | ||
159 | xor $xi1,$nlo,$nlo | ||
160 | srlx $Zhi,4,$Zhi | ||
161 | and $nlo,0xf0,$nhi | ||
162 | addcc $cnt,-1,$cnt | ||
163 | xor $Zlo,$tmp,$Zlo | ||
164 | and $nlo,0x0f,$nlo | ||
165 | xor $Tlo,$Zlo,$Zlo | ||
166 | sll $nlo,4,$nlo | ||
167 | blu .Lghash_inner | ||
168 | and $Zlo,0xf,$remi | ||
169 | |||
170 | ldx [$Htblo+$nlo],$Tlo | ||
171 | sll $remi,3,$remi | ||
172 | xor $Thi,$Zhi,$Zhi | ||
173 | ldx [$Htbl+$nlo],$Thi | ||
174 | srlx $Zlo,4,$Zlo | ||
175 | xor $rem,$Zhi,$Zhi | ||
176 | ldx [$rem_4bit+$remi],$rem | ||
177 | sllx $Zhi,60,$tmp | ||
178 | xor $Tlo,$Zlo,$Zlo | ||
179 | srlx $Zhi,4,$Zhi | ||
180 | xor $Zlo,$tmp,$Zlo | ||
181 | xor $Thi,$Zhi,$Zhi | ||
182 | |||
183 | add $inp,16,$inp | ||
184 | cmp $inp,$len | ||
185 | be,pn `$bits==64?"%xcc":"%icc"`,.Ldone | ||
186 | and $Zlo,0xf,$remi | ||
187 | |||
188 | ldx [$Htblo+$nhi],$Tlo | ||
189 | sll $remi,3,$remi | ||
190 | xor $rem,$Zhi,$Zhi | ||
191 | ldx [$Htbl+$nhi],$Thi | ||
192 | srlx $Zlo,4,$Zlo | ||
193 | ldx [$rem_4bit+$remi],$rem | ||
194 | sllx $Zhi,60,$tmp | ||
195 | xor $Tlo,$Zlo,$Zlo | ||
196 | ldub [$inp+15],$nlo | ||
197 | srlx $Zhi,4,$Zhi | ||
198 | xor $Zlo,$tmp,$Zlo | ||
199 | xor $Thi,$Zhi,$Zhi | ||
200 | stx $Zlo,[$Xi+8] | ||
201 | xor $rem,$Zhi,$Zhi | ||
202 | stx $Zhi,[$Xi] | ||
203 | srl $Zlo,8,$xi1 | ||
204 | and $Zlo,0xff,$xi0 | ||
205 | ba .Louter | ||
206 | and $xi1,0xff,$xi1 | ||
207 | .align 32 | ||
208 | .Ldone: | ||
209 | ldx [$Htblo+$nhi],$Tlo | ||
210 | sll $remi,3,$remi | ||
211 | xor $rem,$Zhi,$Zhi | ||
212 | ldx [$Htbl+$nhi],$Thi | ||
213 | srlx $Zlo,4,$Zlo | ||
214 | ldx [$rem_4bit+$remi],$rem | ||
215 | sllx $Zhi,60,$tmp | ||
216 | xor $Tlo,$Zlo,$Zlo | ||
217 | srlx $Zhi,4,$Zhi | ||
218 | xor $Zlo,$tmp,$Zlo | ||
219 | xor $Thi,$Zhi,$Zhi | ||
220 | stx $Zlo,[$Xi+8] | ||
221 | xor $rem,$Zhi,$Zhi | ||
222 | stx $Zhi,[$Xi] | ||
223 | |||
224 | ret | ||
225 | restore | ||
226 | .type gcm_ghash_4bit,#function | ||
227 | .size gcm_ghash_4bit,(.-gcm_ghash_4bit) | ||
228 | ___ | ||
229 | |||
230 | undef $inp; | ||
231 | undef $len; | ||
232 | |||
233 | $code.=<<___; | ||
234 | .globl gcm_gmult_4bit | ||
235 | .align 32 | ||
236 | gcm_gmult_4bit: | ||
237 | save %sp,-$frame,%sp | ||
238 | #ifdef __PIC__ | ||
239 | sethi %hi(_GLOBAL_OFFSET_TABLE_-4), $tmp | ||
240 | rd %pc, $rem | ||
241 | or $tmp, %lo(_GLOBAL_OFFSET_TABLE_+4), $tmp | ||
242 | add $tmp, $rem, $tmp | ||
243 | #endif | ||
244 | |||
245 | ldub [$Xi+15],$nlo | ||
246 | add $Htbl,8,$Htblo | ||
247 | |||
248 | #ifdef __PIC__ | ||
249 | set rem_4bit, $rem_4bit | ||
250 | ldx [$rem_4bit+$tmp], $rem_4bit | ||
251 | #else | ||
252 | set rem_4bit, $rem_4bit | ||
253 | #endif | ||
254 | |||
255 | and $nlo,0xf0,$nhi | ||
256 | and $nlo,0x0f,$nlo | ||
257 | sll $nlo,4,$nlo | ||
258 | ldx [$Htblo+$nlo],$Zlo | ||
259 | ldx [$Htbl+$nlo],$Zhi | ||
260 | |||
261 | ldub [$Xi+14],$nlo | ||
262 | |||
263 | ldx [$Htblo+$nhi],$Tlo | ||
264 | and $Zlo,0xf,$remi | ||
265 | ldx [$Htbl+$nhi],$Thi | ||
266 | sll $remi,3,$remi | ||
267 | ldx [$rem_4bit+$remi],$rem | ||
268 | srlx $Zlo,4,$Zlo | ||
269 | mov 13,$cnt | ||
270 | sllx $Zhi,60,$tmp | ||
271 | xor $Tlo,$Zlo,$Zlo | ||
272 | srlx $Zhi,4,$Zhi | ||
273 | xor $Zlo,$tmp,$Zlo | ||
274 | |||
275 | and $Zlo,0xf,$remi | ||
276 | and $nlo,0xf0,$nhi | ||
277 | and $nlo,0x0f,$nlo | ||
278 | ba .Lgmult_inner | ||
279 | sll $nlo,4,$nlo | ||
280 | .align 32 | ||
281 | .Lgmult_inner: | ||
282 | ldx [$Htblo+$nlo],$Tlo | ||
283 | sll $remi,3,$remi | ||
284 | xor $Thi,$Zhi,$Zhi | ||
285 | ldx [$Htbl+$nlo],$Thi | ||
286 | srlx $Zlo,4,$Zlo | ||
287 | xor $rem,$Zhi,$Zhi | ||
288 | ldx [$rem_4bit+$remi],$rem | ||
289 | sllx $Zhi,60,$tmp | ||
290 | xor $Tlo,$Zlo,$Zlo | ||
291 | ldub [$Xi+$cnt],$nlo | ||
292 | srlx $Zhi,4,$Zhi | ||
293 | xor $Zlo,$tmp,$Zlo | ||
294 | xor $Thi,$Zhi,$Zhi | ||
295 | and $Zlo,0xf,$remi | ||
296 | |||
297 | ldx [$Htblo+$nhi],$Tlo | ||
298 | sll $remi,3,$remi | ||
299 | xor $rem,$Zhi,$Zhi | ||
300 | ldx [$Htbl+$nhi],$Thi | ||
301 | srlx $Zlo,4,$Zlo | ||
302 | ldx [$rem_4bit+$remi],$rem | ||
303 | sllx $Zhi,60,$tmp | ||
304 | srlx $Zhi,4,$Zhi | ||
305 | and $nlo,0xf0,$nhi | ||
306 | addcc $cnt,-1,$cnt | ||
307 | xor $Zlo,$tmp,$Zlo | ||
308 | and $nlo,0x0f,$nlo | ||
309 | xor $Tlo,$Zlo,$Zlo | ||
310 | sll $nlo,4,$nlo | ||
311 | blu .Lgmult_inner | ||
312 | and $Zlo,0xf,$remi | ||
313 | |||
314 | ldx [$Htblo+$nlo],$Tlo | ||
315 | sll $remi,3,$remi | ||
316 | xor $Thi,$Zhi,$Zhi | ||
317 | ldx [$Htbl+$nlo],$Thi | ||
318 | srlx $Zlo,4,$Zlo | ||
319 | xor $rem,$Zhi,$Zhi | ||
320 | ldx [$rem_4bit+$remi],$rem | ||
321 | sllx $Zhi,60,$tmp | ||
322 | xor $Tlo,$Zlo,$Zlo | ||
323 | srlx $Zhi,4,$Zhi | ||
324 | xor $Zlo,$tmp,$Zlo | ||
325 | xor $Thi,$Zhi,$Zhi | ||
326 | and $Zlo,0xf,$remi | ||
327 | |||
328 | ldx [$Htblo+$nhi],$Tlo | ||
329 | sll $remi,3,$remi | ||
330 | xor $rem,$Zhi,$Zhi | ||
331 | ldx [$Htbl+$nhi],$Thi | ||
332 | srlx $Zlo,4,$Zlo | ||
333 | ldx [$rem_4bit+$remi],$rem | ||
334 | sllx $Zhi,60,$tmp | ||
335 | xor $Tlo,$Zlo,$Zlo | ||
336 | srlx $Zhi,4,$Zhi | ||
337 | xor $Zlo,$tmp,$Zlo | ||
338 | xor $Thi,$Zhi,$Zhi | ||
339 | stx $Zlo,[$Xi+8] | ||
340 | xor $rem,$Zhi,$Zhi | ||
341 | stx $Zhi,[$Xi] | ||
342 | |||
343 | ret | ||
344 | restore | ||
345 | .type gcm_gmult_4bit,#function | ||
346 | .size gcm_gmult_4bit,(.-gcm_gmult_4bit) | ||
347 | ___ | ||
348 | |||
349 | $code =~ s/\`([^\`]*)\`/eval $1/gem; | ||
350 | print $code; | ||
351 | close STDOUT; | ||
diff --git a/src/lib/libcrypto/modes/asm/ghash-x86.pl b/src/lib/libcrypto/modes/asm/ghash-x86.pl deleted file mode 100644 index 47833582b6..0000000000 --- a/src/lib/libcrypto/modes/asm/ghash-x86.pl +++ /dev/null | |||
@@ -1,1326 +0,0 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # March, May, June 2010 | ||
11 | # | ||
12 | # The module implements "4-bit" GCM GHASH function and underlying | ||
13 | # single multiplication operation in GF(2^128). "4-bit" means that it | ||
14 | # uses 256 bytes per-key table [+64/128 bytes fixed table]. It has two | ||
15 | # code paths: vanilla x86 and vanilla MMX. Former will be executed on | ||
16 | # 486 and Pentium, latter on all others. MMX GHASH features so called | ||
17 | # "528B" variant of "4-bit" method utilizing additional 256+16 bytes | ||
18 | # of per-key storage [+512 bytes shared table]. Performance results | ||
19 | # are for streamed GHASH subroutine and are expressed in cycles per | ||
20 | # processed byte, less is better: | ||
21 | # | ||
22 | # gcc 2.95.3(*) MMX assembler x86 assembler | ||
23 | # | ||
24 | # Pentium 105/111(**) - 50 | ||
25 | # PIII 68 /75 12.2 24 | ||
26 | # P4 125/125 17.8 84(***) | ||
27 | # Opteron 66 /70 10.1 30 | ||
28 | # Core2 54 /67 8.4 18 | ||
29 | # | ||
30 | # (*) gcc 3.4.x was observed to generate a few percent slower code, | ||
31 | # which is one of the reasons why 2.95.3 results were chosen; | ||
32 | # another reason is the lack of 3.4.x results for older CPUs; | ||
33 | # comparison with MMX results is not completely fair, because C | ||
34 | # results are for vanilla "256B" implementation, while | ||
35 | # assembler results are for "528B";-) | ||
36 | # (**) second number is result for code compiled with -fPIC flag, | ||
37 | # which is actually more relevant, because assembler code is | ||
38 | # position-independent; | ||
39 | # (***) see comment in non-MMX routine for further details; | ||
40 | # | ||
41 | # To summarize, it's >2-5 times faster than gcc-generated code. To | ||
42 | # anchor it to something else, SHA1 assembler processes one byte in | ||
43 | # 11-13 cycles on contemporary x86 cores. As for choice of MMX in | ||
44 | # particular, see comment at the end of the file... | ||
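
If the storage figures quoted above are broken down, a quick back-of-the-envelope check (a sketch, not part of the original module) reproduces the "256B" and "528B" labels: 16 table entries of 16 bytes each for the basic "4-bit" method, plus the additional 256+16 bytes of per-key storage for the "528B" variant.

	# sketch: where the "256B" and "528B" figures above come from
	my $entries     = 16;   # one Htable entry per 4-bit nibble value
	my $entry_bytes = 16;   # each entry holds a 128-bit field element
	printf "basic 4-bit table: %d bytes\n", $entries * $entry_bytes;          # 256
	printf "528B variant:      %d bytes\n", 2 * $entries * $entry_bytes + 16; # 256 + 256+16 = 528
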
45 | |||
46 | # May 2010 | ||
47 | # | ||
48 | # Add PCLMULQDQ version performing at 2.10 cycles per processed byte. | ||
49 | # The question is how close it is to the theoretical limit. The pclmulqdq | ||
50 | # instruction latency appears to be 14 cycles and there can't be more | ||
51 | # than 2 of them executing at any given time. This means that single | ||
52 | # Karatsuba multiplication would take 28 cycles *plus* few cycles for | ||
53 | # pre- and post-processing. Then multiplication has to be followed by | ||
54 | # modulo-reduction. Given that aggregated reduction method [see | ||
55 | # "Carry-less Multiplication and Its Usage for Computing the GCM Mode" | ||
56 | # white paper by Intel] allows you to perform reduction only once in | ||
57 | # a while, we can assume that asymptotic performance can be estimated | ||
58 | # as (28+Tmod/Naggr)/16, where Tmod is time to perform reduction | ||
59 | # and Naggr is the aggregation factor. | ||
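
To make the estimate above concrete, here is a trivial helper (a sketch, not part of the original module; the name is mine) that evaluates (Tmul + Tmod/Naggr)/16 with Tmul fixed at the 28 cycles quoted for a single Karatsuba multiplication:

	# sketch: asymptotic GHASH cost in cycles per processed byte,
	# (Tmul + Tmod/Naggr)/16, with 16 bytes hashed per multiplication
	sub ghash_cycles_per_byte {
		my ($Tmul, $Tmod, $Naggr) = @_;
		return ($Tmul + $Tmod/$Naggr) / 16;
	}
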
60 | # | ||
61 | # Before we proceed to this implementation, let's have a closer look at | ||
62 | # the best-performing code suggested by Intel in their white paper. | ||
63 | # By tracing inter-register dependencies Tmod is estimated as ~19 | ||
64 | # cycles and Naggr chosen by Intel is 4, resulting in 2.05 cycles per | ||
65 | # processed byte. As implied, this is quite an optimistic estimate, | ||
66 | # because it does not account for Karatsuba pre- and post-processing, | ||
67 | # which for a single multiplication is ~5 cycles. Unfortunately Intel | ||
68 | # does not provide performance data for GHASH alone. But benchmarking | ||
69 | # AES_GCM_encrypt ripped out of Fig. 15 of the white paper with aadt | ||
70 | # alone resulted in 2.46 cycles per byte out of 16KB buffer. Note that | ||
71 | # the result accounts even for pre-computing of degrees of the hash | ||
72 | # key H, but its portion is negligible at 16KB buffer size. | ||
73 | # | ||
74 | # Moving on to the implementation in question. Tmod is estimated as | ||
75 | # ~13 cycles and Naggr is 2, giving asymptotic performance of ... | ||
76 | # 2.16. How is it possible that measured performance is better than | ||
77 | # optimistic theoretical estimate? There is one thing Intel failed | ||
78 | # to recognize. By serializing GHASH with CTR in the same subroutine, | ||
79 | # the former's performance is really limited by the above (Tmul + Tmod/Naggr) | ||
80 | # equation. But if the GHASH procedure is detached, the modulo-reduction | ||
81 | # can be interleaved with Naggr-1 multiplications at instruction level | ||
82 | # and under ideal conditions even disappear from the equation. So that | ||
83 | # optimistic theoretical estimate for this implementation is ... | ||
84 | # 28/16=1.75, and not 2.16. Well, it's probably way too optimistic, | ||
85 | # at least for such small Naggr. I'd argue that (28+Tproc/Naggr), | ||
86 | # where Tproc is time required for Karatsuba pre- and post-processing, | ||
87 | # is more realistic estimate. In this case it gives ... 1.91 cycles. | ||
88 | # Or in other words, depending on how well we can interleave reduction | ||
89 | # and one of the two multiplications the performance should be between | ||
90 | # 1.91 and 2.16. As already mentioned, this implementation processes | ||
91 | # one byte out of 8KB buffer in 2.10 cycles, while x86_64 counterpart | ||
92 | # - in 2.02. x86_64 performance is better, because larger register | ||
93 | # bank allows to interleave reduction and multiplication better. | ||
94 | # | ||
95 | # Does it make sense to increase Naggr? To start with it's virtually | ||
96 | # impossible in 32-bit mode, because of limited register bank | ||
97 | # capacity. Otherwise the improvement has to be weighed against slower | ||
98 | # setup, as well as code size and complexity increase. As even an | ||
99 | # optimistic estimate doesn't promise a 30% performance improvement, | ||
100 | # there are currently no plans to increase Naggr. | ||
101 | # | ||
102 | # Special thanks to David Woodhouse <dwmw2@infradead.org> for | ||
103 | # providing access to a Westmere-based system on behalf of Intel | ||
104 | # Open Source Technology Centre. | ||
105 | |||
106 | # January 2010 | ||
107 | # | ||
108 | # Tweaked to optimize transitions between integer and FP operations | ||
109 | # on the same XMM register, the PCLMULQDQ subroutine was measured to process | ||
110 | # one byte in 2.07 cycles on Sandy Bridge, and in 2.12 - on Westmere. | ||
111 | # The minor regression on Westmere is outweighed by ~15% improvement | ||
112 | # on Sandy Bridge. Strangely enough, an attempt to modify 64-bit code in | ||
113 | # a similar manner resulted in almost 20% degradation on Sandy Bridge, | ||
114 | # where original 64-bit code processes one byte in 1.95 cycles. | ||
115 | |||
116 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
117 | push(@INC,"${dir}","${dir}../../perlasm"); | ||
118 | require "x86asm.pl"; | ||
119 | |||
120 | &asm_init($ARGV[0],"ghash-x86.pl",$x86only = $ARGV[$#ARGV] eq "386"); | ||
121 | |||
122 | $sse2=0; | ||
123 | for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } | ||
124 | |||
125 | ($Zhh,$Zhl,$Zlh,$Zll) = ("ebp","edx","ecx","ebx"); | ||
126 | $inp = "edi"; | ||
127 | $Htbl = "esi"; | ||
128 | |||
129 | $unroll = 0; # Affects x86 loop. Folded loop performs ~7% worse | ||
130 | # than unrolled, which has to be weighed against | ||
131 | # 2.5x x86-specific code size reduction. | ||
132 | |||
133 | sub x86_loop { | ||
134 | my $off = shift; | ||
135 | my $rem = "eax"; | ||
136 | |||
137 | &mov ($Zhh,&DWP(4,$Htbl,$Zll)); | ||
138 | &mov ($Zhl,&DWP(0,$Htbl,$Zll)); | ||
139 | &mov ($Zlh,&DWP(12,$Htbl,$Zll)); | ||
140 | &mov ($Zll,&DWP(8,$Htbl,$Zll)); | ||
141 | &xor ($rem,$rem); # avoid partial register stalls on PIII | ||
142 | |||
143 | # shrd practically kills P4, 2.5x deterioration, but P4 has | ||
144 | # MMX code-path to execute. shrd runs tad faster [than twice | ||
145 | # the shifts, move's and or's] on pre-MMX Pentium (as well as | ||
146 | # PIII and Core2), *but* minimizes code size, spares a register | ||
147 | # and thus allows the loop to be folded... | ||
148 | if (!$unroll) { | ||
149 | my $cnt = $inp; | ||
150 | &mov ($cnt,15); | ||
151 | &jmp (&label("x86_loop")); | ||
152 | &set_label("x86_loop",16); | ||
153 | for($i=1;$i<=2;$i++) { | ||
154 | &mov (&LB($rem),&LB($Zll)); | ||
155 | &shrd ($Zll,$Zlh,4); | ||
156 | &and (&LB($rem),0xf); | ||
157 | &shrd ($Zlh,$Zhl,4); | ||
158 | &shrd ($Zhl,$Zhh,4); | ||
159 | &shr ($Zhh,4); | ||
160 | &xor ($Zhh,&DWP($off+16,"esp",$rem,4)); | ||
161 | |||
162 | &mov (&LB($rem),&BP($off,"esp",$cnt)); | ||
163 | if ($i&1) { | ||
164 | &and (&LB($rem),0xf0); | ||
165 | } else { | ||
166 | &shl (&LB($rem),4); | ||
167 | } | ||
168 | |||
169 | &xor ($Zll,&DWP(8,$Htbl,$rem)); | ||
170 | &xor ($Zlh,&DWP(12,$Htbl,$rem)); | ||
171 | &xor ($Zhl,&DWP(0,$Htbl,$rem)); | ||
172 | &xor ($Zhh,&DWP(4,$Htbl,$rem)); | ||
173 | |||
174 | if ($i&1) { | ||
175 | &dec ($cnt); | ||
176 | &js (&label("x86_break")); | ||
177 | } else { | ||
178 | &jmp (&label("x86_loop")); | ||
179 | } | ||
180 | } | ||
181 | &set_label("x86_break",16); | ||
182 | } else { | ||
183 | for($i=1;$i<32;$i++) { | ||
184 | &comment($i); | ||
185 | &mov (&LB($rem),&LB($Zll)); | ||
186 | &shrd ($Zll,$Zlh,4); | ||
187 | &and (&LB($rem),0xf); | ||
188 | &shrd ($Zlh,$Zhl,4); | ||
189 | &shrd ($Zhl,$Zhh,4); | ||
190 | &shr ($Zhh,4); | ||
191 | &xor ($Zhh,&DWP($off+16,"esp",$rem,4)); | ||
192 | |||
193 | if ($i&1) { | ||
194 | &mov (&LB($rem),&BP($off+15-($i>>1),"esp")); | ||
195 | &and (&LB($rem),0xf0); | ||
196 | } else { | ||
197 | &mov (&LB($rem),&BP($off+15-($i>>1),"esp")); | ||
198 | &shl (&LB($rem),4); | ||
199 | } | ||
200 | |||
201 | &xor ($Zll,&DWP(8,$Htbl,$rem)); | ||
202 | &xor ($Zlh,&DWP(12,$Htbl,$rem)); | ||
203 | &xor ($Zhl,&DWP(0,$Htbl,$rem)); | ||
204 | &xor ($Zhh,&DWP(4,$Htbl,$rem)); | ||
205 | } | ||
206 | } | ||
207 | &bswap ($Zll); | ||
208 | &bswap ($Zlh); | ||
209 | &bswap ($Zhl); | ||
210 | if (!$x86only) { | ||
211 | &bswap ($Zhh); | ||
212 | } else { | ||
213 | &mov ("eax",$Zhh); | ||
214 | &bswap ("eax"); | ||
215 | &mov ($Zhh,"eax"); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | if ($unroll) { | ||
220 | &function_begin_B("_x86_gmult_4bit_inner"); | ||
221 | &x86_loop(4); | ||
222 | &ret (); | ||
223 | &function_end_B("_x86_gmult_4bit_inner"); | ||
224 | } | ||
225 | |||
226 | sub deposit_rem_4bit { | ||
227 | my $bias = shift; | ||
228 | |||
229 | &mov (&DWP($bias+0, "esp"),0x0000<<16); | ||
230 | &mov (&DWP($bias+4, "esp"),0x1C20<<16); | ||
231 | &mov (&DWP($bias+8, "esp"),0x3840<<16); | ||
232 | &mov (&DWP($bias+12,"esp"),0x2460<<16); | ||
233 | &mov (&DWP($bias+16,"esp"),0x7080<<16); | ||
234 | &mov (&DWP($bias+20,"esp"),0x6CA0<<16); | ||
235 | &mov (&DWP($bias+24,"esp"),0x48C0<<16); | ||
236 | &mov (&DWP($bias+28,"esp"),0x54E0<<16); | ||
237 | &mov (&DWP($bias+32,"esp"),0xE100<<16); | ||
238 | &mov (&DWP($bias+36,"esp"),0xFD20<<16); | ||
239 | &mov (&DWP($bias+40,"esp"),0xD940<<16); | ||
240 | &mov (&DWP($bias+44,"esp"),0xC560<<16); | ||
241 | &mov (&DWP($bias+48,"esp"),0x9180<<16); | ||
242 | &mov (&DWP($bias+52,"esp"),0x8DA0<<16); | ||
243 | &mov (&DWP($bias+56,"esp"),0xA9C0<<16); | ||
244 | &mov (&DWP($bias+60,"esp"),0xB5E0<<16); | ||
245 | } | ||
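
For what it's worth, the constants deposited above can be regenerated rather than spelled out: each rem_4bit entry appears to be the carry-less (GF(2)) product of its 4-bit index and 0x1C2 (the reduction constant 0xE1 shifted left by one bit), shifted left by a further 4 bits. A small sketch, not part of the original module; the rem_8bit table emitted at the end of the file follows the same pattern without the final shift:

	# sketch: regenerate rem_4bit (0x0000, 0x1C20, 0x3840, 0x2460, ...)
	sub gen_rem_4bit {
		my @tbl;
		for my $n (0 .. 15) {
			my $r = 0;
			for my $b (0 .. 3) {
				$r ^= 0x1C2 << $b if (($n >> $b) & 1);	# carry-less multiply by 0x1C2
			}
			push @tbl, $r << 4;
		}
		return @tbl;
	}
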
246 | |||
247 | $suffix = $x86only ? "" : "_x86"; | ||
248 | |||
249 | &function_begin("gcm_gmult_4bit".$suffix); | ||
250 | &stack_push(16+4+1); # +1 for stack alignment | ||
251 | &mov ($inp,&wparam(0)); # load Xi | ||
252 | &mov ($Htbl,&wparam(1)); # load Htable | ||
253 | |||
254 | &mov ($Zhh,&DWP(0,$inp)); # load Xi[16] | ||
255 | &mov ($Zhl,&DWP(4,$inp)); | ||
256 | &mov ($Zlh,&DWP(8,$inp)); | ||
257 | &mov ($Zll,&DWP(12,$inp)); | ||
258 | |||
259 | &deposit_rem_4bit(16); | ||
260 | |||
261 | &mov (&DWP(0,"esp"),$Zhh); # copy Xi[16] on stack | ||
262 | &mov (&DWP(4,"esp"),$Zhl); | ||
263 | &mov (&DWP(8,"esp"),$Zlh); | ||
264 | &mov (&DWP(12,"esp"),$Zll); | ||
265 | &shr ($Zll,20); | ||
266 | &and ($Zll,0xf0); | ||
267 | |||
268 | if ($unroll) { | ||
269 | &call ("_x86_gmult_4bit_inner"); | ||
270 | } else { | ||
271 | &x86_loop(0); | ||
272 | &mov ($inp,&wparam(0)); | ||
273 | } | ||
274 | |||
275 | &mov (&DWP(12,$inp),$Zll); | ||
276 | &mov (&DWP(8,$inp),$Zlh); | ||
277 | &mov (&DWP(4,$inp),$Zhl); | ||
278 | &mov (&DWP(0,$inp),$Zhh); | ||
279 | &stack_pop(16+4+1); | ||
280 | &function_end("gcm_gmult_4bit".$suffix); | ||
281 | |||
282 | &function_begin("gcm_ghash_4bit".$suffix); | ||
283 | &stack_push(16+4+1); # +1 for 64-bit alignment | ||
284 | &mov ($Zll,&wparam(0)); # load Xi | ||
285 | &mov ($Htbl,&wparam(1)); # load Htable | ||
286 | &mov ($inp,&wparam(2)); # load in | ||
287 | &mov ("ecx",&wparam(3)); # load len | ||
288 | &add ("ecx",$inp); | ||
289 | &mov (&wparam(3),"ecx"); | ||
290 | |||
291 | &mov ($Zhh,&DWP(0,$Zll)); # load Xi[16] | ||
292 | &mov ($Zhl,&DWP(4,$Zll)); | ||
293 | &mov ($Zlh,&DWP(8,$Zll)); | ||
294 | &mov ($Zll,&DWP(12,$Zll)); | ||
295 | |||
296 | &deposit_rem_4bit(16); | ||
297 | |||
298 | &set_label("x86_outer_loop",16); | ||
299 | &xor ($Zll,&DWP(12,$inp)); # xor with input | ||
300 | &xor ($Zlh,&DWP(8,$inp)); | ||
301 | &xor ($Zhl,&DWP(4,$inp)); | ||
302 | &xor ($Zhh,&DWP(0,$inp)); | ||
303 | &mov (&DWP(12,"esp"),$Zll); # dump it on stack | ||
304 | &mov (&DWP(8,"esp"),$Zlh); | ||
305 | &mov (&DWP(4,"esp"),$Zhl); | ||
306 | &mov (&DWP(0,"esp"),$Zhh); | ||
307 | |||
308 | &shr ($Zll,20); | ||
309 | &and ($Zll,0xf0); | ||
310 | |||
311 | if ($unroll) { | ||
312 | &call ("_x86_gmult_4bit_inner"); | ||
313 | } else { | ||
314 | &x86_loop(0); | ||
315 | &mov ($inp,&wparam(2)); | ||
316 | } | ||
317 | &lea ($inp,&DWP(16,$inp)); | ||
318 | &cmp ($inp,&wparam(3)); | ||
319 | &mov (&wparam(2),$inp) if (!$unroll); | ||
320 | &jb (&label("x86_outer_loop")); | ||
321 | |||
322 | &mov ($inp,&wparam(0)); # load Xi | ||
323 | &mov (&DWP(12,$inp),$Zll); | ||
324 | &mov (&DWP(8,$inp),$Zlh); | ||
325 | &mov (&DWP(4,$inp),$Zhl); | ||
326 | &mov (&DWP(0,$inp),$Zhh); | ||
327 | &stack_pop(16+4+1); | ||
328 | &function_end("gcm_ghash_4bit".$suffix); | ||
329 | |||
330 | if (!$x86only) {{{ | ||
331 | |||
332 | &static_label("rem_4bit"); | ||
333 | |||
334 | if (!$sse2) {{ # pure-MMX "May" version... | ||
335 | |||
336 | $S=12; # shift factor for rem_4bit | ||
337 | |||
338 | &function_begin_B("_mmx_gmult_4bit_inner"); | ||
339 | # MMX version performs 3.5 times better on P4 (see comment in non-MMX | ||
340 | # routine for further details), 100% better on Opteron, ~70% better | ||
341 | # on Core2 and PIII... In other words effort is considered to be well | ||
342 | # spent... Since initial release the loop was unrolled in order to | ||
343 | # "liberate" register previously used as loop counter. Instead it's | ||
344 | # used to optimize critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'. | ||
345 | # The path involves move of Z.lo from MMX to integer register, | ||
346 | # effective address calculation and finally merge of value to Z.hi. | ||
347 | # Reference to rem_4bit is scheduled so late that I had to >>4 | ||
348 | # rem_4bit elements. This resulted in 20-45% improvement | ||
349 | # on contemporary µ-archs. | ||
350 | { | ||
351 | my $cnt; | ||
352 | my $rem_4bit = "eax"; | ||
353 | my @rem = ($Zhh,$Zll); | ||
354 | my $nhi = $Zhl; | ||
355 | my $nlo = $Zlh; | ||
356 | |||
357 | my ($Zlo,$Zhi) = ("mm0","mm1"); | ||
358 | my $tmp = "mm2"; | ||
359 | |||
360 | &xor ($nlo,$nlo); # avoid partial register stalls on PIII | ||
361 | &mov ($nhi,$Zll); | ||
362 | &mov (&LB($nlo),&LB($nhi)); | ||
363 | &shl (&LB($nlo),4); | ||
364 | &and ($nhi,0xf0); | ||
365 | &movq ($Zlo,&QWP(8,$Htbl,$nlo)); | ||
366 | &movq ($Zhi,&QWP(0,$Htbl,$nlo)); | ||
367 | &movd ($rem[0],$Zlo); | ||
368 | |||
369 | for ($cnt=28;$cnt>=-2;$cnt--) { | ||
370 | my $odd = $cnt&1; | ||
371 | my $nix = $odd ? $nlo : $nhi; | ||
372 | |||
373 | &shl (&LB($nlo),4) if ($odd); | ||
374 | &psrlq ($Zlo,4); | ||
375 | &movq ($tmp,$Zhi); | ||
376 | &psrlq ($Zhi,4); | ||
377 | &pxor ($Zlo,&QWP(8,$Htbl,$nix)); | ||
378 | &mov (&LB($nlo),&BP($cnt/2,$inp)) if (!$odd && $cnt>=0); | ||
379 | &psllq ($tmp,60); | ||
380 | &and ($nhi,0xf0) if ($odd); | ||
381 | &pxor ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28); | ||
382 | &and ($rem[0],0xf); | ||
383 | &pxor ($Zhi,&QWP(0,$Htbl,$nix)); | ||
384 | &mov ($nhi,$nlo) if (!$odd && $cnt>=0); | ||
385 | &movd ($rem[1],$Zlo); | ||
386 | &pxor ($Zlo,$tmp); | ||
387 | |||
388 | push (@rem,shift(@rem)); # "rotate" registers | ||
389 | } | ||
390 | |||
391 | &mov ($inp,&DWP(4,$rem_4bit,$rem[1],8)); # last rem_4bit[rem] | ||
392 | |||
393 | &psrlq ($Zlo,32); # lower part of Zlo is already there | ||
394 | &movd ($Zhl,$Zhi); | ||
395 | &psrlq ($Zhi,32); | ||
396 | &movd ($Zlh,$Zlo); | ||
397 | &movd ($Zhh,$Zhi); | ||
398 | &shl ($inp,4); # compensate for rem_4bit[i] being >>4 | ||
399 | |||
400 | &bswap ($Zll); | ||
401 | &bswap ($Zhl); | ||
402 | &bswap ($Zlh); | ||
403 | &xor ($Zhh,$inp); | ||
404 | &bswap ($Zhh); | ||
405 | |||
406 | &ret (); | ||
407 | } | ||
408 | &function_end_B("_mmx_gmult_4bit_inner"); | ||
409 | |||
410 | &function_begin("gcm_gmult_4bit_mmx"); | ||
411 | &mov ($inp,&wparam(0)); # load Xi | ||
412 | &mov ($Htbl,&wparam(1)); # load Htable | ||
413 | |||
414 | &picsetup("eax"); | ||
415 | &picsymbol("eax", &label("rem_4bit"), "eax"); | ||
416 | |||
417 | &movz ($Zll,&BP(15,$inp)); | ||
418 | |||
419 | &call ("_mmx_gmult_4bit_inner"); | ||
420 | |||
421 | &mov ($inp,&wparam(0)); # load Xi | ||
422 | &emms (); | ||
423 | &mov (&DWP(12,$inp),$Zll); | ||
424 | &mov (&DWP(4,$inp),$Zhl); | ||
425 | &mov (&DWP(8,$inp),$Zlh); | ||
426 | &mov (&DWP(0,$inp),$Zhh); | ||
427 | &function_end("gcm_gmult_4bit_mmx"); | ||
428 | |||
429 | # Streamed version performs 20% better on P4, 7% on Opteron, | ||
430 | # 10% on Core2 and PIII... | ||
431 | &function_begin("gcm_ghash_4bit_mmx"); | ||
432 | &mov ($Zhh,&wparam(0)); # load Xi | ||
433 | &mov ($Htbl,&wparam(1)); # load Htable | ||
434 | &mov ($inp,&wparam(2)); # load in | ||
435 | &mov ($Zlh,&wparam(3)); # load len | ||
436 | |||
437 | &picsetup("eax"); | ||
438 | &picsymbol("eax", &label("rem_4bit"), "eax"); | ||
439 | |||
440 | &add ($Zlh,$inp); | ||
441 | &mov (&wparam(3),$Zlh); # len to point at the end of input | ||
442 | &stack_push(4+1); # +1 for stack alignment | ||
443 | |||
444 | &mov ($Zll,&DWP(12,$Zhh)); # load Xi[16] | ||
445 | &mov ($Zhl,&DWP(4,$Zhh)); | ||
446 | &mov ($Zlh,&DWP(8,$Zhh)); | ||
447 | &mov ($Zhh,&DWP(0,$Zhh)); | ||
448 | &jmp (&label("mmx_outer_loop")); | ||
449 | |||
450 | &set_label("mmx_outer_loop",16); | ||
451 | &xor ($Zll,&DWP(12,$inp)); | ||
452 | &xor ($Zhl,&DWP(4,$inp)); | ||
453 | &xor ($Zlh,&DWP(8,$inp)); | ||
454 | &xor ($Zhh,&DWP(0,$inp)); | ||
455 | &mov (&wparam(2),$inp); | ||
456 | &mov (&DWP(12,"esp"),$Zll); | ||
457 | &mov (&DWP(4,"esp"),$Zhl); | ||
458 | &mov (&DWP(8,"esp"),$Zlh); | ||
459 | &mov (&DWP(0,"esp"),$Zhh); | ||
460 | |||
461 | &mov ($inp,"esp"); | ||
462 | &shr ($Zll,24); | ||
463 | |||
464 | &call ("_mmx_gmult_4bit_inner"); | ||
465 | |||
466 | &mov ($inp,&wparam(2)); | ||
467 | &lea ($inp,&DWP(16,$inp)); | ||
468 | &cmp ($inp,&wparam(3)); | ||
469 | &jb (&label("mmx_outer_loop")); | ||
470 | |||
471 | &mov ($inp,&wparam(0)); # load Xi | ||
472 | &emms (); | ||
473 | &mov (&DWP(12,$inp),$Zll); | ||
474 | &mov (&DWP(4,$inp),$Zhl); | ||
475 | &mov (&DWP(8,$inp),$Zlh); | ||
476 | &mov (&DWP(0,$inp),$Zhh); | ||
477 | |||
478 | &stack_pop(4+1); | ||
479 | &function_end("gcm_ghash_4bit_mmx"); | ||
480 | |||
481 | }} else {{ # "June" MMX version... | ||
482 | # ... has slower "April" gcm_gmult_4bit_mmx with folded | ||
483 | # loop. This is done to conserve code size... | ||
484 | $S=16; # shift factor for rem_4bit | ||
485 | |||
486 | sub mmx_loop() { | ||
487 | # MMX version performs 2.8 times better on P4 (see comment in non-MMX | ||
488 | # routine for further details), 40% better on Opteron and Core2, 50% | ||
489 | # better on PIII... In other words effort is considered to be well | ||
490 | # spent... | ||
491 | my $inp = shift; | ||
492 | my $rem_4bit = shift; | ||
493 | my $cnt = $Zhh; | ||
494 | my $nhi = $Zhl; | ||
495 | my $nlo = $Zlh; | ||
496 | my $rem = $Zll; | ||
497 | |||
498 | my ($Zlo,$Zhi) = ("mm0","mm1"); | ||
499 | my $tmp = "mm2"; | ||
500 | |||
501 | &xor ($nlo,$nlo); # avoid partial register stalls on PIII | ||
502 | &mov ($nhi,$Zll); | ||
503 | &mov (&LB($nlo),&LB($nhi)); | ||
504 | &mov ($cnt,14); | ||
505 | &shl (&LB($nlo),4); | ||
506 | &and ($nhi,0xf0); | ||
507 | &movq ($Zlo,&QWP(8,$Htbl,$nlo)); | ||
508 | &movq ($Zhi,&QWP(0,$Htbl,$nlo)); | ||
509 | &movd ($rem,$Zlo); | ||
510 | &jmp (&label("mmx_loop")); | ||
511 | |||
512 | &set_label("mmx_loop",16); | ||
513 | &psrlq ($Zlo,4); | ||
514 | &and ($rem,0xf); | ||
515 | &movq ($tmp,$Zhi); | ||
516 | &psrlq ($Zhi,4); | ||
517 | &pxor ($Zlo,&QWP(8,$Htbl,$nhi)); | ||
518 | &mov (&LB($nlo),&BP(0,$inp,$cnt)); | ||
519 | &psllq ($tmp,60); | ||
520 | &pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8)); | ||
521 | &dec ($cnt); | ||
522 | &movd ($rem,$Zlo); | ||
523 | &pxor ($Zhi,&QWP(0,$Htbl,$nhi)); | ||
524 | &mov ($nhi,$nlo); | ||
525 | &pxor ($Zlo,$tmp); | ||
526 | &js (&label("mmx_break")); | ||
527 | |||
528 | &shl (&LB($nlo),4); | ||
529 | &and ($rem,0xf); | ||
530 | &psrlq ($Zlo,4); | ||
531 | &and ($nhi,0xf0); | ||
532 | &movq ($tmp,$Zhi); | ||
533 | &psrlq ($Zhi,4); | ||
534 | &pxor ($Zlo,&QWP(8,$Htbl,$nlo)); | ||
535 | &psllq ($tmp,60); | ||
536 | &pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8)); | ||
537 | &movd ($rem,$Zlo); | ||
538 | &pxor ($Zhi,&QWP(0,$Htbl,$nlo)); | ||
539 | &pxor ($Zlo,$tmp); | ||
540 | &jmp (&label("mmx_loop")); | ||
541 | |||
542 | &set_label("mmx_break",16); | ||
543 | &shl (&LB($nlo),4); | ||
544 | &and ($rem,0xf); | ||
545 | &psrlq ($Zlo,4); | ||
546 | &and ($nhi,0xf0); | ||
547 | &movq ($tmp,$Zhi); | ||
548 | &psrlq ($Zhi,4); | ||
549 | &pxor ($Zlo,&QWP(8,$Htbl,$nlo)); | ||
550 | &psllq ($tmp,60); | ||
551 | &pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8)); | ||
552 | &movd ($rem,$Zlo); | ||
553 | &pxor ($Zhi,&QWP(0,$Htbl,$nlo)); | ||
554 | &pxor ($Zlo,$tmp); | ||
555 | |||
556 | &psrlq ($Zlo,4); | ||
557 | &and ($rem,0xf); | ||
558 | &movq ($tmp,$Zhi); | ||
559 | &psrlq ($Zhi,4); | ||
560 | &pxor ($Zlo,&QWP(8,$Htbl,$nhi)); | ||
561 | &psllq ($tmp,60); | ||
562 | &pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8)); | ||
563 | &movd ($rem,$Zlo); | ||
564 | &pxor ($Zhi,&QWP(0,$Htbl,$nhi)); | ||
565 | &pxor ($Zlo,$tmp); | ||
566 | |||
567 | &psrlq ($Zlo,32); # lower part of Zlo is already there | ||
568 | &movd ($Zhl,$Zhi); | ||
569 | &psrlq ($Zhi,32); | ||
570 | &movd ($Zlh,$Zlo); | ||
571 | &movd ($Zhh,$Zhi); | ||
572 | |||
573 | &bswap ($Zll); | ||
574 | &bswap ($Zhl); | ||
575 | &bswap ($Zlh); | ||
576 | &bswap ($Zhh); | ||
577 | } | ||
578 | |||
579 | &function_begin("gcm_gmult_4bit_mmx"); | ||
580 | &mov ($inp,&wparam(0)); # load Xi | ||
581 | &mov ($Htbl,&wparam(1)); # load Htable | ||
582 | |||
583 | &picsetup("eax"); | ||
584 | &picsymbol("eax", &label("rem_4bit"), "eax"); | ||
585 | |||
586 | &movz ($Zll,&BP(15,$inp)); | ||
587 | |||
588 | &mmx_loop($inp,"eax"); | ||
589 | |||
590 | &emms (); | ||
591 | &mov (&DWP(12,$inp),$Zll); | ||
592 | &mov (&DWP(4,$inp),$Zhl); | ||
593 | &mov (&DWP(8,$inp),$Zlh); | ||
594 | &mov (&DWP(0,$inp),$Zhh); | ||
595 | &function_end("gcm_gmult_4bit_mmx"); | ||
596 | |||
597 | ###################################################################### | ||
598 | # The subroutine below is the "528B" variant of the "4-bit" GCM GHASH function | ||
599 | # (see gcm128.c for details). It provides a further 20-40% performance | ||
600 | # improvement over the above-mentioned "May" version. | ||
601 | |||
602 | &static_label("rem_8bit"); | ||
603 | |||
604 | &function_begin("gcm_ghash_4bit_mmx"); | ||
605 | { my ($Zlo,$Zhi) = ("mm7","mm6"); | ||
606 | my $rem_8bit = "esi"; | ||
607 | my $Htbl = "ebx"; | ||
608 | |||
609 | # parameter block | ||
610 | &mov ("eax",&wparam(0)); # Xi | ||
611 | &mov ("ebx",&wparam(1)); # Htable | ||
612 | &mov ("ecx",&wparam(2)); # inp | ||
613 | &mov ("edx",&wparam(3)); # len | ||
614 | &mov ("ebp","esp"); # original %esp | ||
615 | |||
616 | &picsetup($rem_8bit); | ||
617 | &picsymbol($rem_8bit, &label("rem_8bit"), $rem_8bit); | ||
618 | |||
619 | &sub ("esp",512+16+16); # allocate stack frame... | ||
620 | &and ("esp",-64); # ...and align it | ||
621 | &sub ("esp",16); # place for (u8)(H[]<<4) | ||
622 | |||
623 | &add ("edx","ecx"); # pointer to the end of input | ||
624 | &mov (&DWP(528+16+0,"esp"),"eax"); # save Xi | ||
625 | &mov (&DWP(528+16+8,"esp"),"edx"); # save inp+len | ||
626 | &mov (&DWP(528+16+12,"esp"),"ebp"); # save original %esp | ||
627 | |||
628 | { my @lo = ("mm0","mm1","mm2"); | ||
629 | my @hi = ("mm3","mm4","mm5"); | ||
630 | my @tmp = ("mm6","mm7"); | ||
631 | my ($off1,$off2,$i) = (0,0,); | ||
632 | |||
633 | &add ($Htbl,128); # optimize for size | ||
634 | &lea ("edi",&DWP(16+128,"esp")); | ||
635 | &lea ("ebp",&DWP(16+256+128,"esp")); | ||
636 | |||
637 | # decompose Htable (low and high parts are kept separately), | ||
638 | # generate Htable[]>>4, (u8)(Htable[]<<4), save to stack... | ||
639 | for ($i=0;$i<18;$i++) { | ||
640 | |||
641 | &mov ("edx",&DWP(16*$i+8-128,$Htbl)) if ($i<16); | ||
642 | &movq ($lo[0],&QWP(16*$i+8-128,$Htbl)) if ($i<16); | ||
643 | &psllq ($tmp[1],60) if ($i>1); | ||
644 | &movq ($hi[0],&QWP(16*$i+0-128,$Htbl)) if ($i<16); | ||
645 | &por ($lo[2],$tmp[1]) if ($i>1); | ||
646 | &movq (&QWP($off1-128,"edi"),$lo[1]) if ($i>0 && $i<17); | ||
647 | &psrlq ($lo[1],4) if ($i>0 && $i<17); | ||
648 | &movq (&QWP($off1,"edi"),$hi[1]) if ($i>0 && $i<17); | ||
649 | &movq ($tmp[0],$hi[1]) if ($i>0 && $i<17); | ||
650 | &movq (&QWP($off2-128,"ebp"),$lo[2]) if ($i>1); | ||
651 | &psrlq ($hi[1],4) if ($i>0 && $i<17); | ||
652 | &movq (&QWP($off2,"ebp"),$hi[2]) if ($i>1); | ||
653 | &shl ("edx",4) if ($i<16); | ||
654 | &mov (&BP($i,"esp"),&LB("edx")) if ($i<16); | ||
655 | |||
656 | unshift (@lo,pop(@lo)); # "rotate" registers | ||
657 | unshift (@hi,pop(@hi)); | ||
658 | unshift (@tmp,pop(@tmp)); | ||
659 | $off1 += 8 if ($i>0); | ||
660 | $off2 += 8 if ($i>1); | ||
661 | } | ||
662 | } | ||
663 | |||
664 | &movq ($Zhi,&QWP(0,"eax")); | ||
665 | &mov ("ebx",&DWP(8,"eax")); | ||
666 | &mov ("edx",&DWP(12,"eax")); # load Xi | ||
667 | |||
668 | &set_label("outer",16); | ||
669 | { my $nlo = "eax"; | ||
670 | my $dat = "edx"; | ||
671 | my @nhi = ("edi","ebp"); | ||
672 | my @rem = ("ebx","ecx"); | ||
673 | my @red = ("mm0","mm1","mm2"); | ||
674 | my $tmp = "mm3"; | ||
675 | |||
676 | &xor ($dat,&DWP(12,"ecx")); # merge input data | ||
677 | &xor ("ebx",&DWP(8,"ecx")); | ||
678 | &pxor ($Zhi,&QWP(0,"ecx")); | ||
679 | &lea ("ecx",&DWP(16,"ecx")); # inp+=16 | ||
680 | #&mov (&DWP(528+12,"esp"),$dat); # save inp^Xi | ||
681 | &mov (&DWP(528+8,"esp"),"ebx"); | ||
682 | &movq (&QWP(528+0,"esp"),$Zhi); | ||
683 | &mov (&DWP(528+16+4,"esp"),"ecx"); # save inp | ||
684 | |||
685 | &xor ($nlo,$nlo); | ||
686 | &rol ($dat,8); | ||
687 | &mov (&LB($nlo),&LB($dat)); | ||
688 | &mov ($nhi[1],$nlo); | ||
689 | &and (&LB($nlo),0x0f); | ||
690 | &shr ($nhi[1],4); | ||
691 | &pxor ($red[0],$red[0]); | ||
692 | &rol ($dat,8); # next byte | ||
693 | &pxor ($red[1],$red[1]); | ||
694 | &pxor ($red[2],$red[2]); | ||
695 | |||
696 | # Just like in "May" version modulo-schedule for critical path in | ||
697 | # 'Z.hi ^= rem_8bit[Z.lo&0xff^((u8)H[nhi]<<4)]<<48'. Final 'pxor' | ||
698 | # is scheduled so late that rem_8bit[] has to be shifted *right* | ||
699 | # by 16, which is why last argument to pinsrw is 2, which | ||
700 | # corresponds to <<32=<<48>>16... | ||
701 | for ($j=11,$i=0;$i<15;$i++) { | ||
702 | |||
703 | if ($i>0) { | ||
704 | &pxor ($Zlo,&QWP(16,"esp",$nlo,8)); # Z^=H[nlo] | ||
705 | &rol ($dat,8); # next byte | ||
706 | &pxor ($Zhi,&QWP(16+128,"esp",$nlo,8)); | ||
707 | |||
708 | &pxor ($Zlo,$tmp); | ||
709 | &pxor ($Zhi,&QWP(16+256+128,"esp",$nhi[0],8)); | ||
710 | &xor (&LB($rem[1]),&BP(0,"esp",$nhi[0])); # rem^(H[nhi]<<4) | ||
711 | } else { | ||
712 | &movq ($Zlo,&QWP(16,"esp",$nlo,8)); | ||
713 | &movq ($Zhi,&QWP(16+128,"esp",$nlo,8)); | ||
714 | } | ||
715 | |||
716 | &mov (&LB($nlo),&LB($dat)); | ||
717 | &mov ($dat,&DWP(528+$j,"esp")) if (--$j%4==0 && $j>=0); | ||
718 | |||
719 | &movd ($rem[0],$Zlo); | ||
720 | &movz ($rem[1],&LB($rem[1])) if ($i>0); | ||
721 | &psrlq ($Zlo,8); # Z>>=8 | ||
722 | |||
723 | &movq ($tmp,$Zhi); | ||
724 | &mov ($nhi[0],$nlo); | ||
725 | &psrlq ($Zhi,8); | ||
726 | |||
727 | &pxor ($Zlo,&QWP(16+256+0,"esp",$nhi[1],8)); # Z^=H[nhi]>>4 | ||
728 | &and (&LB($nlo),0x0f); | ||
729 | &psllq ($tmp,56); | ||
730 | |||
731 | &pxor ($Zhi,$red[1]) if ($i>1); | ||
732 | &shr ($nhi[0],4); | ||
733 | &pinsrw ($red[0],&WP(0,$rem_8bit,$rem[1],2),2) if ($i>0); | ||
734 | |||
735 | unshift (@red,pop(@red)); # "rotate" registers | ||
736 | unshift (@rem,pop(@rem)); | ||
737 | unshift (@nhi,pop(@nhi)); | ||
738 | } | ||
739 | |||
740 | &pxor ($Zlo,&QWP(16,"esp",$nlo,8)); # Z^=H[nlo] | ||
741 | &pxor ($Zhi,&QWP(16+128,"esp",$nlo,8)); | ||
742 | &xor (&LB($rem[1]),&BP(0,"esp",$nhi[0])); # rem^(H[nhi]<<4) | ||
743 | |||
744 | &pxor ($Zlo,$tmp); | ||
745 | &pxor ($Zhi,&QWP(16+256+128,"esp",$nhi[0],8)); | ||
746 | &movz ($rem[1],&LB($rem[1])); | ||
747 | |||
748 | &pxor ($red[2],$red[2]); # clear 2nd word | ||
749 | &psllq ($red[1],4); | ||
750 | |||
751 | &movd ($rem[0],$Zlo); | ||
752 | &psrlq ($Zlo,4); # Z>>=4 | ||
753 | |||
754 | &movq ($tmp,$Zhi); | ||
755 | &psrlq ($Zhi,4); | ||
756 | &shl ($rem[0],4); # rem<<4 | ||
757 | |||
758 | &pxor ($Zlo,&QWP(16,"esp",$nhi[1],8)); # Z^=H[nhi] | ||
759 | &psllq ($tmp,60); | ||
760 | &movz ($rem[0],&LB($rem[0])); | ||
761 | |||
762 | &pxor ($Zlo,$tmp); | ||
763 | &pxor ($Zhi,&QWP(16+128,"esp",$nhi[1],8)); | ||
764 | |||
765 | &pinsrw ($red[0],&WP(0,$rem_8bit,$rem[1],2),2); | ||
766 | &pxor ($Zhi,$red[1]); | ||
767 | |||
768 | &movd ($dat,$Zlo); | ||
769 | &pinsrw ($red[2],&WP(0,$rem_8bit,$rem[0],2),3); # last is <<48 | ||
770 | |||
771 | &psllq ($red[0],12); # correct by <<16>>4 | ||
772 | &pxor ($Zhi,$red[0]); | ||
773 | &psrlq ($Zlo,32); | ||
774 | &pxor ($Zhi,$red[2]); | ||
775 | |||
776 | &mov ("ecx",&DWP(528+16+4,"esp")); # restore inp | ||
777 | &movd ("ebx",$Zlo); | ||
778 | &movq ($tmp,$Zhi); # 01234567 | ||
779 | &psllw ($Zhi,8); # 1.3.5.7. | ||
780 | &psrlw ($tmp,8); # .0.2.4.6 | ||
781 | &por ($Zhi,$tmp); # 10325476 | ||
782 | &bswap ($dat); | ||
783 | &pshufw ($Zhi,$Zhi,0b00011011); # 76543210 | ||
784 | &bswap ("ebx"); | ||
785 | |||
786 | &cmp ("ecx",&DWP(528+16+8,"esp")); # are we done? | ||
787 | &jne (&label("outer")); | ||
788 | } | ||
789 | |||
790 | &mov ("eax",&DWP(528+16+0,"esp")); # restore Xi | ||
791 | &mov (&DWP(12,"eax"),"edx"); | ||
792 | &mov (&DWP(8,"eax"),"ebx"); | ||
793 | &movq (&QWP(0,"eax"),$Zhi); | ||
794 | |||
795 | &mov ("esp",&DWP(528+16+12,"esp")); # restore original %esp | ||
796 | &emms (); | ||
797 | } | ||
798 | &function_end("gcm_ghash_4bit_mmx"); | ||
799 | }} | ||
800 | |||
801 | if ($sse2) {{ | ||
802 | ###################################################################### | ||
803 | # PCLMULQDQ version. | ||
804 | |||
805 | $Xip="eax"; | ||
806 | $Htbl="edx"; | ||
807 | $const="ecx"; | ||
808 | $inp="esi"; | ||
809 | $len="ebx"; | ||
810 | |||
811 | ($Xi,$Xhi)=("xmm0","xmm1"); $Hkey="xmm2"; | ||
812 | ($T1,$T2,$T3)=("xmm3","xmm4","xmm5"); | ||
813 | ($Xn,$Xhn)=("xmm6","xmm7"); | ||
814 | |||
815 | &static_label("bswap"); | ||
816 | |||
817 | sub clmul64x64_T2 { # minimal "register" pressure | ||
818 | my ($Xhi,$Xi,$Hkey)=@_; | ||
819 | |||
820 | &movdqa ($Xhi,$Xi); # | ||
821 | &pshufd ($T1,$Xi,0b01001110); | ||
822 | &pshufd ($T2,$Hkey,0b01001110); | ||
823 | &pxor ($T1,$Xi); # | ||
824 | &pxor ($T2,$Hkey); | ||
825 | |||
826 | &pclmulqdq ($Xi,$Hkey,0x00); ####### | ||
827 | &pclmulqdq ($Xhi,$Hkey,0x11); ####### | ||
828 | &pclmulqdq ($T1,$T2,0x00); ####### | ||
829 | &xorps ($T1,$Xi); # | ||
830 | &xorps ($T1,$Xhi); # | ||
831 | |||
832 | &movdqa ($T2,$T1); # | ||
833 | &psrldq ($T1,8); | ||
834 | &pslldq ($T2,8); # | ||
835 | &pxor ($Xhi,$T1); | ||
836 | &pxor ($Xi,$T2); # | ||
837 | } | ||
838 | |||
839 | sub clmul64x64_T3 { | ||
840 | # Even though this subroutine offers visually better ILP, it | ||
841 | # was empirically found to be a tad slower than the above version. | ||
842 | # At least in gcm_ghash_clmul context. But it's just as well, | ||
843 | # because loop modulo-scheduling is possible only thanks to | ||
844 | # minimized "register" pressure... | ||
845 | my ($Xhi,$Xi,$Hkey)=@_; | ||
846 | |||
847 | &movdqa ($T1,$Xi); # | ||
848 | &movdqa ($Xhi,$Xi); | ||
849 | &pclmulqdq ($Xi,$Hkey,0x00); ####### | ||
850 | &pclmulqdq ($Xhi,$Hkey,0x11); ####### | ||
851 | &pshufd ($T2,$T1,0b01001110); # | ||
852 | &pshufd ($T3,$Hkey,0b01001110); | ||
853 | &pxor ($T2,$T1); # | ||
854 | &pxor ($T3,$Hkey); | ||
855 | &pclmulqdq ($T2,$T3,0x00); ####### | ||
856 | &pxor ($T2,$Xi); # | ||
857 | &pxor ($T2,$Xhi); # | ||
858 | |||
859 | &movdqa ($T3,$T2); # | ||
860 | &psrldq ($T2,8); | ||
861 | &pslldq ($T3,8); # | ||
862 | &pxor ($Xhi,$T2); | ||
863 | &pxor ($Xi,$T3); # | ||
864 | } | ||
865 | |||
866 | if (1) { # Algorithm 9 with <<1 twist. | ||
867 | # Reduction is shorter and uses only two | ||
868 | # temporary registers, which makes it better | ||
869 | # candidate for interleaving with 64x64 | ||
870 | # multiplication. Pre-modulo-scheduled loop | ||
871 | # was found to be ~20% faster than Algorithm 5 | ||
872 | # below. Algorithm 9 was therefore chosen for | ||
873 | # further optimization... | ||
874 | |||
875 | sub reduction_alg9 { # 17/13 times faster than Intel version | ||
876 | my ($Xhi,$Xi) = @_; | ||
877 | |||
878 | # 1st phase | ||
879 | &movdqa ($T1,$Xi); # | ||
880 | &psllq ($Xi,1); | ||
881 | &pxor ($Xi,$T1); # | ||
882 | &psllq ($Xi,5); # | ||
883 | &pxor ($Xi,$T1); # | ||
884 | &psllq ($Xi,57); # | ||
885 | &movdqa ($T2,$Xi); # | ||
886 | &pslldq ($Xi,8); | ||
887 | &psrldq ($T2,8); # | ||
888 | &pxor ($Xi,$T1); | ||
889 | &pxor ($Xhi,$T2); # | ||
890 | |||
891 | # 2nd phase | ||
892 | &movdqa ($T2,$Xi); | ||
893 | &psrlq ($Xi,5); | ||
894 | &pxor ($Xi,$T2); # | ||
895 | &psrlq ($Xi,1); # | ||
896 | &pxor ($Xi,$T2); # | ||
897 | &pxor ($T2,$Xhi); | ||
898 | &psrlq ($Xi,1); # | ||
899 | &pxor ($Xi,$T2); # | ||
900 | } | ||
901 | |||
902 | &function_begin_B("gcm_init_clmul"); | ||
903 | &mov ($Htbl,&wparam(0)); | ||
904 | &mov ($Xip,&wparam(1)); | ||
905 | |||
906 | &picsetup($const); | ||
907 | &picsymbol($const, &label("bswap"), $const); | ||
908 | |||
909 | &movdqu ($Hkey,&QWP(0,$Xip)); | ||
910 | &pshufd ($Hkey,$Hkey,0b01001110);# dword swap | ||
911 | |||
912 | # <<1 twist | ||
913 | &pshufd ($T2,$Hkey,0b11111111); # broadcast uppermost dword | ||
914 | &movdqa ($T1,$Hkey); | ||
915 | &psllq ($Hkey,1); | ||
916 | &pxor ($T3,$T3); # | ||
917 | &psrlq ($T1,63); | ||
918 | &pcmpgtd ($T3,$T2); # broadcast carry bit | ||
919 | &pslldq ($T1,8); | ||
920 | &por ($Hkey,$T1); # H<<=1 | ||
921 | |||
922 | # magic reduction | ||
923 | &pand ($T3,&QWP(16,$const)); # 0x1c2_polynomial | ||
924 | &pxor ($Hkey,$T3); # if(carry) H^=0x1c2_polynomial | ||
925 | |||
926 | # calculate H^2 | ||
927 | &movdqa ($Xi,$Hkey); | ||
928 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey); | ||
929 | &reduction_alg9 ($Xhi,$Xi); | ||
930 | |||
931 | &movdqu (&QWP(0,$Htbl),$Hkey); # save H | ||
932 | &movdqu (&QWP(16,$Htbl),$Xi); # save H^2 | ||
933 | |||
934 | &ret (); | ||
935 | &function_end_B("gcm_init_clmul"); | ||
936 | |||
937 | &function_begin_B("gcm_gmult_clmul"); | ||
938 | &mov ($Xip,&wparam(0)); | ||
939 | &mov ($Htbl,&wparam(1)); | ||
940 | |||
941 | &picsetup($const); | ||
942 | &picsymbol($const, &label("bswap"), $const); | ||
943 | |||
944 | &movdqu ($Xi,&QWP(0,$Xip)); | ||
945 | &movdqa ($T3,&QWP(0,$const)); | ||
946 | &movups ($Hkey,&QWP(0,$Htbl)); | ||
947 | &pshufb ($Xi,$T3); | ||
948 | |||
949 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey); | ||
950 | &reduction_alg9 ($Xhi,$Xi); | ||
951 | |||
952 | &pshufb ($Xi,$T3); | ||
953 | &movdqu (&QWP(0,$Xip),$Xi); | ||
954 | |||
955 | &ret (); | ||
956 | &function_end_B("gcm_gmult_clmul"); | ||
957 | |||
958 | &function_begin("gcm_ghash_clmul"); | ||
959 | &mov ($Xip,&wparam(0)); | ||
960 | &mov ($Htbl,&wparam(1)); | ||
961 | &mov ($inp,&wparam(2)); | ||
962 | &mov ($len,&wparam(3)); | ||
963 | |||
964 | &picsetup($const); | ||
965 | &picsymbol($const, &label("bswap"), $const); | ||
966 | |||
967 | &movdqu ($Xi,&QWP(0,$Xip)); | ||
968 | &movdqa ($T3,&QWP(0,$const)); | ||
969 | &movdqu ($Hkey,&QWP(0,$Htbl)); | ||
970 | &pshufb ($Xi,$T3); | ||
971 | |||
972 | &sub ($len,0x10); | ||
973 | &jz (&label("odd_tail")); | ||
974 | |||
975 | ####### | ||
976 | # Xi+2 =[H*(Ii+1 + Xi+1)] mod P = | ||
977 | # [(H*Ii+1) + (H*Xi+1)] mod P = | ||
978 | # [(H*Ii+1) + H^2*(Ii+Xi)] mod P | ||
979 | # | ||
980 | &movdqu ($T1,&QWP(0,$inp)); # Ii | ||
981 | &movdqu ($Xn,&QWP(16,$inp)); # Ii+1 | ||
982 | &pshufb ($T1,$T3); | ||
983 | &pshufb ($Xn,$T3); | ||
984 | &pxor ($Xi,$T1); # Ii+Xi | ||
985 | |||
986 | &clmul64x64_T2 ($Xhn,$Xn,$Hkey); # H*Ii+1 | ||
987 | &movups ($Hkey,&QWP(16,$Htbl)); # load H^2 | ||
988 | |||
989 | &lea ($inp,&DWP(32,$inp)); # i+=2 | ||
990 | &sub ($len,0x20); | ||
991 | &jbe (&label("even_tail")); | ||
992 | |||
993 | &set_label("mod_loop"); | ||
994 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi) | ||
995 | &movdqu ($T1,&QWP(0,$inp)); # Ii | ||
996 | &movups ($Hkey,&QWP(0,$Htbl)); # load H | ||
997 | |||
998 | &pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi) | ||
999 | &pxor ($Xhi,$Xhn); | ||
1000 | |||
1001 | &movdqu ($Xn,&QWP(16,$inp)); # Ii+1 | ||
1002 | &pshufb ($T1,$T3); | ||
1003 | &pshufb ($Xn,$T3); | ||
1004 | |||
1005 | &movdqa ($T3,$Xn); #&clmul64x64_TX ($Xhn,$Xn,$Hkey); H*Ii+1 | ||
1006 | &movdqa ($Xhn,$Xn); | ||
1007 | &pxor ($Xhi,$T1); # "Ii+Xi", consume early | ||
1008 | |||
1009 | &movdqa ($T1,$Xi); #&reduction_alg9($Xhi,$Xi); 1st phase | ||
1010 | &psllq ($Xi,1); | ||
1011 | &pxor ($Xi,$T1); # | ||
1012 | &psllq ($Xi,5); # | ||
1013 | &pxor ($Xi,$T1); # | ||
1014 | &pclmulqdq ($Xn,$Hkey,0x00); ####### | ||
1015 | &psllq ($Xi,57); # | ||
1016 | &movdqa ($T2,$Xi); # | ||
1017 | &pslldq ($Xi,8); | ||
1018 | &psrldq ($T2,8); # | ||
1019 | &pxor ($Xi,$T1); | ||
1020 | &pshufd ($T1,$T3,0b01001110); | ||
1021 | &pxor ($Xhi,$T2); # | ||
1022 | &pxor ($T1,$T3); | ||
1023 | &pshufd ($T3,$Hkey,0b01001110); | ||
1024 | &pxor ($T3,$Hkey); # | ||
1025 | |||
1026 | &pclmulqdq ($Xhn,$Hkey,0x11); ####### | ||
1027 | &movdqa ($T2,$Xi); # 2nd phase | ||
1028 | &psrlq ($Xi,5); | ||
1029 | &pxor ($Xi,$T2); # | ||
1030 | &psrlq ($Xi,1); # | ||
1031 | &pxor ($Xi,$T2); # | ||
1032 | &pxor ($T2,$Xhi); | ||
1033 | &psrlq ($Xi,1); # | ||
1034 | &pxor ($Xi,$T2); # | ||
1035 | |||
1036 | &pclmulqdq ($T1,$T3,0x00); ####### | ||
1037 | &movups ($Hkey,&QWP(16,$Htbl)); # load H^2 | ||
1038 | &xorps ($T1,$Xn); # | ||
1039 | &xorps ($T1,$Xhn); # | ||
1040 | |||
1041 | &movdqa ($T3,$T1); # | ||
1042 | &psrldq ($T1,8); | ||
1043 | &pslldq ($T3,8); # | ||
1044 | &pxor ($Xhn,$T1); | ||
1045 | &pxor ($Xn,$T3); # | ||
1046 | &movdqa ($T3,&QWP(0,$const)); | ||
1047 | |||
1048 | &lea ($inp,&DWP(32,$inp)); | ||
1049 | &sub ($len,0x20); | ||
1050 | &ja (&label("mod_loop")); | ||
1051 | |||
1052 | &set_label("even_tail"); | ||
1053 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi) | ||
1054 | |||
1055 | &pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi) | ||
1056 | &pxor ($Xhi,$Xhn); | ||
1057 | |||
1058 | &reduction_alg9 ($Xhi,$Xi); | ||
1059 | |||
1060 | &test ($len,$len); | ||
1061 | &jnz (&label("done")); | ||
1062 | |||
1063 | &movups ($Hkey,&QWP(0,$Htbl)); # load H | ||
1064 | &set_label("odd_tail"); | ||
1065 | &movdqu ($T1,&QWP(0,$inp)); # Ii | ||
1066 | &pshufb ($T1,$T3); | ||
1067 | &pxor ($Xi,$T1); # Ii+Xi | ||
1068 | |||
1069 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey); # H*(Ii+Xi) | ||
1070 | &reduction_alg9 ($Xhi,$Xi); | ||
1071 | |||
1072 | &set_label("done"); | ||
1073 | &pshufb ($Xi,$T3); | ||
1074 | &movdqu (&QWP(0,$Xip),$Xi); | ||
1075 | &function_end("gcm_ghash_clmul"); | ||
1076 | |||
1077 | } else { # Algorithm 5. Kept for reference purposes. | ||
1078 | |||
1079 | sub reduction_alg5 { # 19/16 times faster than Intel version | ||
1080 | my ($Xhi,$Xi)=@_; | ||
1081 | |||
1082 | # <<1 | ||
1083 | &movdqa ($T1,$Xi); # | ||
1084 | &movdqa ($T2,$Xhi); | ||
1085 | &pslld ($Xi,1); | ||
1086 | &pslld ($Xhi,1); # | ||
1087 | &psrld ($T1,31); | ||
1088 | &psrld ($T2,31); # | ||
1089 | &movdqa ($T3,$T1); | ||
1090 | &pslldq ($T1,4); | ||
1091 | &psrldq ($T3,12); # | ||
1092 | &pslldq ($T2,4); | ||
1093 | &por ($Xhi,$T3); # | ||
1094 | &por ($Xi,$T1); | ||
1095 | &por ($Xhi,$T2); # | ||
1096 | |||
1097 | # 1st phase | ||
1098 | &movdqa ($T1,$Xi); | ||
1099 | &movdqa ($T2,$Xi); | ||
1100 | &movdqa ($T3,$Xi); # | ||
1101 | &pslld ($T1,31); | ||
1102 | &pslld ($T2,30); | ||
1103 | &pslld ($Xi,25); # | ||
1104 | &pxor ($T1,$T2); | ||
1105 | &pxor ($T1,$Xi); # | ||
1106 | &movdqa ($T2,$T1); # | ||
1107 | &pslldq ($T1,12); | ||
1108 | &psrldq ($T2,4); # | ||
1109 | &pxor ($T3,$T1); | ||
1110 | |||
1111 | # 2nd phase | ||
1112 | &pxor ($Xhi,$T3); # | ||
1113 | &movdqa ($Xi,$T3); | ||
1114 | &movdqa ($T1,$T3); | ||
1115 | &psrld ($Xi,1); # | ||
1116 | &psrld ($T1,2); | ||
1117 | &psrld ($T3,7); # | ||
1118 | &pxor ($Xi,$T1); | ||
1119 | &pxor ($Xhi,$T2); | ||
1120 | &pxor ($Xi,$T3); # | ||
1121 | &pxor ($Xi,$Xhi); # | ||
1122 | } | ||
1123 | |||
1124 | &function_begin_B("gcm_init_clmul"); | ||
1125 | &mov ($Htbl,&wparam(0)); | ||
1126 | &mov ($Xip,&wparam(1)); | ||
1127 | |||
1128 | &picsetup($const); | ||
1129 | &picsymbol($const, &label("bswap"), $const); | ||
1130 | |||
1131 | &movdqu ($Hkey,&QWP(0,$Xip)); | ||
1132 | &pshufd ($Hkey,$Hkey,0b01001110);# dword swap | ||
1133 | |||
1134 | # calculate H^2 | ||
1135 | &movdqa ($Xi,$Hkey); | ||
1136 | &clmul64x64_T3 ($Xhi,$Xi,$Hkey); | ||
1137 | &reduction_alg5 ($Xhi,$Xi); | ||
1138 | |||
1139 | &movdqu (&QWP(0,$Htbl),$Hkey); # save H | ||
1140 | &movdqu (&QWP(16,$Htbl),$Xi); # save H^2 | ||
1141 | |||
1142 | &ret (); | ||
1143 | &function_end_B("gcm_init_clmul"); | ||
1144 | |||
1145 | &function_begin_B("gcm_gmult_clmul"); | ||
1146 | &mov ($Xip,&wparam(0)); | ||
1147 | &mov ($Htbl,&wparam(1)); | ||
1148 | |||
1149 | &picsetup($const); | ||
1150 | &picsymbol($const, &label("bswap"), $const); | ||
1151 | |||
1152 | &movdqu ($Xi,&QWP(0,$Xip)); | ||
1153 | &movdqa ($Xn,&QWP(0,$const)); | ||
1154 | &movdqu ($Hkey,&QWP(0,$Htbl)); | ||
1155 | &pshufb ($Xi,$Xn); | ||
1156 | |||
1157 | &clmul64x64_T3 ($Xhi,$Xi,$Hkey); | ||
1158 | &reduction_alg5 ($Xhi,$Xi); | ||
1159 | |||
1160 | &pshufb ($Xi,$Xn); | ||
1161 | &movdqu (&QWP(0,$Xip),$Xi); | ||
1162 | |||
1163 | &ret (); | ||
1164 | &function_end_B("gcm_gmult_clmul"); | ||
1165 | |||
1166 | &function_begin("gcm_ghash_clmul"); | ||
1167 | &mov ($Xip,&wparam(0)); | ||
1168 | &mov ($Htbl,&wparam(1)); | ||
1169 | &mov ($inp,&wparam(2)); | ||
1170 | &mov ($len,&wparam(3)); | ||
1171 | |||
1172 | &picsetup($const); | ||
1173 | &picsymbol($const, &label("bswap"), $const); | ||
1174 | |||
1175 | &movdqu ($Xi,&QWP(0,$Xip)); | ||
1176 | &movdqa ($T3,&QWP(0,$const)); | ||
1177 | &movdqu ($Hkey,&QWP(0,$Htbl)); | ||
1178 | &pshufb ($Xi,$T3); | ||
1179 | |||
1180 | &sub ($len,0x10); | ||
1181 | &jz (&label("odd_tail")); | ||
1182 | |||
1183 | ####### | ||
1184 | # Xi+2 =[H*(Ii+1 + Xi+1)] mod P = | ||
1185 | # [(H*Ii+1) + (H*Xi+1)] mod P = | ||
1186 | # [(H*Ii+1) + H^2*(Ii+Xi)] mod P | ||
1187 | # | ||
1188 | &movdqu ($T1,&QWP(0,$inp)); # Ii | ||
1189 | &movdqu ($Xn,&QWP(16,$inp)); # Ii+1 | ||
1190 | &pshufb ($T1,$T3); | ||
1191 | &pshufb ($Xn,$T3); | ||
1192 | &pxor ($Xi,$T1); # Ii+Xi | ||
1193 | |||
1194 | &clmul64x64_T3 ($Xhn,$Xn,$Hkey); # H*Ii+1 | ||
1195 | &movdqu ($Hkey,&QWP(16,$Htbl)); # load H^2 | ||
1196 | |||
1197 | &sub ($len,0x20); | ||
1198 | &lea ($inp,&DWP(32,$inp)); # i+=2 | ||
1199 | &jbe (&label("even_tail")); | ||
1200 | |||
1201 | &set_label("mod_loop"); | ||
1202 | &clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi) | ||
1203 | &movdqu ($Hkey,&QWP(0,$Htbl)); # load H | ||
1204 | |||
1205 | &pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi) | ||
1206 | &pxor ($Xhi,$Xhn); | ||
1207 | |||
1208 | &reduction_alg5 ($Xhi,$Xi); | ||
1209 | |||
1210 | ####### | ||
1211 | &movdqa ($T3,&QWP(0,$const)); | ||
1212 | &movdqu ($T1,&QWP(0,$inp)); # Ii | ||
1213 | &movdqu ($Xn,&QWP(16,$inp)); # Ii+1 | ||
1214 | &pshufb ($T1,$T3); | ||
1215 | &pshufb ($Xn,$T3); | ||
1216 | &pxor ($Xi,$T1); # Ii+Xi | ||
1217 | |||
1218 | &clmul64x64_T3 ($Xhn,$Xn,$Hkey); # H*Ii+1 | ||
1219 | &movdqu ($Hkey,&QWP(16,$Htbl)); # load H^2 | ||
1220 | |||
1221 | &sub ($len,0x20); | ||
1222 | &lea ($inp,&DWP(32,$inp)); | ||
1223 | &ja (&label("mod_loop")); | ||
1224 | |||
1225 | &set_label("even_tail"); | ||
1226 | &clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi) | ||
1227 | |||
1228 | &pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi) | ||
1229 | &pxor ($Xhi,$Xhn); | ||
1230 | |||
1231 | &reduction_alg5 ($Xhi,$Xi); | ||
1232 | |||
1233 | &movdqa ($T3,&QWP(0,$const)); | ||
1234 | &test ($len,$len); | ||
1235 | &jnz (&label("done")); | ||
1236 | |||
1237 | &movdqu ($Hkey,&QWP(0,$Htbl)); # load H | ||
1238 | &set_label("odd_tail"); | ||
1239 | &movdqu ($T1,&QWP(0,$inp)); # Ii | ||
1240 | &pshufb ($T1,$T3); | ||
1241 | &pxor ($Xi,$T1); # Ii+Xi | ||
1242 | |||
1243 | &clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H*(Ii+Xi) | ||
1244 | &reduction_alg5 ($Xhi,$Xi); | ||
1245 | |||
1246 | &movdqa ($T3,&QWP(0,$const)); | ||
1247 | &set_label("done"); | ||
1248 | &pshufb ($Xi,$T3); | ||
1249 | &movdqu (&QWP(0,$Xip),$Xi); | ||
1250 | &function_end("gcm_ghash_clmul"); | ||
1251 | |||
1252 | } | ||
1253 | |||
1254 | &rodataseg(); | ||
1255 | &set_label("bswap",64); | ||
1256 | &data_byte(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0); | ||
1257 | &data_byte(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2); # 0x1c2_polynomial | ||
1258 | &previous(); | ||
1259 | }} # $sse2 | ||
1260 | |||
1261 | &rodataseg(); | ||
1262 | &set_label("rem_4bit",64); | ||
1263 | &data_word(0,0x0000<<$S,0,0x1C20<<$S,0,0x3840<<$S,0,0x2460<<$S); | ||
1264 | &data_word(0,0x7080<<$S,0,0x6CA0<<$S,0,0x48C0<<$S,0,0x54E0<<$S); | ||
1265 | &data_word(0,0xE100<<$S,0,0xFD20<<$S,0,0xD940<<$S,0,0xC560<<$S); | ||
1266 | &data_word(0,0x9180<<$S,0,0x8DA0<<$S,0,0xA9C0<<$S,0,0xB5E0<<$S); | ||
1267 | &set_label("rem_8bit",64); | ||
1268 | &data_short(0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E); | ||
1269 | &data_short(0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E); | ||
1270 | &data_short(0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E); | ||
1271 | &data_short(0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E); | ||
1272 | &data_short(0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E); | ||
1273 | &data_short(0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E); | ||
1274 | &data_short(0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E); | ||
1275 | &data_short(0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E); | ||
1276 | &data_short(0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE); | ||
1277 | &data_short(0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE); | ||
1278 | &data_short(0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE); | ||
1279 | &data_short(0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE); | ||
1280 | &data_short(0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E); | ||
1281 | &data_short(0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E); | ||
1282 | &data_short(0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE); | ||
1283 | &data_short(0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE); | ||
1284 | &data_short(0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E); | ||
1285 | &data_short(0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E); | ||
1286 | &data_short(0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E); | ||
1287 | &data_short(0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E); | ||
1288 | &data_short(0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E); | ||
1289 | &data_short(0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E); | ||
1290 | &data_short(0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E); | ||
1291 | &data_short(0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E); | ||
1292 | &data_short(0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE); | ||
1293 | &data_short(0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE); | ||
1294 | &data_short(0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE); | ||
1295 | &data_short(0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE); | ||
1296 | &data_short(0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E); | ||
1297 | &data_short(0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E); | ||
1298 | &data_short(0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE); | ||
1299 | &data_short(0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE); | ||
1300 | &previous(); | ||
1301 | }}} # !$x86only | ||
1302 | |||
1303 | &asm_finish(); | ||
1304 | |||
1305 | # A question was raised about the choice of vanilla MMX. Or rather, why wasn't | ||
1306 | # SSE2 chosen instead? In addition to the fact that MMX runs on legacy | ||
1307 | # CPUs such as PIII, "4-bit" MMX version was observed to provide better | ||
1308 | # performance than *corresponding* SSE2 one even on contemporary CPUs. | ||
1309 | # SSE2 results were provided by Peter-Michael Hager. He maintains an SSE2 | ||
1310 | # implementation featuring a full range of lookup-table sizes, but with | ||
1311 | # per-invocation lookup table setup. The latter means that the table size is | ||
1312 | # chosen depending on how much data is to be hashed in each call: | ||
1313 | # more data, larger table. The best reported result for Core2 is ~4 cycles | ||
1314 | # per processed byte out of 64KB block. This number accounts even for | ||
1315 | # 64KB table setup overhead. As discussed in gcm128.c we choose to be | ||
1316 | # more conservative in respect to lookup table sizes, but how do the | ||
1317 | # results compare? Minimalistic "256B" MMX version delivers ~11 cycles | ||
1318 | # on same platform. As also discussed in gcm128.c, next in line "8-bit | ||
1319 | # Shoup's" or "4KB" method should deliver twice the performance of | ||
1320 | # "256B" one, in other words not worse than ~6 cycles per byte. It | ||
1321 | # should also be noted that in the SSE2 case the improvement can be "super- | ||
1322 | # linear," i.e. more than twice, mostly because >>8 maps to a single | ||
1323 | # instruction on an SSE2 register. This is unlike the "4-bit" case, where >>4 | ||
1324 | # maps to the same number of instructions in both the MMX and SSE2 cases. | ||
1325 | # Bottom line is that switch to SSE2 is considered to be justifiable | ||
1326 | # only in case we choose to implement "8-bit" method... | ||
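For concreteness, the table footprints this comparison keeps referring to can be worked out directly. A minimal editorial sketch in Perl (not part of the generator), using the entry sizes these modules actually emit, namely 16-byte field elements per lookup entry, 8-byte rem_4bit entries and 2-byte rem_8bit entries:

    # "4-bit" method: 16 lookup entries per key plus a shared rem_4bit table.
    my $tbl_4bit = 16 * 16;      # 256 bytes per key
    my $rem_4bit = 16 * 8;       # 128 bytes, shared
    # "8-bit" (Shoup's) method: 256 lookup entries per key plus shared rem_8bit.
    my $tbl_8bit = 256 * 16;     # 4096 bytes per key, the "4KB" variant
    my $rem_8bit = 256 * 2;      # 512 bytes, shared
    printf "4-bit: %dB + %dB shared; 8-bit: %dB + %dB shared\n",
           $tbl_4bit, $rem_4bit, $tbl_8bit, $rem_8bit;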
diff --git a/src/lib/libcrypto/modes/asm/ghash-x86_64.pl b/src/lib/libcrypto/modes/asm/ghash-x86_64.pl deleted file mode 100644 index bf547a041b..0000000000 --- a/src/lib/libcrypto/modes/asm/ghash-x86_64.pl +++ /dev/null | |||
@@ -1,812 +0,0 @@ | |||
1 | #!/usr/bin/env perl | ||
2 | # | ||
3 | # ==================================================================== | ||
4 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
5 | # project. The module is, however, dual licensed under OpenSSL and | ||
6 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
7 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
8 | # ==================================================================== | ||
9 | # | ||
10 | # March, June 2010 | ||
11 | # | ||
12 | # The module implements "4-bit" GCM GHASH function and underlying | ||
13 | # single multiplication operation in GF(2^128). "4-bit" means that | ||
14 | # it uses 256 bytes per-key table [+128 bytes shared table]. GHASH | ||
15 | # function features so called "528B" variant utilizing additional | ||
16 | # 256+16 bytes of per-key storage [+512 bytes shared table]. | ||
17 | # Performance results are for this streamed GHASH subroutine and are | ||
18 | # expressed in cycles per processed byte, less is better: | ||
19 | # | ||
20 | # gcc 3.4.x(*) assembler | ||
21 | # | ||
22 | # P4 28.6 14.0 +100% | ||
23 | # Opteron 19.3 7.7 +150% | ||
24 | # Core2 17.8 8.1(**) +120% | ||
25 | # | ||
26 | # (*) comparison is not completely fair, because C results are | ||
27 | # for vanilla "256B" implementation, while assembler results | ||
28 | # are for "528B";-) | ||
29 | # (**) it's a mystery [to me] why the Core2 result is not the same as for | ||
30 | # Opteron; | ||
31 | |||
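As a rough illustration of what the "256 bytes per-key table" mentioned above holds, the sketch below builds the sixteen 128-bit table entries the 4-bit method expects: index 8 holds H itself, indices 4, 2, 1 follow by repeated multiplication by x in GCM's bit-reflected representation, and the remaining entries are XOR combinations. This is an editorial sketch in plain Perl (a 64-bit perl is assumed), not code from the generator:

    use strict;
    use warnings;

    # One "multiply by x" step on a 128-bit value held as two 64-bit halves,
    # in GCM's bit-reflected representation: shift right by one and fold the
    # dropped bit back in with the 0xE1 reduction constant.
    sub gmul_x {
        my ($hi, $lo) = @_;
        my $carry = $lo & 1;
        $lo = ($lo >> 1) | (($hi & 1) << 63);
        $hi >>= 1;
        $hi ^= 0xE100000000000000 if $carry;
        return ($hi, $lo);
    }

    # Build the 16-entry (16*16 = 256-byte) per-key lookup table.
    sub init_4bit {
        my ($Hhi, $Hlo) = @_;
        my @T = map { [0, 0] } 0 .. 15;
        my @V = ($Hhi, $Hlo);
        for my $i (8, 4, 2, 1) {            # H, H*x, H*x^2, H*x^3
            $T[$i] = [@V];
            @V = gmul_x(@V);
        }
        for my $i (2, 4, 8) {               # the rest by XOR of the bits set
            for my $j (1 .. $i - 1) {
                $T[$i + $j] = [ $T[$i][0] ^ $T[$j][0], $T[$i][1] ^ $T[$j][1] ];
            }
        }
        return @T;
    }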
32 | # May 2010 | ||
33 | # | ||
34 | # Add PCLMULQDQ version performing at 2.02 cycles per processed byte. | ||
35 | # See ghash-x86.pl for background information and details about coding | ||
36 | # techniques. | ||
37 | # | ||
38 | # Special thanks to David Woodhouse <dwmw2@infradead.org> for | ||
39 | # providing access to a Westmere-based system on behalf of Intel | ||
40 | # Open Source Technology Centre. | ||
41 | |||
42 | $flavour = shift; | ||
43 | $output = shift; | ||
44 | if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } | ||
45 | |||
46 | $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); | ||
47 | |||
48 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
49 | ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or | ||
50 | ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or | ||
51 | die "can't locate x86_64-xlate.pl"; | ||
52 | |||
53 | open OUT,"| \"$^X\" $xlate $flavour $output"; | ||
54 | *STDOUT=*OUT; | ||
55 | |||
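Given the argument handling above, the generator is driven with an optional flavour string followed by (or replaced by) an output file name; a purely illustrative invocation (the output names here are arbitrary) might look like:

    perl ghash-x86_64.pl elf      ghash-x86_64.s    # ELF/AT&T output via x86_64-xlate.pl
    perl ghash-x86_64.pl mingw64  ghash-x86_64.s    # Win64 flavour, turns on the $win64 paths
    perl ghash-x86_64.pl ghash-x86_64.asm           # first arg contains a dot, so it becomes
                                                    # the output; the .asm suffix also sets $win64

The flavour string itself is only forwarded to x86_64-xlate.pl, which performs the actual syntax translation.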
56 | # common register layout | ||
57 | $nlo="%rax"; | ||
58 | $nhi="%rbx"; | ||
59 | $Zlo="%r8"; | ||
60 | $Zhi="%r9"; | ||
61 | $tmp="%r10"; | ||
62 | $rem_4bit = "%r11"; | ||
63 | |||
64 | $Xi="%rdi"; | ||
65 | $Htbl="%rsi"; | ||
66 | |||
67 | # per-function register layout | ||
68 | $cnt="%rcx"; | ||
69 | $rem="%rdx"; | ||
70 | |||
71 | sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/ or | ||
72 | $r =~ s/%[er]([sd]i)/%\1l/ or | ||
73 | $r =~ s/%[er](bp)/%\1l/ or | ||
74 | $r =~ s/%(r[0-9]+)[d]?/%\1b/; $r; } | ||
75 | |||
76 | sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm | ||
77 | { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; | ||
78 | my $arg = pop; | ||
79 | $arg = "\$$arg" if ($arg*1 eq $arg); | ||
80 | $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n"; | ||
81 | } | ||
82 | |||
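To make the two helpers above concrete, here are a few expansions worked out from the regexes and the thunk (illustrative only; the register names come from the common layout above):

    &LB("%rax");             # returns "%al"  (the %[er]([a-d])x rule)
    &LB("%rsi");             # returns "%sil" (the %[er]([sd]i) rule)
    &LB("%r9");              # returns "%r9b" (the %(r[0-9]+)[d]? rule)

    &mov ($Zlo, "8($Xi)");   # appends "\tmov\t8(%rdi),%r8\n" to $code: calls
                             # are written destination first and emitted in
                             # AT&T src,dst order by the reverse() in AUTOLOAD
    &sub ($Htbl, -128);      # a numeric last argument gains a '$' prefix,
                             # giving "\tsub\t\$-128,%rsi\n"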
83 | { my $N; | ||
84 | sub loop() { | ||
85 | my $inp = shift; | ||
86 | |||
87 | $N++; | ||
88 | $code.=<<___; | ||
89 | xor $nlo,$nlo | ||
90 | xor $nhi,$nhi | ||
91 | mov `&LB("$Zlo")`,`&LB("$nlo")` | ||
92 | mov `&LB("$Zlo")`,`&LB("$nhi")` | ||
93 | shl \$4,`&LB("$nlo")` | ||
94 | mov \$14,$cnt | ||
95 | mov 8($Htbl,$nlo),$Zlo | ||
96 | mov ($Htbl,$nlo),$Zhi | ||
97 | and \$0xf0,`&LB("$nhi")` | ||
98 | mov $Zlo,$rem | ||
99 | jmp .Loop$N | ||
100 | |||
101 | .align 16 | ||
102 | .Loop$N: | ||
103 | shr \$4,$Zlo | ||
104 | and \$0xf,$rem | ||
105 | mov $Zhi,$tmp | ||
106 | mov ($inp,$cnt),`&LB("$nlo")` | ||
107 | shr \$4,$Zhi | ||
108 | xor 8($Htbl,$nhi),$Zlo | ||
109 | shl \$60,$tmp | ||
110 | xor ($Htbl,$nhi),$Zhi | ||
111 | mov `&LB("$nlo")`,`&LB("$nhi")` | ||
112 | xor ($rem_4bit,$rem,8),$Zhi | ||
113 | mov $Zlo,$rem | ||
114 | shl \$4,`&LB("$nlo")` | ||
115 | xor $tmp,$Zlo | ||
116 | dec $cnt | ||
117 | js .Lbreak$N | ||
118 | |||
119 | shr \$4,$Zlo | ||
120 | and \$0xf,$rem | ||
121 | mov $Zhi,$tmp | ||
122 | shr \$4,$Zhi | ||
123 | xor 8($Htbl,$nlo),$Zlo | ||
124 | shl \$60,$tmp | ||
125 | xor ($Htbl,$nlo),$Zhi | ||
126 | and \$0xf0,`&LB("$nhi")` | ||
127 | xor ($rem_4bit,$rem,8),$Zhi | ||
128 | mov $Zlo,$rem | ||
129 | xor $tmp,$Zlo | ||
130 | jmp .Loop$N | ||
131 | |||
132 | .align 16 | ||
133 | .Lbreak$N: | ||
134 | shr \$4,$Zlo | ||
135 | and \$0xf,$rem | ||
136 | mov $Zhi,$tmp | ||
137 | shr \$4,$Zhi | ||
138 | xor 8($Htbl,$nlo),$Zlo | ||
139 | shl \$60,$tmp | ||
140 | xor ($Htbl,$nlo),$Zhi | ||
141 | and \$0xf0,`&LB("$nhi")` | ||
142 | xor ($rem_4bit,$rem,8),$Zhi | ||
143 | mov $Zlo,$rem | ||
144 | xor $tmp,$Zlo | ||
145 | |||
146 | shr \$4,$Zlo | ||
147 | and \$0xf,$rem | ||
148 | mov $Zhi,$tmp | ||
149 | shr \$4,$Zhi | ||
150 | xor 8($Htbl,$nhi),$Zlo | ||
151 | shl \$60,$tmp | ||
152 | xor ($Htbl,$nhi),$Zhi | ||
153 | xor $tmp,$Zlo | ||
154 | xor ($rem_4bit,$rem,8),$Zhi | ||
155 | |||
156 | bswap $Zlo | ||
157 | bswap $Zhi | ||
158 | ___ | ||
159 | }} | ||
160 | |||
161 | $code=<<___; | ||
162 | .text | ||
163 | |||
164 | .globl gcm_gmult_4bit | ||
165 | .type gcm_gmult_4bit,\@function,2 | ||
166 | .align 16 | ||
167 | gcm_gmult_4bit: | ||
168 | _CET_ENDBR | ||
169 | push %rbx | ||
170 | push %rbp # %rbp and %r12 are pushed exclusively in | ||
171 | push %r12 # order to reuse Win64 exception handler... | ||
172 | .Lgmult_prologue: | ||
173 | |||
174 | movzb 15($Xi),$Zlo | ||
175 | lea .Lrem_4bit(%rip),$rem_4bit | ||
176 | ___ | ||
177 | &loop ($Xi); | ||
178 | $code.=<<___; | ||
179 | mov $Zlo,8($Xi) | ||
180 | mov $Zhi,($Xi) | ||
181 | |||
182 | mov 16(%rsp),%rbx | ||
183 | lea 24(%rsp),%rsp | ||
184 | .Lgmult_epilogue: | ||
185 | ret | ||
186 | .size gcm_gmult_4bit,.-gcm_gmult_4bit | ||
187 | ___ | ||
188 | |||
189 | # per-function register layout | ||
190 | $inp="%rdx"; | ||
191 | $len="%rcx"; | ||
192 | $rem_8bit=$rem_4bit; | ||
193 | |||
194 | $code.=<<___; | ||
195 | .globl gcm_ghash_4bit | ||
196 | .type gcm_ghash_4bit,\@function,4 | ||
197 | .align 16 | ||
198 | gcm_ghash_4bit: | ||
199 | _CET_ENDBR | ||
200 | push %rbx | ||
201 | push %rbp | ||
202 | push %r12 | ||
203 | push %r13 | ||
204 | push %r14 | ||
205 | push %r15 | ||
206 | sub \$280,%rsp | ||
207 | .Lghash_prologue: | ||
208 | mov $inp,%r14 # reassign couple of args | ||
209 | mov $len,%r15 | ||
210 | ___ | ||
211 | { my $inp="%r14"; | ||
212 | my $dat="%edx"; | ||
213 | my $len="%r15"; | ||
214 | my @nhi=("%ebx","%ecx"); | ||
215 | my @rem=("%r12","%r13"); | ||
216 | my $Hshr4="%rbp"; | ||
217 | |||
218 | &sub ($Htbl,-128); # size optimization | ||
219 | &lea ($Hshr4,"16+128(%rsp)"); | ||
220 | { my @lo =($nlo,$nhi); | ||
221 | my @hi =($Zlo,$Zhi); | ||
222 | |||
223 | &xor ($dat,$dat); | ||
224 | for ($i=0,$j=-2;$i<18;$i++,$j++) { | ||
225 | &mov ("$j(%rsp)",&LB($dat)) if ($i>1); | ||
226 | &or ($lo[0],$tmp) if ($i>1); | ||
227 | &mov (&LB($dat),&LB($lo[1])) if ($i>0 && $i<17); | ||
228 | &shr ($lo[1],4) if ($i>0 && $i<17); | ||
229 | &mov ($tmp,$hi[1]) if ($i>0 && $i<17); | ||
230 | &shr ($hi[1],4) if ($i>0 && $i<17); | ||
231 | &mov ("8*$j($Hshr4)",$hi[0]) if ($i>1); | ||
232 | &mov ($hi[0],"16*$i+0-128($Htbl)") if ($i<16); | ||
233 | &shl (&LB($dat),4) if ($i>0 && $i<17); | ||
234 | &mov ("8*$j-128($Hshr4)",$lo[0]) if ($i>1); | ||
235 | &mov ($lo[0],"16*$i+8-128($Htbl)") if ($i<16); | ||
236 | &shl ($tmp,60) if ($i>0 && $i<17); | ||
237 | |||
238 | push (@lo,shift(@lo)); | ||
239 | push (@hi,shift(@hi)); | ||
240 | } | ||
241 | } | ||
242 | &add ($Htbl,-128); | ||
243 | &mov ($Zlo,"8($Xi)"); | ||
244 | &mov ($Zhi,"0($Xi)"); | ||
245 | &add ($len,$inp); # pointer to the end of data | ||
246 | &lea ($rem_8bit,".Lrem_8bit(%rip)"); | ||
247 | &jmp (".Louter_loop"); | ||
248 | |||
249 | $code.=".align 16\n.Louter_loop:\n"; | ||
250 | &xor ($Zhi,"($inp)"); | ||
251 | &mov ("%rdx","8($inp)"); | ||
252 | &lea ($inp,"16($inp)"); | ||
253 | &xor ("%rdx",$Zlo); | ||
254 | &mov ("($Xi)",$Zhi); | ||
255 | &mov ("8($Xi)","%rdx"); | ||
256 | &shr ("%rdx",32); | ||
257 | |||
258 | &xor ($nlo,$nlo); | ||
259 | &rol ($dat,8); | ||
260 | &mov (&LB($nlo),&LB($dat)); | ||
261 | &movz ($nhi[0],&LB($dat)); | ||
262 | &shl (&LB($nlo),4); | ||
263 | &shr ($nhi[0],4); | ||
264 | |||
265 | for ($j=11,$i=0;$i<15;$i++) { | ||
266 | &rol ($dat,8); | ||
267 | &xor ($Zlo,"8($Htbl,$nlo)") if ($i>0); | ||
268 | &xor ($Zhi,"($Htbl,$nlo)") if ($i>0); | ||
269 | &mov ($Zlo,"8($Htbl,$nlo)") if ($i==0); | ||
270 | &mov ($Zhi,"($Htbl,$nlo)") if ($i==0); | ||
271 | |||
272 | &mov (&LB($nlo),&LB($dat)); | ||
273 | &xor ($Zlo,$tmp) if ($i>0); | ||
274 | &movzw ($rem[1],"($rem_8bit,$rem[1],2)") if ($i>0); | ||
275 | |||
276 | &movz ($nhi[1],&LB($dat)); | ||
277 | &shl (&LB($nlo),4); | ||
278 | &movzb ($rem[0],"(%rsp,$nhi[0])"); | ||
279 | |||
280 | &shr ($nhi[1],4) if ($i<14); | ||
281 | &and ($nhi[1],0xf0) if ($i==14); | ||
282 | &shl ($rem[1],48) if ($i>0); | ||
283 | &xor ($rem[0],$Zlo); | ||
284 | |||
285 | &mov ($tmp,$Zhi); | ||
286 | &xor ($Zhi,$rem[1]) if ($i>0); | ||
287 | &shr ($Zlo,8); | ||
288 | |||
289 | &movz ($rem[0],&LB($rem[0])); | ||
290 | &mov ($dat,"$j($Xi)") if (--$j%4==0 && $j>=0); | ||
291 | &shr ($Zhi,8); | ||
292 | |||
293 | &xor ($Zlo,"-128($Hshr4,$nhi[0],8)"); | ||
294 | &shl ($tmp,56); | ||
295 | &xor ($Zhi,"($Hshr4,$nhi[0],8)"); | ||
296 | |||
297 | unshift (@nhi,pop(@nhi)); # "rotate" registers | ||
298 | unshift (@rem,pop(@rem)); | ||
299 | } | ||
300 | &movzw ($rem[1],"($rem_8bit,$rem[1],2)"); | ||
301 | &xor ($Zlo,"8($Htbl,$nlo)"); | ||
302 | &xor ($Zhi,"($Htbl,$nlo)"); | ||
303 | |||
304 | &shl ($rem[1],48); | ||
305 | &xor ($Zlo,$tmp); | ||
306 | |||
307 | &xor ($Zhi,$rem[1]); | ||
308 | &movz ($rem[0],&LB($Zlo)); | ||
309 | &shr ($Zlo,4); | ||
310 | |||
311 | &mov ($tmp,$Zhi); | ||
312 | &shl (&LB($rem[0]),4); | ||
313 | &shr ($Zhi,4); | ||
314 | |||
315 | &xor ($Zlo,"8($Htbl,$nhi[0])"); | ||
316 | &movzw ($rem[0],"($rem_8bit,$rem[0],2)"); | ||
317 | &shl ($tmp,60); | ||
318 | |||
319 | &xor ($Zhi,"($Htbl,$nhi[0])"); | ||
320 | &xor ($Zlo,$tmp); | ||
321 | &shl ($rem[0],48); | ||
322 | |||
323 | &bswap ($Zlo); | ||
324 | &xor ($Zhi,$rem[0]); | ||
325 | |||
326 | &bswap ($Zhi); | ||
327 | &cmp ($inp,$len); | ||
328 | &jb (".Louter_loop"); | ||
329 | } | ||
330 | $code.=<<___; | ||
331 | mov $Zlo,8($Xi) | ||
332 | mov $Zhi,($Xi) | ||
333 | |||
334 | lea 280(%rsp),%rsi | ||
335 | mov 0(%rsi),%r15 | ||
336 | mov 8(%rsi),%r14 | ||
337 | mov 16(%rsi),%r13 | ||
338 | mov 24(%rsi),%r12 | ||
339 | mov 32(%rsi),%rbp | ||
340 | mov 40(%rsi),%rbx | ||
341 | lea 48(%rsi),%rsp | ||
342 | .Lghash_epilogue: | ||
343 | ret | ||
344 | .size gcm_ghash_4bit,.-gcm_ghash_4bit | ||
345 | ___ | ||
346 | |||
347 | ###################################################################### | ||
348 | # PCLMULQDQ version. | ||
349 | |||
350 | @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") : # Win64 order | ||
351 | ("%rdi","%rsi","%rdx","%rcx"); # Unix order | ||
352 | |||
353 | ($Xi,$Xhi)=("%xmm0","%xmm1"); $Hkey="%xmm2"; | ||
354 | ($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5"); | ||
355 | |||
356 | sub clmul64x64_T2 { # minimal register pressure | ||
357 | my ($Xhi,$Xi,$Hkey,$modulo)=@_; | ||
358 | |||
359 | $code.=<<___ if (!defined($modulo)); | ||
360 | movdqa $Xi,$Xhi # | ||
361 | pshufd \$0b01001110,$Xi,$T1 | ||
362 | pshufd \$0b01001110,$Hkey,$T2 | ||
363 | pxor $Xi,$T1 # | ||
364 | pxor $Hkey,$T2 | ||
365 | ___ | ||
366 | $code.=<<___; | ||
367 | pclmulqdq \$0x00,$Hkey,$Xi ####### | ||
368 | pclmulqdq \$0x11,$Hkey,$Xhi ####### | ||
369 | pclmulqdq \$0x00,$T2,$T1 ####### | ||
370 | pxor $Xi,$T1 # | ||
371 | pxor $Xhi,$T1 # | ||
372 | |||
373 | movdqa $T1,$T2 # | ||
374 | psrldq \$8,$T1 | ||
375 | pslldq \$8,$T2 # | ||
376 | pxor $T1,$Xhi | ||
377 | pxor $T2,$Xi # | ||
378 | ___ | ||
379 | } | ||
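What the three pclmulqdq in the sub above compute is the usual Karatsuba split of one 128x128-bit carry-less product into three 64x64-bit ones. Spelled out, with '^' denoting XOR and all products carry-less:

    X = Xh*2^64 ^ Xl,   H = Hh*2^64 ^ Hl
    X*H = (Hh*Xh)*2^128
        ^ ((Hh^Hl)*(Xh^Xl) ^ Hh*Xh ^ Hl*Xl)*2^64
        ^ (Hl*Xl)

The 0x00 multiply leaves Hl*Xl in $Xi, the 0x11 multiply leaves Hh*Xh in $Xhi, and the multiply on the pre-XORed halves ($T1, $T2) produces the middle term; the final psrldq/pslldq pair splits that middle 128-bit word across the $Xhi:$Xi result.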
380 | |||
381 | sub reduction_alg9 { # 17/13 times faster than Intel version | ||
382 | my ($Xhi,$Xi) = @_; | ||
383 | |||
384 | $code.=<<___; | ||
385 | # 1st phase | ||
386 | movdqa $Xi,$T1 # | ||
387 | psllq \$1,$Xi | ||
388 | pxor $T1,$Xi # | ||
389 | psllq \$5,$Xi # | ||
390 | pxor $T1,$Xi # | ||
391 | psllq \$57,$Xi # | ||
392 | movdqa $Xi,$T2 # | ||
393 | pslldq \$8,$Xi | ||
394 | psrldq \$8,$T2 # | ||
395 | pxor $T1,$Xi | ||
396 | pxor $T2,$Xhi # | ||
397 | |||
398 | # 2nd phase | ||
399 | movdqa $Xi,$T2 | ||
400 | psrlq \$5,$Xi | ||
401 | pxor $T2,$Xi # | ||
402 | psrlq \$1,$Xi # | ||
403 | pxor $T2,$Xi # | ||
404 | pxor $Xhi,$T2 | ||
405 | psrlq \$1,$Xi # | ||
406 | pxor $T2,$Xi # | ||
407 | ___ | ||
408 | } | ||
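As an editorial reading aid for the shift counts above (following the standard description of this reduction): the 256-bit product is reduced modulo the GHASH polynomial g(x) = x^128 + x^7 + x^2 + x + 1, kept in bit-reflected form. The first phase accumulates (X << 57) ^ (X << 62) ^ (X << 63) within each 64-bit lane (psllq 1, then 5, then 57, with pxor of the saved original in between), and the second phase folds back with (X >> 1) ^ (X >> 2) ^ (X >> 7) (psrlq 5, 1, 1) before the high half $Xhi is XORed in; those shift counts are the reflected counterpart of the x^7 + x^2 + x + 1 tail of g(x).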
409 | |||
410 | { my ($Htbl,$Xip)=@_4args; | ||
411 | |||
412 | $code.=<<___; | ||
413 | .globl gcm_init_clmul | ||
414 | .type gcm_init_clmul,\@abi-omnipotent | ||
415 | .align 16 | ||
416 | gcm_init_clmul: | ||
417 | _CET_ENDBR | ||
418 | movdqu ($Xip),$Hkey | ||
419 | pshufd \$0b01001110,$Hkey,$Hkey # dword swap | ||
420 | |||
421 | # <<1 twist | ||
422 | pshufd \$0b11111111,$Hkey,$T2 # broadcast uppermost dword | ||
423 | movdqa $Hkey,$T1 | ||
424 | psllq \$1,$Hkey | ||
425 | pxor $T3,$T3 # | ||
426 | psrlq \$63,$T1 | ||
427 | pcmpgtd $T2,$T3 # broadcast carry bit | ||
428 | pslldq \$8,$T1 | ||
429 | por $T1,$Hkey # H<<=1 | ||
430 | |||
431 | # magic reduction | ||
432 | pand .L0x1c2_polynomial(%rip),$T3 | ||
433 | pxor $T3,$Hkey # if(carry) H^=0x1c2_polynomial | ||
434 | |||
435 | # calculate H^2 | ||
436 | movdqa $Hkey,$Xi | ||
437 | ___ | ||
438 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey); | ||
439 | &reduction_alg9 ($Xhi,$Xi); | ||
440 | $code.=<<___; | ||
441 | movdqu $Hkey,($Htbl) # save H | ||
442 | movdqu $Xi,16($Htbl) # save H^2 | ||
443 | ret | ||
444 | .size gcm_init_clmul,.-gcm_init_clmul | ||
445 | ___ | ||
446 | } | ||
447 | |||
448 | { my ($Xip,$Htbl)=@_4args; | ||
449 | |||
450 | $code.=<<___; | ||
451 | .globl gcm_gmult_clmul | ||
452 | .type gcm_gmult_clmul,\@abi-omnipotent | ||
453 | .align 16 | ||
454 | gcm_gmult_clmul: | ||
455 | _CET_ENDBR | ||
456 | movdqu ($Xip),$Xi | ||
457 | movdqa .Lbswap_mask(%rip),$T3 | ||
458 | movdqu ($Htbl),$Hkey | ||
459 | pshufb $T3,$Xi | ||
460 | ___ | ||
461 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey); | ||
462 | &reduction_alg9 ($Xhi,$Xi); | ||
463 | $code.=<<___; | ||
464 | pshufb $T3,$Xi | ||
465 | movdqu $Xi,($Xip) | ||
466 | ret | ||
467 | .size gcm_gmult_clmul,.-gcm_gmult_clmul | ||
468 | ___ | ||
469 | } | ||
470 | |||
471 | { my ($Xip,$Htbl,$inp,$len)=@_4args; | ||
472 | my $Xn="%xmm6"; | ||
473 | my $Xhn="%xmm7"; | ||
474 | my $Hkey2="%xmm8"; | ||
475 | my $T1n="%xmm9"; | ||
476 | my $T2n="%xmm10"; | ||
477 | |||
478 | $code.=<<___; | ||
479 | .globl gcm_ghash_clmul | ||
480 | .type gcm_ghash_clmul,\@abi-omnipotent | ||
481 | .align 16 | ||
482 | gcm_ghash_clmul: | ||
483 | _CET_ENDBR | ||
484 | ___ | ||
485 | $code.=<<___ if ($win64); | ||
486 | .LSEH_begin_gcm_ghash_clmul: | ||
487 | # I can't trust the assembler to use a specific encoding :-( | ||
488 | .byte 0x48,0x83,0xec,0x58 #sub \$0x58,%rsp | ||
489 | .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp) | ||
490 | .byte 0x0f,0x29,0x7c,0x24,0x10 #movdqa %xmm7,0x10(%rsp) | ||
491 | .byte 0x44,0x0f,0x29,0x44,0x24,0x20 #movaps %xmm8,0x20(%rsp) | ||
492 | .byte 0x44,0x0f,0x29,0x4c,0x24,0x30 #movaps %xmm9,0x30(%rsp) | ||
493 | .byte 0x44,0x0f,0x29,0x54,0x24,0x40 #movaps %xmm10,0x40(%rsp) | ||
494 | ___ | ||
495 | $code.=<<___; | ||
496 | movdqa .Lbswap_mask(%rip),$T3 | ||
497 | |||
498 | movdqu ($Xip),$Xi | ||
499 | movdqu ($Htbl),$Hkey | ||
500 | pshufb $T3,$Xi | ||
501 | |||
502 | sub \$0x10,$len | ||
503 | jz .Lodd_tail | ||
504 | |||
505 | movdqu 16($Htbl),$Hkey2 | ||
506 | ####### | ||
507 | # Xi+2 =[H*(Ii+1 + Xi+1)] mod P = | ||
508 | # [(H*Ii+1) + (H*Xi+1)] mod P = | ||
509 | # [(H*Ii+1) + H^2*(Ii+Xi)] mod P | ||
510 | # | ||
511 | movdqu ($inp),$T1 # Ii | ||
512 | movdqu 16($inp),$Xn # Ii+1 | ||
513 | pshufb $T3,$T1 | ||
514 | pshufb $T3,$Xn | ||
515 | pxor $T1,$Xi # Ii+Xi | ||
516 | ___ | ||
517 | &clmul64x64_T2 ($Xhn,$Xn,$Hkey); # H*Ii+1 | ||
518 | $code.=<<___; | ||
519 | movdqa $Xi,$Xhi # | ||
520 | pshufd \$0b01001110,$Xi,$T1 | ||
521 | pshufd \$0b01001110,$Hkey2,$T2 | ||
522 | pxor $Xi,$T1 # | ||
523 | pxor $Hkey2,$T2 | ||
524 | |||
525 | lea 32($inp),$inp # i+=2 | ||
526 | sub \$0x20,$len | ||
527 | jbe .Leven_tail | ||
528 | |||
529 | .Lmod_loop: | ||
530 | ___ | ||
531 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey2,1); # H^2*(Ii+Xi) | ||
532 | $code.=<<___; | ||
533 | movdqu ($inp),$T1 # Ii | ||
534 | pxor $Xn,$Xi # (H*Ii+1) + H^2*(Ii+Xi) | ||
535 | pxor $Xhn,$Xhi | ||
536 | |||
537 | movdqu 16($inp),$Xn # Ii+1 | ||
538 | pshufb $T3,$T1 | ||
539 | pshufb $T3,$Xn | ||
540 | |||
541 | movdqa $Xn,$Xhn # | ||
542 | pshufd \$0b01001110,$Xn,$T1n | ||
543 | pshufd \$0b01001110,$Hkey,$T2n | ||
544 | pxor $Xn,$T1n # | ||
545 | pxor $Hkey,$T2n | ||
546 | pxor $T1,$Xhi # "Ii+Xi", consume early | ||
547 | |||
548 | movdqa $Xi,$T1 # 1st phase | ||
549 | psllq \$1,$Xi | ||
550 | pxor $T1,$Xi # | ||
551 | psllq \$5,$Xi # | ||
552 | pxor $T1,$Xi # | ||
553 | pclmulqdq \$0x00,$Hkey,$Xn ####### | ||
554 | psllq \$57,$Xi # | ||
555 | movdqa $Xi,$T2 # | ||
556 | pslldq \$8,$Xi | ||
557 | psrldq \$8,$T2 # | ||
558 | pxor $T1,$Xi | ||
559 | pxor $T2,$Xhi # | ||
560 | |||
561 | pclmulqdq \$0x11,$Hkey,$Xhn ####### | ||
562 | movdqa $Xi,$T2 # 2nd phase | ||
563 | psrlq \$5,$Xi | ||
564 | pxor $T2,$Xi # | ||
565 | psrlq \$1,$Xi # | ||
566 | pxor $T2,$Xi # | ||
567 | pxor $Xhi,$T2 | ||
568 | psrlq \$1,$Xi # | ||
569 | pxor $T2,$Xi # | ||
570 | |||
571 | pclmulqdq \$0x00,$T2n,$T1n ####### | ||
572 | movdqa $Xi,$Xhi # | ||
573 | pshufd \$0b01001110,$Xi,$T1 | ||
574 | pshufd \$0b01001110,$Hkey2,$T2 | ||
575 | pxor $Xi,$T1 # | ||
576 | pxor $Hkey2,$T2 | ||
577 | |||
578 | pxor $Xn,$T1n # | ||
579 | pxor $Xhn,$T1n # | ||
580 | movdqa $T1n,$T2n # | ||
581 | psrldq \$8,$T1n | ||
582 | pslldq \$8,$T2n # | ||
583 | pxor $T1n,$Xhn | ||
584 | pxor $T2n,$Xn # | ||
585 | |||
586 | lea 32($inp),$inp | ||
587 | sub \$0x20,$len | ||
588 | ja .Lmod_loop | ||
589 | |||
590 | .Leven_tail: | ||
591 | ___ | ||
592 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey2,1); # H^2*(Ii+Xi) | ||
593 | $code.=<<___; | ||
594 | pxor $Xn,$Xi # (H*Ii+1) + H^2*(Ii+Xi) | ||
595 | pxor $Xhn,$Xhi | ||
596 | ___ | ||
597 | &reduction_alg9 ($Xhi,$Xi); | ||
598 | $code.=<<___; | ||
599 | test $len,$len | ||
600 | jnz .Ldone | ||
601 | |||
602 | .Lodd_tail: | ||
603 | movdqu ($inp),$T1 # Ii | ||
604 | pshufb $T3,$T1 | ||
605 | pxor $T1,$Xi # Ii+Xi | ||
606 | ___ | ||
607 | &clmul64x64_T2 ($Xhi,$Xi,$Hkey); # H*(Ii+Xi) | ||
608 | &reduction_alg9 ($Xhi,$Xi); | ||
609 | $code.=<<___; | ||
610 | .Ldone: | ||
611 | pshufb $T3,$Xi | ||
612 | movdqu $Xi,($Xip) | ||
613 | ___ | ||
614 | $code.=<<___ if ($win64); | ||
615 | movaps (%rsp),%xmm6 | ||
616 | movaps 0x10(%rsp),%xmm7 | ||
617 | movaps 0x20(%rsp),%xmm8 | ||
618 | movaps 0x30(%rsp),%xmm9 | ||
619 | movaps 0x40(%rsp),%xmm10 | ||
620 | add \$0x58,%rsp | ||
621 | ___ | ||
622 | $code.=<<___; | ||
623 | ret | ||
624 | .LSEH_end_gcm_ghash_clmul: | ||
625 | .size gcm_ghash_clmul,.-gcm_ghash_clmul | ||
626 | ___ | ||
627 | } | ||
628 | |||
629 | $code.=<<___; | ||
630 | .section .rodata | ||
631 | .align 64 | ||
632 | .Lbswap_mask: | ||
633 | .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 | ||
634 | .L0x1c2_polynomial: | ||
635 | .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 | ||
636 | .align 64 | ||
637 | .type .Lrem_4bit,\@object | ||
638 | .Lrem_4bit: | ||
639 | .long 0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16` | ||
640 | .long 0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16` | ||
641 | .long 0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16` | ||
642 | .long 0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16` | ||
643 | .type .Lrem_8bit,\@object | ||
644 | .Lrem_8bit: | ||
645 | .value 0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E | ||
646 | .value 0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E | ||
647 | .value 0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E | ||
648 | .value 0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E | ||
649 | .value 0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E | ||
650 | .value 0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E | ||
651 | .value 0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E | ||
652 | .value 0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E | ||
653 | .value 0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE | ||
654 | .value 0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE | ||
655 | .value 0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE | ||
656 | .value 0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE | ||
657 | .value 0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E | ||
658 | .value 0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E | ||
659 | .value 0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE | ||
660 | .value 0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE | ||
661 | .value 0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E | ||
662 | .value 0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E | ||
663 | .value 0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E | ||
664 | .value 0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E | ||
665 | .value 0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E | ||
666 | .value 0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E | ||
667 | .value 0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E | ||
668 | .value 0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E | ||
669 | .value 0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE | ||
670 | .value 0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE | ||
671 | .value 0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE | ||
672 | .value 0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE | ||
673 | .value 0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E | ||
674 | .value 0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E | ||
675 | .value 0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE | ||
676 | .value 0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE | ||
677 | .align 64 | ||
678 | .text | ||
679 | ___ | ||
680 | |||
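The .Lrem_8bit/.Lrem_4bit constants above are derivable rather than magic: entry i is the carry-less product of i with 0x1C2, the same constant spelled out in .L0x1c2_polynomial, with rem_4bit additionally shifted left by 4 so it lines up with a nibble rather than a byte (the <<16 in the .long pairs merely positions the constant within its 64-bit entry). A small editorial Perl sketch that regenerates them (this helper is not part of the original script):

    # Carry-less multiply of two small integers over GF(2).
    sub clmul16 {
        my ($a, $b) = @_;
        my $r = 0;
        for (my $i = 0; $a >> $i; $i++) {
            $r ^= $b << $i if ($a >> $i) & 1;
        }
        return $r;
    }

    my @rem_8bit = map { clmul16($_, 0x1C2)      } 0 .. 255;
    my @rem_4bit = map { clmul16($_, 0x1C2) << 4 } 0 .. 15;
    printf "0x%04X 0x%04X\n", $rem_8bit[2], $rem_4bit[2];   # 0x0384 0x3840, as in the tables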
681 | # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, | ||
682 | # CONTEXT *context,DISPATCHER_CONTEXT *disp) | ||
683 | if ($win64) { | ||
684 | $rec="%rcx"; | ||
685 | $frame="%rdx"; | ||
686 | $context="%r8"; | ||
687 | $disp="%r9"; | ||
688 | |||
689 | $code.=<<___; | ||
690 | .extern __imp_RtlVirtualUnwind | ||
691 | .type se_handler,\@abi-omnipotent | ||
692 | .align 16 | ||
693 | se_handler: | ||
694 | _CET_ENDBR | ||
695 | push %rsi | ||
696 | push %rdi | ||
697 | push %rbx | ||
698 | push %rbp | ||
699 | push %r12 | ||
700 | push %r13 | ||
701 | push %r14 | ||
702 | push %r15 | ||
703 | pushfq | ||
704 | sub \$64,%rsp | ||
705 | |||
706 | mov 120($context),%rax # pull context->Rax | ||
707 | mov 248($context),%rbx # pull context->Rip | ||
708 | |||
709 | mov 8($disp),%rsi # disp->ImageBase | ||
710 | mov 56($disp),%r11 # disp->HandlerData | ||
711 | |||
712 | mov 0(%r11),%r10d # HandlerData[0] | ||
713 | lea (%rsi,%r10),%r10 # prologue label | ||
714 | cmp %r10,%rbx # context->Rip<prologue label | ||
715 | jb .Lin_prologue | ||
716 | |||
717 | mov 152($context),%rax # pull context->Rsp | ||
718 | |||
719 | mov 4(%r11),%r10d # HandlerData[1] | ||
720 | lea (%rsi,%r10),%r10 # epilogue label | ||
721 | cmp %r10,%rbx # context->Rip>=epilogue label | ||
722 | jae .Lin_prologue | ||
723 | |||
724 | lea 24(%rax),%rax # adjust "rsp" | ||
725 | |||
726 | mov -8(%rax),%rbx | ||
727 | mov -16(%rax),%rbp | ||
728 | mov -24(%rax),%r12 | ||
729 | mov %rbx,144($context) # restore context->Rbx | ||
730 | mov %rbp,160($context) # restore context->Rbp | ||
731 | mov %r12,216($context) # restore context->R12 | ||
732 | |||
733 | .Lin_prologue: | ||
734 | mov 8(%rax),%rdi | ||
735 | mov 16(%rax),%rsi | ||
736 | mov %rax,152($context) # restore context->Rsp | ||
737 | mov %rsi,168($context) # restore context->Rsi | ||
738 | mov %rdi,176($context) # restore context->Rdi | ||
739 | |||
740 | mov 40($disp),%rdi # disp->ContextRecord | ||
741 | mov $context,%rsi # context | ||
742 | mov \$`1232/8`,%ecx # sizeof(CONTEXT) | ||
743 | .long 0xa548f3fc # cld; rep movsq | ||
744 | |||
745 | mov $disp,%rsi | ||
746 | xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER | ||
747 | mov 8(%rsi),%rdx # arg2, disp->ImageBase | ||
748 | mov 0(%rsi),%r8 # arg3, disp->ControlPc | ||
749 | mov 16(%rsi),%r9 # arg4, disp->FunctionEntry | ||
750 | mov 40(%rsi),%r10 # disp->ContextRecord | ||
751 | lea 56(%rsi),%r11 # &disp->HandlerData | ||
752 | lea 24(%rsi),%r12 # &disp->EstablisherFrame | ||
753 | mov %r10,32(%rsp) # arg5 | ||
754 | mov %r11,40(%rsp) # arg6 | ||
755 | mov %r12,48(%rsp) # arg7 | ||
756 | mov %rcx,56(%rsp) # arg8, (NULL) | ||
757 | call *__imp_RtlVirtualUnwind(%rip) | ||
758 | |||
759 | mov \$1,%eax # ExceptionContinueSearch | ||
760 | add \$64,%rsp | ||
761 | popfq | ||
762 | pop %r15 | ||
763 | pop %r14 | ||
764 | pop %r13 | ||
765 | pop %r12 | ||
766 | pop %rbp | ||
767 | pop %rbx | ||
768 | pop %rdi | ||
769 | pop %rsi | ||
770 | ret | ||
771 | .size se_handler,.-se_handler | ||
772 | |||
773 | .section .pdata | ||
774 | .align 4 | ||
775 | .rva .LSEH_begin_gcm_gmult_4bit | ||
776 | .rva .LSEH_end_gcm_gmult_4bit | ||
777 | .rva .LSEH_info_gcm_gmult_4bit | ||
778 | |||
779 | .rva .LSEH_begin_gcm_ghash_4bit | ||
780 | .rva .LSEH_end_gcm_ghash_4bit | ||
781 | .rva .LSEH_info_gcm_ghash_4bit | ||
782 | |||
783 | .rva .LSEH_begin_gcm_ghash_clmul | ||
784 | .rva .LSEH_end_gcm_ghash_clmul | ||
785 | .rva .LSEH_info_gcm_ghash_clmul | ||
786 | |||
787 | .section .xdata | ||
788 | .align 8 | ||
789 | .LSEH_info_gcm_gmult_4bit: | ||
790 | .byte 9,0,0,0 | ||
791 | .rva se_handler | ||
792 | .rva .Lgmult_prologue,.Lgmult_epilogue # HandlerData | ||
793 | .LSEH_info_gcm_ghash_4bit: | ||
794 | .byte 9,0,0,0 | ||
795 | .rva se_handler | ||
796 | .rva .Lghash_prologue,.Lghash_epilogue # HandlerData | ||
797 | .LSEH_info_gcm_ghash_clmul: | ||
798 | .byte 0x01,0x1f,0x0b,0x00 | ||
799 | .byte 0x1f,0xa8,0x04,0x00 #movaps 0x40(rsp),xmm10 | ||
800 | .byte 0x19,0x98,0x03,0x00 #movaps 0x30(rsp),xmm9 | ||
801 | .byte 0x13,0x88,0x02,0x00 #movaps 0x20(rsp),xmm8 | ||
802 | .byte 0x0d,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7 | ||
803 | .byte 0x08,0x68,0x00,0x00 #movaps (rsp),xmm6 | ||
804 | .byte 0x04,0xa2,0x00,0x00 #sub rsp,0x58 | ||
805 | ___ | ||
806 | } | ||
807 | |||
808 | $code =~ s/\`([^\`]*)\`/eval($1)/gem; | ||
809 | |||
810 | print $code; | ||
811 | |||
812 | close STDOUT; | ||