Diffstat (limited to 'src/lib/libcrypto/bn/asm/x86_64-gcc.c')
-rw-r--r--  src/lib/libcrypto/bn/asm/x86_64-gcc.c | 54
1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/src/lib/libcrypto/bn/asm/x86_64-gcc.c b/src/lib/libcrypto/bn/asm/x86_64-gcc.c
index 450e8e4322..7378344251 100644
--- a/src/lib/libcrypto/bn/asm/x86_64-gcc.c
+++ b/src/lib/libcrypto/bn/asm/x86_64-gcc.c
@@ -13,20 +13,42 @@
  * A. Well, that's because this code is basically a quick-n-dirty
  * proof-of-concept hack. As you can see it's implemented with
  * inline assembler, which means that you're bound to GCC and that
- * there must be a room for fine-tuning.
+ * there might be enough room for further improvement.
  *
  * Q. Why inline assembler?
- * A. x86_64 features own ABI I'm not familiar with. Which is why
- * I decided to let the compiler take care of subroutine
- * prologue/epilogue as well as register allocation.
+ * A. x86_64 features own ABI which I'm not familiar with. This is
+ * why I decided to let the compiler take care of subroutine
+ * prologue/epilogue as well as register allocation. For reference.
+ * Win64 implements different ABI for AMD64, different from Linux.
  *
  * Q. How much faster does it get?
- * A. Unfortunately people sitting on x86_64 hardware are prohibited
- * to disclose the performance numbers, so they (SuSE labs to be
- * specific) wouldn't tell me. However! Very similar coding technique
- * (reaching out for 128-bit result from 64x64-bit multiplication)
- * results in >3 times performance improvement on MIPS and I see no
- * reason why gain on x86_64 would be so much different:-)
+ * A. 'apps/openssl speed rsa dsa' output with no-asm:
+ *
+ *                     sign    verify    sign/s verify/s
+ * rsa  512 bits   0.0006s   0.0001s   1683.8  18456.2
+ * rsa 1024 bits   0.0028s   0.0002s    356.0   6407.0
+ * rsa 2048 bits   0.0172s   0.0005s     58.0   1957.8
+ * rsa 4096 bits   0.1155s   0.0018s      8.7    555.6
+ *                     sign    verify    sign/s verify/s
+ * dsa  512 bits   0.0005s   0.0006s   2100.8   1768.3
+ * dsa 1024 bits   0.0014s   0.0018s    692.3    559.2
+ * dsa 2048 bits   0.0049s   0.0061s    204.7    165.0
+ *
+ * 'apps/openssl speed rsa dsa' output with this module:
+ *
+ *                     sign    verify    sign/s verify/s
+ * rsa  512 bits   0.0004s   0.0000s   2767.1  33297.9
+ * rsa 1024 bits   0.0012s   0.0001s    867.4  14674.7
+ * rsa 2048 bits   0.0061s   0.0002s    164.0   5270.0
+ * rsa 4096 bits   0.0384s   0.0006s     26.1   1650.8
+ *                     sign    verify    sign/s verify/s
+ * dsa  512 bits   0.0002s   0.0003s   4442.2   3786.3
+ * dsa 1024 bits   0.0005s   0.0007s   1835.1   1497.4
+ * dsa 2048 bits   0.0016s   0.0020s    620.4    504.6
+ *
+ * For the reference. IA-32 assembler implementation performs
+ * very much like 64-bit code compiled with no-asm on the same
+ * machine.
  */
 
 #define BN_ULONG unsigned long
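[Editor's note: the deleted comment alluded to the technique this module is built on, namely reaching the full 128-bit result of a 64x64-bit multiplication in one step. A minimal sketch, assuming GCC-style inline asm on x86_64 (this is not the file's exact macro): mulq computes RDX:RAX = RAX * src, so a single instruction yields both halves of the product.

	#include <stdint.h>

	/* Sketch only: 64x64 -> 128-bit multiply via one mulq. */
	static inline void
	umul64wide(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
	{
		__asm__ ("mulq %3"
		    : "=a"(*lo), "=d"(*hi)	/* RAX -> low half, RDX -> high half  */
		    : "a"(a), "rm"(b)		/* RAX = a; b in a register or memory */
		    : "cc");
	}

On GCC and Clang the same result is available without asm as (unsigned __int128)a * b.]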
@@ -151,7 +173,7 @@ BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
 }
 
 BN_ULONG bn_add_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
-{ BN_ULONG ret,i;
+{ BN_ULONG ret=0,i=0;
 
 	if (n <= 0) return 0;
 
@@ -164,7 +186,7 @@ BN_ULONG bn_add_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
164 " leaq 1(%2),%2 \n" 186 " leaq 1(%2),%2 \n"
165 " loop 1b \n" 187 " loop 1b \n"
166 " sbbq %0,%0 \n" 188 " sbbq %0,%0 \n"
167 : "+a"(ret),"+c"(n),"+r"(i) 189 : "=&a"(ret),"+c"(n),"=&r"(i)
168 : "r"(rp),"r"(ap),"r"(bp) 190 : "r"(rp),"r"(ap),"r"(bp)
169 : "cc" 191 : "cc"
170 ); 192 );
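[Editor's note: the constraint change is the substantive fix here. With "+a"/"+r", ret and i were read-write operands, so GCC had to load their previously uninitialized values into the registers before the asm ran; "=&a"/"=&r" declares them write-only and early-clobbered, i.e. written before the input operands (rp, ap, bp) are done being read, so none of the inputs may share a register with them. The ret=0,i=0 initializers additionally silence uninitialized-use warnings. A toy example, hypothetical and not from this file, of why the '&' matters:

	/* The output is written (xorq) before the input is read (addq).
	 * Without '&', GCC could give 'a' and 'r' the same register, and
	 * xorq would destroy the input before addq could read it. */
	static unsigned long
	copy_via_zero(unsigned long a)
	{
		unsigned long r;
		__asm__ ("xorq %0,%0\n\t"	/* write %0 first ...        */
		         "addq %1,%0"		/* ... then read the input   */
		    : "=&r"(r)			/* write-only, early-clobber */
		    : "r"(a)
		    : "cc");
		return r;			/* r == a */
	}
]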
@@ -174,7 +196,7 @@ BN_ULONG bn_add_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
 
 #ifndef SIMICS
 BN_ULONG bn_sub_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
-{ BN_ULONG ret,i;
+{ BN_ULONG ret=0,i=0;
 
 	if (n <= 0) return 0;
 
@@ -187,7 +209,7 @@ BN_ULONG bn_sub_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
187 " leaq 1(%2),%2 \n" 209 " leaq 1(%2),%2 \n"
188 " loop 1b \n" 210 " loop 1b \n"
189 " sbbq %0,%0 \n" 211 " sbbq %0,%0 \n"
190 : "+a"(ret),"+c"(n),"+r"(i) 212 : "=&a"(ret),"+c"(n),"=&r"(i)
191 : "r"(rp),"r"(ap),"r"(bp) 213 : "r"(rp),"r"(ap),"r"(bp)
192 : "cc" 214 : "cc"
193 ); 215 );
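[Editor's note: for reference, the contract of the two patched primitives: each computes an n-word result, least significant word first, and returns the carry (bn_add_words) or borrow (bn_sub_words) out of the top word. A hypothetical caller, with illustrative values only:

	#include <stdio.h>

	#define BN_ULONG unsigned long

	BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp, int n);
	BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp, int n);

	int
	main(void)
	{
		BN_ULONG a[2] = { ~0UL, ~0UL };	/* 2^128 - 1 */
		BN_ULONG b[2] = { 1UL, 0UL };	/* 1 */
		BN_ULONG r[2];

		printf("carry=%lu\n",  bn_add_words(r, a, b, 2));  /* wraps: 1 */
		printf("borrow=%lu\n", bn_sub_words(r, b, a, 2));  /* underflows: 1 */
		return 0;
	}
]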
@@ -318,7 +340,6 @@ BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
 
 void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 	{
-	BN_ULONG bl,bh;
 	BN_ULONG t1,t2;
 	BN_ULONG c1,c2,c3;
 
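[Editor's note: bl and bh are leftovers from the generic C comba code, where each word of b is split into 32-bit halves to synthesize a wide multiply. Since this module obtains the full 128-bit product directly, the temporaries are dead, and the remaining hunks delete the same unused declaration from all four comba routines. A sketch of the accumulation step that makes the split unnecessary, assuming GCC's unsigned __int128 (the file itself uses inline asm instead); this is an assumed shape, not the file's exact macro:

	/* Fold a[i]*b[j] into the running three-word accumulator c0:c1:c2. */
	#define mul_add_c(a, b, c0, c1, c2)				\
		do {							\
			unsigned __int128 t =				\
			    (unsigned __int128)(a) * (b);		\
			BN_ULONG lo = (BN_ULONG)t;			\
			BN_ULONG hi = (BN_ULONG)(t >> 64);		\
			(c0) += lo; hi += ((c0) < lo);	/* carry out of c0 */ \
			(c1) += hi; (c2) += ((c1) < hi);/* carry out of c1 */ \
		} while (0)
]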
@@ -423,7 +444,6 @@ void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 
 void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 	{
-	BN_ULONG bl,bh;
 	BN_ULONG t1,t2;
 	BN_ULONG c1,c2,c3;
 
@@ -464,7 +484,6 @@ void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 
 void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
 	{
-	BN_ULONG bl,bh;
 	BN_ULONG t1,t2;
 	BN_ULONG c1,c2,c3;
 
@@ -541,7 +560,6 @@ void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
 
 void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
 	{
-	BN_ULONG bl,bh;
 	BN_ULONG t1,t2;
 	BN_ULONG c1,c2,c3;
 