author      miod <>  2014-04-17 21:17:12 +0000
committer   miod <>  2014-04-17 21:17:12 +0000
commit      aa8f085033b21b0ec0b70c18909ba27ef8752e51 (patch)
tree        da9109a7546d5e98b8647a37cc675d8b776d00ad /src
parent      e3644ed49258ff4a399142bf4ed0a7ba5f9bf76a (diff)
download    openbsd-aa8f085033b21b0ec0b70c18909ba27ef8752e51.tar.gz
            openbsd-aa8f085033b21b0ec0b70c18909ba27ef8752e51.tar.bz2
            openbsd-aa8f085033b21b0ec0b70c18909ba27ef8752e51.zip
Get rid of MS Visual C compiler and Intel C compiler specific defines.
Diffstat (limited to 'src')
26 files changed, 16 insertions, 476 deletions
diff --git a/src/lib/libcrypto/aes/aes_locl.h b/src/lib/libcrypto/aes/aes_locl.h
index 054b442d41..1d0e86331e 100644
--- a/src/lib/libcrypto/aes/aes_locl.h
+++ b/src/lib/libcrypto/aes/aes_locl.h
@@ -62,14 +62,8 @@
 #include <stdlib.h>
 #include <string.h>
 
-#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
-# define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
-# define GETU32(p) SWAP(*((u32 *)(p)))
-# define PUTU32(ct, st) { *((u32 *)(ct)) = SWAP((st)); }
-#else
-# define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
-# define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
-#endif
+#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
+#define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
 
 #ifdef AES_LONG
 typedef unsigned long u32;
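
The surviving GETU32/PUTU32 macros build the 32-bit word a byte at a time, so the big-endian result no longer depends on an unaligned 32-bit load plus the rotate-based SWAP() the deleted MSVC branch used. A minimal standalone sketch of how they behave (the u32/u8 typedefs and the main() driver are illustrative stand-ins, not code from the header):

    /* Editor's sketch, not part of the diff. */
    #include <stdio.h>

    typedef unsigned int u32;
    typedef unsigned char u8;

    #define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ \
                        ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
    #define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
                             (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }

    int main(void)
    {
        u8 buf[4] = { 0x01, 0x02, 0x03, 0x04 };
        u32 v = GETU32(buf);    /* 0x01020304 on any host byte order */

        PUTU32(buf, v + 1);     /* stores 0x01 0x02 0x03 0x05 back, big-endian */
        printf("%08x %02x%02x%02x%02x\n", v, buf[0], buf[1], buf[2], buf[3]);
        return 0;
    }
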
diff --git a/src/lib/libcrypto/aes/aes_x86core.c b/src/lib/libcrypto/aes/aes_x86core.c
index c5d17b3127..295ea22bb4 100644
--- a/src/lib/libcrypto/aes/aes_x86core.c
+++ b/src/lib/libcrypto/aes/aes_x86core.c
@@ -91,9 +91,7 @@ typedef unsigned long long u64;
 #endif
 
 #undef ROTATE
-#if defined(_MSC_VER) || defined(__ICC)
-# define ROTATE(a,n) _lrotl(a,n)
-#elif defined(__GNUC__) && __GNUC__>=2
+#if defined(__GNUC__) && __GNUC__>=2
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 # define ROTATE(a,n) ({ register unsigned int ret; \
 asm ( \
diff --git a/src/lib/libcrypto/bn/bn_lcl.h b/src/lib/libcrypto/bn/bn_lcl.h
index 9194e86b39..1208deb3cf 100644
--- a/src/lib/libcrypto/bn/bn_lcl.h
+++ b/src/lib/libcrypto/bn/bn_lcl.h
@@ -271,15 +271,6 @@ extern "C" {
 : "a"(a),"g"(b) \
 : "cc");
 # endif
-# elif (defined(_M_AMD64) || defined(_M_X64)) && defined(SIXTY_FOUR_BIT)
-# if defined(_MSC_VER) && _MSC_VER>=1400
-unsigned __int64 __umulh (unsigned __int64 a,unsigned __int64 b);
-unsigned __int64 _umul128 (unsigned __int64 a,unsigned __int64 b,
-unsigned __int64 *h);
-# pragma intrinsic(__umulh,_umul128)
-# define BN_UMULT_HIGH(a,b) __umulh((a),(b))
-# define BN_UMULT_LOHI(low,high,a,b) ((low)=_umul128((a),(b),&(high)))
-# endif
 # elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
 # if defined(__GNUC__) && __GNUC__>=2
 # if __GNUC__>=4 && __GNUC_MINOR__>=4 /* "h" constraint is no more since 4.4 */
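
The lines dropped from bn_lcl.h were the MSVC-on-amd64 definitions of BN_UMULT_HIGH and BN_UMULT_LOHI, which mapped onto the __umulh()/_umul128() intrinsics, i.e. the upper and lower halves of a 64x64 -> 128-bit product. A hedged sketch of the same quantities, assuming a compiler with unsigned __int128 such as GCC or Clang (the umult_high helper is illustrative only, not part of the tree):

    /* Editor's sketch: high/low halves of a 64x64 -> 128-bit multiply. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t umult_high(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);    /* high 64 bits */
    }

    int main(void)
    {
        uint64_t a = 0xffffffffffffffffULL, b = 2;
        uint64_t hi = umult_high(a, b);     /* 0x1 */
        uint64_t lo = a * b;                /* 0xfffffffffffffffe, low 64 bits */

        printf("%llx %llx\n", (unsigned long long)hi, (unsigned long long)lo);
        return 0;
    }
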
diff --git a/src/lib/libcrypto/camellia/camellia.c b/src/lib/libcrypto/camellia/camellia.c
index 75fc8991c0..da708bdf61 100644
--- a/src/lib/libcrypto/camellia/camellia.c
+++ b/src/lib/libcrypto/camellia/camellia.c
@@ -88,17 +88,7 @@
 
 /* 32-bit rotations */
 #if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
-# define RightRotate(x, s) _lrotr(x, s)
-# define LeftRotate(x, s) _lrotl(x, s)
-# if _MSC_VER >= 1400
-# define SWAP(x) _byteswap_ulong(x)
-# else
-# define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
-# endif
-# define GETU32(p) SWAP(*((u32 *)(p)))
-# define PUTU32(p,v) (*((u32 *)(p)) = SWAP((v)))
-# elif defined(__GNUC__) && __GNUC__>=2
+# if defined(__GNUC__) && __GNUC__>=2
 # if defined(__i386) || defined(__x86_64)
 # define RightRotate(x,s) ({u32 ret; asm ("rorl %1,%0":"=r"(ret):"I"(s),"0"(x):"cc"); ret; })
 # define LeftRotate(x,s) ({u32 ret; asm ("roll %1,%0":"=r"(ret):"I"(s),"0"(x):"cc"); ret; })
diff --git a/src/lib/libcrypto/cast/cast_lcl.h b/src/lib/libcrypto/cast/cast_lcl.h
index e756021a33..cf0ca9e607 100644
--- a/src/lib/libcrypto/cast/cast_lcl.h
+++ b/src/lib/libcrypto/cast/cast_lcl.h
@@ -152,11 +152,7 @@
 *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
 *((c)++)=(unsigned char)(((l) )&0xff))
 
-#if defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)
-#define ROTL(a,n) (_lrotl(a,n))
-#else
 #define ROTL(a,n) ((((a)<<(n))&0xffffffffL)|((a)>>(32-(n))))
-#endif
 
 #define C_M 0x3fc
 #define C_0 22L
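
With the Win32 branch gone, ROTL is always the plain shift/or expression; the removed lines merely spelled the same 32-bit left rotation with MSVC's _lrotl(). A small sketch of that equivalence, keeping the rotate count in the 1..31 range the macro expects:

    /* Editor's sketch: the portable ROTL kept by this diff. */
    #include <assert.h>

    #define ROTL(a,n) ((((a)<<(n))&0xffffffffL)|((a)>>(32-(n))))

    int main(void)
    {
        unsigned long x = 0x80000001UL;

        assert(ROTL(x, 1) == 0x00000003UL);     /* top bit wraps around to bit 0 */
        assert(ROTL(x, 8) == 0x00000180UL);
        return 0;
    }
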
diff --git a/src/lib/libcrypto/des/des_locl.h b/src/lib/libcrypto/des/des_locl.h
index cf7811041e..bbae457377 100644
--- a/src/lib/libcrypto/des/des_locl.h
+++ b/src/lib/libcrypto/des/des_locl.h
@@ -152,9 +152,7 @@
 } \
 }
 
-#if (defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)) || defined(__ICC)
-#define ROTATE(a,n) (_lrotr(a,n))
-#elif defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 # define ROTATE(a,n) ({ register unsigned int ret; \
 asm ("rorl %1,%0" \
diff --git a/src/lib/libcrypto/engine/eng_padlock.c b/src/lib/libcrypto/engine/eng_padlock.c
index d1fc8d9315..d5d9a16bf2 100644
--- a/src/lib/libcrypto/engine/eng_padlock.c
+++ b/src/lib/libcrypto/engine/eng_padlock.c
@@ -101,8 +101,7 @@
 compiler choice is limited to GCC and Microsoft C. */
 #undef COMPILE_HW_PADLOCK
 #if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM)
-# if (defined(__GNUC__) && (defined(__i386__) || defined(__i386))) || \
-(defined(_MSC_VER) && defined(_M_IX86))
+# if (defined(__GNUC__) && (defined(__i386__) || defined(__i386)))
 # define COMPILE_HW_PADLOCK
 # endif
 #endif
@@ -499,136 +498,6 @@ padlock_memcpy(void *dst,const void *src,size_t n)
 
 return dst;
 }
-
-#elif defined(_MSC_VER)
-/*
-* Unlike GCC these are real functions. In order to minimize impact
-* on performance we adhere to __fastcall calling convention in
-* order to get two first arguments passed through %ecx and %edx.
-* Which kind of suits very well, as instructions in question use
-* both %ecx and %edx as input:-)
-*/
-#define REP_XCRYPT(code) \
-_asm _emit 0xf3 \
-_asm _emit 0x0f _asm _emit 0xa7 \
-_asm _emit code
-
-/* BIG FAT WARNING:
-* The offsets used with 'lea' instructions
-* describe items of the 'padlock_cipher_data'
-* structure.
-*/
-#define PADLOCK_XCRYPT_ASM(name,code) \
-static void * __fastcall \
-name (size_t cnt, void *cdata, \
-void *outp, const void *inp) \
-{ _asm mov eax,edx \
-_asm lea edx,[eax+16] \
-_asm lea ebx,[eax+32] \
-_asm mov edi,outp \
-_asm mov esi,inp \
-REP_XCRYPT(code) \
-}
-
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb,0xc8)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc,0xd0)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb,0xe0)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb,0xe8)
-
-static int __fastcall
-padlock_xstore(void *outp,unsigned int code)
-{ _asm mov edi,ecx
-_asm _emit 0x0f _asm _emit 0xa7 _asm _emit 0xc0
-}
-
-static void __fastcall
-padlock_reload_key(void)
-{ _asm pushfd _asm popfd }
-
-static void __fastcall
-padlock_verify_context(void *cdata)
-{ _asm {
-pushfd
-bt DWORD PTR[esp],30
-jnc skip
-cmp ecx,padlock_saved_context
-je skip
-popfd
-sub esp,4
-skip: add esp,4
-mov padlock_saved_context,ecx
-}
-}
-
-static int
-padlock_available(void)
-{ _asm {
-pushfd
-pop eax
-mov ecx,eax
-xor eax,1<<21
-push eax
-popfd
-pushfd
-pop eax
-xor eax,ecx
-bt eax,21
-jnc noluck
-mov eax,0
-cpuid
-xor eax,eax
-cmp ebx,'tneC'
-jne noluck
-cmp edx,'Hrua'
-jne noluck
-cmp ecx,'slua'
-jne noluck
-mov eax,0xC0000000
-cpuid
-mov edx,eax
-xor eax,eax
-cmp edx,0xC0000001
-jb noluck
-mov eax,0xC0000001
-cpuid
-xor eax,eax
-bt edx,6
-jnc skip_a
-bt edx,7
-jnc skip_a
-mov padlock_use_ace,1
-inc eax
-skip_a: bt edx,2
-jnc skip_r
-bt edx,3
-jnc skip_r
-mov padlock_use_rng,1
-inc eax
-skip_r:
-noluck:
-}
-}
-
-static void __fastcall
-padlock_bswapl(void *key)
-{ _asm {
-pushfd
-cld
-mov esi,ecx
-mov edi,ecx
-mov ecx,60
-up: lodsd
-bswap eax
-stosd
-loop up
-popfd
-}
-}
-
-/* MS actually specifies status of Direction Flag and compiler even
-* manages to compile following as 'rep movsd' all by itself...
-*/
-#define padlock_memcpy(o,i,n) ((unsigned char *)memcpy((o),(i),(n)&~3U))
 #endif
 
 /* ===== AES encryption/decryption ===== */
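
The large _MSC_VER section removed from eng_padlock.c was the MASM counterpart of the GCC inline-assembly helpers kept above it: __fastcall wrappers emitting the xcrypt/xstore opcodes, plus a padlock_available() routine that compares the CPUID vendor registers against 'tneC'/'Hrua'/'slua' (the string "CentaurHauls") before reading the 0xC0000001 feature leaf. A rough, GCC-only sketch of that detection step, using <cpuid.h> rather than anything in this tree:

    /* Editor's sketch: mirrors the deleted padlock_available() probe. */
    #include <cpuid.h>
    #include <string.h>

    static int padlock_probe(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 0;
        memcpy(vendor + 0, &ebx, 4);    /* "Cent" */
        memcpy(vendor + 4, &edx, 4);    /* "aurH" */
        memcpy(vendor + 8, &ecx, 4);    /* "auls" */
        vendor[12] = '\0';
        if (strcmp(vendor, "CentaurHauls") != 0)
            return 0;
        __cpuid(0xC0000000, eax, ebx, ecx, edx);    /* highest Centaur leaf */
        if (eax < 0xC0000001)
            return 0;
        __cpuid(0xC0000001, eax, ebx, ecx, edx);    /* PadLock feature flags */
        return (edx & 0xC0) == 0xC0;    /* bits 6/7: ACE present and enabled */
    }
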
diff --git a/src/lib/libcrypto/modes/modes_lcl.h b/src/lib/libcrypto/modes/modes_lcl.h
index 9d83e12844..b32c1b43c5 100644
--- a/src/lib/libcrypto/modes/modes_lcl.h
+++ b/src/lib/libcrypto/modes/modes_lcl.h
@@ -60,18 +60,6 @@ typedef unsigned char u8;
 : "=r"(ret) : "r"((u32)(x))); \
 ret; })
 # endif
-#elif defined(_MSC_VER)
-# if _MSC_VER>=1300
-# pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
-# define BSWAP8(x) _byteswap_uint64((u64)(x))
-# define BSWAP4(x) _byteswap_ulong((u32)(x))
-# elif defined(_M_IX86)
-__inline u32 _bswap4(u32 val) {
-_asm mov eax,val
-_asm bswap eax
-}
-# define BSWAP4(x) _bswap4(x)
-# endif
 #endif
 #endif
 
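
Only the MSVC spelling of the byte swaps disappears from modes_lcl.h (intrinsics for _MSC_VER>=1300, an _asm bswap fallback for older 32-bit compilers); the GCC inline-asm BSWAP4/BSWAP8 definitions immediately above the deleted block stay. For reference, a portable shift-based swap and the GCC builtin produce the same value (sketch, not code from the header):

    /* Editor's sketch: 32-bit byte swap by shifting. */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t bswap4(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
        assert(bswap4(0x11223344U) == 0x44332211U);
    #if defined(__GNUC__)
        assert(__builtin_bswap32(0x11223344U) == 0x44332211U);
    #endif
        return 0;
    }
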
diff --git a/src/lib/libcrypto/rc2/rc2_skey.c b/src/lib/libcrypto/rc2/rc2_skey.c
index 26b8dd63f6..56e47845a4 100644
--- a/src/lib/libcrypto/rc2/rc2_skey.c
+++ b/src/lib/libcrypto/rc2/rc2_skey.c
@@ -85,10 +85,6 @@ static const unsigned char key_table[256]={
 0xfe,0x7f,0xc1,0xad,
 };
 
-#if defined(_MSC_VER) && defined(_ARM_)
-#pragma optimize("g",off)
-#endif
-
 /* It has come to my attention that there are 2 versions of the RC2
 * key schedule. One which is normal, and anther which has a hook to
 * use a reduced key length.
@@ -140,7 +136,3 @@ void RC2_set_key(RC2_KEY *key, int len, const unsigned char *data, int bits)
 for (i=127; i>=0; i-=2)
 *(ki--)=((k[i]<<8)|k[i-1])&0xffff;
 }
-
-#if defined(_MSC_VER)
-#pragma optimize("",on)
-#endif
diff --git a/src/lib/libcrypto/rc5/rc5_locl.h b/src/lib/libcrypto/rc5/rc5_locl.h
index d337f73fad..314ce8909a 100644
--- a/src/lib/libcrypto/rc5/rc5_locl.h
+++ b/src/lib/libcrypto/rc5/rc5_locl.h
@@ -146,10 +146,7 @@
 *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
 *((c)++)=(unsigned char)(((l) )&0xff))
 
-#if (defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)) || defined(__ICC)
-#define ROTATE_l32(a,n) _lrotl(a,n)
-#define ROTATE_r32(a,n) _lrotr(a,n)
-#elif defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 # define ROTATE_l32(a,n) ({ register unsigned int ret; \
 asm ("roll %%cl,%0" \
diff --git a/src/lib/libcrypto/rsa/rsa_pss.c b/src/lib/libcrypto/rsa/rsa_pss.c
index 75e8c18533..bd2fde07d4 100644
--- a/src/lib/libcrypto/rsa/rsa_pss.c
+++ b/src/lib/libcrypto/rsa/rsa_pss.c
@@ -66,10 +66,6 @@
 
 static const unsigned char zeroes[] = {0,0,0,0,0,0,0,0};
 
-#if defined(_MSC_VER) && defined(_ARM_)
-#pragma optimize("g", off)
-#endif
-
 int RSA_verify_PKCS1_PSS(RSA *rsa, const unsigned char *mHash,
 const EVP_MD *Hash, const unsigned char *EM, int sLen)
 {
@@ -294,7 +290,3 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM,
 return ret;
 
 }
-
-#if defined(_MSC_VER)
-#pragma optimize("",on)
-#endif
diff --git a/src/lib/libcrypto/sha/sha512.c b/src/lib/libcrypto/sha/sha512.c
index 32bfecbf9b..c92f18e418 100644
--- a/src/lib/libcrypto/sha/sha512.c
+++ b/src/lib/libcrypto/sha/sha512.c
@@ -346,36 +346,6 @@ static const SHA_LONG64 K512[80] = {
 : "=r"(ret) \
 : "r"(a),"K"(n)); ret; })
 # endif
-# elif defined(_MSC_VER)
-# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
-# pragma intrinsic(_rotr64)
-# define ROTR(a,n) _rotr64((a),n)
-# endif
-# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(I386_ONLY)
-static SHA_LONG64 __fastcall __pull64be(const void *x)
-{ _asm mov edx, [ecx + 0]
-_asm mov eax, [ecx + 4]
-_asm xchg dh,dl
-_asm xchg ah,al
-_asm rol edx,16
-_asm rol eax,16
-_asm xchg dh,dl
-_asm xchg ah,al
-}
-# else
-static SHA_LONG64 __fastcall __pull64be(const void *x)
-{ _asm mov edx, [ecx + 0]
-_asm mov eax, [ecx + 4]
-_asm bswap edx
-_asm bswap eax
-}
-# endif
-# define PULL64(x) __pull64be(&(x))
-# if _MSC_VER<=1200
-# pragma inline_depth(0)
-# endif
-# endif
 # endif
 #endif
 
diff --git a/src/lib/libcrypto/whrlpool/wp_block.c b/src/lib/libcrypto/whrlpool/wp_block.c
index ce977083ad..fadad01401 100644
--- a/src/lib/libcrypto/whrlpool/wp_block.c
+++ b/src/lib/libcrypto/whrlpool/wp_block.c
@@ -77,12 +77,7 @@ typedef unsigned long long u64;
 #endif
 
 #undef ROTATE
-#if defined(_MSC_VER)
-# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
-# pragma intrinsic(_rotl64)
-# define ROTATE(a,n) _rotl64((a),n)
-# endif
-#elif defined(__GNUC__) && __GNUC__>=2
+#if defined(__GNUC__) && __GNUC__>=2
 # if defined(__x86_64) || defined(__x86_64__)
 # define ROTATE(a,n) ({ u64 ret; asm ("rolq %1,%0" \
 : "=r"(ret) : "J"(n),"0"(a) : "cc"); ret; })
diff --git a/src/lib/libssl/src/crypto/aes/aes_locl.h b/src/lib/libssl/src/crypto/aes/aes_locl.h
index 054b442d41..1d0e86331e 100644
--- a/src/lib/libssl/src/crypto/aes/aes_locl.h
+++ b/src/lib/libssl/src/crypto/aes/aes_locl.h
@@ -62,14 +62,8 @@
 #include <stdlib.h>
 #include <string.h>
 
-#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
-# define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
-# define GETU32(p) SWAP(*((u32 *)(p)))
-# define PUTU32(ct, st) { *((u32 *)(ct)) = SWAP((st)); }
-#else
-# define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
-# define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
-#endif
+#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
+#define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
 
 #ifdef AES_LONG
 typedef unsigned long u32;
diff --git a/src/lib/libssl/src/crypto/aes/aes_x86core.c b/src/lib/libssl/src/crypto/aes/aes_x86core.c
index c5d17b3127..295ea22bb4 100644
--- a/src/lib/libssl/src/crypto/aes/aes_x86core.c
+++ b/src/lib/libssl/src/crypto/aes/aes_x86core.c
@@ -91,9 +91,7 @@ typedef unsigned long long u64;
 #endif
 
 #undef ROTATE
-#if defined(_MSC_VER) || defined(__ICC)
-# define ROTATE(a,n) _lrotl(a,n)
-#elif defined(__GNUC__) && __GNUC__>=2
+#if defined(__GNUC__) && __GNUC__>=2
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 # define ROTATE(a,n) ({ register unsigned int ret; \
 asm ( \
diff --git a/src/lib/libssl/src/crypto/bn/bn_lcl.h b/src/lib/libssl/src/crypto/bn/bn_lcl.h
index 9194e86b39..1208deb3cf 100644
--- a/src/lib/libssl/src/crypto/bn/bn_lcl.h
+++ b/src/lib/libssl/src/crypto/bn/bn_lcl.h
@@ -271,15 +271,6 @@ extern "C" {
 : "a"(a),"g"(b) \
 : "cc");
 # endif
-# elif (defined(_M_AMD64) || defined(_M_X64)) && defined(SIXTY_FOUR_BIT)
-# if defined(_MSC_VER) && _MSC_VER>=1400
-unsigned __int64 __umulh (unsigned __int64 a,unsigned __int64 b);
-unsigned __int64 _umul128 (unsigned __int64 a,unsigned __int64 b,
-unsigned __int64 *h);
-# pragma intrinsic(__umulh,_umul128)
-# define BN_UMULT_HIGH(a,b) __umulh((a),(b))
-# define BN_UMULT_LOHI(low,high,a,b) ((low)=_umul128((a),(b),&(high)))
-# endif
 # elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
 # if defined(__GNUC__) && __GNUC__>=2
 # if __GNUC__>=4 && __GNUC_MINOR__>=4 /* "h" constraint is no more since 4.4 */
diff --git a/src/lib/libssl/src/crypto/camellia/camellia.c b/src/lib/libssl/src/crypto/camellia/camellia.c
index 75fc8991c0..da708bdf61 100644
--- a/src/lib/libssl/src/crypto/camellia/camellia.c
+++ b/src/lib/libssl/src/crypto/camellia/camellia.c
@@ -88,17 +88,7 @@
 
 /* 32-bit rotations */
 #if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
-# define RightRotate(x, s) _lrotr(x, s)
-# define LeftRotate(x, s) _lrotl(x, s)
-# if _MSC_VER >= 1400
-# define SWAP(x) _byteswap_ulong(x)
-# else
-# define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
-# endif
-# define GETU32(p) SWAP(*((u32 *)(p)))
-# define PUTU32(p,v) (*((u32 *)(p)) = SWAP((v)))
-# elif defined(__GNUC__) && __GNUC__>=2
+# if defined(__GNUC__) && __GNUC__>=2
 # if defined(__i386) || defined(__x86_64)
 # define RightRotate(x,s) ({u32 ret; asm ("rorl %1,%0":"=r"(ret):"I"(s),"0"(x):"cc"); ret; })
 # define LeftRotate(x,s) ({u32 ret; asm ("roll %1,%0":"=r"(ret):"I"(s),"0"(x):"cc"); ret; })
diff --git a/src/lib/libssl/src/crypto/cast/cast_lcl.h b/src/lib/libssl/src/crypto/cast/cast_lcl.h
index e756021a33..cf0ca9e607 100644
--- a/src/lib/libssl/src/crypto/cast/cast_lcl.h
+++ b/src/lib/libssl/src/crypto/cast/cast_lcl.h
@@ -152,11 +152,7 @@
 *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
 *((c)++)=(unsigned char)(((l) )&0xff))
 
-#if defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)
-#define ROTL(a,n) (_lrotl(a,n))
-#else
 #define ROTL(a,n) ((((a)<<(n))&0xffffffffL)|((a)>>(32-(n))))
-#endif
 
 #define C_M 0x3fc
 #define C_0 22L
diff --git a/src/lib/libssl/src/crypto/des/des_locl.h b/src/lib/libssl/src/crypto/des/des_locl.h
index cf7811041e..bbae457377 100644
--- a/src/lib/libssl/src/crypto/des/des_locl.h
+++ b/src/lib/libssl/src/crypto/des/des_locl.h
@@ -152,9 +152,7 @@
 } \
 }
 
-#if (defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)) || defined(__ICC)
-#define ROTATE(a,n) (_lrotr(a,n))
-#elif defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 # define ROTATE(a,n) ({ register unsigned int ret; \
 asm ("rorl %1,%0" \
diff --git a/src/lib/libssl/src/crypto/engine/eng_padlock.c b/src/lib/libssl/src/crypto/engine/eng_padlock.c
index d1fc8d9315..d5d9a16bf2 100644
--- a/src/lib/libssl/src/crypto/engine/eng_padlock.c
+++ b/src/lib/libssl/src/crypto/engine/eng_padlock.c
@@ -101,8 +101,7 @@
 compiler choice is limited to GCC and Microsoft C. */
 #undef COMPILE_HW_PADLOCK
 #if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM)
-# if (defined(__GNUC__) && (defined(__i386__) || defined(__i386))) || \
-(defined(_MSC_VER) && defined(_M_IX86))
+# if (defined(__GNUC__) && (defined(__i386__) || defined(__i386)))
 # define COMPILE_HW_PADLOCK
 # endif
 #endif
@@ -499,136 +498,6 @@ padlock_memcpy(void *dst,const void *src,size_t n)
 
 return dst;
 }
-
-#elif defined(_MSC_VER)
-/*
-* Unlike GCC these are real functions. In order to minimize impact
-* on performance we adhere to __fastcall calling convention in
-* order to get two first arguments passed through %ecx and %edx.
-* Which kind of suits very well, as instructions in question use
-* both %ecx and %edx as input:-)
-*/
-#define REP_XCRYPT(code) \
-_asm _emit 0xf3 \
-_asm _emit 0x0f _asm _emit 0xa7 \
-_asm _emit code
-
-/* BIG FAT WARNING:
-* The offsets used with 'lea' instructions
-* describe items of the 'padlock_cipher_data'
-* structure.
-*/
-#define PADLOCK_XCRYPT_ASM(name,code) \
-static void * __fastcall \
-name (size_t cnt, void *cdata, \
-void *outp, const void *inp) \
-{ _asm mov eax,edx \
-_asm lea edx,[eax+16] \
-_asm lea ebx,[eax+32] \
-_asm mov edi,outp \
-_asm mov esi,inp \
-REP_XCRYPT(code) \
-}
-
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb,0xc8)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc,0xd0)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb,0xe0)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb,0xe8)
-
-static int __fastcall
-padlock_xstore(void *outp,unsigned int code)
-{ _asm mov edi,ecx
-_asm _emit 0x0f _asm _emit 0xa7 _asm _emit 0xc0
-}
-
-static void __fastcall
-padlock_reload_key(void)
-{ _asm pushfd _asm popfd }
-
-static void __fastcall
-padlock_verify_context(void *cdata)
-{ _asm {
-pushfd
-bt DWORD PTR[esp],30
-jnc skip
-cmp ecx,padlock_saved_context
-je skip
-popfd
-sub esp,4
-skip: add esp,4
-mov padlock_saved_context,ecx
-}
-}
-
-static int
-padlock_available(void)
-{ _asm {
-pushfd
-pop eax
-mov ecx,eax
-xor eax,1<<21
-push eax
-popfd
-pushfd
-pop eax
-xor eax,ecx
-bt eax,21
-jnc noluck
-mov eax,0
-cpuid
-xor eax,eax
-cmp ebx,'tneC'
-jne noluck
-cmp edx,'Hrua'
-jne noluck
-cmp ecx,'slua'
-jne noluck
-mov eax,0xC0000000
-cpuid
-mov edx,eax
-xor eax,eax
-cmp edx,0xC0000001
-jb noluck
-mov eax,0xC0000001
-cpuid
-xor eax,eax
-bt edx,6
-jnc skip_a
-bt edx,7
-jnc skip_a
-mov padlock_use_ace,1
-inc eax
-skip_a: bt edx,2
-jnc skip_r
-bt edx,3
-jnc skip_r
-mov padlock_use_rng,1
-inc eax
-skip_r:
-noluck:
-}
-}
-
-static void __fastcall
-padlock_bswapl(void *key)
-{ _asm {
-pushfd
-cld
-mov esi,ecx
-mov edi,ecx
-mov ecx,60
-up: lodsd
-bswap eax
-stosd
-loop up
-popfd
-}
-}
-
-/* MS actually specifies status of Direction Flag and compiler even
-* manages to compile following as 'rep movsd' all by itself...
-*/
-#define padlock_memcpy(o,i,n) ((unsigned char *)memcpy((o),(i),(n)&~3U))
 #endif
 
 /* ===== AES encryption/decryption ===== */
diff --git a/src/lib/libssl/src/crypto/modes/modes_lcl.h b/src/lib/libssl/src/crypto/modes/modes_lcl.h
index 9d83e12844..b32c1b43c5 100644
--- a/src/lib/libssl/src/crypto/modes/modes_lcl.h
+++ b/src/lib/libssl/src/crypto/modes/modes_lcl.h
@@ -60,18 +60,6 @@ typedef unsigned char u8;
 : "=r"(ret) : "r"((u32)(x))); \
 ret; })
 # endif
-#elif defined(_MSC_VER)
-# if _MSC_VER>=1300
-# pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
-# define BSWAP8(x) _byteswap_uint64((u64)(x))
-# define BSWAP4(x) _byteswap_ulong((u32)(x))
-# elif defined(_M_IX86)
-__inline u32 _bswap4(u32 val) {
-_asm mov eax,val
-_asm bswap eax
-}
-# define BSWAP4(x) _bswap4(x)
-# endif
 #endif
 #endif
 
diff --git a/src/lib/libssl/src/crypto/rc2/rc2_skey.c b/src/lib/libssl/src/crypto/rc2/rc2_skey.c
index 26b8dd63f6..56e47845a4 100644
--- a/src/lib/libssl/src/crypto/rc2/rc2_skey.c
+++ b/src/lib/libssl/src/crypto/rc2/rc2_skey.c
@@ -85,10 +85,6 @@ static const unsigned char key_table[256]={
 0xfe,0x7f,0xc1,0xad,
 };
 
-#if defined(_MSC_VER) && defined(_ARM_)
-#pragma optimize("g",off)
-#endif
-
 /* It has come to my attention that there are 2 versions of the RC2
 * key schedule. One which is normal, and anther which has a hook to
 * use a reduced key length.
@@ -140,7 +136,3 @@ void RC2_set_key(RC2_KEY *key, int len, const unsigned char *data, int bits)
 for (i=127; i>=0; i-=2)
 *(ki--)=((k[i]<<8)|k[i-1])&0xffff;
 }
-
-#if defined(_MSC_VER)
-#pragma optimize("",on)
-#endif
diff --git a/src/lib/libssl/src/crypto/rc5/rc5_locl.h b/src/lib/libssl/src/crypto/rc5/rc5_locl.h
index d337f73fad..314ce8909a 100644
--- a/src/lib/libssl/src/crypto/rc5/rc5_locl.h
+++ b/src/lib/libssl/src/crypto/rc5/rc5_locl.h
@@ -146,10 +146,7 @@
 *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
 *((c)++)=(unsigned char)(((l) )&0xff))
 
-#if (defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)) || defined(__ICC)
-#define ROTATE_l32(a,n) _lrotl(a,n)
-#define ROTATE_r32(a,n) _lrotr(a,n)
-#elif defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 # define ROTATE_l32(a,n) ({ register unsigned int ret; \
 asm ("roll %%cl,%0" \
diff --git a/src/lib/libssl/src/crypto/rsa/rsa_pss.c b/src/lib/libssl/src/crypto/rsa/rsa_pss.c
index 75e8c18533..bd2fde07d4 100644
--- a/src/lib/libssl/src/crypto/rsa/rsa_pss.c
+++ b/src/lib/libssl/src/crypto/rsa/rsa_pss.c
@@ -66,10 +66,6 @@
 
 static const unsigned char zeroes[] = {0,0,0,0,0,0,0,0};
 
-#if defined(_MSC_VER) && defined(_ARM_)
-#pragma optimize("g", off)
-#endif
-
 int RSA_verify_PKCS1_PSS(RSA *rsa, const unsigned char *mHash,
 const EVP_MD *Hash, const unsigned char *EM, int sLen)
 {
@@ -294,7 +290,3 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM,
 return ret;
 
 }
-
-#if defined(_MSC_VER)
-#pragma optimize("",on)
-#endif
diff --git a/src/lib/libssl/src/crypto/sha/sha512.c b/src/lib/libssl/src/crypto/sha/sha512.c
index 32bfecbf9b..c92f18e418 100644
--- a/src/lib/libssl/src/crypto/sha/sha512.c
+++ b/src/lib/libssl/src/crypto/sha/sha512.c
@@ -346,36 +346,6 @@ static const SHA_LONG64 K512[80] = {
 : "=r"(ret) \
 : "r"(a),"K"(n)); ret; })
 # endif
-# elif defined(_MSC_VER)
-# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
-# pragma intrinsic(_rotr64)
-# define ROTR(a,n) _rotr64((a),n)
-# endif
-# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(I386_ONLY)
-static SHA_LONG64 __fastcall __pull64be(const void *x)
-{ _asm mov edx, [ecx + 0]
-_asm mov eax, [ecx + 4]
-_asm xchg dh,dl
-_asm xchg ah,al
-_asm rol edx,16
-_asm rol eax,16
-_asm xchg dh,dl
-_asm xchg ah,al
-}
-# else
-static SHA_LONG64 __fastcall __pull64be(const void *x)
-{ _asm mov edx, [ecx + 0]
-_asm mov eax, [ecx + 4]
-_asm bswap edx
-_asm bswap eax
-}
-# endif
-# define PULL64(x) __pull64be(&(x))
-# if _MSC_VER<=1200
-# pragma inline_depth(0)
-# endif
-# endif
 # endif
 #endif
 
diff --git a/src/lib/libssl/src/crypto/whrlpool/wp_block.c b/src/lib/libssl/src/crypto/whrlpool/wp_block.c
index ce977083ad..fadad01401 100644
--- a/src/lib/libssl/src/crypto/whrlpool/wp_block.c
+++ b/src/lib/libssl/src/crypto/whrlpool/wp_block.c
@@ -77,12 +77,7 @@ typedef unsigned long long u64;
 #endif
 
 #undef ROTATE
-#if defined(_MSC_VER)
-# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
-# pragma intrinsic(_rotl64)
-# define ROTATE(a,n) _rotl64((a),n)
-# endif
-#elif defined(__GNUC__) && __GNUC__>=2
+#if defined(__GNUC__) && __GNUC__>=2
 # if defined(__x86_64) || defined(__x86_64__)
 # define ROTATE(a,n) ({ u64 ret; asm ("rolq %1,%0" \
 : "=r"(ret) : "J"(n),"0"(a) : "cc"); ret; })