Diffstat (limited to 'src/lib')
-rw-r--r--  src/lib/libcrypto/whrlpool/wp_block.c          24
-rw-r--r--  src/lib/libssl/src/crypto/whrlpool/wp_block.c  24
2 files changed, 14 insertions(+), 34 deletions(-)
diff --git a/src/lib/libcrypto/whrlpool/wp_block.c b/src/lib/libcrypto/whrlpool/wp_block.c
index 9c194f968b..77552b1103 100644
--- a/src/lib/libcrypto/whrlpool/wp_block.c
+++ b/src/lib/libcrypto/whrlpool/wp_block.c
@@ -48,16 +48,6 @@ typedef unsigned long long u64;
 
 #define ROUNDS 10
 
-#define STRICT_ALIGNMENT
-#if defined(__i386) || defined(__i386__) || \
-    defined(__x86_64) || defined(__x86_64__) || \
-    defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64)
-/* Well, formally there're couple of other architectures, which permit
- * unaligned loads, specifically those not crossing cache lines, IA-64
- * and PowerPC... */
-# undef STRICT_ALIGNMENT
-#endif
-
 #undef SMALL_REGISTER_BANK
 #if defined(__i386) || defined(__i386__) || defined(_M_IX86)
 # define SMALL_REGISTER_BANK
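With the local STRICT_ALIGNMENT heuristic deleted above, the rest of the patch switches to __STRICT_ALIGNMENT, presumably so the decision comes from a macro the platform headers already provide on CPUs that require aligned access. A hypothetical sketch of such a platform-side definition (the macro name is from the patch; everything else here is illustrative and not part of this commit):

/* Hypothetical platform-header fragment -- illustrative only.       */
/* Strict-alignment architectures would define the macro; x86[_64],  */
/* which tolerates unaligned 64-bit loads, would leave it undefined. */
#if defined(__sparc64__) || defined(__hppa__) || defined(__sh__)
#define __STRICT_ALIGNMENT
#endif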
@@ -99,14 +89,14 @@ typedef unsigned long long u64;
 # define ROTATE(i,n)	((i)>>(n) ^ (i)<<(64-n))
 # endif
 # endif
-# if defined(ROTATE) && !defined(STRICT_ALIGNMENT)
-#  define STRICT_ALIGNMENT	/* ensure smallest table size */
+# if defined(ROTATE) && !defined(__STRICT_ALIGNMENT)
+#  define __STRICT_ALIGNMENT	/* ensure smallest table size */
 # endif
 #endif
 
 /*
- * Table size depends on STRICT_ALIGNMENT and whether or not endian-
- * specific ROTATE macro is defined. If STRICT_ALIGNMENT is not
+ * Table size depends on __STRICT_ALIGNMENT and whether or not endian-
+ * specific ROTATE macro is defined. If __STRICT_ALIGNMENT is not
  * defined, which is normally the case on x86[_64] CPUs, the table is
  * 4KB large unconditionally. Otherwise if ROTATE is defined, the
  * table is 2KB large, and otherwise - 16KB. 2KB table requires a
@@ -127,7 +117,7 @@ typedef unsigned long long u64;
  * ones to depend on smart compiler to fold byte loads if beneficial.
  * Hand-coded assembler would be another alternative:-)
  */
-#ifdef STRICT_ALIGNMENT
+#ifdef __STRICT_ALIGNMENT
 # if defined(ROTATE)
 #  define N 1
 #  define LL(c0,c1,c2,c3,c4,c5,c6,c7)	c0,c1,c2,c3,c4,c5,c6,c7
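For context on the table-size comment above: the ROTATE macro kept here is a plain 64-bit right rotation, and with it a single 256-entry table of u64 (2KB) can serve all eight byte positions, each derived by rotating a base entry by a multiple of 8 bits, instead of storing eight precomputed 2KB copies (16KB). A minimal standalone sketch of that rotation, assuming n stays in 1..63 (illustrative, not part of the patch):

#include <stdint.h>

/* Equivalent of the ROTATE in the diff: (i)>>(n) ^ (i)<<(64-n).
 * XOR and OR give the same result because the two shifted halves
 * never overlap; n must stay in 1..63 to avoid an undefined shift. */
static inline uint64_t
rot64_right(uint64_t v, unsigned int n)
{
	return (v >> n) | (v << (64 - n));
}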
@@ -487,7 +477,7 @@ void whirlpool_block(WHIRLPOOL_CTX *ctx,const void *inp,size_t n)
 #else
 	u64	L0,L1,L2,L3,L4,L5,L6,L7;
 
-#ifdef STRICT_ALIGNMENT
+#ifdef __STRICT_ALIGNMENT
 	if ((size_t)p & 7)
 		{
 		memcpy (S.c,p,64);
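The hunk above is the unaligned-input guard: when __STRICT_ALIGNMENT is in effect and the 64-byte block at p is not 8-byte aligned, it is staged through an aligned buffer so the u64 loads that follow are legal on strict-alignment CPUs. A simplified standalone sketch of the pattern (names are illustrative, not taken from the file):

#include <stdint.h>
#include <string.h>

union wp_block {
	uint64_t q[8];          /* forces 8-byte alignment          */
	unsigned char c[64];    /* byte view for the staging memcpy */
};

/* Return a pointer safe to read as u64[8]: the input itself when it
 * is already 8-byte aligned, otherwise the aligned staging buffer.  */
static const unsigned char *
stage_if_unaligned(const unsigned char *p, union wp_block *tmp)
{
	if ((uintptr_t)p & 7) {         /* low three bits set => misaligned */
		memcpy(tmp->c, p, 64);
		return tmp->c;
	}
	return p;
}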
@@ -614,7 +604,7 @@ void whirlpool_block(WHIRLPOOL_CTX *ctx,const void *inp,size_t n)
 #endif
 	}
 
-#ifdef STRICT_ALIGNMENT
+#ifdef __STRICT_ALIGNMENT
 	if ((size_t)p & 7)
 		{
 		int	i;
diff --git a/src/lib/libssl/src/crypto/whrlpool/wp_block.c b/src/lib/libssl/src/crypto/whrlpool/wp_block.c
index 9c194f968b..77552b1103 100644
--- a/src/lib/libssl/src/crypto/whrlpool/wp_block.c
+++ b/src/lib/libssl/src/crypto/whrlpool/wp_block.c
@@ -48,16 +48,6 @@ typedef unsigned long long u64;
 
 #define ROUNDS 10
 
-#define STRICT_ALIGNMENT
-#if defined(__i386) || defined(__i386__) || \
-    defined(__x86_64) || defined(__x86_64__) || \
-    defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64)
-/* Well, formally there're couple of other architectures, which permit
- * unaligned loads, specifically those not crossing cache lines, IA-64
- * and PowerPC... */
-# undef STRICT_ALIGNMENT
-#endif
-
 #undef SMALL_REGISTER_BANK
 #if defined(__i386) || defined(__i386__) || defined(_M_IX86)
 # define SMALL_REGISTER_BANK
@@ -99,14 +89,14 @@ typedef unsigned long long u64;
 # define ROTATE(i,n)	((i)>>(n) ^ (i)<<(64-n))
 # endif
 # endif
-# if defined(ROTATE) && !defined(STRICT_ALIGNMENT)
-#  define STRICT_ALIGNMENT	/* ensure smallest table size */
+# if defined(ROTATE) && !defined(__STRICT_ALIGNMENT)
+#  define __STRICT_ALIGNMENT	/* ensure smallest table size */
 # endif
 #endif
 
 /*
- * Table size depends on STRICT_ALIGNMENT and whether or not endian-
- * specific ROTATE macro is defined. If STRICT_ALIGNMENT is not
+ * Table size depends on __STRICT_ALIGNMENT and whether or not endian-
+ * specific ROTATE macro is defined. If __STRICT_ALIGNMENT is not
  * defined, which is normally the case on x86[_64] CPUs, the table is
  * 4KB large unconditionally. Otherwise if ROTATE is defined, the
  * table is 2KB large, and otherwise - 16KB. 2KB table requires a
@@ -127,7 +117,7 @@ typedef unsigned long long u64;
  * ones to depend on smart compiler to fold byte loads if beneficial.
  * Hand-coded assembler would be another alternative:-)
  */
-#ifdef STRICT_ALIGNMENT
+#ifdef __STRICT_ALIGNMENT
 # if defined(ROTATE)
 #  define N 1
 #  define LL(c0,c1,c2,c3,c4,c5,c6,c7)	c0,c1,c2,c3,c4,c5,c6,c7
@@ -487,7 +477,7 @@ void whirlpool_block(WHIRLPOOL_CTX *ctx,const void *inp,size_t n)
 #else
 	u64	L0,L1,L2,L3,L4,L5,L6,L7;
 
-#ifdef STRICT_ALIGNMENT
+#ifdef __STRICT_ALIGNMENT
 	if ((size_t)p & 7)
 		{
 		memcpy (S.c,p,64);
@@ -614,7 +604,7 @@ void whirlpool_block(WHIRLPOOL_CTX *ctx,const void *inp,size_t n)
 #endif
 	}
 
-#ifdef STRICT_ALIGNMENT
+#ifdef __STRICT_ALIGNMENT
 	if ((size_t)p & 7)
 		{
 		int	i;