Diffstat (limited to 'src/lib/libcrypto/md5/md5_locl.h')
-rw-r--r--	src/lib/libcrypto/md5/md5_locl.h	52
1 file changed, 4 insertions, 48 deletions
diff --git a/src/lib/libcrypto/md5/md5_locl.h b/src/lib/libcrypto/md5/md5_locl.h
index 9e360da732..84e81b960d 100644
--- a/src/lib/libcrypto/md5/md5_locl.h
+++ b/src/lib/libcrypto/md5/md5_locl.h
@@ -66,49 +66,19 @@
 #endif
 
 #ifdef MD5_ASM
-# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__)
-#  define md5_block_host_order md5_block_asm_host_order
-# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
-   void md5_block_asm_data_order_aligned (MD5_CTX *c, const MD5_LONG *p,int num);
-#  define HASH_BLOCK_DATA_ORDER_ALIGNED md5_block_asm_data_order_aligned
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || \
+     defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
+#  define md5_block_data_order md5_block_asm_data_order
 # endif
 #endif
 
-void md5_block_host_order (MD5_CTX *c, const void *p,int num);
-void md5_block_data_order (MD5_CTX *c, const void *p,int num);
-
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__)
-/*
- * *_block_host_order is expected to handle aligned data while
- * *_block_data_order - unaligned. As algorithm and host (x86)
- * are in this case of the same "endianness" these two are
- * otherwise indistinguishable. But normally you don't want to
- * call the same function because unaligned access in places
- * where alignment is expected is usually a "Bad Thing". Indeed,
- * on RISCs you get punished with BUS ERROR signal or *severe*
- * performance degradation. Intel CPUs are in turn perfectly
- * capable of loading unaligned data without such drastic side
- * effect. Yes, they say it's slower than aligned load, but no
- * exception is generated and therefore performance degradation
- * is *incomparable* with RISCs. What we should weight here is
- * costs of unaligned access against costs of aligning data.
- * According to my measurements allowing unaligned access results
- * in ~9% performance improvement on Pentium II operating at
- * 266MHz. I won't be surprised if the difference will be higher
- * on faster systems:-)
- *
- * <appro@fy.chalmers.se>
- */
-#define md5_block_data_order md5_block_host_order
-#endif
+void md5_block_data_order (MD5_CTX *c, const void *p,size_t num);
 
 #define DATA_ORDER_IS_LITTLE_ENDIAN
 
 #define HASH_LONG               MD5_LONG
-#define HASH_LONG_LOG2          MD5_LONG_LOG2
 #define HASH_CTX                MD5_CTX
 #define HASH_CBLOCK             MD5_CBLOCK
-#define HASH_LBLOCK             MD5_LBLOCK
 #define HASH_UPDATE             MD5_Update
 #define HASH_TRANSFORM          MD5_Transform
 #define HASH_FINAL              MD5_Final
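The first hunk collapses the old host-order/data-order split. The i386-only
md5_block_host_order alias and the UltraSPARC aligned variant are removed,
x86 and x86_64 now both map md5_block_data_order directly onto the assembler
routine, and the remaining C prototype takes a size_t block count instead of
int. As the deleted comment argued, the separate host-order path existed only
so that aligned, already-little-endian input could skip conversion; a generic
data-order routine that assembles each 32-bit word from individual bytes is
alignment- and endianness-neutral, so a single entry point suffices. A
minimal sketch of that idea (le32_load and example_block_data_order are
illustrative names, not the OpenSSL code):

#include <stdint.h>
#include <stddef.h>

/*
 * Assemble one little-endian 32-bit word from bytes.  No alignment
 * requirement on p, and no dependence on host byte order.
 */
static uint32_t
le32_load(const unsigned char *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	    (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/*
 * Outline of a data-order block routine: load the 16-word message
 * schedule portably, then run the (omitted here) MD5 rounds for each
 * 64-byte block.  On x86/x86_64 a compiler typically folds le32_load
 * into a plain 32-bit load, which is why no separate "host order"
 * path is needed there.
 */
static void
example_block_data_order(uint32_t state[4], const void *data, size_t num)
{
	const unsigned char *p = data;
	uint32_t X[16];
	size_t i;

	while (num--) {
		for (i = 0; i < 16; i++, p += 4)
			X[i] = le32_load(p);
		/* ... 64 MD5 rounds updating state[] from X[] ... */
	}
}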
@@ -119,21 +89,7 @@ void md5_block_data_order (MD5_CTX *c, const void *p,int num);
 	ll=(c)->C; HOST_l2c(ll,(s)); \
 	ll=(c)->D; HOST_l2c(ll,(s)); \
 	} while (0)
-#define HASH_BLOCK_HOST_ORDER   md5_block_host_order
-#if !defined(L_ENDIAN) || defined(md5_block_data_order)
 #define HASH_BLOCK_DATA_ORDER   md5_block_data_order
-/*
- * Little-endians (Intel and Alpha) feel better without this.
- * It looks like memcpy does better job than generic
- * md5_block_data_order on copying-n-aligning input data.
- * But frankly speaking I didn't expect such result on Alpha.
- * On the other hand I've got this with egcs-1.0.2 and if
- * program is compiled with another (better?) compiler it
- * might turn out other way around.
- *
- * <appro@fy.chalmers.se>
- */
-#endif
 
 #include "md32_common.h"
 
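The second hunk removes the HASH_BLOCK_HOST_ORDER plumbing and the
conditional guard, so HASH_BLOCK_DATA_ORDER is now defined unconditionally.
HASH_MAKE_STRING itself is untouched: it still serializes the A..D state
words through HOST_l2c, which md32_common.h (given the
DATA_ORDER_IS_LITTLE_ENDIAN define above) implements as emitting each 32-bit
word least-significant byte first, so the digest is byte-identical on every
host. A hedged stand-in for that serialization, with EX_l2c and
example_make_string as hypothetical names rather than the md32_common.h
macros:

#include <stdint.h>

/* Store a host 32-bit word least-significant byte first, advancing
 * the output pointer -- the same effect as a little-endian HOST_l2c. */
#define EX_l2c(l, c)	(*((c)++) = (unsigned char)( (l)        & 0xff), \
			 *((c)++) = (unsigned char)(((l) >>  8) & 0xff), \
			 *((c)++) = (unsigned char)(((l) >> 16) & 0xff), \
			 *((c)++) = (unsigned char)(((l) >> 24) & 0xff))

/* Usage mirroring HASH_MAKE_STRING: write the four MD5 state words
 * into the 16-byte digest buffer. */
static void
example_make_string(uint32_t A, uint32_t B, uint32_t C, uint32_t D,
    unsigned char md[16])
{
	unsigned char *s = md;
	uint32_t ll;

	ll = A; EX_l2c(ll, s);
	ll = B; EX_l2c(ll, s);
	ll = C; EX_l2c(ll, s);
	ll = D; EX_l2c(ll, s);
}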