Diffstat (limited to 'src/lib/libcrypto/md32_common.h')

 src/lib/libcrypto/md32_common.h | 473
 1 file changed, 116 insertions(+), 357 deletions(-)
diff --git a/src/lib/libcrypto/md32_common.h b/src/lib/libcrypto/md32_common.h
index 733da6acaf..089c450290 100644
--- a/src/lib/libcrypto/md32_common.h
+++ b/src/lib/libcrypto/md32_common.h
@@ -1,6 +1,6 @@
 /* crypto/md32_common.h */
 /* ====================================================================
- * Copyright (c) 1999-2002 The OpenSSL Project. All rights reserved.
+ * Copyright (c) 1999-2007 The OpenSSL Project. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -47,10 +47,6 @@
  * OF THE POSSIBILITY OF SUCH DAMAGE.
  * ====================================================================
  *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com). This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
  */
 
 /*
@@ -76,40 +72,27 @@
  * typedef struct {
  *	...
  *	HASH_LONG	Nl,Nh;
+ *	either {
  *	HASH_LONG	data[HASH_LBLOCK];
- *	int		num;
+ *	unsigned char	data[HASH_CBLOCK];
+ *	};
+ *	unsigned int	num;
  *	...
  *	} HASH_CTX;
+ * data[] vector is expected to be zeroed upon first call to
+ * HASH_UPDATE.
  * HASH_UPDATE
  *	name of "Update" function, implemented here.
  * HASH_TRANSFORM
  *	name of "Transform" function, implemented here.
  * HASH_FINAL
  *	name of "Final" function, implemented here.
- * HASH_BLOCK_HOST_ORDER
- *	name of "block" function treating *aligned* input message
- *	in host byte order, implemented externally.
  * HASH_BLOCK_DATA_ORDER
- *	name of "block" function treating *unaligned* input message
- *	in original (data) byte order, implemented externally (it
- *	actually is optional if data and host are of the same
- *	"endianess").
+ *	name of "block" function capable of treating *unaligned* input
+ *	message in original (data) byte order, implemented externally.
  * HASH_MAKE_STRING
  *	macro convering context variables to an ASCII hash string.
  *
- * Optional macros:
- *
- * B_ENDIAN or L_ENDIAN
- *	defines host byte-order.
- * HASH_LONG_LOG2
- *	defaults to 2 if not states otherwise.
- * HASH_LBLOCK
- *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
- * HASH_BLOCK_DATA_ORDER_ALIGNED
- *	alternative "block" function capable of treating
- *	aligned input message in original (data) order,
- *	implemented externally.
- *
  * MD5 example:
  *
  *	#define DATA_ORDER_IS_LITTLE_ENDIAN
@@ -118,20 +101,14 @@
  *	#define HASH_LONG_LOG2	MD5_LONG_LOG2
  *	#define HASH_CTX	MD5_CTX
  *	#define HASH_CBLOCK	MD5_CBLOCK
- *	#define HASH_LBLOCK	MD5_LBLOCK
  *	#define HASH_UPDATE	MD5_Update
  *	#define HASH_TRANSFORM	MD5_Transform
  *	#define HASH_FINAL	MD5_Final
- *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
  *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
  *
  * <appro@fy.chalmers.se>
  */
 
-#include <openssl/crypto.h>
-#include <openssl/fips.h>
-#include <openssl/err.h>
-
 #if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
 #error "DATA_ORDER must be defined!"
 #endif
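[Illustration only, not part of the diff.] The comment block above lists the macros a hash module must define before including md32_common.h. A module following the MD5 example would now wire things up roughly as below; the HASH_MAKE_STRING body is a sketch loosely modelled on OpenSSL's md5_locl.h and is an assumption, not quoted from this change. Note that HASH_LBLOCK, HASH_BLOCK_HOST_ORDER and HASH_BLOCK_DATA_ORDER_ALIGNED are no longer part of the interface.

/* Hypothetical configuration block for an MD5-style module. */
#define DATA_ORDER_IS_LITTLE_ENDIAN

#define HASH_LONG		MD5_LONG
#define HASH_CTX		MD5_CTX
#define HASH_CBLOCK		MD5_CBLOCK
#define HASH_UPDATE		MD5_Update
#define HASH_TRANSFORM		MD5_Transform
#define HASH_FINAL		MD5_Final
#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
#define HASH_MAKE_STRING(c,s)	do {	\
	unsigned long ll;		\
	ll=(c)->A; HOST_l2c(ll,(s));	\
	ll=(c)->B; HOST_l2c(ll,(s));	\
	ll=(c)->C; HOST_l2c(ll,(s));	\
	ll=(c)->D; HOST_l2c(ll,(s));	\
	} while (0)

#include "md32_common.h"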
@@ -156,34 +133,16 @@
 #error "HASH_FINAL must be defined!"
 #endif
 
-#ifndef HASH_BLOCK_HOST_ORDER
-#error "HASH_BLOCK_HOST_ORDER must be defined!"
-#endif
-
-#if 0
-/*
- * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
- * isn't defined.
- */
 #ifndef HASH_BLOCK_DATA_ORDER
 #error "HASH_BLOCK_DATA_ORDER must be defined!"
 #endif
-#endif
-
-#ifndef HASH_LBLOCK
-#define HASH_LBLOCK	(HASH_CBLOCK/4)
-#endif
-
-#ifndef HASH_LONG_LOG2
-#define HASH_LONG_LOG2	2
-#endif
 
 /*
  * Engage compiler specific rotate intrinsic function if available.
  */
 #undef ROTATE
 #ifndef PEDANTIC
-# if 0 /* defined(_MSC_VER) */
+# if defined(_MSC_VER) || defined(__ICC)
 #  define ROTATE(a,n)	_lrotl(a,n)
 # elif defined(__MWERKS__)
 #  if defined(__POWERPC__)
@@ -199,7 +158,6 @@
  * Some GNU C inline assembler templates. Note that these are
  * rotates by *constant* number of bits! But that's exactly
  * what we need here...
- *
  * <appro@fy.chalmers.se>
  */
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
@@ -211,7 +169,8 @@
 				: "cc");		\
 				ret;			\
 			})
-# elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
+# elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
+	defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
 #  define ROTATE(a,n)	({ register unsigned int ret;	\
 				asm (			\
 				"rlwinm %0,%1,%2,0,31"	\
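[Illustration only, not part of the diff.] The inline-assembler templates above and the portable fallback that appears later in the next hunk, #define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n)))), all compute a 32-bit left rotate. A standalone sanity check of the portable fallback:

#include <stdio.h>

/* Portable 32-bit rotate-left, as used when no intrinsic or
 * inline-asm variant is available. */
#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))

int
main(void)
{
	unsigned int v = 0x80000001U;

	/* Rotating left by 1 wraps the top bit around to bit 0. */
	printf("%08x\n", ROTATE(v, 1) & 0xffffffffU);	/* prints 00000003 */
	return 0;
}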
@@ -219,194 +178,100 @@
 				: "r"(a), "I"(n));	\
 				ret;			\
 			})
-#  endif
-# endif
-
-/*
- * Engage compiler specific "fetch in reverse byte order"
- * intrinsic function if available.
- */
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-  /* some GNU C inline assembler templates by <appro@fy.chalmers.se> */
-#  if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY)
-#   define BE_FETCH32(a)	({ register unsigned int l=(a);	\
-				asm (			\
-				"bswapl %0"		\
-				: "=r"(l) : "0"(l));	\
-				l;			\
-				})
-#  elif defined(__powerpc)
-#   define LE_FETCH32(a)	({ register unsigned int l;	\
-				asm (			\
-				"lwbrx %0,0,%1"		\
-				: "=r"(l)		\
-				: "r"(a));		\
-				l;			\
-				})
-
-#  elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
-#   define LE_FETCH32(a)	({ register unsigned int l;	\
-				asm (			\
-				"lda [%1]#ASI_PRIMARY_LITTLE,%0"\
-				: "=r"(l)		\
-				: "r"(a));		\
-				l;			\
+#  elif defined(__s390x__)
+#   define ROTATE(a,n) ({ register unsigned int ret;	\
+				asm ("rll %0,%1,%2"	\
+				: "=r"(ret)		\
+				: "r"(a), "I"(n));	\
+				ret;			\
 				})
 #  endif
 # endif
 #endif /* PEDANTIC */
 
-#if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)== 4 */
-/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
-#ifdef ROTATE
-/* 5 instructions with rotate instruction, else 9 */
-#define REVERSE_FETCH32(a,l)	(			\
-		l=*(const HASH_LONG *)(a),		\
-		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
-		)
-#else
-/* 6 instructions with rotate instruction, else 8 */
-#define REVERSE_FETCH32(a,l)	(			\
-		l=*(const HASH_LONG *)(a),		\
-		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),	\
-		ROTATE(l,16)				\
-		)
-/*
- * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
- * It's rewritten as above for two reasons:
- *	- RISCs aren't good at long constants and have to explicitely
- *	  compose 'em with several (well, usually 2) instructions in a
- *	  register before performing the actual operation and (as you
- *	  already realized:-) having same constant should inspire the
- *	  compiler to permanently allocate the only register for it;
- *	- most modern CPUs have two ALUs, but usually only one has
- *	  circuitry for shifts:-( this minor tweak inspires compiler
- *	  to schedule shift instructions in a better way...
- *
- * <appro@fy.chalmers.se>
- */
-#endif
-#endif
-
 #ifndef ROTATE
 #define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
 #endif
 
-/*
- * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
- * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
- * and host are of the same "endianess". It's possible to mask
- * this with blank #define HASH_BLOCK_DATA_ORDER though...
- *
- * <appro@fy.chalmers.se>
- */
-#if defined(B_ENDIAN)
-# if defined(DATA_ORDER_IS_BIG_ENDIAN)
-#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
-#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
-#  endif
-# elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-#  ifndef HOST_FETCH32
-#   ifdef LE_FETCH32
-#    define HOST_FETCH32(p,l)	LE_FETCH32(p)
-#   elif defined(REVERSE_FETCH32)
-#    define HOST_FETCH32(p,l)	REVERSE_FETCH32(p,l)
-#   endif
-#  endif
-# endif
-#elif defined(L_ENDIAN)
-# if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
-#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
-#  endif
-# elif defined(DATA_ORDER_IS_BIG_ENDIAN)
-#  ifndef HOST_FETCH32
-#   ifdef BE_FETCH32
-#    define HOST_FETCH32(p,l)	BE_FETCH32(p)
-#   elif defined(REVERSE_FETCH32)
-#    define HOST_FETCH32(p,l)	REVERSE_FETCH32(p,l)
-#   endif
-#  endif
-# endif
-#endif
+#if defined(DATA_ORDER_IS_BIG_ENDIAN)
 
-#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
-#ifndef HASH_BLOCK_DATA_ORDER
-#error "HASH_BLOCK_DATA_ORDER must be defined!"
+#ifndef PEDANTIC
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
+      (defined(__x86_64) || defined(__x86_64__))
+#   if !defined(B_ENDIAN)
+	/*
+	 * This gives ~30-40% performance improvement in SHA-256 compiled
+	 * with gcc [on P4]. Well, first macro to be frank. We can pull
+	 * this trick on x86* platforms only, because these CPUs can fetch
+	 * unaligned data without raising an exception.
+	 */
+#    define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
+				   asm ("bswapl %0":"=r"(r):"0"(r));	\
+				   (c)+=4; (l)=r;			})
+#    define HOST_l2c(l,c)	({ unsigned int r=(l);			\
+				   asm ("bswapl %0":"=r"(r):"0"(r));	\
+				   *((unsigned int *)(c))=r; (c)+=4; r;	})
+#   endif
+#  endif
+# endif
 #endif
+#if defined(__s390__) || defined(__s390x__)
+# define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, (l))
+# define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, (l))
 #endif
 
-#if defined(DATA_ORDER_IS_BIG_ENDIAN)
-
+#ifndef HOST_c2l
 #define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),	\
 			 l|=(((unsigned long)(*((c)++)))<<16),	\
 			 l|=(((unsigned long)(*((c)++)))<< 8),	\
 			 l|=(((unsigned long)(*((c)++)))     ),	\
 			 l)
-#define HOST_p_c2l(c,l,n)	{	\
-			switch (n) {	\
-			case 0: l =((unsigned long)(*((c)++)))<<24;	\
-			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
-			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
-			case 3: l|=((unsigned long)(*((c)++)));	\
-				} }
-#define HOST_p_c2l_p(c,l,sc,len) {	\
-			switch (sc) {	\
-			case 0: l =((unsigned long)(*((c)++)))<<24;	\
-				if (--len == 0) break;	\
-			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
-				if (--len == 0) break;	\
-			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
-				} }
-/* NOTE the pointer is not incremented at the end of this */
-#define HOST_c2l_p(c,l,n)	{	\
-			l=0; (c)+=n;	\
-			switch (n) {	\
-			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
-			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
-			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
-				} }
+#endif
+#ifndef HOST_l2c
 #define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
 			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
 			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
 			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
 			 l)
+#endif
 
 #elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
 
+#ifndef PEDANTIC
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#  if defined(__s390x__)
+#   define HOST_c2l(c,l)	({ asm ("lrv %0,0(%1)"		\
+				   :"=r"(l) : "r"(c));		\
+				   (c)+=4; (l);			})
+#   define HOST_l2c(l,c)	({ asm ("strv %0,0(%1)"		\
+				   : : "r"(l),"r"(c) : "memory");	\
+				   (c)+=4; (l);			})
+#  endif
+# endif
+#endif
+#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+# ifndef B_ENDIAN
+	/* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
+#  define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
+#  define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
+# endif
+#endif
+
+#ifndef HOST_c2l
 #define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))     ),	\
 			 l|=(((unsigned long)(*((c)++)))<< 8),	\
 			 l|=(((unsigned long)(*((c)++)))<<16),	\
 			 l|=(((unsigned long)(*((c)++)))<<24),	\
 			 l)
-#define HOST_p_c2l(c,l,n)	{	\
-			switch (n) {	\
-			case 0: l =((unsigned long)(*((c)++)));	\
-			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
-			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
-			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
-				} }
-#define HOST_p_c2l_p(c,l,sc,len) {	\
-			switch (sc) {	\
-			case 0: l =((unsigned long)(*((c)++)));	\
-				if (--len == 0) break;	\
-			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
-				if (--len == 0) break;	\
-			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
-				} }
-/* NOTE the pointer is not incremented at the end of this */
-#define HOST_c2l_p(c,l,n)	{	\
-			l=0; (c)+=n;	\
-			switch (n) {	\
-			case 3: l =((unsigned long)(*(--(c))))<<16;	\
-			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
-			case 1: l|=((unsigned long)(*(--(c))));	\
-				} }
+#endif
+#ifndef HOST_l2c
 #define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
 			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
 			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
 			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
 			 l)
+#endif
 
 #endif
 
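[Illustration only, not part of the diff.] The portable HOST_c2l/HOST_l2c fallbacks kept under "#ifndef HOST_c2l"/"#ifndef HOST_l2c" above serialize a 32-bit word to and from the data byte order one octet at a time, advancing the byte pointer. The standalone check below copies the big-endian fallback bodies from the context lines above (layout reflowed here); it is a sketch, not the library's code path:

#include <stdio.h>

#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))     ),		\
			 l)
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)

int
main(void)
{
	unsigned char buf[4], *p = buf;
	const unsigned char *q;
	unsigned long l = 0x01020304UL;

	(void)HOST_l2c(l, p);		/* big-endian store: 01 02 03 04 */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);

	q = buf;
	l = 0;
	(void)HOST_c2l(q, l);		/* read the word back */
	printf("%08lx\n", l);		/* prints 01020304 */
	return 0;
}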
@@ -414,118 +279,60 @@
  * Time for some action:-)
  */
 
-int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
+int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
 	{
 	const unsigned char *data=data_;
-	register HASH_LONG * p;
-	register unsigned long l;
-	int sw,sc,ew,ec;
+	unsigned char *p;
+	HASH_LONG l;
+	size_t n;
 
 	if (len==0) return 1;
 
-	l=(c->Nl+(len<<3))&0xffffffffL;
+	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
 	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
 	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
 	if (l < c->Nl) /* overflow */
 		c->Nh++;
-	c->Nh+=(len>>29);
+	c->Nh+=(len>>29);	/* might cause compiler warning on 16-bit */
 	c->Nl=l;
 
| 434 | if (c->num != 0) | 299 | n = c->num; |
| 300 | if (n != 0) | ||
| 435 | { | 301 | { |
| 436 | p=c->data; | 302 | p=(unsigned char *)c->data; |
| 437 | sw=c->num>>2; | ||
| 438 | sc=c->num&0x03; | ||
| 439 | 303 | ||
| 440 | if ((c->num+len) >= HASH_CBLOCK) | 304 | if ((n+len) >= HASH_CBLOCK) |
| 441 | { | 305 | { |
| 442 | l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l; | 306 | memcpy (p+n,data,HASH_CBLOCK-n); |
| 443 | for (; sw<HASH_LBLOCK; sw++) | 307 | HASH_BLOCK_DATA_ORDER (c,p,1); |
| 444 | { | 308 | n = HASH_CBLOCK-n; |
| 445 | HOST_c2l(data,l); p[sw]=l; | 309 | data += n; |
| 446 | } | 310 | len -= n; |
| 447 | HASH_BLOCK_HOST_ORDER (c,p,1); | 311 | c->num = 0; |
| 448 | len-=(HASH_CBLOCK-c->num); | 312 | memset (p,0,HASH_CBLOCK); /* keep it zeroed */ |
| 449 | c->num=0; | ||
| 450 | /* drop through and do the rest */ | ||
| 451 | } | 313 | } |
| 452 | else | 314 | else |
| 453 | { | 315 | { |
| 454 | c->num+=len; | 316 | memcpy (p+n,data,len); |
| 455 | if ((sc+len) < 4) /* ugly, add char's to a word */ | 317 | c->num += (unsigned int)len; |
| 456 | { | ||
| 457 | l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l; | ||
| 458 | } | ||
| 459 | else | ||
| 460 | { | ||
| 461 | ew=(c->num>>2); | ||
| 462 | ec=(c->num&0x03); | ||
| 463 | if (sc) | ||
| 464 | l=p[sw]; | ||
| 465 | HOST_p_c2l(data,l,sc); | ||
| 466 | p[sw++]=l; | ||
| 467 | for (; sw < ew; sw++) | ||
| 468 | { | ||
| 469 | HOST_c2l(data,l); p[sw]=l; | ||
| 470 | } | ||
| 471 | if (ec) | ||
| 472 | { | ||
| 473 | HOST_c2l_p(data,l,ec); p[sw]=l; | ||
| 474 | } | ||
| 475 | } | ||
| 476 | return 1; | 318 | return 1; |
| 477 | } | 319 | } |
| 478 | } | 320 | } |
| 479 | 321 | ||
-	sw=len/HASH_CBLOCK;
-	if (sw > 0)
+	n = len/HASH_CBLOCK;
+	if (n > 0)
 		{
-#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
-		/*
-		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
-		 * only if sizeof(HASH_LONG)==4.
-		 */
-		if ((((unsigned long)data)%4) == 0)
-			{
-			/* data is properly aligned so that we can cast it: */
-			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(HASH_LONG *)data,sw);
-			sw*=HASH_CBLOCK;
-			data+=sw;
-			len-=sw;
-			}
-		else
-#if !defined(HASH_BLOCK_DATA_ORDER)
-			while (sw--)
-				{
-				memcpy (p=c->data,data,HASH_CBLOCK);
-				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
-				data+=HASH_CBLOCK;
-				len-=HASH_CBLOCK;
-				}
-#endif
-#endif
-#if defined(HASH_BLOCK_DATA_ORDER)
-			{
-			HASH_BLOCK_DATA_ORDER(c,data,sw);
-			sw*=HASH_CBLOCK;
-			data+=sw;
-			len-=sw;
-			}
-#endif
+		HASH_BLOCK_DATA_ORDER (c,data,n);
+		n *= HASH_CBLOCK;
+		data += n;
+		len -= n;
 		}
 
-	if (len!=0)
+	if (len != 0)
 		{
-		p = c->data;
+		p = (unsigned char *)c->data;
 		c->num = len;
-		ew=len>>2;	/* words to copy */
-		ec=len&0x03;
-		for (; ew; ew--,p++)
-			{
-			HOST_c2l(data,l); *p=l;
-			}
-		HOST_c2l_p(data,l,ec);
-		*p=l;
+		memcpy (p,data,len);
 		}
 	return 1;
 	}
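[Illustration only, not part of the diff.] The rewritten HASH_UPDATE keeps the message length as a 64-bit bit counter split across the 32-bit Nl/Nh halves: len<<3 is the byte count times eight, and len>>29 is the part of len*8 that spills past 32 bits. A standalone sketch of just that bookkeeping:

#include <stddef.h>
#include <stdio.h>

struct ctr { unsigned int Nl, Nh; };	/* stands in for the HASH_CTX fields */

static void
add_bytes(struct ctr *c, size_t len)
{
	unsigned int l = (c->Nl + (((unsigned int)len) << 3)) & 0xffffffffU;

	if (l < c->Nl)			/* low word wrapped: propagate carry */
		c->Nh++;
	c->Nh += (unsigned int)(len >> 29);	/* high bits of len*8 */
	c->Nl = l;
}

int
main(void)
{
	struct ctr c = { 0, 0 };

	add_bytes(&c, 0xC0000000UL);	/* 3 GiB */
	add_bytes(&c, 0xC0000000UL);	/* 3 GiB more: 6 GiB total */
	/* 6 GiB = 0xC00000000 bits, so Nh=0000000c, Nl=00000000 */
	printf("Nh=%08x Nl=%08x\n", c.Nh, c.Nl);
	return 0;
}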
@@ -533,81 +340,38 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
 
 void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
 	{
-#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
-	if ((((unsigned long)data)%4) == 0)
-		/* data is properly aligned so that we can cast it: */
-		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(HASH_LONG *)data,1);
-	else
-#if !defined(HASH_BLOCK_DATA_ORDER)
-		{
-		memcpy (c->data,data,HASH_CBLOCK);
-		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
-		}
-#endif
-#endif
-#if defined(HASH_BLOCK_DATA_ORDER)
 	HASH_BLOCK_DATA_ORDER (c,data,1);
-#endif
 	}
 
 
 int HASH_FINAL (unsigned char *md, HASH_CTX *c)
 	{
-	register HASH_LONG *p;
-	register unsigned long l;
-	register int i,j;
-	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
-	const unsigned char *cp=end;
-
-#if 0
-	if(FIPS_mode() && !FIPS_md5_allowed())
-		{
-		FIPSerr(FIPS_F_HASH_FINAL,FIPS_R_NON_FIPS_METHOD);
-		return 0;
-		}
-#endif
+	unsigned char *p = (unsigned char *)c->data;
+	size_t n = c->num;
 
-	/* c->num should definitly have room for at least one more byte. */
-	p=c->data;
-	i=c->num>>2;
-	j=c->num&0x03;
-
-#if 0
-	/* purify often complains about the following line as an
-	 * Uninitialized Memory Read. While this can be true, the
-	 * following p_c2l macro will reset l when that case is true.
-	 * This is because j&0x03 contains the number of 'valid' bytes
-	 * already in p[i]. If and only if j&0x03 == 0, the UMR will
-	 * occur but this is also the only time p_c2l will do
-	 * l= *(cp++) instead of l|= *(cp++)
-	 * Many thanks to Alex Tang <altitude@cic.net> for pickup this
-	 * 'potential bug' */
-#ifdef PURIFY
-	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
-#endif
-	l=p[i];
-#else
-	l = (j==0) ? 0 : p[i];
-#endif
-	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */
+	p[n] = 0x80; /* there is always room for one */
+	n++;
 
-	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
+	if (n > (HASH_CBLOCK-8))
 		{
-		if (i<HASH_LBLOCK) p[i]=0;
-		HASH_BLOCK_HOST_ORDER (c,p,1);
-		i=0;
+		memset (p+n,0,HASH_CBLOCK-n);
+		n=0;
+		HASH_BLOCK_DATA_ORDER (c,p,1);
 		}
-	for (; i<(HASH_LBLOCK-2); i++)
-		p[i]=0;
+	memset (p+n,0,HASH_CBLOCK-8-n);
 
+	p += HASH_CBLOCK-8;
 #if defined(DATA_ORDER_IS_BIG_ENDIAN)
-	p[HASH_LBLOCK-2]=c->Nh;
-	p[HASH_LBLOCK-1]=c->Nl;
+	(void)HOST_l2c(c->Nh,p);
+	(void)HOST_l2c(c->Nl,p);
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-	p[HASH_LBLOCK-2]=c->Nl;
-	p[HASH_LBLOCK-1]=c->Nh;
+	(void)HOST_l2c(c->Nl,p);
+	(void)HOST_l2c(c->Nh,p);
 #endif
-	HASH_BLOCK_HOST_ORDER (c,p,1);
+	p -= HASH_CBLOCK;
+	HASH_BLOCK_DATA_ORDER (c,p,1);
+	c->num=0;
+	memset (p,0,HASH_CBLOCK);
 
 #ifndef HASH_MAKE_STRING
 #error "HASH_MAKE_STRING must be defined!"
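[Illustration only, not part of the diff.] The rewritten HASH_FINAL does standard MD-style padding directly on the byte buffer: append 0x80, zero-fill, and reserve the last eight bytes of the block for the bit count written via HOST_l2c; when fewer than eight bytes remain after the 0x80, an extra block is processed first. A small standalone sketch of that block-count rule (CBLOCK here stands in for HASH_CBLOCK, assumed 64 as for MD5/SHA-1/SHA-256):

#include <stdio.h>

#define CBLOCK 64	/* stands in for HASH_CBLOCK */

/* n = bytes already buffered (n < CBLOCK); returns how many blocks
 * the 0x80 byte plus the 8-byte length will require. */
static unsigned int
blocks_for_tail(unsigned int n)
{
	return (n + 1 + 8 <= CBLOCK) ? 1 : 2;
}

int
main(void)
{
	printf("%u\n", blocks_for_tail(0));	/* 1: plenty of room */
	printf("%u\n", blocks_for_tail(55));	/* 1: 55+1+8 == 64, just fits */
	printf("%u\n", blocks_for_tail(56));	/* 2: length spills into a new block */
	return 0;
}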
@@ -615,11 +379,6 @@ int HASH_FINAL (unsigned char *md, HASH_CTX *c)
 	HASH_MAKE_STRING(c,md);
 #endif
 
-	c->num=0;
-	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
-	 * but I'm not worried :-)
-	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
-	 */
 	return 1;
 	}
 
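[Illustration only, not part of the diff.] The caller-visible behaviour of the Update/Transform/Final functions generated from this header is unchanged by the rewrite. For example, hashing with the MD5 instantiation still looks like this (link with -lcrypto):

#include <stdio.h>
#include <openssl/md5.h>

int
main(void)
{
	MD5_CTX ctx;
	unsigned char md[MD5_DIGEST_LENGTH];
	int i;

	MD5_Init(&ctx);
	MD5_Update(&ctx, "abc", 3);	/* data may arrive in any chunking */
	MD5_Final(md, &ctx);

	for (i = 0; i < MD5_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);
	printf("\n");			/* 900150983cd24fb0d6963f7d28e17f72 */
	return 0;
}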
