summaryrefslogtreecommitdiff
path: root/src/lib/libcrypto/sha/sha_locl.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/lib/libcrypto/sha/sha_locl.h')
-rw-r--r--src/lib/libcrypto/sha/sha_locl.h78
1 files changed, 60 insertions, 18 deletions
diff --git a/src/lib/libcrypto/sha/sha_locl.h b/src/lib/libcrypto/sha/sha_locl.h
index 2814ad15fa..6646a8915b 100644
--- a/src/lib/libcrypto/sha/sha_locl.h
+++ b/src/lib/libcrypto/sha/sha_locl.h
@@ -59,6 +59,8 @@
59#include <stdlib.h> 59#include <stdlib.h>
60#include <string.h> 60#include <string.h>
61 61
62#include <openssl/opensslconf.h>
63
62#ifdef undef 64#ifdef undef
63/* one or the other needs to be defined */ 65/* one or the other needs to be defined */
64#ifndef SHA_1 /* FIPS 180-1 */ 66#ifndef SHA_1 /* FIPS 180-1 */
@@ -66,14 +68,6 @@
66#endif 68#endif
67#endif 69#endif
68 70
69#define ULONG unsigned long
70#define UCHAR unsigned char
71#define UINT unsigned int
72
73#ifdef NOCONST
74#define const
75#endif
76
77#undef c2nl 71#undef c2nl
78#define c2nl(c,l) (l =(((unsigned long)(*((c)++)))<<24), \ 72#define c2nl(c,l) (l =(((unsigned long)(*((c)++)))<<24), \
79 l|=(((unsigned long)(*((c)++)))<<16), \ 73 l|=(((unsigned long)(*((c)++)))<<16), \
@@ -166,30 +160,79 @@
166 *((c)++)=(unsigned char)(((l)>>16)&0xff), \ 160 *((c)++)=(unsigned char)(((l)>>16)&0xff), \
167 *((c)++)=(unsigned char)(((l)>>24)&0xff)) 161 *((c)++)=(unsigned char)(((l)>>24)&0xff))
168 162
163#ifndef SHA_LONG_LOG2
164#define SHA_LONG_LOG2 2 /* default to 32 bits */
165#endif
166
169#undef ROTATE 167#undef ROTATE
168#undef Endian_Reverse32
170#if defined(WIN32) 169#if defined(WIN32)
171#define ROTATE(a,n) _lrotl(a,n) 170#define ROTATE(a,n) _lrotl(a,n)
172#else 171#elif defined(__GNUC__) && !defined(PEDANTIC)
173#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n)))) 172/* some inline assembler templates by <appro@fy.chalmers.se> */
173#if defined(__i386) && !defined(NO_ASM)
174#define ROTATE(a,n) ({ register unsigned int ret; \
175 asm ("roll %1,%0" \
176 : "=r"(ret) \
177 : "I"(n), "0"(a) \
178 : "cc"); \
179 ret; \
180 })
181#ifndef I386_ONLY
182#define Endian_Reverse32(a) \
183 { register unsigned int ltmp=(a); \
184 asm ("bswapl %0" \
185 : "=r"(ltmp) : "0"(ltmp)); \
186 (a)=ltmp; \
187 }
188#endif
189#elif defined(__powerpc)
190#define ROTATE(a,n) ({ register unsigned int ret; \
191 asm ("rlwinm %0,%1,%2,0,31" \
192 : "=r"(ret) \
193 : "r"(a), "I"(n)); \
194 ret; \
195 })
196/* Endian_Reverse32 is not needed for PowerPC */
197#endif
174#endif 198#endif
175 199
176/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */ 200/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
177#if defined(WIN32) 201#ifdef ROTATE
202#ifndef Endian_Reverse32
178/* 5 instructions with rotate instruction, else 9 */ 203/* 5 instructions with rotate instruction, else 9 */
179#define Endian_Reverse32(a) \ 204#define Endian_Reverse32(a) \
180 { \ 205 { \
181 unsigned long l=(a); \ 206 unsigned long t=(a); \
182 (a)=((ROTATE(l,8)&0x00FF00FF)|(ROTATE(l,24)&0xFF00FF00)); \ 207 (a)=((ROTATE(t,8)&0x00FF00FF)|(ROTATE((t&0x00FF00FF),24))); \
183 } 208 }
209#endif
184#else 210#else
211#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
212#ifndef Endian_Reverse32
185/* 6 instructions with rotate instruction, else 8 */ 213/* 6 instructions with rotate instruction, else 8 */
186#define Endian_Reverse32(a) \ 214#define Endian_Reverse32(a) \
187 { \ 215 { \
188 unsigned long l=(a); \ 216 unsigned long t=(a); \
189 l=(((l&0xFF00FF00)>>8L)|((l&0x00FF00FF)<<8L)); \ 217 t=(((t>>8)&0x00FF00FF)|((t&0x00FF00FF)<<8)); \
190 (a)=ROTATE(l,16L); \ 218 (a)=ROTATE(t,16); \
191 } 219 }
192#endif 220#endif
221/*
222 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
223 * It's rewritten as above for two reasons:
224 * - RISCs aren't good at long constants and have to explicitly
225 * compose 'em with several (well, usually 2) instructions in a
226 * register before performing the actual operation and (as you
227 * already realized:-) having same constant should inspire the
228 * compiler to permanently allocate the only register for it;
229 * - most modern CPUs have two ALUs, but usually only one has
230 * circuitry for shifts:-( this minor tweak inspires compiler
231 * to schedule shift instructions in a better way...
232 *
233 * <appro@fy.chalmers.se>
234 */
235#endif
193 236
194/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be 237/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be
195 * simplified to the code in F_00_19. Wei attributes these optimisations 238 * simplified to the code in F_00_19. Wei attributes these optimisations
@@ -203,13 +246,12 @@
203#define F_40_59(b,c,d) (((b) & (c)) | (((b)|(c)) & (d))) 246#define F_40_59(b,c,d) (((b) & (c)) | (((b)|(c)) & (d)))
204#define F_60_79(b,c,d) F_20_39(b,c,d) 247#define F_60_79(b,c,d) F_20_39(b,c,d)
205 248
206#ifdef SHA_0
207#undef Xupdate 249#undef Xupdate
250#ifdef SHA_0
208#define Xupdate(a,i,ia,ib,ic,id) X[(i)&0x0f]=(a)=\ 251#define Xupdate(a,i,ia,ib,ic,id) X[(i)&0x0f]=(a)=\
209 (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]); 252 (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]);
210#endif 253#endif
211#ifdef SHA_1 254#ifdef SHA_1
212#undef Xupdate
213#define Xupdate(a,i,ia,ib,ic,id) (a)=\ 255#define Xupdate(a,i,ia,ib,ic,id) (a)=\
214 (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]);\ 256 (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]);\
215 X[(i)&0x0f]=(a)=ROTATE((a),1); 257 X[(i)&0x0f]=(a)=ROTATE((a),1);