author    miod <>    2014-04-23 19:09:49 +0000
committer miod <>    2014-04-23 19:09:49 +0000
commit    ba54475bcdd808f5501d943b18017bccbabd7bf5
tree      2f0bbc95834194b0eb3a2e2149c853e29107e563    /src/lib/libcrypto/md32_common.h
parent    7386fc3d9177d598eb0bcef6e82db575c9472048
Unifdef -UPEDANTIC. ok beck@ tedu@
Diffstat (limited to 'src/lib/libcrypto/md32_common.h')
-rw-r--r--    src/lib/libcrypto/md32_common.h    38
1 files changed, 16 insertions, 22 deletions
diff --git a/src/lib/libcrypto/md32_common.h b/src/lib/libcrypto/md32_common.h
index be097bfc70..7e25b2ad3b 100644
--- a/src/lib/libcrypto/md32_common.h
+++ b/src/lib/libcrypto/md32_common.h
@@ -141,16 +141,15 @@
  * Engage compiler specific rotate intrinsic function if available.
  */
 #undef ROTATE
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
   /*
    * Some GNU C inline assembler templates. Note that these are
    * rotates by *constant* number of bits! But that's exactly
    * what we need here...
    * <appro@fy.chalmers.se>
    */
-#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
-#   define ROTATE(a,n) ({ register unsigned int ret; \
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+#  define ROTATE(a,n) ({ register unsigned int ret; \
                 asm ( \
                 "roll %1,%0" \
                 : "=r"(ret) \
@@ -158,25 +157,24 @@
158 : "cc"); \ 157 : "cc"); \
159 ret; \ 158 ret; \
160 }) 159 })
161# elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ 160# elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
162 defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__) 161 defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
163# define ROTATE(a,n) ({ register unsigned int ret; \ 162# define ROTATE(a,n) ({ register unsigned int ret; \
164 asm ( \ 163 asm ( \
165 "rlwinm %0,%1,%2,0,31" \ 164 "rlwinm %0,%1,%2,0,31" \
166 : "=r"(ret) \ 165 : "=r"(ret) \
167 : "r"(a), "I"(n)); \ 166 : "r"(a), "I"(n)); \
168 ret; \ 167 ret; \
169 }) 168 })
170# elif defined(__s390x__) 169# elif defined(__s390x__)
171# define ROTATE(a,n) ({ register unsigned int ret; \ 170# define ROTATE(a,n) ({ register unsigned int ret; \
172 asm ("rll %0,%1,%2" \ 171 asm ("rll %0,%1,%2" \
173 : "=r"(ret) \ 172 : "=r"(ret) \
174 : "r"(a), "I"(n)); \ 173 : "r"(a), "I"(n)); \
175 ret; \ 174 ret; \
176 }) 175 })
177# endif
178# endif 176# endif
179#endif /* PEDANTIC */ 177#endif
180 178
181#ifndef ROTATE 179#ifndef ROTATE
182#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n)))) 180#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
@@ -184,9 +182,8 @@
 
 #if defined(DATA_ORDER_IS_BIG_ENDIAN)
 
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
       (defined(__x86_64) || defined(__x86_64__))
   /*
    * This gives ~30-40% performance improvement in SHA-256 compiled
@@ -194,13 +191,12 @@
    * this trick on x86* platforms only, because these CPUs can fetch
    * unaligned data without raising an exception.
    */
-#   define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
+#  define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
                 asm ("bswapl %0":"=r"(r):"0"(r)); \
                 (c)+=4; (l)=r; })
-#   define HOST_l2c(l,c) ({ unsigned int r=(l); \
+#  define HOST_l2c(l,c) ({ unsigned int r=(l); \
                 asm ("bswapl %0":"=r"(r):"0"(r)); \
                 *((unsigned int *)(c))=r; (c)+=4; r; })
-#  endif
 # endif
 #endif
 #if defined(__s390__) || defined(__s390x__)
@@ -225,16 +221,14 @@
 
 #elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
 
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-#  if defined(__s390x__)
-#   define HOST_c2l(c,l) ({ asm ("lrv %0,%1" \
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__s390x__)
+#  define HOST_c2l(c,l) ({ asm ("lrv %0,%1" \
                 :"=d"(l) :"m"(*(const unsigned int *)(c)));\
                 (c)+=4; (l); })
-#   define HOST_l2c(l,c) ({ asm ("strv %1,%0" \
+#  define HOST_l2c(l,c) ({ asm ("strv %1,%0" \
                 :"=m"(*(unsigned int *)(c)) :"d"(l));\
                 (c)+=4; (l); })
-#  endif
 # endif
 #endif
 #if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
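
Note: the GNU C inline-asm ROTATE templates in the first two hunks are an optimization only; whenever none of them is selected, md32_common.h falls back to the portable shift/or macro kept under #ifndef ROTATE. The following minimal standalone sketch shows what that fallback computes; the main() harness and the sample value are illustrative additions, not part of the header.

#include <stdio.h>

/* Same portable fallback definition that md32_common.h keeps under #ifndef ROTATE. */
#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))

int
main(void)
{
        unsigned int x = 0x12345678;

        /* Rotate left by 8 bits; the x86 "roll" and s390x "rll" paths compute the same value. */
        printf("%08x\n", (unsigned int)ROTATE(x, 8));   /* prints 34567812 */
        return 0;
}

As the in-diff comment notes, the asm templates only handle rotates by a constant count, which is all the MD32-style hash code needs.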