author     jsing <>  2025-01-27 14:02:32 +0000
committer  jsing <>  2025-01-27 14:02:32 +0000
commit     ca5eb817689340c9e11f21f75b2eede73622ad2e (patch)
tree       66ed89ba37c98ae1cad1f42ebb47d16e63ee92bd /src/lib/libcrypto/arch/amd64/opensslconf.h
parent     8fad25f043c3a0bd7f94c15d0b2b2785d951295d (diff)
Mop up RC4_INDEX.
The RC4_INDEX define switches between base pointer indexing and per-byte pointer increment. This supposedly made a huge difference to performance on x86 at some point, however compilers have improved somewhat since then. There is no change (or effectively no change) in generated assembly on the majority of LLVM platforms, and even when there is some change (e.g. aarch64), there is no noticeable performance difference.

Simplify the (still messy) macros/code and mop up RC4_INDEX.

ok tb@
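For context, here is a minimal sketch (not taken from the libcrypto sources; the function names are hypothetical) of the two addressing styles RC4_INDEX selected between: with the define, the byte loop indexes off a base pointer (data[i]); without it, the pointer itself is advanced each byte (*data++). Modern compilers generally emit equivalent assembly for both.

#include <stddef.h>

/*
 * Hypothetical illustration, not the actual RC4 code: the same byte
 * loop written with base-pointer indexing (what RC4_INDEX enabled)
 * and with per-byte pointer increment (what remains after this change).
 */
static void
xor_stream_indexed(unsigned char *data, size_t len, unsigned char key)
{
	size_t i;

	for (i = 0; i < len; i++)
		data[i] ^= key;		/* data[i]: base + index addressing */
}

static void
xor_stream_increment(unsigned char *data, size_t len, unsigned char key)
{
	while (len-- > 0)
		*data++ ^= key;		/* *data++: pointer advanced per byte */
}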
Diffstat (limited to 'src/lib/libcrypto/arch/amd64/opensslconf.h')
-rw-r--r--  src/lib/libcrypto/arch/amd64/opensslconf.h | 7 -------
1 file changed, 0 insertions, 7 deletions
diff --git a/src/lib/libcrypto/arch/amd64/opensslconf.h b/src/lib/libcrypto/arch/amd64/opensslconf.h
index 7615744a6b..4187a44c86 100644
--- a/src/lib/libcrypto/arch/amd64/opensslconf.h
+++ b/src/lib/libcrypto/arch/amd64/opensslconf.h
@@ -60,13 +60,6 @@
 #undef THIRTY_TWO_BIT
 #endif
 
-#if defined(HEADER_RC4_LOCL_H) && !defined(CONFIG_HEADER_RC4_LOCL_H)
-#define CONFIG_HEADER_RC4_LOCL_H
-/* if this is defined data[i] is used instead of *data, this is a %20
- * speedup on x86 */
-#undef RC4_INDEX
-#endif
-
 #if defined(HEADER_BF_LOCL_H) && !defined(CONFIG_HEADER_BF_LOCL_H)
 #define CONFIG_HEADER_BF_LOCL_H
 #undef BF_PTR