From ca5eb817689340c9e11f21f75b2eede73622ad2e Mon Sep 17 00:00:00 2001 From: jsing <> Date: Mon, 27 Jan 2025 14:02:32 +0000 Subject: Mop up RC4_INDEX. The RC4_INDEX define switches between base pointer indexing and per-byte pointer increment. This supposedly made a huge difference to performance on x86 at some point, however compilers have improved somewhat since then. There is no change (or effectively no change) in generated assembly on the majority of LLVM platforms and even when there is some change (e.g. aarch64), there is no noticeable performance difference. Simplify the (still messy) macros/code and mop up RC4_INDEX. ok tb@ --- src/lib/libcrypto/arch/arm/opensslconf.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'src/lib/libcrypto/arch/arm/opensslconf.h') diff --git a/src/lib/libcrypto/arch/arm/opensslconf.h b/src/lib/libcrypto/arch/arm/opensslconf.h index 3413abb672..98dcfee3a1 100644 --- a/src/lib/libcrypto/arch/arm/opensslconf.h +++ b/src/lib/libcrypto/arch/arm/opensslconf.h @@ -65,13 +65,6 @@ #undef EIGHT_BIT #endif -#if defined(HEADER_RC4_LOCL_H) && !defined(CONFIG_HEADER_RC4_LOCL_H) -#define CONFIG_HEADER_RC4_LOCL_H -/* if this is defined data[i] is used instead of *data, this is a %20 - * speedup on x86 */ -#define RC4_INDEX -#endif - #if defined(HEADER_BF_LOCL_H) && !defined(CONFIG_HEADER_BF_LOCL_H) #define CONFIG_HEADER_BF_LOCL_H #undef BF_PTR -- cgit v1.2.3-55-g6feb