From 2eb7e5ff6bb69760f9dd4a43e7e3520ebb930120 Mon Sep 17 00:00:00 2001
From: jmc <>
Date: Mon, 26 Dec 2022 07:18:53 +0000
Subject: spelling fixes; from paul tagliamonte

i removed the arithmetics -> arithmetic changes, as i felt they were
not clearly correct

ok tb
---
 src/lib/libcrypto/rc4/asm/rc4-586.pl    | 4 ++--
 src/lib/libcrypto/rc4/asm/rc4-x86_64.pl | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'src/lib/libcrypto/rc4/asm')

diff --git a/src/lib/libcrypto/rc4/asm/rc4-586.pl b/src/lib/libcrypto/rc4/asm/rc4-586.pl
index 03f0cff467..f3c3e117bc 100644
--- a/src/lib/libcrypto/rc4/asm/rc4-586.pl
+++ b/src/lib/libcrypto/rc4/asm/rc4-586.pl
@@ -123,7 +123,7 @@ if ($alt=0) {
 	push	(@XX,shift(@XX))	if ($i>=0);
   }
 } else {
-  # Using pinsrw here improves performane on Intel CPUs by 2-3%, but
+  # Using pinsrw here improves performance on Intel CPUs by 2-3%, but
   # brings down AMD by 7%...
   $RC4_loop_mmx = sub {
     my $i=shift;
@@ -144,7 +144,7 @@ if ($alt=0) {
 	&movd	($i>0?"mm1":"mm2",&DWP(0,$dat,$ty,4));
 
 	# (*)	This is the key to Core2 and Westmere performance.
-	#	Whithout movz out-of-order execution logic confuses
+	#	Without movz out-of-order execution logic confuses
 	#	itself and fails to reorder loads and stores. Problem
 	#	appears to be fixed in Sandy Bridge...
 }
diff --git a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
index 2135b38ef8..18a967e546 100755
--- a/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
+++ b/src/lib/libcrypto/rc4/asm/rc4-x86_64.pl
@@ -50,7 +50,7 @@
 # As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
 # performance by >30% [unlike P4 32-bit case that is]. But this is
 # provided that loads are reordered even more aggressively! Both code
-# pathes, AMD64 and EM64T, reorder loads in essentially same manner
+# paths, AMD64 and EM64T, reorder loads in essentially same manner
 # as my IA-64 implementation. On Opteron this resulted in modest 5%
 # improvement [I had to test it], while final Intel P4 performance
 # achieves respectful 432MBps on 2.8GHz processor now. For reference.
@@ -81,7 +81,7 @@
 # The only code path that was not modified is P4-specific one. Non-P4
 # Intel code path optimization is heavily based on submission by Maxim
 # Perminov, Maxim Locktyukhin and Jim Guilford of Intel. I've used
-# some of the ideas even in attempt to optmize the original RC4_INT
+# some of the ideas even in attempt to optimize the original RC4_INT
 # code path... Current performance in cycles per processed byte (less
 # is better) and improvement coefficients relative to previous
 # version of this module are:
--
cgit v1.2.3-55-g6feb