From 2eb7e5ff6bb69760f9dd4a43e7e3520ebb930120 Mon Sep 17 00:00:00 2001 From: jmc <> Date: Mon, 26 Dec 2022 07:18:53 +0000 Subject: spelling fixes; from paul tagliamonte i removed the arithmetics -> arithmetic changes, as i felt they were not clearly correct ok tb --- src/lib/libcrypto/aes/asm/aes-586.pl | 6 +++--- src/lib/libcrypto/aes/asm/aes-mips.pl | 2 +- src/lib/libcrypto/aes/asm/aes-ppc.pl | 2 +- src/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl | 4 ++-- src/lib/libcrypto/aes/asm/aesni-x86_64.pl | 30 +++++++++++++------------- src/lib/libcrypto/aes/asm/bsaes-x86_64.pl | 2 +- 6 files changed, 23 insertions(+), 23 deletions(-) (limited to 'src/lib/libcrypto/aes') diff --git a/src/lib/libcrypto/aes/asm/aes-586.pl b/src/lib/libcrypto/aes/asm/aes-586.pl index 3ba8a26eaa..c5ae3f6903 100644 --- a/src/lib/libcrypto/aes/asm/aes-586.pl +++ b/src/lib/libcrypto/aes/asm/aes-586.pl @@ -48,8 +48,8 @@ # better performance on most recent µ-archs... # # Third version adds AES_cbc_encrypt implementation, which resulted in -# up to 40% performance imrovement of CBC benchmark results. 40% was -# observed on P4 core, where "overall" imrovement coefficient, i.e. if +# up to 40% performance improvement of CBC benchmark results. 40% was +# observed on P4 core, where "overall" improvement coefficient, i.e. if # compared to PIC generated by GCC and in CBC mode, was observed to be # as large as 4x:-) CBC performance is virtually identical to ECB now # and on some platforms even better, e.g. 17.6 "small" cycles/byte on @@ -228,7 +228,7 @@ $small_footprint=1; # $small_footprint=1 code is ~5% slower [on # contention and in hope to "collect" 5% back # in real-life applications... -$vertical_spin=0; # shift "verticaly" defaults to 0, because of +$vertical_spin=0; # shift "vertically" defaults to 0, because of # its proof-of-concept status... # Note that there is no decvert(), as well as last encryption round is # performed with "horizontal" shifts. This is because this "vertical" diff --git a/src/lib/libcrypto/aes/asm/aes-mips.pl b/src/lib/libcrypto/aes/asm/aes-mips.pl index 2f6ff74ffe..b95d1afd5a 100644 --- a/src/lib/libcrypto/aes/asm/aes-mips.pl +++ b/src/lib/libcrypto/aes/asm/aes-mips.pl @@ -106,7 +106,7 @@ my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2); my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23)); my ($key0,$cnt)=($gp,$fp); -# instuction ordering is "stolen" from output from MIPSpro assembler +# instruction ordering is "stolen" from output from MIPSpro assembler # invoked with -mips3 -O3 arguments... $code.=<<___; .align 5 diff --git a/src/lib/libcrypto/aes/asm/aes-ppc.pl b/src/lib/libcrypto/aes/asm/aes-ppc.pl index 7c52cbe5f9..91a46f60ed 100644 --- a/src/lib/libcrypto/aes/asm/aes-ppc.pl +++ b/src/lib/libcrypto/aes/asm/aes-ppc.pl @@ -19,7 +19,7 @@ # February 2010 # # Rescheduling instructions to favour Power6 pipeline gave 10% -# performance improvement on the platfrom in question (and marginal +# performance improvement on the platform in question (and marginal # improvement even on others). It should be noted that Power6 fails # to process byte in 18 cycles, only in 23, because it fails to issue # 4 load instructions in two cycles, only in 3. 
As result non-compact diff --git a/src/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl b/src/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl index bc6c8f3fc0..880bcc2d58 100644 --- a/src/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl +++ b/src/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl @@ -250,7 +250,7 @@ ___ $r++; unshift(@rndkey,pop(@rndkey)); }; -sub Xupdate_ssse3_16_31() # recall that $Xi starts wtih 4 +sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4 { use integer; my $body = shift; my @insns = (&$body,&$body,&$body,&$body); # 40 instructions @@ -767,7 +767,7 @@ ___ $r++; unshift(@rndkey,pop(@rndkey)); }; -sub Xupdate_avx_16_31() # recall that $Xi starts wtih 4 +sub Xupdate_avx_16_31() # recall that $Xi starts with 4 { use integer; my $body = shift; my @insns = (&$body,&$body,&$body,&$body); # 40 instructions diff --git a/src/lib/libcrypto/aes/asm/aesni-x86_64.pl b/src/lib/libcrypto/aes/asm/aesni-x86_64.pl index f0b30109ae..a849073728 100644 --- a/src/lib/libcrypto/aes/asm/aesni-x86_64.pl +++ b/src/lib/libcrypto/aes/asm/aesni-x86_64.pl @@ -52,7 +52,7 @@ # nothing one can do and the result appears optimal. CCM result is # identical to CBC, because CBC-MAC is essentially CBC encrypt without # saving output. CCM CTR "stays invisible," because it's neatly -# interleaved wih CBC-MAC. This provides ~30% improvement over +# interleaved with CBC-MAC. This provides ~30% improvement over # "straghtforward" CCM implementation with CTR and CBC-MAC performed # disjointly. Parallelizable modes practically achieve the theoretical # limit. @@ -136,7 +136,7 @@ # asymptotic, if it can be surpassed, isn't it? What happens there? # Rewind to CBC paragraph for the answer. Yes, out-of-order execution # magic is responsible for this. Processor overlaps not only the -# additional instructions with AES ones, but even AES instuctions +# additional instructions with AES ones, but even AES instructions # processing adjacent triplets of independent blocks. 
In the 6x case # additional instructions still claim disproportionally small amount # of additional cycles, but in 8x case number of instructions must be @@ -1350,7 +1350,7 @@ ___ movdqa @tweak[5],@tweak[$i] paddq @tweak[5],@tweak[5] # psllq 1,$tweak pand $twmask,$twres # isolate carry and residue - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits pxor $twres,@tweak[5] ___ } @@ -1456,7 +1456,7 @@ $code.=<<___; aesenc $rndkey0,$inout0 pand $twmask,$twres # isolate carry and residue aesenc $rndkey0,$inout1 - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits aesenc $rndkey0,$inout2 pxor $twres,@tweak[5] aesenc $rndkey0,$inout3 @@ -1471,7 +1471,7 @@ $code.=<<___; aesenc $rndkey1,$inout0 pand $twmask,$twres # isolate carry and residue aesenc $rndkey1,$inout1 - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits aesenc $rndkey1,$inout2 pxor $twres,@tweak[5] aesenc $rndkey1,$inout3 @@ -1485,7 +1485,7 @@ $code.=<<___; aesenclast $rndkey0,$inout0 pand $twmask,$twres # isolate carry and residue aesenclast $rndkey0,$inout1 - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits aesenclast $rndkey0,$inout2 pxor $twres,@tweak[5] aesenclast $rndkey0,$inout3 @@ -1499,7 +1499,7 @@ $code.=<<___; xorps `16*0`(%rsp),$inout0 # output^=tweak pand $twmask,$twres # isolate carry and residue xorps `16*1`(%rsp),$inout1 - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits pxor $twres,@tweak[5] xorps `16*2`(%rsp),$inout2 @@ -1750,7 +1750,7 @@ ___ movdqa @tweak[5],@tweak[$i] paddq @tweak[5],@tweak[5] # psllq 1,$tweak pand $twmask,$twres # isolate carry and residue - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits pxor $twres,@tweak[5] ___ } @@ -1856,7 +1856,7 @@ $code.=<<___; aesdec $rndkey0,$inout0 pand $twmask,$twres # isolate carry and residue aesdec $rndkey0,$inout1 - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits aesdec $rndkey0,$inout2 pxor $twres,@tweak[5] aesdec $rndkey0,$inout3 @@ -1871,7 +1871,7 @@ $code.=<<___; aesdec $rndkey1,$inout0 pand $twmask,$twres # isolate carry and residue aesdec $rndkey1,$inout1 - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits aesdec $rndkey1,$inout2 pxor $twres,@tweak[5] aesdec $rndkey1,$inout3 @@ -1885,7 +1885,7 @@ $code.=<<___; aesdeclast $rndkey0,$inout0 pand $twmask,$twres # isolate carry and residue aesdeclast $rndkey0,$inout1 - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits aesdeclast $rndkey0,$inout2 pxor $twres,@tweak[5] aesdeclast $rndkey0,$inout3 @@ -1899,7 +1899,7 @@ $code.=<<___; xorps `16*0`(%rsp),$inout0 # output^=tweak pand $twmask,$twres # isolate carry and residue xorps `16*1`(%rsp),$inout1 - pcmpgtd @tweak[5],$twtmp # broadcat upper bits + pcmpgtd @tweak[5],$twtmp # broadcast upper bits pxor $twres,@tweak[5] xorps `16*2`(%rsp),$inout2 @@ -2520,7 +2520,7 @@ ___ # Vinodh Gopal # Kahraman Akdemir # -# Agressively optimized in respect to aeskeygenassist's critical path +# Aggressively optimized in respect to aeskeygenassist's critical path # and is contained in %xmm0-5 to meet Win64 ABI requirement. 
# $code.=<<___; @@ -2602,7 +2602,7 @@ __aesni_set_encrypt_key: .align 16 .L14rounds: - movups 16($inp),%xmm2 # remaning half of *userKey + movups 16($inp),%xmm2 # remaining half of *userKey mov \$13,$bits # 14 rounds for 256 lea 16(%rax),%rax $movkey %xmm0,($key) # round 0 @@ -2862,7 +2862,7 @@ xts_se_handler: mov 56($disp),%r11 # disp->HandlerData mov 0(%r11),%r10d # HandlerData[0] - lea (%rsi,%r10),%r10 # prologue lable + lea (%rsi,%r10),%r10 # prologue label cmp %r10,%rbx # context->Rip<prologue label jb .Lcommon_seh_tail mov 152($context),%rax # pull context->Rsp diff --git a/src/lib/libcrypto/aes/asm/bsaes-x86_64.pl b/src/lib/libcrypto/aes/asm/bsaes-x86_64.pl --- a/src/lib/libcrypto/aes/asm/bsaes-x86_64.pl +++ b/src/lib/libcrypto/aes/asm/bsaes-x86_64.pl # - rounds are folded into a loop resulting in >5x size reduction # from 12.5KB to 2.2KB; -# - above was possibile thanks to mixcolumns() modification that +# - above was possible thanks to mixcolumns() modification that # allowed to feed its output back to aesenc[last], this was # achieved at cost of two additional inter-registers moves; # - some instruction reordering and interleaving; -- cgit v1.2.3-55-g6feb
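A note on the recurring "isolate carry and residue" / "broadcast upper bits" comments corrected above: those paddq/pcmpgtd/pand/pxor sequences implement the XTS tweak update, i.e. multiplication of the 128-bit tweak by x in GF(2^128) with reducing polynomial x^128 + x^7 + x^2 + x + 1. A minimal scalar sketch in Perl of the same arithmetic (illustrative only, not code from the patch; xts_double is a made-up name, and a perl built with 64-bit integers is assumed):

#!/usr/bin/perl
# Scalar model of the tweak update the vector code performs: multiply
# the 128-bit tweak by x in GF(2^128), modulo x^128 + x^7 + x^2 + x + 1.
# Illustrative sketch only, not a routine from the patched files.
use strict;
use warnings;

my $MASK64 = 0xffffffffffffffff;

sub xts_double {
    my ($lo, $hi) = @_;               # tweak as two 64-bit halves, little-endian
    my $carry = ($hi >> 63) & 1;      # bit 127 falls off the top -- the "broadcast upper bits"
    $hi = (($hi << 1) & $MASK64) | (($lo >> 63) & 1);    # 128-bit left shift by one
    $lo = (($lo << 1) & $MASK64) ^ ($carry ? 0x87 : 0);  # fold carry back: x^7+x^2+x+1 = 0x87
    return ($lo, $hi);
}

# doubling x^127 wraps around to x^7+x^2+x+1, i.e. 0x87 in the low half
my ($lo, $hi) = xts_double(0x0, 0x8000000000000000);
printf("lo=%016x hi=%016x\n", $lo, $hi);   # prints lo=0000000000000087 hi=0000000000000000

The vector code reaches the same result branch-free: paddq doubles both 64-bit halves at once, pcmpgtd broadcasts their sign bits into dword masks, and pand against the 0x87 magic constant isolates the carry and residue that pxor folds back into the tweak.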