From 2eb7e5ff6bb69760f9dd4a43e7e3520ebb930120 Mon Sep 17 00:00:00 2001
From: jmc <>
Date: Mon, 26 Dec 2022 07:18:53 +0000
Subject: spelling fixes; from paul tagliamonte

i removed the arithmetics -> arithmetic changes, as i felt they were
not clearly correct

ok tb
---
 src/lib/libcrypto/modes/asm/ghash-x86.pl | 6 +++---
 src/lib/libcrypto/modes/ctr128.c         | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'src/lib/libcrypto/modes')

diff --git a/src/lib/libcrypto/modes/asm/ghash-x86.pl b/src/lib/libcrypto/modes/asm/ghash-x86.pl
index 83c727e07f..27492597ad 100644
--- a/src/lib/libcrypto/modes/asm/ghash-x86.pl
+++ b/src/lib/libcrypto/modes/asm/ghash-x86.pl
@@ -86,7 +86,7 @@
 # where Tproc is time required for Karatsuba pre- and post-processing,
 # is more realistic estimate. In this case it gives ... 1.91 cycles.
 # Or in other words, depending on how well we can interleave reduction
-# and one of the two multiplications the performance should be betwen
+# and one of the two multiplications the performance should be between
 # 1.91 and 2.16. As already mentioned, this implementation processes
 # one byte out of 8KB buffer in 2.10 cycles, while x86_64 counterpart
 # - in 2.02. x86_64 performance is better, because larger register
@@ -700,7 +700,7 @@ sub mmx_loop() {
 	&pxor		($red[1],$red[1]);
 	&pxor		($red[2],$red[2]);
 
-	# Just like in "May" verson modulo-schedule for critical path in
+	# Just like in "May" version modulo-schedule for critical path in
 	# 'Z.hi ^= rem_8bit[Z.lo&0xff^((u8)H[nhi]<<4)]<<48'. Final 'pxor'
 	# is scheduled so late that rem_8bit[] has to be shifted *right*
 	# by 16, which is why last argument to pinsrw is 2, which
@@ -1087,7 +1087,7 @@ my ($Xhi,$Xi) = @_;
 	&movdqu		(&QWP(0,$Xip),$Xi);
 	&function_end("gcm_ghash_clmul");
 
-} else {		# Algorith 5. Kept for reference purposes.
+} else {		# Algorithm 5. Kept for reference purposes.
 
 sub reduction_alg5 {	# 19/16 times faster than Intel version
 my ($Xhi,$Xi)=@_;
diff --git a/src/lib/libcrypto/modes/ctr128.c b/src/lib/libcrypto/modes/ctr128.c
index 9dd8c0c55e..eadb80449c 100644
--- a/src/lib/libcrypto/modes/ctr128.c
+++ b/src/lib/libcrypto/modes/ctr128.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ctr128.c,v 1.8 2022/11/26 16:08:53 tb Exp $ */
+/* $OpenBSD: ctr128.c,v 1.9 2022/12/26 07:18:52 jmc Exp $ */
 /* ====================================================================
  * Copyright (c) 2008 The OpenSSL Project.  All rights reserved.
  *
@@ -109,7 +109,7 @@ ctr128_inc_aligned(unsigned char *counter)
  * This algorithm assumes that the counter is in the x lower bits
  * of the IV (ivec), and that the application has full control over
  * overflow and the rest of the IV.  This implementation takes NO
- * responsability for checking that the counter doesn't overflow
+ * responsibility for checking that the counter doesn't overflow
  * into the rest of the IV when incremented.
  */
 void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
@@ -228,7 +228,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
 			(*func)(in,out,blocks,key,ivec);
 			/* (*ctr) does not update ivec, caller does: */
 			PUTU32(ivec+12,ctr32);
-			/* ... overflow was detected, propogate carry. */
+			/* ... overflow was detected, propagate carry. */
 			if (ctr32 == 0) ctr96_inc(ivec);
 			blocks *= 16;
 			len    -= blocks;
--
cgit v1.2.3-55-g6feb
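
For reference, the comments touched in ctr128.c describe CTR mode keeping a
32-bit big-endian block counter in the last four bytes of the 16-byte IV;
when that counter wraps to zero, a carry has to be propagated into the upper
96 bits (the job of ctr96_inc() in the real file).  A minimal standalone C
sketch of that increment-and-carry step, with hypothetical helper names and
not the libcrypto implementation:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: propagate a carry through the top 12 bytes
 * (bits 127..32) of a big-endian 128-bit counter block.
 */
static void
ctr96_inc_sketch(unsigned char *counter)
{
	unsigned int n = 12, c = 1;

	do {
		--n;
		c += counter[n];
		counter[n] = (unsigned char)c;
		c >>= 8;
	} while (n);
}

/*
 * Increment the 32-bit big-endian counter held in ivec[12..15]; on wrap
 * to zero, carry into the remaining 96 bits, as the ctr128.c comment says.
 */
static void
ctr128_inc_sketch(unsigned char ivec[16])
{
	uint32_t ctr32;

	ctr32 = ((uint32_t)ivec[12] << 24) | ((uint32_t)ivec[13] << 16) |
	    ((uint32_t)ivec[14] << 8) | (uint32_t)ivec[15];
	ctr32++;
	ivec[12] = ctr32 >> 24;
	ivec[13] = ctr32 >> 16;
	ivec[14] = ctr32 >> 8;
	ivec[15] = ctr32;

	if (ctr32 == 0)
		ctr96_inc_sketch(ivec);
}

int
main(void)
{
	/* Low 32 bits are all ones, so the next increment must carry. */
	unsigned char ivec[16] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0xff, 0xff, 0xff, 0xff
	};
	int i;

	ctr128_inc_sketch(ivec);
	for (i = 0; i < 16; i++)
		printf("%02x", ivec[i]);
	printf("\n");
	return 0;
}

Run as-is, this should print 00000000000000000000000200000000: the low word
wraps to zero and the carry lands in byte 11, exactly the case the patched
"propagate carry" comment refers to.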