author    jmc <>  2022-12-26 07:18:53 +0000
committer jmc <>  2022-12-26 07:18:53 +0000
commit    8144b51086b3c46594192ccbec62762e58d61200 (patch)
tree      26f3d93398833b7449b8a97e9fe4af9904382dbf /src/lib/libcrypto/modes
parent    54da696f897367a85e20e97a53d29b18b44cf8b7 (diff)
spelling fixes; from paul tagliamonte
i removed the arithmetics -> arithmetic changes, as i felt they
were not clearly correct
ok tb
Diffstat (limited to 'src/lib/libcrypto/modes')
-rw-r--r--  src/lib/libcrypto/modes/asm/ghash-x86.pl | 6
-rw-r--r--  src/lib/libcrypto/modes/ctr128.c         | 6
2 files changed, 6 insertions, 6 deletions
diff --git a/src/lib/libcrypto/modes/asm/ghash-x86.pl b/src/lib/libcrypto/modes/asm/ghash-x86.pl
index 83c727e07f..27492597ad 100644
--- a/src/lib/libcrypto/modes/asm/ghash-x86.pl
+++ b/src/lib/libcrypto/modes/asm/ghash-x86.pl
@@ -86,7 +86,7 @@
 # where Tproc is time required for Karatsuba pre- and post-processing,
 # is more realistic estimate. In this case it gives ... 1.91 cycles.
 # Or in other words, depending on how well we can interleave reduction
-# and one of the two multiplications the performance should be betwen
+# and one of the two multiplications the performance should be between
 # 1.91 and 2.16. As already mentioned, this implementation processes
 # one byte out of 8KB buffer in 2.10 cycles, while x86_64 counterpart
 # - in 2.02. x86_64 performance is better, because larger register
@@ -700,7 +700,7 @@ sub mmx_loop() {
 &pxor ($red[1],$red[1]);
 &pxor ($red[2],$red[2]);

-# Just like in "May" verson modulo-schedule for critical path in
+# Just like in "May" version modulo-schedule for critical path in
 # 'Z.hi ^= rem_8bit[Z.lo&0xff^((u8)H[nhi]<<4)]<<48'. Final 'pxor'
 # is scheduled so late that rem_8bit[] has to be shifted *right*
 # by 16, which is why last argument to pinsrw is 2, which
@@ -1087,7 +1087,7 @@ my ($Xhi,$Xi) = @_;
 &movdqu (&QWP(0,$Xip),$Xi);
 &function_end("gcm_ghash_clmul");

-} else { # Algorith 5. Kept for reference purposes.
+} else { # Algorithm 5. Kept for reference purposes.

 sub reduction_alg5 { # 19/16 times faster than Intel version
 my ($Xhi,$Xi)=@_;
diff --git a/src/lib/libcrypto/modes/ctr128.c b/src/lib/libcrypto/modes/ctr128.c
index 9dd8c0c55e..eadb80449c 100644
--- a/src/lib/libcrypto/modes/ctr128.c
+++ b/src/lib/libcrypto/modes/ctr128.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ctr128.c,v 1.8 2022/11/26 16:08:53 tb Exp $ */
+/* $OpenBSD: ctr128.c,v 1.9 2022/12/26 07:18:52 jmc Exp $ */
 /* ====================================================================
  * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
  *
@@ -109,7 +109,7 @@ ctr128_inc_aligned(unsigned char *counter)
  * This algorithm assumes that the counter is in the x lower bits
  * of the IV (ivec), and that the application has full control over
  * overflow and the rest of the IV. This implementation takes NO
- * responsability for checking that the counter doesn't overflow
+ * responsibility for checking that the counter doesn't overflow
  * into the rest of the IV when incremented.
  */
 void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
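The comment touched by the hunk above spells out the caller's contract for CRYPTO_ctr128_encrypt: the counter occupies the low bits of ivec, and keeping it from overflowing into the rest of the IV is entirely the caller's problem. As a rough sketch only (the helper name, the 96-bit nonce / 32-bit counter split and the choice of AES_encrypt as the block function are illustrative assumptions, not part of this diff), a caller might look like:

#include <string.h>

#include <openssl/aes.h>
#include <openssl/modes.h>

/*
 * Hypothetical caller sketch, not part of this commit: AES-CTR via
 * CRYPTO_ctr128_encrypt, with a 96-bit nonce and a 32-bit big-endian
 * counter in the low bytes of ivec.  Per the comment above, it is the
 * caller's job to ensure the counter cannot overflow into the nonce
 * for the amount of data encrypted.
 */
static void
ctr_encrypt_sketch(const unsigned char *in, unsigned char *out, size_t len,
    const AES_KEY *aes, const unsigned char nonce[12])
{
    unsigned char ivec[16];
    unsigned char ecount_buf[16] = { 0 };   /* keystream scratch block */
    unsigned int num = 0;                   /* bytes already used from ecount_buf */

    memcpy(ivec, nonce, 12);                /* fixed part of the IV */
    memset(ivec + 12, 0, 4);                /* counter starts at 0 */

    CRYPTO_ctr128_encrypt(in, out, len, aes, ivec, ecount_buf, &num,
        (block128_f)AES_encrypt);
}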
@@ -228,7 +228,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
 (*func)(in,out,blocks,key,ivec);
 /* (*ctr) does not update ivec, caller does: */
 PUTU32(ivec+12,ctr32);
-/* ... overflow was detected, propogate carry. */
+/* ... overflow was detected, propagate carry. */
 if (ctr32 == 0) ctr96_inc(ivec);
 blocks *= 16;
 len -= blocks;
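The hunk above is in CRYPTO_ctr128_encrypt_ctr32, which treats only the low 32 bits of ivec as the counter: after the bulk call the counter is written back with PUTU32, and a wrap to zero means the carry must be propagated into the upper 96 bits via ctr96_inc. A minimal standalone sketch of that bookkeeping, using hypothetical names rather than the ctr128.c helpers:

#include <stdint.h>

/*
 * Illustrative sketch only (names are hypothetical, not the ctr128.c
 * functions): the low 4 bytes of the 16-byte IV hold a big-endian 32-bit
 * block counter; if advancing it wraps past zero, a carry is rippled
 * through the upper 96 bits, which is what ctr96_inc() does after the
 * "if (ctr32 == 0)" check above.
 */
static void
ctr128_add_blocks(unsigned char ivec[16], uint32_t blocks)
{
    uint32_t ctr32;
    int i;

    /* load the big-endian counter from ivec[12..15] */
    ctr32 = (uint32_t)ivec[12] << 24 | (uint32_t)ivec[13] << 16 |
        (uint32_t)ivec[14] << 8 | (uint32_t)ivec[15];

    ctr32 += blocks;

    /* store it back big-endian, as PUTU32(ivec+12, ctr32) does */
    ivec[12] = ctr32 >> 24;
    ivec[13] = ctr32 >> 16;
    ivec[14] = ctr32 >> 8;
    ivec[15] = ctr32;

    /* the addition wrapped: propagate the carry into the upper 96 bits */
    if (ctr32 < blocks) {
        for (i = 11; i >= 0; i--)
            if (++ivec[i] != 0)
                break;
    }
}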