Diffstat (limited to 'src/lib/libcrypto/modes/gcm128.c')
-rw-r--r--	src/lib/libcrypto/modes/gcm128.c	92
1 file changed, 46 insertions, 46 deletions
diff --git a/src/lib/libcrypto/modes/gcm128.c b/src/lib/libcrypto/modes/gcm128.c
index ed7373d56e..8714a33c2c 100644
--- a/src/lib/libcrypto/modes/gcm128.c
+++ b/src/lib/libcrypto/modes/gcm128.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: gcm128.c,v 1.39 2025/05/18 07:26:09 jsing Exp $ */
+/* $OpenBSD: gcm128.c,v 1.40 2025/05/18 09:05:59 jsing Exp $ */
 /* ====================================================================
  * Copyright (c) 2010 The OpenSSL Project. All rights reserved.
  *
@@ -56,10 +56,10 @@
 #include "modes_local.h"
 
 static void
-gcm_init_4bit(u128 Htable[16], u64 H[2])
+gcm_init_4bit(u128 Htable[16], uint64_t H[2])
 {
 	u128 V;
-	u64 T;
+	uint64_t T;
 	int i;
 
 	Htable[0].hi = 0;
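
Background on the table being initialised here (a sketch, not part of this diff): gcm_init_4bit() precomputes Htable[i] = i·H in GF(2^128). Because field addition is XOR, (a ^ b)·H equals a·H ^ b·H, so only the power-of-two entries need a genuine multiply-by-x; the remaining eleven entries can be filled by XOR alone, roughly as follows (the function name is illustrative; u128 is the type from modes_local.h):

/*
 * Hedged sketch: derive the composite entries of a 4-bit GHASH table
 * once Htable[1], Htable[2], Htable[4] and Htable[8] hold H·x^k.
 * In GF(2^128), (a ^ b)·H == a·H ^ b·H, so no field multiplies remain.
 */
static void
gcm_fill_composite_entries(u128 Htable[16])
{
	int i, j;

	for (i = 2; i < 16; i <<= 1) {
		for (j = 1; j < i; j++) {
			/* Index i + j has disjoint bits i and j. */
			Htable[i + j].hi = Htable[i].hi ^ Htable[j].hi;
			Htable[i + j].lo = Htable[i].lo ^ Htable[j].lo;
		}
	}
}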
@@ -113,13 +113,13 @@ static const uint16_t rem_4bit[16] = {
 };
 
 static void
-gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
+gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16])
 {
 	u128 Z;
 	int cnt = 15;
 	size_t rem, nlo, nhi;
 
-	nlo = ((const u8 *)Xi)[15];
+	nlo = ((const uint8_t *)Xi)[15];
 	nhi = nlo >> 4;
 	nlo &= 0xf;
 
@@ -130,21 +130,21 @@ gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
 		rem = (size_t)Z.lo & 0xf;
 		Z.lo = (Z.hi << 60)|(Z.lo >> 4);
 		Z.hi = (Z.hi >> 4);
-		Z.hi ^= (u64)rem_4bit[rem] << 48;
+		Z.hi ^= (uint64_t)rem_4bit[rem] << 48;
 		Z.hi ^= Htable[nhi].hi;
 		Z.lo ^= Htable[nhi].lo;
 
 		if (--cnt < 0)
 			break;
 
-		nlo = ((const u8 *)Xi)[cnt];
+		nlo = ((const uint8_t *)Xi)[cnt];
 		nhi = nlo >> 4;
 		nlo &= 0xf;
 
 		rem = (size_t)Z.lo & 0xf;
 		Z.lo = (Z.hi << 60)|(Z.lo >> 4);
 		Z.hi = (Z.hi >> 4);
-		Z.hi ^= (u64)rem_4bit[rem] << 48;
+		Z.hi ^= (uint64_t)rem_4bit[rem] << 48;
 		Z.hi ^= Htable[nlo].hi;
 		Z.lo ^= Htable[nlo].lo;
 	}
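
The loop above consumes Xi one nibble at a time, using rem_4bit to fold the four bits shifted out of the bottom of Z back into the top of Z.hi. For comparison, the same multiply can be written one bit at a time straight from the GCM spec. A hedged reference sketch (not code from this file; it assumes Xi and H already hold their two 64-bit halves in host order, i.e. after the htobe64() conversions seen above have been undone):

#include <stdint.h>

/*
 * Reference bit-at-a-time GHASH multiply, Xi = Xi·H, following the
 * spec's reflected-bit convention.  Roughly 16x slower than the 4-bit
 * table walk above; shown only to make the table version's structure
 * easier to check.
 */
static void
gcm_gmult_ref(uint64_t Xi[2], const uint64_t H[2])
{
	uint64_t Zhi = 0, Zlo = 0;
	uint64_t Vhi = H[0], Vlo = H[1];
	int i;

	for (i = 0; i < 128; i++) {
		/* If bit i of Xi (MSB first) is set, Z ^= V. */
		if ((Xi[i >> 6] >> (63 - (i & 63))) & 1) {
			Zhi ^= Vhi;
			Zlo ^= Vlo;
		}
		/*
		 * V = V·x: a right shift in the reflected representation,
		 * reducing by the GCM polynomial via the 0xe1 constant.
		 */
		if (Vlo & 1) {
			Vlo = (Vhi << 63) | (Vlo >> 1);
			Vhi = (Vhi >> 1) ^ 0xe100000000000000ULL;
		} else {
			Vlo = (Vhi << 63) | (Vlo >> 1);
			Vhi >>= 1;
		}
	}
	Xi[0] = Zhi;
	Xi[1] = Zlo;
}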
@@ -161,8 +161,8 @@ gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
  * non-trivial optimization[s]...
  */
 static void
-gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
-    const u8 *inp, size_t len)
+gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16],
+    const uint8_t *inp, size_t len)
 {
 	u128 Z;
 	int cnt;
@@ -171,7 +171,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 #if 1
 	do {
 		cnt = 15;
-		nlo = ((const u8 *)Xi)[15];
+		nlo = ((const uint8_t *)Xi)[15];
 		nlo ^= inp[15];
 		nhi = nlo >> 4;
 		nlo &= 0xf;
@@ -183,14 +183,14 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 			rem = (size_t)Z.lo & 0xf;
 			Z.lo = (Z.hi << 60)|(Z.lo >> 4);
 			Z.hi = (Z.hi >> 4);
-			Z.hi ^= (u64)rem_4bit[rem] << 48;
+			Z.hi ^= (uint64_t)rem_4bit[rem] << 48;
 			Z.hi ^= Htable[nhi].hi;
 			Z.lo ^= Htable[nhi].lo;
 
 			if (--cnt < 0)
 				break;
 
-			nlo = ((const u8 *)Xi)[cnt];
+			nlo = ((const uint8_t *)Xi)[cnt];
 			nlo ^= inp[cnt];
 			nhi = nlo >> 4;
 			nlo &= 0xf;
@@ -198,7 +198,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 			rem = (size_t)Z.lo & 0xf;
 			Z.lo = (Z.hi << 60)|(Z.lo >> 4);
 			Z.hi = (Z.hi >> 4);
-			Z.hi ^= (u64)rem_4bit[rem] << 48;
+			Z.hi ^= (uint64_t)rem_4bit[rem] << 48;
 			Z.hi ^= Htable[nlo].hi;
 			Z.lo ^= Htable[nlo].lo;
 		}
@@ -210,7 +210,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 	 * cache footprint...
 	 */
 	u128 Hshr4[16];	/* Htable shifted right by 4 bits */
-	u8 Hshl4[16];	/* Htable shifted left by 4 bits */
+	uint8_t Hshl4[16];	/* Htable shifted left by 4 bits */
 	static const unsigned short rem_8bit[256] = {
 		0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E,
 		0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E,
@@ -255,12 +255,12 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 		Z.lo = Htable[cnt].lo;
 		Hshr4[cnt].lo = (Z.hi << 60)|(Z.lo >> 4);
 		Hshr4[cnt].hi = (Z.hi >> 4);
-		Hshl4[cnt] = (u8)(Z.lo << 4);
+		Hshl4[cnt] = (uint8_t)(Z.lo << 4);
 	}
 
 	do {
 		for (Z.lo = 0, Z.hi = 0, cnt = 15; cnt; --cnt) {
-			nlo = ((const u8 *)Xi)[cnt];
+			nlo = ((const uint8_t *)Xi)[cnt];
 			nlo ^= inp[cnt];
 			nhi = nlo >> 4;
 			nlo &= 0xf;
@@ -275,10 +275,10 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 
 			Z.hi ^= Hshr4[nhi].hi;
 			Z.lo ^= Hshr4[nhi].lo;
-			Z.hi ^= (u64)rem_8bit[rem ^ Hshl4[nhi]] << 48;
+			Z.hi ^= (uint64_t)rem_8bit[rem ^ Hshl4[nhi]] << 48;
 		}
 
-		nlo = ((const u8 *)Xi)[0];
+		nlo = ((const uint8_t *)Xi)[0];
 		nlo ^= inp[0];
 		nhi = nlo >> 4;
 		nlo &= 0xf;
@@ -293,7 +293,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 
 		Z.hi ^= Htable[nhi].hi;
 		Z.lo ^= Htable[nhi].lo;
-		Z.hi ^= ((u64)rem_8bit[rem << 4]) << 48;
+		Z.hi ^= ((uint64_t)rem_8bit[rem << 4]) << 48;
 #endif
 
 		Xi[0] = htobe64(Z.hi);
@@ -302,7 +302,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 }
 
 static inline void
-gcm_mul(GCM128_CONTEXT *ctx, u64 u[2])
+gcm_mul(GCM128_CONTEXT *ctx, uint64_t u[2])
 {
 	gcm_gmult_4bit(u, ctx->Htable);
 }
@@ -313,12 +313,12 @@ gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len)
 	gcm_ghash_4bit(ctx->Xi.u, ctx->Htable, in, len);
 }
 #else
-void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
-void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]);
+void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
     size_t len);
 
 static inline void
-gcm_mul(GCM128_CONTEXT *ctx, u64 u[2])
+gcm_mul(GCM128_CONTEXT *ctx, uint64_t u[2])
 {
 	ctx->gmult(u, ctx->Htable);
 }
@@ -350,27 +350,27 @@ gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len)
     defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
 # define GHASH_ASM_X86_OR_64
 
-void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]);
-void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]);
-void gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
+void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]);
+void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
     size_t len);
 
 # if defined(__i386) || defined(__i386__) || defined(_M_IX86)
 #  define GHASH_ASM_X86
-void gcm_gmult_4bit_mmx(u64 Xi[2], const u128 Htable[16]);
-void gcm_ghash_4bit_mmx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+void gcm_gmult_4bit_mmx(uint64_t Xi[2], const u128 Htable[16]);
+void gcm_ghash_4bit_mmx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
     size_t len);
 
-void gcm_gmult_4bit_x86(u64 Xi[2], const u128 Htable[16]);
-void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+void gcm_gmult_4bit_x86(uint64_t Xi[2], const u128 Htable[16]);
+void gcm_ghash_4bit_x86(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
     size_t len);
 # endif
 # elif defined(__arm__) || defined(__arm)
 #  include "arm_arch.h"
 #  if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
 #   define GHASH_ASM_ARM
-void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
-void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]);
+void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
     size_t len);
 #  endif
 # endif
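
These declarations are the assembly GHASH back ends; since gcm_mul() above goes through the context's gmult function pointer in this configuration, selecting an implementation is a matter of wiring the pointers up at key setup. A hypothetical sketch of that wiring (cpu_has_clmul is an illustrative flag, not a symbol from this file; the gmult/ghash/Htable/H fields are the real context members used above):

/*
 * Hedged sketch: runtime dispatch between the CLMUL back end and the
 * generic 4-bit code, assuming some CPU-feature test is available.
 */
if (cpu_has_clmul) {
	gcm_init_clmul(ctx->Htable, ctx->H.u);
	ctx->gmult = gcm_gmult_clmul;
	ctx->ghash = gcm_ghash_clmul;
} else {
	gcm_init_4bit(ctx->Htable, ctx->H.u);
	ctx->gmult = gcm_gmult_4bit;
	ctx->ghash = gcm_ghash_4bit;
}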
@@ -452,7 +452,7 @@ CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, size_t len)
 		ctr = 1;
 	} else {
 		size_t i;
-		u64 len0 = len;
+		uint64_t len0 = len;
 
 		while (len >= 16) {
 			for (i = 0; i < 16; ++i)
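
For orientation, this is the general-IV branch of CRYPTO_gcm128_setiv(): per the GCM spec, a 96-bit IV yields the counter block IV || 0^31 || 1 directly (hence ctr = 1 in the branch above), while any other length derives Y0 by running GHASH over the zero-padded IV followed by a final block carrying the IV's bit length, which is why len0 saves len here before the loop consumes it.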
@@ -485,7 +485,7 @@ CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, size_t len)
 {
 	size_t i;
 	unsigned int n;
-	u64 alen = ctx->len.u[0];
+	uint64_t alen = ctx->len.u[0];
 
 	if (ctx->len.u[1])
 		return -2;
@@ -533,7 +533,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 {
 	unsigned int n, ctr;
 	size_t i;
-	u64 mlen = ctx->len.u[1];
+	uint64_t mlen = ctx->len.u[1];
 	block128_f block = ctx->block;
 	void *key = ctx->key;
 
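
The running total mlen exists to enforce GCM's per-message plaintext bound of 2^36 - 32 bytes. A sketch of the style of check performed right after this declaration block (the exact in-tree spelling sits outside this hunk):

/* Hedged sketch of the length-bound check, not a quote of the file. */
mlen += len;
if (mlen > ((uint64_t)1 << 36) - 32 ||
    (sizeof(len) == 8 && mlen < len))
	return -1;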
@@ -670,7 +670,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 {
 	unsigned int n, ctr;
 	size_t i;
-	u64 mlen = ctx->len.u[1];
+	uint64_t mlen = ctx->len.u[1];
 	block128_f block = ctx->block;
 	void *key = ctx->key;
 
@@ -692,7 +692,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 	do {	/* always true actually */
 		if (n) {
 			while (n && len) {
-				u8 c = *(in++);
+				uint8_t c = *(in++);
 				*(out++) = c ^ ctx->EKi.c[n];
 				ctx->Xi.c[n] ^= c;
 				--len;
@@ -775,7 +775,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 			ctx->Yi.d[3] = htobe32(ctr);
 
 			while (len--) {
-				u8 c = in[n];
+				uint8_t c = in[n];
 				ctx->Xi.c[n] ^= c;
 				out[n] = c ^ ctx->EKi.c[n];
 				++n;
@@ -786,7 +786,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 		return 0;
 	} while (0);
 	for (i = 0; i < len; ++i) {
-		u8 c;
+		uint8_t c;
 		if (n == 0) {
 			(*block)(ctx->Yi.c, ctx->EKi.c, key);
 			++ctr;
@@ -812,7 +812,7 @@ CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
 {
 	unsigned int n, ctr;
 	size_t i;
-	u64 mlen = ctx->len.u[1];
+	uint64_t mlen = ctx->len.u[1];
 	void *key = ctx->key;
 
 	mlen += len;
@@ -886,7 +886,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
 {
 	unsigned int n, ctr;
 	size_t i;
-	u64 mlen = ctx->len.u[1];
+	uint64_t mlen = ctx->len.u[1];
 	void *key = ctx->key;
 
 	mlen += len;
@@ -905,7 +905,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
 	n = ctx->mres;
 	if (n) {
 		while (n && len) {
-			u8 c = *(in++);
+			uint8_t c = *(in++);
 			*(out++) = c ^ ctx->EKi.c[n];
 			ctx->Xi.c[n] ^= c;
 			--len;
@@ -945,7 +945,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
 		++ctr;
 		ctx->Yi.d[3] = htobe32(ctr);
 		while (len--) {
-			u8 c = in[n];
+			uint8_t c = in[n];
 			ctx->Xi.c[n] ^= c;
 			out[n] = c ^ ctx->EKi.c[n];
 			++n;
@@ -961,8 +961,8 @@ int
 CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
     size_t len)
 {
-	u64 alen = ctx->len.u[0] << 3;
-	u64 clen = ctx->len.u[1] << 3;
+	uint64_t alen = ctx->len.u[0] << 3;
+	uint64_t clen = ctx->len.u[1] << 3;
 
 	if (ctx->mres || ctx->ares)
 		gcm_mul(ctx, ctx->Xi.u);
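
Here alen and clen are the accumulated AAD and ciphertext byte counts converted to bit counts (<< 3); together they form the final len(A) || len(C) block that GHASH absorbs before the tag is produced. To place the functions touched by this diff in context, a hedged end-to-end usage sketch with AES as the block128_f (the helper name and the 16-byte tag length are illustrative, not from LibreSSL):

#include <openssl/aes.h>
#include <openssl/modes.h>

/*
 * Hedged sketch: AES-128-GCM sealing via the gcm128 API.  Returns 0 on
 * success, -1 on failure.  The (block128_f) cast adapts AES_encrypt's
 * AES_KEY pointer to the generic void *key parameter.
 */
static int
gcm_seal_sketch(const unsigned char key[16],
    const unsigned char *iv, size_t iv_len,
    const unsigned char *aad, size_t aad_len,
    const unsigned char *pt, unsigned char *ct, size_t len,
    unsigned char tag[16])
{
	AES_KEY aes;
	GCM128_CONTEXT *ctx;
	int ret = -1;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;
	if ((ctx = CRYPTO_gcm128_new(&aes, (block128_f)AES_encrypt)) == NULL)
		return -1;
	CRYPTO_gcm128_setiv(ctx, iv, iv_len);
	if (CRYPTO_gcm128_aad(ctx, aad, aad_len) != 0)
		goto done;
	if (CRYPTO_gcm128_encrypt(ctx, pt, ct, len) != 0)
		goto done;
	/* Finalizes GHASH over len(A) || len(C) and copies the tag. */
	CRYPTO_gcm128_tag(ctx, tag, 16);
	ret = 0;
 done:
	CRYPTO_gcm128_release(ctx);
	return ret;
}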