summaryrefslogtreecommitdiff
path: root/src/lib/libcrypto/modes/gcm128.c
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--src/lib/libcrypto/modes/gcm128.c92
1 file changed, 13 insertions, 79 deletions
diff --git a/src/lib/libcrypto/modes/gcm128.c b/src/lib/libcrypto/modes/gcm128.c
index 6c89bd44b7..21ba9eef57 100644
--- a/src/lib/libcrypto/modes/gcm128.c
+++ b/src/lib/libcrypto/modes/gcm128.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: gcm128.c,v 1.27 2024/09/06 09:57:32 tb Exp $ */ 1/* $OpenBSD: gcm128.c,v 1.35 2025/04/25 12:08:53 jsing Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2010 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2010 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -48,8 +48,6 @@
48 * ==================================================================== 48 * ====================================================================
49 */ 49 */
50 50
51#define OPENSSL_FIPSAPI
52
53#include <string.h> 51#include <string.h>
54 52
55#include <openssl/crypto.h> 53#include <openssl/crypto.h>
@@ -57,18 +55,6 @@
57#include "crypto_internal.h" 55#include "crypto_internal.h"
58#include "modes_local.h" 56#include "modes_local.h"
59 57
60#ifndef MODES_DEBUG
61# ifndef NDEBUG
62# define NDEBUG
63# endif
64#endif
65
66#if defined(BSWAP4) && defined(__STRICT_ALIGNMENT)
67/* redefine, because alignment is ensured */
68#undef GETU32
69#define GETU32(p) BSWAP4(*(const u32 *)(p))
70#endif
71
72#define PACK(s) ((size_t)(s)<<(sizeof(size_t)*8-16)) 58#define PACK(s) ((size_t)(s)<<(sizeof(size_t)*8-16))
73#define REDUCE1BIT(V) \ 59#define REDUCE1BIT(V) \
74 do { \ 60 do { \
@@ -246,16 +232,13 @@ static void
246gcm_init_4bit(u128 Htable[16], u64 H[2]) 232gcm_init_4bit(u128 Htable[16], u64 H[2])
247{ 233{
248 u128 V; 234 u128 V;
249#if defined(OPENSSL_SMALL_FOOTPRINT)
250 int i; 235 int i;
251#endif
252 236
253 Htable[0].hi = 0; 237 Htable[0].hi = 0;
254 Htable[0].lo = 0; 238 Htable[0].lo = 0;
255 V.hi = H[0]; 239 V.hi = H[0];
256 V.lo = H[1]; 240 V.lo = H[1];
257 241
258#if defined(OPENSSL_SMALL_FOOTPRINT)
259 for (Htable[8] = V, i = 4; i > 0; i >>= 1) { 242 for (Htable[8] = V, i = 4; i > 0; i >>= 1) {
260 REDUCE1BIT(V); 243 REDUCE1BIT(V);
261 Htable[i] = V; 244 Htable[i] = V;
@@ -269,34 +252,7 @@ gcm_init_4bit(u128 Htable[16], u64 H[2])
269 Hi[j].lo = V.lo ^ Htable[j].lo; 252 Hi[j].lo = V.lo ^ Htable[j].lo;
270 } 253 }
271 } 254 }
272#else 255
273 Htable[8] = V;
274 REDUCE1BIT(V);
275 Htable[4] = V;
276 REDUCE1BIT(V);
277 Htable[2] = V;
278 REDUCE1BIT(V);
279 Htable[1] = V;
280 Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
281 V = Htable[4];
282 Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
283 Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
284 Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
285 V = Htable[8];
286 Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
287 Htable[10].hi = V.hi ^ Htable[2].hi,
288 Htable[10].lo = V.lo ^ Htable[2].lo;
289 Htable[11].hi = V.hi ^ Htable[3].hi,
290 Htable[11].lo = V.lo ^ Htable[3].lo;
291 Htable[12].hi = V.hi ^ Htable[4].hi,
292 Htable[12].lo = V.lo ^ Htable[4].lo;
293 Htable[13].hi = V.hi ^ Htable[5].hi,
294 Htable[13].lo = V.lo ^ Htable[5].lo;
295 Htable[14].hi = V.hi ^ Htable[6].hi,
296 Htable[14].lo = V.lo ^ Htable[6].lo;
297 Htable[15].hi = V.hi ^ Htable[7].hi,
298 Htable[15].lo = V.lo ^ Htable[7].lo;
299#endif
300#if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm)) 256#if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm))
301 /* 257 /*
302 * ARM assembler expects specific dword order in Htable. 258 * ARM assembler expects specific dword order in Htable.
@@ -376,7 +332,6 @@ gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
376 Xi[1] = htobe64(Z.lo); 332 Xi[1] = htobe64(Z.lo);
377} 333}
378 334
379#if !defined(OPENSSL_SMALL_FOOTPRINT)
380/* 335/*
381 * Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for 336 * Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for
382 * details... Compiler-generated code doesn't seem to give any 337 * details... Compiler-generated code doesn't seem to give any
@@ -532,7 +487,6 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
532 Xi[1] = htobe64(Z.lo); 487 Xi[1] = htobe64(Z.lo);
533 } while (inp += 16, len -= 16); 488 } while (inp += 16, len -= 16);
534} 489}
535#endif
536#else 490#else
537void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]); 491void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
538void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp, 492void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
@@ -540,48 +494,32 @@ void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
540#endif 494#endif
541 495
542#define GCM_MUL(ctx,Xi) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable) 496#define GCM_MUL(ctx,Xi) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable)
543#if defined(GHASH_ASM) || !defined(OPENSSL_SMALL_FOOTPRINT)
544#define GHASH(ctx,in,len) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len) 497#define GHASH(ctx,in,len) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len)
545/* GHASH_CHUNK is "stride parameter" missioned to mitigate cache 498/* GHASH_CHUNK is "stride parameter" missioned to mitigate cache
546 * trashing effect. In other words idea is to hash data while it's 499 * trashing effect. In other words idea is to hash data while it's
547 * still in L1 cache after encryption pass... */ 500 * still in L1 cache after encryption pass... */
548#define GHASH_CHUNK (3*1024) 501#define GHASH_CHUNK (3*1024)
549#endif
550 502
551#else /* TABLE_BITS */ 503#else /* TABLE_BITS */
552 504
553static void 505static void
554gcm_gmult_1bit(u64 Xi[2], const u64 H[2]) 506gcm_gmult_1bit(u64 Xi[2], const u64 H[2])
555{ 507{
556 u128 V, Z = { 0,0 }; 508 u128 V, Z = { 0, 0 };
557 long X; 509 u64 X;
558 int i, j; 510 int i, j;
559 const long *xi = (const long *)Xi;
560 511
561 V.hi = H[0]; /* H is in host byte order, no byte swapping */ 512 V.hi = H[0]; /* H is in host byte order, no byte swapping */
562 V.lo = H[1]; 513 V.lo = H[1];
563 514
564 for (j = 0; j < 16/sizeof(long); ++j) { 515 for (j = 0; j < 2; j++) {
565#if BYTE_ORDER == LITTLE_ENDIAN 516 X = be64toh(Xi[j]);
566#if SIZE_MAX == 0xffffffffffffffff
567#ifdef BSWAP8
568 X = (long)(BSWAP8(xi[j]));
569#else
570 const u8 *p = (const u8 *)(xi + j);
571 X = (long)((u64)GETU32(p) << 32|GETU32(p + 4));
572#endif
573#else
574 const u8 *p = (const u8 *)(xi + j);
575 X = (long)GETU32(p);
576#endif
577#else /* BIG_ENDIAN */
578 X = xi[j];
579#endif
580 517
581 for (i = 0; i < 8*sizeof(long); ++i, X <<= 1) { 518 for (i = 0; i < 64; i++) {
582 u64 M = (u64)(X >> (8*sizeof(long) - 1)); 519 u64 M = 0 - (X >> 63);
583 Z.hi ^= V.hi & M; 520 Z.hi ^= V.hi & M;
584 Z.lo ^= V.lo & M; 521 Z.lo ^= V.lo & M;
522 X <<= 1;
585 523
586 REDUCE1BIT(V); 524 REDUCE1BIT(V);
587 } 525 }
@@ -850,7 +788,6 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
850 ctr = be32toh(ctx->Yi.d[3]); 788 ctr = be32toh(ctx->Yi.d[3]);
851 789
852 n = ctx->mres; 790 n = ctx->mres;
853#if !defined(OPENSSL_SMALL_FOOTPRINT)
854 if (16 % sizeof(size_t) == 0) 791 if (16 % sizeof(size_t) == 0)
855 do { /* always true actually */ 792 do { /* always true actually */
856 if (n) { 793 if (n) {
@@ -946,7 +883,6 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
946 ctx->mres = n; 883 ctx->mres = n;
947 return 0; 884 return 0;
948 } while (0); 885 } while (0);
949#endif
950 for (i = 0; i < len; ++i) { 886 for (i = 0; i < len; ++i) {
951 if (n == 0) { 887 if (n == 0) {
952 (*block)(ctx->Yi.c, ctx->EKi.c, key); 888 (*block)(ctx->Yi.c, ctx->EKi.c, key);
@@ -996,7 +932,6 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
996 ctr = be32toh(ctx->Yi.d[3]); 932 ctr = be32toh(ctx->Yi.d[3]);
997 933
998 n = ctx->mres; 934 n = ctx->mres;
999#if !defined(OPENSSL_SMALL_FOOTPRINT)
1000 if (16 % sizeof(size_t) == 0) 935 if (16 % sizeof(size_t) == 0)
1001 do { /* always true actually */ 936 do { /* always true actually */
1002 if (n) { 937 if (n) {
@@ -1068,8 +1003,8 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
1068 ctx->Yi.d[3] = htobe32(ctr); 1003 ctx->Yi.d[3] = htobe32(ctr);
1069 1004
1070 for (i = 0; i < 16/sizeof(size_t); ++i) { 1005 for (i = 0; i < 16/sizeof(size_t); ++i) {
1071 size_t c = in[i]; 1006 size_t c = in_t[i];
1072 out[i] = c ^ ctx->EKi.t[i]; 1007 out_t[i] = c ^ ctx->EKi.t[i];
1073 ctx->Xi.t[i] ^= c; 1008 ctx->Xi.t[i] ^= c;
1074 } 1009 }
1075 GCM_MUL(ctx, Xi); 1010 GCM_MUL(ctx, Xi);
@@ -1094,7 +1029,6 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
1094 ctx->mres = n; 1029 ctx->mres = n;
1095 return 0; 1030 return 0;
1096 } while (0); 1031 } while (0);
1097#endif
1098 for (i = 0; i < len; ++i) { 1032 for (i = 0; i < len; ++i) {
1099 u8 c; 1033 u8 c;
1100 if (n == 0) { 1034 if (n == 0) {
@@ -1159,7 +1093,7 @@ CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
1159 return 0; 1093 return 0;
1160 } 1094 }
1161 } 1095 }
1162#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT) 1096#if defined(GHASH) && defined(GHASH_CHUNK)
1163 while (len >= GHASH_CHUNK) { 1097 while (len >= GHASH_CHUNK) {
1164 (*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c); 1098 (*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c);
1165 ctr += GHASH_CHUNK/16; 1099 ctr += GHASH_CHUNK/16;
@@ -1251,7 +1185,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
1251 return 0; 1185 return 0;
1252 } 1186 }
1253 } 1187 }
1254#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT) 1188#if defined(GHASH) && defined(GHASH_CHUNK)
1255 while (len >= GHASH_CHUNK) { 1189 while (len >= GHASH_CHUNK) {
1256 GHASH(ctx, in, GHASH_CHUNK); 1190 GHASH(ctx, in, GHASH_CHUNK);
1257 (*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c); 1191 (*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c);