diff options
| author | jsing <> | 2025-05-18 09:05:59 +0000 |
|---|---|---|
| committer | jsing <> | 2025-05-18 09:05:59 +0000 |
| commit | 0ac35676ac1491f404111288e14d55d54ca52445 (patch) | |
| tree | 54a864104bc0a796ddb2dd6600b7ec570965e978 /src | |
| parent | ac720367d191f33e5f43cb4b8f4e1fc28766bf5a (diff) | |
| download | openbsd-0ac35676ac1491f404111288e14d55d54ca52445.tar.gz openbsd-0ac35676ac1491f404111288e14d55d54ca52445.tar.bz2 openbsd-0ac35676ac1491f404111288e14d55d54ca52445.zip | |
Use stdint types instead of u64/u32/u8.
No change in generated assembly.
Diffstat (limited to 'src')
| -rw-r--r-- | src/lib/libcrypto/modes/ccm128.c | 90 | ||||
| -rw-r--r-- | src/lib/libcrypto/modes/ctr128.c | 12 | ||||
| -rw-r--r-- | src/lib/libcrypto/modes/gcm128.c | 92 | ||||
| -rw-r--r-- | src/lib/libcrypto/modes/modes_local.h | 27 | ||||
| -rw-r--r-- | src/lib/libcrypto/modes/xts128.c | 40 |
5 files changed, 127 insertions, 134 deletions
diff --git a/src/lib/libcrypto/modes/ccm128.c b/src/lib/libcrypto/modes/ccm128.c index 0f592dd9e5..c4df198c0b 100644 --- a/src/lib/libcrypto/modes/ccm128.c +++ b/src/lib/libcrypto/modes/ccm128.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* $OpenBSD: ccm128.c,v 1.10 2025/04/21 16:01:18 jsing Exp $ */ | 1 | /* $OpenBSD: ccm128.c,v 1.11 2025/05/18 09:05:59 jsing Exp $ */ |
| 2 | /* ==================================================================== | 2 | /* ==================================================================== |
| 3 | * Copyright (c) 2011 The OpenSSL Project. All rights reserved. | 3 | * Copyright (c) 2011 The OpenSSL Project. All rights reserved. |
| 4 | * | 4 | * |
| @@ -61,7 +61,7 @@ CRYPTO_ccm128_init(CCM128_CONTEXT *ctx, | |||
| 61 | unsigned int M, unsigned int L, void *key, block128_f block) | 61 | unsigned int M, unsigned int L, void *key, block128_f block) |
| 62 | { | 62 | { |
| 63 | memset(ctx->nonce.c, 0, sizeof(ctx->nonce.c)); | 63 | memset(ctx->nonce.c, 0, sizeof(ctx->nonce.c)); |
| 64 | ctx->nonce.c[0] = ((u8)(L - 1) & 7) | (u8)(((M - 2)/2) & 7) << 3; | 64 | ctx->nonce.c[0] = ((uint8_t)(L - 1) & 7) | (uint8_t)(((M - 2)/2) & 7) << 3; |
| 65 | ctx->blocks = 0; | 65 | ctx->blocks = 0; |
| 66 | ctx->block = block; | 66 | ctx->block = block; |
| 67 | ctx->key = key; | 67 | ctx->key = key; |
| @@ -81,17 +81,17 @@ CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx, | |||
| 81 | return -1; /* nonce is too short */ | 81 | return -1; /* nonce is too short */ |
| 82 | 82 | ||
| 83 | if (sizeof(mlen) == 8 && L >= 3) { | 83 | if (sizeof(mlen) == 8 && L >= 3) { |
| 84 | ctx->nonce.c[8] = (u8)(mlen >> (56 % (sizeof(mlen)*8))); | 84 | ctx->nonce.c[8] = (uint8_t)(mlen >> (56 % (sizeof(mlen)*8))); |
| 85 | ctx->nonce.c[9] = (u8)(mlen >> (48 % (sizeof(mlen)*8))); | 85 | ctx->nonce.c[9] = (uint8_t)(mlen >> (48 % (sizeof(mlen)*8))); |
| 86 | ctx->nonce.c[10] = (u8)(mlen >> (40 % (sizeof(mlen)*8))); | 86 | ctx->nonce.c[10] = (uint8_t)(mlen >> (40 % (sizeof(mlen)*8))); |
| 87 | ctx->nonce.c[11] = (u8)(mlen >> (32 % (sizeof(mlen)*8))); | 87 | ctx->nonce.c[11] = (uint8_t)(mlen >> (32 % (sizeof(mlen)*8))); |
| 88 | } else | 88 | } else |
| 89 | ctx->nonce.u[1] = 0; | 89 | ctx->nonce.u[1] = 0; |
| 90 | 90 | ||
| 91 | ctx->nonce.c[12] = (u8)(mlen >> 24); | 91 | ctx->nonce.c[12] = (uint8_t)(mlen >> 24); |
| 92 | ctx->nonce.c[13] = (u8)(mlen >> 16); | 92 | ctx->nonce.c[13] = (uint8_t)(mlen >> 16); |
| 93 | ctx->nonce.c[14] = (u8)(mlen >> 8); | 93 | ctx->nonce.c[14] = (uint8_t)(mlen >> 8); |
| 94 | ctx->nonce.c[15] = (u8)mlen; | 94 | ctx->nonce.c[15] = (uint8_t)mlen; |
| 95 | 95 | ||
| 96 | ctx->nonce.c[0] &= ~0x40; /* clear Adata flag */ | 96 | ctx->nonce.c[0] &= ~0x40; /* clear Adata flag */ |
| 97 | memcpy(&ctx->nonce.c[1], nonce, 14 - L); | 97 | memcpy(&ctx->nonce.c[1], nonce, 14 - L); |
| @@ -116,29 +116,29 @@ CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx, | |||
| 116 | ctx->blocks++; | 116 | ctx->blocks++; |
| 117 | 117 | ||
| 118 | if (alen < (0x10000 - 0x100)) { | 118 | if (alen < (0x10000 - 0x100)) { |
| 119 | ctx->cmac.c[0] ^= (u8)(alen >> 8); | 119 | ctx->cmac.c[0] ^= (uint8_t)(alen >> 8); |
| 120 | ctx->cmac.c[1] ^= (u8)alen; | 120 | ctx->cmac.c[1] ^= (uint8_t)alen; |
| 121 | i = 2; | 121 | i = 2; |
| 122 | } else if (sizeof(alen) == 8 && | 122 | } else if (sizeof(alen) == 8 && |
| 123 | alen >= (size_t)1 << (32 % (sizeof(alen)*8))) { | 123 | alen >= (size_t)1 << (32 % (sizeof(alen)*8))) { |
| 124 | ctx->cmac.c[0] ^= 0xFF; | 124 | ctx->cmac.c[0] ^= 0xFF; |
| 125 | ctx->cmac.c[1] ^= 0xFF; | 125 | ctx->cmac.c[1] ^= 0xFF; |
| 126 | ctx->cmac.c[2] ^= (u8)(alen >> (56 % (sizeof(alen)*8))); | 126 | ctx->cmac.c[2] ^= (uint8_t)(alen >> (56 % (sizeof(alen)*8))); |
| 127 | ctx->cmac.c[3] ^= (u8)(alen >> (48 % (sizeof(alen)*8))); | 127 | ctx->cmac.c[3] ^= (uint8_t)(alen >> (48 % (sizeof(alen)*8))); |
| 128 | ctx->cmac.c[4] ^= (u8)(alen >> (40 % (sizeof(alen)*8))); | 128 | ctx->cmac.c[4] ^= (uint8_t)(alen >> (40 % (sizeof(alen)*8))); |
| 129 | ctx->cmac.c[5] ^= (u8)(alen >> (32 % (sizeof(alen)*8))); | 129 | ctx->cmac.c[5] ^= (uint8_t)(alen >> (32 % (sizeof(alen)*8))); |
| 130 | ctx->cmac.c[6] ^= (u8)(alen >> 24); | 130 | ctx->cmac.c[6] ^= (uint8_t)(alen >> 24); |
| 131 | ctx->cmac.c[7] ^= (u8)(alen >> 16); | 131 | ctx->cmac.c[7] ^= (uint8_t)(alen >> 16); |
| 132 | ctx->cmac.c[8] ^= (u8)(alen >> 8); | 132 | ctx->cmac.c[8] ^= (uint8_t)(alen >> 8); |
| 133 | ctx->cmac.c[9] ^= (u8)alen; | 133 | ctx->cmac.c[9] ^= (uint8_t)alen; |
| 134 | i = 10; | 134 | i = 10; |
| 135 | } else { | 135 | } else { |
| 136 | ctx->cmac.c[0] ^= 0xFF; | 136 | ctx->cmac.c[0] ^= 0xFF; |
| 137 | ctx->cmac.c[1] ^= 0xFE; | 137 | ctx->cmac.c[1] ^= 0xFE; |
| 138 | ctx->cmac.c[2] ^= (u8)(alen >> 24); | 138 | ctx->cmac.c[2] ^= (uint8_t)(alen >> 24); |
| 139 | ctx->cmac.c[3] ^= (u8)(alen >> 16); | 139 | ctx->cmac.c[3] ^= (uint8_t)(alen >> 16); |
| 140 | ctx->cmac.c[4] ^= (u8)(alen >> 8); | 140 | ctx->cmac.c[4] ^= (uint8_t)(alen >> 8); |
| 141 | ctx->cmac.c[5] ^= (u8)alen; | 141 | ctx->cmac.c[5] ^= (uint8_t)alen; |
| 142 | i = 6; | 142 | i = 6; |
| 143 | } | 143 | } |
| 144 | 144 | ||
| @@ -160,7 +160,7 @@ static void | |||
| 160 | ctr64_inc(unsigned char *counter) | 160 | ctr64_inc(unsigned char *counter) |
| 161 | { | 161 | { |
| 162 | unsigned int n = 8; | 162 | unsigned int n = 8; |
| 163 | u8 c; | 163 | uint8_t c; |
| 164 | 164 | ||
| 165 | counter += 8; | 165 | counter += 8; |
| 166 | do { | 166 | do { |
| @@ -184,8 +184,8 @@ CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx, | |||
| 184 | block128_f block = ctx->block; | 184 | block128_f block = ctx->block; |
| 185 | void *key = ctx->key; | 185 | void *key = ctx->key; |
| 186 | union { | 186 | union { |
| 187 | u64 u[2]; | 187 | uint64_t u[2]; |
| 188 | u8 c[16]; | 188 | uint8_t c[16]; |
| 189 | } scratch; | 189 | } scratch; |
| 190 | 190 | ||
| 191 | if (!(flags0 & 0x40)) | 191 | if (!(flags0 & 0x40)) |
| @@ -211,16 +211,16 @@ CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx, | |||
| 211 | while (len >= 16) { | 211 | while (len >= 16) { |
| 212 | #ifdef __STRICT_ALIGNMENT | 212 | #ifdef __STRICT_ALIGNMENT |
| 213 | union { | 213 | union { |
| 214 | u64 u[2]; | 214 | uint64_t u[2]; |
| 215 | u8 c[16]; | 215 | uint8_t c[16]; |
| 216 | } temp; | 216 | } temp; |
| 217 | 217 | ||
| 218 | memcpy(temp.c, inp, 16); | 218 | memcpy(temp.c, inp, 16); |
| 219 | ctx->cmac.u[0] ^= temp.u[0]; | 219 | ctx->cmac.u[0] ^= temp.u[0]; |
| 220 | ctx->cmac.u[1] ^= temp.u[1]; | 220 | ctx->cmac.u[1] ^= temp.u[1]; |
| 221 | #else | 221 | #else |
| 222 | ctx->cmac.u[0] ^= ((u64 *)inp)[0]; | 222 | ctx->cmac.u[0] ^= ((uint64_t *)inp)[0]; |
| 223 | ctx->cmac.u[1] ^= ((u64 *)inp)[1]; | 223 | ctx->cmac.u[1] ^= ((uint64_t *)inp)[1]; |
| 224 | #endif | 224 | #endif |
| 225 | (*block)(ctx->cmac.c, ctx->cmac.c, key); | 225 | (*block)(ctx->cmac.c, ctx->cmac.c, key); |
| 226 | (*block)(ctx->nonce.c, scratch.c, key); | 226 | (*block)(ctx->nonce.c, scratch.c, key); |
| @@ -230,8 +230,8 @@ CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx, | |||
| 230 | temp.u[1] ^= scratch.u[1]; | 230 | temp.u[1] ^= scratch.u[1]; |
| 231 | memcpy(out, temp.c, 16); | 231 | memcpy(out, temp.c, 16); |
| 232 | #else | 232 | #else |
| 233 | ((u64 *)out)[0] = scratch.u[0] ^ ((u64 *)inp)[0]; | 233 | ((uint64_t *)out)[0] = scratch.u[0] ^ ((uint64_t *)inp)[0]; |
| 234 | ((u64 *)out)[1] = scratch.u[1] ^ ((u64 *)inp)[1]; | 234 | ((uint64_t *)out)[1] = scratch.u[1] ^ ((uint64_t *)inp)[1]; |
| 235 | #endif | 235 | #endif |
| 236 | inp += 16; | 236 | inp += 16; |
| 237 | out += 16; | 237 | out += 16; |
| @@ -271,8 +271,8 @@ CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx, | |||
| 271 | block128_f block = ctx->block; | 271 | block128_f block = ctx->block; |
| 272 | void *key = ctx->key; | 272 | void *key = ctx->key; |
| 273 | union { | 273 | union { |
| 274 | u64 u[2]; | 274 | uint64_t u[2]; |
| 275 | u8 c[16]; | 275 | uint8_t c[16]; |
| 276 | } scratch; | 276 | } scratch; |
| 277 | 277 | ||
| 278 | if (!(flags0 & 0x40)) | 278 | if (!(flags0 & 0x40)) |
| @@ -293,8 +293,8 @@ CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx, | |||
| 293 | while (len >= 16) { | 293 | while (len >= 16) { |
| 294 | #ifdef __STRICT_ALIGNMENT | 294 | #ifdef __STRICT_ALIGNMENT |
| 295 | union { | 295 | union { |
| 296 | u64 u[2]; | 296 | uint64_t u[2]; |
| 297 | u8 c[16]; | 297 | uint8_t c[16]; |
| 298 | } temp; | 298 | } temp; |
| 299 | #endif | 299 | #endif |
| 300 | (*block)(ctx->nonce.c, scratch.c, key); | 300 | (*block)(ctx->nonce.c, scratch.c, key); |
| @@ -305,10 +305,10 @@ CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx, | |||
| 305 | ctx->cmac.u[1] ^= (scratch.u[1] ^= temp.u[1]); | 305 | ctx->cmac.u[1] ^= (scratch.u[1] ^= temp.u[1]); |
| 306 | memcpy(out, scratch.c, 16); | 306 | memcpy(out, scratch.c, 16); |
| 307 | #else | 307 | #else |
| 308 | ctx->cmac.u[0] ^= (((u64 *)out)[0] = scratch.u[0] ^ | 308 | ctx->cmac.u[0] ^= (((uint64_t *)out)[0] = scratch.u[0] ^ |
| 309 | ((u64 *)inp)[0]); | 309 | ((uint64_t *)inp)[0]); |
| 310 | ctx->cmac.u[1] ^= (((u64 *)out)[1] = scratch.u[1] ^ | 310 | ctx->cmac.u[1] ^= (((uint64_t *)out)[1] = scratch.u[1] ^ |
| 311 | ((u64 *)inp)[1]); | 311 | ((uint64_t *)inp)[1]); |
| 312 | #endif | 312 | #endif |
| 313 | (*block)(ctx->cmac.c, ctx->cmac.c, key); | 313 | (*block)(ctx->cmac.c, ctx->cmac.c, key); |
| 314 | 314 | ||
| @@ -363,8 +363,8 @@ CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx, | |||
| 363 | block128_f block = ctx->block; | 363 | block128_f block = ctx->block; |
| 364 | void *key = ctx->key; | 364 | void *key = ctx->key; |
| 365 | union { | 365 | union { |
| 366 | u64 u[2]; | 366 | uint64_t u[2]; |
| 367 | u8 c[16]; | 367 | uint8_t c[16]; |
| 368 | } scratch; | 368 | } scratch; |
| 369 | 369 | ||
| 370 | if (!(flags0 & 0x40)) | 370 | if (!(flags0 & 0x40)) |
| @@ -430,8 +430,8 @@ CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx, | |||
| 430 | block128_f block = ctx->block; | 430 | block128_f block = ctx->block; |
| 431 | void *key = ctx->key; | 431 | void *key = ctx->key; |
| 432 | union { | 432 | union { |
| 433 | u64 u[2]; | 433 | uint64_t u[2]; |
| 434 | u8 c[16]; | 434 | uint8_t c[16]; |
| 435 | } scratch; | 435 | } scratch; |
| 436 | 436 | ||
| 437 | if (!(flags0 & 0x40)) | 437 | if (!(flags0 & 0x40)) |
diff --git a/src/lib/libcrypto/modes/ctr128.c b/src/lib/libcrypto/modes/ctr128.c index 30563ed6e3..87d9abb355 100644 --- a/src/lib/libcrypto/modes/ctr128.c +++ b/src/lib/libcrypto/modes/ctr128.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* $OpenBSD: ctr128.c,v 1.17 2025/04/23 10:09:08 jsing Exp $ */ | 1 | /* $OpenBSD: ctr128.c,v 1.18 2025/05/18 09:05:59 jsing Exp $ */ |
| 2 | /* ==================================================================== | 2 | /* ==================================================================== |
| 3 | * Copyright (c) 2008 The OpenSSL Project. All rights reserved. | 3 | * Copyright (c) 2008 The OpenSSL Project. All rights reserved. |
| 4 | * | 4 | * |
| @@ -63,8 +63,8 @@ | |||
| 63 | static void | 63 | static void |
| 64 | ctr128_inc(unsigned char *counter) | 64 | ctr128_inc(unsigned char *counter) |
| 65 | { | 65 | { |
| 66 | u32 n = 16; | 66 | uint32_t n = 16; |
| 67 | u8 c; | 67 | uint8_t c; |
| 68 | 68 | ||
| 69 | do { | 69 | do { |
| 70 | --n; | 70 | --n; |
| @@ -175,8 +175,8 @@ LCRYPTO_ALIAS(CRYPTO_ctr128_encrypt); | |||
| 175 | static void | 175 | static void |
| 176 | ctr96_inc(unsigned char *counter) | 176 | ctr96_inc(unsigned char *counter) |
| 177 | { | 177 | { |
| 178 | u32 n = 12; | 178 | uint32_t n = 12; |
| 179 | u8 c; | 179 | uint8_t c; |
| 180 | 180 | ||
| 181 | do { | 181 | do { |
| 182 | --n; | 182 | --n; |
| @@ -223,7 +223,7 @@ CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out, | |||
| 223 | * overflow, which is then handled by limiting the | 223 | * overflow, which is then handled by limiting the |
| 224 | * amount of blocks to the exact overflow point... | 224 | * amount of blocks to the exact overflow point... |
| 225 | */ | 225 | */ |
| 226 | ctr32 += (u32)blocks; | 226 | ctr32 += (uint32_t)blocks; |
| 227 | if (ctr32 < blocks) { | 227 | if (ctr32 < blocks) { |
| 228 | blocks -= ctr32; | 228 | blocks -= ctr32; |
| 229 | ctr32 = 0; | 229 | ctr32 = 0; |
diff --git a/src/lib/libcrypto/modes/gcm128.c b/src/lib/libcrypto/modes/gcm128.c index ed7373d56e..8714a33c2c 100644 --- a/src/lib/libcrypto/modes/gcm128.c +++ b/src/lib/libcrypto/modes/gcm128.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* $OpenBSD: gcm128.c,v 1.39 2025/05/18 07:26:09 jsing Exp $ */ | 1 | /* $OpenBSD: gcm128.c,v 1.40 2025/05/18 09:05:59 jsing Exp $ */ |
| 2 | /* ==================================================================== | 2 | /* ==================================================================== |
| 3 | * Copyright (c) 2010 The OpenSSL Project. All rights reserved. | 3 | * Copyright (c) 2010 The OpenSSL Project. All rights reserved. |
| 4 | * | 4 | * |
| @@ -56,10 +56,10 @@ | |||
| 56 | #include "modes_local.h" | 56 | #include "modes_local.h" |
| 57 | 57 | ||
| 58 | static void | 58 | static void |
| 59 | gcm_init_4bit(u128 Htable[16], u64 H[2]) | 59 | gcm_init_4bit(u128 Htable[16], uint64_t H[2]) |
| 60 | { | 60 | { |
| 61 | u128 V; | 61 | u128 V; |
| 62 | u64 T; | 62 | uint64_t T; |
| 63 | int i; | 63 | int i; |
| 64 | 64 | ||
| 65 | Htable[0].hi = 0; | 65 | Htable[0].hi = 0; |
| @@ -113,13 +113,13 @@ static const uint16_t rem_4bit[16] = { | |||
| 113 | }; | 113 | }; |
| 114 | 114 | ||
| 115 | static void | 115 | static void |
| 116 | gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) | 116 | gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) |
| 117 | { | 117 | { |
| 118 | u128 Z; | 118 | u128 Z; |
| 119 | int cnt = 15; | 119 | int cnt = 15; |
| 120 | size_t rem, nlo, nhi; | 120 | size_t rem, nlo, nhi; |
| 121 | 121 | ||
| 122 | nlo = ((const u8 *)Xi)[15]; | 122 | nlo = ((const uint8_t *)Xi)[15]; |
| 123 | nhi = nlo >> 4; | 123 | nhi = nlo >> 4; |
| 124 | nlo &= 0xf; | 124 | nlo &= 0xf; |
| 125 | 125 | ||
| @@ -130,21 +130,21 @@ gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) | |||
| 130 | rem = (size_t)Z.lo & 0xf; | 130 | rem = (size_t)Z.lo & 0xf; |
| 131 | Z.lo = (Z.hi << 60)|(Z.lo >> 4); | 131 | Z.lo = (Z.hi << 60)|(Z.lo >> 4); |
| 132 | Z.hi = (Z.hi >> 4); | 132 | Z.hi = (Z.hi >> 4); |
| 133 | Z.hi ^= (u64)rem_4bit[rem] << 48; | 133 | Z.hi ^= (uint64_t)rem_4bit[rem] << 48; |
| 134 | Z.hi ^= Htable[nhi].hi; | 134 | Z.hi ^= Htable[nhi].hi; |
| 135 | Z.lo ^= Htable[nhi].lo; | 135 | Z.lo ^= Htable[nhi].lo; |
| 136 | 136 | ||
| 137 | if (--cnt < 0) | 137 | if (--cnt < 0) |
| 138 | break; | 138 | break; |
| 139 | 139 | ||
| 140 | nlo = ((const u8 *)Xi)[cnt]; | 140 | nlo = ((const uint8_t *)Xi)[cnt]; |
| 141 | nhi = nlo >> 4; | 141 | nhi = nlo >> 4; |
| 142 | nlo &= 0xf; | 142 | nlo &= 0xf; |
| 143 | 143 | ||
| 144 | rem = (size_t)Z.lo & 0xf; | 144 | rem = (size_t)Z.lo & 0xf; |
| 145 | Z.lo = (Z.hi << 60)|(Z.lo >> 4); | 145 | Z.lo = (Z.hi << 60)|(Z.lo >> 4); |
| 146 | Z.hi = (Z.hi >> 4); | 146 | Z.hi = (Z.hi >> 4); |
| 147 | Z.hi ^= (u64)rem_4bit[rem] << 48; | 147 | Z.hi ^= (uint64_t)rem_4bit[rem] << 48; |
| 148 | Z.hi ^= Htable[nlo].hi; | 148 | Z.hi ^= Htable[nlo].hi; |
| 149 | Z.lo ^= Htable[nlo].lo; | 149 | Z.lo ^= Htable[nlo].lo; |
| 150 | } | 150 | } |
| @@ -161,8 +161,8 @@ gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) | |||
| 161 | * non-trivial optimization[s]... | 161 | * non-trivial optimization[s]... |
| 162 | */ | 162 | */ |
| 163 | static void | 163 | static void |
| 164 | gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | 164 | gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], |
| 165 | const u8 *inp, size_t len) | 165 | const uint8_t *inp, size_t len) |
| 166 | { | 166 | { |
| 167 | u128 Z; | 167 | u128 Z; |
| 168 | int cnt; | 168 | int cnt; |
| @@ -171,7 +171,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | |||
| 171 | #if 1 | 171 | #if 1 |
| 172 | do { | 172 | do { |
| 173 | cnt = 15; | 173 | cnt = 15; |
| 174 | nlo = ((const u8 *)Xi)[15]; | 174 | nlo = ((const uint8_t *)Xi)[15]; |
| 175 | nlo ^= inp[15]; | 175 | nlo ^= inp[15]; |
| 176 | nhi = nlo >> 4; | 176 | nhi = nlo >> 4; |
| 177 | nlo &= 0xf; | 177 | nlo &= 0xf; |
| @@ -183,14 +183,14 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | |||
| 183 | rem = (size_t)Z.lo & 0xf; | 183 | rem = (size_t)Z.lo & 0xf; |
| 184 | Z.lo = (Z.hi << 60)|(Z.lo >> 4); | 184 | Z.lo = (Z.hi << 60)|(Z.lo >> 4); |
| 185 | Z.hi = (Z.hi >> 4); | 185 | Z.hi = (Z.hi >> 4); |
| 186 | Z.hi ^= (u64)rem_4bit[rem] << 48; | 186 | Z.hi ^= (uint64_t)rem_4bit[rem] << 48; |
| 187 | Z.hi ^= Htable[nhi].hi; | 187 | Z.hi ^= Htable[nhi].hi; |
| 188 | Z.lo ^= Htable[nhi].lo; | 188 | Z.lo ^= Htable[nhi].lo; |
| 189 | 189 | ||
| 190 | if (--cnt < 0) | 190 | if (--cnt < 0) |
| 191 | break; | 191 | break; |
| 192 | 192 | ||
| 193 | nlo = ((const u8 *)Xi)[cnt]; | 193 | nlo = ((const uint8_t *)Xi)[cnt]; |
| 194 | nlo ^= inp[cnt]; | 194 | nlo ^= inp[cnt]; |
| 195 | nhi = nlo >> 4; | 195 | nhi = nlo >> 4; |
| 196 | nlo &= 0xf; | 196 | nlo &= 0xf; |
| @@ -198,7 +198,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | |||
| 198 | rem = (size_t)Z.lo & 0xf; | 198 | rem = (size_t)Z.lo & 0xf; |
| 199 | Z.lo = (Z.hi << 60)|(Z.lo >> 4); | 199 | Z.lo = (Z.hi << 60)|(Z.lo >> 4); |
| 200 | Z.hi = (Z.hi >> 4); | 200 | Z.hi = (Z.hi >> 4); |
| 201 | Z.hi ^= (u64)rem_4bit[rem] << 48; | 201 | Z.hi ^= (uint64_t)rem_4bit[rem] << 48; |
| 202 | Z.hi ^= Htable[nlo].hi; | 202 | Z.hi ^= Htable[nlo].hi; |
| 203 | Z.lo ^= Htable[nlo].lo; | 203 | Z.lo ^= Htable[nlo].lo; |
| 204 | } | 204 | } |
| @@ -210,7 +210,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | |||
| 210 | * cache footprint... | 210 | * cache footprint... |
| 211 | */ | 211 | */ |
| 212 | u128 Hshr4[16]; /* Htable shifted right by 4 bits */ | 212 | u128 Hshr4[16]; /* Htable shifted right by 4 bits */ |
| 213 | u8 Hshl4[16]; /* Htable shifted left by 4 bits */ | 213 | uint8_t Hshl4[16]; /* Htable shifted left by 4 bits */ |
| 214 | static const unsigned short rem_8bit[256] = { | 214 | static const unsigned short rem_8bit[256] = { |
| 215 | 0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E, | 215 | 0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E, |
| 216 | 0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E, | 216 | 0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E, |
| @@ -255,12 +255,12 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | |||
| 255 | Z.lo = Htable[cnt].lo; | 255 | Z.lo = Htable[cnt].lo; |
| 256 | Hshr4[cnt].lo = (Z.hi << 60)|(Z.lo >> 4); | 256 | Hshr4[cnt].lo = (Z.hi << 60)|(Z.lo >> 4); |
| 257 | Hshr4[cnt].hi = (Z.hi >> 4); | 257 | Hshr4[cnt].hi = (Z.hi >> 4); |
| 258 | Hshl4[cnt] = (u8)(Z.lo << 4); | 258 | Hshl4[cnt] = (uint8_t)(Z.lo << 4); |
| 259 | } | 259 | } |
| 260 | 260 | ||
| 261 | do { | 261 | do { |
| 262 | for (Z.lo = 0, Z.hi = 0, cnt = 15; cnt; --cnt) { | 262 | for (Z.lo = 0, Z.hi = 0, cnt = 15; cnt; --cnt) { |
| 263 | nlo = ((const u8 *)Xi)[cnt]; | 263 | nlo = ((const uint8_t *)Xi)[cnt]; |
| 264 | nlo ^= inp[cnt]; | 264 | nlo ^= inp[cnt]; |
| 265 | nhi = nlo >> 4; | 265 | nhi = nlo >> 4; |
| 266 | nlo &= 0xf; | 266 | nlo &= 0xf; |
| @@ -275,10 +275,10 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | |||
| 275 | 275 | ||
| 276 | Z.hi ^= Hshr4[nhi].hi; | 276 | Z.hi ^= Hshr4[nhi].hi; |
| 277 | Z.lo ^= Hshr4[nhi].lo; | 277 | Z.lo ^= Hshr4[nhi].lo; |
| 278 | Z.hi ^= (u64)rem_8bit[rem ^ Hshl4[nhi]] << 48; | 278 | Z.hi ^= (uint64_t)rem_8bit[rem ^ Hshl4[nhi]] << 48; |
| 279 | } | 279 | } |
| 280 | 280 | ||
| 281 | nlo = ((const u8 *)Xi)[0]; | 281 | nlo = ((const uint8_t *)Xi)[0]; |
| 282 | nlo ^= inp[0]; | 282 | nlo ^= inp[0]; |
| 283 | nhi = nlo >> 4; | 283 | nhi = nlo >> 4; |
| 284 | nlo &= 0xf; | 284 | nlo &= 0xf; |
| @@ -293,7 +293,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | |||
| 293 | 293 | ||
| 294 | Z.hi ^= Htable[nhi].hi; | 294 | Z.hi ^= Htable[nhi].hi; |
| 295 | Z.lo ^= Htable[nhi].lo; | 295 | Z.lo ^= Htable[nhi].lo; |
| 296 | Z.hi ^= ((u64)rem_8bit[rem << 4]) << 48; | 296 | Z.hi ^= ((uint64_t)rem_8bit[rem << 4]) << 48; |
| 297 | #endif | 297 | #endif |
| 298 | 298 | ||
| 299 | Xi[0] = htobe64(Z.hi); | 299 | Xi[0] = htobe64(Z.hi); |
| @@ -302,7 +302,7 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], | |||
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | static inline void | 304 | static inline void |
| 305 | gcm_mul(GCM128_CONTEXT *ctx, u64 u[2]) | 305 | gcm_mul(GCM128_CONTEXT *ctx, uint64_t u[2]) |
| 306 | { | 306 | { |
| 307 | gcm_gmult_4bit(u, ctx->Htable); | 307 | gcm_gmult_4bit(u, ctx->Htable); |
| 308 | } | 308 | } |
| @@ -313,12 +313,12 @@ gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len) | |||
| 313 | gcm_ghash_4bit(ctx->Xi.u, ctx->Htable, in, len); | 313 | gcm_ghash_4bit(ctx->Xi.u, ctx->Htable, in, len); |
| 314 | } | 314 | } |
| 315 | #else | 315 | #else |
| 316 | void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]); | 316 | void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]); |
| 317 | void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp, | 317 | void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, |
| 318 | size_t len); | 318 | size_t len); |
| 319 | 319 | ||
| 320 | static inline void | 320 | static inline void |
| 321 | gcm_mul(GCM128_CONTEXT *ctx, u64 u[2]) | 321 | gcm_mul(GCM128_CONTEXT *ctx, uint64_t u[2]) |
| 322 | { | 322 | { |
| 323 | ctx->gmult(u, ctx->Htable); | 323 | ctx->gmult(u, ctx->Htable); |
| 324 | } | 324 | } |
| @@ -350,27 +350,27 @@ gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len) | |||
| 350 | defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64)) | 350 | defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64)) |
| 351 | # define GHASH_ASM_X86_OR_64 | 351 | # define GHASH_ASM_X86_OR_64 |
| 352 | 352 | ||
| 353 | void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]); | 353 | void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]); |
| 354 | void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]); | 354 | void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]); |
| 355 | void gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp, | 355 | void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, |
| 356 | size_t len); | 356 | size_t len); |
| 357 | 357 | ||
| 358 | # if defined(__i386) || defined(__i386__) || defined(_M_IX86) | 358 | # if defined(__i386) || defined(__i386__) || defined(_M_IX86) |
| 359 | # define GHASH_ASM_X86 | 359 | # define GHASH_ASM_X86 |
| 360 | void gcm_gmult_4bit_mmx(u64 Xi[2], const u128 Htable[16]); | 360 | void gcm_gmult_4bit_mmx(uint64_t Xi[2], const u128 Htable[16]); |
| 361 | void gcm_ghash_4bit_mmx(u64 Xi[2], const u128 Htable[16], const u8 *inp, | 361 | void gcm_ghash_4bit_mmx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, |
| 362 | size_t len); | 362 | size_t len); |
| 363 | 363 | ||
| 364 | void gcm_gmult_4bit_x86(u64 Xi[2], const u128 Htable[16]); | 364 | void gcm_gmult_4bit_x86(uint64_t Xi[2], const u128 Htable[16]); |
| 365 | void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp, | 365 | void gcm_ghash_4bit_x86(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, |
| 366 | size_t len); | 366 | size_t len); |
| 367 | # endif | 367 | # endif |
| 368 | # elif defined(__arm__) || defined(__arm) | 368 | # elif defined(__arm__) || defined(__arm) |
| 369 | # include "arm_arch.h" | 369 | # include "arm_arch.h" |
| 370 | # if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT) | 370 | # if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT) |
| 371 | # define GHASH_ASM_ARM | 371 | # define GHASH_ASM_ARM |
| 372 | void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]); | 372 | void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]); |
| 373 | void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp, | 373 | void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, |
| 374 | size_t len); | 374 | size_t len); |
| 375 | # endif | 375 | # endif |
| 376 | # endif | 376 | # endif |
| @@ -452,7 +452,7 @@ CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, size_t len) | |||
| 452 | ctr = 1; | 452 | ctr = 1; |
| 453 | } else { | 453 | } else { |
| 454 | size_t i; | 454 | size_t i; |
| 455 | u64 len0 = len; | 455 | uint64_t len0 = len; |
| 456 | 456 | ||
| 457 | while (len >= 16) { | 457 | while (len >= 16) { |
| 458 | for (i = 0; i < 16; ++i) | 458 | for (i = 0; i < 16; ++i) |
| @@ -485,7 +485,7 @@ CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, size_t len) | |||
| 485 | { | 485 | { |
| 486 | size_t i; | 486 | size_t i; |
| 487 | unsigned int n; | 487 | unsigned int n; |
| 488 | u64 alen = ctx->len.u[0]; | 488 | uint64_t alen = ctx->len.u[0]; |
| 489 | 489 | ||
| 490 | if (ctx->len.u[1]) | 490 | if (ctx->len.u[1]) |
| 491 | return -2; | 491 | return -2; |
| @@ -533,7 +533,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, | |||
| 533 | { | 533 | { |
| 534 | unsigned int n, ctr; | 534 | unsigned int n, ctr; |
| 535 | size_t i; | 535 | size_t i; |
| 536 | u64 mlen = ctx->len.u[1]; | 536 | uint64_t mlen = ctx->len.u[1]; |
| 537 | block128_f block = ctx->block; | 537 | block128_f block = ctx->block; |
| 538 | void *key = ctx->key; | 538 | void *key = ctx->key; |
| 539 | 539 | ||
| @@ -670,7 +670,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 670 | { | 670 | { |
| 671 | unsigned int n, ctr; | 671 | unsigned int n, ctr; |
| 672 | size_t i; | 672 | size_t i; |
| 673 | u64 mlen = ctx->len.u[1]; | 673 | uint64_t mlen = ctx->len.u[1]; |
| 674 | block128_f block = ctx->block; | 674 | block128_f block = ctx->block; |
| 675 | void *key = ctx->key; | 675 | void *key = ctx->key; |
| 676 | 676 | ||
| @@ -692,7 +692,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 692 | do { /* always true actually */ | 692 | do { /* always true actually */ |
| 693 | if (n) { | 693 | if (n) { |
| 694 | while (n && len) { | 694 | while (n && len) { |
| 695 | u8 c = *(in++); | 695 | uint8_t c = *(in++); |
| 696 | *(out++) = c ^ ctx->EKi.c[n]; | 696 | *(out++) = c ^ ctx->EKi.c[n]; |
| 697 | ctx->Xi.c[n] ^= c; | 697 | ctx->Xi.c[n] ^= c; |
| 698 | --len; | 698 | --len; |
| @@ -775,7 +775,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 775 | ctx->Yi.d[3] = htobe32(ctr); | 775 | ctx->Yi.d[3] = htobe32(ctr); |
| 776 | 776 | ||
| 777 | while (len--) { | 777 | while (len--) { |
| 778 | u8 c = in[n]; | 778 | uint8_t c = in[n]; |
| 779 | ctx->Xi.c[n] ^= c; | 779 | ctx->Xi.c[n] ^= c; |
| 780 | out[n] = c ^ ctx->EKi.c[n]; | 780 | out[n] = c ^ ctx->EKi.c[n]; |
| 781 | ++n; | 781 | ++n; |
| @@ -786,7 +786,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 786 | return 0; | 786 | return 0; |
| 787 | } while (0); | 787 | } while (0); |
| 788 | for (i = 0; i < len; ++i) { | 788 | for (i = 0; i < len; ++i) { |
| 789 | u8 c; | 789 | uint8_t c; |
| 790 | if (n == 0) { | 790 | if (n == 0) { |
| 791 | (*block)(ctx->Yi.c, ctx->EKi.c, key); | 791 | (*block)(ctx->Yi.c, ctx->EKi.c, key); |
| 792 | ++ctr; | 792 | ++ctr; |
| @@ -812,7 +812,7 @@ CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 812 | { | 812 | { |
| 813 | unsigned int n, ctr; | 813 | unsigned int n, ctr; |
| 814 | size_t i; | 814 | size_t i; |
| 815 | u64 mlen = ctx->len.u[1]; | 815 | uint64_t mlen = ctx->len.u[1]; |
| 816 | void *key = ctx->key; | 816 | void *key = ctx->key; |
| 817 | 817 | ||
| 818 | mlen += len; | 818 | mlen += len; |
| @@ -886,7 +886,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 886 | { | 886 | { |
| 887 | unsigned int n, ctr; | 887 | unsigned int n, ctr; |
| 888 | size_t i; | 888 | size_t i; |
| 889 | u64 mlen = ctx->len.u[1]; | 889 | uint64_t mlen = ctx->len.u[1]; |
| 890 | void *key = ctx->key; | 890 | void *key = ctx->key; |
| 891 | 891 | ||
| 892 | mlen += len; | 892 | mlen += len; |
| @@ -905,7 +905,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 905 | n = ctx->mres; | 905 | n = ctx->mres; |
| 906 | if (n) { | 906 | if (n) { |
| 907 | while (n && len) { | 907 | while (n && len) { |
| 908 | u8 c = *(in++); | 908 | uint8_t c = *(in++); |
| 909 | *(out++) = c ^ ctx->EKi.c[n]; | 909 | *(out++) = c ^ ctx->EKi.c[n]; |
| 910 | ctx->Xi.c[n] ^= c; | 910 | ctx->Xi.c[n] ^= c; |
| 911 | --len; | 911 | --len; |
| @@ -945,7 +945,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 945 | ++ctr; | 945 | ++ctr; |
| 946 | ctx->Yi.d[3] = htobe32(ctr); | 946 | ctx->Yi.d[3] = htobe32(ctr); |
| 947 | while (len--) { | 947 | while (len--) { |
| 948 | u8 c = in[n]; | 948 | uint8_t c = in[n]; |
| 949 | ctx->Xi.c[n] ^= c; | 949 | ctx->Xi.c[n] ^= c; |
| 950 | out[n] = c ^ ctx->EKi.c[n]; | 950 | out[n] = c ^ ctx->EKi.c[n]; |
| 951 | ++n; | 951 | ++n; |
| @@ -961,8 +961,8 @@ int | |||
| 961 | CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag, | 961 | CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag, |
| 962 | size_t len) | 962 | size_t len) |
| 963 | { | 963 | { |
| 964 | u64 alen = ctx->len.u[0] << 3; | 964 | uint64_t alen = ctx->len.u[0] << 3; |
| 965 | u64 clen = ctx->len.u[1] << 3; | 965 | uint64_t clen = ctx->len.u[1] << 3; |
| 966 | 966 | ||
| 967 | if (ctx->mres || ctx->ares) | 967 | if (ctx->mres || ctx->ares) |
| 968 | gcm_mul(ctx, ctx->Xi.u); | 968 | gcm_mul(ctx, ctx->Xi.u); |
diff --git a/src/lib/libcrypto/modes/modes_local.h b/src/lib/libcrypto/modes/modes_local.h index 81994876e3..d833d40ee3 100644 --- a/src/lib/libcrypto/modes/modes_local.h +++ b/src/lib/libcrypto/modes/modes_local.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* $OpenBSD: modes_local.h,v 1.5 2025/05/17 14:43:17 jsing Exp $ */ | 1 | /* $OpenBSD: modes_local.h,v 1.6 2025/05/18 09:05:59 jsing Exp $ */ |
| 2 | /* ==================================================================== | 2 | /* ==================================================================== |
| 3 | * Copyright (c) 2010 The OpenSSL Project. All rights reserved. | 3 | * Copyright (c) 2010 The OpenSSL Project. All rights reserved. |
| 4 | * | 4 | * |
| @@ -15,37 +15,30 @@ | |||
| 15 | __BEGIN_HIDDEN_DECLS | 15 | __BEGIN_HIDDEN_DECLS |
| 16 | 16 | ||
| 17 | #if defined(_LP64) | 17 | #if defined(_LP64) |
| 18 | typedef long i64; | ||
| 19 | typedef unsigned long u64; | ||
| 20 | #define U64(C) C##UL | 18 | #define U64(C) C##UL |
| 21 | #else | 19 | #else |
| 22 | typedef long long i64; | ||
| 23 | typedef unsigned long long u64; | ||
| 24 | #define U64(C) C##ULL | 20 | #define U64(C) C##ULL |
| 25 | #endif | 21 | #endif |
| 26 | 22 | ||
| 27 | typedef unsigned int u32; | ||
| 28 | typedef unsigned char u8; | ||
| 29 | |||
| 30 | /* GCM definitions */ | 23 | /* GCM definitions */ |
| 31 | 24 | ||
| 32 | typedef struct { | 25 | typedef struct { |
| 33 | u64 hi, lo; | 26 | uint64_t hi, lo; |
| 34 | } u128; | 27 | } u128; |
| 35 | 28 | ||
| 36 | struct gcm128_context { | 29 | struct gcm128_context { |
| 37 | /* Following 6 names follow names in GCM specification */ | 30 | /* Following 6 names follow names in GCM specification */ |
| 38 | union { | 31 | union { |
| 39 | u64 u[2]; | 32 | uint64_t u[2]; |
| 40 | u32 d[4]; | 33 | uint32_t d[4]; |
| 41 | u8 c[16]; | 34 | uint8_t c[16]; |
| 42 | size_t t[16/sizeof(size_t)]; | 35 | size_t t[16/sizeof(size_t)]; |
| 43 | } Yi, EKi, EK0, len, Xi, H; | 36 | } Yi, EKi, EK0, len, Xi, H; |
| 44 | /* Relative position of Xi, H and pre-computed Htable is used | 37 | /* Relative position of Xi, H and pre-computed Htable is used |
| 45 | * in some assembler modules, i.e. don't change the order! */ | 38 | * in some assembler modules, i.e. don't change the order! */ |
| 46 | u128 Htable[16]; | 39 | u128 Htable[16]; |
| 47 | void (*gmult)(u64 Xi[2], const u128 Htable[16]); | 40 | void (*gmult)(uint64_t Xi[2], const u128 Htable[16]); |
| 48 | void (*ghash)(u64 Xi[2], const u128 Htable[16], const u8 *inp, | 41 | void (*ghash)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, |
| 49 | size_t len); | 42 | size_t len); |
| 50 | unsigned int mres, ares; | 43 | unsigned int mres, ares; |
| 51 | block128_f block; | 44 | block128_f block; |
| @@ -59,10 +52,10 @@ struct xts128_context { | |||
| 59 | 52 | ||
| 60 | struct ccm128_context { | 53 | struct ccm128_context { |
| 61 | union { | 54 | union { |
| 62 | u64 u[2]; | 55 | uint64_t u[2]; |
| 63 | u8 c[16]; | 56 | uint8_t c[16]; |
| 64 | } nonce, cmac; | 57 | } nonce, cmac; |
| 65 | u64 blocks; | 58 | uint64_t blocks; |
| 66 | block128_f block; | 59 | block128_f block; |
| 67 | void *key; | 60 | void *key; |
| 68 | }; | 61 | }; |
diff --git a/src/lib/libcrypto/modes/xts128.c b/src/lib/libcrypto/modes/xts128.c index 789af9ef65..9c863e73d6 100644 --- a/src/lib/libcrypto/modes/xts128.c +++ b/src/lib/libcrypto/modes/xts128.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* $OpenBSD: xts128.c,v 1.14 2025/04/21 16:01:18 jsing Exp $ */ | 1 | /* $OpenBSD: xts128.c,v 1.15 2025/05/18 09:05:59 jsing Exp $ */ |
| 2 | /* ==================================================================== | 2 | /* ==================================================================== |
| 3 | * Copyright (c) 2011 The OpenSSL Project. All rights reserved. | 3 | * Copyright (c) 2011 The OpenSSL Project. All rights reserved. |
| 4 | * | 4 | * |
| @@ -61,9 +61,9 @@ CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], | |||
| 61 | size_t len, int enc) | 61 | size_t len, int enc) |
| 62 | { | 62 | { |
| 63 | union { | 63 | union { |
| 64 | u64 u[2]; | 64 | uint64_t u[2]; |
| 65 | u32 d[4]; | 65 | uint32_t d[4]; |
| 66 | u8 c[16]; | 66 | uint8_t c[16]; |
| 67 | } tweak, scratch; | 67 | } tweak, scratch; |
| 68 | unsigned int i; | 68 | unsigned int i; |
| 69 | 69 | ||
| @@ -83,8 +83,8 @@ CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], | |||
| 83 | scratch.u[0] ^= tweak.u[0]; | 83 | scratch.u[0] ^= tweak.u[0]; |
| 84 | scratch.u[1] ^= tweak.u[1]; | 84 | scratch.u[1] ^= tweak.u[1]; |
| 85 | #else | 85 | #else |
| 86 | scratch.u[0] = ((u64 *)inp)[0] ^ tweak.u[0]; | 86 | scratch.u[0] = ((uint64_t *)inp)[0] ^ tweak.u[0]; |
| 87 | scratch.u[1] = ((u64 *)inp)[1] ^ tweak.u[1]; | 87 | scratch.u[1] = ((uint64_t *)inp)[1] ^ tweak.u[1]; |
| 88 | #endif | 88 | #endif |
| 89 | (*ctx->block1)(scratch.c, scratch.c, ctx->key1); | 89 | (*ctx->block1)(scratch.c, scratch.c, ctx->key1); |
| 90 | #ifdef __STRICT_ALIGNMENT | 90 | #ifdef __STRICT_ALIGNMENT |
| @@ -92,8 +92,8 @@ CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], | |||
| 92 | scratch.u[1] ^= tweak.u[1]; | 92 | scratch.u[1] ^= tweak.u[1]; |
| 93 | memcpy(out, scratch.c, 16); | 93 | memcpy(out, scratch.c, 16); |
| 94 | #else | 94 | #else |
| 95 | ((u64 *)out)[0] = scratch.u[0] ^= tweak.u[0]; | 95 | ((uint64_t *)out)[0] = scratch.u[0] ^= tweak.u[0]; |
| 96 | ((u64 *)out)[1] = scratch.u[1] ^= tweak.u[1]; | 96 | ((uint64_t *)out)[1] = scratch.u[1] ^= tweak.u[1]; |
| 97 | #endif | 97 | #endif |
| 98 | inp += 16; | 98 | inp += 16; |
| 99 | out += 16; | 99 | out += 16; |
| @@ -115,15 +115,15 @@ CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], | |||
| 115 | for (c = 0, i = 0; i < 16; ++i) { | 115 | for (c = 0, i = 0; i < 16; ++i) { |
| 116 | /*+ substitutes for |, because c is 1 bit */ | 116 | /*+ substitutes for |, because c is 1 bit */ |
| 117 | c += ((size_t)tweak.c[i]) << 1; | 117 | c += ((size_t)tweak.c[i]) << 1; |
| 118 | tweak.c[i] = (u8)c; | 118 | tweak.c[i] = (uint8_t)c; |
| 119 | c = c >> 8; | 119 | c = c >> 8; |
| 120 | } | 120 | } |
| 121 | tweak.c[0] ^= (u8)(0x87 & (0 - c)); | 121 | tweak.c[0] ^= (uint8_t)(0x87 & (0 - c)); |
| 122 | #endif | 122 | #endif |
| 123 | } | 123 | } |
| 124 | if (enc) { | 124 | if (enc) { |
| 125 | for (i = 0; i < len; ++i) { | 125 | for (i = 0; i < len; ++i) { |
| 126 | u8 ch = inp[i]; | 126 | uint8_t ch = inp[i]; |
| 127 | out[i] = scratch.c[i]; | 127 | out[i] = scratch.c[i]; |
| 128 | scratch.c[i] = ch; | 128 | scratch.c[i] = ch; |
| 129 | } | 129 | } |
| @@ -135,8 +135,8 @@ CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], | |||
| 135 | memcpy(out - 16, scratch.c, 16); | 135 | memcpy(out - 16, scratch.c, 16); |
| 136 | } else { | 136 | } else { |
| 137 | union { | 137 | union { |
| 138 | u64 u[2]; | 138 | uint64_t u[2]; |
| 139 | u8 c[16]; | 139 | uint8_t c[16]; |
| 140 | } tweak1; | 140 | } tweak1; |
| 141 | 141 | ||
| 142 | #if BYTE_ORDER == LITTLE_ENDIAN | 142 | #if BYTE_ORDER == LITTLE_ENDIAN |
| @@ -152,25 +152,25 @@ CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], | |||
| 152 | for (c = 0, i = 0; i < 16; ++i) { | 152 | for (c = 0, i = 0; i < 16; ++i) { |
| 153 | /*+ substitutes for |, because c is 1 bit */ | 153 | /*+ substitutes for |, because c is 1 bit */ |
| 154 | c += ((size_t)tweak.c[i]) << 1; | 154 | c += ((size_t)tweak.c[i]) << 1; |
| 155 | tweak1.c[i] = (u8)c; | 155 | tweak1.c[i] = (uint8_t)c; |
| 156 | c = c >> 8; | 156 | c = c >> 8; |
| 157 | } | 157 | } |
| 158 | tweak1.c[0] ^= (u8)(0x87 & (0 - c)); | 158 | tweak1.c[0] ^= (uint8_t)(0x87 & (0 - c)); |
| 159 | #endif | 159 | #endif |
| 160 | #ifdef __STRICT_ALIGNMENT | 160 | #ifdef __STRICT_ALIGNMENT |
| 161 | memcpy(scratch.c, inp, 16); | 161 | memcpy(scratch.c, inp, 16); |
| 162 | scratch.u[0] ^= tweak1.u[0]; | 162 | scratch.u[0] ^= tweak1.u[0]; |
| 163 | scratch.u[1] ^= tweak1.u[1]; | 163 | scratch.u[1] ^= tweak1.u[1]; |
| 164 | #else | 164 | #else |
| 165 | scratch.u[0] = ((u64 *)inp)[0] ^ tweak1.u[0]; | 165 | scratch.u[0] = ((uint64_t *)inp)[0] ^ tweak1.u[0]; |
| 166 | scratch.u[1] = ((u64 *)inp)[1] ^ tweak1.u[1]; | 166 | scratch.u[1] = ((uint64_t *)inp)[1] ^ tweak1.u[1]; |
| 167 | #endif | 167 | #endif |
| 168 | (*ctx->block1)(scratch.c, scratch.c, ctx->key1); | 168 | (*ctx->block1)(scratch.c, scratch.c, ctx->key1); |
| 169 | scratch.u[0] ^= tweak1.u[0]; | 169 | scratch.u[0] ^= tweak1.u[0]; |
| 170 | scratch.u[1] ^= tweak1.u[1]; | 170 | scratch.u[1] ^= tweak1.u[1]; |
| 171 | 171 | ||
| 172 | for (i = 0; i < len; ++i) { | 172 | for (i = 0; i < len; ++i) { |
| 173 | u8 ch = inp[16 + i]; | 173 | uint8_t ch = inp[16 + i]; |
| 174 | out[16 + i] = scratch.c[i]; | 174 | out[16 + i] = scratch.c[i]; |
| 175 | scratch.c[i] = ch; | 175 | scratch.c[i] = ch; |
| 176 | } | 176 | } |
| @@ -182,8 +182,8 @@ CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], | |||
| 182 | scratch.u[1] ^= tweak.u[1]; | 182 | scratch.u[1] ^= tweak.u[1]; |
| 183 | memcpy(out, scratch.c, 16); | 183 | memcpy(out, scratch.c, 16); |
| 184 | #else | 184 | #else |
| 185 | ((u64 *)out)[0] = scratch.u[0] ^ tweak.u[0]; | 185 | ((uint64_t *)out)[0] = scratch.u[0] ^ tweak.u[0]; |
| 186 | ((u64 *)out)[1] = scratch.u[1] ^ tweak.u[1]; | 186 | ((uint64_t *)out)[1] = scratch.u[1] ^ tweak.u[1]; |
| 187 | #endif | 187 | #endif |
| 188 | } | 188 | } |
| 189 | 189 | ||
