author     beck <>  2023-07-08 14:55:36 +0000
committer  beck <>  2023-07-08 14:55:36 +0000
commit     3c27356c4047c5869d9365e12ad90ccbdbb882ab (patch)
tree       8b52e2acda02335df7e6b79862b2a92ae5046a0f
parent     cfe8c9ef5ce212d7cb6e1b00c34a8835dd08c925 (diff)
Hit modes with the loving mallet of knfmt
ok tb@
-rw-r--r--  src/lib/libcrypto/modes/cbc128.c        104
-rw-r--r--  src/lib/libcrypto/modes/ccm128.c        439
-rw-r--r--  src/lib/libcrypto/modes/cfb128.c        268
-rw-r--r--  src/lib/libcrypto/modes/ctr128.c        148
-rw-r--r--  src/lib/libcrypto/modes/gcm128.c       1187
-rw-r--r--  src/lib/libcrypto/modes/modes.h         108
-rw-r--r--  src/lib/libcrypto/modes/modes_local.h    56
-rw-r--r--  src/lib/libcrypto/modes/ofb128.c         80
-rw-r--r--  src/lib/libcrypto/modes/xts128.c        116
9 files changed, 1342 insertions(+), 1164 deletions(-)
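
For readers unfamiliar with the tool: knfmt(1) mechanically reformats C source into OpenBSD's style(9) conventions ("KNF"), so this commit changes layout only, not behavior. The recurring pattern in the diffs below is that the return type moves onto its own line, binary operators gain surrounding spaces, and one-line conditionals are split in two. Illustrated on a hypothetical helper (not taken from the diff):

/* Before knfmt: compressed OpenSSL-era layout. */
static int min16(int len) { if (len>=16) return 16; return len; }

/* After knfmt: KNF layout, identical behavior. */
static int
min16_knf(int len)
{
	if (len >= 16)
		return 16;
	return len;
}
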
diff --git a/src/lib/libcrypto/modes/cbc128.c b/src/lib/libcrypto/modes/cbc128.c
index f2eebc6e7a..27a2241ad4 100644
--- a/src/lib/libcrypto/modes/cbc128.c
+++ b/src/lib/libcrypto/modes/cbc128.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: cbc128.c,v 1.6 2022/11/26 16:08:53 tb Exp $ */ 1/* $OpenBSD: cbc128.c,v 1.7 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -7,7 +7,7 @@
7 * are met: 7 * are met:
8 * 8 *
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 11 *
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in 13 * notice, this list of conditions and the following disclaimer in
@@ -66,117 +66,127 @@
66#define STRICT_ALIGNMENT 0 66#define STRICT_ALIGNMENT 0
67#endif 67#endif
68 68
69void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out, 69void
70 size_t len, const void *key, 70CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
71 unsigned char ivec[16], block128_f block) 71 size_t len, const void *key,
72 unsigned char ivec[16], block128_f block)
72{ 73{
73 size_t n; 74 size_t n;
74 const unsigned char *iv = ivec; 75 const unsigned char *iv = ivec;
75 76
76#if !defined(OPENSSL_SMALL_FOOTPRINT) 77#if !defined(OPENSSL_SMALL_FOOTPRINT)
77 if (STRICT_ALIGNMENT && 78 if (STRICT_ALIGNMENT &&
78 ((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) { 79 ((size_t)in|(size_t)out|(size_t)ivec) % sizeof(size_t) != 0) {
79 while (len>=16) { 80 while (len >= 16) {
80 for(n=0; n<16; ++n) 81 for (n = 0; n < 16; ++n)
81 out[n] = in[n] ^ iv[n]; 82 out[n] = in[n] ^ iv[n];
82 (*block)(out, out, key); 83 (*block)(out, out, key);
83 iv = out; 84 iv = out;
84 len -= 16; 85 len -= 16;
85 in += 16; 86 in += 16;
86 out += 16; 87 out += 16;
87 } 88 }
88 } else { 89 } else {
89 while (len>=16) { 90 while (len >= 16) {
90 for(n=0; n<16; n+=sizeof(size_t)) 91 for (n = 0; n < 16; n += sizeof(size_t))
91 *(size_t*)(out+n) = 92 *(size_t *)(out + n) =
92 *(size_t*)(in+n) ^ *(size_t*)(iv+n); 93 *(size_t *)(in + n) ^ *(size_t *)(iv + n);
93 (*block)(out, out, key); 94 (*block)(out, out, key);
94 iv = out; 95 iv = out;
95 len -= 16; 96 len -= 16;
96 in += 16; 97 in += 16;
97 out += 16; 98 out += 16;
98 } 99 }
99 } 100 }
100#endif 101#endif
101 while (len) { 102 while (len) {
102 for(n=0; n<16 && n<len; ++n) 103 for (n = 0; n < 16 && n < len; ++n)
103 out[n] = in[n] ^ iv[n]; 104 out[n] = in[n] ^ iv[n];
104 for(; n<16; ++n) 105 for (; n < 16; ++n)
105 out[n] = iv[n]; 106 out[n] = iv[n];
106 (*block)(out, out, key); 107 (*block)(out, out, key);
107 iv = out; 108 iv = out;
108 if (len<=16) break; 109 if (len <= 16)
110 break;
109 len -= 16; 111 len -= 16;
110 in += 16; 112 in += 16;
111 out += 16; 113 out += 16;
112 } 114 }
113 memmove(ivec,iv,16); 115 memmove(ivec, iv, 16);
114} 116}
115 117
116void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out, 118void
117 size_t len, const void *key, 119CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
118 unsigned char ivec[16], block128_f block) 120 size_t len, const void *key,
121 unsigned char ivec[16], block128_f block)
119{ 122{
120 size_t n; 123 size_t n;
121 union { size_t t[16/sizeof(size_t)]; unsigned char c[16]; } tmp; 124 union {
125 size_t t[16/sizeof(size_t)];
126 unsigned char c[16];
127 } tmp;
122 128
123#if !defined(OPENSSL_SMALL_FOOTPRINT) 129#if !defined(OPENSSL_SMALL_FOOTPRINT)
124 if (in != out) { 130 if (in != out) {
125 const unsigned char *iv = ivec; 131 const unsigned char *iv = ivec;
126 132
127 if (STRICT_ALIGNMENT && 133 if (STRICT_ALIGNMENT &&
128 ((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) { 134 ((size_t)in|(size_t)out|(size_t)ivec) % sizeof(size_t) !=
129 while (len>=16) { 135 0) {
136 while (len >= 16) {
130 (*block)(in, out, key); 137 (*block)(in, out, key);
131 for(n=0; n<16; ++n) 138 for (n = 0; n < 16; ++n)
132 out[n] ^= iv[n]; 139 out[n] ^= iv[n];
133 iv = in; 140 iv = in;
134 len -= 16; 141 len -= 16;
135 in += 16; 142 in += 16;
136 out += 16; 143 out += 16;
137 } 144 }
138 } else if (16%sizeof(size_t) == 0) { /* always true */ 145 } else if (16 % sizeof(size_t) == 0) { /* always true */
139 while (len>=16) { 146 while (len >= 16) {
140 size_t *out_t=(size_t *)out, *iv_t=(size_t *)iv; 147 size_t *out_t = (size_t *)out,
148 *iv_t = (size_t *)iv;
141 149
142 (*block)(in, out, key); 150 (*block)(in, out, key);
143 for(n=0; n<16/sizeof(size_t); n++) 151 for (n = 0; n < 16/sizeof(size_t); n++)
144 out_t[n] ^= iv_t[n]; 152 out_t[n] ^= iv_t[n];
145 iv = in; 153 iv = in;
146 len -= 16; 154 len -= 16;
147 in += 16; 155 in += 16;
148 out += 16; 156 out += 16;
149 } 157 }
150 } 158 }
151 memmove(ivec,iv,16); 159 memmove(ivec, iv, 16);
152 } else { 160 } else {
153 if (STRICT_ALIGNMENT && 161 if (STRICT_ALIGNMENT &&
154 ((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) { 162 ((size_t)in|(size_t)out|(size_t)ivec) % sizeof(size_t) !=
163 0) {
155 unsigned char c; 164 unsigned char c;
156 while (len>=16) { 165 while (len >= 16) {
157 (*block)(in, tmp.c, key); 166 (*block)(in, tmp.c, key);
158 for(n=0; n<16; ++n) { 167 for (n = 0; n < 16; ++n) {
159 c = in[n]; 168 c = in[n];
160 out[n] = tmp.c[n] ^ ivec[n]; 169 out[n] = tmp.c[n] ^ ivec[n];
161 ivec[n] = c; 170 ivec[n] = c;
162 } 171 }
163 len -= 16; 172 len -= 16;
164 in += 16; 173 in += 16;
165 out += 16; 174 out += 16;
166 } 175 }
167 } else if (16%sizeof(size_t) == 0) { /* always true */ 176 } else if (16 % sizeof(size_t) == 0) { /* always true */
168 while (len>=16) { 177 while (len >= 16) {
169 size_t c, *out_t=(size_t *)out, *ivec_t=(size_t *)ivec; 178 size_t c, *out_t = (size_t *)out,
170 const size_t *in_t=(const size_t *)in; 179 *ivec_t = (size_t *)ivec;
180 const size_t *in_t = (const size_t *)in;
171 181
172 (*block)(in, tmp.c, key); 182 (*block)(in, tmp.c, key);
173 for(n=0; n<16/sizeof(size_t); n++) { 183 for (n = 0; n < 16/sizeof(size_t); n++) {
174 c = in_t[n]; 184 c = in_t[n];
175 out_t[n] = tmp.t[n] ^ ivec_t[n]; 185 out_t[n] = tmp.t[n] ^ ivec_t[n];
176 ivec_t[n] = c; 186 ivec_t[n] = c;
177 } 187 }
178 len -= 16; 188 len -= 16;
179 in += 16; 189 in += 16;
180 out += 16; 190 out += 16;
181 } 191 }
182 } 192 }
@@ -185,18 +195,18 @@ void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
185 while (len) { 195 while (len) {
186 unsigned char c; 196 unsigned char c;
187 (*block)(in, tmp.c, key); 197 (*block)(in, tmp.c, key);
188 for(n=0; n<16 && n<len; ++n) { 198 for (n = 0; n < 16 && n < len; ++n) {
189 c = in[n]; 199 c = in[n];
190 out[n] = tmp.c[n] ^ ivec[n]; 200 out[n] = tmp.c[n] ^ ivec[n];
191 ivec[n] = c; 201 ivec[n] = c;
192 } 202 }
193 if (len<=16) { 203 if (len <= 16) {
194 for (; n<16; ++n) 204 for (; n < 16; ++n)
195 ivec[n] = in[n]; 205 ivec[n] = in[n];
196 break; 206 break;
197 } 207 }
198 len -= 16; 208 len -= 16;
199 in += 16; 209 in += 16;
200 out += 16; 210 out += 16;
201 } 211 }
202} 212}
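
As orientation for the reformatted code above: CRYPTO_cbc128_encrypt is the generic CBC driver, parameterized over any 16-byte block cipher through the block128_f callback. A minimal usage sketch pairing it with AES — illustrative only, not part of this commit, and assuming the usual <openssl/aes.h> and <openssl/modes.h> headers of a LibreSSL/OpenSSL tree:

#include <openssl/aes.h>	/* AES_set_encrypt_key, AES_encrypt */
#include <openssl/modes.h>	/* CRYPTO_cbc128_encrypt, block128_f */

/* Encrypt len bytes with AES-128-CBC; the caller pads len to a
 * multiple of 16 and supplies a fresh 16-byte IV, which the driver
 * updates in place so successive calls can be chained. */
static void
aes_cbc_encrypt_sketch(const unsigned char *in, unsigned char *out,
    size_t len, const unsigned char key16[16], unsigned char iv[16])
{
	AES_KEY ks;

	AES_set_encrypt_key(key16, 128, &ks);
	/* AES_encrypt has the (in, out, key) shape block128_f expects. */
	CRYPTO_cbc128_encrypt(in, out, len, &ks, iv,
	    (block128_f)AES_encrypt);
}
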
diff --git a/src/lib/libcrypto/modes/ccm128.c b/src/lib/libcrypto/modes/ccm128.c
index 978259e1ba..d1471ee2dd 100644
--- a/src/lib/libcrypto/modes/ccm128.c
+++ b/src/lib/libcrypto/modes/ccm128.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: ccm128.c,v 1.6 2022/11/26 16:08:53 tb Exp $ */ 1/* $OpenBSD: ccm128.c,v 1.7 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2011 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2011 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -7,7 +7,7 @@
7 * are met: 7 * are met:
8 * 8 *
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 11 *
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in 13 * notice, this list of conditions and the following disclaimer in
@@ -60,11 +60,12 @@
60 60
61/* First you setup M and L parameters and pass the key schedule. 61/* First you setup M and L parameters and pass the key schedule.
62 * This is called once per session setup... */ 62 * This is called once per session setup... */
63void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx, 63void
64 unsigned int M,unsigned int L,void *key,block128_f block) 64CRYPTO_ccm128_init(CCM128_CONTEXT *ctx,
65 unsigned int M, unsigned int L, void *key, block128_f block)
65{ 66{
66 memset(ctx->nonce.c,0,sizeof(ctx->nonce.c)); 67 memset(ctx->nonce.c, 0, sizeof(ctx->nonce.c));
67 ctx->nonce.c[0] = ((u8)(L-1)&7) | (u8)(((M-2)/2)&7)<<3; 68 ctx->nonce.c[0] = ((u8)(L - 1) & 7) | (u8)(((M - 2)/2) & 7) << 3;
68 ctx->blocks = 0; 69 ctx->blocks = 0;
69 ctx->block = block; 70 ctx->block = block;
70 ctx->key = key; 71 ctx->key = key;
@@ -73,79 +74,82 @@ void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx,
73/* !!! Following interfaces are to be called *once* per packet !!! */ 74/* !!! Following interfaces are to be called *once* per packet !!! */
74 75
75/* Then you setup per-message nonce and pass the length of the message */ 76/* Then you setup per-message nonce and pass the length of the message */
76int CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx, 77int
77 const unsigned char *nonce,size_t nlen,size_t mlen) 78CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx,
79 const unsigned char *nonce, size_t nlen, size_t mlen)
78{ 80{
79 unsigned int L = ctx->nonce.c[0]&7; /* the L parameter */ 81 unsigned int L = ctx->nonce.c[0] & 7; /* the L parameter */
80 82
81 if (nlen<(14-L)) return -1; /* nonce is too short */ 83 if (nlen < (14 - L))
84 return -1; /* nonce is too short */
82 85
83 if (sizeof(mlen)==8 && L>=3) { 86 if (sizeof(mlen) == 8 && L >= 3) {
84 ctx->nonce.c[8] = (u8)(mlen>>(56%(sizeof(mlen)*8))); 87 ctx->nonce.c[8] = (u8)(mlen >> (56 % (sizeof(mlen)*8)));
85 ctx->nonce.c[9] = (u8)(mlen>>(48%(sizeof(mlen)*8))); 88 ctx->nonce.c[9] = (u8)(mlen >> (48 % (sizeof(mlen)*8)));
86 ctx->nonce.c[10] = (u8)(mlen>>(40%(sizeof(mlen)*8))); 89 ctx->nonce.c[10] = (u8)(mlen >> (40 % (sizeof(mlen)*8)));
87 ctx->nonce.c[11] = (u8)(mlen>>(32%(sizeof(mlen)*8))); 90 ctx->nonce.c[11] = (u8)(mlen >> (32 % (sizeof(mlen)*8)));
88 } 91 } else
89 else
90 ctx->nonce.u[1] = 0; 92 ctx->nonce.u[1] = 0;
91 93
92 ctx->nonce.c[12] = (u8)(mlen>>24); 94 ctx->nonce.c[12] = (u8)(mlen >> 24);
93 ctx->nonce.c[13] = (u8)(mlen>>16); 95 ctx->nonce.c[13] = (u8)(mlen >> 16);
94 ctx->nonce.c[14] = (u8)(mlen>>8); 96 ctx->nonce.c[14] = (u8)(mlen >> 8);
95 ctx->nonce.c[15] = (u8)mlen; 97 ctx->nonce.c[15] = (u8)mlen;
96 98
97 ctx->nonce.c[0] &= ~0x40; /* clear Adata flag */ 99 ctx->nonce.c[0] &= ~0x40; /* clear Adata flag */
98 memcpy(&ctx->nonce.c[1],nonce,14-L); 100 memcpy(&ctx->nonce.c[1], nonce, 14 - L);
99 101
100 return 0; 102 return 0;
101} 103}
102 104
103/* Then you pass additional authentication data, this is optional */ 105/* Then you pass additional authentication data, this is optional */
104void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx, 106void
105 const unsigned char *aad,size_t alen) 107CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx,
106{ unsigned int i; 108 const unsigned char *aad, size_t alen)
109{
110 unsigned int i;
107 block128_f block = ctx->block; 111 block128_f block = ctx->block;
108 112
109 if (alen==0) return; 113 if (alen == 0)
114 return;
110 115
111 ctx->nonce.c[0] |= 0x40; /* set Adata flag */ 116 ctx->nonce.c[0] |= 0x40; /* set Adata flag */
112 (*block)(ctx->nonce.c,ctx->cmac.c,ctx->key), 117 (*block)(ctx->nonce.c, ctx->cmac.c, ctx->key),
113 ctx->blocks++; 118 ctx->blocks++;
114 119
115 if (alen<(0x10000-0x100)) { 120 if (alen < (0x10000 - 0x100)) {
116 ctx->cmac.c[0] ^= (u8)(alen>>8); 121 ctx->cmac.c[0] ^= (u8)(alen >> 8);
117 ctx->cmac.c[1] ^= (u8)alen; 122 ctx->cmac.c[1] ^= (u8)alen;
118 i=2; 123 i = 2;
119 } 124 } else if (sizeof(alen) == 8 &&
120 else if (sizeof(alen)==8 && alen>=(size_t)1<<(32%(sizeof(alen)*8))) { 125 alen >= (size_t)1 << (32 % (sizeof(alen)*8))) {
121 ctx->cmac.c[0] ^= 0xFF; 126 ctx->cmac.c[0] ^= 0xFF;
122 ctx->cmac.c[1] ^= 0xFF; 127 ctx->cmac.c[1] ^= 0xFF;
123 ctx->cmac.c[2] ^= (u8)(alen>>(56%(sizeof(alen)*8))); 128 ctx->cmac.c[2] ^= (u8)(alen >> (56 % (sizeof(alen)*8)));
124 ctx->cmac.c[3] ^= (u8)(alen>>(48%(sizeof(alen)*8))); 129 ctx->cmac.c[3] ^= (u8)(alen >> (48 % (sizeof(alen)*8)));
125 ctx->cmac.c[4] ^= (u8)(alen>>(40%(sizeof(alen)*8))); 130 ctx->cmac.c[4] ^= (u8)(alen >> (40 % (sizeof(alen)*8)));
126 ctx->cmac.c[5] ^= (u8)(alen>>(32%(sizeof(alen)*8))); 131 ctx->cmac.c[5] ^= (u8)(alen >> (32 % (sizeof(alen)*8)));
127 ctx->cmac.c[6] ^= (u8)(alen>>24); 132 ctx->cmac.c[6] ^= (u8)(alen >> 24);
128 ctx->cmac.c[7] ^= (u8)(alen>>16); 133 ctx->cmac.c[7] ^= (u8)(alen >> 16);
129 ctx->cmac.c[8] ^= (u8)(alen>>8); 134 ctx->cmac.c[8] ^= (u8)(alen >> 8);
130 ctx->cmac.c[9] ^= (u8)alen; 135 ctx->cmac.c[9] ^= (u8)alen;
131 i=10; 136 i = 10;
132 } 137 } else {
133 else {
134 ctx->cmac.c[0] ^= 0xFF; 138 ctx->cmac.c[0] ^= 0xFF;
135 ctx->cmac.c[1] ^= 0xFE; 139 ctx->cmac.c[1] ^= 0xFE;
136 ctx->cmac.c[2] ^= (u8)(alen>>24); 140 ctx->cmac.c[2] ^= (u8)(alen >> 24);
137 ctx->cmac.c[3] ^= (u8)(alen>>16); 141 ctx->cmac.c[3] ^= (u8)(alen >> 16);
138 ctx->cmac.c[4] ^= (u8)(alen>>8); 142 ctx->cmac.c[4] ^= (u8)(alen >> 8);
139 ctx->cmac.c[5] ^= (u8)alen; 143 ctx->cmac.c[5] ^= (u8)alen;
140 i=6; 144 i = 6;
141 } 145 }
142 146
143 do { 147 do {
144 for(;i<16 && alen;++i,++aad,--alen) 148 for (; i < 16 && alen; ++i, ++aad, --alen)
145 ctx->cmac.c[i] ^= *aad; 149 ctx->cmac.c[i] ^= *aad;
146 (*block)(ctx->cmac.c,ctx->cmac.c,ctx->key), 150 (*block)(ctx->cmac.c, ctx->cmac.c, ctx->key),
147 ctx->blocks++; 151 ctx->blocks++;
148 i=0; 152 i = 0;
149 } while (alen); 153 } while (alen);
150} 154}
151 155
@@ -153,9 +157,11 @@ void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx,
153 157
154/* counter part of nonce may not be larger than L*8 bits, 158/* counter part of nonce may not be larger than L*8 bits,
155 * L is not larger than 8, therefore 64-bit counter... */ 159 * L is not larger than 8, therefore 64-bit counter... */
156static void ctr64_inc(unsigned char *counter) { 160static void
157 unsigned int n=8; 161ctr64_inc(unsigned char *counter)
158 u8 c; 162{
163 unsigned int n = 8;
164 u8 c;
159 165
160 counter += 8; 166 counter += 8;
161 do { 167 do {
@@ -163,60 +169,70 @@ static void ctr64_inc(unsigned char *counter) {
163 c = counter[n]; 169 c = counter[n];
164 ++c; 170 ++c;
165 counter[n] = c; 171 counter[n] = c;
166 if (c) return; 172 if (c)
173 return;
167 } while (n); 174 } while (n);
168} 175}
169 176
170int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx, 177int
171 const unsigned char *inp, unsigned char *out, 178CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
172 size_t len) 179 const unsigned char *inp, unsigned char *out,
180 size_t len)
173{ 181{
174 size_t n; 182 size_t n;
175 unsigned int i,L; 183 unsigned int i, L;
176 unsigned char flags0 = ctx->nonce.c[0]; 184 unsigned char flags0 = ctx->nonce.c[0];
177 block128_f block = ctx->block; 185 block128_f block = ctx->block;
178 void * key = ctx->key; 186 void *key = ctx->key;
179 union { u64 u[2]; u8 c[16]; } scratch; 187 union {
180 188 u64 u[2];
181 if (!(flags0&0x40)) 189 u8 c[16];
182 (*block)(ctx->nonce.c,ctx->cmac.c,key), 190 } scratch;
183 ctx->blocks++; 191
184 192 if (!(flags0 & 0x40))
185 ctx->nonce.c[0] = L = flags0&7; 193 (*block)(ctx->nonce.c, ctx->cmac.c, key),
186 for (n=0,i=15-L;i<15;++i) { 194 ctx->blocks++;
195
196 ctx->nonce.c[0] = L = flags0 & 7;
197 for (n = 0, i = 15 - L; i < 15; ++i) {
187 n |= ctx->nonce.c[i]; 198 n |= ctx->nonce.c[i];
188 ctx->nonce.c[i]=0; 199 ctx->nonce.c[i] = 0;
189 n <<= 8; 200 n <<= 8;
190 } 201 }
191 n |= ctx->nonce.c[15]; /* reconstructed length */ 202 n |= ctx->nonce.c[15]; /* reconstructed length */
192 ctx->nonce.c[15]=1; 203 ctx->nonce.c[15] = 1;
193 204
194 if (n!=len) return -1; /* length mismatch */ 205 if (n != len)
206 return -1; /* length mismatch */
195 207
196 ctx->blocks += ((len+15)>>3)|1; 208 ctx->blocks += ((len + 15) >> 3)|1;
197 if (ctx->blocks > (U64(1)<<61)) return -2; /* too much data */ 209 if (ctx->blocks > (U64(1) << 61))
210 return -2; /* too much data */
198 211
199 while (len>=16) { 212 while (len >= 16) {
200#ifdef __STRICT_ALIGNMENT 213#ifdef __STRICT_ALIGNMENT
201 union { u64 u[2]; u8 c[16]; } temp; 214 union {
215 u64 u[2];
216 u8 c[16];
217 } temp;
202 218
203 memcpy (temp.c,inp,16); 219 memcpy(temp.c, inp, 16);
204 ctx->cmac.u[0] ^= temp.u[0]; 220 ctx->cmac.u[0] ^= temp.u[0];
205 ctx->cmac.u[1] ^= temp.u[1]; 221 ctx->cmac.u[1] ^= temp.u[1];
206#else 222#else
207 ctx->cmac.u[0] ^= ((u64*)inp)[0]; 223 ctx->cmac.u[0] ^= ((u64 *)inp)[0];
208 ctx->cmac.u[1] ^= ((u64*)inp)[1]; 224 ctx->cmac.u[1] ^= ((u64 *)inp)[1];
209#endif 225#endif
210 (*block)(ctx->cmac.c,ctx->cmac.c,key); 226 (*block)(ctx->cmac.c, ctx->cmac.c, key);
211 (*block)(ctx->nonce.c,scratch.c,key); 227 (*block)(ctx->nonce.c, scratch.c, key);
212 ctr64_inc(ctx->nonce.c); 228 ctr64_inc(ctx->nonce.c);
213#ifdef __STRICT_ALIGNMENT 229#ifdef __STRICT_ALIGNMENT
214 temp.u[0] ^= scratch.u[0]; 230 temp.u[0] ^= scratch.u[0];
215 temp.u[1] ^= scratch.u[1]; 231 temp.u[1] ^= scratch.u[1];
216 memcpy(out,temp.c,16); 232 memcpy(out, temp.c, 16);
217#else 233#else
218 ((u64*)out)[0] = scratch.u[0]^((u64*)inp)[0]; 234 ((u64 *)out)[0] = scratch.u[0] ^ ((u64 *)inp)[0];
219 ((u64*)out)[1] = scratch.u[1]^((u64*)inp)[1]; 235 ((u64 *)out)[1] = scratch.u[1] ^ ((u64 *)inp)[1];
220#endif 236#endif
221 inp += 16; 237 inp += 16;
222 out += 16; 238 out += 16;
@@ -224,16 +240,18 @@ int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
224 } 240 }
225 241
226 if (len) { 242 if (len) {
227 for (i=0; i<len; ++i) ctx->cmac.c[i] ^= inp[i]; 243 for (i = 0; i < len; ++i)
228 (*block)(ctx->cmac.c,ctx->cmac.c,key); 244 ctx->cmac.c[i] ^= inp[i];
229 (*block)(ctx->nonce.c,scratch.c,key); 245 (*block)(ctx->cmac.c, ctx->cmac.c, key);
230 for (i=0; i<len; ++i) out[i] = scratch.c[i]^inp[i]; 246 (*block)(ctx->nonce.c, scratch.c, key);
247 for (i = 0; i < len; ++i)
248 out[i] = scratch.c[i] ^ inp[i];
231 } 249 }
232 250
233 for (i=15-L;i<16;++i) 251 for (i = 15 - L; i < 16; ++i)
234 ctx->nonce.c[i]=0; 252 ctx->nonce.c[i] = 0;
235 253
236 (*block)(ctx->nonce.c,scratch.c,key); 254 (*block)(ctx->nonce.c, scratch.c, key);
237 ctx->cmac.u[0] ^= scratch.u[0]; 255 ctx->cmac.u[0] ^= scratch.u[0];
238 ctx->cmac.u[1] ^= scratch.u[1]; 256 ctx->cmac.u[1] ^= scratch.u[1];
239 257
@@ -242,47 +260,57 @@ int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
242 return 0; 260 return 0;
243} 261}
244 262
245int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx, 263int
246 const unsigned char *inp, unsigned char *out, 264CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
247 size_t len) 265 const unsigned char *inp, unsigned char *out,
266 size_t len)
248{ 267{
249 size_t n; 268 size_t n;
250 unsigned int i,L; 269 unsigned int i, L;
251 unsigned char flags0 = ctx->nonce.c[0]; 270 unsigned char flags0 = ctx->nonce.c[0];
252 block128_f block = ctx->block; 271 block128_f block = ctx->block;
253 void * key = ctx->key; 272 void *key = ctx->key;
254 union { u64 u[2]; u8 c[16]; } scratch; 273 union {
255 274 u64 u[2];
256 if (!(flags0&0x40)) 275 u8 c[16];
257 (*block)(ctx->nonce.c,ctx->cmac.c,key); 276 } scratch;
258 277
259 ctx->nonce.c[0] = L = flags0&7; 278 if (!(flags0 & 0x40))
260 for (n=0,i=15-L;i<15;++i) { 279 (*block)(ctx->nonce.c, ctx->cmac.c, key);
280
281 ctx->nonce.c[0] = L = flags0 & 7;
282 for (n = 0, i = 15 - L; i < 15; ++i) {
261 n |= ctx->nonce.c[i]; 283 n |= ctx->nonce.c[i];
262 ctx->nonce.c[i]=0; 284 ctx->nonce.c[i] = 0;
263 n <<= 8; 285 n <<= 8;
264 } 286 }
265 n |= ctx->nonce.c[15]; /* reconstructed length */ 287 n |= ctx->nonce.c[15]; /* reconstructed length */
266 ctx->nonce.c[15]=1; 288 ctx->nonce.c[15] = 1;
267 289
268 if (n!=len) return -1; 290 if (n != len)
291 return -1;
269 292
270 while (len>=16) { 293 while (len >= 16) {
271#ifdef __STRICT_ALIGNMENT 294#ifdef __STRICT_ALIGNMENT
272 union { u64 u[2]; u8 c[16]; } temp; 295 union {
296 u64 u[2];
297 u8 c[16];
298 } temp;
273#endif 299#endif
274 (*block)(ctx->nonce.c,scratch.c,key); 300 (*block)(ctx->nonce.c, scratch.c, key);
275 ctr64_inc(ctx->nonce.c); 301 ctr64_inc(ctx->nonce.c);
276#ifdef __STRICT_ALIGNMENT 302#ifdef __STRICT_ALIGNMENT
277 memcpy (temp.c,inp,16); 303 memcpy(temp.c, inp, 16);
278 ctx->cmac.u[0] ^= (scratch.u[0] ^= temp.u[0]); 304 ctx->cmac.u[0] ^= (scratch.u[0] ^= temp.u[0]);
279 ctx->cmac.u[1] ^= (scratch.u[1] ^= temp.u[1]); 305 ctx->cmac.u[1] ^= (scratch.u[1] ^= temp.u[1]);
280 memcpy (out,scratch.c,16); 306 memcpy(out, scratch.c, 16);
281#else 307#else
282 ctx->cmac.u[0] ^= (((u64*)out)[0] = scratch.u[0]^((u64*)inp)[0]); 308 ctx->cmac.u[0] ^= (((u64 *)out)[0] = scratch.u[0] ^
283 ctx->cmac.u[1] ^= (((u64*)out)[1] = scratch.u[1]^((u64*)inp)[1]); 309 ((u64 *)inp)[0]);
310 ctx->cmac.u[1] ^= (((u64 *)out)[1] = scratch.u[1] ^
311 ((u64 *)inp)[1]);
284#endif 312#endif
285 (*block)(ctx->cmac.c,ctx->cmac.c,key); 313 (*block)(ctx->cmac.c, ctx->cmac.c, key);
286 314
287 inp += 16; 315 inp += 16;
288 out += 16; 316 out += 16;
@@ -290,16 +318,16 @@ int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
290 } 318 }
291 319
292 if (len) { 320 if (len) {
293 (*block)(ctx->nonce.c,scratch.c,key); 321 (*block)(ctx->nonce.c, scratch.c, key);
294 for (i=0; i<len; ++i) 322 for (i = 0; i < len; ++i)
295 ctx->cmac.c[i] ^= (out[i] = scratch.c[i]^inp[i]); 323 ctx->cmac.c[i] ^= (out[i] = scratch.c[i] ^ inp[i]);
296 (*block)(ctx->cmac.c,ctx->cmac.c,key); 324 (*block)(ctx->cmac.c, ctx->cmac.c, key);
297 } 325 }
298 326
299 for (i=15-L;i<16;++i) 327 for (i = 15 - L; i < 16; ++i)
300 ctx->nonce.c[i]=0; 328 ctx->nonce.c[i] = 0;
301 329
302 (*block)(ctx->nonce.c,scratch.c,key); 330 (*block)(ctx->nonce.c, scratch.c, key);
303 ctx->cmac.u[0] ^= scratch.u[0]; 331 ctx->cmac.u[0] ^= scratch.u[0];
304 ctx->cmac.u[1] ^= scratch.u[1]; 332 ctx->cmac.u[1] ^= scratch.u[1];
305 333
@@ -308,68 +336,79 @@ int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
308 return 0; 336 return 0;
309} 337}
310 338
311static void ctr64_add (unsigned char *counter,size_t inc) 339static void
312{ size_t n=8, val=0; 340ctr64_add(unsigned char *counter, size_t inc)
341{
342 size_t n = 8, val = 0;
313 343
314 counter += 8; 344 counter += 8;
315 do { 345 do {
316 --n; 346 --n;
317 val += counter[n] + (inc&0xff); 347 val += counter[n] + (inc & 0xff);
318 counter[n] = (unsigned char)val; 348 counter[n] = (unsigned char)val;
319 val >>= 8; /* carry bit */ 349 val >>= 8; /* carry bit */
320 inc >>= 8; 350 inc >>= 8;
321 } while(n && (inc || val)); 351 } while (n && (inc || val));
322} 352}
323 353
324int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx, 354int
325 const unsigned char *inp, unsigned char *out, 355CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx,
326 size_t len,ccm128_f stream) 356 const unsigned char *inp, unsigned char *out,
357 size_t len, ccm128_f stream)
327{ 358{
328 size_t n; 359 size_t n;
329 unsigned int i,L; 360 unsigned int i, L;
330 unsigned char flags0 = ctx->nonce.c[0]; 361 unsigned char flags0 = ctx->nonce.c[0];
331 block128_f block = ctx->block; 362 block128_f block = ctx->block;
332 void * key = ctx->key; 363 void *key = ctx->key;
333 union { u64 u[2]; u8 c[16]; } scratch; 364 union {
334 365 u64 u[2];
335 if (!(flags0&0x40)) 366 u8 c[16];
336 (*block)(ctx->nonce.c,ctx->cmac.c,key), 367 } scratch;
337 ctx->blocks++; 368
338 369 if (!(flags0 & 0x40))
339 ctx->nonce.c[0] = L = flags0&7; 370 (*block)(ctx->nonce.c, ctx->cmac.c, key),
340 for (n=0,i=15-L;i<15;++i) { 371 ctx->blocks++;
372
373 ctx->nonce.c[0] = L = flags0 & 7;
374 for (n = 0, i = 15 - L; i < 15; ++i) {
341 n |= ctx->nonce.c[i]; 375 n |= ctx->nonce.c[i];
342 ctx->nonce.c[i]=0; 376 ctx->nonce.c[i] = 0;
343 n <<= 8; 377 n <<= 8;
344 } 378 }
345 n |= ctx->nonce.c[15]; /* reconstructed length */ 379 n |= ctx->nonce.c[15]; /* reconstructed length */
346 ctx->nonce.c[15]=1; 380 ctx->nonce.c[15] = 1;
347 381
348 if (n!=len) return -1; /* length mismatch */ 382 if (n != len)
383 return -1; /* length mismatch */
349 384
350 ctx->blocks += ((len+15)>>3)|1; 385 ctx->blocks += ((len + 15) >> 3)|1;
351 if (ctx->blocks > (U64(1)<<61)) return -2; /* too much data */ 386 if (ctx->blocks > (U64(1) << 61))
387 return -2; /* too much data */
352 388
353 if ((n=len/16)) { 389 if ((n = len/16)) {
354 (*stream)(inp,out,n,key,ctx->nonce.c,ctx->cmac.c); 390 (*stream)(inp, out, n, key, ctx->nonce.c, ctx->cmac.c);
355 n *= 16; 391 n *= 16;
356 inp += n; 392 inp += n;
357 out += n; 393 out += n;
358 len -= n; 394 len -= n;
359 if (len) ctr64_add(ctx->nonce.c,n/16); 395 if (len)
396 ctr64_add(ctx->nonce.c, n/16);
360 } 397 }
361 398
362 if (len) { 399 if (len) {
363 for (i=0; i<len; ++i) ctx->cmac.c[i] ^= inp[i]; 400 for (i = 0; i < len; ++i)
364 (*block)(ctx->cmac.c,ctx->cmac.c,key); 401 ctx->cmac.c[i] ^= inp[i];
365 (*block)(ctx->nonce.c,scratch.c,key); 402 (*block)(ctx->cmac.c, ctx->cmac.c, key);
366 for (i=0; i<len; ++i) out[i] = scratch.c[i]^inp[i]; 403 (*block)(ctx->nonce.c, scratch.c, key);
404 for (i = 0; i < len; ++i)
405 out[i] = scratch.c[i] ^ inp[i];
367 } 406 }
368 407
369 for (i=15-L;i<16;++i) 408 for (i = 15 - L; i < 16; ++i)
370 ctx->nonce.c[i]=0; 409 ctx->nonce.c[i] = 0;
371 410
372 (*block)(ctx->nonce.c,scratch.c,key); 411 (*block)(ctx->nonce.c, scratch.c, key);
373 ctx->cmac.u[0] ^= scratch.u[0]; 412 ctx->cmac.u[0] ^= scratch.u[0];
374 ctx->cmac.u[1] ^= scratch.u[1]; 413 ctx->cmac.u[1] ^= scratch.u[1];
375 414
@@ -378,51 +417,57 @@ int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx,
378 return 0; 417 return 0;
379} 418}
380 419
381int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx, 420int
382 const unsigned char *inp, unsigned char *out, 421CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx,
383 size_t len,ccm128_f stream) 422 const unsigned char *inp, unsigned char *out,
423 size_t len, ccm128_f stream)
384{ 424{
385 size_t n; 425 size_t n;
386 unsigned int i,L; 426 unsigned int i, L;
387 unsigned char flags0 = ctx->nonce.c[0]; 427 unsigned char flags0 = ctx->nonce.c[0];
388 block128_f block = ctx->block; 428 block128_f block = ctx->block;
389 void * key = ctx->key; 429 void *key = ctx->key;
390 union { u64 u[2]; u8 c[16]; } scratch; 430 union {
391 431 u64 u[2];
392 if (!(flags0&0x40)) 432 u8 c[16];
393 (*block)(ctx->nonce.c,ctx->cmac.c,key); 433 } scratch;
394 434
395 ctx->nonce.c[0] = L = flags0&7; 435 if (!(flags0 & 0x40))
396 for (n=0,i=15-L;i<15;++i) { 436 (*block)(ctx->nonce.c, ctx->cmac.c, key);
437
438 ctx->nonce.c[0] = L = flags0 & 7;
439 for (n = 0, i = 15 - L; i < 15; ++i) {
397 n |= ctx->nonce.c[i]; 440 n |= ctx->nonce.c[i];
398 ctx->nonce.c[i]=0; 441 ctx->nonce.c[i] = 0;
399 n <<= 8; 442 n <<= 8;
400 } 443 }
401 n |= ctx->nonce.c[15]; /* reconstructed length */ 444 n |= ctx->nonce.c[15]; /* reconstructed length */
402 ctx->nonce.c[15]=1; 445 ctx->nonce.c[15] = 1;
403 446
404 if (n!=len) return -1; 447 if (n != len)
448 return -1;
405 449
406 if ((n=len/16)) { 450 if ((n = len/16)) {
407 (*stream)(inp,out,n,key,ctx->nonce.c,ctx->cmac.c); 451 (*stream)(inp, out, n, key, ctx->nonce.c, ctx->cmac.c);
408 n *= 16; 452 n *= 16;
409 inp += n; 453 inp += n;
410 out += n; 454 out += n;
411 len -= n; 455 len -= n;
412 if (len) ctr64_add(ctx->nonce.c,n/16); 456 if (len)
457 ctr64_add(ctx->nonce.c, n/16);
413 } 458 }
414 459
415 if (len) { 460 if (len) {
416 (*block)(ctx->nonce.c,scratch.c,key); 461 (*block)(ctx->nonce.c, scratch.c, key);
417 for (i=0; i<len; ++i) 462 for (i = 0; i < len; ++i)
418 ctx->cmac.c[i] ^= (out[i] = scratch.c[i]^inp[i]); 463 ctx->cmac.c[i] ^= (out[i] = scratch.c[i] ^ inp[i]);
419 (*block)(ctx->cmac.c,ctx->cmac.c,key); 464 (*block)(ctx->cmac.c, ctx->cmac.c, key);
420 } 465 }
421 466
422 for (i=15-L;i<16;++i) 467 for (i = 15 - L; i < 16; ++i)
423 ctx->nonce.c[i]=0; 468 ctx->nonce.c[i] = 0;
424 469
425 (*block)(ctx->nonce.c,scratch.c,key); 470 (*block)(ctx->nonce.c, scratch.c, key);
426 ctx->cmac.u[0] ^= scratch.u[0]; 471 ctx->cmac.u[0] ^= scratch.u[0];
427 ctx->cmac.u[1] ^= scratch.u[1]; 472 ctx->cmac.u[1] ^= scratch.u[1];
428 473
@@ -431,11 +476,15 @@ int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx,
431 return 0; 476 return 0;
432} 477}
433 478
434size_t CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx,unsigned char *tag,size_t len) 479size_t
435{ unsigned int M = (ctx->nonce.c[0]>>3)&7; /* the M parameter */ 480CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx, unsigned char *tag, size_t len)
481{
482 unsigned int M = (ctx->nonce.c[0] >> 3) & 7; /* the M parameter */
436 483
437 M *= 2; M += 2; 484 M *= 2;
438 if (len != M) return 0; 485 M += 2;
439 memcpy(tag,ctx->cmac.c,M); 486 if (len != M)
487 return 0;
488 memcpy(tag, ctx->cmac.c, M);
440 return M; 489 return M;
441} 490}
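
The comments in ccm128.c prescribe the calling sequence: init once per session, then setiv, optional aad, encrypt/decrypt, and tag once per packet. A hedged sketch of sealing one packet with AES as the block cipher — illustrative, not from the commit; with L = 2 the nonce is 15 - 2 = 13 bytes and the message length must fit in two bytes, i.e. stay under 64 kB:

#include <openssl/aes.h>
#include <openssl/modes.h>	/* CCM128_CONTEXT, CRYPTO_ccm128_* */

/* Seal one packet with AES-128-CCM, M = 12 (tag bytes), L = 2.
 * Returns the tag length (12) on success, 0 on failure. */
static size_t
ccm_seal_sketch(const unsigned char key16[16],
    const unsigned char nonce13[13],
    const unsigned char *aad, size_t aadlen,
    const unsigned char *pt, unsigned char *ct, size_t ptlen,
    unsigned char tag[12])
{
	AES_KEY ks;
	CCM128_CONTEXT ccm;

	AES_set_encrypt_key(key16, 128, &ks);
	CRYPTO_ccm128_init(&ccm, 12, 2, &ks, (block128_f)AES_encrypt);
	if (CRYPTO_ccm128_setiv(&ccm, nonce13, 13, ptlen) != 0)
		return 0;	/* nonce too short */
	if (aadlen > 0)
		CRYPTO_ccm128_aad(&ccm, aad, aadlen);
	if (CRYPTO_ccm128_encrypt(&ccm, pt, ct, ptlen) != 0)
		return 0;	/* length mismatch or too much data */
	return CRYPTO_ccm128_tag(&ccm, tag, 12);
}
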
diff --git a/src/lib/libcrypto/modes/cfb128.c b/src/lib/libcrypto/modes/cfb128.c
index 8555ce0552..f538a2b11c 100644
--- a/src/lib/libcrypto/modes/cfb128.c
+++ b/src/lib/libcrypto/modes/cfb128.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: cfb128.c,v 1.5 2022/11/26 16:08:53 tb Exp $ */ 1/* $OpenBSD: cfb128.c,v 1.6 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -7,7 +7,7 @@
7 * are met: 7 * are met:
8 * 8 *
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 11 *
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in 13 * notice, this list of conditions and the following disclaimer in
@@ -63,172 +63,186 @@
63 * used. The extra state information to record how much of the 63 * used. The extra state information to record how much of the
64 * 128bit block we have used is contained in *num; 64 * 128bit block we have used is contained in *num;
65 */ 65 */
66void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out, 66void
67 size_t len, const void *key, 67CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
68 unsigned char ivec[16], int *num, 68 size_t len, const void *key,
69 int enc, block128_f block) 69 unsigned char ivec[16], int *num,
70 int enc, block128_f block)
70{ 71{
71 unsigned int n; 72 unsigned int n;
72 size_t l = 0; 73 size_t l = 0;
73 74
74 n = *num; 75 n = *num;
75 76
76 if (enc) { 77 if (enc) {
77#if !defined(OPENSSL_SMALL_FOOTPRINT) 78#if !defined(OPENSSL_SMALL_FOOTPRINT)
78 if (16%sizeof(size_t) == 0) do { /* always true actually */ 79 if (16 % sizeof(size_t) == 0)
79 while (n && len) { 80 do { /* always true actually */
80 *(out++) = ivec[n] ^= *(in++); 81 while (n && len) {
81 --len; 82 *(out++) = ivec[n] ^= *(in++);
82 n = (n+1) % 16; 83 --len;
83 } 84 n = (n + 1) % 16;
85 }
84#ifdef __STRICT_ALIGNMENT 86#ifdef __STRICT_ALIGNMENT
85 if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) 87 if (((size_t)in|(size_t)out|(size_t)ivec) %
86 break; 88 sizeof(size_t) != 0)
89 break;
87#endif 90#endif
88 while (len>=16) { 91 while (len >= 16) {
89 (*block)(ivec, ivec, key); 92 (*block)(ivec, ivec, key);
90 for (; n<16; n+=sizeof(size_t)) { 93 for (; n < 16; n += sizeof(size_t)) {
91 *(size_t*)(out+n) = 94 *(size_t *)(out + n) =
92 *(size_t*)(ivec+n) ^= *(size_t*)(in+n); 95 *(size_t *)(ivec + n) ^= *(size_t *)(in +
93 } 96 n);
94 len -= 16; 97 }
95 out += 16; 98 len -= 16;
96 in += 16; 99 out += 16;
97 n = 0; 100 in += 16;
98 } 101 n = 0;
99 if (len) { 102 }
100 (*block)(ivec, ivec, key); 103 if (len) {
101 while (len--) { 104 (*block)(ivec, ivec, key);
102 out[n] = ivec[n] ^= in[n]; 105 while (len--) {
103 ++n; 106 out[n] = ivec[n] ^= in[n];
104 } 107 ++n;
105 } 108 }
106 *num = n; 109 }
107 return; 110 *num = n;
108 } while (0); 111 return;
112 } while (0);
109 /* the rest would be commonly eliminated by x86* compiler */ 113 /* the rest would be commonly eliminated by x86* compiler */
110#endif 114#endif
111 while (l<len) { 115 while (l < len) {
112 if (n == 0) { 116 if (n == 0) {
113 (*block)(ivec, ivec, key); 117 (*block)(ivec, ivec, key);
118 }
119 out[l] = ivec[n] ^= in[l];
120 ++l;
121 n = (n + 1) % 16;
114 } 122 }
115 out[l] = ivec[n] ^= in[l]; 123 *num = n;
116 ++l; 124 } else {
117 n = (n+1) % 16;
118 }
119 *num = n;
120 } else {
121#if !defined(OPENSSL_SMALL_FOOTPRINT) 125#if !defined(OPENSSL_SMALL_FOOTPRINT)
122 if (16%sizeof(size_t) == 0) do { /* always true actually */ 126 if (16 % sizeof(size_t) == 0)
123 while (n && len) { 127 do { /* always true actually */
124 unsigned char c; 128 while (n && len) {
125 *(out++) = ivec[n] ^ (c = *(in++)); ivec[n] = c; 129 unsigned char c;
126 --len; 130 *(out++) = ivec[n] ^ (c = *(in++));
127 n = (n+1) % 16; 131 ivec[n] = c;
128 } 132 --len;
133 n = (n + 1) % 16;
134 }
129#ifdef __STRICT_ALIGNMENT 135#ifdef __STRICT_ALIGNMENT
130 if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) 136 if (((size_t)in|(size_t)out|(size_t)ivec) %
131 break; 137 sizeof(size_t) != 0)
138 break;
132#endif 139#endif
133 while (len>=16) { 140 while (len >= 16) {
134 (*block)(ivec, ivec, key); 141 (*block)(ivec, ivec, key);
135 for (; n<16; n+=sizeof(size_t)) { 142 for (; n < 16; n += sizeof(size_t)) {
136 size_t t = *(size_t*)(in+n); 143 size_t t = *(size_t *)(in + n);
137 *(size_t*)(out+n) = *(size_t*)(ivec+n) ^ t; 144 *(size_t *)(out + n) = *(size_t *)(ivec +
138 *(size_t*)(ivec+n) = t; 145 n) ^ t;
139 } 146 *(size_t *)(ivec + n) = t;
140 len -= 16; 147 }
141 out += 16; 148 len -= 16;
142 in += 16; 149 out += 16;
143 n = 0; 150 in += 16;
144 } 151 n = 0;
145 if (len) { 152 }
146 (*block)(ivec, ivec, key); 153 if (len) {
147 while (len--) { 154 (*block)(ivec, ivec, key);
148 unsigned char c; 155 while (len--) {
149 out[n] = ivec[n] ^ (c = in[n]); ivec[n] = c; 156 unsigned char c;
150 ++n; 157 out[n] = ivec[n] ^ (c = in[n]);
151 } 158 ivec[n] = c;
152 } 159 ++n;
153 *num = n; 160 }
154 return; 161 }
155 } while (0); 162 *num = n;
163 return;
164 } while (0);
156 /* the rest would be commonly eliminated by x86* compiler */ 165 /* the rest would be commonly eliminated by x86* compiler */
157#endif 166#endif
158 while (l<len) { 167 while (l < len) {
159 unsigned char c; 168 unsigned char c;
160 if (n == 0) { 169 if (n == 0) {
161 (*block)(ivec, ivec, key); 170 (*block)(ivec, ivec, key);
171 }
172 out[l] = ivec[n] ^ (c = in[l]);
173 ivec[n] = c;
174 ++l;
175 n = (n + 1) % 16;
162 } 176 }
163 out[l] = ivec[n] ^ (c = in[l]); ivec[n] = c; 177 *num = n;
164 ++l;
165 n = (n+1) % 16;
166 } 178 }
167 *num=n;
168 }
169} 179}
170 180
171/* This expects a single block of size nbits for both in and out. Note that 181/* This expects a single block of size nbits for both in and out. Note that
172 it corrupts any extra bits in the last byte of out */ 182 it corrupts any extra bits in the last byte of out */
173static void cfbr_encrypt_block(const unsigned char *in,unsigned char *out, 183static void
174 int nbits,const void *key, 184cfbr_encrypt_block(const unsigned char *in, unsigned char *out,
175 unsigned char ivec[16],int enc, 185 int nbits, const void *key,
176 block128_f block) 186 unsigned char ivec[16], int enc,
187 block128_f block)
177{ 188{
178 int n,rem,num; 189 int n, rem, num;
179 unsigned char ovec[16*2 + 1]; /* +1 because we dererefence (but don't use) one byte off the end */ 190 unsigned char ovec[16*2 + 1]; /* +1 because we dererefence (but don't use) one byte off the end */
180 191
181 if (nbits<=0 || nbits>128) return; 192 if (nbits <= 0 || nbits > 128)
193 return;
182 194
183 /* fill in the first half of the new IV with the current IV */ 195 /* fill in the first half of the new IV with the current IV */
184 memcpy(ovec,ivec,16); 196 memcpy(ovec, ivec, 16);
185 /* construct the new IV */ 197 /* construct the new IV */
186 (*block)(ivec,ivec,key); 198 (*block)(ivec, ivec, key);
187 num = (nbits+7)/8; 199 num = (nbits + 7)/8;
188 if (enc) /* encrypt the input */ 200 if (enc) /* encrypt the input */
189 for(n=0 ; n < num ; ++n) 201 for (n = 0; n < num; ++n)
190 out[n] = (ovec[16+n] = in[n] ^ ivec[n]); 202 out[n] = (ovec[16 + n] = in[n] ^ ivec[n]);
191 else /* decrypt the input */ 203 else /* decrypt the input */
192 for(n=0 ; n < num ; ++n) 204 for (n = 0; n < num; ++n)
193 out[n] = (ovec[16+n] = in[n]) ^ ivec[n]; 205 out[n] = (ovec[16 + n] = in[n]) ^ ivec[n];
194 /* shift ovec left... */ 206 /* shift ovec left... */
195 rem = nbits%8; 207 rem = nbits % 8;
196 num = nbits/8; 208 num = nbits/8;
197 if(rem==0) 209 if (rem == 0)
198 memcpy(ivec,ovec+num,16); 210 memcpy(ivec, ovec + num, 16);
199 else 211 else
200 for(n=0 ; n < 16 ; ++n) 212 for (n = 0; n < 16; ++n)
201 ivec[n] = ovec[n+num]<<rem | ovec[n+num+1]>>(8-rem); 213 ivec[n] = ovec[n + num] << rem |
214 ovec[n + num + 1] >> (8 - rem);
202 215
203 /* it is not necessary to cleanse ovec, since the IV is not secret */ 216 /* it is not necessary to cleanse ovec, since the IV is not secret */
204} 217}
205 218
206/* N.B. This expects the input to be packed, MS bit first */ 219/* N.B. This expects the input to be packed, MS bit first */
207void CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out, 220void
208 size_t bits, const void *key, 221CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out,
209 unsigned char ivec[16], int *num, 222 size_t bits, const void *key,
210 int enc, block128_f block) 223 unsigned char ivec[16], int *num,
224 int enc, block128_f block)
211{ 225{
212 size_t n; 226 size_t n;
213 unsigned char c[1],d[1]; 227 unsigned char c[1], d[1];
214 228
215 for(n=0 ; n<bits ; ++n) 229 for (n = 0; n < bits; ++n)
216 { 230 {
217 c[0]=(in[n/8]&(1 << (7-n%8))) ? 0x80 : 0; 231 c[0] = (in[n/8] & (1 << (7 - n % 8))) ? 0x80 : 0;
218 cfbr_encrypt_block(c,d,1,key,ivec,enc,block); 232 cfbr_encrypt_block(c, d, 1, key, ivec, enc, block);
219 out[n/8]=(out[n/8]&~(1 << (unsigned int)(7-n%8))) | 233 out[n/8] = (out[n/8] & ~(1 << (unsigned int)(7 - n % 8))) |
220 ((d[0]&0x80) >> (unsigned int)(n%8)); 234 ((d[0] & 0x80) >> (unsigned int)(n % 8));
221 } 235 }
222} 236}
223 237
224void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out, 238void
225 size_t length, const void *key, 239CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out,
226 unsigned char ivec[16], int *num, 240 size_t length, const void *key,
227 int enc, block128_f block) 241 unsigned char ivec[16], int *num,
242 int enc, block128_f block)
228{ 243{
229 size_t n; 244 size_t n;
230 245
231 for(n=0 ; n<length ; ++n) 246 for (n = 0; n < length; ++n)
232 cfbr_encrypt_block(&in[n],&out[n],8,key,ivec,enc,block); 247 cfbr_encrypt_block(&in[n], &out[n], 8, key, ivec, enc, block);
233} 248}
234
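
The header comment in cfb128.c notes that *num records how much of the current 128-bit keystream block has been consumed, which is what makes the function streamable across calls of arbitrary length. A usage sketch (illustrative, not from the commit):

#include <openssl/aes.h>
#include <openssl/modes.h>

/* CFB128 needs no padding and uses one entry point for both
 * directions: enc = 1 encrypts, enc = 0 decrypts.  Decryption still
 * uses the *encryption* key schedule, because the block cipher is
 * only ever run forward to generate keystream. */
static void
aes_cfb128_sketch(const unsigned char *in, unsigned char *out, size_t len,
    const unsigned char key16[16], unsigned char iv[16], int enc)
{
	AES_KEY ks;
	int num = 0;	/* no partial keystream block consumed yet */

	AES_set_encrypt_key(key16, 128, &ks);
	CRYPTO_cfb128_encrypt(in, out, len, &ks, iv, &num, enc,
	    (block128_f)AES_encrypt);
}
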
diff --git a/src/lib/libcrypto/modes/ctr128.c b/src/lib/libcrypto/modes/ctr128.c
index eadb80449c..7ba68a9c4c 100644
--- a/src/lib/libcrypto/modes/ctr128.c
+++ b/src/lib/libcrypto/modes/ctr128.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: ctr128.c,v 1.9 2022/12/26 07:18:52 jmc Exp $ */ 1/* $OpenBSD: ctr128.c,v 1.10 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -7,7 +7,7 @@
7 * are met: 7 * are met:
8 * 8 *
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 11 *
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in 13 * notice, this list of conditions and the following disclaimer in
@@ -64,8 +64,10 @@
64 * is endian-neutral. */ 64 * is endian-neutral. */
65 65
66/* increment counter (128-bit int) by 1 */ 66/* increment counter (128-bit int) by 1 */
67static void ctr128_inc(unsigned char *counter) { 67static void
68 u32 n=16; 68ctr128_inc(unsigned char *counter)
69{
70 u32 n = 16;
69 u8 c; 71 u8 c;
70 72
71 do { 73 do {
@@ -73,7 +75,8 @@ static void ctr128_inc(unsigned char *counter) {
73 c = counter[n]; 75 c = counter[n];
74 ++c; 76 ++c;
75 counter[n] = c; 77 counter[n] = c;
76 if (c) return; 78 if (c)
79 return;
77 } while (n); 80 } while (n);
78} 81}
79 82
@@ -112,70 +115,76 @@ ctr128_inc_aligned(unsigned char *counter)
112 * responsibility for checking that the counter doesn't overflow 115 * responsibility for checking that the counter doesn't overflow
113 * into the rest of the IV when incremented. 116 * into the rest of the IV when incremented.
114 */ 117 */
115void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out, 118void
116 size_t len, const void *key, 119CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
117 unsigned char ivec[16], unsigned char ecount_buf[16], 120 size_t len, const void *key,
118 unsigned int *num, block128_f block) 121 unsigned char ivec[16], unsigned char ecount_buf[16],
122 unsigned int *num, block128_f block)
119{ 123{
120 unsigned int n; 124 unsigned int n;
121 size_t l=0; 125 size_t l = 0;
122 126
123 assert(*num < 16); 127 assert(*num < 16);
124 128
125 n = *num; 129 n = *num;
126 130
127#if !defined(OPENSSL_SMALL_FOOTPRINT) 131#if !defined(OPENSSL_SMALL_FOOTPRINT)
128 if (16%sizeof(size_t) == 0) do { /* always true actually */ 132 if (16 % sizeof(size_t) == 0)
129 while (n && len) { 133 do { /* always true actually */
130 *(out++) = *(in++) ^ ecount_buf[n]; 134 while (n && len) {
131 --len; 135 *(out++) = *(in++) ^ ecount_buf[n];
132 n = (n+1) % 16; 136 --len;
133 } 137 n = (n + 1) % 16;
138 }
134 139
135#ifdef __STRICT_ALIGNMENT 140#ifdef __STRICT_ALIGNMENT
136 if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) 141 if (((size_t)in|(size_t)out|(size_t)ivec) %
137 break; 142 sizeof(size_t) != 0)
143 break;
138#endif 144#endif
139 while (len>=16) { 145 while (len >= 16) {
140 (*block)(ivec, ecount_buf, key); 146 (*block)(ivec, ecount_buf, key);
141 ctr128_inc_aligned(ivec); 147 ctr128_inc_aligned(ivec);
142 for (; n<16; n+=sizeof(size_t)) 148 for (; n < 16; n += sizeof(size_t))
143 *(size_t *)(out+n) = 149 *(size_t *)(out + n) =
144 *(size_t *)(in+n) ^ *(size_t *)(ecount_buf+n); 150 *(size_t *)(in + n) ^ *(size_t *)(ecount_buf +
145 len -= 16; 151 n);
146 out += 16; 152 len -= 16;
147 in += 16; 153 out += 16;
148 n = 0; 154 in += 16;
149 } 155 n = 0;
150 if (len) {
151 (*block)(ivec, ecount_buf, key);
152 ctr128_inc_aligned(ivec);
153 while (len--) {
154 out[n] = in[n] ^ ecount_buf[n];
155 ++n;
156 } 156 }
157 } 157 if (len) {
158 *num = n; 158 (*block)(ivec, ecount_buf, key);
159 return; 159 ctr128_inc_aligned(ivec);
160 } while(0); 160 while (len--) {
161 out[n] = in[n] ^ ecount_buf[n];
162 ++n;
163 }
164 }
165 *num = n;
166 return;
167 } while (0);
161 /* the rest would be commonly eliminated by x86* compiler */ 168 /* the rest would be commonly eliminated by x86* compiler */
162#endif 169#endif
163 while (l<len) { 170 while (l < len) {
164 if (n==0) { 171 if (n == 0) {
165 (*block)(ivec, ecount_buf, key); 172 (*block)(ivec, ecount_buf, key);
166 ctr128_inc(ivec); 173 ctr128_inc(ivec);
167 } 174 }
168 out[l] = in[l] ^ ecount_buf[n]; 175 out[l] = in[l] ^ ecount_buf[n];
169 ++l; 176 ++l;
170 n = (n+1) % 16; 177 n = (n + 1) % 16;
171 } 178 }
172 179
173 *num=n; 180 *num = n;
174} 181}
175 182
176/* increment upper 96 bits of 128-bit counter by 1 */ 183/* increment upper 96 bits of 128-bit counter by 1 */
177static void ctr96_inc(unsigned char *counter) { 184static void
178 u32 n=12; 185ctr96_inc(unsigned char *counter)
186{
187 u32 n = 12;
179 u8 c; 188 u8 c;
180 189
181 do { 190 do {
@@ -183,16 +192,18 @@ static void ctr96_inc(unsigned char *counter) {
183 c = counter[n]; 192 c = counter[n];
184 ++c; 193 ++c;
185 counter[n] = c; 194 counter[n] = c;
186 if (c) return; 195 if (c)
196 return;
187 } while (n); 197 } while (n);
188} 198}
189 199
190void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out, 200void
191 size_t len, const void *key, 201CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
192 unsigned char ivec[16], unsigned char ecount_buf[16], 202 size_t len, const void *key,
193 unsigned int *num, ctr128_f func) 203 unsigned char ivec[16], unsigned char ecount_buf[16],
204 unsigned int *num, ctr128_f func)
194{ 205{
195 unsigned int n,ctr32; 206 unsigned int n, ctr32;
196 207
197 assert(*num < 16); 208 assert(*num < 16);
198 209
@@ -201,19 +212,20 @@ void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
201 while (n && len) { 212 while (n && len) {
202 *(out++) = *(in++) ^ ecount_buf[n]; 213 *(out++) = *(in++) ^ ecount_buf[n];
203 --len; 214 --len;
204 n = (n+1) % 16; 215 n = (n + 1) % 16;
205 } 216 }
206 217
207 ctr32 = GETU32(ivec+12); 218 ctr32 = GETU32(ivec + 12);
208 while (len>=16) { 219 while (len >= 16) {
209 size_t blocks = len/16; 220 size_t blocks = len/16;
210 /* 221 /*
211 * 1<<28 is just a not-so-small yet not-so-large number... 222 * 1<<28 is just a not-so-small yet not-so-large number...
212 * Below condition is practically never met, but it has to 223 * Below condition is practically never met, but it has to
213 * be checked for code correctness. 224 * be checked for code correctness.
214 */ 225 */
215 if (sizeof(size_t)>sizeof(unsigned int) && blocks>(1U<<28)) 226 if (sizeof(size_t) > sizeof(unsigned int) &&
216 blocks = (1U<<28); 227 blocks > (1U << 28))
228 blocks = (1U << 28);
217 /* 229 /*
218 * As (*func) operates on 32-bit counter, caller 230 * As (*func) operates on 32-bit counter, caller
219 * has to handle overflow. 'if' below detects the 231 * has to handle overflow. 'if' below detects the
@@ -223,29 +235,31 @@ void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
223 ctr32 += (u32)blocks; 235 ctr32 += (u32)blocks;
224 if (ctr32 < blocks) { 236 if (ctr32 < blocks) {
225 blocks -= ctr32; 237 blocks -= ctr32;
226 ctr32 = 0; 238 ctr32 = 0;
227 } 239 }
228 (*func)(in,out,blocks,key,ivec); 240 (*func)(in, out, blocks, key, ivec);
229 /* (*ctr) does not update ivec, caller does: */ 241 /* (*ctr) does not update ivec, caller does: */
230 PUTU32(ivec+12,ctr32); 242 PUTU32(ivec + 12, ctr32);
231 /* ... overflow was detected, propagate carry. */ 243 /* ... overflow was detected, propagate carry. */
232 if (ctr32 == 0) ctr96_inc(ivec); 244 if (ctr32 == 0)
245 ctr96_inc(ivec);
233 blocks *= 16; 246 blocks *= 16;
234 len -= blocks; 247 len -= blocks;
235 out += blocks; 248 out += blocks;
236 in += blocks; 249 in += blocks;
237 } 250 }
238 if (len) { 251 if (len) {
239 memset(ecount_buf,0,16); 252 memset(ecount_buf, 0, 16);
240 (*func)(ecount_buf,ecount_buf,1,key,ivec); 253 (*func)(ecount_buf, ecount_buf, 1, key, ivec);
241 ++ctr32; 254 ++ctr32;
242 PUTU32(ivec+12,ctr32); 255 PUTU32(ivec + 12, ctr32);
243 if (ctr32 == 0) ctr96_inc(ivec); 256 if (ctr32 == 0)
257 ctr96_inc(ivec);
244 while (len--) { 258 while (len--) {
245 out[n] = in[n] ^ ecount_buf[n]; 259 out[n] = in[n] ^ ecount_buf[n];
246 ++n; 260 ++n;
247 } 261 }
248 } 262 }
249 263
250 *num=n; 264 *num = n;
251} 265}
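
As the comment above CRYPTO_ctr128_encrypt stresses, the caller owns the 128-bit big-endian counter in ivec and is responsible for keeping increments from overflowing into the nonce portion; ecount_buf and *num carry the partially consumed keystream block between calls. A usage sketch (illustrative, not from the commit):

#include <string.h>
#include <openssl/aes.h>
#include <openssl/modes.h>

/* AES-128-CTR over one contiguous buffer.  Encryption and decryption
 * are the same operation, so a single helper serves both. */
static void
aes_ctr128_sketch(const unsigned char *in, unsigned char *out, size_t len,
    const unsigned char key16[16], unsigned char ivec[16])
{
	AES_KEY ks;
	unsigned char ecount_buf[16];	/* scratch: last keystream block */
	unsigned int num = 0;		/* bytes of it already used */

	memset(ecount_buf, 0, sizeof(ecount_buf));
	AES_set_encrypt_key(key16, 128, &ks);
	CRYPTO_ctr128_encrypt(in, out, len, &ks, ivec, ecount_buf,
	    &num, (block128_f)AES_encrypt);
}
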
diff --git a/src/lib/libcrypto/modes/gcm128.c b/src/lib/libcrypto/modes/gcm128.c
index 36aac413c3..45d33db768 100644
--- a/src/lib/libcrypto/modes/gcm128.c
+++ b/src/lib/libcrypto/modes/gcm128.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: gcm128.c,v 1.23 2022/11/26 16:08:53 tb Exp $ */ 1/* $OpenBSD: gcm128.c,v 1.24 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2010 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2010 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -69,17 +69,17 @@
69#endif 69#endif
70 70
71#define PACK(s) ((size_t)(s)<<(sizeof(size_t)*8-16)) 71#define PACK(s) ((size_t)(s)<<(sizeof(size_t)*8-16))
72#define REDUCE1BIT(V) \ 72#define REDUCE1BIT(V) \
73 do { \ 73 do { \
74 if (sizeof(size_t)==8) { \ 74 if (sizeof(size_t)==8) { \
75 u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); \ 75 u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); \
76 V.lo = (V.hi<<63)|(V.lo>>1); \ 76 V.lo = (V.hi<<63)|(V.lo>>1); \
77 V.hi = (V.hi>>1 )^T; \ 77 V.hi = (V.hi>>1 )^T; \
78 } else { \ 78 } else { \
79 u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); \ 79 u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); \
80 V.lo = (V.hi<<63)|(V.lo>>1); \ 80 V.lo = (V.hi<<63)|(V.lo>>1); \
81 V.hi = (V.hi>>1 )^((u64)T<<32); \ 81 V.hi = (V.hi>>1 )^((u64)T<<32); \
82 } \ 82 } \
83 } while(0) 83 } while(0)
84 84
85/* 85/*
@@ -118,7 +118,8 @@
118 */ 118 */
119#if TABLE_BITS==8 119#if TABLE_BITS==8
120 120
121static void gcm_init_8bit(u128 Htable[256], u64 H[2]) 121static void
122gcm_init_8bit(u128 Htable[256], u64 H[2])
122{ 123{
123 int i, j; 124 int i, j;
124 u128 V; 125 u128 V;
@@ -128,24 +129,25 @@ static void gcm_init_8bit(u128 Htable[256], u64 H[2])
128 V.hi = H[0]; 129 V.hi = H[0];
129 V.lo = H[1]; 130 V.lo = H[1];
130 131
131 for (Htable[128]=V, i=64; i>0; i>>=1) { 132 for (Htable[128] = V, i = 64; i > 0; i >>= 1) {
132 REDUCE1BIT(V); 133 REDUCE1BIT(V);
133 Htable[i] = V; 134 Htable[i] = V;
134 } 135 }
135 136
136 for (i=2; i<256; i<<=1) { 137 for (i = 2; i < 256; i <<= 1) {
137 u128 *Hi = Htable+i, H0 = *Hi; 138 u128 *Hi = Htable + i, H0 = *Hi;
138 for (j=1; j<i; ++j) { 139 for (j = 1; j < i; ++j) {
139 Hi[j].hi = H0.hi^Htable[j].hi; 140 Hi[j].hi = H0.hi ^ Htable[j].hi;
140 Hi[j].lo = H0.lo^Htable[j].lo; 141 Hi[j].lo = H0.lo ^ Htable[j].lo;
141 } 142 }
142 } 143 }
143} 144}
144 145
145static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256]) 146static void
147gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
146{ 148{
147 u128 Z = { 0, 0}; 149 u128 Z = { 0, 0};
148 const u8 *xi = (const u8 *)Xi+15; 150 const u8 *xi = (const u8 *)Xi + 15;
149 size_t rem, n = *xi; 151 size_t rem, n = *xi;
150 static const size_t rem_8bit[256] = { 152 static const size_t rem_8bit[256] = {
151 PACK(0x0000), PACK(0x01C2), PACK(0x0384), PACK(0x0246), 153 PACK(0x0000), PACK(0x01C2), PACK(0x0384), PACK(0x0246),
@@ -217,17 +219,18 @@ static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
217 Z.hi ^= Htable[n].hi; 219 Z.hi ^= Htable[n].hi;
218 Z.lo ^= Htable[n].lo; 220 Z.lo ^= Htable[n].lo;
219 221
220 if ((u8 *)Xi==xi) break; 222 if ((u8 *)Xi == xi)
223 break;
221 224
222 n = *(--xi); 225 n = *(--xi);
223 226
224 rem = (size_t)Z.lo&0xff; 227 rem = (size_t)Z.lo & 0xff;
225 Z.lo = (Z.hi<<56)|(Z.lo>>8); 228 Z.lo = (Z.hi << 56)|(Z.lo >> 8);
226 Z.hi = (Z.hi>>8); 229 Z.hi = (Z.hi >> 8);
227#if SIZE_MAX == 0xffffffffffffffff 230#if SIZE_MAX == 0xffffffffffffffff
228 Z.hi ^= rem_8bit[rem]; 231 Z.hi ^= rem_8bit[rem];
229#else 232#else
230 Z.hi ^= (u64)rem_8bit[rem]<<32; 233 Z.hi ^= (u64)rem_8bit[rem] << 32;
231#endif 234#endif
232 } 235 }
233 236
@@ -238,10 +241,14 @@ static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
238#else 241#else
239 u8 *p = (u8 *)Xi; 242 u8 *p = (u8 *)Xi;
240 u32 v; 243 u32 v;
241 v = (u32)(Z.hi>>32); PUTU32(p,v); 244 v = (u32)(Z.hi >> 32);
242 v = (u32)(Z.hi); PUTU32(p+4,v); 245 PUTU32(p, v);
243 v = (u32)(Z.lo>>32); PUTU32(p+8,v); 246 v = (u32)(Z.hi);
244 v = (u32)(Z.lo); PUTU32(p+12,v); 247 PUTU32(p + 4, v);
248 v = (u32)(Z.lo >> 32);
249 PUTU32(p + 8, v);
250 v = (u32)(Z.lo);
251 PUTU32(p + 12, v);
245#endif 252#endif
246#else /* BIG_ENDIAN */ 253#else /* BIG_ENDIAN */
247 Xi[0] = Z.hi; 254 Xi[0] = Z.hi;
@@ -252,7 +259,8 @@ static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
252 259
253#elif TABLE_BITS==4 260#elif TABLE_BITS==4
254 261
255static void gcm_init_4bit(u128 Htable[16], u64 H[2]) 262static void
263gcm_init_4bit(u128 Htable[16], u64 H[2])
256{ 264{
257 u128 V; 265 u128 V;
258#if defined(OPENSSL_SMALL_FOOTPRINT) 266#if defined(OPENSSL_SMALL_FOOTPRINT)
@@ -265,17 +273,17 @@ static void gcm_init_4bit(u128 Htable[16], u64 H[2])
265 V.lo = H[1]; 273 V.lo = H[1];
266 274
267#if defined(OPENSSL_SMALL_FOOTPRINT) 275#if defined(OPENSSL_SMALL_FOOTPRINT)
268 for (Htable[8]=V, i=4; i>0; i>>=1) { 276 for (Htable[8] = V, i = 4; i > 0; i >>= 1) {
269 REDUCE1BIT(V); 277 REDUCE1BIT(V);
270 Htable[i] = V; 278 Htable[i] = V;
271 } 279 }
272 280
273 for (i=2; i<16; i<<=1) { 281 for (i = 2; i < 16; i <<= 1) {
274 u128 *Hi = Htable+i; 282 u128 *Hi = Htable + i;
275 int j; 283 int j;
276 for (V=*Hi, j=1; j<i; ++j) { 284 for (V = *Hi, j = 1; j < i; ++j) {
277 Hi[j].hi = V.hi^Htable[j].hi; 285 Hi[j].hi = V.hi ^ Htable[j].hi;
278 Hi[j].lo = V.lo^Htable[j].lo; 286 Hi[j].lo = V.lo ^ Htable[j].lo;
279 } 287 }
280 } 288 }
281#else 289#else
@@ -286,19 +294,25 @@ static void gcm_init_4bit(u128 Htable[16], u64 H[2])
286 Htable[2] = V; 294 Htable[2] = V;
287 REDUCE1BIT(V); 295 REDUCE1BIT(V);
288 Htable[1] = V; 296 Htable[1] = V;
289 Htable[3].hi = V.hi^Htable[2].hi, Htable[3].lo = V.lo^Htable[2].lo; 297 Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
290 V=Htable[4]; 298 V = Htable[4];
291 Htable[5].hi = V.hi^Htable[1].hi, Htable[5].lo = V.lo^Htable[1].lo; 299 Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
292 Htable[6].hi = V.hi^Htable[2].hi, Htable[6].lo = V.lo^Htable[2].lo; 300 Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
293 Htable[7].hi = V.hi^Htable[3].hi, Htable[7].lo = V.lo^Htable[3].lo; 301 Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
294 V=Htable[8]; 302 V = Htable[8];
295 Htable[9].hi = V.hi^Htable[1].hi, Htable[9].lo = V.lo^Htable[1].lo; 303 Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
296 Htable[10].hi = V.hi^Htable[2].hi, Htable[10].lo = V.lo^Htable[2].lo; 304 Htable[10].hi = V.hi ^ Htable[2].hi,
297 Htable[11].hi = V.hi^Htable[3].hi, Htable[11].lo = V.lo^Htable[3].lo; 305 Htable[10].lo = V.lo ^ Htable[2].lo;
298 Htable[12].hi = V.hi^Htable[4].hi, Htable[12].lo = V.lo^Htable[4].lo; 306 Htable[11].hi = V.hi ^ Htable[3].hi,
299 Htable[13].hi = V.hi^Htable[5].hi, Htable[13].lo = V.lo^Htable[5].lo; 307 Htable[11].lo = V.lo ^ Htable[3].lo;
300 Htable[14].hi = V.hi^Htable[6].hi, Htable[14].lo = V.lo^Htable[6].lo; 308 Htable[12].hi = V.hi ^ Htable[4].hi,
301 Htable[15].hi = V.hi^Htable[7].hi, Htable[15].lo = V.lo^Htable[7].lo; 309 Htable[12].lo = V.lo ^ Htable[4].lo;
310 Htable[13].hi = V.hi ^ Htable[5].hi,
311 Htable[13].lo = V.lo ^ Htable[5].lo;
312 Htable[14].hi = V.hi ^ Htable[6].hi,
313 Htable[14].lo = V.lo ^ Htable[6].lo;
314 Htable[15].hi = V.hi ^ Htable[7].hi,
315 Htable[15].lo = V.lo ^ Htable[7].lo;
302#endif 316#endif
303#if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm)) 317#if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm))
304 /* 318 /*
@@ -307,16 +321,16 @@ static void gcm_init_4bit(u128 Htable[16], u64 H[2])
307 { 321 {
308 int j; 322 int j;
309#if BYTE_ORDER == LITTLE_ENDIAN 323#if BYTE_ORDER == LITTLE_ENDIAN
310 for (j=0;j<16;++j) { 324 for (j = 0; j < 16; ++j) {
311 V = Htable[j]; 325 V = Htable[j];
312 Htable[j].hi = V.lo; 326 Htable[j].hi = V.lo;
313 Htable[j].lo = V.hi; 327 Htable[j].lo = V.hi;
314 } 328 }
315#else /* BIG_ENDIAN */ 329#else /* BIG_ENDIAN */
316 for (j=0;j<16;++j) { 330 for (j = 0; j < 16; ++j) {
317 V = Htable[j]; 331 V = Htable[j];
318 Htable[j].hi = V.lo<<32|V.lo>>32; 332 Htable[j].hi = V.lo << 32|V.lo >> 32;
319 Htable[j].lo = V.hi<<32|V.hi>>32; 333 Htable[j].lo = V.hi << 32|V.hi >> 32;
320 } 334 }
321#endif 335#endif
322 } 336 }
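A note on REDUCE1BIT, used by the init code above: it divides V by x in
GF(2^128) modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1. A minimal
sketch of the assumed semantics (the real macro is defined earlier in
gcm128.c and may differ in form):

	/* Sketch only; u128/u64 as in modes_local.h. */
	#define REDUCE1BIT_SKETCH(V) do {				\
		u64 carry = (V).lo & 1;					\
		(V).lo = ((V).hi << 63) | ((V).lo >> 1);		\
		(V).hi = (V).hi >> 1;					\
		if (carry)	/* a bit fell off: fold 0xE1 back in */	\
			(V).hi ^= (u64)0xE1 << 56;			\
	} while (0)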
@@ -330,44 +344,46 @@ static const size_t rem_4bit[16] = {
330 PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560), 344 PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
331 PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0) }; 345 PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0) };
332 346
333static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) 347static void
348gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
334{ 349{
335 u128 Z; 350 u128 Z;
336 int cnt = 15; 351 int cnt = 15;
337 size_t rem, nlo, nhi; 352 size_t rem, nlo, nhi;
338 353
339 nlo = ((const u8 *)Xi)[15]; 354 nlo = ((const u8 *)Xi)[15];
340 nhi = nlo>>4; 355 nhi = nlo >> 4;
341 nlo &= 0xf; 356 nlo &= 0xf;
342 357
343 Z.hi = Htable[nlo].hi; 358 Z.hi = Htable[nlo].hi;
344 Z.lo = Htable[nlo].lo; 359 Z.lo = Htable[nlo].lo;
345 360
346 while (1) { 361 while (1) {
347 rem = (size_t)Z.lo&0xf; 362 rem = (size_t)Z.lo & 0xf;
348 Z.lo = (Z.hi<<60)|(Z.lo>>4); 363 Z.lo = (Z.hi << 60)|(Z.lo >> 4);
349 Z.hi = (Z.hi>>4); 364 Z.hi = (Z.hi >> 4);
350#if SIZE_MAX == 0xffffffffffffffff 365#if SIZE_MAX == 0xffffffffffffffff
351 Z.hi ^= rem_4bit[rem]; 366 Z.hi ^= rem_4bit[rem];
352#else 367#else
353 Z.hi ^= (u64)rem_4bit[rem]<<32; 368 Z.hi ^= (u64)rem_4bit[rem] << 32;
354#endif 369#endif
355 Z.hi ^= Htable[nhi].hi; 370 Z.hi ^= Htable[nhi].hi;
356 Z.lo ^= Htable[nhi].lo; 371 Z.lo ^= Htable[nhi].lo;
357 372
358 if (--cnt<0) break; 373 if (--cnt < 0)
374 break;
359 375
360 nlo = ((const u8 *)Xi)[cnt]; 376 nlo = ((const u8 *)Xi)[cnt];
361 nhi = nlo>>4; 377 nhi = nlo >> 4;
362 nlo &= 0xf; 378 nlo &= 0xf;
363 379
364 rem = (size_t)Z.lo&0xf; 380 rem = (size_t)Z.lo & 0xf;
365 Z.lo = (Z.hi<<60)|(Z.lo>>4); 381 Z.lo = (Z.hi << 60)|(Z.lo >> 4);
366 Z.hi = (Z.hi>>4); 382 Z.hi = (Z.hi >> 4);
367#if SIZE_MAX == 0xffffffffffffffff 383#if SIZE_MAX == 0xffffffffffffffff
368 Z.hi ^= rem_4bit[rem]; 384 Z.hi ^= rem_4bit[rem];
369#else 385#else
370 Z.hi ^= (u64)rem_4bit[rem]<<32; 386 Z.hi ^= (u64)rem_4bit[rem] << 32;
371#endif 387#endif
372 Z.hi ^= Htable[nlo].hi; 388 Z.hi ^= Htable[nlo].hi;
373 Z.lo ^= Htable[nlo].lo; 389 Z.lo ^= Htable[nlo].lo;
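Context for rem_4bit in the loop above: each iteration shifts Z right by
four bits, and the four bits that drop off Z.lo must be reduced modulo
the GCM polynomial; rem_4bit tabulates the sixteen possible 16-bit
reduction constants, pre-positioned in the top bits by PACK. Assumed
shape of PACK, matching the initializers in the previous hunk:

	/* Park a 16-bit constant in the top 16 bits of a size_t (on
	 * 32-bit targets the #else branches above shift it up by a
	 * further 32 bits before XORing into Z.hi). */
	#define PACK_SKETCH(s)	((size_t)(s) << (sizeof(size_t)*8 - 16))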
@@ -380,10 +396,14 @@ static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
380#else 396#else
381 u8 *p = (u8 *)Xi; 397 u8 *p = (u8 *)Xi;
382 u32 v; 398 u32 v;
383 v = (u32)(Z.hi>>32); PUTU32(p,v); 399 v = (u32)(Z.hi >> 32);
384 v = (u32)(Z.hi); PUTU32(p+4,v); 400 PUTU32(p, v);
385 v = (u32)(Z.lo>>32); PUTU32(p+8,v); 401 v = (u32)(Z.hi);
386 v = (u32)(Z.lo); PUTU32(p+12,v); 402 PUTU32(p + 4, v);
403 v = (u32)(Z.lo >> 32);
404 PUTU32(p + 8, v);
405 v = (u32)(Z.lo);
406 PUTU32(p + 12, v);
387#endif 407#endif
388#else /* BIG_ENDIAN */ 408#else /* BIG_ENDIAN */
389 Xi[0] = Z.hi; 409 Xi[0] = Z.hi;
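The PUTU32 stores above (and the GETU32 loads elsewhere in the file) are
assumed to be the usual unaligned big-endian 32-bit accessors, roughly:

	#define GETU32_SKETCH(p)					\
		((u32)(p)[0] << 24 | (u32)(p)[1] << 16 |		\
		 (u32)(p)[2] << 8 | (u32)(p)[3])
	#define PUTU32_SKETCH(p, v) do {				\
		(p)[0] = (u8)((v) >> 24); (p)[1] = (u8)((v) >> 16);	\
		(p)[2] = (u8)((v) >> 8);  (p)[3] = (u8)(v);		\
	} while (0)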
@@ -399,54 +419,56 @@ static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
399 * mostly as reference and a placeholder for possible future 419 * mostly as reference and a placeholder for possible future
400 * non-trivial optimization[s]... 420 * non-trivial optimization[s]...
401 */ 421 */
402static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16], 422static void
403 const u8 *inp,size_t len) 423gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
424 const u8 *inp, size_t len)
404{ 425{
405 u128 Z; 426 u128 Z;
406 int cnt; 427 int cnt;
407 size_t rem, nlo, nhi; 428 size_t rem, nlo, nhi;
408 429
409#if 1 430#if 1
410 do { 431 do {
411 cnt = 15; 432 cnt = 15;
412 nlo = ((const u8 *)Xi)[15]; 433 nlo = ((const u8 *)Xi)[15];
413 nlo ^= inp[15]; 434 nlo ^= inp[15];
414 nhi = nlo>>4; 435 nhi = nlo >> 4;
415 nlo &= 0xf; 436 nlo &= 0xf;
416 437
417 Z.hi = Htable[nlo].hi; 438 Z.hi = Htable[nlo].hi;
418 Z.lo = Htable[nlo].lo; 439 Z.lo = Htable[nlo].lo;
419 440
420 while (1) { 441 while (1) {
421 rem = (size_t)Z.lo&0xf; 442 rem = (size_t)Z.lo & 0xf;
422 Z.lo = (Z.hi<<60)|(Z.lo>>4); 443 Z.lo = (Z.hi << 60)|(Z.lo >> 4);
423 Z.hi = (Z.hi>>4); 444 Z.hi = (Z.hi >> 4);
424#if SIZE_MAX == 0xffffffffffffffff 445#if SIZE_MAX == 0xffffffffffffffff
425 Z.hi ^= rem_4bit[rem]; 446 Z.hi ^= rem_4bit[rem];
426#else 447#else
427 Z.hi ^= (u64)rem_4bit[rem]<<32; 448 Z.hi ^= (u64)rem_4bit[rem] << 32;
428#endif 449#endif
429 Z.hi ^= Htable[nhi].hi; 450 Z.hi ^= Htable[nhi].hi;
430 Z.lo ^= Htable[nhi].lo; 451 Z.lo ^= Htable[nhi].lo;
431 452
432 if (--cnt<0) break; 453 if (--cnt < 0)
454 break;
433 455
434 nlo = ((const u8 *)Xi)[cnt]; 456 nlo = ((const u8 *)Xi)[cnt];
435 nlo ^= inp[cnt]; 457 nlo ^= inp[cnt];
436 nhi = nlo>>4; 458 nhi = nlo >> 4;
437 nlo &= 0xf; 459 nlo &= 0xf;
438 460
439 rem = (size_t)Z.lo&0xf; 461 rem = (size_t)Z.lo & 0xf;
440 Z.lo = (Z.hi<<60)|(Z.lo>>4); 462 Z.lo = (Z.hi << 60)|(Z.lo >> 4);
441 Z.hi = (Z.hi>>4); 463 Z.hi = (Z.hi >> 4);
442#if SIZE_MAX == 0xffffffffffffffff 464#if SIZE_MAX == 0xffffffffffffffff
443 Z.hi ^= rem_4bit[rem]; 465 Z.hi ^= rem_4bit[rem];
444#else 466#else
445 Z.hi ^= (u64)rem_4bit[rem]<<32; 467 Z.hi ^= (u64)rem_4bit[rem] << 32;
446#endif 468#endif
447 Z.hi ^= Htable[nlo].hi; 469 Z.hi ^= Htable[nlo].hi;
448 Z.lo ^= Htable[nlo].lo; 470 Z.lo ^= Htable[nlo].lo;
449 } 471 }
450#else 472#else
451 /* 473 /*
452 * Extra 256+16 bytes per-key plus 512 bytes shared tables 474 * Extra 256+16 bytes per-key plus 512 bytes shared tables
@@ -454,115 +476,120 @@ static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],
454 * the rem_8bit even here, but the priority is to minimize 476 * the rem_8bit even here, but the priority is to minimize
455 * cache footprint... 477 * cache footprint...
456 */ 478 */
457 u128 Hshr4[16]; /* Htable shifted right by 4 bits */ 479 u128 Hshr4[16]; /* Htable shifted right by 4 bits */
458 u8 Hshl4[16]; /* Htable shifted left by 4 bits */ 480 u8 Hshl4[16]; /* Htable shifted left by 4 bits */
459 static const unsigned short rem_8bit[256] = { 481 static const unsigned short rem_8bit[256] = {
460 0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E, 482 0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E,
461 0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E, 483 0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E,
462 0x1C20, 0x1DE2, 0x1FA4, 0x1E66, 0x1B28, 0x1AEA, 0x18AC, 0x196E, 484 0x1C20, 0x1DE2, 0x1FA4, 0x1E66, 0x1B28, 0x1AEA, 0x18AC, 0x196E,
463 0x1230, 0x13F2, 0x11B4, 0x1076, 0x1538, 0x14FA, 0x16BC, 0x177E, 485 0x1230, 0x13F2, 0x11B4, 0x1076, 0x1538, 0x14FA, 0x16BC, 0x177E,
464 0x3840, 0x3982, 0x3BC4, 0x3A06, 0x3F48, 0x3E8A, 0x3CCC, 0x3D0E, 486 0x3840, 0x3982, 0x3BC4, 0x3A06, 0x3F48, 0x3E8A, 0x3CCC, 0x3D0E,
465 0x3650, 0x3792, 0x35D4, 0x3416, 0x3158, 0x309A, 0x32DC, 0x331E, 487 0x3650, 0x3792, 0x35D4, 0x3416, 0x3158, 0x309A, 0x32DC, 0x331E,
466 0x2460, 0x25A2, 0x27E4, 0x2626, 0x2368, 0x22AA, 0x20EC, 0x212E, 488 0x2460, 0x25A2, 0x27E4, 0x2626, 0x2368, 0x22AA, 0x20EC, 0x212E,
467 0x2A70, 0x2BB2, 0x29F4, 0x2836, 0x2D78, 0x2CBA, 0x2EFC, 0x2F3E, 489 0x2A70, 0x2BB2, 0x29F4, 0x2836, 0x2D78, 0x2CBA, 0x2EFC, 0x2F3E,
468 0x7080, 0x7142, 0x7304, 0x72C6, 0x7788, 0x764A, 0x740C, 0x75CE, 490 0x7080, 0x7142, 0x7304, 0x72C6, 0x7788, 0x764A, 0x740C, 0x75CE,
469 0x7E90, 0x7F52, 0x7D14, 0x7CD6, 0x7998, 0x785A, 0x7A1C, 0x7BDE, 491 0x7E90, 0x7F52, 0x7D14, 0x7CD6, 0x7998, 0x785A, 0x7A1C, 0x7BDE,
470 0x6CA0, 0x6D62, 0x6F24, 0x6EE6, 0x6BA8, 0x6A6A, 0x682C, 0x69EE, 492 0x6CA0, 0x6D62, 0x6F24, 0x6EE6, 0x6BA8, 0x6A6A, 0x682C, 0x69EE,
471 0x62B0, 0x6372, 0x6134, 0x60F6, 0x65B8, 0x647A, 0x663C, 0x67FE, 493 0x62B0, 0x6372, 0x6134, 0x60F6, 0x65B8, 0x647A, 0x663C, 0x67FE,
472 0x48C0, 0x4902, 0x4B44, 0x4A86, 0x4FC8, 0x4E0A, 0x4C4C, 0x4D8E, 494 0x48C0, 0x4902, 0x4B44, 0x4A86, 0x4FC8, 0x4E0A, 0x4C4C, 0x4D8E,
473 0x46D0, 0x4712, 0x4554, 0x4496, 0x41D8, 0x401A, 0x425C, 0x439E, 495 0x46D0, 0x4712, 0x4554, 0x4496, 0x41D8, 0x401A, 0x425C, 0x439E,
474 0x54E0, 0x5522, 0x5764, 0x56A6, 0x53E8, 0x522A, 0x506C, 0x51AE, 496 0x54E0, 0x5522, 0x5764, 0x56A6, 0x53E8, 0x522A, 0x506C, 0x51AE,
475 0x5AF0, 0x5B32, 0x5974, 0x58B6, 0x5DF8, 0x5C3A, 0x5E7C, 0x5FBE, 497 0x5AF0, 0x5B32, 0x5974, 0x58B6, 0x5DF8, 0x5C3A, 0x5E7C, 0x5FBE,
476 0xE100, 0xE0C2, 0xE284, 0xE346, 0xE608, 0xE7CA, 0xE58C, 0xE44E, 498 0xE100, 0xE0C2, 0xE284, 0xE346, 0xE608, 0xE7CA, 0xE58C, 0xE44E,
477 0xEF10, 0xEED2, 0xEC94, 0xED56, 0xE818, 0xE9DA, 0xEB9C, 0xEA5E, 499 0xEF10, 0xEED2, 0xEC94, 0xED56, 0xE818, 0xE9DA, 0xEB9C, 0xEA5E,
478 0xFD20, 0xFCE2, 0xFEA4, 0xFF66, 0xFA28, 0xFBEA, 0xF9AC, 0xF86E, 500 0xFD20, 0xFCE2, 0xFEA4, 0xFF66, 0xFA28, 0xFBEA, 0xF9AC, 0xF86E,
479 0xF330, 0xF2F2, 0xF0B4, 0xF176, 0xF438, 0xF5FA, 0xF7BC, 0xF67E, 501 0xF330, 0xF2F2, 0xF0B4, 0xF176, 0xF438, 0xF5FA, 0xF7BC, 0xF67E,
480 0xD940, 0xD882, 0xDAC4, 0xDB06, 0xDE48, 0xDF8A, 0xDDCC, 0xDC0E, 502 0xD940, 0xD882, 0xDAC4, 0xDB06, 0xDE48, 0xDF8A, 0xDDCC, 0xDC0E,
481 0xD750, 0xD692, 0xD4D4, 0xD516, 0xD058, 0xD19A, 0xD3DC, 0xD21E, 503 0xD750, 0xD692, 0xD4D4, 0xD516, 0xD058, 0xD19A, 0xD3DC, 0xD21E,
482 0xC560, 0xC4A2, 0xC6E4, 0xC726, 0xC268, 0xC3AA, 0xC1EC, 0xC02E, 504 0xC560, 0xC4A2, 0xC6E4, 0xC726, 0xC268, 0xC3AA, 0xC1EC, 0xC02E,
483 0xCB70, 0xCAB2, 0xC8F4, 0xC936, 0xCC78, 0xCDBA, 0xCFFC, 0xCE3E, 505 0xCB70, 0xCAB2, 0xC8F4, 0xC936, 0xCC78, 0xCDBA, 0xCFFC, 0xCE3E,
484 0x9180, 0x9042, 0x9204, 0x93C6, 0x9688, 0x974A, 0x950C, 0x94CE, 506 0x9180, 0x9042, 0x9204, 0x93C6, 0x9688, 0x974A, 0x950C, 0x94CE,
485 0x9F90, 0x9E52, 0x9C14, 0x9DD6, 0x9898, 0x995A, 0x9B1C, 0x9ADE, 507 0x9F90, 0x9E52, 0x9C14, 0x9DD6, 0x9898, 0x995A, 0x9B1C, 0x9ADE,
486 0x8DA0, 0x8C62, 0x8E24, 0x8FE6, 0x8AA8, 0x8B6A, 0x892C, 0x88EE, 508 0x8DA0, 0x8C62, 0x8E24, 0x8FE6, 0x8AA8, 0x8B6A, 0x892C, 0x88EE,
487 0x83B0, 0x8272, 0x8034, 0x81F6, 0x84B8, 0x857A, 0x873C, 0x86FE, 509 0x83B0, 0x8272, 0x8034, 0x81F6, 0x84B8, 0x857A, 0x873C, 0x86FE,
488 0xA9C0, 0xA802, 0xAA44, 0xAB86, 0xAEC8, 0xAF0A, 0xAD4C, 0xAC8E, 510 0xA9C0, 0xA802, 0xAA44, 0xAB86, 0xAEC8, 0xAF0A, 0xAD4C, 0xAC8E,
489 0xA7D0, 0xA612, 0xA454, 0xA596, 0xA0D8, 0xA11A, 0xA35C, 0xA29E, 511 0xA7D0, 0xA612, 0xA454, 0xA596, 0xA0D8, 0xA11A, 0xA35C, 0xA29E,
490 0xB5E0, 0xB422, 0xB664, 0xB7A6, 0xB2E8, 0xB32A, 0xB16C, 0xB0AE, 512 0xB5E0, 0xB422, 0xB664, 0xB7A6, 0xB2E8, 0xB32A, 0xB16C, 0xB0AE,
491 0xBBF0, 0xBA32, 0xB874, 0xB9B6, 0xBCF8, 0xBD3A, 0xBF7C, 0xBEBE }; 513 0xBBF0, 0xBA32, 0xB874, 0xB9B6, 0xBCF8, 0xBD3A, 0xBF7C, 0xBEBE };
492 /* 514 /*
493 * This pre-processing phase slows down procedure by approximately 515 * This pre-processing phase slows down procedure by approximately
494 * same time as it makes each loop spin faster. In other words 516 * same time as it makes each loop spin faster. In other words
495 * single block performance is approximately same as straightforward 517 * single block performance is approximately same as straightforward
496 * "4-bit" implementation, and then it goes only faster... 518 * "4-bit" implementation, and then it goes only faster...
497 */ 519 */
498 for (cnt=0; cnt<16; ++cnt) { 520 for (cnt = 0; cnt < 16; ++cnt) {
499 Z.hi = Htable[cnt].hi; 521 Z.hi = Htable[cnt].hi;
500 Z.lo = Htable[cnt].lo; 522 Z.lo = Htable[cnt].lo;
501 Hshr4[cnt].lo = (Z.hi<<60)|(Z.lo>>4); 523 Hshr4[cnt].lo = (Z.hi << 60)|(Z.lo >> 4);
502 Hshr4[cnt].hi = (Z.hi>>4); 524 Hshr4[cnt].hi = (Z.hi >> 4);
503 Hshl4[cnt] = (u8)(Z.lo<<4); 525 Hshl4[cnt] = (u8)(Z.lo << 4);
504 } 526 }
505
506 do {
507 for (Z.lo=0, Z.hi=0, cnt=15; cnt; --cnt) {
508 nlo = ((const u8 *)Xi)[cnt];
509 nlo ^= inp[cnt];
510 nhi = nlo>>4;
511 nlo &= 0xf;
512 527
513 Z.hi ^= Htable[nlo].hi; 528 do {
514 Z.lo ^= Htable[nlo].lo; 529 for (Z.lo = 0, Z.hi = 0, cnt = 15; cnt; --cnt) {
530 nlo = ((const u8 *)Xi)[cnt];
531 nlo ^= inp[cnt];
532 nhi = nlo >> 4;
533 nlo &= 0xf;
515 534
516 rem = (size_t)Z.lo&0xff; 535 Z.hi ^= Htable[nlo].hi;
536 Z.lo ^= Htable[nlo].lo;
517 537
518 Z.lo = (Z.hi<<56)|(Z.lo>>8); 538 rem = (size_t)Z.lo & 0xff;
519 Z.hi = (Z.hi>>8);
520 539
521 Z.hi ^= Hshr4[nhi].hi; 540 Z.lo = (Z.hi << 56)|(Z.lo >> 8);
522 Z.lo ^= Hshr4[nhi].lo; 541 Z.hi = (Z.hi >> 8);
523 Z.hi ^= (u64)rem_8bit[rem^Hshl4[nhi]]<<48;
524 }
525 542
526 nlo = ((const u8 *)Xi)[0]; 543 Z.hi ^= Hshr4[nhi].hi;
527 nlo ^= inp[0]; 544 Z.lo ^= Hshr4[nhi].lo;
528 nhi = nlo>>4; 545 Z.hi ^= (u64)rem_8bit[rem ^ Hshl4[nhi]] << 48;
529 nlo &= 0xf; 546 }
530 547
531 Z.hi ^= Htable[nlo].hi; 548 nlo = ((const u8 *)Xi)[0];
532 Z.lo ^= Htable[nlo].lo; 549 nlo ^= inp[0];
550 nhi = nlo >> 4;
551 nlo &= 0xf;
552
553 Z.hi ^= Htable[nlo].hi;
554 Z.lo ^= Htable[nlo].lo;
533 555
534 rem = (size_t)Z.lo&0xf; 556 rem = (size_t)Z.lo & 0xf;
535 557
536 Z.lo = (Z.hi<<60)|(Z.lo>>4); 558 Z.lo = (Z.hi << 60)|(Z.lo >> 4);
537 Z.hi = (Z.hi>>4); 559 Z.hi = (Z.hi >> 4);
538 560
539 Z.hi ^= Htable[nhi].hi; 561 Z.hi ^= Htable[nhi].hi;
540 Z.lo ^= Htable[nhi].lo; 562 Z.lo ^= Htable[nhi].lo;
541 Z.hi ^= ((u64)rem_8bit[rem<<4])<<48; 563 Z.hi ^= ((u64)rem_8bit[rem << 4]) << 48;
542#endif 564#endif
543 565
544#if BYTE_ORDER == LITTLE_ENDIAN 566#if BYTE_ORDER == LITTLE_ENDIAN
545#ifdef BSWAP8 567#ifdef BSWAP8
546 Xi[0] = BSWAP8(Z.hi); 568 Xi[0] = BSWAP8(Z.hi);
547 Xi[1] = BSWAP8(Z.lo); 569 Xi[1] = BSWAP8(Z.lo);
548#else 570#else
549 u8 *p = (u8 *)Xi; 571 u8 *p = (u8 *)Xi;
550 u32 v; 572 u32 v;
551 v = (u32)(Z.hi>>32); PUTU32(p,v); 573 v = (u32)(Z.hi >> 32);
552 v = (u32)(Z.hi); PUTU32(p+4,v); 574 PUTU32(p, v);
553 v = (u32)(Z.lo>>32); PUTU32(p+8,v); 575 v = (u32)(Z.hi);
554 v = (u32)(Z.lo); PUTU32(p+12,v); 576 PUTU32(p + 4, v);
577 v = (u32)(Z.lo >> 32);
578 PUTU32(p + 8, v);
579 v = (u32)(Z.lo);
580 PUTU32(p + 12, v);
555#endif 581#endif
556#else /* BIG_ENDIAN */ 582#else /* BIG_ENDIAN */
557 Xi[0] = Z.hi; 583 Xi[0] = Z.hi;
558 Xi[1] = Z.lo; 584 Xi[1] = Z.lo;
559#endif 585#endif
560 } while (inp+=16, len-=16); 586 } while (inp += 16, len -= 16);
561} 587}
562#endif 588#endif
563#else 589#else
564void gcm_gmult_4bit(u64 Xi[2],const u128 Htable[16]); 590void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
565void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); 591void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
592 size_t len);
566#endif 593#endif
567 594
568#define GCM_MUL(ctx,Xi) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable) 595#define GCM_MUL(ctx,Xi) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable)
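GCM_MUL is the entire inner step of GHASH: Xi <- (Xi XOR block) * H in
GF(2^128). A sketch of the folding that the callers below perform
(hypothetical helper, not part of this file; the GHASH macro itself is a
similar wrapper around gcm_ghash_4bit):

	static void
	ghash_fold_sketch(u64 Xi[2], const u128 Htable[16],
	    const u8 *inp, size_t len)
	{
		size_t i;

		while (len >= 16) {
			for (i = 0; i < 16; ++i)
				((u8 *)Xi)[i] ^= inp[i];
			gcm_gmult_4bit(Xi, Htable);	/* Xi *= H */
			inp += 16;
			len -= 16;
		}
	}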
@@ -576,37 +603,38 @@ void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
576 603
577#else /* TABLE_BITS */ 604#else /* TABLE_BITS */
578 605
579static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2]) 606static void
607gcm_gmult_1bit(u64 Xi[2], const u64 H[2])
580{ 608{
581 u128 V,Z = { 0,0 }; 609 u128 V, Z = { 0,0 };
582 long X; 610 long X;
583 int i,j; 611 int i, j;
584 const long *xi = (const long *)Xi; 612 const long *xi = (const long *)Xi;
585 613
586 V.hi = H[0]; /* H is in host byte order, no byte swapping */ 614 V.hi = H[0]; /* H is in host byte order, no byte swapping */
587 V.lo = H[1]; 615 V.lo = H[1];
588 616
589 for (j=0; j<16/sizeof(long); ++j) { 617 for (j = 0; j < 16/sizeof(long); ++j) {
590#if BYTE_ORDER == LITTLE_ENDIAN 618#if BYTE_ORDER == LITTLE_ENDIAN
591#if SIZE_MAX == 0xffffffffffffffff 619#if SIZE_MAX == 0xffffffffffffffff
592#ifdef BSWAP8 620#ifdef BSWAP8
593 X = (long)(BSWAP8(xi[j])); 621 X = (long)(BSWAP8(xi[j]));
594#else 622#else
595 const u8 *p = (const u8 *)(xi+j); 623 const u8 *p = (const u8 *)(xi + j);
596 X = (long)((u64)GETU32(p)<<32|GETU32(p+4)); 624 X = (long)((u64)GETU32(p) << 32|GETU32(p + 4));
597#endif 625#endif
598#else 626#else
599 const u8 *p = (const u8 *)(xi+j); 627 const u8 *p = (const u8 *)(xi + j);
600 X = (long)GETU32(p); 628 X = (long)GETU32(p);
601#endif 629#endif
602#else /* BIG_ENDIAN */ 630#else /* BIG_ENDIAN */
603 X = xi[j]; 631 X = xi[j];
604#endif 632#endif
605 633
606 for (i=0; i<8*sizeof(long); ++i, X<<=1) { 634 for (i = 0; i < 8*sizeof(long); ++i, X <<= 1) {
607 u64 M = (u64)(X>>(8*sizeof(long)-1)); 635 u64 M = (u64)(X >> (8*sizeof(long) - 1));
608 Z.hi ^= V.hi&M; 636 Z.hi ^= V.hi & M;
609 Z.lo ^= V.lo&M; 637 Z.lo ^= V.lo & M;
610 638
611 REDUCE1BIT(V); 639 REDUCE1BIT(V);
612 } 640 }
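A note on the branch-free loop above: M is built by arithmetically
shifting the sign bit of X across the whole word, giving an all-ones or
all-zero mask, so V is conditionally accumulated without a
data-dependent branch. In isolation:

	/* Sketch; relies on arithmetic right shift of a signed long,
	 * which is implementation-defined in C but universal in
	 * practice on the platforms this file targets. */
	static inline u64
	msb_mask_sketch(long X)
	{
		return (u64)(X >> (8 * sizeof(long) - 1));	/* 0 or ~0 */
	}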
@@ -619,10 +647,14 @@ static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2])
619#else 647#else
620 u8 *p = (u8 *)Xi; 648 u8 *p = (u8 *)Xi;
621 u32 v; 649 u32 v;
622 v = (u32)(Z.hi>>32); PUTU32(p,v); 650 v = (u32)(Z.hi >> 32);
623 v = (u32)(Z.hi); PUTU32(p+4,v); 651 PUTU32(p, v);
624 v = (u32)(Z.lo>>32); PUTU32(p+8,v); 652 v = (u32)(Z.hi);
625 v = (u32)(Z.lo); PUTU32(p+12,v); 653 PUTU32(p + 4, v);
654 v = (u32)(Z.lo >> 32);
655 PUTU32(p + 8, v);
656 v = (u32)(Z.lo);
657 PUTU32(p + 12, v);
626#endif 658#endif
627#else /* BIG_ENDIAN */ 659#else /* BIG_ENDIAN */
628 Xi[0] = Z.hi; 660 Xi[0] = Z.hi;
@@ -633,39 +665,43 @@ static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2])
633 665
634#endif 666#endif
635 667
636#if defined(GHASH_ASM) && \ 668#if defined(GHASH_ASM) && \
637 (defined(__i386) || defined(__i386__) || \ 669 (defined(__i386) || defined(__i386__) || \
638 defined(__x86_64) || defined(__x86_64__) || \ 670 defined(__x86_64) || defined(__x86_64__) || \
639 defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64)) 671 defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
640#include "x86_arch.h" 672#include "x86_arch.h"
641#endif 673#endif
642 674
643#if TABLE_BITS==4 && defined(GHASH_ASM) 675#if TABLE_BITS==4 && defined(GHASH_ASM)
644# if (defined(__i386) || defined(__i386__) || \ 676# if (defined(__i386) || defined(__i386__) || \
645 defined(__x86_64) || defined(__x86_64__) || \ 677 defined(__x86_64) || defined(__x86_64__) || \
646 defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64)) 678 defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
647# define GHASH_ASM_X86_OR_64 679# define GHASH_ASM_X86_OR_64
648# define GCM_FUNCREF_4BIT 680# define GCM_FUNCREF_4BIT
649 681
650void gcm_init_clmul(u128 Htable[16],const u64 Xi[2]); 682void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]);
651void gcm_gmult_clmul(u64 Xi[2],const u128 Htable[16]); 683void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]);
652void gcm_ghash_clmul(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); 684void gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp,
685 size_t len);
653 686
654# if defined(__i386) || defined(__i386__) || defined(_M_IX86) 687# if defined(__i386) || defined(__i386__) || defined(_M_IX86)
655# define GHASH_ASM_X86 688# define GHASH_ASM_X86
656void gcm_gmult_4bit_mmx(u64 Xi[2],const u128 Htable[16]); 689void gcm_gmult_4bit_mmx(u64 Xi[2], const u128 Htable[16]);
657void gcm_ghash_4bit_mmx(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); 690void gcm_ghash_4bit_mmx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
691 size_t len);
658 692
659void gcm_gmult_4bit_x86(u64 Xi[2],const u128 Htable[16]); 693void gcm_gmult_4bit_x86(u64 Xi[2], const u128 Htable[16]);
660void gcm_ghash_4bit_x86(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); 694void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp,
695 size_t len);
661# endif 696# endif
662# elif defined(__arm__) || defined(__arm) 697# elif defined(__arm__) || defined(__arm)
663# include "arm_arch.h" 698# include "arm_arch.h"
664# if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT) 699# if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
665# define GHASH_ASM_ARM 700# define GHASH_ASM_ARM
666# define GCM_FUNCREF_4BIT 701# define GCM_FUNCREF_4BIT
667void gcm_gmult_neon(u64 Xi[2],const u128 Htable[16]); 702void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
668void gcm_ghash_neon(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); 703void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
704 size_t len);
669# endif 705# endif
670# endif 706# endif
671#endif 707#endif
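All of these assembly entry points share the two signatures above, which
is what lets CRYPTO_gcm128_init (below) select an implementation at run
time under GCM_FUNCREF_4BIT. The assumed context fields they are stored
into (see modes_local.h):

	/* Sketch of the relevant GCM128_CONTEXT members. */
	void (*gmult)(u64 Xi[2], const u128 Htable[16]);
	void (*ghash)(u64 Xi[2], const u128 Htable[16], const u8 *inp,
	    size_t len);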
@@ -679,13 +715,14 @@ void gcm_ghash_neon(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
679# endif 715# endif
680#endif 716#endif
681 717
682void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block) 718void
719CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
683{ 720{
684 memset(ctx,0,sizeof(*ctx)); 721 memset(ctx, 0, sizeof(*ctx));
685 ctx->block = block; 722 ctx->block = block;
686 ctx->key = key; 723 ctx->key = key;
687 724
688 (*block)(ctx->H.c,ctx->H.c,key); 725 (*block)(ctx->H.c, ctx->H.c, key);
689 726
690#if BYTE_ORDER == LITTLE_ENDIAN 727#if BYTE_ORDER == LITTLE_ENDIAN
691 /* H is stored in host byte order */ 728 /* H is stored in host byte order */
@@ -694,29 +731,29 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block)
694 ctx->H.u[1] = BSWAP8(ctx->H.u[1]); 731 ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
695#else 732#else
696 u8 *p = ctx->H.c; 733 u8 *p = ctx->H.c;
697 u64 hi,lo; 734 u64 hi, lo;
698 hi = (u64)GETU32(p) <<32|GETU32(p+4); 735 hi = (u64)GETU32(p) << 32|GETU32(p + 4);
699 lo = (u64)GETU32(p+8)<<32|GETU32(p+12); 736 lo = (u64)GETU32(p + 8) << 32|GETU32(p + 12);
700 ctx->H.u[0] = hi; 737 ctx->H.u[0] = hi;
701 ctx->H.u[1] = lo; 738 ctx->H.u[1] = lo;
702#endif 739#endif
703#endif 740#endif
704 741
705#if TABLE_BITS==8 742#if TABLE_BITS==8
706 gcm_init_8bit(ctx->Htable,ctx->H.u); 743 gcm_init_8bit(ctx->Htable, ctx->H.u);
707#elif TABLE_BITS==4 744#elif TABLE_BITS==4
708# if defined(GHASH_ASM_X86_OR_64) 745# if defined(GHASH_ASM_X86_OR_64)
709# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2) 746# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
710 /* check FXSR and PCLMULQDQ bits */ 747 /* check FXSR and PCLMULQDQ bits */
711 if ((OPENSSL_cpu_caps() & (CPUCAP_MASK_FXSR | CPUCAP_MASK_PCLMUL)) == 748 if ((OPENSSL_cpu_caps() & (CPUCAP_MASK_FXSR | CPUCAP_MASK_PCLMUL)) ==
712 (CPUCAP_MASK_FXSR | CPUCAP_MASK_PCLMUL)) { 749 (CPUCAP_MASK_FXSR | CPUCAP_MASK_PCLMUL)) {
713 gcm_init_clmul(ctx->Htable,ctx->H.u); 750 gcm_init_clmul(ctx->Htable, ctx->H.u);
714 ctx->gmult = gcm_gmult_clmul; 751 ctx->gmult = gcm_gmult_clmul;
715 ctx->ghash = gcm_ghash_clmul; 752 ctx->ghash = gcm_ghash_clmul;
716 return; 753 return;
717 } 754 }
718# endif 755# endif
719 gcm_init_4bit(ctx->Htable,ctx->H.u); 756 gcm_init_4bit(ctx->Htable, ctx->H.u);
720# if defined(GHASH_ASM_X86) /* x86 only */ 757# if defined(GHASH_ASM_X86) /* x86 only */
721# if defined(OPENSSL_IA32_SSE2) 758# if defined(OPENSSL_IA32_SSE2)
722 if (OPENSSL_cpu_caps() & CPUCAP_MASK_SSE) { /* check SSE bit */ 759 if (OPENSSL_cpu_caps() & CPUCAP_MASK_SSE) { /* check SSE bit */
@@ -738,112 +775,116 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block)
738 ctx->gmult = gcm_gmult_neon; 775 ctx->gmult = gcm_gmult_neon;
739 ctx->ghash = gcm_ghash_neon; 776 ctx->ghash = gcm_ghash_neon;
740 } else { 777 } else {
741 gcm_init_4bit(ctx->Htable,ctx->H.u); 778 gcm_init_4bit(ctx->Htable, ctx->H.u);
742 ctx->gmult = gcm_gmult_4bit; 779 ctx->gmult = gcm_gmult_4bit;
743 ctx->ghash = gcm_ghash_4bit; 780 ctx->ghash = gcm_ghash_4bit;
744 } 781 }
745# else 782# else
746 gcm_init_4bit(ctx->Htable,ctx->H.u); 783 gcm_init_4bit(ctx->Htable, ctx->H.u);
747# endif 784# endif
748#endif 785#endif
749} 786}
750 787
751void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx,const unsigned char *iv,size_t len) 788void
789CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, size_t len)
752{ 790{
753 unsigned int ctr; 791 unsigned int ctr;
754#ifdef GCM_FUNCREF_4BIT 792#ifdef GCM_FUNCREF_4BIT
755 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; 793 void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
756#endif 794#endif
757 795
758 ctx->Yi.u[0] = 0; 796 ctx->Yi.u[0] = 0;
759 ctx->Yi.u[1] = 0; 797 ctx->Yi.u[1] = 0;
760 ctx->Xi.u[0] = 0; 798 ctx->Xi.u[0] = 0;
761 ctx->Xi.u[1] = 0; 799 ctx->Xi.u[1] = 0;
762 ctx->len.u[0] = 0; /* AAD length */ 800 ctx->len.u[0] = 0; /* AAD length */
763 ctx->len.u[1] = 0; /* message length */ 801 ctx->len.u[1] = 0; /* message length */
764 ctx->ares = 0; 802 ctx->ares = 0;
765 ctx->mres = 0; 803 ctx->mres = 0;
766 804
767 if (len==12) { 805 if (len == 12) {
768 memcpy(ctx->Yi.c,iv,12); 806 memcpy(ctx->Yi.c, iv, 12);
769 ctx->Yi.c[15]=1; 807 ctx->Yi.c[15] = 1;
770 ctr=1; 808 ctr = 1;
771 } 809 } else {
772 else {
773 size_t i; 810 size_t i;
774 u64 len0 = len; 811 u64 len0 = len;
775 812
776 while (len>=16) { 813 while (len >= 16) {
777 for (i=0; i<16; ++i) ctx->Yi.c[i] ^= iv[i]; 814 for (i = 0; i < 16; ++i)
778 GCM_MUL(ctx,Yi); 815 ctx->Yi.c[i] ^= iv[i];
816 GCM_MUL(ctx, Yi);
779 iv += 16; 817 iv += 16;
780 len -= 16; 818 len -= 16;
781 } 819 }
782 if (len) { 820 if (len) {
783 for (i=0; i<len; ++i) ctx->Yi.c[i] ^= iv[i]; 821 for (i = 0; i < len; ++i)
784 GCM_MUL(ctx,Yi); 822 ctx->Yi.c[i] ^= iv[i];
823 GCM_MUL(ctx, Yi);
785 } 824 }
786 len0 <<= 3; 825 len0 <<= 3;
787#if BYTE_ORDER == LITTLE_ENDIAN 826#if BYTE_ORDER == LITTLE_ENDIAN
788#ifdef BSWAP8 827#ifdef BSWAP8
789 ctx->Yi.u[1] ^= BSWAP8(len0); 828 ctx->Yi.u[1] ^= BSWAP8(len0);
790#else 829#else
791 ctx->Yi.c[8] ^= (u8)(len0>>56); 830 ctx->Yi.c[8] ^= (u8)(len0 >> 56);
792 ctx->Yi.c[9] ^= (u8)(len0>>48); 831 ctx->Yi.c[9] ^= (u8)(len0 >> 48);
793 ctx->Yi.c[10] ^= (u8)(len0>>40); 832 ctx->Yi.c[10] ^= (u8)(len0 >> 40);
794 ctx->Yi.c[11] ^= (u8)(len0>>32); 833 ctx->Yi.c[11] ^= (u8)(len0 >> 32);
795 ctx->Yi.c[12] ^= (u8)(len0>>24); 834 ctx->Yi.c[12] ^= (u8)(len0 >> 24);
796 ctx->Yi.c[13] ^= (u8)(len0>>16); 835 ctx->Yi.c[13] ^= (u8)(len0 >> 16);
797 ctx->Yi.c[14] ^= (u8)(len0>>8); 836 ctx->Yi.c[14] ^= (u8)(len0 >> 8);
798 ctx->Yi.c[15] ^= (u8)(len0); 837 ctx->Yi.c[15] ^= (u8)(len0);
799#endif 838#endif
800#else /* BIG_ENDIAN */ 839#else /* BIG_ENDIAN */
801 ctx->Yi.u[1] ^= len0; 840 ctx->Yi.u[1] ^= len0;
802#endif 841#endif
803 842
804 GCM_MUL(ctx,Yi); 843 GCM_MUL(ctx, Yi);
805 844
806#if BYTE_ORDER == LITTLE_ENDIAN 845#if BYTE_ORDER == LITTLE_ENDIAN
807#ifdef BSWAP4 846#ifdef BSWAP4
808 ctr = BSWAP4(ctx->Yi.d[3]); 847 ctr = BSWAP4(ctx->Yi.d[3]);
809#else 848#else
810 ctr = GETU32(ctx->Yi.c+12); 849 ctr = GETU32(ctx->Yi.c + 12);
811#endif 850#endif
812#else /* BIG_ENDIAN */ 851#else /* BIG_ENDIAN */
813 ctr = ctx->Yi.d[3]; 852 ctr = ctx->Yi.d[3];
814#endif 853#endif
815 } 854 }
816 855
817 (*ctx->block)(ctx->Yi.c,ctx->EK0.c,ctx->key); 856 (*ctx->block)(ctx->Yi.c, ctx->EK0.c, ctx->key);
818 ++ctr; 857 ++ctr;
819#if BYTE_ORDER == LITTLE_ENDIAN 858#if BYTE_ORDER == LITTLE_ENDIAN
820#ifdef BSWAP4 859#ifdef BSWAP4
821 ctx->Yi.d[3] = BSWAP4(ctr); 860 ctx->Yi.d[3] = BSWAP4(ctr);
822#else 861#else
823 PUTU32(ctx->Yi.c+12,ctr); 862 PUTU32(ctx->Yi.c + 12, ctr);
824#endif 863#endif
825#else /* BIG_ENDIAN */ 864#else /* BIG_ENDIAN */
826 ctx->Yi.d[3] = ctr; 865 ctx->Yi.d[3] = ctr;
827#endif 866#endif
828} 867}
829 868
830int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len) 869int
870CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, size_t len)
831{ 871{
832 size_t i; 872 size_t i;
833 unsigned int n; 873 unsigned int n;
834 u64 alen = ctx->len.u[0]; 874 u64 alen = ctx->len.u[0];
835#ifdef GCM_FUNCREF_4BIT 875#ifdef GCM_FUNCREF_4BIT
836 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; 876 void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
837# ifdef GHASH 877# ifdef GHASH
838 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16], 878 void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
839 const u8 *inp,size_t len) = ctx->ghash; 879 const u8 *inp, size_t len) = ctx->ghash;
840# endif 880# endif
841#endif 881#endif
842 882
843 if (ctx->len.u[1]) return -2; 883 if (ctx->len.u[1])
884 return -2;
844 885
845 alen += len; 886 alen += len;
846 if (alen>(U64(1)<<61) || (sizeof(len)==8 && alen<len)) 887 if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
847 return -1; 888 return -1;
848 ctx->len.u[0] = alen; 889 ctx->len.u[0] = alen;
849 890
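The setiv logic above implements the J0 derivation of NIST SP 800-38D: a
96-bit IV is used verbatim with the 32-bit counter field set to 1, while
any other length is GHASHed together with its bit length. The fast path
in isolation (sketch; assumes string.h, and that Yi starts zeroed as in
the function above):

	static void
	gcm_j0_96bit_sketch(unsigned char Yi[16], const unsigned char iv[12])
	{
		memcpy(Yi, iv, 12);			/* IV || ... */
		Yi[12] = Yi[13] = Yi[14] = 0;		/* 32-bit counter */
		Yi[15] = 1;				/* starts at 1 */
	}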
@@ -852,9 +893,10 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len)
852 while (n && len) { 893 while (n && len) {
853 ctx->Xi.c[n] ^= *(aad++); 894 ctx->Xi.c[n] ^= *(aad++);
854 --len; 895 --len;
855 n = (n+1)%16; 896 n = (n + 1) % 16;
856 } 897 }
857 if (n==0) GCM_MUL(ctx,Xi); 898 if (n == 0)
899 GCM_MUL(ctx, Xi);
858 else { 900 else {
859 ctx->ares = n; 901 ctx->ares = n;
860 return 0; 902 return 0;
@@ -862,53 +904,56 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len)
862 } 904 }
863 905
864#ifdef GHASH 906#ifdef GHASH
865 if ((i = (len&(size_t)-16))) { 907 if ((i = (len & (size_t)-16))) {
866 GHASH(ctx,aad,i); 908 GHASH(ctx, aad, i);
867 aad += i; 909 aad += i;
868 len -= i; 910 len -= i;
869 } 911 }
870#else 912#else
871 while (len>=16) { 913 while (len >= 16) {
872 for (i=0; i<16; ++i) ctx->Xi.c[i] ^= aad[i]; 914 for (i = 0; i < 16; ++i)
873 GCM_MUL(ctx,Xi); 915 ctx->Xi.c[i] ^= aad[i];
916 GCM_MUL(ctx, Xi);
874 aad += 16; 917 aad += 16;
875 len -= 16; 918 len -= 16;
876 } 919 }
877#endif 920#endif
878 if (len) { 921 if (len) {
879 n = (unsigned int)len; 922 n = (unsigned int)len;
880 for (i=0; i<len; ++i) ctx->Xi.c[i] ^= aad[i]; 923 for (i = 0; i < len; ++i)
924 ctx->Xi.c[i] ^= aad[i];
881 } 925 }
882 926
883 ctx->ares = n; 927 ctx->ares = n;
884 return 0; 928 return 0;
885} 929}
886 930
887int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, 931int
888 const unsigned char *in, unsigned char *out, 932CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
889 size_t len) 933 const unsigned char *in, unsigned char *out,
934 size_t len)
890{ 935{
891 unsigned int n, ctr; 936 unsigned int n, ctr;
892 size_t i; 937 size_t i;
893 u64 mlen = ctx->len.u[1]; 938 u64 mlen = ctx->len.u[1];
894 block128_f block = ctx->block; 939 block128_f block = ctx->block;
895 void *key = ctx->key; 940 void *key = ctx->key;
896#ifdef GCM_FUNCREF_4BIT 941#ifdef GCM_FUNCREF_4BIT
897 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; 942 void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
898# ifdef GHASH 943# ifdef GHASH
899 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16], 944 void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
900 const u8 *inp,size_t len) = ctx->ghash; 945 const u8 *inp, size_t len) = ctx->ghash;
901# endif 946# endif
902#endif 947#endif
903 948
904 mlen += len; 949 mlen += len;
905 if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len)) 950 if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
906 return -1; 951 return -1;
907 ctx->len.u[1] = mlen; 952 ctx->len.u[1] = mlen;
908 953
909 if (ctx->ares) { 954 if (ctx->ares) {
910 /* First call to encrypt finalizes GHASH(AAD) */ 955 /* First call to encrypt finalizes GHASH(AAD) */
911 GCM_MUL(ctx,Xi); 956 GCM_MUL(ctx, Xi);
912 ctx->ares = 0; 957 ctx->ares = 0;
913 } 958 }
914 959
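Where the bounds checked above come from (SP 800-38D): a single nonce
may encrypt at most 2^32 - 2 counter blocks, i.e. (2^32 - 2) * 16 =
2^36 - 32 bytes, which is the mlen test; AAD is capped at roughly 2^64
bits, i.e. the 2^61-byte alen test in CRYPTO_gcm128_aad. As constants:

	/* Worked out from the checks above. */
	static const u64 gcm_max_msg_bytes = (U64(1) << 36) - 32;
	static const u64 gcm_max_aad_bytes = U64(1) << 61;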
@@ -916,7 +961,7 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
916#ifdef BSWAP4 961#ifdef BSWAP4
917 ctr = BSWAP4(ctx->Yi.d[3]); 962 ctr = BSWAP4(ctx->Yi.d[3]);
918#else 963#else
919 ctr = GETU32(ctx->Yi.c+12); 964 ctr = GETU32(ctx->Yi.c + 12);
920#endif 965#endif
921#else /* BIG_ENDIAN */ 966#else /* BIG_ENDIAN */
922 ctr = ctx->Yi.d[3]; 967 ctr = ctx->Yi.d[3];
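BSWAP4/BSWAP8, used throughout the little-endian paths, are assumed to
be plain 32/64-bit byte swaps (typically compiler builtins or inline
asm); a portable sketch of the 32-bit one:

	#define BSWAP4_SKETCH(x)					   \
		((((x) & 0xff000000U) >> 24) | (((x) & 0x00ff0000U) >> 8) |\
		 (((x) & 0x0000ff00U) << 8)  | (((x) & 0x000000ffU) << 24))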
@@ -924,173 +969,180 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
924 969
925 n = ctx->mres; 970 n = ctx->mres;
926#if !defined(OPENSSL_SMALL_FOOTPRINT) 971#if !defined(OPENSSL_SMALL_FOOTPRINT)
927 if (16%sizeof(size_t) == 0) do { /* always true actually */ 972 if (16 % sizeof(size_t) == 0)
928 if (n) { 973 do { /* always true actually */
929 while (n && len) { 974 if (n) {
930 ctx->Xi.c[n] ^= *(out++) = *(in++)^ctx->EKi.c[n]; 975 while (n && len) {
931 --len; 976 ctx->Xi.c[n] ^= *(out++) = *(in++) ^
932 n = (n+1)%16; 977 ctx->EKi.c[n];
978 --len;
979 n = (n + 1) % 16;
980 }
981 if (n == 0)
982 GCM_MUL(ctx, Xi);
983 else {
984 ctx->mres = n;
985 return 0;
986 }
933 } 987 }
934 if (n==0) GCM_MUL(ctx,Xi);
935 else {
936 ctx->mres = n;
937 return 0;
938 }
939 }
940#ifdef __STRICT_ALIGNMENT 988#ifdef __STRICT_ALIGNMENT
941 if (((size_t)in|(size_t)out)%sizeof(size_t) != 0) 989 if (((size_t)in|(size_t)out) % sizeof(size_t) != 0)
942 break; 990 break;
943#endif 991#endif
944#if defined(GHASH) && defined(GHASH_CHUNK) 992#if defined(GHASH) && defined(GHASH_CHUNK)
945 while (len>=GHASH_CHUNK) { 993 while (len >= GHASH_CHUNK) {
946 size_t j=GHASH_CHUNK; 994 size_t j = GHASH_CHUNK;
947 995
948 while (j) { 996 while (j) {
949 size_t *out_t=(size_t *)out; 997 size_t *out_t = (size_t *)out;
950 const size_t *in_t=(const size_t *)in; 998 const size_t *in_t = (const size_t *)in;
951 999
952 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1000 (*block)(ctx->Yi.c, ctx->EKi.c, key);
953 ++ctr; 1001 ++ctr;
954#if BYTE_ORDER == LITTLE_ENDIAN 1002#if BYTE_ORDER == LITTLE_ENDIAN
955#ifdef BSWAP4 1003#ifdef BSWAP4
956 ctx->Yi.d[3] = BSWAP4(ctr); 1004 ctx->Yi.d[3] = BSWAP4(ctr);
957#else 1005#else
958 PUTU32(ctx->Yi.c+12,ctr); 1006 PUTU32(ctx->Yi.c + 12, ctr);
959#endif 1007#endif
960#else /* BIG_ENDIAN */ 1008#else /* BIG_ENDIAN */
961 ctx->Yi.d[3] = ctr; 1009 ctx->Yi.d[3] = ctr;
962#endif 1010#endif
963 for (i=0; i<16/sizeof(size_t); ++i) 1011 for (i = 0; i < 16/sizeof(size_t); ++i)
964 out_t[i] = in_t[i] ^ ctx->EKi.t[i]; 1012 out_t[i] = in_t[i] ^
965 out += 16; 1013 ctx->EKi.t[i];
966 in += 16; 1014 out += 16;
967 j -= 16; 1015 in += 16;
968 } 1016 j -= 16;
969 GHASH(ctx,out-GHASH_CHUNK,GHASH_CHUNK); 1017 }
970 len -= GHASH_CHUNK; 1018 GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
971 } 1019 len -= GHASH_CHUNK;
972 if ((i = (len&(size_t)-16))) { 1020 }
973 size_t j=i; 1021 if ((i = (len & (size_t)-16))) {
1022 size_t j = i;
974 1023
975 while (len>=16) { 1024 while (len >= 16) {
976 size_t *out_t=(size_t *)out; 1025 size_t *out_t = (size_t *)out;
977 const size_t *in_t=(const size_t *)in; 1026 const size_t *in_t = (const size_t *)in;
978 1027
979 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1028 (*block)(ctx->Yi.c, ctx->EKi.c, key);
980 ++ctr; 1029 ++ctr;
981#if BYTE_ORDER == LITTLE_ENDIAN 1030#if BYTE_ORDER == LITTLE_ENDIAN
982#ifdef BSWAP4 1031#ifdef BSWAP4
983 ctx->Yi.d[3] = BSWAP4(ctr); 1032 ctx->Yi.d[3] = BSWAP4(ctr);
984#else 1033#else
985 PUTU32(ctx->Yi.c+12,ctr); 1034 PUTU32(ctx->Yi.c + 12, ctr);
986#endif 1035#endif
987#else /* BIG_ENDIAN */ 1036#else /* BIG_ENDIAN */
988 ctx->Yi.d[3] = ctr; 1037 ctx->Yi.d[3] = ctr;
989#endif 1038#endif
990 for (i=0; i<16/sizeof(size_t); ++i) 1039 for (i = 0; i < 16/sizeof(size_t); ++i)
991 out_t[i] = in_t[i] ^ ctx->EKi.t[i]; 1040 out_t[i] = in_t[i] ^
992 out += 16; 1041 ctx->EKi.t[i];
993 in += 16; 1042 out += 16;
994 len -= 16; 1043 in += 16;
995 } 1044 len -= 16;
996 GHASH(ctx,out-j,j); 1045 }
997 } 1046 GHASH(ctx, out - j, j);
1047 }
998#else 1048#else
999 while (len>=16) { 1049 while (len >= 16) {
1000 size_t *out_t=(size_t *)out; 1050 size_t *out_t = (size_t *)out;
1001 const size_t *in_t=(const size_t *)in; 1051 const size_t *in_t = (const size_t *)in;
1002 1052
1003 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1053 (*block)(ctx->Yi.c, ctx->EKi.c, key);
1004 ++ctr; 1054 ++ctr;
1005#if BYTE_ORDER == LITTLE_ENDIAN 1055#if BYTE_ORDER == LITTLE_ENDIAN
1006#ifdef BSWAP4 1056#ifdef BSWAP4
1007 ctx->Yi.d[3] = BSWAP4(ctr); 1057 ctx->Yi.d[3] = BSWAP4(ctr);
1008#else 1058#else
1009 PUTU32(ctx->Yi.c+12,ctr); 1059 PUTU32(ctx->Yi.c + 12, ctr);
1010#endif 1060#endif
1011#else /* BIG_ENDIAN */ 1061#else /* BIG_ENDIAN */
1012 ctx->Yi.d[3] = ctr; 1062 ctx->Yi.d[3] = ctr;
1013#endif 1063#endif
1014 for (i=0; i<16/sizeof(size_t); ++i) 1064 for (i = 0; i < 16/sizeof(size_t); ++i)
1015 ctx->Xi.t[i] ^= 1065 ctx->Xi.t[i] ^=
1016 out_t[i] = in_t[i]^ctx->EKi.t[i]; 1066 out_t[i] = in_t[i] ^ ctx->EKi.t[i];
1017 GCM_MUL(ctx,Xi); 1067 GCM_MUL(ctx, Xi);
1018 out += 16; 1068 out += 16;
1019 in += 16; 1069 in += 16;
1020 len -= 16; 1070 len -= 16;
1021 } 1071 }
1022#endif 1072#endif
1023 if (len) { 1073 if (len) {
1024 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1074 (*block)(ctx->Yi.c, ctx->EKi.c, key);
1025 ++ctr; 1075 ++ctr;
1026#if BYTE_ORDER == LITTLE_ENDIAN 1076#if BYTE_ORDER == LITTLE_ENDIAN
1027#ifdef BSWAP4 1077#ifdef BSWAP4
1028 ctx->Yi.d[3] = BSWAP4(ctr); 1078 ctx->Yi.d[3] = BSWAP4(ctr);
1029#else 1079#else
1030 PUTU32(ctx->Yi.c+12,ctr); 1080 PUTU32(ctx->Yi.c + 12, ctr);
1031#endif 1081#endif
1032#else /* BIG_ENDIAN */ 1082#else /* BIG_ENDIAN */
1033 ctx->Yi.d[3] = ctr; 1083 ctx->Yi.d[3] = ctr;
1034#endif 1084#endif
1035 while (len--) { 1085 while (len--) {
1036 ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n]; 1086 ctx->Xi.c[n] ^= out[n] = in[n] ^
1037 ++n; 1087 ctx->EKi.c[n];
1088 ++n;
1089 }
1038 } 1090 }
1039 }
1040 1091
1041 ctx->mres = n; 1092 ctx->mres = n;
1042 return 0; 1093 return 0;
1043 } while(0); 1094 } while (0);
1044#endif 1095#endif
1045 for (i=0;i<len;++i) { 1096 for (i = 0; i < len; ++i) {
1046 if (n==0) { 1097 if (n == 0) {
1047 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1098 (*block)(ctx->Yi.c, ctx->EKi.c, key);
1048 ++ctr; 1099 ++ctr;
1049#if BYTE_ORDER == LITTLE_ENDIAN 1100#if BYTE_ORDER == LITTLE_ENDIAN
1050#ifdef BSWAP4 1101#ifdef BSWAP4
1051 ctx->Yi.d[3] = BSWAP4(ctr); 1102 ctx->Yi.d[3] = BSWAP4(ctr);
1052#else 1103#else
1053 PUTU32(ctx->Yi.c+12,ctr); 1104 PUTU32(ctx->Yi.c + 12, ctr);
1054#endif 1105#endif
1055#else /* BIG_ENDIAN */ 1106#else /* BIG_ENDIAN */
1056 ctx->Yi.d[3] = ctr; 1107 ctx->Yi.d[3] = ctr;
1057#endif 1108#endif
1058 } 1109 }
1059 ctx->Xi.c[n] ^= out[i] = in[i]^ctx->EKi.c[n]; 1110 ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
1060 n = (n+1)%16; 1111 n = (n + 1) % 16;
1061 if (n==0) 1112 if (n == 0)
1062 GCM_MUL(ctx,Xi); 1113 GCM_MUL(ctx, Xi);
1063 } 1114 }
1064 1115
1065 ctx->mres = n; 1116 ctx->mres = n;
1066 return 0; 1117 return 0;
1067} 1118}
1068 1119
1069int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, 1120int
1070 const unsigned char *in, unsigned char *out, 1121CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
1071 size_t len) 1122 const unsigned char *in, unsigned char *out,
1123 size_t len)
1072{ 1124{
1073 unsigned int n, ctr; 1125 unsigned int n, ctr;
1074 size_t i; 1126 size_t i;
1075 u64 mlen = ctx->len.u[1]; 1127 u64 mlen = ctx->len.u[1];
1076 block128_f block = ctx->block; 1128 block128_f block = ctx->block;
1077 void *key = ctx->key; 1129 void *key = ctx->key;
1078#ifdef GCM_FUNCREF_4BIT 1130#ifdef GCM_FUNCREF_4BIT
1079 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; 1131 void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
1080# ifdef GHASH 1132# ifdef GHASH
1081 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16], 1133 void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
1082 const u8 *inp,size_t len) = ctx->ghash; 1134 const u8 *inp, size_t len) = ctx->ghash;
1083# endif 1135# endif
1084#endif 1136#endif
1085 1137
1086 mlen += len; 1138 mlen += len;
1087 if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len)) 1139 if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
1088 return -1; 1140 return -1;
1089 ctx->len.u[1] = mlen; 1141 ctx->len.u[1] = mlen;
1090 1142
1091 if (ctx->ares) { 1143 if (ctx->ares) {
1092 /* First call to decrypt finalizes GHASH(AAD) */ 1144 /* First call to decrypt finalizes GHASH(AAD) */
1093 GCM_MUL(ctx,Xi); 1145 GCM_MUL(ctx, Xi);
1094 ctx->ares = 0; 1146 ctx->ares = 0;
1095 } 1147 }
1096 1148
@@ -1098,7 +1150,7 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
1098#ifdef BSWAP4 1150#ifdef BSWAP4
1099 ctr = BSWAP4(ctx->Yi.d[3]); 1151 ctr = BSWAP4(ctx->Yi.d[3]);
1100#else 1152#else
1101 ctr = GETU32(ctx->Yi.c+12); 1153 ctr = GETU32(ctx->Yi.c + 12);
1102#endif 1154#endif
1103#else /* BIG_ENDIAN */ 1155#else /* BIG_ENDIAN */
1104 ctr = ctx->Yi.d[3]; 1156 ctr = ctx->Yi.d[3];
@@ -1106,179 +1158,184 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
1106 1158
1107 n = ctx->mres; 1159 n = ctx->mres;
1108#if !defined(OPENSSL_SMALL_FOOTPRINT) 1160#if !defined(OPENSSL_SMALL_FOOTPRINT)
1109 if (16%sizeof(size_t) == 0) do { /* always true actually */ 1161 if (16 % sizeof(size_t) == 0)
1110 if (n) { 1162 do { /* always true actually */
1111 while (n && len) { 1163 if (n) {
1112 u8 c = *(in++); 1164 while (n && len) {
1113 *(out++) = c^ctx->EKi.c[n]; 1165 u8 c = *(in++);
1114 ctx->Xi.c[n] ^= c; 1166 *(out++) = c ^ ctx->EKi.c[n];
1115 --len; 1167 ctx->Xi.c[n] ^= c;
1116 n = (n+1)%16; 1168 --len;
1117 } 1169 n = (n + 1) % 16;
1118 if (n==0) GCM_MUL (ctx,Xi); 1170 }
1119 else { 1171 if (n == 0)
1120 ctx->mres = n; 1172 GCM_MUL(ctx, Xi);
1121 return 0; 1173 else {
1174 ctx->mres = n;
1175 return 0;
1176 }
1122 } 1177 }
1123 }
1124#ifdef __STRICT_ALIGNMENT 1178#ifdef __STRICT_ALIGNMENT
1125 if (((size_t)in|(size_t)out)%sizeof(size_t) != 0) 1179 if (((size_t)in|(size_t)out) % sizeof(size_t) != 0)
1126 break; 1180 break;
1127#endif 1181#endif
1128#if defined(GHASH) && defined(GHASH_CHUNK) 1182#if defined(GHASH) && defined(GHASH_CHUNK)
1129 while (len>=GHASH_CHUNK) { 1183 while (len >= GHASH_CHUNK) {
1130 size_t j=GHASH_CHUNK; 1184 size_t j = GHASH_CHUNK;
1131 1185
1132 GHASH(ctx,in,GHASH_CHUNK); 1186 GHASH(ctx, in, GHASH_CHUNK);
1133 while (j) { 1187 while (j) {
1134 size_t *out_t=(size_t *)out; 1188 size_t *out_t = (size_t *)out;
1135 const size_t *in_t=(const size_t *)in; 1189 const size_t *in_t = (const size_t *)in;
1136 1190
1137 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1191 (*block)(ctx->Yi.c, ctx->EKi.c, key);
1138 ++ctr; 1192 ++ctr;
1139#if BYTE_ORDER == LITTLE_ENDIAN 1193#if BYTE_ORDER == LITTLE_ENDIAN
1140#ifdef BSWAP4 1194#ifdef BSWAP4
1141 ctx->Yi.d[3] = BSWAP4(ctr); 1195 ctx->Yi.d[3] = BSWAP4(ctr);
1142#else 1196#else
1143 PUTU32(ctx->Yi.c+12,ctr); 1197 PUTU32(ctx->Yi.c + 12, ctr);
1144#endif 1198#endif
1145#else /* BIG_ENDIAN */ 1199#else /* BIG_ENDIAN */
1146 ctx->Yi.d[3] = ctr; 1200 ctx->Yi.d[3] = ctr;
1147#endif 1201#endif
1148 for (i=0; i<16/sizeof(size_t); ++i) 1202 for (i = 0; i < 16/sizeof(size_t); ++i)
1149 out_t[i] = in_t[i]^ctx->EKi.t[i]; 1203 out_t[i] = in_t[i] ^
1150 out += 16; 1204 ctx->EKi.t[i];
1151 in += 16; 1205 out += 16;
1152 j -= 16; 1206 in += 16;
1153 } 1207 j -= 16;
1154 len -= GHASH_CHUNK; 1208 }
1155 } 1209 len -= GHASH_CHUNK;
1156 if ((i = (len&(size_t)-16))) { 1210 }
1157 GHASH(ctx,in,i); 1211 if ((i = (len & (size_t)-16))) {
1158 while (len>=16) { 1212 GHASH(ctx, in, i);
1159 size_t *out_t=(size_t *)out; 1213 while (len >= 16) {
1160 const size_t *in_t=(const size_t *)in; 1214 size_t *out_t = (size_t *)out;
1161 1215 const size_t *in_t = (const size_t *)in;
1162 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1216
1163 ++ctr; 1217 (*block)(ctx->Yi.c, ctx->EKi.c, key);
1218 ++ctr;
1164#if BYTE_ORDER == LITTLE_ENDIAN 1219#if BYTE_ORDER == LITTLE_ENDIAN
1165#ifdef BSWAP4 1220#ifdef BSWAP4
1166 ctx->Yi.d[3] = BSWAP4(ctr); 1221 ctx->Yi.d[3] = BSWAP4(ctr);
1167#else 1222#else
1168 PUTU32(ctx->Yi.c+12,ctr); 1223 PUTU32(ctx->Yi.c + 12, ctr);
1169#endif 1224#endif
1170#else /* BIG_ENDIAN */ 1225#else /* BIG_ENDIAN */
1171 ctx->Yi.d[3] = ctr; 1226 ctx->Yi.d[3] = ctr;
1172#endif 1227#endif
1173 for (i=0; i<16/sizeof(size_t); ++i) 1228 for (i = 0; i < 16/sizeof(size_t); ++i)
1174 out_t[i] = in_t[i]^ctx->EKi.t[i]; 1229 out_t[i] = in_t[i] ^
1175 out += 16; 1230 ctx->EKi.t[i];
1176 in += 16; 1231 out += 16;
1177 len -= 16; 1232 in += 16;
1178 } 1233 len -= 16;
1179 } 1234 }
1235 }
1180#else 1236#else
1181 while (len>=16) { 1237 while (len >= 16) {
1182 size_t *out_t=(size_t *)out; 1238 size_t *out_t = (size_t *)out;
1183 const size_t *in_t=(const size_t *)in; 1239 const size_t *in_t = (const size_t *)in;
1184 1240
1185 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1241 (*block)(ctx->Yi.c, ctx->EKi.c, key);
1186 ++ctr; 1242 ++ctr;
1187#if BYTE_ORDER == LITTLE_ENDIAN 1243#if BYTE_ORDER == LITTLE_ENDIAN
1188#ifdef BSWAP4 1244#ifdef BSWAP4
1189 ctx->Yi.d[3] = BSWAP4(ctr); 1245 ctx->Yi.d[3] = BSWAP4(ctr);
1190#else 1246#else
1191 PUTU32(ctx->Yi.c+12,ctr); 1247 PUTU32(ctx->Yi.c + 12, ctr);
1192#endif 1248#endif
1193#else /* BIG_ENDIAN */ 1249#else /* BIG_ENDIAN */
1194 ctx->Yi.d[3] = ctr; 1250 ctx->Yi.d[3] = ctr;
1195#endif 1251#endif
1196 for (i=0; i<16/sizeof(size_t); ++i) { 1252 for (i = 0; i < 16/sizeof(size_t); ++i) {
1197 size_t c = in[i]; 1253 size_t c = in[i];
1198 out[i] = c^ctx->EKi.t[i]; 1254 out[i] = c ^ ctx->EKi.t[i];
1199 ctx->Xi.t[i] ^= c; 1255 ctx->Xi.t[i] ^= c;
1256 }
1257 GCM_MUL(ctx, Xi);
1258 out += 16;
1259 in += 16;
1260 len -= 16;
1200 } 1261 }
1201 GCM_MUL(ctx,Xi);
1202 out += 16;
1203 in += 16;
1204 len -= 16;
1205 }
1206#endif 1262#endif
1207 if (len) { 1263 if (len) {
1208 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1264 (*block)(ctx->Yi.c, ctx->EKi.c, key);
1209 ++ctr; 1265 ++ctr;
1210#if BYTE_ORDER == LITTLE_ENDIAN 1266#if BYTE_ORDER == LITTLE_ENDIAN
1211#ifdef BSWAP4 1267#ifdef BSWAP4
1212 ctx->Yi.d[3] = BSWAP4(ctr); 1268 ctx->Yi.d[3] = BSWAP4(ctr);
1213#else 1269#else
1214 PUTU32(ctx->Yi.c+12,ctr); 1270 PUTU32(ctx->Yi.c + 12, ctr);
1215#endif 1271#endif
1216#else /* BIG_ENDIAN */ 1272#else /* BIG_ENDIAN */
1217 ctx->Yi.d[3] = ctr; 1273 ctx->Yi.d[3] = ctr;
1218#endif 1274#endif
1219 while (len--) { 1275 while (len--) {
1220 u8 c = in[n]; 1276 u8 c = in[n];
1221 ctx->Xi.c[n] ^= c; 1277 ctx->Xi.c[n] ^= c;
1222 out[n] = c^ctx->EKi.c[n]; 1278 out[n] = c ^ ctx->EKi.c[n];
1223 ++n; 1279 ++n;
1280 }
1224 } 1281 }
1225 }
1226 1282
1227 ctx->mres = n; 1283 ctx->mres = n;
1228 return 0; 1284 return 0;
1229 } while(0); 1285 } while (0);
1230#endif 1286#endif
1231 for (i=0;i<len;++i) { 1287 for (i = 0; i < len; ++i) {
1232 u8 c; 1288 u8 c;
1233 if (n==0) { 1289 if (n == 0) {
1234 (*block)(ctx->Yi.c,ctx->EKi.c,key); 1290 (*block)(ctx->Yi.c, ctx->EKi.c, key);
1235 ++ctr; 1291 ++ctr;
1236#if BYTE_ORDER == LITTLE_ENDIAN 1292#if BYTE_ORDER == LITTLE_ENDIAN
1237#ifdef BSWAP4 1293#ifdef BSWAP4
1238 ctx->Yi.d[3] = BSWAP4(ctr); 1294 ctx->Yi.d[3] = BSWAP4(ctr);
1239#else 1295#else
1240 PUTU32(ctx->Yi.c+12,ctr); 1296 PUTU32(ctx->Yi.c + 12, ctr);
1241#endif 1297#endif
1242#else /* BIG_ENDIAN */ 1298#else /* BIG_ENDIAN */
1243 ctx->Yi.d[3] = ctr; 1299 ctx->Yi.d[3] = ctr;
1244#endif 1300#endif
1245 } 1301 }
1246 c = in[i]; 1302 c = in[i];
1247 out[i] = c^ctx->EKi.c[n]; 1303 out[i] = c ^ ctx->EKi.c[n];
1248 ctx->Xi.c[n] ^= c; 1304 ctx->Xi.c[n] ^= c;
1249 n = (n+1)%16; 1305 n = (n + 1) % 16;
1250 if (n==0) 1306 if (n == 0)
1251 GCM_MUL(ctx,Xi); 1307 GCM_MUL(ctx, Xi);
1252 } 1308 }
1253 1309
1254 ctx->mres = n; 1310 ctx->mres = n;
1255 return 0; 1311 return 0;
1256} 1312}
1257 1313
1258int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, 1314int
1259 const unsigned char *in, unsigned char *out, 1315CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
1260 size_t len, ctr128_f stream) 1316 const unsigned char *in, unsigned char *out,
1317 size_t len, ctr128_f stream)
1261{ 1318{
1262 unsigned int n, ctr; 1319 unsigned int n, ctr;
1263 size_t i; 1320 size_t i;
1264 u64 mlen = ctx->len.u[1]; 1321 u64 mlen = ctx->len.u[1];
1265 void *key = ctx->key; 1322 void *key = ctx->key;
1266#ifdef GCM_FUNCREF_4BIT 1323#ifdef GCM_FUNCREF_4BIT
1267 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; 1324 void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
1268# ifdef GHASH 1325# ifdef GHASH
1269 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16], 1326 void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
1270 const u8 *inp,size_t len) = ctx->ghash; 1327 const u8 *inp, size_t len) = ctx->ghash;
1271# endif 1328# endif
1272#endif 1329#endif
1273 1330
1274 mlen += len; 1331 mlen += len;
1275 if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len)) 1332 if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
1276 return -1; 1333 return -1;
1277 ctx->len.u[1] = mlen; 1334 ctx->len.u[1] = mlen;
1278 1335
1279 if (ctx->ares) { 1336 if (ctx->ares) {
1280 /* First call to encrypt finalizes GHASH(AAD) */ 1337 /* First call to encrypt finalizes GHASH(AAD) */
1281 GCM_MUL(ctx,Xi); 1338 GCM_MUL(ctx, Xi);
1282 ctx->ares = 0; 1339 ctx->ares = 0;
1283 } 1340 }
1284 1341
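A subtle point in the decrypt loops above: the input byte is saved into
the temporary c, XORed into Xi, and only then used to produce the
output. GHASH must absorb the ciphertext, and since in and out may alias
for in-place decryption, reading in[n] after writing out[n] would hash
plaintext instead:

	u8 c = in[n];			/* read ciphertext first */
	ctx->Xi.c[n] ^= c;		/* GHASH absorbs ciphertext */
	out[n] = c ^ ctx->EKi.c[n];	/* may overwrite in[n] */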
@@ -1286,7 +1343,7 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
1286#ifdef BSWAP4 1343#ifdef BSWAP4
1287 ctr = BSWAP4(ctx->Yi.d[3]); 1344 ctr = BSWAP4(ctx->Yi.d[3]);
1288#else 1345#else
1289 ctr = GETU32(ctx->Yi.c+12); 1346 ctr = GETU32(ctx->Yi.c + 12);
1290#endif 1347#endif
1291#else /* BIG_ENDIAN */ 1348#else /* BIG_ENDIAN */
1292 ctr = ctx->Yi.d[3]; 1349 ctr = ctx->Yi.d[3];
@@ -1295,76 +1352,78 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
1295 n = ctx->mres; 1352 n = ctx->mres;
1296 if (n) { 1353 if (n) {
1297 while (n && len) { 1354 while (n && len) {
1298 ctx->Xi.c[n] ^= *(out++) = *(in++)^ctx->EKi.c[n]; 1355 ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
1299 --len; 1356 --len;
1300 n = (n+1)%16; 1357 n = (n + 1) % 16;
1301 } 1358 }
1302 if (n==0) GCM_MUL(ctx,Xi); 1359 if (n == 0)
1360 GCM_MUL(ctx, Xi);
1303 else { 1361 else {
1304 ctx->mres = n; 1362 ctx->mres = n;
1305 return 0; 1363 return 0;
1306 } 1364 }
1307 } 1365 }
1308#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT) 1366#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
1309 while (len>=GHASH_CHUNK) { 1367 while (len >= GHASH_CHUNK) {
1310 (*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c); 1368 (*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c);
1311 ctr += GHASH_CHUNK/16; 1369 ctr += GHASH_CHUNK/16;
1312#if BYTE_ORDER == LITTLE_ENDIAN 1370#if BYTE_ORDER == LITTLE_ENDIAN
1313#ifdef BSWAP4 1371#ifdef BSWAP4
1314 ctx->Yi.d[3] = BSWAP4(ctr); 1372 ctx->Yi.d[3] = BSWAP4(ctr);
1315#else 1373#else
1316 PUTU32(ctx->Yi.c+12,ctr); 1374 PUTU32(ctx->Yi.c + 12, ctr);
1317#endif 1375#endif
1318#else /* BIG_ENDIAN */ 1376#else /* BIG_ENDIAN */
1319 ctx->Yi.d[3] = ctr; 1377 ctx->Yi.d[3] = ctr;
1320#endif 1378#endif
1321 GHASH(ctx,out,GHASH_CHUNK); 1379 GHASH(ctx, out, GHASH_CHUNK);
1322 out += GHASH_CHUNK; 1380 out += GHASH_CHUNK;
1323 in += GHASH_CHUNK; 1381 in += GHASH_CHUNK;
1324 len -= GHASH_CHUNK; 1382 len -= GHASH_CHUNK;
1325 } 1383 }
1326#endif 1384#endif
1327 if ((i = (len&(size_t)-16))) { 1385 if ((i = (len & (size_t)-16))) {
1328 size_t j=i/16; 1386 size_t j = i/16;
1329 1387
1330 (*stream)(in,out,j,key,ctx->Yi.c); 1388 (*stream)(in, out, j, key, ctx->Yi.c);
1331 ctr += (unsigned int)j; 1389 ctr += (unsigned int)j;
1332#if BYTE_ORDER == LITTLE_ENDIAN 1390#if BYTE_ORDER == LITTLE_ENDIAN
1333#ifdef BSWAP4 1391#ifdef BSWAP4
1334 ctx->Yi.d[3] = BSWAP4(ctr); 1392 ctx->Yi.d[3] = BSWAP4(ctr);
1335#else 1393#else
1336 PUTU32(ctx->Yi.c+12,ctr); 1394 PUTU32(ctx->Yi.c + 12, ctr);
1337#endif 1395#endif
1338#else /* BIG_ENDIAN */ 1396#else /* BIG_ENDIAN */
1339 ctx->Yi.d[3] = ctr; 1397 ctx->Yi.d[3] = ctr;
1340#endif 1398#endif
1341 in += i; 1399 in += i;
1342 len -= i; 1400 len -= i;
1343#if defined(GHASH) 1401#if defined(GHASH)
1344 GHASH(ctx,out,i); 1402 GHASH(ctx, out, i);
1345 out += i; 1403 out += i;
1346#else 1404#else
1347 while (j--) { 1405 while (j--) {
1348 for (i=0;i<16;++i) ctx->Xi.c[i] ^= out[i]; 1406 for (i = 0; i < 16; ++i)
1349 GCM_MUL(ctx,Xi); 1407 ctx->Xi.c[i] ^= out[i];
1408 GCM_MUL(ctx, Xi);
1350 out += 16; 1409 out += 16;
1351 } 1410 }
1352#endif 1411#endif
1353 } 1412 }
1354 if (len) { 1413 if (len) {
1355 (*ctx->block)(ctx->Yi.c,ctx->EKi.c,key); 1414 (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
1356 ++ctr; 1415 ++ctr;
1357#if BYTE_ORDER == LITTLE_ENDIAN 1416#if BYTE_ORDER == LITTLE_ENDIAN
1358#ifdef BSWAP4 1417#ifdef BSWAP4
1359 ctx->Yi.d[3] = BSWAP4(ctr); 1418 ctx->Yi.d[3] = BSWAP4(ctr);
1360#else 1419#else
1361 PUTU32(ctx->Yi.c+12,ctr); 1420 PUTU32(ctx->Yi.c + 12, ctr);
1362#endif 1421#endif
1363#else /* BIG_ENDIAN */ 1422#else /* BIG_ENDIAN */
1364 ctx->Yi.d[3] = ctr; 1423 ctx->Yi.d[3] = ctr;
1365#endif 1424#endif
1366 while (len--) { 1425 while (len--) {
1367 ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n]; 1426 ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
1368 ++n; 1427 ++n;
1369 } 1428 }
1370 } 1429 }
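The stream argument threaded through the _ctr32 variants is a ctr128_f,
the bulk CTR primitive from modes.h, which encrypts a whole run of
16-byte blocks starting from ivec; the caller advances Yi itself
afterwards, as the code above does. Its signature, for reference:

	typedef void (*ctr128_f)(const unsigned char *in, unsigned char *out,
	    size_t blocks, const void *key, const unsigned char ivec[16]);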
@@ -1373,30 +1432,31 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
1373 return 0; 1432 return 0;
1374} 1433}
1375 1434
1376int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, 1435int
1377 const unsigned char *in, unsigned char *out, 1436CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
1378 size_t len,ctr128_f stream) 1437 const unsigned char *in, unsigned char *out,
1438 size_t len, ctr128_f stream)
1379{ 1439{
1380 unsigned int n, ctr; 1440 unsigned int n, ctr;
1381 size_t i; 1441 size_t i;
1382 u64 mlen = ctx->len.u[1]; 1442 u64 mlen = ctx->len.u[1];
1383 void *key = ctx->key; 1443 void *key = ctx->key;
1384#ifdef GCM_FUNCREF_4BIT 1444#ifdef GCM_FUNCREF_4BIT
1385 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; 1445 void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
1386# ifdef GHASH 1446# ifdef GHASH
1387 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16], 1447 void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
1388 const u8 *inp,size_t len) = ctx->ghash; 1448 const u8 *inp, size_t len) = ctx->ghash;
1389# endif 1449# endif
1390#endif 1450#endif
1391 1451
1392 mlen += len; 1452 mlen += len;
1393 if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len)) 1453 if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
1394 return -1; 1454 return -1;
1395 ctx->len.u[1] = mlen; 1455 ctx->len.u[1] = mlen;
1396 1456
1397 if (ctx->ares) { 1457 if (ctx->ares) {
1398 /* First call to decrypt finalizes GHASH(AAD) */ 1458 /* First call to decrypt finalizes GHASH(AAD) */
1399 GCM_MUL(ctx,Xi); 1459 GCM_MUL(ctx, Xi);
1400 ctx->ares = 0; 1460 ctx->ares = 0;
1401 } 1461 }
1402 1462
@@ -1404,7 +1464,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
1404#ifdef BSWAP4 1464#ifdef BSWAP4
1405 ctr = BSWAP4(ctx->Yi.d[3]); 1465 ctr = BSWAP4(ctx->Yi.d[3]);
1406#else 1466#else
1407 ctr = GETU32(ctx->Yi.c+12); 1467 ctr = GETU32(ctx->Yi.c + 12);
1408#endif 1468#endif
1409#else /* BIG_ENDIAN */ 1469#else /* BIG_ENDIAN */
1410 ctr = ctx->Yi.d[3]; 1470 ctr = ctx->Yi.d[3];
@@ -1414,74 +1474,76 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
1414 if (n) { 1474 if (n) {
1415 while (n && len) { 1475 while (n && len) {
1416 u8 c = *(in++); 1476 u8 c = *(in++);
1417 *(out++) = c^ctx->EKi.c[n]; 1477 *(out++) = c ^ ctx->EKi.c[n];
1418 ctx->Xi.c[n] ^= c; 1478 ctx->Xi.c[n] ^= c;
1419 --len; 1479 --len;
1420 n = (n+1)%16; 1480 n = (n + 1) % 16;
1421 } 1481 }
1422 if (n==0) GCM_MUL (ctx,Xi); 1482 if (n == 0)
1483 GCM_MUL(ctx, Xi);
1423 else { 1484 else {
1424 ctx->mres = n; 1485 ctx->mres = n;
1425 return 0; 1486 return 0;
1426 } 1487 }
1427 } 1488 }
1428#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT) 1489#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
1429 while (len>=GHASH_CHUNK) { 1490 while (len >= GHASH_CHUNK) {
1430 GHASH(ctx,in,GHASH_CHUNK); 1491 GHASH(ctx, in, GHASH_CHUNK);
1431 (*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c); 1492 (*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c);
1432 ctr += GHASH_CHUNK/16; 1493 ctr += GHASH_CHUNK/16;
1433#if BYTE_ORDER == LITTLE_ENDIAN 1494#if BYTE_ORDER == LITTLE_ENDIAN
1434#ifdef BSWAP4 1495#ifdef BSWAP4
1435 ctx->Yi.d[3] = BSWAP4(ctr); 1496 ctx->Yi.d[3] = BSWAP4(ctr);
1436#else 1497#else
1437 PUTU32(ctx->Yi.c+12,ctr); 1498 PUTU32(ctx->Yi.c + 12, ctr);
1438#endif 1499#endif
1439#else /* BIG_ENDIAN */ 1500#else /* BIG_ENDIAN */
1440 ctx->Yi.d[3] = ctr; 1501 ctx->Yi.d[3] = ctr;
1441#endif 1502#endif
1442 out += GHASH_CHUNK; 1503 out += GHASH_CHUNK;
1443 in += GHASH_CHUNK; 1504 in += GHASH_CHUNK;
1444 len -= GHASH_CHUNK; 1505 len -= GHASH_CHUNK;
1445 } 1506 }
1446#endif 1507#endif
1447 if ((i = (len&(size_t)-16))) { 1508 if ((i = (len & (size_t)-16))) {
1448 size_t j=i/16; 1509 size_t j = i/16;
1449 1510
1450#if defined(GHASH) 1511#if defined(GHASH)
1451 GHASH(ctx,in,i); 1512 GHASH(ctx, in, i);
1452#else 1513#else
1453 while (j--) { 1514 while (j--) {
1454 size_t k; 1515 size_t k;
1455 for (k=0;k<16;++k) ctx->Xi.c[k] ^= in[k]; 1516 for (k = 0; k < 16; ++k)
1456 GCM_MUL(ctx,Xi); 1517 ctx->Xi.c[k] ^= in[k];
1518 GCM_MUL(ctx, Xi);
1457 in += 16; 1519 in += 16;
1458 } 1520 }
1459 j = i/16; 1521 j = i/16;
1460 in -= i; 1522 in -= i;
1461#endif 1523#endif
1462 (*stream)(in,out,j,key,ctx->Yi.c); 1524 (*stream)(in, out, j, key, ctx->Yi.c);
1463 ctr += (unsigned int)j; 1525 ctr += (unsigned int)j;
1464#if BYTE_ORDER == LITTLE_ENDIAN 1526#if BYTE_ORDER == LITTLE_ENDIAN
1465#ifdef BSWAP4 1527#ifdef BSWAP4
1466 ctx->Yi.d[3] = BSWAP4(ctr); 1528 ctx->Yi.d[3] = BSWAP4(ctr);
1467#else 1529#else
1468 PUTU32(ctx->Yi.c+12,ctr); 1530 PUTU32(ctx->Yi.c + 12, ctr);
1469#endif 1531#endif
1470#else /* BIG_ENDIAN */ 1532#else /* BIG_ENDIAN */
1471 ctx->Yi.d[3] = ctr; 1533 ctx->Yi.d[3] = ctr;
1472#endif 1534#endif
1473 out += i; 1535 out += i;
1474 in += i; 1536 in += i;
1475 len -= i; 1537 len -= i;
1476 } 1538 }
1477 if (len) { 1539 if (len) {
1478 (*ctx->block)(ctx->Yi.c,ctx->EKi.c,key); 1540 (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
1479 ++ctr; 1541 ++ctr;
1480#if BYTE_ORDER == LITTLE_ENDIAN 1542#if BYTE_ORDER == LITTLE_ENDIAN
1481#ifdef BSWAP4 1543#ifdef BSWAP4
1482 ctx->Yi.d[3] = BSWAP4(ctr); 1544 ctx->Yi.d[3] = BSWAP4(ctr);
1483#else 1545#else
1484 PUTU32(ctx->Yi.c+12,ctr); 1546 PUTU32(ctx->Yi.c + 12, ctr);
1485#endif 1547#endif
1486#else /* BIG_ENDIAN */ 1548#else /* BIG_ENDIAN */
1487 ctx->Yi.d[3] = ctr; 1549 ctx->Yi.d[3] = ctr;
@@ -1489,7 +1551,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
1489 while (len--) { 1551 while (len--) {
1490 u8 c = in[n]; 1552 u8 c = in[n];
1491 ctx->Xi.c[n] ^= c; 1553 ctx->Xi.c[n] ^= c;
1492 out[n] = c^ctx->EKi.c[n]; 1554 out[n] = c ^ ctx->EKi.c[n];
1493 ++n; 1555 ++n;
1494 } 1556 }
1495 } 1557 }
@@ -1498,17 +1560,18 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
1498 return 0; 1560 return 0;
1499} 1561}
1500 1562
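
The ctr128_f stream invoked above has a narrow contract: it processes exactly `blocks` full blocks, taking its starting counter big-endian from ivec[12..15] and incrementing only those 32 bits per block, while the caller tracks the counter itself and rewrites Yi afterwards, as the BSWAP4/PUTU32 lines show. A minimal reference stream under those assumptions, using AES from <openssl/aes.h> as the cipher; aes_ctr32_ref is an illustrative name, not something in this tree:

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>

static void
aes_ctr32_ref(const unsigned char *in, unsigned char *out, size_t blocks,
    const void *key, const unsigned char ivec[16])
{
	unsigned char ctrblk[16], ek[16];
	uint32_t ctr;
	size_t n;

	memcpy(ctrblk, ivec, 16);
	/* big-endian 32-bit counter lives in the last four bytes */
	ctr = (uint32_t)ctrblk[12] << 24 | (uint32_t)ctrblk[13] << 16 |
	    (uint32_t)ctrblk[14] << 8 | ctrblk[15];
	while (blocks--) {
		AES_encrypt(ctrblk, ek, key);
		for (n = 0; n < 16; n++)
			out[n] = in[n] ^ ek[n];
		in += 16;
		out += 16;
		ctr++;
		ctrblk[12] = (unsigned char)(ctr >> 24);
		ctrblk[13] = (unsigned char)(ctr >> 16);
		ctrblk[14] = (unsigned char)(ctr >> 8);
		ctrblk[15] = (unsigned char)ctr;
	}
}
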
1501int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx,const unsigned char *tag, 1563int
1502 size_t len) 1564CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
1565 size_t len)
1503{ 1566{
1504 u64 alen = ctx->len.u[0]<<3; 1567 u64 alen = ctx->len.u[0] << 3;
1505 u64 clen = ctx->len.u[1]<<3; 1568 u64 clen = ctx->len.u[1] << 3;
1506#ifdef GCM_FUNCREF_4BIT 1569#ifdef GCM_FUNCREF_4BIT
1507 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; 1570 void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
1508#endif 1571#endif
1509 1572
1510 if (ctx->mres || ctx->ares) 1573 if (ctx->mres || ctx->ares)
1511 GCM_MUL(ctx,Xi); 1574 GCM_MUL(ctx, Xi);
1512 1575
1513#if BYTE_ORDER == LITTLE_ENDIAN 1576#if BYTE_ORDER == LITTLE_ENDIAN
1514#ifdef BSWAP8 1577#ifdef BSWAP8
@@ -1521,42 +1584,46 @@ int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx,const unsigned char *tag,
1521 ctx->len.u[0] = alen; 1584 ctx->len.u[0] = alen;
1522 ctx->len.u[1] = clen; 1585 ctx->len.u[1] = clen;
1523 1586
1524 alen = (u64)GETU32(p) <<32|GETU32(p+4); 1587 alen = (u64)GETU32(p) << 32|GETU32(p + 4);
1525 clen = (u64)GETU32(p+8)<<32|GETU32(p+12); 1588 clen = (u64)GETU32(p + 8) << 32|GETU32(p + 12);
1526 } 1589 }
1527#endif 1590#endif
1528#endif 1591#endif
1529 1592
1530 ctx->Xi.u[0] ^= alen; 1593 ctx->Xi.u[0] ^= alen;
1531 ctx->Xi.u[1] ^= clen; 1594 ctx->Xi.u[1] ^= clen;
1532 GCM_MUL(ctx,Xi); 1595 GCM_MUL(ctx, Xi);
1533 1596
1534 ctx->Xi.u[0] ^= ctx->EK0.u[0]; 1597 ctx->Xi.u[0] ^= ctx->EK0.u[0];
1535 ctx->Xi.u[1] ^= ctx->EK0.u[1]; 1598 ctx->Xi.u[1] ^= ctx->EK0.u[1];
1536 1599
1537 if (tag && len<=sizeof(ctx->Xi)) 1600 if (tag && len <= sizeof(ctx->Xi))
1538 return memcmp(ctx->Xi.c,tag,len); 1601 return memcmp(ctx->Xi.c, tag, len);
1539 else 1602 else
1540 return -1; 1603 return -1;
1541} 1604}
1542 1605
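
In the GCM specification's notation, the finish path above computes the standard tag

	$T = \mathrm{GHASH}_H(A, C) \oplus E_K(Y_0)$

where the final GHASH block is the pair of 64-bit bit lengths len(A) || len(C) (the alen/clen values folded into Xi above) and $E_K(Y_0)$ is the encrypted initial counter block kept in EK0. The memcmp() against the caller's tag returns 0 on a match, so a zero return from CRYPTO_gcm128_finish() means the tag verified.
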
1543void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len) 1606void
1607CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len)
1544{ 1608{
1545 CRYPTO_gcm128_finish(ctx, NULL, 0); 1609 CRYPTO_gcm128_finish(ctx, NULL, 0);
1546 memcpy(tag, ctx->Xi.c, len<=sizeof(ctx->Xi.c)?len:sizeof(ctx->Xi.c)); 1610 memcpy(tag, ctx->Xi.c,
1611 len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
1547} 1612}
1548 1613
1549GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block) 1614GCM128_CONTEXT *
1615CRYPTO_gcm128_new(void *key, block128_f block)
1550{ 1616{
1551 GCM128_CONTEXT *ret; 1617 GCM128_CONTEXT *ret;
1552 1618
1553 if ((ret = malloc(sizeof(GCM128_CONTEXT)))) 1619 if ((ret = malloc(sizeof(GCM128_CONTEXT))))
1554 CRYPTO_gcm128_init(ret,key,block); 1620 CRYPTO_gcm128_init(ret, key, block);
1555 1621
1556 return ret; 1622 return ret;
1557} 1623}
1558 1624
1559void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx) 1625void
1626CRYPTO_gcm128_release(GCM128_CONTEXT *ctx)
1560{ 1627{
1561 freezero(ctx, sizeof(*ctx)); 1628 freezero(ctx, sizeof(*ctx));
1562} 1629}
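
Putting the entry points above together gives a one-shot AEAD seal. A minimal sketch, assuming AES supplies the block128_f via the usual cast of AES_encrypt from <openssl/aes.h>; gcm_seal itself is a hypothetical helper, not part of the tree:

#include <stddef.h>
#include <openssl/aes.h>
#include <openssl/modes.h>

/* Hypothetical helper: encrypt and authenticate in one call,
 * with a 96-bit IV and a full 16-byte tag.
 * Returns 0 on success, -1 on failure. */
static int
gcm_seal(const unsigned char key[16], const unsigned char iv[12],
    const unsigned char *aad, size_t aadlen,
    const unsigned char *pt, unsigned char *ct, size_t len,
    unsigned char tag[16])
{
	AES_KEY aes;
	GCM128_CONTEXT *gcm;
	int ret = -1;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;
	if ((gcm = CRYPTO_gcm128_new(&aes, (block128_f)AES_encrypt)) == NULL)
		return -1;
	CRYPTO_gcm128_setiv(gcm, iv, 12);
	if (CRYPTO_gcm128_aad(gcm, aad, aadlen) == 0 &&
	    CRYPTO_gcm128_encrypt(gcm, pt, ct, len) == 0) {
		CRYPTO_gcm128_tag(gcm, tag, 16);
		ret = 0;
	}
	CRYPTO_gcm128_release(gcm);
	return ret;
}
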
diff --git a/src/lib/libcrypto/modes/modes.h b/src/lib/libcrypto/modes/modes.h
index 44d8326b5b..53fa9afb0d 100644
--- a/src/lib/libcrypto/modes/modes.h
+++ b/src/lib/libcrypto/modes/modes.h
@@ -1,4 +1,4 @@
1/* $OpenBSD: modes.h,v 1.5 2023/04/25 17:54:10 tb Exp $ */ 1/* $OpenBSD: modes.h,v 1.6 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -13,105 +13,105 @@ extern "C" {
13#endif 13#endif
14 14
15typedef void (*block128_f)(const unsigned char in[16], 15typedef void (*block128_f)(const unsigned char in[16],
16 unsigned char out[16], 16 unsigned char out[16],
17 const void *key); 17 const void *key);
18 18
19typedef void (*cbc128_f)(const unsigned char *in, unsigned char *out, 19typedef void (*cbc128_f)(const unsigned char *in, unsigned char *out,
20 size_t len, const void *key, 20 size_t len, const void *key,
21 unsigned char ivec[16], int enc); 21 unsigned char ivec[16], int enc);
22 22
23typedef void (*ctr128_f)(const unsigned char *in, unsigned char *out, 23typedef void (*ctr128_f)(const unsigned char *in, unsigned char *out,
24 size_t blocks, const void *key, 24 size_t blocks, const void *key,
25 const unsigned char ivec[16]); 25 const unsigned char ivec[16]);
26 26
27typedef void (*ccm128_f)(const unsigned char *in, unsigned char *out, 27typedef void (*ccm128_f)(const unsigned char *in, unsigned char *out,
28 size_t blocks, const void *key, 28 size_t blocks, const void *key,
29 const unsigned char ivec[16],unsigned char cmac[16]); 29 const unsigned char ivec[16], unsigned char cmac[16]);
30 30
31void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out, 31void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
32 size_t len, const void *key, 32 size_t len, const void *key,
33 unsigned char ivec[16], block128_f block); 33 unsigned char ivec[16], block128_f block);
34void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out, 34void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
35 size_t len, const void *key, 35 size_t len, const void *key,
36 unsigned char ivec[16], block128_f block); 36 unsigned char ivec[16], block128_f block);
37 37
38void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out, 38void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
39 size_t len, const void *key, 39 size_t len, const void *key,
40 unsigned char ivec[16], unsigned char ecount_buf[16], 40 unsigned char ivec[16], unsigned char ecount_buf[16],
41 unsigned int *num, block128_f block); 41 unsigned int *num, block128_f block);
42 42
43void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out, 43void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
44 size_t len, const void *key, 44 size_t len, const void *key,
45 unsigned char ivec[16], unsigned char ecount_buf[16], 45 unsigned char ivec[16], unsigned char ecount_buf[16],
46 unsigned int *num, ctr128_f ctr); 46 unsigned int *num, ctr128_f ctr);
47 47
48void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out, 48void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
49 size_t len, const void *key, 49 size_t len, const void *key,
50 unsigned char ivec[16], int *num, 50 unsigned char ivec[16], int *num,
51 block128_f block); 51 block128_f block);
52 52
53void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out, 53void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
54 size_t len, const void *key, 54 size_t len, const void *key,
55 unsigned char ivec[16], int *num, 55 unsigned char ivec[16], int *num,
56 int enc, block128_f block); 56 int enc, block128_f block);
57void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out, 57void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out,
58 size_t length, const void *key, 58 size_t length, const void *key,
59 unsigned char ivec[16], int *num, 59 unsigned char ivec[16], int *num,
60 int enc, block128_f block); 60 int enc, block128_f block);
61void CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out, 61void CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out,
62 size_t bits, const void *key, 62 size_t bits, const void *key,
63 unsigned char ivec[16], int *num, 63 unsigned char ivec[16], int *num,
64 int enc, block128_f block); 64 int enc, block128_f block);
65 65
66typedef struct gcm128_context GCM128_CONTEXT; 66typedef struct gcm128_context GCM128_CONTEXT;
67 67
68GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block); 68GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block);
69void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block); 69void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block);
70void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, 70void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
71 size_t len); 71 size_t len);
72int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, 72int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad,
73 size_t len); 73 size_t len);
74int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, 74int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
75 const unsigned char *in, unsigned char *out, 75 const unsigned char *in, unsigned char *out,
76 size_t len); 76 size_t len);
77int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, 77int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
78 const unsigned char *in, unsigned char *out, 78 const unsigned char *in, unsigned char *out,
79 size_t len); 79 size_t len);
80int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, 80int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
81 const unsigned char *in, unsigned char *out, 81 const unsigned char *in, unsigned char *out,
82 size_t len, ctr128_f stream); 82 size_t len, ctr128_f stream);
83int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, 83int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
84 const unsigned char *in, unsigned char *out, 84 const unsigned char *in, unsigned char *out,
85 size_t len, ctr128_f stream); 85 size_t len, ctr128_f stream);
86int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx,const unsigned char *tag, 86int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
87 size_t len); 87 size_t len);
88void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len); 88void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len);
89void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx); 89void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx);
90 90
91typedef struct ccm128_context CCM128_CONTEXT; 91typedef struct ccm128_context CCM128_CONTEXT;
92 92
93void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx, 93void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx,
94 unsigned int M, unsigned int L, void *key,block128_f block); 94 unsigned int M, unsigned int L, void *key, block128_f block);
95int CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx, 95int CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx,
96 const unsigned char *nonce, size_t nlen, size_t mlen); 96 const unsigned char *nonce, size_t nlen, size_t mlen);
97void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx, 97void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx,
98 const unsigned char *aad, size_t alen); 98 const unsigned char *aad, size_t alen);
99int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx, 99int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
100 const unsigned char *inp, unsigned char *out, size_t len); 100 const unsigned char *inp, unsigned char *out, size_t len);
101int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx, 101int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
102 const unsigned char *inp, unsigned char *out, size_t len); 102 const unsigned char *inp, unsigned char *out, size_t len);
103int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx, 103int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx,
104 const unsigned char *inp, unsigned char *out, size_t len, 104 const unsigned char *inp, unsigned char *out, size_t len,
105 ccm128_f stream); 105 ccm128_f stream);
106int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx, 106int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx,
107 const unsigned char *inp, unsigned char *out, size_t len, 107 const unsigned char *inp, unsigned char *out, size_t len,
108 ccm128_f stream); 108 ccm128_f stream);
109size_t CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx, unsigned char *tag, size_t len); 109size_t CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx, unsigned char *tag, size_t len);
110 110
111typedef struct xts128_context XTS128_CONTEXT; 111typedef struct xts128_context XTS128_CONTEXT;
112 112
113int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], 113int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16],
114 const unsigned char *inp, unsigned char *out, size_t len, int enc); 114 const unsigned char *inp, unsigned char *out, size_t len, int enc);
115 115
116#ifdef __cplusplus 116#ifdef __cplusplus
117} 117}
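
The decrypt side of the API above mirrors the seal sketch earlier, with CRYPTO_gcm128_finish() doing the tag comparison. A sketch of a hypothetical gcm_open helper under the same assumptions (AES via <openssl/aes.h>, 96-bit IV, 16-byte tag):

#include <stddef.h>
#include <openssl/aes.h>
#include <openssl/modes.h>

/* Hypothetical helper: decrypt, then verify; returns 0 only if the
 * tag matches. Decryption runs before verification, so on a nonzero
 * return the caller must discard whatever landed in pt. */
static int
gcm_open(const unsigned char key[16], const unsigned char iv[12],
    const unsigned char *aad, size_t aadlen,
    const unsigned char *ct, unsigned char *pt, size_t len,
    const unsigned char tag[16])
{
	AES_KEY aes;
	GCM128_CONTEXT *gcm;
	int ret = -1;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;
	if ((gcm = CRYPTO_gcm128_new(&aes, (block128_f)AES_encrypt)) == NULL)
		return -1;
	CRYPTO_gcm128_setiv(gcm, iv, 12);
	if (CRYPTO_gcm128_aad(gcm, aad, aadlen) == 0 &&
	    CRYPTO_gcm128_decrypt(gcm, ct, pt, len) == 0 &&
	    CRYPTO_gcm128_finish(gcm, tag, 16) == 0)
		ret = 0;
	CRYPTO_gcm128_release(gcm);
	return ret;
}
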
diff --git a/src/lib/libcrypto/modes/modes_local.h b/src/lib/libcrypto/modes/modes_local.h
index 943f139245..511855f2e0 100644
--- a/src/lib/libcrypto/modes/modes_local.h
+++ b/src/lib/libcrypto/modes/modes_local.h
@@ -1,4 +1,4 @@
1/* $OpenBSD: modes_local.h,v 1.1 2022/11/26 16:08:53 tb Exp $ */ 1/* $OpenBSD: modes_local.h,v 1.2 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2010 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2010 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -30,28 +30,28 @@ typedef unsigned char u8;
30#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) 30#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
31#if defined(__GNUC__) && __GNUC__>=2 31#if defined(__GNUC__) && __GNUC__>=2
32# if defined(__x86_64) || defined(__x86_64__) 32# if defined(__x86_64) || defined(__x86_64__)
33# define BSWAP8(x) ({ u64 ret=(x); \ 33# define BSWAP8(x) ({ u64 ret=(x); \
34 asm ("bswapq %0" \ 34 asm ("bswapq %0" \
35 : "+r"(ret)); ret; }) 35 : "+r"(ret)); ret; })
36# define BSWAP4(x) ({ u32 ret=(x); \ 36# define BSWAP4(x) ({ u32 ret=(x); \
37 asm ("bswapl %0" \ 37 asm ("bswapl %0" \
38 : "+r"(ret)); ret; }) 38 : "+r"(ret)); ret; })
39# elif (defined(__i386) || defined(__i386__)) 39# elif (defined(__i386) || defined(__i386__))
40# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \ 40# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
41 asm ("bswapl %0; bswapl %1" \ 41 asm ("bswapl %0; bswapl %1" \
42 : "+r"(hi),"+r"(lo)); \ 42 : "+r"(hi),"+r"(lo)); \
43 (u64)hi<<32|lo; }) 43 (u64)hi<<32|lo; })
44# define BSWAP4(x) ({ u32 ret=(x); \ 44# define BSWAP4(x) ({ u32 ret=(x); \
45 asm ("bswapl %0" \ 45 asm ("bswapl %0" \
46 : "+r"(ret)); ret; }) 46 : "+r"(ret)); ret; })
47# elif (defined(__arm__) || defined(__arm)) && !defined(__STRICT_ALIGNMENT) 47# elif (defined(__arm__) || defined(__arm)) && !defined(__STRICT_ALIGNMENT)
48# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \ 48# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
49 asm ("rev %0,%0; rev %1,%1" \ 49 asm ("rev %0,%0; rev %1,%1" \
50 : "+r"(hi),"+r"(lo)); \ 50 : "+r"(hi),"+r"(lo)); \
51 (u64)hi<<32|lo; }) 51 (u64)hi<<32|lo; })
52# define BSWAP4(x) ({ u32 ret; \ 52# define BSWAP4(x) ({ u32 ret; \
53 asm ("rev %0,%1" \ 53 asm ("rev %0,%1" \
54 : "=r"(ret) : "r"((u32)(x))); \ 54 : "=r"(ret) : "r"((u32)(x))); \
55 ret; }) 55 ret; })
56# endif 56# endif
57#endif 57#endif
@@ -67,7 +67,9 @@ typedef unsigned char u8;
67 67
68/* GCM definitions */ 68/* GCM definitions */
69 69
70typedef struct { u64 hi,lo; } u128; 70typedef struct {
71 u64 hi, lo;
72} u128;
71 73
72#ifdef TABLE_BITS 74#ifdef TABLE_BITS
73#undef TABLE_BITS 75#undef TABLE_BITS
@@ -80,16 +82,21 @@ typedef struct { u64 hi,lo; } u128;
80 82
81struct gcm128_context { 83struct gcm128_context {
82 /* Following 6 names follow names in GCM specification */ 84 /* Following 6 names follow names in GCM specification */
83 union { u64 u[2]; u32 d[4]; u8 c[16]; size_t t[16/sizeof(size_t)]; } 85 union {
84 Yi,EKi,EK0,len,Xi,H; 86 u64 u[2];
87 u32 d[4];
88 u8 c[16];
89 size_t t[16/sizeof(size_t)];
90 } Yi, EKi, EK0, len, Xi, H;
85 /* Relative position of Xi, H and pre-computed Htable is used 91 /* Relative position of Xi, H and pre-computed Htable is used
86 * in some assembler modules, i.e. don't change the order! */ 92 * in some assembler modules, i.e. don't change the order! */
87#if TABLE_BITS==8 93#if TABLE_BITS==8
88 u128 Htable[256]; 94 u128 Htable[256];
89#else 95#else
90 u128 Htable[16]; 96 u128 Htable[16];
91 void (*gmult)(u64 Xi[2],const u128 Htable[16]); 97 void (*gmult)(u64 Xi[2], const u128 Htable[16]);
92 void (*ghash)(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); 98 void (*ghash)(u64 Xi[2], const u128 Htable[16], const u8 *inp,
99 size_t len);
93#endif 100#endif
94 unsigned int mres, ares; 101 unsigned int mres, ares;
95 block128_f block; 102 block128_f block;
@@ -98,11 +105,14 @@ struct gcm128_context {
98 105
99struct xts128_context { 106struct xts128_context {
100 void *key1, *key2; 107 void *key1, *key2;
101 block128_f block1,block2; 108 block128_f block1, block2;
102}; 109};
103 110
104struct ccm128_context { 111struct ccm128_context {
105 union { u64 u[2]; u8 c[16]; } nonce, cmac; 112 union {
113 u64 u[2];
114 u8 c[16];
115 } nonce, cmac;
106 u64 blocks; 116 u64 blocks;
107 block128_f block; 117 block128_f block;
108 void *key; 118 void *key;
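
The bswapq, bswapl and rev instructions in the macros above are all plain byte swaps. For reference, a portable C sketch of what the two macros compute, written with <stdint.h> types rather than the header's local u32/u64:

#include <stdint.h>

static inline uint32_t
bswap4(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	    ((x << 8) & 0xff0000) | (x << 24);
}

static inline uint64_t
bswap8(uint64_t x)
{
	/* swap each 32-bit half, then exchange the halves */
	return (uint64_t)bswap4((uint32_t)x) << 32 |
	    bswap4((uint32_t)(x >> 32));
}
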
diff --git a/src/lib/libcrypto/modes/ofb128.c b/src/lib/libcrypto/modes/ofb128.c
index 3cf5d98150..9ef812a08b 100644
--- a/src/lib/libcrypto/modes/ofb128.c
+++ b/src/lib/libcrypto/modes/ofb128.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: ofb128.c,v 1.5 2022/11/26 16:08:53 tb Exp $ */ 1/* $OpenBSD: ofb128.c,v 1.6 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -7,7 +7,7 @@
7 * are met: 7 * are met:
8 * 8 *
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 11 *
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in 13 * notice, this list of conditions and the following disclaimer in
@@ -63,57 +63,61 @@
63 * used. The extra state information to record how much of the 63 * used. The extra state information to record how much of the
64 * 128bit block we have used is contained in *num; 64 * 128bit block we have used is contained in *num;
65 */ 65 */
66void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out, 66void
67 size_t len, const void *key, 67CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
68 unsigned char ivec[16], int *num, 68 size_t len, const void *key,
69 block128_f block) 69 unsigned char ivec[16], int *num,
70 block128_f block)
70{ 71{
71 unsigned int n; 72 unsigned int n;
72 size_t l=0; 73 size_t l = 0;
73 74
74 n = *num; 75 n = *num;
75 76
76#if !defined(OPENSSL_SMALL_FOOTPRINT) 77#if !defined(OPENSSL_SMALL_FOOTPRINT)
77 if (16%sizeof(size_t) == 0) do { /* always true actually */ 78 if (16 % sizeof(size_t) == 0)
78 while (n && len) { 79 do { /* always true actually */
79 *(out++) = *(in++) ^ ivec[n]; 80 while (n && len) {
80 --len; 81 *(out++) = *(in++) ^ ivec[n];
81 n = (n+1) % 16; 82 --len;
82 } 83 n = (n + 1) % 16;
84 }
83#ifdef __STRICT_ALIGNMENT 85#ifdef __STRICT_ALIGNMENT
84 if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) 86 if (((size_t)in|(size_t)out|(size_t)ivec) %
85 break; 87 sizeof(size_t) != 0)
88 break;
86#endif 89#endif
87 while (len>=16) { 90 while (len >= 16) {
88 (*block)(ivec, ivec, key); 91 (*block)(ivec, ivec, key);
89 for (; n<16; n+=sizeof(size_t)) 92 for (; n < 16; n += sizeof(size_t))
90 *(size_t*)(out+n) = 93 *(size_t *)(out + n) =
91 *(size_t*)(in+n) ^ *(size_t*)(ivec+n); 94 *(size_t *)(in + n) ^ *(size_t *)(ivec +
92 len -= 16; 95 n);
93 out += 16; 96 len -= 16;
94 in += 16; 97 out += 16;
95 n = 0; 98 in += 16;
96 } 99 n = 0;
97 if (len) {
98 (*block)(ivec, ivec, key);
99 while (len--) {
100 out[n] = in[n] ^ ivec[n];
101 ++n;
102 } 100 }
103 } 101 if (len) {
104 *num = n; 102 (*block)(ivec, ivec, key);
105 return; 103 while (len--) {
106 } while(0); 104 out[n] = in[n] ^ ivec[n];
105 ++n;
106 }
107 }
108 *num = n;
109 return;
110 } while (0);
107 /* the rest would be commonly eliminated by x86* compiler */ 111 /* the rest would be commonly eliminated by x86* compiler */
108#endif 112#endif
109 while (l<len) { 113 while (l < len) {
110 if (n==0) { 114 if (n == 0) {
111 (*block)(ivec, ivec, key); 115 (*block)(ivec, ivec, key);
112 } 116 }
113 out[l] = in[l] ^ ivec[n]; 117 out[l] = in[l] ^ ivec[n];
114 ++l; 118 ++l;
115 n = (n+1) % 16; 119 n = (n + 1) % 16;
116 } 120 }
117 121
118 *num=n; 122 *num = n;
119} 123}
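
Since OFB XORs the same keystream regardless of direction, the routine above is its own inverse: calling it again on the ciphertext with the same starting ivec and *num recovers the plaintext. A small demonstration, again assuming AES as the block128_f; ofb_roundtrip is an illustrative helper only:

#include <string.h>
#include <openssl/aes.h>
#include <openssl/modes.h>

static void
ofb_roundtrip(const unsigned char key[16], const unsigned char iv0[16],
    const unsigned char *msg, unsigned char *ct, unsigned char *pt,
    size_t len)
{
	AES_KEY aes;
	unsigned char iv[16];
	int num;

	AES_set_encrypt_key(key, 128, &aes);

	memcpy(iv, iv0, 16);	/* ivec and *num are updated in place */
	num = 0;
	CRYPTO_ofb128_encrypt(msg, ct, len, &aes, iv, &num,
	    (block128_f)AES_encrypt);

	memcpy(iv, iv0, 16);	/* reset both to replay the keystream */
	num = 0;
	CRYPTO_ofb128_encrypt(ct, pt, len, &aes, iv, &num,
	    (block128_f)AES_encrypt);
}
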
diff --git a/src/lib/libcrypto/modes/xts128.c b/src/lib/libcrypto/modes/xts128.c
index 71881227fb..449a802f37 100644
--- a/src/lib/libcrypto/modes/xts128.c
+++ b/src/lib/libcrypto/modes/xts128.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: xts128.c,v 1.10 2023/05/07 14:38:04 tb Exp $ */ 1/* $OpenBSD: xts128.c,v 1.11 2023/07/08 14:55:36 beck Exp $ */
2/* ==================================================================== 2/* ====================================================================
3 * Copyright (c) 2011 The OpenSSL Project. All rights reserved. 3 * Copyright (c) 2011 The OpenSSL Project. All rights reserved.
4 * 4 *
@@ -7,7 +7,7 @@
7 * are met: 7 * are met:
8 * 8 *
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 11 *
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in 13 * notice, this list of conditions and the following disclaimer in
@@ -60,125 +60,135 @@
60# endif 60# endif
61#endif 61#endif
62 62
63int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], 63int
64 const unsigned char *inp, unsigned char *out, 64CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16],
65 size_t len, int enc) 65 const unsigned char *inp, unsigned char *out,
66 size_t len, int enc)
66{ 67{
67 union { u64 u[2]; u32 d[4]; u8 c[16]; } tweak, scratch; 68 union {
69 u64 u[2];
70 u32 d[4];
71 u8 c[16];
72 } tweak, scratch;
68 unsigned int i; 73 unsigned int i;
69 74
70 if (len<16) return -1; 75 if (len < 16)
76 return -1;
71 77
72 memcpy(tweak.c, iv, 16); 78 memcpy(tweak.c, iv, 16);
73 79
74 (*ctx->block2)(tweak.c,tweak.c,ctx->key2); 80 (*ctx->block2)(tweak.c, tweak.c, ctx->key2);
75 81
76 if (!enc && (len%16)) len-=16; 82 if (!enc && (len % 16))
83 len -= 16;
77 84
78 while (len>=16) { 85 while (len >= 16) {
79#ifdef __STRICT_ALIGNMENT 86#ifdef __STRICT_ALIGNMENT
80 memcpy(scratch.c,inp,16); 87 memcpy(scratch.c, inp, 16);
81 scratch.u[0] ^= tweak.u[0]; 88 scratch.u[0] ^= tweak.u[0];
82 scratch.u[1] ^= tweak.u[1]; 89 scratch.u[1] ^= tweak.u[1];
83#else 90#else
84 scratch.u[0] = ((u64*)inp)[0]^tweak.u[0]; 91 scratch.u[0] = ((u64 *)inp)[0] ^ tweak.u[0];
85 scratch.u[1] = ((u64*)inp)[1]^tweak.u[1]; 92 scratch.u[1] = ((u64 *)inp)[1] ^ tweak.u[1];
86#endif 93#endif
87 (*ctx->block1)(scratch.c,scratch.c,ctx->key1); 94 (*ctx->block1)(scratch.c, scratch.c, ctx->key1);
88#ifdef __STRICT_ALIGNMENT 95#ifdef __STRICT_ALIGNMENT
89 scratch.u[0] ^= tweak.u[0]; 96 scratch.u[0] ^= tweak.u[0];
90 scratch.u[1] ^= tweak.u[1]; 97 scratch.u[1] ^= tweak.u[1];
91 memcpy(out,scratch.c,16); 98 memcpy(out, scratch.c, 16);
92#else 99#else
93 ((u64*)out)[0] = scratch.u[0]^=tweak.u[0]; 100 ((u64 *)out)[0] = scratch.u[0] ^= tweak.u[0];
94 ((u64*)out)[1] = scratch.u[1]^=tweak.u[1]; 101 ((u64 *)out)[1] = scratch.u[1] ^= tweak.u[1];
95#endif 102#endif
96 inp += 16; 103 inp += 16;
97 out += 16; 104 out += 16;
98 len -= 16; 105 len -= 16;
99 106
100 if (len==0) return 0; 107 if (len == 0)
108 return 0;
101 109
102#if BYTE_ORDER == LITTLE_ENDIAN 110#if BYTE_ORDER == LITTLE_ENDIAN
103 unsigned int carry,res; 111 unsigned int carry, res;
104 112
105 res = 0x87&(((int)tweak.d[3])>>31); 113 res = 0x87 & (((int)tweak.d[3]) >> 31);
106 carry = (unsigned int)(tweak.u[0]>>63); 114 carry = (unsigned int)(tweak.u[0] >> 63);
107 tweak.u[0] = (tweak.u[0]<<1)^res; 115 tweak.u[0] = (tweak.u[0] << 1) ^ res;
108 tweak.u[1] = (tweak.u[1]<<1)|carry; 116 tweak.u[1] = (tweak.u[1] << 1)|carry;
109#else /* BIG_ENDIAN */ 117#else /* BIG_ENDIAN */
110 size_t c; 118 size_t c;
111 119
112 for (c=0,i=0;i<16;++i) { 120 for (c = 0, i = 0; i < 16; ++i) {
113 /*+ substitutes for |, because c is 1 bit */ 121 /*+ substitutes for |, because c is 1 bit */
114 c += ((size_t)tweak.c[i])<<1; 122 c += ((size_t)tweak.c[i]) << 1;
115 tweak.c[i] = (u8)c; 123 tweak.c[i] = (u8)c;
116 c = c>>8; 124 c = c >> 8;
117 } 125 }
118 tweak.c[0] ^= (u8)(0x87&(0-c)); 126 tweak.c[0] ^= (u8)(0x87 & (0 - c));
119#endif 127#endif
120 } 128 }
121 if (enc) { 129 if (enc) {
122 for (i=0;i<len;++i) { 130 for (i = 0; i < len; ++i) {
123 u8 ch = inp[i]; 131 u8 ch = inp[i];
124 out[i] = scratch.c[i]; 132 out[i] = scratch.c[i];
125 scratch.c[i] = ch; 133 scratch.c[i] = ch;
126 } 134 }
127 scratch.u[0] ^= tweak.u[0]; 135 scratch.u[0] ^= tweak.u[0];
128 scratch.u[1] ^= tweak.u[1]; 136 scratch.u[1] ^= tweak.u[1];
129 (*ctx->block1)(scratch.c,scratch.c,ctx->key1); 137 (*ctx->block1)(scratch.c, scratch.c, ctx->key1);
130 scratch.u[0] ^= tweak.u[0]; 138 scratch.u[0] ^= tweak.u[0];
131 scratch.u[1] ^= tweak.u[1]; 139 scratch.u[1] ^= tweak.u[1];
132 memcpy(out-16,scratch.c,16); 140 memcpy(out - 16, scratch.c, 16);
133 } 141 } else {
134 else { 142 union {
135 union { u64 u[2]; u8 c[16]; } tweak1; 143 u64 u[2];
144 u8 c[16];
145 } tweak1;
136 146
137#if BYTE_ORDER == LITTLE_ENDIAN 147#if BYTE_ORDER == LITTLE_ENDIAN
138 unsigned int carry,res; 148 unsigned int carry, res;
139 149
140 res = 0x87&(((int)tweak.d[3])>>31); 150 res = 0x87 & (((int)tweak.d[3]) >> 31);
141 carry = (unsigned int)(tweak.u[0]>>63); 151 carry = (unsigned int)(tweak.u[0] >> 63);
142 tweak1.u[0] = (tweak.u[0]<<1)^res; 152 tweak1.u[0] = (tweak.u[0] << 1) ^ res;
143 tweak1.u[1] = (tweak.u[1]<<1)|carry; 153 tweak1.u[1] = (tweak.u[1] << 1)|carry;
144#else 154#else
145 size_t c; 155 size_t c;
146 156
147 for (c=0,i=0;i<16;++i) { 157 for (c = 0, i = 0; i < 16; ++i) {
148 /*+ substitutes for |, because c is 1 bit */ 158 /*+ substitutes for |, because c is 1 bit */
149 c += ((size_t)tweak.c[i])<<1; 159 c += ((size_t)tweak.c[i]) << 1;
150 tweak1.c[i] = (u8)c; 160 tweak1.c[i] = (u8)c;
151 c = c>>8; 161 c = c >> 8;
152 } 162 }
153 tweak1.c[0] ^= (u8)(0x87&(0-c)); 163 tweak1.c[0] ^= (u8)(0x87 & (0 - c));
154#endif 164#endif
155#ifdef __STRICT_ALIGNMENT 165#ifdef __STRICT_ALIGNMENT
156 memcpy(scratch.c,inp,16); 166 memcpy(scratch.c, inp, 16);
157 scratch.u[0] ^= tweak1.u[0]; 167 scratch.u[0] ^= tweak1.u[0];
158 scratch.u[1] ^= tweak1.u[1]; 168 scratch.u[1] ^= tweak1.u[1];
159#else 169#else
160 scratch.u[0] = ((u64*)inp)[0]^tweak1.u[0]; 170 scratch.u[0] = ((u64 *)inp)[0] ^ tweak1.u[0];
161 scratch.u[1] = ((u64*)inp)[1]^tweak1.u[1]; 171 scratch.u[1] = ((u64 *)inp)[1] ^ tweak1.u[1];
162#endif 172#endif
163 (*ctx->block1)(scratch.c,scratch.c,ctx->key1); 173 (*ctx->block1)(scratch.c, scratch.c, ctx->key1);
164 scratch.u[0] ^= tweak1.u[0]; 174 scratch.u[0] ^= tweak1.u[0];
165 scratch.u[1] ^= tweak1.u[1]; 175 scratch.u[1] ^= tweak1.u[1];
166 176
167 for (i=0;i<len;++i) { 177 for (i = 0; i < len; ++i) {
168 u8 ch = inp[16+i]; 178 u8 ch = inp[16 + i];
169 out[16+i] = scratch.c[i]; 179 out[16 + i] = scratch.c[i];
170 scratch.c[i] = ch; 180 scratch.c[i] = ch;
171 } 181 }
172 scratch.u[0] ^= tweak.u[0]; 182 scratch.u[0] ^= tweak.u[0];
173 scratch.u[1] ^= tweak.u[1]; 183 scratch.u[1] ^= tweak.u[1];
174 (*ctx->block1)(scratch.c,scratch.c,ctx->key1); 184 (*ctx->block1)(scratch.c, scratch.c, ctx->key1);
175#ifdef __STRICT_ALIGNMENT 185#ifdef __STRICT_ALIGNMENT
176 scratch.u[0] ^= tweak.u[0]; 186 scratch.u[0] ^= tweak.u[0];
177 scratch.u[1] ^= tweak.u[1]; 187 scratch.u[1] ^= tweak.u[1];
178 memcpy (out,scratch.c,16); 188 memcpy(out, scratch.c, 16);
179#else 189#else
180 ((u64*)out)[0] = scratch.u[0]^tweak.u[0]; 190 ((u64 *)out)[0] = scratch.u[0] ^ tweak.u[0];
181 ((u64*)out)[1] = scratch.u[1]^tweak.u[1]; 191 ((u64 *)out)[1] = scratch.u[1] ^ tweak.u[1];
182#endif 192#endif
183 } 193 }
184 194
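
Both byte orders above implement the same tweak update: multiplying the 128-bit tweak by x in GF(2^128) and reducing by the XTS polynomial x^128 + x^7 + x^2 + x + 1, which is where the 0x87 constant comes from. A standalone byte-wise sketch equivalent to the BIG_ENDIAN fallback loop (the mode treats byte 0 of the tweak as least significant):

#include <stdint.h>

static void
xts_double(uint8_t t[16])
{
	unsigned int c = 0, hi, i;

	for (i = 0; i < 16; i++) {
		hi = t[i] >> 7;			/* bit about to shift out */
		t[i] = (uint8_t)(t[i] << 1 | c);
		c = hi;
	}
	if (c)
		t[0] ^= 0x87;			/* fold x^128 back in */
}
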