Diffstat (limited to 'src/lib/libssl/s3_cbc.c')
-rw-r--r--  src/lib/libssl/s3_cbc.c  445
1 files changed, 207 insertions, 238 deletions
diff --git a/src/lib/libssl/s3_cbc.c b/src/lib/libssl/s3_cbc.c
index 443a31e746..d6cc9b4771 100644
--- a/src/lib/libssl/s3_cbc.c
+++ b/src/lib/libssl/s3_cbc.c
@@ -73,30 +73,33 @@
73 | * bits. They use the fact that arithmetic shift shifts-in the sign bit. | 73 | * bits. They use the fact that arithmetic shift shifts-in the sign bit. |
74 | * However, this is not ensured by the C standard so you may need to replace | 74 | * However, this is not ensured by the C standard so you may need to replace |
75 | * them with something else on odd CPUs. */ | 75 | * them with something else on odd CPUs. */ |
76 | #define DUPLICATE_MSB_TO_ALL(x) ( (unsigned)( (int)(x) >> (sizeof(int)*8-1) ) ) | 76 | #define DUPLICATE_MSB_TO_ALL(x) ((unsigned)((int)(x) >> (sizeof(int) * 8 - 1))) |
77 | #define DUPLICATE_MSB_TO_ALL_8(x) ((unsigned char)(DUPLICATE_MSB_TO_ALL(x))) | 77 | #define DUPLICATE_MSB_TO_ALL_8(x) ((unsigned char)(DUPLICATE_MSB_TO_ALL(x))) |
78 | 78 | ||
79 | /* constant_time_lt returns 0xff if a<b and 0x00 otherwise. */ | 79 | /* constant_time_lt returns 0xff if a<b and 0x00 otherwise. */ |
80 | static unsigned constant_time_lt(unsigned a, unsigned b) | 80 | static unsigned |
81 | { | 81 | constant_time_lt(unsigned a, unsigned b) |
82 | { | ||
82 | a -= b; | 83 | a -= b; |
83 | return DUPLICATE_MSB_TO_ALL(a); | 84 | return DUPLICATE_MSB_TO_ALL(a); |
84 | } | 85 | } |
85 | 86 | ||
86 | /* constant_time_ge returns 0xff if a>=b and 0x00 otherwise. */ | 87 | /* constant_time_ge returns 0xff if a>=b and 0x00 otherwise. */ |
87 | static unsigned constant_time_ge(unsigned a, unsigned b) | 88 | static unsigned |
88 | { | 89 | constant_time_ge(unsigned a, unsigned b) |
90 | { | ||
89 | a -= b; | 91 | a -= b; |
90 | return DUPLICATE_MSB_TO_ALL(~a); | 92 | return DUPLICATE_MSB_TO_ALL(~a); |
91 | } | 93 | } |
92 | 94 | ||
93 | /* constant_time_eq_8 returns 0xff if a==b and 0x00 otherwise. */ | 95 | /* constant_time_eq_8 returns 0xff if a==b and 0x00 otherwise. */ |
94 | static unsigned char constant_time_eq_8(unsigned a, unsigned b) | 96 | static unsigned char |
95 | { | 97 | constant_time_eq_8(unsigned a, unsigned b) |
98 | { | ||
96 | unsigned c = a ^ b; | 99 | unsigned c = a ^ b; |
97 | c--; | 100 | c--; |
98 | return DUPLICATE_MSB_TO_ALL_8(c); | 101 | return DUPLICATE_MSB_TO_ALL_8(c); |
99 | } | 102 | } |
100 | 103 | ||
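An aside for readers of the helpers above (not part of the commit): each one returns a mask with every bit set when the comparison holds and all bits clear otherwise, so later code can select values with AND/OR instead of branching on secrets. A minimal standalone sketch of that behaviour, assuming the DUPLICATE_MSB_TO_ALL definition above and an arithmetic right shift of negative ints, as the comment at the top of the file already cautions:

/* Standalone sketch, not from the diff: exercises the mask behaviour of the
 * constant-time helpers on small inputs (well below 2^31). */
#include <assert.h>

#define DUPLICATE_MSB_TO_ALL(x) ((unsigned)((int)(x) >> (sizeof(int) * 8 - 1)))

static unsigned
constant_time_lt_demo(unsigned a, unsigned b)
{
	a -= b;				/* MSB becomes 1 exactly when a < b */
	return DUPLICATE_MSB_TO_ALL(a);	/* smear the MSB across the word */
}

int
main(void)
{
	unsigned x = 3, y = 200, mask = constant_time_lt_demo(3, 7);

	assert(mask == ~0U);				/* "true" is all ones */
	assert(constant_time_lt_demo(7, 3) == 0);	/* "false" is all zeros */
	/* Branch-free select: yields x because 3 < 7. */
	assert(((x & mask) | (y & ~mask)) == x);
	return 0;
}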
101 | /* ssl3_cbc_remove_padding removes padding from the decrypted, SSLv3, CBC | 104 | /* ssl3_cbc_remove_padding removes padding from the decrypted, SSLv3, CBC |
102 | * record in |rec| by updating |rec->length| in constant time. | 105 | * record in |rec| by updating |rec->length| in constant time. |
@@ -106,11 +109,10 @@ static unsigned char constant_time_eq_8(unsigned a, unsigned b)
106 | * 0: (in non-constant time) if the record is publicly invalid. | 109 | * 0: (in non-constant time) if the record is publicly invalid. |
107 | * 1: if the padding was valid | 110 | * 1: if the padding was valid |
108 | * -1: otherwise. */ | 111 | * -1: otherwise. */ |
109 | int ssl3_cbc_remove_padding(const SSL* s, | 112 | int |
110 | SSL3_RECORD *rec, | 113 | ssl3_cbc_remove_padding(const SSL* s, SSL3_RECORD *rec, unsigned block_size, |
111 | unsigned block_size, | 114 | unsigned mac_size) |
112 | unsigned mac_size) | 115 | { |
113 | { | ||
114 | unsigned padding_length, good; | 116 | unsigned padding_length, good; |
115 | const unsigned overhead = 1 /* padding length byte */ + mac_size; | 117 | const unsigned overhead = 1 /* padding length byte */ + mac_size; |
116 | 118 | ||
@@ -119,13 +121,13 @@ int ssl3_cbc_remove_padding(const SSL* s,
119 | if (overhead > rec->length) | 121 | if (overhead > rec->length) |
120 | return 0; | 122 | return 0; |
121 | 123 | ||
122 | padding_length = rec->data[rec->length-1]; | 124 | padding_length = rec->data[rec->length - 1]; |
123 | good = constant_time_ge(rec->length, padding_length+overhead); | 125 | good = constant_time_ge(rec->length, padding_length + overhead); |
124 | /* SSLv3 requires that the padding is minimal. */ | 126 | /* SSLv3 requires that the padding is minimal. */ |
125 | good &= constant_time_ge(block_size, padding_length+1); | 127 | good &= constant_time_ge(block_size, padding_length + 1); |
126 | padding_length = good & (padding_length+1); | 128 | padding_length = good & (padding_length + 1); |
127 | rec->length -= padding_length; | 129 | rec->length -= padding_length; |
128 | rec->type |= padding_length<<8; /* kludge: pass padding length */ | 130 | rec->type |= padding_length << 8; /* kludge: pass padding length */ |
129 | return (int)((good & 1) | (~good & -1)); | 131 | return (int)((good & 1) | (~good & -1)); |
130 | } | 132 | } |
131 | 133 | ||
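A note on the return expression in the hunk above (an aside, not part of the commit): (good & 1) | (~good & -1) converts the all-ones/all-zero mask into the documented 1 / -1 result without branching on it. Restated as a tiny helper, assuming good is one of those two mask values:

/* Sketch only: how the validity mask maps to the documented return codes.
 * good == ~0U (padding valid): (good & 1) == 1, (~good & -1) == 0  -> 1
 * good == 0   (padding bad):   (good & 1) == 0, (~good & -1) == ~0 -> -1
 * (the final cast relies on two's-complement, as the original does). */
static int
mask_to_verdict(unsigned good)
{
	return (int)((good & 1) | (~good & -1));
}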
@@ -140,16 +142,14 @@ int ssl3_cbc_remove_padding(const SSL* s,
140 | * 0: (in non-constant time) if the record is publicly invalid. | 142 | * 0: (in non-constant time) if the record is publicly invalid. |
141 | * 1: if the padding was valid | 143 | * 1: if the padding was valid |
142 | * -1: otherwise. */ | 144 | * -1: otherwise. */ |
143 | int tls1_cbc_remove_padding(const SSL* s, | 145 | int |
144 | SSL3_RECORD *rec, | 146 | tls1_cbc_remove_padding(const SSL* s, SSL3_RECORD *rec, unsigned block_size, |
145 | unsigned block_size, | 147 | unsigned mac_size) |
146 | unsigned mac_size) | 148 | { |
147 | { | ||
148 | unsigned padding_length, good, to_check, i; | 149 | unsigned padding_length, good, to_check, i; |
149 | const unsigned overhead = 1 /* padding length byte */ + mac_size; | 150 | const unsigned overhead = 1 /* padding length byte */ + mac_size; |
150 | /* Check if version requires explicit IV */ | 151 | /* Check if version requires explicit IV */ |
151 | if (s->version >= TLS1_1_VERSION || s->version == DTLS1_BAD_VER) | 152 | if (s->version >= TLS1_1_VERSION || s->version == DTLS1_BAD_VER) { |
152 | { | ||
153 | /* These lengths are all public so we can test them in | 153 | /* These lengths are all public so we can test them in |
154 | * non-constant time. | 154 | * non-constant time. |
155 | */ | 155 | */ |
@@ -159,40 +159,35 @@ int tls1_cbc_remove_padding(const SSL* s,
159 | rec->data += block_size; | 159 | rec->data += block_size; |
160 | rec->input += block_size; | 160 | rec->input += block_size; |
161 | rec->length -= block_size; | 161 | rec->length -= block_size; |
162 | } | 162 | } else if (overhead > rec->length) |
163 | else if (overhead > rec->length) | ||
164 | return 0; | 163 | return 0; |
165 | 164 | ||
166 | padding_length = rec->data[rec->length-1]; | 165 | padding_length = rec->data[rec->length - 1]; |
167 | 166 | ||
168 | /* NB: if compression is in operation the first packet may not be of | 167 | /* NB: if compression is in operation the first packet may not be of |
169 | * even length so the padding bug check cannot be performed. This bug | 168 | * even length so the padding bug check cannot be performed. This bug |
170 | * workaround has been around since SSLeay so hopefully it is either | 169 | * workaround has been around since SSLeay so hopefully it is either |
171 | * fixed now or no buggy implementation supports compression [steve] | 170 | * fixed now or no buggy implementation supports compression [steve] |
172 | */ | 171 | */ |
173 | if ( (s->options&SSL_OP_TLS_BLOCK_PADDING_BUG) && !s->expand) | 172 | if ((s->options & SSL_OP_TLS_BLOCK_PADDING_BUG) && !s->expand) { |
174 | { | ||
175 | /* First packet is even in size, so check */ | 173 | /* First packet is even in size, so check */ |
176 | if ((memcmp(s->s3->read_sequence, "\0\0\0\0\0\0\0\0",8) == 0) && | 174 | if ((memcmp(s->s3->read_sequence, "\0\0\0\0\0\0\0\0", 8) == 0) && |
177 | !(padding_length & 1)) | 175 | !(padding_length & 1)) { |
178 | { | ||
179 | s->s3->flags|=TLS1_FLAGS_TLS_PADDING_BUG; | 176 | s->s3->flags|=TLS1_FLAGS_TLS_PADDING_BUG; |
180 | } | 177 | } |
181 | if ((s->s3->flags & TLS1_FLAGS_TLS_PADDING_BUG) && | 178 | if ((s->s3->flags & TLS1_FLAGS_TLS_PADDING_BUG) && |
182 | padding_length > 0) | 179 | padding_length > 0) { |
183 | { | ||
184 | padding_length--; | 180 | padding_length--; |
185 | } | ||
186 | } | 181 | } |
182 | } | ||
187 | 183 | ||
188 | if (EVP_CIPHER_flags(s->enc_read_ctx->cipher)&EVP_CIPH_FLAG_AEAD_CIPHER) | 184 | if (EVP_CIPHER_flags(s->enc_read_ctx->cipher) & EVP_CIPH_FLAG_AEAD_CIPHER) { |
189 | { | ||
190 | /* padding is already verified */ | 185 | /* padding is already verified */ |
191 | rec->length -= padding_length + 1; | 186 | rec->length -= padding_length + 1; |
192 | return 1; | 187 | return 1; |
193 | } | 188 | } |
194 | 189 | ||
195 | good = constant_time_ge(rec->length, overhead+padding_length); | 190 | good = constant_time_ge(rec->length, overhead + padding_length); |
196 | /* The padding consists of a length byte at the end of the record and | 191 | /* The padding consists of a length byte at the end of the record and |
197 | * then that many bytes of padding, all with the same value as the | 192 | * then that many bytes of padding, all with the same value as the |
198 | * length byte. Thus, with the length byte included, there are i+1 | 193 | * length byte. Thus, with the length byte included, there are i+1 |
@@ -203,17 +198,16 @@ int tls1_cbc_remove_padding(const SSL* s,
203 | * amount of padding possible. (Again, the length of the record is | 198 | * amount of padding possible. (Again, the length of the record is |
204 | * public information so we can use it.) */ | 199 | * public information so we can use it.) */ |
205 | to_check = 255; /* maximum amount of padding. */ | 200 | to_check = 255; /* maximum amount of padding. */ |
206 | if (to_check > rec->length-1) | 201 | if (to_check > rec->length - 1) |
207 | to_check = rec->length-1; | 202 | to_check = rec->length - 1; |
208 | 203 | ||
209 | for (i = 0; i < to_check; i++) | 204 | for (i = 0; i < to_check; i++) { |
210 | { | ||
211 | unsigned char mask = constant_time_ge(padding_length, i); | 205 | unsigned char mask = constant_time_ge(padding_length, i); |
212 | unsigned char b = rec->data[rec->length-1-i]; | 206 | unsigned char b = rec->data[rec->length - 1 - i]; |
213 | /* The final |padding_length+1| bytes should all have the value | 207 | /* The final |padding_length+1| bytes should all have the value |
214 | * |padding_length|. Therefore the XOR should be zero. */ | 208 | * |padding_length|. Therefore the XOR should be zero. */ |
215 | good &= ~(mask&(padding_length ^ b)); | 209 | good &= ~(mask&(padding_length ^ b)); |
216 | } | 210 | } |
217 | 211 | ||
218 | /* If any of the final |padding_length+1| bytes had the wrong value, | 212 | /* If any of the final |padding_length+1| bytes had the wrong value, |
219 | * one or more of the lower eight bits of |good| will be cleared. We | 213 | * one or more of the lower eight bits of |good| will be cleared. We |
@@ -222,15 +216,15 @@ int tls1_cbc_remove_padding(const SSL* s,
222 | good &= good >> 4; | 216 | good &= good >> 4; |
223 | good &= good >> 2; | 217 | good &= good >> 2; |
224 | good &= good >> 1; | 218 | good &= good >> 1; |
225 | good <<= sizeof(good)*8-1; | 219 | good <<= sizeof(good)*8 - 1; |
226 | good = DUPLICATE_MSB_TO_ALL(good); | 220 | good = DUPLICATE_MSB_TO_ALL(good); |
227 | 221 | ||
228 | padding_length = good & (padding_length+1); | 222 | padding_length = good & (padding_length + 1); |
229 | rec->length -= padding_length; | 223 | rec->length -= padding_length; |
230 | rec->type |= padding_length<<8; /* kludge: pass padding length */ | 224 | rec->type |= padding_length<<8; /* kludge: pass padding length */ |
231 | 225 | ||
232 | return (int)((good & 1) | (~good & -1)); | 226 | return (int)((good & 1) | (~good & -1)); |
233 | } | 227 | } |
234 | 228 | ||
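One more aside before the MAC-copying code (not part of the commit): the shift-and-AND sequence near the end of tls1_cbc_remove_padding folds the low eight bits of good together, so that a single bit cleared by a padding mismatch zeroes the entire mask. Restated as a helper, assuming the DUPLICATE_MSB_TO_ALL macro from the top of the file:

/* Sketch only: collapse bits 0..7 of |good| into an all-or-nothing mask.
 * The AND-folds leave bit 0 equal to the AND of bits 0..7, the shift parks
 * that bit in the MSB, and DUPLICATE_MSB_TO_ALL smears it over the word. */
static unsigned
fold_good_mask(unsigned good)
{
	good &= good >> 4;
	good &= good >> 2;
	good &= good >> 1;
	good <<= sizeof(good) * 8 - 1;
	return DUPLICATE_MSB_TO_ALL(good);
}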
235 | /* ssl3_cbc_copy_mac copies |md_size| bytes from the end of |rec| to |out| in | 229 | /* ssl3_cbc_copy_mac copies |md_size| bytes from the end of |rec| to |out| in |
236 | * constant time (independent of the concrete value of rec->length, which may | 230 | * constant time (independent of the concrete value of rec->length, which may |
@@ -251,12 +245,12 @@ int tls1_cbc_remove_padding(const SSL* s,
251 | */ | 245 | */ |
252 | #define CBC_MAC_ROTATE_IN_PLACE | 246 | #define CBC_MAC_ROTATE_IN_PLACE |
253 | 247 | ||
254 | void ssl3_cbc_copy_mac(unsigned char* out, | 248 | void |
255 | const SSL3_RECORD *rec, | 249 | ssl3_cbc_copy_mac(unsigned char* out, const SSL3_RECORD *rec, |
256 | unsigned md_size,unsigned orig_len) | 250 | unsigned md_size, unsigned orig_len) |
257 | { | 251 | { |
258 | #if defined(CBC_MAC_ROTATE_IN_PLACE) | 252 | #if defined(CBC_MAC_ROTATE_IN_PLACE) |
259 | unsigned char rotated_mac_buf[64+EVP_MAX_MD_SIZE]; | 253 | unsigned char rotated_mac_buf[64 + EVP_MAX_MD_SIZE]; |
260 | unsigned char *rotated_mac; | 254 | unsigned char *rotated_mac; |
261 | #else | 255 | #else |
262 | unsigned char rotated_mac[EVP_MAX_MD_SIZE]; | 256 | unsigned char rotated_mac[EVP_MAX_MD_SIZE]; |
@@ -276,7 +270,7 @@ void ssl3_cbc_copy_mac(unsigned char* out,
276 | OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE); | 270 | OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE); |
277 | 271 | ||
278 | #if defined(CBC_MAC_ROTATE_IN_PLACE) | 272 | #if defined(CBC_MAC_ROTATE_IN_PLACE) |
279 | rotated_mac = rotated_mac_buf + ((0-(size_t)rotated_mac_buf)&63); | 273 | rotated_mac = rotated_mac_buf + ((0 - (size_t)rotated_mac_buf)&63); |
280 | #endif | 274 | #endif |
281 | 275 | ||
282 | /* This information is public so it's safe to branch based on it. */ | 276 | /* This information is public so it's safe to branch based on it. */ |
@@ -290,42 +284,39 @@ void ssl3_cbc_copy_mac(unsigned char* out,
290 | * figure out that it can remove div_spoiler as that would require it | 284 | * figure out that it can remove div_spoiler as that would require it |
291 | * to prove that md_size is always even, which I hope is beyond it. */ | 285 | * to prove that md_size is always even, which I hope is beyond it. */ |
292 | div_spoiler = md_size >> 1; | 286 | div_spoiler = md_size >> 1; |
293 | div_spoiler <<= (sizeof(div_spoiler)-1)*8; | 287 | div_spoiler <<= (sizeof(div_spoiler) - 1) * 8; |
294 | rotate_offset = (div_spoiler + mac_start - scan_start) % md_size; | 288 | rotate_offset = (div_spoiler + mac_start - scan_start) % md_size; |
295 | 289 | ||
296 | memset(rotated_mac, 0, md_size); | 290 | memset(rotated_mac, 0, md_size); |
297 | for (i = scan_start, j = 0; i < orig_len; i++) | 291 | for (i = scan_start, j = 0; i < orig_len; i++) { |
298 | { | ||
299 | unsigned char mac_started = constant_time_ge(i, mac_start); | 292 | unsigned char mac_started = constant_time_ge(i, mac_start); |
300 | unsigned char mac_ended = constant_time_ge(i, mac_end); | 293 | unsigned char mac_ended = constant_time_ge(i, mac_end); |
301 | unsigned char b = rec->data[i]; | 294 | unsigned char b = rec->data[i]; |
302 | rotated_mac[j++] |= b & mac_started & ~mac_ended; | 295 | rotated_mac[j++] |= b & mac_started & ~mac_ended; |
303 | j &= constant_time_lt(j,md_size); | 296 | j &= constant_time_lt(j, md_size); |
304 | } | 297 | } |
305 | 298 | ||
306 | /* Now rotate the MAC */ | 299 | /* Now rotate the MAC */ |
307 | #if defined(CBC_MAC_ROTATE_IN_PLACE) | 300 | #if defined(CBC_MAC_ROTATE_IN_PLACE) |
308 | j = 0; | 301 | j = 0; |
309 | for (i = 0; i < md_size; i++) | 302 | for (i = 0; i < md_size; i++) { |
310 | { | ||
311 | /* in case cache-line is 32 bytes, touch second line */ | 303 | /* in case cache-line is 32 bytes, touch second line */ |
312 | ((volatile unsigned char *)rotated_mac)[rotate_offset^32]; | 304 | ((volatile unsigned char *)rotated_mac)[rotate_offset^32]; |
313 | out[j++] = rotated_mac[rotate_offset++]; | 305 | out[j++] = rotated_mac[rotate_offset++]; |
314 | rotate_offset &= constant_time_lt(rotate_offset,md_size); | 306 | rotate_offset &= constant_time_lt(rotate_offset, md_size); |
315 | } | 307 | } |
316 | #else | 308 | #else |
317 | memset(out, 0, md_size); | 309 | memset(out, 0, md_size); |
318 | rotate_offset = md_size - rotate_offset; | 310 | rotate_offset = md_size - rotate_offset; |
319 | rotate_offset &= constant_time_lt(rotate_offset,md_size); | 311 | rotate_offset &= constant_time_lt(rotate_offset, md_size); |
320 | for (i = 0; i < md_size; i++) | 312 | for (i = 0; i < md_size; i++) { |
321 | { | ||
322 | for (j = 0; j < md_size; j++) | 313 | for (j = 0; j < md_size; j++) |
323 | out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset); | 314 | out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset); |
324 | rotate_offset++; | 315 | rotate_offset++; |
325 | rotate_offset &= constant_time_lt(rotate_offset,md_size); | 316 | rotate_offset &= constant_time_lt(rotate_offset, md_size); |
326 | } | ||
327 | #endif | ||
328 | } | 317 | } |
318 | #endif | ||
319 | } | ||
329 | 320 | ||
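For orientation (again not part of the commit): functionally, ssl3_cbc_copy_mac extracts the md_size bytes sitting immediately below rec->length; all of the masking and rotation above exists so that neither the memory access pattern nor the running time depends on where those bytes are. A deliberately non-constant-time reference version, assuming the SSL3_RECORD data/length fields used above:

/* Reference only, NOT constant-time: shows what ssl3_cbc_copy_mac computes.
 * The real routine must not index memory by rec->length, because that value
 * depends on the secret padding. Assumes SSL3_RECORD from ssl3.h. */
#include <string.h>

static void
copy_mac_reference(unsigned char *out, const SSL3_RECORD *rec,
    unsigned md_size)
{
	memcpy(out, rec->data + rec->length - md_size, md_size);
}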
330 | /* u32toLE serialises an unsigned, 32-bit number (n) as four bytes at (p) in | 321 | /* u32toLE serialises an unsigned, 32-bit number (n) as four bytes at (p) in |
331 | * little-endian order. The value of p is advanced by four. */ | 322 | * little-endian order. The value of p is advanced by four. */ |
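The macro body itself is outside this hunk; purely to illustrate the behaviour that comment describes, one plausible shape (an assumption, not copied from the file) would be:

/* Assumed shape only; the real u32toLE (and the l2n/l2n8 used below) are
 * defined elsewhere in the tree. Writes n as four little-endian bytes at p
 * and advances p by four. */
#define u32toLE(n, p) \
	do { \
		*(p)++ = (unsigned char)((n) >> 0);  \
		*(p)++ = (unsigned char)((n) >> 8);  \
		*(p)++ = (unsigned char)((n) >> 16); \
		*(p)++ = (unsigned char)((n) >> 24); \
	} while (0)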
@@ -338,81 +329,83 @@ void ssl3_cbc_copy_mac(unsigned char* out,
338 | /* These functions serialize the state of a hash and thus perform the standard | 329 | /* These functions serialize the state of a hash and thus perform the standard |
339 | * "final" operation without adding the padding and length that such a function | 330 | * "final" operation without adding the padding and length that such a function |
340 | * typically does. */ | 331 | * typically does. */ |
341 | static void tls1_md5_final_raw(void* ctx, unsigned char *md_out) | 332 | static void |
342 | { | 333 | tls1_md5_final_raw(void* ctx, unsigned char *md_out) |
334 | { | ||
343 | MD5_CTX *md5 = ctx; | 335 | MD5_CTX *md5 = ctx; |
344 | u32toLE(md5->A, md_out); | 336 | u32toLE(md5->A, md_out); |
345 | u32toLE(md5->B, md_out); | 337 | u32toLE(md5->B, md_out); |
346 | u32toLE(md5->C, md_out); | 338 | u32toLE(md5->C, md_out); |
347 | u32toLE(md5->D, md_out); | 339 | u32toLE(md5->D, md_out); |
348 | } | 340 | } |
349 | 341 | ||
350 | static void tls1_sha1_final_raw(void* ctx, unsigned char *md_out) | 342 | static void |
351 | { | 343 | tls1_sha1_final_raw(void* ctx, unsigned char *md_out) |
344 | { | ||
352 | SHA_CTX *sha1 = ctx; | 345 | SHA_CTX *sha1 = ctx; |
353 | l2n(sha1->h0, md_out); | 346 | l2n(sha1->h0, md_out); |
354 | l2n(sha1->h1, md_out); | 347 | l2n(sha1->h1, md_out); |
355 | l2n(sha1->h2, md_out); | 348 | l2n(sha1->h2, md_out); |
356 | l2n(sha1->h3, md_out); | 349 | l2n(sha1->h3, md_out); |
357 | l2n(sha1->h4, md_out); | 350 | l2n(sha1->h4, md_out); |
358 | } | 351 | } |
359 | #define LARGEST_DIGEST_CTX SHA_CTX | 352 | #define LARGEST_DIGEST_CTX SHA_CTX |
360 | 353 | ||
361 | #ifndef OPENSSL_NO_SHA256 | 354 | #ifndef OPENSSL_NO_SHA256 |
362 | static void tls1_sha256_final_raw(void* ctx, unsigned char *md_out) | 355 | static void |
363 | { | 356 | tls1_sha256_final_raw(void* ctx, unsigned char *md_out) |
357 | { | ||
364 | SHA256_CTX *sha256 = ctx; | 358 | SHA256_CTX *sha256 = ctx; |
365 | unsigned i; | 359 | unsigned i; |
366 | 360 | ||
367 | for (i = 0; i < 8; i++) | 361 | for (i = 0; i < 8; i++) { |
368 | { | ||
369 | l2n(sha256->h[i], md_out); | 362 | l2n(sha256->h[i], md_out); |
370 | } | ||
371 | } | 363 | } |
364 | } | ||
372 | #undef LARGEST_DIGEST_CTX | 365 | #undef LARGEST_DIGEST_CTX |
373 | #define LARGEST_DIGEST_CTX SHA256_CTX | 366 | #define LARGEST_DIGEST_CTX SHA256_CTX |
374 | #endif | 367 | #endif |
375 | 368 | ||
376 | #ifndef OPENSSL_NO_SHA512 | 369 | #ifndef OPENSSL_NO_SHA512 |
377 | static void tls1_sha512_final_raw(void* ctx, unsigned char *md_out) | 370 | static void |
378 | { | 371 | tls1_sha512_final_raw(void* ctx, unsigned char *md_out) |
372 | { | ||
379 | SHA512_CTX *sha512 = ctx; | 373 | SHA512_CTX *sha512 = ctx; |
380 | unsigned i; | 374 | unsigned i; |
381 | 375 | ||
382 | for (i = 0; i < 8; i++) | 376 | for (i = 0; i < 8; i++) { |
383 | { | ||
384 | l2n8(sha512->h[i], md_out); | 377 | l2n8(sha512->h[i], md_out); |
385 | } | ||
386 | } | 378 | } |
379 | } | ||
387 | #undef LARGEST_DIGEST_CTX | 380 | #undef LARGEST_DIGEST_CTX |
388 | #define LARGEST_DIGEST_CTX SHA512_CTX | 381 | #define LARGEST_DIGEST_CTX SHA512_CTX |
389 | #endif | 382 | #endif |
390 | 383 | ||
391 | /* ssl3_cbc_record_digest_supported returns 1 iff |ctx| uses a hash function | 384 | /* ssl3_cbc_record_digest_supported returns 1 iff |ctx| uses a hash function |
392 | * which ssl3_cbc_digest_record supports. */ | 385 | * which ssl3_cbc_digest_record supports. */ |
393 | char ssl3_cbc_record_digest_supported(const EVP_MD_CTX *ctx) | 386 | char |
394 | { | 387 | ssl3_cbc_record_digest_supported(const EVP_MD_CTX *ctx) |
388 | { | ||
395 | #ifdef OPENSSL_FIPS | 389 | #ifdef OPENSSL_FIPS |
396 | if (FIPS_mode()) | 390 | if (FIPS_mode()) |
397 | return 0; | 391 | return 0; |
398 | #endif | 392 | #endif |
399 | switch (EVP_MD_CTX_type(ctx)) | 393 | switch (EVP_MD_CTX_type(ctx)) { |
400 | { | 394 | case NID_md5: |
401 | case NID_md5: | 395 | case NID_sha1: |
402 | case NID_sha1: | ||
403 | #ifndef OPENSSL_NO_SHA256 | 396 | #ifndef OPENSSL_NO_SHA256 |
404 | case NID_sha224: | 397 | case NID_sha224: |
405 | case NID_sha256: | 398 | case NID_sha256: |
406 | #endif | 399 | #endif |
407 | #ifndef OPENSSL_NO_SHA512 | 400 | #ifndef OPENSSL_NO_SHA512 |
408 | case NID_sha384: | 401 | case NID_sha384: |
409 | case NID_sha512: | 402 | case NID_sha512: |
410 | #endif | 403 | #endif |
411 | return 1; | 404 | return 1; |
412 | default: | 405 | default: |
413 | return 0; | 406 | return 0; |
414 | } | ||
415 | } | 407 | } |
408 | } | ||
416 | 409 | ||
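Callers are expected to test this predicate before taking the constant-time digest path; schematically (hypothetical placeholder names, not the actual call site in the tree, with the parameter order taken from the prototype in the next hunk):

/* Hypothetical caller sketch; mac_ctx, hdr, rec_data, mac_secret and the
 * size variables are placeholders. */
unsigned char md[EVP_MAX_MD_SIZE];
size_t md_len;

if (ssl3_cbc_record_digest_supported(mac_ctx)) {
	ssl3_cbc_digest_record(mac_ctx, md, &md_len, hdr, rec_data,
	    data_plus_mac_size, data_plus_mac_plus_padding_size,
	    mac_secret, mac_secret_length, 0 /* TLS, not SSLv3 */);
} else {
	/* fall back to the ordinary, non-constant-time MAC computation */
}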
417 | /* ssl3_cbc_digest_record computes the MAC of a decrypted, padded SSLv3/TLS | 410 | /* ssl3_cbc_digest_record computes the MAC of a decrypted, padded SSLv3/TLS |
418 | * record. | 411 | * record. |
@@ -433,26 +426,21 @@ char ssl3_cbc_record_digest_supported(const EVP_MD_CTX *ctx)
433 | * functions, above, we know that data_plus_mac_size is large enough to contain | 426 | * functions, above, we know that data_plus_mac_size is large enough to contain |
434 | * a padding byte and MAC. (If the padding was invalid, it might contain the | 427 | * a padding byte and MAC. (If the padding was invalid, it might contain the |
435 | * padding too. ) */ | 428 | * padding too. ) */ |
436 | void ssl3_cbc_digest_record( | 429 | void ssl3_cbc_digest_record(const EVP_MD_CTX *ctx, unsigned char* md_out, |
437 | const EVP_MD_CTX *ctx, | 430 | size_t* md_out_size, const unsigned char header[13], |
438 | unsigned char* md_out, | 431 | const unsigned char *data, size_t data_plus_mac_size, |
439 | size_t* md_out_size, | 432 | size_t data_plus_mac_plus_padding_size, const unsigned char *mac_secret, |
440 | const unsigned char header[13], | 433 | unsigned mac_secret_length, char is_sslv3) |
441 | const unsigned char *data, | 434 | { |
442 | size_t data_plus_mac_size, | ||
443 | size_t data_plus_mac_plus_padding_size, | ||
444 | const unsigned char *mac_secret, | ||
445 | unsigned mac_secret_length, | ||
446 | char is_sslv3) | ||
447 | { | ||
448 | union { double align; | 435 | union { double align; |
449 | unsigned char c[sizeof(LARGEST_DIGEST_CTX)]; } md_state; | 436 | unsigned char c[sizeof(LARGEST_DIGEST_CTX)]; |
437 | } md_state; | ||
450 | void (*md_final_raw)(void *ctx, unsigned char *md_out); | 438 | void (*md_final_raw)(void *ctx, unsigned char *md_out); |
451 | void (*md_transform)(void *ctx, const unsigned char *block); | 439 | void (*md_transform)(void *ctx, const unsigned char *block); |
452 | unsigned md_size, md_block_size = 64; | 440 | unsigned md_size, md_block_size = 64; |
453 | unsigned sslv3_pad_length = 40, header_length, variance_blocks, | 441 | unsigned sslv3_pad_length = 40, header_length, variance_blocks, |
454 | len, max_mac_bytes, num_blocks, | 442 | len, max_mac_bytes, num_blocks, |
455 | num_starting_blocks, k, mac_end_offset, c, index_a, index_b; | 443 | num_starting_blocks, k, mac_end_offset, c, index_a, index_b; |
456 | unsigned int bits; /* at most 18 bits */ | 444 | unsigned int bits; /* at most 18 bits */ |
457 | unsigned char length_bytes[MAX_HASH_BIT_COUNT_BYTES]; | 445 | unsigned char length_bytes[MAX_HASH_BIT_COUNT_BYTES]; |
458 | /* hmac_pad is the masked HMAC key. */ | 446 | /* hmac_pad is the masked HMAC key. */ |
@@ -470,78 +458,74 @@ void ssl3_cbc_digest_record(
470 | * many possible overflows later in this function. */ | 458 | * many possible overflows later in this function. */ |
471 | OPENSSL_assert(data_plus_mac_plus_padding_size < 1024*1024); | 459 | OPENSSL_assert(data_plus_mac_plus_padding_size < 1024*1024); |
472 | 460 | ||
473 | switch (EVP_MD_CTX_type(ctx)) | 461 | switch (EVP_MD_CTX_type(ctx)) { |
474 | { | 462 | case NID_md5: |
475 | case NID_md5: | 463 | MD5_Init((MD5_CTX*)md_state.c); |
476 | MD5_Init((MD5_CTX*)md_state.c); | 464 | md_final_raw = tls1_md5_final_raw; |
477 | md_final_raw = tls1_md5_final_raw; | 465 | md_transform = (void(*)(void *ctx, const unsigned char *block)) MD5_Transform; |
478 | md_transform = (void(*)(void *ctx, const unsigned char *block)) MD5_Transform; | 466 | md_size = 16; |
479 | md_size = 16; | 467 | sslv3_pad_length = 48; |
480 | sslv3_pad_length = 48; | 468 | length_is_big_endian = 0; |
481 | length_is_big_endian = 0; | 469 | break; |
482 | break; | 470 | case NID_sha1: |
483 | case NID_sha1: | 471 | SHA1_Init((SHA_CTX*)md_state.c); |
484 | SHA1_Init((SHA_CTX*)md_state.c); | 472 | md_final_raw = tls1_sha1_final_raw; |
485 | md_final_raw = tls1_sha1_final_raw; | 473 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA1_Transform; |
486 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA1_Transform; | 474 | md_size = 20; |
487 | md_size = 20; | 475 | break; |
488 | break; | ||
489 | #ifndef OPENSSL_NO_SHA256 | 476 | #ifndef OPENSSL_NO_SHA256 |
490 | case NID_sha224: | 477 | case NID_sha224: |
491 | SHA224_Init((SHA256_CTX*)md_state.c); | 478 | SHA224_Init((SHA256_CTX*)md_state.c); |
492 | md_final_raw = tls1_sha256_final_raw; | 479 | md_final_raw = tls1_sha256_final_raw; |
493 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform; | 480 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform; |
494 | md_size = 224/8; | 481 | md_size = 224/8; |
495 | break; | 482 | break; |
496 | case NID_sha256: | 483 | case NID_sha256: |
497 | SHA256_Init((SHA256_CTX*)md_state.c); | 484 | SHA256_Init((SHA256_CTX*)md_state.c); |
498 | md_final_raw = tls1_sha256_final_raw; | 485 | md_final_raw = tls1_sha256_final_raw; |
499 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform; | 486 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform; |
500 | md_size = 32; | 487 | md_size = 32; |
501 | break; | 488 | break; |
502 | #endif | 489 | #endif |
503 | #ifndef OPENSSL_NO_SHA512 | 490 | #ifndef OPENSSL_NO_SHA512 |
504 | case NID_sha384: | 491 | case NID_sha384: |
505 | SHA384_Init((SHA512_CTX*)md_state.c); | 492 | SHA384_Init((SHA512_CTX*)md_state.c); |
506 | md_final_raw = tls1_sha512_final_raw; | 493 | md_final_raw = tls1_sha512_final_raw; |
507 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform; | 494 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform; |
508 | md_size = 384/8; | 495 | md_size = 384/8; |
509 | md_block_size = 128; | 496 | md_block_size = 128; |
510 | md_length_size = 16; | 497 | md_length_size = 16; |
511 | break; | 498 | break; |
512 | case NID_sha512: | 499 | case NID_sha512: |
513 | SHA512_Init((SHA512_CTX*)md_state.c); | 500 | SHA512_Init((SHA512_CTX*)md_state.c); |
514 | md_final_raw = tls1_sha512_final_raw; | 501 | md_final_raw = tls1_sha512_final_raw; |
515 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform; | 502 | md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform; |
516 | md_size = 64; | 503 | md_size = 64; |
517 | md_block_size = 128; | 504 | md_block_size = 128; |
518 | md_length_size = 16; | 505 | md_length_size = 16; |
519 | break; | 506 | break; |
520 | #endif | 507 | #endif |
521 | default: | 508 | default: |
522 | /* ssl3_cbc_record_digest_supported should have been | 509 | /* ssl3_cbc_record_digest_supported should have been |
523 | * called first to check that the hash function is | 510 | * called first to check that the hash function is |
524 | * supported. */ | 511 | * supported. */ |
525 | OPENSSL_assert(0); | 512 | OPENSSL_assert(0); |
526 | if (md_out_size) | 513 | if (md_out_size) |
527 | *md_out_size = -1; | 514 | *md_out_size = -1; |
528 | return; | 515 | return; |
529 | } | 516 | } |
530 | 517 | ||
531 | OPENSSL_assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES); | 518 | OPENSSL_assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES); |
532 | OPENSSL_assert(md_block_size <= MAX_HASH_BLOCK_SIZE); | 519 | OPENSSL_assert(md_block_size <= MAX_HASH_BLOCK_SIZE); |
533 | OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE); | 520 | OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE); |
534 | 521 | ||
535 | header_length = 13; | 522 | header_length = 13; |
536 | if (is_sslv3) | 523 | if (is_sslv3) { |
537 | { | 524 | header_length = mac_secret_length + sslv3_pad_length + |
538 | header_length = | 525 | 8 /* sequence number */ + |
539 | mac_secret_length + | 526 | 1 /* record type */ + |
540 | sslv3_pad_length + | 527 | 2 /* record length */; |
541 | 8 /* sequence number */ + | 528 | } |
542 | 1 /* record type */ + | ||
543 | 2 /* record length */; | ||
544 | } | ||
545 | 529 | ||
546 | /* variance_blocks is the number of blocks of the hash that we have to | 530 | /* variance_blocks is the number of blocks of the hash that we have to |
547 | * calculate in constant time because they could be altered by the | 531 | * calculate in constant time because they could be altered by the |
@@ -597,15 +581,13 @@ void ssl3_cbc_digest_record(
597 | 581 | ||
598 | /* For SSLv3, if we're going to have any starting blocks then we need | 582 | /* For SSLv3, if we're going to have any starting blocks then we need |
599 | * at least two because the header is larger than a single block. */ | 583 | * at least two because the header is larger than a single block. */ |
600 | if (num_blocks > variance_blocks + (is_sslv3 ? 1 : 0)) | 584 | if (num_blocks > variance_blocks + (is_sslv3 ? 1 : 0)) { |
601 | { | ||
602 | num_starting_blocks = num_blocks - variance_blocks; | 585 | num_starting_blocks = num_blocks - variance_blocks; |
603 | k = md_block_size*num_starting_blocks; | 586 | k = md_block_size*num_starting_blocks; |
604 | } | 587 | } |
605 | 588 | ||
606 | bits = 8*mac_end_offset; | 589 | bits = 8*mac_end_offset; |
607 | if (!is_sslv3) | 590 | if (!is_sslv3) { |
608 | { | ||
609 | /* Compute the initial HMAC block. For SSLv3, the padding and | 591 | /* Compute the initial HMAC block. For SSLv3, the padding and |
610 | * secret bytes are included in |header| because they take more | 592 | * secret bytes are included in |header| because they take more |
611 | * than a single block. */ | 593 | * than a single block. */ |
@@ -617,51 +599,44 @@ void ssl3_cbc_digest_record(
617 | hmac_pad[i] ^= 0x36; | 599 | hmac_pad[i] ^= 0x36; |
618 | 600 | ||
619 | md_transform(md_state.c, hmac_pad); | 601 | md_transform(md_state.c, hmac_pad); |
620 | } | 602 | } |
621 | 603 | ||
622 | if (length_is_big_endian) | 604 | if (length_is_big_endian) { |
623 | { | 605 | memset(length_bytes, 0, md_length_size - 4); |
624 | memset(length_bytes,0,md_length_size-4); | 606 | length_bytes[md_length_size - 4] = (unsigned char)(bits >> 24); |
625 | length_bytes[md_length_size-4] = (unsigned char)(bits>>24); | 607 | length_bytes[md_length_size - 3] = (unsigned char)(bits >> 16); |
626 | length_bytes[md_length_size-3] = (unsigned char)(bits>>16); | 608 | length_bytes[md_length_size - 2] = (unsigned char)(bits >> 8); |
627 | length_bytes[md_length_size-2] = (unsigned char)(bits>>8); | 609 | length_bytes[md_length_size - 1] = (unsigned char)bits; |
628 | length_bytes[md_length_size-1] = (unsigned char)bits; | 610 | } else { |
629 | } | 611 | memset(length_bytes, 0, md_length_size); |
630 | else | 612 | length_bytes[md_length_size - 5] = (unsigned char)(bits >> 24); |
631 | { | 613 | length_bytes[md_length_size - 6] = (unsigned char)(bits >> 16); |
632 | memset(length_bytes,0,md_length_size); | 614 | length_bytes[md_length_size - 7] = (unsigned char)(bits >> 8); |
633 | length_bytes[md_length_size-5] = (unsigned char)(bits>>24); | 615 | length_bytes[md_length_size - 8] = (unsigned char)bits; |
634 | length_bytes[md_length_size-6] = (unsigned char)(bits>>16); | 616 | } |
635 | length_bytes[md_length_size-7] = (unsigned char)(bits>>8); | ||
636 | length_bytes[md_length_size-8] = (unsigned char)bits; | ||
637 | } | ||
638 | 617 | ||
639 | if (k > 0) | 618 | if (k > 0) { |
640 | { | 619 | if (is_sslv3) { |
641 | if (is_sslv3) | ||
642 | { | ||
643 | /* The SSLv3 header is larger than a single block. | 620 | /* The SSLv3 header is larger than a single block. |
644 | * overhang is the number of bytes beyond a single | 621 | * overhang is the number of bytes beyond a single |
645 | * block that the header consumes: either 7 bytes | 622 | * block that the header consumes: either 7 bytes |
646 | * (SHA1) or 11 bytes (MD5). */ | 623 | * (SHA1) or 11 bytes (MD5). */ |
647 | unsigned overhang = header_length-md_block_size; | 624 | unsigned overhang = header_length - md_block_size; |
648 | md_transform(md_state.c, header); | 625 | md_transform(md_state.c, header); |
649 | memcpy(first_block, header + md_block_size, overhang); | 626 | memcpy(first_block, header + md_block_size, overhang); |
650 | memcpy(first_block + overhang, data, md_block_size-overhang); | 627 | memcpy(first_block + overhang, data, md_block_size - overhang); |
651 | md_transform(md_state.c, first_block); | 628 | md_transform(md_state.c, first_block); |
652 | for (i = 1; i < k/md_block_size - 1; i++) | 629 | for (i = 1; i < k/md_block_size - 1; i++) |
653 | md_transform(md_state.c, data + md_block_size*i - overhang); | 630 | md_transform(md_state.c, data + md_block_size*i - overhang); |
654 | } | 631 | } else { |
655 | else | ||
656 | { | ||
657 | /* k is a multiple of md_block_size. */ | 632 | /* k is a multiple of md_block_size. */ |
658 | memcpy(first_block, header, 13); | 633 | memcpy(first_block, header, 13); |
659 | memcpy(first_block+13, data, md_block_size-13); | 634 | memcpy(first_block + 13, data, md_block_size - 13); |
660 | md_transform(md_state.c, first_block); | 635 | md_transform(md_state.c, first_block); |
661 | for (i = 1; i < k/md_block_size; i++) | 636 | for (i = 1; i < k/md_block_size; i++) |
662 | md_transform(md_state.c, data + md_block_size*i - 13); | 637 | md_transform(md_state.c, data + md_block_size*i - 13); |
663 | } | ||
664 | } | 638 | } |
639 | } | ||
665 | 640 | ||
666 | memset(mac_out, 0, sizeof(mac_out)); | 641 | memset(mac_out, 0, sizeof(mac_out)); |
667 | 642 | ||
@@ -669,22 +644,20 @@ void ssl3_cbc_digest_record(
669 | * it in constant time. If the |i==index_a| then we'll include the 0x80 | 644 | * it in constant time. If the |i==index_a| then we'll include the 0x80 |
670 | * bytes and zero pad etc. For each block we selectively copy it, in | 645 | * bytes and zero pad etc. For each block we selectively copy it, in |
671 | * constant time, to |mac_out|. */ | 646 | * constant time, to |mac_out|. */ |
672 | for (i = num_starting_blocks; i <= num_starting_blocks+variance_blocks; i++) | 647 | for (i = num_starting_blocks; i <= num_starting_blocks + variance_blocks; i++) { |
673 | { | ||
674 | unsigned char block[MAX_HASH_BLOCK_SIZE]; | 648 | unsigned char block[MAX_HASH_BLOCK_SIZE]; |
675 | unsigned char is_block_a = constant_time_eq_8(i, index_a); | 649 | unsigned char is_block_a = constant_time_eq_8(i, index_a); |
676 | unsigned char is_block_b = constant_time_eq_8(i, index_b); | 650 | unsigned char is_block_b = constant_time_eq_8(i, index_b); |
677 | for (j = 0; j < md_block_size; j++) | 651 | for (j = 0; j < md_block_size; j++) { |
678 | { | ||
679 | unsigned char b = 0, is_past_c, is_past_cp1; | 652 | unsigned char b = 0, is_past_c, is_past_cp1; |
680 | if (k < header_length) | 653 | if (k < header_length) |
681 | b = header[k]; | 654 | b = header[k]; |
682 | else if (k < data_plus_mac_plus_padding_size + header_length) | 655 | else if (k < data_plus_mac_plus_padding_size + header_length) |
683 | b = data[k-header_length]; | 656 | b = data[k - header_length]; |
684 | k++; | 657 | k++; |
685 | 658 | ||
686 | is_past_c = is_block_a & constant_time_ge(j, c); | 659 | is_past_c = is_block_a & constant_time_ge(j, c); |
687 | is_past_cp1 = is_block_a & constant_time_ge(j, c+1); | 660 | is_past_cp1 = is_block_a & constant_time_ge(j, c + 1); |
688 | /* If this is the block containing the end of the | 661 | /* If this is the block containing the end of the |
689 | * application data, and we are at the offset for the | 662 | * application data, and we are at the offset for the |
690 | * 0x80 value, then overwrite b with 0x80. */ | 663 | * 0x80 value, then overwrite b with 0x80. */ |
@@ -701,46 +674,42 @@ void ssl3_cbc_digest_record(
701 | 674 | ||
702 | /* The final bytes of one of the blocks contains the | 675 | /* The final bytes of one of the blocks contains the |
703 | * length. */ | 676 | * length. */ |
704 | if (j >= md_block_size - md_length_size) | 677 | if (j >= md_block_size - md_length_size) { |
705 | { | ||
706 | /* If this is index_b, write a length byte. */ | 678 | /* If this is index_b, write a length byte. */ |
707 | b = (b&~is_block_b) | (is_block_b&length_bytes[j-(md_block_size-md_length_size)]); | 679 | b = (b&~is_block_b) | (is_block_b&length_bytes[j - (md_block_size - md_length_size)]); |
708 | } | ||
709 | block[j] = b; | ||
710 | } | 680 | } |
681 | block[j] = b; | ||
682 | } | ||
711 | 683 | ||
712 | md_transform(md_state.c, block); | 684 | md_transform(md_state.c, block); |
713 | md_final_raw(md_state.c, block); | 685 | md_final_raw(md_state.c, block); |
714 | /* If this is index_b, copy the hash value to |mac_out|. */ | 686 | /* If this is index_b, copy the hash value to |mac_out|. */ |
715 | for (j = 0; j < md_size; j++) | 687 | for (j = 0; j < md_size; j++) |
716 | mac_out[j] |= block[j]&is_block_b; | 688 | mac_out[j] |= block[j]&is_block_b; |
717 | } | 689 | } |
718 | 690 | ||
719 | EVP_MD_CTX_init(&md_ctx); | 691 | EVP_MD_CTX_init(&md_ctx); |
720 | EVP_DigestInit_ex(&md_ctx, ctx->digest, NULL /* engine */); | 692 | EVP_DigestInit_ex(&md_ctx, ctx->digest, NULL /* engine */); |
721 | if (is_sslv3) | 693 | if (is_sslv3) { |
722 | { | ||
723 | /* We repurpose |hmac_pad| to contain the SSLv3 pad2 block. */ | 694 | /* We repurpose |hmac_pad| to contain the SSLv3 pad2 block. */ |
724 | memset(hmac_pad, 0x5c, sslv3_pad_length); | 695 | memset(hmac_pad, 0x5c, sslv3_pad_length); |
725 | 696 | ||
726 | EVP_DigestUpdate(&md_ctx, mac_secret, mac_secret_length); | 697 | EVP_DigestUpdate(&md_ctx, mac_secret, mac_secret_length); |
727 | EVP_DigestUpdate(&md_ctx, hmac_pad, sslv3_pad_length); | 698 | EVP_DigestUpdate(&md_ctx, hmac_pad, sslv3_pad_length); |
728 | EVP_DigestUpdate(&md_ctx, mac_out, md_size); | 699 | EVP_DigestUpdate(&md_ctx, mac_out, md_size); |
729 | } | 700 | } else { |
730 | else | ||
731 | { | ||
732 | /* Complete the HMAC in the standard manner. */ | 701 | /* Complete the HMAC in the standard manner. */ |
733 | for (i = 0; i < md_block_size; i++) | 702 | for (i = 0; i < md_block_size; i++) |
734 | hmac_pad[i] ^= 0x6a; | 703 | hmac_pad[i] ^= 0x6a; |
735 | 704 | ||
736 | EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size); | 705 | EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size); |
737 | EVP_DigestUpdate(&md_ctx, mac_out, md_size); | 706 | EVP_DigestUpdate(&md_ctx, mac_out, md_size); |
738 | } | 707 | } |
739 | EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u); | 708 | EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u); |
740 | if (md_out_size) | 709 | if (md_out_size) |
741 | *md_out_size = md_out_size_u; | 710 | *md_out_size = md_out_size_u; |
742 | EVP_MD_CTX_cleanup(&md_ctx); | 711 | EVP_MD_CTX_cleanup(&md_ctx); |
743 | } | 712 | } |
744 | 713 | ||
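A detail in the TLS branch above that is easy to miss (an aside, not part of the commit): hmac_pad still holds the key XORed with 0x36 from the earlier ipad step, so XORing each byte with 0x6a converts it in place into the opad block, because 0x36 ^ 0x6a == 0x5c. A quick check of that identity:

/* Sketch only: (k ^ 0x36) ^ 0x6a == k ^ 0x5c for every key byte k. */
#include <assert.h>

int
main(void)
{
	unsigned k;

	for (k = 0; k < 256; k++)
		assert(((k ^ 0x36) ^ 0x6a) == (k ^ 0x5c));
	return 0;
}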
745 | #ifdef OPENSSL_FIPS | 714 | #ifdef OPENSSL_FIPS |
746 | 715 | ||
@@ -749,10 +718,10 @@ void ssl3_cbc_digest_record(
749 | * by digesting additional data. | 718 | * by digesting additional data. |
750 | */ | 719 | */ |
751 | 720 | ||
752 | void tls_fips_digest_extra( | 721 | void tls_fips_digest_extra(const EVP_CIPHER_CTX *cipher_ctx, |
753 | const EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *mac_ctx, | 722 | EVP_MD_CTX *mac_ctx, const unsigned char *data, size_t data_len, |
754 | const unsigned char *data, size_t data_len, size_t orig_len) | 723 | size_t orig_len) |
755 | { | 724 | { |
756 | size_t block_size, digest_pad, blocks_data, blocks_orig; | 725 | size_t block_size, digest_pad, blocks_data, blocks_orig; |
757 | if (EVP_CIPHER_CTX_mode(cipher_ctx) != EVP_CIPH_CBC_MODE) | 726 | if (EVP_CIPHER_CTX_mode(cipher_ctx) != EVP_CIPH_CBC_MODE) |
758 | return; | 727 | return; |
@@ -785,6 +754,6 @@ void tls_fips_digest_extra(
785 | * length TLS buffer. | 754 | * length TLS buffer. |
786 | */ | 755 | */ |
787 | EVP_DigestSignUpdate(mac_ctx, data, | 756 | EVP_DigestSignUpdate(mac_ctx, data, |
788 | (blocks_orig - blocks_data + 1) * block_size); | 757 | (blocks_orig - blocks_data + 1) * block_size); |
789 | } | 758 | } |
790 | #endif | 759 | #endif |