Diffstat (limited to 'src/lib/libssl/tls12_record_layer.c')
-rw-r--r--	src/lib/libssl/tls12_record_layer.c	1346
1 file changed, 0 insertions, 1346 deletions
diff --git a/src/lib/libssl/tls12_record_layer.c b/src/lib/libssl/tls12_record_layer.c
deleted file mode 100644
index 43edb6f0f5..0000000000
--- a/src/lib/libssl/tls12_record_layer.c
+++ /dev/null
@@ -1,1346 +0,0 @@
/* $OpenBSD: tls12_record_layer.c,v 1.32 2021/06/19 16:52:47 jsing Exp $ */
/*
 * Copyright (c) 2020 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <limits.h>
#include <stdlib.h>

#include <openssl/evp.h>

#include "ssl_locl.h"

#define TLS12_RECORD_SEQ_NUM_LEN	8
#define TLS12_AEAD_FIXED_NONCE_MAX_LEN	12

struct tls12_record_protection {
	uint16_t epoch;
	uint8_t seq_num[TLS12_RECORD_SEQ_NUM_LEN];

	EVP_AEAD_CTX *aead_ctx;

	uint8_t *aead_fixed_nonce;
	size_t aead_fixed_nonce_len;

	size_t aead_variable_nonce_len;
	size_t aead_tag_len;

	int aead_xor_nonces;
	int aead_variable_nonce_in_record;

	EVP_CIPHER_CTX *cipher_ctx;
	EVP_MD_CTX *hash_ctx;

	int stream_mac;

	uint8_t *mac_key;
	size_t mac_key_len;
};

static struct tls12_record_protection *
tls12_record_protection_new(void)
{
	return calloc(1, sizeof(struct tls12_record_protection));
}

static void
tls12_record_protection_clear(struct tls12_record_protection *rp)
{
	if (rp->aead_ctx != NULL) {
		EVP_AEAD_CTX_cleanup(rp->aead_ctx);
		freezero(rp->aead_ctx, sizeof(*rp->aead_ctx));
	}

	freezero(rp->aead_fixed_nonce, rp->aead_fixed_nonce_len);

	EVP_CIPHER_CTX_free(rp->cipher_ctx);
	EVP_MD_CTX_free(rp->hash_ctx);

	freezero(rp->mac_key, rp->mac_key_len);

	memset(rp, 0, sizeof(*rp));
}

static void
tls12_record_protection_free(struct tls12_record_protection *rp)
{
	if (rp == NULL)
		return;

	tls12_record_protection_clear(rp);

	freezero(rp, sizeof(struct tls12_record_protection));
}

static int
tls12_record_protection_engaged(struct tls12_record_protection *rp)
{
	return rp->aead_ctx != NULL || rp->cipher_ctx != NULL;
}

static int
tls12_record_protection_unused(struct tls12_record_protection *rp)
{
	return rp->aead_ctx == NULL && rp->cipher_ctx == NULL &&
	    rp->hash_ctx == NULL && rp->mac_key == NULL;
}

static int
tls12_record_protection_eiv_len(struct tls12_record_protection *rp,
    size_t *out_eiv_len)
{
	int eiv_len;

	*out_eiv_len = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	eiv_len = 0;
	if (EVP_CIPHER_CTX_mode(rp->cipher_ctx) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(rp->cipher_ctx);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		return 0;

	*out_eiv_len = eiv_len;

	return 1;
}

static int
tls12_record_protection_block_size(struct tls12_record_protection *rp,
    size_t *out_block_size)
{
	int block_size;

	*out_block_size = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	block_size = EVP_CIPHER_CTX_block_size(rp->cipher_ctx);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		return 0;

	*out_block_size = block_size;

	return 1;
}

static int
tls12_record_protection_mac_len(struct tls12_record_protection *rp,
    size_t *out_mac_len)
{
	int mac_len;

	*out_mac_len = 0;

	if (rp->hash_ctx == NULL)
		return 0;

	mac_len = EVP_MD_CTX_size(rp->hash_ctx);
	if (mac_len <= 0 || mac_len > EVP_MAX_MD_SIZE)
		return 0;

	*out_mac_len = mac_len;

	return 1;
}

struct tls12_record_layer {
	uint16_t version;
	uint16_t initial_epoch;
	int dtls;

	uint8_t alert_desc;

	const EVP_AEAD *aead;
	const EVP_CIPHER *cipher;
	const EVP_MD *handshake_hash;
	const EVP_MD *mac_hash;

	/* Pointers to active record protection (memory is not owned). */
	struct tls12_record_protection *read;
	struct tls12_record_protection *write;

	struct tls12_record_protection *read_current;
	struct tls12_record_protection *write_current;
	struct tls12_record_protection *write_previous;
};

struct tls12_record_layer *
tls12_record_layer_new(void)
{
	struct tls12_record_layer *rl;

	if ((rl = calloc(1, sizeof(struct tls12_record_layer))) == NULL)
		goto err;
	if ((rl->read_current = tls12_record_protection_new()) == NULL)
		goto err;
	if ((rl->write_current = tls12_record_protection_new()) == NULL)
		goto err;

	rl->read = rl->read_current;
	rl->write = rl->write_current;

	return rl;

 err:
	tls12_record_layer_free(rl);

	return NULL;
}

void
tls12_record_layer_free(struct tls12_record_layer *rl)
{
	if (rl == NULL)
		return;

	tls12_record_protection_free(rl->read_current);
	tls12_record_protection_free(rl->write_current);
	tls12_record_protection_free(rl->write_previous);

	freezero(rl, sizeof(struct tls12_record_layer));
}

void
tls12_record_layer_alert(struct tls12_record_layer *rl, uint8_t *alert_desc)
{
	*alert_desc = rl->alert_desc;
}

int
tls12_record_layer_write_overhead(struct tls12_record_layer *rl,
    size_t *overhead)
{
	size_t block_size, eiv_len, mac_len;

	*overhead = 0;

	if (rl->write->aead_ctx != NULL) {
		*overhead = rl->write->aead_tag_len;
	} else if (rl->write->cipher_ctx != NULL) {
		eiv_len = 0;
		if (rl->version != TLS1_VERSION) {
			if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
				return 0;
		}
		if (!tls12_record_protection_block_size(rl->write, &block_size))
			return 0;
		if (!tls12_record_protection_mac_len(rl->write, &mac_len))
			return 0;

		*overhead = eiv_len + block_size + mac_len;
	}

	return 1;
}

int
tls12_record_layer_read_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->read);
}

int
tls12_record_layer_write_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->write);
}

void
tls12_record_layer_set_aead(struct tls12_record_layer *rl, const EVP_AEAD *aead)
{
	rl->aead = aead;
}

void
tls12_record_layer_set_cipher_hash(struct tls12_record_layer *rl,
    const EVP_CIPHER *cipher, const EVP_MD *handshake_hash,
    const EVP_MD *mac_hash)
{
	rl->cipher = cipher;
	rl->handshake_hash = handshake_hash;
	rl->mac_hash = mac_hash;
}

void
tls12_record_layer_set_version(struct tls12_record_layer *rl, uint16_t version)
{
	rl->version = version;
	rl->dtls = ((version >> 8) == DTLS1_VERSION_MAJOR);
}

void
tls12_record_layer_set_initial_epoch(struct tls12_record_layer *rl,
    uint16_t epoch)
{
	rl->initial_epoch = epoch;
}

uint16_t
tls12_record_layer_initial_epoch(struct tls12_record_layer *rl)
{
	return rl->initial_epoch;
}

uint16_t
tls12_record_layer_write_epoch(struct tls12_record_layer *rl)
{
	return rl->write->epoch;
}

int
tls12_record_layer_use_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write->epoch == epoch)
		return 1;

	if (rl->write_current->epoch == epoch) {
		rl->write = rl->write_current;
		return 1;
	}

	if (rl->write_previous != NULL && rl->write_previous->epoch == epoch) {
		rl->write = rl->write_previous;
		return 1;
	}

	return 0;
}

void
tls12_record_layer_write_epoch_done(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write_previous == NULL || rl->write_previous->epoch != epoch)
		return;

	rl->write = rl->write_current;

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_clear_read_state(struct tls12_record_layer *rl)
{
	tls12_record_protection_clear(rl->read);
	rl->read->epoch = rl->initial_epoch;
}

void
tls12_record_layer_clear_write_state(struct tls12_record_layer *rl)
{
	tls12_record_protection_clear(rl->write);
	rl->write->epoch = rl->initial_epoch;

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_read_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX **cipher, EVP_MD_CTX **hash)
{
	*cipher = rl->read->cipher_ctx;
	*hash = rl->read->hash_ctx;
}

void
tls12_record_layer_reflect_seq_num(struct tls12_record_layer *rl)
{
	memcpy(rl->write->seq_num, rl->read->seq_num,
	    sizeof(rl->write->seq_num));
}

static const uint8_t tls12_max_seq_num[TLS12_RECORD_SEQ_NUM_LEN] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

int
tls12_record_layer_inc_seq_num(struct tls12_record_layer *rl, uint8_t *seq_num)
{
	CBS max_seq_num;
	int i;

	/*
	 * RFC 5246 section 6.1 and RFC 6347 section 4.1 - both TLS and DTLS
	 * sequence numbers must not wrap. Note that for DTLS the first two
	 * bytes are used as an "epoch" and not part of the sequence number.
	 */
	CBS_init(&max_seq_num, seq_num, TLS12_RECORD_SEQ_NUM_LEN);
	if (rl->dtls) {
		if (!CBS_skip(&max_seq_num, 2))
			return 0;
	}
	if (CBS_mem_equal(&max_seq_num, tls12_max_seq_num,
	    CBS_len(&max_seq_num)))
		return 0;

	for (i = TLS12_RECORD_SEQ_NUM_LEN - 1; i >= 0; i--) {
		if (++seq_num[i] != 0)
			break;
	}

	return 1;
}

static int
tls12_record_layer_set_mac_key(struct tls12_record_protection *rp,
    const uint8_t *mac_key, size_t mac_key_len)
{
	freezero(rp->mac_key, rp->mac_key_len);
	rp->mac_key = NULL;
	rp->mac_key_len = 0;

	if (mac_key == NULL || mac_key_len == 0)
		return 1;

	if ((rp->mac_key = calloc(1, mac_key_len)) == NULL)
		return 0;

	memcpy(rp->mac_key, mac_key, mac_key_len);
	rp->mac_key_len = mac_key_len;

	return 1;
}

static int
tls12_record_layer_ccs_aead(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, CBS *mac_key, CBS *key,
    CBS *iv)
{
	size_t aead_nonce_len;

	if (!tls12_record_protection_unused(rp))
		return 0;

	if ((rp->aead_ctx = calloc(1, sizeof(*rp->aead_ctx))) == NULL)
		return 0;

	/* AES GCM cipher suites use variable nonce in record. */
	if (rl->aead == EVP_aead_aes_128_gcm() ||
	    rl->aead == EVP_aead_aes_256_gcm())
		rp->aead_variable_nonce_in_record = 1;

	/* ChaCha20 Poly1305 XORs the fixed and variable nonces. */
	if (rl->aead == EVP_aead_chacha20_poly1305())
		rp->aead_xor_nonces = 1;

	if (!CBS_stow(iv, &rp->aead_fixed_nonce, &rp->aead_fixed_nonce_len))
		return 0;

	rp->aead_tag_len = EVP_AEAD_max_overhead(rl->aead);
	rp->aead_variable_nonce_len = 8;

	aead_nonce_len = EVP_AEAD_nonce_length(rl->aead);

	if (rp->aead_xor_nonces) {
		/* Fixed nonce length must match, variable must not exceed. */
		if (rp->aead_fixed_nonce_len != aead_nonce_len)
			return 0;
		if (rp->aead_variable_nonce_len > aead_nonce_len)
			return 0;
	} else {
		/* Concatenated nonce length must equal AEAD nonce length. */
		if (rp->aead_fixed_nonce_len +
		    rp->aead_variable_nonce_len != aead_nonce_len)
			return 0;
	}

	if (!EVP_AEAD_CTX_init(rp->aead_ctx, rl->aead, CBS_data(key),
	    CBS_len(key), EVP_AEAD_DEFAULT_TAG_LENGTH, NULL))
		return 0;

	return 1;
}

static int
tls12_record_layer_ccs_cipher(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, CBS *mac_key, CBS *key,
    CBS *iv)
{
	EVP_PKEY *mac_pkey = NULL;
	int gost_param_nid;
	int mac_type;
	int ret = 0;

	if (!tls12_record_protection_unused(rp))
		goto err;

	mac_type = EVP_PKEY_HMAC;
	rp->stream_mac = 0;

	if (CBS_len(iv) > INT_MAX || CBS_len(key) > INT_MAX)
		goto err;
	if (EVP_CIPHER_iv_length(rl->cipher) != CBS_len(iv))
		goto err;
	if (EVP_CIPHER_key_length(rl->cipher) != CBS_len(key))
		goto err;

	/* Special handling for GOST... */
	if (EVP_MD_type(rl->mac_hash) == NID_id_Gost28147_89_MAC) {
		if (CBS_len(mac_key) != 32)
			goto err;
		mac_type = EVP_PKEY_GOSTIMIT;
		rp->stream_mac = 1;
	} else {
		if (CBS_len(mac_key) > INT_MAX)
			goto err;
		if (EVP_MD_size(rl->mac_hash) != CBS_len(mac_key))
			goto err;
	}

	if ((rp->cipher_ctx = EVP_CIPHER_CTX_new()) == NULL)
		goto err;
	if ((rp->hash_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;

	if (!tls12_record_layer_set_mac_key(rp, CBS_data(mac_key),
	    CBS_len(mac_key)))
		goto err;

	if ((mac_pkey = EVP_PKEY_new_mac_key(mac_type, NULL, CBS_data(mac_key),
	    CBS_len(mac_key))) == NULL)
		goto err;

	if (!EVP_CipherInit_ex(rp->cipher_ctx, rl->cipher, NULL, CBS_data(key),
	    CBS_data(iv), is_write))
		goto err;

	if (EVP_DigestSignInit(rp->hash_ctx, NULL, rl->mac_hash, NULL,
	    mac_pkey) <= 0)
		goto err;

	/* More special handling for GOST... */
	if (EVP_CIPHER_type(rl->cipher) == NID_gost89_cnt) {
		gost_param_nid = NID_id_tc26_gost_28147_param_Z;
		if (EVP_MD_type(rl->handshake_hash) == NID_id_GostR3411_94)
			gost_param_nid = NID_id_Gost28147_89_CryptoPro_A_ParamSet;

		if (EVP_CIPHER_CTX_ctrl(rp->cipher_ctx, EVP_CTRL_GOST_SET_SBOX,
		    gost_param_nid, 0) <= 0)
			goto err;

		if (EVP_MD_type(rl->mac_hash) == NID_id_Gost28147_89_MAC) {
			if (EVP_MD_CTX_ctrl(rp->hash_ctx, EVP_MD_CTRL_GOST_SET_SBOX,
			    gost_param_nid, 0) <= 0)
				goto err;
		}
	}

	ret = 1;

 err:
	EVP_PKEY_free(mac_pkey);

	return ret;
}

static int
tls12_record_layer_change_cipher_state(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, CBS *mac_key, CBS *key,
    CBS *iv)
{
	if (rl->aead != NULL)
		return tls12_record_layer_ccs_aead(rl, rp, is_write, mac_key,
		    key, iv);

	return tls12_record_layer_ccs_cipher(rl, rp, is_write, mac_key,
	    key, iv);
}

int
tls12_record_layer_change_read_cipher_state(struct tls12_record_layer *rl,
    CBS *mac_key, CBS *key, CBS *iv)
{
	struct tls12_record_protection *read_new = NULL;
	int ret = 0;

	if ((read_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Read sequence number gets reset to zero. */

	if (!tls12_record_layer_change_cipher_state(rl, read_new, 0,
	    mac_key, key, iv))
		goto err;

	tls12_record_protection_free(rl->read_current);
	rl->read = rl->read_current = read_new;
	read_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(read_new);

	return ret;
}

int
tls12_record_layer_change_write_cipher_state(struct tls12_record_layer *rl,
    CBS *mac_key, CBS *key, CBS *iv)
{
	struct tls12_record_protection *write_new;
	int ret = 0;

	if ((write_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Write sequence number gets reset to zero. */

	/* DTLS epoch is incremented and is permitted to wrap. */
	if (rl->dtls)
		write_new->epoch = rl->write_current->epoch + 1;

	if (!tls12_record_layer_change_cipher_state(rl, write_new, 1,
	    mac_key, key, iv))
		goto err;

	if (rl->dtls) {
		tls12_record_protection_free(rl->write_previous);
		rl->write_previous = rl->write_current;
		rl->write_current = NULL;
	}
	tls12_record_protection_free(rl->write_current);
	rl->write = rl->write_current = write_new;
	write_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(write_new);

	return ret;
}

static int
tls12_record_layer_build_seq_num(struct tls12_record_layer *rl, CBB *cbb,
    uint16_t epoch, uint8_t *seq_num, size_t seq_num_len)
{
	CBS seq;

	CBS_init(&seq, seq_num, seq_num_len);

	if (rl->dtls) {
		if (!CBB_add_u16(cbb, epoch))
			return 0;
		if (!CBS_skip(&seq, 2))
			return 0;
	}

	return CBB_add_bytes(cbb, CBS_data(&seq), CBS_len(&seq));
}

static int
tls12_record_layer_pseudo_header(struct tls12_record_layer *rl,
    uint8_t content_type, uint16_t record_len, CBS *seq_num, uint8_t **out,
    size_t *out_len)
{
	CBB cbb;

	*out = NULL;
	*out_len = 0;

	/* Build the pseudo-header used for MAC/AEAD. */
	if (!CBB_init(&cbb, 13))
		goto err;

	if (!CBB_add_bytes(&cbb, CBS_data(seq_num), CBS_len(seq_num)))
		goto err;
	if (!CBB_add_u8(&cbb, content_type))
		goto err;
	if (!CBB_add_u16(&cbb, rl->version))
		goto err;
	if (!CBB_add_u16(&cbb, record_len))
		goto err;

	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

static int
tls12_record_layer_mac(struct tls12_record_layer *rl, CBB *cbb,
    EVP_MD_CTX *hash_ctx, int stream_mac, CBS *seq_num, uint8_t content_type,
    const uint8_t *content, size_t content_len, size_t *out_len)
{
	EVP_MD_CTX *mac_ctx = NULL;
	uint8_t *header = NULL;
	size_t header_len = 0;
	size_t mac_len;
	uint8_t *mac;
	int ret = 0;

	if ((mac_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;
	if (!EVP_MD_CTX_copy(mac_ctx, hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (EVP_DigestSignUpdate(mac_ctx, header, header_len) <= 0)
		goto err;
	if (EVP_DigestSignUpdate(mac_ctx, content, content_len) <= 0)
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, NULL, &mac_len) <= 0)
		goto err;
	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, mac, &mac_len) <= 0)
		goto err;
	if (mac_len == 0)
		goto err;

	if (stream_mac) {
		if (!EVP_MD_CTX_copy(hash_ctx, mac_ctx))
			goto err;
	}

	*out_len = mac_len;
	ret = 1;

 err:
	EVP_MD_CTX_free(mac_ctx);
	freezero(header, header_len);

	return ret;
}

static int
tls12_record_layer_read_mac_cbc(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t mac_len, size_t padding_len)
{
	uint8_t *header = NULL;
	size_t header_len = 0;
	uint8_t *mac = NULL;
	size_t out_mac_len = 0;
	int ret = 0;

	/*
	 * Must be constant time to avoid leaking details about CBC padding.
	 */

	if (!ssl3_cbc_record_digest_supported(rl->read->hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (!ssl3_cbc_digest_record(rl->read->hash_ctx, mac, &out_mac_len, header,
	    content, content_len + mac_len, content_len + mac_len + padding_len,
	    rl->read->mac_key, rl->read->mac_key_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);

	return ret;
}

static int
tls12_record_layer_read_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	size_t out_len;

	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		return 0;

	return tls12_record_layer_mac(rl, cbb, rl->read->hash_ctx,
	    rl->read->stream_mac, seq_num, content_type, content, content_len,
	    &out_len);
}

static int
tls12_record_layer_write_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t *out_len)
{
	return tls12_record_layer_mac(rl, cbb, rl->write->hash_ctx,
	    rl->write->stream_mac, seq_num, content_type, content, content_len,
	    out_len);
}

static int
tls12_record_layer_aead_concat_nonce(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, CBS *seq_num,
    uint8_t **out, size_t *out_len)
{
	CBB cbb;

	if (rp->aead_variable_nonce_len > CBS_len(seq_num))
		return 0;

	/* Fixed nonce and variable nonce (sequence number) are concatenated. */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_bytes(&cbb, rp->aead_fixed_nonce,
	    rp->aead_fixed_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, CBS_data(seq_num),
	    rp->aead_variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

static int
tls12_record_layer_aead_xored_nonce(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, CBS *seq_num,
    uint8_t **out, size_t *out_len)
{
	uint8_t *nonce = NULL;
	size_t nonce_len = 0;
	uint8_t *pad;
	CBB cbb;
	int i;

	if (rp->aead_variable_nonce_len > CBS_len(seq_num))
		return 0;
	if (rp->aead_fixed_nonce_len < rp->aead_variable_nonce_len)
		return 0;

	/*
	 * Variable nonce (sequence number) is right padded, before the fixed
	 * nonce is XOR'd in.
	 */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_space(&cbb, &pad,
	    rp->aead_fixed_nonce_len - rp->aead_variable_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, CBS_data(seq_num),
	    rp->aead_variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, &nonce, &nonce_len))
		goto err;

	for (i = 0; i < rp->aead_fixed_nonce_len; i++)
		nonce[i] ^= rp->aead_fixed_nonce[i];

	*out = nonce;
	*out_len = nonce_len;

	return 1;

 err:
	CBB_cleanup(&cbb);
	freezero(nonce, nonce_len);

	return 0;
}

static int
tls12_record_layer_open_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	if (tls12_record_protection_engaged(rl->read))
		return 0;

	/* XXX - decrypt/process in place for now. */
	*out = (uint8_t *)CBS_data(fragment);
	*out_len = CBS_len(fragment);

	return 1;
}

static int
tls12_record_layer_open_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	struct tls12_record_protection *rp = rl->read;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	uint8_t *plain;
	size_t plain_len;
	CBS var_nonce;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (rp->aead_xor_nonces) {
		if (!tls12_record_layer_aead_xored_nonce(rl, rp,
		    seq_num, &nonce, &nonce_len))
			goto err;
	} else if (rp->aead_variable_nonce_in_record) {
		if (!CBS_get_bytes(fragment, &var_nonce,
		    rp->aead_variable_nonce_len))
			goto err;
		if (!tls12_record_layer_aead_concat_nonce(rl, rp,
		    &var_nonce, &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, rp,
		    seq_num, &nonce, &nonce_len))
			goto err;
	}

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	if (CBS_len(fragment) < rp->aead_tag_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment) - rp->aead_tag_len;

	if (!tls12_record_layer_pseudo_header(rl, content_type, plain_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!EVP_AEAD_CTX_open(rp->aead_ctx, plain, out_len, plain_len,
	    nonce, nonce_len, CBS_data(fragment), CBS_len(fragment),
	    header, header_len)) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (*out_len > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	if (*out_len != plain_len)
		goto err;

	*out = plain;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

static int
tls12_record_layer_open_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	SSL3_RECORD_INTERNAL rrec;
	size_t block_size, eiv_len;
	uint8_t *mac = NULL;
	size_t mac_len = 0;
	uint8_t *out_mac = NULL;
	size_t out_mac_len = 0;
	uint8_t *plain;
	size_t plain_len;
	size_t min_len;
	CBB cbb_mac;
	int ret = 0;

	memset(&cbb_mac, 0, sizeof(cbb_mac));
	memset(&rrec, 0, sizeof(rrec));

	if (!tls12_record_protection_block_size(rl->read, &block_size))
		goto err;

	/* Determine explicit IV length. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->read, &eiv_len))
			goto err;
	}

	mac_len = 0;
	if (rl->read->hash_ctx != NULL) {
		if (!tls12_record_protection_mac_len(rl->read, &mac_len))
			goto err;
	}

	/* CBC has at least one padding byte. */
	min_len = eiv_len + mac_len;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		min_len += 1;

	if (CBS_len(fragment) < min_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}
	if (CBS_len(fragment) % block_size != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment);

	if (!EVP_Cipher(enc, plain, CBS_data(fragment), plain_len))
		goto err;

	rrec.data = plain;
	rrec.input = plain;
	rrec.length = plain_len;

	/*
	 * We now have to remove padding, extract MAC, calculate MAC
	 * and compare MAC in constant time.
	 */
	if (block_size > 1)
		ssl3_cbc_remove_padding(&rrec, eiv_len, mac_len);

	if ((mac = calloc(1, mac_len)) == NULL)
		goto err;

	if (!CBB_init(&cbb_mac, EVP_MAX_MD_SIZE))
		goto err;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE) {
		ssl3_cbc_copy_mac(mac, &rrec, mac_len, rrec.length +
		    rrec.padding_length);
		rrec.length -= mac_len;
		if (!tls12_record_layer_read_mac_cbc(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length, mac_len,
		    rrec.padding_length))
			goto err;
	} else {
		rrec.length -= mac_len;
		memcpy(mac, rrec.data + rrec.length, mac_len);
		if (!tls12_record_layer_read_mac(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length))
			goto err;
	}
	if (!CBB_finish(&cbb_mac, &out_mac, &out_mac_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	if (timingsafe_memcmp(mac, out_mac, mac_len) != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (rrec.length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (rrec.length > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	*out = rrec.data;
	*out_len = rrec.length;

	ret = 1;

 err:
	CBB_cleanup(&cbb_mac);
	freezero(mac, mac_len);
	freezero(out_mac, out_mac_len);

	return ret;
}

int
tls12_record_layer_open_record(struct tls12_record_layer *rl, uint8_t *buf,
    size_t buf_len, uint8_t **out, size_t *out_len)
{
	CBS cbs, fragment, seq_num;
	uint16_t version;
	uint8_t content_type;

	CBS_init(&cbs, buf, buf_len);
	CBS_init(&seq_num, rl->read->seq_num, sizeof(rl->read->seq_num));

	if (!CBS_get_u8(&cbs, &content_type))
		return 0;
	if (!CBS_get_u16(&cbs, &version))
		return 0;
	if (rl->dtls) {
		/*
		 * The DTLS sequence number is split into a 16 bit epoch and
		 * 48 bit sequence number, however for the purposes of record
		 * processing it is treated the same as a TLS 64 bit sequence
		 * number. DTLS also uses explicit read sequence numbers, which
		 * we need to extract from the DTLS record header.
		 */
		if (!CBS_get_bytes(&cbs, &seq_num, SSL3_SEQUENCE_SIZE))
			return 0;
		if (!CBS_write_bytes(&seq_num, rl->read->seq_num,
		    sizeof(rl->read->seq_num), NULL))
			return 0;
	}
	if (!CBS_get_u16_length_prefixed(&cbs, &fragment))
		return 0;

	if (rl->read->aead_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_aead(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else if (rl->read->cipher_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_cipher(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else {
		if (!tls12_record_layer_open_record_plaintext(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	}

	if (!rl->dtls) {
		if (!tls12_record_layer_inc_seq_num(rl, rl->read->seq_num))
			return 0;
	}

	return 1;
}

static int
tls12_record_layer_seal_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	if (tls12_record_protection_engaged(rl->write))
		return 0;

	return CBB_add_bytes(out, content, content_len);
}

static int
tls12_record_layer_seal_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	struct tls12_record_protection *rp = rl->write;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	size_t enc_record_len, out_len;
	uint8_t *enc_data;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (rp->aead_xor_nonces) {
		if (!tls12_record_layer_aead_xored_nonce(rl, rp,
		    seq_num, &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, rp,
		    seq_num, &nonce, &nonce_len))
			goto err;
	}

	if (rp->aead_variable_nonce_in_record) {
		if (rp->aead_variable_nonce_len > CBS_len(seq_num))
			goto err;
		if (!CBB_add_bytes(out, CBS_data(seq_num),
		    rp->aead_variable_nonce_len))
			goto err;
	}

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	enc_record_len = content_len + rp->aead_tag_len;
	if (enc_record_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;
	if (!CBB_add_space(out, &enc_data, enc_record_len))
		goto err;

	if (!EVP_AEAD_CTX_seal(rp->aead_ctx, enc_data, &out_len, enc_record_len,
	    nonce, nonce_len, content, content_len, header, header_len))
		goto err;

	if (out_len != enc_record_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

static int
tls12_record_layer_seal_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	EVP_CIPHER_CTX *enc = rl->write->cipher_ctx;
	size_t block_size, eiv_len, mac_len, pad_len;
	uint8_t *enc_data, *eiv, *pad, pad_val;
	uint8_t *plain = NULL;
	size_t plain_len = 0;
	int ret = 0;
	CBB cbb;

	if (!CBB_init(&cbb, SSL3_RT_MAX_PLAIN_LENGTH))
		goto err;

	/* Add explicit IV if necessary. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
			goto err;
	}
	if (eiv_len > 0) {
		if (!CBB_add_space(&cbb, &eiv, eiv_len))
			goto err;
		arc4random_buf(eiv, eiv_len);
	}

	if (!CBB_add_bytes(&cbb, content, content_len))
		goto err;

	mac_len = 0;
	if (rl->write->hash_ctx != NULL) {
		if (!tls12_record_layer_write_mac(rl, &cbb, content_type,
		    seq_num, content, content_len, &mac_len))
			goto err;
	}

	plain_len = eiv_len + content_len + mac_len;

	/* Add padding to block size, if necessary. */
	if (!tls12_record_protection_block_size(rl->write, &block_size))
		goto err;
	if (block_size > 1) {
		pad_len = block_size - (plain_len % block_size);
		pad_val = pad_len - 1;

		if (pad_len > 255)
			goto err;
		if (!CBB_add_space(&cbb, &pad, pad_len))
			goto err;
		memset(pad, pad_val, pad_len);
	}

	if (!CBB_finish(&cbb, &plain, &plain_len))
		goto err;

	if (plain_len % block_size != 0)
		goto err;
	if (plain_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;

	if (!CBB_add_space(out, &enc_data, plain_len))
		goto err;
	if (!EVP_Cipher(enc, enc_data, plain, plain_len))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&cbb);
	freezero(plain, plain_len);

	return ret;
}

int
tls12_record_layer_seal_record(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *cbb)
{
	uint8_t *seq_num_data = NULL;
	size_t seq_num_len = 0;
	CBB fragment, seq_num_cbb;
	CBS seq_num;
	int ret = 0;

	/*
	 * Construct the effective sequence number - this is used in both
	 * the DTLS header and for MAC calculations.
	 */
	if (!CBB_init(&seq_num_cbb, SSL3_SEQUENCE_SIZE))
		goto err;
	if (!tls12_record_layer_build_seq_num(rl, &seq_num_cbb, rl->write->epoch,
	    rl->write->seq_num, sizeof(rl->write->seq_num)))
		goto err;
	if (!CBB_finish(&seq_num_cbb, &seq_num_data, &seq_num_len))
		goto err;
	CBS_init(&seq_num, seq_num_data, seq_num_len);

	if (!CBB_add_u8(cbb, content_type))
		goto err;
	if (!CBB_add_u16(cbb, rl->version))
		goto err;
	if (rl->dtls) {
		if (!CBB_add_bytes(cbb, CBS_data(&seq_num), CBS_len(&seq_num)))
			goto err;
	}
	if (!CBB_add_u16_length_prefixed(cbb, &fragment))
		goto err;

	if (rl->write->aead_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_aead(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else if (rl->write->cipher_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_cipher(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else {
		if (!tls12_record_layer_seal_record_plaintext(rl,
		    content_type, content, content_len, &fragment))
			goto err;
	}

	if (!CBB_flush(cbb))
		goto err;

	if (!tls12_record_layer_inc_seq_num(rl, rl->write->seq_num))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&seq_num_cbb);
	free(seq_num_data);

	return ret;
}