author    Denys Vlasenko <vda.linux@googlemail.com>  2018-11-23 18:48:20 +0100
committer Denys Vlasenko <vda.linux@googlemail.com>  2018-11-23 18:48:20 +0100
commit    219c9d4b5d12b3b965da838eb467b955ef928170 (patch)
tree      12910b1dfa2c227edcc694abdb6db4b1daffe906
parent    ecc9090cfcccf412288147f385808f8f9df97ebe (diff)
tls: code shrink
function                                             old     new   delta
xwrite_encrypted                                     599     585     -14

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
-rw-r--r--  include/platform.h |  5
-rw-r--r--  networking/tls.c   | 31
2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/include/platform.h b/include/platform.h
index c365d5c8c..50365a31c 100644
--- a/include/platform.h
+++ b/include/platform.h
@@ -236,6 +236,7 @@ typedef uint64_t bb__aliased_uint64_t FIX_ALIASING;
 # define move_from_unaligned32(v, u32p) ((v) = *(bb__aliased_uint32_t*)(u32p))
 # define move_to_unaligned16(u16p, v) (*(bb__aliased_uint16_t*)(u16p) = (v))
 # define move_to_unaligned32(u32p, v) (*(bb__aliased_uint32_t*)(u32p) = (v))
+# define move_to_unaligned64(u64p, v) (*(bb__aliased_uint64_t*)(u64p) = (v))
 /* #elif ... - add your favorite arch today! */
 #else
 # define BB_UNALIGNED_MEMACCESS_OK 0
@@ -252,6 +253,10 @@ typedef uint64_t bb__aliased_uint64_t FIX_ALIASING;
 	uint32_t __t = (v); \
 	memcpy((u32p), &__t, 4); \
 } while (0)
+# define move_to_unaligned64(u64p, v) do { \
+	uint64_t __t = (v); \
+	memcpy((u64p), &__t, 8); \
+} while (0)
 #endif
 
 /* Unaligned, fixed-endian accessors */
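For context (not part of the commit), here is a minimal standalone sketch of the two forms of move_to_unaligned64 added above: the direct-store form used when BB_UNALIGNED_MEMACCESS_OK is set, and the portable memcpy fallback. The may_alias attribute stands in for busybox's FIX_ALIASING macro; the _fast/_safe names are only illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t bb__aliased_uint64_t __attribute__((__may_alias__));

/* direct-store form: only safe where unaligned access is known to be OK */
#define move_to_unaligned64_fast(u64p, v) (*(bb__aliased_uint64_t*)(u64p) = (v))

/* portable fallback: a memcpy that compilers lower to a single store
 * on targets that tolerate unaligned access */
#define move_to_unaligned64_safe(u64p, v) do { \
	uint64_t __t = (v); \
	memcpy((u64p), &__t, 8); \
} while (0)

int main(void)
{
	uint8_t buf[16] = { 0 };

	/* write a 64-bit value at an odd, unaligned offset */
	move_to_unaligned64_safe(buf + 1, 0x1122334455667788ULL);

	for (int i = 0; i < 16; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}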
diff --git a/networking/tls.c b/networking/tls.c
index 3b4f1b7e2..2a0098674 100644
--- a/networking/tls.c
+++ b/networking/tls.c
@@ -788,14 +788,15 @@ static void xwrite_encrypted_aesgcm(tls_state_t *tls, unsigned size, unsigned ty
 {
 #define COUNTER(v) (*(uint32_t*)(v + 12))
 
-	uint8_t aad[13 + 3]; /* +3 creates [16] buffer, simplifying GHASH() */
-	uint8_t nonce[12 + 4]; /* +4 creates space for AES block counter */
-	uint8_t scratch[AES_BLOCK_SIZE]; //[16]
-	uint8_t authtag[AES_BLOCK_SIZE]; //[16]
+	uint8_t aad[13 + 3] ALIGNED(4); /* +3 creates [16] buffer, simplifying GHASH() */
+	uint8_t nonce[12 + 4] ALIGNED(4); /* +4 creates space for AES block counter */
+	uint8_t scratch[AES_BLOCK_SIZE] ALIGNED(4); //[16]
+	uint8_t authtag[AES_BLOCK_SIZE] ALIGNED(4); //[16]
 	uint8_t *buf;
 	struct record_hdr *xhdr;
 	unsigned remaining;
 	unsigned cnt;
+	uint64_t t64;
 
 	buf = tls->outbuf + OUTBUF_PFX; /* see above for the byte it points to */
 	dump_hex("xwrite_encrypted_aesgcm plaintext:%s\n", buf, size);
@@ -810,13 +811,13 @@ static void xwrite_encrypted_aesgcm(tls_state_t *tls, unsigned size, unsigned ty
 	/* set aad[12], and clear aad[13..15] */
 	COUNTER(aad) = SWAP_LE32(size & 0xff);
 
 	memcpy(nonce, tls->client_write_IV, 4);
-	memcpy(nonce + 4, &tls->write_seq64_be, 8);
-	memcpy(aad, &tls->write_seq64_be, 8);
-	memcpy(buf - 8, &tls->write_seq64_be, 8);
-//optimize
+	t64 = tls->write_seq64_be;
+	move_to_unaligned64(nonce + 4, t64);
+	move_to_unaligned64(aad, t64);
+	move_to_unaligned64(buf - 8, t64);
 	/* seq64 is not used later in this func, can increment here */
-	tls->write_seq64_be = SWAP_BE64(1 + SWAP_BE64(tls->write_seq64_be));
+	tls->write_seq64_be = SWAP_BE64(1 + SWAP_BE64(t64));
 
 	cnt = 1;
 	remaining = size;
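The last changed line in this hunk keeps the record sequence number in big-endian memory order, so incrementing it means swapping to host order, adding 1, and swapping back. A small sketch (hypothetical stand-ins, assuming a little-endian host) of how SWAP_BE64(1 + SWAP_BE64(t64)) behaves:

#include <stdint.h>
#include <stdio.h>

/* stand-in for busybox's SWAP_BE64 on a little-endian host */
#define SWAP_BE64(v) __builtin_bswap64(v)

int main(void)
{
	uint64_t seq_be = SWAP_BE64(41);  /* stored big-endian, like write_seq64_be */

	/* read once into a temporary (t64 in the patch), then bump the stored
	 * copy: swap to host order, add 1, swap back */
	uint64_t t64 = seq_be;
	seq_be = SWAP_BE64(1 + SWAP_BE64(t64));

	printf("next seq = %llu\n", (unsigned long long)SWAP_BE64(seq_be)); /* prints 42 */
	return 0;
}

Caching the value in t64 also lets the three move_to_unaligned64() calls reuse a single load, which plausibly accounts for part of the 14-byte shrink reported above.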
@@ -923,13 +924,14 @@ static void tls_aesgcm_decrypt(tls_state_t *tls, uint8_t *buf, int size)
 {
 #define COUNTER(v) (*(uint32_t*)(v + 12))
 
-	//uint8_t aad[13 + 3]; /* +3 creates [16] buffer, simplifying GHASH() */
-	uint8_t nonce[12 + 4]; /* +4 creates space for AES block counter */
-	uint8_t scratch[AES_BLOCK_SIZE]; //[16]
-	//uint8_t authtag[AES_BLOCK_SIZE]; //[16]
+	//uint8_t aad[13 + 3] ALIGNED(4); /* +3 creates [16] buffer, simplifying GHASH() */
+	uint8_t nonce[12 + 4] ALIGNED(4); /* +4 creates space for AES block counter */
+	uint8_t scratch[AES_BLOCK_SIZE] ALIGNED(4); //[16]
+	//uint8_t authtag[AES_BLOCK_SIZE] ALIGNED(4); //[16]
 	unsigned remaining;
 	unsigned cnt;
 
+	//memcpy(aad, buf, 8);
 	//aad[8] = type;
 	//aad[9] = TLS_MAJ;
 	//aad[10] = TLS_MIN;
@@ -937,7 +939,6 @@ static void tls_aesgcm_decrypt(tls_state_t *tls, uint8_t *buf, int size)
 	///* set aad[12], and clear aad[13..15] */
 	//COUNTER(aad) = SWAP_LE32(size & 0xff);
 
-	//memcpy(aad, &tls->write_seq64_be, 8);
 	memcpy(nonce, tls->server_write_IV, 4);
 	memcpy(nonce + 4, buf, 8);
 	buf += 8;
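A closing note on the ALIGNED(4) annotations added in both functions: COUNTER() casts the byte buffers to uint32_t*, which is only well-defined when the buffer is 4-byte aligned. A minimal sketch of the idea, assuming ALIGNED expands to __attribute__((aligned(n))) as in busybox's platform.h:

#include <stdint.h>
#include <stdio.h>

#define ALIGNED(n) __attribute__((aligned(n)))
#define COUNTER(v) (*(uint32_t*)(v + 12))

int main(void)
{
	/* without ALIGNED(4) the array could land at any address, and the
	 * uint32_t* cast in COUNTER() would be undefined behavior (and a
	 * fault on strict-alignment targets) */
	uint8_t nonce[12 + 4] ALIGNED(4) = { 0 };

	COUNTER(nonce) = 1;
	printf("counter = %u\n", (unsigned)COUNTER(nonce));
	return 0;
}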