author		jsing <>	2025-05-16 15:09:26 +0000
committer	jsing <>	2025-05-16 15:09:26 +0000
commit		11049a84952a2f4e389b590cd2cd18b4d3cb463b (patch)
tree		cc89b65d0f6265dadc83bd1eeb939eefc33ee71d /src/lib/libcrypto/modes/gcm128.c
parent		9b71ad45c64159fa16d4924d5e2dfa9b4cdb60d8 (diff)
Replace GCM_MUL/GHASH defines with static inline functions.
Rather than having defines for GCM_MUL/GHASH (along with the wonder that
is GCM_FUNCREF_4BIT) and then conditioning call sites on their availability,
provide and call gcm_mul()/gcm_ghash() unconditionally. This simplifies all
of the call sites.
ok tb@
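
In outline, the change swaps per-configuration macros (rebound to function
pointers under GCM_FUNCREF_4BIT) for a uniform pair of static inline
wrappers. A condensed before/after sketch of the pattern, abridged from the
diff below; this shows the variant that dispatches through the context's
gmult/ghash pointers:

	/* Before: macros, rebound via GCM_FUNCREF_4BIT; call sites #ifdef'd. */
	#define GCM_MUL(ctx,Xi)		(*gcm_gmult_p)(ctx->Xi.u,ctx->Htable)
	#define GHASH(ctx,in,len)	(*gcm_ghash_p)(ctx->Xi.u,ctx->Htable,in,len)

	/* After: the same two inline functions exist in every configuration,
	 * so call sites need no conditionals. */
	static inline void
	gcm_mul(GCM128_CONTEXT *ctx, u64 u[2])
	{
		ctx->gmult(u, ctx->Htable);
	}

	static inline void
	gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len)
	{
		ctx->ghash(ctx->Xi.u, ctx->Htable, in, len);
	}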
Diffstat
-rw-r--r--  src/lib/libcrypto/modes/gcm128.c  220
 1 file changed, 99 insertions(+), 121 deletions(-)
diff --git a/src/lib/libcrypto/modes/gcm128.c b/src/lib/libcrypto/modes/gcm128.c
index 21ba9eef57..422aa7d499 100644
--- a/src/lib/libcrypto/modes/gcm128.c
+++ b/src/lib/libcrypto/modes/gcm128.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: gcm128.c,v 1.35 2025/04/25 12:08:53 jsing Exp $ */
+/* $OpenBSD: gcm128.c,v 1.36 2025/05/16 15:09:26 jsing Exp $ */
 /* ====================================================================
  * Copyright (c) 2010 The OpenSSL Project. All rights reserved.
  *
@@ -224,7 +224,26 @@ gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
 	Xi[0] = htobe64(Z.hi);
 	Xi[1] = htobe64(Z.lo);
 }
-#define GCM_MUL(ctx,Xi)	gcm_gmult_8bit(ctx->Xi.u,ctx->Htable)
+
+static inline void
+gcm_mul(GCM128_CONTEXT *ctx, u64 u[2])
+{
+	gcm_gmult_8bit(u, ctx->Htable);
+}
+
+static inline void
+gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len)
+{
+	size_t i;
+
+	while (len >= 16) {
+		for (i = 0; i < 16; ++i)
+			ctx->Xi.c[i] ^= in[i];
+		gcm_mul(ctx, ctx->Xi.u);
+		in += 16;
+		len -= 16;
+	}
+}
 
 #elif TABLE_BITS==4
 
@@ -487,17 +506,41 @@ gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
 		Xi[1] = htobe64(Z.lo);
 	} while (inp += 16, len -= 16);
 }
+
+static inline void
+gcm_mul(GCM128_CONTEXT *ctx, u64 u[2])
+{
+	gcm_gmult_4bit(u, ctx->Htable);
+}
+
+static inline void
+gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len)
+{
+	gcm_ghash_4bit(ctx->Xi.u, ctx->Htable, in, len);
+}
 #else
 void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
 void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
     size_t len);
+
+static inline void
+gcm_mul(GCM128_CONTEXT *ctx, u64 u[2])
+{
+	ctx->gmult(u, ctx->Htable);
+}
+
+static inline void
+gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len)
+{
+	ctx->ghash(ctx->Xi.u, ctx->Htable, in, len);
+}
 #endif
 
-#define GCM_MUL(ctx,Xi)	gcm_gmult_4bit(ctx->Xi.u,ctx->Htable)
-#define GHASH(ctx,in,len) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len)
-/* GHASH_CHUNK is "stride parameter" missioned to mitigate cache
+/*
+ * GHASH_CHUNK is "stride parameter" missioned to mitigate cache
  * trashing effect. In other words idea is to hash data while it's
- * still in L1 cache after encryption pass... */
+ * still in L1 cache after encryption pass...
+ */
 #define GHASH_CHUNK	(3*1024)
 
 #else /* TABLE_BITS */
@@ -528,8 +571,26 @@ gcm_gmult_1bit(u64 Xi[2], const u64 H[2])
 	Xi[0] = htobe64(Z.hi);
 	Xi[1] = htobe64(Z.lo);
 }
-#define GCM_MUL(ctx,Xi)	gcm_gmult_1bit(ctx->Xi.u,ctx->H.u)
 
+static inline void
+gcm_mul(GCM128_CONTEXT *ctx, u64 u[2])
+{
+	gcm_gmult_1bit(u, ctx->H.u);
+}
+
+static inline void
+gcm_ghash(GCM128_CONTEXT *ctx, const uint8_t *in, size_t len)
+{
+	size_t i;
+
+	while (len >= 16) {
+		for (i = 0; i < 16; ++i)
+			ctx->Xi.c[i] ^= in[i];
+		gcm_mul(ctx, ctx->Xi.u);
+		in += 16;
+		len -= 16;
+	}
+}
 #endif
 
 #if defined(GHASH_ASM) && \
@@ -544,7 +605,6 @@ gcm_gmult_1bit(u64 Xi[2], const u64 H[2])
     defined(__x86_64) || defined(__x86_64__) || \
     defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
 # define GHASH_ASM_X86_OR_64
-# define GCM_FUNCREF_4BIT
 
 void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]);
 void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]);
@@ -565,7 +625,6 @@ void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp,
 # include "arm_arch.h"
 # if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
 #  define GHASH_ASM_ARM
-#  define GCM_FUNCREF_4BIT
 void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
 void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
     size_t len);
@@ -573,15 +632,6 @@ void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
 # endif
 #endif
 
-#ifdef GCM_FUNCREF_4BIT
-# undef GCM_MUL
-# define GCM_MUL(ctx,Xi)	(*gcm_gmult_p)(ctx->Xi.u,ctx->Htable)
-# ifdef GHASH
-#  undef GHASH
-#  define GHASH(ctx,in,len)	(*gcm_ghash_p)(ctx->Xi.u,ctx->Htable,in,len)
-# endif
-#endif
-
 void
 CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
 {
@@ -646,9 +696,6 @@ void
 CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, size_t len)
 {
 	unsigned int ctr;
-#ifdef GCM_FUNCREF_4BIT
-	void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-#endif
 
 	ctx->Yi.u[0] = 0;
 	ctx->Yi.u[1] = 0;
@@ -670,19 +717,19 @@ CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, size_t len)
 		while (len >= 16) {
 			for (i = 0; i < 16; ++i)
 				ctx->Yi.c[i] ^= iv[i];
-			GCM_MUL(ctx, Yi);
+			gcm_mul(ctx, ctx->Yi.u);
 			iv += 16;
 			len -= 16;
 		}
 		if (len) {
 			for (i = 0; i < len; ++i)
 				ctx->Yi.c[i] ^= iv[i];
-			GCM_MUL(ctx, Yi);
+			gcm_mul(ctx, ctx->Yi.u);
 		}
 		len0 <<= 3;
 		ctx->Yi.u[1] ^= htobe64(len0);
 
-		GCM_MUL(ctx, Yi);
+		gcm_mul(ctx, ctx->Yi.u);
 
 		ctr = be32toh(ctx->Yi.d[3]);
 	}
@@ -699,13 +746,6 @@ CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, size_t len)
 	size_t i;
 	unsigned int n;
 	u64 alen = ctx->len.u[0];
-#ifdef GCM_FUNCREF_4BIT
-	void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
-	void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
-	    const u8 *inp, size_t len) = ctx->ghash;
-# endif
-#endif
 
 	if (ctx->len.u[1])
 		return -2;
@@ -723,28 +763,18 @@ CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, size_t len)
 			n = (n + 1) % 16;
 		}
 		if (n == 0)
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 		else {
 			ctx->ares = n;
 			return 0;
 		}
 	}
 
-#ifdef GHASH
 	if ((i = (len & (size_t)-16))) {
-		GHASH(ctx, aad, i);
+		gcm_ghash(ctx, aad, i);
 		aad += i;
 		len -= i;
 	}
-#else
-	while (len >= 16) {
-		for (i = 0; i < 16; ++i)
-			ctx->Xi.c[i] ^= aad[i];
-		GCM_MUL(ctx, Xi);
-		aad += 16;
-		len -= 16;
-	}
-#endif
 	if (len) {
 		n = (unsigned int)len;
 		for (i = 0; i < len; ++i)
@@ -766,13 +796,6 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 	u64 mlen = ctx->len.u[1];
 	block128_f block = ctx->block;
 	void *key = ctx->key;
-#ifdef GCM_FUNCREF_4BIT
-	void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
-	void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
-	    const u8 *inp, size_t len) = ctx->ghash;
-# endif
-#endif
 
 	mlen += len;
 	if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
@@ -781,7 +804,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 
 	if (ctx->ares) {
 		/* First call to encrypt finalizes GHASH(AAD) */
-		GCM_MUL(ctx, Xi);
+		gcm_mul(ctx, ctx->Xi.u);
 		ctx->ares = 0;
 	}
 
@@ -798,7 +821,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 			n = (n + 1) % 16;
 		}
 		if (n == 0)
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 		else {
 			ctx->mres = n;
 			return 0;
@@ -808,7 +831,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 		if (((size_t)in|(size_t)out) % sizeof(size_t) != 0)
 			break;
 #endif
-#if defined(GHASH) && defined(GHASH_CHUNK)
+#if defined(GHASH_CHUNK)
 		while (len >= GHASH_CHUNK) {
 			size_t j = GHASH_CHUNK;
 
@@ -827,7 +850,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 				in += 16;
 				j -= 16;
 			}
-			GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
+			gcm_ghash(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
 			len -= GHASH_CHUNK;
 		}
 		if ((i = (len & (size_t)-16))) {
@@ -848,7 +871,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 				in += 16;
 				len -= 16;
 			}
-			GHASH(ctx, out - j, j);
+			gcm_ghash(ctx, out - j, j);
 		}
 #else
 		while (len >= 16) {
@@ -862,7 +885,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 			for (i = 0; i < 16/sizeof(size_t); ++i)
 				ctx->Xi.t[i] ^=
 				    out_t[i] = in_t[i] ^ ctx->EKi.t[i];
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 			out += 16;
 			in += 16;
 			len -= 16;
@@ -892,7 +915,7 @@ CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 		ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
 		n = (n + 1) % 16;
 		if (n == 0)
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 	}
 
 	ctx->mres = n;
@@ -910,13 +933,6 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 	u64 mlen = ctx->len.u[1];
 	block128_f block = ctx->block;
 	void *key = ctx->key;
-#ifdef GCM_FUNCREF_4BIT
-	void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
-	void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
-	    const u8 *inp, size_t len) = ctx->ghash;
-# endif
-#endif
 
 	mlen += len;
 	if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
@@ -925,7 +941,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 
 	if (ctx->ares) {
 		/* First call to decrypt finalizes GHASH(AAD) */
-		GCM_MUL(ctx, Xi);
+		gcm_mul(ctx, ctx->Xi.u);
 		ctx->ares = 0;
 	}
 
@@ -943,7 +959,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 			n = (n + 1) % 16;
 		}
 		if (n == 0)
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 		else {
 			ctx->mres = n;
 			return 0;
@@ -953,11 +969,11 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 		if (((size_t)in|(size_t)out) % sizeof(size_t) != 0)
 			break;
 #endif
-#if defined(GHASH) && defined(GHASH_CHUNK)
+#if defined(GHASH_CHUNK)
 		while (len >= GHASH_CHUNK) {
 			size_t j = GHASH_CHUNK;
 
-			GHASH(ctx, in, GHASH_CHUNK);
+			gcm_ghash(ctx, in, GHASH_CHUNK);
 			while (j) {
 				size_t *out_t = (size_t *)out;
 				const size_t *in_t = (const size_t *)in;
@@ -976,7 +992,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 			len -= GHASH_CHUNK;
 		}
 		if ((i = (len & (size_t)-16))) {
-			GHASH(ctx, in, i);
+			gcm_ghash(ctx, in, i);
 			while (len >= 16) {
 				size_t *out_t = (size_t *)out;
 				const size_t *in_t = (const size_t *)in;
@@ -1007,7 +1023,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 				out_t[i] = c ^ ctx->EKi.t[i];
 				ctx->Xi.t[i] ^= c;
 			}
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 			out += 16;
 			in += 16;
 			len -= 16;
@@ -1041,7 +1057,7 @@ CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 		ctx->Xi.c[n] ^= c;
 		n = (n + 1) % 16;
 		if (n == 0)
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 	}
 
 	ctx->mres = n;
@@ -1058,13 +1074,6 @@ CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
 	size_t i;
 	u64 mlen = ctx->len.u[1];
 	void *key = ctx->key;
-#ifdef GCM_FUNCREF_4BIT
-	void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
-	void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
-	    const u8 *inp, size_t len) = ctx->ghash;
-# endif
-#endif
 
 	mlen += len;
 	if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
@@ -1073,7 +1082,7 @@ CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
 
 	if (ctx->ares) {
 		/* First call to encrypt finalizes GHASH(AAD) */
-		GCM_MUL(ctx, Xi);
+		gcm_mul(ctx, ctx->Xi.u);
 		ctx->ares = 0;
 	}
 
@@ -1087,18 +1096,18 @@ CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
 			n = (n + 1) % 16;
 		}
 		if (n == 0)
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 		else {
 			ctx->mres = n;
 			return 0;
 		}
 	}
-#if defined(GHASH) && defined(GHASH_CHUNK)
+#if defined(GHASH_CHUNK)
 	while (len >= GHASH_CHUNK) {
 		(*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c);
 		ctr += GHASH_CHUNK/16;
 		ctx->Yi.d[3] = htobe32(ctr);
-		GHASH(ctx, out, GHASH_CHUNK);
+		gcm_ghash(ctx, out, GHASH_CHUNK);
 		out += GHASH_CHUNK;
 		in += GHASH_CHUNK;
 		len -= GHASH_CHUNK;
@@ -1112,17 +1121,8 @@ CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
 		ctx->Yi.d[3] = htobe32(ctr);
 		in += i;
 		len -= i;
-#if defined(GHASH)
-		GHASH(ctx, out, i);
+		gcm_ghash(ctx, out, i);
 		out += i;
-#else
-		while (j--) {
-			for (i = 0; i < 16; ++i)
-				ctx->Xi.c[i] ^= out[i];
-			GCM_MUL(ctx, Xi);
-			out += 16;
-		}
-#endif
 	}
 	if (len) {
 		(*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
@@ -1148,13 +1148,6 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
 	size_t i;
 	u64 mlen = ctx->len.u[1];
 	void *key = ctx->key;
-#ifdef GCM_FUNCREF_4BIT
-	void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
-	void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
-	    const u8 *inp, size_t len) = ctx->ghash;
-# endif
-#endif
 
 	mlen += len;
 	if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
@@ -1163,7 +1156,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
 
 	if (ctx->ares) {
 		/* First call to decrypt finalizes GHASH(AAD) */
-		GCM_MUL(ctx, Xi);
+		gcm_mul(ctx, ctx->Xi.u);
 		ctx->ares = 0;
 	}
 
@@ -1179,15 +1172,15 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
 			n = (n + 1) % 16;
 		}
 		if (n == 0)
-			GCM_MUL(ctx, Xi);
+			gcm_mul(ctx, ctx->Xi.u);
 		else {
 			ctx->mres = n;
 			return 0;
 		}
 	}
-#if defined(GHASH) && defined(GHASH_CHUNK)
+#if defined(GHASH_CHUNK)
 	while (len >= GHASH_CHUNK) {
-		GHASH(ctx, in, GHASH_CHUNK);
+		gcm_ghash(ctx, in, GHASH_CHUNK);
 		(*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c);
 		ctr += GHASH_CHUNK/16;
 		ctx->Yi.d[3] = htobe32(ctr);
@@ -1199,19 +1192,7 @@ CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
 	if ((i = (len & (size_t)-16))) {
 		size_t j = i/16;
 
-#if defined(GHASH)
-		GHASH(ctx, in, i);
-#else
-		while (j--) {
-			size_t k;
-			for (k = 0; k < 16; ++k)
-				ctx->Xi.c[k] ^= in[k];
-			GCM_MUL(ctx, Xi);
-			in += 16;
-		}
-		j = i/16;
-		in -= i;
-#endif
+		gcm_ghash(ctx, in, i);
 		(*stream)(in, out, j, key, ctx->Yi.c);
 		ctr += (unsigned int)j;
 		ctx->Yi.d[3] = htobe32(ctr);
@@ -1242,16 +1223,13 @@ CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
 {
 	u64 alen = ctx->len.u[0] << 3;
 	u64 clen = ctx->len.u[1] << 3;
-#ifdef GCM_FUNCREF_4BIT
-	void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-#endif
 
 	if (ctx->mres || ctx->ares)
-		GCM_MUL(ctx, Xi);
+		gcm_mul(ctx, ctx->Xi.u);
 
 	ctx->Xi.u[0] ^= htobe64(alen);
 	ctx->Xi.u[1] ^= htobe64(clen);
-	GCM_MUL(ctx, Xi);
+	gcm_mul(ctx, ctx->Xi.u);
 
 	ctx->Xi.u[0] ^= ctx->EK0.u[0];
 	ctx->Xi.u[1] ^= ctx->EK0.u[1];
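
Usage note (not part of the diff): the public CRYPTO_gcm128_* entry points
keep their existing signatures; only the internal GHASH dispatch changes, so
callers are unaffected. A minimal caller's-eye sketch, assuming AES via
<openssl/aes.h> and the GCM128_CONTEXT declarations that accompany gcm128.c
(modes.h or the library-internal modes_local.h, depending on version);
gcm_seal_example() and its parameter names are illustrative, not code from
this commit:

	#include <openssl/aes.h>
	#include <openssl/modes.h>

	static int
	gcm_seal_example(const unsigned char key[16], const unsigned char *iv,
	    size_t iv_len, const unsigned char *aad, size_t aad_len,
	    const unsigned char *pt, unsigned char *ct, size_t pt_len,
	    unsigned char tag[16])
	{
		AES_KEY aes;
		GCM128_CONTEXT ctx;

		AES_set_encrypt_key(key, 128, &aes);
		CRYPTO_gcm128_init(&ctx, &aes, (block128_f)AES_encrypt);
		CRYPTO_gcm128_setiv(&ctx, iv, iv_len);
		if (CRYPTO_gcm128_aad(&ctx, aad, aad_len) != 0)
			return -1;	/* AAD after data, or length overflow */
		if (CRYPTO_gcm128_encrypt(&ctx, pt, ct, pt_len) != 0)
			return -1;	/* message length limit exceeded */
		CRYPTO_gcm128_tag(&ctx, tag, 16);	/* CRYPTO_gcm128_finish() verifies instead */
		return 0;
	}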