Diffstat (limited to 'src')
 -rw-r--r--	src/lib/libcrypto/sm3/sm3.c	| 118
 1 file changed, 57 insertions(+), 61 deletions(-)
diff --git a/src/lib/libcrypto/sm3/sm3.c b/src/lib/libcrypto/sm3/sm3.c
index d3c7c5b2ea..615a5c8a07 100644
--- a/src/lib/libcrypto/sm3/sm3.c
+++ b/src/lib/libcrypto/sm3/sm3.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sm3.c,v 1.14 2024/03/28 08:37:03 jsing Exp $ */
+/* $OpenBSD: sm3.c,v 1.15 2024/03/28 11:22:58 jsing Exp $ */
 /*
  * Copyright (c) 2018, Ribose Inc
  *
@@ -23,27 +23,14 @@
 
 #include "crypto_internal.h"
 
-#ifndef OPENSSL_NO_SM3
-
-#define DATA_ORDER_IS_BIG_ENDIAN
+/* Ensure that SM3_WORD and uint32_t are equivalent size. */
+CTASSERT(sizeof(SM3_WORD) == sizeof(uint32_t));
 
-#define HASH_LONG SM3_WORD
-#define HASH_CTX SM3_CTX
-#define HASH_CBLOCK SM3_CBLOCK
-#define HASH_UPDATE SM3_Update
-#define HASH_TRANSFORM SM3_Transform
-#define HASH_FINAL SM3_Final
-#define HASH_BLOCK_DATA_ORDER SM3_block_data_order
+#ifndef OPENSSL_NO_SM3
 
 void SM3_block_data_order(SM3_CTX *c, const void *p, size_t num);
 void SM3_transform(SM3_CTX *c, const unsigned char *data);
 
-#define HASH_NO_UPDATE
-#define HASH_NO_TRANSFORM
-#define HASH_NO_FINAL
-
-#include "md32_common.h"
-
 #define P0(X) (X ^ crypto_rol_u32(X, 9) ^ crypto_rol_u32(X, 17))
 #define P1(X) (X ^ crypto_rol_u32(X, 15) ^ crypto_rol_u32(X, 23))
 
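Note on the CTASSERT addition above: it makes the build fail if SM3_WORD and uint32_t ever differ in size, which the direct 32-bit loads and stores introduced later in this diff rely on. As a hedged illustration only (the real CTASSERT macro lives in crypto_internal.h and is not part of this diff), a compile-time assertion of this style can be written as:

/*
 * Sketch of a CTASSERT-style compile-time check: the array size becomes -1,
 * and therefore a compile error, whenever the condition is false.
 */
#define CTASSERT_SKETCH(x) \
	extern char _ctassert[(x) ? 1 : -1] __attribute__((__unused__))

CTASSERT_SKETCH(sizeof(SM3_WORD) == sizeof(uint32_t));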
@@ -75,9 +62,10 @@ void SM3_transform(SM3_CTX *c, const unsigned char *data);
 	ROUND(A, B, C, D, E, F, G, H, TJ, Wi, Wj, FF1, GG1)
 
 void
-SM3_block_data_order(SM3_CTX *ctx, const void *p, size_t num)
+SM3_block_data_order(SM3_CTX *ctx, const void *_in, size_t num)
 {
-	const unsigned char *data = p;
+	const uint8_t *in = _in;
+	const SM3_WORD *in32;
 	SM3_WORD A, B, C, D, E, F, G, H;
 	SM3_WORD W00, W01, W02, W03, W04, W05, W06, W07;
 	SM3_WORD W08, W09, W10, W11, W12, W13, W14, W15;
@@ -96,22 +84,45 @@ SM3_block_data_order(SM3_CTX *ctx, const void *p, size_t num)
 		 * We have to load all message bytes immediately since SM3 reads
 		 * them slightly out of order.
 		 */
-		HOST_c2l(data, W00);
-		HOST_c2l(data, W01);
-		HOST_c2l(data, W02);
-		HOST_c2l(data, W03);
-		HOST_c2l(data, W04);
-		HOST_c2l(data, W05);
-		HOST_c2l(data, W06);
-		HOST_c2l(data, W07);
-		HOST_c2l(data, W08);
-		HOST_c2l(data, W09);
-		HOST_c2l(data, W10);
-		HOST_c2l(data, W11);
-		HOST_c2l(data, W12);
-		HOST_c2l(data, W13);
-		HOST_c2l(data, W14);
-		HOST_c2l(data, W15);
+		if ((uintptr_t)in % 4 == 0) {
+			/* Input is 32 bit aligned. */
+			in32 = (const SM3_WORD *)in;
+			W00 = be32toh(in32[0]);
+			W01 = be32toh(in32[1]);
+			W02 = be32toh(in32[2]);
+			W03 = be32toh(in32[3]);
+			W04 = be32toh(in32[4]);
+			W05 = be32toh(in32[5]);
+			W06 = be32toh(in32[6]);
+			W07 = be32toh(in32[7]);
+			W08 = be32toh(in32[8]);
+			W09 = be32toh(in32[9]);
+			W10 = be32toh(in32[10]);
+			W11 = be32toh(in32[11]);
+			W12 = be32toh(in32[12]);
+			W13 = be32toh(in32[13]);
+			W14 = be32toh(in32[14]);
+			W15 = be32toh(in32[15]);
+		} else {
+			/* Input is not 32 bit aligned. */
+			W00 = crypto_load_be32toh(&in[0 * 4]);
+			W01 = crypto_load_be32toh(&in[1 * 4]);
+			W02 = crypto_load_be32toh(&in[2 * 4]);
+			W03 = crypto_load_be32toh(&in[3 * 4]);
+			W04 = crypto_load_be32toh(&in[4 * 4]);
+			W05 = crypto_load_be32toh(&in[5 * 4]);
+			W06 = crypto_load_be32toh(&in[6 * 4]);
+			W07 = crypto_load_be32toh(&in[7 * 4]);
+			W08 = crypto_load_be32toh(&in[8 * 4]);
+			W09 = crypto_load_be32toh(&in[9 * 4]);
+			W10 = crypto_load_be32toh(&in[10 * 4]);
+			W11 = crypto_load_be32toh(&in[11 * 4]);
+			W12 = crypto_load_be32toh(&in[12 * 4]);
+			W13 = crypto_load_be32toh(&in[13 * 4]);
+			W14 = crypto_load_be32toh(&in[14 * 4]);
+			W15 = crypto_load_be32toh(&in[15 * 4]);
+		}
+		in += SM3_CBLOCK;
 
 		R1(A, B, C, D, E, F, G, H, 0x79cc4519, W00, W00 ^ W04);
 		W00 = EXPAND(W00, W07, W13, W03, W10);
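Note on the load changes above: the HOST_c2l macro from md32_common.h is replaced with explicit big-endian loads. When the input pointer is 4-byte aligned the block is read through a SM3_WORD pointer and converted with be32toh(); otherwise crypto_load_be32toh() performs an unaligned big-endian load. That helper comes from crypto_internal.h and is not shown in this diff; a minimal sketch, assuming the usual memcpy-based unaligned-load pattern, could look like this:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical sketch of an unaligned big-endian 32-bit load. */
static inline uint32_t
sketch_load_be32toh(const uint8_t *src)
{
	uint32_t v;

	/* memcpy is valid for any alignment; compilers typically fold it away. */
	memcpy(&v, src, sizeof(v));

	return be32toh(v);
}

Splitting the aligned and unaligned paths lets the common aligned case use plain 32-bit reads while avoiding undefined behaviour from misaligned pointer dereferences in the general case.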
@@ -325,7 +336,6 @@ SM3_Final(unsigned char *md, SM3_CTX *c)
 {
 	unsigned char *p = (unsigned char *)c->data;
 	size_t n = c->num;
-	unsigned long ll;
 
 	p[n] = 0x80; /* there is always room for one */
 	n++;
@@ -335,37 +345,23 @@ SM3_Final(unsigned char *md, SM3_CTX *c)
 		n = 0;
 		SM3_block_data_order(c, p, 1);
 	}
+
 	memset(p + n, 0, SM3_CBLOCK - 8 - n);
+	c->data[SM3_LBLOCK - 2] = htobe32(c->Nh);
+	c->data[SM3_LBLOCK - 1] = htobe32(c->Nl);
 
-	p += SM3_CBLOCK - 8;
-#if defined(DATA_ORDER_IS_BIG_ENDIAN)
-	HOST_l2c(c->Nh, p);
-	HOST_l2c(c->Nl, p);
-#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-	HOST_l2c(c->Nl, p);
-	HOST_l2c(c->Nh, p);
-#endif
-	p -= SM3_CBLOCK;
 	SM3_block_data_order(c, p, 1);
 	c->num = 0;
 	memset(p, 0, SM3_CBLOCK);
 
-	ll = (c)->A;
-	HOST_l2c(ll, md);
-	ll = (c)->B;
-	HOST_l2c(ll, md);
-	ll = (c)->C;
-	HOST_l2c(ll, md);
-	ll = (c)->D;
-	HOST_l2c(ll, md);
-	ll = (c)->E;
-	HOST_l2c(ll, md);
-	ll = (c)->F;
-	HOST_l2c(ll, md);
-	ll = (c)->G;
-	HOST_l2c(ll, md);
-	ll = (c)->H;
-	HOST_l2c(ll, md);
+	crypto_store_htobe32(&md[0 * 4], c->A);
+	crypto_store_htobe32(&md[1 * 4], c->B);
+	crypto_store_htobe32(&md[2 * 4], c->C);
+	crypto_store_htobe32(&md[3 * 4], c->D);
+	crypto_store_htobe32(&md[4 * 4], c->E);
+	crypto_store_htobe32(&md[5 * 4], c->F);
+	crypto_store_htobe32(&md[6 * 4], c->G);
+	crypto_store_htobe32(&md[7 * 4], c->H);
 
 	return 1;
 }
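Note on the finalisation changes above: SM3_Final() now writes the bit count directly into the last two words of c->data with htobe32() and stores each state word to the digest with crypto_store_htobe32(), removing the last uses of HOST_l2c and the ll temporary. As with the load helper, a minimal sketch of such a store routine, assuming a memcpy-based implementation, is:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical sketch of an unaligned big-endian 32-bit store. */
static inline void
sketch_store_htobe32(uint8_t *dst, uint32_t v)
{
	v = htobe32(v);
	/* memcpy is valid for any alignment of dst. */
	memcpy(dst, &v, sizeof(v));
}

For context, a short caller exercising the code paths touched by this diff (assuming SM3 support is compiled in, i.e. OPENSSL_NO_SM3 is not defined):

#include <openssl/sm3.h>

/* One-shot SM3 digest using the public API backed by this file. */
static void
sm3_digest(const uint8_t *msg, size_t len, uint8_t out[SM3_DIGEST_LENGTH])
{
	SM3_CTX ctx;

	SM3_Init(&ctx);
	SM3_Update(&ctx, msg, len);	/* runs SM3_block_data_order() per full block */
	SM3_Final(out, &ctx);		/* pads, appends the bit length, stores the digest */
}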
