author     Denis Vlasenko <vda.linux@googlemail.com>  2009-03-12 19:06:18 +0000
committer  Denis Vlasenko <vda.linux@googlemail.com>  2009-03-12 19:06:18 +0000
commit     c8329c956816656364e54d54932c5898a29bed70 (patch)
tree       82f055e1ef31d209bd5d5c16bd09be6ed6071552
parent     4a43057268a29c613f5482e6795a0a8ead9974ef (diff)
sha: reduce sha256/512 context size. Make sha1/sha256 code more similar
function                                             old     new   delta
sha512_end                                           182     204     +22
sha256_end                                           137     147     +10
sha1_hash                                            113     108      -5
sha1_end                                             143     129     -14
------------------------------------------------------------------------------
(add/remove: 0/0 grow/shrink: 2/2 up/down: 32/-19)            Total: 13 bytes
-rw-r--r--  include/libbb.h |   6
-rw-r--r--  libbb/sha1.c    | 145
2 files changed, 79 insertions, 72 deletions
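The key idea of the rewrite is visible in the sha1.c hunks below: instead of reserving a second half of wbuffer so that the length field always fits, each *_end function now pads within a single block and lets a loop run either once or twice, emitting an extra all-zero block only when fewer than 8 (or, for SHA-512, 16) bytes remain for the bit count. The following is a minimal, self-contained sketch of that finalization pattern for a 64-byte block; finalize() and process_block() are stand-in names for illustration only, not busybox functions, and the real code feeds each block into the running hash in ctx->hash rather than printing it.

/* Illustrative sketch of the once-or-twice padding loop (not busybox code). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 64

static void process_block(const uint8_t *block)
{
	/* stand-in for sha1_process_block64()/sha256_process_block64() */
	for (int i = 0; i < BLOCK_SIZE; i++)
		printf("%02x%c", block[i], (i % 16 == 15) ? '\n' : ' ');
	printf("--\n");
}

static void finalize(uint8_t *wbuffer, uint64_t total_bytes)
{
	unsigned in_buf = total_bytes & (BLOCK_SIZE - 1);
	unsigned pad;

	/* Pad with 0x80,0,0,0... as in FIPS 180-2:5.1.1 */
	wbuffer[in_buf++] = 0x80;

	/* This loop iterates either once or twice, no more, no less:
	 * twice only when fewer than 8 bytes remain for the length field. */
	while (1) {
		pad = BLOCK_SIZE - in_buf;
		memset(wbuffer + in_buf, 0, pad);
		in_buf = 0;
		if (pad >= 8) {
			uint64_t bits = total_bytes << 3;
			/* store the bit count big-endian, byte by byte, to stay portable */
			for (int i = 0; i < 8; i++)
				wbuffer[BLOCK_SIZE - 1 - i] = (uint8_t)(bits >> (8 * i));
		}
		process_block(wbuffer);
		if (pad >= 8)
			break;
	}
}

int main(void)
{
	uint8_t wbuffer[BLOCK_SIZE];

	/* 60 leftover bytes: only 4 bytes of room, so two blocks are emitted */
	memset(wbuffer, 'A', sizeof(wbuffer));
	finalize(wbuffer, 60);
	return 0;
}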
diff --git a/include/libbb.h b/include/libbb.h
index 3f566f8af..b04488a54 100644
--- a/include/libbb.h
+++ b/include/libbb.h
@@ -1322,7 +1322,7 @@ void bb_uuencode(char *store, const void *s, int length, const char *tbl) FAST_F
 
 typedef struct sha1_ctx_t {
 	uint64_t total64;
-	uint32_t wbuffer[16]; /* NB: always correctly aligned for uint64_t */
+	uint8_t wbuffer[64]; /* NB: always correctly aligned for uint64_t */
 	uint32_t hash[5];
 } sha1_ctx_t;
 void sha1_begin(sha1_ctx_t *ctx) FAST_FUNC;
@@ -1331,7 +1331,7 @@ void sha1_end(void *resbuf, sha1_ctx_t *ctx) FAST_FUNC;
 typedef struct sha256_ctx_t {
 	uint64_t total64;
 	uint32_t hash[8];
-	char wbuffer[64*2]; /* NB: always correctly aligned for uint64_t */
+	uint8_t wbuffer[64]; /* NB: always correctly aligned for uint64_t */
 } sha256_ctx_t;
 void sha256_begin(sha256_ctx_t *ctx) FAST_FUNC;
 void sha256_hash(const void *buffer, size_t len, sha256_ctx_t *ctx) FAST_FUNC;
@@ -1339,7 +1339,7 @@ void sha256_end(void *resbuf, sha256_ctx_t *ctx) FAST_FUNC;
 typedef struct sha512_ctx_t {
 	uint64_t total64[2];
 	uint64_t hash[8];
-	char wbuffer[128*2]; /* NB: always correctly aligned for uint64_t */
+	uint8_t wbuffer[128]; /* NB: always correctly aligned for uint64_t */
 } sha512_ctx_t;
 void sha512_begin(sha512_ctx_t *ctx) FAST_FUNC;
 void sha512_hash(const void *buffer, size_t len, sha512_ctx_t *ctx) FAST_FUNC;
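Why the contexts can shrink: as the comment removed from sha256_hash below notes, the second half of wbuffer was only ever used by the *_end functions to append the length field; with the new finalization loop a single block's worth of buffer is enough. A rough size check of the new structs follows. This is an illustrative snippet, not part of the commit; it omits FAST_FUNC and the function declarations, and it assumes a common ABI with 8-byte uint64_t alignment and no unusual struct padding.

/* Illustrative size check for the shrunken contexts (assumptions as stated above). */
#include <stdio.h>
#include <stdint.h>

typedef struct sha256_ctx_t {
	uint64_t total64;
	uint32_t hash[8];
	uint8_t wbuffer[64];   /* was: char wbuffer[64*2] */
} sha256_ctx_t;

typedef struct sha512_ctx_t {
	uint64_t total64[2];
	uint64_t hash[8];
	uint8_t wbuffer[128];  /* was: char wbuffer[128*2] */
} sha512_ctx_t;

int main(void)
{
	/* Expected on such targets: 104 (was 168) and 208 (was 336) bytes */
	printf("sha256_ctx_t: %zu\n", sizeof(sha256_ctx_t));
	printf("sha512_ctx_t: %zu\n", sizeof(sha512_ctx_t));
	return 0;
}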
diff --git a/libbb/sha1.c b/libbb/sha1.c
index 765fedd9b..efdb0f58e 100644
--- a/libbb/sha1.c
+++ b/libbb/sha1.c
@@ -60,12 +60,13 @@ static void sha1_process_block64(sha1_ctx_t *ctx)
 {
 	unsigned i;
 	uint32_t w[80], a, b, c, d, e, t;
+	uint32_t *words;
 
-	/* note that words are compiled from the buffer into 32-bit */
-	/* words in big-endian order so an order reversal is needed */
-	/* here on little endian machines */
-	for (i = 0; i < SHA1_BLOCK_SIZE / 4; ++i)
-		w[i] = ntohl(ctx->wbuffer[i]);
+	words = (uint32_t*) ctx->wbuffer;
+	for (i = 0; i < SHA1_BLOCK_SIZE / 4; ++i) {
+		w[i] = ntohl(*words);
+		words++;
+	}
 
 	for (/*i = SHA1_BLOCK_SIZE / 4*/; i < 80; ++i) {
 		t = w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16];
@@ -232,7 +233,7 @@ static void sha256_process_block64(const void *buffer, size_t len, sha256_ctx_t
 	/* Compute the message schedule according to FIPS 180-2:6.2.2 step 2. */
 	for (t = 0; t < 16; ++t) {
 		W[t] = ntohl(*words);
-		++words;
+		words++;
 	}
 
 	for (/*t = 16*/; t < 64; ++t)
@@ -302,7 +303,7 @@ static void sha512_process_block128(const void *buffer, size_t len, sha512_ctx_t
 	/* Compute the message schedule according to FIPS 180-2:6.3.2 step 2. */
 	for (t = 0; t < 16; ++t) {
 		W[t] = ntoh64(*words);
-		++words;
+		words++;
 	}
 	for (/*t = 16*/; t < 80; ++t)
 		W[t] = R1(W[t - 2]) + W[t - 7] + R0(W[t - 15]) + W[t - 16];
@@ -399,7 +400,7 @@ void FAST_FUNC sha1_hash(const void *buffer, size_t len, sha1_ctx_t *ctx)
 	ctx->total64 += len;
 
 	while (len >= add) { /* transfer whole blocks while possible */
-		memcpy(((unsigned char *) ctx->wbuffer) + in_buf, buffer, add);
+		memcpy(ctx->wbuffer + in_buf, buffer, add);
 		buffer = (const char *)buffer + add;
 		len -= add;
 		add = SHA1_BLOCK_SIZE;
@@ -407,7 +408,7 @@ void FAST_FUNC sha1_hash(const void *buffer, size_t len, sha1_ctx_t *ctx)
 		sha1_process_block64(ctx);
 	}
 
-	memcpy(((unsigned char *) ctx->wbuffer) + in_buf, buffer, len);
+	memcpy(ctx->wbuffer + in_buf, buffer, len);
 }
 
 void FAST_FUNC sha256_hash(const void *buffer, size_t len, sha256_ctx_t *ctx)
@@ -424,19 +425,14 @@ void FAST_FUNC sha256_hash(const void *buffer, size_t len, sha256_ctx_t *ctx)
 	if (in_buf != 0) {
 		unsigned add;
 
-		/* NB: 1/2 of wbuffer is used only in sha256_end
-		 * when length field is added and hashed.
-		 * With buffer twice as small, it may happen that
-		 * we have it almost full and can't add length field. */
-
-		add = sizeof(ctx->wbuffer)/2 - in_buf;
+		add = sizeof(ctx->wbuffer) - in_buf;
 		if (add > len)
 			add = len;
-		memcpy(&ctx->wbuffer[in_buf], buffer, add);
+		memcpy(ctx->wbuffer + in_buf, buffer, add);
 		in_buf += add;
 
 		/* If we still didn't collect full wbuffer, bail out */
-		if (in_buf < sizeof(ctx->wbuffer)/2)
+		if (in_buf < sizeof(ctx->wbuffer))
 			return;
 
 		sha256_process_block64(ctx->wbuffer, 64, ctx);
@@ -460,9 +456,8 @@ void FAST_FUNC sha256_hash(const void *buffer, size_t len, sha256_ctx_t *ctx)
 	}
 
 	/* Move remaining bytes into internal buffer. */
-	if (len > 0) {
+	if (len > 0)
 		memcpy(ctx->wbuffer, buffer, len);
-	}
 }
 
 void FAST_FUNC sha512_hash(const void *buffer, size_t len, sha512_ctx_t *ctx)
@@ -479,13 +474,13 @@ void FAST_FUNC sha512_hash(const void *buffer, size_t len, sha512_ctx_t *ctx)
 	if (in_buf != 0) {
 		unsigned add;
 
-		add = sizeof(ctx->wbuffer)/2 - in_buf;
+		add = sizeof(ctx->wbuffer) - in_buf;
 		if (add > len)
 			add = len;
-		memcpy(&ctx->wbuffer[in_buf], buffer, add);
+		memcpy(ctx->wbuffer + in_buf, buffer, add);
 		in_buf += add;
 
-		if (in_buf < sizeof(ctx->wbuffer)/2)
+		if (in_buf < sizeof(ctx->wbuffer))
 			return;
 
 		sha512_process_block128(ctx->wbuffer, 128, ctx);
@@ -507,9 +502,8 @@ void FAST_FUNC sha512_hash(const void *buffer, size_t len, sha512_ctx_t *ctx)
 		}
 	}
 
-	if (len > 0) {
+	if (len > 0)
 		memcpy(ctx->wbuffer, buffer, len);
-	}
 }
 
 
@@ -517,31 +511,29 @@ void FAST_FUNC sha1_end(void *resbuf, sha1_ctx_t *ctx)
 {
 	unsigned i, pad, in_buf;
 
-	/* Pad the buffer to the next 64-byte boundary with 0x80,0,0,0... */
 	in_buf = ctx->total64 & SHA1_MASK;
-	((uint8_t *)ctx->wbuffer)[in_buf++] = 0x80;
-	pad = SHA1_BLOCK_SIZE - in_buf;
-	memset(((uint8_t *)ctx->wbuffer) + in_buf, 0, pad);
-
-	/* We need 1+8 or more empty positions, one for the padding byte
-	 * (above) and eight for the length count.
-	 * If there is not enough space, empty the buffer. */
-	if (pad < 8) {
-		sha1_process_block64(ctx);
-		memset(ctx->wbuffer, 0, SHA1_BLOCK_SIZE - 8);
-		((uint8_t *)ctx->wbuffer)[0] = 0x80;
-	}
+	/* Pad the buffer to the next 64-byte boundary with 0x80,0,0,0... */
+	ctx->wbuffer[in_buf++] = 0x80;
 
-	/* Store the 64-bit counter of bits in the buffer in BE format */
-	{
-		uint64_t t = ctx->total64 << 3;
-		t = hton64(t);
-		/* wbuffer is suitably aligned for this */
-		*(uint64_t *) &ctx->wbuffer[14] = t;
+	/* This loop iterates either once or twice, no more, no less */
+	while (1) {
+		pad = SHA1_BLOCK_SIZE - in_buf;
+		memset(ctx->wbuffer + in_buf, 0, pad);
+		in_buf = 0;
+		/* Do we have enough space for the length count? */
+		if (pad >= 8) {
+			/* Store the 64-bit counter of bits in the buffer in BE format */
+			uint64_t t = ctx->total64 << 3;
+			t = hton64(t);
+			/* wbuffer is suitably aligned for this */
+			*(uint64_t *) (&ctx->wbuffer[SHA1_BLOCK_SIZE - 8]) = t;
+		}
+		sha1_process_block64(ctx);
+		if (pad >= 8)
+			break;
 	}
 
-	sha1_process_block64(ctx);
-
+	/* This way we do not impose alignment constraints on resbuf: */
 #if BB_LITTLE_ENDIAN
 	for (i = 0; i < ARRAY_SIZE(ctx->hash); ++i)
 		ctx->hash[i] = htonl(ctx->hash[i]);
@@ -553,23 +545,25 @@ void FAST_FUNC sha256_end(void *resbuf, sha256_ctx_t *ctx)
 {
 	unsigned i, pad, in_buf;
 
-	/* Pad the buffer to the next 64-byte boundary with 0x80,0,0,0...
-	   (FIPS 180-2:5.1.1) */
 	in_buf = ctx->total64 & 63;
-	pad = (in_buf >= 56 ? 64 + 56 - in_buf : 56 - in_buf);
-	memset(&ctx->wbuffer[in_buf], 0, pad);
-	ctx->wbuffer[in_buf] = 0x80;
-
-	/* Put the 64-bit file length in *bits* at the end of the buffer. */
-	{
-		uint64_t t = ctx->total64 << 3;
-		t = hton64(t);
-		/* wbuffer is suitably aligned for this */
-		*(uint64_t *) &ctx->wbuffer[in_buf + pad] = t;
-	}
+	/* Pad the buffer to the next 64-byte boundary with 0x80,0,0,0...
+	 * (FIPS 180-2:5.1.1)
+	 */
+	ctx->wbuffer[in_buf++] = 0x80;
 
-	/* Process last bytes. */
-	sha256_process_block64(ctx->wbuffer, in_buf + pad + 8, ctx);
+	while (1) {
+		pad = 64 - in_buf;
+		memset(ctx->wbuffer + in_buf, 0, pad);
+		in_buf = 0;
+		if (pad >= 8) {
+			uint64_t t = ctx->total64 << 3;
+			t = hton64(t);
+			*(uint64_t *) (&ctx->wbuffer[64 - 8]) = t;
+		}
+		sha256_process_block64(ctx->wbuffer, 64, ctx);
+		if (pad >= 8)
+			break;
+	}
 
 #if BB_LITTLE_ENDIAN
 	for (i = 0; i < ARRAY_SIZE(ctx->hash); ++i)
@@ -582,17 +576,30 @@ void FAST_FUNC sha512_end(void *resbuf, sha512_ctx_t *ctx)
 {
 	unsigned i, pad, in_buf;
 
-	/* Pad the buffer to the next 128-byte boundary with 0x80,0,0,0...
-	   (FIPS 180-2:5.1.2) */
 	in_buf = ctx->total64[0] & 127;
-	pad = in_buf >= 112 ? 128 + 112 - in_buf : 112 - in_buf;
-	memset(&ctx->wbuffer[in_buf], 0, pad);
-	ctx->wbuffer[in_buf] = 0x80;
-
-	*(uint64_t *) &ctx->wbuffer[in_buf + pad + 8] = hton64(ctx->total64[0] << 3);
-	*(uint64_t *) &ctx->wbuffer[in_buf + pad] = hton64((ctx->total64[1] << 3) | (ctx->total64[0] >> 61));
+	/* Pad the buffer to the next 128-byte boundary with 0x80,0,0,0...
+	 * (FIPS 180-2:5.1.2)
+	 */
+	ctx->wbuffer[in_buf++] = 0x80;
 
-	sha512_process_block128(ctx->wbuffer, in_buf + pad + 16, ctx);
+	while (1) {
+		pad = 128 - in_buf;
+		memset(ctx->wbuffer + in_buf, 0, pad);
+		in_buf = 0;
+		if (pad >= 16) {
+			/* Store the 128-bit counter of bits in the buffer in BE format */
+			uint64_t t;
+			t = ctx->total64[0] << 3;
+			t = hton64(t);
+			*(uint64_t *) (&ctx->wbuffer[128 - 8]) = t;
+			t = (ctx->total64[1] << 3) | (ctx->total64[0] >> 61);
+			t = hton64(t);
+			*(uint64_t *) (&ctx->wbuffer[128 - 16]) = t;
+		}
+		sha512_process_block128(ctx->wbuffer, 128, ctx);
+		if (pad >= 16)
+			break;
+	}
 
 #if BB_LITTLE_ENDIAN
 	for (i = 0; i < ARRAY_SIZE(ctx->hash); ++i)