Diffstat
| -rw-r--r-- | src/lib/libcrypto/modes/gcm128.c | 510 |
1 file changed, 264 insertions, 246 deletions
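The whole diff is one mechanical conversion: runtime `if (BYTE_ORDER == LITTLE_ENDIAN)` branches, which the compiler was trusted to fold away, become `#if BYTE_ORDER == LITTLE_ENDIAN` preprocessor conditionals, and `if (sizeof(size_t)==8)` becomes `#ifdef _LP64`. A minimal sketch of the before/after shape, assuming `<sys/types.h>` exposes `BYTE_ORDER` as it does on OpenBSD (glibc wants `<endian.h>`):

```c
#include <sys/types.h>	/* BYTE_ORDER, LITTLE_ENDIAN (OpenBSD) */
#include <stdint.h>

/* Before: a runtime test of a compile-time constant.  Both arms must
 * still parse and type-check on every target. */
static uint32_t
load_be32_runtime(const uint32_t *p)
{
	if (BYTE_ORDER == LITTLE_ENDIAN)
		return __builtin_bswap32(*p);	/* gcc/clang builtin */
	else
		return *p;
}

/* After: the preprocessor keeps one arm; the other is never compiled. */
static uint32_t
load_be32_cpp(const uint32_t *p)
{
#if BYTE_ORDER == LITTLE_ENDIAN
	return __builtin_bswap32(*p);
#else
	return *p;
#endif
}
```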
diff --git a/src/lib/libcrypto/modes/gcm128.c b/src/lib/libcrypto/modes/gcm128.c
index c080d9c16a..0b29d9850d 100644
--- a/src/lib/libcrypto/modes/gcm128.c
+++ b/src/lib/libcrypto/modes/gcm128.c
| @@ -1,4 +1,4 @@ | |||
| 1 | /* $OpenBSD: gcm128.c,v 1.16 2017/05/02 03:59:44 deraadt Exp $ */ | 1 | /* $OpenBSD: gcm128.c,v 1.17 2017/08/13 17:46:24 bcook Exp $ */ |
| 2 | /* ==================================================================== | 2 | /* ==================================================================== |
| 3 | * Copyright (c) 2010 The OpenSSL Project. All rights reserved. | 3 | * Copyright (c) 2010 The OpenSSL Project. All rights reserved. |
| 4 | * | 4 | * |
| @@ -224,29 +224,29 @@ static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256]) | |||
| 224 | rem = (size_t)Z.lo&0xff; | 224 | rem = (size_t)Z.lo&0xff; |
| 225 | Z.lo = (Z.hi<<56)|(Z.lo>>8); | 225 | Z.lo = (Z.hi<<56)|(Z.lo>>8); |
| 226 | Z.hi = (Z.hi>>8); | 226 | Z.hi = (Z.hi>>8); |
| 227 | if (sizeof(size_t)==8) | 227 | #ifdef _LP64 |
| 228 | Z.hi ^= rem_8bit[rem]; | 228 | Z.hi ^= rem_8bit[rem]; |
| 229 | else | 229 | #else |
| 230 | Z.hi ^= (u64)rem_8bit[rem]<<32; | 230 | Z.hi ^= (u64)rem_8bit[rem]<<32; |
| 231 | #endif | ||
| 231 | } | 232 | } |
| 232 | 233 | ||
| 233 | if (BYTE_ORDER == LITTLE_ENDIAN) { | 234 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 234 | #ifdef BSWAP8 | 235 | #ifdef BSWAP8 |
| 235 | Xi[0] = BSWAP8(Z.hi); | 236 | Xi[0] = BSWAP8(Z.hi); |
| 236 | Xi[1] = BSWAP8(Z.lo); | 237 | Xi[1] = BSWAP8(Z.lo); |
| 237 | #else | 238 | #else |
| 238 | u8 *p = (u8 *)Xi; | 239 | u8 *p = (u8 *)Xi; |
| 239 | u32 v; | 240 | u32 v; |
| 240 | v = (u32)(Z.hi>>32); PUTU32(p,v); | 241 | v = (u32)(Z.hi>>32); PUTU32(p,v); |
| 241 | v = (u32)(Z.hi); PUTU32(p+4,v); | 242 | v = (u32)(Z.hi); PUTU32(p+4,v); |
| 242 | v = (u32)(Z.lo>>32); PUTU32(p+8,v); | 243 | v = (u32)(Z.lo>>32); PUTU32(p+8,v); |
| 243 | v = (u32)(Z.lo); PUTU32(p+12,v); | 244 | v = (u32)(Z.lo); PUTU32(p+12,v); |
| 245 | #endif | ||
| 246 | #else /* BIG_ENDIAN */ | ||
| 247 | Xi[0] = Z.hi; | ||
| 248 | Xi[1] = Z.lo; | ||
| 244 | #endif | 249 | #endif |
| 245 | } | ||
| 246 | else { | ||
| 247 | Xi[0] = Z.hi; | ||
| 248 | Xi[1] = Z.lo; | ||
| 249 | } | ||
| 250 | } | 250 | } |
| 251 | #define GCM_MUL(ctx,Xi) gcm_gmult_8bit(ctx->Xi.u,ctx->Htable) | 251 | #define GCM_MUL(ctx,Xi) gcm_gmult_8bit(ctx->Xi.u,ctx->Htable) |
| 252 | 252 | ||
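Each gmult/ghash routine ends with the same writeback seen above: on little-endian hosts the accumulated Z is stored into Xi in big-endian order, with one BSWAP8 per 64-bit half when available and four explicit 32-bit stores otherwise. A self-contained sketch of the fallback path, with local stand-ins for the u8/u32/u64 typedefs and the PUTU32 macro that the real file takes from modes_lcl.h:

```c
#include <stdint.h>

typedef uint8_t u8;	/* stand-ins for the modes_lcl.h typedefs */
typedef uint32_t u32;
typedef uint64_t u64;

/* Big-endian 32-bit store, equivalent to the file's PUTU32 macro. */
#define PUTU32(p, v) do {			\
	(p)[0] = (u8)((v) >> 24);		\
	(p)[1] = (u8)((v) >> 16);		\
	(p)[2] = (u8)((v) >> 8);		\
	(p)[3] = (u8)(v);			\
} while (0)

/* Store hi||lo into Xi in big-endian byte order, matching the
 * #else branch of the BSWAP8 conditional above. */
static void
store_be128(u64 Xi[2], u64 hi, u64 lo)
{
	u8 *p = (u8 *)Xi;
	u32 v;

	v = (u32)(hi >> 32); PUTU32(p, v);
	v = (u32)(hi);       PUTU32(p + 4, v);
	v = (u32)(lo >> 32); PUTU32(p + 8, v);
	v = (u32)(lo);       PUTU32(p + 12, v);
}
```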
| @@ -307,19 +307,19 @@ static void gcm_init_4bit(u128 Htable[16], u64 H[2]) | |||
| 307 | { | 307 | { |
| 308 | int j; | 308 | int j; |
| 309 | 309 | ||
| 310 | if (BYTE_ORDER == LITTLE_ENDIAN) | 310 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 311 | for (j=0;j<16;++j) { | 311 | for (j=0;j<16;++j) { |
| 312 | V = Htable[j]; | 312 | V = Htable[j]; |
| 313 | Htable[j].hi = V.lo; | 313 | Htable[j].hi = V.lo; |
| 314 | Htable[j].lo = V.hi; | 314 | Htable[j].lo = V.hi; |
| 315 | } | ||
| 316 | else | ||
| 317 | for (j=0;j<16;++j) { | ||
| 318 | V = Htable[j]; | ||
| 319 | Htable[j].hi = V.lo<<32|V.lo>>32; | ||
| 320 | Htable[j].lo = V.hi<<32|V.hi>>32; | ||
| 321 | } | ||
| 322 | } | 315 | } |
| 316 | #else /* BIG_ENDIAN */ | ||
| 317 | for (j=0;j<16;++j) { | ||
| 318 | V = Htable[j]; | ||
| 319 | Htable[j].hi = V.lo<<32|V.lo>>32; | ||
| 320 | Htable[j].lo = V.hi<<32|V.hi>>32; | ||
| 321 | } | ||
| 322 | #endif | ||
| 323 | #endif | 323 | #endif |
| 324 | } | 324 | } |
| 325 | 325 | ||
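In the gcm_init_4bit hunk above, the little-endian branch swaps the hi/lo halves of each Htable entry, while the big-endian branch rotates each 64-bit half by 32 bits, presumably to match the dword layout the platform's assembler GHASH routines expect. The rotate in isolation:

```c
#include <stdint.h>

/* Swap the two 32-bit halves of a 64-bit word: hi32||lo32 becomes
 * lo32||hi32, as in the big-endian branch above. */
static inline uint64_t
swap_halves(uint64_t v)
{
	return v << 32 | v >> 32;
}
```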
| @@ -347,11 +347,11 @@ static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) | |||
| 347 | rem = (size_t)Z.lo&0xf; | 347 | rem = (size_t)Z.lo&0xf; |
| 348 | Z.lo = (Z.hi<<60)|(Z.lo>>4); | 348 | Z.lo = (Z.hi<<60)|(Z.lo>>4); |
| 349 | Z.hi = (Z.hi>>4); | 349 | Z.hi = (Z.hi>>4); |
| 350 | if (sizeof(size_t)==8) | 350 | #ifdef _LP64 |
| 351 | Z.hi ^= rem_4bit[rem]; | 351 | Z.hi ^= rem_4bit[rem]; |
| 352 | else | 352 | #else |
| 353 | Z.hi ^= (u64)rem_4bit[rem]<<32; | 353 | Z.hi ^= (u64)rem_4bit[rem]<<32; |
| 354 | 354 | #endif | |
| 355 | Z.hi ^= Htable[nhi].hi; | 355 | Z.hi ^= Htable[nhi].hi; |
| 356 | Z.lo ^= Htable[nhi].lo; | 356 | Z.lo ^= Htable[nhi].lo; |
| 357 | 357 | ||
| @@ -364,32 +364,31 @@ static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) | |||
| 364 | rem = (size_t)Z.lo&0xf; | 364 | rem = (size_t)Z.lo&0xf; |
| 365 | Z.lo = (Z.hi<<60)|(Z.lo>>4); | 365 | Z.lo = (Z.hi<<60)|(Z.lo>>4); |
| 366 | Z.hi = (Z.hi>>4); | 366 | Z.hi = (Z.hi>>4); |
| 367 | if (sizeof(size_t)==8) | 367 | #ifdef _LP64 |
| 368 | Z.hi ^= rem_4bit[rem]; | 368 | Z.hi ^= rem_4bit[rem]; |
| 369 | else | 369 | #else |
| 370 | Z.hi ^= (u64)rem_4bit[rem]<<32; | 370 | Z.hi ^= (u64)rem_4bit[rem]<<32; |
| 371 | 371 | #endif | |
| 372 | Z.hi ^= Htable[nlo].hi; | 372 | Z.hi ^= Htable[nlo].hi; |
| 373 | Z.lo ^= Htable[nlo].lo; | 373 | Z.lo ^= Htable[nlo].lo; |
| 374 | } | 374 | } |
| 375 | 375 | ||
| 376 | if (BYTE_ORDER == LITTLE_ENDIAN) { | 376 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 377 | #ifdef BSWAP8 | 377 | #ifdef BSWAP8 |
| 378 | Xi[0] = BSWAP8(Z.hi); | 378 | Xi[0] = BSWAP8(Z.hi); |
| 379 | Xi[1] = BSWAP8(Z.lo); | 379 | Xi[1] = BSWAP8(Z.lo); |
| 380 | #else | 380 | #else |
| 381 | u8 *p = (u8 *)Xi; | 381 | u8 *p = (u8 *)Xi; |
| 382 | u32 v; | 382 | u32 v; |
| 383 | v = (u32)(Z.hi>>32); PUTU32(p,v); | 383 | v = (u32)(Z.hi>>32); PUTU32(p,v); |
| 384 | v = (u32)(Z.hi); PUTU32(p+4,v); | 384 | v = (u32)(Z.hi); PUTU32(p+4,v); |
| 385 | v = (u32)(Z.lo>>32); PUTU32(p+8,v); | 385 | v = (u32)(Z.lo>>32); PUTU32(p+8,v); |
| 386 | v = (u32)(Z.lo); PUTU32(p+12,v); | 386 | v = (u32)(Z.lo); PUTU32(p+12,v); |
| 387 | #endif | ||
| 388 | #else /* BIG_ENDIAN */ | ||
| 389 | Xi[0] = Z.hi; | ||
| 390 | Xi[1] = Z.lo; | ||
| 387 | #endif | 391 | #endif |
| 388 | } | ||
| 389 | else { | ||
| 390 | Xi[0] = Z.hi; | ||
| 391 | Xi[1] = Z.lo; | ||
| 392 | } | ||
| 393 | } | 392 | } |
| 394 | 393 | ||
| 395 | #if !defined(OPENSSL_SMALL_FOOTPRINT) | 394 | #if !defined(OPENSSL_SMALL_FOOTPRINT) |
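The `sizeof(size_t)==8` tests become `#ifdef _LP64`, the macro LP64 ABIs define. The rem_4bit reduction table holds size_t entries pre-shifted to the top of the machine word, which is why a 64-bit build can XOR an entry into Z.hi directly while a 32-bit build must first shift it into the upper half. A standalone sketch of that dispatch (dummy table values; the selection pattern is the point):

```c
#include <stddef.h>
#include <stdint.h>

/* Dummy stand-in for the file's rem_4bit table of pre-shifted
 * polynomial remainders. */
static const size_t rem_4bit[16] = { 0 };

static uint64_t
fold_rem(uint64_t Z_hi, size_t rem)
{
#ifdef _LP64
	/* size_t is 64 bits: the entry already sits in the top bits. */
	return Z_hi ^ rem_4bit[rem & 0xf];
#else
	/* size_t is 32 bits: lift the entry into the upper half. */
	return Z_hi ^ ((uint64_t)rem_4bit[rem & 0xf] << 32);
#endif
}
```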
| @@ -422,11 +421,11 @@ static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16], | |||
| 422 | rem = (size_t)Z.lo&0xf; | 421 | rem = (size_t)Z.lo&0xf; |
| 423 | Z.lo = (Z.hi<<60)|(Z.lo>>4); | 422 | Z.lo = (Z.hi<<60)|(Z.lo>>4); |
| 424 | Z.hi = (Z.hi>>4); | 423 | Z.hi = (Z.hi>>4); |
| 425 | if (sizeof(size_t)==8) | 424 | #ifdef _LP64 |
| 426 | Z.hi ^= rem_4bit[rem]; | 425 | Z.hi ^= rem_4bit[rem]; |
| 427 | else | 426 | #else |
| 428 | Z.hi ^= (u64)rem_4bit[rem]<<32; | 427 | Z.hi ^= (u64)rem_4bit[rem]<<32; |
| 429 | 428 | #endif | |
| 430 | Z.hi ^= Htable[nhi].hi; | 429 | Z.hi ^= Htable[nhi].hi; |
| 431 | Z.lo ^= Htable[nhi].lo; | 430 | Z.lo ^= Htable[nhi].lo; |
| 432 | 431 | ||
| @@ -440,11 +439,11 @@ static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16], | |||
| 440 | rem = (size_t)Z.lo&0xf; | 439 | rem = (size_t)Z.lo&0xf; |
| 441 | Z.lo = (Z.hi<<60)|(Z.lo>>4); | 440 | Z.lo = (Z.hi<<60)|(Z.lo>>4); |
| 442 | Z.hi = (Z.hi>>4); | 441 | Z.hi = (Z.hi>>4); |
| 443 | if (sizeof(size_t)==8) | 442 | #ifdef _LP64 |
| 444 | Z.hi ^= rem_4bit[rem]; | 443 | Z.hi ^= rem_4bit[rem]; |
| 445 | else | 444 | #else |
| 446 | Z.hi ^= (u64)rem_4bit[rem]<<32; | 445 | Z.hi ^= (u64)rem_4bit[rem]<<32; |
| 447 | 446 | #endif | |
| 448 | Z.hi ^= Htable[nlo].hi; | 447 | Z.hi ^= Htable[nlo].hi; |
| 449 | Z.lo ^= Htable[nlo].lo; | 448 | Z.lo ^= Htable[nlo].lo; |
| 450 | } | 449 | } |
| @@ -542,23 +541,22 @@ static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16], | |||
| 542 | Z.hi ^= ((u64)rem_8bit[rem<<4])<<48; | 541 | Z.hi ^= ((u64)rem_8bit[rem<<4])<<48; |
| 543 | #endif | 542 | #endif |
| 544 | 543 | ||
| 545 | if (BYTE_ORDER == LITTLE_ENDIAN) { | 544 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 546 | #ifdef BSWAP8 | 545 | #ifdef BSWAP8 |
| 547 | Xi[0] = BSWAP8(Z.hi); | 546 | Xi[0] = BSWAP8(Z.hi); |
| 548 | Xi[1] = BSWAP8(Z.lo); | 547 | Xi[1] = BSWAP8(Z.lo); |
| 549 | #else | 548 | #else |
| 550 | u8 *p = (u8 *)Xi; | 549 | u8 *p = (u8 *)Xi; |
| 551 | u32 v; | 550 | u32 v; |
| 552 | v = (u32)(Z.hi>>32); PUTU32(p,v); | 551 | v = (u32)(Z.hi>>32); PUTU32(p,v); |
| 553 | v = (u32)(Z.hi); PUTU32(p+4,v); | 552 | v = (u32)(Z.hi); PUTU32(p+4,v); |
| 554 | v = (u32)(Z.lo>>32); PUTU32(p+8,v); | 553 | v = (u32)(Z.lo>>32); PUTU32(p+8,v); |
| 555 | v = (u32)(Z.lo); PUTU32(p+12,v); | 554 | v = (u32)(Z.lo); PUTU32(p+12,v); |
| 555 | #endif | ||
| 556 | #else /* BIG_ENDIAN */ | ||
| 557 | Xi[0] = Z.hi; | ||
| 558 | Xi[1] = Z.lo; | ||
| 556 | #endif | 559 | #endif |
| 557 | } | ||
| 558 | else { | ||
| 559 | Xi[0] = Z.hi; | ||
| 560 | Xi[1] = Z.lo; | ||
| 561 | } | ||
| 562 | } while (inp+=16, len-=16); | 560 | } while (inp+=16, len-=16); |
| 563 | } | 561 | } |
| 564 | #endif | 562 | #endif |
| @@ -589,22 +587,21 @@ static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2]) | |||
| 589 | V.lo = H[1]; | 587 | V.lo = H[1]; |
| 590 | 588 | ||
| 591 | for (j=0; j<16/sizeof(long); ++j) { | 589 | for (j=0; j<16/sizeof(long); ++j) { |
| 592 | if (BYTE_ORDER == LITTLE_ENDIAN) { | 590 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 593 | if (sizeof(long)==8) { | 591 | #ifdef _LP64 |
| 594 | #ifdef BSWAP8 | 592 | #ifdef BSWAP8 |
| 595 | X = (long)(BSWAP8(xi[j])); | 593 | X = (long)(BSWAP8(xi[j])); |
| 596 | #else | 594 | #else |
| 597 | const u8 *p = (const u8 *)(xi+j); | 595 | const u8 *p = (const u8 *)(xi+j); |
| 598 | X = (long)((u64)GETU32(p)<<32|GETU32(p+4)); | 596 | X = (long)((u64)GETU32(p)<<32|GETU32(p+4)); |
| 597 | #endif | ||
| 598 | #else | ||
| 599 | const u8 *p = (const u8 *)(xi+j); | ||
| 600 | X = (long)GETU32(p); | ||
| 601 | #endif | ||
| 602 | #else /* BIG_ENDIAN */ | ||
| 603 | X = xi[j]; | ||
| 599 | #endif | 604 | #endif |
| 600 | } | ||
| 601 | else { | ||
| 602 | const u8 *p = (const u8 *)(xi+j); | ||
| 603 | X = (long)GETU32(p); | ||
| 604 | } | ||
| 605 | } | ||
| 606 | else | ||
| 607 | X = xi[j]; | ||
| 608 | 605 | ||
| 609 | for (i=0; i<8*sizeof(long); ++i, X<<=1) { | 606 | for (i=0; i<8*sizeof(long); ++i, X<<=1) { |
| 610 | u64 M = (u64)(X>>(8*sizeof(long)-1)); | 607 | u64 M = (u64)(X>>(8*sizeof(long)-1)); |
| @@ -615,23 +612,22 @@ static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2]) | |||
| 615 | } | 612 | } |
| 616 | } | 613 | } |
| 617 | 614 | ||
| 618 | if (BYTE_ORDER == LITTLE_ENDIAN) { | 615 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 619 | #ifdef BSWAP8 | 616 | #ifdef BSWAP8 |
| 620 | Xi[0] = BSWAP8(Z.hi); | 617 | Xi[0] = BSWAP8(Z.hi); |
| 621 | Xi[1] = BSWAP8(Z.lo); | 618 | Xi[1] = BSWAP8(Z.lo); |
| 622 | #else | 619 | #else |
| 623 | u8 *p = (u8 *)Xi; | 620 | u8 *p = (u8 *)Xi; |
| 624 | u32 v; | 621 | u32 v; |
| 625 | v = (u32)(Z.hi>>32); PUTU32(p,v); | 622 | v = (u32)(Z.hi>>32); PUTU32(p,v); |
| 626 | v = (u32)(Z.hi); PUTU32(p+4,v); | 623 | v = (u32)(Z.hi); PUTU32(p+4,v); |
| 627 | v = (u32)(Z.lo>>32); PUTU32(p+8,v); | 624 | v = (u32)(Z.lo>>32); PUTU32(p+8,v); |
| 628 | v = (u32)(Z.lo); PUTU32(p+12,v); | 625 | v = (u32)(Z.lo); PUTU32(p+12,v); |
| 626 | #endif | ||
| 627 | #else /* BIG_ENDIAN */ | ||
| 628 | Xi[0] = Z.hi; | ||
| 629 | Xi[1] = Z.lo; | ||
| 629 | #endif | 630 | #endif |
| 630 | } | ||
| 631 | else { | ||
| 632 | Xi[0] = Z.hi; | ||
| 633 | Xi[1] = Z.lo; | ||
| 634 | } | ||
| 635 | } | 631 | } |
| 636 | #define GCM_MUL(ctx,Xi) gcm_gmult_1bit(ctx->Xi.u,ctx->H.u) | 632 | #define GCM_MUL(ctx,Xi) gcm_gmult_1bit(ctx->Xi.u,ctx->H.u) |
| 637 | 633 | ||
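BSWAP8 and BSWAP4 themselves are optional, architecture-provided macros; when absent, the code falls back to GETU32/PUTU32 byte shuffling. Hypothetical portable definitions, assuming the gcc/clang builtins (the real macros in modes_lcl.h may use inline assembly):

```c
#ifndef BSWAP8
#define BSWAP8(x) __builtin_bswap64(x)	/* assumed gcc/clang builtin */
#endif
#ifndef BSWAP4
#define BSWAP4(x) __builtin_bswap32(x)
#endif
```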
| @@ -691,20 +687,20 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block) | |||
| 691 | 687 | ||
| 692 | (*block)(ctx->H.c,ctx->H.c,key); | 688 | (*block)(ctx->H.c,ctx->H.c,key); |
| 693 | 689 | ||
| 694 | if (BYTE_ORDER == LITTLE_ENDIAN) { | 690 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 695 | /* H is stored in host byte order */ | 691 | /* H is stored in host byte order */ |
| 696 | #ifdef BSWAP8 | 692 | #ifdef BSWAP8 |
| 697 | ctx->H.u[0] = BSWAP8(ctx->H.u[0]); | 693 | ctx->H.u[0] = BSWAP8(ctx->H.u[0]); |
| 698 | ctx->H.u[1] = BSWAP8(ctx->H.u[1]); | 694 | ctx->H.u[1] = BSWAP8(ctx->H.u[1]); |
| 699 | #else | 695 | #else |
| 700 | u8 *p = ctx->H.c; | 696 | u8 *p = ctx->H.c; |
| 701 | u64 hi,lo; | 697 | u64 hi,lo; |
| 702 | hi = (u64)GETU32(p) <<32|GETU32(p+4); | 698 | hi = (u64)GETU32(p) <<32|GETU32(p+4); |
| 703 | lo = (u64)GETU32(p+8)<<32|GETU32(p+12); | 699 | lo = (u64)GETU32(p+8)<<32|GETU32(p+12); |
| 704 | ctx->H.u[0] = hi; | 700 | ctx->H.u[0] = hi; |
| 705 | ctx->H.u[1] = lo; | 701 | ctx->H.u[1] = lo; |
| 702 | #endif | ||
| 706 | #endif | 703 | #endif |
| 707 | } | ||
| 708 | 704 | ||
| 709 | #if TABLE_BITS==8 | 705 | #if TABLE_BITS==8 |
| 710 | gcm_init_8bit(ctx->Htable,ctx->H.u); | 706 | gcm_init_8bit(ctx->Htable,ctx->H.u); |
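CRYPTO_gcm128_init converts H from the cipher's big-endian output into host byte order on little-endian machines. Without BSWAP8, each 64-bit half is composed from two big-endian 32-bit loads; a standalone equivalent with GETU32 spelled out:

```c
#include <stdint.h>

typedef uint8_t u8;
typedef uint64_t u64;

/* Big-endian 32-bit load, equivalent to the file's GETU32 macro. */
#define GETU32(p)						\
	(((uint32_t)(p)[0] << 24) | ((uint32_t)(p)[1] << 16) |	\
	 ((uint32_t)(p)[2] << 8) | (uint32_t)(p)[3])

/* Load the 16 bytes of H into two host-order 64-bit words, as the
 * #else branch above does. */
static void
load_H(u64 Hu[2], const u8 p[16])
{
	Hu[0] = (u64)GETU32(p) << 32 | GETU32(p + 4);
	Hu[1] = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
}
```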
| @@ -788,45 +784,47 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx,const unsigned char *iv,size_t len) | |||
| 788 | GCM_MUL(ctx,Yi); | 784 | GCM_MUL(ctx,Yi); |
| 789 | } | 785 | } |
| 790 | len0 <<= 3; | 786 | len0 <<= 3; |
| 791 | if (BYTE_ORDER == LITTLE_ENDIAN) { | 787 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 792 | #ifdef BSWAP8 | 788 | #ifdef BSWAP8 |
| 793 | ctx->Yi.u[1] ^= BSWAP8(len0); | 789 | ctx->Yi.u[1] ^= BSWAP8(len0); |
| 794 | #else | 790 | #else |
| 795 | ctx->Yi.c[8] ^= (u8)(len0>>56); | 791 | ctx->Yi.c[8] ^= (u8)(len0>>56); |
| 796 | ctx->Yi.c[9] ^= (u8)(len0>>48); | 792 | ctx->Yi.c[9] ^= (u8)(len0>>48); |
| 797 | ctx->Yi.c[10] ^= (u8)(len0>>40); | 793 | ctx->Yi.c[10] ^= (u8)(len0>>40); |
| 798 | ctx->Yi.c[11] ^= (u8)(len0>>32); | 794 | ctx->Yi.c[11] ^= (u8)(len0>>32); |
| 799 | ctx->Yi.c[12] ^= (u8)(len0>>24); | 795 | ctx->Yi.c[12] ^= (u8)(len0>>24); |
| 800 | ctx->Yi.c[13] ^= (u8)(len0>>16); | 796 | ctx->Yi.c[13] ^= (u8)(len0>>16); |
| 801 | ctx->Yi.c[14] ^= (u8)(len0>>8); | 797 | ctx->Yi.c[14] ^= (u8)(len0>>8); |
| 802 | ctx->Yi.c[15] ^= (u8)(len0); | 798 | ctx->Yi.c[15] ^= (u8)(len0); |
| 799 | #endif | ||
| 800 | #else /* BIG_ENDIAN */ | ||
| 801 | ctx->Yi.u[1] ^= len0; | ||
| 803 | #endif | 802 | #endif |
| 804 | } | ||
| 805 | else | ||
| 806 | ctx->Yi.u[1] ^= len0; | ||
| 807 | 803 | ||
| 808 | GCM_MUL(ctx,Yi); | 804 | GCM_MUL(ctx,Yi); |
| 809 | 805 | ||
| 810 | if (BYTE_ORDER == LITTLE_ENDIAN) | 806 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 811 | #ifdef BSWAP4 | 807 | #ifdef BSWAP4 |
| 812 | ctr = BSWAP4(ctx->Yi.d[3]); | 808 | ctr = BSWAP4(ctx->Yi.d[3]); |
| 813 | #else | 809 | #else |
| 814 | ctr = GETU32(ctx->Yi.c+12); | 810 | ctr = GETU32(ctx->Yi.c+12); |
| 811 | #endif | ||
| 812 | #else /* BIG_ENDIAN */ | ||
| 813 | ctr = ctx->Yi.d[3]; | ||
| 815 | #endif | 814 | #endif |
| 816 | else | ||
| 817 | ctr = ctx->Yi.d[3]; | ||
| 818 | } | 815 | } |
| 819 | 816 | ||
| 820 | (*ctx->block)(ctx->Yi.c,ctx->EK0.c,ctx->key); | 817 | (*ctx->block)(ctx->Yi.c,ctx->EK0.c,ctx->key); |
| 821 | ++ctr; | 818 | ++ctr; |
| 822 | if (BYTE_ORDER == LITTLE_ENDIAN) | 819 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 823 | #ifdef BSWAP4 | 820 | #ifdef BSWAP4 |
| 824 | ctx->Yi.d[3] = BSWAP4(ctr); | 821 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 825 | #else | 822 | #else |
| 826 | PUTU32(ctx->Yi.c+12,ctr); | 823 | PUTU32(ctx->Yi.c+12,ctr); |
| 824 | #endif | ||
| 825 | #else /* BIG_ENDIAN */ | ||
| 826 | ctx->Yi.d[3] = ctr; | ||
| 827 | #endif | 827 | #endif |
| 828 | else | ||
| 829 | ctx->Yi.d[3] = ctr; | ||
| 830 | } | 828 | } |
| 831 | 829 | ||
| 832 | int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len) | 830 | int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len) |
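Every remaining hunk repeats one idiom: the 32-bit block counter lives big-endian in the last four bytes of Yi, so it is read into host order, incremented, and written back, via BSWAP4 when available and GETU32/PUTU32 otherwise. A hedged standalone round trip, with __builtin_bswap32 standing in for BSWAP4:

```c
#include <sys/types.h>	/* BYTE_ORDER, LITTLE_ENDIAN on OpenBSD */
#include <stdint.h>
#include <string.h>

/* Increment the big-endian 32-bit counter in Yi[12..15], as the
 * ++ctr / writeback pairs in the hunks below do. */
static void
incr32(uint8_t Yi[16])
{
	uint32_t ctr;

	memcpy(&ctr, Yi + 12, 4);
#if BYTE_ORDER == LITTLE_ENDIAN
	ctr = __builtin_bswap32(ctr);	/* big-endian -> host */
#endif
	++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
	ctr = __builtin_bswap32(ctr);	/* host -> big-endian */
#endif
	memcpy(Yi + 12, &ctr, 4);
}
```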
| @@ -914,14 +912,15 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, | |||
| 914 | ctx->ares = 0; | 912 | ctx->ares = 0; |
| 915 | } | 913 | } |
| 916 | 914 | ||
| 917 | if (BYTE_ORDER == LITTLE_ENDIAN) | 915 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 918 | #ifdef BSWAP4 | 916 | #ifdef BSWAP4 |
| 919 | ctr = BSWAP4(ctx->Yi.d[3]); | 917 | ctr = BSWAP4(ctx->Yi.d[3]); |
| 920 | #else | 918 | #else |
| 921 | ctr = GETU32(ctx->Yi.c+12); | 919 | ctr = GETU32(ctx->Yi.c+12); |
| 920 | #endif | ||
| 921 | #else /* BIG_ENDIAN */ | ||
| 922 | ctr = ctx->Yi.d[3]; | ||
| 922 | #endif | 923 | #endif |
| 923 | else | ||
| 924 | ctr = ctx->Yi.d[3]; | ||
| 925 | 924 | ||
| 926 | n = ctx->mres; | 925 | n = ctx->mres; |
| 927 | #if !defined(OPENSSL_SMALL_FOOTPRINT) | 926 | #if !defined(OPENSSL_SMALL_FOOTPRINT) |
| @@ -952,14 +951,15 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, | |||
| 952 | 951 | ||
| 953 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 952 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 954 | ++ctr; | 953 | ++ctr; |
| 955 | if (BYTE_ORDER == LITTLE_ENDIAN) | 954 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 956 | #ifdef BSWAP4 | 955 | #ifdef BSWAP4 |
| 957 | ctx->Yi.d[3] = BSWAP4(ctr); | 956 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 958 | #else | 957 | #else |
| 959 | PUTU32(ctx->Yi.c+12,ctr); | 958 | PUTU32(ctx->Yi.c+12,ctr); |
| 959 | #endif | ||
| 960 | #else /* BIG_ENDIAN */ | ||
| 961 | ctx->Yi.d[3] = ctr; | ||
| 960 | #endif | 962 | #endif |
| 961 | else | ||
| 962 | ctx->Yi.d[3] = ctr; | ||
| 963 | for (i=0; i<16/sizeof(size_t); ++i) | 963 | for (i=0; i<16/sizeof(size_t); ++i) |
| 964 | out_t[i] = in_t[i] ^ ctx->EKi.t[i]; | 964 | out_t[i] = in_t[i] ^ ctx->EKi.t[i]; |
| 965 | out += 16; | 965 | out += 16; |
| @@ -978,14 +978,15 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, | |||
| 978 | 978 | ||
| 979 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 979 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 980 | ++ctr; | 980 | ++ctr; |
| 981 | if (BYTE_ORDER == LITTLE_ENDIAN) | 981 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 982 | #ifdef BSWAP4 | 982 | #ifdef BSWAP4 |
| 983 | ctx->Yi.d[3] = BSWAP4(ctr); | 983 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 984 | #else | 984 | #else |
| 985 | PUTU32(ctx->Yi.c+12,ctr); | 985 | PUTU32(ctx->Yi.c+12,ctr); |
| 986 | #endif | ||
| 987 | #else /* BIG_ENDIAN */ | ||
| 988 | ctx->Yi.d[3] = ctr; | ||
| 986 | #endif | 989 | #endif |
| 987 | else | ||
| 988 | ctx->Yi.d[3] = ctr; | ||
| 989 | for (i=0; i<16/sizeof(size_t); ++i) | 990 | for (i=0; i<16/sizeof(size_t); ++i) |
| 990 | out_t[i] = in_t[i] ^ ctx->EKi.t[i]; | 991 | out_t[i] = in_t[i] ^ ctx->EKi.t[i]; |
| 991 | out += 16; | 992 | out += 16; |
| @@ -1001,14 +1002,15 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, | |||
| 1001 | 1002 | ||
| 1002 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 1003 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1003 | ++ctr; | 1004 | ++ctr; |
| 1004 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1005 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1005 | #ifdef BSWAP4 | 1006 | #ifdef BSWAP4 |
| 1006 | ctx->Yi.d[3] = BSWAP4(ctr); | 1007 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1007 | #else | 1008 | #else |
| 1008 | PUTU32(ctx->Yi.c+12,ctr); | 1009 | PUTU32(ctx->Yi.c+12,ctr); |
| 1010 | #endif | ||
| 1011 | #else /* BIG_ENDIAN */ | ||
| 1012 | ctx->Yi.d[3] = ctr; | ||
| 1009 | #endif | 1013 | #endif |
| 1010 | else | ||
| 1011 | ctx->Yi.d[3] = ctr; | ||
| 1012 | for (i=0; i<16/sizeof(size_t); ++i) | 1014 | for (i=0; i<16/sizeof(size_t); ++i) |
| 1013 | ctx->Xi.t[i] ^= | 1015 | ctx->Xi.t[i] ^= |
| 1014 | out_t[i] = in_t[i]^ctx->EKi.t[i]; | 1016 | out_t[i] = in_t[i]^ctx->EKi.t[i]; |
| @@ -1021,14 +1023,15 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, | |||
| 1021 | if (len) { | 1023 | if (len) { |
| 1022 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 1024 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1023 | ++ctr; | 1025 | ++ctr; |
| 1024 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1026 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1025 | #ifdef BSWAP4 | 1027 | #ifdef BSWAP4 |
| 1026 | ctx->Yi.d[3] = BSWAP4(ctr); | 1028 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1027 | #else | 1029 | #else |
| 1028 | PUTU32(ctx->Yi.c+12,ctr); | 1030 | PUTU32(ctx->Yi.c+12,ctr); |
| 1031 | #endif | ||
| 1032 | #else /* BIG_ENDIAN */ | ||
| 1033 | ctx->Yi.d[3] = ctr; | ||
| 1029 | #endif | 1034 | #endif |
| 1030 | else | ||
| 1031 | ctx->Yi.d[3] = ctr; | ||
| 1032 | while (len--) { | 1035 | while (len--) { |
| 1033 | ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n]; | 1036 | ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n]; |
| 1034 | ++n; | 1037 | ++n; |
| @@ -1043,14 +1046,15 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, | |||
| 1043 | if (n==0) { | 1046 | if (n==0) { |
| 1044 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 1047 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1045 | ++ctr; | 1048 | ++ctr; |
| 1046 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1049 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1047 | #ifdef BSWAP4 | 1050 | #ifdef BSWAP4 |
| 1048 | ctx->Yi.d[3] = BSWAP4(ctr); | 1051 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1049 | #else | 1052 | #else |
| 1050 | PUTU32(ctx->Yi.c+12,ctr); | 1053 | PUTU32(ctx->Yi.c+12,ctr); |
| 1054 | #endif | ||
| 1055 | #else /* BIG_ENDIAN */ | ||
| 1056 | ctx->Yi.d[3] = ctr; | ||
| 1051 | #endif | 1057 | #endif |
| 1052 | else | ||
| 1053 | ctx->Yi.d[3] = ctr; | ||
| 1054 | } | 1058 | } |
| 1055 | ctx->Xi.c[n] ^= out[i] = in[i]^ctx->EKi.c[n]; | 1059 | ctx->Xi.c[n] ^= out[i] = in[i]^ctx->EKi.c[n]; |
| 1056 | n = (n+1)%16; | 1060 | n = (n+1)%16; |
| @@ -1090,14 +1094,15 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 1090 | ctx->ares = 0; | 1094 | ctx->ares = 0; |
| 1091 | } | 1095 | } |
| 1092 | 1096 | ||
| 1093 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1097 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1094 | #ifdef BSWAP4 | 1098 | #ifdef BSWAP4 |
| 1095 | ctr = BSWAP4(ctx->Yi.d[3]); | 1099 | ctr = BSWAP4(ctx->Yi.d[3]); |
| 1096 | #else | 1100 | #else |
| 1097 | ctr = GETU32(ctx->Yi.c+12); | 1101 | ctr = GETU32(ctx->Yi.c+12); |
| 1102 | #endif | ||
| 1103 | #else /* BIG_ENDIAN */ | ||
| 1104 | ctr = ctx->Yi.d[3]; | ||
| 1098 | #endif | 1105 | #endif |
| 1099 | else | ||
| 1100 | ctr = ctx->Yi.d[3]; | ||
| 1101 | 1106 | ||
| 1102 | n = ctx->mres; | 1107 | n = ctx->mres; |
| 1103 | #if !defined(OPENSSL_SMALL_FOOTPRINT) | 1108 | #if !defined(OPENSSL_SMALL_FOOTPRINT) |
| @@ -1131,14 +1136,15 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 1131 | 1136 | ||
| 1132 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 1137 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1133 | ++ctr; | 1138 | ++ctr; |
| 1134 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1139 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1135 | #ifdef BSWAP4 | 1140 | #ifdef BSWAP4 |
| 1136 | ctx->Yi.d[3] = BSWAP4(ctr); | 1141 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1137 | #else | 1142 | #else |
| 1138 | PUTU32(ctx->Yi.c+12,ctr); | 1143 | PUTU32(ctx->Yi.c+12,ctr); |
| 1139 | #endif | 1144 | #endif |
| 1140 | else | 1145 | #else /* BIG_ENDIAN */ |
| 1141 | ctx->Yi.d[3] = ctr; | 1146 | ctx->Yi.d[3] = ctr; |
| 1147 | #endif | ||
| 1142 | for (i=0; i<16/sizeof(size_t); ++i) | 1148 | for (i=0; i<16/sizeof(size_t); ++i) |
| 1143 | out_t[i] = in_t[i]^ctx->EKi.t[i]; | 1149 | out_t[i] = in_t[i]^ctx->EKi.t[i]; |
| 1144 | out += 16; | 1150 | out += 16; |
| @@ -1155,14 +1161,15 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 1155 | 1161 | ||
| 1156 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 1162 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1157 | ++ctr; | 1163 | ++ctr; |
| 1158 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1164 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1159 | #ifdef BSWAP4 | 1165 | #ifdef BSWAP4 |
| 1160 | ctx->Yi.d[3] = BSWAP4(ctr); | 1166 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1161 | #else | 1167 | #else |
| 1162 | PUTU32(ctx->Yi.c+12,ctr); | 1168 | PUTU32(ctx->Yi.c+12,ctr); |
| 1169 | #endif | ||
| 1170 | #else /* BIG_ENDIAN */ | ||
| 1171 | ctx->Yi.d[3] = ctr; | ||
| 1163 | #endif | 1172 | #endif |
| 1164 | else | ||
| 1165 | ctx->Yi.d[3] = ctr; | ||
| 1166 | for (i=0; i<16/sizeof(size_t); ++i) | 1173 | for (i=0; i<16/sizeof(size_t); ++i) |
| 1167 | out_t[i] = in_t[i]^ctx->EKi.t[i]; | 1174 | out_t[i] = in_t[i]^ctx->EKi.t[i]; |
| 1168 | out += 16; | 1175 | out += 16; |
| @@ -1177,14 +1184,15 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 1177 | 1184 | ||
| 1178 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 1185 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1179 | ++ctr; | 1186 | ++ctr; |
| 1180 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1187 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1181 | #ifdef BSWAP4 | 1188 | #ifdef BSWAP4 |
| 1182 | ctx->Yi.d[3] = BSWAP4(ctr); | 1189 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1183 | #else | 1190 | #else |
| 1184 | PUTU32(ctx->Yi.c+12,ctr); | 1191 | PUTU32(ctx->Yi.c+12,ctr); |
| 1192 | #endif | ||
| 1193 | #else /* BIG_ENDIAN */ | ||
| 1194 | ctx->Yi.d[3] = ctr; | ||
| 1185 | #endif | 1195 | #endif |
| 1186 | else | ||
| 1187 | ctx->Yi.d[3] = ctr; | ||
| 1188 | for (i=0; i<16/sizeof(size_t); ++i) { | 1196 | for (i=0; i<16/sizeof(size_t); ++i) { |
| 1189 | size_t c = in[i]; | 1197 | size_t c = in[i]; |
| 1190 | out[i] = c^ctx->EKi.t[i]; | 1198 | out[i] = c^ctx->EKi.t[i]; |
| @@ -1199,14 +1207,15 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 1199 | if (len) { | 1207 | if (len) { |
| 1200 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 1208 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1201 | ++ctr; | 1209 | ++ctr; |
| 1202 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1210 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1203 | #ifdef BSWAP4 | 1211 | #ifdef BSWAP4 |
| 1204 | ctx->Yi.d[3] = BSWAP4(ctr); | 1212 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1205 | #else | 1213 | #else |
| 1206 | PUTU32(ctx->Yi.c+12,ctr); | 1214 | PUTU32(ctx->Yi.c+12,ctr); |
| 1215 | #endif | ||
| 1216 | #else /* BIG_ENDIAN */ | ||
| 1217 | ctx->Yi.d[3] = ctr; | ||
| 1207 | #endif | 1218 | #endif |
| 1208 | else | ||
| 1209 | ctx->Yi.d[3] = ctr; | ||
| 1210 | while (len--) { | 1219 | while (len--) { |
| 1211 | u8 c = in[n]; | 1220 | u8 c = in[n]; |
| 1212 | ctx->Xi.c[n] ^= c; | 1221 | ctx->Xi.c[n] ^= c; |
| @@ -1224,14 +1233,15 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, | |||
| 1224 | if (n==0) { | 1233 | if (n==0) { |
| 1225 | (*block)(ctx->Yi.c,ctx->EKi.c,key); | 1234 | (*block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1226 | ++ctr; | 1235 | ++ctr; |
| 1227 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1236 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1228 | #ifdef BSWAP4 | 1237 | #ifdef BSWAP4 |
| 1229 | ctx->Yi.d[3] = BSWAP4(ctr); | 1238 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1230 | #else | 1239 | #else |
| 1231 | PUTU32(ctx->Yi.c+12,ctr); | 1240 | PUTU32(ctx->Yi.c+12,ctr); |
| 1241 | #endif | ||
| 1242 | #else /* BIG_ENDIAN */ | ||
| 1243 | ctx->Yi.d[3] = ctr; | ||
| 1232 | #endif | 1244 | #endif |
| 1233 | else | ||
| 1234 | ctx->Yi.d[3] = ctr; | ||
| 1235 | } | 1245 | } |
| 1236 | c = in[i]; | 1246 | c = in[i]; |
| 1237 | out[i] = c^ctx->EKi.c[n]; | 1247 | out[i] = c^ctx->EKi.c[n]; |
| @@ -1272,14 +1282,15 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 1272 | ctx->ares = 0; | 1282 | ctx->ares = 0; |
| 1273 | } | 1283 | } |
| 1274 | 1284 | ||
| 1275 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1285 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1276 | #ifdef BSWAP4 | 1286 | #ifdef BSWAP4 |
| 1277 | ctr = BSWAP4(ctx->Yi.d[3]); | 1287 | ctr = BSWAP4(ctx->Yi.d[3]); |
| 1278 | #else | 1288 | #else |
| 1279 | ctr = GETU32(ctx->Yi.c+12); | 1289 | ctr = GETU32(ctx->Yi.c+12); |
| 1290 | #endif | ||
| 1291 | #else /* BIG_ENDIAN */ | ||
| 1292 | ctr = ctx->Yi.d[3]; | ||
| 1280 | #endif | 1293 | #endif |
| 1281 | else | ||
| 1282 | ctr = ctx->Yi.d[3]; | ||
| 1283 | 1294 | ||
| 1284 | n = ctx->mres; | 1295 | n = ctx->mres; |
| 1285 | if (n) { | 1296 | if (n) { |
| @@ -1298,14 +1309,15 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 1298 | while (len>=GHASH_CHUNK) { | 1309 | while (len>=GHASH_CHUNK) { |
| 1299 | (*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c); | 1310 | (*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c); |
| 1300 | ctr += GHASH_CHUNK/16; | 1311 | ctr += GHASH_CHUNK/16; |
| 1301 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1312 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1302 | #ifdef BSWAP4 | 1313 | #ifdef BSWAP4 |
| 1303 | ctx->Yi.d[3] = BSWAP4(ctr); | 1314 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1304 | #else | 1315 | #else |
| 1305 | PUTU32(ctx->Yi.c+12,ctr); | 1316 | PUTU32(ctx->Yi.c+12,ctr); |
| 1317 | #endif | ||
| 1318 | #else /* BIG_ENDIAN */ | ||
| 1319 | ctx->Yi.d[3] = ctr; | ||
| 1306 | #endif | 1320 | #endif |
| 1307 | else | ||
| 1308 | ctx->Yi.d[3] = ctr; | ||
| 1309 | GHASH(ctx,out,GHASH_CHUNK); | 1321 | GHASH(ctx,out,GHASH_CHUNK); |
| 1310 | out += GHASH_CHUNK; | 1322 | out += GHASH_CHUNK; |
| 1311 | in += GHASH_CHUNK; | 1323 | in += GHASH_CHUNK; |
| @@ -1317,14 +1329,15 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 1317 | 1329 | ||
| 1318 | (*stream)(in,out,j,key,ctx->Yi.c); | 1330 | (*stream)(in,out,j,key,ctx->Yi.c); |
| 1319 | ctr += (unsigned int)j; | 1331 | ctr += (unsigned int)j; |
| 1320 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1332 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1321 | #ifdef BSWAP4 | 1333 | #ifdef BSWAP4 |
| 1322 | ctx->Yi.d[3] = BSWAP4(ctr); | 1334 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1323 | #else | 1335 | #else |
| 1324 | PUTU32(ctx->Yi.c+12,ctr); | 1336 | PUTU32(ctx->Yi.c+12,ctr); |
| 1337 | #endif | ||
| 1338 | #else /* BIG_ENDIAN */ | ||
| 1339 | ctx->Yi.d[3] = ctr; | ||
| 1325 | #endif | 1340 | #endif |
| 1326 | else | ||
| 1327 | ctx->Yi.d[3] = ctr; | ||
| 1328 | in += i; | 1341 | in += i; |
| 1329 | len -= i; | 1342 | len -= i; |
| 1330 | #if defined(GHASH) | 1343 | #if defined(GHASH) |
| @@ -1341,14 +1354,15 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 1341 | if (len) { | 1354 | if (len) { |
| 1342 | (*ctx->block)(ctx->Yi.c,ctx->EKi.c,key); | 1355 | (*ctx->block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1343 | ++ctr; | 1356 | ++ctr; |
| 1344 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1357 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1345 | #ifdef BSWAP4 | 1358 | #ifdef BSWAP4 |
| 1346 | ctx->Yi.d[3] = BSWAP4(ctr); | 1359 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1347 | #else | 1360 | #else |
| 1348 | PUTU32(ctx->Yi.c+12,ctr); | 1361 | PUTU32(ctx->Yi.c+12,ctr); |
| 1362 | #endif | ||
| 1363 | #else /* BIG_ENDIAN */ | ||
| 1364 | ctx->Yi.d[3] = ctr; | ||
| 1349 | #endif | 1365 | #endif |
| 1350 | else | ||
| 1351 | ctx->Yi.d[3] = ctr; | ||
| 1352 | while (len--) { | 1366 | while (len--) { |
| 1353 | ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n]; | 1367 | ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n]; |
| 1354 | ++n; | 1368 | ++n; |
| @@ -1386,14 +1400,15 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 1386 | ctx->ares = 0; | 1400 | ctx->ares = 0; |
| 1387 | } | 1401 | } |
| 1388 | 1402 | ||
| 1389 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1403 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1390 | #ifdef BSWAP4 | 1404 | #ifdef BSWAP4 |
| 1391 | ctr = BSWAP4(ctx->Yi.d[3]); | 1405 | ctr = BSWAP4(ctx->Yi.d[3]); |
| 1392 | #else | 1406 | #else |
| 1393 | ctr = GETU32(ctx->Yi.c+12); | 1407 | ctr = GETU32(ctx->Yi.c+12); |
| 1408 | #endif | ||
| 1409 | #else /* BIG_ENDIAN */ | ||
| 1410 | ctr = ctx->Yi.d[3]; | ||
| 1394 | #endif | 1411 | #endif |
| 1395 | else | ||
| 1396 | ctr = ctx->Yi.d[3]; | ||
| 1397 | 1412 | ||
| 1398 | n = ctx->mres; | 1413 | n = ctx->mres; |
| 1399 | if (n) { | 1414 | if (n) { |
| @@ -1415,14 +1430,15 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 1415 | GHASH(ctx,in,GHASH_CHUNK); | 1430 | GHASH(ctx,in,GHASH_CHUNK); |
| 1416 | (*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c); | 1431 | (*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c); |
| 1417 | ctr += GHASH_CHUNK/16; | 1432 | ctr += GHASH_CHUNK/16; |
| 1418 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1433 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1419 | #ifdef BSWAP4 | 1434 | #ifdef BSWAP4 |
| 1420 | ctx->Yi.d[3] = BSWAP4(ctr); | 1435 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1421 | #else | 1436 | #else |
| 1422 | PUTU32(ctx->Yi.c+12,ctr); | 1437 | PUTU32(ctx->Yi.c+12,ctr); |
| 1438 | #endif | ||
| 1439 | #else /* BIG_ENDIAN */ | ||
| 1440 | ctx->Yi.d[3] = ctr; | ||
| 1423 | #endif | 1441 | #endif |
| 1424 | else | ||
| 1425 | ctx->Yi.d[3] = ctr; | ||
| 1426 | out += GHASH_CHUNK; | 1442 | out += GHASH_CHUNK; |
| 1427 | in += GHASH_CHUNK; | 1443 | in += GHASH_CHUNK; |
| 1428 | len -= GHASH_CHUNK; | 1444 | len -= GHASH_CHUNK; |
| @@ -1445,14 +1461,15 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 1445 | #endif | 1461 | #endif |
| 1446 | (*stream)(in,out,j,key,ctx->Yi.c); | 1462 | (*stream)(in,out,j,key,ctx->Yi.c); |
| 1447 | ctr += (unsigned int)j; | 1463 | ctr += (unsigned int)j; |
| 1448 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1464 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1449 | #ifdef BSWAP4 | 1465 | #ifdef BSWAP4 |
| 1450 | ctx->Yi.d[3] = BSWAP4(ctr); | 1466 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1451 | #else | 1467 | #else |
| 1452 | PUTU32(ctx->Yi.c+12,ctr); | 1468 | PUTU32(ctx->Yi.c+12,ctr); |
| 1469 | #endif | ||
| 1470 | #else /* BIG_ENDIAN */ | ||
| 1471 | ctx->Yi.d[3] = ctr; | ||
| 1453 | #endif | 1472 | #endif |
| 1454 | else | ||
| 1455 | ctx->Yi.d[3] = ctr; | ||
| 1456 | out += i; | 1473 | out += i; |
| 1457 | in += i; | 1474 | in += i; |
| 1458 | len -= i; | 1475 | len -= i; |
| @@ -1460,14 +1477,15 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, | |||
| 1460 | if (len) { | 1477 | if (len) { |
| 1461 | (*ctx->block)(ctx->Yi.c,ctx->EKi.c,key); | 1478 | (*ctx->block)(ctx->Yi.c,ctx->EKi.c,key); |
| 1462 | ++ctr; | 1479 | ++ctr; |
| 1463 | if (BYTE_ORDER == LITTLE_ENDIAN) | 1480 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1464 | #ifdef BSWAP4 | 1481 | #ifdef BSWAP4 |
| 1465 | ctx->Yi.d[3] = BSWAP4(ctr); | 1482 | ctx->Yi.d[3] = BSWAP4(ctr); |
| 1466 | #else | 1483 | #else |
| 1467 | PUTU32(ctx->Yi.c+12,ctr); | 1484 | PUTU32(ctx->Yi.c+12,ctr); |
| 1485 | #endif | ||
| 1486 | #else /* BIG_ENDIAN */ | ||
| 1487 | ctx->Yi.d[3] = ctr; | ||
| 1468 | #endif | 1488 | #endif |
| 1469 | else | ||
| 1470 | ctx->Yi.d[3] = ctr; | ||
| 1471 | while (len--) { | 1489 | while (len--) { |
| 1472 | u8 c = in[n]; | 1490 | u8 c = in[n]; |
| 1473 | ctx->Xi.c[n] ^= c; | 1491 | ctx->Xi.c[n] ^= c; |
| @@ -1492,20 +1510,20 @@ int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx,const unsigned char *tag, | |||
| 1492 | if (ctx->mres || ctx->ares) | 1510 | if (ctx->mres || ctx->ares) |
| 1493 | GCM_MUL(ctx,Xi); | 1511 | GCM_MUL(ctx,Xi); |
| 1494 | 1512 | ||
| 1495 | if (BYTE_ORDER == LITTLE_ENDIAN) { | 1513 | #if BYTE_ORDER == LITTLE_ENDIAN |
| 1496 | #ifdef BSWAP8 | 1514 | #ifdef BSWAP8 |
| 1497 | alen = BSWAP8(alen); | 1515 | alen = BSWAP8(alen); |
| 1498 | clen = BSWAP8(clen); | 1516 | clen = BSWAP8(clen); |
| 1499 | #else | 1517 | #else |
| 1500 | u8 *p = ctx->len.c; | 1518 | u8 *p = ctx->len.c; |
| 1501 | 1519 | ||
| 1502 | ctx->len.u[0] = alen; | 1520 | ctx->len.u[0] = alen; |
| 1503 | ctx->len.u[1] = clen; | 1521 | ctx->len.u[1] = clen; |
| 1504 | 1522 | ||
| 1505 | alen = (u64)GETU32(p) <<32|GETU32(p+4); | 1523 | alen = (u64)GETU32(p) <<32|GETU32(p+4); |
| 1506 | clen = (u64)GETU32(p+8)<<32|GETU32(p+12); | 1524 | clen = (u64)GETU32(p+8)<<32|GETU32(p+12); |
| 1525 | #endif | ||
| 1507 | #endif | 1526 | #endif |
| 1508 | } | ||
| 1509 | 1527 | ||
| 1510 | ctx->Xi.u[0] ^= alen; | 1528 | ctx->Xi.u[0] ^= alen; |
| 1511 | ctx->Xi.u[1] ^= clen; | 1529 | ctx->Xi.u[1] ^= clen; |
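For context, the routines touched here compose into the usual GCM sequence. A usage sketch against this file's public API, assuming AES as the underlying block cipher and eliding detailed error cleanup:

```c
#include <stddef.h>
#include <openssl/aes.h>
#include <openssl/modes.h>

/* One-shot AES-128-GCM seal via the gcm128 API; tag is 16 bytes.
 * Returns 0 on success, -1 on failure. */
static int
aes128_gcm_seal(const unsigned char key[16],
    const unsigned char *iv, size_t ivlen,
    const unsigned char *aad, size_t aadlen,
    const unsigned char *pt, unsigned char *ct, size_t len,
    unsigned char tag[16])
{
	AES_KEY aes;
	GCM128_CONTEXT *gcm;
	int ret = -1;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;
	if ((gcm = CRYPTO_gcm128_new(&aes, (block128_f)AES_encrypt)) == NULL)
		return -1;
	CRYPTO_gcm128_setiv(gcm, iv, ivlen);
	if (CRYPTO_gcm128_aad(gcm, aad, aadlen) == 0 &&
	    CRYPTO_gcm128_encrypt(gcm, pt, ct, len) == 0) {
		CRYPTO_gcm128_tag(gcm, tag, 16);
		ret = 0;
	}
	CRYPTO_gcm128_release(gcm);
	return ret;
}
```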
