Diffstat (limited to 'Cryptlib/OpenSSL/crypto/sha/sha512.c')
-rw-r--r-- | Cryptlib/OpenSSL/crypto/sha/sha512.c | 198
1 file changed, 102 insertions(+), 96 deletions(-)
diff --git a/Cryptlib/OpenSSL/crypto/sha/sha512.c b/Cryptlib/OpenSSL/crypto/sha/sha512.c
index d24d1034..3bf66ae1 100644
--- a/Cryptlib/OpenSSL/crypto/sha/sha512.c
+++ b/Cryptlib/OpenSSL/crypto/sha/sha512.c
@@ -1,19 +1,17 @@
-/*
- * Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved.
- *
- * Licensed under the OpenSSL license (the "License"). You may not use
- * this file except in compliance with the License. You can obtain a copy
- * in the file LICENSE in the source distribution or at
- * https://www.openssl.org/source/license.html
+/* crypto/sha/sha512.c */
+/* ====================================================================
+ * Copyright (c) 2004 The OpenSSL Project. All rights reserved
+ * according to the OpenSSL license [found in ../../LICENSE].
+ * ====================================================================
  */
-
 #include <openssl/opensslconf.h>
+#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA512)
 /*-
  * IMPLEMENTATION NOTES.
  *
  * As you might have noticed 32-bit hash algorithms:
  *
- * - permit SHA_LONG to be wider than 32-bit
+ * - permit SHA_LONG to be wider than 32-bit (case on CRAY);
  * - optimized versions implement two transform functions: one operating
  *   on [aligned] data in host byte order and one - on data in input
  *   stream byte order;
@@ -43,24 +41,26 @@
  * 16-bit platforms.
  * <appro@fy.chalmers.se>
  */
-#include <stdlib.h>
-#include <string.h>
+# include <stdlib.h>
+# include <string.h>
 
-#include <openssl/crypto.h>
-#include <openssl/sha.h>
-#include <openssl/opensslv.h>
+# include <openssl/crypto.h>
+# include <openssl/sha.h>
+# include <openssl/opensslv.h>
 
-#include "internal/cryptlib.h"
+# include "cryptlib.h"
 
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
+const char SHA512_version[] = "SHA-512" OPENSSL_VERSION_PTEXT;
+
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
     defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) || \
     defined(__s390__) || defined(__s390x__) || \
     defined(__aarch64__) || \
     defined(SHA512_ASM)
-# define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
-#endif
+# define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+# endif
 
-int SHA384_Init(SHA512_CTX *c)
+fips_md_init_ctx(SHA384, SHA512)
 {
     c->h[0] = U64(0xcbbb9d5dc1059ed8);
     c->h[1] = U64(0x629a292a367cd507);
@@ -78,7 +78,7 @@ int SHA384_Init(SHA512_CTX *c)
     return 1;
 }
 
-int SHA512_Init(SHA512_CTX *c)
+fips_md_init(SHA512)
 {
     c->h[0] = U64(0x6a09e667f3bcc908);
     c->h[1] = U64(0xbb67ae8584caa73b);
@@ -96,9 +96,9 @@ int SHA512_Init(SHA512_CTX *c)
     return 1;
 }
 
-#ifndef SHA512_ASM
+# ifndef SHA512_ASM
 static
-#endif
+# endif
 void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num);
 
 int SHA512_Final(unsigned char *md, SHA512_CTX *c)
@@ -108,17 +108,15 @@ int SHA512_Final(unsigned char *md, SHA512_CTX *c)
 
     p[n] = 0x80;                /* There always is a room for one */
     n++;
-    if (n > (sizeof(c->u) - 16)) {
-        memset(p + n, 0, sizeof(c->u) - n);
-        n = 0;
-        sha512_block_data_order(c, p, 1);
-    }
+    if (n > (sizeof(c->u) - 16))
+        memset(p + n, 0, sizeof(c->u) - n), n = 0,
+            sha512_block_data_order(c, p, 1);
 
     memset(p + n, 0, sizeof(c->u) - 16 - n);
-#ifdef B_ENDIAN
+# ifdef B_ENDIAN
     c->u.d[SHA_LBLOCK - 2] = c->Nh;
     c->u.d[SHA_LBLOCK - 1] = c->Nl;
-#else
+# else
     p[sizeof(c->u) - 1] = (unsigned char)(c->Nl);
     p[sizeof(c->u) - 2] = (unsigned char)(c->Nl >> 8);
     p[sizeof(c->u) - 3] = (unsigned char)(c->Nl >> 16);
@@ -135,7 +133,7 @@ int SHA512_Final(unsigned char *md, SHA512_CTX *c)
     p[sizeof(c->u) - 14] = (unsigned char)(c->Nh >> 40);
     p[sizeof(c->u) - 15] = (unsigned char)(c->Nh >> 48);
     p[sizeof(c->u) - 16] = (unsigned char)(c->Nh >> 56);
-#endif
+# endif
 
     sha512_block_data_order(c, p, 1);
 
@@ -215,14 +213,14 @@ int SHA512_Update(SHA512_CTX *c, const void *_data, size_t len)
     }
 
     if (len >= sizeof(c->u)) {
-#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+# ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
         if ((size_t)data % sizeof(c->u.d[0]) != 0)
            while (len >= sizeof(c->u))
                memcpy(p, data, sizeof(c->u)),
                    sha512_block_data_order(c, p, 1),
                    len -= sizeof(c->u), data += sizeof(c->u);
        else
-#endif
+# endif
            sha512_block_data_order(c, data, len / sizeof(c->u)),
                data += len, len %= sizeof(c->u), data -= len;
    }
@@ -240,10 +238,10 @@ int SHA384_Update(SHA512_CTX *c, const void *data, size_t len)
 
 void SHA512_Transform(SHA512_CTX *c, const unsigned char *data)
 {
-#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+# ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
     if ((size_t)data % sizeof(c->u.d[0]) != 0)
         memcpy(c->u.p, data, sizeof(c->u.p)), data = c->u.p;
-#endif
+# endif
     sha512_block_data_order(c, data, 1);
 }
 
@@ -275,7 +273,7 @@ unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md)
     return (md);
 }
 
-#ifndef SHA512_ASM
+# ifndef SHA512_ASM
 static const SHA_LONG64 K512[80] = {
     U64(0x428a2f98d728ae22), U64(0x7137449123ef65cd),
     U64(0xb5c0fbcfec4d3b2f), U64(0xe9b5dba58189dbbc),
@@ -319,23 +317,23 @@ static const SHA_LONG64 K512[80] = {
     U64(0x5fcb6fab3ad6faec), U64(0x6c44198c4a475817)
 };
 
-# ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(__x86_64) || defined(__x86_64__)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
+# ifndef PEDANTIC
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__x86_64) || defined(__x86_64__)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
                 asm ("rorq %1,%0" \
                 : "=r"(ret) \
                 : "J"(n),"0"(a) \
                 : "cc"); ret; })
-# if !defined(B_ENDIAN)
-# define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
+# if !defined(B_ENDIAN)
+# define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
                 asm ("bswapq %0" \
                 : "=r"(ret) \
                 : "0"(ret)); ret; })
-# endif
-# elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
-# if defined(I386_ONLY)
-# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+# endif
+# elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
+# if defined(I386_ONLY)
+# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
                 unsigned int hi=p[0],lo=p[1]; \
                 asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
                     "roll $16,%%eax; roll $16,%%edx; "\
@@ -343,39 +341,39 @@ static const SHA_LONG64 K512[80] = {
                    : "=a"(lo),"=d"(hi) \
                    : "0"(lo),"1"(hi) : "cc"); \
                ((SHA_LONG64)hi)<<32|lo; })
-# else
-# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+# else
+# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
                 unsigned int hi=p[0],lo=p[1]; \
                 asm ("bswapl %0; bswapl %1;" \
                     : "=r"(lo),"=r"(hi) \
                     : "0"(lo),"1"(hi)); \
                 ((SHA_LONG64)hi)<<32|lo; })
-# endif
-# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
+# endif
+# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
                 asm ("rotrdi %0,%1,%2" \
                 : "=r"(ret) \
                 : "r"(a),"K"(n)); ret; })
-# elif defined(__aarch64__)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
+# elif defined(__aarch64__)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
                 asm ("ror %0,%1,%2" \
                 : "=r"(ret) \
                 : "r"(a),"I"(n)); ret; })
-# if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+# if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
     __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
-# define PULL64(x) ({ SHA_LONG64 ret; \
+# define PULL64(x) ({ SHA_LONG64 ret; \
                 asm ("rev %0,%1" \
                 : "=r"(ret) \
                 : "r"(*((const SHA_LONG64 *)(&(x))))); ret; })
+# endif
 # endif
-# endif
-# elif defined(_MSC_VER)
-# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
-# pragma intrinsic(_rotr64)
-# define ROTR(a,n) _rotr64((a),n)
-# endif
-# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(I386_ONLY)
+# elif defined(_MSC_VER)
+# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
+# pragma intrinsic(_rotr64)
+# define ROTR(a,n) _rotr64((a),n)
+# endif
+# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(I386_ONLY)
 static SHA_LONG64 __fastcall __pull64be(const void *x)
 {
     _asm mov edx,[ecx + 0]
@@ -383,34 +381,34 @@ static SHA_LONG64 __fastcall __pull64be(const void *x)
     _asm xchg dh, dl _asm xchg ah, al
     _asm rol edx, 16 _asm rol eax, 16
     _asm xchg dh, dl _asm xchg ah, al}
-# else
+# else
 static SHA_LONG64 __fastcall __pull64be(const void *x)
 {
     _asm mov edx,[ecx + 0]
     _asm mov eax,[ecx + 4]
     _asm bswap edx _asm bswap eax}
-# endif
-# define PULL64(x) __pull64be(&(x))
-# if _MSC_VER<=1200
-# pragma inline_depth(0)
+# endif
+# define PULL64(x) __pull64be(&(x))
+# if _MSC_VER<=1200
+# pragma inline_depth(0)
+# endif
 # endif
 # endif
 # endif
-# endif
-# ifndef PULL64
-# define B(x,j) (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8))
-# define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7))
-# endif
-# ifndef ROTR
-# define ROTR(x,s) (((x)>>s) | (x)<<(64-s))
-# endif
-# define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
-# define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
-# define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
-# define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
-# define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
-# define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
-# if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+# ifndef PULL64
+# define B(x,j) (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8))
+# define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7))
+# endif
+# ifndef ROTR
+# define ROTR(x,s) (((x)>>s) | (x)<<(64-s))
+# endif
+# define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+# define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+# define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+# define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+# define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
+# define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86)
 /*
  * This code should give better results on 32-bit CPU with less than
  * ~24 registers, both size and performance wise...
@@ -435,11 +433,11 @@ _asm bswap edx _asm bswap eax}
     F[7] = ctx->h[7];
 
     for (i = 0; i < 16; i++, F--) {
-# ifdef B_ENDIAN
+# ifdef B_ENDIAN
         T = W[i];
-# else
+# else
         T = PULL64(W[i]);
-# endif
+# endif
         F[0] = A;
         F[4] = E;
         F[8] = T;
@@ -474,7 +472,7 @@ _asm bswap edx _asm bswap eax}
         }
     }
 
-# elif defined(OPENSSL_SMALL_FOOTPRINT)
+# elif defined(OPENSSL_SMALL_FOOTPRINT)
 static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
                                     size_t num)
 {
@@ -495,11 +493,11 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
     h = ctx->h[7];
 
     for (i = 0; i < 16; i++) {
-# ifdef B_ENDIAN
+# ifdef B_ENDIAN
         T1 = X[i] = W[i];
-# else
+# else
         T1 = X[i] = PULL64(W[i]);
-# endif
+# endif
         T1 += h + Sigma1(e) + Ch(e, f, g) + K512[i];
         T2 = Sigma0(a) + Maj(a, b, c);
         h = g;
@@ -544,12 +542,12 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
     }
 }
 
-# else
-# define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
+# else
+# define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
         T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; \
         h = Sigma0(a) + Maj(a,b,c); \
         d += T1; h += T1; } while (0)
-# define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do { \
+# define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do { \
         s0 = X[(j+1)&0x0f]; s0 = sigma0(s0); \
        s1 = X[(j+14)&0x0f]; s1 = sigma1(s1); \
        T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f]; \
@@ -573,7 +571,7 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
     g = ctx->h[6];
     h = ctx->h[7];
 
-# ifdef B_ENDIAN
+# ifdef B_ENDIAN
     T1 = X[0] = W[0];
     ROUND_00_15(0, a, b, c, d, e, f, g, h);
     T1 = X[1] = W[1];
@@ -606,7 +604,7 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
     ROUND_00_15(14, c, d, e, f, g, h, a, b);
     T1 = X[15] = W[15];
     ROUND_00_15(15, b, c, d, e, f, g, h, a);
-# else
+# else
     T1 = X[0] = PULL64(W[0]);
     ROUND_00_15(0, a, b, c, d, e, f, g, h);
     T1 = X[1] = PULL64(W[1]);
@@ -639,7 +637,7 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
     ROUND_00_15(14, c, d, e, f, g, h, a, b);
     T1 = X[15] = PULL64(W[15]);
     ROUND_00_15(15, b, c, d, e, f, g, h, a);
-# endif
+# endif
 
     for (i = 16; i < 80; i += 16) {
         ROUND_16_80(i, 0, a, b, c, d, e, f, g, h, X);
@@ -673,6 +671,14 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
         }
     }
 
+# endif
+
+# endif /* SHA512_ASM */
+
+#else /* !OPENSSL_NO_SHA512 */
+
+# if defined(PEDANTIC) || defined(__DECC) || defined(OPENSSL_SYS_MACOSX)
+static void *dummy = &dummy;
 # endif
 
-#endif /* SHA512_ASM */
+#endif /* !OPENSSL_NO_SHA512 */
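For readers skimming the patch, the portable fallback path shared by both versions is compact enough to restate. The sketch below is an illustration only, not part of the patched file: it restates in plain C99 what the PULL64, ROTR, Sigma0/Sigma1, sigma0/sigma1, Ch and Maj macros visible in the hunks above compute when no inline-assembly variant is selected (the real file expresses these as preprocessor macros over SHA_LONG64).

/*
 * Illustration only -- plain-C equivalents of the portable fallback macros
 * shown in the diff above. uint64_t stands in for SHA_LONG64.
 */
#include <stdint.h>

static uint64_t pull64(const unsigned char *p)   /* big-endian 64-bit load (PULL64 fallback) */
{
    uint64_t ret = 0;
    for (int j = 0; j < 8; j++)
        ret |= (uint64_t)p[j] << ((7 - j) * 8);
    return ret;
}

static uint64_t rotr(uint64_t x, int s)          /* rotate right, 0 < s < 64 (ROTR fallback) */
{
    return (x >> s) | (x << (64 - s));
}

/* The FIPS 180-4 SHA-512 functions that the round macros are built from. */
static uint64_t Sigma0(uint64_t x) { return rotr(x, 28) ^ rotr(x, 34) ^ rotr(x, 39); }
static uint64_t Sigma1(uint64_t x) { return rotr(x, 14) ^ rotr(x, 18) ^ rotr(x, 41); }
static uint64_t sigma0(uint64_t x) { return rotr(x, 1) ^ rotr(x, 8) ^ (x >> 7); }
static uint64_t sigma1(uint64_t x) { return rotr(x, 19) ^ rotr(x, 61) ^ (x >> 6); }
static uint64_t Ch(uint64_t x, uint64_t y, uint64_t z)  { return (x & y) ^ (~x & z); }
static uint64_t Maj(uint64_t x, uint64_t y, uint64_t z) { return (x & y) ^ (x & z) ^ (y & z); }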
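The SHA512_Final hunks only re-indent the length-encoding code, but the layout it produces is easy to miss in diff form. As a hedged illustration (a hypothetical helper, not part of the patch): the closing 128-byte block ends with the 128-bit message bit length, high word first, each word stored big-endian, which is what the byte-by-byte stores into p[sizeof(c->u) - 1] .. p[sizeof(c->u) - 16] achieve on little-endian targets.

/*
 * Illustration only -- mirrors the byte stores in SHA512_Final: c->Nh (high
 * 64 bits of the bit length) lands at offsets 112..119 of the final block,
 * c->Nl (low 64 bits) at offsets 120..127, both big-endian.
 */
#include <stdint.h>

static void store_length_be(unsigned char block[128], uint64_t Nh, uint64_t Nl)
{
    for (int i = 0; i < 8; i++) {
        block[127 - i] = (unsigned char)(Nl >> (8 * i)); /* p[sizeof(c->u)-1] .. p[...-8]  */
        block[119 - i] = (unsigned char)(Nh >> (8 * i)); /* p[sizeof(c->u)-9] .. p[...-16] */
    }
}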