/*****************************************
 * sha256 compression function
 *
 * res  receives the updated chaining value
 * hash points to the chaining input
 * in   points to the message input
 *****************************************/
void sha256_comp (hashblock res, const hashblock hash, const void *in)
{
    uint32_t a,b,c,d,e,f,g,h,s0,s1,T1,T2;
    uint32_t H[8];
    uint32_t X[16],l;
    int i;
    // CHANGE: type cast added because this is compiled as C++
    const unsigned char *data=static_cast<const unsigned char*>(in);

    for (i = 0; i < SHA256_DIGEST_LENGTH/4; i++) {
        HOST_c2l(hash, H[i]);
    }

    a = H[0]; b = H[1]; c = H[2]; d = H[3];
    e = H[4]; f = H[5]; g = H[6]; h = H[7];

    for (i=0;i<16;i++) {
        HOST_c2l(data,l);
        T1 = X[i] = l;
        T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
        T2 = Sigma0(a) + Maj(a,b,c);
        h = g; g = f; f = e; e = d + T1;
        d = c; c = b; b = a; a = T1 + T2;
    }

    for (;i<64;i++) {
        s0 = X[(i+1)&0x0f];  s0 = sigma0(s0);
        s1 = X[(i+14)&0x0f]; s1 = sigma1(s1);
        T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf];
        T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
        T2 = Sigma0(a) + Maj(a,b,c);
        h = g; g = f; f = e; e = d + T1;
        d = c; c = b; b = a; a = T1 + T2;
    }

    H[0] += a; H[1] += b; H[2] += c; H[3] += d;
    H[4] += e; H[5] += f; H[6] += g; H[7] += h;

    for (i = 0; i < SHA256_DIGEST_LENGTH/4; i++) {
        HOST_l2c(H[i], res);
    }
}
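/*
 * The Sigma0/Sigma1/sigma0/sigma1/Ch/Maj helpers used above (and in the other
 * SHA-256 variants below) are not part of this collection.  A minimal sketch,
 * assuming 32-bit words and the rotate-right formulation of FIPS 180-4; real
 * codebases (e.g. OpenSSL's md32_common.h) often express the same functions
 * with rotate-left instead.  ROTR32 is an illustrative name:
 */
#define ROTR32(x, n)  (((x) >> (n)) | ((x) << (32 - (n))))
#define Sigma0(x)     (ROTR32((x),  2) ^ ROTR32((x), 13) ^ ROTR32((x), 22))
#define Sigma1(x)     (ROTR32((x),  6) ^ ROTR32((x), 11) ^ ROTR32((x), 25))
#define sigma0(x)     (ROTR32((x),  7) ^ ROTR32((x), 18) ^ ((x) >>  3))
#define sigma1(x)     (ROTR32((x), 17) ^ ROTR32((x), 19) ^ ((x) >> 10))
#define Ch(x, y, z)   (((x) & (y)) ^ (~(x) & (z)))
#define Maj(x, y, z)  (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))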
static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host)
{
    unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1,T2;
    SHA_LONG X[16];
    int i;
    const unsigned char *data=in;

    while (num--) {
        a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
        e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];

        if (host) {
            const SHA_LONG *W=(const SHA_LONG *)data;

            for (i=0;i<16;i++) {
                T1 = X[i] = W[i];
                T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
                T2 = Sigma0(a) + Maj(a,b,c);
                h = g; g = f; f = e; e = d + T1;
                d = c; c = b; b = a; a = T1 + T2;
            }
            data += SHA256_CBLOCK;
        } else {
            SHA_LONG l;

            for (i=0;i<16;i++) {
                HOST_c2l(data,l);
                T1 = X[i] = l;
                T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
                T2 = Sigma0(a) + Maj(a,b,c);
                h = g; g = f; f = e; e = d + T1;
                d = c; c = b; b = a; a = T1 + T2;
            }
        }

        for (;i<64;i++) {
            s0 = X[(i+1)&0x0f];  s0 = sigma0(s0);
            s1 = X[(i+14)&0x0f]; s1 = sigma1(s1);
            T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf];
            T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
            T2 = Sigma0(a) + Maj(a,b,c);
            h = g; g = f; f = e; e = d + T1;
            d = c; c = b; b = a; a = T1 + T2;
        }

        ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
        ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
    }
}
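/*
 * HOST_c2l and HOST_l2c, used throughout this collection, are byte-order
 * helpers in the style of OpenSSL's md32_common.h: HOST_c2l loads a 32-bit
 * word from the byte stream and advances the byte pointer, HOST_l2c stores
 * one the same way.  Their byte order depends on the digest's data order
 * (big-endian for the SHA family, little-endian for the MD4/MD5/RIPEMD
 * routines below).  A sketch of the big-endian pair used by the SHA-256
 * functions here, assuming OpenSSL-like semantics:
 */
#define HOST_c2l(c, l)  (l  = ((uint32_t)(*((c)++))) << 24,              \
                         l |= ((uint32_t)(*((c)++))) << 16,              \
                         l |= ((uint32_t)(*((c)++))) <<  8,              \
                         l |= ((uint32_t)(*((c)++))))

#define HOST_l2c(l, c)  (*((c)++) = (unsigned char)(((l) >> 24) & 0xff), \
                         *((c)++) = (unsigned char)(((l) >> 16) & 0xff), \
                         *((c)++) = (unsigned char)(((l) >>  8) & 0xff), \
                         *((c)++) = (unsigned char)(((l)      ) & 0xff), \
                         (l))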
void SHA1Provider::hash_block_data_order (const unsigned char* data, int num)
{
    CRYPTO_U32 A, B, C, D, E, T, l;
    int i;
    CRYPTO_U32 X[16];

    A = m_sha_ctx.h[0];
    B = m_sha_ctx.h[1];
    C = m_sha_ctx.h[2];
    D = m_sha_ctx.h[3];
    E = m_sha_ctx.h[4];

    while (true) {
        for (i = 0; i < 16; i++) {
            HOST_c2l(data,l);
            X[i]=l;
            BODY_00_15(X[i]);
        }
        for (i = 0; i < 4; i++) {
            BODY_16_19(X[i], X[i+2], X[i+8], X[(i+13)&15]);
        }
        for (; i < 24; i++) {
            BODY_20_39(X[i&15], X[(i+2)&15], X[(i+8)&15], X[(i+13)&15]);
        }
        for (i = 0; i < 20; i++) {
            BODY_40_59(X[(i+8)&15], X[(i+10)&15], X[i&15], X[(i+5)&15]);
        }
        for (i = 4; i < 24; i++) {
            BODY_60_79(X[(i+8)&15], X[(i+10)&15], X[i&15], X[(i+5)&15]);
        }

        m_sha_ctx.h[0] = (m_sha_ctx.h[0] + A) & 0xffffffffL;
        m_sha_ctx.h[1] = (m_sha_ctx.h[1] + B) & 0xffffffffL;
        m_sha_ctx.h[2] = (m_sha_ctx.h[2] + C) & 0xffffffffL;
        m_sha_ctx.h[3] = (m_sha_ctx.h[3] + D) & 0xffffffffL;
        m_sha_ctx.h[4] = (m_sha_ctx.h[4] + E) & 0xffffffffL;

        if (--num == 0)
            break;

        A = m_sha_ctx.h[0];
        B = m_sha_ctx.h[1];
        C = m_sha_ctx.h[2];
        D = m_sha_ctx.h[3];
        E = m_sha_ctx.h[4];
    }
}
void sha256_block_data_order (SHA256_CTX *ctx, const void *in)
{
    unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1,T2,t;
    SHA_LONG X[16],l,Ki;
    int i;
    const unsigned char *data=in;

    a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
    e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];

    for (i=0;i<16;i++) {
        HOST_c2l(data,l);
        X[i] = l;
        Ki=K256[i];
        T1 = l + h + Sigma1(e) + Ch(e,f,g) + Ki;
        T2 = Sigma0(a) + Maj(a,b,c);
        h = g; g = f; f = e; e = d + T1;
        d = c; c = b; b = a; a = T1 + T2;
    }

    for (;i<64;i++) {
        s0 = X[(i+1)&0x0f];  s0 = sigma0(s0);
        s1 = X[(i+14)&0x0f]; s1 = sigma1(s1);
        T1 = X[i&0xf];
        t  = X[(i+9)&0xf];
        T1 += s0 + s1 + t;
        X[i&0xf] = T1;
        Ki=K256[i];
        T1 += h + Sigma1(e) + Ch(e,f,g) + Ki;
        T2 = Sigma0(a) + Maj(a,b,c);
        h = g; g = f; f = e; e = d + T1;
        d = c; c = b; b = a; a = T1 + T2;
    }

    t=ctx->h[0]; ctx->h[0]=t+a;
    t=ctx->h[1]; ctx->h[1]=t+b;
    t=ctx->h[2]; ctx->h[2]=t+c;
    t=ctx->h[3]; ctx->h[3]=t+d;
    t=ctx->h[4]; ctx->h[4]=t+e;
    t=ctx->h[5]; ctx->h[5]=t+f;
    t=ctx->h[6]; ctx->h[6]=t+g;
    t=ctx->h[7]; ctx->h[7]=t+h;

    return;
}
void md4_block_data_order (MD4_CTX *c, const void *data_, size_t num) { const unsigned char *data=data_; register unsigned MD32_REG_T A,B,C,D,l; #ifndef MD32_XARRAY /* See comment in crypto/sha/sha_locl.h for details. */ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15; # define X(i) XX##i #else MD4_LONG XX[MD4_LBLOCK]; # define X(i) XX[i] #endif A=c->A; B=c->B; C=c->C; D=c->D; for (; num--;) { HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l; /* Round 0 */ R0(A,B,C,D,X( 0), 3,0); HOST_c2l(data,l); X( 2)=l; R0(D,A,B,C,X( 1), 7,0); HOST_c2l(data,l); X( 3)=l; R0(C,D,A,B,X( 2),11,0); HOST_c2l(data,l); X( 4)=l; R0(B,C,D,A,X( 3),19,0); HOST_c2l(data,l); X( 5)=l; R0(A,B,C,D,X( 4), 3,0); HOST_c2l(data,l); X( 6)=l; R0(D,A,B,C,X( 5), 7,0); HOST_c2l(data,l); X( 7)=l; R0(C,D,A,B,X( 6),11,0); HOST_c2l(data,l); X( 8)=l; R0(B,C,D,A,X( 7),19,0); HOST_c2l(data,l); X( 9)=l; R0(A,B,C,D,X( 8), 3,0); HOST_c2l(data,l); X(10)=l; R0(D,A,B,C,X( 9), 7,0); HOST_c2l(data,l); X(11)=l; R0(C,D,A,B,X(10),11,0); HOST_c2l(data,l); X(12)=l; R0(B,C,D,A,X(11),19,0); HOST_c2l(data,l); X(13)=l; R0(A,B,C,D,X(12), 3,0); HOST_c2l(data,l); X(14)=l; R0(D,A,B,C,X(13), 7,0); HOST_c2l(data,l); X(15)=l; R0(C,D,A,B,X(14),11,0); R0(B,C,D,A,X(15),19,0); /* Round 1 */ R1(A,B,C,D,X( 0), 3,0x5A827999L); R1(D,A,B,C,X( 4), 5,0x5A827999L); R1(C,D,A,B,X( 8), 9,0x5A827999L); R1(B,C,D,A,X(12),13,0x5A827999L); R1(A,B,C,D,X( 1), 3,0x5A827999L); R1(D,A,B,C,X( 5), 5,0x5A827999L); R1(C,D,A,B,X( 9), 9,0x5A827999L); R1(B,C,D,A,X(13),13,0x5A827999L); R1(A,B,C,D,X( 2), 3,0x5A827999L); R1(D,A,B,C,X( 6), 5,0x5A827999L); R1(C,D,A,B,X(10), 9,0x5A827999L); R1(B,C,D,A,X(14),13,0x5A827999L); R1(A,B,C,D,X( 3), 3,0x5A827999L); R1(D,A,B,C,X( 7), 5,0x5A827999L); R1(C,D,A,B,X(11), 9,0x5A827999L); R1(B,C,D,A,X(15),13,0x5A827999L); /* Round 2 */ R2(A,B,C,D,X( 0), 3,0x6ED9EBA1L); R2(D,A,B,C,X( 8), 9,0x6ED9EBA1L); R2(C,D,A,B,X( 4),11,0x6ED9EBA1L); R2(B,C,D,A,X(12),15,0x6ED9EBA1L); R2(A,B,C,D,X( 2), 3,0x6ED9EBA1L); R2(D,A,B,C,X(10), 9,0x6ED9EBA1L); R2(C,D,A,B,X( 6),11,0x6ED9EBA1L); R2(B,C,D,A,X(14),15,0x6ED9EBA1L); R2(A,B,C,D,X( 1), 3,0x6ED9EBA1L); R2(D,A,B,C,X( 9), 9,0x6ED9EBA1L); R2(C,D,A,B,X( 5),11,0x6ED9EBA1L); R2(B,C,D,A,X(13),15,0x6ED9EBA1L); R2(A,B,C,D,X( 3), 3,0x6ED9EBA1L); R2(D,A,B,C,X(11), 9,0x6ED9EBA1L); R2(C,D,A,B,X( 7),11,0x6ED9EBA1L); R2(B,C,D,A,X(15),15,0x6ED9EBA1L); A = c->A += A; B = c->B += B; C = c->C += C; D = c->D += D; } }
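/*
 * Reference for the R0/R1/R2 macros in the MD4 routines of this collection
 * (RFC 1320): one step of rounds 1, 2 and 3 respectively.  This is a sketch
 * of typical definitions only; MD4_F/MD4_G/MD4_H and ROTL32 are illustrative
 * names.  (The MD5 routines below reuse the R0..R3 names with the RFC 1321
 * step function; see the sketch after the first MD5 routine.)
 */
#define ROTL32(v, n)   (((v) << (n)) | ((v) >> (32 - (n))))
#define MD4_F(x, y, z) (((x) & (y)) | (~(x) & (z)))                /* round 1 */
#define MD4_G(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))   /* round 2 */
#define MD4_H(x, y, z) ((x) ^ (y) ^ (z))                           /* round 3 */

#define R0(a, b, c, d, x, s, k) ((a) = ROTL32((a) + MD4_F((b), (c), (d)) + (x) + (k), (s)))
#define R1(a, b, c, d, x, s, k) ((a) = ROTL32((a) + MD4_G((b), (c), (d)) + (x) + (k), (s)))
#define R2(a, b, c, d, x, s, k) ((a) = ROTL32((a) + MD4_H((b), (c), (d)) + (x) + (k), (s)))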
static void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num) { unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1; SHA_LONG X[16]; int i; const unsigned char *data = in; const union { long one; char little; } is_endian = { 1 }; while (num--) { a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3]; e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7]; if (!is_endian.little && sizeof(SHA_LONG) == 4 && ((size_t)in % 4) == 0) { const SHA_LONG *W = (const SHA_LONG *)data; T1 = X[0] = W[0]; ROUND_00_15(0, a, b, c, d, e, f, g, h); T1 = X[1] = W[1]; ROUND_00_15(1, h, a, b, c, d, e, f, g); T1 = X[2] = W[2]; ROUND_00_15(2, g, h, a, b, c, d, e, f); T1 = X[3] = W[3]; ROUND_00_15(3, f, g, h, a, b, c, d, e); T1 = X[4] = W[4]; ROUND_00_15(4, e, f, g, h, a, b, c, d); T1 = X[5] = W[5]; ROUND_00_15(5, d, e, f, g, h, a, b, c); T1 = X[6] = W[6]; ROUND_00_15(6, c, d, e, f, g, h, a, b); T1 = X[7] = W[7]; ROUND_00_15(7, b, c, d, e, f, g, h, a); T1 = X[8] = W[8]; ROUND_00_15(8, a, b, c, d, e, f, g, h); T1 = X[9] = W[9]; ROUND_00_15(9, h, a, b, c, d, e, f, g); T1 = X[10] = W[10]; ROUND_00_15(10, g, h, a, b, c, d, e, f); T1 = X[11] = W[11]; ROUND_00_15(11, f, g, h, a, b, c, d, e); T1 = X[12] = W[12]; ROUND_00_15(12, e, f, g, h, a, b, c, d); T1 = X[13] = W[13]; ROUND_00_15(13, d, e, f, g, h, a, b, c); T1 = X[14] = W[14]; ROUND_00_15(14, c, d, e, f, g, h, a, b); T1 = X[15] = W[15]; ROUND_00_15(15, b, c, d, e, f, g, h, a); data += SHA256_CBLOCK; } else { SHA_LONG l; HOST_c2l(data, l); T1 = X[0] = l; ROUND_00_15(0, a, b, c, d, e, f, g, h); HOST_c2l(data, l); T1 = X[1] = l; ROUND_00_15(1, h, a, b, c, d, e, f, g); HOST_c2l(data, l); T1 = X[2] = l; ROUND_00_15(2, g, h, a, b, c, d, e, f); HOST_c2l(data, l); T1 = X[3] = l; ROUND_00_15(3, f, g, h, a, b, c, d, e); HOST_c2l(data, l); T1 = X[4] = l; ROUND_00_15(4, e, f, g, h, a, b, c, d); HOST_c2l(data, l); T1 = X[5] = l; ROUND_00_15(5, d, e, f, g, h, a, b, c); HOST_c2l(data, l); T1 = X[6] = l; ROUND_00_15(6, c, d, e, f, g, h, a, b); HOST_c2l(data, l); T1 = X[7] = l; ROUND_00_15(7, b, c, d, e, f, g, h, a); HOST_c2l(data, l); T1 = X[8] = l; ROUND_00_15(8, a, b, c, d, e, f, g, h); HOST_c2l(data, l); T1 = X[9] = l; ROUND_00_15(9, h, a, b, c, d, e, f, g); HOST_c2l(data, l); T1 = X[10] = l; ROUND_00_15(10, g, h, a, b, c, d, e, f); HOST_c2l(data, l); T1 = X[11] = l; ROUND_00_15(11, f, g, h, a, b, c, d, e); HOST_c2l(data, l); T1 = X[12] = l; ROUND_00_15(12, e, f, g, h, a, b, c, d); HOST_c2l(data, l); T1 = X[13] = l; ROUND_00_15(13, d, e, f, g, h, a, b, c); HOST_c2l(data, l); T1 = X[14] = l; ROUND_00_15(14, c, d, e, f, g, h, a, b); HOST_c2l(data, l); T1 = X[15] = l; ROUND_00_15(15, b, c, d, e, f, g, h, a); } for (i = 16; i < 64; i += 8) { ROUND_16_63(i + 0, a, b, c, d, e, f, g, h, X); ROUND_16_63(i + 1, h, a, b, c, d, e, f, g, X); ROUND_16_63(i + 2, g, h, a, b, c, d, e, f, X); ROUND_16_63(i + 3, f, g, h, a, b, c, d, e, X); ROUND_16_63(i + 4, e, f, g, h, a, b, c, d, X); ROUND_16_63(i + 5, d, e, f, g, h, a, b, c, X); ROUND_16_63(i + 6, c, d, e, f, g, h, a, b, X); ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X); } ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d; ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h; } }
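/*
 * ROUND_00_15 and ROUND_16_63, used by the unrolled SHA-256 variants in this
 * collection, are not included here.  A sketch in the spirit of OpenSSL's
 * sha256.c; note that the caller loads the current message word into T1 (and
 * X[i]) before invoking ROUND_00_15, exactly as the functions above and
 * below do:
 */
#define ROUND_00_15(i, a, b, c, d, e, f, g, h)        do {      \
        T1 += (h) + Sigma1(e) + Ch((e), (f), (g)) + K256[i];    \
        (h)  = Sigma0(a) + Maj((a), (b), (c));                  \
        (d) += T1;                                              \
        (h) += T1;                                } while (0)

#define ROUND_16_63(i, a, b, c, d, e, f, g, h, X)     do {      \
        s0 = X[((i) +  1) & 0x0f]; s0 = sigma0(s0);             \
        s1 = X[((i) + 14) & 0x0f]; s1 = sigma1(s1);             \
        T1 = X[(i) & 0x0f] += s0 + s1 + X[((i) + 9) & 0x0f];    \
        ROUND_00_15(i, a, b, c, d, e, f, g, h);   } while (0)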
void md5_block_data_order (MD5_CTX *c, const void *data_, size_t num) { const unsigned char *data=data_; register unsigned MD32_REG_T A,B,C,D,l; #ifndef MD32_XARRAY /* See comment in crypto/sha/sha_locl.h for details. */ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15; # define X(i) XX##i #else MD5_LONG XX[MD5_LBLOCK]; # define X(i) XX[i] #endif A=c->A; B=c->B; C=c->C; D=c->D; for (;num--;) { HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l; /* Round 0 */ R0(A,B,C,D,X( 0), 7,0xd76aa478L); HOST_c2l(data,l); X( 2)=l; R0(D,A,B,C,X( 1),12,0xe8c7b756L); HOST_c2l(data,l); X( 3)=l; R0(C,D,A,B,X( 2),17,0x242070dbL); HOST_c2l(data,l); X( 4)=l; R0(B,C,D,A,X( 3),22,0xc1bdceeeL); HOST_c2l(data,l); X( 5)=l; R0(A,B,C,D,X( 4), 7,0xf57c0fafL); HOST_c2l(data,l); X( 6)=l; R0(D,A,B,C,X( 5),12,0x4787c62aL); HOST_c2l(data,l); X( 7)=l; R0(C,D,A,B,X( 6),17,0xa8304613L); HOST_c2l(data,l); X( 8)=l; R0(B,C,D,A,X( 7),22,0xfd469501L); HOST_c2l(data,l); X( 9)=l; R0(A,B,C,D,X( 8), 7,0x698098d8L); HOST_c2l(data,l); X(10)=l; R0(D,A,B,C,X( 9),12,0x8b44f7afL); HOST_c2l(data,l); X(11)=l; R0(C,D,A,B,X(10),17,0xffff5bb1L); HOST_c2l(data,l); X(12)=l; R0(B,C,D,A,X(11),22,0x895cd7beL); HOST_c2l(data,l); X(13)=l; R0(A,B,C,D,X(12), 7,0x6b901122L); HOST_c2l(data,l); X(14)=l; R0(D,A,B,C,X(13),12,0xfd987193L); HOST_c2l(data,l); X(15)=l; R0(C,D,A,B,X(14),17,0xa679438eL); R0(B,C,D,A,X(15),22,0x49b40821L); /* Round 1 */ R1(A,B,C,D,X( 1), 5,0xf61e2562L); R1(D,A,B,C,X( 6), 9,0xc040b340L); R1(C,D,A,B,X(11),14,0x265e5a51L); R1(B,C,D,A,X( 0),20,0xe9b6c7aaL); R1(A,B,C,D,X( 5), 5,0xd62f105dL); R1(D,A,B,C,X(10), 9,0x02441453L); R1(C,D,A,B,X(15),14,0xd8a1e681L); R1(B,C,D,A,X( 4),20,0xe7d3fbc8L); R1(A,B,C,D,X( 9), 5,0x21e1cde6L); R1(D,A,B,C,X(14), 9,0xc33707d6L); R1(C,D,A,B,X( 3),14,0xf4d50d87L); R1(B,C,D,A,X( 8),20,0x455a14edL); R1(A,B,C,D,X(13), 5,0xa9e3e905L); R1(D,A,B,C,X( 2), 9,0xfcefa3f8L); R1(C,D,A,B,X( 7),14,0x676f02d9L); R1(B,C,D,A,X(12),20,0x8d2a4c8aL); /* Round 2 */ R2(A,B,C,D,X( 5), 4,0xfffa3942L); R2(D,A,B,C,X( 8),11,0x8771f681L); R2(C,D,A,B,X(11),16,0x6d9d6122L); R2(B,C,D,A,X(14),23,0xfde5380cL); R2(A,B,C,D,X( 1), 4,0xa4beea44L); R2(D,A,B,C,X( 4),11,0x4bdecfa9L); R2(C,D,A,B,X( 7),16,0xf6bb4b60L); R2(B,C,D,A,X(10),23,0xbebfbc70L); R2(A,B,C,D,X(13), 4,0x289b7ec6L); R2(D,A,B,C,X( 0),11,0xeaa127faL); R2(C,D,A,B,X( 3),16,0xd4ef3085L); R2(B,C,D,A,X( 6),23,0x04881d05L); R2(A,B,C,D,X( 9), 4,0xd9d4d039L); R2(D,A,B,C,X(12),11,0xe6db99e5L); R2(C,D,A,B,X(15),16,0x1fa27cf8L); R2(B,C,D,A,X( 2),23,0xc4ac5665L); /* Round 3 */ R3(A,B,C,D,X( 0), 6,0xf4292244L); R3(D,A,B,C,X( 7),10,0x432aff97L); R3(C,D,A,B,X(14),15,0xab9423a7L); R3(B,C,D,A,X( 5),21,0xfc93a039L); R3(A,B,C,D,X(12), 6,0x655b59c3L); R3(D,A,B,C,X( 3),10,0x8f0ccc92L); R3(C,D,A,B,X(10),15,0xffeff47dL); R3(B,C,D,A,X( 1),21,0x85845dd1L); R3(A,B,C,D,X( 8), 6,0x6fa87e4fL); R3(D,A,B,C,X(15),10,0xfe2ce6e0L); R3(C,D,A,B,X( 6),15,0xa3014314L); R3(B,C,D,A,X(13),21,0x4e0811a1L); R3(A,B,C,D,X( 4), 6,0xf7537e82L); R3(D,A,B,C,X(11),10,0xbd3af235L); R3(C,D,A,B,X( 2),15,0x2ad7d2bbL); R3(B,C,D,A,X( 9),21,0xeb86d391L); A = c->A += A; B = c->B += B; C = c->C += C; D = c->D += D; } }
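/*
 * Reference for the R0/R1/R2/R3 macros in the MD5 routines of this collection
 * (RFC 1321).  They reuse the MD4 macro names but with the MD5 auxiliary
 * functions, and each step adds the rotated result back to b.  Sketch only;
 * MD5_F/MD5_G/MD5_H/MD5_I are illustrative names and ROTL32 is as sketched
 * after the MD4 routine above:
 */
#define MD5_F(x, y, z) (((x) & (y)) | (~(x) & (z)))
#define MD5_G(x, y, z) (((x) & (z)) | ((y) & ~(z)))
#define MD5_H(x, y, z) ((x) ^ (y) ^ (z))
#define MD5_I(x, y, z) ((y) ^ ((x) | ~(z)))

#define R0(a, b, c, d, x, s, t) ((a) = (b) + ROTL32((a) + MD5_F((b), (c), (d)) + (x) + (t), (s)))
#define R1(a, b, c, d, x, s, t) ((a) = (b) + ROTL32((a) + MD5_G((b), (c), (d)) + (x) + (t), (s)))
#define R2(a, b, c, d, x, s, t) ((a) = (b) + ROTL32((a) + MD5_H((b), (c), (d)) + (x) + (t), (s)))
#define R3(a, b, c, d, x, s, t) ((a) = (b) + ROTL32((a) + MD5_I((b), (c), (d)) + (x) + (t), (s)))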
void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { uint32_t A, B, C, D, l; uint32_t X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15; A = state[0]; B = state[1]; C = state[2]; D = state[3]; for (; num--;) { HOST_c2l(data, l); X0 = l; HOST_c2l(data, l); X1 = l; /* Round 0 */ R0(A, B, C, D, X0, 3, 0); HOST_c2l(data, l); X2 = l; R0(D, A, B, C, X1, 7, 0); HOST_c2l(data, l); X3 = l; R0(C, D, A, B, X2, 11, 0); HOST_c2l(data, l); X4 = l; R0(B, C, D, A, X3, 19, 0); HOST_c2l(data, l); X5 = l; R0(A, B, C, D, X4, 3, 0); HOST_c2l(data, l); X6 = l; R0(D, A, B, C, X5, 7, 0); HOST_c2l(data, l); X7 = l; R0(C, D, A, B, X6, 11, 0); HOST_c2l(data, l); X8 = l; R0(B, C, D, A, X7, 19, 0); HOST_c2l(data, l); X9 = l; R0(A, B, C, D, X8, 3, 0); HOST_c2l(data, l); X10 = l; R0(D, A, B, C, X9, 7, 0); HOST_c2l(data, l); X11 = l; R0(C, D, A, B, X10, 11, 0); HOST_c2l(data, l); X12 = l; R0(B, C, D, A, X11, 19, 0); HOST_c2l(data, l); X13 = l; R0(A, B, C, D, X12, 3, 0); HOST_c2l(data, l); X14 = l; R0(D, A, B, C, X13, 7, 0); HOST_c2l(data, l); X15 = l; R0(C, D, A, B, X14, 11, 0); R0(B, C, D, A, X15, 19, 0); /* Round 1 */ R1(A, B, C, D, X0, 3, 0x5A827999L); R1(D, A, B, C, X4, 5, 0x5A827999L); R1(C, D, A, B, X8, 9, 0x5A827999L); R1(B, C, D, A, X12, 13, 0x5A827999L); R1(A, B, C, D, X1, 3, 0x5A827999L); R1(D, A, B, C, X5, 5, 0x5A827999L); R1(C, D, A, B, X9, 9, 0x5A827999L); R1(B, C, D, A, X13, 13, 0x5A827999L); R1(A, B, C, D, X2, 3, 0x5A827999L); R1(D, A, B, C, X6, 5, 0x5A827999L); R1(C, D, A, B, X10, 9, 0x5A827999L); R1(B, C, D, A, X14, 13, 0x5A827999L); R1(A, B, C, D, X3, 3, 0x5A827999L); R1(D, A, B, C, X7, 5, 0x5A827999L); R1(C, D, A, B, X11, 9, 0x5A827999L); R1(B, C, D, A, X15, 13, 0x5A827999L); /* Round 2 */ R2(A, B, C, D, X0, 3, 0x6ED9EBA1L); R2(D, A, B, C, X8, 9, 0x6ED9EBA1L); R2(C, D, A, B, X4, 11, 0x6ED9EBA1L); R2(B, C, D, A, X12, 15, 0x6ED9EBA1L); R2(A, B, C, D, X2, 3, 0x6ED9EBA1L); R2(D, A, B, C, X10, 9, 0x6ED9EBA1L); R2(C, D, A, B, X6, 11, 0x6ED9EBA1L); R2(B, C, D, A, X14, 15, 0x6ED9EBA1L); R2(A, B, C, D, X1, 3, 0x6ED9EBA1L); R2(D, A, B, C, X9, 9, 0x6ED9EBA1L); R2(C, D, A, B, X5, 11, 0x6ED9EBA1L); R2(B, C, D, A, X13, 15, 0x6ED9EBA1L); R2(A, B, C, D, X3, 3, 0x6ED9EBA1L); R2(D, A, B, C, X11, 9, 0x6ED9EBA1L); R2(C, D, A, B, X7, 11, 0x6ED9EBA1L); R2(B, C, D, A, X15, 15, 0x6ED9EBA1L); A = state[0] += A; B = state[1] += B; C = state[2] += C; D = state[3] += D; } }
void md4_block_data_order(MD4_CTX *c, const void *data_, size_t num) { const uint8_t *data = data_; uint32_t A, B, C, D, l; uint32_t X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15; A = c->A; B = c->B; C = c->C; D = c->D; for (; num--;) { HOST_c2l(data, l); X0 = l; HOST_c2l(data, l); X1 = l; /* Round 0 */ R0(A, B, C, D, X0, 3, 0); HOST_c2l(data, l); X2 = l; R0(D, A, B, C, X1, 7, 0); HOST_c2l(data, l); X3 = l; R0(C, D, A, B, X2, 11, 0); HOST_c2l(data, l); X4 = l; R0(B, C, D, A, X3, 19, 0); HOST_c2l(data, l); X5 = l; R0(A, B, C, D, X4, 3, 0); HOST_c2l(data, l); X6 = l; R0(D, A, B, C, X5, 7, 0); HOST_c2l(data, l); X7 = l; R0(C, D, A, B, X6, 11, 0); HOST_c2l(data, l); X8 = l; R0(B, C, D, A, X7, 19, 0); HOST_c2l(data, l); X9 = l; R0(A, B, C, D, X8, 3, 0); HOST_c2l(data, l); X10 = l; R0(D, A, B, C, X9, 7, 0); HOST_c2l(data, l); X11 = l; R0(C, D, A, B, X10, 11, 0); HOST_c2l(data, l); X12 = l; R0(B, C, D, A, X11, 19, 0); HOST_c2l(data, l); X13 = l; R0(A, B, C, D, X12, 3, 0); HOST_c2l(data, l); X14 = l; R0(D, A, B, C, X13, 7, 0); HOST_c2l(data, l); X15 = l; R0(C, D, A, B, X14, 11, 0); R0(B, C, D, A, X15, 19, 0); /* Round 1 */ R1(A, B, C, D, X0, 3, 0x5A827999L); R1(D, A, B, C, X4, 5, 0x5A827999L); R1(C, D, A, B, X8, 9, 0x5A827999L); R1(B, C, D, A, X12, 13, 0x5A827999L); R1(A, B, C, D, X1, 3, 0x5A827999L); R1(D, A, B, C, X5, 5, 0x5A827999L); R1(C, D, A, B, X9, 9, 0x5A827999L); R1(B, C, D, A, X13, 13, 0x5A827999L); R1(A, B, C, D, X2, 3, 0x5A827999L); R1(D, A, B, C, X6, 5, 0x5A827999L); R1(C, D, A, B, X10, 9, 0x5A827999L); R1(B, C, D, A, X14, 13, 0x5A827999L); R1(A, B, C, D, X3, 3, 0x5A827999L); R1(D, A, B, C, X7, 5, 0x5A827999L); R1(C, D, A, B, X11, 9, 0x5A827999L); R1(B, C, D, A, X15, 13, 0x5A827999L); /* Round 2 */ R2(A, B, C, D, X0, 3, 0x6ED9EBA1L); R2(D, A, B, C, X8, 9, 0x6ED9EBA1L); R2(C, D, A, B, X4, 11, 0x6ED9EBA1L); R2(B, C, D, A, X12, 15, 0x6ED9EBA1L); R2(A, B, C, D, X2, 3, 0x6ED9EBA1L); R2(D, A, B, C, X10, 9, 0x6ED9EBA1L); R2(C, D, A, B, X6, 11, 0x6ED9EBA1L); R2(B, C, D, A, X14, 15, 0x6ED9EBA1L); R2(A, B, C, D, X1, 3, 0x6ED9EBA1L); R2(D, A, B, C, X9, 9, 0x6ED9EBA1L); R2(C, D, A, B, X5, 11, 0x6ED9EBA1L); R2(B, C, D, A, X13, 15, 0x6ED9EBA1L); R2(A, B, C, D, X3, 3, 0x6ED9EBA1L); R2(D, A, B, C, X11, 9, 0x6ED9EBA1L); R2(C, D, A, B, X7, 11, 0x6ED9EBA1L); R2(B, C, D, A, X15, 15, 0x6ED9EBA1L); A = c->A += A; B = c->B += B; C = c->C += C; D = c->D += D; } }
void md5_block_data_order (MD5_CTX *c, const void *data_, int num) { const unsigned char *data=data_; register unsigned long A,B,C,D,l; /* * In case you wonder why A-D are declared as long and not * as MD5_LONG. Doing so results in slight performance * boost on LP64 architectures. The catch is we don't * really care if 32 MSBs of a 64-bit register get polluted * with eventual overflows as we *save* only 32 LSBs in * *either* case. Now declaring 'em long excuses the compiler * from keeping 32 MSBs zeroed resulting in 13% performance * improvement under SPARC Solaris7/64 and 5% under AlphaLinux. * Well, to be honest it should say that this *prevents* * performance degradation. * * <*****@*****.**> */ #ifndef MD32_XARRAY /* See comment in crypto/sha/sha_locl.h for details. */ unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15; # define X(i) XX##i #else MD5_LONG XX[MD5_LBLOCK]; # define X(i) XX[i] #endif A=c->A; B=c->B; C=c->C; D=c->D; for (;num--;) { HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l; /* Round 0 */ R0(A,B,C,D,X( 0), 7,0xd76aa478L); HOST_c2l(data,l); X( 2)=l; R0(D,A,B,C,X( 1),12,0xe8c7b756L); HOST_c2l(data,l); X( 3)=l; R0(C,D,A,B,X( 2),17,0x242070dbL); HOST_c2l(data,l); X( 4)=l; R0(B,C,D,A,X( 3),22,0xc1bdceeeL); HOST_c2l(data,l); X( 5)=l; R0(A,B,C,D,X( 4), 7,0xf57c0fafL); HOST_c2l(data,l); X( 6)=l; R0(D,A,B,C,X( 5),12,0x4787c62aL); HOST_c2l(data,l); X( 7)=l; R0(C,D,A,B,X( 6),17,0xa8304613L); HOST_c2l(data,l); X( 8)=l; R0(B,C,D,A,X( 7),22,0xfd469501L); HOST_c2l(data,l); X( 9)=l; R0(A,B,C,D,X( 8), 7,0x698098d8L); HOST_c2l(data,l); X(10)=l; R0(D,A,B,C,X( 9),12,0x8b44f7afL); HOST_c2l(data,l); X(11)=l; R0(C,D,A,B,X(10),17,0xffff5bb1L); HOST_c2l(data,l); X(12)=l; R0(B,C,D,A,X(11),22,0x895cd7beL); HOST_c2l(data,l); X(13)=l; R0(A,B,C,D,X(12), 7,0x6b901122L); HOST_c2l(data,l); X(14)=l; R0(D,A,B,C,X(13),12,0xfd987193L); HOST_c2l(data,l); X(15)=l; R0(C,D,A,B,X(14),17,0xa679438eL); R0(B,C,D,A,X(15),22,0x49b40821L); /* Round 1 */ R1(A,B,C,D,X( 1), 5,0xf61e2562L); R1(D,A,B,C,X( 6), 9,0xc040b340L); R1(C,D,A,B,X(11),14,0x265e5a51L); R1(B,C,D,A,X( 0),20,0xe9b6c7aaL); R1(A,B,C,D,X( 5), 5,0xd62f105dL); R1(D,A,B,C,X(10), 9,0x02441453L); R1(C,D,A,B,X(15),14,0xd8a1e681L); R1(B,C,D,A,X( 4),20,0xe7d3fbc8L); R1(A,B,C,D,X( 9), 5,0x21e1cde6L); R1(D,A,B,C,X(14), 9,0xc33707d6L); R1(C,D,A,B,X( 3),14,0xf4d50d87L); R1(B,C,D,A,X( 8),20,0x455a14edL); R1(A,B,C,D,X(13), 5,0xa9e3e905L); R1(D,A,B,C,X( 2), 9,0xfcefa3f8L); R1(C,D,A,B,X( 7),14,0x676f02d9L); R1(B,C,D,A,X(12),20,0x8d2a4c8aL); /* Round 2 */ R2(A,B,C,D,X( 5), 4,0xfffa3942L); R2(D,A,B,C,X( 8),11,0x8771f681L); R2(C,D,A,B,X(11),16,0x6d9d6122L); R2(B,C,D,A,X(14),23,0xfde5380cL); R2(A,B,C,D,X( 1), 4,0xa4beea44L); R2(D,A,B,C,X( 4),11,0x4bdecfa9L); R2(C,D,A,B,X( 7),16,0xf6bb4b60L); R2(B,C,D,A,X(10),23,0xbebfbc70L); R2(A,B,C,D,X(13), 4,0x289b7ec6L); R2(D,A,B,C,X( 0),11,0xeaa127faL); R2(C,D,A,B,X( 3),16,0xd4ef3085L); R2(B,C,D,A,X( 6),23,0x04881d05L); R2(A,B,C,D,X( 9), 4,0xd9d4d039L); R2(D,A,B,C,X(12),11,0xe6db99e5L); R2(C,D,A,B,X(15),16,0x1fa27cf8L); R2(B,C,D,A,X( 2),23,0xc4ac5665L); /* Round 3 */ R3(A,B,C,D,X( 0), 6,0xf4292244L); R3(D,A,B,C,X( 7),10,0x432aff97L); R3(C,D,A,B,X(14),15,0xab9423a7L); R3(B,C,D,A,X( 5),21,0xfc93a039L); R3(A,B,C,D,X(12), 6,0x655b59c3L); R3(D,A,B,C,X( 3),10,0x8f0ccc92L); R3(C,D,A,B,X(10),15,0xffeff47dL); R3(B,C,D,A,X( 1),21,0x85845dd1L); R3(A,B,C,D,X( 8), 6,0x6fa87e4fL); R3(D,A,B,C,X(15),10,0xfe2ce6e0L); R3(C,D,A,B,X( 6),15,0xa3014314L); R3(B,C,D,A,X(13),21,0x4e0811a1L); R3(A,B,C,D,X( 4), 
6,0xf7537e82L); R3(D,A,B,C,X(11),10,0xbd3af235L); R3(C,D,A,B,X( 2),15,0x2ad7d2bbL); R3(B,C,D,A,X( 9),21,0xeb86d391L); A = c->A += A; B = c->B += B; C = c->C += C; D = c->D += D; } }
static void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num) { unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1; SHA_LONG X[16]; int i; const unsigned char *data = in; const union { long one; char little; } is_endian = { 1 }; while (num--) { a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3]; e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7]; if (!is_endian.little && sizeof(SHA_LONG) == 4 && ((size_t)in % 4) == 0) { const SHA_LONG *W = (const SHA_LONG *)data; T1 = X[0] = W[0]; ROUND_00_15(0, a, b, c, d, e, f, g, h); T1 = X[1] = W[1]; ROUND_00_15(1, h, a, b, c, d, e, f, g); T1 = X[2] = W[2]; ROUND_00_15(2, g, h, a, b, c, d, e, f); T1 = X[3] = W[3]; ROUND_00_15(3, f, g, h, a, b, c, d, e); T1 = X[4] = W[4]; ROUND_00_15(4, e, f, g, h, a, b, c, d); T1 = X[5] = W[5]; ROUND_00_15(5, d, e, f, g, h, a, b, c); T1 = X[6] = W[6]; ROUND_00_15(6, c, d, e, f, g, h, a, b); T1 = X[7] = W[7]; ROUND_00_15(7, b, c, d, e, f, g, h, a); T1 = X[8] = W[8]; ROUND_00_15(8, a, b, c, d, e, f, g, h); T1 = X[9] = W[9]; ROUND_00_15(9, h, a, b, c, d, e, f, g); T1 = X[10] = W[10]; ROUND_00_15(10, g, h, a, b, c, d, e, f); T1 = X[11] = W[11]; ROUND_00_15(11, f, g, h, a, b, c, d, e); T1 = X[12] = W[12]; ROUND_00_15(12, e, f, g, h, a, b, c, d); T1 = X[13] = W[13]; ROUND_00_15(13, d, e, f, g, h, a, b, c); T1 = X[14] = W[14]; ROUND_00_15(14, c, d, e, f, g, h, a, b); T1 = X[15] = W[15]; ROUND_00_15(15, b, c, d, e, f, g, h, a); data += SHA256_CBLOCK; } else { SHA_LONG l; E(DBF_ALWAYS, "LITTLE ENDIAN BRANCH!!!: is_endian.little = %ld, in=%08lx, (in %% 4) = %ld, sizeof(SHA_LONG) = %ld", is_endian.little, in, ((size_t)in % 4), sizeof(SHA_LONG)); E(DBF_ALWAYS, "a=%08lx b=%08lx c=%08lx d=%08lx e=%08lx f=%08lx g=%08lx h=%08lx", a, b, c, d, e, f, g, h); #if defined(DEBUG) for(i=0; i < 16; i++) { const unsigned char *d = data + (i*4); D(DBF_ALWAYS, "%ld: %02lx %02lx %02lx %02lx", i, d[0], d[1], d[2], d[3]); } #endif (void)HOST_c2l(data, l); T1 = X[0] = l; ROUND_00_15(0, a, b, c, d, e, f, g, h); (void)HOST_c2l(data, l); T1 = X[1] = l; ROUND_00_15(1, h, a, b, c, d, e, f, g); (void)HOST_c2l(data, l); T1 = X[2] = l; ROUND_00_15(2, g, h, a, b, c, d, e, f); (void)HOST_c2l(data, l); T1 = X[3] = l; ROUND_00_15(3, f, g, h, a, b, c, d, e); (void)HOST_c2l(data, l); T1 = X[4] = l; ROUND_00_15(4, e, f, g, h, a, b, c, d); (void)HOST_c2l(data, l); T1 = X[5] = l; ROUND_00_15(5, d, e, f, g, h, a, b, c); (void)HOST_c2l(data, l); T1 = X[6] = l; ROUND_00_15(6, c, d, e, f, g, h, a, b); (void)HOST_c2l(data, l); T1 = X[7] = l; ROUND_00_15(7, b, c, d, e, f, g, h, a); (void)HOST_c2l(data, l); T1 = X[8] = l; ROUND_00_15(8, a, b, c, d, e, f, g, h); (void)HOST_c2l(data, l); T1 = X[9] = l; ROUND_00_15(9, h, a, b, c, d, e, f, g); (void)HOST_c2l(data, l); T1 = X[10] = l; ROUND_00_15(10, g, h, a, b, c, d, e, f); (void)HOST_c2l(data, l); T1 = X[11] = l; ROUND_00_15(11, f, g, h, a, b, c, d, e); (void)HOST_c2l(data, l); T1 = X[12] = l; ROUND_00_15(12, e, f, g, h, a, b, c, d); (void)HOST_c2l(data, l); T1 = X[13] = l; ROUND_00_15(13, d, e, f, g, h, a, b, c); (void)HOST_c2l(data, l); T1 = X[14] = l; ROUND_00_15(14, c, d, e, f, g, h, a, b); (void)HOST_c2l(data, l); T1 = X[15] = l; ROUND_00_15(15, b, c, d, e, f, g, h, a); E(DBF_ALWAYS, "a=%08lx b=%08lx c=%08lx d=%08lx e=%08lx f=%08lx g=%08lx h=%08lx", a, b, c, d, e, f, g, h); #if defined(DEBUG) for(i=0; i < 16; i++) D(DBF_ALWAYS, "X[%ld] = %08lx", i, X[i]); #endif } for (i = 16; i < 64; i += 8) { ROUND_16_63(i + 0, a, b, c, d, e, f, g, h, X); ROUND_16_63(i + 1, h, a, b, c, d, e, f, g, 
X); ROUND_16_63(i + 2, g, h, a, b, c, d, e, f, X); ROUND_16_63(i + 3, f, g, h, a, b, c, d, e, X); ROUND_16_63(i + 4, e, f, g, h, a, b, c, d, X); ROUND_16_63(i + 5, d, e, f, g, h, a, b, c, X); ROUND_16_63(i + 6, c, d, e, f, g, h, a, b, X); ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X); } E(DBF_ALWAYS, "FINAL: a=%08lx b=%08lx c=%08lx d=%08lx e=%08lx f=%08lx g=%08lx h=%08lx", a, b, c, d, e, f, g, h); ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d; ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h; } }
static void sha256_block_data_order(uint32_t *state, const uint8_t *data, size_t num)
{
    uint32_t a, b, c, d, e, f, g, h, s0, s1, T1;
    uint32_t X[16];
    int i;

    while (num--) {
        a = state[0]; b = state[1]; c = state[2]; d = state[3];
        e = state[4]; f = state[5]; g = state[6]; h = state[7];

        uint32_t l;

        HOST_c2l(data, l); T1 = X[0]  = l; ROUND_00_15(0, a, b, c, d, e, f, g, h);
        HOST_c2l(data, l); T1 = X[1]  = l; ROUND_00_15(1, h, a, b, c, d, e, f, g);
        HOST_c2l(data, l); T1 = X[2]  = l; ROUND_00_15(2, g, h, a, b, c, d, e, f);
        HOST_c2l(data, l); T1 = X[3]  = l; ROUND_00_15(3, f, g, h, a, b, c, d, e);
        HOST_c2l(data, l); T1 = X[4]  = l; ROUND_00_15(4, e, f, g, h, a, b, c, d);
        HOST_c2l(data, l); T1 = X[5]  = l; ROUND_00_15(5, d, e, f, g, h, a, b, c);
        HOST_c2l(data, l); T1 = X[6]  = l; ROUND_00_15(6, c, d, e, f, g, h, a, b);
        HOST_c2l(data, l); T1 = X[7]  = l; ROUND_00_15(7, b, c, d, e, f, g, h, a);
        HOST_c2l(data, l); T1 = X[8]  = l; ROUND_00_15(8, a, b, c, d, e, f, g, h);
        HOST_c2l(data, l); T1 = X[9]  = l; ROUND_00_15(9, h, a, b, c, d, e, f, g);
        HOST_c2l(data, l); T1 = X[10] = l; ROUND_00_15(10, g, h, a, b, c, d, e, f);
        HOST_c2l(data, l); T1 = X[11] = l; ROUND_00_15(11, f, g, h, a, b, c, d, e);
        HOST_c2l(data, l); T1 = X[12] = l; ROUND_00_15(12, e, f, g, h, a, b, c, d);
        HOST_c2l(data, l); T1 = X[13] = l; ROUND_00_15(13, d, e, f, g, h, a, b, c);
        HOST_c2l(data, l); T1 = X[14] = l; ROUND_00_15(14, c, d, e, f, g, h, a, b);
        HOST_c2l(data, l); T1 = X[15] = l; ROUND_00_15(15, b, c, d, e, f, g, h, a);

        for (i = 16; i < 64; i += 8) {
            ROUND_16_63(i + 0, a, b, c, d, e, f, g, h, X);
            ROUND_16_63(i + 1, h, a, b, c, d, e, f, g, X);
            ROUND_16_63(i + 2, g, h, a, b, c, d, e, f, X);
            ROUND_16_63(i + 3, f, g, h, a, b, c, d, e, X);
            ROUND_16_63(i + 4, e, f, g, h, a, b, c, d, X);
            ROUND_16_63(i + 5, d, e, f, g, h, a, b, c, X);
            ROUND_16_63(i + 6, c, d, e, f, g, h, a, b, X);
            ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X);
        }

        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;
    }
}
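/*
 * A minimal sketch of driving the variant above: hash the 3-byte message
 * "abc" by building its single padded 64-byte block by hand.  This assumes
 * the function and state layout above; sha256_abc_example is an illustrative
 * name, not part of the original code.
 */
static void sha256_abc_example(void)
{
    uint32_t state[8] = {                        /* FIPS 180-4 initial hash value */
        0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
        0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
    };
    uint8_t block[64] = { 'a', 'b', 'c', 0x80 }; /* remainder is already zero */
    block[63] = 24;                              /* message length in bits, big-endian */

    sha256_block_data_order(state, block, 1);

    /* state[] should now hold the FIPS 180-4 "abc" test vector:
     * ba7816bf 8f01cfea 414140de 5dae2223 b00361a3 96177a9c b410ff61 f20015ad */
}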
static void sha1_compress(ccdigest_state_t s, size_t num, const void *buf) { const unsigned char *data=buf; register uint32_t A,B,C,D,E,T,l; #ifndef MD32_XARRAY uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15; #else uint32_t XX[16]; #endif uint32_t *state=ccdigest_u32(s); A=state[0]; B=state[1]; C=state[2]; D=state[3]; E=state[4]; for (;;) { HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l; BODY_00_15( 0,A,B,C,D,E,T,X( 0)); HOST_c2l(data,l); X( 2)=l; BODY_00_15( 1,T,A,B,C,D,E,X( 1)); HOST_c2l(data,l); X( 3)=l; BODY_00_15( 2,E,T,A,B,C,D,X( 2)); HOST_c2l(data,l); X( 4)=l; BODY_00_15( 3,D,E,T,A,B,C,X( 3)); HOST_c2l(data,l); X( 5)=l; BODY_00_15( 4,C,D,E,T,A,B,X( 4)); HOST_c2l(data,l); X( 6)=l; BODY_00_15( 5,B,C,D,E,T,A,X( 5)); HOST_c2l(data,l); X( 7)=l; BODY_00_15( 6,A,B,C,D,E,T,X( 6)); HOST_c2l(data,l); X( 8)=l; BODY_00_15( 7,T,A,B,C,D,E,X( 7)); HOST_c2l(data,l); X( 9)=l; BODY_00_15( 8,E,T,A,B,C,D,X( 8)); HOST_c2l(data,l); X(10)=l; BODY_00_15( 9,D,E,T,A,B,C,X( 9)); HOST_c2l(data,l); X(11)=l; BODY_00_15(10,C,D,E,T,A,B,X(10)); HOST_c2l(data,l); X(12)=l; BODY_00_15(11,B,C,D,E,T,A,X(11)); HOST_c2l(data,l); X(13)=l; BODY_00_15(12,A,B,C,D,E,T,X(12)); HOST_c2l(data,l); X(14)=l; BODY_00_15(13,T,A,B,C,D,E,X(13)); HOST_c2l(data,l); X(15)=l; BODY_00_15(14,E,T,A,B,C,D,X(14)); BODY_00_15(15,D,E,T,A,B,C,X(15)); BODY_16_19(16,C,D,E,T,A,B,X( 0),X( 0),X( 2),X( 8),X(13)); BODY_16_19(17,B,C,D,E,T,A,X( 1),X( 1),X( 3),X( 9),X(14)); BODY_16_19(18,A,B,C,D,E,T,X( 2),X( 2),X( 4),X(10),X(15)); BODY_16_19(19,T,A,B,C,D,E,X( 3),X( 3),X( 5),X(11),X( 0)); BODY_20_31(20,E,T,A,B,C,D,X( 4),X( 4),X( 6),X(12),X( 1)); BODY_20_31(21,D,E,T,A,B,C,X( 5),X( 5),X( 7),X(13),X( 2)); BODY_20_31(22,C,D,E,T,A,B,X( 6),X( 6),X( 8),X(14),X( 3)); BODY_20_31(23,B,C,D,E,T,A,X( 7),X( 7),X( 9),X(15),X( 4)); BODY_20_31(24,A,B,C,D,E,T,X( 8),X( 8),X(10),X( 0),X( 5)); BODY_20_31(25,T,A,B,C,D,E,X( 9),X( 9),X(11),X( 1),X( 6)); BODY_20_31(26,E,T,A,B,C,D,X(10),X(10),X(12),X( 2),X( 7)); BODY_20_31(27,D,E,T,A,B,C,X(11),X(11),X(13),X( 3),X( 8)); BODY_20_31(28,C,D,E,T,A,B,X(12),X(12),X(14),X( 4),X( 9)); BODY_20_31(29,B,C,D,E,T,A,X(13),X(13),X(15),X( 5),X(10)); BODY_20_31(30,A,B,C,D,E,T,X(14),X(14),X( 0),X( 6),X(11)); BODY_20_31(31,T,A,B,C,D,E,X(15),X(15),X( 1),X( 7),X(12)); BODY_32_39(32,E,T,A,B,C,D,X( 0),X( 2),X( 8),X(13)); BODY_32_39(33,D,E,T,A,B,C,X( 1),X( 3),X( 9),X(14)); BODY_32_39(34,C,D,E,T,A,B,X( 2),X( 4),X(10),X(15)); BODY_32_39(35,B,C,D,E,T,A,X( 3),X( 5),X(11),X( 0)); BODY_32_39(36,A,B,C,D,E,T,X( 4),X( 6),X(12),X( 1)); BODY_32_39(37,T,A,B,C,D,E,X( 5),X( 7),X(13),X( 2)); BODY_32_39(38,E,T,A,B,C,D,X( 6),X( 8),X(14),X( 3)); BODY_32_39(39,D,E,T,A,B,C,X( 7),X( 9),X(15),X( 4)); BODY_40_59(40,C,D,E,T,A,B,X( 8),X(10),X( 0),X( 5)); BODY_40_59(41,B,C,D,E,T,A,X( 9),X(11),X( 1),X( 6)); BODY_40_59(42,A,B,C,D,E,T,X(10),X(12),X( 2),X( 7)); BODY_40_59(43,T,A,B,C,D,E,X(11),X(13),X( 3),X( 8)); BODY_40_59(44,E,T,A,B,C,D,X(12),X(14),X( 4),X( 9)); BODY_40_59(45,D,E,T,A,B,C,X(13),X(15),X( 5),X(10)); BODY_40_59(46,C,D,E,T,A,B,X(14),X( 0),X( 6),X(11)); BODY_40_59(47,B,C,D,E,T,A,X(15),X( 1),X( 7),X(12)); BODY_40_59(48,A,B,C,D,E,T,X( 0),X( 2),X( 8),X(13)); BODY_40_59(49,T,A,B,C,D,E,X( 1),X( 3),X( 9),X(14)); BODY_40_59(50,E,T,A,B,C,D,X( 2),X( 4),X(10),X(15)); BODY_40_59(51,D,E,T,A,B,C,X( 3),X( 5),X(11),X( 0)); BODY_40_59(52,C,D,E,T,A,B,X( 4),X( 6),X(12),X( 1)); BODY_40_59(53,B,C,D,E,T,A,X( 5),X( 7),X(13),X( 2)); BODY_40_59(54,A,B,C,D,E,T,X( 6),X( 8),X(14),X( 3)); BODY_40_59(55,T,A,B,C,D,E,X( 7),X( 9),X(15),X( 4)); 
BODY_40_59(56,E,T,A,B,C,D,X( 8),X(10),X( 0),X( 5)); BODY_40_59(57,D,E,T,A,B,C,X( 9),X(11),X( 1),X( 6)); BODY_40_59(58,C,D,E,T,A,B,X(10),X(12),X( 2),X( 7)); BODY_40_59(59,B,C,D,E,T,A,X(11),X(13),X( 3),X( 8)); BODY_60_79(60,A,B,C,D,E,T,X(12),X(14),X( 4),X( 9)); BODY_60_79(61,T,A,B,C,D,E,X(13),X(15),X( 5),X(10)); BODY_60_79(62,E,T,A,B,C,D,X(14),X( 0),X( 6),X(11)); BODY_60_79(63,D,E,T,A,B,C,X(15),X( 1),X( 7),X(12)); BODY_60_79(64,C,D,E,T,A,B,X( 0),X( 2),X( 8),X(13)); BODY_60_79(65,B,C,D,E,T,A,X( 1),X( 3),X( 9),X(14)); BODY_60_79(66,A,B,C,D,E,T,X( 2),X( 4),X(10),X(15)); BODY_60_79(67,T,A,B,C,D,E,X( 3),X( 5),X(11),X( 0)); BODY_60_79(68,E,T,A,B,C,D,X( 4),X( 6),X(12),X( 1)); BODY_60_79(69,D,E,T,A,B,C,X( 5),X( 7),X(13),X( 2)); BODY_60_79(70,C,D,E,T,A,B,X( 6),X( 8),X(14),X( 3)); BODY_60_79(71,B,C,D,E,T,A,X( 7),X( 9),X(15),X( 4)); BODY_60_79(72,A,B,C,D,E,T,X( 8),X(10),X( 0),X( 5)); BODY_60_79(73,T,A,B,C,D,E,X( 9),X(11),X( 1),X( 6)); BODY_60_79(74,E,T,A,B,C,D,X(10),X(12),X( 2),X( 7)); BODY_60_79(75,D,E,T,A,B,C,X(11),X(13),X( 3),X( 8)); BODY_60_79(76,C,D,E,T,A,B,X(12),X(14),X( 4),X( 9)); BODY_60_79(77,B,C,D,E,T,A,X(13),X(15),X( 5),X(10)); BODY_60_79(78,A,B,C,D,E,T,X(14),X( 0),X( 6),X(11)); BODY_60_79(79,T,A,B,C,D,E,X(15),X( 1),X( 7),X(12)); state[0]=(state[0]+E)&0xffffffff; state[1]=(state[1]+T)&0xffffffff; state[2]=(state[2]+A)&0xffffffff; state[3]=(state[3]+B)&0xffffffff; state[4]=(state[4]+C)&0xffffffff; if (--num <= 0) break; A=state[0]; B=state[1]; C=state[2]; D=state[3]; E=state[4]; } }
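/*
 * Reference for the BODY_* macros used by the SHA-1 routines in this
 * collection: every round computes (FIPS 180-4)
 *   T = ROTL(a,5) + f(b,c,d) + e + K + W[t];  e = d; d = c; c = ROTL(b,30); b = a; a = T;
 * with f and K depending on the round number:
 *   rounds  0-19: f = (b & c) ^ (~b & d)             K = 0x5A827999
 *   rounds 20-39: f = b ^ c ^ d                      K = 0x6ED9EBA1
 *   rounds 40-59: f = (b & c) ^ (b & d) ^ (c & d)    K = 0x8F1BBCDC
 *   rounds 60-79: f = b ^ c ^ d                      K = 0xCA62C1D6
 * and, for t >= 16, W[t] = ROTL(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1).
 * The macros here rotate the register roles instead of shuffling values,
 * which is why the argument order changes from call to call.
 */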
void rtsmb_md4_block_data_order (RTSMB_MD4_CTX *c, const void *data_, int num) { const unsigned char *data; register unsigned long A,B,C,D,l; /* * In case you wonder why A-D are declared as long and not * as RTSMB_MD4_LONG. Doing so results in slight performance * boost on LP64 architectures. The catch is we don't * really care if 32 MSBs of a 64-bit register get polluted * with eventual overflows as we *save* only 32 LSBs in * *either* case. Now declaring 'em long excuses the compiler * from keeping 32 MSBs zeroed resulting in 13% performance * improvement under SPARC Solaris7/64 and 5% under AlphaLinux. * Well, to be honest it should say that this *prevents* * performance degradation. * * <*****@*****.**> */ #ifndef MD32_XARRAY /* See comment in crypto/sha/sha_locl.h for details. */ unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15; # define X(i) XX##i #else RTSMB_MD4_LONG XX[RTSMB_MD4_LBLOCK]; # define X(i) XX[i] #endif data =data_; A=c->A; B=c->B; C=c->C; D=c->D; for (;num--;) { HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l; /* Round 0 */ R0(A,B,C,D,X( 0), 3,0); HOST_c2l(data,l); X( 2)=l; R0(D,A,B,C,X( 1), 7,0); HOST_c2l(data,l); X( 3)=l; R0(C,D,A,B,X( 2),11,0); HOST_c2l(data,l); X( 4)=l; R0(B,C,D,A,X( 3),19,0); HOST_c2l(data,l); X( 5)=l; R0(A,B,C,D,X( 4), 3,0); HOST_c2l(data,l); X( 6)=l; R0(D,A,B,C,X( 5), 7,0); HOST_c2l(data,l); X( 7)=l; R0(C,D,A,B,X( 6),11,0); HOST_c2l(data,l); X( 8)=l; R0(B,C,D,A,X( 7),19,0); HOST_c2l(data,l); X( 9)=l; R0(A,B,C,D,X( 8), 3,0); HOST_c2l(data,l); X(10)=l; R0(D,A,B,C,X( 9), 7,0); HOST_c2l(data,l); X(11)=l; R0(C,D,A,B,X(10),11,0); HOST_c2l(data,l); X(12)=l; R0(B,C,D,A,X(11),19,0); HOST_c2l(data,l); X(13)=l; R0(A,B,C,D,X(12), 3,0); HOST_c2l(data,l); X(14)=l; R0(D,A,B,C,X(13), 7,0); HOST_c2l(data,l); X(15)=l; R0(C,D,A,B,X(14),11,0); R0(B,C,D,A,X(15),19,0); /* Round 1 */ R1(A,B,C,D,X( 0), 3,0x5A827999L); R1(D,A,B,C,X( 4), 5,0x5A827999L); R1(C,D,A,B,X( 8), 9,0x5A827999L); R1(B,C,D,A,X(12),13,0x5A827999L); R1(A,B,C,D,X( 1), 3,0x5A827999L); R1(D,A,B,C,X( 5), 5,0x5A827999L); R1(C,D,A,B,X( 9), 9,0x5A827999L); R1(B,C,D,A,X(13),13,0x5A827999L); R1(A,B,C,D,X( 2), 3,0x5A827999L); R1(D,A,B,C,X( 6), 5,0x5A827999L); R1(C,D,A,B,X(10), 9,0x5A827999L); R1(B,C,D,A,X(14),13,0x5A827999L); R1(A,B,C,D,X( 3), 3,0x5A827999L); R1(D,A,B,C,X( 7), 5,0x5A827999L); R1(C,D,A,B,X(11), 9,0x5A827999L); R1(B,C,D,A,X(15),13,0x5A827999L); /* Round 2 */ R2(A,B,C,D,X( 0), 3,0x6ED9EBA1L); R2(D,A,B,C,X( 8), 9,0x6ED9EBA1L); R2(C,D,A,B,X( 4),11,0x6ED9EBA1L); R2(B,C,D,A,X(12),15,0x6ED9EBA1L); R2(A,B,C,D,X( 2), 3,0x6ED9EBA1L); R2(D,A,B,C,X(10), 9,0x6ED9EBA1L); R2(C,D,A,B,X( 6),11,0x6ED9EBA1L); R2(B,C,D,A,X(14),15,0x6ED9EBA1L); R2(A,B,C,D,X( 1), 3,0x6ED9EBA1L); R2(D,A,B,C,X( 9), 9,0x6ED9EBA1L); R2(C,D,A,B,X( 5),11,0x6ED9EBA1L); R2(B,C,D,A,X(13),15,0x6ED9EBA1L); R2(A,B,C,D,X( 3), 3,0x6ED9EBA1L); R2(D,A,B,C,X(11), 9,0x6ED9EBA1L); R2(C,D,A,B,X( 7),11,0x6ED9EBA1L); R2(B,C,D,A,X(15),15,0x6ED9EBA1L); A = c->A += A; B = c->B += B; C = c->C += C; D = c->D += D; } }
static void HASH_BLOCK_DATA_ORDER(SHA_CTX *c, const void *p, size_t num) { const uint8_t *data = p; register unsigned MD32_REG_T A, B, C, D, E, T, l; unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10, XX11, XX12, XX13, XX14, XX15; A = c->h0; B = c->h1; C = c->h2; D = c->h3; E = c->h4; for (;;) { const union { long one; char little; } is_endian = {1}; if (!is_endian.little && ((size_t)p % 4) == 0) { const uint32_t *W = (const uint32_t *)data; X(0) = W[0]; X(1) = W[1]; BODY_00_15(0, A, B, C, D, E, T, X(0)); X(2) = W[2]; BODY_00_15(1, T, A, B, C, D, E, X(1)); X(3) = W[3]; BODY_00_15(2, E, T, A, B, C, D, X(2)); X(4) = W[4]; BODY_00_15(3, D, E, T, A, B, C, X(3)); X(5) = W[5]; BODY_00_15(4, C, D, E, T, A, B, X(4)); X(6) = W[6]; BODY_00_15(5, B, C, D, E, T, A, X(5)); X(7) = W[7]; BODY_00_15(6, A, B, C, D, E, T, X(6)); X(8) = W[8]; BODY_00_15(7, T, A, B, C, D, E, X(7)); X(9) = W[9]; BODY_00_15(8, E, T, A, B, C, D, X(8)); X(10) = W[10]; BODY_00_15(9, D, E, T, A, B, C, X(9)); X(11) = W[11]; BODY_00_15(10, C, D, E, T, A, B, X(10)); X(12) = W[12]; BODY_00_15(11, B, C, D, E, T, A, X(11)); X(13) = W[13]; BODY_00_15(12, A, B, C, D, E, T, X(12)); X(14) = W[14]; BODY_00_15(13, T, A, B, C, D, E, X(13)); X(15) = W[15]; BODY_00_15(14, E, T, A, B, C, D, X(14)); BODY_00_15(15, D, E, T, A, B, C, X(15)); data += HASH_CBLOCK; } else { (void)HOST_c2l(data, l); X(0) = l; (void)HOST_c2l(data, l); X(1) = l; BODY_00_15(0, A, B, C, D, E, T, X(0)); (void)HOST_c2l(data, l); X(2) = l; BODY_00_15(1, T, A, B, C, D, E, X(1)); (void)HOST_c2l(data, l); X(3) = l; BODY_00_15(2, E, T, A, B, C, D, X(2)); (void)HOST_c2l(data, l); X(4) = l; BODY_00_15(3, D, E, T, A, B, C, X(3)); (void)HOST_c2l(data, l); X(5) = l; BODY_00_15(4, C, D, E, T, A, B, X(4)); (void)HOST_c2l(data, l); X(6) = l; BODY_00_15(5, B, C, D, E, T, A, X(5)); (void)HOST_c2l(data, l); X(7) = l; BODY_00_15(6, A, B, C, D, E, T, X(6)); (void)HOST_c2l(data, l); X(8) = l; BODY_00_15(7, T, A, B, C, D, E, X(7)); (void)HOST_c2l(data, l); X(9) = l; BODY_00_15(8, E, T, A, B, C, D, X(8)); (void)HOST_c2l(data, l); X(10) = l; BODY_00_15(9, D, E, T, A, B, C, X(9)); (void)HOST_c2l(data, l); X(11) = l; BODY_00_15(10, C, D, E, T, A, B, X(10)); (void)HOST_c2l(data, l); X(12) = l; BODY_00_15(11, B, C, D, E, T, A, X(11)); (void)HOST_c2l(data, l); X(13) = l; BODY_00_15(12, A, B, C, D, E, T, X(12)); (void)HOST_c2l(data, l); X(14) = l; BODY_00_15(13, T, A, B, C, D, E, X(13)); (void)HOST_c2l(data, l); X(15) = l; BODY_00_15(14, E, T, A, B, C, D, X(14)); BODY_00_15(15, D, E, T, A, B, C, X(15)); } BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13)); BODY_16_19(17, B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14)); BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15)); BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0)); BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1)); BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2)); BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3)); BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4)); BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5)); BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6)); BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X(2), X(7)); BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X(3), X(8)); BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X(4), X(9)); BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X(5), X(10)); BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X(0), X(6), X(11)); 
BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X(1), X(7), X(12)); BODY_32_39(32, E, T, A, B, C, D, X(0), X(2), X(8), X(13)); BODY_32_39(33, D, E, T, A, B, C, X(1), X(3), X(9), X(14)); BODY_32_39(34, C, D, E, T, A, B, X(2), X(4), X(10), X(15)); BODY_32_39(35, B, C, D, E, T, A, X(3), X(5), X(11), X(0)); BODY_32_39(36, A, B, C, D, E, T, X(4), X(6), X(12), X(1)); BODY_32_39(37, T, A, B, C, D, E, X(5), X(7), X(13), X(2)); BODY_32_39(38, E, T, A, B, C, D, X(6), X(8), X(14), X(3)); BODY_32_39(39, D, E, T, A, B, C, X(7), X(9), X(15), X(4)); BODY_40_59(40, C, D, E, T, A, B, X(8), X(10), X(0), X(5)); BODY_40_59(41, B, C, D, E, T, A, X(9), X(11), X(1), X(6)); BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X(2), X(7)); BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X(3), X(8)); BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X(4), X(9)); BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X(5), X(10)); BODY_40_59(46, C, D, E, T, A, B, X(14), X(0), X(6), X(11)); BODY_40_59(47, B, C, D, E, T, A, X(15), X(1), X(7), X(12)); BODY_40_59(48, A, B, C, D, E, T, X(0), X(2), X(8), X(13)); BODY_40_59(49, T, A, B, C, D, E, X(1), X(3), X(9), X(14)); BODY_40_59(50, E, T, A, B, C, D, X(2), X(4), X(10), X(15)); BODY_40_59(51, D, E, T, A, B, C, X(3), X(5), X(11), X(0)); BODY_40_59(52, C, D, E, T, A, B, X(4), X(6), X(12), X(1)); BODY_40_59(53, B, C, D, E, T, A, X(5), X(7), X(13), X(2)); BODY_40_59(54, A, B, C, D, E, T, X(6), X(8), X(14), X(3)); BODY_40_59(55, T, A, B, C, D, E, X(7), X(9), X(15), X(4)); BODY_40_59(56, E, T, A, B, C, D, X(8), X(10), X(0), X(5)); BODY_40_59(57, D, E, T, A, B, C, X(9), X(11), X(1), X(6)); BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X(2), X(7)); BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X(3), X(8)); BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X(4), X(9)); BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X(5), X(10)); BODY_60_79(62, E, T, A, B, C, D, X(14), X(0), X(6), X(11)); BODY_60_79(63, D, E, T, A, B, C, X(15), X(1), X(7), X(12)); BODY_60_79(64, C, D, E, T, A, B, X(0), X(2), X(8), X(13)); BODY_60_79(65, B, C, D, E, T, A, X(1), X(3), X(9), X(14)); BODY_60_79(66, A, B, C, D, E, T, X(2), X(4), X(10), X(15)); BODY_60_79(67, T, A, B, C, D, E, X(3), X(5), X(11), X(0)); BODY_60_79(68, E, T, A, B, C, D, X(4), X(6), X(12), X(1)); BODY_60_79(69, D, E, T, A, B, C, X(5), X(7), X(13), X(2)); BODY_60_79(70, C, D, E, T, A, B, X(6), X(8), X(14), X(3)); BODY_60_79(71, B, C, D, E, T, A, X(7), X(9), X(15), X(4)); BODY_60_79(72, A, B, C, D, E, T, X(8), X(10), X(0), X(5)); BODY_60_79(73, T, A, B, C, D, E, X(9), X(11), X(1), X(6)); BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X(2), X(7)); BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X(3), X(8)); BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X(4), X(9)); BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X(5), X(10)); BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11)); BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12)); c->h0 = (c->h0 + E) & 0xffffffffL; c->h1 = (c->h1 + T) & 0xffffffffL; c->h2 = (c->h2 + A) & 0xffffffffL; c->h3 = (c->h3 + B) & 0xffffffffL; c->h4 = (c->h4 + C) & 0xffffffffL; if (--num == 0) break; A = c->h0; B = c->h1; C = c->h2; D = c->h3; E = c->h4; } }
//MD4 Block data order setting void __fastcall MD4_BlockDataOrder( MD4_CTX *c, const void *data_, size_t num) { const unsigned char *data = (const unsigned char *)data_; register uint32_t A = 0, B = 0, C = 0, D = 0, l = 0; uint32_t XX0 = 0, XX1 = 0, XX2 = 0, XX3 = 0, XX4 = 0, XX5 = 0, XX6 = 0, XX7 = 0, XX8 = 0, XX9 = 0, XX10 = 0, XX11 = 0, XX12 = 0, XX13 = 0, XX14 = 0, XX15 = 0; #define X(i) XX ## i A = c->A; B = c->B; C = c->C; D = c->D; for (;num--;) { (void)HOST_c2l(data, l); X(0) = l; (void)HOST_c2l(data, l); X(1) = l; //Round 0 R0(A, B, C, D, X(0), 3, 0); (void)HOST_c2l(data, l); X(2) = l; R0(D, A, B, C, X(1), 7, 0); (void)HOST_c2l(data, l); X(3) = l; R0(C, D, A, B, X(2), 11, 0); (void)HOST_c2l(data, l); X(4) = l; R0(B, C, D, A, X(3), 19, 0); (void)HOST_c2l(data, l); X(5) = l; R0(A, B, C, D, X(4), 3, 0); (void)HOST_c2l(data, l); X(6) = l; R0(D, A, B, C, X(5), 7, 0); (void)HOST_c2l(data, l); X(7) = l; R0(C, D, A, B, X(6), 11, 0); (void)HOST_c2l(data, l); X(8) = l; R0(B, C, D, A, X(7), 19, 0); (void)HOST_c2l(data, l); X(9) = l; R0(A, B, C, D, X(8), 3, 0); (void)HOST_c2l(data, l); X(10) = l; R0(D, A, B, C, X(9), 7, 0); (void)HOST_c2l(data, l); X(11) = l; R0(C, D, A, B, X(10), 11, 0); (void)HOST_c2l(data, l); X(12) = l; R0(B, C, D, A, X(11), 19, 0); (void)HOST_c2l(data, l); X(13) = l; R0(A, B, C, D, X(12), 3, 0); (void)HOST_c2l(data, l); X(14) = l; R0(D, A, B, C, X(13), 7, 0); (void)HOST_c2l(data, l); X(15) = l; R0(C, D, A, B, X(14), 11, 0); R0(B, C, D, A, X(15), 19, 0); //Round 1 R1(A, B, C, D, X(0), 3, 0x5A827999L); R1(D, A, B, C, X(4), 5, 0x5A827999L); R1(C, D, A, B, X(8), 9, 0x5A827999L); R1(B, C, D, A, X(12), 13, 0x5A827999L); R1(A, B, C, D, X(1), 3, 0x5A827999L); R1(D, A, B, C, X(5), 5, 0x5A827999L); R1(C, D, A, B, X(9), 9, 0x5A827999L); R1(B, C, D, A, X(13), 13, 0x5A827999L); R1(A, B, C, D, X(2), 3, 0x5A827999L); R1(D, A, B, C, X(6), 5, 0x5A827999L); R1(C, D, A, B, X(10), 9, 0x5A827999L); R1(B, C, D, A, X(14), 13, 0x5A827999L); R1(A, B, C, D, X(3), 3, 0x5A827999L); R1(D, A, B, C, X(7), 5, 0x5A827999L); R1(C, D, A, B, X(11), 9, 0x5A827999L); R1(B, C, D, A, X(15), 13, 0x5A827999L); //Round 2 R2(A, B, C, D, X(0), 3, 0x6ED9EBA1L); R2(D, A, B, C, X(8), 9, 0x6ED9EBA1L); R2(C, D, A, B, X(4), 11, 0x6ED9EBA1L); R2(B, C, D, A, X(12), 15, 0x6ED9EBA1L); R2(A, B, C, D, X(2), 3, 0x6ED9EBA1L); R2(D, A, B, C, X(10), 9, 0x6ED9EBA1L); R2(C, D, A, B, X(6), 11, 0x6ED9EBA1L); R2(B, C, D, A, X(14), 15, 0x6ED9EBA1L); R2(A, B, C, D, X(1), 3, 0x6ED9EBA1L); R2(D, A, B, C, X(9), 9, 0x6ED9EBA1L); R2(C, D, A, B, X(5), 11, 0x6ED9EBA1L); R2(B, C, D, A, X(13), 15, 0x6ED9EBA1L); R2(A, B, C, D, X(3), 3, 0x6ED9EBA1L); R2(D, A, B, C, X(11), 9, 0x6ED9EBA1L); R2(C, D, A, B, X(7), 11, 0x6ED9EBA1L); R2(B, C, D, A, X(15), 15, 0x6ED9EBA1L); A = c->A += A; B = c->B += B; C = c->C += C; D = c->D += D; } return; }
void ripemd160_block_data_order (RIPEMD160_CTX *ctx, const void *p, size_t num) { const unsigned char *data=p; register unsigned MD32_REG_T A,B,C,D,E; unsigned MD32_REG_T a,b,c,d,e,l; #ifndef MD32_XARRAY /* See comment in crypto/sha/sha_locl.h for details. */ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15; # define X(i) XX##i #else RIPEMD160_LONG XX[16]; # define X(i) XX[i] #endif for (;num--;) { A=ctx->A; B=ctx->B; C=ctx->C; D=ctx->D; E=ctx->E; (void)HOST_c2l(data,l); X( 0)=l;(void)HOST_c2l(data,l); X( 1)=l; RIP1(A,B,C,D,E,WL00,SL00); (void)HOST_c2l(data,l); X( 2)=l; RIP1(E,A,B,C,D,WL01,SL01); (void)HOST_c2l(data,l); X( 3)=l; RIP1(D,E,A,B,C,WL02,SL02); (void)HOST_c2l(data,l); X( 4)=l; RIP1(C,D,E,A,B,WL03,SL03); (void)HOST_c2l(data,l); X( 5)=l; RIP1(B,C,D,E,A,WL04,SL04); (void)HOST_c2l(data,l); X( 6)=l; RIP1(A,B,C,D,E,WL05,SL05); (void)HOST_c2l(data,l); X( 7)=l; RIP1(E,A,B,C,D,WL06,SL06); (void)HOST_c2l(data,l); X( 8)=l; RIP1(D,E,A,B,C,WL07,SL07); (void)HOST_c2l(data,l); X( 9)=l; RIP1(C,D,E,A,B,WL08,SL08); (void)HOST_c2l(data,l); X(10)=l; RIP1(B,C,D,E,A,WL09,SL09); (void)HOST_c2l(data,l); X(11)=l; RIP1(A,B,C,D,E,WL10,SL10); (void)HOST_c2l(data,l); X(12)=l; RIP1(E,A,B,C,D,WL11,SL11); (void)HOST_c2l(data,l); X(13)=l; RIP1(D,E,A,B,C,WL12,SL12); (void)HOST_c2l(data,l); X(14)=l; RIP1(C,D,E,A,B,WL13,SL13); (void)HOST_c2l(data,l); X(15)=l; RIP1(B,C,D,E,A,WL14,SL14); RIP1(A,B,C,D,E,WL15,SL15); RIP2(E,A,B,C,D,WL16,SL16,KL1); RIP2(D,E,A,B,C,WL17,SL17,KL1); RIP2(C,D,E,A,B,WL18,SL18,KL1); RIP2(B,C,D,E,A,WL19,SL19,KL1); RIP2(A,B,C,D,E,WL20,SL20,KL1); RIP2(E,A,B,C,D,WL21,SL21,KL1); RIP2(D,E,A,B,C,WL22,SL22,KL1); RIP2(C,D,E,A,B,WL23,SL23,KL1); RIP2(B,C,D,E,A,WL24,SL24,KL1); RIP2(A,B,C,D,E,WL25,SL25,KL1); RIP2(E,A,B,C,D,WL26,SL26,KL1); RIP2(D,E,A,B,C,WL27,SL27,KL1); RIP2(C,D,E,A,B,WL28,SL28,KL1); RIP2(B,C,D,E,A,WL29,SL29,KL1); RIP2(A,B,C,D,E,WL30,SL30,KL1); RIP2(E,A,B,C,D,WL31,SL31,KL1); RIP3(D,E,A,B,C,WL32,SL32,KL2); RIP3(C,D,E,A,B,WL33,SL33,KL2); RIP3(B,C,D,E,A,WL34,SL34,KL2); RIP3(A,B,C,D,E,WL35,SL35,KL2); RIP3(E,A,B,C,D,WL36,SL36,KL2); RIP3(D,E,A,B,C,WL37,SL37,KL2); RIP3(C,D,E,A,B,WL38,SL38,KL2); RIP3(B,C,D,E,A,WL39,SL39,KL2); RIP3(A,B,C,D,E,WL40,SL40,KL2); RIP3(E,A,B,C,D,WL41,SL41,KL2); RIP3(D,E,A,B,C,WL42,SL42,KL2); RIP3(C,D,E,A,B,WL43,SL43,KL2); RIP3(B,C,D,E,A,WL44,SL44,KL2); RIP3(A,B,C,D,E,WL45,SL45,KL2); RIP3(E,A,B,C,D,WL46,SL46,KL2); RIP3(D,E,A,B,C,WL47,SL47,KL2); RIP4(C,D,E,A,B,WL48,SL48,KL3); RIP4(B,C,D,E,A,WL49,SL49,KL3); RIP4(A,B,C,D,E,WL50,SL50,KL3); RIP4(E,A,B,C,D,WL51,SL51,KL3); RIP4(D,E,A,B,C,WL52,SL52,KL3); RIP4(C,D,E,A,B,WL53,SL53,KL3); RIP4(B,C,D,E,A,WL54,SL54,KL3); RIP4(A,B,C,D,E,WL55,SL55,KL3); RIP4(E,A,B,C,D,WL56,SL56,KL3); RIP4(D,E,A,B,C,WL57,SL57,KL3); RIP4(C,D,E,A,B,WL58,SL58,KL3); RIP4(B,C,D,E,A,WL59,SL59,KL3); RIP4(A,B,C,D,E,WL60,SL60,KL3); RIP4(E,A,B,C,D,WL61,SL61,KL3); RIP4(D,E,A,B,C,WL62,SL62,KL3); RIP4(C,D,E,A,B,WL63,SL63,KL3); RIP5(B,C,D,E,A,WL64,SL64,KL4); RIP5(A,B,C,D,E,WL65,SL65,KL4); RIP5(E,A,B,C,D,WL66,SL66,KL4); RIP5(D,E,A,B,C,WL67,SL67,KL4); RIP5(C,D,E,A,B,WL68,SL68,KL4); RIP5(B,C,D,E,A,WL69,SL69,KL4); RIP5(A,B,C,D,E,WL70,SL70,KL4); RIP5(E,A,B,C,D,WL71,SL71,KL4); RIP5(D,E,A,B,C,WL72,SL72,KL4); RIP5(C,D,E,A,B,WL73,SL73,KL4); RIP5(B,C,D,E,A,WL74,SL74,KL4); RIP5(A,B,C,D,E,WL75,SL75,KL4); RIP5(E,A,B,C,D,WL76,SL76,KL4); RIP5(D,E,A,B,C,WL77,SL77,KL4); RIP5(C,D,E,A,B,WL78,SL78,KL4); RIP5(B,C,D,E,A,WL79,SL79,KL4); a=A; b=B; c=C; d=D; e=E; /* Do other half */ A=ctx->A; B=ctx->B; C=ctx->C; D=ctx->D; E=ctx->E; 
RIP5(A,B,C,D,E,WR00,SR00,KR0); RIP5(E,A,B,C,D,WR01,SR01,KR0); RIP5(D,E,A,B,C,WR02,SR02,KR0); RIP5(C,D,E,A,B,WR03,SR03,KR0); RIP5(B,C,D,E,A,WR04,SR04,KR0); RIP5(A,B,C,D,E,WR05,SR05,KR0); RIP5(E,A,B,C,D,WR06,SR06,KR0); RIP5(D,E,A,B,C,WR07,SR07,KR0); RIP5(C,D,E,A,B,WR08,SR08,KR0); RIP5(B,C,D,E,A,WR09,SR09,KR0); RIP5(A,B,C,D,E,WR10,SR10,KR0); RIP5(E,A,B,C,D,WR11,SR11,KR0); RIP5(D,E,A,B,C,WR12,SR12,KR0); RIP5(C,D,E,A,B,WR13,SR13,KR0); RIP5(B,C,D,E,A,WR14,SR14,KR0); RIP5(A,B,C,D,E,WR15,SR15,KR0); RIP4(E,A,B,C,D,WR16,SR16,KR1); RIP4(D,E,A,B,C,WR17,SR17,KR1); RIP4(C,D,E,A,B,WR18,SR18,KR1); RIP4(B,C,D,E,A,WR19,SR19,KR1); RIP4(A,B,C,D,E,WR20,SR20,KR1); RIP4(E,A,B,C,D,WR21,SR21,KR1); RIP4(D,E,A,B,C,WR22,SR22,KR1); RIP4(C,D,E,A,B,WR23,SR23,KR1); RIP4(B,C,D,E,A,WR24,SR24,KR1); RIP4(A,B,C,D,E,WR25,SR25,KR1); RIP4(E,A,B,C,D,WR26,SR26,KR1); RIP4(D,E,A,B,C,WR27,SR27,KR1); RIP4(C,D,E,A,B,WR28,SR28,KR1); RIP4(B,C,D,E,A,WR29,SR29,KR1); RIP4(A,B,C,D,E,WR30,SR30,KR1); RIP4(E,A,B,C,D,WR31,SR31,KR1); RIP3(D,E,A,B,C,WR32,SR32,KR2); RIP3(C,D,E,A,B,WR33,SR33,KR2); RIP3(B,C,D,E,A,WR34,SR34,KR2); RIP3(A,B,C,D,E,WR35,SR35,KR2); RIP3(E,A,B,C,D,WR36,SR36,KR2); RIP3(D,E,A,B,C,WR37,SR37,KR2); RIP3(C,D,E,A,B,WR38,SR38,KR2); RIP3(B,C,D,E,A,WR39,SR39,KR2); RIP3(A,B,C,D,E,WR40,SR40,KR2); RIP3(E,A,B,C,D,WR41,SR41,KR2); RIP3(D,E,A,B,C,WR42,SR42,KR2); RIP3(C,D,E,A,B,WR43,SR43,KR2); RIP3(B,C,D,E,A,WR44,SR44,KR2); RIP3(A,B,C,D,E,WR45,SR45,KR2); RIP3(E,A,B,C,D,WR46,SR46,KR2); RIP3(D,E,A,B,C,WR47,SR47,KR2); RIP2(C,D,E,A,B,WR48,SR48,KR3); RIP2(B,C,D,E,A,WR49,SR49,KR3); RIP2(A,B,C,D,E,WR50,SR50,KR3); RIP2(E,A,B,C,D,WR51,SR51,KR3); RIP2(D,E,A,B,C,WR52,SR52,KR3); RIP2(C,D,E,A,B,WR53,SR53,KR3); RIP2(B,C,D,E,A,WR54,SR54,KR3); RIP2(A,B,C,D,E,WR55,SR55,KR3); RIP2(E,A,B,C,D,WR56,SR56,KR3); RIP2(D,E,A,B,C,WR57,SR57,KR3); RIP2(C,D,E,A,B,WR58,SR58,KR3); RIP2(B,C,D,E,A,WR59,SR59,KR3); RIP2(A,B,C,D,E,WR60,SR60,KR3); RIP2(E,A,B,C,D,WR61,SR61,KR3); RIP2(D,E,A,B,C,WR62,SR62,KR3); RIP2(C,D,E,A,B,WR63,SR63,KR3); RIP1(B,C,D,E,A,WR64,SR64); RIP1(A,B,C,D,E,WR65,SR65); RIP1(E,A,B,C,D,WR66,SR66); RIP1(D,E,A,B,C,WR67,SR67); RIP1(C,D,E,A,B,WR68,SR68); RIP1(B,C,D,E,A,WR69,SR69); RIP1(A,B,C,D,E,WR70,SR70); RIP1(E,A,B,C,D,WR71,SR71); RIP1(D,E,A,B,C,WR72,SR72); RIP1(C,D,E,A,B,WR73,SR73); RIP1(B,C,D,E,A,WR74,SR74); RIP1(A,B,C,D,E,WR75,SR75); RIP1(E,A,B,C,D,WR76,SR76); RIP1(D,E,A,B,C,WR77,SR77); RIP1(C,D,E,A,B,WR78,SR78); RIP1(B,C,D,E,A,WR79,SR79); D =ctx->B+c+D; ctx->B=ctx->C+d+E; ctx->C=ctx->D+e+A; ctx->D=ctx->E+a+B; ctx->E=ctx->A+b+C; ctx->A=D; } }
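/*
 * Reference for the RIP1..RIP5 macros above: RIPEMD-160 runs two independent
 * "lines" of 80 rounds over the same block (the left line with constants
 * KL1..KL4, the right line with KR0..KR3), each round having the shape
 *   a = ROTL(a + f(b,c,d) + X[r] + K, s) + e;  c = ROTL(c, 10);
 * with the five boolean functions
 *   f1(x,y,z) = x ^ y ^ z
 *   f2(x,y,z) = (x & y) | (~x & z)
 *   f3(x,y,z) = (x | ~y) ^ z
 *   f4(x,y,z) = (x & z) | (y & ~z)
 *   f5(x,y,z) = x ^ (y | ~z)
 * applied in order f1..f5 on the left line and f5..f1 on the right line.
 * The final block above folds both lines into the chaining value (for
 * example, new A = old B + c_left + D_right), rotating through the five
 * state words.
 */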
static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host) { uint32_t a,b,c,d,e,f,g,h,s0,s1,T1; uint32_t X[16]; int i; const unsigned char *data=in; while (num--) { a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3]; e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7]; if (host) { const uint32_t *W=(const uint32_t *)data; T1 = X[0] = W[0]; ROUND_00_15(0,a,b,c,d,e,f,g,h); T1 = X[1] = W[1]; ROUND_00_15(1,h,a,b,c,d,e,f,g); T1 = X[2] = W[2]; ROUND_00_15(2,g,h,a,b,c,d,e,f); T1 = X[3] = W[3]; ROUND_00_15(3,f,g,h,a,b,c,d,e); T1 = X[4] = W[4]; ROUND_00_15(4,e,f,g,h,a,b,c,d); T1 = X[5] = W[5]; ROUND_00_15(5,d,e,f,g,h,a,b,c); T1 = X[6] = W[6]; ROUND_00_15(6,c,d,e,f,g,h,a,b); T1 = X[7] = W[7]; ROUND_00_15(7,b,c,d,e,f,g,h,a); T1 = X[8] = W[8]; ROUND_00_15(8,a,b,c,d,e,f,g,h); T1 = X[9] = W[9]; ROUND_00_15(9,h,a,b,c,d,e,f,g); T1 = X[10] = W[10]; ROUND_00_15(10,g,h,a,b,c,d,e,f); T1 = X[11] = W[11]; ROUND_00_15(11,f,g,h,a,b,c,d,e); T1 = X[12] = W[12]; ROUND_00_15(12,e,f,g,h,a,b,c,d); T1 = X[13] = W[13]; ROUND_00_15(13,d,e,f,g,h,a,b,c); T1 = X[14] = W[14]; ROUND_00_15(14,c,d,e,f,g,h,a,b); T1 = X[15] = W[15]; ROUND_00_15(15,b,c,d,e,f,g,h,a); data += SHA256_CBLOCK; } else { uint32_t l; HOST_c2l(data,l); T1 = X[0] = l; ROUND_00_15(0,a,b,c,d,e,f,g,h); HOST_c2l(data,l); T1 = X[1] = l; ROUND_00_15(1,h,a,b,c,d,e,f,g); HOST_c2l(data,l); T1 = X[2] = l; ROUND_00_15(2,g,h,a,b,c,d,e,f); HOST_c2l(data,l); T1 = X[3] = l; ROUND_00_15(3,f,g,h,a,b,c,d,e); HOST_c2l(data,l); T1 = X[4] = l; ROUND_00_15(4,e,f,g,h,a,b,c,d); HOST_c2l(data,l); T1 = X[5] = l; ROUND_00_15(5,d,e,f,g,h,a,b,c); HOST_c2l(data,l); T1 = X[6] = l; ROUND_00_15(6,c,d,e,f,g,h,a,b); HOST_c2l(data,l); T1 = X[7] = l; ROUND_00_15(7,b,c,d,e,f,g,h,a); HOST_c2l(data,l); T1 = X[8] = l; ROUND_00_15(8,a,b,c,d,e,f,g,h); HOST_c2l(data,l); T1 = X[9] = l; ROUND_00_15(9,h,a,b,c,d,e,f,g); HOST_c2l(data,l); T1 = X[10] = l; ROUND_00_15(10,g,h,a,b,c,d,e,f); HOST_c2l(data,l); T1 = X[11] = l; ROUND_00_15(11,f,g,h,a,b,c,d,e); HOST_c2l(data,l); T1 = X[12] = l; ROUND_00_15(12,e,f,g,h,a,b,c,d); HOST_c2l(data,l); T1 = X[13] = l; ROUND_00_15(13,d,e,f,g,h,a,b,c); HOST_c2l(data,l); T1 = X[14] = l; ROUND_00_15(14,c,d,e,f,g,h,a,b); HOST_c2l(data,l); T1 = X[15] = l; ROUND_00_15(15,b,c,d,e,f,g,h,a); } for (i=16;i<64;i+=8) { ROUND_16_63(i+0,a,b,c,d,e,f,g,h,X); ROUND_16_63(i+1,h,a,b,c,d,e,f,g,X); ROUND_16_63(i+2,g,h,a,b,c,d,e,f,X); ROUND_16_63(i+3,f,g,h,a,b,c,d,e,X); ROUND_16_63(i+4,e,f,g,h,a,b,c,d,X); ROUND_16_63(i+5,d,e,f,g,h,a,b,c,X); ROUND_16_63(i+6,c,d,e,f,g,h,a,b,X); ROUND_16_63(i+7,b,c,d,e,f,g,h,a,X); } ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d; ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h; } }
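/*
 * Usage note for the host flag above (an assumption, inferred from the
 * branches themselves and from the is_endian checks in the earlier variants):
 * callers presumably pass host != 0 only on big-endian platforms with 4-byte
 * aligned input, where direct W[i] word loads already yield the big-endian
 * message words SHA-256 needs; otherwise the portable HOST_c2l path must be
 * used, e.g. sha256_block(ctx, buf, len / SHA256_CBLOCK, 0).
 */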