/* hash bit sequence */
/*
 * One-shot Groestl convenience wrapper: initialise a fresh context for the
 * requested output width (hashbitlen is in bits; the init API takes bytes),
 * absorb the whole message, then finalise into hashval.
 *
 * Returns SUCCESS_GR on success, otherwise the first failing stage's status.
 */
HashReturn_gr hash_groestl(int hashbitlen, const BitSequence_gr* data,
                           DataLength_gr databitlen, BitSequence_gr* hashval)
{
    hashState_groestl context;

    /* initialise */
    HashReturn_gr status = init_groestl(&context, hashbitlen / 8);
    if (status != SUCCESS_GR)
        return status;

    /* process message */
    status = update_groestl(&context, data, databitlen);
    if (status != SUCCESS_GR)
        return status;

    /* finalise */
    return final_groestl(&context, hashval);
}
/*
 * X11-style chained hash: eleven algorithms applied in sequence
 * (blake, bmw, groestl, skein, jh, keccak, luffa, cubehash, shavite,
 * simd, echo), each stage digesting the previous stage's 64-byte output.
 * Stages ping-pong between hash[0..63] and hash[64..127]; the final
 * 32 bytes copied to `state` come from hash+64.
 *
 * NOTE(review): `input` is never referenced by name below — presumably the
 * BLK_ macros read it through identifiers declared by DECL_BLK; confirm in
 * the macro headers.
 */
inline void Xhash(void *state, const void *input)
{
    Xhash_context_holder ctx;
    // uint32_t hashA[16], hashB[16];

    /* Work on a copy so the pre-initialised base contexts stay pristine. */
    memcpy(&ctx, &base_contexts, sizeof(base_contexts));
#ifdef AES_NI_GR
    init_groestl(&ctx.groestl);
#endif
    /* Scratch state consumed implicitly by the macro-based hash stages. */
    DATA_ALIGNXY(unsigned char hashbuf[128],16);
    size_t hashptr;
    DATA_ALIGNXY(sph_u64 hashctA,8);
    DATA_ALIGNXY(sph_u64 hashctB,8);
#ifndef AES_NI_GR
    grsoState sts_grs;
#endif
    DATA_ALIGNXY(unsigned char hash[128],16);

    /* Defensive zeroing (original note: probably not needed). */
    memset(hash, 0, 128);

    //blake1-bmw2-grs3-skein4-jh5-keccak6-luffa7-cubehash8-shavite9-simd10-echo11

    //---blake1---
    /*
    //blake init
     blake512_init(&base_contexts.blake1, 512);
     blake512_update(&ctx.blake1, input, 512);
     blake512_final(&ctx.blake1, hash);
    */
    DECL_BLK;
    BLK_I;
    BLK_W;
    BLK_C;

    //---bmw2---
    DECL_BMW;
    BMW_I;
    BMW_U;
    /* BMW_C accesses the message and chaining values through these macros. */
#define M(x)  sph_dec64le_aligned(data + 8 * (x))
#define H(x)  (h[x])
#define dH(x) (dh[x])
    BMW_C;
#undef M
#undef H
#undef dH

    //---grs3----
#ifdef AES_NI_GR
    update_groestl(&ctx.groestl, (char*)hash,512);
    final_groestl(&ctx.groestl, (char*)hash);
#else
    GRS_I;
    GRS_U;
    GRS_C;
#endif

    //---skein4---
    DECL_SKN;
    SKN_I;
    SKN_U;
    SKN_C;

    //---jh5------
    DECL_JH;
    JH_H;

    //---keccak6---
    DECL_KEC;
    KEC_I;
    KEC_U;
    KEC_C;
    // asm volatile ("emms");

    //--- luffa7
    update_luffa(&ctx.luffa,(const BitSequence*)hash,512);
    final_luffa(&ctx.luffa,(BitSequence*)hash+64);

    //---cubehash---
    cubehashUpdate(&ctx.cubehash,(const byte*) hash+64,64);
    cubehashDigest(&ctx.cubehash,(byte*)hash);

    //---shavite---
    sph_shavite512 (&ctx.shavite1, hash, 64);
    sph_shavite512_close(&ctx.shavite1, hash+64);

    //sph_simd512 (&ctx.simd1, hashA, 64);
    // sph_simd512_close(&ctx.simd1, hashB);

    //-------simd512 vect128 --------------
    update_sd(&ctx.ctx_simd1,(const BitSequence *)hash+64,512);
    final_sd(&ctx.ctx_simd1,(BitSequence *)hash);

    //---echo---
    /* NOTE(review): guard is AES_NI here but AES_NI_GR for groestl above —
     * confirm the two macros are intentionally distinct. */
#ifdef AES_NI
    update_echo (&ctx.echo1,(const BitSequence *) hash, 512);
    final_echo(&ctx.echo1, (BitSequence *) hash+64);
#else
    sph_echo512 (&ctx.echo1, hash, 64);
    sph_echo512_close(&ctx.echo1, hash+64);
#endif

    memcpy(state, hash+64, 32);
}
/*
 * X14 chained hash: fourteen algorithms applied in sequence, each stage
 * digesting the previous 64-byte digest. Stages alternate between
 * hash[0..63] and hash[64..127] (aliased as hashB); the first 32 bytes of
 * the final digest are copied into `output`.
 *
 * NOTE(review): `input` is not referenced by name below — presumably
 * consumed by the BLK_ macros via identifiers declared by DECL_BLK;
 * confirm in the macro headers.
 */
static void x14hash(void *output, const void *input)
{
    unsigned char hash[128];
    // uint32_t hashA[16], hashB[16];
    /* hashB names the upper half of the working buffer. */
#define hashB hash+64

    x14_ctx_holder ctx;
    /* Work on a copy so the pre-initialised contexts stay pristine. */
    memcpy(&ctx, &x14_ctx, sizeof(x14_ctx));
#ifdef NO_AES_NI
    grsoState sts_grs;
#endif
    /* Scratch state consumed implicitly by the macro-based hash stages. */
    unsigned char hashbuf[128];
    size_t hashptr;
    sph_u64 hashctA;
    sph_u64 hashctB;

    //---blake1---
    DECL_BLK;
    BLK_I;
    BLK_W;
    BLK_C;

    //---bmw2---
    DECL_BMW;
    BMW_I;
    BMW_U;
    /* BMW_C accesses the message and chaining values through these macros. */
#define M(x)  sph_dec64le_aligned(data + 8 * (x))
#define H(x)  (h[x])
#define dH(x) (dh[x])
    BMW_C;
#undef M
#undef H
#undef dH

    //---groestl----
#ifdef NO_AES_NI
    // use SSE2 optimized GRS if possible
    GRS_I;
    GRS_U;
    GRS_C;
    //  sph_groestl512 (&ctx.groestl, hash, 64);
    //  sph_groestl512_close(&ctx.groestl, hash);
#else
    update_groestl( &ctx.groestl, (char*)hash,512);
    final_groestl( &ctx.groestl, (char*)hash);
#endif

    //---skein4---
    DECL_SKN;
    SKN_I;
    SKN_U;
    SKN_C;

    //---jh5------
    DECL_JH;
    JH_H;

    //---keccak6---
    DECL_KEC;
    KEC_I;
    KEC_U;
    KEC_C;

    //--- luffa7
    update_luffa( &ctx.luffa, (const BitSequence*)hash,512);
    final_luffa( &ctx.luffa, (BitSequence*)hashB);

    // 8 Cube
    cubehashUpdate( &ctx.cubehash, (const byte*) hashB,64);
    cubehashDigest( &ctx.cubehash, (byte*)hash);

    // 9 Shavite
    sph_shavite512( &ctx.shavite, hash, 64);
    sph_shavite512_close( &ctx.shavite, hashB);

    // 10 Simd
    update_sd( &ctx.simd, (const BitSequence *)hashB,512);
    final_sd( &ctx.simd, (BitSequence *)hash);

    //11---echo---
#ifdef NO_AES_NI
    sph_echo512(&ctx.echo, hash, 64);
    sph_echo512_close(&ctx.echo, hashB);
#else
    update_echo ( &ctx.echo, (const BitSequence *) hash, 512);
    final_echo( &ctx.echo, (BitSequence *) hashB);
#endif

    // X13 algos
    // 12 Hamsi
    sph_hamsi512(&ctx.hamsi, hashB, 64);
    sph_hamsi512_close(&ctx.hamsi, hash);

    // 13 Fugue
    sph_fugue512(&ctx.fugue, hash, 64);
    sph_fugue512_close(&ctx.fugue, hashB);

    // X14 Shabal
    sph_shabal512(&ctx.shabal, hashB, 64);
    sph_shabal512_close(&ctx.shabal, hash);

    /* Reset x87/MMX state before returning (the macro hashes presumably
     * use MMX registers). */
    asm volatile ("emms");

    memcpy(output, hash, 32);
}
/*
 * ZR5 hash: Keccak-512 over the 80-byte input first, then four algorithms
 * (0=Blake, 1=Groestl, 2=JH, 3=Skein) applied in an order chosen by the
 * Keccak digest itself (one of 24 permutations). The first 32 bytes of the
 * final digest are copied into `state`.
 */
static void zr5hash(void *state, const void *input)
{
    /* Scratch state consumed implicitly by the macro-based hash stages. */
    DATA_ALIGN16(unsigned char hashbuf[128]);
    DATA_ALIGN16(unsigned char hash[128]);
    DATA_ALIGN16(size_t hashptr);
    DATA_ALIGN16(sph_u64 hashctA);
    DATA_ALIGN16(sph_u64 hashctB);
    //memset(hash, 0, 128);
#ifdef NO_AES_NI
    grsoState sts_grs;
#endif
    /* All 24 permutations of {0,1,2,3}: the candidate execution orders for
     * the four inner hashes. */
    static const int arrOrder[][4] =
    {
        { 0, 1, 2, 3 }, { 0, 1, 3, 2 }, { 0, 2, 1, 3 }, { 0, 2, 3, 1 },
        { 0, 3, 1, 2 }, { 0, 3, 2, 1 }, { 1, 0, 2, 3 }, { 1, 0, 3, 2 },
        { 1, 2, 0, 3 }, { 1, 2, 3, 0 }, { 1, 3, 0, 2 }, { 1, 3, 2, 0 },
        { 2, 0, 1, 3 }, { 2, 0, 3, 1 }, { 2, 1, 0, 3 }, { 2, 1, 3, 0 },
        { 2, 3, 0, 1 }, { 2, 3, 1, 0 }, { 3, 0, 1, 2 }, { 3, 0, 2, 1 },
        { 3, 1, 0, 2 }, { 3, 1, 2, 0 }, { 3, 2, 0, 1 }, { 3, 2, 1, 0 }
    };

    zr5_ctx_holder ctx;
    /* Work on a copy so the pre-initialised contexts stay pristine. */
    memcpy( &ctx, &zr5_ctx, sizeof(zr5_ctx) );

    /* Stage 1: Keccak-512 over the 80-byte block header. */
    sph_keccak512 (&ctx.keccak, input, 80);
    sph_keccak512_close(&ctx.keccak, hash);

    /* The leading bytes of the digest select one of the 24 orders.
     * NOTE(review): type-punned read of the char buffer via unsigned int* —
     * buffer is 16-byte aligned so alignment is fine, but confirm the
     * endianness-dependent selection matches other ZR5 implementations. */
    unsigned int nOrder = *(unsigned int *)(&hash) % 24;
    unsigned int i = 0;

    for (i = 0; i < 4; i++)
    {
        switch (arrOrder[nOrder][i])
        {
        case 0:
            {DECL_BLK; BLK_I; BLK_U; BLK_C;}
            break;
        case 1:
#ifdef NO_AES_NI
            {GRS_I; GRS_U; GRS_C; }
#else
            update_groestl( &ctx.groestl, (char*)hash,512);
            final_groestl( &ctx.groestl, (char*)hash);
#endif
            break;
        case 2:
            {DECL_JH; JH_H;}
            break;
        case 3:
            {DECL_SKN; SKN_I; SKN_U; SKN_C; }
            break;
        default:
            break;
        }
    }

    /* Reset x87/MMX state before returning (the macro hashes presumably
     * use MMX registers). */
    asm volatile ("emms");

    memcpy(state, hash, 32);
}