/* Absorb `inlen` bytes of input into the hash state.
 * Data is staged in S->buf; a block is only compressed once we know more
 * input follows it, because the final block must be flagged specially in
 * the finalization routine.  Always returns 0. */
int blake2s_Update( blake2s_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  if( inlen > 0 )
  {
    size_t left = S->buflen;                       /* bytes already buffered */
    size_t fill = BLAKE2S_BLOCKBYTES - left;       /* room left in the buffer */
    if( inlen > fill )
    {
      S->buflen = 0;
      memcpy( S->buf + left, in, fill ); /* Fill buffer */
      blake2s_increment_counter( S, BLAKE2S_BLOCKBYTES );
      blake2s_compress( S, S->buf ); /* Compress */
      in += fill;
      inlen -= fill;
      /* Strict '>' (not '>=') keeps the last full block buffered so it can
       * be compressed with the last-block flag set in the final step. */
      while(inlen > BLAKE2S_BLOCKBYTES)
      {
        blake2s_increment_counter(S, BLAKE2S_BLOCKBYTES);
        blake2s_compress( S, in );
        in += BLAKE2S_BLOCKBYTES;
        inlen -= BLAKE2S_BLOCKBYTES;
      }
    }
    /* Stash the remainder without compressing (lazy). */
    memcpy( S->buf + S->buflen, in, inlen );
    S->buflen += inlen;
  }
  return 0;
}
/* Finalize the hash and write exactly `outlen` bytes to `out`.
 * Returns 0 on success, -1 if `outlen` does not match the digest length
 * chosen at initialization.  This variant keeps a two-block buffer
 * (S->buf is 2 * BLAKE2S_BLOCKBYTES), so up to one full extra block may
 * still be pending and is compressed first. */
int blake2s_final( blake2s_state *S, uint8_t *out, size_t outlen )
{
  uint8_t buffer[BLAKE2S_OUTBYTES];
  if(S->outlen != outlen) return -1;
  /* Flush a pending full block and slide the tail down, leaving only the
   * true last (possibly partial) block in the buffer. */
  if( S->buflen > BLAKE2S_BLOCKBYTES )
  {
    blake2s_increment_counter( S, BLAKE2S_BLOCKBYTES );
    blake2s_compress( S, S->buf );
    S->buflen -= BLAKE2S_BLOCKBYTES;
    memcpy( S->buf, S->buf + BLAKE2S_BLOCKBYTES, S->buflen );
  }
  blake2s_increment_counter( S, ( uint32_t )S->buflen );
  blake2s_set_lastblock( S );
  memset( S->buf + S->buflen, 0, 2 * BLAKE2S_BLOCKBYTES - S->buflen ); /* Padding */
  blake2s_compress( S, S->buf );
  for( int i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
    store32( buffer + sizeof( S->h[i] ) * i, S->h[i] );
  /* outlen == S->outlen <= BLAKE2S_OUTBYTES here, so this stays in bounds. */
  memcpy( out, buffer, outlen );
  return 0;
}
/* libtomcrypt-style update: absorb `inlen` bytes of `in` into the BLAKE2s
 * state held in `md`.  Full blocks are compressed eagerly except the last
 * one, which stays buffered for blake2s_done.  Returns CRYPT_OK on success
 * or CRYPT_INVALID_ARG on bad arguments / corrupted state. */
int blake2s_process(hash_state *md, const unsigned char *in, unsigned long inlen)
{
   LTC_ARGCHK(md != NULL);
   LTC_ARGCHK(in != NULL);
   /* Defensive: curlen beyond the buffer means the state was corrupted. */
   if (md->blake2s.curlen > sizeof(md->blake2s.buf)) {
      return CRYPT_INVALID_ARG;
   }
   if (inlen > 0) {
      unsigned long left = md->blake2s.curlen;
      unsigned long fill = BLAKE2S_BLOCKBYTES - left;
      if (inlen > fill) {
         md->blake2s.curlen = 0;
         /* The modulo is redundant given the check above; presumably kept to
          * satisfy static analyzers — TODO confirm against upstream. */
         XMEMCPY(md->blake2s.buf + (left % sizeof(md->blake2s.buf)), in, fill); /* Fill buffer */
         blake2s_increment_counter(md, BLAKE2S_BLOCKBYTES);
         blake2s_compress(md, md->blake2s.buf); /* Compress */
         in += fill;
         inlen -= fill;
         /* Strict '>' keeps the final block buffered for blake2s_done. */
         while (inlen > BLAKE2S_BLOCKBYTES) {
            blake2s_increment_counter(md, BLAKE2S_BLOCKBYTES);
            blake2s_compress(md, in);
            in += BLAKE2S_BLOCKBYTES;
            inlen -= BLAKE2S_BLOCKBYTES;
         }
      }
      /* Stash remainder without compressing. */
      XMEMCPY(md->blake2s.buf + md->blake2s.curlen, in, inlen);
      md->blake2s.curlen += inlen;
   }
   return CRYPT_OK;
}
/* Older reference-style update using a double-size (2 * BLAKE2S_BLOCKBYTES)
 * staging buffer: when the buffer overflows, the first block is compressed
 * and the second block is shifted down.  The last block is never compressed
 * here so finalization can flag it.  Always returns 0. */
int blake2s_update( blake2s_state *S, const uint8_t *in, uint64_t inlen )
{
  while( inlen > 0 )
  {
    size_t left = S->buflen;
    size_t fill = 2 * BLAKE2S_BLOCKBYTES - left;   // free space in the 2-block buffer
    if( inlen > fill )
    {
      memcpy( S->buf + left, in, fill ); // Fill buffer
      S->buflen += fill;
      blake2s_increment_counter( S, BLAKE2S_BLOCKBYTES );
      blake2s_compress( S, S->buf ); // Compress
      // Shift buffer left: keep the (possibly final) second block pending.
      memcpy( S->buf, S->buf + BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
      S->buflen -= BLAKE2S_BLOCKBYTES;
      in += fill;
      inlen -= fill;
    }
    else // inlen <= fill
    {
      memcpy( S->buf + left, in, inlen );
      S->buflen += inlen; // Be lazy, do not compress
      in += inlen;
      inlen -= inlen;     // terminates the loop
    }
  }
  return 0;
}
/* libtomcrypt-style finalization: pad and compress the last buffered block
 * with the last-block flag set, store the digest little-endian into `out`
 * (md->blake2s.outlen bytes), then wipe the hash state.
 * Returns CRYPT_OK, or CRYPT_ERROR if the state was already finalized. */
int blake2s_done(hash_state *md, unsigned char *out)
{
   unsigned char buffer[BLAKE2S_OUTBYTES] = { 0 };
   unsigned long i;
   LTC_ARGCHK(md != NULL);
   LTC_ARGCHK(out != NULL);
   /* if(md->blake2s.outlen != outlen) return CRYPT_INVALID_ARG; */
   /* Guard against double finalization. */
   if (blake2s_is_lastblock(md)) return CRYPT_ERROR;
   blake2s_increment_counter(md, md->blake2s.curlen);
   blake2s_set_lastblock(md);
   XMEMSET(md->blake2s.buf + md->blake2s.curlen, 0,
           BLAKE2S_BLOCKBYTES - md->blake2s.curlen); /* Padding */
   blake2s_compress(md, md->blake2s.buf);
   for (i = 0; i < 8; ++i) /* Output full hash to temp buffer */
      STORE32L(md->blake2s.h[i], buffer + i * 4);
   XMEMCPY(out, buffer, md->blake2s.outlen);
   /* Wipe secrets: the whole hash state, and the temp buffer when
    * stack-cleaning is enabled. */
   zeromem(md, sizeof(hash_state));
#ifdef LTC_CLEAN_STACK
   zeromem(buffer, sizeof(buffer));
#endif
   return CRYPT_OK;
}
/* Finalize the hash and write the digest to `digest` (8 state words,
 * little-endian, via RawPut4).  Uses a two-block staging buffer, so a
 * pending full block is compressed and the tail shifted down before the
 * last (flagged) block is processed. */
void blake2s_final( blake2s_state *S, byte *digest )
{
  /* Flush a pending full block, keeping only the true last block buffered. */
  if( S->buflen > BLAKE2S_BLOCKBYTES )
  {
    blake2s_increment_counter( S, BLAKE2S_BLOCKBYTES );
    blake2s_compress( S, S->buf );
    S->buflen -= BLAKE2S_BLOCKBYTES;
    memcpy( S->buf, S->buf + BLAKE2S_BLOCKBYTES, S->buflen );
  }
  blake2s_increment_counter( S, ( uint32 )S->buflen );
  blake2s_set_lastblock( S );
  memset( S->buf + S->buflen, 0, 2 * BLAKE2S_BLOCKBYTES - S->buflen ); /* Padding */
  blake2s_compress( S, S->buf );
  for( int i = 0; i < 8; ++i ) /* Output full hash, word i at byte offset 4*i */
    RawPut4( S->h[i], digest + 4 * i );
}
/* Absorb the input data into the hash state. Always returns 1. */
int BLAKE2s_Update(BLAKE2S_CTX *c, const void *data, size_t datalen)
{
    const uint8_t *in = data;
    size_t fill;

    /*
     * Intuitively one would expect intermediate buffer, c->buf, to
     * store incomplete blocks. But in this case we are interested to
     * temporarily stash even complete blocks, because last one in the
     * stream has to be treated in special way, and at this point we
     * don't know if last block in *this* call is last one "ever". This
     * is the reason for why |datalen| is compared as >, and not >=.
     */
    fill = sizeof(c->buf) - c->buflen;
    if (datalen > fill) {
        if (c->buflen) {
            /* Complete the partially filled buffer and compress it. */
            memcpy(c->buf + c->buflen, in, fill); /* Fill buffer */
            blake2s_compress(c, c->buf, BLAKE2S_BLOCKBYTES);
            c->buflen = 0;
            in += fill;
            datalen -= fill;
        }
        if (datalen > BLAKE2S_BLOCKBYTES) {
            size_t stashlen = datalen % BLAKE2S_BLOCKBYTES;
            /*
             * If |datalen| is a multiple of the blocksize, stash
             * last complete block, it can be final one...
             */
            stashlen = stashlen ? stashlen : BLAKE2S_BLOCKBYTES;
            datalen -= stashlen;
            /* Compress all remaining full blocks directly from the input. */
            blake2s_compress(c, in, datalen);
            in += datalen;
            datalen = stashlen;
        }
    }

    /* At most one block's worth of data is left; buffer it. */
    assert(datalen <= BLAKE2S_BLOCKBYTES);
    memcpy(c->buf + c->buflen, in, datalen);
    c->buflen += datalen; /* Be lazy, do not compress */

    return 1;
}
/* Update with a two-block staging buffer, dispatching to an SSE-optimized
 * compression routine when the CPU supports it.  The last block is always
 * left buffered so finalization can set the last-block flag. */
void blake2s_update( blake2s_state *S, const byte *in, size_t inlen )
{
  while( inlen > 0 )
  {
    size_t left = S->buflen;
    size_t fill = 2 * BLAKE2S_BLOCKBYTES - left;   // free space in the 2-block buffer
    if( inlen > fill )
    {
      memcpy( S->buf + left, in, fill ); // Fill buffer
      S->buflen += fill;
      blake2s_increment_counter( S, BLAKE2S_BLOCKBYTES );
#ifdef USE_SSE
#ifdef _WIN_32
      // We use SSSE3 _mm_shuffle_epi8 only in x64 mode.
      if (_SSE_Version>=SSE_SSE2)
#else
      if (_SSE_Version>=SSE_SSSE3)
#endif
        blake2s_compress_sse( S, S->buf );
      else
        blake2s_compress( S, S->buf ); // Compress
#else
      blake2s_compress( S, S->buf ); // Compress
#endif
      // Shift buffer left: keep the (possibly final) second block pending.
      memcpy( S->buf, S->buf + BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
      S->buflen -= BLAKE2S_BLOCKBYTES;
      in += fill;
      inlen -= fill;
    }
    else // inlen <= fill
    {
      memcpy( S->buf + left, in, (size_t)inlen );
      S->buflen += (size_t)inlen; // Be lazy, do not compress
      in += inlen;
      inlen -= inlen;             // terminates the loop
    }
  }
}
/* Absorb `inlen` bytes into the context, one byte at a time.  Whenever the
 * 64-byte block buffer fills up, fold its length into the 64-bit message
 * counter (t[0] low / t[1] high) and compress it as a non-final block. */
void blake2s_update(blake2s_ctx *ctx, const void *in, size_t inlen)
{
    const uint8_t *src = (const uint8_t *) in;
    size_t pos;

    for (pos = 0; pos < inlen; pos++) {
        if (ctx->c == 64) {              /* block buffer full: flush it */
            ctx->t[0] += ctx->c;         /* advance low counter word   */
            if (ctx->t[0] < ctx->c)      /* wrapped? propagate carry   */
                ctx->t[1]++;
            blake2s_compress(ctx, 0);    /* 0 = not the last block     */
            ctx->c = 0;
        }
        ctx->b[ctx->c++] = src[pos];
    }
}
/*
 * Calculate the final hash and save it in md.
 * Always returns 1.
 */
int BLAKE2s_Final(unsigned char *md, BLAKE2S_CTX *c)
{
    int i;

    blake2s_set_lastblock(c);
    /* Padding: zero the unused tail of the buffer before the last compress. */
    memset(c->buf + c->buflen, 0, sizeof(c->buf) - c->buflen);
    blake2s_compress(c, c->buf, c->buflen);

    /* Output full hash to temp buffer (little-endian state words). */
    for (i = 0; i < 8; ++i) {
        store32(md + sizeof(c->h[i]) * i, c->h[i]);
    }

    /* Wipe the context so key/state material does not linger on the stack. */
    OPENSSL_cleanse(c, sizeof(BLAKE2S_CTX));
    return 1;
}
/* Absorb `inlen` bytes into the context byte-by-byte; when the 64-byte
 * block buffer is full, update the 64-bit message counter and compress
 * the buffer as a non-final block. */
static void blake2s_update( blake2s_ctx * ctx, const void * in, HB_SIZE inlen )
{
   const HB_U8 * src = ( const HB_U8 * ) in;
   HB_SIZE pos;

   for( pos = 0; pos < inlen; pos++ )
   {
      if( ctx->c == 64 )                       /* block buffer full? */
      {
         ctx->t[ 0 ] += ( HB_U32 ) ctx->c;     /* advance low counter word */
         if( ctx->t[ 0 ] < ctx->c )            /* wrapped? carry into high word */
            ctx->t[ 1 ]++;
         blake2s_compress( ctx, 0 );           /* 0 = not the last block */
         ctx->c = 0;
      }
      ctx->b[ ctx->c++ ] = src[ pos ];
   }
}
/* Finalize: fold the buffered byte count into the message counter, zero-pad
 * the last block, compress it with the final-block flag, then serialize the
 * first ctx->outlen bytes of the state little-endian into `out`. */
void blake2s_final(blake2s_ctx *ctx, void *out)
{
    uint8_t *dst = (uint8_t *) out;
    size_t pos;

    ctx->t[0] += ctx->c;                 /* mark last block offset   */
    if (ctx->t[0] < ctx->c)              /* wrapped? carry into high */
        ctx->t[1]++;

    while (ctx->c < 64)                  /* zero-pad the final block */
        ctx->b[ctx->c++] = 0;

    blake2s_compress(ctx, 1);            /* 1 = final block flag     */

    /* Little-endian convert and store, truncated to the digest length. */
    for (pos = 0; pos < ctx->outlen; pos++)
        dst[pos] = (ctx->h[pos >> 2] >> (8 * (pos & 3))) & 0xFF;
}
/* Finalize the hash: account for the buffered bytes in the message counter,
 * zero-pad the last block, compress it with the final-block flag set, and
 * write the digest (ctx->outlen bytes, little-endian) to `out`. */
static void blake2s_final( blake2s_ctx * ctx, void * out )
{
   HB_U8 * dst = ( HB_U8 * ) out;
   HB_SIZE pos;

   ctx->t[ 0 ] += ( HB_U32 ) ctx->c;     /* mark last block offset */
   if( ctx->t[ 0 ] < ctx->c )            /* wrapped? carry into high word */
      ctx->t[ 1 ]++;

   while( ctx->c < 64 )                  /* zero-pad the final block */
      ctx->b[ ctx->c++ ] = 0;

   blake2s_compress( ctx, 1 );           /* 1 = final block flag */

   /* Little-endian convert and store, truncated to the digest length. */
   for( pos = 0; pos < ctx->outlen; pos++ )
      dst[ pos ] = ( ctx->h[ pos >> 2 ] >> ( 8 * ( pos & 3 ) ) ) & 0xFF;
}
/* Finalize the hash and write the digest to `out`.
 * Returns 0 on success; -1 if `out` is NULL, `outlen` is smaller than the
 * digest length chosen at init, or the state was already finalized.
 * Exactly S->outlen bytes are written. */
int blake2s_Final( blake2s_state *S, void *out, size_t outlen )
{
  uint8_t buffer[BLAKE2S_OUTBYTES] = {0};
  size_t i;

  if( out == NULL || outlen < S->outlen )
    return -1;

  /* Guard against double finalization. */
  if( blake2s_is_lastblock( S ) )
    return -1;

  blake2s_increment_counter( S, ( uint32_t )S->buflen );
  blake2s_set_lastblock( S );
  memset( S->buf + S->buflen, 0, BLAKE2S_BLOCKBYTES - S->buflen ); /* Padding */
  blake2s_compress( S, S->buf );

  for( i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
    store32( buffer + sizeof( S->h[i] ) * i, S->h[i] );

  /* BUG FIX: copy S->outlen bytes, not `outlen`.  The guard above only
   * rejects outlen < S->outlen, so copying `outlen` bytes would read past
   * the 32-byte `buffer` whenever a caller passes outlen > BLAKE2S_OUTBYTES.
   * The upstream BLAKE2 reference implementation applies the same fix. */
  memcpy( out, buffer, S->outlen );
  memzero(buffer, sizeof(buffer));
  return 0;
}
/* Absorb the input data into the hash state. Always returns 1.
 * A block is compressed only when strictly more data follows it, so the
 * final block always stays buffered for special treatment in Final. */
int BLAKE2s_Update(BLAKE2S_CTX *c, const void *data, size_t datalen)
{
    const uint8_t *p = data;

    if (datalen == 0)
        return 1;

    for (;;) {
        size_t space = sizeof(c->buf) - c->buflen;

        if (datalen <= space) {
            /* Everything fits — stash it and be lazy, do not compress. */
            memcpy(c->buf + c->buflen, p, datalen);
            c->buflen += datalen;
            return 1;
        }

        /* Strictly more data than space: this block cannot be the last
         * one, so it is safe to compress it now. */
        memcpy(c->buf + c->buflen, p, space);
        blake2s_increment_counter(c, BLAKE2S_BLOCKBYTES);
        blake2s_compress(c, c->buf);
        c->buflen = 0;
        p += space;
        datalen -= space;
    }
}
/* Performance optimised FastKDF with BLAKE2s integrated */
/* Derives `output` from `password` (80 bytes read) and `salt`.
 * mode == 0: 256-byte output, salt treated as 80-byte material;
 * mode != 0: 32-byte output, salt treated as 256-byte material.
 * Runs 32 rounds of a keyed BLAKE2s over a 256-byte ring buffer B,
 * with the pseudo-random byte sum of the state selecting the next
 * buffer offset.  NOTE(review): buffer sizes/offsets below are exact
 * and interdependent — do not adjust one without the others. */
void neoscrypt_fastkdf_opt(const uchar *password, const uchar *salt, uchar *output, uint mode)
{
    const size_t stack_align = 0x40;
    uint bufptr, output_len, i, j;
    uchar *A, *B;
    uint *S;

    /* Align and set up the buffers in stack:
     * A = 320-byte password ring (+64 wrap area), B = 288-byte salt ring
     * (256 + 32 wrap area), S = BLAKE2s working state at A+608. */
    uchar stack[864 + stack_align];
    A = (uchar *) (((size_t)stack & ~(stack_align - 1)) + stack_align);
    B = &A[320];
    S = (uint *) &A[608];

    /* Replicate the 80-byte password across the 320-byte ring, then
     * duplicate the first 64 bytes past the end for wrap-free reads. */
    neoscrypt_copy(&A[0],   &password[0], 80);
    neoscrypt_copy(&A[80],  &password[0], 80);
    neoscrypt_copy(&A[160], &password[0], 80);
    neoscrypt_copy(&A[240], &password[0], 16);
    neoscrypt_copy(&A[256], &password[0], 64);

    if(!mode) {
        /* Mode 0: 80-byte salt replicated like the password. */
        output_len = 256;
        neoscrypt_copy(&B[0],   &salt[0], 80);
        neoscrypt_copy(&B[80],  &salt[0], 80);
        neoscrypt_copy(&B[160], &salt[0], 80);
        neoscrypt_copy(&B[240], &salt[0], 16);
        neoscrypt_copy(&B[256], &salt[0], 32);
    } else {
        /* Mode 1: salt already fills the 256-byte ring. */
        output_len = 32;
        neoscrypt_copy(&B[0],   &salt[0], 256);
        neoscrypt_copy(&B[256], &salt[0], 32);
    }

    for(i = 0, bufptr = 0; i < 32; i++) {
        /* BLAKE2s: initialise (IV pre-XORed with the parameter block). */
        neoscrypt_copy(&S[0], blake2s_IV_P_XOR, 32);
        neoscrypt_erase(&S[8], 16);

        /* BLAKE2s: update key (32 bytes from B, zero-padded to a block). */
        neoscrypt_copy(&S[12], &B[bufptr], 32);
        neoscrypt_erase(&S[20], 32);

        /* BLAKE2s: compress IV using key (byte counter = 64). */
        S[8] = 64;
        blake2s_compress((blake2s_state *) S);

        /* BLAKE2s: update input (64 bytes from A). */
        neoscrypt_copy(&S[12], &A[bufptr], 64);

        /* BLAKE2s: compress again using input; counter = 128,
         * S[10] = ~0 sets the last-block finalisation flag. */
        S[8] = 128;
        S[10] = ~0U;
        blake2s_compress((blake2s_state *) S);

        /* Next offset = sum of all digest bytes, mod 256. */
        for(j = 0, bufptr = 0; j < 8; j++) {
            bufptr += S[j];
            bufptr += (S[j] >> 8);
            bufptr += (S[j] >> 16);
            bufptr += (S[j] >> 24);
        }
        bufptr &= 0xFF;

        /* Fold the digest into B at the new offset. */
        neoscrypt_xor(&B[bufptr], &S[0], 32);

        /* Keep the 32-byte wrap area at B[256..287] consistent with the
         * start of the ring (and vice versa) after the XOR above. */
        if(bufptr < 32)
            neoscrypt_copy(&B[256 + bufptr], &B[bufptr], 32 - bufptr);
        else if(bufptr > 224)
            neoscrypt_copy(&B[0], &B[256], bufptr - 224);
    }

    /* Emit output_len bytes of B XOR A starting at bufptr, wrapping
     * around the 256-byte ring if necessary. */
    i = 256 - bufptr;
    if(i >= output_len) {
        neoscrypt_xor(&B[bufptr], &A[0], output_len);
        neoscrypt_copy(&output[0], &B[bufptr], output_len);
    } else {
        neoscrypt_xor(&B[bufptr], &A[0], i);
        neoscrypt_xor(&B[0], &A[i], output_len - i);
        neoscrypt_copy(&output[0], &B[bufptr], i);
        neoscrypt_copy(&output[i], &B[0], output_len - i);
    }
}