/*
 * Finalize a BMW-384/512 computation: append the padding bit, encode the
 * total message bit length (64-bit little-endian) in the last 8 bytes of
 * the block, compress, then run the final compression against the fixed
 * key final_b and emit the trailing out_size_w64 words as the digest.
 *
 * sc             hash context (consumed; not reinitialized here)
 * ub, n          n (0..7) extra message bits, stored in the top bits of ub
 * dst            receives out_size_w64 * 8 bytes, little-endian
 * out_size_w64   digest size in 64-bit words (6 for BMW-384, 8 for BMW-512)
 */
static void
bmw64_close(sph_bmw_big_context *sc, unsigned ub, unsigned n,
	void *dst, size_t out_size_w64)
{
	unsigned char *data, *dp;
	size_t pos, i, j;
	unsigned pad;
	sph_u64 tmp1[16], tmp2[16], *state;

	data = sc->buf;
	pos = sc->ptr;
	/* Padding byte: the n extra bits of ub followed by a lone '1' bit. */
	pad = 0x80 >> n;
	data[pos ++] = ((ub & -pad) | pad) & 0xFF;
	state = sc->H;
	if (pos > (sizeof data[0]) * 0 + (sizeof sc->buf) - 8) {
		/* No room left for the 64-bit length: flush an extra block. */
		memset(data + pos, 0, (sizeof sc->buf) - pos);
		compress_big(data, state, tmp1);
		pos = 0;
		state = tmp1;
	}
	memset(data + pos, 0, (sizeof sc->buf) - 8 - pos);
	sph_enc64le_aligned(data + (sizeof sc->buf) - 8,
		SPH_T64(sc->bit_count + n));
	compress_big(data, state, tmp2);
	/* Final round: re-encode the state and compress with the constant key. */
	for (i = 0; i < 16; i ++)
		sph_enc64le_aligned(data + 8 * i, tmp2[i]);
	compress_big(data, final_b, tmp1);
	dp = dst;
	/* The digest is the last out_size_w64 state words, little-endian. */
	for (i = 0, j = 16 - out_size_w64; i < out_size_w64; i ++, j ++)
		sph_enc64le(dp + 8 * i, tmp1[j]);
}
/*
 * Finalize a BLAKE-384/512 computation: append padding, embed the total
 * message bit length (128-bit, big-endian, split as th:tl) in the last
 * 16 bytes of the final block, and emit the digest big-endian.
 *
 * sc             hash context
 * ub, n          n (0..7) extra message bits, stored in the top bits of ub
 * dst            receives out_size_w64 * 8 bytes
 * out_size_w64   digest size in 64-bit words (6 for BLAKE-384, 8 for BLAKE-512)
 */
static void blake64_close(sph_blake_big_context *sc, unsigned ub, unsigned n, void *dst, size_t out_size_w64) {
	union {
		unsigned char buf[128];
		sph_u64 dummy; /* forces 64-bit alignment for the *_aligned encoders */
	} u;
	size_t ptr, k;
	unsigned bit_len;
	unsigned z;
	sph_u64 th, tl;
	unsigned char *out;

	ptr = sc->ptr;
	/* Number of message bits sitting in the unflushed partial block. */
	bit_len = ((unsigned)ptr << 3) + n;
	/* Padding byte: the n extra bits of ub, then a single '1' bit. */
	z = 0x80 >> n;
	u.buf[ptr] = ((ub & -z) | z) & 0xFF;
	/* Snapshot the true total bit length for the length field. */
	tl = sc->T0 + bit_len;
	th = sc->T1;
	/*
	 * Pre-adjust the T0/T1 counter so that after blake64() adds 1024 per
	 * compressed block, padding-only bits are not counted as message bits
	 * (BLAKE spec: t must reflect message bits only in the final blocks).
	 */
	if (ptr == 0 && n == 0) {
		/* Final block is all padding: counter must end up at zero. */
		sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00);
		sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFF);
	} else if (sc->T0 == 0) {
		/* T0 wrapped to 0 on the previous block: borrow from T1. */
		sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00) + bit_len;
		sc->T1 = SPH_T64(sc->T1 - 1);
	} else {
		sc->T0 -= 1024 - bit_len;
	}
	if (bit_len <= 894) {
		/* Tail + padding + 16-byte length field fit in one block. */
		memset(u.buf + ptr + 1, 0, 111 - ptr);
		/* BLAKE-512 sets the marker bit before the length; BLAKE-384 does not. */
		if (out_size_w64 == 8)
			u.buf[111] |= 1;
		sph_enc64be_aligned(u.buf + 112, th);
		sph_enc64be_aligned(u.buf + 120, tl);
		blake64(sc, u.buf + ptr, 128 - ptr);
	} else {
		/* Two blocks needed: tail + padding first, then a length-only
		   block that must be counted as zero message bits. */
		memset(u.buf + ptr + 1, 0, 127 - ptr);
		blake64(sc, u.buf + ptr, 128 - ptr);
		sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00);
		sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFF);
		memset(u.buf, 0, 112);
		if (out_size_w64 == 8)
			u.buf[111] = 1;
		sph_enc64be_aligned(u.buf + 112, th);
		sph_enc64be_aligned(u.buf + 120, tl);
		blake64(sc, u.buf, 128);
	}
	out = dst;
	/* Digest: first out_size_w64 chaining words, big-endian. */
	for (k = 0; k < out_size_w64; k ++)
		sph_enc64be(out + (k << 3), sc->H[k]);
}
/*
 * Absorb len bytes of message data into a BLAKE-384/512 context, compressing
 * each full 128-byte block and buffering any remainder in sc->buf.
 * NOTE: `buf`, `T0` and `T1` are names bound by the DECL_STATE64 /
 * COMPRESS64 macros and must keep those exact spellings.
 */
static void
blake64(sph_blake_big_context *sc, const void *data, size_t len)
{
	unsigned char *buf;
	size_t pos;
	DECL_STATE64

	buf = sc->buf;
	pos = sc->ptr;
	/* Fast path: the data fits in the buffer without filling it. */
	if (len < (sizeof sc->buf) - pos) {
		memcpy(buf + pos, data, len);
		sc->ptr = pos + len;
		return;
	}
	READ_STATE64(sc);
	while (len > 0) {
		size_t chunk = (sizeof sc->buf) - pos;

		if (chunk > len)
			chunk = len;
		memcpy(buf + pos, data, chunk);
		data = (const unsigned char *)data + chunk;
		pos += chunk;
		len -= chunk;
		if (pos == sizeof sc->buf) {
			/* Advance the 128-bit bit counter by one block (1024
			   bits), carrying into T1 on wrap. */
			if ((T0 = SPH_T64(T0 + 1024)) < 1024)
				T1 = SPH_T64(T1 + 1);
			COMPRESS64;
			pos = 0;
		}
	}
	WRITE_STATE64(sc);
	sc->ptr = pos;
}
/*
 * Absorb len bytes into the BLAKE-512 state, compressing each full 128-byte
 * block and buffering the remainder. Returns *this to allow chaining.
 * NOTE: `buf`, `T0` and `T1` are names bound by the DECL_STATE64 /
 * COMPRESS64 macros and must keep those exact spellings.
 */
CBLAKE512& CBLAKE512::Write(const unsigned char* data, size_t len)
{
	unsigned char *buf;
	size_t pos;
	DECL_STATE64

	buf = s.buf;
	pos = s.ptr;
	/* Fast path: data fits in the buffer without completing a block. */
	if (len < (sizeof s.buf) - pos) {
		memcpy(buf + pos, data, len);
		s.ptr = pos + len;
		return *this;
	}
	READ_STATE64(&s);
	while (len > 0) {
		size_t chunk = (sizeof s.buf) - pos;

		if (chunk > len)
			chunk = len;
		memcpy(buf + pos, data, chunk);
		data = (const unsigned char *)data + chunk;
		pos += chunk;
		len -= chunk;
		if (pos == sizeof s.buf) {
			/* Advance the 128-bit bit counter by 1024, carrying
			   into T1 on wrap. */
			if ((T0 = SPH_T64(T0 + 1024)) < 1024)
				T1 = SPH_T64(T1 + 1);
			COMPRESS64;
			pos = 0;
		}
	}
	WRITE_STATE64(&s);
	s.ptr = pos;
	return *this;
}
/*
 * Finalize a JH computation: build the padding (a '1' bit, zero fill, and
 * the 128-bit big-endian message bit length), absorb it, extract the digest
 * from the second half of the state, and reinitialize the context from iv.
 *
 * sc             hash context (reset via jh_init at the end)
 * ub, n          n (0..7) extra message bits, stored in the top bits of ub
 * dst            receives out_size_w32 * 4 bytes
 * out_size_w32   digest size in 32-bit words
 * iv             initial value used to reinitialize the context
 */
static void jh_close(sph_jh_context *sc, unsigned ub, unsigned n, void *dst, size_t out_size_w32, const void *iv) {
	unsigned z;
	unsigned char buf[128];
	size_t numz, u;
#if SPH_64
	sph_u64 l0, l1;
#else
	sph_u32 l0, l1, l2, l3;
#endif
	/* Padding byte: the n extra bits of ub, then a single '1' bit. */
	z = 0x80 >> n;
	buf[0] = ((ub & -z) | z) & 0xFF;
	/*
	 * Zero fill so that padding byte + zeros + 16-byte length lands on a
	 * 64-byte block boundary; an already-aligned message gets the minimal
	 * 64-byte padding block (47 zeros + 16-byte length + 1 pad byte).
	 */
	if (sc->ptr == 0 && n == 0) {
		numz = 47;
	} else {
		numz = 111 - sc->ptr;
	}
	memset(buf + 1, 0, numz);
#if SPH_64
	/* 128-bit bit count: block_count * 512 + buffered bytes * 8 + n. */
	l0 = SPH_T64(sc->block_count << 9) + (sc->ptr << 3) + n;
	l1 = SPH_T64(sc->block_count >> 55);
	sph_enc64be(buf + numz + 1, l1);
	sph_enc64be(buf + numz + 9, l0);
#else
	/* Same 128-bit count assembled from two 32-bit block counters. */
	l0 = SPH_T32(sc->block_count_low << 9) + (sc->ptr << 3) + n;
	l1 = SPH_T32(sc->block_count_low >> 23) + SPH_T32(sc->block_count_high << 9);
	l2 = SPH_T32(sc->block_count_high >> 23);
	l3 = 0;
	sph_enc32be(buf + numz + 1, l3);
	sph_enc32be(buf + numz + 5, l2);
	sph_enc32be(buf + numz + 9, l1);
	sph_enc32be(buf + numz + 13, l0);
#endif
	jh_core(sc, buf, numz + 17);
	/* Digest comes from the high half of the 1024-bit state. */
#if SPH_JH_64
	for (u = 0; u < 8; u ++)
		enc64e(buf + (u << 3), sc->H.wide[u + 8]);
#else
	for (u = 0; u < 16; u ++)
		enc32e(buf + (u << 2), sc->H.narrow[u + 16]);
#endif
	/* Keep only the last out_size_w32 words of the 64-byte tail. */
	memcpy(dst, buf + ((16 - out_size_w32) << 2), out_size_w32 << 2);
	jh_init(sc, iv);
}
/*
 * Finalize the BLAKE-512 computation: this is blake64_close specialized to
 * whole-byte input (n == 0, ub == 0) and a fixed 8-word (64-byte) output.
 * Writes the digest big-endian into hash and resets the context.
 */
void CBLAKE512::Finalize(unsigned char hash[OUTPUT_SIZE])
{
	union {
		unsigned char buf[128];
		sph_u64 dummy; /* forces 64-bit alignment for the *_aligned encoders */
	} u;
	size_t ptr, k;
	unsigned bit_len;
	unsigned z;
	sph_u64 th, tl;

	ptr = s.ptr;
	/* Message bits in the unflushed partial block (whole bytes only). */
	bit_len = ((unsigned)ptr << 3);
	/* Padding byte with n == 0: always 0x80 (a single '1' bit). */
	z = 0x80 >> 0;
	u.buf[ptr] = ((0 & -z) | z) & 0xFF;
	/* Snapshot the true total bit length for the length field. */
	tl = s.T0 + bit_len;
	th = s.T1;
	/*
	 * Pre-adjust the T0/T1 counter so that after Write() adds 1024 per
	 * compressed block, padding-only bits are not counted as message bits.
	 */
	if (ptr == 0) {
		/* Final block is all padding: counter must end up at zero. */
		s.T0 = SPH_C64(0xFFFFFFFFFFFFFC00);
		s.T1 = SPH_C64(0xFFFFFFFFFFFFFFFF);
	} else if (s.T0 == 0) {
		/* T0 wrapped to 0 on the previous block: borrow from T1. */
		s.T0 = SPH_C64(0xFFFFFFFFFFFFFC00) + bit_len;
		s.T1 = SPH_T64(s.T1 - 1);
	} else {
		s.T0 -= 1024 - bit_len;
	}
	if (bit_len <= 894) {
		/* Tail + padding + 16-byte length field fit in one block. */
		memset(u.buf + ptr + 1, 0, 111 - ptr);
		/* BLAKE-512 marker bit before the length field. */
		u.buf[111] |= 1;
		sph_enc64be_aligned(u.buf + 112, th);
		sph_enc64be_aligned(u.buf + 120, tl);
		Write(u.buf + ptr, 128 - ptr);
	} else {
		/* Two blocks needed: tail + padding first, then a length-only
		   block counted as zero message bits. */
		memset(u.buf + ptr + 1, 0, 127 - ptr);
		Write(u.buf + ptr, 128 - ptr);
		s.T0 = SPH_C64(0xFFFFFFFFFFFFFC00);
		s.T1 = SPH_C64(0xFFFFFFFFFFFFFFFF);
		memset(u.buf, 0, 112);
		u.buf[111] = 1;
		sph_enc64be_aligned(u.buf + 112, th);
		sph_enc64be_aligned(u.buf + 120, tl);
		Write(u.buf, 128);
	}
	/* Digest: all 8 chaining words, big-endian. */
	for (k = 0; k < 8; k ++)
		sph_enc64be(hash + (k << 3), s.H[k]);
	Reset();
}
/*
 * Finalize a BMW-224/256 computation: append the padding bit, encode the
 * total message bit length (little-endian) in the last 8 bytes of the
 * block, compress, then run the final compression against the fixed key
 * final_s and emit the trailing out_size_w32 words as the digest.
 *
 * sc             hash context (consumed; not reinitialized here)
 * ub, n          n (0..7) extra message bits, stored in the top bits of ub
 * dst            receives out_size_w32 * 4 bytes, little-endian
 * out_size_w32   digest size in 32-bit words (7 for BMW-224, 8 for BMW-256)
 */
static void
bmw32_close(sph_bmw_small_context *sc, unsigned ub, unsigned n,
	void *dst, size_t out_size_w32)
{
	unsigned char *data, *dp;
	size_t pos, i, j;
	unsigned pad;
	sph_u32 tmp1[16], tmp2[16], *state;

	data = sc->buf;
	pos = sc->ptr;
	/* Padding byte: the n extra bits of ub followed by a lone '1' bit. */
	pad = 0x80 >> n;
	data[pos ++] = ((ub & -pad) | pad) & 0xFF;
	state = sc->H;
	if (pos > (sizeof sc->buf) - 8) {
		/* No room left for the 64-bit length: flush an extra block. */
		memset(data + pos, 0, (sizeof sc->buf) - pos);
		compress_small(data, state, tmp1);
		pos = 0;
		state = tmp1;
	}
	memset(data + pos, 0, (sizeof sc->buf) - 8 - pos);
#if SPH_64
	sph_enc64le_aligned(data + (sizeof sc->buf) - 8,
		SPH_T64(sc->bit_count + n));
#else
	sph_enc32le_aligned(data + (sizeof sc->buf) - 8,
		sc->bit_count_low + n);
	sph_enc32le_aligned(data + (sizeof sc->buf) - 4,
		SPH_T32(sc->bit_count_high));
#endif
	compress_small(data, state, tmp2);
	/* Final round: re-encode the state and compress with the constant key. */
	for (i = 0; i < 16; i ++)
		sph_enc32le_aligned(data + 4 * i, tmp2[i]);
	compress_small(data, final_s, tmp1);
	dp = dst;
	/* The digest is the last out_size_w32 state words, little-endian. */
	for (i = 0, j = 16 - out_size_w32; i < out_size_w32; i ++, j ++)
		sph_enc32le(dp + 4 * i, tmp1[j]);
}