void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr, size_t blk_cnt, size_t byte_cnt_add) { /* do it in C */ enum { WCNT = SKEIN_256_STATE_WORDS }; #undef RCNT #define RCNT (SKEIN_256_ROUNDS_TOTAL/8) #ifdef SKEIN_LOOP /* configure how much to unroll the loop */ #define SKEIN_UNROLL_256 (((SKEIN_LOOP)/100)%10) #else #define SKEIN_UNROLL_256 (0) #endif #if SKEIN_UNROLL_256 #if (RCNT % SKEIN_UNROLL_256) #error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */ #endif size_t r; u64 kw[WCNT+4+RCNT*2]; /* key schedule: chaining vars + tweak + "rot"*/ #else u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */ #endif u64 X0, X1, X2, X3; /* local copy of context vars, for speed */ u64 w[WCNT]; /* local copy of input block */ #ifdef SKEIN_DEBUG const u64 *X_ptr[4]; /* use for debugging (help cc put Xn in regs) */ X_ptr[0] = &X0; X_ptr[1] = &X1; X_ptr[2] = &X2; X_ptr[3] = &X3; #endif skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */ ts[0] = ctx->h.tweak[0]; ts[1] = ctx->h.tweak[1]; do { /* * this implementation only supports 2**64 input bytes * (no carry out here) */ ts[0] += byte_cnt_add; /* update processed length */ /* precompute the key schedule for this block */ ks[0] = ctx->x[0]; ks[1] = ctx->x[1]; ks[2] = ctx->x[2]; ks[3] = ctx->x[3]; ks[4] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ SKEIN_KS_PARITY; ts[2] = ts[0] ^ ts[1]; /* get input block in little-endian format */ skein_get64_lsb_first(w, blk_ptr, WCNT); debug_save_tweak(ctx); skein_show_block(BLK_BITS, &ctx->h, ctx->x, blk_ptr, w, ks, ts); X0 = w[0] + ks[0]; /* do the first full key injection */ X1 = w[1] + ks[1] + ts[0]; X2 = w[2] + ks[2] + ts[1]; X3 = w[3] + ks[3]; /* show starting state values */ skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL, x_ptr); blk_ptr += SKEIN_256_BLOCK_BYTES; /* run the rounds */ #define ROUND256(p0, p1, p2, p3, ROT, r_num) \ do { \ X##p0 += X##p1; X##p1 = rotl_64(X##p1, ROT##_0); X##p1 ^= X##p0; \ X##p2 += X##p3; X##p3 = 
rotl_64(X##p3, ROT##_1); X##p3 ^= X##p2; \ } while (0) #if SKEIN_UNROLL_256 == 0 #define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \ do { \ ROUND256(p0, p1, p2, p3, ROT, r_num); \ skein_show_r_ptr(BLK_BITS, &ctx->h, r_num, X_ptr); \ } while (0) #define I256(R) \ do { \ /* inject the key schedule value */ \ X0 += ks[((R)+1) % 5]; \ X1 += ks[((R)+2) % 5] + ts[((R)+1) % 3]; \ X2 += ks[((R)+3) % 5] + ts[((R)+2) % 3]; \ X3 += ks[((R)+4) % 5] + (R)+1; \ skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \ } while (0) #else /* looping version */ #define R256(p0, p1, p2, p3, ROT, r_num) \ do { \ ROUND256(p0, p1, p2, p3, ROT, r_num); \ skein_show_r_ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + r_num, X_ptr); \ } while (0) #define I256(R) \ do { \ /* inject the key schedule value */ \ X0 += ks[r+(R)+0]; \ X1 += ks[r+(R)+1] + ts[r+(R)+0]; \ X2 += ks[r+(R)+2] + ts[r+(R)+1]; \ X3 += ks[r+(R)+3] + r+(R); \ /* rotate key schedule */ \ ks[r + (R) + 4] = ks[r + (R) - 1]; \ ts[r + (R) + 2] = ts[r + (R) - 1]; \ skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \ } while (0) for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_256) #endif { #define R256_8_ROUNDS(R) \ do { \ R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \ R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \ R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \ R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \ I256(2 * (R)); \ R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \ R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \ R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \ R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \ I256(2 * (R) + 1); \ } while (0) R256_8_ROUNDS(0); #define R256_UNROLL_R(NN) \ ((SKEIN_UNROLL_256 == 0 && \ SKEIN_256_ROUNDS_TOTAL/8 > (NN)) || \ (SKEIN_UNROLL_256 > (NN))) #if R256_UNROLL_R(1) R256_8_ROUNDS(1); #endif #if R256_UNROLL_R(2) R256_8_ROUNDS(2); #endif #if R256_UNROLL_R(3) R256_8_ROUNDS(3); #endif #if R256_UNROLL_R(4) R256_8_ROUNDS(4); #endif #if R256_UNROLL_R(5) R256_8_ROUNDS(5); #endif #if R256_UNROLL_R(6) R256_8_ROUNDS(6); 
#endif #if R256_UNROLL_R(7) R256_8_ROUNDS(7); #endif #if R256_UNROLL_R(8) R256_8_ROUNDS(8); #endif #if R256_UNROLL_R(9) R256_8_ROUNDS(9); #endif #if R256_UNROLL_R(10) R256_8_ROUNDS(10); #endif #if R256_UNROLL_R(11) R256_8_ROUNDS(11); #endif #if R256_UNROLL_R(12) R256_8_ROUNDS(12); #endif #if R256_UNROLL_R(13) R256_8_ROUNDS(13); #endif #if R256_UNROLL_R(14) R256_8_ROUNDS(14); #endif #if (SKEIN_UNROLL_256 > 14) #error "need more unrolling in skein_256_process_block" #endif } /* do the final "feedforward" xor, update context chaining */ ctx->x[0] = X0 ^ w[0]; ctx->x[1] = X1 ^ w[1]; ctx->x[2] = X2 ^ w[2]; ctx->x[3] = X3 ^ w[3]; skein_show_round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->x); ts[1] &= ~SKEIN_T1_FLAG_FIRST; } while (--blk_cnt); ctx->h.tweak[0] = ts[0]; ctx->h.tweak[1] = ts[1]; }
/*
 * Process one or more full 256-bit (4-word) message blocks through the
 * Skein-256 compression function, updating the chaining state ctx->x[]
 * and the tweak ctx->h.tweak[].
 *
 * @ctx:          hash context (chaining variables and tweak)
 * @blk_ptr:      input message bytes, blk_cnt * SKEIN_256_BLOCK_BYTES long
 * @blk_cnt:      number of blocks to process; must be non-zero
 * @byte_cnt_add: bytes to add to the tweak's processed-length word per block
 *
 * NOTE(review): this variant relies on RCNT, SKEIN_UNROLL_256,
 * R256_8_ROUNDS and R256_UNROLL_R being #defined earlier in the file,
 * and on ks[]/ts[] presumably being macros aliasing regions of the
 * local kw[] key-schedule array — verify against those definitions.
 */
void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
			     size_t blk_cnt, size_t byte_cnt_add)
{ /* do it in C */
	enum {
		WCNT = SKEIN_256_STATE_WORDS
	};
	size_t r;
#if SKEIN_UNROLL_256
	/* key schedule: chaining vars + tweak + "rot"*/
	u64 kw[WCNT+4+RCNT*2];
#else
	/* key schedule words : chaining vars + tweak */
	u64 kw[WCNT+4];
#endif
	u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
	u64 w[WCNT]; /* local copy of input block */
#ifdef SKEIN_DEBUG
	const u64 *X_ptr[4]; /* use for debugging (help cc put Xn in regs) */

	X_ptr[0] = &X0;
	X_ptr[1] = &X1;
	X_ptr[2] = &X2;
	X_ptr[3] = &X3;
#endif
	skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
	/* work on a local copy of the tweak; written back at the end */
	ts[0] = ctx->h.tweak[0];
	ts[1] = ctx->h.tweak[1];
	do {
		/*
		 * this implementation only supports 2**64 input bytes
		 * (no carry out here)
		 */
		ts[0] += byte_cnt_add; /* update processed length */
		/* precompute the key schedule for this block */
		ks[0] = ctx->x[0];
		ks[1] = ctx->x[1];
		ks[2] = ctx->x[2];
		ks[3] = ctx->x[3];
		/* 5th key word is the XOR-parity of the other four */
		ks[4] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ SKEIN_KS_PARITY;
		ts[2] = ts[0] ^ ts[1];
		/* get input block in little-endian format */
		skein_get64_lsb_first(w, blk_ptr, WCNT);
		debug_save_tweak(ctx);
		/* do the first full key injection */
		X0 = w[0] + ks[0];
		X1 = w[1] + ks[1] + ts[0];
		X2 = w[2] + ks[2] + ts[1];
		X3 = w[3] + ks[3];
		blk_ptr += SKEIN_256_BLOCK_BYTES;
		/*
		 * run the rounds: when SKEIN_UNROLL_256 == 0 the loop body
		 * executes once and the #if'd R256_8_ROUNDS calls below
		 * expand to the full round count; otherwise it iterates,
		 * doing 2*SKEIN_UNROLL_256 key injections per pass
		 */
		for (r = 1;
		     r < (SKEIN_UNROLL_256 ? 2 * RCNT : 2);
		     r += (SKEIN_UNROLL_256 ? 2 * SKEIN_UNROLL_256 : 1)) {
			R256_8_ROUNDS(0);
#if R256_UNROLL_R(1)
			R256_8_ROUNDS(1);
#endif
#if R256_UNROLL_R(2)
			R256_8_ROUNDS(2);
#endif
#if R256_UNROLL_R(3)
			R256_8_ROUNDS(3);
#endif
#if R256_UNROLL_R(4)
			R256_8_ROUNDS(4);
#endif
#if R256_UNROLL_R(5)
			R256_8_ROUNDS(5);
#endif
#if R256_UNROLL_R(6)
			R256_8_ROUNDS(6);
#endif
#if R256_UNROLL_R(7)
			R256_8_ROUNDS(7);
#endif
#if R256_UNROLL_R(8)
			R256_8_ROUNDS(8);
#endif
#if R256_UNROLL_R(9)
			R256_8_ROUNDS(9);
#endif
#if R256_UNROLL_R(10)
			R256_8_ROUNDS(10);
#endif
#if R256_UNROLL_R(11)
			R256_8_ROUNDS(11);
#endif
#if R256_UNROLL_R(12)
			R256_8_ROUNDS(12);
#endif
#if R256_UNROLL_R(13)
			R256_8_ROUNDS(13);
#endif
#if R256_UNROLL_R(14)
			R256_8_ROUNDS(14);
#endif
		}
		/* do the final "feedforward" xor, update context chaining */
		ctx->x[0] = X0 ^ w[0];
		ctx->x[1] = X1 ^ w[1];
		ctx->x[2] = X2 ^ w[2];
		ctx->x[3] = X3 ^ w[3];
		/* clear the FIRST-block flag after the first iteration */
		ts[1] &= ~SKEIN_T1_FLAG_FIRST;
	} while (--blk_cnt);
	ctx->h.tweak[0] = ts[0];
	ctx->h.tweak[1] = ts[1];
}