Example no. 1
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	    int src_cnt, size_t len, enum async_tx_flags flags,
	    dma_async_tx_callback cb_fn, void *cb_param)
{
	int i;
	int xor_src_cnt;
	int src_off = 0;
	void *dest_buf;
	void **srcs = (void **) src_list;

	/* reuse the 'src_list' array to convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		srcs[i] = page_address(src_list[i]) + offset;

	/* set destination address */
	dest_buf = page_address(dest) + offset;

	if (flags & ASYNC_TX_XOR_ZERO_DST)
		memset(dest_buf, 0, len);

	while (src_cnt > 0) {
		/* process up to 'MAX_XOR_BLOCKS' sources */
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

		/* drop completed sources */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}

	async_tx_sync_epilog(cb_fn, cb_param);
}
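The loop above hands each batch of at most MAX_XOR_BLOCKS sources to the kernel's generic xor_blocks() helper, which XORs every source buffer into dest in place (hence the optional memset() when ASYNC_TX_XOR_ZERO_DST is set). A minimal portable sketch of that contract, assuming accumulate-into-dest semantics; the name xor_blocks_ref is ours, not the kernel's:

#include <stddef.h>

/* Hypothetical portable stand-in for the kernel helper: XOR each of the
 * 'src_cnt' equally sized source buffers into 'dest' in place. */
static void xor_blocks_ref(unsigned int src_cnt, size_t len,
			   void *dest, void **srcs)
{
	unsigned char *d = dest;
	unsigned int i;
	size_t j;

	for (i = 0; i < src_cnt; i++) {
		const unsigned char *s = srcs[i];

		for (j = 0; j < len; j++)
			d[j] ^= s[j];
	}
}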
Example no. 2
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	    int src_cnt, size_t len, struct async_submit_ctl *submit)
{
	int i;
	int xor_src_cnt = 0;
	int src_off = 0;
	void *dest_buf;
	void **srcs;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) src_list;

	/* convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		if (src_list[i])
			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
	src_cnt = xor_src_cnt;
	/* set destination address */
	dest_buf = page_address(dest) + offset;

	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
		memset(dest_buf, 0, len);

	while (src_cnt > 0) {
		/* process up to 'MAX_XOR_BLOCKS' sources */
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

		/* drop completed sources */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}

	async_tx_sync_epilog(submit);
}
Example no. 3
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	unsigned int src_cnt, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	void *_dest;
	int i;

	pr_debug("%s: len: %zu\n", __func__, len);

	/* reuse the 'src_list' array to convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		src_list[i] = (struct page *)
			(page_address(src_list[i]) + offset);

	/* set destination address */
	_dest = page_address(dest) + offset;

	if (flags & ASYNC_TX_XOR_ZERO_DST)
		memset(_dest, 0, len);

	xor_blocks(src_cnt, len, _dest,
		(void **) src_list);

	async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}
Example no. 4
/*
 *      Calculate H(i+1) = Hash(Hi,Mi)
 *      Where H and M are 32 bytes long
 */
static int hash_step(gost_ctx * c, byte * H, const byte * M)
{
    byte U[32], W[32], V[32], S[32], Key[32];
    int i;
    /* Compute first key */
    xor_blocks(W, H, M, 32);
    swap_bytes(W, Key);
    /* Encrypt first 8 bytes of H with first key */
    gost_enc_with_key(c, Key, H, S);
    /* Compute second key */
    circle_xor8(H, U);
    circle_xor8(M, V);
    circle_xor8(V, V);
    xor_blocks(W, U, V, 32);
    swap_bytes(W, Key);
    /* encrypt second 8 bytes of H with second key */
    gost_enc_with_key(c, Key, H + 8, S + 8);
    /* compute third key */
    circle_xor8(U, U);
    U[31] = ~U[31];
    U[29] = ~U[29];
    U[28] = ~U[28];
    U[24] = ~U[24];
    U[23] = ~U[23];
    U[20] = ~U[20];
    U[18] = ~U[18];
    U[17] = ~U[17];
    U[14] = ~U[14];
    U[12] = ~U[12];
    U[10] = ~U[10];
    U[8] = ~U[8];
    U[7] = ~U[7];
    U[5] = ~U[5];
    U[3] = ~U[3];
    U[1] = ~U[1];
    circle_xor8(V, V);
    circle_xor8(V, V);
    xor_blocks(W, U, V, 32);
    swap_bytes(W, Key);
    /* encrypt third 8 bytes of H with third key */
    gost_enc_with_key(c, Key, H + 16, S + 16);
    /* Compute fourth key */
    circle_xor8(U, U);
    circle_xor8(V, V);
    circle_xor8(V, V);
    xor_blocks(W, U, V, 32);
    swap_bytes(W, Key);
    /* Encrypt last 8 bytes with fourth key */
    gost_enc_with_key(c, Key, H + 24, S + 24);
    for (i = 0; i < 12; i++)
        transform_3(S);
    xor_blocks(S, S, M, 32);
    transform_3(S);
    xor_blocks(S, S, H, 32);
    for (i = 0; i < 61; i++)
        transform_3(S);
    memcpy(H, S, 32);
    return 1;
}
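The helpers used by hash_step() are not shown here. From the call sites, xor_blocks(dst, a, b, n) is a plain three-operand byte-wise XOR (dst may alias an input, as in xor_blocks(S, S, M, 32)), while circle_xor8() and swap_bytes() appear to implement the word-rotation and key-reordering transforms of GOST R 34.11-94. A minimal sketch of the XOR helper, consistent with its use above:

#include <stddef.h>

/* Minimal sketch inferred from the calls above: dst[i] = a[i] ^ b[i]. */
static void xor_blocks(unsigned char *dst, const unsigned char *a,
                       const unsigned char *b, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++)
        dst[i] = a[i] ^ b[i];
}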
Example no. 5
void break_protocol_1()
{
	uint16_t victim_id = 1234;
	uint16_t my_id;
	uint8_t *message;
	size_t len;
	uint8_t iv[AES_BLOCK_SIZE], fake_iv[AES_BLOCK_SIZE];
	uint8_t mac[AES_BLOCK_SIZE];
	uint8_t modified_block[AES_BLOCK_SIZE + 1], delta_block[AES_BLOCK_SIZE + 1];	/* For the null char added in sprintf. Just ignore it. */

	my_id = client_get_id();
	message = client_generate_request_proto1(&len, iv, mac, my_id, 1000000);

	snprintf(modified_block, sizeof modified_block, "from=#{%05hu}&to", victim_id);
	xor_blocks(delta_block, modified_block, message, AES_BLOCK_SIZE);
	xor_blocks(fake_iv, iv, delta_block, AES_BLOCK_SIZE);
	memcpy(message, modified_block, AES_BLOCK_SIZE);

	printf("Protocol 1 server response:\n");
	server_process_request_proto1(message, len, fake_iv, mac);

	free(message);
}
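The attack above appears to exploit a scheme whose first block input is iv ^ first_block: substituting a chosen first block and shifting the IV by the same delta leaves iv ^ block unchanged, so the tag still verifies. A tiny self-contained check of that XOR identity (values are illustrative, not taken from the challenge code):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BLOCK 16

int main(void)
{
	uint8_t iv[BLOCK], orig[BLOCK], forged[BLOCK];
	uint8_t delta[BLOCK], fake_iv[BLOCK];
	uint8_t in_orig[BLOCK], in_forged[BLOCK];
	int i;

	memcpy(iv, "0123456789abcdef", BLOCK);           /* illustrative values */
	memcpy(orig, "from=#{00042}&to", BLOCK);
	memcpy(forged, "from=#{01234}&to", BLOCK);

	for (i = 0; i < BLOCK; i++) {
		delta[i] = forged[i] ^ orig[i];          /* delta_block                  */
		fake_iv[i] = iv[i] ^ delta[i];           /* fake_iv                      */
		in_orig[i] = iv[i] ^ orig[i];            /* what the MAC saw originally  */
		in_forged[i] = fake_iv[i] ^ forged[i];   /* what it sees after the swap  */
	}
	assert(memcmp(in_orig, in_forged, BLOCK) == 0);
	return 0;
}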
Example no. 6
void break_protocol_2()
{
	uint16_t target_id = 1234;
	uint16_t my_id;
	uint8_t *target_message, *my_message;
	size_t target_len, my_len;
	uint8_t target_mac[AES_BLOCK_SIZE], my_mac[AES_BLOCK_SIZE];
	uint8_t *extended_message;

	/* This part is more ambiguous. I don't think it can be solved
	 * without some hand-waving about the server's parsing.
	 * We will first sign a valid transaction from our account
	 * to our own account for 1M spacebucks. We then obtain
	 * a valid message from the target (presumably obtained by
	 * sniffing network traffic), and concatenate our own message.
	 * In order for the MAC to remain the same, the first block of
	 * our message will be xored with the mac of the target message.
	 * This "simulates" an IV of 0 for our message, keeping the same
	 * MAC as a result.
	 *
	 * We have (message, mac) pairs (M, t) and (M', t'), and we generate
	 * the pair (M || (M'[0] ^ t) || M'[1..], t').
	 *
	 * Of course, this means there will be junk between the end of
	 * the target's transaction list and the beginning of ours,
	 * so a correct parsing implementation would reject that as invalid
	 * request. For the purpose of the challenge, we will assume the
	 * parsing implementation is very dumb...
	 * 
	 * This still has a slight chance of failing if a semicolon appears as
	 * part of the junk data, because the parser will look for the
	 * transactions based on the semicolons. */
	my_id = client_get_id();
	my_message = client_generate_request_proto2(&my_len, my_mac, my_id, 1000000);
	target_message = client_capture_request_proto2(&target_len, target_mac, target_id);

	extended_message = malloc(target_len + my_len);
	memcpy(extended_message, target_message, target_len);
	xor_blocks(extended_message + target_len, my_message, target_mac, AES_BLOCK_SIZE);
	memcpy(extended_message + target_len + AES_BLOCK_SIZE, my_message + AES_BLOCK_SIZE, my_len - AES_BLOCK_SIZE);

	printf("Protocol 2 server response:\n");
	server_process_request_proto2(extended_message, target_len + my_len, my_mac);

	free(extended_message);
	free(target_message);
	free(my_message);
}
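The identity described in the comment holds for any deterministic block cipher under a fixed key: after CBC-MACing M with a zero IV the chaining value is t, so the next input block (M'[0] ^ t) makes the cipher input E(t ^ M'[0] ^ t) = E(M'[0]), and from there the chain is exactly the one that produced t'. A toy, self-contained check of that algebra (the "cipher" here is a made-up stand-in, not AES):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BLK 16

/* Toy stand-in for a block cipher: any deterministic keyed function of the
 * block is enough to demonstrate the algebra. */
static void toy_encrypt(const uint8_t key[BLK], const uint8_t in[BLK], uint8_t out[BLK])
{
	int i;
	for (i = 0; i < BLK; i++)
		out[i] = (uint8_t)((in[i] ^ key[i]) + key[(i + 1) % BLK]);
}

/* CBC-MAC with a zero IV over nblocks full blocks. */
static void cbc_mac(const uint8_t key[BLK], const uint8_t *msg, size_t nblocks, uint8_t tag[BLK])
{
	uint8_t state[BLK] = {0};
	uint8_t in[BLK];
	size_t b;
	int i;

	for (b = 0; b < nblocks; b++) {
		for (i = 0; i < BLK; i++)
			in[i] = state[i] ^ msg[b * BLK + i];
		toy_encrypt(key, in, state);
	}
	memcpy(tag, state, BLK);
}

int main(void)
{
	uint8_t key[BLK] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
	uint8_t m1[2 * BLK], m2[2 * BLK];
	uint8_t t1[BLK], t2[BLK], t3[BLK];
	uint8_t forged[4 * BLK];
	int i;

	memcpy(m1, "target's message goes here....!!", 2 * BLK);
	memcpy(m2, "attacker message, 2 blocks long!", 2 * BLK);

	cbc_mac(key, m1, 2, t1);   /* (M, t)   */
	cbc_mac(key, m2, 2, t2);   /* (M', t') */

	/* Forge M || (M'[0] ^ t) || M'[1..] and check it verifies under t'. */
	memcpy(forged, m1, 2 * BLK);
	for (i = 0; i < BLK; i++)
		forged[2 * BLK + i] = m2[i] ^ t1[i];
	memcpy(forged + 3 * BLK, m2 + BLK, BLK);

	cbc_mac(key, forged, 4, t3);
	assert(memcmp(t3, t2, BLK) == 0);
	return 0;
}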
Example no. 7
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	    int src_cnt, size_t len, struct async_submit_ctl *submit)
{
	int i;
	int xor_src_cnt = 0;
	int src_off = 0;
	void *dest_buf;
	void **srcs;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) src_list;

	/* convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		if (src_list[i])
			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
	src_cnt = xor_src_cnt;
	/* set destination address */
	dest_buf = page_address(dest) + offset;

	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
		memset(dest_buf, 0, len);

	while (src_cnt > 0) {
		/* process up to 'MAX_XOR_BLOCKS' sources */
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

		/* drop completed sources */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}

	async_tx_sync_epilog(submit);
}
Example no. 8
void cn_slow_hash(const void *data, size_t length, char *hash) {
  uint8_t long_state[MEMORY];
  union cn_slow_hash_state state;
  uint8_t text[INIT_SIZE_BYTE];
  uint8_t a[AES_BLOCK_SIZE];
  uint8_t b[AES_BLOCK_SIZE];
  uint8_t c[AES_BLOCK_SIZE];
  uint8_t d[AES_BLOCK_SIZE];
  size_t i, j;
  uint8_t aes_key[AES_KEY_SIZE];
  oaes_ctx *aes_ctx;

  hash_process(&state.hs, data, length);
  memcpy(text, state.init, INIT_SIZE_BYTE);
  memcpy(aes_key, state.hs.b, AES_KEY_SIZE);
  aes_ctx = (oaes_ctx *) oaes_alloc();

  oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE);
  for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
    for (j = 0; j < INIT_SIZE_BLK; j++) {
      aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
	}
    memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
  }

  for (i = 0; i < 16; i++) {
    a[i] = state.k[     i] ^ state.k[32 + i];
    b[i] = state.k[16 + i] ^ state.k[48 + i];
  }

  for (i = 0; i < ITER / 2; i++) {
    /* Dependency chain: address -> read value ------+
     * written value <-+ hard function (AES or MUL) <+
     * next address  <-+
     */
    /* Iteration 1 */
    j = e2i(a, MEMORY / AES_BLOCK_SIZE);
    copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
    aesb_single_round(c, c, a);
    xor_blocks(b, c);
    swap_blocks(b, c);
    copy_block(&long_state[j * AES_BLOCK_SIZE], c);
    //assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE));
    swap_blocks(a, b);
    /* Iteration 2 */
    j = e2i(a, MEMORY / AES_BLOCK_SIZE);
    copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
    mul(a, c, d);
    sum_half_blocks(b, d);
    swap_blocks(b, c);
    xor_blocks(b, c);
    copy_block(&long_state[j * AES_BLOCK_SIZE], c);
    //assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE));
    swap_blocks(a, b);
  }

  memcpy(text, state.init, INIT_SIZE_BYTE);
  oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
  for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
    for (j = 0; j < INIT_SIZE_BLK; j++) {
      xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
      aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
    }
  }
  memcpy(state.init, text, INIT_SIZE_BYTE);
  hash_permutation(&state.hs);
  extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
  oaes_free((OAES_CTX **) &aes_ctx);
}
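The 16-byte block helpers used throughout cn_slow_hash() (xor_blocks, copy_block, swap_blocks, sum_half_blocks) are not shown above. Plausible portable definitions, inferred from how they are called here (xor_blocks(a, b) XORs b into a in place; sum_half_blocks adds the two 64-bit halves of b into a), would be:

#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* Plausible definitions inferred from the call sites above (not shown in this listing). */
static void xor_blocks(uint8_t *a, const uint8_t *b)       /* a ^= b, 16 bytes */
{
  size_t i;
  for (i = 0; i < AES_BLOCK_SIZE; i++)
    a[i] ^= b[i];
}

static void copy_block(uint8_t *dst, const uint8_t *src)   /* dst = src        */
{
  memcpy(dst, src, AES_BLOCK_SIZE);
}

static void swap_blocks(uint8_t *a, uint8_t *b)            /* exchange a and b */
{
  uint8_t t[AES_BLOCK_SIZE];
  memcpy(t, a, AES_BLOCK_SIZE);
  memcpy(a, b, AES_BLOCK_SIZE);
  memcpy(b, t, AES_BLOCK_SIZE);
}

static void sum_half_blocks(uint8_t *a, const uint8_t *b)  /* two 64-bit adds  */
{
  uint64_t a0, a1, b0, b1;
  memcpy(&a0, a, 8);      memcpy(&a1, a + 8, 8);
  memcpy(&b0, b, 8);      memcpy(&b1, b + 8, 8);
  a0 += b0;               a1 += b1;
  memcpy(a, &a0, 8);      memcpy(a + 8, &a1, 8);
}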
Example no. 9
void cryptonight_hash_ctx_aes_ni(void* output, const void* input, size_t len, struct cryptonight_ctx* ctx) {
	hash_process(&ctx->state.hs, (const uint8_t*) input, len);
	ctx->aes_ctx = (oaes_ctx*) oaes_alloc();
	size_t i, j;
	memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);

	oaes_key_import_data(ctx->aes_ctx, ctx->state.hs.b, AES_KEY_SIZE);
	for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 0], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 1], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 2], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 3], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 4], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 5], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 6], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 7], ctx->aes_ctx->key->exp_data);
		memcpy(&ctx->long_state[i], ctx->text, INIT_SIZE_BYTE);
	}

	xor_blocks_dst(&ctx->state.k[0], &ctx->state.k[32], ctx->a);
	xor_blocks_dst(&ctx->state.k[16], &ctx->state.k[48], ctx->b);

	for (i = 0; likely(i < ITER / 4); ++i) {
		/* Dependency chain: address -> read value ------+
		 * written value <-+ hard function (AES or MUL) <+
		 * next address  <-+
		 */
		/* Iteration 1 */
		j = e2i(ctx->a);
		fast_aesb_single_round(&ctx->long_state[j], ctx->c, ctx->a);
		xor_blocks_dst(ctx->c, ctx->b, &ctx->long_state[j]);
		/* Iteration 2 */
		mul_sum_xor_dst(ctx->c, ctx->a, &ctx->long_state[e2i(ctx->c)]);
		/* Iteration 3 */
		j = e2i(ctx->a);
		fast_aesb_single_round(&ctx->long_state[j], ctx->b, ctx->a);
		xor_blocks_dst(ctx->b, ctx->c, &ctx->long_state[j]);
		/* Iteration 4 */
		mul_sum_xor_dst(ctx->b, ctx->a, &ctx->long_state[e2i(ctx->b)]);
	}

	memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
	oaes_key_import_data(ctx->aes_ctx, &ctx->state.hs.b[32], AES_KEY_SIZE);
	for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
		xor_blocks(&ctx->text[0 * AES_BLOCK_SIZE], &ctx->long_state[i + 0 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[0 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[1 * AES_BLOCK_SIZE], &ctx->long_state[i + 1 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[1 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[2 * AES_BLOCK_SIZE], &ctx->long_state[i + 2 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[2 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[3 * AES_BLOCK_SIZE], &ctx->long_state[i + 3 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[3 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[4 * AES_BLOCK_SIZE], &ctx->long_state[i + 4 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[4 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[5 * AES_BLOCK_SIZE], &ctx->long_state[i + 5 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[5 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[6 * AES_BLOCK_SIZE], &ctx->long_state[i + 6 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[6 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[7 * AES_BLOCK_SIZE], &ctx->long_state[i + 7 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[7 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
	}
	memcpy(ctx->state.init, ctx->text, INIT_SIZE_BYTE);
	hash_permutation(&ctx->state.hs);
	/*memcpy(hash, &state, 32);*/
	extra_hashes[ctx->state.hs.b[0] & 3](&ctx->state, 200, output);
	oaes_free((OAES_CTX **) &ctx->aes_ctx);
}
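The AES-NI variant additionally uses a three-operand form, xor_blocks_dst(a, b, dst). From its use above (deriving ctx->a and ctx->b from the hashed state, and writing c ^ b back into the scratchpad), it presumably writes a XOR b to dst without modifying the inputs; a sketch under that assumption:

#include <stdint.h>
#include <stddef.h>

#define AES_BLOCK_SIZE 16

/* Sketch inferred from the call sites above: dst = a ^ b over one block. */
static void xor_blocks_dst(const uint8_t *a, const uint8_t *b, uint8_t *dst)
{
	size_t i;

	for (i = 0; i < AES_BLOCK_SIZE; i++)
		dst[i] = a[i] ^ b[i];
}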
Example no. 10
void decryptMessage(unsigned char* messageIn, unsigned char* decryptedMessage)
{
   unsigned char buffer[16];
   int i, j;
   unsigned char tmp;
   uint32_t key_schedule[11][4];
   int mode = messageIn[12];  // 0,1,2,3
   printf("mode = %02x\n", mode);
   generate_key_schedule(initial_session_key, key_schedule);
      
   // For M0-M6 we follow the same pattern
   for (i = 0; i < 8; i++)
   {      
      // First, copy in the nth block (we must start with the last one)
      for (j = 0; j < 16; j++)
      {
         if (mode == 3)
            buffer[j] = messageIn[(0x80-0x10*i)+j];
         else if (mode == 2 || mode == 1 || mode == 0)
            buffer[j] = messageIn[(0x10*(i+1))+j];   
      }
      // do this permutation and update 9 times. Could this be cycle(), or the reverse of cycle()?
      for (j = 0; j < 9; j++)
      {
         int base = 0x80 - 0x10*j;
         //print_block("About to cycle. Buffer is currently: ", buffer);
         buffer[0x0] = message_table_index(base+0x0)[buffer[0x0]] ^ message_key[mode][base+0x0];
         buffer[0x4] = message_table_index(base+0x4)[buffer[0x4]] ^ message_key[mode][base+0x4];
         buffer[0x8] = message_table_index(base+0x8)[buffer[0x8]] ^ message_key[mode][base+0x8];
         buffer[0xc] = message_table_index(base+0xc)[buffer[0xc]] ^ message_key[mode][base+0xc];

         tmp = buffer[0x0d];
         buffer[0xd] = message_table_index(base+0xd)[buffer[0x9]] ^ message_key[mode][base+0xd];
         buffer[0x9] = message_table_index(base+0x9)[buffer[0x5]] ^ message_key[mode][base+0x9];
         buffer[0x5] = message_table_index(base+0x5)[buffer[0x1]] ^ message_key[mode][base+0x5];
         buffer[0x1] = message_table_index(base+0x1)[tmp]         ^ message_key[mode][base+0x1];

         tmp = buffer[0x02];
         buffer[0x2] = message_table_index(base+0x2)[buffer[0xa]] ^ message_key[mode][base+0x2];
         buffer[0xa] = message_table_index(base+0xa)[tmp]         ^ message_key[mode][base+0xa];
         tmp = buffer[0x06];
         buffer[0x6] = message_table_index(base+0x6)[buffer[0xe]] ^ message_key[mode][base+0x6];
         buffer[0xe] = message_table_index(base+0xe)[tmp]         ^ message_key[mode][base+0xe];

         tmp = buffer[0x3];
         buffer[0x3] = message_table_index(base+0x3)[buffer[0x7]] ^ message_key[mode][base+0x3];
         buffer[0x7] = message_table_index(base+0x7)[buffer[0xb]] ^ message_key[mode][base+0x7];
         buffer[0xb] = message_table_index(base+0xb)[buffer[0xf]] ^ message_key[mode][base+0xb];
         buffer[0xf] = message_table_index(base+0xf)[tmp]         ^ message_key[mode][base+0xf];

         // Now we must replace the entire buffer with 4 words that we read and xor together
         uint32_t word;
         uint32_t* block = (uint32_t*)buffer;
         
         block[0] = table_s9[0x000 + buffer[0x0]] ^ 
                    table_s9[0x100 + buffer[0x1]] ^ 
                    table_s9[0x200 + buffer[0x2]] ^ 
                    table_s9[0x300 + buffer[0x3]];
         block[1] = table_s9[0x000 + buffer[0x4]] ^ 
                    table_s9[0x100 + buffer[0x5]] ^ 
                    table_s9[0x200 + buffer[0x6]] ^ 
                    table_s9[0x300 + buffer[0x7]];
         block[2] = table_s9[0x000 + buffer[0x8]] ^
                    table_s9[0x100 + buffer[0x9]] ^
                    table_s9[0x200 + buffer[0xa]] ^
                    table_s9[0x300 + buffer[0xb]];
         block[3] = table_s9[0x000 + buffer[0xc]] ^
                    table_s9[0x100 + buffer[0xd]] ^
                    table_s9[0x200 + buffer[0xe]] ^
                    table_s9[0x300 + buffer[0xf]];
      }
      // Next, another permute with a different table
      buffer[0x0] = table_s10[(0x0 << 8) + buffer[0x0]];
      buffer[0x4] = table_s10[(0x4 << 8) + buffer[0x4]];
      buffer[0x8] = table_s10[(0x8 << 8) + buffer[0x8]];
      buffer[0xc] = table_s10[(0xc << 8) + buffer[0xc]];

      tmp = buffer[0x0d];
      buffer[0xd] = table_s10[(0xd << 8) + buffer[0x9]];
      buffer[0x9] = table_s10[(0x9 << 8) + buffer[0x5]];
      buffer[0x5] = table_s10[(0x5 << 8) + buffer[0x1]];
      buffer[0x1] = table_s10[(0x1 << 8) + tmp];

      tmp = buffer[0x02];
      buffer[0x2] = table_s10[(0x2 << 8) + buffer[0xa]];
      buffer[0xa] = table_s10[(0xa << 8) + tmp];
      tmp = buffer[0x06];
      buffer[0x6] = table_s10[(0x6 << 8) + buffer[0xe]];
      buffer[0xe] = table_s10[(0xe << 8) + tmp];

      tmp = buffer[0x3];
      buffer[0x3] = table_s10[(0x3 << 8) + buffer[0x7]];
      buffer[0x7] = table_s10[(0x7 << 8) + buffer[0xb]];
      buffer[0xb] = table_s10[(0xb << 8) + buffer[0xf]];
      buffer[0xf] = table_s10[(0xf << 8) + tmp];

      // And finally xor with the previous block of the message, except in mode-2 where we do this in reverse
      if (mode == 2 || mode == 1 || mode == 0)
      {
         if (i > 0)
         {
            xor_blocks(buffer, &messageIn[0x10*i], &decryptedMessage[0x10*i]); // remember that the first 0x10 bytes are the header
         }
         else
            xor_blocks(buffer, message_iv[mode], &decryptedMessage[0x10*i]);
         print_block(" ", &decryptedMessage[0x10*i]);
      }
      else
      {
         if (i < 7)
            xor_blocks(buffer, &messageIn[0x70 - 0x10*i], &decryptedMessage[0x70 - 0x10*i]);
         else
            xor_blocks(buffer, message_iv[mode], &decryptedMessage[0x70 - 0x10*i]);
         printf("Decrypted message block %02X-%02X:", 0x70 - 0x10*i, 0x70 - 0x10*i+0xf);
         print_block(" ", &decryptedMessage[0x70 - 0x10*i]);
      }
   }
}
Example no. 11
/* Restore data:
 * We are given:
 *  A list of 'fds' of the active disks. Some may be '-1' for not-available.
 *  A geometry: raid_disks, chunk_size, level, layout
 *  An 'fd' to read from.  It is already seeked to the right (Read) location.
 *  A start and length.
 * The length must be a multiple of the stripe size.
 *
 * We build a full stripe in memory and then write it out.
 * We assume that there are enough working devices.
 */
int restore_stripes(int *dest, unsigned long long *offsets,
		    int raid_disks, int chunk_size, int level, int layout,
		    int source, unsigned long long read_offset,
		    unsigned long long start, unsigned long long length,
		    char *src_buf)
{
	char *stripe_buf;
	char **stripes = xmalloc(raid_disks * sizeof(char*));
	char **blocks = xmalloc(raid_disks * sizeof(char*));
	int i;
	int rv;

	int data_disks = raid_disks - (level == 0 ? 0 : level <= 5 ? 1 : 2);

	if (posix_memalign((void**)&stripe_buf, 4096, raid_disks * chunk_size))
		stripe_buf = NULL;

	if (zero == NULL || chunk_size > zero_size) {
		if (zero)
			free(zero);
		zero = xcalloc(1, chunk_size);
		zero_size = chunk_size;
	}

	if (stripe_buf == NULL || stripes == NULL || blocks == NULL
	    || zero == NULL) {
		rv = -2;
		goto abort;
	}
	for (i = 0; i < raid_disks; i++)
		stripes[i] = stripe_buf + i * chunk_size;
	while (length > 0) {
		unsigned int len = data_disks * chunk_size;
		unsigned long long offset;
		int disk, qdisk;
		int syndrome_disks;
		if (length < len) {
			rv = -3;
			goto abort;
		}
		for (i = 0; i < data_disks; i++) {
			int disk = geo_map(i, start/chunk_size/data_disks,
					   raid_disks, level, layout);
			if (src_buf == NULL) {
				/* read from file */
				if (lseek64(source, read_offset, 0) !=
					 (off64_t)read_offset) {
					rv = -1;
					goto abort;
				}
				if (read(source,
					 stripes[disk],
					 chunk_size) != chunk_size) {
					rv = -1;
					goto abort;
				}
			} else {
				/* read from input buffer */
				memcpy(stripes[disk],
				       src_buf + read_offset,
				       chunk_size);
			}
			read_offset += chunk_size;
		}
		/* We have the data, now do the parity */
		offset = (start/chunk_size/data_disks) * chunk_size;
		switch (level) {
		case 4:
		case 5:
			disk = geo_map(-1, start/chunk_size/data_disks,
					   raid_disks, level, layout);
			for (i = 0; i < data_disks; i++)
				blocks[i] = stripes[(disk+1+i) % raid_disks];
			xor_blocks(stripes[disk], blocks, data_disks, chunk_size);
			break;
		case 6:
			disk = geo_map(-1, start/chunk_size/data_disks,
				       raid_disks, level, layout);
			qdisk = geo_map(-2, start/chunk_size/data_disks,
				       raid_disks, level, layout);
			if (is_ddf(layout)) {
				/* q over 'raid_disks' blocks, in device order.
				 * 'p' and 'q' get to be all zero
				 */
				for (i = 0; i < raid_disks; i++)
					if (i == disk || i == qdisk)
						blocks[i] = (char*)zero;
					else
						blocks[i] = stripes[i];
				syndrome_disks = raid_disks;
			} else {
				/* for md, q is over 'data_disks' blocks,
				 * starting immediately after 'q'
				 */
				for (i = 0; i < data_disks; i++)
					blocks[i] = stripes[(qdisk+1+i) % raid_disks];

				syndrome_disks = data_disks;
			}
			qsyndrome((uint8_t*)stripes[disk],
				  (uint8_t*)stripes[qdisk],
				  (uint8_t**)blocks,
				  syndrome_disks, chunk_size);
			break;
		}
		for (i=0; i < raid_disks ; i++)
			if (dest[i] >= 0) {
				if (lseek64(dest[i],
					 offsets[i]+offset, 0) < 0) {
					rv = -1;
					goto abort;
				}
				if (write(dest[i], stripes[i],
					 chunk_size) != chunk_size) {
					rv = -1;
					goto abort;
				}
			}
		length -= len;
		start += len;
	}
	rv = 0;

abort:
	free(stripe_buf);
	free(stripes);
	free(blocks);
	return rv;
}
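restore_stripes() computes the P block by passing every data chunk to xor_blocks(dest, blocks, count, size); the helper evidently XORs the given source chunks together into dest (that is also how it is used for recovery in save_stripes() below). A straightforward, unoptimised sketch of that behaviour:

/* Sketch of the parity helper as used above: dest = XOR of all source chunks. */
static void xor_blocks(char *dest, char **sources, int disks, int size)
{
	int i, j;

	for (i = 0; i < size; i++) {
		char c = 0;

		for (j = 0; j < disks; j++)
			c ^= sources[j][i];
		dest[i] = c;
	}
}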
Example no. 12
/*******************************************************************************
 * Function:	save_stripes
 * Description:
 *	Reads data (data only, without P and Q) from the array and writes
 *	it to buf and optionally to backup files.
 * Parameters:
 *	source		: A list of 'fds' of the active disks.
 *			  Some may be absent
 *	offsets		: A list of offsets on disk belonging
 *			 to the array [bytes]
 *	raid_disks	: geometry: number of disks in the array
 *	chunk_size	: geometry: chunk size [bytes]
 *	level		: geometry: RAID level
 *	layout		: geometry: layout
 *	nwrites		: number of backup files
 *	dest		: A list of 'fds' for mirrored targets
 *			  (e.g. backup files). They are already seeked to the
 *			  right (write) location. If NULL, data is written
 *			  to buf only
 *	start		: start address of data to read (must be stripe-aligned)
 *			  [bytes]
 *	length		: length of data to read (must be stripe-aligned)
 *			  [bytes]
 *	buf		: buffer for data. It is large enough to hold
 *			  one stripe. It is stripe aligned
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
int save_stripes(int *source, unsigned long long *offsets,
		 int raid_disks, int chunk_size, int level, int layout,
		 int nwrites, int *dest,
		 unsigned long long start, unsigned long long length,
		 char *buf)
{
	int len;
	int data_disks = raid_disks - (level == 0 ? 0 : level <=5 ? 1 : 2);
	int disk;
	int i;
	unsigned long long length_test;

	if (!tables_ready)
		make_tables();
	ensure_zero_has_size(chunk_size);

	len = data_disks * chunk_size;
	length_test = length / len;
	length_test *= len;

	if (length != length_test) {
		dprintf("Error: save_stripes(): Data are not alligned. EXIT\n");
		dprintf("\tArea for saving stripes (length) = %llu\n", length);
		dprintf("\tWork step (len)                  = %i\n", len);
		dprintf("\tExpected save area (length_test) = %llu\n",
			length_test);
		abort();
	}

	while (length > 0) {
		int failed = 0;
		int fdisk[3], fblock[3];
		for (disk = 0; disk < raid_disks ; disk++) {
			unsigned long long offset;
			int dnum;

			offset = (start/chunk_size/data_disks)*chunk_size;
			dnum = geo_map(disk < data_disks ? disk : data_disks - disk - 1,
				       start/chunk_size/data_disks,
				       raid_disks, level, layout);
			if (dnum < 0) abort();
			if (source[dnum] < 0 ||
			    lseek64(source[dnum], offsets[dnum]+offset, 0) < 0 ||
			    read(source[dnum], buf+disk * chunk_size, chunk_size)
			    != chunk_size)
				if (failed <= 2) {
					fdisk[failed] = dnum;
					fblock[failed] = disk;
					failed++;
				}
		}
		if (failed == 0 || fblock[0] >= data_disks)
			/* all data disks are good */
			;
		else if (failed == 1 || fblock[1] >= data_disks+1) {
			/* one failed data disk and good parity */
			char *bufs[data_disks];
			for (i=0; i < data_disks; i++)
				if (fblock[0] == i)
					bufs[i] = buf + data_disks*chunk_size;
				else
					bufs[i] = buf + i*chunk_size;

			xor_blocks(buf + fblock[0]*chunk_size,
				   bufs, data_disks, chunk_size);
		} else if (failed > 2 || level != 6)
			/* too much failure */
			return -1;
		else {
			/* RAID6 computations needed. */
			uint8_t *bufs[data_disks+4];
			int qdisk;
			int syndrome_disks;
			disk = geo_map(-1, start/chunk_size/data_disks,
				       raid_disks, level, layout);
			qdisk = geo_map(-2, start/chunk_size/data_disks,
				       raid_disks, level, layout);
			if (is_ddf(layout)) {
				/* q over 'raid_disks' blocks, in device order.
				 * 'p' and 'q' get to be all zero
				 */
				for (i = 0; i < raid_disks; i++)
					bufs[i] = zero;
				for (i = 0; i < data_disks; i++) {
					int dnum = geo_map(i,
							   start/chunk_size/data_disks,
							   raid_disks, level, layout);
					int snum;
					/* i is the logical block number, so is index to 'buf'.
					 * dnum is physical disk number
					 * and thus the syndrome number.
					 */
					snum = dnum;
					bufs[snum] = (uint8_t*)buf + chunk_size * i;
				}
				syndrome_disks = raid_disks;
			} else {
				/* for md, q is over 'data_disks' blocks,
				 * starting immediately after 'q'
				 * Note that for the '_6' variety, the p block
				 * makes a hole that we need to be careful of.
				 */
				int j;
				int snum = 0;
				for (j = 0; j < raid_disks; j++) {
					int dnum = (qdisk + 1 + j) % raid_disks;
					if (dnum == disk || dnum == qdisk)
						continue;
					for (i = 0; i < data_disks; i++)
						if (geo_map(i,
							    start/chunk_size/data_disks,
							    raid_disks, level, layout) == dnum)
							break;
					/* i is the logical block number, so is index to 'buf'.
					 * dnum is physical disk number
					 * snum is syndrome disk for which 0 is immediately after Q
					 */
					bufs[snum] = (uint8_t*)buf + chunk_size * i;

					if (fblock[0] == i)
						fdisk[0] = snum;
					if (fblock[1] == i)
						fdisk[1] = snum;
					snum++;
				}

				syndrome_disks = data_disks;
			}

			/* Place P and Q blocks at end of bufs */
			bufs[syndrome_disks] = (uint8_t*)buf + chunk_size * data_disks;
			bufs[syndrome_disks+1] = (uint8_t*)buf + chunk_size * (data_disks+1);

			if (fblock[1] == data_disks)
				/* One data failed, and parity failed */
				raid6_datap_recov(syndrome_disks+2, chunk_size,
						  fdisk[0], bufs, 0);
			else {
				/* Two data blocks failed, P,Q OK */
				raid6_2data_recov(syndrome_disks+2, chunk_size,
						  fdisk[0], fdisk[1], bufs, 0);
			}
		}
		if (dest) {
			for (i = 0; i < nwrites; i++)
				if (write(dest[i], buf, len) != len)
					return -1;
		} else {
			/* build next stripe in buffer */
			buf += len;
		}
		length -= len;
		start += len;
	}
	return 0;
}
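The single-failure branch above rebuilds the missing data chunk by XORing the surviving data chunks together with P, using the RAID5 identity P = D0 ^ D1 ^ ... ^ Dn-1, hence Dk = P ^ (XOR of the other data chunks). A tiny standalone illustration of that identity (toy chunk size, made-up contents):

#include <assert.h>
#include <string.h>

#define CHUNK 8

int main(void)
{
	char d0[CHUNK] = "chunk-0", d1[CHUNK] = "chunk-1", d2[CHUNK] = "chunk-2";
	char p[CHUNK], rebuilt[CHUNK];
	int i;

	/* Parity over the three data chunks. */
	for (i = 0; i < CHUNK; i++)
		p[i] = d0[i] ^ d1[i] ^ d2[i];

	/* Pretend d1 was lost: recover it from the survivors plus parity. */
	for (i = 0; i < CHUNK; i++)
		rebuilt[i] = d0[i] ^ d2[i] ^ p[i];

	assert(memcmp(rebuilt, d1, CHUNK) == 0);
	return 0;
}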
Example no. 13
void cryptonight_hash(const char* input, char* output, uint32_t len) {
    uint8_t long_state[MEMORY];
    union cn_slow_hash_state state;
    uint8_t text[INIT_SIZE_BYTE];
    uint8_t a[AES_BLOCK_SIZE];
    uint8_t b[AES_BLOCK_SIZE];
    uint8_t c[AES_BLOCK_SIZE];
    uint8_t d[AES_BLOCK_SIZE];
    size_t i, j;
    uint8_t aes_key[AES_KEY_SIZE];
    OAES_CTX* aes_ctx;

    hash_process(&state.hs, (const uint8_t*) input, len);
    memcpy(text, state.init, INIT_SIZE_BYTE);
    memcpy(aes_key, state.hs.b, AES_KEY_SIZE);
    aes_ctx = oaes_alloc();

    oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE);
    for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
        for (j = 0; j < INIT_SIZE_BLK; j++) {
            oaes_pseudo_encrypt_ecb(aes_ctx, &text[AES_BLOCK_SIZE * j]);
        }
        memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
    }

    for (i = 0; i < 16; i++) {
        a[i] = state.k[i] ^ state.k[32 + i];
        b[i] = state.k[16 + i] ^ state.k[48 + i];
    }

    for (i = 0; i < ITER / 2; i++) {
        /* Dependency chain: address -> read value ------+
         * written value <-+ hard function (AES or MUL) <+
         * next address  <-+
         */
        /* Iteration 1 */
        j = e2i(a, MEMORY / AES_BLOCK_SIZE);
        copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
        oaes_encryption_round(a, c);
        xor_blocks(b, c);
        swap_blocks(b, c);
        copy_block(&long_state[j * AES_BLOCK_SIZE], c);
        assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE));
        swap_blocks(a, b);
        /* Iteration 2 */
        j = e2i(a, MEMORY / AES_BLOCK_SIZE);
        copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
        mul(a, c, d);
        sum_half_blocks(b, d);
        swap_blocks(b, c);
        xor_blocks(b, c);
        copy_block(&long_state[j * AES_BLOCK_SIZE], c);
        swap_blocks(a, b);
    }

    memcpy(text, state.init, INIT_SIZE_BYTE);
    oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
    for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
        for (j = 0; j < INIT_SIZE_BLK; j++) {
            xor_blocks(&text[j * AES_BLOCK_SIZE],
                       &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
            oaes_pseudo_encrypt_ecb(aes_ctx, &text[j * AES_BLOCK_SIZE]);
        }
    }
    memcpy(state.init, text, INIT_SIZE_BYTE);
    hash_permutation(&state.hs);
    /*memcpy(hash, &state, 32);*/
    extra_hashes[state.hs.b[0] & 3](&state, 200, output);
    oaes_free(&aes_ctx);
}
Example no. 14
/* 
 * 	Calculate H(i+1) = Hash(Hi,Mi) 
 * 	Where H and M are 32 bytes long
 */
static int
hash_step(GOSTR341194_CTX *c, unsigned char *H, const unsigned char *M)
{
	unsigned char U[32], W[32], V[32], S[32], Key[32];
	int i;

	/* Compute first key */
	xor_blocks(W, H, M, 32);
	swap_bytes(W, Key);
	/* Encrypt first 8 bytes of H with first key */
	Gost2814789_set_key(&c->cipher, Key, 256);
	Gost2814789_encrypt(H, S, &c->cipher);

	/* Compute second key */
	circle_xor8(H, U);
	circle_xor8(M, V);
	circle_xor8(V, V);
	xor_blocks(W, U, V, 32);
	swap_bytes(W, Key);
	/* encrypt second 8 bytes of H with second key */
	Gost2814789_set_key(&c->cipher, Key, 256);
	Gost2814789_encrypt(H+8, S+8, &c->cipher);

	/* compute third key */
	circle_xor8(U, U);
	U[31] = ~U[31];
	U[29] = ~U[29];
	U[28] = ~U[28];
	U[24] = ~U[24];
	U[23] = ~U[23];
	U[20] = ~U[20];
	U[18] = ~U[18];
	U[17] = ~U[17];
	U[14] = ~U[14];
	U[12] = ~U[12];
	U[10] = ~U[10];
	U[8] = ~U[8];
	U[7] = ~U[7];
	U[5] = ~U[5];
	U[3] = ~U[3];
	U[1] = ~U[1];
	circle_xor8(V, V);
	circle_xor8(V, V);
	xor_blocks(W, U, V, 32);
	swap_bytes(W, Key);
	/* encrypt third 8 bytes of H with third key */
	Gost2814789_set_key(&c->cipher, Key, 256);
	Gost2814789_encrypt(H+16, S+16, &c->cipher);

	/* Compute fourth key */
	circle_xor8(U, U);
	circle_xor8(V, V);
	circle_xor8(V, V);
	xor_blocks(W, U, V, 32);
	swap_bytes(W, Key);
	/* Encrypt last 8 bytes with fourth key */
	Gost2814789_set_key(&c->cipher, Key, 256);
	Gost2814789_encrypt(H+24, S+24, &c->cipher);

	for (i = 0; i < 12; i++)
		transform_3(S);
	xor_blocks(S, S, M, 32);
	transform_3(S);
	xor_blocks(S, S, H, 32);
	for (i = 0; i < 61; i++)
		transform_3(S);
	memcpy(H, S, 32);
	return 1;
}
Example no. 15
int check_stripes(struct mdinfo *info, int *source, unsigned long long *offsets,
		  int raid_disks, int chunk_size, int level, int layout,
		  unsigned long long start, unsigned long long length, char *name[],
		  int repair, int failed_disk1, int failed_disk2)
{
	/* read the data and p and q blocks, and check we got them right */
	char *stripe_buf = xmalloc(raid_disks * chunk_size);
	char **stripes = xmalloc(raid_disks * sizeof(char*));
	char **blocks = xmalloc(raid_disks * sizeof(char*));
	int *block_index_for_slot = xmalloc(raid_disks * sizeof(int));
	uint8_t *p = xmalloc(chunk_size);
	uint8_t *q = xmalloc(chunk_size);
	int *results = xmalloc(chunk_size * sizeof(int));
	sighandler_t *sig = xmalloc(3 * sizeof(sighandler_t));

	int i;
	int diskP, diskQ;
	int data_disks = raid_disks - 2;
	int err = 0;

	extern int tables_ready;

	if (!tables_ready)
		make_tables();

	for ( i = 0 ; i < raid_disks ; i++)
		stripes[i] = stripe_buf + i * chunk_size;

	while (length > 0) {
		int disk;

		printf("pos --> %llu\n", start);

		err = lock_stripe(info, start, chunk_size, data_disks, sig);
		if(err != 0) {
			if (err != 2)
				unlock_all_stripes(info, sig);
			goto exitCheck;
		}
		for (i = 0 ; i < raid_disks ; i++) {
			lseek64(source[i], offsets[i] + start * chunk_size, 0);
			read(source[i], stripes[i], chunk_size);
		}
		err = unlock_all_stripes(info, sig);
		if(err != 0)
			goto exitCheck;

		for (i = 0 ; i < data_disks ; i++) {
			int disk = geo_map(i, start, raid_disks, level, layout);
			blocks[i] = stripes[disk];
			block_index_for_slot[disk] = i;
			printf("%d->%d\n", i, disk);
		}

		qsyndrome(p, q, (uint8_t**)blocks, data_disks, chunk_size);
		diskP = geo_map(-1, start, raid_disks, level, layout);
		diskQ = geo_map(-2, start, raid_disks, level, layout);
		blocks[data_disks] = stripes[diskP];
		block_index_for_slot[diskP] = data_disks;
		blocks[data_disks+1] = stripes[diskQ];
		block_index_for_slot[diskQ] = data_disks+1;

		if (memcmp(p, stripes[diskP], chunk_size) != 0) {
			printf("P(%d) wrong at %llu\n", diskP, start);
		}
		if (memcmp(q, stripes[diskQ], chunk_size) != 0) {
			printf("Q(%d) wrong at %llu\n", diskQ, start);
		}
		raid6_collect(chunk_size, p, q, stripes[diskP], stripes[diskQ], results);
		disk = raid6_stats(results, raid_disks, chunk_size);

		if(disk >= -2) {
			disk = geo_map(disk, start, raid_disks, level, layout);
		}
		if(disk >= 0) {
			printf("Error detected at %llu: possible failed disk slot: %d --> %s\n",
				start, disk, name[disk]);
		}
		if(disk == -65535) {
			printf("Error detected at %llu: disk slot unknown\n", start);
		}
		if(repair == 1) {
			printf("Repairing stripe %llu\n", start);
			printf("Assuming slots %d (%s) and %d (%s) are incorrect\n",
			       failed_disk1, name[failed_disk1],
			       failed_disk2, name[failed_disk2]);

			if (failed_disk1 == diskQ || failed_disk2 == diskQ) {
				char *all_but_failed_blocks[data_disks];
				int failed_data_or_p;
				int failed_block_index;

				if (failed_disk1 == diskQ)
					failed_data_or_p = failed_disk2;
				else
					failed_data_or_p = failed_disk1;
				printf("Repairing D/P(%d) and Q\n", failed_data_or_p);
				failed_block_index = block_index_for_slot[failed_data_or_p];
				for (i=0; i < data_disks; i++)
					if (failed_block_index == i)
						all_but_failed_blocks[i] = stripes[diskP];
					else
						all_but_failed_blocks[i] = blocks[i];
				xor_blocks(stripes[failed_data_or_p],
					all_but_failed_blocks, data_disks, chunk_size);
				qsyndrome(p, (uint8_t*)stripes[diskQ], (uint8_t**)blocks, data_disks, chunk_size);
			} else {
				ensure_zero_has_size(chunk_size);
				if (failed_disk1 == diskP || failed_disk2 == diskP) {
					int failed_data, failed_block_index;
					if (failed_disk1 == diskP)
						failed_data = failed_disk2;
					else
						failed_data = failed_disk1;
					failed_block_index = block_index_for_slot[failed_data];
					printf("Repairing D(%d) and P\n", failed_data);
					raid6_datap_recov(raid_disks, chunk_size, failed_block_index, (uint8_t**)blocks);
				} else {
					printf("Repairing D and D\n");
					int failed_block_index1 = block_index_for_slot[failed_disk1];
					int failed_block_index2 = block_index_for_slot[failed_disk2];
					if (failed_block_index1 > failed_block_index2) {
						int t = failed_block_index1;
						failed_block_index1 = failed_block_index2;
						failed_block_index2 = t;
					}
					raid6_2data_recov(raid_disks, chunk_size, failed_block_index1, failed_block_index2, (uint8_t**)blocks);
				}
			}

			err = lock_stripe(info, start, chunk_size, data_disks, sig);
			if(err != 0) {
				if (err != 2)
					unlock_all_stripes(info, sig);
				goto exitCheck;
			}

			lseek64(source[failed_disk1], offsets[failed_disk1] + start * chunk_size, 0);
			write(source[failed_disk1], stripes[failed_disk1], chunk_size);
			lseek64(source[failed_disk2], offsets[failed_disk2] + start * chunk_size, 0);
			write(source[failed_disk2], stripes[failed_disk2], chunk_size);

			err = unlock_all_stripes(info, sig);
			if(err != 0)
				goto exitCheck;
		} else if (disk >= 0 && repair == 2) {
			printf("Auto-repairing slot %d (%s)\n", disk, name[disk]);
			if (disk == diskQ) {
				qsyndrome(p, (uint8_t*)stripes[diskQ], (uint8_t**)blocks, data_disks, chunk_size);
			} else {
				char *all_but_failed_blocks[data_disks];
				int failed_block_index = block_index_for_slot[disk];
				for (i=0; i < data_disks; i++)
					if (failed_block_index == i)
						all_but_failed_blocks[i] = stripes[diskP];
					else
						all_but_failed_blocks[i] = blocks[i];
				xor_blocks(stripes[disk],
					all_but_failed_blocks, data_disks, chunk_size);
			}

			err = lock_stripe(info, start, chunk_size, data_disks, sig);
			if(err != 0) {
				if (err != 2)
					unlock_all_stripes(info, sig);
				goto exitCheck;
			}

			lseek64(source[disk], offsets[disk] + start * chunk_size, 0);
			write(source[disk], stripes[disk], chunk_size);

			err = unlock_all_stripes(info, sig);
			if(err != 0)
				goto exitCheck;
		}


		length--;
		start++;
	}

exitCheck:

	free(stripe_buf);
	free(stripes);
	free(blocks);
	free(p);
	free(q);
	free(results);

	return err;
}
Example no. 16
static void bitfix_xor32(void *dest, const void *src, size_t count)
{
	xor_blocks(1, PAGE_SIZE, dest, (void **)&src);
}
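Given the accumulate-into-dest behaviour sketched after Example no. 1, bitfix_xor32() XORs a single source buffer into dest over one page (note that it passes PAGE_SIZE rather than its count argument). Spelled out, it is roughly equivalent to the following (PAGE_SIZE as provided by the kernel headers, as in the original):

/* Rough equivalent of bitfix_xor32() above, assuming xor_blocks()
 * accumulates into dest; 'count' is ignored just as in the original. */
static void bitfix_xor32_equiv(void *dest, const void *src, size_t count)
{
	unsigned char *d = dest;
	const unsigned char *s = src;
	size_t i;

	(void)count;
	for (i = 0; i < PAGE_SIZE; i++)
		d[i] ^= s[i];
}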
Example no. 17
void cryptonight_hash(const char* input, char* output, uint32_t len, int variant, uint64_t height) {
    struct cryptonight_ctx *ctx = alloca(sizeof(struct cryptonight_ctx));
    hash_process(&ctx->state.hs, (const uint8_t*) input, len);
    memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
    memcpy(ctx->aes_key, ctx->state.hs.b, AES_KEY_SIZE);
    ctx->aes_ctx = (oaes_ctx*) oaes_alloc();
    size_t i, j;

    VARIANT1_INIT();
    VARIANT2_INIT(ctx->b, ctx->state);
    VARIANT4_RANDOM_MATH_INIT(ctx->state);

    oaes_key_import_data(ctx->aes_ctx, ctx->aes_key, AES_KEY_SIZE);
    for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
        for (j = 0; j < INIT_SIZE_BLK; j++) {
            aesb_pseudo_round(&ctx->text[AES_BLOCK_SIZE * j],
                    &ctx->text[AES_BLOCK_SIZE * j],
                    ctx->aes_ctx->key->exp_data);
        }
        memcpy(&ctx->long_state[i * INIT_SIZE_BYTE], ctx->text, INIT_SIZE_BYTE);
    }

    for (i = 0; i < 16; i++) {
        ctx->a[i] = ctx->state.k[i] ^ ctx->state.k[32 + i];
        ctx->b[i] = ctx->state.k[16 + i] ^ ctx->state.k[48 + i];
    }

    for (i = 0; i < ITER / 2; i++) {
        /* Dependency chain: address -> read value ------+
         * written value <-+ hard function (AES or MUL) <+
         * next address  <-+
         */
        /* Iteration 1 */
        j = e2i(ctx->a);
        aesb_single_round(&ctx->long_state[j * AES_BLOCK_SIZE], ctx->c, ctx->a);
        VARIANT2_SHUFFLE_ADD(ctx->long_state, j * AES_BLOCK_SIZE, ctx->a, ctx->b, ctx->c);
        xor_blocks_dst(ctx->c, ctx->b, &ctx->long_state[j * AES_BLOCK_SIZE]);
        VARIANT1_1((uint8_t*)&ctx->long_state[j * AES_BLOCK_SIZE]);
        /* Iteration 2 */
        j = e2i(ctx->c);

        uint64_t* dst = (uint64_t*)&ctx->long_state[j * AES_BLOCK_SIZE];

        uint64_t t[2];
        t[0] = dst[0];
        t[1] = dst[1];

        VARIANT2_INTEGER_MATH(t, ctx->c);
        copy_block(ctx->a1, ctx->a);
        VARIANT4_RANDOM_MATH(ctx->a, t, r, ctx->b, ctx->b + AES_BLOCK_SIZE);

        uint64_t hi;
        uint64_t lo = mul128(((uint64_t*)ctx->c)[0], t[0], &hi);

        VARIANT2_2();
        VARIANT2_SHUFFLE_ADD(ctx->long_state, j * AES_BLOCK_SIZE, ctx->a1, ctx->b, ctx->c);

        ((uint64_t*)ctx->a)[0] += hi;
        ((uint64_t*)ctx->a)[1] += lo;

        dst[0] = ((uint64_t*)ctx->a)[0];
        dst[1] = ((uint64_t*)ctx->a)[1];

        ((uint64_t*)ctx->a)[0] ^= t[0];
        ((uint64_t*)ctx->a)[1] ^= t[1];

        VARIANT1_2((uint8_t*)&ctx->long_state[j * AES_BLOCK_SIZE]);
        copy_block(ctx->b + AES_BLOCK_SIZE, ctx->b);
        copy_block(ctx->b, ctx->c);
    }

    memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
    oaes_key_import_data(ctx->aes_ctx, &ctx->state.hs.b[32], AES_KEY_SIZE);
    for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
        for (j = 0; j < INIT_SIZE_BLK; j++) {
            xor_blocks(&ctx->text[j * AES_BLOCK_SIZE],
                    &ctx->long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
            aesb_pseudo_round(&ctx->text[j * AES_BLOCK_SIZE],
                    &ctx->text[j * AES_BLOCK_SIZE],
                    ctx->aes_ctx->key->exp_data);
        }
    }
    memcpy(ctx->state.init, ctx->text, INIT_SIZE_BYTE);
    hash_permutation(&ctx->state.hs);
    /*memcpy(hash, &state, 32);*/
    extra_hashes[ctx->state.hs.b[0] & 3](&ctx->state, 200, output);
    oaes_free((OAES_CTX **) &ctx->aes_ctx);
}