static int rc4_hmac_md5_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
		      const unsigned char *in, size_t len)
	{
	EVP_RC4_HMAC_MD5 *key = data(ctx);
#if defined(STITCHED_CALL)
	size_t	rc4_off = 32-1-(key->ks.x&(32-1)),	/* 32 is $MOD from rc4_md5-x86_64.pl */
		md5_off = MD5_CBLOCK-key->md.num,
		blocks;
	unsigned int l;
#endif
	size_t	plen = key->payload_length;

	if (plen && len!=(plen+MD5_DIGEST_LENGTH)) return 0;

	if (ctx->encrypt) {
		if (plen==0) plen = len;
#if defined(STITCHED_CALL)
		/* cipher has to "fall behind" */
		if (rc4_off>md5_off) md5_off+=MD5_CBLOCK;

		if (plen>md5_off && (blocks=(plen-md5_off)/MD5_CBLOCK)) {
			MD5_Update(&key->md,in,md5_off);
			RC4(&key->ks,rc4_off,in,out);

			rc4_md5_enc(&key->ks,in+rc4_off,out+rc4_off,
				&key->md,in+md5_off,blocks);
			blocks *= MD5_CBLOCK;
			rc4_off += blocks;
			md5_off += blocks;
			key->md.Nh += blocks>>29;
			key->md.Nl += blocks<<=3;
			if (key->md.Nl<(unsigned int)blocks) key->md.Nh++;
		} else {
/* --- Example #2 (snippet boundary; the function above is truncated) --- */
static int aead_rc4_md5_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                 size_t *out_len, size_t max_out_len,
                                 const uint8_t *nonce, size_t nonce_len,
                                 const uint8_t *in, size_t in_len,
                                 const uint8_t *ad, size_t ad_len) {
  struct aead_rc4_md5_tls_ctx *rc4_ctx = ctx->aead_state;
  MD5_CTX md;
#if defined(STITCHED_CALL)
  size_t rc4_off, md5_off, blocks;
#else
  const size_t rc4_off = 0;
  const size_t md5_off = 0;
#endif
  uint8_t digest[MD5_DIGEST_LENGTH];

  if (in_len + rc4_ctx->tag_len < in_len) {
    OPENSSL_PUT_ERROR(CIPHER, aead_rc4_md5_tls_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (nonce_len != 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_rc4_md5_tls_seal, CIPHER_R_IV_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + rc4_ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, aead_rc4_md5_tls_seal, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_rc4_md5_tls_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  memcpy(&md, &rc4_ctx->head, sizeof(MD5_CTX));
  /* The MAC's payload begins with the additional data. See
   * https://tools.ietf.org/html/rfc5246#section-6.2.3.1 */
  MD5_Update(&md, ad, ad_len);

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);
  MD5_Update(&md, ad_extra, sizeof(ad_extra));

#if defined(STITCHED_CALL)
  /* 32 is $MOD from rc4_md5-x86_64.pl. */
  rc4_off = 32 - 1 - (rc4_ctx->rc4.x & (32 - 1));
  md5_off = MD5_CBLOCK - md.num;
  /* Ensure RC4 is behind MD5. */
  if (rc4_off > md5_off) {
    md5_off += MD5_CBLOCK;
  }
  assert(md5_off >= rc4_off);

  if (in_len > md5_off && (blocks = (in_len - md5_off) / MD5_CBLOCK) &&
      (OPENSSL_ia32cap_P[0] & (1 << 20)) == 0) {
    /* Process the initial portions of the plaintext normally. */
    MD5_Update(&md, in, md5_off);
    RC4(&rc4_ctx->rc4, rc4_off, in, out);

    /* Process the next |blocks| blocks of plaintext with stitched routines. */
    rc4_md5_enc(&rc4_ctx->rc4, in + rc4_off, out + rc4_off, &md, in + md5_off,
                blocks);
    blocks *= MD5_CBLOCK;
    rc4_off += blocks;
    md5_off += blocks;
    md.Nh += blocks >> 29;
    md.Nl += blocks <<= 3;
    if (md.Nl < (unsigned int)blocks) {
      md.Nh++;
    }
  } else {