static int sms4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                         const unsigned char *iv, int enc)
{
    if (!enc) {
        if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE)
            enc = 1;
        else if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE)
            enc = 1;            /* encrypt key == decrypt key */
    }
    if (enc)
        sms4_set_encrypt_key(ctx->cipher_data, key);
    else                        /* ECB, CBC */
        sms4_set_decrypt_key(ctx->cipher_data, key);
    return 1;
}
int EVP_CIPHER_param_to_asn1(EVP_CIPHER_CTX *c, ASN1_TYPE *type)
{
    int ret;

    if (c->cipher->set_asn1_parameters != NULL)
        ret = c->cipher->set_asn1_parameters(c, type);
    else if (c->cipher->flags & EVP_CIPH_FLAG_DEFAULT_ASN1) {
        switch (EVP_CIPHER_CTX_mode(c)) {
        case EVP_CIPH_WRAP_MODE:
            if (EVP_CIPHER_CTX_nid(c) == NID_id_smime_alg_CMS3DESwrap)
                ASN1_TYPE_set(type, V_ASN1_NULL, NULL);
            ret = 1;
            break;
        case EVP_CIPH_GCM_MODE:
        case EVP_CIPH_CCM_MODE:
        case EVP_CIPH_XTS_MODE:
            ret = -1;
            break;
        default:
            ret = EVP_CIPHER_set_asn1_iv(c, type);
        }
    } else
        ret = -1;
    return ret;
}
/* The subkey for Camellia is generated. */
static int camellia_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                             const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_CAMELLIA_KEY *dat = EVP_C_DATA(EVP_CAMELLIA_KEY, ctx);

    ret = Camellia_set_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8, &dat->ks);
    if (ret < 0) {
        EVPerr(EVP_F_CAMELLIA_INIT_KEY, EVP_R_CAMELLIA_KEY_SETUP_FAILED);
        return 0;
    }

    mode = EVP_CIPHER_CTX_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) {
        dat->block = (block128_f)Camellia_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f)Camellia_cbc_encrypt : NULL;
    } else {
        dat->block = (block128_f)Camellia_encrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f)Camellia_cbc_encrypt : NULL;
    }

    return 1;
}
static int cms_wrap_init(CMS_KeyAgreeRecipientInfo *kari,
                         const EVP_CIPHER *cipher)
{
    EVP_CIPHER_CTX *ctx = &kari->ctx;
    const EVP_CIPHER *kekcipher;
    int keylen = EVP_CIPHER_key_length(cipher);

    /* If a suitable wrap algorithm is already set nothing to do */
    kekcipher = EVP_CIPHER_CTX_cipher(ctx);
    if (kekcipher) {
        if (EVP_CIPHER_CTX_mode(ctx) != EVP_CIPH_WRAP_MODE)
            return 0;
        return 1;
    }
    /*
     * Pick a cipher based on content encryption cipher. If it is DES3 use
     * DES3 wrap, otherwise use an AES wrap matching the key size.
     */
    if (EVP_CIPHER_type(cipher) == NID_des_ede3_cbc)
        kekcipher = EVP_des_ede3_wrap();
    else if (keylen <= 16)
        kekcipher = EVP_aes_128_wrap();
    else if (keylen <= 24)
        kekcipher = EVP_aes_192_wrap();
    else
        kekcipher = EVP_aes_256_wrap();
    return EVP_EncryptInit_ex(ctx, kekcipher, NULL, NULL, NULL);
}
int aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
               const unsigned char *in, size_t len)
{
    int block;
    int i, j;
    unsigned int mode, key_len;

    /* Hardware encoding: the EVP mode number minus one (ECB -> 0, CBC -> 1). */
    mode = EVP_CIPHER_CTX_mode(ctx) - 1;
    /* Key size in bytes divided by 8, minus two: 16 -> 0, 24 -> 1, 32 -> 2. */
    key_len = (ctx->key_len) / 8 - 2;
    block = len / 16;

    for (i = 0; i < block; i++) {
        /* Load one 16-byte block into the input registers. */
        for (j = 0; j < 4; j++)
            REG_TEXT(j) = GETU32(in + i * 16 + j * 4);
        /* Start the encrypt/decrypt operation. */
        REG_AES = ED(0, key_len, ctx->encrypt, mode, 0, 0);

        int a[5];
        /* Initial read of REG_RESULT(0), presumably to wait for the
         * operation to complete before collecting the output words. */
        a[4] = REG_RESULT(0);
        for (j = 0; j < 4; j++)
            a[j] = REG_RESULT(j);
        for (j = 0; j < 4; j++)
            PUTU32(a[j], out + i * 16 + j * 4);
    }
    return 1;
}
int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                 const unsigned char *iv, int enc)
{
    int i;
    unsigned int key_len, mode;

    /* Same hardware encodings as aes_cipher() above. */
    mode = EVP_CIPHER_CTX_mode(ctx) - 1;
    key_len = (ctx->key_len) / 8 - 2;

    REG_MODE = 0x20;
    REG_AES = BC_INI(0, key_len, enc, mode, 0);

    /* Load the raw key: 4 words for AES-128, plus 2 per larger key size. */
    for (i = 0; i < (4 + key_len * 2); i++)
        REG_KEY(i) = GETU32(key + i * 4);

    /* Mode 0 expects an IV in this hardware encoding. */
    if (!mode) {
        for (i = 0; i < 4; i++)
            REG_IV(i) = GETU32(iv + i * 4);
    }

    /* Key expansion */
    REG_AES = KEXP(0, key_len, enc, mode, 0);
    return 1;
}
int EVP_CIPHER_asn1_to_param(EVP_CIPHER_CTX *c, ASN1_TYPE *type)
{
    int ret;

    if (c->cipher->get_asn1_parameters != NULL)
        ret = c->cipher->get_asn1_parameters(c, type);
    else if (c->cipher->flags & EVP_CIPH_FLAG_DEFAULT_ASN1) {
        switch (EVP_CIPHER_CTX_mode(c)) {
        case EVP_CIPH_WRAP_MODE:
            ret = 1;
            break;
        case EVP_CIPH_GCM_MODE:
        case EVP_CIPH_CCM_MODE:
        case EVP_CIPH_XTS_MODE:
        case EVP_CIPH_OCB_MODE:
            ret = -1;
            break;
        default:
            ret = EVP_CIPHER_get_asn1_iv(c, type);
            break;
        }
    } else
        ret = -1;
    return ret;
}
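/*
 * A minimal usage sketch for the two default-ASN1 helpers above (not from
 * the codebases in this collection, and assuming an OpenSSL 1.1.0+
 * heap-allocated context): EVP_CIPHER_param_to_asn1() serialises the IV
 * into an ASN1_TYPE on the encrypt side, and EVP_CIPHER_asn1_to_param()
 * recovers it on the decrypt side. Error handling is abbreviated.
 */
#include <openssl/asn1.h>
#include <openssl/evp.h>

static int asn1_iv_roundtrip(void)
{
    unsigned char key[16] = {0}, iv[16] = {1};
    int ok = 0;
    ASN1_TYPE *type = ASN1_TYPE_new();
    EVP_CIPHER_CTX *enc = EVP_CIPHER_CTX_new();
    EVP_CIPHER_CTX *dec = EVP_CIPHER_CTX_new();

    if (type == NULL || enc == NULL || dec == NULL)
        goto done;
    /* Encrypt side: store the IV as algorithm parameters. */
    if (!EVP_EncryptInit_ex(enc, EVP_aes_128_cbc(), NULL, key, iv)
        || EVP_CIPHER_param_to_asn1(enc, type) <= 0)
        goto done;
    /* Decrypt side: recover the IV from the parameters before use. */
    if (!EVP_DecryptInit_ex(dec, EVP_aes_128_cbc(), NULL, key, NULL)
        || EVP_CIPHER_asn1_to_param(dec, type) <= 0)
        goto done;
    ok = 1;
done:
    EVP_CIPHER_CTX_free(enc);
    EVP_CIPHER_CTX_free(dec);
    ASN1_TYPE_free(type);
    return ok;
}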
STDMETHODIMP CBCipher::get_CipherMode(short *pVal)
{
    if (!m_ctx.cipher)
        return SetErrorInfo(s_strAlgoError);
    *pVal = (short)EVP_CIPHER_CTX_mode(&m_ctx);
    return S_OK;
}
STDMETHODIMP CBCipher::get_IVSize(short *pVal)
{
    if (!m_ctx.cipher)
        return SetErrorInfo(s_strAlgoError);
    /* Modes below 2 (EVP_CIPH_STREAM_CIPHER, EVP_CIPH_ECB_MODE) take no IV. */
    if (EVP_CIPHER_CTX_mode(&m_ctx) < 2)
        *pVal = 0;
    else
        *pVal = EVP_CIPHER_CTX_iv_length(&m_ctx);
    return S_OK;
}
static int idea_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                         const unsigned char *iv, int enc)
{
    if (!enc) {
        if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE)
            enc = 1;
        else if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE)
            enc = 1;
    }
    if (enc)
        idea_set_encrypt_key(key, &(ctx->c.idea_ks));
    else {
        IDEA_KEY_SCHEDULE tmp;

        idea_set_encrypt_key(key, &tmp);
        idea_set_decrypt_key(&tmp, &(ctx->c.idea_ks));
        memset((unsigned char *)&tmp, 0, sizeof(IDEA_KEY_SCHEDULE));
    }
    return 1;
}
static int idea_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                         const unsigned char *iv, int enc)
{
    if (!enc) {
        if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE)
            enc = 1;
        else if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE)
            enc = 1;
    }
    if (enc)
        idea_set_encrypt_key(key, ctx->cipher_data);
    else {
        IDEA_KEY_SCHEDULE tmp;

        idea_set_encrypt_key(key, &tmp);
        idea_set_decrypt_key(&tmp, ctx->cipher_data);
        OPENSSL_cleanse((unsigned char *)&tmp, sizeof(IDEA_KEY_SCHEDULE));
    }
    return 1;
}
STDMETHODIMP CBCipher::GenerateIV()
{
    if (!m_ctx.cipher)
        return SetErrorInfo(s_strAlgoError);
    if (EVP_CIPHER_CTX_iv_length(&m_ctx) == 0 || EVP_CIPHER_CTX_mode(&m_ctx) < 2)
        return E_NOTIMPL;
    m_pIV.Allocate(EVP_CIPHER_CTX_iv_length(&m_ctx));
    RAND_bytes(m_pIV, EVP_CIPHER_CTX_iv_length(&m_ctx));
    return S_OK;
}
static int aes_init(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                    const unsigned char *iv, int encp)
{
    AES_KEY *k = ctx->cipher_data;

    /* CFB8 always runs the block cipher in the forward direction. */
    if (ctx->encrypt || EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB8_MODE)
        AES_set_encrypt_key(key, ctx->cipher->key_len * 8, k);
    else
        AES_set_decrypt_key(key, ctx->cipher->key_len * 8, k);
    return 1;
}
static int aes_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                         const unsigned char *in, unsigned int size)
{
    AES_KEY *k = ctx->cipher_data;

    if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB8_MODE)
        AES_cfb8_encrypt(in, out, size, k, ctx->iv, ctx->encrypt);
    else
        AES_cbc_encrypt(in, out, size, k, ctx->iv, ctx->encrypt);
    return 1;
}
static LUA_FUNCTION(openssl_cipher_ctx_info)
{
  EVP_CIPHER_CTX *ctx = CHECK_OBJECT(1, EVP_CIPHER_CTX, "openssl.evp_cipher_ctx");
  lua_newtable(L);

  AUXILIAR_SET(L, -1, "block_size", EVP_CIPHER_CTX_block_size(ctx), integer);
  AUXILIAR_SET(L, -1, "key_length", EVP_CIPHER_CTX_key_length(ctx), integer);
  AUXILIAR_SET(L, -1, "iv_length", EVP_CIPHER_CTX_iv_length(ctx), integer);
  AUXILIAR_SET(L, -1, "flags", EVP_CIPHER_CTX_flags(ctx), integer);
  AUXILIAR_SET(L, -1, "nid", EVP_CIPHER_CTX_nid(ctx), integer);
  AUXILIAR_SET(L, -1, "type", EVP_CIPHER_CTX_type(ctx), integer);
  AUXILIAR_SET(L, -1, "mode", EVP_CIPHER_CTX_mode(ctx), integer);

  AUXILIAR_SETOBJECT(L, EVP_CIPHER_CTX_cipher(ctx), "openssl.evp_cipher", -1, "cipher");
  return 1;
}
int CBCMAC_Init(CBCMAC_CTX *ctx, const void *key, size_t keylen,
                const EVP_CIPHER *cipher, ENGINE *eng)
{
    int block_size;

    if (!EVP_EncryptInit_ex(&ctx->cipher_ctx, cipher, eng, key, NULL)) {
        return 0;
    }
    /* CBC-MAC chains raw block encryptions itself, so require ECB here. */
    if (EVP_CIPHER_CTX_mode(&ctx->cipher_ctx) != EVP_CIPH_ECB_MODE) {
        return 0;
    }
    ctx->worklen = 0;
    block_size = EVP_CIPHER_CTX_block_size(&ctx->cipher_ctx);
    bzero(ctx->cbcstate, block_size);
    return 1;
}
int EVP_CIPHER_asn1_to_param(EVP_CIPHER_CTX *c, ASN1_TYPE *type)
{
    int ret;

    if (c->cipher->get_asn1_parameters != NULL)
        ret = c->cipher->get_asn1_parameters(c, type);
    else if (c->cipher->flags & EVP_CIPH_FLAG_DEFAULT_ASN1) {
        if (EVP_CIPHER_CTX_mode(c) == EVP_CIPH_WRAP_MODE)
            return 1;
        ret = EVP_CIPHER_get_asn1_iv(c, type);
    } else
        ret = -1;
    return ret;
}
static int cms_wrap_init(CMS_KeyAgreeRecipientInfo *kari,
                         const EVP_CIPHER *cipher)
{
    EVP_CIPHER_CTX *ctx = kari->ctx;
    const EVP_CIPHER *kekcipher;
#ifndef OPENSSL_NO_AES
    int keylen = EVP_CIPHER_key_length(cipher);
#endif

    /* If a suitable wrap algorithm is already set nothing to do */
    kekcipher = EVP_CIPHER_CTX_cipher(ctx);
    if (kekcipher) {
        if (EVP_CIPHER_CTX_mode(ctx) != EVP_CIPH_WRAP_MODE)
            return 0;
        return 1;
    }
    /*
     * Pick a cipher based on content encryption cipher. If it is DES3 use
     * DES3 wrap, otherwise use an AES wrap matching the key size.
     */
#if !defined(OPENSSL_NO_DES) && !defined(OPENSSL_NO_SHA)
    /* EVP_des_ede3_wrap() depends on EVP_sha1() */
    if (EVP_CIPHER_type(cipher) == NID_des_ede3_cbc)
        kekcipher = EVP_des_ede3_wrap();
    else
#endif
#ifndef OPENSSL_NO_SMS4
    if (EVP_CIPHER_type(cipher) == NID_sms4_cbc
        || EVP_CIPHER_type(cipher) == NID_sm1_cbc
        || EVP_CIPHER_type(cipher) == NID_ssf33_cbc)
        kekcipher = EVP_sms4_wrap();
    else
#endif
#ifndef OPENSSL_NO_AES
    if (keylen <= 16)
        kekcipher = EVP_aes_128_wrap();
    else if (keylen <= 24)
        kekcipher = EVP_aes_192_wrap();
    else
        kekcipher = EVP_aes_256_wrap();
#endif
    if (kekcipher == NULL)
        return 0;
    return EVP_EncryptInit_ex(ctx, kekcipher, NULL, NULL, NULL);
}
static int sms4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                         const unsigned char *iv, int enc)
{
    int mode;
    EVP_SMS4_KEY *dat = EVP_C_DATA(EVP_SMS4_KEY, ctx);

    mode = EVP_CIPHER_CTX_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) {
        sms4_set_decrypt_key(&dat->ks, key);
    } else {
        sms4_set_encrypt_key(&dat->ks, key);
    }
    /* SMS4 decryption is the encryption routine run with the reversed key
     * schedule, so the same block function serves both directions. */
    dat->block = (block128_f)sms4_encrypt;
    dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
        (cbc128_f)sms4_cbc_encrypt : NULL;
    return 1;
}
/* The subkey for ARIA is generated. */
static int aria_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                         const unsigned char *iv, int enc)
{
    int ret;
    int mode = EVP_CIPHER_CTX_mode(ctx);

    if (enc || (mode != EVP_CIPH_ECB_MODE && mode != EVP_CIPH_CBC_MODE))
        ret = aria_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                   EVP_CIPHER_CTX_get_cipher_data(ctx));
    else
        ret = aria_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                   EVP_CIPHER_CTX_get_cipher_data(ctx));
    if (ret < 0) {
        EVPerr(EVP_F_ARIA_INIT_KEY, EVP_R_ARIA_KEY_SETUP_FAILED);
        return 0;
    }
    return 1;
}
static size_t aead_tls_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len,
                               const size_t extra_in_len) {
  assert(extra_in_len == 0);
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  const size_t hmac_len = HMAC_size(&tls_ctx->hmac_ctx);
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE) {
    // The NULL cipher.
    return hmac_len;
  }

  const size_t block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  // An overflow of |in_len + hmac_len| doesn't affect the result mod
  // |block_size|, provided that |block_size| is a smaller power of two.
  assert(block_size != 0 && (block_size & (block_size - 1)) == 0);
  const size_t pad_len = block_size - (in_len + hmac_len) % block_size;
  return hmac_len + pad_len;
}
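/*
 * A worked instance of the arithmetic above (illustrative only): with
 * AES-CBC (block_size = 16) and HMAC-SHA1 (hmac_len = 20), an in_len of
 * 100 gives pad_len = 16 - (100 + 20) % 16 = 8, so the sealed record
 * carries 20 + 8 = 28 bytes of overhead. The helper below just restates
 * the computation outside the AEAD context.
 */
#include <assert.h>
#include <stddef.h>

static size_t tls_cbc_overhead(size_t in_len, size_t hmac_len,
                               size_t block_size) {
  /* The mod trick requires a power-of-two block size. */
  assert(block_size != 0 && (block_size & (block_size - 1)) == 0);
  size_t pad_len = block_size - (in_len + hmac_len) % block_size;
  return hmac_len + pad_len;  /* 28 for the example above */
}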
int EVP_CIPHER_param_to_asn1(EVP_CIPHER_CTX *c, ASN1_TYPE *type)
{
    int ret;

    if (c->cipher->set_asn1_parameters != NULL)
        ret = c->cipher->set_asn1_parameters(c, type);
    else if (c->cipher->flags & EVP_CIPH_FLAG_DEFAULT_ASN1) {
        if (EVP_CIPHER_CTX_mode(c) == EVP_CIPH_WRAP_MODE) {
            ASN1_TYPE_set(type, V_ASN1_NULL, NULL);
            ret = 1;
        } else
            ret = EVP_CIPHER_set_asn1_iv(c, type);
    } else
        ret = -1;
    return ret;
}
STDMETHODIMP CBCipher::put_IV(VARIANT Val)
{
    if (!m_ctx.cipher)
        return SetErrorInfo(s_strAlgoError);
    if (EVP_CIPHER_CTX_iv_length(&m_ctx) == 0 || EVP_CIPHER_CTX_mode(&m_ctx) < 2)
        return E_NOTIMPL;

    HRESULT hr;
    CBVarPtr varPtr;
    hr = varPtr.Attach(Val);
    if (FAILED(hr))
        return hr;
    if (varPtr.m_nSize != EVP_CIPHER_CTX_iv_length(&m_ctx))
        return E_INVALIDARG;

    m_pIV.Allocate(EVP_CIPHER_CTX_iv_length(&m_ctx));
    CopyMemory(m_pIV, varPtr.m_pData, EVP_CIPHER_CTX_iv_length(&m_ctx));
    return S_OK;
}
STDMETHODIMP CBCipher::get_IV(VARIANT *pVal)
{
    if (!m_ctx.cipher)
        return SetErrorInfo(s_strAlgoError);
    if (EVP_CIPHER_CTX_iv_length(&m_ctx) == 0 || EVP_CIPHER_CTX_mode(&m_ctx) < 2)
        return E_NOTIMPL;

    if (m_pIV) {
        CBVarPtr varPtr;
        HRESULT hr;
        hr = varPtr.Create(EVP_CIPHER_CTX_iv_length(&m_ctx));
        if (FAILED(hr))
            return hr;
        CopyMemory(varPtr.m_pData, m_pIV, EVP_CIPHER_CTX_iv_length(&m_ctx));
        return varPtr.GetVariant(pVal);
    }
    /* No IV has been set or generated yet; *pVal is left unchanged. */
    return S_OK;
}
int
cipher_ctx_mode(const EVP_CIPHER_CTX *ctx)
{
    return EVP_CIPHER_CTX_mode(ctx);
}
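/*
 * A sketch (assumed, not taken from the sources above) of the kind of
 * branching these thin wrappers enable: stream ciphers (mode 0) and ECB
 * (mode 1) take no IV, everything else defers to the context.
 */
#include <openssl/evp.h>

static int cipher_ctx_needs_iv(const EVP_CIPHER_CTX *ctx)
{
    switch (EVP_CIPHER_CTX_mode(ctx)) {
    case EVP_CIPH_STREAM_CIPHER:
    case EVP_CIPH_ECB_MODE:
        return 0;               /* no IV in these modes */
    default:
        return EVP_CIPHER_CTX_iv_length(ctx) > 0;
    }
}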
int n_ssl3_mac(SSL *ssl, unsigned char *md, int send)
{
    SSL3_RECORD *rec;
    unsigned char *mac_sec, *seq;
    EVP_MD_CTX md_ctx;
    const EVP_MD_CTX *hash;
    unsigned char *p, rec_char;
    size_t md_size;
    int npad;
    int t;

    if (send) {
        rec = &(ssl->s3->wrec);
        mac_sec = &(ssl->s3->write_mac_secret[0]);
        seq = &(ssl->s3->write_sequence[0]);
        hash = ssl->write_hash;
    } else {
        rec = &(ssl->s3->rrec);
        mac_sec = &(ssl->s3->read_mac_secret[0]);
        seq = &(ssl->s3->read_sequence[0]);
        hash = ssl->read_hash;
    }

    t = EVP_MD_CTX_size(hash);
    if (t < 0)
        return -1;
    md_size = t;
    npad = (48 / md_size) * md_size;

    if (!send &&
        EVP_CIPHER_CTX_mode(ssl->enc_read_ctx) == EVP_CIPH_CBC_MODE &&
        ssl3_cbc_record_digest_supported(hash)) {
        /*
         * This is a CBC-encrypted record. We must avoid leaking any
         * timing-side channel information about how many blocks of
         * data we are hashing because that gives an attacker a
         * timing-oracle.
         */

        /*
         * npad is, at most, 48 bytes and that's with MD5:
         * 16 + 48 + 8 (sequence bytes) + 1 + 2 = 75.
         *
         * With SHA-1 (the largest hash speced for SSLv3) the hash size
         * goes up 4, but npad goes down by 8, resulting in a smaller
         * total size.
         */
        unsigned char header[75];
        unsigned j = 0;
        memcpy(header + j, mac_sec, md_size);
        j += md_size;
        memcpy(header + j, ssl3_pad_1, npad);
        j += npad;
        memcpy(header + j, seq, 8);
        j += 8;
        header[j++] = rec->type;
        header[j++] = rec->length >> 8;
        header[j++] = rec->length & 0xff;

        ssl3_cbc_digest_record(hash, md, &md_size, header, rec->input,
                               rec->length + md_size, rec->orig_len,
                               mac_sec, md_size, 1 /* is SSLv3 */);
    }
/* Prepare the encryption key for PadLock usage */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                                const unsigned char *iv, int enc)
{
    struct padlock_cipher_data *cdata;
    int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;
    unsigned long mode = EVP_CIPHER_CTX_mode(ctx);

    if (key == NULL)
        return 0;               /* ERROR */

    cdata = ALIGNED_CIPHER_DATA(ctx);
    memset(cdata, 0, sizeof(*cdata));

    /* Prepare Control word. */
    if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE)
        cdata->cword.b.encdec = 0;
    else
        cdata->cword.b.encdec = (EVP_CIPHER_CTX_encrypting(ctx) == 0);
    cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
    cdata->cword.b.ksize = (key_len - 128) / 64;

    switch (key_len) {
    case 128:
        /*
         * PadLock can generate an extended key for AES128 in hardware
         */
        memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
        cdata->cword.b.keygen = 0;
        break;

    case 192:
    case 256:
        /*
         * Generate an extended AES key in software. Needed for AES192/AES256
         */
        /*
         * Well, the above applies to Stepping 8 CPUs and is listed as
         * hardware errata. They most likely will fix it at some point and
         * then a check for stepping would be due here.
         */
        if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc)
            AES_set_decrypt_key(key, key_len, &cdata->ks);
        else
            AES_set_encrypt_key(key, key_len, &cdata->ks);
# ifndef AES_ASM
        /*
         * OpenSSL C functions use byte-swapped extended key.
         */
        padlock_key_bswap(&cdata->ks);
# endif
        cdata->cword.b.keygen = 1;
        break;

    default:
        /* ERROR */
        return 0;
    }

    /*
     * This is done to cover for cases when user reuses the
     * context for new key. The catch is that if we don't do
     * this, padlock_aes_cipher might proceed with old key...
     */
    padlock_reload_key();

    return 1;
}
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len, const uint8_t *ad,
                         size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    /* This requires that the caller provide space for the MAC, even though it
     * will always be removed on return. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_TOO_LARGE);
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Decrypt to get the plaintext + MAC + padding. */
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  /* Remove CBC padding. Code from here on is timing-sensitive with respect to
   * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
  int padding_ok;
  unsigned data_plus_mac_len, data_len;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    padding_ok = EVP_tls_cbc_remove_padding(
        &data_plus_mac_len, out, total,
        EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
        (unsigned)HMAC_size(&tls_ctx->hmac_ctx));
    /* Publicly invalid. This can be rejected in non-constant time. */
    if (padding_ok == 0) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = 1;
    data_plus_mac_len = total;
    /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
     * already been checked against the MAC size at the top of the function. */
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  /* At this point, |padding_ok| is 1 or -1. If 1, the padding is valid and
   * the first |data_plus_mac_size| bytes after |out| are the plaintext and
   * MAC. Either way, |data_plus_mac_size| is large enough to extract a MAC. */

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include
   * the length for legacy ciphers. */
  uint8_t ad_fixed[13];
  memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  /* Compute the MAC and extract the one in the record. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    /* We should support the constant-time path for all CBC-mode ciphers
     * implemented. */
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    HMAC_CTX hmac_ctx;
    HMAC_CTX_init(&hmac_ctx);
    unsigned mac_len_u;
    if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
        !HMAC_Update(&hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&hmac_ctx, out, data_len) ||
        !HMAC_Final(&hmac_ctx, mac, &mac_len_u)) {
      HMAC_CTX_cleanup(&hmac_ctx);
      return 0;
    }
    mac_len = mac_len_u;
    HMAC_CTX_cleanup(&hmac_ctx);
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  /* Perform the MAC check and the padding check in constant-time. It should
   * be safe to simply perform the padding check first, but it would not be
   * under a different choice of MAC location on padding failure. See
   * EVP_tls_cbc_remove_padding. */
  unsigned good =
      constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
  good &= constant_time_eq_int(padding_ok, 1);
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  /* End of timing-sensitive code. */

  *out_len = data_len;
  return 1;
}
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len, const uint8_t *ad,
                         size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len ||
      in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include
   * the length for legacy ciphers. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  HMAC_CTX hmac_ctx;
  HMAC_CTX_init(&hmac_ctx);
  if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
      !HMAC_Update(&hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&hmac_ctx, in, in_len) ||
      !HMAC_Final(&hmac_ctx, mac, &mac_len)) {
    HMAC_CTX_cleanup(&hmac_ctx);
    return 0;
  }
  HMAC_CTX_cleanup(&hmac_ctx);

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}
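/*
 * Worked example of the padding fill above (illustrative, assuming AES-CBC
 * with HMAC-SHA1): block_size = 16, in_len = 100, mac_len = 20, so
 * padding_len = 16 - (120 % 16) = 8 and memset() writes eight bytes of
 * value padding_len - 1 = 7. This is TLS CBC padding, where every padding
 * byte, including the trailing length byte, holds the same value.
 */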
int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                      ENGINE *impl, const unsigned char *key,
                      const unsigned char *iv, int enc)
{
    if (enc == -1)
        enc = ctx->encrypt;
    else {
        if (enc)
            enc = 1;
        ctx->encrypt = enc;
    }
#ifdef OPENSSL_FIPS
    if (FIPS_selftest_failed()) {
        FIPSerr(FIPS_F_EVP_CIPHERINIT_EX, FIPS_R_FIPS_SELFTEST_FAILED);
        ctx->cipher = &bad_cipher;
        return 0;
    }
#endif
#ifndef OPENSSL_NO_ENGINE
    /*
     * Whether it's nice or not, "Inits" can be used on "Final"'d contexts
     * so this context may already have an ENGINE! Try to avoid releasing
     * the previous handle, re-querying for an ENGINE, and having a
     * reinitialisation, when it may all be unnecessary.
     */
    if (ctx->engine && ctx->cipher
        && (!cipher || (cipher && (cipher->nid == ctx->cipher->nid))))
        goto skip_to_init;
#endif
    if (cipher) {
        /*
         * Ensure a context left lying around from last time is cleared
         * (the previous check attempted to avoid this if the same
         * ENGINE and EVP_CIPHER could be used).
         */
        EVP_CIPHER_CTX_cleanup(ctx);
        /* Restore encrypt field: it is zeroed by cleanup */
        ctx->encrypt = enc;
#ifndef OPENSSL_NO_ENGINE
        if (!do_evp_enc_engine(ctx, &cipher, impl))
            return 0;
#endif
        ctx->cipher = cipher;
        if (ctx->cipher->ctx_size) {
            ctx->cipher_data = OPENSSL_malloc(ctx->cipher->ctx_size);
            if (!ctx->cipher_data) {
                EVPerr(EVP_F_EVP_CIPHERINIT_EX, ERR_R_MALLOC_FAILURE);
                return 0;
            }
        } else {
            ctx->cipher_data = NULL;
        }
        ctx->key_len = cipher->key_len;
        ctx->flags = 0;
        if (ctx->cipher->flags & EVP_CIPH_CTRL_INIT) {
            if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_INIT, 0, NULL)) {
                EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_INITIALIZATION_ERROR);
                return 0;
            }
        }
    } else if (!ctx->cipher) {
        EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_NO_CIPHER_SET);
        return 0;
    }
#ifndef OPENSSL_NO_ENGINE
 skip_to_init:
#endif
    /* we assume block size is a power of 2 in *cryptUpdate */
    OPENSSL_assert(ctx->cipher->block_size == 1
                   || ctx->cipher->block_size == 8
                   || ctx->cipher->block_size == 16);

    if (!(EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_CUSTOM_IV)) {
        switch (EVP_CIPHER_CTX_mode(ctx)) {
        case EVP_CIPH_STREAM_CIPHER:
        case EVP_CIPH_ECB_MODE:
            break;

        case EVP_CIPH_CFB_MODE:
        case EVP_CIPH_OFB_MODE:
            ctx->num = 0;
            /* fall through */

        case EVP_CIPH_CBC_MODE:
            OPENSSL_assert(EVP_CIPHER_CTX_iv_length(ctx) <=
                           (int)sizeof(ctx->iv));
            if (iv)
                memcpy(ctx->oiv, iv, EVP_CIPHER_CTX_iv_length(ctx));
            memcpy(ctx->iv, ctx->oiv, EVP_CIPHER_CTX_iv_length(ctx));
            break;

        default:
            return 0;
        }
    }

#ifdef OPENSSL_FIPS
    /*
     * After 'key' is set no further parameter changes are permissible.
     * So only check for non FIPS enabling at this point.
     */
    if (key && FIPS_mode()) {
        if (!(ctx->cipher->flags & EVP_CIPH_FLAG_FIPS)
            && !(ctx->flags & EVP_CIPH_FLAG_NON_FIPS_ALLOW)) {
            EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_DISABLED_FOR_FIPS);
#if 0
            ERR_add_error_data(2, "cipher=", EVP_CIPHER_name(ctx->cipher));
#endif
            ctx->cipher = &bad_cipher;
            return 0;
        }
    }
#endif

    if (key || (ctx->cipher->flags & EVP_CIPH_ALWAYS_CALL_INIT)) {
        if (!ctx->cipher->init(ctx, key, iv, enc))
            return 0;
    }
    ctx->buf_len = 0;
    ctx->final_used = 0;
    ctx->block_mask = ctx->cipher->block_size - 1;
    return 1;
}
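/*
 * A minimal caller's sketch for the init path above (an illustration, not
 * part of the library): one-shot AES-128-CBC encryption, passing enc = 1
 * explicitly; enc = -1 would reuse the context's previous direction, as
 * the first lines of EVP_CipherInit_ex show. out must have room for
 * in_len plus one block of padding.
 */
#include <openssl/evp.h>

static int cbc_encrypt_oneshot(const unsigned char key[16],
                               const unsigned char iv[16],
                               const unsigned char *in, int in_len,
                               unsigned char *out, int *out_len)
{
    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();    /* OpenSSL >= 1.1.0 */
    int len = 0, total = 0, ok = 0;

    if (ctx == NULL)
        return 0;
    if (EVP_CipherInit_ex(ctx, EVP_aes_128_cbc(), NULL, key, iv, 1)
        && EVP_CipherUpdate(ctx, out, &len, in, in_len)) {
        total = len;
        if (EVP_CipherFinal_ex(ctx, out + total, &len)) {
            total += len;
            *out_len = total;
            ok = 1;
        }
    }
    EVP_CIPHER_CTX_free(ctx);
    return ok;
}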